Diffstat (limited to 'third_party/aom/av1/encoder')
-rw-r--r--  third_party/aom/av1/encoder/allintra_vis.c | 4
-rw-r--r--  third_party/aom/av1/encoder/aq_cyclicrefresh.c | 50
-rw-r--r--  third_party/aom/av1/encoder/arm/neon/av1_error_sve.c | 2
-rw-r--r--  third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c | 58
-rw-r--r--  third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c | 92
-rw-r--r--  third_party/aom/av1/encoder/av1_temporal_denoiser.c | 8
-rw-r--r--  third_party/aom/av1/encoder/bitstream.c | 19
-rw-r--r--  third_party/aom/av1/encoder/bitstream.h | 1
-rw-r--r--  third_party/aom/av1/encoder/block.h | 3
-rw-r--r--  third_party/aom/av1/encoder/cnn.c | 10
-rw-r--r--  third_party/aom/av1/encoder/encode_strategy.c | 27
-rw-r--r--  third_party/aom/av1/encoder/encodeframe.c | 20
-rw-r--r--  third_party/aom/av1/encoder/encodeframe_utils.c | 6
-rw-r--r--  third_party/aom/av1/encoder/encoder.c | 94
-rw-r--r--  third_party/aom/av1/encoder/encoder.h | 9
-rw-r--r--  third_party/aom/av1/encoder/encoder_alloc.h | 3
-rw-r--r--  third_party/aom/av1/encoder/encoder_utils.c | 20
-rw-r--r--  third_party/aom/av1/encoder/encodetxb.c | 26
-rw-r--r--  third_party/aom/av1/encoder/ethread.c | 8
-rw-r--r--  third_party/aom/av1/encoder/firstpass.c | 1
-rw-r--r--  third_party/aom/av1/encoder/global_motion.c | 82
-rw-r--r--  third_party/aom/av1/encoder/global_motion.h | 32
-rw-r--r--  third_party/aom/av1/encoder/global_motion_facade.c | 47
-rw-r--r--  third_party/aom/av1/encoder/k_means_template.h | 10
-rw-r--r--  third_party/aom/av1/encoder/lookahead.c | 19
-rw-r--r--  third_party/aom/av1/encoder/lookahead.h | 20
-rw-r--r--  third_party/aom/av1/encoder/nonrd_pickmode.c | 7
-rw-r--r--  third_party/aom/av1/encoder/palette.c | 2
-rw-r--r--  third_party/aom/av1/encoder/palette.h | 2
-rw-r--r--  third_party/aom/av1/encoder/partition_search.c | 48
-rw-r--r--  third_party/aom/av1/encoder/partition_strategy.c | 2
-rw-r--r--  third_party/aom/av1/encoder/pass2_strategy.c | 100
-rw-r--r--  third_party/aom/av1/encoder/pickcdef.c | 2
-rw-r--r--  third_party/aom/av1/encoder/picklpf.c | 21
-rw-r--r--  third_party/aom/av1/encoder/pickrst.c | 111
-rw-r--r--  third_party/aom/av1/encoder/ratectrl.c | 120
-rw-r--r--  third_party/aom/av1/encoder/ratectrl.h | 3
-rw-r--r--  third_party/aom/av1/encoder/speed_features.c | 9
-rw-r--r--  third_party/aom/av1/encoder/speed_features.h | 7
-rw-r--r--  third_party/aom/av1/encoder/superres_scale.c | 2
-rw-r--r--  third_party/aom/av1/encoder/svc_layercontext.c | 12
-rw-r--r--  third_party/aom/av1/encoder/svc_layercontext.h | 15
-rw-r--r--  third_party/aom/av1/encoder/temporal_filter.c | 21
-rw-r--r--  third_party/aom/av1/encoder/temporal_filter.h | 2
-rw-r--r--  third_party/aom/av1/encoder/tpl_model.c | 3
-rw-r--r--  third_party/aom/av1/encoder/tpl_model.h | 1
-rw-r--r--  third_party/aom/av1/encoder/tune_butteraugli.c | 10
-rw-r--r--  third_party/aom/av1/encoder/tune_vmaf.c | 105
-rw-r--r--  third_party/aom/av1/encoder/tune_vmaf.h | 6
-rw-r--r--  third_party/aom/av1/encoder/tx_search.c | 23
-rw-r--r--  third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c | 6
-rw-r--r--  third_party/aom/av1/encoder/x86/cnn_avx2.c | 2
52 files changed, 733 insertions, 580 deletions
diff --git a/third_party/aom/av1/encoder/allintra_vis.c b/third_party/aom/av1/encoder/allintra_vis.c
index 8dcef5fc85..87becb80ef 100644
--- a/third_party/aom/av1/encoder/allintra_vis.c
+++ b/third_party/aom/av1/encoder/allintra_vis.c
@@ -13,6 +13,8 @@
#include "config/aom_config.h"
+#include "aom_util/aom_pthread.h"
+
#if CONFIG_TFLITE
#include "tensorflow/lite/c/c_api.h"
#include "av1/encoder/deltaq4_model.c"
@@ -588,7 +590,7 @@ void av1_set_mb_wiener_variance(AV1_COMP *cpi) {
&cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0))
+ NULL, cpi->alloc_pyramid, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
av1_alloc_mb_wiener_var_pred_buf(&cpi->common, &cpi->td);
diff --git a/third_party/aom/av1/encoder/aq_cyclicrefresh.c b/third_party/aom/av1/encoder/aq_cyclicrefresh.c
index f48ff11e51..1aa8dde323 100644
--- a/third_party/aom/av1/encoder/aq_cyclicrefresh.c
+++ b/third_party/aom/av1/encoder/aq_cyclicrefresh.c
@@ -15,6 +15,7 @@
#include "av1/common/pred_common.h"
#include "av1/common/seg_common.h"
#include "av1/encoder/aq_cyclicrefresh.h"
+#include "av1/encoder/encoder_utils.h"
#include "av1/encoder/ratectrl.h"
#include "av1/encoder/segmentation.h"
#include "av1/encoder/tokenize.h"
@@ -295,6 +296,7 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
const CommonModeInfoParams *const mi_params = &cm->mi_params;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
unsigned char *const seg_map = cpi->enc_seg.map;
+ unsigned char *const active_map_4x4 = cpi->active_map.map;
int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame;
int xmis, ymis, x, y;
uint64_t sb_sad = 0;
@@ -302,7 +304,12 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
uint64_t thresh_sad = INT64_MAX;
const int mi_rows = mi_params->mi_rows, mi_cols = mi_params->mi_cols;
const int mi_stride = mi_cols;
- memset(seg_map, CR_SEGMENT_ID_BASE, mi_rows * mi_cols);
+ // Don't set seg_map to 0 if active_maps is enabled. Active_maps will set
+ // seg_map to either 7 or 0 (AM_SEGMENT_ID_INACTIVE/ACTIVE), and cyclic
+ // refresh set below (segment 1 or 2) will only be set for ACTIVE blocks.
+ if (!cpi->active_map.enabled) {
+ memset(seg_map, CR_SEGMENT_ID_BASE, mi_rows * mi_cols);
+ }
sb_cols = (mi_cols + cm->seq_params->mib_size - 1) / cm->seq_params->mib_size;
sb_rows = (mi_rows + cm->seq_params->mib_size - 1) / cm->seq_params->mib_size;
sbs_in_frame = sb_cols * sb_rows;
@@ -357,7 +364,10 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
// for possible boost/refresh (segment 1). The segment id may get
// reset to 0 later if block gets coded anything other than low motion.
// If the block_sad (sb_sad) is very low label it for refresh anyway.
- if (cr->map[bl_index2] == 0 || sb_sad < thresh_sad_low) {
+ // If active_maps is enabled, only allow for setting on ACTIVE blocks.
+ if ((cr->map[bl_index2] == 0 || sb_sad < thresh_sad_low) &&
+ (!cpi->active_map.enabled ||
+ active_map_4x4[bl_index2] == AM_SEGMENT_ID_ACTIVE)) {
sum_map += 4;
} else if (cr->map[bl_index2] < 0) {
cr->map[bl_index2]++;
@@ -380,7 +390,8 @@ static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
cr->sb_index = i;
if (cr->target_num_seg_blocks == 0) {
// Disable segmentation, seg_map is already set to 0 above.
- av1_disable_segmentation(&cm->seg);
+ // Don't disable if active_map is being used.
+ if (!cpi->active_map.enabled) av1_disable_segmentation(&cm->seg);
}
}
@@ -423,8 +434,6 @@ void av1_cyclic_refresh_update_parameters(AV1_COMP *const cpi) {
// function av1_cyclic_reset_segment_skip(). Skipping over
// 4x4 will therefore have small bdrate loss (~0.2%), so
// we use it only for speed > 9 for now.
- // Also if loop-filter deltas is applied via segment, then
- // we need to set cr->skip_over4x4 = 1.
cr->skip_over4x4 = (cpi->oxcf.speed > 9) ? 1 : 0;
// should we enable cyclic refresh on this frame.
@@ -450,6 +459,15 @@ void av1_cyclic_refresh_update_parameters(AV1_COMP *const cpi) {
else
cr->percent_refresh = 10 + cr->percent_refresh_adjustment;
+ if (cpi->active_map.enabled) {
+ // Scale down the percent_refresh to target the active blocks only.
+ cr->percent_refresh =
+ cr->percent_refresh * (100 - cpi->rc.percent_blocks_inactive) / 100;
+ if (cr->percent_refresh == 0) {
+ cr->apply_cyclic_refresh = 0;
+ }
+ }
+
cr->max_qdelta_perc = 60;
cr->time_for_refresh = 0;
cr->use_block_sad_scene_det =
@@ -543,10 +561,14 @@ void av1_cyclic_refresh_setup(AV1_COMP *const cpi) {
if (resolution_change) av1_cyclic_refresh_reset_resize(cpi);
if (!cr->apply_cyclic_refresh) {
- // Set segmentation map to 0 and disable.
- unsigned char *const seg_map = cpi->enc_seg.map;
- memset(seg_map, 0, cm->mi_params.mi_rows * cm->mi_params.mi_cols);
- av1_disable_segmentation(&cm->seg);
+ // Don't disable and set seg_map to 0 if active_maps is enabled, unless
+ // whole frame is set as inactive (since we only apply cyclic_refresh to
+ // active blocks).
+ if (!cpi->active_map.enabled || cpi->rc.percent_blocks_inactive == 100) {
+ unsigned char *const seg_map = cpi->enc_seg.map;
+ memset(seg_map, 0, cm->mi_params.mi_rows * cm->mi_params.mi_cols);
+ av1_disable_segmentation(&cm->seg);
+ }
if (frame_is_intra_only(cm) || scene_change_detected ||
cpi->ppi->rtc_ref.bias_recovery_frame) {
cr->sb_index = 0;
@@ -574,9 +596,11 @@ void av1_cyclic_refresh_setup(AV1_COMP *const cpi) {
cr->thresh_rate_sb = INT64_MAX;
}
// Set up segmentation.
- // Clear down the segment map.
av1_enable_segmentation(&cm->seg);
- av1_clearall_segfeatures(seg);
+ if (!cpi->active_map.enabled) {
+ // Clear down the segment map, only if active_maps is not enabled.
+ av1_clearall_segfeatures(seg);
+ }
// Note: setting temporal_update has no effect, as the seg-map coding method
// (temporal or spatial) is determined in
@@ -644,6 +668,10 @@ void av1_cyclic_refresh_reset_resize(AV1_COMP *const cpi) {
int av1_cyclic_refresh_disable_lf_cdef(AV1_COMP *const cpi) {
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
const int qindex = cpi->common.quant_params.base_qindex;
+ if (cpi->active_map.enabled &&
+ cpi->rc.percent_blocks_inactive >
+ cpi->sf.rt_sf.thresh_active_maps_skip_lf_cdef)
+ return 1;
if (cpi->rc.frames_since_key > 30 && cr->percent_refresh > 0 &&
cr->counter_encode_maxq_scene_change > 300 / cr->percent_refresh &&
cpi->rc.frame_source_sad < 1000 &&
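A minimal sketch (not part of the patch) of the percent_refresh scaling introduced above: the refresh budget shrinks in proportion to the active area, and cyclic refresh turns off once the budget reaches zero. Values here are hypothetical.

    #include <stdio.h>

    int main(void) {
      const int percent_refresh = 10;  // nominal refresh budget
      for (int inactive = 0; inactive <= 100; inactive += 25) {
        // Same integer arithmetic as in av1_cyclic_refresh_update_parameters().
        const int scaled = percent_refresh * (100 - inactive) / 100;
        printf("percent_blocks_inactive=%3d -> percent_refresh=%2d%s\n",
               inactive, scaled, scaled == 0 ? "  (cyclic refresh off)" : "");
      }
      return 0;
    }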
diff --git a/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c b/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c
index 63aad0b785..52803a9838 100644
--- a/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c
+++ b/third_party/aom/av1/encoder/arm/neon/av1_error_sve.c
@@ -14,7 +14,7 @@
#include "config/aom_config.h"
#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/arm/dot_sve.h"
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
int64_t av1_block_error_sve(const tran_low_t *coeff, const tran_low_t *dqcoeff,
diff --git a/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c b/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c
index 5a52e701a2..919521fec7 100644
--- a/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c
+++ b/third_party/aom/av1/encoder/arm/neon/temporal_filter_neon_dotprod.c
@@ -23,7 +23,15 @@
#define SSE_STRIDE (BW + 4)
// clang-format off
+// Table used to pad the first and last columns and apply the sliding window.
+DECLARE_ALIGNED(16, static const uint8_t, kLoadPad[4][16]) = {
+ { 2, 2, 2, 3, 4, 255, 255, 255, 255, 2, 2, 3, 4, 5, 255, 255 },
+ { 255, 255, 2, 3, 4, 5, 6, 255, 255, 255, 255, 3, 4, 5, 6, 7 },
+ { 0, 1, 2, 3, 4, 255, 255, 255, 255, 1, 2, 3, 4, 5, 255, 255 },
+ { 255, 255, 2, 3, 4, 5, 5, 255, 255, 255, 255, 3, 4, 5, 5, 5 }
+};
+// For columns that don't need to be padded it's just a simple mask.
DECLARE_ALIGNED(16, static const uint8_t, kSlidingWindowMask[]) = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00,
@@ -56,22 +64,6 @@ static INLINE void get_abs_diff(const uint8_t *frame1, const uint32_t stride1,
} while (++i < block_height);
}
-static INLINE uint8x16_t load_and_pad(const uint8_t *src, const uint32_t col,
- const uint32_t block_width) {
- uint8x8_t s = vld1_u8(src);
-
- if (col == 0) {
- const uint8_t lane2 = vget_lane_u8(s, 2);
- s = vset_lane_u8(lane2, s, 0);
- s = vset_lane_u8(lane2, s, 1);
- } else if (col >= block_width - 4) {
- const uint8_t lane5 = vget_lane_u8(s, 5);
- s = vset_lane_u8(lane5, s, 6);
- s = vset_lane_u8(lane5, s, 7);
- }
- return vcombine_u8(s, s);
-}
-
static void apply_temporal_filter(
const uint8_t *frame, const unsigned int stride, const uint32_t block_width,
const uint32_t block_height, const int *subblock_mses,
@@ -84,6 +76,10 @@ static void apply_temporal_filter(
uint32_t acc_5x5_neon[BH][BW];
const uint8x16x2_t vmask = vld1q_u8_x2(kSlidingWindowMask);
+ const uint8x16_t pad_tbl0 = vld1q_u8(kLoadPad[0]);
+ const uint8x16_t pad_tbl1 = vld1q_u8(kLoadPad[1]);
+ const uint8x16_t pad_tbl2 = vld1q_u8(kLoadPad[2]);
+ const uint8x16_t pad_tbl3 = vld1q_u8(kLoadPad[3]);
// Traverse 4 columns at a time - first and last two columns need padding.
for (uint32_t col = 0; col < block_width; col += 4) {
@@ -92,9 +88,18 @@ static void apply_temporal_filter(
// Load, pad (for first and last two columns) and mask 3 rows from the top.
for (int i = 2; i < 5; i++) {
- const uint8x16_t s = load_and_pad(src, col, block_width);
- vsrc[i][0] = vandq_u8(s, vmask.val[0]);
- vsrc[i][1] = vandq_u8(s, vmask.val[1]);
+ uint8x8_t s = vld1_u8(src);
+ uint8x16_t s_dup = vcombine_u8(s, s);
+ if (col == 0) {
+ vsrc[i][0] = vqtbl1q_u8(s_dup, pad_tbl0);
+ vsrc[i][1] = vqtbl1q_u8(s_dup, pad_tbl1);
+ } else if (col >= block_width - 4) {
+ vsrc[i][0] = vqtbl1q_u8(s_dup, pad_tbl2);
+ vsrc[i][1] = vqtbl1q_u8(s_dup, pad_tbl3);
+ } else {
+ vsrc[i][0] = vandq_u8(s_dup, vmask.val[0]);
+ vsrc[i][1] = vandq_u8(s_dup, vmask.val[1]);
+ }
src += SSE_STRIDE;
}
@@ -130,9 +135,18 @@ static void apply_temporal_filter(
if (row <= block_height - 4) {
// Load next row into the bottom of the sliding window.
- uint8x16_t s = load_and_pad(src, col, block_width);
- vsrc[4][0] = vandq_u8(s, vmask.val[0]);
- vsrc[4][1] = vandq_u8(s, vmask.val[1]);
+ uint8x8_t s = vld1_u8(src);
+ uint8x16_t s_dup = vcombine_u8(s, s);
+ if (col == 0) {
+ vsrc[4][0] = vqtbl1q_u8(s_dup, pad_tbl0);
+ vsrc[4][1] = vqtbl1q_u8(s_dup, pad_tbl1);
+ } else if (col >= block_width - 4) {
+ vsrc[4][0] = vqtbl1q_u8(s_dup, pad_tbl2);
+ vsrc[4][1] = vqtbl1q_u8(s_dup, pad_tbl3);
+ } else {
+ vsrc[4][0] = vandq_u8(s_dup, vmask.val[0]);
+ vsrc[4][1] = vandq_u8(s_dup, vmask.val[1]);
+ }
src += SSE_STRIDE;
} else {
// Pad the bottom 2 rows.
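A scalar sketch (not part of the patch) of why the kLoadPad tables above can replace both load_and_pad() and the AND with the mask: vqtbl1q_u8 returns 0 for any out-of-range index, so the 255 entries zero the masked lanes while the in-range entries replicate the edge columns.

    #include <stdint.h>
    #include <stdio.h>

    // Scalar model of vqtbl1q_u8: out-of-range indices produce 0.
    static void tbl16(const uint8_t src[16], const uint8_t idx[16],
                      uint8_t dst[16]) {
      for (int i = 0; i < 16; i++) dst[i] = (idx[i] < 16) ? src[idx[i]] : 0;
    }

    int main(void) {
      // s_dup = vcombine_u8(s, s): eight loaded bytes duplicated to 16 lanes.
      const uint8_t s_dup[16] = { 10, 11, 12, 13, 14, 15, 16, 17,
                                  10, 11, 12, 13, 14, 15, 16, 17 };
      const uint8_t pad_tbl0[16] = { 2, 2, 2, 3, 4, 255, 255, 255,
                                     255, 2, 2, 3, 4, 5, 255, 255 };  // kLoadPad[0]
      uint8_t out[16];
      tbl16(s_dup, pad_tbl0, out);
      // Lane 2 (value 12) lands in lanes 0-1, matching the old
      // load_and_pad() col == 0 path, with the mask applied for free.
      for (int i = 0; i < 16; i++) printf("%u ", out[i]);
      printf("\n");
      return 0;
    }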
diff --git a/third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c b/third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c
new file mode 100644
index 0000000000..521601a3f3
--- /dev/null
+++ b/third_party/aom/av1/encoder/arm/neon/wedge_utils_sve.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "aom_dsp/arm/aom_neon_sve_bridge.h"
+#include "aom_dsp/arm/sum_neon.h"
+#include "av1/common/reconinter.h"
+
+uint64_t av1_wedge_sse_from_residuals_sve(const int16_t *r1, const int16_t *d,
+ const uint8_t *m, int N) {
+ assert(N % 64 == 0);
+
+ // Predicate pattern with first 8 elements true.
+ const svbool_t pattern = svptrue_pat_b16(SV_VL8);
+ int64x2_t sse[2] = { vdupq_n_s64(0), vdupq_n_s64(0) };
+
+ int i = 0;
+ do {
+ int32x4_t sum[4];
+ int16x8_t sum_s16[2];
+
+ const int16x8_t r1_l = vld1q_s16(r1 + i);
+ const int16x8_t r1_h = vld1q_s16(r1 + i + 8);
+ const int16x8_t d_l = vld1q_s16(d + i);
+ const int16x8_t d_h = vld1q_s16(d + i + 8);
+
+ // Use a zero-extending load to widen the vector elements.
+ const int16x8_t m_l = svget_neonq_s16(svld1ub_s16(pattern, m + i));
+ const int16x8_t m_h = svget_neonq_s16(svld1ub_s16(pattern, m + i + 8));
+
+ sum[0] = vshll_n_s16(vget_low_s16(r1_l), WEDGE_WEIGHT_BITS);
+ sum[1] = vshll_n_s16(vget_high_s16(r1_l), WEDGE_WEIGHT_BITS);
+ sum[2] = vshll_n_s16(vget_low_s16(r1_h), WEDGE_WEIGHT_BITS);
+ sum[3] = vshll_n_s16(vget_high_s16(r1_h), WEDGE_WEIGHT_BITS);
+
+ sum[0] = vmlal_s16(sum[0], vget_low_s16(m_l), vget_low_s16(d_l));
+ sum[1] = vmlal_s16(sum[1], vget_high_s16(m_l), vget_high_s16(d_l));
+ sum[2] = vmlal_s16(sum[2], vget_low_s16(m_h), vget_low_s16(d_h));
+ sum[3] = vmlal_s16(sum[3], vget_high_s16(m_h), vget_high_s16(d_h));
+
+ sum_s16[0] = vcombine_s16(vqmovn_s32(sum[0]), vqmovn_s32(sum[1]));
+ sum_s16[1] = vcombine_s16(vqmovn_s32(sum[2]), vqmovn_s32(sum[3]));
+
+ sse[0] = aom_sdotq_s16(sse[0], sum_s16[0], sum_s16[0]);
+ sse[1] = aom_sdotq_s16(sse[1], sum_s16[1], sum_s16[1]);
+
+ i += 16;
+ } while (i < N);
+
+ const uint64_t csse =
+ (uint64_t)horizontal_add_s64x2(vaddq_s64(sse[0], sse[1]));
+ return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
+}
+
+int8_t av1_wedge_sign_from_residuals_sve(const int16_t *ds, const uint8_t *m,
+ int N, int64_t limit) {
+ assert(N % 16 == 0);
+
+ // Predicate pattern with first 8 elements true.
+ svbool_t pattern = svptrue_pat_b16(SV_VL8);
+ int64x2_t acc_l = vdupq_n_s64(0);
+ int64x2_t acc_h = vdupq_n_s64(0);
+
+ do {
+ const int16x8_t ds_l = vld1q_s16(ds);
+ const int16x8_t ds_h = vld1q_s16(ds + 8);
+
+ // Use a zero-extending load to widen the vector elements.
+ const int16x8_t m_l = svget_neonq_s16(svld1ub_s16(pattern, m));
+ const int16x8_t m_h = svget_neonq_s16(svld1ub_s16(pattern, m + 8));
+
+ acc_l = aom_sdotq_s16(acc_l, ds_l, m_l);
+ acc_h = aom_sdotq_s16(acc_h, ds_h, m_h);
+
+ ds += 16;
+ m += 16;
+ N -= 16;
+ } while (N != 0);
+
+ const int64x2_t sum = vaddq_s64(acc_l, acc_h);
+ return horizontal_add_s64x2(sum) > limit;
+}
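A scalar sketch (not part of the patch) of the quantity the SSE kernel above vectorizes; the saturation models the per-lane vqmovn_s32 narrowing, and WEDGE_WEIGHT_BITS is 6 in av1/common/reconinter.h.

    #include <stdint.h>

    #define WEDGE_WEIGHT_BITS 6
    #define ROUND_POWER_OF_TWO(value, n) \
      (((value) + (((uint64_t)1 << (n)) >> 1)) >> (n))

    static int32_t saturate_s16(int32_t v) {  // models vqmovn_s32 per lane
      return v < INT16_MIN ? INT16_MIN : (v > INT16_MAX ? INT16_MAX : v);
    }

    static uint64_t wedge_sse_scalar(const int16_t *r1, const int16_t *d,
                                     const uint8_t *m, int N) {
      uint64_t csse = 0;
      for (int i = 0; i < N; i++) {
        // Multiply instead of shifting, since r1[i] may be negative.
        const int32_t t =
            saturate_s16(r1[i] * (1 << WEDGE_WEIGHT_BITS) + m[i] * d[i]);
        csse += (uint64_t)((int64_t)t * t);
      }
      return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
    }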
diff --git a/third_party/aom/av1/encoder/av1_temporal_denoiser.c b/third_party/aom/av1/encoder/av1_temporal_denoiser.c
index 3012df6311..d4a1625612 100644
--- a/third_party/aom/av1/encoder/av1_temporal_denoiser.c
+++ b/third_party/aom/av1/encoder/av1_temporal_denoiser.c
@@ -489,7 +489,7 @@ static int av1_denoiser_realloc_svc_helper(AV1_COMMON *cm,
&denoiser->running_avg_y[fb_idx], cm->width, cm->height,
cm->seq_params->subsampling_x, cm->seq_params->subsampling_y,
cm->seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
if (fail) {
av1_denoiser_free(denoiser);
return 1;
@@ -577,7 +577,7 @@ int av1_denoiser_alloc(AV1_COMMON *cm, struct SVC *svc, AV1_DENOISER *denoiser,
fail = aom_alloc_frame_buffer(
&denoiser->running_avg_y[i + denoiser->num_ref_frames * layer],
denoise_width, denoise_height, ssx, ssy, use_highbitdepth, border,
- legacy_byte_alignment, 0, 0);
+ legacy_byte_alignment, false, 0);
if (fail) {
av1_denoiser_free(denoiser);
return 1;
@@ -589,7 +589,7 @@ int av1_denoiser_alloc(AV1_COMMON *cm, struct SVC *svc, AV1_DENOISER *denoiser,
fail = aom_alloc_frame_buffer(
&denoiser->mc_running_avg_y[layer], denoise_width, denoise_height, ssx,
- ssy, use_highbitdepth, border, legacy_byte_alignment, 0, 0);
+ ssy, use_highbitdepth, border, legacy_byte_alignment, false, 0);
if (fail) {
av1_denoiser_free(denoiser);
return 1;
@@ -600,7 +600,7 @@ int av1_denoiser_alloc(AV1_COMMON *cm, struct SVC *svc, AV1_DENOISER *denoiser,
// layer.
fail = aom_alloc_frame_buffer(&denoiser->last_source, width, height, ssx, ssy,
use_highbitdepth, border, legacy_byte_alignment,
- 0, 0);
+ false, 0);
if (fail) {
av1_denoiser_free(denoiser);
return 1;
diff --git a/third_party/aom/av1/encoder/bitstream.c b/third_party/aom/av1/encoder/bitstream.c
index 219784fedf..9981871147 100644
--- a/third_party/aom/av1/encoder/bitstream.c
+++ b/third_party/aom/av1/encoder/bitstream.c
@@ -3391,8 +3391,8 @@ int av1_write_uleb_obu_size(size_t obu_header_size, size_t obu_payload_size,
return AOM_CODEC_OK;
}
-size_t av1_obu_memmove(size_t obu_header_size, size_t obu_payload_size,
- uint8_t *data) {
+static size_t obu_memmove(size_t obu_header_size, size_t obu_payload_size,
+ uint8_t *data) {
const size_t length_field_size = aom_uleb_size_in_bytes(obu_payload_size);
const size_t move_dst_offset = length_field_size + obu_header_size;
const size_t move_src_offset = obu_header_size;
@@ -3581,7 +3581,7 @@ static void write_large_scale_tile_obu_size(
*total_size += lst_obu->tg_hdr_size;
const uint32_t obu_payload_size = *total_size - lst_obu->tg_hdr_size;
const size_t length_field_size =
- av1_obu_memmove(lst_obu->tg_hdr_size, obu_payload_size, dst);
+ obu_memmove(lst_obu->tg_hdr_size, obu_payload_size, dst);
if (av1_write_uleb_obu_size(lst_obu->tg_hdr_size, obu_payload_size, dst) !=
AOM_CODEC_OK)
assert(0);
@@ -3806,7 +3806,7 @@ void av1_write_last_tile_info(
const uint32_t obu_payload_size =
(uint32_t)(*curr_tg_data_size) - obu_header_size;
const size_t length_field_size =
- av1_obu_memmove(obu_header_size, obu_payload_size, curr_tg_start);
+ obu_memmove(obu_header_size, obu_payload_size, curr_tg_start);
if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size,
curr_tg_start) != AOM_CODEC_OK) {
assert(0);
@@ -4015,8 +4015,8 @@ static void write_tile_obu_size(AV1_COMP *const cpi, uint8_t *const dst,
// to pack the smaller bitstream of such frames. This function computes the
// number of required number of workers based on setup time overhead and job
// dispatch time overhead for given tiles and available workers.
-int calc_pack_bs_mt_workers(const TileDataEnc *tile_data, int num_tiles,
- int avail_workers, bool pack_bs_mt_enabled) {
+static int calc_pack_bs_mt_workers(const TileDataEnc *tile_data, int num_tiles,
+ int avail_workers, bool pack_bs_mt_enabled) {
if (!pack_bs_mt_enabled) return 1;
uint64_t frame_abs_sum_level = 0;
@@ -4141,8 +4141,7 @@ static size_t av1_write_metadata_array(AV1_COMP *const cpi, uint8_t *dst) {
OBU_METADATA, 0, dst);
obu_payload_size =
av1_write_metadata_obu(current_metadata, dst + obu_header_size);
- length_field_size =
- av1_obu_memmove(obu_header_size, obu_payload_size, dst);
+ length_field_size = obu_memmove(obu_header_size, obu_payload_size, dst);
if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size, dst) ==
AOM_CODEC_OK) {
const size_t obu_size = obu_header_size + obu_payload_size;
@@ -4192,7 +4191,7 @@ int av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size,
obu_payload_size =
av1_write_sequence_header_obu(cm->seq_params, data + obu_header_size);
const size_t length_field_size =
- av1_obu_memmove(obu_header_size, obu_payload_size, data);
+ obu_memmove(obu_header_size, obu_payload_size, data);
if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size, data) !=
AOM_CODEC_OK) {
return AOM_CODEC_ERROR;
@@ -4217,7 +4216,7 @@ int av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size,
obu_payload_size = write_frame_header_obu(cpi, &cpi->td.mb.e_mbd, &saved_wb,
data + obu_header_size, 1);
- length_field = av1_obu_memmove(obu_header_size, obu_payload_size, data);
+ length_field = obu_memmove(obu_header_size, obu_payload_size, data);
if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size, data) !=
AOM_CODEC_OK) {
return AOM_CODEC_ERROR;
diff --git a/third_party/aom/av1/encoder/bitstream.h b/third_party/aom/av1/encoder/bitstream.h
index 12e8a630db..d037039593 100644
--- a/third_party/aom/av1/encoder/bitstream.h
+++ b/third_party/aom/av1/encoder/bitstream.h
@@ -21,6 +21,7 @@ extern "C" {
#include "av1/common/enums.h"
#include "av1/encoder/level.h"
#include "aom_dsp/bitwriter.h"
+#include "aom_util/aom_pthread.h"
struct aom_write_bit_buffer;
struct AV1_COMP;
diff --git a/third_party/aom/av1/encoder/block.h b/third_party/aom/av1/encoder/block.h
index 33d2d8c2a0..1baf3f942e 100644
--- a/third_party/aom/av1/encoder/block.h
+++ b/third_party/aom/av1/encoder/block.h
@@ -1348,6 +1348,9 @@ typedef struct macroblock {
//! Motion vector from superblock MV derived from int_pro_motion() in
// the variance_partitioning.
int_mv sb_me_mv;
+ //! Flag to indicate if a fixed partition should be used, only if the
+ // speed feature rt_sf->use_fast_fixed_part is enabled.
+ int sb_force_fixed_part;
//! SSE of the current predictor.
unsigned int pred_sse[REF_FRAMES];
//! Prediction for ML based partition.
diff --git a/third_party/aom/av1/encoder/cnn.c b/third_party/aom/av1/encoder/cnn.c
index 598b362753..b019ace685 100644
--- a/third_party/aom/av1/encoder/cnn.c
+++ b/third_party/aom/av1/encoder/cnn.c
@@ -138,14 +138,16 @@ static bool concat_tensor(const TENSOR *src, TENSOR *dst) {
return true;
}
-int check_tensor_equal_dims(TENSOR *t1, TENSOR *t2) {
+#ifndef NDEBUG
+static int check_tensor_equal_dims(TENSOR *t1, TENSOR *t2) {
return (t1->width == t2->width && t1->height == t2->height);
}
-int check_tensor_equal_size(TENSOR *t1, TENSOR *t2) {
+static int check_tensor_equal_size(TENSOR *t1, TENSOR *t2) {
return (t1->channels == t2->channels && t1->width == t2->width &&
t1->height == t2->height);
}
+#endif // NDEBUG
void av1_find_cnn_layer_output_size(int in_width, int in_height,
const CNN_LAYER_CONFIG *layer_config,
@@ -189,8 +191,8 @@ void av1_find_cnn_layer_output_size(int in_width, int in_height,
}
}
-void find_cnn_out_channels(const CNN_LAYER_CONFIG *layer_config,
- int channels_per_branch[]) {
+static void find_cnn_out_channels(const CNN_LAYER_CONFIG *layer_config,
+ int channels_per_branch[]) {
int branch = layer_config->branch;
const CNN_BRANCH_CONFIG *branch_config = &layer_config->branch_config;
for (int b = 0; b < CNN_MAX_BRANCHES; ++b) {
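A minimal sketch (not part of the patch) of the #ifndef NDEBUG pattern applied above: helpers referenced only from assert() are compiled out together with the assertions, avoiding unused-function warnings in release builds.

    #include <assert.h>

    #ifndef NDEBUG
    static int is_square(int w, int h) { return w == h; }
    #endif  // NDEBUG

    static void process(int w, int h) {
      assert(is_square(w, h));  // expands to ((void)0) when NDEBUG is set
      (void)w;
      (void)h;
    }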
diff --git a/third_party/aom/av1/encoder/encode_strategy.c b/third_party/aom/av1/encoder/encode_strategy.c
index 35ca83c3f4..db77dc0e3c 100644
--- a/third_party/aom/av1/encoder/encode_strategy.c
+++ b/third_party/aom/av1/encoder/encode_strategy.c
@@ -712,20 +712,6 @@ int av1_get_refresh_frame_flags(
}
#if !CONFIG_REALTIME_ONLY
-void setup_mi(AV1_COMP *const cpi, YV12_BUFFER_CONFIG *src) {
- AV1_COMMON *const cm = &cpi->common;
- const int num_planes = av1_num_planes(cm);
- MACROBLOCK *const x = &cpi->td.mb;
- MACROBLOCKD *const xd = &x->e_mbd;
-
- av1_setup_src_planes(x, src, 0, 0, num_planes, cm->seq_params->sb_size);
-
- av1_setup_block_planes(xd, cm->seq_params->subsampling_x,
- cm->seq_params->subsampling_y, num_planes);
-
- set_mi_offsets(&cm->mi_params, xd, 0, 0);
-}
-
// Apply temporal filtering to source frames and encode the filtered frame.
// If the current frame does not require filtering, this function is identical
// to av1_encode() except that tpl is not performed.
@@ -819,7 +805,7 @@ static int denoise_and_encode(AV1_COMP *const cpi, uint8_t *const dest,
oxcf->frm_dim_cfg.height, cm->seq_params->subsampling_x,
cm->seq_params->subsampling_y, cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0);
+ NULL, cpi->alloc_pyramid, 0);
if (ret)
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate tf_buf_second_arf");
@@ -923,7 +909,7 @@ static int denoise_and_encode(AV1_COMP *const cpi, uint8_t *const dest,
if (apply_filtering && is_psnr_calc_enabled(cpi)) {
cpi->source = av1_realloc_and_scale_if_required(
cm, source_buffer, &cpi->scaled_source, cm->features.interp_filter, 0,
- false, true, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ false, true, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid);
cpi->unscaled_source = source_buffer;
}
#if CONFIG_COLLECT_COMPONENT_TIMING
@@ -1702,8 +1688,7 @@ int av1_encode_strategy(AV1_COMP *const cpi, size_t *const size,
// This is used in rtc temporal filter case. Use true source in the PSNR
// calculation.
- if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf &&
- cpi->common.current_frame.frame_type != KEY_FRAME) {
+ if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf) {
assert(cpi->orig_source.buffer_alloc_sz > 0);
cpi->source = &cpi->orig_source;
}
@@ -1758,9 +1743,9 @@ int av1_encode_strategy(AV1_COMP *const cpi, size_t *const size,
cpi->svc.temporal_layer_id == 0 &&
cpi->unscaled_source->y_width == cpi->svc.source_last_TL0.y_width &&
cpi->unscaled_source->y_height == cpi->svc.source_last_TL0.y_height) {
- aom_yv12_copy_y(cpi->unscaled_source, &cpi->svc.source_last_TL0);
- aom_yv12_copy_u(cpi->unscaled_source, &cpi->svc.source_last_TL0);
- aom_yv12_copy_v(cpi->unscaled_source, &cpi->svc.source_last_TL0);
+ aom_yv12_copy_y(cpi->unscaled_source, &cpi->svc.source_last_TL0, 1);
+ aom_yv12_copy_u(cpi->unscaled_source, &cpi->svc.source_last_TL0, 1);
+ aom_yv12_copy_v(cpi->unscaled_source, &cpi->svc.source_last_TL0, 1);
}
return AOM_CODEC_OK;
diff --git a/third_party/aom/av1/encoder/encodeframe.c b/third_party/aom/av1/encoder/encodeframe.c
index e2213a8355..a9214f77c2 100644
--- a/third_party/aom/av1/encoder/encodeframe.c
+++ b/third_party/aom/av1/encoder/encodeframe.c
@@ -23,7 +23,7 @@
#include "aom_dsp/binary_codes_writer.h"
#include "aom_ports/mem.h"
#include "aom_ports/aom_timer.h"
-
+#include "aom_util/aom_pthread.h"
#if CONFIG_MISMATCH_DEBUG
#include "aom_util/debug_util.h"
#endif // CONFIG_MISMATCH_DEBUG
@@ -536,8 +536,8 @@ static AOM_INLINE void encode_nonrd_sb(AV1_COMP *cpi, ThreadData *td,
#endif
// Set the partition
if (sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip ||
- (sf->rt_sf.use_fast_fixed_part &&
- x->content_state_sb.source_sad_nonrd < kMedSad)) {
+ (sf->rt_sf.use_fast_fixed_part && x->sb_force_fixed_part == 1 &&
+ !frame_is_intra_only(cm))) {
// set a fixed-size partition
av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
BLOCK_SIZE bsize_select = sf->part_sf.fixed_partition_size;
@@ -1054,8 +1054,13 @@ static AOM_INLINE bool is_calc_src_content_needed(AV1_COMP *cpi,
// The threshold is determined based on kLowSad and kHighSad threshold and
// test results.
- const uint64_t thresh_low = 15000;
- const uint64_t thresh_high = 40000;
+ uint64_t thresh_low = 15000;
+ uint64_t thresh_high = 40000;
+
+ if (cpi->sf.rt_sf.increase_source_sad_thresh) {
+ thresh_low = thresh_low << 1;
+ thresh_high = thresh_high << 1;
+ }
if (avg_64x64_blk_sad > thresh_low && avg_64x64_blk_sad < thresh_high) {
do_calc_src_content = false;
@@ -1203,6 +1208,7 @@ static AOM_INLINE void encode_sb_row(AV1_COMP *cpi, ThreadData *td,
x->sb_me_block = 0;
x->sb_me_partition = 0;
x->sb_me_mv.as_int = 0;
+ x->sb_force_fixed_part = 1;
if (cpi->oxcf.mode == ALLINTRA) {
x->intra_sb_rdmult_modifier = 128;
@@ -1231,7 +1237,7 @@ static AOM_INLINE void encode_sb_row(AV1_COMP *cpi, ThreadData *td,
// Grade the temporal variation of the sb, the grade will be used to decide
// fast mode search strategy for coding blocks
- grade_source_content_sb(cpi, x, tile_data, mi_row, mi_col);
+ if (!seg_skip) grade_source_content_sb(cpi, x, tile_data, mi_row, mi_col);
// encode the superblock
if (use_nonrd_mode) {
@@ -2337,7 +2343,7 @@ void av1_encode_frame(AV1_COMP *cpi) {
// a source or a ref frame should have an image pyramid allocated.
// Check here so that issues can be caught early in debug mode
#if !defined(NDEBUG) && !CONFIG_REALTIME_ONLY
- if (cpi->image_pyramid_levels > 0) {
+ if (cpi->alloc_pyramid) {
assert(cpi->source->y_pyramid);
for (int ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame);
diff --git a/third_party/aom/av1/encoder/encodeframe_utils.c b/third_party/aom/av1/encoder/encodeframe_utils.c
index 949837184a..a8e4a88396 100644
--- a/third_party/aom/av1/encoder/encodeframe_utils.c
+++ b/third_party/aom/av1/encoder/encodeframe_utils.c
@@ -15,6 +15,7 @@
#include "av1/encoder/encoder.h"
#include "av1/encoder/encodeframe_utils.h"
+#include "av1/encoder/encoder_utils.h"
#include "av1/encoder/rdopt.h"
void av1_set_ssim_rdmult(const AV1_COMP *const cpi, int *errorperbit,
@@ -306,6 +307,7 @@ void av1_update_state(const AV1_COMP *const cpi, ThreadData *td,
// Else for cyclic refresh mode update the segment map, set the segment id
// and then update the quantizer.
if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ &&
+ mi_addr->segment_id != AM_SEGMENT_ID_INACTIVE &&
!cpi->rc.rtc_external_ratectrl) {
av1_cyclic_refresh_update_segment(cpi, x, mi_row, mi_col, bsize,
ctx->rd_stats.rate, ctx->rd_stats.dist,
@@ -1431,6 +1433,10 @@ void av1_source_content_sb(AV1_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
if ((tmp_sse - tmp_variance) < (sum_sq_thresh >> 1))
x->content_state_sb.low_sumdiff = 1;
+ if (tmp_sse > ((avg_source_sse_threshold_high * 7) >> 3) &&
+ !x->content_state_sb.lighting_change && !x->content_state_sb.low_sumdiff)
+ x->sb_force_fixed_part = 0;
+
if (!cpi->sf.rt_sf.use_rtc_tf || cpi->rc.high_source_sad ||
cpi->rc.frame_source_sad > 20000 || cpi->svc.number_spatial_layers > 1)
return;
diff --git a/third_party/aom/av1/encoder/encoder.c b/third_party/aom/av1/encoder/encoder.c
index fe053af5cc..1ddbfda08b 100644
--- a/third_party/aom/av1/encoder/encoder.c
+++ b/third_party/aom/av1/encoder/encoder.c
@@ -35,6 +35,7 @@
#include "aom_ports/aom_timer.h"
#include "aom_ports/mem.h"
#include "aom_scale/aom_scale.h"
+#include "aom_util/aom_pthread.h"
#if CONFIG_BITSTREAM_DEBUG
#include "aom_util/debug_util.h"
#endif // CONFIG_BITSTREAM_DEBUG
@@ -152,24 +153,33 @@ int av1_set_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
unsigned char *const active_map_4x4 = cpi->active_map.map;
const int mi_rows = mi_params->mi_rows;
const int mi_cols = mi_params->mi_cols;
- const int row_scale = mi_size_high_log2[BLOCK_16X16];
- const int col_scale = mi_size_wide_log2[BLOCK_16X16];
cpi->active_map.update = 0;
- assert(mi_rows % 2 == 0);
- assert(mi_cols % 2 == 0);
+ cpi->rc.percent_blocks_inactive = 0;
+ assert(mi_rows % 2 == 0 && mi_rows > 0);
+ assert(mi_cols % 2 == 0 && mi_cols > 0);
if (new_map_16x16) {
- for (int r = 0; r < (mi_rows >> row_scale); ++r) {
- for (int c = 0; c < (mi_cols >> col_scale); ++c) {
- const uint8_t val = new_map_16x16[r * cols + c]
+ int num_samples = 0;
+ int num_blocks_inactive = 0;
+ for (int r = 0; r < mi_rows; r += 4) {
+ for (int c = 0; c < mi_cols; c += 4) {
+ const uint8_t val = new_map_16x16[(r >> 2) * cols + (c >> 2)]
? AM_SEGMENT_ID_ACTIVE
: AM_SEGMENT_ID_INACTIVE;
- active_map_4x4[(2 * r + 0) * mi_cols + (c + 0)] = val;
- active_map_4x4[(2 * r + 0) * mi_cols + (c + 1)] = val;
- active_map_4x4[(2 * r + 1) * mi_cols + (c + 0)] = val;
- active_map_4x4[(2 * r + 1) * mi_cols + (c + 1)] = val;
+ num_samples++;
+ if (val == AM_SEGMENT_ID_INACTIVE) num_blocks_inactive++;
+ const int row_max = AOMMIN(4, mi_rows - r);
+ const int col_max = AOMMIN(4, mi_cols - c);
+ for (int x = 0; x < row_max; ++x) {
+ for (int y = 0; y < col_max; ++y) {
+ active_map_4x4[(r + x) * mi_cols + (c + y)] = val;
+ }
+ }
}
}
cpi->active_map.enabled = 1;
+ cpi->active_map.update = 1;
+ cpi->rc.percent_blocks_inactive =
+ (num_blocks_inactive * 100) / num_samples;
}
return 0;
}
@@ -943,14 +953,9 @@ void av1_change_config(struct AV1_COMP *cpi, const AV1EncoderConfig *oxcf,
#if CONFIG_REALTIME_ONLY
assert(!oxcf->tool_cfg.enable_global_motion);
- cpi->image_pyramid_levels = 0;
+ cpi->alloc_pyramid = false;
#else
- if (oxcf->tool_cfg.enable_global_motion) {
- cpi->image_pyramid_levels =
- global_motion_pyr_levels[default_global_motion_method];
- } else {
- cpi->image_pyramid_levels = 0;
- }
+ cpi->alloc_pyramid = oxcf->tool_cfg.enable_global_motion;
#endif // CONFIG_REALTIME_ONLY
}
@@ -2208,7 +2213,7 @@ void av1_set_frame_size(AV1_COMP *cpi, int width, int height) {
&cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0))
+ NULL, cpi->alloc_pyramid, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
@@ -2389,7 +2394,10 @@ static void loopfilter_frame(AV1_COMP *cpi, AV1_COMMON *cm) {
const int use_loopfilter =
is_loopfilter_used(cm) && !cpi->mt_info.pipeline_lpf_mt_with_enc;
- const int use_cdef = is_cdef_used(cm);
+ const int use_cdef =
+ is_cdef_used(cm) && (!cpi->active_map.enabled ||
+ cpi->rc.percent_blocks_inactive <=
+ cpi->sf.rt_sf.thresh_active_maps_skip_lf_cdef);
const int use_superres = av1_superres_scaled(cm);
const int use_restoration = is_restoration_used(cm);
@@ -2498,7 +2506,8 @@ static int encode_without_recode(AV1_COMP *cpi) {
&cpi->svc.source_last_TL0, cpi->oxcf.frm_dim_cfg.width,
cpi->oxcf.frm_dim_cfg.height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0)) {
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false,
+ 0)) {
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate buffer for source_last_TL0");
}
@@ -2547,7 +2556,7 @@ static int encode_without_recode(AV1_COMP *cpi) {
cpi->source = av1_realloc_and_scale_if_required(
cm, unscaled, &cpi->scaled_source, filter_scaler, phase_scaler, true,
- false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid);
if (frame_is_intra_only(cm) || resize_pending != 0) {
const int current_size =
(cm->mi_params.mi_rows * cm->mi_params.mi_cols) >> 2;
@@ -2570,7 +2579,7 @@ static int encode_without_recode(AV1_COMP *cpi) {
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source, filter_scaler,
phase_scaler, true, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->alloc_pyramid);
}
if (cpi->sf.rt_sf.use_temporal_noise_estimate) {
@@ -2647,12 +2656,8 @@ static int encode_without_recode(AV1_COMP *cpi) {
av1_setup_frame(cpi);
}
}
-
- if (q_cfg->aq_mode == CYCLIC_REFRESH_AQ) {
- suppress_active_map(cpi);
- av1_cyclic_refresh_setup(cpi);
- }
av1_apply_active_map(cpi);
+ if (q_cfg->aq_mode == CYCLIC_REFRESH_AQ) av1_cyclic_refresh_setup(cpi);
if (cm->seg.enabled) {
if (!cm->seg.update_data && cm->prev_frame) {
segfeatures_copy(&cm->seg, &cm->prev_frame->seg);
@@ -2667,26 +2672,26 @@ static int encode_without_recode(AV1_COMP *cpi) {
cm->cur_frame->seg.enabled = cm->seg.enabled;
// This is for rtc temporal filtering case.
- if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf &&
- cm->current_frame.frame_type != KEY_FRAME) {
+ if (is_psnr_calc_enabled(cpi) && cpi->sf.rt_sf.use_rtc_tf) {
const SequenceHeader *seq_params = cm->seq_params;
if (cpi->orig_source.buffer_alloc_sz == 0 ||
- cpi->last_source->y_width != cpi->source->y_width ||
- cpi->last_source->y_height != cpi->source->y_height) {
+ cpi->rc.prev_coded_width != cpi->oxcf.frm_dim_cfg.width ||
+ cpi->rc.prev_coded_height != cpi->oxcf.frm_dim_cfg.height) {
// Allocate a source buffer to store the true source for psnr calculation.
if (aom_alloc_frame_buffer(
&cpi->orig_source, cpi->oxcf.frm_dim_cfg.width,
cpi->oxcf.frm_dim_cfg.height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0))
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false,
+ 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate scaled buffer");
}
- aom_yv12_copy_y(cpi->source, &cpi->orig_source);
- aom_yv12_copy_u(cpi->source, &cpi->orig_source);
- aom_yv12_copy_v(cpi->source, &cpi->orig_source);
+ aom_yv12_copy_y(cpi->source, &cpi->orig_source, 1);
+ aom_yv12_copy_u(cpi->source, &cpi->orig_source, 1);
+ aom_yv12_copy_v(cpi->source, &cpi->orig_source, 1);
}
#if CONFIG_COLLECT_COMPONENT_TIMING
@@ -2725,9 +2730,9 @@ static int encode_without_recode(AV1_COMP *cpi) {
(cm->width != cpi->unscaled_source->y_crop_width ||
cm->height != cpi->unscaled_source->y_crop_height)) {
cpi->scaled_last_source_available = 1;
- aom_yv12_copy_y(&cpi->scaled_source, &cpi->scaled_last_source);
- aom_yv12_copy_u(&cpi->scaled_source, &cpi->scaled_last_source);
- aom_yv12_copy_v(&cpi->scaled_source, &cpi->scaled_last_source);
+ aom_yv12_copy_y(&cpi->scaled_source, &cpi->scaled_last_source, 1);
+ aom_yv12_copy_u(&cpi->scaled_source, &cpi->scaled_last_source, 1);
+ aom_yv12_copy_v(&cpi->scaled_source, &cpi->scaled_last_source, 1);
}
#if CONFIG_COLLECT_COMPONENT_TIMING
@@ -2846,7 +2851,7 @@ static int encode_with_recode_loop(AV1_COMP *cpi, size_t *size, uint8_t *dest) {
}
cpi->source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_source, &cpi->scaled_source, EIGHTTAP_REGULAR, 0,
- false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ false, false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid);
#if CONFIG_TUNE_BUTTERAUGLI
if (oxcf->tune_cfg.tuning == AOM_TUNE_BUTTERAUGLI) {
@@ -2866,7 +2871,7 @@ static int encode_with_recode_loop(AV1_COMP *cpi, size_t *size, uint8_t *dest) {
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
EIGHTTAP_REGULAR, 0, false, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->alloc_pyramid);
}
int scale_references = 0;
@@ -4042,7 +4047,7 @@ int av1_encode(AV1_COMP *const cpi, uint8_t *const dest,
}
#if CONFIG_DENOISE
-static int apply_denoise_2d(AV1_COMP *cpi, YV12_BUFFER_CONFIG *sd,
+static int apply_denoise_2d(AV1_COMP *cpi, const YV12_BUFFER_CONFIG *sd,
int block_size, float noise_level,
int64_t time_stamp, int64_t end_time) {
AV1_COMMON *const cm = &cpi->common;
@@ -4077,7 +4082,7 @@ static int apply_denoise_2d(AV1_COMP *cpi, YV12_BUFFER_CONFIG *sd,
#endif
int av1_receive_raw_frame(AV1_COMP *cpi, aom_enc_frame_flags_t frame_flags,
- YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ const YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
int64_t end_time) {
AV1_COMMON *const cm = &cpi->common;
const SequenceHeader *const seq_params = cm->seq_params;
@@ -4139,8 +4144,7 @@ int av1_receive_raw_frame(AV1_COMP *cpi, aom_enc_frame_flags_t frame_flags,
#endif // CONFIG_DENOISE
if (av1_lookahead_push(cpi->ppi->lookahead, sd, time_stamp, end_time,
- use_highbitdepth, cpi->image_pyramid_levels,
- frame_flags)) {
+ use_highbitdepth, cpi->alloc_pyramid, frame_flags)) {
aom_set_error(cm->error, AOM_CODEC_ERROR, "av1_lookahead_push() failed");
res = -1;
}
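A toy sketch (not part of the patch) of the new map expansion in av1_set_active_map(): each 16x16 input cell spans a 4x4 block of mi units (one mi unit covers 4x4 luma pixels), edge blocks are clipped, and the inactive fraction is tallied per sampled cell. All values here are hypothetical.

    #include <stdio.h>

    #define AOMMIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void) {
      enum { ACTIVE = 0, INACTIVE = 7 };  // AM_SEGMENT_ID_ACTIVE / _INACTIVE
      const int mi_rows = 6, mi_cols = 6, cols = 2;  // toy 24x24-pixel frame
      const unsigned char map16[4] = { 1, 0, 0, 1 };  // nonzero => active
      unsigned char map4[6 * 6];
      int samples = 0, inactive = 0;
      for (int r = 0; r < mi_rows; r += 4) {
        for (int c = 0; c < mi_cols; c += 4) {
          const unsigned char val =
              map16[(r >> 2) * cols + (c >> 2)] ? ACTIVE : INACTIVE;
          samples++;
          if (val == INACTIVE) inactive++;
          const int rmax = AOMMIN(4, mi_rows - r);
          const int cmax = AOMMIN(4, mi_cols - c);
          for (int x = 0; x < rmax; x++)
            for (int y = 0; y < cmax; y++)
              map4[(r + x) * mi_cols + (c + y)] = val;
        }
      }
      printf("percent_blocks_inactive = %d\n", inactive * 100 / samples);  // 50
      return 0;
    }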
diff --git a/third_party/aom/av1/encoder/encoder.h b/third_party/aom/av1/encoder/encoder.h
index e87ab9be1f..4de5d426ce 100644
--- a/third_party/aom/av1/encoder/encoder.h
+++ b/third_party/aom/av1/encoder/encoder.h
@@ -21,6 +21,7 @@
#include "config/aom_config.h"
#include "aom/aomcx.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/alloccommon.h"
#include "av1/common/av1_common_int.h"
@@ -3631,10 +3632,10 @@ typedef struct AV1_COMP {
unsigned int zeromv_skip_thresh_exit_part[BLOCK_SIZES_ALL];
/*!
- * Number of downsampling pyramid levels to allocate for each frame
+ * Should we allocate a downsampling pyramid for each frame buffer?
* This is currently only used for global motion
*/
- int image_pyramid_levels;
+ bool alloc_pyramid;
#if CONFIG_SALIENCY_MAP
/*!
@@ -3808,7 +3809,7 @@ int av1_init_parallel_frame_context(const AV1_COMP_DATA *const first_cpi_data,
* copy of the pointer.
*/
int av1_receive_raw_frame(AV1_COMP *cpi, aom_enc_frame_flags_t frame_flags,
- YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ const YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
int64_t end_time_stamp);
/*!\brief Encode a frame
@@ -4310,7 +4311,7 @@ static AOM_INLINE int is_psnr_calc_enabled(const AV1_COMP *cpi) {
const AV1_COMMON *const cm = &cpi->common;
return cpi->ppi->b_calculate_psnr && !is_stat_generation_stage(cpi) &&
- cm->show_frame;
+ cm->show_frame && !cpi->is_dropped_frame;
}
static INLINE int is_frame_resize_pending(const AV1_COMP *const cpi) {
diff --git a/third_party/aom/av1/encoder/encoder_alloc.h b/third_party/aom/av1/encoder/encoder_alloc.h
index ce48496d48..f24d4b0a10 100644
--- a/third_party/aom/av1/encoder/encoder_alloc.h
+++ b/third_party/aom/av1/encoder/encoder_alloc.h
@@ -439,8 +439,7 @@ static AOM_INLINE YV12_BUFFER_CONFIG *realloc_and_scale_source(
&cpi->scaled_source, scaled_width, scaled_height,
cm->seq_params->subsampling_x, cm->seq_params->subsampling_y,
cm->seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS,
- cm->features.byte_alignment, NULL, NULL, NULL,
- cpi->image_pyramid_levels, 0))
+ cm->features.byte_alignment, NULL, NULL, NULL, cpi->alloc_pyramid, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to reallocate scaled source buffer");
assert(cpi->scaled_source.y_crop_width == scaled_width);
diff --git a/third_party/aom/av1/encoder/encoder_utils.c b/third_party/aom/av1/encoder/encoder_utils.c
index c35873d207..1f81a530c9 100644
--- a/third_party/aom/av1/encoder/encoder_utils.c
+++ b/third_party/aom/av1/encoder/encoder_utils.c
@@ -9,8 +9,11 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
+#include <string.h>
+
#include "aom/aomcx.h"
+#include "av1/common/av1_common_int.h"
#include "av1/encoder/bitstream.h"
#include "av1/encoder/encodeframe.h"
#include "av1/encoder/encoder.h"
@@ -421,11 +424,13 @@ void av1_apply_active_map(AV1_COMP *cpi) {
struct segmentation *const seg = &cpi->common.seg;
unsigned char *const seg_map = cpi->enc_seg.map;
const unsigned char *const active_map = cpi->active_map.map;
- int i;
assert(AM_SEGMENT_ID_ACTIVE == CR_SEGMENT_ID_BASE);
- if (frame_is_intra_only(&cpi->common)) {
+ // Disable the active_maps on intra_only frames or if the
+ // input map for the current frame has no inactive blocks.
+ if (frame_is_intra_only(&cpi->common) ||
+ cpi->rc.percent_blocks_inactive == 0) {
cpi->active_map.enabled = 0;
cpi->active_map.update = 1;
}
@@ -434,8 +439,7 @@ void av1_apply_active_map(AV1_COMP *cpi) {
if (cpi->active_map.enabled) {
const int num_mis =
cpi->common.mi_params.mi_rows * cpi->common.mi_params.mi_cols;
- for (i = 0; i < num_mis; ++i)
- if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
+ memcpy(seg_map, active_map, sizeof(active_map[0]) * num_mis);
av1_enable_segmentation(seg);
av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF_Y_H);
@@ -725,7 +729,7 @@ void av1_scale_references(AV1_COMP *cpi, const InterpFilter filter,
RefCntBuffer *ref_fb = get_ref_frame_buf(cm, ref_frame);
if (aom_yv12_realloc_with_new_border(
&ref_fb->buf, AOM_BORDER_IN_PIXELS,
- cm->features.byte_alignment, cpi->image_pyramid_levels,
+ cm->features.byte_alignment, cpi->alloc_pyramid,
num_planes) != 0) {
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
@@ -749,7 +753,7 @@ void av1_scale_references(AV1_COMP *cpi, const InterpFilter filter,
&new_fb->buf, cm->width, cm->height,
cm->seq_params->subsampling_x, cm->seq_params->subsampling_y,
cm->seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS,
- cm->features.byte_alignment, NULL, NULL, NULL, 0, 0)) {
+ cm->features.byte_alignment, NULL, NULL, NULL, false, 0)) {
if (force_scaling) {
// Release the reference acquired in the get_free_fb() call above.
--new_fb->ref_count;
@@ -1087,12 +1091,12 @@ void av1_determine_sc_tools_with_encoding(AV1_COMP *cpi, const int q_orig) {
cpi->source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_source, &cpi->scaled_source, cm->features.interp_filter,
- 0, false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ 0, false, false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid);
if (cpi->unscaled_last_source != NULL) {
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
cm->features.interp_filter, 0, false, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->alloc_pyramid);
}
av1_setup_frame(cpi);
diff --git a/third_party/aom/av1/encoder/encodetxb.c b/third_party/aom/av1/encoder/encodetxb.c
index 5fe2a497c7..701c5489fe 100644
--- a/third_party/aom/av1/encoder/encodetxb.c
+++ b/third_party/aom/av1/encoder/encodetxb.c
@@ -134,14 +134,14 @@ int av1_get_eob_pos_token(const int eob, int *const extra) {
}
#if CONFIG_ENTROPY_STATS
-void av1_update_eob_context(int cdf_idx, int eob, TX_SIZE tx_size,
- TX_CLASS tx_class, PLANE_TYPE plane,
- FRAME_CONTEXT *ec_ctx, FRAME_COUNTS *counts,
- uint8_t allow_update_cdf) {
+static void update_eob_context(int cdf_idx, int eob, TX_SIZE tx_size,
+ TX_CLASS tx_class, PLANE_TYPE plane,
+ FRAME_CONTEXT *ec_ctx, FRAME_COUNTS *counts,
+ uint8_t allow_update_cdf) {
#else
-void av1_update_eob_context(int eob, TX_SIZE tx_size, TX_CLASS tx_class,
- PLANE_TYPE plane, FRAME_CONTEXT *ec_ctx,
- uint8_t allow_update_cdf) {
+static void update_eob_context(int eob, TX_SIZE tx_size, TX_CLASS tx_class,
+ PLANE_TYPE plane, FRAME_CONTEXT *ec_ctx,
+ uint8_t allow_update_cdf) {
#endif
int eob_extra;
const int eob_pt = av1_get_eob_pos_token(eob, &eob_extra);
@@ -623,11 +623,11 @@ void av1_update_and_record_txb_context(int plane, int block, int blk_row,
td->rd_counts.tx_type_used[tx_size][tx_type]++;
#if CONFIG_ENTROPY_STATS
- av1_update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx,
- td->counts, allow_update_cdf);
+ update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx,
+ td->counts, allow_update_cdf);
#else
- av1_update_eob_context(eob, tx_size, tx_class, plane_type, ec_ctx,
- allow_update_cdf);
+ update_eob_context(eob, tx_size, tx_class, plane_type, ec_ctx,
+ allow_update_cdf);
#endif
DECLARE_ALIGNED(16, int8_t, coeff_contexts[MAX_TX_SQUARE]);
@@ -785,8 +785,8 @@ void av1_record_txb_context(int plane, int block, int blk_row, int blk_col,
#if CONFIG_ENTROPY_STATS
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
- av1_update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx,
- td->counts, 0 /*allow_update_cdf*/);
+ update_eob_context(cdf_idx, eob, tx_size, tx_class, plane_type, ec_ctx,
+ td->counts, 0 /*allow_update_cdf*/);
DECLARE_ALIGNED(16, int8_t, coeff_contexts[MAX_TX_SQUARE]);
av1_get_nz_map_contexts(levels, scan, eob, tx_size, tx_class,
diff --git a/third_party/aom/av1/encoder/ethread.c b/third_party/aom/av1/encoder/ethread.c
index d6a806d504..755535ba51 100644
--- a/third_party/aom/av1/encoder/ethread.c
+++ b/third_party/aom/av1/encoder/ethread.c
@@ -12,6 +12,8 @@
#include <assert.h>
#include <stdbool.h>
+#include "aom_util/aom_pthread.h"
+
#include "av1/common/warped_motion.h"
#include "av1/common/thread_common.h"
@@ -1415,7 +1417,7 @@ static AOM_INLINE void sync_fpmt_workers(AV1_PRIMARY *ppi,
int num_workers = ppi->p_mt_info.p_num_workers;
int had_error = 0;
// Points to error in the earliest display order frame in the parallel set.
- const struct aom_internal_error_info *error;
+ const struct aom_internal_error_info *error = NULL;
// Encoding ends.
for (int i = num_workers - 1; i >= 0; --i) {
@@ -2227,8 +2229,8 @@ void av1_tpl_dealloc(AV1TplRowMultiThreadSync *tpl_sync) {
}
// Allocate memory for tpl row synchronization.
-void av1_tpl_alloc(AV1TplRowMultiThreadSync *tpl_sync, AV1_COMMON *cm,
- int mb_rows) {
+static void av1_tpl_alloc(AV1TplRowMultiThreadSync *tpl_sync, AV1_COMMON *cm,
+ int mb_rows) {
tpl_sync->rows = mb_rows;
#if CONFIG_MULTITHREAD
{
diff --git a/third_party/aom/av1/encoder/firstpass.c b/third_party/aom/av1/encoder/firstpass.c
index e20b6c177e..b94a50714a 100644
--- a/third_party/aom/av1/encoder/firstpass.c
+++ b/third_party/aom/av1/encoder/firstpass.c
@@ -22,6 +22,7 @@
#include "aom_ports/mem.h"
#include "aom_scale/aom_scale.h"
#include "aom_scale/yv12config.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/entropymv.h"
#include "av1/common/quant_common.h"
diff --git a/third_party/aom/av1/encoder/global_motion.c b/third_party/aom/av1/encoder/global_motion.c
index 73910de121..0ae47809c6 100644
--- a/third_party/aom/av1/encoder/global_motion.c
+++ b/third_party/aom/av1/encoder/global_motion.c
@@ -30,83 +30,6 @@
// Border over which to compute the global motion
#define ERRORADV_BORDER 0
-/* clang-format off */
-// Error metric used for global motion evaluation.
-// For 8-bit input, the pixel error used to index this table will always
-// be between -255 and +255. But for 10- and 12-bit input, we use interpolation
-// which means that we need to support indices of -256 and +256 as well.
-// Therefore, the table is offset so that logical index 0 corresponds to
-// error_measure_lut[256].
-const int error_measure_lut[513] = {
- // pow 0.7
- 16384, 16384, 16339, 16294, 16249, 16204, 16158, 16113,
- 16068, 16022, 15977, 15932, 15886, 15840, 15795, 15749,
- 15703, 15657, 15612, 15566, 15520, 15474, 15427, 15381,
- 15335, 15289, 15242, 15196, 15149, 15103, 15056, 15010,
- 14963, 14916, 14869, 14822, 14775, 14728, 14681, 14634,
- 14587, 14539, 14492, 14445, 14397, 14350, 14302, 14254,
- 14206, 14159, 14111, 14063, 14015, 13967, 13918, 13870,
- 13822, 13773, 13725, 13676, 13628, 13579, 13530, 13481,
- 13432, 13383, 13334, 13285, 13236, 13187, 13137, 13088,
- 13038, 12988, 12939, 12889, 12839, 12789, 12739, 12689,
- 12639, 12588, 12538, 12487, 12437, 12386, 12335, 12285,
- 12234, 12183, 12132, 12080, 12029, 11978, 11926, 11875,
- 11823, 11771, 11719, 11667, 11615, 11563, 11511, 11458,
- 11406, 11353, 11301, 11248, 11195, 11142, 11089, 11036,
- 10982, 10929, 10875, 10822, 10768, 10714, 10660, 10606,
- 10552, 10497, 10443, 10388, 10333, 10279, 10224, 10168,
- 10113, 10058, 10002, 9947, 9891, 9835, 9779, 9723,
- 9666, 9610, 9553, 9497, 9440, 9383, 9326, 9268,
- 9211, 9153, 9095, 9037, 8979, 8921, 8862, 8804,
- 8745, 8686, 8627, 8568, 8508, 8449, 8389, 8329,
- 8269, 8208, 8148, 8087, 8026, 7965, 7903, 7842,
- 7780, 7718, 7656, 7593, 7531, 7468, 7405, 7341,
- 7278, 7214, 7150, 7086, 7021, 6956, 6891, 6826,
- 6760, 6695, 6628, 6562, 6495, 6428, 6361, 6293,
- 6225, 6157, 6089, 6020, 5950, 5881, 5811, 5741,
- 5670, 5599, 5527, 5456, 5383, 5311, 5237, 5164,
- 5090, 5015, 4941, 4865, 4789, 4713, 4636, 4558,
- 4480, 4401, 4322, 4242, 4162, 4080, 3998, 3916,
- 3832, 3748, 3663, 3577, 3490, 3402, 3314, 3224,
- 3133, 3041, 2948, 2854, 2758, 2661, 2562, 2461,
- 2359, 2255, 2148, 2040, 1929, 1815, 1698, 1577,
- 1452, 1323, 1187, 1045, 894, 731, 550, 339,
- 0, 339, 550, 731, 894, 1045, 1187, 1323,
- 1452, 1577, 1698, 1815, 1929, 2040, 2148, 2255,
- 2359, 2461, 2562, 2661, 2758, 2854, 2948, 3041,
- 3133, 3224, 3314, 3402, 3490, 3577, 3663, 3748,
- 3832, 3916, 3998, 4080, 4162, 4242, 4322, 4401,
- 4480, 4558, 4636, 4713, 4789, 4865, 4941, 5015,
- 5090, 5164, 5237, 5311, 5383, 5456, 5527, 5599,
- 5670, 5741, 5811, 5881, 5950, 6020, 6089, 6157,
- 6225, 6293, 6361, 6428, 6495, 6562, 6628, 6695,
- 6760, 6826, 6891, 6956, 7021, 7086, 7150, 7214,
- 7278, 7341, 7405, 7468, 7531, 7593, 7656, 7718,
- 7780, 7842, 7903, 7965, 8026, 8087, 8148, 8208,
- 8269, 8329, 8389, 8449, 8508, 8568, 8627, 8686,
- 8745, 8804, 8862, 8921, 8979, 9037, 9095, 9153,
- 9211, 9268, 9326, 9383, 9440, 9497, 9553, 9610,
- 9666, 9723, 9779, 9835, 9891, 9947, 10002, 10058,
- 10113, 10168, 10224, 10279, 10333, 10388, 10443, 10497,
- 10552, 10606, 10660, 10714, 10768, 10822, 10875, 10929,
- 10982, 11036, 11089, 11142, 11195, 11248, 11301, 11353,
- 11406, 11458, 11511, 11563, 11615, 11667, 11719, 11771,
- 11823, 11875, 11926, 11978, 12029, 12080, 12132, 12183,
- 12234, 12285, 12335, 12386, 12437, 12487, 12538, 12588,
- 12639, 12689, 12739, 12789, 12839, 12889, 12939, 12988,
- 13038, 13088, 13137, 13187, 13236, 13285, 13334, 13383,
- 13432, 13481, 13530, 13579, 13628, 13676, 13725, 13773,
- 13822, 13870, 13918, 13967, 14015, 14063, 14111, 14159,
- 14206, 14254, 14302, 14350, 14397, 14445, 14492, 14539,
- 14587, 14634, 14681, 14728, 14775, 14822, 14869, 14916,
- 14963, 15010, 15056, 15103, 15149, 15196, 15242, 15289,
- 15335, 15381, 15427, 15474, 15520, 15566, 15612, 15657,
- 15703, 15749, 15795, 15840, 15886, 15932, 15977, 16022,
- 16068, 16113, 16158, 16204, 16249, 16294, 16339, 16384,
- 16384,
-};
-/* clang-format on */
-
int av1_is_enough_erroradvantage(double best_erroradvantage, int params_cost) {
return best_erroradvantage < erroradv_tr &&
best_erroradvantage * params_cost < erroradv_prod_tr;
@@ -541,6 +464,11 @@ int64_t av1_refine_integerized_param(
}
wm->wmtype = get_wmtype(wm);
+ // Recompute shear params for the refined model
+ // This should never fail, because we only ever consider warp-able models
+ if (!av1_get_shear_params(wm)) {
+ assert(0);
+ }
return best_error;
}
diff --git a/third_party/aom/av1/encoder/global_motion.h b/third_party/aom/av1/encoder/global_motion.h
index 8c9c60f0f5..de46a0e1f2 100644
--- a/third_party/aom/av1/encoder/global_motion.h
+++ b/third_party/aom/av1/encoder/global_motion.h
@@ -15,6 +15,7 @@
#include "aom/aom_integer.h"
#include "aom_dsp/flow_estimation/flow_estimation.h"
#include "aom_scale/yv12config.h"
+#include "aom_util/aom_pthread.h"
#include "aom_util/aom_thread.h"
#ifdef __cplusplus
@@ -97,37 +98,6 @@ void av1_compute_feature_segmentation_map(uint8_t *segment_map, int width,
int height, int *inliers,
int num_inliers);
-extern const int error_measure_lut[513];
-
-static INLINE int error_measure(int err) {
- return error_measure_lut[256 + err];
-}
-
-#if CONFIG_AV1_HIGHBITDEPTH
-static INLINE int highbd_error_measure(int err, int bd) {
- const int b = bd - 8;
- const int bmask = (1 << b) - 1;
- const int v = (1 << b);
-
- // Split error into two parts and do an interpolated table lookup
- // To compute the table index and interpolation value, we want to calculate
- // the quotient and remainder of err / 2^b. But it is very important that
- // the division must round down, and the remainder must be positive,
- // ie. in the range [0, 2^b).
- //
- // In C, the >> and & operators do what we want, but the / and % operators
- // give the wrong results for negative inputs. So we must use >> and & here.
- //
- // For example, if bd == 10 and err == -5, compare the results:
- // (-5) >> 2 = -2, (-5) & 3 = 3
- // vs. (-5) / 4 = -1, (-5) % 4 = -1
- const int e1 = err >> b;
- const int e2 = err & bmask;
- return error_measure_lut[256 + e1] * (v - e2) +
- error_measure_lut[257 + e1] * e2;
-}
-#endif // CONFIG_AV1_HIGHBITDEPTH
-
int64_t av1_segmented_frame_error(int use_hbd, int bd, const uint8_t *ref,
int ref_stride, uint8_t *dst, int dst_stride,
int p_width, int p_height,
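
[Editor's note] The comment in the removed highbd_error_measure() is worth a runnable illustration: C's / and % truncate toward zero, while the interpolated table lookup needs floor semantics. A minimal sketch, independent of libaom:

#include <assert.h>
#include <stdio.h>

int main(void) {
  const int b = 2;  // bd - 8 for bd == 10, as in the removed comment
  const int err = -5;
  // Arithmetic shift and mask: quotient rounds down, remainder in [0, 2^b).
  const int e1 = err >> b;              // -2
  const int e2 = err & ((1 << b) - 1);  // 3
  // C division/modulo truncate toward zero: unusable for the table split.
  const int q = err / (1 << b);  // -1
  const int r = err % (1 << b);  // -1
  assert(e1 * (1 << b) + e2 == err);  // the decomposition the LUT needs
  printf("shift/mask: %d,%d  div/mod: %d,%d\n", e1, e2, q, r);
  return 0;
}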
diff --git a/third_party/aom/av1/encoder/global_motion_facade.c b/third_party/aom/av1/encoder/global_motion_facade.c
index 02a4e70ed3..687eeee18a 100644
--- a/third_party/aom/av1/encoder/global_motion_facade.c
+++ b/third_party/aom/av1/encoder/global_motion_facade.c
@@ -89,6 +89,7 @@ static AOM_INLINE void compute_global_motion_for_ref_frame(
assert(ref_buf[frame] != NULL);
int bit_depth = cpi->common.seq_params->bit_depth;
GlobalMotionMethod global_motion_method = default_global_motion_method;
+ int downsample_level = cpi->sf.gm_sf.downsample_level;
int num_refinements = cpi->sf.gm_sf.num_refinement_steps;
bool mem_alloc_failed = false;
@@ -99,9 +100,10 @@ static AOM_INLINE void compute_global_motion_for_ref_frame(
double best_erroradv = erroradv_tr;
for (TransformationType model = FIRST_GLOBAL_TRANS_TYPE;
model <= LAST_GLOBAL_TRANS_TYPE; ++model) {
- if (!aom_compute_global_motion(
- model, cpi->source, ref_buf[frame], bit_depth, global_motion_method,
- motion_models, RANSAC_NUM_MOTIONS, &mem_alloc_failed)) {
+ if (!aom_compute_global_motion(model, cpi->source, ref_buf[frame],
+ bit_depth, global_motion_method,
+ downsample_level, motion_models,
+ RANSAC_NUM_MOTIONS, &mem_alloc_failed)) {
if (mem_alloc_failed) {
aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate global motion buffers");
@@ -115,6 +117,9 @@ static AOM_INLINE void compute_global_motion_for_ref_frame(
WarpedMotionParams tmp_wm_params;
av1_convert_model_to_params(motion_models[i].params, &tmp_wm_params);
+ // Check that the generated model is warp-able
+ if (!av1_get_shear_params(&tmp_wm_params)) continue;
+
// Skip models that we won't use (IDENTITY or TRANSLATION)
//
// For IDENTITY type models, we don't need to evaluate anything because
@@ -151,6 +156,14 @@ static AOM_INLINE void compute_global_motion_for_ref_frame(
double erroradvantage = (double)warp_error / ref_frame_error;
+ // Check that the model signaling cost is not too high
+ if (!av1_is_enough_erroradvantage(
+ erroradvantage,
+ gm_get_params_cost(&tmp_wm_params, ref_params,
+ cm->features.allow_high_precision_mv))) {
+ continue;
+ }
+
if (erroradvantage < best_erroradv) {
best_erroradv = erroradvantage;
// Save the wm_params modified by
@@ -161,34 +174,6 @@ static AOM_INLINE void compute_global_motion_for_ref_frame(
}
}
}
-
- if (!av1_get_shear_params(&cm->global_motion[frame]))
- cm->global_motion[frame] = default_warp_params;
-
-#if 0
- // We never choose translational models, so this code is disabled
- if (cm->global_motion[frame].wmtype == TRANSLATION) {
- cm->global_motion[frame].wmmat[0] =
- convert_to_trans_prec(cm->features.allow_high_precision_mv,
- cm->global_motion[frame].wmmat[0]) *
- GM_TRANS_ONLY_DECODE_FACTOR;
- cm->global_motion[frame].wmmat[1] =
- convert_to_trans_prec(cm->features.allow_high_precision_mv,
- cm->global_motion[frame].wmmat[1]) *
- GM_TRANS_ONLY_DECODE_FACTOR;
- }
-#endif
-
- if (cm->global_motion[frame].wmtype == IDENTITY) return;
-
- // If the best error advantage found doesn't meet the threshold for
- // this motion type, revert to IDENTITY.
- if (!av1_is_enough_erroradvantage(
- best_erroradv,
- gm_get_params_cost(&cm->global_motion[frame], ref_params,
- cm->features.allow_high_precision_mv))) {
- cm->global_motion[frame] = default_warp_params;
- }
}
// Computes global motion for the given reference frame.
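
[Editor's note] The net effect of these hunks is that model validation (shear-parameter validity and the error-advantage/cost gate) now happens per candidate inside the loop, instead of once on the final model. A self-contained toy sketch of the new control flow; the threshold values are hypothetical stand-ins for the internal erroradv_tr / erroradv_prod_tr constants:

#include <stdbool.h>
#include <stdio.h>

// Toy stand-ins for the two gates applied inside the loop.
static bool shear_ok(int model) { return model != 1; }  // model 1 "fails"
static bool enough_advantage(double adv, int cost) {
  const double erroradv_tr = 0.65, erroradv_prod_tr = 20000;  // hypothetical
  return adv < erroradv_tr && adv * cost < erroradv_prod_tr;
}

int main(void) {
  const double adv[] = { 0.9, 0.4, 0.5 };
  const int cost[] = { 100, 90, 120 };
  double best = 0.65;  // initialized to the threshold, as in the patch
  int best_model = -1;
  for (int m = 0; m < 3; m++) {
    if (!shear_ok(m)) continue;  // invalid warp model: skip early
    if (!enough_advantage(adv[m], cost[m])) continue;  // too costly to signal
    if (adv[m] < best) { best = adv[m]; best_model = m; }
  }
  printf("best model: %d (erroradvantage %.2f)\n", best_model, best);
  return 0;
}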
diff --git a/third_party/aom/av1/encoder/k_means_template.h b/third_party/aom/av1/encoder/k_means_template.h
index 4be2038a6f..239029345d 100644
--- a/third_party/aom/av1/encoder/k_means_template.h
+++ b/third_party/aom/av1/encoder/k_means_template.h
@@ -24,6 +24,9 @@
#define RENAME_(x, y) AV1_K_MEANS_RENAME(x, y)
#define RENAME(x) RENAME_(x, AV1_K_MEANS_DIM)
+#define K_MEANS_RENAME_C(x, y) x##_dim##y##_c
+#define RENAME_C_(x, y) K_MEANS_RENAME_C(x, y)
+#define RENAME_C(x) RENAME_C_(x, AV1_K_MEANS_DIM)
// Though we want to compute the smallest L2 norm, in 1 dimension,
// it is equivalent to find the smallest L1 norm and then square it.
@@ -41,8 +44,8 @@ static int RENAME(calc_dist)(const int16_t *p1, const int16_t *p2) {
#endif
}
-void RENAME(av1_calc_indices)(const int16_t *data, const int16_t *centroids,
- uint8_t *indices, int64_t *dist, int n, int k) {
+void RENAME_C(av1_calc_indices)(const int16_t *data, const int16_t *centroids,
+ uint8_t *indices, int64_t *dist, int n, int k) {
if (dist) {
*dist = 0;
}
@@ -149,3 +152,6 @@ void RENAME(av1_k_means)(const int16_t *data, int16_t *centroids,
}
#undef RENAME_
#undef RENAME
+#undef K_MEANS_RENAME_C
+#undef RENAME_C_
+#undef RENAME_C
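
[Editor's note] A minimal preprocessor sketch of the renaming scheme after this change, together with palette.h's AV1_K_MEANS_RENAME no longer appending _c — presumably so the _c-suffixed symbol can serve as the C fallback for a dispatched av1_calc_indices while av1_k_means keeps its plain name:

#include <stdio.h>

#define AV1_K_MEANS_RENAME(func, dim) func##_dim##dim
#define RENAME_(x, y) AV1_K_MEANS_RENAME(x, y)
#define RENAME(x) RENAME_(x, AV1_K_MEANS_DIM)

#define K_MEANS_RENAME_C(x, y) x##_dim##y##_c
#define RENAME_C_(x, y) K_MEANS_RENAME_C(x, y)
#define RENAME_C(x) RENAME_C_(x, AV1_K_MEANS_DIM)

#define AV1_K_MEANS_DIM 2
// Expands to av1_calc_indices_dim2 and av1_calc_indices_dim2_c:
void RENAME(av1_calc_indices)(void) { puts("av1_calc_indices_dim2"); }
void RENAME_C(av1_calc_indices)(void) { puts("av1_calc_indices_dim2_c"); }

int main(void) {
  av1_calc_indices_dim2();    // the plain (dispatchable) name
  av1_calc_indices_dim2_c();  // the C implementation
  return 0;
}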
diff --git a/third_party/aom/av1/encoder/lookahead.c b/third_party/aom/av1/encoder/lookahead.c
index 9ef9b88675..476c91ab95 100644
--- a/third_party/aom/av1/encoder/lookahead.c
+++ b/third_party/aom/av1/encoder/lookahead.c
@@ -46,7 +46,7 @@ struct lookahead_ctx *av1_lookahead_init(
unsigned int width, unsigned int height, unsigned int subsampling_x,
unsigned int subsampling_y, int use_highbitdepth, unsigned int depth,
const int border_in_pixels, int byte_alignment, int num_lap_buffers,
- bool is_all_intra, int num_pyramid_levels) {
+ bool is_all_intra, bool alloc_pyramid) {
int lag_in_frames = AOMMAX(1, depth);
// For all-intra frame encoding, previous source frames are not required.
@@ -82,7 +82,7 @@ struct lookahead_ctx *av1_lookahead_init(
if (aom_realloc_frame_buffer(
&ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
use_highbitdepth, border_in_pixels, byte_alignment, NULL, NULL,
- NULL, num_pyramid_levels, 0)) {
+ NULL, alloc_pyramid, 0)) {
goto fail;
}
}
@@ -100,7 +100,7 @@ int av1_lookahead_full(const struct lookahead_ctx *ctx) {
int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src,
int64_t ts_start, int64_t ts_end, int use_highbitdepth,
- int num_pyramid_levels, aom_enc_frame_flags_t flags) {
+ bool alloc_pyramid, aom_enc_frame_flags_t flags) {
int width = src->y_crop_width;
int height = src->y_crop_height;
int uv_width = src->uv_crop_width;
@@ -124,9 +124,9 @@ int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src,
height != buf->img.y_crop_height ||
uv_width != buf->img.uv_crop_width ||
uv_height != buf->img.uv_crop_height;
- larger_dimensions = width > buf->img.y_width || height > buf->img.y_height ||
- uv_width > buf->img.uv_width ||
- uv_height > buf->img.uv_height;
+ larger_dimensions =
+ width > buf->img.y_crop_width || height > buf->img.y_crop_height ||
+ uv_width > buf->img.uv_crop_width || uv_height > buf->img.uv_crop_height;
assert(!larger_dimensions || new_dimensions);
if (larger_dimensions) {
@@ -134,11 +134,15 @@ int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src,
memset(&new_img, 0, sizeof(new_img));
if (aom_alloc_frame_buffer(&new_img, width, height, subsampling_x,
subsampling_y, use_highbitdepth,
- AOM_BORDER_IN_PIXELS, 0, num_pyramid_levels, 0))
+ AOM_BORDER_IN_PIXELS, 0, alloc_pyramid, 0))
return 1;
aom_free_frame_buffer(&buf->img);
buf->img = new_img;
} else if (new_dimensions) {
+ buf->img.y_width = src->y_width;
+ buf->img.y_height = src->y_height;
+ buf->img.uv_width = src->uv_width;
+ buf->img.uv_height = src->uv_height;
buf->img.y_crop_width = src->y_crop_width;
buf->img.y_crop_height = src->y_crop_height;
buf->img.uv_crop_width = src->uv_crop_width;
@@ -146,7 +150,6 @@ int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src,
buf->img.subsampling_x = src->subsampling_x;
buf->img.subsampling_y = src->subsampling_y;
}
- // Partial copy not implemented yet
av1_copy_and_extend_frame(src, &buf->img);
buf->ts_start = ts_start;
diff --git a/third_party/aom/av1/encoder/lookahead.h b/third_party/aom/av1/encoder/lookahead.h
index c0e6d222f5..41eca87fa3 100644
--- a/third_party/aom/av1/encoder/lookahead.h
+++ b/third_party/aom/av1/encoder/lookahead.h
@@ -70,7 +70,7 @@ struct lookahead_ctx *av1_lookahead_init(
unsigned int width, unsigned int height, unsigned int subsampling_x,
unsigned int subsampling_y, int use_highbitdepth, unsigned int depth,
const int border_in_pixels, int byte_alignment, int num_lap_buffers,
- bool is_all_intra, int num_pyramid_levels);
+ bool is_all_intra, bool alloc_pyramid);
/**\brief Destroys the lookahead stage
*/
@@ -85,18 +85,18 @@ int av1_lookahead_full(const struct lookahead_ctx *ctx);
* This function will copy the source image into a new framebuffer with
* the expected stride/border.
*
- * \param[in] ctx Pointer to the lookahead context
- * \param[in] src Pointer to the image to enqueue
- * \param[in] ts_start Timestamp for the start of this frame
- * \param[in] ts_end Timestamp for the end of this frame
- * \param[in] use_highbitdepth Tell if HBD is used
- * \param[in] num_pyramid_levels Number of pyramid levels to allocate
- for each frame buffer
- * \param[in] flags Flags set on this frame
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] src Pointer to the image to enqueue
+ * \param[in] ts_start Timestamp for the start of this frame
+ * \param[in] ts_end Timestamp for the end of this frame
+ * \param[in] use_highbitdepth Tell if HBD is used
+ * \param[in] alloc_pyramid Whether to allocate a downsampling pyramid
+ * for each frame buffer
+ * \param[in] flags Flags set on this frame
*/
int av1_lookahead_push(struct lookahead_ctx *ctx, const YV12_BUFFER_CONFIG *src,
int64_t ts_start, int64_t ts_end, int use_highbitdepth,
- int num_pyramid_levels, aom_enc_frame_flags_t flags);
+ bool alloc_pyramid, aom_enc_frame_flags_t flags);
/**\brief Get the next source buffer to encode
*
diff --git a/third_party/aom/av1/encoder/nonrd_pickmode.c b/third_party/aom/av1/encoder/nonrd_pickmode.c
index f939b6d1fa..57c74f66d5 100644
--- a/third_party/aom/av1/encoder/nonrd_pickmode.c
+++ b/third_party/aom/av1/encoder/nonrd_pickmode.c
@@ -2357,6 +2357,10 @@ static AOM_FORCE_INLINE bool skip_inter_mode_nonrd(
*ref_frame2 = NONE_FRAME;
}
+ if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP) &&
+ (*this_mode != GLOBALMV || *ref_frame != LAST_FRAME))
+ return true;
+
if (x->sb_me_block && *ref_frame == LAST_FRAME) {
// We want to make sure to test the superblock MV:
// so don't skip (return false) for NEAREST_LAST or NEAR_LAST if they
@@ -3241,7 +3245,8 @@ void av1_nonrd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data,
inter_pred_params_sr.conv_params =
get_conv_params(/*do_average=*/0, AOM_PLANE_Y, xd->bd);
- x->block_is_zero_sad = x->content_state_sb.source_sad_nonrd == kZeroSad;
+ x->block_is_zero_sad = x->content_state_sb.source_sad_nonrd == kZeroSad ||
+ segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP);
if (cpi->oxcf.tune_cfg.content == AOM_CONTENT_SCREEN &&
!x->force_zeromv_skip_for_blk &&
x->content_state_sb.source_sad_nonrd != kZeroSad &&
diff --git a/third_party/aom/av1/encoder/palette.c b/third_party/aom/av1/encoder/palette.c
index 7f79e9596e..45b56199c6 100644
--- a/third_party/aom/av1/encoder/palette.c
+++ b/third_party/aom/av1/encoder/palette.c
@@ -480,7 +480,7 @@ struct ColorCount {
int count;
};
-int color_count_comp(const void *c1, const void *c2) {
+static int color_count_comp(const void *c1, const void *c2) {
const struct ColorCount *color_count1 = (const struct ColorCount *)c1;
const struct ColorCount *color_count2 = (const struct ColorCount *)c2;
if (color_count1->count > color_count2->count) return -1;
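
[Editor's note] Making color_count_comp static gives it internal linkage; it is only used as a qsort comparator within palette.c. A standalone sketch of its descending-by-count ordering (struct fields and the remaining branches reconstructed for the demo):

#include <stdio.h>
#include <stdlib.h>

struct ColorCount { int color; int count; };  // simplified stand-in

// Same ordering as the now-static comparator: descending by count.
static int color_count_comp(const void *c1, const void *c2) {
  const struct ColorCount *a = (const struct ColorCount *)c1;
  const struct ColorCount *b = (const struct ColorCount *)c2;
  if (a->count > b->count) return -1;
  if (a->count < b->count) return 1;
  return 0;
}

int main(void) {
  struct ColorCount cc[] = { { 10, 3 }, { 20, 7 }, { 30, 5 } };
  qsort(cc, 3, sizeof(cc[0]), color_count_comp);
  for (int i = 0; i < 3; i++)
    printf("color %d: count %d\n", cc[i].color, cc[i].count);
  return 0;
}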
diff --git a/third_party/aom/av1/encoder/palette.h b/third_party/aom/av1/encoder/palette.h
index 7da863a0cc..30886d37ae 100644
--- a/third_party/aom/av1/encoder/palette.h
+++ b/third_party/aom/av1/encoder/palette.h
@@ -26,7 +26,7 @@ struct PICK_MODE_CONTEXT;
struct macroblock;
/*!\cond */
-#define AV1_K_MEANS_RENAME(func, dim) func##_dim##dim##_c
+#define AV1_K_MEANS_RENAME(func, dim) func##_dim##dim
void AV1_K_MEANS_RENAME(av1_k_means, 1)(const int16_t *data, int16_t *centroids,
uint8_t *indices, int n, int k,
diff --git a/third_party/aom/av1/encoder/partition_search.c b/third_party/aom/av1/encoder/partition_search.c
index 1c17b09ee1..61d49a23f2 100644
--- a/third_party/aom/av1/encoder/partition_search.c
+++ b/third_party/aom/av1/encoder/partition_search.c
@@ -2144,8 +2144,9 @@ static void encode_b_nonrd(const AV1_COMP *const cpi, TileDataEnc *tile_data,
}
if (tile_data->allow_update_cdf) update_stats(&cpi->common, td);
}
- if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ && mbmi->skip_txfm &&
- !cpi->rc.rtc_external_ratectrl && cm->seg.enabled)
+ if ((cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ ||
+ cpi->active_map.enabled) &&
+ mbmi->skip_txfm && !cpi->rc.rtc_external_ratectrl && cm->seg.enabled)
av1_cyclic_reset_segment_skip(cpi, x, mi_row, mi_col, bsize, dry_run);
// TODO(Ravi/Remya): Move this copy function to a better logical place
// This function will copy the best mode information from block
@@ -2254,6 +2255,8 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data,
const AQ_MODE aq_mode = cpi->oxcf.q_cfg.aq_mode;
TxfmSearchInfo *txfm_info = &x->txfm_search_info;
int i;
+ const int seg_skip =
+ segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
// This is only needed for real time/allintra row-mt enabled multi-threaded
// encoding with cost update frequency set to COST_UPD_TILE/COST_UPD_OFF.
@@ -2276,15 +2279,17 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data,
}
for (i = 0; i < 2; ++i) pd[i].color_index_map = ctx->color_index_map[i];
- x->force_zeromv_skip_for_blk =
- get_force_zeromv_skip_flag_for_blk(cpi, x, bsize);
+ if (!seg_skip) {
+ x->force_zeromv_skip_for_blk =
+ get_force_zeromv_skip_flag_for_blk(cpi, x, bsize);
- // Source variance may be already compute at superblock level, so no need
- // to recompute, unless bsize < sb_size or source_variance is not yet set.
- if (!x->force_zeromv_skip_for_blk &&
- (x->source_variance == UINT_MAX || bsize < cm->seq_params->sb_size))
- x->source_variance = av1_get_perpixel_variance_facade(
- cpi, xd, &x->plane[0].src, bsize, AOM_PLANE_Y);
+      // Source variance may already be computed at the superblock level, so
+      // no need to recompute unless bsize < sb_size or it is not yet set.
+ if (!x->force_zeromv_skip_for_blk &&
+ (x->source_variance == UINT_MAX || bsize < cm->seq_params->sb_size))
+ x->source_variance = av1_get_perpixel_variance_facade(
+ cpi, xd, &x->plane[0].src, bsize, AOM_PLANE_Y);
+ }
// Save rdmult before it might be changed, so it can be restored later.
const int orig_rdmult = x->rdmult;
@@ -2305,16 +2310,13 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data,
#if CONFIG_COLLECT_COMPONENT_TIMING
start_timing(cpi, nonrd_pick_inter_mode_sb_time);
#endif
- if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- RD_STATS invalid_rd;
- av1_invalid_rd_stats(&invalid_rd);
- // TODO(kyslov): add av1_nonrd_pick_inter_mode_sb_seg_skip
- av1_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, mi_row, mi_col,
- rd_cost, bsize, ctx,
- invalid_rd.rdcost);
- } else {
- av1_nonrd_pick_inter_mode_sb(cpi, tile_data, x, rd_cost, bsize, ctx);
+ if (seg_skip) {
+ x->force_zeromv_skip_for_blk = 1;
+ // TODO(marpan): Consider adding a function for nonrd:
+ // av1_nonrd_pick_inter_mode_sb_seg_skip(), instead of setting
+ // x->force_zeromv_skip flag and entering av1_nonrd_pick_inter_mode_sb().
}
+ av1_nonrd_pick_inter_mode_sb(cpi, tile_data, x, rd_cost, bsize, ctx);
#if CONFIG_COLLECT_COMPONENT_TIMING
end_timing(cpi, nonrd_pick_inter_mode_sb_time);
#endif
@@ -2322,10 +2324,12 @@ static void pick_sb_modes_nonrd(AV1_COMP *const cpi, TileDataEnc *tile_data,
if (cpi->sf.rt_sf.skip_cdef_sb) {
// cdef_strength is initialized to 1 which means skip_cdef, and is updated
    // here. Check to see if skipping cdef is allowed.
+ // Always allow cdef_skip for seg_skip = 1.
const int allow_cdef_skipping =
- cpi->rc.frames_since_key > 10 && !cpi->rc.high_source_sad &&
- !(x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_U)] ||
- x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_V)]);
+ seg_skip ||
+ (cpi->rc.frames_since_key > 10 && !cpi->rc.high_source_sad &&
+ !(x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_U)] ||
+ x->color_sensitivity[COLOR_SENS_IDX(AOM_PLANE_V)]));
// Find the corresponding 64x64 block. It'll be the 128x128 block if that's
// the block size.
diff --git a/third_party/aom/av1/encoder/partition_strategy.c b/third_party/aom/av1/encoder/partition_strategy.c
index ce06313579..1d62f128c7 100644
--- a/third_party/aom/av1/encoder/partition_strategy.c
+++ b/third_party/aom/av1/encoder/partition_strategy.c
@@ -1761,7 +1761,7 @@ void av1_prune_partitions_by_max_min_bsize(SuperBlockEnc *sb_enc,
// Decide whether to evaluate the AB partition specified by part_type based on
// split and HORZ/VERT info
-int evaluate_ab_partition_based_on_split(
+static int evaluate_ab_partition_based_on_split(
const PC_TREE *pc_tree, PARTITION_TYPE rect_part,
const RD_RECT_PART_WIN_INFO *rect_part_win_info, int qindex, int split_idx1,
int split_idx2) {
diff --git a/third_party/aom/av1/encoder/pass2_strategy.c b/third_party/aom/av1/encoder/pass2_strategy.c
index a9442ffc1a..bd8620c2be 100644
--- a/third_party/aom/av1/encoder/pass2_strategy.c
+++ b/third_party/aom/av1/encoder/pass2_strategy.c
@@ -158,28 +158,12 @@ static int frame_max_bits(const RATE_CONTROL *rc,
return (int)max_bits;
}
-static const double q_pow_term[(QINDEX_RANGE >> 5) + 1] = { 0.65, 0.70, 0.75,
- 0.80, 0.85, 0.90,
- 0.95, 0.95, 0.95 };
-#define ERR_DIVISOR 96.0
-static double calc_correction_factor(double err_per_mb, int q) {
- const double error_term = err_per_mb / ERR_DIVISOR;
- const int index = q >> 5;
- // Adjustment to power term based on qindex
- const double power_term =
- q_pow_term[index] +
- (((q_pow_term[index + 1] - q_pow_term[index]) * (q % 32)) / 32.0);
- assert(error_term >= 0.0);
- return fclamp(pow(error_term, power_term), 0.05, 5.0);
-}
-
// Based on history adjust expectations of bits per macroblock.
static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
TWO_PASS *const twopass = &cpi->ppi->twopass;
const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
// Based on recent history adjust expectations of bits per macroblock.
- double damp_fac = AOMMAX(5.0, rate_err_tol / 10.0);
double rate_err_factor = 1.0;
const double adj_limit = AOMMAX(0.2, (double)(100 - rate_err_tol) / 200.0);
const double min_fac = 1.0 - adj_limit;
@@ -214,9 +198,7 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
}
int err_estimate = p_rc->rate_error_estimate;
- int64_t bits_left = twopass->bits_left;
int64_t total_actual_bits = p_rc->total_actual_bits;
- int64_t bits_off_target = p_rc->vbr_bits_off_target;
double rolling_arf_group_actual_bits =
(double)twopass->rolling_arf_group_actual_bits;
double rolling_arf_group_target_bits =
@@ -231,10 +213,6 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
: 0;
total_actual_bits = simulate_parallel_frame ? p_rc->temp_total_actual_bits
: p_rc->total_actual_bits;
- bits_off_target = simulate_parallel_frame ? p_rc->temp_vbr_bits_off_target
- : p_rc->vbr_bits_off_target;
- bits_left =
- simulate_parallel_frame ? p_rc->temp_bits_left : twopass->bits_left;
rolling_arf_group_target_bits =
(double)(simulate_parallel_frame
? p_rc->temp_rolling_arf_group_target_bits
@@ -247,21 +225,21 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
: p_rc->rate_error_estimate;
#endif
- if (p_rc->bits_off_target && total_actual_bits > 0) {
- if (cpi->ppi->lap_enabled) {
- rate_err_factor = rolling_arf_group_actual_bits /
- DOUBLE_DIVIDE_CHECK(rolling_arf_group_target_bits);
+ if ((p_rc->bits_off_target && total_actual_bits > 0) &&
+ (rolling_arf_group_target_bits >= 1.0)) {
+ if (rolling_arf_group_actual_bits > rolling_arf_group_target_bits) {
+ double error_fraction =
+ (rolling_arf_group_actual_bits - rolling_arf_group_target_bits) /
+ rolling_arf_group_target_bits;
+ error_fraction = (error_fraction > 1.0) ? 1.0 : error_fraction;
+ rate_err_factor = 1.0 + error_fraction;
} else {
- rate_err_factor = 1.0 - ((double)(bits_off_target) /
- AOMMAX(total_actual_bits, bits_left));
+ double error_fraction =
+ (rolling_arf_group_target_bits - rolling_arf_group_actual_bits) /
+ rolling_arf_group_target_bits;
+ rate_err_factor = 1.0 - error_fraction;
}
- // Adjustment is damped if this is 1 pass with look ahead processing
- // (as there are only ever a few frames of data) and for all but the first
- // GOP in normal two pass.
- if ((twopass->bpm_factor != 1.0) || cpi->ppi->lap_enabled) {
- rate_err_factor = 1.0 + ((rate_err_factor - 1.0) / damp_fac);
- }
rate_err_factor = AOMMAX(min_fac, AOMMIN(max_fac, rate_err_factor));
}
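
[Editor's note] A small standalone sketch of the new symmetric rate_err_factor arithmetic above (the final clamping to [min_fac, max_fac] is omitted):

#include <stdio.h>

// Mirrors the patched computation: deviation from the rolling ARF-group
// target, expressed as a factor around 1.0 with overshoot capped at 2.0.
static double rate_err_factor(double actual, double target) {
  if (actual > target) {
    double error_fraction = (actual - target) / target;
    if (error_fraction > 1.0) error_fraction = 1.0;
    return 1.0 + error_fraction;
  }
  return 1.0 - (target - actual) / target;
}

int main(void) {
  printf("%.3f\n", rate_err_factor(120000.0, 100000.0));  // 1.200 (overshoot)
  printf("%.3f\n", rate_err_factor(80000.0, 100000.0));   // 0.800 (undershoot)
  printf("%.3f\n", rate_err_factor(350000.0, 100000.0));  // 2.000 (capped)
  return 0;
}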
@@ -270,36 +248,38 @@ static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
if ((rate_err_factor < 1.0 && err_estimate >= 0) ||
(rate_err_factor > 1.0 && err_estimate <= 0)) {
twopass->bpm_factor *= rate_err_factor;
- if (rate_err_tol >= 100) {
- twopass->bpm_factor =
- AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
- } else {
- twopass->bpm_factor = AOMMAX(0.1, AOMMIN(10.0, twopass->bpm_factor));
- }
+ twopass->bpm_factor = AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
}
}
-static int qbpm_enumerator(int rate_err_tol) {
- return 1200000 + ((300000 * AOMMIN(75, AOMMAX(rate_err_tol - 25, 0))) / 75);
+static const double q_div_term[(QINDEX_RANGE >> 5) + 1] = { 32.0, 40.0, 46.0,
+ 52.0, 56.0, 60.0,
+ 64.0, 68.0, 72.0 };
+#define EPMB_SCALER 1250000
+static double calc_correction_factor(double err_per_mb, int q) {
+ double power_term = 0.90;
+ const int index = q >> 5;
+ const double divisor =
+ q_div_term[index] +
+ (((q_div_term[index + 1] - q_div_term[index]) * (q % 32)) / 32.0);
+ double error_term = EPMB_SCALER * pow(err_per_mb, power_term);
+ return error_term / divisor;
}
// Similar to find_qindex_by_rate() function in ratectrl.c, but includes
// calculation of a correction_factor.
static int find_qindex_by_rate_with_correction(
int desired_bits_per_mb, aom_bit_depth_t bit_depth, double error_per_mb,
- double group_weight_factor, int rate_err_tol, int best_qindex,
- int worst_qindex) {
+ double group_weight_factor, int best_qindex, int worst_qindex) {
assert(best_qindex <= worst_qindex);
int low = best_qindex;
int high = worst_qindex;
while (low < high) {
const int mid = (low + high) >> 1;
- const double mid_factor = calc_correction_factor(error_per_mb, mid);
+ const double q_factor = calc_correction_factor(error_per_mb, mid);
const double q = av1_convert_qindex_to_q(mid, bit_depth);
- const int enumerator = qbpm_enumerator(rate_err_tol);
- const int mid_bits_per_mb =
- (int)((enumerator * mid_factor * group_weight_factor) / q);
+ const int mid_bits_per_mb = (int)((q_factor * group_weight_factor) / q);
if (mid_bits_per_mb > desired_bits_per_mb) {
low = mid + 1;
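
[Editor's note] The replacement calc_correction_factor() turns the old power-term table into an interpolated divisor table. A standalone copy for experimentation, with the table and EPMB_SCALER taken from the hunk above (link with -lm):

#include <math.h>
#include <stdio.h>

static const double q_div_term[9] = { 32.0, 40.0, 46.0, 52.0, 56.0,
                                      60.0, 64.0, 68.0, 72.0 };
#define EPMB_SCALER 1250000

static double calc_correction_factor(double err_per_mb, int q) {
  const double power_term = 0.90;
  const int index = q >> 5;
  // Linear interpolation between adjacent divisor entries within each
  // 32-step qindex bucket.
  const double divisor =
      q_div_term[index] +
      (((q_div_term[index + 1] - q_div_term[index]) * (q % 32)) / 32.0);
  return EPMB_SCALER * pow(err_per_mb, power_term) / divisor;
}

int main(void) {
  // q = 100 falls in bucket 3: divisor interpolated between 52 and 56.
  printf("factor at q=100: %.1f\n", calc_correction_factor(0.05, 100));
  printf("factor at q=200: %.1f\n", calc_correction_factor(0.05, 200));
  return 0;
}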
@@ -359,8 +339,8 @@ static int get_twopass_worst_quality(AV1_COMP *cpi, const double av_frame_err,
// content at the given rate.
int q = find_qindex_by_rate_with_correction(
target_norm_bits_per_mb, cpi->common.seq_params->bit_depth,
- av_err_per_mb, cpi->ppi->twopass.bpm_factor, rate_err_tol,
- rc->best_quality, rc->worst_quality);
+ av_err_per_mb, cpi->ppi->twopass.bpm_factor, rc->best_quality,
+ rc->worst_quality);
// Restriction on active max q for constrained quality mode.
if (rc_cfg->mode == AOM_CQ) q = AOMMAX(q, rc_cfg->cq_level);
@@ -4235,12 +4215,13 @@ void av1_twopass_postencode_update(AV1_COMP *cpi) {
twopass->kf_group_bits = AOMMAX(twopass->kf_group_bits, 0);
// If the rate control is drifting consider adjustment to min or maxq.
- if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref) {
+ if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref &&
+ (p_rc->rolling_target_bits > 0)) {
int minq_adj_limit;
int maxq_adj_limit;
minq_adj_limit =
(rc_cfg->mode == AOM_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
- maxq_adj_limit = rc->worst_quality - rc->active_worst_quality;
+ maxq_adj_limit = (rc->worst_quality - rc->active_worst_quality);
// Undershoot
if ((rc_cfg->under_shoot_pct < 100) &&
@@ -4252,8 +4233,9 @@ void av1_twopass_postencode_update(AV1_COMP *cpi) {
if ((pct_error >= rc_cfg->under_shoot_pct) &&
(p_rc->rate_error_estimate > 0)) {
twopass->extend_minq += 1;
+ twopass->extend_maxq -= 1;
}
- twopass->extend_maxq -= 1;
+
// Overshoot
} else if ((rc_cfg->over_shoot_pct < 100) &&
(p_rc->rolling_actual_bits > p_rc->rolling_target_bits)) {
@@ -4265,18 +4247,8 @@ void av1_twopass_postencode_update(AV1_COMP *cpi) {
if ((pct_error >= rc_cfg->over_shoot_pct) &&
(p_rc->rate_error_estimate < 0)) {
twopass->extend_maxq += 1;
+ twopass->extend_minq -= 1;
}
- twopass->extend_minq -= 1;
- } else {
- // Adjustment for extreme local overshoot.
- // Only applies when normal adjustment above is not used (e.g.
- // when threshold is set to 100).
- if (rc->projected_frame_size > (2 * rc->base_frame_target) &&
- rc->projected_frame_size > (2 * rc->avg_frame_bandwidth))
- ++twopass->extend_maxq;
- // Unwind extreme overshoot adjustment.
- else if (p_rc->rolling_target_bits > p_rc->rolling_actual_bits)
- --twopass->extend_maxq;
}
twopass->extend_minq =
clamp(twopass->extend_minq, -minq_adj_limit, minq_adj_limit);
diff --git a/third_party/aom/av1/encoder/pickcdef.c b/third_party/aom/av1/encoder/pickcdef.c
index 232a2f9edb..ed5fa55f17 100644
--- a/third_party/aom/av1/encoder/pickcdef.c
+++ b/third_party/aom/av1/encoder/pickcdef.c
@@ -894,7 +894,7 @@ void av1_cdef_search(AV1_COMP *cpi) {
int rdmult = cpi->td.mb.rdmult;
for (int i = 0; i <= 3; i++) {
if (i > max_signaling_bits) break;
- int best_lev0[CDEF_MAX_STRENGTHS];
+ int best_lev0[CDEF_MAX_STRENGTHS] = { 0 };
int best_lev1[CDEF_MAX_STRENGTHS] = { 0 };
const int nb_strengths = 1 << i;
uint64_t tot_mse;
diff --git a/third_party/aom/av1/encoder/picklpf.c b/third_party/aom/av1/encoder/picklpf.c
index 9084d3f13a..a504535028 100644
--- a/third_party/aom/av1/encoder/picklpf.c
+++ b/third_party/aom/av1/encoder/picklpf.c
@@ -27,12 +27,25 @@
#include "av1/encoder/encoder.h"
#include "av1/encoder/picklpf.h"
+// The AV1 loop filter is applied to the whole frame according to mi_rows and
+// mi_cols, which are calculated from the aligned width and aligned height.
+// In addition, if super res is enabled, the whole frame is copied according
+// to the aligned width and height (av1_superres_upscale()).
+// So we need to copy the whole filtered region, instead of the cropped region.
+// For example, if the input image size is 160x90, then
+// src->y_crop_width = 160, src->y_crop_height = 90, and the aligned frame
+// size is src->y_width = 160, src->y_height = 96.
+// AV1 aligns the frame size to a multiple of 8 so that, with chroma
+// subsampling, the chroma planes still cover an integer number of mi units.
+// An mi unit is 4x4; 8 = 4 * 2, and 2 luma mi units correspond to 1 chroma
+// mi unit when there is subsampling.
+// See aom_realloc_frame_buffer() in yv12config.c.
static void yv12_copy_plane(const YV12_BUFFER_CONFIG *src_bc,
YV12_BUFFER_CONFIG *dst_bc, int plane) {
switch (plane) {
- case 0: aom_yv12_copy_y(src_bc, dst_bc); break;
- case 1: aom_yv12_copy_u(src_bc, dst_bc); break;
- case 2: aom_yv12_copy_v(src_bc, dst_bc); break;
+ case 0: aom_yv12_copy_y(src_bc, dst_bc, 0); break;
+ case 1: aom_yv12_copy_u(src_bc, dst_bc, 0); break;
+ case 2: aom_yv12_copy_v(src_bc, dst_bc, 0); break;
default: assert(plane >= 0 && plane <= 2); break;
}
}
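
[Editor's note] A one-line illustration of the multiple-of-8 alignment described in the new comment above (simplified; the real logic lives in aom_realloc_frame_buffer()):

#include <stdio.h>

// Round a frame dimension up to a multiple of 8.
static int aligned_dim(int crop) { return (crop + 7) & ~7; }

int main(void) {
  // The 160x90 example from the comment: width already aligned, height not.
  printf("%dx%d -> %dx%d\n", 160, 90, aligned_dim(160), aligned_dim(90));
  return 0;
}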
@@ -311,7 +324,7 @@ void av1_pick_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
&cpi->last_frame_uf, cm->width, cm->height,
seq_params->subsampling_x, seq_params->subsampling_y,
seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, NULL, NULL, NULL, 0, 0))
+ cm->features.byte_alignment, NULL, NULL, NULL, false, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate last frame buffer");
diff --git a/third_party/aom/av1/encoder/pickrst.c b/third_party/aom/av1/encoder/pickrst.c
index 6429064175..b0d0d0bb78 100644
--- a/third_party/aom/av1/encoder/pickrst.c
+++ b/third_party/aom/av1/encoder/pickrst.c
@@ -1103,6 +1103,39 @@ static INLINE int wrap_index(int i, int wiener_win) {
return (i >= wiener_halfwin1 ? wiener_win - 1 - i : i);
}
+// Splits each w[i] into smaller components w1[i] and w2[i] such that
+// w[i] = w1[i] * WIENER_TAP_SCALE_FACTOR + w2[i].
+static INLINE void split_wiener_filter_coefficients(int wiener_win,
+ const int32_t *w,
+ int32_t *w1, int32_t *w2) {
+ for (int i = 0; i < wiener_win; i++) {
+ w1[i] = w[i] / WIENER_TAP_SCALE_FACTOR;
+ w2[i] = w[i] - w1[i] * WIENER_TAP_SCALE_FACTOR;
+ assert(w[i] == w1[i] * WIENER_TAP_SCALE_FACTOR + w2[i]);
+ }
+}
+
+// Calculates x * w / WIENER_TAP_SCALE_FACTOR, where
+// w = w1 * WIENER_TAP_SCALE_FACTOR + w2.
+//
+// The multiplication x * w may overflow, so we multiply x by the components of
+// w (w1 and w2) and combine the multiplication with the division.
+static INLINE int64_t multiply_and_scale(int64_t x, int32_t w1, int32_t w2) {
+ // Let y = x * w / WIENER_TAP_SCALE_FACTOR
+ // = x * (w1 * WIENER_TAP_SCALE_FACTOR + w2) / WIENER_TAP_SCALE_FACTOR
+ const int64_t y = x * w1 + x * w2 / WIENER_TAP_SCALE_FACTOR;
+ // Double-check the calculation using __int128.
+ // TODO(wtc): Remove after 2024-04-30.
+#if !defined(NDEBUG) && defined(__GNUC__) && defined(__LP64__)
+ const int32_t w = w1 * WIENER_TAP_SCALE_FACTOR + w2;
+ const __int128 z = (__int128)x * w / WIENER_TAP_SCALE_FACTOR;
+ assert(z >= INT64_MIN);
+ assert(z <= INT64_MAX);
+ assert(y == (int64_t)z);
+#endif
+ return y;
+}
+
// Solve linear equations to find Wiener filter tap values
// Taps are output scaled by WIENER_FILT_STEP
static int linsolve_wiener(int n, int64_t *A, int stride, int64_t *b,
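
[Editor's note] A standalone sketch of the split-and-scale trick added above. The WIENER_TAP_SCALE_FACTOR value here is an assumption for the demo; the real constant is defined in pickrst.c:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WIENER_TAP_SCALE_FACTOR (1 << 16)  // assumed value for this sketch

int main(void) {
  const int64_t x = (int64_t)1 << 47;  // x * w (~2^64.6) would overflow int64
  const int32_t w = 200000;
  // Split w so that w == w1 * WIENER_TAP_SCALE_FACTOR + w2.
  const int32_t w1 = w / WIENER_TAP_SCALE_FACTOR;       // 3
  const int32_t w2 = w - w1 * WIENER_TAP_SCALE_FACTOR;  // 3392
  // x * w / SCALE computed without ever forming the full product x * w.
  const int64_t y = x * w1 + x * w2 / WIENER_TAP_SCALE_FACTOR;
#if defined(__GNUC__) && defined(__LP64__)
  // Cross-check against a 128-bit computation, as the patch does in debug.
  assert(y == (int64_t)((__int128)x * w / WIENER_TAP_SCALE_FACTOR));
#endif
  printf("%lld\n", (long long)y);
  return 0;
}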
@@ -1175,10 +1208,12 @@ static int linsolve_wiener(int n, int64_t *A, int stride, int64_t *b,
// Fix vector b, update vector a
static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc,
- int64_t **Hc, int32_t *a, int32_t *b) {
+ int64_t **Hc, int32_t *a,
+ const int32_t *b) {
int i, j;
int64_t S[WIENER_WIN];
int64_t A[WIENER_HALFWIN1], B[WIENER_HALFWIN1 * WIENER_HALFWIN1];
+ int32_t b1[WIENER_WIN], b2[WIENER_WIN];
const int wiener_win2 = wiener_win * wiener_win;
const int wiener_halfwin1 = (wiener_win >> 1) + 1;
memset(A, 0, sizeof(A));
@@ -1189,16 +1224,7 @@ static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc,
A[jj] += Mc[i][j] * b[i] / WIENER_TAP_SCALE_FACTOR;
}
}
-
- // b/274668506: This is the dual branch for the issue in b/272139363. The fix
- // is similar. See comments in update_b_sep_sym() below.
- int32_t max_b_l = 0;
- for (int l = 0; l < wiener_win; ++l) {
- const int32_t abs_b_l = abs(b[l]);
- if (abs_b_l > max_b_l) max_b_l = abs_b_l;
- }
- const int scale_threshold = 128 * WIENER_TAP_SCALE_FACTOR;
- const int scaler = max_b_l < scale_threshold ? 1 : 4;
+ split_wiener_filter_coefficients(wiener_win, b, b1, b2);
for (i = 0; i < wiener_win; i++) {
for (j = 0; j < wiener_win; j++) {
@@ -1207,10 +1233,17 @@ static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc,
const int kk = wrap_index(k, wiener_win);
for (l = 0; l < wiener_win; ++l) {
const int ll = wrap_index(l, wiener_win);
- B[ll * wiener_halfwin1 + kk] +=
- Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] /
- (scaler * WIENER_TAP_SCALE_FACTOR) * b[j] /
- (WIENER_TAP_SCALE_FACTOR / scaler);
+ // Calculate
+ // B[ll * wiener_halfwin1 + kk] +=
+ // Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] /
+ // WIENER_TAP_SCALE_FACTOR * b[j] / WIENER_TAP_SCALE_FACTOR;
+ //
+ // The last multiplication may overflow, so we combine the last
+ // multiplication with the last division.
+ const int64_t x = Hc[j * wiener_win + i][k * wiener_win2 + l] * b[i] /
+ WIENER_TAP_SCALE_FACTOR;
+ // b[j] = b1[j] * WIENER_TAP_SCALE_FACTOR + b2[j]
+ B[ll * wiener_halfwin1 + kk] += multiply_and_scale(x, b1[j], b2[j]);
}
}
}
@@ -1246,10 +1279,12 @@ static AOM_INLINE void update_a_sep_sym(int wiener_win, int64_t **Mc,
// Fix vector a, update vector b
static AOM_INLINE void update_b_sep_sym(int wiener_win, int64_t **Mc,
- int64_t **Hc, int32_t *a, int32_t *b) {
+ int64_t **Hc, const int32_t *a,
+ int32_t *b) {
int i, j;
int64_t S[WIENER_WIN];
int64_t A[WIENER_HALFWIN1], B[WIENER_HALFWIN1 * WIENER_HALFWIN1];
+ int32_t a1[WIENER_WIN], a2[WIENER_WIN];
const int wiener_win2 = wiener_win * wiener_win;
const int wiener_halfwin1 = (wiener_win >> 1) + 1;
memset(A, 0, sizeof(A));
@@ -1260,32 +1295,7 @@ static AOM_INLINE void update_b_sep_sym(int wiener_win, int64_t **Mc,
A[ii] += Mc[i][j] * a[j] / WIENER_TAP_SCALE_FACTOR;
}
}
-
- // b/272139363: The computation,
- // Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] /
- // WIENER_TAP_SCALE_FACTOR * a[l] / WIENER_TAP_SCALE_FACTOR;
- // may generate a signed-integer-overflow. Conditionally scale the terms to
- // avoid a potential overflow.
- //
- // Hc contains accumulated correlation statistics and it is desired to leave
- // as much room as possible for Hc. It was experimentally observed that the
- // primary issue manifests itself with the second, a[l], multiply. For
- // max_a_l < WIENER_TAP_SCALE_FACTOR the first multiply with a[k] should not
- // increase dynamic range and the second multiply should hence be safe.
- // Thereafter a safe scale_threshold depends on the actual operational range
- // of Hc. The largest scale_threshold is expected to depend on bit-depth
- // (av1_compute_stats_highbd_c() scales highbd to 8-bit) and maximum
- // restoration-unit size (256), leading up to 32-bit positive numbers in Hc.
- // Noting that the caller, wiener_decompose_sep_sym(), initializes a[...]
- // to a range smaller than 16 bits, the scale_threshold is set as below for
- // convenience.
- int32_t max_a_l = 0;
- for (int l = 0; l < wiener_win; ++l) {
- const int32_t abs_a_l = abs(a[l]);
- if (abs_a_l > max_a_l) max_a_l = abs_a_l;
- }
- const int scale_threshold = 128 * WIENER_TAP_SCALE_FACTOR;
- const int scaler = max_a_l < scale_threshold ? 1 : 4;
+ split_wiener_filter_coefficients(wiener_win, a, a1, a2);
for (i = 0; i < wiener_win; i++) {
const int ii = wrap_index(i, wiener_win);
@@ -1294,10 +1304,17 @@ static AOM_INLINE void update_b_sep_sym(int wiener_win, int64_t **Mc,
int k, l;
for (k = 0; k < wiener_win; ++k) {
for (l = 0; l < wiener_win; ++l) {
- B[jj * wiener_halfwin1 + ii] +=
- Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] /
- (scaler * WIENER_TAP_SCALE_FACTOR) * a[l] /
- (WIENER_TAP_SCALE_FACTOR / scaler);
+ // Calculate
+ // B[jj * wiener_halfwin1 + ii] +=
+ // Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] /
+ // WIENER_TAP_SCALE_FACTOR * a[l] / WIENER_TAP_SCALE_FACTOR;
+ //
+ // The last multiplication may overflow, so we combine the last
+ // multiplication with the last division.
+ const int64_t x = Hc[i * wiener_win + j][k * wiener_win2 + l] * a[k] /
+ WIENER_TAP_SCALE_FACTOR;
+ // a[l] = a1[l] * WIENER_TAP_SCALE_FACTOR + a2[l]
+ B[jj * wiener_halfwin1 + ii] += multiply_and_scale(x, a1[l], a2[l]);
}
}
}
@@ -2050,7 +2067,7 @@ void av1_pick_filter_restoration(const YV12_BUFFER_CONFIG *src, AV1_COMP *cpi) {
&cpi->trial_frame_rst, cm->superres_upscaled_width,
cm->superres_upscaled_height, seq_params->subsampling_x,
seq_params->subsampling_y, highbd, AOM_RESTORATION_FRAME_BORDER,
- cm->features.byte_alignment, NULL, NULL, NULL, 0, 0))
+ cm->features.byte_alignment, NULL, NULL, NULL, false, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate trial restored frame buffer");
diff --git a/third_party/aom/av1/encoder/ratectrl.c b/third_party/aom/av1/encoder/ratectrl.c
index df86380272..7639484df5 100644
--- a/third_party/aom/av1/encoder/ratectrl.c
+++ b/third_party/aom/av1/encoder/ratectrl.c
@@ -30,6 +30,7 @@
#include "av1/common/seg_common.h"
#include "av1/encoder/encodemv.h"
+#include "av1/encoder/encoder_utils.h"
#include "av1/encoder/encode_strategy.h"
#include "av1/encoder/gop_structure.h"
#include "av1/encoder/random.h"
@@ -405,10 +406,10 @@ void av1_primary_rc_init(const AV1EncoderConfig *oxcf,
p_rc->rate_correction_factors[KF_STD] = 1.0;
p_rc->bits_off_target = p_rc->starting_buffer_level;
- p_rc->rolling_target_bits =
- (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate);
- p_rc->rolling_actual_bits =
- (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate);
+ p_rc->rolling_target_bits = AOMMAX(
+ 1, (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate));
+ p_rc->rolling_actual_bits = AOMMAX(
+ 1, (int)(oxcf->rc_cfg.target_bandwidth / oxcf->input_cfg.init_framerate));
}
void av1_rc_init(const AV1EncoderConfig *oxcf, RATE_CONTROL *rc) {
@@ -439,6 +440,7 @@ void av1_rc_init(const AV1EncoderConfig *oxcf, RATE_CONTROL *rc) {
rc->rtc_external_ratectrl = 0;
rc->frame_level_fast_extra_bits = 0;
rc->use_external_qp_one_pass = 0;
+ rc->percent_blocks_inactive = 0;
}
static bool check_buffer_below_thresh(AV1_COMP *cpi, int64_t buffer_level,
@@ -1719,41 +1721,39 @@ static void adjust_active_best_and_worst_quality(const AV1_COMP *cpi,
const AV1_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
- const RefreshFrameInfo *const refresh_frame = &cpi->refresh_frame;
int active_best_quality = *active_best;
int active_worst_quality = *active_worst;
#if CONFIG_FPMT_TEST
- const int simulate_parallel_frame =
- cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 &&
- cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE;
- int extend_minq = simulate_parallel_frame ? p_rc->temp_extend_minq
- : cpi->ppi->twopass.extend_minq;
- int extend_maxq = simulate_parallel_frame ? p_rc->temp_extend_maxq
- : cpi->ppi->twopass.extend_maxq;
#endif
// Extension to max or min Q if undershoot or overshoot is outside
// the permitted range.
if (cpi->oxcf.rc_cfg.mode != AOM_Q) {
+#if CONFIG_FPMT_TEST
+ const int simulate_parallel_frame =
+ cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 &&
+ cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE;
+ const int extend_minq = simulate_parallel_frame
+ ? p_rc->temp_extend_minq
+ : cpi->ppi->twopass.extend_minq;
+ const int extend_maxq = simulate_parallel_frame
+ ? p_rc->temp_extend_maxq
+ : cpi->ppi->twopass.extend_maxq;
+ const RefreshFrameInfo *const refresh_frame = &cpi->refresh_frame;
if (frame_is_intra_only(cm) ||
(!rc->is_src_frame_alt_ref &&
(refresh_frame->golden_frame || is_intrl_arf_boost ||
refresh_frame->alt_ref_frame))) {
-#if CONFIG_FPMT_TEST
active_best_quality -= extend_minq;
active_worst_quality += (extend_maxq / 2);
-#else
- active_best_quality -= cpi->ppi->twopass.extend_minq / 4;
- active_worst_quality += (cpi->ppi->twopass.extend_maxq / 2);
-#endif
} else {
-#if CONFIG_FPMT_TEST
active_best_quality -= extend_minq / 2;
active_worst_quality += extend_maxq;
+ }
#else
- active_best_quality -= cpi->ppi->twopass.extend_minq / 4;
- active_worst_quality += cpi->ppi->twopass.extend_maxq;
+ (void)is_intrl_arf_boost;
+ active_best_quality -= cpi->ppi->twopass.extend_minq / 8;
+ active_worst_quality += cpi->ppi->twopass.extend_maxq / 4;
#endif
- }
}
#ifndef STRICT_RC
@@ -2991,6 +2991,24 @@ void av1_set_rtc_reference_structure_one_layer(AV1_COMP *cpi, int gf_update) {
cpi->rt_reduce_num_ref_buffers &= (rtc_ref->ref_idx[2] < 7);
}
+static int set_block_is_active(unsigned char *const active_map_4x4, int mi_cols,
+ int mi_rows, int sbi_col, int sbi_row, int sh,
+ int num_4x4) {
+ int r = sbi_row << sh;
+ int c = sbi_col << sh;
+ const int row_max = AOMMIN(num_4x4, mi_rows - r);
+ const int col_max = AOMMIN(num_4x4, mi_cols - c);
+ // Active map is set for 16x16 blocks, so only need to
+ // check over16x16,
+ for (int x = 0; x < row_max; x += 4) {
+ for (int y = 0; y < col_max; y += 4) {
+ if (active_map_4x4[(r + x) * mi_cols + (c + y)] == AM_SEGMENT_ID_ACTIVE)
+ return 1;
+ }
+ }
+ return 0;
+}
+
/*!\brief Check for scene detection, for 1 pass real-time mode.
*
* Compute average source sad (temporal sad: between current source and
@@ -3093,11 +3111,26 @@ static void rc_scene_detection_onepass_rt(AV1_COMP *cpi,
sizeof(*cpi->src_sad_blk_64x64)));
}
}
+ const CommonModeInfoParams *const mi_params = &cpi->common.mi_params;
+ const int mi_cols = mi_params->mi_cols;
+ const int mi_rows = mi_params->mi_rows;
+ int sh = (cm->seq_params->sb_size == BLOCK_128X128) ? 5 : 4;
+ int num_4x4 = (cm->seq_params->sb_size == BLOCK_128X128) ? 32 : 16;
+ unsigned char *const active_map_4x4 = cpi->active_map.map;
// Avoid bottom and right border.
for (int sbi_row = 0; sbi_row < sb_rows - border; ++sbi_row) {
for (int sbi_col = 0; sbi_col < sb_cols; ++sbi_col) {
- tmp_sad = cpi->ppi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y,
- last_src_ystride);
+ int block_is_active = 1;
+ if (cpi->active_map.enabled && rc->percent_blocks_inactive > 0) {
+ block_is_active = set_block_is_active(active_map_4x4, mi_cols, mi_rows,
+ sbi_col, sbi_row, sh, num_4x4);
+ }
+ if (block_is_active) {
+ tmp_sad = cpi->ppi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y,
+ last_src_ystride);
+ } else {
+ tmp_sad = 0;
+ }
if (cpi->src_sad_blk_64x64 != NULL)
cpi->src_sad_blk_64x64[sbi_col + sbi_row * sb_cols] = tmp_sad;
if (check_light_change) {
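
[Editor's note] A toy walk of the indices the new set_block_is_active() visits for one 64x64 superblock (sh = 4, num_4x4 = 16), using hypothetical mi dimensions for a 160x90-ish frame; these are the positions checked before the SAD loop above decides whether to skip an inactive superblock:

#include <stdio.h>

int main(void) {
  const int mi_cols = 40, mi_rows = 23;  // hypothetical frame in 4x4 mi units
  const int sbi_row = 1, sbi_col = 2, sh = 4, num_4x4 = 16;
  const int r = sbi_row << sh, c = sbi_col << sh;
  const int row_max = (num_4x4 < mi_rows - r) ? num_4x4 : mi_rows - r;
  const int col_max = (num_4x4 < mi_cols - c) ? num_4x4 : mi_cols - c;
  // Stepping by 4 samples one 4x4 unit per 16x16 block, matching the
  // granularity at which the active map is set.
  for (int x = 0; x < row_max; x += 4)
    for (int y = 0; y < col_max; y += 4)
      printf("check map[%d]\n", (r + x) * mi_cols + (c + y));
  return 0;
}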
@@ -3456,8 +3489,13 @@ void av1_get_one_pass_rt_params(AV1_COMP *cpi, FRAME_TYPE *const frame_type,
}
}
}
- // Check for scene change: for SVC check on base spatial layer only.
- if (cpi->sf.rt_sf.check_scene_detection && svc->spatial_layer_id == 0) {
+ if (cpi->active_map.enabled && cpi->rc.percent_blocks_inactive == 100) {
+ rc->frame_source_sad = 0;
+ rc->avg_source_sad = (3 * rc->avg_source_sad + rc->frame_source_sad) >> 2;
+ rc->percent_blocks_with_motion = 0;
+ rc->high_source_sad = 0;
+ } else if (cpi->sf.rt_sf.check_scene_detection &&
+ svc->spatial_layer_id == 0) {
if (rc->prev_coded_width == cm->width &&
rc->prev_coded_height == cm->height) {
rc_scene_detection_onepass_rt(cpi, frame_input);
@@ -3522,6 +3560,10 @@ void av1_get_one_pass_rt_params(AV1_COMP *cpi, FRAME_TYPE *const frame_type,
}
}
+#define CHECK_INTER_LAYER_PRED(ref_frame) \
+ ((cpi->ref_frame_flags & av1_ref_frame_flag_list[ref_frame]) && \
+ (av1_check_ref_is_low_spatial_res_super_frame(cpi, ref_frame)))
+
int av1_encodedframe_overshoot_cbr(AV1_COMP *cpi, int *q) {
AV1_COMMON *const cm = &cpi->common;
PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
@@ -3532,12 +3574,26 @@ int av1_encodedframe_overshoot_cbr(AV1_COMP *cpi, int *q) {
int target_bits_per_mb;
double q2;
int enumerator;
+ int inter_layer_pred_on = 0;
int is_screen_content = (cpi->oxcf.tune_cfg.content == AOM_CONTENT_SCREEN);
- *q = (3 * cpi->rc.worst_quality + *q) >> 2;
- // For screen content use the max-q set by the user to allow for less
- // overshoot on slide changes.
- if (is_screen_content) *q = cpi->rc.worst_quality;
cpi->cyclic_refresh->counter_encode_maxq_scene_change = 0;
+ if (cpi->svc.spatial_layer_id > 0) {
+ // For spatial layers: check if inter-layer (spatial) prediction is used
+ // (check if any reference is being used that is the lower spatial layer),
+ inter_layer_pred_on = CHECK_INTER_LAYER_PRED(LAST_FRAME) ||
+ CHECK_INTER_LAYER_PRED(GOLDEN_FRAME) ||
+ CHECK_INTER_LAYER_PRED(ALTREF_FRAME);
+ }
+ // If inter-layer prediction is on: we expect to pull up the quality from
+ // the lower spatial layer, so we can use a lower q.
+ if (cpi->svc.spatial_layer_id > 0 && inter_layer_pred_on) {
+ *q = (cpi->rc.worst_quality + *q) >> 1;
+ } else {
+ *q = (3 * cpi->rc.worst_quality + *q) >> 2;
+ // For screen content use the max-q set by the user to allow for less
+ // overshoot on slide changes.
+ if (is_screen_content) *q = cpi->rc.worst_quality;
+ }
// Adjust avg_frame_qindex, buffer_level, and rate correction factors, as
// these parameters will affect QP selection for subsequent frames. If they
// have settled down to a very different (low QP) state, then not adjusting
@@ -3566,8 +3622,10 @@ int av1_encodedframe_overshoot_cbr(AV1_COMP *cpi, int *q) {
rate_correction_factor;
}
// For temporal layers: reset the rate control parameters across all
- // temporal layers.
- if (cpi->svc.number_temporal_layers > 1) {
+  // temporal layers. For spatial enhancement layers, only do this when
+  // inter_layer_pred_on is off.
+ if (cpi->svc.number_temporal_layers > 1 &&
+ (cpi->svc.spatial_layer_id == 0 || inter_layer_pred_on == 0)) {
SVC *svc = &cpi->svc;
for (int tl = 0; tl < svc->number_temporal_layers; ++tl) {
int sl = svc->spatial_layer_id;
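
[Editor's note] A worked example of the patched q adjustment in av1_encodedframe_overshoot_cbr(); the qindex values are hypothetical and the screen-content override is omitted:

#include <stdio.h>

int main(void) {
  const int worst = 255;              // rc.worst_quality (example value)
  int q = 120;
  const int inter_layer_pred_on = 1;  // hypothetical enhancement-layer case
  if (inter_layer_pred_on) {
    // Pull less toward max-q: quality can be recovered from the lower layer.
    q = (worst + q) >> 1;             // 187
  } else {
    q = (3 * worst + q) >> 2;         // 221
  }
  printf("adjusted q: %d\n", q);
  return 0;
}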
diff --git a/third_party/aom/av1/encoder/ratectrl.h b/third_party/aom/av1/encoder/ratectrl.h
index 6802ad42d0..5121a909f4 100644
--- a/third_party/aom/av1/encoder/ratectrl.h
+++ b/third_party/aom/av1/encoder/ratectrl.h
@@ -249,6 +249,9 @@ typedef struct {
// signals if number of blocks with motion is high
int percent_blocks_with_motion;
+ // signals percentage of 16x16 blocks that are inactive, via active_maps
+ int percent_blocks_inactive;
+
// Maximum value of source sad across all blocks of frame.
uint64_t max_block_source_sad;
diff --git a/third_party/aom/av1/encoder/speed_features.c b/third_party/aom/av1/encoder/speed_features.c
index 63d69cadc5..256b6fc9eb 100644
--- a/third_party/aom/av1/encoder/speed_features.c
+++ b/third_party/aom/av1/encoder/speed_features.c
@@ -1177,6 +1177,7 @@ static void set_good_speed_features_framesize_independent(
sf->mv_sf.subpel_search_method = SUBPEL_TREE_PRUNED_MORE;
sf->gm_sf.prune_zero_mv_with_sse = 2;
+ sf->gm_sf.downsample_level = 1;
sf->part_sf.simple_motion_search_prune_agg =
allow_screen_content_tools ? SIMPLE_AGG_LVL0 : SIMPLE_AGG_LVL2;
@@ -1282,6 +1283,8 @@ static void set_good_speed_features_framesize_independent(
sf->hl_sf.disable_extra_sc_testing = 1;
sf->hl_sf.second_alt_ref_filtering = 0;
+ sf->gm_sf.downsample_level = 2;
+
sf->inter_sf.prune_inter_modes_based_on_tpl = boosted ? 0 : 3;
sf->inter_sf.selective_ref_frame = 6;
sf->inter_sf.prune_single_ref = is_boosted_arf2_bwd_type ? 0 : 2;
@@ -1465,6 +1468,7 @@ static void set_rt_speed_feature_framesize_dependent(const AV1_COMP *const cpi,
if (is_360p_or_larger) {
sf->part_sf.fixed_partition_size = BLOCK_32X32;
sf->rt_sf.use_fast_fixed_part = 1;
+ sf->mv_sf.subpel_force_stop = HALF_PEL;
}
sf->rt_sf.increase_source_sad_thresh = 1;
sf->rt_sf.part_early_exit_zeromv = 2;
@@ -1472,6 +1476,7 @@ static void set_rt_speed_feature_framesize_dependent(const AV1_COMP *const cpi,
for (int i = 0; i < BLOCK_SIZES; ++i) {
sf->rt_sf.intra_y_mode_bsize_mask_nrd[i] = INTRA_DC;
}
+ sf->rt_sf.hybrid_intra_pickmode = 0;
}
// Setting for SVC, or when the ref_frame_config control is
// used to set the reference structure.
@@ -1572,13 +1577,13 @@ static void set_rt_speed_feature_framesize_dependent(const AV1_COMP *const cpi,
sf->rt_sf.screen_content_cdef_filter_qindex_thresh = 80;
sf->rt_sf.part_early_exit_zeromv = 1;
sf->rt_sf.nonrd_aggressive_skip = 1;
+ sf->rt_sf.thresh_active_maps_skip_lf_cdef = 90;
}
if (speed >= 11) {
sf->rt_sf.skip_lf_screen = 2;
sf->rt_sf.skip_cdef_sb = 2;
sf->rt_sf.part_early_exit_zeromv = 2;
sf->rt_sf.prune_palette_nonrd = 1;
- sf->rt_sf.set_zeromv_skip_based_on_source_sad = 2;
sf->rt_sf.increase_color_thresh_palette = 0;
}
sf->rt_sf.use_nonrd_altref_frame = 0;
@@ -1974,6 +1979,7 @@ static AOM_INLINE void init_gm_sf(GLOBAL_MOTION_SPEED_FEATURES *gm_sf) {
gm_sf->prune_ref_frame_for_gm_search = 0;
gm_sf->prune_zero_mv_with_sse = 0;
gm_sf->disable_gm_search_based_on_stats = 0;
+ gm_sf->downsample_level = 0;
gm_sf->num_refinement_steps = GM_MAX_REFINEMENT_STEPS;
}
@@ -2270,6 +2276,7 @@ static AOM_INLINE void init_rt_sf(REAL_TIME_SPEED_FEATURES *rt_sf) {
rt_sf->part_early_exit_zeromv = 0;
rt_sf->sse_early_term_inter_search = EARLY_TERM_DISABLED;
rt_sf->skip_lf_screen = 0;
+ rt_sf->thresh_active_maps_skip_lf_cdef = 100;
rt_sf->sad_based_adp_altref_lag = 0;
rt_sf->partition_direct_merging = 0;
rt_sf->var_part_based_on_qidx = 0;
diff --git a/third_party/aom/av1/encoder/speed_features.h b/third_party/aom/av1/encoder/speed_features.h
index 60c000e4f4..d59cb38a71 100644
--- a/third_party/aom/av1/encoder/speed_features.h
+++ b/third_party/aom/av1/encoder/speed_features.h
@@ -587,6 +587,9 @@ typedef struct GLOBAL_MOTION_SPEED_FEATURES {
// GF group
int disable_gm_search_based_on_stats;
+ // Downsampling pyramid level to use for global motion estimation
+ int downsample_level;
+
// Number of refinement steps to apply after initial model generation
int num_refinement_steps;
} GLOBAL_MOTION_SPEED_FEATURES;
@@ -1771,6 +1774,10 @@ typedef struct REAL_TIME_SPEED_FEATURES {
// where rc->high_source_sad = 0 (no slide-changes).
int skip_lf_screen;
+  // Threshold on the percent of inactive blocks at or above which the
+  // loopfilter and cdef are skipped. Setting it to 100 disables this feature.
+ int thresh_active_maps_skip_lf_cdef;
+
// For nonrd: early exit out of variance partition that sets the
// block size to superblock size, and sets mode to zeromv-last skip.
// 0: disabled
diff --git a/third_party/aom/av1/encoder/superres_scale.c b/third_party/aom/av1/encoder/superres_scale.c
index 3b47909b15..41225d55ae 100644
--- a/third_party/aom/av1/encoder/superres_scale.c
+++ b/third_party/aom/av1/encoder/superres_scale.c
@@ -404,7 +404,7 @@ void av1_superres_post_encode(AV1_COMP *cpi) {
assert(!is_lossless_requested(&cpi->oxcf.rc_cfg));
assert(!cm->features.all_lossless);
- av1_superres_upscale(cm, NULL, cpi->image_pyramid_levels);
+ av1_superres_upscale(cm, NULL, cpi->alloc_pyramid);
// If regular resizing is occurring the source will need to be downscaled to
// match the upscaled superres resolution. Otherwise the original source is
diff --git a/third_party/aom/av1/encoder/svc_layercontext.c b/third_party/aom/av1/encoder/svc_layercontext.c
index 2c99cb89b8..33da3afbd3 100644
--- a/third_party/aom/av1/encoder/svc_layercontext.c
+++ b/third_party/aom/av1/encoder/svc_layercontext.c
@@ -203,8 +203,10 @@ void av1_update_temporal_layer_framerate(AV1_COMP *const cpi) {
}
}
-static AOM_INLINE bool check_ref_is_low_spatial_res_super_frame(
- int ref_frame, const SVC *svc, const RTC_REF *rtc_ref) {
+bool av1_check_ref_is_low_spatial_res_super_frame(AV1_COMP *const cpi,
+ int ref_frame) {
+ SVC *svc = &cpi->svc;
+ RTC_REF *const rtc_ref = &cpi->ppi->rtc_ref;
int ref_frame_idx = rtc_ref->ref_idx[ref_frame - 1];
return rtc_ref->buffer_time_index[ref_frame_idx] == svc->current_superframe &&
rtc_ref->buffer_spatial_layer[ref_frame_idx] <=
@@ -253,13 +255,13 @@ void av1_restore_layer_context(AV1_COMP *const cpi) {
// previous spatial layer(s) at the same time (current_superframe).
if (rtc_ref->set_ref_frame_config && svc->force_zero_mode_spatial_ref &&
cpi->sf.rt_sf.use_nonrd_pick_mode) {
- if (check_ref_is_low_spatial_res_super_frame(LAST_FRAME, svc, rtc_ref)) {
+ if (av1_check_ref_is_low_spatial_res_super_frame(cpi, LAST_FRAME)) {
svc->skip_mvsearch_last = 1;
}
- if (check_ref_is_low_spatial_res_super_frame(GOLDEN_FRAME, svc, rtc_ref)) {
+ if (av1_check_ref_is_low_spatial_res_super_frame(cpi, GOLDEN_FRAME)) {
svc->skip_mvsearch_gf = 1;
}
- if (check_ref_is_low_spatial_res_super_frame(ALTREF_FRAME, svc, rtc_ref)) {
+ if (av1_check_ref_is_low_spatial_res_super_frame(cpi, ALTREF_FRAME)) {
svc->skip_mvsearch_altref = 1;
}
}
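
[Editor's note] A toy model of the check av1_check_ref_is_low_spatial_res_super_frame() performs: a reference qualifies when its buffer was updated in the current superframe by a lower spatial layer. Buffer contents below are hypothetical:

#include <stdbool.h>
#include <stdio.h>

int main(void) {
  const int current_superframe = 7, spatial_layer_id = 1;
  const int ref_idx[3] = { 0, 3, 6 };  // LAST, GOLDEN, ALTREF slots
  const int buffer_time_index[8] = { 7, 6, 7, 7, 5, 7, 6, 7 };
  const int buffer_spatial_layer[8] = { 0, 0, 1, 0, 1, 1, 0, 1 };
  for (int ref = 0; ref < 3; ref++) {
    const int idx = ref_idx[ref];
    const bool low_res_super_frame =
        buffer_time_index[idx] == current_superframe &&
        buffer_spatial_layer[idx] <= spatial_layer_id - 1;
    printf("ref %d -> %s\n", ref,
           low_res_super_frame ? "skip mv search" : "full search");
  }
  return 0;
}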
diff --git a/third_party/aom/av1/encoder/svc_layercontext.h b/third_party/aom/av1/encoder/svc_layercontext.h
index 93118be2d4..d56ea77791 100644
--- a/third_party/aom/av1/encoder/svc_layercontext.h
+++ b/third_party/aom/av1/encoder/svc_layercontext.h
@@ -223,6 +223,21 @@ void av1_update_layer_context_change_config(struct AV1_COMP *const cpi,
*/
void av1_update_temporal_layer_framerate(struct AV1_COMP *const cpi);
+/*!\brief Checks whether a reference frame is a lower spatial layer from the
+ * same timestamp/superframe.
+ *
+ * \ingroup SVC
+ * \callgraph
+ * \callergraph
+ *
+ * \param[in] cpi Top level encoder structure
+ * \param[in] ref_frame Reference frame
+ *
+ * \return True if ref_frame is a lower spatial layer, otherwise false.
+ */
+bool av1_check_ref_is_low_spatial_res_super_frame(struct AV1_COMP *const cpi,
+ int ref_frame);
+
/*!\brief Prior to encoding the frame, set the layer context, for the current
layer to be encoded, to the cpi struct.
*
diff --git a/third_party/aom/av1/encoder/temporal_filter.c b/third_party/aom/av1/encoder/temporal_filter.c
index 7d4d25de6a..e8cc145030 100644
--- a/third_party/aom/av1/encoder/temporal_filter.c
+++ b/third_party/aom/av1/encoder/temporal_filter.c
@@ -463,12 +463,12 @@ static void tf_build_predictor(const YV12_BUFFER_CONFIG *ref_frame,
// Returns:
// Nothing will be returned. But the content to which `accum` and `pred`
// point will be modified.
-void tf_apply_temporal_filter_self(const YV12_BUFFER_CONFIG *ref_frame,
- const MACROBLOCKD *mbd,
- const BLOCK_SIZE block_size,
- const int mb_row, const int mb_col,
- const int num_planes, uint32_t *accum,
- uint16_t *count) {
+static void tf_apply_temporal_filter_self(const YV12_BUFFER_CONFIG *ref_frame,
+ const MACROBLOCKD *mbd,
+ const BLOCK_SIZE block_size,
+ const int mb_row, const int mb_col,
+ const int num_planes, uint32_t *accum,
+ uint16_t *count) {
// Block information.
const int mb_height = block_size_high[block_size];
const int mb_width = block_size_wide[block_size];
@@ -564,9 +564,10 @@ static INLINE void compute_square_diff(const uint8_t *ref, const int ref_offset,
// Returns:
// Nothing will be returned. But the content to which `luma_sse_sum` points
// will be modified.
-void compute_luma_sq_error_sum(uint32_t *square_diff, uint32_t *luma_sse_sum,
- int block_height, int block_width,
- int ss_x_shift, int ss_y_shift) {
+static void compute_luma_sq_error_sum(uint32_t *square_diff,
+ uint32_t *luma_sse_sum, int block_height,
+ int block_width, int ss_x_shift,
+ int ss_y_shift) {
for (int i = 0; i < block_height; ++i) {
for (int j = 0; j < block_width; ++j) {
for (int ii = 0; ii < (1 << ss_y_shift); ++ii) {
@@ -1456,7 +1457,7 @@ bool av1_tf_info_alloc(TEMPORAL_FILTER_INFO *tf_info, const AV1_COMP *cpi) {
oxcf->frm_dim_cfg.height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0)) {
+ NULL, cpi->alloc_pyramid, 0)) {
return false;
}
}
diff --git a/third_party/aom/av1/encoder/temporal_filter.h b/third_party/aom/av1/encoder/temporal_filter.h
index 6504b91b66..a40fb039b9 100644
--- a/third_party/aom/av1/encoder/temporal_filter.h
+++ b/third_party/aom/av1/encoder/temporal_filter.h
@@ -14,6 +14,8 @@
#include <stdbool.h>
+#include "aom_util/aom_pthread.h"
+
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/third_party/aom/av1/encoder/tpl_model.c b/third_party/aom/av1/encoder/tpl_model.c
index ca60e4981e..86f5485a26 100644
--- a/third_party/aom/av1/encoder/tpl_model.c
+++ b/third_party/aom/av1/encoder/tpl_model.c
@@ -19,6 +19,7 @@
#include "config/aom_scale_rtcd.h"
#include "aom/aom_codec.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/av1_common_int.h"
#include "av1/common/enums.h"
@@ -193,7 +194,7 @@ void av1_setup_tpl_buffers(AV1_PRIMARY *const ppi,
&tpl_data->tpl_rec_pool[frame], width, height,
seq_params->subsampling_x, seq_params->subsampling_y,
seq_params->use_highbitdepth, tpl_data->border_in_pixels,
- byte_alignment, 0, alloc_y_plane_only))
+ byte_alignment, false, alloc_y_plane_only))
aom_internal_error(&ppi->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
diff --git a/third_party/aom/av1/encoder/tpl_model.h b/third_party/aom/av1/encoder/tpl_model.h
index bcd58216c5..0150c702f9 100644
--- a/third_party/aom/av1/encoder/tpl_model.h
+++ b/third_party/aom/av1/encoder/tpl_model.h
@@ -30,6 +30,7 @@ struct TPL_INFO;
#include "config/aom_config.h"
#include "aom_scale/yv12config.h"
+#include "aom_util/aom_pthread.h"
#include "av1/common/mv.h"
#include "av1/common/scale.h"
diff --git a/third_party/aom/av1/encoder/tune_butteraugli.c b/third_party/aom/av1/encoder/tune_butteraugli.c
index 92fc4b2a92..4381af6a8b 100644
--- a/third_party/aom/av1/encoder/tune_butteraugli.c
+++ b/third_party/aom/av1/encoder/tune_butteraugli.c
@@ -209,7 +209,7 @@ void av1_setup_butteraugli_source(AV1_COMP *cpi) {
if (dst->buffer_alloc_sz == 0) {
aom_alloc_frame_buffer(
dst, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0);
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0);
}
av1_copy_and_extend_frame(cpi->source, dst);
@@ -218,7 +218,7 @@ void av1_setup_butteraugli_source(AV1_COMP *cpi) {
aom_alloc_frame_buffer(
resized_dst, width / resize_factor, height / resize_factor, ss_x, ss_y,
cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
}
if (!av1_resize_and_extend_frame_nonnormative(
cpi->source, resized_dst, bit_depth, av1_num_planes(cm))) {
@@ -244,7 +244,7 @@ void av1_setup_butteraugli_rdmult_and_restore_source(AV1_COMP *cpi, double K) {
aom_alloc_frame_buffer(
&resized_recon, width / resize_factor, height / resize_factor, ss_x, ss_y,
cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
copy_img(&cpi->common.cur_frame->buf, &resized_recon, width / resize_factor,
height / resize_factor);
@@ -267,12 +267,12 @@ void av1_setup_butteraugli_rdmult(AV1_COMP *cpi) {
cpi->source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_source, &cpi->scaled_source, cm->features.interp_filter,
- 0, false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ 0, false, false, cpi->oxcf.border_in_pixels, cpi->alloc_pyramid);
if (cpi->unscaled_last_source != NULL) {
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
cm->features.interp_filter, 0, false, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->alloc_pyramid);
}
av1_setup_butteraugli_source(cpi);
diff --git a/third_party/aom/av1/encoder/tune_vmaf.c b/third_party/aom/av1/encoder/tune_vmaf.c
index 4e5ffa387c..91db3db726 100644
--- a/third_party/aom/av1/encoder/tune_vmaf.c
+++ b/third_party/aom/av1/encoder/tune_vmaf.c
@@ -288,10 +288,10 @@ static AOM_INLINE void gaussian_blur(const int bit_depth,
}
}
-static AOM_INLINE double cal_approx_vmaf(const AV1_COMP *const cpi,
- double source_variance,
- YV12_BUFFER_CONFIG *const source,
- YV12_BUFFER_CONFIG *const sharpened) {
+static AOM_INLINE double cal_approx_vmaf(
+ const AV1_COMP *const cpi, double source_variance,
+ const YV12_BUFFER_CONFIG *const source,
+ const YV12_BUFFER_CONFIG *const sharpened) {
const int bit_depth = cpi->td.mb.e_mbd.bd;
const bool cal_vmaf_neg =
cpi->oxcf.tune_cfg.tuning == AOM_TUNE_VMAF_NEG_MAX_GAIN;
@@ -305,11 +305,11 @@ static AOM_INLINE double cal_approx_vmaf(const AV1_COMP *const cpi,
}
static double find_best_frame_unsharp_amount_loop(
- const AV1_COMP *const cpi, YV12_BUFFER_CONFIG *const source,
- YV12_BUFFER_CONFIG *const blurred, YV12_BUFFER_CONFIG *const sharpened,
- double best_vmaf, const double baseline_variance,
- const double unsharp_amount_start, const double step_size,
- const int max_loop_count, const double max_amount) {
+ const AV1_COMP *const cpi, const YV12_BUFFER_CONFIG *const source,
+ const YV12_BUFFER_CONFIG *const blurred,
+ const YV12_BUFFER_CONFIG *const sharpened, double best_vmaf,
+ const double baseline_variance, const double unsharp_amount_start,
+ const double step_size, const int max_loop_count, const double max_amount) {
const double min_amount = 0.0;
int loop_count = 0;
double approx_vmaf = best_vmaf;
@@ -328,13 +328,11 @@ static double find_best_frame_unsharp_amount_loop(
return AOMMIN(max_amount, AOMMAX(unsharp_amount, min_amount));
}
-static double find_best_frame_unsharp_amount(const AV1_COMP *const cpi,
- YV12_BUFFER_CONFIG *const source,
- YV12_BUFFER_CONFIG *const blurred,
- const double unsharp_amount_start,
- const double step_size,
- const int max_loop_count,
- const double max_filter_amount) {
+static double find_best_frame_unsharp_amount(
+ const AV1_COMP *const cpi, const YV12_BUFFER_CONFIG *const source,
+ const YV12_BUFFER_CONFIG *const blurred, const double unsharp_amount_start,
+ const double step_size, const int max_loop_count,
+ const double max_filter_amount) {
const AV1_COMMON *const cm = &cpi->common;
const int width = source->y_width;
const int height = source->y_height;
@@ -343,7 +341,7 @@ static double find_best_frame_unsharp_amount(const AV1_COMP *const cpi,
aom_alloc_frame_buffer(
&sharpened, width, height, source->subsampling_x, source->subsampling_y,
cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
const double baseline_variance = frame_average_variance(cpi, source);
double unsharp_amount;
@@ -376,7 +374,7 @@ static double find_best_frame_unsharp_amount(const AV1_COMP *const cpi,
}
void av1_vmaf_neg_preprocessing(AV1_COMP *const cpi,
- YV12_BUFFER_CONFIG *const source) {
+ const YV12_BUFFER_CONFIG *const source) {
const AV1_COMMON *const cm = &cpi->common;
const int bit_depth = cpi->td.mb.e_mbd.bd;
const int width = source->y_width;
@@ -395,7 +393,7 @@ void av1_vmaf_neg_preprocessing(AV1_COMP *const cpi,
aom_alloc_frame_buffer(
&blurred, width, height, source->subsampling_x, source->subsampling_y,
cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
gaussian_blur(bit_depth, source, &blurred);
unsharp(cpi, source, &blurred, source, best_frame_unsharp_amount);
@@ -403,7 +401,7 @@ void av1_vmaf_neg_preprocessing(AV1_COMP *const cpi,
}
void av1_vmaf_frame_preprocessing(AV1_COMP *const cpi,
- YV12_BUFFER_CONFIG *const source) {
+ const YV12_BUFFER_CONFIG *const source) {
const AV1_COMMON *const cm = &cpi->common;
const int bit_depth = cpi->td.mb.e_mbd.bd;
const int width = source->y_width;
@@ -415,11 +413,11 @@ void av1_vmaf_frame_preprocessing(AV1_COMP *const cpi,
aom_alloc_frame_buffer(
&source_extended, width, height, source->subsampling_x,
source->subsampling_y, cm->seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0);
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(
&blurred, width, height, source->subsampling_x, source->subsampling_y,
cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
av1_copy_and_extend_frame(source, &source_extended);
gaussian_blur(bit_depth, &source_extended, &blurred);
@@ -442,7 +440,7 @@ void av1_vmaf_frame_preprocessing(AV1_COMP *const cpi,
}
void av1_vmaf_blk_preprocessing(AV1_COMP *const cpi,
- YV12_BUFFER_CONFIG *const source) {
+ const YV12_BUFFER_CONFIG *const source) {
const AV1_COMMON *const cm = &cpi->common;
const int width = source->y_width;
const int height = source->y_height;
@@ -455,11 +453,11 @@ void av1_vmaf_blk_preprocessing(AV1_COMP *const cpi,
memset(&source_extended, 0, sizeof(source_extended));
aom_alloc_frame_buffer(
&blurred, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0);
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&source_extended, width, height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
av1_copy_and_extend_frame(source, &source_extended);
gaussian_blur(bit_depth, &source_extended, &blurred);
@@ -495,11 +493,11 @@ void av1_vmaf_blk_preprocessing(AV1_COMP *const cpi,
aom_alloc_frame_buffer(&source_block, block_w, block_h, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&blurred_block, block_w, block_h, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
for (int row = 0; row < num_rows; ++row) {
for (int col = 0; col < num_cols; ++col) {
@@ -622,7 +620,7 @@ void av1_set_mb_vmaf_rdmult_scaling(AV1_COMP *cpi) {
aom_alloc_frame_buffer(
&resized_source, y_width / resize_factor, y_height / resize_factor, ss_x,
ss_y, cm->seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
if (!av1_resize_and_extend_frame_nonnormative(
cpi->source, &resized_source, bit_depth, av1_num_planes(cm))) {
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
@@ -643,7 +641,7 @@ void av1_set_mb_vmaf_rdmult_scaling(AV1_COMP *cpi) {
aom_alloc_frame_buffer(&blurred, resized_y_width, resized_y_height, ss_x,
ss_y, cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
gaussian_blur(bit_depth, &resized_source, &blurred);
YV12_BUFFER_CONFIG recon;
@@ -651,7 +649,7 @@ void av1_set_mb_vmaf_rdmult_scaling(AV1_COMP *cpi) {
aom_alloc_frame_buffer(&recon, resized_y_width, resized_y_height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_yv12_copy_frame(&resized_source, &recon, 1);
VmafContext *vmaf_context;
@@ -830,15 +828,15 @@ static double calc_vmaf_motion_score(const AV1_COMP *const cpi,
aom_alloc_frame_buffer(&blurred_cur, y_width, y_height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&blurred_last, y_width, y_height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&blurred_next, y_width, y_height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
gaussian_blur(bit_depth, cur, &blurred_cur);
gaussian_blur(bit_depth, last, &blurred_last);
@@ -881,8 +879,8 @@ static double calc_vmaf_motion_score(const AV1_COMP *const cpi,
}
static AOM_INLINE void get_neighbor_frames(const AV1_COMP *const cpi,
- YV12_BUFFER_CONFIG **last,
- YV12_BUFFER_CONFIG **next) {
+ const YV12_BUFFER_CONFIG **last,
+ const YV12_BUFFER_CONFIG **next) {
const AV1_COMMON *const cm = &cpi->common;
const GF_GROUP *gf_group = &cpi->ppi->gf_group;
const int src_index =
@@ -920,7 +918,7 @@ int av1_get_vmaf_base_qindex(const AV1_COMP *const cpi, int current_qindex) {
if (approx_sse < sse_threshold || approx_dvmaf < vmaf_threshold) {
return current_qindex;
}
- YV12_BUFFER_CONFIG *cur_buf = cpi->source;
+ const YV12_BUFFER_CONFIG *cur_buf = cpi->source;
if (cm->show_frame == 0) {
const int src_index = gf_group->arf_src_offset[cpi->gf_frame_index];
struct lookahead_entry *cur_entry = av1_lookahead_peek(
@@ -929,7 +927,7 @@ int av1_get_vmaf_base_qindex(const AV1_COMP *const cpi, int current_qindex) {
}
assert(cur_buf);
- YV12_BUFFER_CONFIG *next_buf, *last_buf;
+ const YV12_BUFFER_CONFIG *next_buf, *last_buf;
get_neighbor_frames(cpi, &last_buf, &next_buf);
assert(last_buf);
@@ -954,8 +952,8 @@ int av1_get_vmaf_base_qindex(const AV1_COMP *const cpi, int current_qindex) {
static AOM_INLINE double cal_approx_score(
AV1_COMP *const cpi, double src_variance, double new_variance,
- double src_score, YV12_BUFFER_CONFIG *const src,
- YV12_BUFFER_CONFIG *const recon_sharpened) {
+ double src_score, const YV12_BUFFER_CONFIG *const src,
+ const YV12_BUFFER_CONFIG *const recon_sharpened) {
double score;
const uint32_t bit_depth = cpi->td.mb.e_mbd.bd;
const bool cal_vmaf_neg =
@@ -967,11 +965,12 @@ static AOM_INLINE double cal_approx_score(
static double find_best_frame_unsharp_amount_loop_neg(
AV1_COMP *const cpi, double src_variance, double base_score,
- YV12_BUFFER_CONFIG *const src, YV12_BUFFER_CONFIG *const recon,
- YV12_BUFFER_CONFIG *const ref, YV12_BUFFER_CONFIG *const src_blurred,
- YV12_BUFFER_CONFIG *const recon_blurred,
- YV12_BUFFER_CONFIG *const src_sharpened,
- YV12_BUFFER_CONFIG *const recon_sharpened, FULLPEL_MV *mvs,
+ const YV12_BUFFER_CONFIG *const src, const YV12_BUFFER_CONFIG *const recon,
+ const YV12_BUFFER_CONFIG *const ref,
+ const YV12_BUFFER_CONFIG *const src_blurred,
+ const YV12_BUFFER_CONFIG *const recon_blurred,
+ const YV12_BUFFER_CONFIG *const src_sharpened,
+ const YV12_BUFFER_CONFIG *const recon_sharpened, FULLPEL_MV *mvs,
double best_score, const double unsharp_amount_start,
const double step_size, const int max_loop_count, const double max_amount) {
const double min_amount = 0.0;
@@ -999,8 +998,8 @@ static double find_best_frame_unsharp_amount_loop_neg(
}
static double find_best_frame_unsharp_amount_neg(
- AV1_COMP *const cpi, YV12_BUFFER_CONFIG *const src,
- YV12_BUFFER_CONFIG *const recon, YV12_BUFFER_CONFIG *const ref,
+ AV1_COMP *const cpi, const YV12_BUFFER_CONFIG *const src,
+ const YV12_BUFFER_CONFIG *const recon, const YV12_BUFFER_CONFIG *const ref,
double base_score, const double unsharp_amount_start,
const double step_size, const int max_loop_count,
const double max_filter_amount) {
@@ -1023,18 +1022,18 @@ static double find_best_frame_unsharp_amount_neg(
aom_alloc_frame_buffer(&recon_sharpened, width, height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&src_sharpened, width, height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(&recon_blurred, width, height, ss_x, ss_y,
cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels,
- cm->features.byte_alignment, 0, 0);
+ cm->features.byte_alignment, false, 0);
aom_alloc_frame_buffer(
&src_blurred, width, height, ss_x, ss_y, cm->seq_params->use_highbitdepth,
- cpi->oxcf.border_in_pixels, cm->features.byte_alignment, 0, 0);
+ cpi->oxcf.border_in_pixels, cm->features.byte_alignment, false, 0);
gaussian_blur(bit_depth, recon, &recon_blurred);
gaussian_blur(bit_depth, src, &src_blurred);
@@ -1076,8 +1075,8 @@ static double find_best_frame_unsharp_amount_neg(
}
void av1_update_vmaf_curve(AV1_COMP *cpi) {
- YV12_BUFFER_CONFIG *source = cpi->source;
- YV12_BUFFER_CONFIG *recon = &cpi->common.cur_frame->buf;
+ const YV12_BUFFER_CONFIG *source = cpi->source;
+ const YV12_BUFFER_CONFIG *recon = &cpi->common.cur_frame->buf;
const int bit_depth = cpi->td.mb.e_mbd.bd;
const GF_GROUP *const gf_group = &cpi->ppi->gf_group;
const int layer_depth =
@@ -1099,7 +1098,7 @@ void av1_update_vmaf_curve(AV1_COMP *cpi) {
}
if (cpi->oxcf.tune_cfg.tuning == AOM_TUNE_VMAF_NEG_MAX_GAIN) {
- YV12_BUFFER_CONFIG *last, *next;
+ const YV12_BUFFER_CONFIG *last, *next;
get_neighbor_frames(cpi, &last, &next);
double best_unsharp_amount_start =
get_layer_value(cpi->vmaf_info.last_frame_unsharp_amount, layer_depth);
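The tune_vmaf.c changes above thread const YV12_BUFFER_CONFIG pointers through the whole call chain. Note that a const struct pointer still lets these functions write pixels: const applies to the struct's fields (dimensions, strides, plane pointers), not to the memory the plane pointers reference. A simplified, self-contained illustration, using a stand-in type rather than the real buffer struct:

#include <stdint.h>

typedef struct {
  int y_width, y_height, y_stride;
  uint8_t *y_buffer;
} FrameSketch;

static void brighten(const FrameSketch *frame) {
  // frame->y_width = 0;  /* would not compile: the struct itself is const */
  for (int r = 0; r < frame->y_height; ++r) {
    for (int c = 0; c < frame->y_width; ++c) {
      frame->y_buffer[r * frame->y_stride + c] += 1;  // pixels stay writable
    }
  }
}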
diff --git a/third_party/aom/av1/encoder/tune_vmaf.h b/third_party/aom/av1/encoder/tune_vmaf.h
index a04a29e6fe..404fd1029a 100644
--- a/third_party/aom/av1/encoder/tune_vmaf.h
+++ b/third_party/aom/av1/encoder/tune_vmaf.h
@@ -43,13 +43,13 @@ typedef struct {
struct AV1_COMP;
void av1_vmaf_blk_preprocessing(struct AV1_COMP *cpi,
- YV12_BUFFER_CONFIG *source);
+ const YV12_BUFFER_CONFIG *source);
void av1_vmaf_frame_preprocessing(struct AV1_COMP *cpi,
- YV12_BUFFER_CONFIG *source);
+ const YV12_BUFFER_CONFIG *source);
void av1_vmaf_neg_preprocessing(struct AV1_COMP *cpi,
- YV12_BUFFER_CONFIG *source);
+ const YV12_BUFFER_CONFIG *source);
void av1_set_mb_vmaf_rdmult_scaling(struct AV1_COMP *cpi);
diff --git a/third_party/aom/av1/encoder/tx_search.c b/third_party/aom/av1/encoder/tx_search.c
index 7292c01191..5dcc08c0ff 100644
--- a/third_party/aom/av1/encoder/tx_search.c
+++ b/third_party/aom/av1/encoder/tx_search.c
@@ -1109,13 +1109,11 @@ static INLINE void dist_block_tx_domain(MACROBLOCK *x, int plane, int block,
*out_sse = RIGHT_SIGNED_SHIFT(this_sse, shift);
}
-uint16_t prune_txk_type_separ(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
- int block, TX_SIZE tx_size, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, int *txk_map,
- int16_t allowed_tx_mask, int prune_factor,
- const TXB_CTX *const txb_ctx,
- int reduced_tx_set_used, int64_t ref_best_rd,
- int num_sel) {
+static uint16_t prune_txk_type_separ(
+ const AV1_COMP *cpi, MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
+ int blk_row, int blk_col, BLOCK_SIZE plane_bsize, int *txk_map,
+ int16_t allowed_tx_mask, int prune_factor, const TXB_CTX *const txb_ctx,
+ int reduced_tx_set_used, int64_t ref_best_rd, int num_sel) {
const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
@@ -1255,11 +1253,12 @@ uint16_t prune_txk_type_separ(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
return prune;
}
-uint16_t prune_txk_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
- int block, TX_SIZE tx_size, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, int *txk_map,
- uint16_t allowed_tx_mask, int prune_factor,
- const TXB_CTX *const txb_ctx, int reduced_tx_set_used) {
+static uint16_t prune_txk_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
+ int block, TX_SIZE tx_size, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize,
+ int *txk_map, uint16_t allowed_tx_mask,
+ int prune_factor, const TXB_CTX *const txb_ctx,
+ int reduced_tx_set_used) {
const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
int tx_type;
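As with tf_apply_temporal_filter_self and compute_luma_sq_error_sum earlier in this diff, prune_txk_type_separ and prune_txk_type become static because they are only called within their own translation unit. A tiny sketch of what internal linkage buys, with hypothetical names:

/* helper.c: only scale_and_clamp is visible to other translation units. */
static int clamp255(int v) {
  // Internal linkage: no symbol is exported, so there is no risk of
  // colliding with another file's helper of the same name, and the
  // compiler is free to inline or eliminate the function entirely.
  return v < 0 ? 0 : (v > 255 ? 255 : v);
}

int scale_and_clamp(int v) { return clamp255(v * 4); }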
diff --git a/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
index a4def754b0..31cc37db7a 100644
--- a/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
+++ b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
@@ -2638,6 +2638,11 @@ void av1_lowbd_fwd_txfm2d_16x64_sse2(const int16_t *input, int32_t *output,
}
}
+// Include the top-level function only for 32-bit x86, to support Valgrind.
+// For normal use, we require SSE4.1, so av1_lowbd_fwd_txfm_sse4_1 will be
+// used instead of this function. However, 32-bit Valgrind does not support
+// SSE4.1, so we keep an SSE2 fallback to improve performance.
+#if AOM_ARCH_X86
static FwdTxfm2dFunc fwd_txfm2d_func_ls[TX_SIZES_ALL] = {
av1_lowbd_fwd_txfm2d_4x4_sse2, // 4x4 transform
av1_lowbd_fwd_txfm2d_8x8_sse2, // 8x8 transform
@@ -2671,3 +2676,4 @@ void av1_lowbd_fwd_txfm_sse2(const int16_t *src_diff, tran_low_t *coeff,
fwd_txfm2d_func(src_diff, coeff, diff_stride, txfm_param->tx_type,
txfm_param->bd);
}
+#endif // AOM_ARCH_X86
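The new guard compiles the SSE2 dispatch table and av1_lowbd_fwd_txfm_sse2 only for 32-bit x86, where Valgrind cannot execute the preferred SSE4.1 path. A condensed sketch of the table-dispatch pattern, with two entries instead of TX_SIZES_ALL and an inline macro test standing in for AOM_ARCH_X86:

typedef void (*Txfm2dFunc)(const short *input, int *output, int stride);

static void fwd_txfm2d_4x4_sse2(const short *input, int *output, int stride) {
  (void)input; (void)output; (void)stride;  /* body elided */
}
static void fwd_txfm2d_8x8_sse2(const short *input, int *output, int stride) {
  (void)input; (void)output; (void)stride;  /* body elided */
}

#if defined(__i386__) || defined(_M_IX86)  /* stand-in for AOM_ARCH_X86 */
static const Txfm2dFunc fwd_txfm2d_table[] = {
  fwd_txfm2d_4x4_sse2,  /* 4x4 transform */
  fwd_txfm2d_8x8_sse2,  /* 8x8 transform */
};

void lowbd_fwd_txfm_sse2_sketch(int tx_size, const short *input, int *output,
                                int stride) {
  fwd_txfm2d_table[tx_size](input, output, stride);  /* dispatch by size */
}
#endif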
diff --git a/third_party/aom/av1/encoder/x86/cnn_avx2.c b/third_party/aom/av1/encoder/x86/cnn_avx2.c
index ee93b3d5a0..9c26a56641 100644
--- a/third_party/aom/av1/encoder/x86/cnn_avx2.c
+++ b/third_party/aom/av1/encoder/x86/cnn_avx2.c
@@ -466,7 +466,7 @@ static INLINE void cnn_convolve_no_maxpool_padding_valid_layer2_avx2(
// As per the layer config set by av1_intra_mode_cnn_partition_cnn_config,
// filter_width and filter_height are both equal to 2 for layer >= 1, so
// the convolution operates on 2x2 windows for those layers.
-void cnn_convolve_no_maxpool_padding_valid_2x2_avx2(
+static void cnn_convolve_no_maxpool_padding_valid_2x2_avx2(
const float **input, int in_width, int in_height, int in_stride,
const CNN_LAYER_CONFIG *const layer_config, float **output, int out_stride,
int start_idx, const int cstep, const int channel_step) {
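Per the comment above, every layer at depth >= 1 convolves with a 2x2 filter. A simplified single-channel, stride-1 version of that "valid" (unpadded) convolution step, for orientation only; the AVX2 code vectorizes this across channels:

// Output is (in_h - 1) x (in_w - 1): each output sample is the dot product
// of the 2x2 filter with the 2x2 input window anchored at (r, c).
static void conv2x2_valid(const float *in, int in_w, int in_h, int in_stride,
                          const float filt[4], float *out, int out_stride) {
  for (int r = 0; r + 1 < in_h; ++r) {
    for (int c = 0; c + 1 < in_w; ++c) {
      out[r * out_stride + c] =
          in[r * in_stride + c] * filt[0] +
          in[r * in_stride + c + 1] * filt[1] +
          in[(r + 1) * in_stride + c] * filt[2] +
          in[(r + 1) * in_stride + c + 1] * filt[3];
    }
  }
}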