Diffstat (limited to 'media/libvpx/libvpx/vp9/decoder')
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_decodeframe.c  3057
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_decodeframe.h    35
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_decodemv.c      848
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_decodemv.h       29
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_decoder.c       584
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_decoder.h       189
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_detokenize.c    333
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_detokenize.h     30
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_dsubexp.c        72
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_dsubexp.h        26
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_job_queue.c     124
-rw-r--r--  media/libvpx/libvpx/vp9/decoder/vp9_job_queue.h      45
12 files changed, 5372 insertions, 0 deletions
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_decodeframe.c b/media/libvpx/libvpx/vp9/decoder/vp9_decodeframe.c
new file mode 100644
index 0000000000..2a27e6fdb3
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_decodeframe.c
@@ -0,0 +1,3057 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdlib.h> // qsort()
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+
+#include "vpx_dsp/bitreader_buffer.h"
+#include "vpx_dsp/bitreader.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+#include "vpx_ports/mem_ops.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vpx_util/vpx_thread.h"
+#if CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG
+#include "vpx_util/vpx_debug_util.h"
+#endif // CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG
+
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_idct.h"
+#include "vp9/common/vp9_thread_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_tile_common.h"
+
+#include "vp9/decoder/vp9_decodeframe.h"
+#include "vp9/decoder/vp9_detokenize.h"
+#include "vp9/decoder/vp9_decodemv.h"
+#include "vp9/decoder/vp9_decoder.h"
+#include "vp9/decoder/vp9_dsubexp.h"
+#include "vp9/decoder/vp9_job_queue.h"
+
+#define MAX_VP9_HEADER_SIZE 80
+
+typedef int (*predict_recon_func)(TileWorkerData *twd, MODE_INFO *const mi,
+ int plane, int row, int col, TX_SIZE tx_size);
+
+typedef void (*intra_recon_func)(TileWorkerData *twd, MODE_INFO *const mi,
+ int plane, int row, int col, TX_SIZE tx_size);
+
+static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
+ return len != 0 && len <= (size_t)(end - start);
+}
+
+static int decode_unsigned_max(struct vpx_read_bit_buffer *rb, int max) {
+ const int data = vpx_rb_read_literal(rb, get_unsigned_bits(max));
+ return data > max ? max : data;
+}
+
+static TX_MODE read_tx_mode(vpx_reader *r) {
+ TX_MODE tx_mode = vpx_read_literal(r, 2);
+ if (tx_mode == ALLOW_32X32) tx_mode += vpx_read_bit(r);
+ return tx_mode;
+}
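+
+// Note: tx_mode is coded as a 2-bit literal; a value of ALLOW_32X32 is
+// followed by one extra bit that, when set, upgrades it to TX_MODE_SELECT,
+// so all five TX modes fit in at most three bits.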
+
+static void read_tx_mode_probs(struct tx_probs *tx_probs, vpx_reader *r) {
+ int i, j;
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
+ for (j = 0; j < TX_SIZES - 3; ++j)
+ vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
+ for (j = 0; j < TX_SIZES - 2; ++j)
+ vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
+ for (j = 0; j < TX_SIZES - 1; ++j)
+ vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
+}
+
+static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
+ int i, j;
+ for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
+ for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
+ vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
+}
+
+static void read_inter_mode_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
+ int i, j;
+ for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
+ for (j = 0; j < INTER_MODES - 1; ++j)
+ vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
+}
+
+static REFERENCE_MODE read_frame_reference_mode(const VP9_COMMON *cm,
+ vpx_reader *r) {
+ if (vp9_compound_reference_allowed(cm)) {
+ return vpx_read_bit(r)
+ ? (vpx_read_bit(r) ? REFERENCE_MODE_SELECT : COMPOUND_REFERENCE)
+ : SINGLE_REFERENCE;
+ } else {
+ return SINGLE_REFERENCE;
+ }
+}
+
+static void read_frame_reference_mode_probs(VP9_COMMON *cm, vpx_reader *r) {
+ FRAME_CONTEXT *const fc = cm->fc;
+ int i;
+
+ if (cm->reference_mode == REFERENCE_MODE_SELECT)
+ for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
+ vp9_diff_update_prob(r, &fc->comp_inter_prob[i]);
+
+ if (cm->reference_mode != COMPOUND_REFERENCE)
+ for (i = 0; i < REF_CONTEXTS; ++i) {
+ vp9_diff_update_prob(r, &fc->single_ref_prob[i][0]);
+ vp9_diff_update_prob(r, &fc->single_ref_prob[i][1]);
+ }
+
+ if (cm->reference_mode != SINGLE_REFERENCE)
+ for (i = 0; i < REF_CONTEXTS; ++i)
+ vp9_diff_update_prob(r, &fc->comp_ref_prob[i]);
+}
+
+static void update_mv_probs(vpx_prob *p, int n, vpx_reader *r) {
+ int i;
+ for (i = 0; i < n; ++i)
+ if (vpx_read(r, MV_UPDATE_PROB)) p[i] = (vpx_read_literal(r, 7) << 1) | 1;
+}
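+
+// Note: an updated MV probability is coded as a 7-bit literal and remapped
+// to (p << 1) | 1, so the stored probability is always odd and never zero.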
+
+static void read_mv_probs(nmv_context *ctx, int allow_hp, vpx_reader *r) {
+ int i, j;
+
+ update_mv_probs(ctx->joints, MV_JOINTS - 1, r);
+
+ for (i = 0; i < 2; ++i) {
+ nmv_component *const comp_ctx = &ctx->comps[i];
+ update_mv_probs(&comp_ctx->sign, 1, r);
+ update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
+ update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
+ update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);
+ }
+
+ for (i = 0; i < 2; ++i) {
+ nmv_component *const comp_ctx = &ctx->comps[i];
+ for (j = 0; j < CLASS0_SIZE; ++j)
+ update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r);
+ update_mv_probs(comp_ctx->fp, 3, r);
+ }
+
+ if (allow_hp) {
+ for (i = 0; i < 2; ++i) {
+ nmv_component *const comp_ctx = &ctx->comps[i];
+ update_mv_probs(&comp_ctx->class0_hp, 1, r);
+ update_mv_probs(&comp_ctx->hp, 1, r);
+ }
+ }
+}
+
+static void inverse_transform_block_inter(MACROBLOCKD *xd, int plane,
+ const TX_SIZE tx_size, uint8_t *dst,
+ int stride, int eob) {
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ tran_low_t *const dqcoeff = pd->dqcoeff;
+ assert(eob > 0);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
+ if (xd->lossless) {
+ vp9_highbd_iwht4x4_add(dqcoeff, dst16, stride, eob, xd->bd);
+ } else {
+ switch (tx_size) {
+ case TX_4X4:
+ vp9_highbd_idct4x4_add(dqcoeff, dst16, stride, eob, xd->bd);
+ break;
+ case TX_8X8:
+ vp9_highbd_idct8x8_add(dqcoeff, dst16, stride, eob, xd->bd);
+ break;
+ case TX_16X16:
+ vp9_highbd_idct16x16_add(dqcoeff, dst16, stride, eob, xd->bd);
+ break;
+ case TX_32X32:
+ vp9_highbd_idct32x32_add(dqcoeff, dst16, stride, eob, xd->bd);
+ break;
+ default: assert(0 && "Invalid transform size");
+ }
+ }
+ } else {
+ if (xd->lossless) {
+ vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
+ } else {
+ switch (tx_size) {
+ case TX_4X4: vp9_idct4x4_add(dqcoeff, dst, stride, eob); break;
+ case TX_8X8: vp9_idct8x8_add(dqcoeff, dst, stride, eob); break;
+ case TX_16X16: vp9_idct16x16_add(dqcoeff, dst, stride, eob); break;
+ case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
+ default: assert(0 && "Invalid transform size"); return;
+ }
+ }
+ }
+#else
+ if (xd->lossless) {
+ vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
+ } else {
+ switch (tx_size) {
+ case TX_4X4: vp9_idct4x4_add(dqcoeff, dst, stride, eob); break;
+ case TX_8X8: vp9_idct8x8_add(dqcoeff, dst, stride, eob); break;
+ case TX_16X16: vp9_idct16x16_add(dqcoeff, dst, stride, eob); break;
+ case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
+ default: assert(0 && "Invalid transform size"); return;
+ }
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ if (eob == 1) {
+ dqcoeff[0] = 0;
+ } else {
+ if (tx_size <= TX_16X16 && eob <= 10)
+ memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
+ else if (tx_size == TX_32X32 && eob <= 34)
+ memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
+ else
+ memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
+ }
+}
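+
+// Note on the partial memsets above: for small eob values the scan orders
+// confine the nonzero coefficients to the start of the dqcoeff buffer, so
+// only that prefix needs clearing; the thresholds (10 and 34) mirror the
+// eob thresholds of the partial-IDCT fast paths. The full clear of
+// (16 << (tx_size << 1)) elements covers the whole transform block.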
+
+static void inverse_transform_block_intra(MACROBLOCKD *xd, int plane,
+ const TX_TYPE tx_type,
+ const TX_SIZE tx_size, uint8_t *dst,
+ int stride, int eob) {
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ tran_low_t *const dqcoeff = pd->dqcoeff;
+ assert(eob > 0);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
+ if (xd->lossless) {
+ vp9_highbd_iwht4x4_add(dqcoeff, dst16, stride, eob, xd->bd);
+ } else {
+ switch (tx_size) {
+ case TX_4X4:
+ vp9_highbd_iht4x4_add(tx_type, dqcoeff, dst16, stride, eob, xd->bd);
+ break;
+ case TX_8X8:
+ vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst16, stride, eob, xd->bd);
+ break;
+ case TX_16X16:
+ vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst16, stride, eob, xd->bd);
+ break;
+ case TX_32X32:
+ vp9_highbd_idct32x32_add(dqcoeff, dst16, stride, eob, xd->bd);
+ break;
+ default: assert(0 && "Invalid transform size");
+ }
+ }
+ } else {
+ if (xd->lossless) {
+ vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
+ } else {
+ switch (tx_size) {
+ case TX_4X4: vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); break;
+ case TX_8X8: vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); break;
+ case TX_16X16:
+ vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
+ break;
+ case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
+ default: assert(0 && "Invalid transform size"); return;
+ }
+ }
+ }
+#else
+ if (xd->lossless) {
+ vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
+ } else {
+ switch (tx_size) {
+ case TX_4X4: vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); break;
+ case TX_8X8: vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); break;
+ case TX_16X16:
+ vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
+ break;
+ case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break;
+ default: assert(0 && "Invalid transform size"); return;
+ }
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ if (eob == 1) {
+ dqcoeff[0] = 0;
+ } else {
+ if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
+ memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
+ else if (tx_size == TX_32X32 && eob <= 34)
+ memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
+ else
+ memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
+ }
+}
+
+static void predict_and_reconstruct_intra_block(TileWorkerData *twd,
+ MODE_INFO *const mi, int plane,
+ int row, int col,
+ TX_SIZE tx_size) {
+ MACROBLOCKD *const xd = &twd->xd;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ PREDICTION_MODE mode = (plane == 0) ? mi->mode : mi->uv_mode;
+ uint8_t *dst;
+ dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];
+
+ if (mi->sb_type < BLOCK_8X8)
+ if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
+
+ vp9_predict_intra_block(xd, pd->n4_wl, tx_size, mode, dst, pd->dst.stride,
+ dst, pd->dst.stride, col, row, plane);
+
+ if (!mi->skip) {
+ const TX_TYPE tx_type =
+ (plane || xd->lossless) ? DCT_DCT : intra_mode_to_tx_type_lookup[mode];
+ const scan_order *sc = (plane || xd->lossless)
+ ? &vp9_default_scan_orders[tx_size]
+ : &vp9_scan_orders[tx_size][tx_type];
+ const int eob = vp9_decode_block_tokens(twd, plane, sc, col, row, tx_size,
+ mi->segment_id);
+ if (eob > 0) {
+ inverse_transform_block_intra(xd, plane, tx_type, tx_size, dst,
+ pd->dst.stride, eob);
+ }
+ }
+}
+
+static void parse_intra_block_row_mt(TileWorkerData *twd, MODE_INFO *const mi,
+ int plane, int row, int col,
+ TX_SIZE tx_size) {
+ MACROBLOCKD *const xd = &twd->xd;
+ PREDICTION_MODE mode = (plane == 0) ? mi->mode : mi->uv_mode;
+
+ if (mi->sb_type < BLOCK_8X8)
+ if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
+
+ if (!mi->skip) {
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ const TX_TYPE tx_type =
+ (plane || xd->lossless) ? DCT_DCT : intra_mode_to_tx_type_lookup[mode];
+ const scan_order *sc = (plane || xd->lossless)
+ ? &vp9_default_scan_orders[tx_size]
+ : &vp9_scan_orders[tx_size][tx_type];
+ *pd->eob = vp9_decode_block_tokens(twd, plane, sc, col, row, tx_size,
+ mi->segment_id);
+ /* Keep the alignment to 16 */
+ pd->dqcoeff += (16 << (tx_size << 1));
+ pd->eob++;
+ }
+}
+
+static void predict_and_reconstruct_intra_block_row_mt(TileWorkerData *twd,
+ MODE_INFO *const mi,
+ int plane, int row,
+ int col,
+ TX_SIZE tx_size) {
+ MACROBLOCKD *const xd = &twd->xd;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ PREDICTION_MODE mode = (plane == 0) ? mi->mode : mi->uv_mode;
+ uint8_t *dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];
+
+ if (mi->sb_type < BLOCK_8X8)
+ if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
+
+ vp9_predict_intra_block(xd, pd->n4_wl, tx_size, mode, dst, pd->dst.stride,
+ dst, pd->dst.stride, col, row, plane);
+
+ if (!mi->skip) {
+ const TX_TYPE tx_type =
+ (plane || xd->lossless) ? DCT_DCT : intra_mode_to_tx_type_lookup[mode];
+ if (*pd->eob > 0) {
+ inverse_transform_block_intra(xd, plane, tx_type, tx_size, dst,
+ pd->dst.stride, *pd->eob);
+ }
+ /* Keep the alignment to 16 */
+ pd->dqcoeff += (16 << (tx_size << 1));
+ pd->eob++;
+ }
+}
+
+static int reconstruct_inter_block(TileWorkerData *twd, MODE_INFO *const mi,
+ int plane, int row, int col, TX_SIZE tx_size,
+ int mi_row, int mi_col) {
+ MACROBLOCKD *const xd = &twd->xd;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ const scan_order *sc = &vp9_default_scan_orders[tx_size];
+ const int eob = vp9_decode_block_tokens(twd, plane, sc, col, row, tx_size,
+ mi->segment_id);
+ uint8_t *dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];
+
+ if (eob > 0) {
+ inverse_transform_block_inter(xd, plane, tx_size, dst, pd->dst.stride, eob);
+ }
+#if CONFIG_MISMATCH_DEBUG
+ {
+ int pixel_c, pixel_r;
+ int blk_w = 1 << (tx_size + TX_UNIT_SIZE_LOG2);
+ int blk_h = 1 << (tx_size + TX_UNIT_SIZE_LOG2);
+ mi_to_pixel_loc(&pixel_c, &pixel_r, mi_col, mi_row, col, row,
+ pd->subsampling_x, pd->subsampling_y);
+ mismatch_check_block_tx(dst, pd->dst.stride, plane, pixel_c, pixel_r, blk_w,
+ blk_h, xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH);
+ }
+#else
+ (void)mi_row;
+ (void)mi_col;
+#endif
+ return eob;
+}
+
+static int parse_inter_block_row_mt(TileWorkerData *twd, MODE_INFO *const mi,
+ int plane, int row, int col,
+ TX_SIZE tx_size) {
+ MACROBLOCKD *const xd = &twd->xd;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ const scan_order *sc = &vp9_default_scan_orders[tx_size];
+ const int eob = vp9_decode_block_tokens(twd, plane, sc, col, row, tx_size,
+ mi->segment_id);
+
+ *pd->eob = eob;
+ pd->dqcoeff += (16 << (tx_size << 1));
+ pd->eob++;
+
+ return eob;
+}
+
+static int reconstruct_inter_block_row_mt(TileWorkerData *twd,
+ MODE_INFO *const mi, int plane,
+ int row, int col, TX_SIZE tx_size) {
+ MACROBLOCKD *const xd = &twd->xd;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ const int eob = *pd->eob;
+
+ (void)mi;
+ if (eob > 0) {
+ inverse_transform_block_inter(
+ xd, plane, tx_size, &pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
+ pd->dst.stride, eob);
+ }
+ pd->dqcoeff += (16 << (tx_size << 1));
+ pd->eob++;
+
+ return eob;
+}
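+
+// In row-based multithreading the work is split into a parse pass and a
+// recon pass: parse_intra_block_row_mt/parse_inter_block_row_mt store each
+// block's eob (leaving the dequantized coefficients in place), and the
+// reconstruct_*_row_mt functions consume them later. Both passes advance
+// pd->dqcoeff by (16 << (tx_size << 1)) per transform block so the two
+// pointer walks stay in lockstep and 16-aligned.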
+
+static void build_mc_border(const uint8_t *src, int src_stride, uint8_t *dst,
+ int dst_stride, int x, int y, int b_w, int b_h,
+ int w, int h) {
+ // Get a pointer to the start of the real data for this row.
+ const uint8_t *ref_row = src - x - y * src_stride;
+
+ if (y >= h)
+ ref_row += (h - 1) * src_stride;
+ else if (y > 0)
+ ref_row += y * src_stride;
+
+ do {
+ int right = 0, copy;
+ int left = x < 0 ? -x : 0;
+
+ if (left > b_w) left = b_w;
+
+ if (x + b_w > w) right = x + b_w - w;
+
+ if (right > b_w) right = b_w;
+
+ copy = b_w - left - right;
+
+ if (left) memset(dst, ref_row[0], left);
+
+ if (copy) memcpy(dst + left, ref_row + x + left, copy);
+
+ if (right) memset(dst + left + copy, ref_row[w - 1], right);
+
+ dst += dst_stride;
+ ++y;
+
+ if (y > 0 && y < h) ref_row += src_stride;
+ } while (--b_h);
+}
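+
+// build_mc_border() replicates the frame edges: rows above/below the frame
+// reuse the first/last valid row, and pixels to the left/right of a row
+// reuse its first/last pixel, so the interpolation filters always read
+// valid data.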
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static void high_build_mc_border(const uint8_t *src8, int src_stride,
+ uint16_t *dst, int dst_stride, int x, int y,
+ int b_w, int b_h, int w, int h) {
+ // Get a pointer to the start of the real data for this row.
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+ const uint16_t *ref_row = src - x - y * src_stride;
+
+ if (y >= h)
+ ref_row += (h - 1) * src_stride;
+ else if (y > 0)
+ ref_row += y * src_stride;
+
+ do {
+ int right = 0, copy;
+ int left = x < 0 ? -x : 0;
+
+ if (left > b_w) left = b_w;
+
+ if (x + b_w > w) right = x + b_w - w;
+
+ if (right > b_w) right = b_w;
+
+ copy = b_w - left - right;
+
+ if (left) vpx_memset16(dst, ref_row[0], left);
+
+ if (copy) memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
+
+ if (right) vpx_memset16(dst + left + copy, ref_row[w - 1], right);
+
+ dst += dst_stride;
+ ++y;
+
+ if (y > 0 && y < h) ref_row += src_stride;
+ } while (--b_h);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static void extend_and_predict(TileWorkerData *twd, const uint8_t *buf_ptr1,
+ int pre_buf_stride, int x0, int y0, int b_w,
+ int b_h, int frame_width, int frame_height,
+ int border_offset, uint8_t *const dst,
+ int dst_buf_stride, int subpel_x, int subpel_y,
+ const InterpKernel *kernel,
+ const struct scale_factors *sf, MACROBLOCKD *xd,
+ int w, int h, int ref, int xs, int ys) {
+ uint16_t *mc_buf_high = twd->extend_and_predict_buf;
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w, x0, y0,
+ b_w, b_h, frame_width, frame_height);
+ highbd_inter_predictor(mc_buf_high + border_offset, b_w,
+ CONVERT_TO_SHORTPTR(dst), dst_buf_stride, subpel_x,
+ subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
+ } else {
+ build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w, x0,
+ y0, b_w, b_h, frame_width, frame_height);
+ inter_predictor(((uint8_t *)mc_buf_high) + border_offset, b_w, dst,
+ dst_buf_stride, subpel_x, subpel_y, sf, w, h, ref, kernel,
+ xs, ys);
+ }
+}
+#else
+static void extend_and_predict(TileWorkerData *twd, const uint8_t *buf_ptr1,
+ int pre_buf_stride, int x0, int y0, int b_w,
+ int b_h, int frame_width, int frame_height,
+ int border_offset, uint8_t *const dst,
+ int dst_buf_stride, int subpel_x, int subpel_y,
+ const InterpKernel *kernel,
+ const struct scale_factors *sf, int w, int h,
+ int ref, int xs, int ys) {
+ uint8_t *mc_buf = (uint8_t *)twd->extend_and_predict_buf;
+ const uint8_t *buf_ptr;
+
+ build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w, x0, y0, b_w, b_h,
+ frame_width, frame_height);
+ buf_ptr = mc_buf + border_offset;
+
+ inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y, sf, w,
+ h, ref, kernel, xs, ys);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
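+
+// Note: border_offset skips the three (VP9_INTERP_EXTEND - 1) padding
+// rows/columns folded into the extended block when sub-pel interpolation
+// needs context above/left of it, so prediction starts at the block proper.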
+
+static void dec_build_inter_predictors(
+ TileWorkerData *twd, MACROBLOCKD *xd, int plane, int bw, int bh, int x,
+ int y, int w, int h, int mi_x, int mi_y, const InterpKernel *kernel,
+ const struct scale_factors *sf, struct buf_2d *pre_buf,
+ struct buf_2d *dst_buf, const MV *mv, RefCntBuffer *ref_frame_buf,
+ int is_scaled, int ref) {
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
+ MV32 scaled_mv;
+ int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride,
+ subpel_x, subpel_y;
+ uint8_t *ref_frame, *buf_ptr;
+
+ // Get reference frame pointer, width and height.
+ if (plane == 0) {
+ frame_width = ref_frame_buf->buf.y_crop_width;
+ frame_height = ref_frame_buf->buf.y_crop_height;
+ ref_frame = ref_frame_buf->buf.y_buffer;
+ } else {
+ frame_width = ref_frame_buf->buf.uv_crop_width;
+ frame_height = ref_frame_buf->buf.uv_crop_height;
+ ref_frame =
+ plane == 1 ? ref_frame_buf->buf.u_buffer : ref_frame_buf->buf.v_buffer;
+ }
+
+ if (is_scaled) {
+ const MV mv_q4 = clamp_mv_to_umv_border_sb(
+ xd, mv, bw, bh, pd->subsampling_x, pd->subsampling_y);
+ // Co-ordinate of containing block to pixel precision.
+ int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
+ int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
+#if 0 // CONFIG_BETTER_HW_COMPATIBILITY
+ assert(xd->mi[0]->sb_type != BLOCK_4X8 &&
+ xd->mi[0]->sb_type != BLOCK_8X4);
+ assert(mv_q4.row == mv->row * (1 << (1 - pd->subsampling_y)) &&
+ mv_q4.col == mv->col * (1 << (1 - pd->subsampling_x)));
+#endif
+ // Co-ordinate of the block to 1/16th pixel precision.
+ x0_16 = (x_start + x) << SUBPEL_BITS;
+ y0_16 = (y_start + y) << SUBPEL_BITS;
+
+ // Co-ordinate of current block in reference frame
+ // to 1/16th pixel precision.
+ x0_16 = sf->scale_value_x(x0_16, sf);
+ y0_16 = sf->scale_value_y(y0_16, sf);
+
+ // Map the top left corner of the block into the reference frame.
+ x0 = sf->scale_value_x(x_start + x, sf);
+ y0 = sf->scale_value_y(y_start + y, sf);
+
+ // Scale the MV and incorporate the sub-pixel offset of the block
+ // in the reference frame.
+ scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+ xs = sf->x_step_q4;
+ ys = sf->y_step_q4;
+ } else {
+ // Co-ordinate of containing block to pixel precision.
+ x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
+ y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
+
+ // Co-ordinate of the block to 1/16th pixel precision.
+ x0_16 = x0 << SUBPEL_BITS;
+ y0_16 = y0 << SUBPEL_BITS;
+
+ scaled_mv.row = mv->row * (1 << (1 - pd->subsampling_y));
+ scaled_mv.col = mv->col * (1 << (1 - pd->subsampling_x));
+ xs = ys = 16;
+ }
+ subpel_x = scaled_mv.col & SUBPEL_MASK;
+ subpel_y = scaled_mv.row & SUBPEL_MASK;
+
+ // Calculate the top left corner of the best matching block in the
+ // reference frame.
+ x0 += scaled_mv.col >> SUBPEL_BITS;
+ y0 += scaled_mv.row >> SUBPEL_BITS;
+ x0_16 += scaled_mv.col;
+ y0_16 += scaled_mv.row;
+
+ // Get reference block pointer.
+ buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
+ buf_stride = pre_buf->stride;
+
+  // Do border extension if the reference frame is scaled, there is motion,
+  // or the frame width/height is not a multiple of 8 pixels.
+ if (is_scaled || scaled_mv.col || scaled_mv.row || (frame_width & 0x7) ||
+ (frame_height & 0x7)) {
+ int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
+
+ // Get reference block bottom right horizontal coordinate.
+ int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
+ int x_pad = 0, y_pad = 0;
+
+ if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
+ x0 -= VP9_INTERP_EXTEND - 1;
+ x1 += VP9_INTERP_EXTEND;
+ x_pad = 1;
+ }
+
+ if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
+ y0 -= VP9_INTERP_EXTEND - 1;
+ y1 += VP9_INTERP_EXTEND;
+ y_pad = 1;
+ }
+
+    // The border only needs extending when part of the block falls outside
+    // the frame; otherwise the reference buffer can be used directly.
+ if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
+ y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
+ // Extend the border.
+ const uint8_t *const buf_ptr1 = ref_frame + y0 * buf_stride + x0;
+ const int b_w = x1 - x0 + 1;
+ const int b_h = y1 - y0 + 1;
+ const int border_offset = y_pad * 3 * b_w + x_pad * 3;
+
+ extend_and_predict(twd, buf_ptr1, buf_stride, x0, y0, b_w, b_h,
+ frame_width, frame_height, border_offset, dst,
+ dst_buf->stride, subpel_x, subpel_y, kernel, sf,
+#if CONFIG_VP9_HIGHBITDEPTH
+ xd,
+#endif
+ w, h, ref, xs, ys);
+ return;
+ }
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ highbd_inter_predictor(CONVERT_TO_SHORTPTR(buf_ptr), buf_stride,
+ CONVERT_TO_SHORTPTR(dst), dst_buf->stride, subpel_x,
+ subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
+ } else {
+ inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
+ subpel_y, sf, w, h, ref, kernel, xs, ys);
+ }
+#else
+ inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, subpel_y,
+ sf, w, h, ref, kernel, xs, ys);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+}
+
+static void dec_build_inter_predictors_sb(TileWorkerData *twd,
+ VP9Decoder *const pbi,
+ MACROBLOCKD *xd, int mi_row,
+ int mi_col) {
+ int plane;
+ const int mi_x = mi_col * MI_SIZE;
+ const int mi_y = mi_row * MI_SIZE;
+ const MODE_INFO *mi = xd->mi[0];
+ const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
+ const BLOCK_SIZE sb_type = mi->sb_type;
+ const int is_compound = has_second_ref(mi);
+ int ref;
+ int is_scaled;
+
+ for (ref = 0; ref < 1 + is_compound; ++ref) {
+ const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
+ RefBuffer *ref_buf = &pbi->common.frame_refs[frame - LAST_FRAME];
+ const struct scale_factors *const sf = &ref_buf->sf;
+ const int idx = ref_buf->idx;
+ BufferPool *const pool = pbi->common.buffer_pool;
+ RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
+
+ if (!vp9_is_valid_scale(sf))
+ vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+ "Reference frame has invalid dimensions");
+
+ is_scaled = vp9_is_scaled(sf);
+ vp9_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col,
+ is_scaled ? sf : NULL);
+ xd->block_refs[ref] = ref_buf;
+
+ if (sb_type < BLOCK_8X8) {
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ struct buf_2d *const dst_buf = &pd->dst;
+ const int num_4x4_w = pd->n4_w;
+ const int num_4x4_h = pd->n4_h;
+ const int n4w_x4 = 4 * num_4x4_w;
+ const int n4h_x4 = 4 * num_4x4_h;
+ struct buf_2d *const pre_buf = &pd->pre[ref];
+ int i = 0, x, y;
+ for (y = 0; y < num_4x4_h; ++y) {
+ for (x = 0; x < num_4x4_w; ++x) {
+ const MV mv = average_split_mvs(pd, mi, ref, i++);
+ dec_build_inter_predictors(twd, xd, plane, n4w_x4, n4h_x4, 4 * x,
+ 4 * y, 4, 4, mi_x, mi_y, kernel, sf,
+ pre_buf, dst_buf, &mv, ref_frame_buf,
+ is_scaled, ref);
+ }
+ }
+ }
+ } else {
+ const MV mv = mi->mv[ref].as_mv;
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ struct buf_2d *const dst_buf = &pd->dst;
+ const int num_4x4_w = pd->n4_w;
+ const int num_4x4_h = pd->n4_h;
+ const int n4w_x4 = 4 * num_4x4_w;
+ const int n4h_x4 = 4 * num_4x4_h;
+ struct buf_2d *const pre_buf = &pd->pre[ref];
+ dec_build_inter_predictors(twd, xd, plane, n4w_x4, n4h_x4, 0, 0, n4w_x4,
+ n4h_x4, mi_x, mi_y, kernel, sf, pre_buf,
+ dst_buf, &mv, ref_frame_buf, is_scaled, ref);
+ }
+ }
+ }
+}
+
+static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ struct macroblockd_plane *const pd = &xd->plane[i];
+ memset(pd->above_context, 0, sizeof(ENTROPY_CONTEXT) * pd->n4_w);
+ memset(pd->left_context, 0, sizeof(ENTROPY_CONTEXT) * pd->n4_h);
+ }
+}
+
+static void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh, int bwl,
+ int bhl) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].n4_w = (bw << 1) >> xd->plane[i].subsampling_x;
+ xd->plane[i].n4_h = (bh << 1) >> xd->plane[i].subsampling_y;
+ xd->plane[i].n4_wl = bwl - xd->plane[i].subsampling_x;
+ xd->plane[i].n4_hl = bhl - xd->plane[i].subsampling_y;
+ }
+}
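+
+// n4_w/n4_h hold the block's width/height in units of 4x4 blocks per plane,
+// halved for subsampled chroma planes; n4_wl/n4_hl are the log2 equivalents.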
+
+static MODE_INFO *set_offsets_recon(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ int mi_row, int mi_col, int bw, int bh,
+ int bwl, int bhl) {
+ const int offset = mi_row * cm->mi_stride + mi_col;
+ const TileInfo *const tile = &xd->tile;
+ xd->mi = cm->mi_grid_visible + offset;
+
+ set_plane_n4(xd, bw, bh, bwl, bhl);
+
+ set_skip_context(xd, mi_row, mi_col);
+
+  // Distance of the MB from the various image edges, specified in 1/8th-pel
+  // units since they are always compared against values in 1/8th-pel units.
+ set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
+
+ vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ return xd->mi[0];
+}
+
+static MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ BLOCK_SIZE bsize, int mi_row, int mi_col, int bw,
+ int bh, int x_mis, int y_mis, int bwl, int bhl) {
+ const int offset = mi_row * cm->mi_stride + mi_col;
+ int x, y;
+ const TileInfo *const tile = &xd->tile;
+
+ xd->mi = cm->mi_grid_visible + offset;
+ xd->mi[0] = &cm->mi[offset];
+ // TODO(slavarnway): Generate sb_type based on bwl and bhl, instead of
+ // passing bsize from decode_partition().
+ xd->mi[0]->sb_type = bsize;
+ for (y = 0; y < y_mis; ++y)
+ for (x = !y; x < x_mis; ++x) {
+ xd->mi[y * cm->mi_stride + x] = xd->mi[0];
+ }
+
+ set_plane_n4(xd, bw, bh, bwl, bhl);
+
+ set_skip_context(xd, mi_row, mi_col);
+
+  // Distance of the MB from the various image edges, specified in 1/8th-pel
+  // units since they are always compared against values in 1/8th-pel units.
+ set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
+
+ vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ return xd->mi[0];
+}
+
+static INLINE int predict_recon_inter(MACROBLOCKD *xd, MODE_INFO *mi,
+ TileWorkerData *twd,
+ predict_recon_func func) {
+ int eobtotal = 0;
+ int plane;
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size;
+ const int num_4x4_w = pd->n4_w;
+ const int num_4x4_h = pd->n4_h;
+ const int step = (1 << tx_size);
+ int row, col;
+ const int max_blocks_wide =
+ num_4x4_w + (xd->mb_to_right_edge >= 0
+ ? 0
+ : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ const int max_blocks_high =
+ num_4x4_h + (xd->mb_to_bottom_edge >= 0
+ ? 0
+ : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+
+ xd->max_blocks_wide = xd->mb_to_right_edge >= 0 ? 0 : max_blocks_wide;
+ xd->max_blocks_high = xd->mb_to_bottom_edge >= 0 ? 0 : max_blocks_high;
+
+ for (row = 0; row < max_blocks_high; row += step)
+ for (col = 0; col < max_blocks_wide; col += step)
+ eobtotal += func(twd, mi, plane, row, col, tx_size);
+ }
+ return eobtotal;
+}
+
+static INLINE void predict_recon_intra(MACROBLOCKD *xd, MODE_INFO *mi,
+ TileWorkerData *twd,
+ intra_recon_func func) {
+ int plane;
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size;
+ const int num_4x4_w = pd->n4_w;
+ const int num_4x4_h = pd->n4_h;
+ const int step = (1 << tx_size);
+ int row, col;
+ const int max_blocks_wide =
+ num_4x4_w + (xd->mb_to_right_edge >= 0
+ ? 0
+ : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ const int max_blocks_high =
+ num_4x4_h + (xd->mb_to_bottom_edge >= 0
+ ? 0
+ : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+
+ xd->max_blocks_wide = xd->mb_to_right_edge >= 0 ? 0 : max_blocks_wide;
+ xd->max_blocks_high = xd->mb_to_bottom_edge >= 0 ? 0 : max_blocks_high;
+
+ for (row = 0; row < max_blocks_high; row += step)
+ for (col = 0; col < max_blocks_wide; col += step)
+ func(twd, mi, plane, row, col, tx_size);
+ }
+}
+
+static void decode_block(TileWorkerData *twd, VP9Decoder *const pbi, int mi_row,
+ int mi_col, BLOCK_SIZE bsize, int bwl, int bhl) {
+ VP9_COMMON *const cm = &pbi->common;
+ const int less8x8 = bsize < BLOCK_8X8;
+ const int bw = 1 << (bwl - 1);
+ const int bh = 1 << (bhl - 1);
+ const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+ vpx_reader *r = &twd->bit_reader;
+ MACROBLOCKD *const xd = &twd->xd;
+
+ MODE_INFO *mi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis,
+ y_mis, bwl, bhl);
+
+ if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
+ const BLOCK_SIZE uv_subsize =
+ ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
+ if (uv_subsize == BLOCK_INVALID)
+ vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid block size.");
+ }
+
+ vp9_read_mode_info(twd, pbi, mi_row, mi_col, x_mis, y_mis);
+
+ if (mi->skip) {
+ dec_reset_skip_context(xd);
+ }
+
+ if (!is_inter_block(mi)) {
+ int plane;
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size;
+ const int num_4x4_w = pd->n4_w;
+ const int num_4x4_h = pd->n4_h;
+ const int step = (1 << tx_size);
+ int row, col;
+ const int max_blocks_wide =
+ num_4x4_w + (xd->mb_to_right_edge >= 0
+ ? 0
+ : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ const int max_blocks_high =
+ num_4x4_h + (xd->mb_to_bottom_edge >= 0
+ ? 0
+ : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+
+ xd->max_blocks_wide = xd->mb_to_right_edge >= 0 ? 0 : max_blocks_wide;
+ xd->max_blocks_high = xd->mb_to_bottom_edge >= 0 ? 0 : max_blocks_high;
+
+ for (row = 0; row < max_blocks_high; row += step)
+ for (col = 0; col < max_blocks_wide; col += step)
+ predict_and_reconstruct_intra_block(twd, mi, plane, row, col,
+ tx_size);
+ }
+ } else {
+ // Prediction
+ dec_build_inter_predictors_sb(twd, pbi, xd, mi_row, mi_col);
+#if CONFIG_MISMATCH_DEBUG
+ {
+ int plane;
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ const struct macroblockd_plane *pd = &xd->plane[plane];
+ int pixel_c, pixel_r;
+ const BLOCK_SIZE plane_bsize =
+ get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), &xd->plane[plane]);
+ const int bw = get_block_width(plane_bsize);
+ const int bh = get_block_height(plane_bsize);
+ mi_to_pixel_loc(&pixel_c, &pixel_r, mi_col, mi_row, 0, 0,
+ pd->subsampling_x, pd->subsampling_y);
+ mismatch_check_block_pre(pd->dst.buf, pd->dst.stride, plane, pixel_c,
+ pixel_r, bw, bh,
+ xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH);
+ }
+ }
+#endif
+
+ // Reconstruction
+ if (!mi->skip) {
+ int eobtotal = 0;
+ int plane;
+
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size;
+ const int num_4x4_w = pd->n4_w;
+ const int num_4x4_h = pd->n4_h;
+ const int step = (1 << tx_size);
+ int row, col;
+ const int max_blocks_wide =
+ num_4x4_w + (xd->mb_to_right_edge >= 0
+ ? 0
+ : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ const int max_blocks_high =
+ num_4x4_h +
+ (xd->mb_to_bottom_edge >= 0
+ ? 0
+ : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+
+ xd->max_blocks_wide = xd->mb_to_right_edge >= 0 ? 0 : max_blocks_wide;
+ xd->max_blocks_high = xd->mb_to_bottom_edge >= 0 ? 0 : max_blocks_high;
+
+ for (row = 0; row < max_blocks_high; row += step)
+ for (col = 0; col < max_blocks_wide; col += step)
+ eobtotal += reconstruct_inter_block(twd, mi, plane, row, col,
+ tx_size, mi_row, mi_col);
+ }
+
+ if (!less8x8 && eobtotal == 0) mi->skip = 1; // skip loopfilter
+ }
+ }
+
+ xd->corrupted |= vpx_reader_has_error(r);
+
+ if (cm->lf.filter_level) {
+ vp9_build_mask(cm, mi, mi_row, mi_col, bw, bh);
+ }
+}
+
+static void recon_block(TileWorkerData *twd, VP9Decoder *const pbi, int mi_row,
+ int mi_col, BLOCK_SIZE bsize, int bwl, int bhl) {
+ VP9_COMMON *const cm = &pbi->common;
+ const int bw = 1 << (bwl - 1);
+ const int bh = 1 << (bhl - 1);
+ MACROBLOCKD *const xd = &twd->xd;
+
+ MODE_INFO *mi = set_offsets_recon(cm, xd, mi_row, mi_col, bw, bh, bwl, bhl);
+
+ if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
+ const BLOCK_SIZE uv_subsize =
+ ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
+ if (uv_subsize == BLOCK_INVALID)
+ vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid block size.");
+ }
+
+ if (!is_inter_block(mi)) {
+ predict_recon_intra(xd, mi, twd,
+ predict_and_reconstruct_intra_block_row_mt);
+ } else {
+ // Prediction
+ dec_build_inter_predictors_sb(twd, pbi, xd, mi_row, mi_col);
+
+ // Reconstruction
+ if (!mi->skip) {
+ predict_recon_inter(xd, mi, twd, reconstruct_inter_block_row_mt);
+ }
+ }
+
+ vp9_build_mask(cm, mi, mi_row, mi_col, bw, bh);
+}
+
+static void parse_block(TileWorkerData *twd, VP9Decoder *const pbi, int mi_row,
+ int mi_col, BLOCK_SIZE bsize, int bwl, int bhl) {
+ VP9_COMMON *const cm = &pbi->common;
+ const int bw = 1 << (bwl - 1);
+ const int bh = 1 << (bhl - 1);
+ const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+ vpx_reader *r = &twd->bit_reader;
+ MACROBLOCKD *const xd = &twd->xd;
+
+ MODE_INFO *mi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis,
+ y_mis, bwl, bhl);
+
+ if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
+ const BLOCK_SIZE uv_subsize =
+ ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
+ if (uv_subsize == BLOCK_INVALID)
+ vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid block size.");
+ }
+
+ vp9_read_mode_info(twd, pbi, mi_row, mi_col, x_mis, y_mis);
+
+ if (mi->skip) {
+ dec_reset_skip_context(xd);
+ }
+
+ if (!is_inter_block(mi)) {
+ predict_recon_intra(xd, mi, twd, parse_intra_block_row_mt);
+ } else {
+ if (!mi->skip) {
+ tran_low_t *dqcoeff[MAX_MB_PLANE];
+ int *eob[MAX_MB_PLANE];
+ int plane;
+ int eobtotal;
+      // Based on eobtotal and bsize, mi->skip may be set to 1 below. In that
+      // case dqcoeff and eob need to be backed up and restored, as
+      // recon_block will not increment these pointers for skip cases.
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ dqcoeff[plane] = pd->dqcoeff;
+ eob[plane] = pd->eob;
+ }
+ eobtotal = predict_recon_inter(xd, mi, twd, parse_inter_block_row_mt);
+
+ if (bsize >= BLOCK_8X8 && eobtotal == 0) {
+ mi->skip = 1; // skip loopfilter
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ struct macroblockd_plane *pd = &xd->plane[plane];
+ pd->dqcoeff = dqcoeff[plane];
+ pd->eob = eob[plane];
+ }
+ }
+ }
+ }
+
+ xd->corrupted |= vpx_reader_has_error(r);
+}
+
+static INLINE int dec_partition_plane_context(TileWorkerData *twd, int mi_row,
+ int mi_col, int bsl) {
+ const PARTITION_CONTEXT *above_ctx = twd->xd.above_seg_context + mi_col;
+ const PARTITION_CONTEXT *left_ctx =
+ twd->xd.left_seg_context + (mi_row & MI_MASK);
+ int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;
+
+ // assert(bsl >= 0);
+
+ return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
+}
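+
+// The partition context packs one bit from the row above and one from the
+// column to the left (each set when the neighbouring partition is at least
+// the current block size) into left * 2 + above, then offsets by
+// bsl * PARTITION_PLOFFSET so each block-size level gets its own group of
+// contexts.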
+
+static INLINE void dec_update_partition_context(TileWorkerData *twd, int mi_row,
+ int mi_col, BLOCK_SIZE subsize,
+ int bw) {
+ PARTITION_CONTEXT *const above_ctx = twd->xd.above_seg_context + mi_col;
+ PARTITION_CONTEXT *const left_ctx =
+ twd->xd.left_seg_context + (mi_row & MI_MASK);
+
+  // Update the partition context at the end nodes. Set the partition bits
+  // of block sizes larger than the current one to one, and the partition
+  // bits of smaller block sizes to zero.
+ memset(above_ctx, partition_context_lookup[subsize].above, bw);
+ memset(left_ctx, partition_context_lookup[subsize].left, bw);
+}
+
+static PARTITION_TYPE read_partition(TileWorkerData *twd, int mi_row,
+ int mi_col, int has_rows, int has_cols,
+ int bsl) {
+ const int ctx = dec_partition_plane_context(twd, mi_row, mi_col, bsl);
+ const vpx_prob *const probs = twd->xd.partition_probs[ctx];
+ FRAME_COUNTS *counts = twd->xd.counts;
+ PARTITION_TYPE p;
+ vpx_reader *r = &twd->bit_reader;
+
+ if (has_rows && has_cols)
+ p = (PARTITION_TYPE)vpx_read_tree(r, vp9_partition_tree, probs);
+ else if (!has_rows && has_cols)
+ p = vpx_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
+ else if (has_rows && !has_cols)
+ p = vpx_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
+ else
+ p = PARTITION_SPLIT;
+
+ if (counts) ++counts->partition[ctx][p];
+
+ return p;
+}
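+
+// At the frame edges only partitions that keep every coded block inside the
+// frame are legal, so the !has_rows/!has_cols cases read a single bit (split
+// vs. the one permitted rectangular partition), and the corner case is
+// forced to PARTITION_SPLIT without reading any bits.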
+
+// TODO(slavarnway): eliminate bsize and subsize in future commits
+static void decode_partition(TileWorkerData *twd, VP9Decoder *const pbi,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ int n4x4_l2) {
+ VP9_COMMON *const cm = &pbi->common;
+ const int n8x8_l2 = n4x4_l2 - 1;
+ const int num_8x8_wh = 1 << n8x8_l2;
+ const int hbs = num_8x8_wh >> 1;
+ PARTITION_TYPE partition;
+ BLOCK_SIZE subsize;
+ const int has_rows = (mi_row + hbs) < cm->mi_rows;
+ const int has_cols = (mi_col + hbs) < cm->mi_cols;
+ MACROBLOCKD *const xd = &twd->xd;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+ partition = read_partition(twd, mi_row, mi_col, has_rows, has_cols, n8x8_l2);
+ subsize = subsize_lookup[partition][bsize]; // get_subsize(bsize, partition);
+ if (!hbs) {
+ // calculate bmode block dimensions (log 2)
+ xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT);
+ xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ);
+ decode_block(twd, pbi, mi_row, mi_col, subsize, 1, 1);
+ } else {
+ switch (partition) {
+ case PARTITION_NONE:
+ decode_block(twd, pbi, mi_row, mi_col, subsize, n4x4_l2, n4x4_l2);
+ break;
+ case PARTITION_HORZ:
+ decode_block(twd, pbi, mi_row, mi_col, subsize, n4x4_l2, n8x8_l2);
+ if (has_rows)
+ decode_block(twd, pbi, mi_row + hbs, mi_col, subsize, n4x4_l2,
+ n8x8_l2);
+ break;
+ case PARTITION_VERT:
+ decode_block(twd, pbi, mi_row, mi_col, subsize, n8x8_l2, n4x4_l2);
+ if (has_cols)
+ decode_block(twd, pbi, mi_row, mi_col + hbs, subsize, n8x8_l2,
+ n4x4_l2);
+ break;
+ case PARTITION_SPLIT:
+ decode_partition(twd, pbi, mi_row, mi_col, subsize, n8x8_l2);
+ decode_partition(twd, pbi, mi_row, mi_col + hbs, subsize, n8x8_l2);
+ decode_partition(twd, pbi, mi_row + hbs, mi_col, subsize, n8x8_l2);
+ decode_partition(twd, pbi, mi_row + hbs, mi_col + hbs, subsize,
+ n8x8_l2);
+ break;
+ default: assert(0 && "Invalid partition type");
+ }
+ }
+
+ // update partition context
+ if (bsize >= BLOCK_8X8 &&
+ (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
+ dec_update_partition_context(twd, mi_row, mi_col, subsize, num_8x8_wh);
+}
+
+static void process_partition(TileWorkerData *twd, VP9Decoder *const pbi,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ int n4x4_l2, int parse_recon_flag,
+ process_block_fn_t process_block) {
+ VP9_COMMON *const cm = &pbi->common;
+ const int n8x8_l2 = n4x4_l2 - 1;
+ const int num_8x8_wh = 1 << n8x8_l2;
+ const int hbs = num_8x8_wh >> 1;
+ PARTITION_TYPE partition;
+ BLOCK_SIZE subsize;
+ const int has_rows = (mi_row + hbs) < cm->mi_rows;
+ const int has_cols = (mi_col + hbs) < cm->mi_cols;
+ MACROBLOCKD *const xd = &twd->xd;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+ if (parse_recon_flag & PARSE) {
+ *xd->partition =
+ read_partition(twd, mi_row, mi_col, has_rows, has_cols, n8x8_l2);
+ }
+
+ partition = *xd->partition;
+ xd->partition++;
+
+ subsize = get_subsize(bsize, partition);
+ if (!hbs) {
+ // calculate bmode block dimensions (log 2)
+ xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT);
+ xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ);
+ process_block(twd, pbi, mi_row, mi_col, subsize, 1, 1);
+ } else {
+ switch (partition) {
+ case PARTITION_NONE:
+ process_block(twd, pbi, mi_row, mi_col, subsize, n4x4_l2, n4x4_l2);
+ break;
+ case PARTITION_HORZ:
+ process_block(twd, pbi, mi_row, mi_col, subsize, n4x4_l2, n8x8_l2);
+ if (has_rows)
+ process_block(twd, pbi, mi_row + hbs, mi_col, subsize, n4x4_l2,
+ n8x8_l2);
+ break;
+ case PARTITION_VERT:
+ process_block(twd, pbi, mi_row, mi_col, subsize, n8x8_l2, n4x4_l2);
+ if (has_cols)
+ process_block(twd, pbi, mi_row, mi_col + hbs, subsize, n8x8_l2,
+ n4x4_l2);
+ break;
+ case PARTITION_SPLIT:
+ process_partition(twd, pbi, mi_row, mi_col, subsize, n8x8_l2,
+ parse_recon_flag, process_block);
+ process_partition(twd, pbi, mi_row, mi_col + hbs, subsize, n8x8_l2,
+ parse_recon_flag, process_block);
+ process_partition(twd, pbi, mi_row + hbs, mi_col, subsize, n8x8_l2,
+ parse_recon_flag, process_block);
+ process_partition(twd, pbi, mi_row + hbs, mi_col + hbs, subsize,
+ n8x8_l2, parse_recon_flag, process_block);
+ break;
+ default: assert(0 && "Invalid partition type");
+ }
+ }
+
+ if (parse_recon_flag & PARSE) {
+ // update partition context
+ if ((bsize == BLOCK_8X8 || partition != PARTITION_SPLIT) &&
+ bsize >= BLOCK_8X8)
+ dec_update_partition_context(twd, mi_row, mi_col, subsize, num_8x8_wh);
+ }
+}
+
+static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
+ size_t read_size,
+ struct vpx_internal_error_info *error_info,
+ vpx_reader *r, vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state) {
+ // Validate the calculated partition length. If the buffer described by the
+ // partition can't be fully read then throw an error.
+ if (!read_is_valid(data, read_size, data_end))
+ vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt tile length");
+
+ if (vpx_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
+ vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate bool decoder %d", 1);
+}
+
+static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
+ vpx_reader *r) {
+ int i, j, k, l, m;
+
+ if (vpx_read_bit(r))
+ for (i = 0; i < PLANE_TYPES; ++i)
+ for (j = 0; j < REF_TYPES; ++j)
+ for (k = 0; k < COEF_BANDS; ++k)
+ for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
+ for (m = 0; m < UNCONSTRAINED_NODES; ++m)
+ vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
+}
+
+static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, vpx_reader *r) {
+ const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
+ TX_SIZE tx_size;
+ for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
+ read_coef_probs_common(fc->coef_probs[tx_size], r);
+}
+
+static void setup_segmentation(struct segmentation *seg,
+ struct vpx_read_bit_buffer *rb) {
+ int i, j;
+
+ seg->update_map = 0;
+ seg->update_data = 0;
+
+ seg->enabled = vpx_rb_read_bit(rb);
+ if (!seg->enabled) return;
+
+ // Segmentation map update
+ seg->update_map = vpx_rb_read_bit(rb);
+ if (seg->update_map) {
+ for (i = 0; i < SEG_TREE_PROBS; i++)
+ seg->tree_probs[i] =
+ vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB;
+
+ seg->temporal_update = vpx_rb_read_bit(rb);
+ if (seg->temporal_update) {
+ for (i = 0; i < PREDICTION_PROBS; i++)
+ seg->pred_probs[i] =
+ vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB;
+ } else {
+ for (i = 0; i < PREDICTION_PROBS; i++) seg->pred_probs[i] = MAX_PROB;
+ }
+ }
+
+ // Segmentation data update
+ seg->update_data = vpx_rb_read_bit(rb);
+ if (seg->update_data) {
+ seg->abs_delta = vpx_rb_read_bit(rb);
+
+ vp9_clearall_segfeatures(seg);
+
+ for (i = 0; i < MAX_SEGMENTS; i++) {
+ for (j = 0; j < SEG_LVL_MAX; j++) {
+ int data = 0;
+ const int feature_enabled = vpx_rb_read_bit(rb);
+ if (feature_enabled) {
+ vp9_enable_segfeature(seg, i, j);
+ data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
+ if (vp9_is_segfeature_signed(j))
+ data = vpx_rb_read_bit(rb) ? -data : data;
+ }
+ vp9_set_segdata(seg, i, j, data);
+ }
+ }
+ }
+}
+
+static void setup_loopfilter(struct loopfilter *lf,
+ struct vpx_read_bit_buffer *rb) {
+ lf->filter_level = vpx_rb_read_literal(rb, 6);
+ lf->sharpness_level = vpx_rb_read_literal(rb, 3);
+
+ // Read in loop filter deltas applied at the MB level based on mode or ref
+ // frame.
+ lf->mode_ref_delta_update = 0;
+
+ lf->mode_ref_delta_enabled = vpx_rb_read_bit(rb);
+ if (lf->mode_ref_delta_enabled) {
+ lf->mode_ref_delta_update = vpx_rb_read_bit(rb);
+ if (lf->mode_ref_delta_update) {
+ int i;
+
+ for (i = 0; i < MAX_REF_LF_DELTAS; i++)
+ if (vpx_rb_read_bit(rb))
+ lf->ref_deltas[i] = vpx_rb_read_signed_literal(rb, 6);
+
+ for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
+ if (vpx_rb_read_bit(rb))
+ lf->mode_deltas[i] = vpx_rb_read_signed_literal(rb, 6);
+ }
+ }
+}
+
+static INLINE int read_delta_q(struct vpx_read_bit_buffer *rb) {
+ return vpx_rb_read_bit(rb) ? vpx_rb_read_signed_literal(rb, 4) : 0;
+}
+
+static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ struct vpx_read_bit_buffer *rb) {
+ cm->base_qindex = vpx_rb_read_literal(rb, QINDEX_BITS);
+ cm->y_dc_delta_q = read_delta_q(rb);
+ cm->uv_dc_delta_q = read_delta_q(rb);
+ cm->uv_ac_delta_q = read_delta_q(rb);
+ cm->dequant_bit_depth = cm->bit_depth;
+ xd->lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 &&
+ cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+ xd->bd = (int)cm->bit_depth;
+#endif
+}
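+
+// A frame is lossless only when base_qindex and all three delta-q values
+// are zero; in that mode the 4x4 Walsh-Hadamard transform (vp9_iwht4x4_add)
+// is used in place of the DCT paths.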
+
+static void setup_segmentation_dequant(VP9_COMMON *const cm) {
+ // Build y/uv dequant values based on segmentation.
+ if (cm->seg.enabled) {
+ int i;
+ for (i = 0; i < MAX_SEGMENTS; ++i) {
+ const int qindex = vp9_get_qindex(&cm->seg, i, cm->base_qindex);
+ cm->y_dequant[i][0] =
+ vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->y_dequant[i][1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
+ cm->uv_dequant[i][0] =
+ vp9_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ cm->uv_dequant[i][1] =
+ vp9_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+ }
+ } else {
+ const int qindex = cm->base_qindex;
+ // When segmentation is disabled, only the first value is used. The
+ // remaining are don't cares.
+ cm->y_dequant[0][0] = vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->y_dequant[0][1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
+ cm->uv_dequant[0][0] =
+ vp9_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ cm->uv_dequant[0][1] =
+ vp9_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+ }
+}
+
+static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) {
+ const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH, EIGHTTAP,
+ EIGHTTAP_SHARP, BILINEAR };
+ return vpx_rb_read_bit(rb) ? SWITCHABLE
+ : literal_to_filter[vpx_rb_read_literal(rb, 2)];
+}
+
+static void setup_render_size(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+ cm->render_width = cm->width;
+ cm->render_height = cm->height;
+ if (vpx_rb_read_bit(rb))
+ vp9_read_frame_size(rb, &cm->render_width, &cm->render_height);
+}
+
+static void resize_mv_buffer(VP9_COMMON *cm) {
+ vpx_free(cm->cur_frame->mvs);
+ cm->cur_frame->mi_rows = cm->mi_rows;
+ cm->cur_frame->mi_cols = cm->mi_cols;
+ CHECK_MEM_ERROR(cm, cm->cur_frame->mvs,
+ (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
+ sizeof(*cm->cur_frame->mvs)));
+}
+
+static void resize_context_buffers(VP9_COMMON *cm, int width, int height) {
+#if CONFIG_SIZE_LIMIT
+ if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Dimensions of %dx%d beyond allowed size of %dx%d.",
+ width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
+#endif
+ if (cm->width != width || cm->height != height) {
+ const int new_mi_rows =
+ ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
+ const int new_mi_cols =
+ ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
+
+ // Allocations in vp9_alloc_context_buffers() depend on individual
+ // dimensions as well as the overall size.
+ if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
+ if (vp9_alloc_context_buffers(cm, width, height)) {
+ // The cm->mi_* values have been cleared and any existing context
+ // buffers have been freed. Clear cm->width and cm->height to be
+ // consistent and to force a realloc next time.
+ cm->width = 0;
+ cm->height = 0;
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate context buffers");
+ }
+ } else {
+ vp9_set_mb_mi(cm, width, height);
+ }
+ vp9_init_context_buffers(cm);
+ cm->width = width;
+ cm->height = height;
+ }
+ if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows ||
+ cm->mi_cols > cm->cur_frame->mi_cols) {
+ resize_mv_buffer(cm);
+ }
+}
+
+static void setup_frame_size(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+ int width, height;
+ BufferPool *const pool = cm->buffer_pool;
+ vp9_read_frame_size(rb, &width, &height);
+ resize_context_buffers(cm, width, height);
+ setup_render_size(cm, rb);
+
+ if (vpx_realloc_frame_buffer(
+ get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
+ cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+ cm->use_highbitdepth,
+#endif
+ VP9_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
+ &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
+ pool->cb_priv)) {
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffer");
+ }
+
+ pool->frame_bufs[cm->new_fb_idx].released = 0;
+ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
+ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
+ pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
+ pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
+ pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
+ pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
+ pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
+}
+
+static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
+ int ref_xss, int ref_yss,
+ vpx_bit_depth_t this_bit_depth,
+ int this_xss, int this_yss) {
+ return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
+ ref_yss == this_yss;
+}
+
+static void setup_frame_size_with_refs(VP9_COMMON *cm,
+ struct vpx_read_bit_buffer *rb) {
+ int width, height;
+ int found = 0, i;
+ int has_valid_ref_frame = 0;
+ BufferPool *const pool = cm->buffer_pool;
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ if (vpx_rb_read_bit(rb)) {
+ if (cm->frame_refs[i].idx != INVALID_IDX) {
+ YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
+ width = buf->y_crop_width;
+ height = buf->y_crop_height;
+ found = 1;
+ break;
+ } else {
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Failed to decode frame size");
+ }
+ }
+ }
+
+ if (!found) vp9_read_frame_size(rb, &width, &height);
+
+ if (width <= 0 || height <= 0)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame size");
+
+  // Check to make sure that at least one of the frames that this frame
+  // references has valid dimensions.
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ RefBuffer *const ref_frame = &cm->frame_refs[i];
+ has_valid_ref_frame |=
+ (ref_frame->idx != INVALID_IDX &&
+ valid_ref_frame_size(ref_frame->buf->y_crop_width,
+ ref_frame->buf->y_crop_height, width, height));
+ }
+ if (!has_valid_ref_frame)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Referenced frame has invalid size");
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ RefBuffer *const ref_frame = &cm->frame_refs[i];
+ if (ref_frame->idx == INVALID_IDX ||
+ !valid_ref_frame_img_fmt(ref_frame->buf->bit_depth,
+ ref_frame->buf->subsampling_x,
+ ref_frame->buf->subsampling_y, cm->bit_depth,
+ cm->subsampling_x, cm->subsampling_y))
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Referenced frame has incompatible color format");
+ }
+
+ resize_context_buffers(cm, width, height);
+ setup_render_size(cm, rb);
+
+ if (vpx_realloc_frame_buffer(
+ get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
+ cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+ cm->use_highbitdepth,
+#endif
+ VP9_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
+ &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
+ pool->cb_priv)) {
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffer");
+ }
+
+ pool->frame_bufs[cm->new_fb_idx].released = 0;
+ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
+ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
+ pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
+ pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
+ pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
+ pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
+ pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
+}
+
+static void setup_tile_info(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+ int min_log2_tile_cols, max_log2_tile_cols, max_ones;
+ vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+
+ // columns
+ max_ones = max_log2_tile_cols - min_log2_tile_cols;
+ cm->log2_tile_cols = min_log2_tile_cols;
+ while (max_ones-- && vpx_rb_read_bit(rb)) cm->log2_tile_cols++;
+
+ if (cm->log2_tile_cols > 6)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid number of tile columns");
+
+ // rows
+ cm->log2_tile_rows = vpx_rb_read_bit(rb);
+ if (cm->log2_tile_rows) cm->log2_tile_rows += vpx_rb_read_bit(rb);
+}
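+
+// log2_tile_cols is coded as min_log2_tile_cols plus a unary run of 1-bits
+// capped at max_log2_tile_cols; log2_tile_rows uses at most two bits for a
+// value of 0, 1 or 2 (1, 2 or 4 tile rows).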
+
+// Reads the next tile, returning its size and advancing '*data' past it; the
+// last tile ('is_last') carries no explicit size and extends to 'data_end'.
+static void get_tile_buffer(const uint8_t *const data_end, int is_last,
+ struct vpx_internal_error_info *error_info,
+ const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state, TileBuffer *buf) {
+ size_t size;
+
+ if (!is_last) {
+ if (!read_is_valid(*data, 4, data_end))
+ vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt tile length");
+
+ if (decrypt_cb) {
+ uint8_t be_data[4];
+ decrypt_cb(decrypt_state, *data, be_data, 4);
+ size = mem_get_be32(be_data);
+ } else {
+ size = mem_get_be32(*data);
+ }
+ *data += 4;
+
+ if (size > (size_t)(data_end - *data))
+ vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt tile size");
+ } else {
+ size = data_end - *data;
+ }
+
+ buf->data = *data;
+ buf->size = size;
+
+ *data += size;
+}
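+
+// For example, a length prefix of 0x00 0x00 0x12 0x34 decodes (big-endian)
+// to a tile size of 0x1234 = 4660 bytes; the last tile carries no prefix and
+// simply spans the remainder of the frame data.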
+
+static void get_tile_buffers(VP9Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, int tile_cols,
+ int tile_rows,
+ TileBuffer (*tile_buffers)[1 << 6]) {
+ int r, c;
+
+ for (r = 0; r < tile_rows; ++r) {
+ for (c = 0; c < tile_cols; ++c) {
+ const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
+ TileBuffer *const buf = &tile_buffers[r][c];
+ buf->col = c;
+ get_tile_buffer(data_end, is_last, &pbi->common.error, &data,
+ pbi->decrypt_cb, pbi->decrypt_state, buf);
+ }
+ }
+}
+
+static void map_write(RowMTWorkerData *const row_mt_worker_data, int map_idx,
+ int sync_idx) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&row_mt_worker_data->recon_sync_mutex[sync_idx]);
+ row_mt_worker_data->recon_map[map_idx] = 1;
+ pthread_cond_signal(&row_mt_worker_data->recon_sync_cond[sync_idx]);
+ pthread_mutex_unlock(&row_mt_worker_data->recon_sync_mutex[sync_idx]);
+#else
+ (void)row_mt_worker_data;
+ (void)map_idx;
+ (void)sync_idx;
+#endif // CONFIG_MULTITHREAD
+}
+
+static void map_read(RowMTWorkerData *const row_mt_worker_data, int map_idx,
+ int sync_idx) {
+#if CONFIG_MULTITHREAD
+ volatile int8_t *map = row_mt_worker_data->recon_map + map_idx;
+ pthread_mutex_t *const mutex =
+ &row_mt_worker_data->recon_sync_mutex[sync_idx];
+ pthread_mutex_lock(mutex);
+ while (!(*map)) {
+ pthread_cond_wait(&row_mt_worker_data->recon_sync_cond[sync_idx], mutex);
+ }
+ pthread_mutex_unlock(mutex);
+#else
+ (void)row_mt_worker_data;
+ (void)map_idx;
+ (void)sync_idx;
+#endif // CONFIG_MULTITHREAD
+}
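+
+// map_write()/map_read() form a per-superblock producer/consumer pair: the
+// thread reconstructing superblock (row, col) publishes completion via
+// map_write(), and a thread working on row + 1 blocks in map_read() until
+// the superblock directly above it is done. The sync index selects a
+// per-(superblock-row, tile-column) mutex and condition variable.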
+
+static int lpf_map_write_check(VP9LfSync *lf_sync, int row, int num_tile_cols) {
+ int return_val = 0;
+#if CONFIG_MULTITHREAD
+ int corrupted;
+ pthread_mutex_lock(lf_sync->lf_mutex);
+ corrupted = lf_sync->corrupted;
+ pthread_mutex_unlock(lf_sync->lf_mutex);
+ if (!corrupted) {
+ pthread_mutex_lock(&lf_sync->recon_done_mutex[row]);
+ lf_sync->num_tiles_done[row] += 1;
+ if (num_tile_cols == lf_sync->num_tiles_done[row]) return_val = 1;
+ pthread_mutex_unlock(&lf_sync->recon_done_mutex[row]);
+ }
+#else
+ (void)lf_sync;
+ (void)row;
+ (void)num_tile_cols;
+#endif
+ return return_val;
+}
+
+static void vp9_tile_done(VP9Decoder *pbi) {
+#if CONFIG_MULTITHREAD
+ int terminate;
+ RowMTWorkerData *const row_mt_worker_data = pbi->row_mt_worker_data;
+ const int all_parse_done = 1 << pbi->common.log2_tile_cols;
+ pthread_mutex_lock(&row_mt_worker_data->recon_done_mutex);
+ row_mt_worker_data->num_tiles_done++;
+ terminate = all_parse_done == row_mt_worker_data->num_tiles_done;
+ pthread_mutex_unlock(&row_mt_worker_data->recon_done_mutex);
+ if (terminate) {
+ vp9_jobq_terminate(&row_mt_worker_data->jobq);
+ }
+#else
+ (void)pbi;
+#endif
+}
+
+static void vp9_jobq_alloc(VP9Decoder *pbi) {
+ VP9_COMMON *const cm = &pbi->common;
+ RowMTWorkerData *const row_mt_worker_data = pbi->row_mt_worker_data;
+ const int aligned_rows = mi_cols_aligned_to_sb(cm->mi_rows);
+ const int sb_rows = aligned_rows >> MI_BLOCK_SIZE_LOG2;
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const size_t jobq_size = (tile_cols * sb_rows * 2 + sb_rows) * sizeof(Job);
+
+ if (jobq_size > row_mt_worker_data->jobq_size) {
+ vpx_free(row_mt_worker_data->jobq_buf);
+ CHECK_MEM_ERROR(cm, row_mt_worker_data->jobq_buf, vpx_calloc(1, jobq_size));
+ vp9_jobq_init(&row_mt_worker_data->jobq, row_mt_worker_data->jobq_buf,
+ jobq_size);
+ row_mt_worker_data->jobq_size = jobq_size;
+ }
+}
+
+static void recon_tile_row(TileWorkerData *tile_data, VP9Decoder *pbi,
+ int mi_row, int is_last_row, VP9LfSync *lf_sync,
+ int cur_tile_col) {
+ VP9_COMMON *const cm = &pbi->common;
+ RowMTWorkerData *const row_mt_worker_data = pbi->row_mt_worker_data;
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+ const int sb_cols = aligned_cols >> MI_BLOCK_SIZE_LOG2;
+ const int cur_sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
+ int mi_col_start = tile_data->xd.tile.mi_col_start;
+ int mi_col_end = tile_data->xd.tile.mi_col_end;
+ int mi_col;
+
+ vp9_zero(tile_data->xd.left_context);
+ vp9_zero(tile_data->xd.left_seg_context);
+ for (mi_col = mi_col_start; mi_col < mi_col_end; mi_col += MI_BLOCK_SIZE) {
+ const int c = mi_col >> MI_BLOCK_SIZE_LOG2;
+ int plane;
+ const int sb_num = (cur_sb_row * (aligned_cols >> MI_BLOCK_SIZE_LOG2) + c);
+
+    // Top dependency: wait until the superblock directly above has been
+    // reconstructed.
+ if (cur_sb_row) {
+ map_read(row_mt_worker_data, ((cur_sb_row - 1) * sb_cols) + c,
+ ((cur_sb_row - 1) * tile_cols) + cur_tile_col);
+ }
+
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ tile_data->xd.plane[plane].eob =
+ row_mt_worker_data->eob[plane] + (sb_num << EOBS_PER_SB_LOG2);
+ tile_data->xd.plane[plane].dqcoeff =
+ row_mt_worker_data->dqcoeff[plane] + (sb_num << DQCOEFFS_PER_SB_LOG2);
+ }
+ tile_data->xd.partition =
+ row_mt_worker_data->partition + (sb_num * PARTITIONS_PER_SB);
+ process_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4, RECON,
+ recon_block);
+ if (cm->lf.filter_level && !cm->skip_loop_filter) {
+ // Queue LPF_JOB
+ int is_lpf_job_ready = 0;
+
+ if (mi_col + MI_BLOCK_SIZE >= mi_col_end) {
+        // Check whether this row has finished decoding in every tile column.
+ is_lpf_job_ready = lpf_map_write_check(lf_sync, cur_sb_row, tile_cols);
+
+ if (is_lpf_job_ready) {
+ Job lpf_job;
+ lpf_job.job_type = LPF_JOB;
+ if (cur_sb_row > 0) {
+ lpf_job.row_num = mi_row - MI_BLOCK_SIZE;
+ vp9_jobq_queue(&row_mt_worker_data->jobq, &lpf_job,
+ sizeof(lpf_job));
+ }
+ if (is_last_row) {
+ lpf_job.row_num = mi_row;
+ vp9_jobq_queue(&row_mt_worker_data->jobq, &lpf_job,
+ sizeof(lpf_job));
+ }
+ }
+ }
+ }
+ map_write(row_mt_worker_data, (cur_sb_row * sb_cols) + c,
+ (cur_sb_row * tile_cols) + cur_tile_col);
+ }
+}
+
+static void parse_tile_row(TileWorkerData *tile_data, VP9Decoder *pbi,
+ int mi_row, int cur_tile_col, uint8_t **data_end) {
+ int mi_col;
+ VP9_COMMON *const cm = &pbi->common;
+ RowMTWorkerData *const row_mt_worker_data = pbi->row_mt_worker_data;
+ TileInfo *tile = &tile_data->xd.tile;
+ TileBuffer *const buf = &pbi->tile_buffers[cur_tile_col];
+ const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+
+ vp9_zero(tile_data->dqcoeff);
+ vp9_tile_init(tile, cm, 0, cur_tile_col);
+
+  /* Initialize the bit reader only once per tile, at its first row */
+ if (mi_row == 0) {
+ setup_token_decoder(buf->data, *data_end, buf->size, &tile_data->error_info,
+ &tile_data->bit_reader, pbi->decrypt_cb,
+ pbi->decrypt_state);
+ }
+ vp9_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
+ tile_data->xd.error_info = &tile_data->error_info;
+
+ vp9_zero(tile_data->xd.left_context);
+ vp9_zero(tile_data->xd.left_seg_context);
+ for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
+ mi_col += MI_BLOCK_SIZE) {
+ const int r = mi_row >> MI_BLOCK_SIZE_LOG2;
+ const int c = mi_col >> MI_BLOCK_SIZE_LOG2;
+ int plane;
+ const int sb_num = (r * (aligned_cols >> MI_BLOCK_SIZE_LOG2) + c);
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ tile_data->xd.plane[plane].eob =
+ row_mt_worker_data->eob[plane] + (sb_num << EOBS_PER_SB_LOG2);
+ tile_data->xd.plane[plane].dqcoeff =
+ row_mt_worker_data->dqcoeff[plane] + (sb_num << DQCOEFFS_PER_SB_LOG2);
+ }
+ tile_data->xd.partition =
+ row_mt_worker_data->partition + sb_num * PARTITIONS_PER_SB;
+ process_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4, PARSE,
+ parse_block);
+ }
+}
+
+static int row_decode_worker_hook(void *arg1, void *arg2) {
+ ThreadData *const thread_data = (ThreadData *)arg1;
+ uint8_t **data_end = (uint8_t **)arg2;
+ VP9Decoder *const pbi = thread_data->pbi;
+ VP9_COMMON *const cm = &pbi->common;
+ RowMTWorkerData *const row_mt_worker_data = pbi->row_mt_worker_data;
+ const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+ const int aligned_rows = mi_cols_aligned_to_sb(cm->mi_rows);
+ const int sb_rows = aligned_rows >> MI_BLOCK_SIZE_LOG2;
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ Job job;
+ LFWorkerData *lf_data = thread_data->lf_data;
+ VP9LfSync *lf_sync = thread_data->lf_sync;
+ volatile int corrupted = 0;
+ TileWorkerData *volatile tile_data_recon = NULL;
+
+ while (!vp9_jobq_dequeue(&row_mt_worker_data->jobq, &job, sizeof(job), 1)) {
+ int mi_col;
+ const int mi_row = job.row_num;
+
+ if (job.job_type == LPF_JOB) {
+ lf_data->start = mi_row;
+ lf_data->stop = lf_data->start + MI_BLOCK_SIZE;
+
+ if (cm->lf.filter_level && !cm->skip_loop_filter &&
+ mi_row < cm->mi_rows) {
+ vp9_loopfilter_job(lf_data, lf_sync);
+ }
+ } else if (job.job_type == RECON_JOB) {
+ const int cur_sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
+ const int is_last_row = sb_rows - 1 == cur_sb_row;
+ int mi_col_start, mi_col_end;
+ if (!tile_data_recon)
+ CHECK_MEM_ERROR(cm, tile_data_recon,
+ vpx_memalign(32, sizeof(TileWorkerData)));
+
+ tile_data_recon->xd = pbi->mb;
+ vp9_tile_init(&tile_data_recon->xd.tile, cm, 0, job.tile_col);
+ vp9_init_macroblockd(cm, &tile_data_recon->xd, tile_data_recon->dqcoeff);
+ mi_col_start = tile_data_recon->xd.tile.mi_col_start;
+ mi_col_end = tile_data_recon->xd.tile.mi_col_end;
+
+ if (setjmp(tile_data_recon->error_info.jmp)) {
+ const int sb_cols = aligned_cols >> MI_BLOCK_SIZE_LOG2;
+ tile_data_recon->error_info.setjmp = 0;
+ corrupted = 1;
+ for (mi_col = mi_col_start; mi_col < mi_col_end;
+ mi_col += MI_BLOCK_SIZE) {
+ const int c = mi_col >> MI_BLOCK_SIZE_LOG2;
+ map_write(row_mt_worker_data, (cur_sb_row * sb_cols) + c,
+ (cur_sb_row * tile_cols) + job.tile_col);
+ }
+ if (is_last_row) {
+ vp9_tile_done(pbi);
+ }
+ continue;
+ }
+
+ tile_data_recon->error_info.setjmp = 1;
+ tile_data_recon->xd.error_info = &tile_data_recon->error_info;
+
+ recon_tile_row(tile_data_recon, pbi, mi_row, is_last_row, lf_sync,
+ job.tile_col);
+
+ if (corrupted)
+ vpx_internal_error(&tile_data_recon->error_info,
+ VPX_CODEC_CORRUPT_FRAME,
+ "Failed to decode tile data");
+
+ if (is_last_row) {
+ vp9_tile_done(pbi);
+ }
+ } else if (job.job_type == PARSE_JOB) {
+ TileWorkerData *const tile_data = &pbi->tile_worker_data[job.tile_col];
+
+ if (setjmp(tile_data->error_info.jmp)) {
+ tile_data->error_info.setjmp = 0;
+ corrupted = 1;
+ vp9_tile_done(pbi);
+ continue;
+ }
+
+ tile_data->xd = pbi->mb;
+ tile_data->xd.counts =
+ cm->frame_parallel_decoding_mode ? 0 : &tile_data->counts;
+
+ tile_data->error_info.setjmp = 1;
+
+ parse_tile_row(tile_data, pbi, mi_row, job.tile_col, data_end);
+
+ corrupted |= tile_data->xd.corrupted;
+ if (corrupted)
+ vpx_internal_error(&tile_data->error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Failed to decode tile data");
+
+ /* Queue in the recon_job for this row */
+ {
+ Job recon_job;
+ recon_job.row_num = mi_row;
+ recon_job.tile_col = job.tile_col;
+ recon_job.job_type = RECON_JOB;
+ vp9_jobq_queue(&row_mt_worker_data->jobq, &recon_job,
+ sizeof(recon_job));
+ }
+
+ /* Queue next parse job */
+ if (mi_row + MI_BLOCK_SIZE < cm->mi_rows) {
+ Job parse_job;
+ parse_job.row_num = mi_row + MI_BLOCK_SIZE;
+ parse_job.tile_col = job.tile_col;
+ parse_job.job_type = PARSE_JOB;
+ vp9_jobq_queue(&row_mt_worker_data->jobq, &parse_job,
+ sizeof(parse_job));
+ }
+ }
+ }
+
+ vpx_free(tile_data_recon);
+ return !corrupted;
+}
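+
+// In outline, the job queue drives a three-stage pipeline per superblock
+// row: a PARSE_JOB entropy-decodes one row and queues both a RECON_JOB for
+// that row and the PARSE_JOB for the next; a RECON_JOB reconstructs the row
+// and, once every tile column has finished it, queues LPF_JOBs; an LPF_JOB
+// loop-filters one superblock row. Workers loop on the queue until it is
+// terminated after the last row of every tile has been reconstructed.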
+
+static const uint8_t *decode_tiles(VP9Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end) {
+ VP9_COMMON *const cm = &pbi->common;
+ const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const int tile_rows = 1 << cm->log2_tile_rows;
+ TileBuffer tile_buffers[4][1 << 6];
+ int tile_row, tile_col;
+ int mi_row, mi_col;
+ TileWorkerData *tile_data = NULL;
+
+ if (cm->lf.filter_level && !cm->skip_loop_filter &&
+ pbi->lf_worker.data1 == NULL) {
+ CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
+ vpx_memalign(32, sizeof(LFWorkerData)));
+ pbi->lf_worker.hook = vp9_loop_filter_worker;
+ if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ "Loop filter thread creation failed");
+ }
+ }
+
+ if (cm->lf.filter_level && !cm->skip_loop_filter) {
+ LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
+ // Be sure to sync as we might be resuming after a failed frame decode.
+ winterface->sync(&pbi->lf_worker);
+ vp9_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
+ pbi->mb.plane);
+ }
+
+ assert(tile_rows <= 4);
+ assert(tile_cols <= (1 << 6));
+
+ // Note: this memset assumes above_context[0], [1] and [2]
+ // are allocated as part of the same buffer.
+ memset(cm->above_context, 0,
+ sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols);
+
+ memset(cm->above_seg_context, 0,
+ sizeof(*cm->above_seg_context) * aligned_cols);
+
+ vp9_reset_lfm(cm);
+
+ get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
+
+ // Load all tile information into tile_data.
+ for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
+ for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
+ const TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
+ tile_data = pbi->tile_worker_data + tile_cols * tile_row + tile_col;
+ tile_data->xd = pbi->mb;
+ tile_data->xd.corrupted = 0;
+ tile_data->xd.counts =
+ cm->frame_parallel_decoding_mode ? NULL : &cm->counts;
+ vp9_zero(tile_data->dqcoeff);
+ vp9_tile_init(&tile_data->xd.tile, cm, tile_row, tile_col);
+ setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
+ &tile_data->bit_reader, pbi->decrypt_cb,
+ pbi->decrypt_state);
+ vp9_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
+ }
+ }
+
+ for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
+ TileInfo tile;
+ vp9_tile_set_row(&tile, cm, tile_row);
+ for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
+ mi_row += MI_BLOCK_SIZE) {
+ for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
+ const int col =
+ pbi->inv_tile_order ? tile_cols - tile_col - 1 : tile_col;
+ tile_data = pbi->tile_worker_data + tile_cols * tile_row + col;
+ vp9_tile_set_col(&tile, cm, col);
+ vp9_zero(tile_data->xd.left_context);
+ vp9_zero(tile_data->xd.left_seg_context);
+ for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
+ mi_col += MI_BLOCK_SIZE) {
+ if (pbi->row_mt == 1) {
+ int plane;
+ RowMTWorkerData *const row_mt_worker_data = pbi->row_mt_worker_data;
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ tile_data->xd.plane[plane].eob = row_mt_worker_data->eob[plane];
+ tile_data->xd.plane[plane].dqcoeff =
+ row_mt_worker_data->dqcoeff[plane];
+ }
+ tile_data->xd.partition = row_mt_worker_data->partition;
+ process_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4,
+ PARSE, parse_block);
+
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ tile_data->xd.plane[plane].eob = row_mt_worker_data->eob[plane];
+ tile_data->xd.plane[plane].dqcoeff =
+ row_mt_worker_data->dqcoeff[plane];
+ }
+ tile_data->xd.partition = row_mt_worker_data->partition;
+ process_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4,
+ RECON, recon_block);
+ } else {
+ decode_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4);
+ }
+ }
+ pbi->mb.corrupted |= tile_data->xd.corrupted;
+ if (pbi->mb.corrupted)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Failed to decode tile data");
+ }
+ // Loopfilter one row.
+ if (cm->lf.filter_level && !cm->skip_loop_filter) {
+ const int lf_start = mi_row - MI_BLOCK_SIZE;
+ LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
+
+      // Delay the loop filter by one superblock row.
+ if (lf_start < 0) continue;
+
+      // Near the bottom of the frame, defer the remaining rows to the final
+      // loop filter pass below, which runs in this thread.
+ if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue;
+
+ winterface->sync(&pbi->lf_worker);
+ lf_data->start = lf_start;
+ lf_data->stop = mi_row;
+ if (pbi->max_threads > 1) {
+ winterface->launch(&pbi->lf_worker);
+ } else {
+ winterface->execute(&pbi->lf_worker);
+ }
+ }
+ }
+ }
+
+ // Loopfilter remaining rows in the frame.
+ if (cm->lf.filter_level && !cm->skip_loop_filter) {
+ LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
+ winterface->sync(&pbi->lf_worker);
+ lf_data->start = lf_data->stop;
+ lf_data->stop = cm->mi_rows;
+ winterface->execute(&pbi->lf_worker);
+ }
+
+ // Get last tile data.
+ tile_data = pbi->tile_worker_data + tile_cols * tile_rows - 1;
+
+ return vpx_reader_find_end(&tile_data->bit_reader);
+}
+
+static void set_rows_after_error(VP9LfSync *lf_sync, int start_row, int mi_rows,
+ int num_tiles_left, int total_num_tiles) {
+ do {
+ int mi_row;
+ const int aligned_rows = mi_cols_aligned_to_sb(mi_rows);
+ const int sb_rows = (aligned_rows >> MI_BLOCK_SIZE_LOG2);
+ const int corrupted = 1;
+ for (mi_row = start_row; mi_row < mi_rows; mi_row += MI_BLOCK_SIZE) {
+ const int is_last_row = (sb_rows - 1 == mi_row >> MI_BLOCK_SIZE_LOG2);
+ vp9_set_row(lf_sync, total_num_tiles, mi_row >> MI_BLOCK_SIZE_LOG2,
+ is_last_row, corrupted);
+ }
+    /* If there are multiple tiles, each subsequent tile should start marking
+     * row progress from row 0.
+     */
+ start_row = 0;
+ } while (num_tiles_left--);
+}
+
+// On entry, 'tile_data->data_end' points to the end of the input frame; on
+// exit, it is updated to the bit reader position of the final tile column if
+// that tile is present in this worker's buffer group, or NULL otherwise.
+static int tile_worker_hook(void *arg1, void *arg2) {
+ TileWorkerData *const tile_data = (TileWorkerData *)arg1;
+ VP9Decoder *const pbi = (VP9Decoder *)arg2;
+
+ TileInfo *volatile tile = &tile_data->xd.tile;
+ const int final_col = (1 << pbi->common.log2_tile_cols) - 1;
+ const uint8_t *volatile bit_reader_end = NULL;
+ VP9_COMMON *cm = &pbi->common;
+
+ LFWorkerData *lf_data = tile_data->lf_data;
+ VP9LfSync *lf_sync = tile_data->lf_sync;
+
+ volatile int mi_row = 0;
+ volatile int n = tile_data->buf_start;
+ tile_data->error_info.setjmp = 1;
+
+ if (setjmp(tile_data->error_info.jmp)) {
+ tile_data->error_info.setjmp = 0;
+ tile_data->xd.corrupted = 1;
+ tile_data->data_end = NULL;
+ if (pbi->lpf_mt_opt && cm->lf.filter_level && !cm->skip_loop_filter) {
+ const int num_tiles_left = tile_data->buf_end - n;
+ const int mi_row_start = mi_row;
+ set_rows_after_error(lf_sync, mi_row_start, cm->mi_rows, num_tiles_left,
+ 1 << cm->log2_tile_cols);
+ }
+ return 0;
+ }
+
+ tile_data->xd.corrupted = 0;
+
+ do {
+ int mi_col;
+ const TileBuffer *const buf = pbi->tile_buffers + n;
+
+    /* Initializing to 0 is safe since we do not deal with streams that have
+     * more than one row of tiles (so tile->mi_row_start will be 0).
+     */
+ assert(cm->log2_tile_rows == 0);
+ mi_row = 0;
+ vp9_zero(tile_data->dqcoeff);
+ vp9_tile_init(tile, &pbi->common, 0, buf->col);
+ setup_token_decoder(buf->data, tile_data->data_end, buf->size,
+ &tile_data->error_info, &tile_data->bit_reader,
+ pbi->decrypt_cb, pbi->decrypt_state);
+ vp9_init_macroblockd(&pbi->common, &tile_data->xd, tile_data->dqcoeff);
+    // vp9_init_macroblockd() resets xd.error_info; point it back at this
+    // tile's error info.
+ tile_data->xd.error_info = &tile_data->error_info;
+
+ for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
+ mi_row += MI_BLOCK_SIZE) {
+ vp9_zero(tile_data->xd.left_context);
+ vp9_zero(tile_data->xd.left_seg_context);
+ for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
+ mi_col += MI_BLOCK_SIZE) {
+ decode_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4);
+ }
+ if (pbi->lpf_mt_opt && cm->lf.filter_level && !cm->skip_loop_filter) {
+ const int aligned_rows = mi_cols_aligned_to_sb(cm->mi_rows);
+ const int sb_rows = (aligned_rows >> MI_BLOCK_SIZE_LOG2);
+ const int is_last_row = (sb_rows - 1 == mi_row >> MI_BLOCK_SIZE_LOG2);
+ vp9_set_row(lf_sync, 1 << cm->log2_tile_cols,
+ mi_row >> MI_BLOCK_SIZE_LOG2, is_last_row,
+ tile_data->xd.corrupted);
+ }
+ }
+
+ if (buf->col == final_col) {
+ bit_reader_end = vpx_reader_find_end(&tile_data->bit_reader);
+ }
+ } while (!tile_data->xd.corrupted && ++n <= tile_data->buf_end);
+
+ if (pbi->lpf_mt_opt && n < tile_data->buf_end && cm->lf.filter_level &&
+ !cm->skip_loop_filter) {
+    /* 'n' was not incremented in the tile loop, so increment it before
+     * computing the number of tiles left.
+     */
+ ++n;
+ set_rows_after_error(lf_sync, 0, cm->mi_rows, tile_data->buf_end - n,
+ 1 << cm->log2_tile_cols);
+ }
+
+ if (pbi->lpf_mt_opt && !tile_data->xd.corrupted && cm->lf.filter_level &&
+ !cm->skip_loop_filter) {
+ vp9_loopfilter_rows(lf_data, lf_sync);
+ }
+
+ tile_data->data_end = bit_reader_end;
+ return !tile_data->xd.corrupted;
+}
+
+// Sorts tile buffers by size in descending order.
+static int compare_tile_buffers(const void *a, const void *b) {
+ const TileBuffer *const buf_a = (const TileBuffer *)a;
+ const TileBuffer *const buf_b = (const TileBuffer *)b;
+ return (buf_a->size < buf_b->size) - (buf_a->size > buf_b->size);
+}
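+
+// E.g. sizes (a, b) = (100, 40) yield (0 - 1) = -1, so the larger buffer
+// sorts first; equal sizes yield 0.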
+
+static INLINE void init_mt(VP9Decoder *pbi) {
+ int n;
+ VP9_COMMON *const cm = &pbi->common;
+ VP9LfSync *lf_row_sync = &pbi->lf_row_sync;
+ const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+ const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+
+ if (pbi->num_tile_workers == 0) {
+ const int num_threads = pbi->max_threads;
+ CHECK_MEM_ERROR(cm, pbi->tile_workers,
+ vpx_malloc(num_threads * sizeof(*pbi->tile_workers)));
+ for (n = 0; n < num_threads; ++n) {
+ VPxWorker *const worker = &pbi->tile_workers[n];
+ ++pbi->num_tile_workers;
+
+ winterface->init(worker);
+ if (n < num_threads - 1 && !winterface->reset(worker)) {
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ "Tile decoder thread creation failed");
+ }
+ }
+ }
+
+  // Initialize loop filter synchronization.
+ if ((pbi->lpf_mt_opt || pbi->row_mt) && cm->lf.filter_level &&
+ !cm->skip_loop_filter) {
+ vp9_lpf_mt_init(lf_row_sync, cm, cm->lf.filter_level,
+ pbi->num_tile_workers);
+ }
+
+ // Note: this memset assumes above_context[0], [1] and [2]
+ // are allocated as part of the same buffer.
+ memset(cm->above_context, 0,
+ sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols);
+
+ memset(cm->above_seg_context, 0,
+ sizeof(*cm->above_seg_context) * aligned_mi_cols);
+
+ vp9_reset_lfm(cm);
+}
+
+static const uint8_t *decode_tiles_row_wise_mt(VP9Decoder *pbi,
+ const uint8_t *data,
+ const uint8_t *data_end) {
+ VP9_COMMON *const cm = &pbi->common;
+ RowMTWorkerData *const row_mt_worker_data = pbi->row_mt_worker_data;
+ const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const int tile_rows = 1 << cm->log2_tile_rows;
+ const int num_workers = pbi->max_threads;
+ int i, n;
+ int col;
+ int corrupted = 0;
+ const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
+ const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
+ VP9LfSync *lf_row_sync = &pbi->lf_row_sync;
+ YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
+
+ assert(tile_cols <= (1 << 6));
+ assert(tile_rows == 1);
+ (void)tile_rows;
+
+ memset(row_mt_worker_data->recon_map, 0,
+ sb_rows * sb_cols * sizeof(*row_mt_worker_data->recon_map));
+
+ init_mt(pbi);
+
+ // Reset tile decoding hook
+ for (n = 0; n < num_workers; ++n) {
+ VPxWorker *const worker = &pbi->tile_workers[n];
+ ThreadData *const thread_data = &pbi->row_mt_worker_data->thread_data[n];
+ winterface->sync(worker);
+
+ if (cm->lf.filter_level && !cm->skip_loop_filter) {
+ thread_data->lf_sync = lf_row_sync;
+ thread_data->lf_data = &thread_data->lf_sync->lfdata[n];
+ vp9_loop_filter_data_reset(thread_data->lf_data, new_fb, cm,
+ pbi->mb.plane);
+ }
+
+ thread_data->pbi = pbi;
+
+ worker->hook = row_decode_worker_hook;
+ worker->data1 = thread_data;
+ worker->data2 = (void *)&row_mt_worker_data->data_end;
+ }
+
+ for (col = 0; col < tile_cols; ++col) {
+ TileWorkerData *const tile_data = &pbi->tile_worker_data[col];
+ tile_data->xd = pbi->mb;
+ tile_data->xd.counts =
+ cm->frame_parallel_decoding_mode ? NULL : &tile_data->counts;
+ }
+
+  /* Reset the jobq to the start of the jobq buffer */
+ vp9_jobq_reset(&row_mt_worker_data->jobq);
+ row_mt_worker_data->num_tiles_done = 0;
+ row_mt_worker_data->data_end = NULL;
+
+ // Load tile data into tile_buffers
+ get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows,
+ &pbi->tile_buffers);
+
+ // Initialize thread frame counts.
+ if (!cm->frame_parallel_decoding_mode) {
+ for (col = 0; col < tile_cols; ++col) {
+ TileWorkerData *const tile_data = &pbi->tile_worker_data[col];
+ vp9_zero(tile_data->counts);
+ }
+ }
+
+  // Queue parse jobs for the 0th row of every tile.
+ for (col = 0; col < tile_cols; ++col) {
+ Job parse_job;
+ parse_job.row_num = 0;
+ parse_job.tile_col = col;
+ parse_job.job_type = PARSE_JOB;
+ vp9_jobq_queue(&row_mt_worker_data->jobq, &parse_job, sizeof(parse_job));
+ }
+
+ for (i = 0; i < num_workers; ++i) {
+ VPxWorker *const worker = &pbi->tile_workers[i];
+ worker->had_error = 0;
+ if (i == num_workers - 1) {
+ winterface->execute(worker);
+ } else {
+ winterface->launch(worker);
+ }
+ }
+
+ for (; n > 0; --n) {
+ VPxWorker *const worker = &pbi->tile_workers[n - 1];
+ // TODO(jzern): The tile may have specific error data associated with
+ // its vpx_internal_error_info which could be propagated to the main info
+ // in cm. Additionally once the threads have been synced and an error is
+ // detected, there's no point in continuing to decode tiles.
+ corrupted |= !winterface->sync(worker);
+ }
+
+ pbi->mb.corrupted = corrupted;
+
+ {
+ /* Set data end */
+ TileWorkerData *const tile_data = &pbi->tile_worker_data[tile_cols - 1];
+ row_mt_worker_data->data_end = vpx_reader_find_end(&tile_data->bit_reader);
+ }
+
+ // Accumulate thread frame counts.
+ if (!cm->frame_parallel_decoding_mode) {
+ for (i = 0; i < tile_cols; ++i) {
+ TileWorkerData *const tile_data = &pbi->tile_worker_data[i];
+ vp9_accumulate_frame_counts(&cm->counts, &tile_data->counts, 1);
+ }
+ }
+
+ return row_mt_worker_data->data_end;
+}
+
+static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end) {
+ VP9_COMMON *const cm = &pbi->common;
+ const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ const uint8_t *bit_reader_end = NULL;
+ VP9LfSync *lf_row_sync = &pbi->lf_row_sync;
+ YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const int tile_rows = 1 << cm->log2_tile_rows;
+ const int num_workers = VPXMIN(pbi->max_threads, tile_cols);
+ int n;
+
+ assert(tile_cols <= (1 << 6));
+ assert(tile_rows == 1);
+ (void)tile_rows;
+
+ init_mt(pbi);
+
+ // Reset tile decoding hook
+ for (n = 0; n < num_workers; ++n) {
+ VPxWorker *const worker = &pbi->tile_workers[n];
+ TileWorkerData *const tile_data =
+ &pbi->tile_worker_data[n + pbi->total_tiles];
+ winterface->sync(worker);
+
+ if (pbi->lpf_mt_opt && cm->lf.filter_level && !cm->skip_loop_filter) {
+ tile_data->lf_sync = lf_row_sync;
+ tile_data->lf_data = &tile_data->lf_sync->lfdata[n];
+ vp9_loop_filter_data_reset(tile_data->lf_data, new_fb, cm, pbi->mb.plane);
+ tile_data->lf_data->y_only = 0;
+ }
+
+ tile_data->xd = pbi->mb;
+ tile_data->xd.counts =
+ cm->frame_parallel_decoding_mode ? NULL : &tile_data->counts;
+ worker->hook = tile_worker_hook;
+ worker->data1 = tile_data;
+ worker->data2 = pbi;
+ }
+
+ // Load tile data into tile_buffers
+ get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows,
+ &pbi->tile_buffers);
+
+ // Sort the buffers based on size in descending order.
+ qsort(pbi->tile_buffers, tile_cols, sizeof(pbi->tile_buffers[0]),
+ compare_tile_buffers);
+
+ if (num_workers == tile_cols) {
+ // Rearrange the tile buffers such that the largest, and
+ // presumably the most difficult, tile will be decoded in the main thread.
+ // This should help minimize the number of instances where the main thread
+ // is waiting for a worker to complete.
+ const TileBuffer largest = pbi->tile_buffers[0];
+ memmove(pbi->tile_buffers, pbi->tile_buffers + 1,
+ (tile_cols - 1) * sizeof(pbi->tile_buffers[0]));
+ pbi->tile_buffers[tile_cols - 1] = largest;
+ } else {
+ int start = 0, end = tile_cols - 2;
+ TileBuffer tmp;
+
+ // Interleave the tiles to distribute the load between threads, assuming a
+ // larger tile implies it is more difficult to decode.
+ while (start < end) {
+ tmp = pbi->tile_buffers[start];
+ pbi->tile_buffers[start] = pbi->tile_buffers[end];
+ pbi->tile_buffers[end] = tmp;
+ start += 2;
+ end -= 2;
+ }
+ }
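+
+  // Worked example (illustration): with six tiles sorted by size descending,
+  // [A, B, C, D, E, F], start = 0 and end = 4 swap A and E, giving
+  // [E, B, C, D, A, F]; three workers then take two consecutive buffers
+  // each, so the largest tile A lands on the last worker, which runs on the
+  // calling thread.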
+
+ // Initialize thread frame counts.
+ if (!cm->frame_parallel_decoding_mode) {
+ for (n = 0; n < num_workers; ++n) {
+ TileWorkerData *const tile_data =
+ (TileWorkerData *)pbi->tile_workers[n].data1;
+ vp9_zero(tile_data->counts);
+ }
+ }
+
+ {
+ const int base = tile_cols / num_workers;
+ const int remain = tile_cols % num_workers;
+ int buf_start = 0;
+
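+    // E.g. tile_cols = 5, num_workers = 2: base = 2, remain = 1, so worker 0
+    // gets count = 2 + (1 + 0) / 2 = 2 tiles and worker 1 gets
+    // count = 2 + (1 + 1) / 2 = 3, covering all five buffers.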
+ for (n = 0; n < num_workers; ++n) {
+ const int count = base + (remain + n) / num_workers;
+ VPxWorker *const worker = &pbi->tile_workers[n];
+ TileWorkerData *const tile_data = (TileWorkerData *)worker->data1;
+
+ tile_data->buf_start = buf_start;
+ tile_data->buf_end = buf_start + count - 1;
+ tile_data->data_end = data_end;
+ buf_start += count;
+
+ worker->had_error = 0;
+ if (n == num_workers - 1) {
+ assert(tile_data->buf_end == tile_cols - 1);
+ winterface->execute(worker);
+ } else {
+ winterface->launch(worker);
+ }
+ }
+
+ for (; n > 0; --n) {
+ VPxWorker *const worker = &pbi->tile_workers[n - 1];
+ TileWorkerData *const tile_data = (TileWorkerData *)worker->data1;
+ // TODO(jzern): The tile may have specific error data associated with
+ // its vpx_internal_error_info which could be propagated to the main info
+ // in cm. Additionally once the threads have been synced and an error is
+ // detected, there's no point in continuing to decode tiles.
+ pbi->mb.corrupted |= !winterface->sync(worker);
+ if (!bit_reader_end) bit_reader_end = tile_data->data_end;
+ }
+ }
+
+ // Accumulate thread frame counts.
+ if (!cm->frame_parallel_decoding_mode) {
+ for (n = 0; n < num_workers; ++n) {
+ TileWorkerData *const tile_data =
+ (TileWorkerData *)pbi->tile_workers[n].data1;
+ vp9_accumulate_frame_counts(&cm->counts, &tile_data->counts, 1);
+ }
+ }
+
+ assert(bit_reader_end || pbi->mb.corrupted);
+ return bit_reader_end;
+}
+
+static void error_handler(void *data) {
+ VP9_COMMON *const cm = (VP9_COMMON *)data;
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
+}
+
+static void read_bitdepth_colorspace_sampling(VP9_COMMON *cm,
+ struct vpx_read_bit_buffer *rb) {
+ if (cm->profile >= PROFILE_2) {
+ cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
+#if CONFIG_VP9_HIGHBITDEPTH
+ cm->use_highbitdepth = 1;
+#endif
+ } else {
+ cm->bit_depth = VPX_BITS_8;
+#if CONFIG_VP9_HIGHBITDEPTH
+ cm->use_highbitdepth = 0;
+#endif
+ }
+ cm->color_space = vpx_rb_read_literal(rb, 3);
+ if (cm->color_space != VPX_CS_SRGB) {
+ cm->color_range = (vpx_color_range_t)vpx_rb_read_bit(rb);
+ if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
+ cm->subsampling_x = vpx_rb_read_bit(rb);
+ cm->subsampling_y = vpx_rb_read_bit(rb);
+ if (cm->subsampling_x == 1 && cm->subsampling_y == 1)
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "4:2:0 color not supported in profile 1 or 3");
+ if (vpx_rb_read_bit(rb))
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Reserved bit set");
+ } else {
+ cm->subsampling_y = cm->subsampling_x = 1;
+ }
+ } else {
+ cm->color_range = VPX_CR_FULL_RANGE;
+ if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
+      // Note: if the colorspace is SRGB, then 4:4:4 chroma sampling is
+      // assumed; 4:2:2 or 4:4:0 chroma sampling is not allowed.
+ cm->subsampling_y = cm->subsampling_x = 0;
+ if (vpx_rb_read_bit(rb))
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Reserved bit set");
+ } else {
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "4:4:4 color not supported in profile 0 or 2");
+ }
+ }
+}
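+
+// In summary, the rules above are: profiles 0 and 2 are implicitly 4:2:0;
+// profiles 1 and 3 code subsampling_x/y explicitly and must not be 4:2:0;
+// SRGB content is always 4:4:4 full range and is only legal in profiles 1
+// and 3; profiles 2 and 3 additionally carry one bit selecting 10- or
+// 12-bit depth.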
+
+static INLINE void flush_all_fb_on_key(VP9_COMMON *cm) {
+ if (cm->frame_type == KEY_FRAME && cm->current_video_frame > 0) {
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+ BufferPool *const pool = cm->buffer_pool;
+ int i;
+ for (i = 0; i < FRAME_BUFFERS; ++i) {
+ if (i == cm->new_fb_idx) continue;
+ frame_bufs[i].ref_count = 0;
+ if (!frame_bufs[i].released) {
+ pool->release_fb_cb(pool->cb_priv, &frame_bufs[i].raw_frame_buffer);
+ frame_bufs[i].released = 1;
+ }
+ }
+ }
+}
+
+static size_t read_uncompressed_header(VP9Decoder *pbi,
+ struct vpx_read_bit_buffer *rb) {
+ VP9_COMMON *const cm = &pbi->common;
+ BufferPool *const pool = cm->buffer_pool;
+ RefCntBuffer *const frame_bufs = pool->frame_bufs;
+ int i, mask, ref_index = 0;
+ size_t sz;
+
+ cm->last_frame_type = cm->frame_type;
+ cm->last_intra_only = cm->intra_only;
+
+ if (vpx_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame marker");
+
+ cm->profile = vp9_read_profile(rb);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (cm->profile >= MAX_PROFILES)
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Unsupported bitstream profile");
+#else
+ if (cm->profile >= PROFILE_2)
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Unsupported bitstream profile");
+#endif
+
+ cm->show_existing_frame = vpx_rb_read_bit(rb);
+ if (cm->show_existing_frame) {
+ // Show an existing frame directly.
+ const int frame_to_show = cm->ref_frame_map[vpx_rb_read_literal(rb, 3)];
+ if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Buffer %d does not contain a decoded frame",
+ frame_to_show);
+ }
+
+ ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
+ pbi->refresh_frame_flags = 0;
+ cm->lf.filter_level = 0;
+ cm->show_frame = 1;
+
+ return 0;
+ }
+
+ cm->frame_type = (FRAME_TYPE)vpx_rb_read_bit(rb);
+ cm->show_frame = vpx_rb_read_bit(rb);
+ cm->error_resilient_mode = vpx_rb_read_bit(rb);
+
+ if (cm->frame_type == KEY_FRAME) {
+ if (!vp9_read_sync_code(rb))
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame sync code");
+
+ read_bitdepth_colorspace_sampling(cm, rb);
+ pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;
+
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ cm->frame_refs[i].idx = INVALID_IDX;
+ cm->frame_refs[i].buf = NULL;
+ }
+
+ setup_frame_size(cm, rb);
+ if (pbi->need_resync) {
+ memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
+ flush_all_fb_on_key(cm);
+ pbi->need_resync = 0;
+ }
+ } else {
+ cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb);
+
+ cm->reset_frame_context =
+ cm->error_resilient_mode ? 0 : vpx_rb_read_literal(rb, 2);
+
+ if (cm->intra_only) {
+ if (!vp9_read_sync_code(rb))
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame sync code");
+ if (cm->profile > PROFILE_0) {
+ read_bitdepth_colorspace_sampling(cm, rb);
+ } else {
+ // NOTE: The intra-only frame header does not include the specification
+ // of either the color format or color sub-sampling in profile 0. VP9
+ // specifies that the default color format should be YUV 4:2:0 in this
+ // case (normative).
+ cm->color_space = VPX_CS_BT_601;
+ cm->color_range = VPX_CR_STUDIO_RANGE;
+ cm->subsampling_y = cm->subsampling_x = 1;
+ cm->bit_depth = VPX_BITS_8;
+#if CONFIG_VP9_HIGHBITDEPTH
+ cm->use_highbitdepth = 0;
+#endif
+ }
+
+ pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
+ setup_frame_size(cm, rb);
+ if (pbi->need_resync) {
+ memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
+ pbi->need_resync = 0;
+ }
+ } else if (pbi->need_resync != 1) { /* Skip if need resync */
+ pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ const int ref = vpx_rb_read_literal(rb, REF_FRAMES_LOG2);
+ const int idx = cm->ref_frame_map[ref];
+ RefBuffer *const ref_frame = &cm->frame_refs[i];
+ ref_frame->idx = idx;
+ ref_frame->buf = &frame_bufs[idx].buf;
+ cm->ref_frame_sign_bias[LAST_FRAME + i] = vpx_rb_read_bit(rb);
+ }
+
+ setup_frame_size_with_refs(cm, rb);
+
+ cm->allow_high_precision_mv = vpx_rb_read_bit(rb);
+ cm->interp_filter = read_interp_filter(rb);
+
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ RefBuffer *const ref_buf = &cm->frame_refs[i];
+#if CONFIG_VP9_HIGHBITDEPTH
+ vp9_setup_scale_factors_for_frame(
+ &ref_buf->sf, ref_buf->buf->y_crop_width,
+ ref_buf->buf->y_crop_height, cm->width, cm->height,
+ cm->use_highbitdepth);
+#else
+ vp9_setup_scale_factors_for_frame(
+ &ref_buf->sf, ref_buf->buf->y_crop_width,
+ ref_buf->buf->y_crop_height, cm->width, cm->height);
+#endif
+ }
+ }
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
+#endif
+ get_frame_new_buffer(cm)->color_space = cm->color_space;
+ get_frame_new_buffer(cm)->color_range = cm->color_range;
+ get_frame_new_buffer(cm)->render_width = cm->render_width;
+ get_frame_new_buffer(cm)->render_height = cm->render_height;
+
+ if (pbi->need_resync) {
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Keyframe / intra-only frame required to reset decoder"
+ " state");
+ }
+
+ if (!cm->error_resilient_mode) {
+ cm->refresh_frame_context = vpx_rb_read_bit(rb);
+ cm->frame_parallel_decoding_mode = vpx_rb_read_bit(rb);
+ if (!cm->frame_parallel_decoding_mode) vp9_zero(cm->counts);
+ } else {
+ cm->refresh_frame_context = 0;
+ cm->frame_parallel_decoding_mode = 1;
+ }
+
+  // This flag will be overridden by the call to vp9_setup_past_independence()
+  // below, forcing the use of context 0 for intra-only and error-resilient
+  // frames.
+ cm->frame_context_idx = vpx_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
+
+ // Generate next_ref_frame_map.
+ for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
+ if (mask & 1) {
+ cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
+ ++frame_bufs[cm->new_fb_idx].ref_count;
+ } else {
+ cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
+ }
+ // Current thread holds the reference frame.
+ if (cm->ref_frame_map[ref_index] >= 0)
+ ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
+ ++ref_index;
+ }
+
+ for (; ref_index < REF_FRAMES; ++ref_index) {
+ cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
+ // Current thread holds the reference frame.
+ if (cm->ref_frame_map[ref_index] >= 0)
+ ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
+ }
+ pbi->hold_ref_buf = 1;
+
+ if (frame_is_intra_only(cm) || cm->error_resilient_mode)
+ vp9_setup_past_independence(cm);
+
+ setup_loopfilter(&cm->lf, rb);
+ setup_quantization(cm, &pbi->mb, rb);
+ setup_segmentation(&cm->seg, rb);
+ setup_segmentation_dequant(cm);
+
+ setup_tile_info(cm, rb);
+ if (pbi->row_mt == 1) {
+ int num_sbs = 1;
+ const int aligned_rows = mi_cols_aligned_to_sb(cm->mi_rows);
+ const int sb_rows = aligned_rows >> MI_BLOCK_SIZE_LOG2;
+ const int num_jobs = sb_rows << cm->log2_tile_cols;
+
+ if (pbi->row_mt_worker_data == NULL) {
+ CHECK_MEM_ERROR(cm, pbi->row_mt_worker_data,
+ vpx_calloc(1, sizeof(*pbi->row_mt_worker_data)));
+#if CONFIG_MULTITHREAD
+ pthread_mutex_init(&pbi->row_mt_worker_data->recon_done_mutex, NULL);
+#endif
+ }
+
+ if (pbi->max_threads > 1) {
+ const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+ const int sb_cols = aligned_cols >> MI_BLOCK_SIZE_LOG2;
+
+ num_sbs = sb_cols * sb_rows;
+ }
+
+ if (num_sbs > pbi->row_mt_worker_data->num_sbs ||
+ num_jobs > pbi->row_mt_worker_data->num_jobs) {
+ vp9_dec_free_row_mt_mem(pbi->row_mt_worker_data);
+ vp9_dec_alloc_row_mt_mem(pbi->row_mt_worker_data, cm, num_sbs,
+ pbi->max_threads, num_jobs);
+ }
+ vp9_jobq_alloc(pbi);
+ }
+ sz = vpx_rb_read_literal(rb, 16);
+
+ if (sz == 0)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid header size");
+
+ return sz;
+}
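+
+// Note: the value returned above is the size in bytes of the compressed
+// header (the "first partition"), read as a 16-bit literal at the end of
+// the uncompressed header; a return of 0 means the frame merely shows an
+// existing buffer.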
+
+static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data,
+ size_t partition_size) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ FRAME_CONTEXT *const fc = cm->fc;
+ vpx_reader r;
+ int k;
+
+ if (vpx_reader_init(&r, data, partition_size, pbi->decrypt_cb,
+ pbi->decrypt_state))
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate bool decoder 0");
+
+ cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
+ if (cm->tx_mode == TX_MODE_SELECT) read_tx_mode_probs(&fc->tx_probs, &r);
+ read_coef_probs(fc, cm->tx_mode, &r);
+
+ for (k = 0; k < SKIP_CONTEXTS; ++k)
+ vp9_diff_update_prob(&r, &fc->skip_probs[k]);
+
+ if (!frame_is_intra_only(cm)) {
+ nmv_context *const nmvc = &fc->nmvc;
+ int i, j;
+
+ read_inter_mode_probs(fc, &r);
+
+ if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
+
+ for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
+ vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]);
+
+ cm->reference_mode = read_frame_reference_mode(cm, &r);
+ if (cm->reference_mode != SINGLE_REFERENCE)
+ vp9_setup_compound_reference_mode(cm);
+ read_frame_reference_mode_probs(cm, &r);
+
+ for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
+ for (i = 0; i < INTRA_MODES - 1; ++i)
+ vp9_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
+
+ for (j = 0; j < PARTITION_CONTEXTS; ++j)
+ for (i = 0; i < PARTITION_TYPES - 1; ++i)
+ vp9_diff_update_prob(&r, &fc->partition_prob[j][i]);
+
+ read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
+ }
+
+ return vpx_reader_has_error(&r);
+}
+
+static struct vpx_read_bit_buffer *init_read_bit_buffer(
+ VP9Decoder *pbi, struct vpx_read_bit_buffer *rb, const uint8_t *data,
+ const uint8_t *data_end, uint8_t clear_data[MAX_VP9_HEADER_SIZE]) {
+ rb->bit_offset = 0;
+ rb->error_handler = error_handler;
+ rb->error_handler_data = &pbi->common;
+ if (pbi->decrypt_cb) {
+ const int n = (int)VPXMIN(MAX_VP9_HEADER_SIZE, data_end - data);
+ pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
+ rb->bit_buffer = clear_data;
+ rb->bit_buffer_end = clear_data + n;
+ } else {
+ rb->bit_buffer = data;
+ rb->bit_buffer_end = data_end;
+ }
+ return rb;
+}
+
+//------------------------------------------------------------------------------
+
+int vp9_read_sync_code(struct vpx_read_bit_buffer *const rb) {
+ return vpx_rb_read_literal(rb, 8) == VP9_SYNC_CODE_0 &&
+ vpx_rb_read_literal(rb, 8) == VP9_SYNC_CODE_1 &&
+ vpx_rb_read_literal(rb, 8) == VP9_SYNC_CODE_2;
+}
+
+void vp9_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
+ int *height) {
+ *width = vpx_rb_read_literal(rb, 16) + 1;
+ *height = vpx_rb_read_literal(rb, 16) + 1;
+}
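+
+// Dimensions are coded minus one, so a 16-bit literal of 639 decodes to a
+// width of 640; each dimension can range from 1 to 65536.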
+
+BITSTREAM_PROFILE vp9_read_profile(struct vpx_read_bit_buffer *rb) {
+ int profile = vpx_rb_read_bit(rb);
+ profile |= vpx_rb_read_bit(rb) << 1;
+ if (profile > 2) profile += vpx_rb_read_bit(rb);
+ return (BITSTREAM_PROFILE)profile;
+}
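+
+// The profile is coded low bit first: bits (0,0) -> profile 0, (1,0) ->
+// profile 1, (0,1) -> profile 2, and (1,1) -> profile 3 followed by one
+// reserved bit; a set reserved bit yields profile 4, which is rejected as
+// unsupported.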
+
+void vp9_decode_frame(VP9Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, const uint8_t **p_data_end) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ struct vpx_read_bit_buffer rb;
+ int context_updated = 0;
+ uint8_t clear_data[MAX_VP9_HEADER_SIZE];
+ const size_t first_partition_size = read_uncompressed_header(
+ pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
+ const int tile_rows = 1 << cm->log2_tile_rows;
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
+#if CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG
+ bitstream_queue_set_frame_read(cm->current_video_frame * 2 + cm->show_frame);
+#endif
+#if CONFIG_MISMATCH_DEBUG
+ mismatch_move_frame_idx_r();
+#endif
+ xd->cur_buf = new_fb;
+
+ if (!first_partition_size) {
+    // Showing an existing frame directly; the uncompressed header is one
+    // byte for profiles 0-2 and two bytes for profile 3.
+ *p_data_end = data + (cm->profile <= PROFILE_2 ? 1 : 2);
+ return;
+ }
+
+ data += vpx_rb_bytes_read(&rb);
+ if (!read_is_valid(data, first_partition_size, data_end))
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt header length");
+
+ cm->use_prev_frame_mvs =
+ !cm->error_resilient_mode && cm->width == cm->last_width &&
+ cm->height == cm->last_height && !cm->last_intra_only &&
+ cm->last_show_frame && (cm->last_frame_type != KEY_FRAME);
+
+ vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+
+ *cm->fc = cm->frame_contexts[cm->frame_context_idx];
+ if (!cm->fc->initialized)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Uninitialized entropy context.");
+
+ xd->corrupted = 0;
+ new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
+ if (new_fb->corrupted)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Decode failed. Frame data header is corrupted.");
+
+ if (cm->lf.filter_level && !cm->skip_loop_filter) {
+ vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
+ }
+
+ if (pbi->tile_worker_data == NULL ||
+ (tile_cols * tile_rows) != pbi->total_tiles) {
+ const int num_tile_workers =
+ tile_cols * tile_rows + ((pbi->max_threads > 1) ? pbi->max_threads : 0);
+ const size_t twd_size = num_tile_workers * sizeof(*pbi->tile_worker_data);
+ // Ensure tile data offsets will be properly aligned. This may fail on
+ // platforms without DECLARE_ALIGNED().
+ assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
+ vpx_free(pbi->tile_worker_data);
+ CHECK_MEM_ERROR(cm, pbi->tile_worker_data, vpx_memalign(32, twd_size));
+ pbi->total_tiles = tile_rows * tile_cols;
+ }
+
+ if (pbi->max_threads > 1 && tile_rows == 1 &&
+ (tile_cols > 1 || pbi->row_mt == 1)) {
+ if (pbi->row_mt == 1) {
+ *p_data_end =
+ decode_tiles_row_wise_mt(pbi, data + first_partition_size, data_end);
+ } else {
+ // Multi-threaded tile decoder
+ *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
+ if (!pbi->lpf_mt_opt) {
+ if (!xd->corrupted) {
+ if (!cm->skip_loop_filter) {
+ // If multiple threads are used to decode tiles, then we use those
+ // threads to do parallel loopfiltering.
+ vp9_loop_filter_frame_mt(
+ new_fb, cm, pbi->mb.plane, cm->lf.filter_level, 0, 0,
+ pbi->tile_workers, pbi->num_tile_workers, &pbi->lf_row_sync);
+ }
+ } else {
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Decode failed. Frame data is corrupted.");
+ }
+ }
+ }
+ } else {
+ *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
+ }
+
+ if (!xd->corrupted) {
+ if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
+ vp9_adapt_coef_probs(cm);
+
+ if (!frame_is_intra_only(cm)) {
+ vp9_adapt_mode_probs(cm);
+ vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+ }
+ }
+ } else {
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Decode failed. Frame data is corrupted.");
+ }
+
+  // When not decoding frame-parallel, update the frame context here.
+ if (cm->refresh_frame_context && !context_updated)
+ cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
+}
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_decodeframe.h b/media/libvpx/libvpx/vp9/decoder/vp9_decodeframe.h
new file mode 100644
index 0000000000..ba95e72344
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_decodeframe.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP9_DECODER_VP9_DECODEFRAME_H_
+#define VPX_VP9_DECODER_VP9_DECODEFRAME_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "vp9/common/vp9_enums.h"
+
+struct VP9Decoder;
+struct vpx_read_bit_buffer;
+
+int vp9_read_sync_code(struct vpx_read_bit_buffer *const rb);
+void vp9_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
+ int *height);
+BITSTREAM_PROFILE vp9_read_profile(struct vpx_read_bit_buffer *rb);
+
+void vp9_decode_frame(struct VP9Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, const uint8_t **p_data_end);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP9_DECODER_VP9_DECODEFRAME_H_
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_decodemv.c b/media/libvpx/libvpx/vp9/decoder/vp9_decodemv.c
new file mode 100644
index 0000000000..8a8d2ad86e
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_decodemv.c
@@ -0,0 +1,848 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_seg_common.h"
+
+#include "vp9/decoder/vp9_decodemv.h"
+#include "vp9/decoder/vp9_decodeframe.h"
+
+#include "vpx_dsp/vpx_dsp_common.h"
+
+static PREDICTION_MODE read_intra_mode(vpx_reader *r, const vpx_prob *p) {
+ return (PREDICTION_MODE)vpx_read_tree(r, vp9_intra_mode_tree, p);
+}
+
+static PREDICTION_MODE read_intra_mode_y(VP9_COMMON *cm, MACROBLOCKD *xd,
+ vpx_reader *r, int size_group) {
+ const PREDICTION_MODE y_mode =
+ read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
+ FRAME_COUNTS *counts = xd->counts;
+ if (counts) ++counts->y_mode[size_group][y_mode];
+ return y_mode;
+}
+
+static PREDICTION_MODE read_intra_mode_uv(VP9_COMMON *cm, MACROBLOCKD *xd,
+ vpx_reader *r,
+ PREDICTION_MODE y_mode) {
+ const PREDICTION_MODE uv_mode =
+ read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]);
+ FRAME_COUNTS *counts = xd->counts;
+ if (counts) ++counts->uv_mode[y_mode][uv_mode];
+ return uv_mode;
+}
+
+static PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, MACROBLOCKD *xd,
+ vpx_reader *r, int ctx) {
+ const int mode =
+ vpx_read_tree(r, vp9_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
+ FRAME_COUNTS *counts = xd->counts;
+ if (counts) ++counts->inter_mode[ctx][mode];
+
+ return NEARESTMV + mode;
+}
+
+static int read_segment_id(vpx_reader *r, const struct segmentation *seg) {
+ return vpx_read_tree(r, vp9_segment_tree, seg->tree_probs);
+}
+
+static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
+ TX_SIZE max_tx_size, vpx_reader *r) {
+ FRAME_COUNTS *counts = xd->counts;
+ const int ctx = get_tx_size_context(xd);
+ const vpx_prob *tx_probs = get_tx_probs(max_tx_size, ctx, &cm->fc->tx_probs);
+ int tx_size = vpx_read(r, tx_probs[0]);
+ if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
+ tx_size += vpx_read(r, tx_probs[1]);
+ if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
+ tx_size += vpx_read(r, tx_probs[2]);
+ }
+
+ if (counts) ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size];
+ return (TX_SIZE)tx_size;
+}
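+
+// The transform size above is coded as up to three binary decisions: with
+// max_tx_size == TX_32X32, a first bit of 0 selects TX_4X4, 1,0 selects
+// TX_8X8, 1,1,0 selects TX_16X16 and 1,1,1 selects TX_32X32.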
+
+static INLINE TX_SIZE read_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
+ int allow_select, vpx_reader *r) {
+ TX_MODE tx_mode = cm->tx_mode;
+ BLOCK_SIZE bsize = xd->mi[0]->sb_type;
+ const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
+ if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8)
+ return read_selected_tx_size(cm, xd, max_tx_size, r);
+ else
+ return VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]);
+}
+
+static int dec_get_segment_id(const VP9_COMMON *cm, const uint8_t *segment_ids,
+ int mi_offset, int x_mis, int y_mis) {
+ int x, y, segment_id = INT_MAX;
+
+ for (y = 0; y < y_mis; y++)
+ for (x = 0; x < x_mis; x++)
+ segment_id =
+ VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
+
+ assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
+ return segment_id;
+}
+
+static void set_segment_id(VP9_COMMON *cm, int mi_offset, int x_mis, int y_mis,
+ int segment_id) {
+ int x, y;
+
+ assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
+
+ for (y = 0; y < y_mis; y++)
+ for (x = 0; x < x_mis; x++)
+ cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
+}
+
+static void copy_segment_id(const VP9_COMMON *cm,
+ const uint8_t *last_segment_ids,
+ uint8_t *current_segment_ids, int mi_offset,
+ int x_mis, int y_mis) {
+ int x, y;
+
+ for (y = 0; y < y_mis; y++)
+ for (x = 0; x < x_mis; x++)
+ current_segment_ids[mi_offset + y * cm->mi_cols + x] =
+ last_segment_ids ? last_segment_ids[mi_offset + y * cm->mi_cols + x]
+ : 0;
+}
+
+static int read_intra_segment_id(VP9_COMMON *const cm, int mi_offset, int x_mis,
+ int y_mis, vpx_reader *r) {
+ struct segmentation *const seg = &cm->seg;
+ int segment_id;
+
+ if (!seg->enabled) return 0; // Default for disabled segmentation
+
+ if (!seg->update_map) {
+ copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
+ mi_offset, x_mis, y_mis);
+ return 0;
+ }
+
+ segment_id = read_segment_id(r, seg);
+ set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
+ return segment_id;
+}
+
+static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ int mi_row, int mi_col, vpx_reader *r,
+ int x_mis, int y_mis) {
+ struct segmentation *const seg = &cm->seg;
+ MODE_INFO *const mi = xd->mi[0];
+ int predicted_segment_id, segment_id;
+ const int mi_offset = mi_row * cm->mi_cols + mi_col;
+
+ if (!seg->enabled) return 0; // Default for disabled segmentation
+
+ predicted_segment_id = cm->last_frame_seg_map
+ ? dec_get_segment_id(cm, cm->last_frame_seg_map,
+ mi_offset, x_mis, y_mis)
+ : 0;
+
+ if (!seg->update_map) {
+ copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
+ mi_offset, x_mis, y_mis);
+ return predicted_segment_id;
+ }
+
+ if (seg->temporal_update) {
+ const vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
+ mi->seg_id_predicted = vpx_read(r, pred_prob);
+ segment_id =
+ mi->seg_id_predicted ? predicted_segment_id : read_segment_id(r, seg);
+ } else {
+ segment_id = read_segment_id(r, seg);
+ }
+ set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
+ return segment_id;
+}
+
+static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+ vpx_reader *r) {
+ if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
+ return 1;
+ } else {
+ const int ctx = vp9_get_skip_context(xd);
+ const int skip = vpx_read(r, cm->fc->skip_probs[ctx]);
+ FRAME_COUNTS *counts = xd->counts;
+ if (counts) ++counts->skip[ctx][skip];
+ return skip;
+ }
+}
+
+static void read_intra_frame_mode_info(VP9_COMMON *const cm,
+ MACROBLOCKD *const xd, int mi_row,
+ int mi_col, vpx_reader *r, int x_mis,
+ int y_mis) {
+ MODE_INFO *const mi = xd->mi[0];
+ const MODE_INFO *above_mi = xd->above_mi;
+ const MODE_INFO *left_mi = xd->left_mi;
+ const BLOCK_SIZE bsize = mi->sb_type;
+ int i;
+ const int mi_offset = mi_row * cm->mi_cols + mi_col;
+
+ mi->segment_id = read_intra_segment_id(cm, mi_offset, x_mis, y_mis, r);
+ mi->skip = read_skip(cm, xd, mi->segment_id, r);
+ mi->tx_size = read_tx_size(cm, xd, 1, r);
+ mi->ref_frame[0] = INTRA_FRAME;
+ mi->ref_frame[1] = NONE;
+
+ switch (bsize) {
+ case BLOCK_4X4:
+ for (i = 0; i < 4; ++i)
+ mi->bmi[i].as_mode =
+ read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, i));
+ mi->mode = mi->bmi[3].as_mode;
+ break;
+ case BLOCK_4X8:
+ mi->bmi[0].as_mode = mi->bmi[2].as_mode =
+ read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
+ mi->bmi[1].as_mode = mi->bmi[3].as_mode = mi->mode =
+ read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 1));
+ break;
+ case BLOCK_8X4:
+ mi->bmi[0].as_mode = mi->bmi[1].as_mode =
+ read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
+ mi->bmi[2].as_mode = mi->bmi[3].as_mode = mi->mode =
+ read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 2));
+ break;
+ default:
+ mi->mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
+ }
+
+ mi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mi->mode]);
+}
+
+static int read_mv_component(vpx_reader *r, const nmv_component *mvcomp,
+ int usehp) {
+ int mag, d, fr, hp;
+ const int sign = vpx_read(r, mvcomp->sign);
+ const int mv_class = vpx_read_tree(r, vp9_mv_class_tree, mvcomp->classes);
+ const int class0 = mv_class == MV_CLASS_0;
+
+ // Integer part
+ if (class0) {
+ d = vpx_read(r, mvcomp->class0[0]);
+ mag = 0;
+ } else {
+ int i;
+ const int n = mv_class + CLASS0_BITS - 1; // number of bits
+
+ d = 0;
+ for (i = 0; i < n; ++i) d |= vpx_read(r, mvcomp->bits[i]) << i;
+ mag = CLASS0_SIZE << (mv_class + 2);
+ }
+
+ // Fractional part
+ fr = vpx_read_tree(r, vp9_mv_fp_tree,
+ class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
+
+ // High precision part (if hp is not used, the default value of hp is 1)
+ hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
+
+ // Result
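+ // Composition (worked example): for MV_CLASS_0 with d = 1, fr = 2, hp = 1
+ // the magnitude below is ((1 << 3) | (2 << 1) | 1) + 1 = 14 eighth-pel
+ // units; larger classes also add the CLASS0_SIZE << (mv_class + 2) offset
+ // computed above.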
+ mag += ((d << 3) | (fr << 1) | hp) + 1;
+ return sign ? -mag : mag;
+}
+
+static INLINE void read_mv(vpx_reader *r, MV *mv, const MV *ref,
+ const nmv_context *ctx, nmv_context_counts *counts,
+ int allow_hp) {
+ const MV_JOINT_TYPE joint_type =
+ (MV_JOINT_TYPE)vpx_read_tree(r, vp9_mv_joint_tree, ctx->joints);
+ const int use_hp = allow_hp && use_mv_hp(ref);
+ MV diff = { 0, 0 };
+
+ if (mv_joint_vertical(joint_type))
+ diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
+
+ if (mv_joint_horizontal(joint_type))
+ diff.col = read_mv_component(r, &ctx->comps[1], use_hp);
+
+ vp9_inc_mv(&diff, counts);
+
+ mv->row = ref->row + diff.row;
+ mv->col = ref->col + diff.col;
+}
+
+static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm,
+ const MACROBLOCKD *xd,
+ vpx_reader *r) {
+ if (cm->reference_mode == REFERENCE_MODE_SELECT) {
+ const int ctx = vp9_get_reference_mode_context(cm, xd);
+ const REFERENCE_MODE mode =
+ (REFERENCE_MODE)vpx_read(r, cm->fc->comp_inter_prob[ctx]);
+ FRAME_COUNTS *counts = xd->counts;
+ if (counts) ++counts->comp_inter[ctx][mode];
+ return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE
+ } else {
+ return cm->reference_mode;
+ }
+}
+
+// Read the reference frame
+static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ vpx_reader *r, int segment_id,
+ MV_REFERENCE_FRAME ref_frame[2]) {
+ FRAME_CONTEXT *const fc = cm->fc;
+ FRAME_COUNTS *counts = xd->counts;
+
+ if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
+ ref_frame[0] = (MV_REFERENCE_FRAME)get_segdata(&cm->seg, segment_id,
+ SEG_LVL_REF_FRAME);
+ ref_frame[1] = NONE;
+ } else {
+ const REFERENCE_MODE mode = read_block_reference_mode(cm, xd, r);
+ // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
+ if (mode == COMPOUND_REFERENCE) {
+ const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
+ const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
+ const int bit = vpx_read(r, fc->comp_ref_prob[ctx]);
+ if (counts) ++counts->comp_ref[ctx][bit];
+ ref_frame[idx] = cm->comp_fixed_ref;
+ ref_frame[!idx] = cm->comp_var_ref[bit];
+ } else if (mode == SINGLE_REFERENCE) {
+ const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
+ const int bit0 = vpx_read(r, fc->single_ref_prob[ctx0][0]);
+ if (counts) ++counts->single_ref[ctx0][0][bit0];
+ if (bit0) {
+ const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
+ const int bit1 = vpx_read(r, fc->single_ref_prob[ctx1][1]);
+ if (counts) ++counts->single_ref[ctx1][1][bit1];
+ ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
+ } else {
+ ref_frame[0] = LAST_FRAME;
+ }
+
+ ref_frame[1] = NONE;
+ } else {
+ assert(0 && "Invalid prediction mode.");
+ }
+ }
+}
+
+static INLINE INTERP_FILTER read_switchable_interp_filter(VP9_COMMON *const cm,
+ MACROBLOCKD *const xd,
+ vpx_reader *r) {
+ const int ctx = get_pred_context_switchable_interp(xd);
+ const INTERP_FILTER type = (INTERP_FILTER)vpx_read_tree(
+ r, vp9_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
+ FRAME_COUNTS *counts = xd->counts;
+ if (counts) ++counts->switchable_interp[ctx][type];
+ return type;
+}
+
+static void read_intra_block_mode_info(VP9_COMMON *const cm,
+ MACROBLOCKD *const xd, MODE_INFO *mi,
+ vpx_reader *r) {
+ const BLOCK_SIZE bsize = mi->sb_type;
+ int i;
+
+ switch (bsize) {
+ case BLOCK_4X4:
+ for (i = 0; i < 4; ++i)
+ mi->bmi[i].as_mode = read_intra_mode_y(cm, xd, r, 0);
+ mi->mode = mi->bmi[3].as_mode;
+ break;
+ case BLOCK_4X8:
+ mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd, r, 0);
+ mi->bmi[1].as_mode = mi->bmi[3].as_mode = mi->mode =
+ read_intra_mode_y(cm, xd, r, 0);
+ break;
+ case BLOCK_8X4:
+ mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd, r, 0);
+ mi->bmi[2].as_mode = mi->bmi[3].as_mode = mi->mode =
+ read_intra_mode_y(cm, xd, r, 0);
+ break;
+ default: mi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]);
+ }
+
+ mi->uv_mode = read_intra_mode_uv(cm, xd, r, mi->mode);
+
+ // Initialize interp_filter here so we do not have to check for inter block
+ // modes in get_pred_context_switchable_interp()
+ mi->interp_filter = SWITCHABLE_FILTERS;
+
+ mi->ref_frame[0] = INTRA_FRAME;
+ mi->ref_frame[1] = NONE;
+}
+
+static INLINE int is_mv_valid(const MV *mv) {
+ return mv->row > MV_LOW && mv->row < MV_UPP && mv->col > MV_LOW &&
+ mv->col < MV_UPP;
+}
+
+static INLINE void copy_mv_pair(int_mv *dst, const int_mv *src) {
+ memcpy(dst, src, sizeof(*dst) * 2);
+}
+
+static INLINE void zero_mv_pair(int_mv *dst) {
+ memset(dst, 0, sizeof(*dst) * 2);
+}
+
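+// Only NEWMV consumes bits from the reader here; NEARESTMV/NEARMV copy the
+// candidate chosen by the caller and ZEROMV zeroes both mvs. Returns 0 for an
+// invalid mode and 1 otherwise (ANDed with mv validity for NEWMV).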
+static INLINE int assign_mv(VP9_COMMON *cm, MACROBLOCKD *xd,
+ PREDICTION_MODE mode, int_mv mv[2],
+ int_mv ref_mv[2], int_mv near_nearest_mv[2],
+ int is_compound, int allow_hp, vpx_reader *r) {
+ int i;
+ int ret = 1;
+
+ switch (mode) {
+ case NEWMV: {
+ FRAME_COUNTS *counts = xd->counts;
+ nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
+ for (i = 0; i < 1 + is_compound; ++i) {
+ read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc, mv_counts,
+ allow_hp);
+ ret = ret && is_mv_valid(&mv[i].as_mv);
+ }
+ break;
+ }
+ case NEARMV:
+ case NEARESTMV: {
+ copy_mv_pair(mv, near_nearest_mv);
+ break;
+ }
+ case ZEROMV: {
+ zero_mv_pair(mv);
+ break;
+ }
+ default: { return 0; }
+ }
+ return ret;
+}
+
+static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ int segment_id, vpx_reader *r) {
+ if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
+ return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
+ } else {
+ const int ctx = get_intra_inter_context(xd);
+ const int is_inter = vpx_read(r, cm->fc->intra_inter_prob[ctx]);
+ FRAME_COUNTS *counts = xd->counts;
+ if (counts) ++counts->intra_inter[ctx][is_inter];
+ return is_inter;
+ }
+}
+
+// This macro is used to add a motion vector to the mv_ref list if it isn't
+// already in the list. If it's the second motion vector, or early_break is
+// set, it also skips all additional processing and jumps to Done!
+#define ADD_MV_REF_LIST_EB(mv, refmv_count, mv_ref_list, Done) \
+ do { \
+ if (refmv_count) { \
+ if ((mv).as_int != (mv_ref_list)[0].as_int) { \
+ (mv_ref_list)[(refmv_count)] = (mv); \
+ refmv_count++; \
+ goto Done; \
+ } \
+ } else { \
+ (mv_ref_list)[(refmv_count)++] = (mv); \
+ if (early_break) goto Done; \
+ } \
+ } while (0)
+
+// If either reference frame differs from the given one, is not INTRA, and
+// the two mvs are different from each other, scale and add the mv to our
+// list.
+#define IF_DIFF_REF_FRAME_ADD_MV_EB(mbmi, ref_frame, ref_sign_bias, \
+ refmv_count, mv_ref_list, Done) \
+ do { \
+ if (is_inter_block(mbmi)) { \
+ if ((mbmi)->ref_frame[0] != ref_frame) \
+ ADD_MV_REF_LIST_EB(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \
+ refmv_count, mv_ref_list, Done); \
+ if (has_second_ref(mbmi) && (mbmi)->ref_frame[1] != ref_frame && \
+ (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \
+ ADD_MV_REF_LIST_EB(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \
+ refmv_count, mv_ref_list, Done); \
+ } \
+ } while (0)
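+
+// Both macros above reference an early_break flag from the enclosing scope in
+// addition to the Done label parameter; see dec_find_mv_refs() below.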
+
+// This function searches the neighborhood of a given MB/SB
+// to try and find candidate reference vectors.
+static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
+ PREDICTION_MODE mode, MV_REFERENCE_FRAME ref_frame,
+ const POSITION *const mv_ref_search,
+ int_mv *mv_ref_list, int mi_row, int mi_col,
+ int block) {
+ const int *ref_sign_bias = cm->ref_frame_sign_bias;
+ int i, refmv_count = 0;
+ int different_ref_found = 0;
+ const MV_REF *const prev_frame_mvs =
+ cm->use_prev_frame_mvs
+ ? cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col
+ : NULL;
+ const TileInfo *const tile = &xd->tile;
+ // If the mode is nearestmv or newmv (which uses nearestmv as a reference),
+ // stop searching after the first mv is found.
+ const int early_break = (mode != NEARMV);
+
+ // Blank the reference vector list
+ memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
+
+ i = 0;
+ if (block >= 0) {
+ // If the size is < 8x8, we get the mv from the bmi substructure for the
+ // nearest two blocks.
+ for (i = 0; i < 2; ++i) {
+ const POSITION *const mv_ref = &mv_ref_search[i];
+ if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
+ const MODE_INFO *const candidate_mi =
+ xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
+ different_ref_found = 1;
+
+ if (candidate_mi->ref_frame[0] == ref_frame)
+ ADD_MV_REF_LIST_EB(
+ get_sub_block_mv(candidate_mi, 0, mv_ref->col, block),
+ refmv_count, mv_ref_list, Done);
+ else if (candidate_mi->ref_frame[1] == ref_frame)
+ ADD_MV_REF_LIST_EB(
+ get_sub_block_mv(candidate_mi, 1, mv_ref->col, block),
+ refmv_count, mv_ref_list, Done);
+ }
+ }
+ }
+
+ // Check the rest of the neighbors in much the same way
+ // as before except we don't need to keep track of sub blocks or
+ // mode counts.
+ for (; i < MVREF_NEIGHBOURS; ++i) {
+ const POSITION *const mv_ref = &mv_ref_search[i];
+ if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
+ const MODE_INFO *const candidate =
+ xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
+ different_ref_found = 1;
+
+ if (candidate->ref_frame[0] == ref_frame)
+ ADD_MV_REF_LIST_EB(candidate->mv[0], refmv_count, mv_ref_list, Done);
+ else if (candidate->ref_frame[1] == ref_frame)
+ ADD_MV_REF_LIST_EB(candidate->mv[1], refmv_count, mv_ref_list, Done);
+ }
+ }
+
+ // Check the last frame's mode and mv info.
+ if (prev_frame_mvs) {
+ if (prev_frame_mvs->ref_frame[0] == ref_frame) {
+ ADD_MV_REF_LIST_EB(prev_frame_mvs->mv[0], refmv_count, mv_ref_list, Done);
+ } else if (prev_frame_mvs->ref_frame[1] == ref_frame) {
+ ADD_MV_REF_LIST_EB(prev_frame_mvs->mv[1], refmv_count, mv_ref_list, Done);
+ }
+ }
+
+ // Since we couldn't find 2 mvs from the same reference frame, go back
+ // through the neighbors and find motion vectors from different reference
+ // frames.
+ if (different_ref_found) {
+ for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
+ const POSITION *mv_ref = &mv_ref_search[i];
+ if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
+ const MODE_INFO *const candidate =
+ xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
+
+ // If the candidate is INTRA we don't want to consider its mv.
+ IF_DIFF_REF_FRAME_ADD_MV_EB(candidate, ref_frame, ref_sign_bias,
+ refmv_count, mv_ref_list, Done);
+ }
+ }
+ }
+
+ // Since we still don't have a candidate, we'll try the last frame.
+ if (prev_frame_mvs) {
+ if (prev_frame_mvs->ref_frame[0] != ref_frame &&
+ prev_frame_mvs->ref_frame[0] > INTRA_FRAME) {
+ int_mv mv = prev_frame_mvs->mv[0];
+ if (ref_sign_bias[prev_frame_mvs->ref_frame[0]] !=
+ ref_sign_bias[ref_frame]) {
+ mv.as_mv.row *= -1;
+ mv.as_mv.col *= -1;
+ }
+ ADD_MV_REF_LIST_EB(mv, refmv_count, mv_ref_list, Done);
+ }
+
+ if (prev_frame_mvs->ref_frame[1] > INTRA_FRAME &&
+ prev_frame_mvs->ref_frame[1] != ref_frame &&
+ prev_frame_mvs->mv[1].as_int != prev_frame_mvs->mv[0].as_int) {
+ int_mv mv = prev_frame_mvs->mv[1];
+ if (ref_sign_bias[prev_frame_mvs->ref_frame[1]] !=
+ ref_sign_bias[ref_frame]) {
+ mv.as_mv.row *= -1;
+ mv.as_mv.col *= -1;
+ }
+ ADD_MV_REF_LIST_EB(mv, refmv_count, mv_ref_list, Done);
+ }
+ }
+
+ if (mode == NEARMV)
+ refmv_count = MAX_MV_REF_CANDIDATES;
+ else
+ // we only care about the nearestmv for the remaining modes
+ refmv_count = 1;
+
+Done:
+ // Clamp vectors
+ for (i = 0; i < refmv_count; ++i) clamp_mv_ref(&mv_ref_list[i].as_mv, xd);
+
+ return refmv_count;
+}
+
+static void append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
+ const POSITION *const mv_ref_search,
+ PREDICTION_MODE b_mode, int block,
+ int ref, int mi_row, int mi_col,
+ int_mv *best_sub8x8) {
+ int_mv mv_list[MAX_MV_REF_CANDIDATES];
+ MODE_INFO *const mi = xd->mi[0];
+ b_mode_info *bmi = mi->bmi;
+ int n;
+ int refmv_count;
+
+ assert(MAX_MV_REF_CANDIDATES == 2);
+
+ switch (block) {
+ case 0:
+ refmv_count =
+ dec_find_mv_refs(cm, xd, b_mode, mi->ref_frame[ref], mv_ref_search,
+ mv_list, mi_row, mi_col, block);
+ best_sub8x8->as_int = mv_list[refmv_count - 1].as_int;
+ break;
+ case 1:
+ case 2:
+ if (b_mode == NEARESTMV) {
+ best_sub8x8->as_int = bmi[0].as_mv[ref].as_int;
+ } else {
+ dec_find_mv_refs(cm, xd, b_mode, mi->ref_frame[ref], mv_ref_search,
+ mv_list, mi_row, mi_col, block);
+ best_sub8x8->as_int = 0;
+ for (n = 0; n < 2; ++n)
+ if (bmi[0].as_mv[ref].as_int != mv_list[n].as_int) {
+ best_sub8x8->as_int = mv_list[n].as_int;
+ break;
+ }
+ }
+ break;
+ case 3:
+ if (b_mode == NEARESTMV) {
+ best_sub8x8->as_int = bmi[2].as_mv[ref].as_int;
+ } else {
+ best_sub8x8->as_int = 0;
+ if (bmi[2].as_mv[ref].as_int != bmi[1].as_mv[ref].as_int) {
+ best_sub8x8->as_int = bmi[1].as_mv[ref].as_int;
+ break;
+ }
+ if (bmi[2].as_mv[ref].as_int != bmi[0].as_mv[ref].as_int) {
+ best_sub8x8->as_int = bmi[0].as_mv[ref].as_int;
+ break;
+ }
+ dec_find_mv_refs(cm, xd, b_mode, mi->ref_frame[ref], mv_ref_search,
+ mv_list, mi_row, mi_col, block);
+ for (n = 0; n < 2; ++n)
+ if (bmi[2].as_mv[ref].as_int != mv_list[n].as_int) {
+ best_sub8x8->as_int = mv_list[n].as_int;
+ break;
+ }
+ }
+ break;
+ default: assert(0 && "Invalid block index.");
+ }
+}
+
+static uint8_t get_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd,
+ const POSITION *const mv_ref_search, int mi_row,
+ int mi_col) {
+ int i;
+ int context_counter = 0;
+ const TileInfo *const tile = &xd->tile;
+
+ // Get mode count from nearest 2 blocks
+ for (i = 0; i < 2; ++i) {
+ const POSITION *const mv_ref = &mv_ref_search[i];
+ if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
+ const MODE_INFO *const candidate =
+ xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
+ // Keep counts for entropy encoding.
+ context_counter += mode_2_counter[candidate->mode];
+ }
+ }
+
+ return counter_to_context[context_counter];
+}
+
+static void read_inter_block_mode_info(VP9Decoder *const pbi,
+ MACROBLOCKD *const xd,
+ MODE_INFO *const mi, int mi_row,
+ int mi_col, vpx_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ const BLOCK_SIZE bsize = mi->sb_type;
+ const int allow_hp = cm->allow_high_precision_mv;
+ int_mv best_ref_mvs[2] = { { 0 }, { 0 } };
+ int ref, is_compound;
+ uint8_t inter_mode_ctx;
+ const POSITION *const mv_ref_search = mv_ref_blocks[bsize];
+
+ read_ref_frames(cm, xd, r, mi->segment_id, mi->ref_frame);
+ is_compound = has_second_ref(mi);
+ inter_mode_ctx = get_mode_context(cm, xd, mv_ref_search, mi_row, mi_col);
+
+ if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP)) {
+ mi->mode = ZEROMV;
+ if (bsize < BLOCK_8X8) {
+ vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid usage of segement feature on small blocks");
+ return;
+ }
+ } else {
+ if (bsize >= BLOCK_8X8)
+ mi->mode = read_inter_mode(cm, xd, r, inter_mode_ctx);
+ }
+
+ mi->interp_filter = (cm->interp_filter == SWITCHABLE)
+ ? read_switchable_interp_filter(cm, xd, r)
+ : cm->interp_filter;
+
+ if (bsize < BLOCK_8X8) {
+ const int num_4x4_w = 1 << xd->bmode_blocks_wl;
+ const int num_4x4_h = 1 << xd->bmode_blocks_hl;
+ int idx, idy;
+ PREDICTION_MODE b_mode;
+ int got_mv_refs_for_new = 0;
+ int_mv best_sub8x8[2];
+ const uint32_t invalid_mv = 0x80008000;
+ // Initialize the 2nd element: even though it won't be used meaningfully if
+ // is_compound is false, copying/clamping an uninitialized value may trigger
+ // an MSan warning.
+ best_sub8x8[1].as_int = invalid_mv;
+ for (idy = 0; idy < 2; idy += num_4x4_h) {
+ for (idx = 0; idx < 2; idx += num_4x4_w) {
+ const int j = idy * 2 + idx;
+ b_mode = read_inter_mode(cm, xd, r, inter_mode_ctx);
+
+ if (b_mode == NEARESTMV || b_mode == NEARMV) {
+ for (ref = 0; ref < 1 + is_compound; ++ref)
+ append_sub8x8_mvs_for_idx(cm, xd, mv_ref_search, b_mode, j, ref,
+ mi_row, mi_col, &best_sub8x8[ref]);
+ } else if (b_mode == NEWMV && !got_mv_refs_for_new) {
+ for (ref = 0; ref < 1 + is_compound; ++ref) {
+ int_mv tmp_mvs[MAX_MV_REF_CANDIDATES];
+ const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
+
+ dec_find_mv_refs(cm, xd, NEWMV, frame, mv_ref_search, tmp_mvs,
+ mi_row, mi_col, -1);
+
+ lower_mv_precision(&tmp_mvs[0].as_mv, allow_hp);
+ best_ref_mvs[ref] = tmp_mvs[0];
+ got_mv_refs_for_new = 1;
+ }
+ }
+
+ if (!assign_mv(cm, xd, b_mode, mi->bmi[j].as_mv, best_ref_mvs,
+ best_sub8x8, is_compound, allow_hp, r)) {
+ xd->corrupted |= 1;
+ break;
+ }
+
+ if (num_4x4_h == 2) mi->bmi[j + 2] = mi->bmi[j];
+ if (num_4x4_w == 2) mi->bmi[j + 1] = mi->bmi[j];
+ }
+ }
+
+ mi->mode = b_mode;
+
+ copy_mv_pair(mi->mv, mi->bmi[3].as_mv);
+ } else {
+ if (mi->mode != ZEROMV) {
+ for (ref = 0; ref < 1 + is_compound; ++ref) {
+ int_mv tmp_mvs[MAX_MV_REF_CANDIDATES];
+ const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
+ int refmv_count =
+ dec_find_mv_refs(cm, xd, mi->mode, frame, mv_ref_search, tmp_mvs,
+ mi_row, mi_col, -1);
+ lower_mv_precision(&tmp_mvs[refmv_count - 1].as_mv, allow_hp);
+ best_ref_mvs[ref] = tmp_mvs[refmv_count - 1];
+ }
+ }
+ xd->corrupted |= !assign_mv(cm, xd, mi->mode, mi->mv, best_ref_mvs,
+ best_ref_mvs, is_compound, allow_hp, r);
+ }
+}
+
+static void read_inter_frame_mode_info(VP9Decoder *const pbi,
+ MACROBLOCKD *const xd, int mi_row,
+ int mi_col, vpx_reader *r, int x_mis,
+ int y_mis) {
+ VP9_COMMON *const cm = &pbi->common;
+ MODE_INFO *const mi = xd->mi[0];
+ int inter_block;
+
+ mi->segment_id =
+ read_inter_segment_id(cm, xd, mi_row, mi_col, r, x_mis, y_mis);
+ mi->skip = read_skip(cm, xd, mi->segment_id, r);
+ inter_block = read_is_inter_block(cm, xd, mi->segment_id, r);
+ mi->tx_size = read_tx_size(cm, xd, !mi->skip || !inter_block, r);
+
+ if (inter_block)
+ read_inter_block_mode_info(pbi, xd, mi, mi_row, mi_col, r);
+ else
+ read_intra_block_mode_info(cm, xd, mi, r);
+}
+
+static INLINE void copy_ref_frame_pair(MV_REFERENCE_FRAME *dst,
+ const MV_REFERENCE_FRAME *src) {
+ memcpy(dst, src, sizeof(*dst) * 2);
+}
+
+void vp9_read_mode_info(TileWorkerData *twd, VP9Decoder *const pbi, int mi_row,
+ int mi_col, int x_mis, int y_mis) {
+ vpx_reader *r = &twd->bit_reader;
+ MACROBLOCKD *const xd = &twd->xd;
+ VP9_COMMON *const cm = &pbi->common;
+ MODE_INFO *const mi = xd->mi[0];
+ MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
+ int w, h;
+
+ if (frame_is_intra_only(cm)) {
+ read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r, x_mis, y_mis);
+ } else {
+ // Cache mi->ref_frame and mi->mv so that the compiler can prove that they
+ // are constant for the duration of the loop and avoid reloading them.
+ MV_REFERENCE_FRAME mi_ref_frame[2];
+ int_mv mi_mv[2];
+
+ read_inter_frame_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
+
+ copy_ref_frame_pair(mi_ref_frame, mi->ref_frame);
+ copy_mv_pair(mi_mv, mi->mv);
+
+ for (h = 0; h < y_mis; ++h) {
+ for (w = 0; w < x_mis; ++w) {
+ MV_REF *const mv = frame_mvs + w;
+ copy_ref_frame_pair(mv->ref_frame, mi_ref_frame);
+ copy_mv_pair(mv->mv, mi_mv);
+ }
+ frame_mvs += cm->mi_cols;
+ }
+ }
+#if 0 // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
+ if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
+ (xd->above_mi == NULL || xd->left_mi == NULL) &&
+ !is_inter_block(mi) && need_top_left[mi->uv_mode])
+ assert(0);
+#endif // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
+}
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_decodemv.h b/media/libvpx/libvpx/vp9/decoder/vp9_decodemv.h
new file mode 100644
index 0000000000..11b45ace06
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_decodemv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP9_DECODER_VP9_DECODEMV_H_
+#define VPX_VP9_DECODER_VP9_DECODEMV_H_
+
+#include "vpx_dsp/bitreader.h"
+
+#include "vp9/decoder/vp9_decoder.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp9_read_mode_info(TileWorkerData *twd, VP9Decoder *const pbi, int mi_row,
+ int mi_col, int x_mis, int y_mis);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP9_DECODER_VP9_DECODEMV_H_
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_decoder.c b/media/libvpx/libvpx/vp9/decoder/vp9_decoder.c
new file mode 100644
index 0000000000..7db8ed72d5
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_decoder.c
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <stdio.h>
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/system_state.h"
+#include "vpx_ports/vpx_once.h"
+#include "vpx_ports/vpx_timer.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vpx_util/vpx_thread.h"
+
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#if CONFIG_VP9_POSTPROC
+#include "vp9/common/vp9_postproc.h"
+#endif
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_reconintra.h"
+
+#include "vp9/decoder/vp9_decodeframe.h"
+#include "vp9/decoder/vp9_decoder.h"
+#include "vp9/decoder/vp9_detokenize.h"
+
+static void initialize_dec(void) {
+ static volatile int init_done = 0;
+
+ if (!init_done) {
+ vp9_rtcd();
+ vpx_dsp_rtcd();
+ vpx_scale_rtcd();
+ vp9_init_intra_predictors();
+ init_done = 1;
+ }
+}
+
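+// The mip allocation carries a border above and to the left of the visible
+// mi grid; offsetting by mi_stride + 1 points cm->mi at the first visible
+// unit so above/left neighbor lookups stay in bounds.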
+static void vp9_dec_setup_mi(VP9_COMMON *cm) {
+ cm->mi = cm->mip + cm->mi_stride + 1;
+ cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
+ memset(cm->mi_grid_base, 0,
+ cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
+}
+
+void vp9_dec_alloc_row_mt_mem(RowMTWorkerData *row_mt_worker_data,
+ VP9_COMMON *cm, int num_sbs, int max_threads,
+ int num_jobs) {
+ int plane;
+ const size_t dqcoeff_size = (num_sbs << DQCOEFFS_PER_SB_LOG2) *
+ sizeof(*row_mt_worker_data->dqcoeff[0]);
+ row_mt_worker_data->num_jobs = num_jobs;
+#if CONFIG_MULTITHREAD
+ {
+ int i;
+ CHECK_MEM_ERROR(
+ cm, row_mt_worker_data->recon_sync_mutex,
+ vpx_malloc(sizeof(*row_mt_worker_data->recon_sync_mutex) * num_jobs));
+ if (row_mt_worker_data->recon_sync_mutex) {
+ for (i = 0; i < num_jobs; ++i) {
+ pthread_mutex_init(&row_mt_worker_data->recon_sync_mutex[i], NULL);
+ }
+ }
+
+ CHECK_MEM_ERROR(
+ cm, row_mt_worker_data->recon_sync_cond,
+ vpx_malloc(sizeof(*row_mt_worker_data->recon_sync_cond) * num_jobs));
+ if (row_mt_worker_data->recon_sync_cond) {
+ for (i = 0; i < num_jobs; ++i) {
+ pthread_cond_init(&row_mt_worker_data->recon_sync_cond[i], NULL);
+ }
+ }
+ }
+#endif
+ row_mt_worker_data->num_sbs = num_sbs;
+ for (plane = 0; plane < 3; ++plane) {
+ CHECK_MEM_ERROR(cm, row_mt_worker_data->dqcoeff[plane],
+ vpx_memalign(16, dqcoeff_size));
+ memset(row_mt_worker_data->dqcoeff[plane], 0, dqcoeff_size);
+ CHECK_MEM_ERROR(cm, row_mt_worker_data->eob[plane],
+ vpx_calloc(num_sbs << EOBS_PER_SB_LOG2,
+ sizeof(*row_mt_worker_data->eob[plane])));
+ }
+ CHECK_MEM_ERROR(cm, row_mt_worker_data->partition,
+ vpx_calloc(num_sbs * PARTITIONS_PER_SB,
+ sizeof(*row_mt_worker_data->partition)));
+ CHECK_MEM_ERROR(cm, row_mt_worker_data->recon_map,
+ vpx_calloc(num_sbs, sizeof(*row_mt_worker_data->recon_map)));
+
+ // allocate memory for thread_data
+ if (row_mt_worker_data->thread_data == NULL) {
+ const size_t thread_size =
+ max_threads * sizeof(*row_mt_worker_data->thread_data);
+ CHECK_MEM_ERROR(cm, row_mt_worker_data->thread_data,
+ vpx_memalign(32, thread_size));
+ }
+}
+
+void vp9_dec_free_row_mt_mem(RowMTWorkerData *row_mt_worker_data) {
+ if (row_mt_worker_data != NULL) {
+ int plane;
+#if CONFIG_MULTITHREAD
+ int i;
+ if (row_mt_worker_data->recon_sync_mutex != NULL) {
+ for (i = 0; i < row_mt_worker_data->num_jobs; ++i) {
+ pthread_mutex_destroy(&row_mt_worker_data->recon_sync_mutex[i]);
+ }
+ vpx_free(row_mt_worker_data->recon_sync_mutex);
+ row_mt_worker_data->recon_sync_mutex = NULL;
+ }
+ if (row_mt_worker_data->recon_sync_cond != NULL) {
+ for (i = 0; i < row_mt_worker_data->num_jobs; ++i) {
+ pthread_cond_destroy(&row_mt_worker_data->recon_sync_cond[i]);
+ }
+ vpx_free(row_mt_worker_data->recon_sync_cond);
+ row_mt_worker_data->recon_sync_cond = NULL;
+ }
+#endif
+ for (plane = 0; plane < 3; ++plane) {
+ vpx_free(row_mt_worker_data->eob[plane]);
+ row_mt_worker_data->eob[plane] = NULL;
+ vpx_free(row_mt_worker_data->dqcoeff[plane]);
+ row_mt_worker_data->dqcoeff[plane] = NULL;
+ }
+ vpx_free(row_mt_worker_data->partition);
+ row_mt_worker_data->partition = NULL;
+ vpx_free(row_mt_worker_data->recon_map);
+ row_mt_worker_data->recon_map = NULL;
+ vpx_free(row_mt_worker_data->thread_data);
+ row_mt_worker_data->thread_data = NULL;
+ }
+}
+
+static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) {
+ cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
+ if (!cm->mip) return 1;
+ cm->mi_alloc_size = mi_size;
+ cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ if (!cm->mi_grid_base) return 1;
+ return 0;
+}
+
+static void vp9_dec_free_mi(VP9_COMMON *cm) {
+#if CONFIG_VP9_POSTPROC
+ // MFQE allocates an additional mip and swaps it with cm->mip.
+ vpx_free(cm->postproc_state.prev_mip);
+ cm->postproc_state.prev_mip = NULL;
+#endif
+ vpx_free(cm->mip);
+ cm->mip = NULL;
+ vpx_free(cm->mi_grid_base);
+ cm->mi_grid_base = NULL;
+ cm->mi_alloc_size = 0;
+}
+
+VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
+ VP9Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
+ VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
+
+ if (!cm) return NULL;
+
+ vp9_zero(*pbi);
+
+ if (setjmp(cm->error.jmp)) {
+ cm->error.setjmp = 0;
+ vp9_decoder_remove(pbi);
+ return NULL;
+ }
+
+ cm->error.setjmp = 1;
+
+ CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+ CHECK_MEM_ERROR(
+ cm, cm->frame_contexts,
+ (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
+
+ pbi->need_resync = 1;
+ once(initialize_dec);
+
+ // Initialize the references to not point to any frame buffers.
+ memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
+ memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map));
+
+ init_frame_indexes(cm);
+ pbi->ready_for_new_data = 1;
+ pbi->common.buffer_pool = pool;
+
+ cm->bit_depth = VPX_BITS_8;
+ cm->dequant_bit_depth = VPX_BITS_8;
+
+ cm->alloc_mi = vp9_dec_alloc_mi;
+ cm->free_mi = vp9_dec_free_mi;
+ cm->setup_mi = vp9_dec_setup_mi;
+
+ vp9_loop_filter_init(cm);
+
+ cm->error.setjmp = 0;
+
+ vpx_get_worker_interface()->init(&pbi->lf_worker);
+
+ return pbi;
+}
+
+void vp9_decoder_remove(VP9Decoder *pbi) {
+ int i;
+
+ if (!pbi) return;
+
+ vpx_get_worker_interface()->end(&pbi->lf_worker);
+ vpx_free(pbi->lf_worker.data1);
+
+ for (i = 0; i < pbi->num_tile_workers; ++i) {
+ VPxWorker *const worker = &pbi->tile_workers[i];
+ vpx_get_worker_interface()->end(worker);
+ }
+
+ vpx_free(pbi->tile_worker_data);
+ vpx_free(pbi->tile_workers);
+
+ if (pbi->num_tile_workers > 0) {
+ vp9_loop_filter_dealloc(&pbi->lf_row_sync);
+ }
+
+ if (pbi->row_mt == 1) {
+ vp9_dec_free_row_mt_mem(pbi->row_mt_worker_data);
+ if (pbi->row_mt_worker_data != NULL) {
+ vp9_jobq_deinit(&pbi->row_mt_worker_data->jobq);
+ vpx_free(pbi->row_mt_worker_data->jobq_buf);
+#if CONFIG_MULTITHREAD
+ pthread_mutex_destroy(&pbi->row_mt_worker_data->recon_done_mutex);
+#endif
+ }
+ vpx_free(pbi->row_mt_worker_data);
+ }
+
+ vp9_remove_common(&pbi->common);
+ vpx_free(pbi);
+}
+
+static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
+ const YV12_BUFFER_CONFIG *b) {
+ return a->y_height == b->y_height && a->y_width == b->y_width &&
+ a->uv_height == b->uv_height && a->uv_width == b->uv_width;
+}
+
+vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi,
+ VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP9_COMMON *cm = &pbi->common;
+
+ /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
+ * encoder is using the frame buffers for. This is just a stub to keep the
+ * vpxenc --test-decode functionality working, and will be replaced in a
+ * later commit that adds VP9-specific controls for this functionality.
+ */
+ if (ref_frame_flag == VP9_LAST_FLAG) {
+ const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
+ if (cfg == NULL) {
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ "No 'last' reference frame");
+ return VPX_CODEC_ERROR;
+ }
+ if (!equal_dimensions(cfg, sd))
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ "Incorrect buffer dimensions");
+ else
+ vpx_yv12_copy_frame(cfg, sd);
+ } else {
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
+ }
+
+ return cm->error.error_code;
+}
+
+vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
+ VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ int idx;
+ YV12_BUFFER_CONFIG *ref_buf = NULL;
+
+ // TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
+ // encoder is using the frame buffers for. This is just a stub to keep the
+ // vpxenc --test-decode functionality working, and will be replaced in a
+ // later commit that adds VP9-specific controls for this functionality.
+ // (Yunqing) The set_reference control depends on the following setting in
+ // encoder.
+ // cpi->lst_fb_idx = 0;
+ // cpi->gld_fb_idx = 1;
+ // cpi->alt_fb_idx = 2;
+ if (ref_frame_flag == VP9_LAST_FLAG) {
+ idx = cm->ref_frame_map[0];
+ } else if (ref_frame_flag == VP9_GOLD_FLAG) {
+ idx = cm->ref_frame_map[1];
+ } else if (ref_frame_flag == VP9_ALT_FLAG) {
+ idx = cm->ref_frame_map[2];
+ } else {
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
+ return cm->error.error_code;
+ }
+
+ if (idx < 0 || idx >= FRAME_BUFFERS) {
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ "Invalid reference frame map");
+ return cm->error.error_code;
+ }
+
+ // Get the destination reference buffer.
+ ref_buf = &cm->buffer_pool->frame_bufs[idx].buf;
+
+ if (!equal_dimensions(ref_buf, sd)) {
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ "Incorrect buffer dimensions");
+ } else {
+ // Overwrite the reference frame buffer.
+ vpx_yv12_copy_frame(sd, ref_buf);
+ }
+
+ return cm->error.error_code;
+}
+
+/* If any buffer updating is signaled, it should be done here. */
+static void swap_frame_buffers(VP9Decoder *pbi) {
+ int ref_index = 0, mask;
+ VP9_COMMON *const cm = &pbi->common;
+ BufferPool *const pool = cm->buffer_pool;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+
+ for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
+ const int old_idx = cm->ref_frame_map[ref_index];
+ // Current thread releases the holding of reference frame.
+ decrease_ref_count(old_idx, frame_bufs, pool);
+
+ // Release the reference frame in reference map.
+ if (mask & 1) {
+ decrease_ref_count(old_idx, frame_bufs, pool);
+ }
+ cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
+ ++ref_index;
+ }
+
+ // Current thread releases the holding of reference frame.
+ for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
+ const int old_idx = cm->ref_frame_map[ref_index];
+ decrease_ref_count(old_idx, frame_bufs, pool);
+ cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
+ }
+ pbi->hold_ref_buf = 0;
+ cm->frame_to_show = get_frame_new_buffer(cm);
+
+ --frame_bufs[cm->new_fb_idx].ref_count;
+
+ // Invalidate these references until the next frame starts.
+ for (ref_index = 0; ref_index < 3; ref_index++)
+ cm->frame_refs[ref_index].idx = -1;
+}
+
+static void release_fb_on_decoder_exit(VP9Decoder *pbi) {
+ const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ VP9_COMMON *volatile const cm = &pbi->common;
+ BufferPool *volatile const pool = cm->buffer_pool;
+ RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
+ int i;
+
+ // Synchronize all threads immediately as a subsequent decode call may
+ // cause a resize invalidating some allocations.
+ winterface->sync(&pbi->lf_worker);
+ for (i = 0; i < pbi->num_tile_workers; ++i) {
+ winterface->sync(&pbi->tile_workers[i]);
+ }
+
+ // Release all the reference buffers if worker thread is holding them.
+ if (pbi->hold_ref_buf == 1) {
+ int ref_index = 0, mask;
+ for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
+ const int old_idx = cm->ref_frame_map[ref_index];
+ // Current thread releases the holding of reference frame.
+ decrease_ref_count(old_idx, frame_bufs, pool);
+
+ // Release the reference frame in reference map.
+ if (mask & 1) {
+ decrease_ref_count(old_idx, frame_bufs, pool);
+ }
+ ++ref_index;
+ }
+
+ // Current thread releases the holding of reference frame.
+ for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
+ const int old_idx = cm->ref_frame_map[ref_index];
+ decrease_ref_count(old_idx, frame_bufs, pool);
+ }
+ pbi->hold_ref_buf = 0;
+ }
+}
+
+int vp9_receive_compressed_data(VP9Decoder *pbi, size_t size,
+ const uint8_t **psource) {
+ VP9_COMMON *volatile const cm = &pbi->common;
+ BufferPool *volatile const pool = cm->buffer_pool;
+ RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
+ const uint8_t *source = *psource;
+ int retcode = 0;
+ cm->error.error_code = VPX_CODEC_OK;
+
+ if (size == 0) {
+ // This is used to signal that we are missing frames.
+ // We do not know if the missing frame(s) was supposed to update
+ // any of the reference buffers, but we act conservatively and
+ // mark only the last buffer as corrupted.
+ //
+ // TODO(jkoleszar): Error concealment is undefined and non-normative
+ // at this point, but if it becomes so, [0] may not always be the correct
+ // thing to do here.
+ if (cm->frame_refs[0].idx > 0) {
+ assert(cm->frame_refs[0].buf != NULL);
+ cm->frame_refs[0].buf->corrupted = 1;
+ }
+ }
+
+ pbi->ready_for_new_data = 0;
+
+ // Check if the previous frame was a frame without any references to it.
+ if (cm->new_fb_idx >= 0 && frame_bufs[cm->new_fb_idx].ref_count == 0 &&
+ !frame_bufs[cm->new_fb_idx].released) {
+ pool->release_fb_cb(pool->cb_priv,
+ &frame_bufs[cm->new_fb_idx].raw_frame_buffer);
+ frame_bufs[cm->new_fb_idx].released = 1;
+ }
+
+ // Find a free frame buffer. Return an error if none can be found.
+ cm->new_fb_idx = get_free_fb(cm);
+ if (cm->new_fb_idx == INVALID_IDX) {
+ pbi->ready_for_new_data = 1;
+ release_fb_on_decoder_exit(pbi);
+ vpx_clear_system_state();
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Unable to find free frame buffer");
+ return cm->error.error_code;
+ }
+
+ // Assign a MV array to the frame buffer.
+ cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
+
+ pbi->hold_ref_buf = 0;
+ pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
+
+ if (setjmp(cm->error.jmp)) {
+ cm->error.setjmp = 0;
+ pbi->ready_for_new_data = 1;
+ release_fb_on_decoder_exit(pbi);
+ // Release current frame.
+ decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
+ vpx_clear_system_state();
+ return -1;
+ }
+
+ cm->error.setjmp = 1;
+ vp9_decode_frame(pbi, source, source + size, psource);
+
+ swap_frame_buffers(pbi);
+
+ vpx_clear_system_state();
+
+ if (!cm->show_existing_frame) {
+ cm->last_show_frame = cm->show_frame;
+ cm->prev_frame = cm->cur_frame;
+ if (cm->seg.enabled) vp9_swap_current_and_last_seg_map(cm);
+ }
+
+ if (cm->show_frame) cm->cur_show_frame_fb_idx = cm->new_fb_idx;
+
+ // Update progress in frame parallel decode.
+ cm->last_width = cm->width;
+ cm->last_height = cm->height;
+ if (cm->show_frame) {
+ cm->current_video_frame++;
+ }
+
+ cm->error.setjmp = 0;
+ return retcode;
+}
+
+int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
+ vp9_ppflags_t *flags) {
+ VP9_COMMON *const cm = &pbi->common;
+ int ret = -1;
+#if !CONFIG_VP9_POSTPROC
+ (void)*flags;
+#endif
+
+ if (pbi->ready_for_new_data == 1) return ret;
+
+ pbi->ready_for_new_data = 1;
+
+ /* no raw frame to show!!! */
+ if (!cm->show_frame) return ret;
+
+ pbi->ready_for_new_data = 1;
+
+#if CONFIG_VP9_POSTPROC
+ if (!cm->show_existing_frame) {
+ ret = vp9_post_proc_frame(cm, sd, flags, cm->width);
+ } else {
+ *sd = *cm->frame_to_show;
+ ret = 0;
+ }
+#else
+ *sd = *cm->frame_to_show;
+ ret = 0;
+#endif /* CONFIG_VP9_POSTPROC */
+ vpx_clear_system_state();
+ return ret;
+}
+
+vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz,
+ uint32_t sizes[8], int *count,
+ vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state) {
+ // A chunk ending with a byte matching 0xc0 is an invalid chunk unless
+ // it is a super frame index. If the last byte of real video compression
+ // data is 0xc0 the encoder must add a 0 byte. If we have the marker but
+ // not the associated matching marker byte at the front of the index we have
+ // an invalid bitstream and need to return an error.
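+ //
+ // Worked example: a trailing marker byte of 0xca (0b11001010) satisfies
+ // (marker & 0xe0) == 0xc0 and encodes frames = (0xca & 0x7) + 1 = 3 and
+ // mag = ((0xca >> 3) & 0x3) + 1 = 2, giving index_sz = 2 + 2 * 3 = 8.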
+
+ uint8_t marker;
+
+ assert(data_sz);
+ marker = read_marker(decrypt_cb, decrypt_state, data + data_sz - 1);
+ *count = 0;
+
+ if ((marker & 0xe0) == 0xc0) {
+ const uint32_t frames = (marker & 0x7) + 1;
+ const uint32_t mag = ((marker >> 3) & 0x3) + 1;
+ const size_t index_sz = 2 + mag * frames;
+
+ // This chunk is marked as having a superframe index but doesn't have
+ // enough data for it, thus it's an invalid superframe index.
+ if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;
+
+ {
+ const uint8_t marker2 =
+ read_marker(decrypt_cb, decrypt_state, data + data_sz - index_sz);
+
+ // This chunk is marked as having a superframe index but doesn't have
+ // the matching marker byte at the front of the index therefore it's an
+ // invalid chunk.
+ if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
+ }
+
+ {
+ // Found a valid superframe index.
+ uint32_t i, j;
+ const uint8_t *x = &data[data_sz - index_sz + 1];
+
+ // Frames has a maximum of 8 and mag has a maximum of 4.
+ uint8_t clear_buffer[32];
+ assert(sizeof(clear_buffer) >= frames * mag);
+ if (decrypt_cb) {
+ decrypt_cb(decrypt_state, x, clear_buffer, frames * mag);
+ x = clear_buffer;
+ }
+
+ for (i = 0; i < frames; ++i) {
+ uint32_t this_sz = 0;
+
+ for (j = 0; j < mag; ++j) this_sz |= ((uint32_t)(*x++)) << (j * 8);
+ sizes[i] = this_sz;
+ }
+ *count = frames;
+ }
+ }
+ return VPX_CODEC_OK;
+}
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_decoder.h b/media/libvpx/libvpx/vp9/decoder/vp9_decoder.h
new file mode 100644
index 0000000000..b0ef83c73d
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_decoder.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP9_DECODER_VP9_DECODER_H_
+#define VPX_VP9_DECODER_VP9_DECODER_H_
+
+#include "./vpx_config.h"
+
+#include "vpx/vpx_codec.h"
+#include "vpx_dsp/bitreader.h"
+#include "vpx_scale/yv12config.h"
+#include "vpx_util/vpx_thread.h"
+
+#include "vp9/common/vp9_thread_common.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_ppflags.h"
+#include "./vp9_job_queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EOBS_PER_SB_LOG2 8
+#define DQCOEFFS_PER_SB_LOG2 12
+#define PARTITIONS_PER_SB 85
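+// For a 64x64 superblock: 2^8 = 256 eobs (one per 4x4 block), 2^12 = 4096
+// dqcoeffs (64 * 64 pixels), and 85 partition nodes (1 + 4 + 16 + 64 across
+// the 64x64 .. 8x8 levels).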
+
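+// Row-based multithreading splits decoding into per-row jobs: PARSE_JOB
+// entropy-decodes a row, RECON_JOB reconstructs its pixels, and LPF_JOB
+// applies the loop filter.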
+typedef enum JobType { PARSE_JOB, RECON_JOB, LPF_JOB } JobType;
+
+typedef struct ThreadData {
+ struct VP9Decoder *pbi;
+ LFWorkerData *lf_data;
+ VP9LfSync *lf_sync;
+} ThreadData;
+
+typedef struct TileBuffer {
+ const uint8_t *data;
+ size_t size;
+ int col; // only used with multi-threaded decoding
+} TileBuffer;
+
+typedef struct TileWorkerData {
+ const uint8_t *data_end;
+ int buf_start, buf_end; // pbi->tile_buffers to decode, inclusive
+ vpx_reader bit_reader;
+ FRAME_COUNTS counts;
+ LFWorkerData *lf_data;
+ VP9LfSync *lf_sync;
+ DECLARE_ALIGNED(16, MACROBLOCKD, xd);
+ /* dqcoeff are shared by all the planes. So planes must be decoded serially */
+ DECLARE_ALIGNED(16, tran_low_t, dqcoeff[32 * 32]);
+ DECLARE_ALIGNED(16, uint16_t, extend_and_predict_buf[80 * 2 * 80 * 2]);
+ struct vpx_internal_error_info error_info;
+} TileWorkerData;
+
+typedef void (*process_block_fn_t)(TileWorkerData *twd,
+ struct VP9Decoder *const pbi, int mi_row,
+ int mi_col, BLOCK_SIZE bsize, int bwl,
+ int bhl);
+
+typedef struct RowMTWorkerData {
+ int num_sbs;
+ int *eob[MAX_MB_PLANE];
+ PARTITION_TYPE *partition;
+ tran_low_t *dqcoeff[MAX_MB_PLANE];
+ int8_t *recon_map;
+ const uint8_t *data_end;
+ uint8_t *jobq_buf;
+ JobQueueRowMt jobq;
+ size_t jobq_size;
+ int num_tiles_done;
+ int num_jobs;
+#if CONFIG_MULTITHREAD
+ pthread_mutex_t recon_done_mutex;
+ pthread_mutex_t *recon_sync_mutex;
+ pthread_cond_t *recon_sync_cond;
+#endif
+ ThreadData *thread_data;
+} RowMTWorkerData;
+
+/* Structure to queue and dequeue row decode jobs */
+typedef struct Job {
+ int row_num;
+ int tile_col;
+ JobType job_type;
+} Job;
+
+typedef struct VP9Decoder {
+ DECLARE_ALIGNED(16, MACROBLOCKD, mb);
+
+ DECLARE_ALIGNED(16, VP9_COMMON, common);
+
+ int ready_for_new_data;
+
+ int refresh_frame_flags;
+
+ // TODO(hkuang): Combine this with cur_buf in macroblockd as they are
+ // the same.
+ RefCntBuffer *cur_buf; // Current decoding frame buffer.
+
+ VPxWorker lf_worker;
+ VPxWorker *tile_workers;
+ TileWorkerData *tile_worker_data;
+ TileBuffer tile_buffers[64];
+ int num_tile_workers;
+ int total_tiles;
+
+ VP9LfSync lf_row_sync;
+
+ vpx_decrypt_cb decrypt_cb;
+ void *decrypt_state;
+
+ int max_threads;
+ int inv_tile_order;
+ int need_resync; // wait for key/intra-only frame.
+ int hold_ref_buf; // hold the reference buffer.
+
+ int row_mt;
+ int lpf_mt_opt;
+ RowMTWorkerData *row_mt_worker_data;
+} VP9Decoder;
+
+int vp9_receive_compressed_data(struct VP9Decoder *pbi, size_t size,
+ const uint8_t **psource);
+
+int vp9_get_raw_frame(struct VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
+ vp9_ppflags_t *flags);
+
+vpx_codec_err_t vp9_copy_reference_dec(struct VP9Decoder *pbi,
+ VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
+ VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
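+// Reads one byte from data, routing it through the decrypt callback when one
+// is configured.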
+static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state, const uint8_t *data) {
+ if (decrypt_cb) {
+ uint8_t marker;
+ decrypt_cb(decrypt_state, data, &marker, 1);
+ return marker;
+ }
+ return *data;
+}
+
+// This function is exposed for use in tests, as is the inlined function
+// read_marker().
+vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz,
+ uint32_t sizes[8], int *count,
+ vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state);
+
+struct VP9Decoder *vp9_decoder_create(BufferPool *const pool);
+
+void vp9_decoder_remove(struct VP9Decoder *pbi);
+
+void vp9_dec_alloc_row_mt_mem(RowMTWorkerData *row_mt_worker_data,
+ VP9_COMMON *cm, int num_sbs, int max_threads,
+ int num_jobs);
+void vp9_dec_free_row_mt_mem(RowMTWorkerData *row_mt_worker_data);
+
+static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
+ BufferPool *const pool) {
+ if (idx >= 0 && frame_bufs[idx].ref_count > 0) {
+ --frame_bufs[idx].ref_count;
+ // A worker may only get a free framebuffer index when calling get_free_fb,
+ // but the private buffer is not set up until the frame header has been
+ // decoded. So if an error happens while decoding the header, the frame_bufs
+ // entry will not have a valid priv buffer.
+ if (!frame_bufs[idx].released && frame_bufs[idx].ref_count == 0 &&
+ frame_bufs[idx].raw_frame_buffer.priv) {
+ pool->release_fb_cb(pool->cb_priv, &frame_bufs[idx].raw_frame_buffer);
+ frame_bufs[idx].released = 1;
+ }
+ }
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP9_DECODER_VP9_DECODER_H_
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_detokenize.c b/media/libvpx/libvpx/vp9/decoder/vp9_detokenize.c
new file mode 100644
index 0000000000..c2e6b3d545
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_detokenize.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_entropy.h"
+#if CONFIG_COEFFICIENT_RANGE_CHECKING
+#include "vp9/common/vp9_idct.h"
+#endif
+
+#include "vp9/decoder/vp9_detokenize.h"
+
+#define EOB_CONTEXT_NODE 0
+#define ZERO_CONTEXT_NODE 1
+#define ONE_CONTEXT_NODE 2
+
+#define INCREMENT_COUNT(token) \
+ do { \
+ if (counts) ++coef_counts[band][ctx][token]; \
+ } while (0)
+
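+// Hand-inlined boolean decoder: equivalent to vpx_read(), but it operates on
+// caller-owned value/count/range locals so decode_coefs() below can keep the
+// reader state in registers across calls.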
+static INLINE int read_bool(vpx_reader *r, int prob, BD_VALUE *value,
+ int *count, unsigned int *range) {
+ const unsigned int split = (*range * prob + (256 - prob)) >> CHAR_BIT;
+ const BD_VALUE bigsplit = (BD_VALUE)split << (BD_VALUE_SIZE - CHAR_BIT);
+#if CONFIG_BITSTREAM_DEBUG
+ const int queue_r = bitstream_queue_get_read();
+ const int frame_idx = bitstream_queue_get_frame_read();
+ int ref_result, ref_prob;
+ bitstream_queue_pop(&ref_result, &ref_prob);
+ if (prob != ref_prob) {
+ fprintf(stderr,
+ "\n *** [bit] prob error, frame_idx_r %d prob %d ref_prob %d "
+ "queue_r %d\n",
+ frame_idx, prob, ref_prob, queue_r);
+
+ assert(0);
+ }
+#endif
+
+ if (*count < 0) {
+ r->value = *value;
+ r->count = *count;
+ vpx_reader_fill(r);
+ *value = r->value;
+ *count = r->count;
+ }
+
+ if (*value >= bigsplit) {
+ *range = *range - split;
+ *value = *value - bigsplit;
+ {
+ const int shift = vpx_norm[*range];
+ *range <<= shift;
+ *value <<= shift;
+ *count -= shift;
+ }
+#if CONFIG_BITSTREAM_DEBUG
+ {
+ const int bit = 1;
+ if (bit != ref_result) {
+ fprintf(
+ stderr,
+ "\n *** [bit] result error, frame_idx_r %d bit %d ref_result %d "
+ "queue_r %d\n",
+ frame_idx, bit, ref_result, queue_r);
+
+ assert(0);
+ }
+ }
+#endif
+ return 1;
+ }
+ *range = split;
+ {
+ const int shift = vpx_norm[*range];
+ *range <<= shift;
+ *value <<= shift;
+ *count -= shift;
+ }
+#if CONFIG_BITSTREAM_DEBUG
+ {
+ const int bit = 0;
+ if (bit != ref_result) {
+ fprintf(stderr,
+ "\n *** [bit] result error, frame_idx_r %d bit %d ref_result %d "
+ "queue_r %d\n",
+ frame_idx, bit, ref_result, queue_r);
+
+ assert(0);
+ }
+ }
+#endif
+ return 0;
+}
+
+static INLINE int read_coeff(vpx_reader *r, const vpx_prob *probs, int n,
+ BD_VALUE *value, int *count, unsigned int *range) {
+ int i, val = 0;
+ for (i = 0; i < n; ++i)
+ val = (val << 1) | read_bool(r, probs[i], value, count, range);
+ return val;
+}
+
+static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
+ tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
+ int ctx, const int16_t *scan, const int16_t *nb,
+ vpx_reader *r) {
+ FRAME_COUNTS *counts = xd->counts;
+ const int max_eob = 16 << (tx_size << 1);
+ const FRAME_CONTEXT *const fc = xd->fc;
+ const int ref = is_inter_block(xd->mi[0]);
+ int band, c = 0;
+ const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ fc->coef_probs[tx_size][type][ref];
+ const vpx_prob *prob;
+ unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
+ unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
+ uint8_t token_cache[32 * 32];
+ const uint8_t *band_translate = get_band_translate(tx_size);
+ const int dq_shift = (tx_size == TX_32X32);
+ int v;
+ int16_t dqv = dq[0];
+ const uint8_t *const cat6_prob =
+#if CONFIG_VP9_HIGHBITDEPTH
+ (xd->bd == VPX_BITS_12)
+ ? vp9_cat6_prob_high12
+ : (xd->bd == VPX_BITS_10) ? vp9_cat6_prob_high12 + 2 :
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_cat6_prob;
+ const int cat6_bits =
+#if CONFIG_VP9_HIGHBITDEPTH
+ (xd->bd == VPX_BITS_12) ? 18
+ : (xd->bd == VPX_BITS_10) ? 16 :
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ 14;
+ // Keep value, range, and count as locals. The compiler produces better
+ // results with the locals than using r directly.
+ BD_VALUE value = r->value;
+ unsigned int range = r->range;
+ int count = r->count;
+
+ if (counts) {
+ coef_counts = counts->coef[tx_size][type][ref];
+ eob_branch_count = counts->eob_branch[tx_size][type][ref];
+ }
+
+ while (c < max_eob) {
+ int val = -1;
+ band = *band_translate++;
+ prob = coef_probs[band][ctx];
+ if (counts) ++eob_branch_count[band][ctx];
+ if (!read_bool(r, prob[EOB_CONTEXT_NODE], &value, &count, &range)) {
+ INCREMENT_COUNT(EOB_MODEL_TOKEN);
+ break;
+ }
+
+ while (!read_bool(r, prob[ZERO_CONTEXT_NODE], &value, &count, &range)) {
+ INCREMENT_COUNT(ZERO_TOKEN);
+ dqv = dq[1];
+ token_cache[scan[c]] = 0;
+ ++c;
+ if (c >= max_eob) {
+ r->value = value;
+ r->range = range;
+ r->count = count;
+ return c; // zero tokens at the end (no eob token)
+ }
+ ctx = get_coef_context(nb, token_cache, c);
+ band = *band_translate++;
+ prob = coef_probs[band][ctx];
+ }
+
+ if (read_bool(r, prob[ONE_CONTEXT_NODE], &value, &count, &range)) {
+ const vpx_prob *p = vp9_pareto8_full[prob[PIVOT_NODE] - 1];
+ INCREMENT_COUNT(TWO_TOKEN);
+ if (read_bool(r, p[0], &value, &count, &range)) {
+ if (read_bool(r, p[3], &value, &count, &range)) {
+ token_cache[scan[c]] = 5;
+ if (read_bool(r, p[5], &value, &count, &range)) {
+ if (read_bool(r, p[7], &value, &count, &range)) {
+ val = CAT6_MIN_VAL +
+ read_coeff(r, cat6_prob, cat6_bits, &value, &count, &range);
+ } else {
+ val = CAT5_MIN_VAL +
+ read_coeff(r, vp9_cat5_prob, 5, &value, &count, &range);
+ }
+ } else if (read_bool(r, p[6], &value, &count, &range)) {
+ val = CAT4_MIN_VAL +
+ read_coeff(r, vp9_cat4_prob, 4, &value, &count, &range);
+ } else {
+ val = CAT3_MIN_VAL +
+ read_coeff(r, vp9_cat3_prob, 3, &value, &count, &range);
+ }
+ } else {
+ token_cache[scan[c]] = 4;
+ if (read_bool(r, p[4], &value, &count, &range)) {
+ val = CAT2_MIN_VAL +
+ read_coeff(r, vp9_cat2_prob, 2, &value, &count, &range);
+ } else {
+ val = CAT1_MIN_VAL +
+ read_coeff(r, vp9_cat1_prob, 1, &value, &count, &range);
+ }
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ // val may use 18-bits
+ v = (int)(((int64_t)val * dqv) >> dq_shift);
+#else
+ v = (val * dqv) >> dq_shift;
+#endif
+ } else {
+ if (read_bool(r, p[1], &value, &count, &range)) {
+ token_cache[scan[c]] = 3;
+ v = ((3 + read_bool(r, p[2], &value, &count, &range)) * dqv) >>
+ dq_shift;
+ } else {
+ token_cache[scan[c]] = 2;
+ v = (2 * dqv) >> dq_shift;
+ }
+ }
+ } else {
+ INCREMENT_COUNT(ONE_TOKEN);
+ token_cache[scan[c]] = 1;
+ v = dqv >> dq_shift;
+ }
+#if CONFIG_COEFFICIENT_RANGE_CHECKING
+#if CONFIG_VP9_HIGHBITDEPTH
+ dqcoeff[scan[c]] = highbd_check_range(
+ read_bool(r, 128, &value, &count, &range) ? -v : v, xd->bd);
+#else
+ dqcoeff[scan[c]] =
+ check_range(read_bool(r, 128, &value, &count, &range) ? -v : v);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#else
+ if (read_bool(r, 128, &value, &count, &range)) {
+ dqcoeff[scan[c]] = (tran_low_t)-v;
+ } else {
+ dqcoeff[scan[c]] = (tran_low_t)v;
+ }
+#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
+ ++c;
+ ctx = get_coef_context(nb, token_cache, c);
+ dqv = dq[1];
+ }
+
+ r->value = value;
+ r->range = range;
+ r->count = count;
+ return c;
+}
+
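+// When a transform block extends past the visible frame edge, only the
+// covered 4x4 columns/rows may update the entropy contexts. The shifts
+// computed here knock out one byte (8 bits) of context per off-frame 4x4
+// block when the caller stores the new above/left contexts.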
+static void get_ctx_shift(MACROBLOCKD *xd, int *ctx_shift_a, int *ctx_shift_l,
+ int x, int y, unsigned int tx_size_in_blocks) {
+ if (xd->max_blocks_wide) {
+ if (tx_size_in_blocks + x > xd->max_blocks_wide)
+ *ctx_shift_a = (tx_size_in_blocks - (xd->max_blocks_wide - x)) * 8;
+ }
+ if (xd->max_blocks_high) {
+ if (tx_size_in_blocks + y > xd->max_blocks_high)
+ *ctx_shift_l = (tx_size_in_blocks - (xd->max_blocks_high - y)) * 8;
+ }
+}
+
+int vp9_decode_block_tokens(TileWorkerData *twd, int plane,
+ const scan_order *sc, int x, int y, TX_SIZE tx_size,
+ int seg_id) {
+ vpx_reader *r = &twd->bit_reader;
+ MACROBLOCKD *xd = &twd->xd;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ const int16_t *const dequant = pd->seg_dequant[seg_id];
+ int eob;
+ ENTROPY_CONTEXT *a = pd->above_context + x;
+ ENTROPY_CONTEXT *l = pd->left_context + y;
+ int ctx;
+ int ctx_shift_a = 0;
+ int ctx_shift_l = 0;
+
+ switch (tx_size) {
+ case TX_4X4:
+ ctx = a[0] != 0;
+ ctx += l[0] != 0;
+ eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
+ dequant, ctx, sc->scan, sc->neighbors, r);
+ a[0] = l[0] = (eob > 0);
+ break;
+ case TX_8X8:
+ get_ctx_shift(xd, &ctx_shift_a, &ctx_shift_l, x, y, 1 << TX_8X8);
+ ctx = !!*(const uint16_t *)a;
+ ctx += !!*(const uint16_t *)l;
+ eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
+ dequant, ctx, sc->scan, sc->neighbors, r);
+ *(uint16_t *)a = ((eob > 0) * 0x0101) >> ctx_shift_a;
+ *(uint16_t *)l = ((eob > 0) * 0x0101) >> ctx_shift_l;
+ break;
+ case TX_16X16:
+ get_ctx_shift(xd, &ctx_shift_a, &ctx_shift_l, x, y, 1 << TX_16X16);
+ ctx = !!*(const uint32_t *)a;
+ ctx += !!*(const uint32_t *)l;
+ eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
+ dequant, ctx, sc->scan, sc->neighbors, r);
+ *(uint32_t *)a = ((eob > 0) * 0x01010101) >> ctx_shift_a;
+ *(uint32_t *)l = ((eob > 0) * 0x01010101) >> ctx_shift_l;
+ break;
+ case TX_32X32:
+ get_ctx_shift(xd, &ctx_shift_a, &ctx_shift_l, x, y, 1 << TX_32X32);
+ // NOTE: casting to uint64_t here is safe because the default memory
+ // alignment is at least 8 bytes and the TX_32X32 is aligned on 8 byte
+ // boundaries.
+ ctx = !!*(const uint64_t *)a;
+ ctx += !!*(const uint64_t *)l;
+ eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
+ dequant, ctx, sc->scan, sc->neighbors, r);
+ *(uint64_t *)a = ((eob > 0) * 0x0101010101010101ULL) >> ctx_shift_a;
+ *(uint64_t *)l = ((eob > 0) * 0x0101010101010101ULL) >> ctx_shift_l;
+ break;
+ default:
+ assert(0 && "Invalid transform size.");
+ eob = 0;
+ break;
+ }
+
+ return eob;
+}
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_detokenize.h b/media/libvpx/libvpx/vp9/decoder/vp9_detokenize.h
new file mode 100644
index 0000000000..a32052ffff
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_detokenize.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP9_DECODER_VP9_DETOKENIZE_H_
+#define VPX_VP9_DECODER_VP9_DETOKENIZE_H_
+
+#include "vpx_dsp/bitreader.h"
+#include "vp9/decoder/vp9_decoder.h"
+#include "vp9/common/vp9_scan.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int vp9_decode_block_tokens(TileWorkerData *twd, int plane,
+ const scan_order *sc, int x, int y, TX_SIZE tx_size,
+ int seg_id);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP9_DECODER_VP9_DETOKENIZE_H_
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_dsubexp.c b/media/libvpx/libvpx/vp9/decoder/vp9_dsubexp.c
new file mode 100644
index 0000000000..126ba0b96e
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_dsubexp.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "vp9/common/vp9_entropy.h"
+
+#include "vp9/decoder/vp9_dsubexp.h"
+
+static int inv_recenter_nonneg(int v, int m) {
+ if (v > 2 * m) return v;
+
+ return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
+}
+
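+// Tail of the inverse subexponential code: with l = 8 and m = 65, values
+// 0..64 are coded in 7 bits and values 65..190 take one extra bit.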
+static int decode_uniform(vpx_reader *r) {
+ const int l = 8;
+ const int m = (1 << l) - 191;
+ const int v = vpx_read_literal(r, l - 1);
+ return v < m ? v : (v << 1) - m + vpx_read_bit(r);
+}
+
+static int inv_remap_prob(int v, int m) {
+ static uint8_t inv_map_table[MAX_PROB] = {
+ 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189,
+ 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 125,
+ 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 138, 139, 140, 141,
+ 142, 143, 144, 145, 146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+ 174, 175, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190,
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 253
+ };
+ assert(v < (int)(sizeof(inv_map_table) / sizeof(inv_map_table[0])));
+ v = inv_map_table[v];
+ m--;
+ if ((m << 1) <= MAX_PROB) {
+ return 1 + inv_recenter_nonneg(v, m);
+ } else {
+ return MAX_PROB - inv_recenter_nonneg(v, MAX_PROB - 1 - m);
+ }
+}
+
+static int decode_term_subexp(vpx_reader *r) {
+ if (!vpx_read_bit(r)) return vpx_read_literal(r, 4);
+ if (!vpx_read_bit(r)) return vpx_read_literal(r, 4) + 16;
+ if (!vpx_read_bit(r)) return vpx_read_literal(r, 5) + 32;
+ return decode_uniform(r) + 64;
+}
+
+void vp9_diff_update_prob(vpx_reader *r, vpx_prob *p) {
+ if (vpx_read(r, DIFF_UPDATE_PROB)) {
+ const int delp = decode_term_subexp(r);
+ *p = (vpx_prob)inv_remap_prob(delp, *p);
+ }
+}
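The three helpers above decode a "term subexponential" delta: decode_term_subexp() reads an escape-coded magnitude (4, 4, 5 bits, then a near-uniform tail), and inv_remap_prob() recenters the decoded value around the current probability so the cheapest code values land on the nearest probabilities. A minimal sketch of that recentering, duplicating the static inv_recenter_nonneg() so it runs standalone:

    #include <stdio.h>

    /* Mirrors inv_recenter_nonneg() above: code value v folds around m. */
    static int recenter(int v, int m) {
      if (v > 2 * m) return v;
      return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
    }

    int main(void) {
      /* Around m = 100 the smallest codes hit the nearest values:
       * 0 -> 100, 1 -> 99, 2 -> 101, 3 -> 98, 4 -> 102. */
      int v;
      for (v = 0; v < 5; ++v) printf("%d -> %d\n", v, recenter(v, 100));
      return 0;
    }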
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_dsubexp.h b/media/libvpx/libvpx/vp9/decoder/vp9_dsubexp.h
new file mode 100644
index 0000000000..b0c7750736
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_dsubexp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP9_DECODER_VP9_DSUBEXP_H_
+#define VPX_VP9_DECODER_VP9_DSUBEXP_H_
+
+#include "vpx_dsp/bitreader.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp9_diff_update_prob(vpx_reader *r, vpx_prob *p);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP9_DECODER_VP9_DSUBEXP_H_
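For context, a hedged sketch of how this header's single entry point is typically driven: the compressed-header parser calls vp9_diff_update_prob() once per table entry, and the function itself consumes the per-entry "update?" bit. update_prob_table() is a hypothetical wrapper for illustration, not an API from the patch:

    #include "vpx_dsp/bitreader.h"
    #include "vp9/decoder/vp9_dsubexp.h"

    /* Hypothetical helper: each entry of the table is conditionally
     * updated in place from the bitstream. */
    static void update_prob_table(vpx_reader *r, vpx_prob *probs, int n) {
      int i;
      for (i = 0; i < n; ++i) vp9_diff_update_prob(r, &probs[i]);
    }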
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_job_queue.c b/media/libvpx/libvpx/vp9/decoder/vp9_job_queue.c
new file mode 100644
index 0000000000..9a31f5a6d0
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_job_queue.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "vpx/vpx_integer.h"
+
+#include "vp9/decoder/vp9_job_queue.h"
+
+void vp9_jobq_init(JobQueueRowMt *jobq, uint8_t *buf, size_t buf_size) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_init(&jobq->mutex, NULL);
+ pthread_cond_init(&jobq->cond, NULL);
+#endif
+ jobq->buf_base = buf;
+ jobq->buf_wr = buf;
+ jobq->buf_rd = buf;
+ jobq->buf_end = buf + buf_size;
+ jobq->terminate = 0;
+}
+
+void vp9_jobq_reset(JobQueueRowMt *jobq) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&jobq->mutex);
+#endif
+ jobq->buf_wr = jobq->buf_base;
+ jobq->buf_rd = jobq->buf_base;
+ jobq->terminate = 0;
+#if CONFIG_MULTITHREAD
+ pthread_mutex_unlock(&jobq->mutex);
+#endif
+}
+
+void vp9_jobq_deinit(JobQueueRowMt *jobq) {
+ vp9_jobq_reset(jobq);
+#if CONFIG_MULTITHREAD
+ pthread_mutex_destroy(&jobq->mutex);
+ pthread_cond_destroy(&jobq->cond);
+#endif
+}
+
+void vp9_jobq_terminate(JobQueueRowMt *jobq) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&jobq->mutex);
+#endif
+ jobq->terminate = 1;
+#if CONFIG_MULTITHREAD
+ pthread_cond_broadcast(&jobq->cond);
+ pthread_mutex_unlock(&jobq->mutex);
+#endif
+}
+
+int vp9_jobq_queue(JobQueueRowMt *jobq, void *job, size_t job_size) {
+ int ret = 0;
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&jobq->mutex);
+#endif
+ if (jobq->buf_end >= jobq->buf_wr + job_size) {
+ memcpy(jobq->buf_wr, job, job_size);
+ jobq->buf_wr = jobq->buf_wr + job_size;
+#if CONFIG_MULTITHREAD
+ pthread_cond_signal(&jobq->cond);
+#endif
+ ret = 0;
+ } else {
+ /* Wrap-around is not supported. */
+ assert(0);
+ ret = 1;
+ }
+#if CONFIG_MULTITHREAD
+ pthread_mutex_unlock(&jobq->mutex);
+#endif
+ return ret;
+}
+
+int vp9_jobq_dequeue(JobQueueRowMt *jobq, void *job, size_t job_size,
+ int blocking) {
+ int ret = 0;
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&jobq->mutex);
+#endif
+ if (jobq->buf_end >= jobq->buf_rd + job_size) {
+ while (1) {
+ if (jobq->buf_wr >= jobq->buf_rd + job_size) {
+ memcpy(job, jobq->buf_rd, job_size);
+ jobq->buf_rd = jobq->buf_rd + job_size;
+ ret = 0;
+ break;
+ } else {
+ /* Queue is empty; if it has been terminated, stop waiting and return. */
+ if (jobq->terminate == 1) {
+ ret = 1;
+ break;
+ }
+ if (blocking == 1) {
+#if CONFIG_MULTITHREAD
+ pthread_cond_wait(&jobq->cond, &jobq->mutex);
+#endif
+ } else {
+ /* No job is available and this is a non-blocking call: return failure. */
+ ret = 1;
+ break;
+ }
+ }
+ }
+ } else {
+ /* Wrap-around is not supported. */
+ ret = 1;
+ }
+#if CONFIG_MULTITHREAD
+ pthread_mutex_unlock(&jobq->mutex);
+#endif
+
+ return ret;
+}
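Taken together, the queue is a linear byte buffer with a write cursor and a read cursor guarded by one mutex. A hedged usage sketch under the assumption of fixed-size jobs; RowJob, kMaxJobs and example() are illustrative names, not part of the patch:

    #include <stdint.h>
    #include <stdlib.h>

    #include "vp9/decoder/vp9_job_queue.h"

    typedef struct { int tile_col, mi_row; } RowJob; /* illustrative payload */

    static void example(void) {
      enum { kMaxJobs = 64 };
      uint8_t *buf = (uint8_t *)malloc(kMaxJobs * sizeof(RowJob));
      JobQueueRowMt q;
      RowJob job = { 0, 0 }, out;
      if (buf == NULL) return;

      vp9_jobq_init(&q, buf, kMaxJobs * sizeof(RowJob));
      vp9_jobq_queue(&q, &job, sizeof(job)); /* producer side */
      /* Non-blocking drain; a worker thread would pass blocking == 1. */
      while (vp9_jobq_dequeue(&q, &out, sizeof(out), 0) == 0) {
        /* ... decode the row described by `out` ... */
      }
      vp9_jobq_terminate(&q); /* wakes any workers blocked in dequeue */
      vp9_jobq_deinit(&q);
      free(buf);
    }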
diff --git a/media/libvpx/libvpx/vp9/decoder/vp9_job_queue.h b/media/libvpx/libvpx/vp9/decoder/vp9_job_queue.h
new file mode 100644
index 0000000000..bc23bf9c2c
--- /dev/null
+++ b/media/libvpx/libvpx/vp9/decoder/vp9_job_queue.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP9_DECODER_VP9_JOB_QUEUE_H_
+#define VPX_VP9_DECODER_VP9_JOB_QUEUE_H_
+
+#include "vpx_util/vpx_thread.h"
+
+typedef struct {
+ // Base of the byte buffer that holds the queued jobs
+ uint8_t *buf_base;
+
+ // Write cursor: address at which the next job will be added
+ uint8_t *volatile buf_wr;
+
+ // Read cursor: address from which the next job will be taken
+ uint8_t *volatile buf_rd;
+
+ // One past the last byte of the job buffer
+ uint8_t *buf_end;
+
+ int terminate;
+
+#if CONFIG_MULTITHREAD
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+#endif
+} JobQueueRowMt;
+
+void vp9_jobq_init(JobQueueRowMt *jobq, uint8_t *buf, size_t buf_size);
+void vp9_jobq_reset(JobQueueRowMt *jobq);
+void vp9_jobq_deinit(JobQueueRowMt *jobq);
+void vp9_jobq_terminate(JobQueueRowMt *jobq);
+int vp9_jobq_queue(JobQueueRowMt *jobq, void *job, size_t job_size);
+int vp9_jobq_dequeue(JobQueueRowMt *jobq, void *job, size_t job_size,
+ int blocking);
+
+#endif // VPX_VP9_DECODER_VP9_JOB_QUEUE_H_
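One sizing consequence of the no-wrap design above: since neither vp9_jobq_queue() nor vp9_jobq_dequeue() ever moves a cursor back to buf_base, the buffer handed to vp9_jobq_init() must hold every job submitted between two resets. A one-line sketch; jobq_buf_size() is a hypothetical helper:

    #include <stddef.h>

    /* No wrap-around means the buffer needs room for the total job count. */
    static size_t jobq_buf_size(size_t num_jobs, size_t job_size) {
      return num_jobs * job_size;
    }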