Diffstat (limited to 'media/libvpx/libvpx/vp8')
-rw-r--r--  media/libvpx/libvpx/vp8/common/alloccommon.c  187
-rw-r--r--  media/libvpx/libvpx/vp8/common/alloccommon.h  30
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/loopfilter_arm.c  85
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/loopfilter_arm.h  31
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c  764
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/copymem_neon.c  52
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c  41
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/dequant_idct_neon.c  141
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/dequantizeb_neon.c  26
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/idct_blk_neon.c  295
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/iwalsh_neon.c  102
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c  106
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c  274
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c  613
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c  121
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/sixtappredict_neon.c  1729
-rw-r--r--  media/libvpx/libvpx/vp8/common/arm/neon/vp8_loopfilter_neon.c  538
-rw-r--r--  media/libvpx/libvpx/vp8/common/blockd.c  19
-rw-r--r--  media/libvpx/libvpx/vp8/common/blockd.h  311
-rw-r--r--  media/libvpx/libvpx/vp8/common/coefupdateprobs.h  197
-rw-r--r--  media/libvpx/libvpx/vp8/common/common.h  48
-rw-r--r--  media/libvpx/libvpx/vp8/common/context.c  398
-rw-r--r--  media/libvpx/libvpx/vp8/common/debugmodes.c  135
-rw-r--r--  media/libvpx/libvpx/vp8/common/default_coef_probs.h  160
-rw-r--r--  media/libvpx/libvpx/vp8/common/dequantize.c  37
-rw-r--r--  media/libvpx/libvpx/vp8/common/entropy.c  147
-rw-r--r--  media/libvpx/libvpx/vp8/common/entropy.h  108
-rw-r--r--  media/libvpx/libvpx/vp8/common/entropymode.c  104
-rw-r--r--  media/libvpx/libvpx/vp8/common/entropymode.h  88
-rw-r--r--  media/libvpx/libvpx/vp8/common/entropymv.c  47
-rw-r--r--  media/libvpx/libvpx/vp8/common/entropymv.h  49
-rw-r--r--  media/libvpx/libvpx/vp8/common/extend.c  167
-rw-r--r--  media/libvpx/libvpx/vp8/common/extend.h  32
-rw-r--r--  media/libvpx/libvpx/vp8/common/filter.c  381
-rw-r--r--  media/libvpx/libvpx/vp8/common/filter.h  31
-rw-r--r--  media/libvpx/libvpx/vp8/common/findnearmv.c  159
-rw-r--r--  media/libvpx/libvpx/vp8/common/findnearmv.h  151
-rw-r--r--  media/libvpx/libvpx/vp8/common/generic/systemdependent.c  111
-rw-r--r--  media/libvpx/libvpx/vp8/common/header.h  48
-rw-r--r--  media/libvpx/libvpx/vp8/common/idct_blk.c  72
-rw-r--r--  media/libvpx/libvpx/vp8/common/idctllm.c  185
-rw-r--r--  media/libvpx/libvpx/vp8/common/invtrans.h  57
-rw-r--r--  media/libvpx/libvpx/vp8/common/loongarch/idct_lsx.c  322
-rw-r--r--  media/libvpx/libvpx/vp8/common/loongarch/loopfilter_filters_lsx.c  743
-rw-r--r--  media/libvpx/libvpx/vp8/common/loongarch/sixtap_filter_lsx.c  1903
-rw-r--r--  media/libvpx/libvpx/vp8/common/loopfilter.h  101
-rw-r--r--  media/libvpx/libvpx/vp8/common/loopfilter_filters.c  397
-rw-r--r--  media/libvpx/libvpx/vp8/common/mbpitch.c  57
-rw-r--r--  media/libvpx/libvpx/vp8/common/mfqe.c  327
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c  29
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/dspr2/filter_dspr2.c  2767
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c  76
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c  346
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c  97
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/dspr2/vp8_loopfilter_filters_dspr2.c  2401
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/mmi/copymem_mmi.c  114
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/mmi/dequantize_mmi.c  115
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/mmi/idct_blk_mmi.c  70
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/mmi/idctllm_mmi.c  335
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/mmi/loopfilter_filters_mmi.c  1415
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/mmi/sixtap_filter_mmi.c  427
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/msa/bilinear_filter_msa.c  797
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/msa/copymem_msa.c  62
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/msa/idct_msa.c  406
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/msa/loopfilter_filters_msa.c  709
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/msa/mfqe_msa.c  139
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/msa/sixtap_filter_msa.c  1738
-rw-r--r--  media/libvpx/libvpx/vp8/common/mips/msa/vp8_macros_msa.h  1762
-rw-r--r--  media/libvpx/libvpx/vp8/common/modecont.c  26
-rw-r--r--  media/libvpx/libvpx/vp8/common/modecont.h  24
-rw-r--r--  media/libvpx/libvpx/vp8/common/mv.h  33
-rw-r--r--  media/libvpx/libvpx/vp8/common/onyx.h  277
-rw-r--r--  media/libvpx/libvpx/vp8/common/onyxc_int.h  177
-rw-r--r--  media/libvpx/libvpx/vp8/common/onyxd.h  62
-rw-r--r--  media/libvpx/libvpx/vp8/common/postproc.c  264
-rw-r--r--  media/libvpx/libvpx/vp8/common/postproc.h  45
-rw-r--r--  media/libvpx/libvpx/vp8/common/ppflags.h  39
-rw-r--r--  media/libvpx/libvpx/vp8/common/quant_common.c  130
-rw-r--r--  media/libvpx/libvpx/vp8/common/quant_common.h  33
-rw-r--r--  media/libvpx/libvpx/vp8/common/reconinter.c  503
-rw-r--r--  media/libvpx/libvpx/vp8/common/reconinter.h  36
-rw-r--r--  media/libvpx/libvpx/vp8/common/reconintra.c  104
-rw-r--r--  media/libvpx/libvpx/vp8/common/reconintra.h  35
-rw-r--r--  media/libvpx/libvpx/vp8/common/reconintra4x4.c  75
-rw-r--r--  media/libvpx/libvpx/vp8/common/reconintra4x4.h  45
-rw-r--r--  media/libvpx/libvpx/vp8/common/rtcd.c  15
-rw-r--r--  media/libvpx/libvpx/vp8/common/rtcd_defs.pl  250
-rw-r--r--  media/libvpx/libvpx/vp8/common/setupintrarecon.c  38
-rw-r--r--  media/libvpx/libvpx/vp8/common/setupintrarecon.h  40
-rw-r--r--  media/libvpx/libvpx/vp8/common/swapyv12buffer.c  32
-rw-r--r--  media/libvpx/libvpx/vp8/common/swapyv12buffer.h  27
-rw-r--r--  media/libvpx/libvpx/vp8/common/systemdependent.h  27
-rw-r--r--  media/libvpx/libvpx/vp8/common/threading.h  215
-rw-r--r--  media/libvpx/libvpx/vp8/common/treecoder.c  102
-rw-r--r--  media/libvpx/libvpx/vp8/common/treecoder.h  82
-rw-r--r--  media/libvpx/libvpx/vp8/common/vp8_entropymodedata.h  172
-rw-r--r--  media/libvpx/libvpx/vp8/common/vp8_loopfilter.c  566
-rw-r--r--  media/libvpx/libvpx/vp8/common/vp8_skin_detection.c  109
-rw-r--r--  media/libvpx/libvpx/vp8/common/vp8_skin_detection.h  47
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/bilinear_filter_sse2.c  336
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/dequantize_mmx.asm  259
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/idct_blk_mmx.c  23
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/idct_blk_sse2.c  84
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/idctllm_mmx.asm  296
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/idctllm_sse2.asm  710
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/iwalsh_sse2.asm  123
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/loopfilter_block_sse2_x86_64.asm  817
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/loopfilter_sse2.asm  1642
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/loopfilter_x86.c  129
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/mfqe_sse2.asm  289
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/recon_mmx.asm  120
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/recon_sse2.asm  118
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/subpixel_mmx.asm  270
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/subpixel_sse2.asm  963
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/subpixel_ssse3.asm  1515
-rw-r--r--  media/libvpx/libvpx/vp8/common/x86/vp8_asm_stubs.c  365
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/dboolhuff.c  72
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/dboolhuff.h  132
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/decodeframe.c  1271
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/decodemv.c  562
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/decodemv.h  26
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/decoderthreading.h  30
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/detokenize.c  210
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/detokenize.h  27
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/ec_types.h  53
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/error_concealment.c  482
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/error_concealment.h  41
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/onyxd_if.c  464
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/onyxd_int.h  142
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/threading.c  907
-rw-r--r--  media/libvpx/libvpx/vp8/decoder/treereader.h  45
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/arm/neon/denoising_neon.c  460
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c  91
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c  261
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c  121
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/bitstream.c  1381
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/bitstream.h  32
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/block.h  168
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/boolhuff.c  63
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/boolhuff.h  112
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/copy_c.c  27
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/dct.c  108
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/dct_value_cost.h  344
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/dct_value_tokens.h  848
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/defaultcoefcounts.h  235
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/denoising.c  725
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/denoising.h  103
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/encodeframe.c  1287
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/encodeframe.h  40
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/encodeintra.c  116
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/encodeintra.h  28
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/encodemb.c  512
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/encodemb.h  40
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/encodemv.c  320
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/encodemv.h  29
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/ethreading.c  639
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/ethreading.h  32
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/firstpass.c  3144
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/firstpass.h  31
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/lookahead.c  184
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/lookahead.h  99
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/loongarch/dct_lsx.c  161
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/loongarch/encodeopt_lsx.c  82
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/loongarch/vp8_quantize_lsx.c  145
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mcomp.c  1561
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mcomp.h  75
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mips/mmi/dct_mmi.c  434
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mips/mmi/vp8_quantize_mmi.c  263
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mips/msa/dct_msa.c  196
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mips/msa/denoising_msa.c  568
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mips/msa/encodeopt_msa.c  167
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mips/msa/quantize_msa.c  211
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mips/msa/temporal_filter_msa.c  284
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/modecosts.c  48
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/modecosts.h  26
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mr_dissim.c  215
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/mr_dissim.h  27
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/onyx_if.c  5433
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/onyx_int.h  738
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/pickinter.c  1347
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/pickinter.h  33
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/picklpf.c  392
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/picklpf.h  30
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/quantize.h  34
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/ratectrl.c  1589
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/ratectrl.h  40
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/rdopt.c  2394
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/rdopt.h  126
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/segmentation.c  55
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/segmentation.h  29
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/temporal_filter.c  434
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/temporal_filter.h  26
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/tokenize.c  468
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/tokenize.h  48
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/treewriter.c  33
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/treewriter.h  106
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/vp8_quantize.c  489
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/block_error_sse2.asm  188
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/copy_sse2.asm  94
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/copy_sse3.asm  147
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/dct_sse2.asm  434
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/denoising_sse2.c  372
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/fwalsh_sse2.asm  166
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/quantize_sse4.c  141
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm  209
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c  28
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/vp8_quantize_sse2.c  226
-rw-r--r--  media/libvpx/libvpx/vp8/encoder/x86/vp8_quantize_ssse3.c  93
-rw-r--r--  media/libvpx/libvpx/vp8/exports_dec  2
-rw-r--r--  media/libvpx/libvpx/vp8/exports_enc  2
-rw-r--r--  media/libvpx/libvpx/vp8/vp8_common.mk  149
-rw-r--r--  media/libvpx/libvpx/vp8/vp8_cx_iface.c  1387
-rw-r--r--  media/libvpx/libvpx/vp8/vp8_dx_iface.c  735
-rw-r--r--  media/libvpx/libvpx/vp8/vp8_ratectrl_rtc.cc  412
-rw-r--r--  media/libvpx/libvpx/vp8/vp8_ratectrl_rtc.h  62
-rw-r--r--  media/libvpx/libvpx/vp8/vp8cx.mk  132
-rw-r--r--  media/libvpx/libvpx/vp8/vp8dx.mk  39
217 files changed, 77090 insertions, 0 deletions
diff --git a/media/libvpx/libvpx/vp8/common/alloccommon.c b/media/libvpx/libvpx/vp8/common/alloccommon.c
new file mode 100644
index 0000000000..722b158c3a
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/alloccommon.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "alloccommon.h"
+#include "blockd.h"
+#include "vpx_mem/vpx_mem.h"
+#include "onyxc_int.h"
+#include "findnearmv.h"
+#include "entropymode.h"
+#include "systemdependent.h"
+
+void vp8_de_alloc_frame_buffers(VP8_COMMON *oci) {
+ int i;
+ for (i = 0; i < NUM_YV12_BUFFERS; ++i) {
+ vp8_yv12_de_alloc_frame_buffer(&oci->yv12_fb[i]);
+ }
+
+ vp8_yv12_de_alloc_frame_buffer(&oci->temp_scale_frame);
+#if CONFIG_POSTPROC
+ vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer);
+ if (oci->post_proc_buffer_int_used) {
+ vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer_int);
+ }
+
+ vpx_free(oci->pp_limits_buffer);
+ oci->pp_limits_buffer = NULL;
+
+ vpx_free(oci->postproc_state.generated_noise);
+ oci->postproc_state.generated_noise = NULL;
+#endif
+
+ vpx_free(oci->above_context);
+ vpx_free(oci->mip);
+#if CONFIG_ERROR_CONCEALMENT
+ vpx_free(oci->prev_mip);
+ oci->prev_mip = NULL;
+#endif
+
+ oci->above_context = NULL;
+ oci->mip = NULL;
+}
+
+int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
+ int i;
+
+ vp8_de_alloc_frame_buffers(oci);
+
+ /* our internal buffers are always multiples of 16 */
+ if ((width & 0xf) != 0) width += 16 - (width & 0xf);
+
+ if ((height & 0xf) != 0) height += 16 - (height & 0xf);
+
+ for (i = 0; i < NUM_YV12_BUFFERS; ++i) {
+ oci->fb_idx_ref_cnt[i] = 0;
+ oci->yv12_fb[i].flags = 0;
+ if (vp8_yv12_alloc_frame_buffer(&oci->yv12_fb[i], width, height,
+ VP8BORDERINPIXELS) < 0) {
+ goto allocation_fail;
+ }
+ }
+
+ oci->new_fb_idx = 0;
+ oci->lst_fb_idx = 1;
+ oci->gld_fb_idx = 2;
+ oci->alt_fb_idx = 3;
+
+ oci->fb_idx_ref_cnt[0] = 1;
+ oci->fb_idx_ref_cnt[1] = 1;
+ oci->fb_idx_ref_cnt[2] = 1;
+ oci->fb_idx_ref_cnt[3] = 1;
+
+ if (vp8_yv12_alloc_frame_buffer(&oci->temp_scale_frame, width, 16,
+ VP8BORDERINPIXELS) < 0) {
+ goto allocation_fail;
+ }
+
+ oci->mb_rows = height >> 4;
+ oci->mb_cols = width >> 4;
+ oci->MBs = oci->mb_rows * oci->mb_cols;
+ oci->mode_info_stride = oci->mb_cols + 1;
+ oci->mip =
+ vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO));
+
+ if (!oci->mip) goto allocation_fail;
+
+ oci->mi = oci->mip + oci->mode_info_stride + 1;
+
+  /* Allocation of previous mode info will be done in vp8_decode_frame(),
+   * as it is decoder-only data */
+
+ oci->above_context =
+ vpx_calloc(sizeof(ENTROPY_CONTEXT_PLANES) * oci->mb_cols, 1);
+
+ if (!oci->above_context) goto allocation_fail;
+
+#if CONFIG_POSTPROC
+ if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer, width, height,
+ VP8BORDERINPIXELS) < 0) {
+ goto allocation_fail;
+ }
+
+ oci->post_proc_buffer_int_used = 0;
+ memset(&oci->postproc_state, 0, sizeof(oci->postproc_state));
+ memset(oci->post_proc_buffer.buffer_alloc, 128,
+ oci->post_proc_buffer.frame_size);
+
+ /* Allocate buffer to store post-processing filter coefficients.
+ *
+ * Note: Round up mb_cols to support SIMD reads
+ */
+ oci->pp_limits_buffer = vpx_memalign(16, 24 * ((oci->mb_cols + 1) & ~1));
+ if (!oci->pp_limits_buffer) goto allocation_fail;
+#endif
+
+ return 0;
+
+allocation_fail:
+ vp8_de_alloc_frame_buffers(oci);
+ return 1;
+}
+
+void vp8_setup_version(VP8_COMMON *cm) {
+ switch (cm->version) {
+ case 0:
+ cm->no_lpf = 0;
+ cm->filter_type = NORMAL_LOOPFILTER;
+ cm->use_bilinear_mc_filter = 0;
+ cm->full_pixel = 0;
+ break;
+ case 1:
+ cm->no_lpf = 0;
+ cm->filter_type = SIMPLE_LOOPFILTER;
+ cm->use_bilinear_mc_filter = 1;
+ cm->full_pixel = 0;
+ break;
+ case 2:
+ cm->no_lpf = 1;
+ cm->filter_type = NORMAL_LOOPFILTER;
+ cm->use_bilinear_mc_filter = 1;
+ cm->full_pixel = 0;
+ break;
+ case 3:
+ cm->no_lpf = 1;
+ cm->filter_type = SIMPLE_LOOPFILTER;
+ cm->use_bilinear_mc_filter = 1;
+ cm->full_pixel = 1;
+ break;
+ default:
+ /*4,5,6,7 are reserved for future use*/
+ cm->no_lpf = 0;
+ cm->filter_type = NORMAL_LOOPFILTER;
+ cm->use_bilinear_mc_filter = 0;
+ cm->full_pixel = 0;
+ break;
+ }
+}
+void vp8_create_common(VP8_COMMON *oci) {
+ vp8_machine_specific_config(oci);
+
+ vp8_init_mbmode_probs(oci);
+ vp8_default_bmode_probs(oci->fc.bmode_prob);
+
+ oci->mb_no_coeff_skip = 1;
+ oci->no_lpf = 0;
+ oci->filter_type = NORMAL_LOOPFILTER;
+ oci->use_bilinear_mc_filter = 0;
+ oci->full_pixel = 0;
+ oci->multi_token_partition = ONE_PARTITION;
+ oci->clamp_type = RECON_CLAMP_REQUIRED;
+
+ /* Initialize reference frame sign bias structure to defaults */
+ memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias));
+
+ /* Default disable buffer to buffer copying */
+ oci->copy_buffer_to_gf = 0;
+ oci->copy_buffer_to_arf = 0;
+}
+
+void vp8_remove_common(VP8_COMMON *oci) { vp8_de_alloc_frame_buffers(oci); }
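
A note on the arithmetic in vp8_alloc_frame_buffers above: both dimensions are
rounded up to multiples of 16 before the macroblock counts are derived, so
mb_rows/mb_cols always cover the padded frame. A minimal standalone sketch of
that rounding (the 1920x1080 input is illustrative, not from the patch):

/* 1080 rounds up to 1088, giving 120x68 = 8160 macroblocks. */
#include <stdio.h>

int main(void) {
  int width = 1920, height = 1080;
  if ((width & 0xf) != 0) width += 16 - (width & 0xf);
  if ((height & 0xf) != 0) height += 16 - (height & 0xf);
  printf("%dx%d -> mb_cols=%d mb_rows=%d MBs=%d\n", width, height, width >> 4,
         height >> 4, (width >> 4) * (height >> 4));
  return 0;
}
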
diff --git a/media/libvpx/libvpx/vp8/common/alloccommon.h b/media/libvpx/libvpx/vp8/common/alloccommon.h
new file mode 100644
index 0000000000..2d376bbac3
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/alloccommon.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_ALLOCCOMMON_H_
+#define VPX_VP8_COMMON_ALLOCCOMMON_H_
+
+#include "onyxc_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp8_create_common(VP8_COMMON *oci);
+void vp8_remove_common(VP8_COMMON *oci);
+void vp8_de_alloc_frame_buffers(VP8_COMMON *oci);
+int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height);
+void vp8_setup_version(VP8_COMMON *cm);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_ALLOCCOMMON_H_
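
The header above covers the full VP8_COMMON lifecycle. A hedged usage sketch,
assuming compilation inside the libvpx tree (init_common is a hypothetical
helper, and the caller is expected to tear down with vp8_remove_common):

#include <string.h>
#include "vp8/common/alloccommon.h"

static int init_common(VP8_COMMON *cm, int width, int height) {
  memset(cm, 0, sizeof(*cm));
  vp8_create_common(cm); /* default probabilities and filter settings */
  vp8_setup_version(cm); /* maps cm->version onto loopfilter/mc choices */
  /* vp8_alloc_frame_buffers() returns nonzero on failure and has already
   * de-allocated anything it managed to allocate. */
  return vp8_alloc_frame_buffers(cm, width, height) ? -1 : 0;
}
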
diff --git a/media/libvpx/libvpx/vp8/common/arm/loopfilter_arm.c b/media/libvpx/libvpx/vp8/common/arm/loopfilter_arm.c
new file mode 100644
index 0000000000..48a1972048
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/loopfilter_arm.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
+#include "vp8/common/arm/loopfilter_arm.h"
+#include "vp8/common/loopfilter.h"
+#include "vp8/common/onyxc_int.h"
+
+/* NEON loopfilter functions */
+/* Horizontal MB filtering */
+void vp8_loop_filter_mbh_neon(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ unsigned char mblim = *lfi->mblim;
+ unsigned char lim = *lfi->lim;
+ unsigned char hev_thr = *lfi->hev_thr;
+ vp8_mbloop_filter_horizontal_edge_y_neon(y_ptr, y_stride, mblim, lim,
+ hev_thr);
+
+ if (u_ptr)
+ vp8_mbloop_filter_horizontal_edge_uv_neon(u_ptr, uv_stride, mblim, lim,
+ hev_thr, v_ptr);
+}
+
+/* Vertical MB Filtering */
+void vp8_loop_filter_mbv_neon(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ unsigned char mblim = *lfi->mblim;
+ unsigned char lim = *lfi->lim;
+ unsigned char hev_thr = *lfi->hev_thr;
+
+ vp8_mbloop_filter_vertical_edge_y_neon(y_ptr, y_stride, mblim, lim, hev_thr);
+
+ if (u_ptr)
+ vp8_mbloop_filter_vertical_edge_uv_neon(u_ptr, uv_stride, mblim, lim,
+ hev_thr, v_ptr);
+}
+
+/* Horizontal B Filtering */
+void vp8_loop_filter_bh_neon(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ unsigned char blim = *lfi->blim;
+ unsigned char lim = *lfi->lim;
+ unsigned char hev_thr = *lfi->hev_thr;
+
+ vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 4 * y_stride, y_stride, blim,
+ lim, hev_thr);
+ vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 8 * y_stride, y_stride, blim,
+ lim, hev_thr);
+ vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 12 * y_stride, y_stride, blim,
+ lim, hev_thr);
+
+ if (u_ptr)
+ vp8_loop_filter_horizontal_edge_uv_neon(u_ptr + 4 * uv_stride, uv_stride,
+ blim, lim, hev_thr,
+ v_ptr + 4 * uv_stride);
+}
+
+/* Vertical B Filtering */
+void vp8_loop_filter_bv_neon(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ unsigned char blim = *lfi->blim;
+ unsigned char lim = *lfi->lim;
+ unsigned char hev_thr = *lfi->hev_thr;
+
+ vp8_loop_filter_vertical_edge_y_neon(y_ptr + 4, y_stride, blim, lim, hev_thr);
+ vp8_loop_filter_vertical_edge_y_neon(y_ptr + 8, y_stride, blim, lim, hev_thr);
+ vp8_loop_filter_vertical_edge_y_neon(y_ptr + 12, y_stride, blim, lim,
+ hev_thr);
+
+ if (u_ptr)
+ vp8_loop_filter_vertical_edge_uv_neon(u_ptr + 4, uv_stride, blim, lim,
+ hev_thr, v_ptr + 4);
+}
diff --git a/media/libvpx/libvpx/vp8/common/arm/loopfilter_arm.h b/media/libvpx/libvpx/vp8/common/arm/loopfilter_arm.h
new file mode 100644
index 0000000000..6cf660d228
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/loopfilter_arm.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_ARM_LOOPFILTER_ARM_H_
+#define VPX_VP8_COMMON_ARM_LOOPFILTER_ARM_H_
+
+typedef void loopfilter_y_neon(unsigned char *src, int pitch,
+ unsigned char blimit, unsigned char limit,
+ unsigned char thresh);
+typedef void loopfilter_uv_neon(unsigned char *u, int pitch,
+ unsigned char blimit, unsigned char limit,
+ unsigned char thresh, unsigned char *v);
+
+loopfilter_y_neon vp8_loop_filter_horizontal_edge_y_neon;
+loopfilter_y_neon vp8_loop_filter_vertical_edge_y_neon;
+loopfilter_uv_neon vp8_loop_filter_horizontal_edge_uv_neon;
+loopfilter_uv_neon vp8_loop_filter_vertical_edge_uv_neon;
+
+loopfilter_y_neon vp8_mbloop_filter_horizontal_edge_y_neon;
+loopfilter_y_neon vp8_mbloop_filter_vertical_edge_y_neon;
+loopfilter_uv_neon vp8_mbloop_filter_horizontal_edge_uv_neon;
+loopfilter_uv_neon vp8_mbloop_filter_vertical_edge_uv_neon;
+
+#endif // VPX_VP8_COMMON_ARM_LOOPFILTER_ARM_H_
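
A side note on the declaration style above: loopfilter_y_neon and
loopfilter_uv_neon are function types, so each line such as
"loopfilter_y_neon vp8_loop_filter_horizontal_edge_y_neon;" declares a
function with that exact signature, keeping all eight prototypes in sync.
A minimal illustration of the same C idiom (names hypothetical):

/* A function type, then a function declared and defined through it. */
typedef int binop_fn(int a, int b);
binop_fn add_ints; /* equivalent to: int add_ints(int a, int b); */
int add_ints(int a, int b) { return a + b; }
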
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c
new file mode 100644
index 0000000000..590956dde1
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c
@@ -0,0 +1,764 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <string.h>
+
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
+#include "vpx_dsp/arm/mem_neon.h"
+
+static const uint8_t bifilter4_coeff[8][2] = { { 128, 0 }, { 112, 16 },
+ { 96, 32 }, { 80, 48 },
+ { 64, 64 }, { 48, 80 },
+ { 32, 96 }, { 16, 112 } };
+
+static INLINE uint8x8_t load_and_shift(const unsigned char *a) {
+ return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));
+}
+
+void vp8_bilinear_predict4x4_neon(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ uint8x8_t e0, e1, e2;
+
+ if (xoffset == 0) { // skip_1stpass_filter
+ uint8x8_t a0, a1, a2, a3, a4;
+
+ a0 = load_and_shift(src_ptr);
+ src_ptr += src_pixels_per_line;
+ a1 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ a2 = load_and_shift(src_ptr);
+ src_ptr += src_pixels_per_line;
+ a3 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ a4 = vld1_u8(src_ptr);
+
+ e0 = vext_u8(a0, a1, 4);
+ e1 = vext_u8(a2, a3, 4);
+ e2 = a4;
+ } else {
+ uint8x8_t a0, a1, a2, a3, a4, b4;
+ uint8x16_t a01, a23;
+ uint8x16_t b01, b23;
+ uint32x2x2_t c0, c1, c2, c3;
+ uint16x8_t d0, d1, d2;
+ const uint8x8_t filter0 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
+ const uint8x8_t filter1 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
+
+ a0 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ a1 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ a2 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ a3 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ a4 = vld1_u8(src_ptr);
+
+ a01 = vcombine_u8(a0, a1);
+ a23 = vcombine_u8(a2, a3);
+
+ b01 = vreinterpretq_u8_u64(vshrq_n_u64(vreinterpretq_u64_u8(a01), 8));
+ b23 = vreinterpretq_u8_u64(vshrq_n_u64(vreinterpretq_u64_u8(a23), 8));
+ b4 = vreinterpret_u8_u64(vshr_n_u64(vreinterpret_u64_u8(a4), 8));
+
+ c0 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a01)),
+ vreinterpret_u32_u8(vget_high_u8(a01)));
+ c1 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a23)),
+ vreinterpret_u32_u8(vget_high_u8(a23)));
+ c2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b01)),
+ vreinterpret_u32_u8(vget_high_u8(b01)));
+ c3 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b23)),
+ vreinterpret_u32_u8(vget_high_u8(b23)));
+
+ d0 = vmull_u8(vreinterpret_u8_u32(c0.val[0]), filter0);
+ d1 = vmull_u8(vreinterpret_u8_u32(c1.val[0]), filter0);
+ d2 = vmull_u8(a4, filter0);
+
+ d0 = vmlal_u8(d0, vreinterpret_u8_u32(c2.val[0]), filter1);
+ d1 = vmlal_u8(d1, vreinterpret_u8_u32(c3.val[0]), filter1);
+ d2 = vmlal_u8(d2, b4, filter1);
+
+ e0 = vqrshrn_n_u16(d0, 7);
+ e1 = vqrshrn_n_u16(d1, 7);
+ e2 = vqrshrn_n_u16(d2, 7);
+ }
+
+ // secondpass_filter
+ if (yoffset == 0) { // skip_2ndpass_filter
+ store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1));
+ } else {
+ uint8x8_t f0, f1;
+ const uint8x8_t filter0 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
+ const uint8x8_t filter1 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
+
+ uint16x8_t b0 = vmull_u8(e0, filter0);
+ uint16x8_t b1 = vmull_u8(e1, filter0);
+
+ const uint8x8_t a0 = vext_u8(e0, e1, 4);
+ const uint8x8_t a1 = vext_u8(e1, e2, 4);
+
+ b0 = vmlal_u8(b0, a0, filter1);
+ b1 = vmlal_u8(b1, a1, filter1);
+
+ f0 = vqrshrn_n_u16(b0, 7);
+ f1 = vqrshrn_n_u16(b1, 7);
+
+ store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(f0, f1));
+ }
+}
+
+void vp8_bilinear_predict8x4_neon(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8;
+ uint8x8_t d7u8, d9u8, d11u8, d22u8, d23u8, d24u8, d25u8, d26u8;
+ uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8;
+ uint16x8_t q1u16, q2u16, q3u16, q4u16;
+ uint16x8_t q6u16, q7u16, q8u16, q9u16, q10u16;
+
+ if (xoffset == 0) { // skip_1stpass_filter
+ d22u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d23u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d24u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d25u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d26u8 = vld1_u8(src_ptr);
+ } else {
+ q1u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q2u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q3u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q4u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q5u8 = vld1q_u8(src_ptr);
+
+ d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
+ d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
+
+ q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
+ q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
+ q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
+ q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
+ q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
+
+ d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
+ d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
+ d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
+ d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
+ d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
+
+ q6u16 = vmlal_u8(q6u16, d3u8, d1u8);
+ q7u16 = vmlal_u8(q7u16, d5u8, d1u8);
+ q8u16 = vmlal_u8(q8u16, d7u8, d1u8);
+ q9u16 = vmlal_u8(q9u16, d9u8, d1u8);
+ q10u16 = vmlal_u8(q10u16, d11u8, d1u8);
+
+ d22u8 = vqrshrn_n_u16(q6u16, 7);
+ d23u8 = vqrshrn_n_u16(q7u16, 7);
+ d24u8 = vqrshrn_n_u16(q8u16, 7);
+ d25u8 = vqrshrn_n_u16(q9u16, 7);
+ d26u8 = vqrshrn_n_u16(q10u16, 7);
+ }
+
+ // secondpass_filter
+ if (yoffset == 0) { // skip_2ndpass_filter
+ vst1_u8((uint8_t *)dst_ptr, d22u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d23u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d24u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d25u8);
+ } else {
+ d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
+ d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
+
+ q1u16 = vmull_u8(d22u8, d0u8);
+ q2u16 = vmull_u8(d23u8, d0u8);
+ q3u16 = vmull_u8(d24u8, d0u8);
+ q4u16 = vmull_u8(d25u8, d0u8);
+
+ q1u16 = vmlal_u8(q1u16, d23u8, d1u8);
+ q2u16 = vmlal_u8(q2u16, d24u8, d1u8);
+ q3u16 = vmlal_u8(q3u16, d25u8, d1u8);
+ q4u16 = vmlal_u8(q4u16, d26u8, d1u8);
+
+ d2u8 = vqrshrn_n_u16(q1u16, 7);
+ d3u8 = vqrshrn_n_u16(q2u16, 7);
+ d4u8 = vqrshrn_n_u16(q3u16, 7);
+ d5u8 = vqrshrn_n_u16(q4u16, 7);
+
+ vst1_u8((uint8_t *)dst_ptr, d2u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d3u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d4u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d5u8);
+ }
+ return;
+}
+
+void vp8_bilinear_predict8x8_neon(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8, d11u8;
+ uint8x8_t d22u8, d23u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8, d30u8;
+ uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8;
+ uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16;
+ uint16x8_t q6u16, q7u16, q8u16, q9u16, q10u16;
+
+ if (xoffset == 0) { // skip_1stpass_filter
+ d22u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d23u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d24u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d25u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d26u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d27u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d28u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d29u8 = vld1_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ d30u8 = vld1_u8(src_ptr);
+ } else {
+ q1u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q2u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q3u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q4u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+
+ d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
+ d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
+
+ q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
+ q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
+ q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
+ q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
+
+ d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
+ d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
+ d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
+ d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
+
+ q6u16 = vmlal_u8(q6u16, d3u8, d1u8);
+ q7u16 = vmlal_u8(q7u16, d5u8, d1u8);
+ q8u16 = vmlal_u8(q8u16, d7u8, d1u8);
+ q9u16 = vmlal_u8(q9u16, d9u8, d1u8);
+
+ d22u8 = vqrshrn_n_u16(q6u16, 7);
+ d23u8 = vqrshrn_n_u16(q7u16, 7);
+ d24u8 = vqrshrn_n_u16(q8u16, 7);
+ d25u8 = vqrshrn_n_u16(q9u16, 7);
+
+    // first-pass filtering on the remaining 5 lines of data
+ q1u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q2u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q3u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q4u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q5u8 = vld1q_u8(src_ptr);
+
+ q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
+ q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
+ q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
+ q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
+ q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
+
+ d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
+ d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
+ d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
+ d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
+ d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
+
+ q6u16 = vmlal_u8(q6u16, d3u8, d1u8);
+ q7u16 = vmlal_u8(q7u16, d5u8, d1u8);
+ q8u16 = vmlal_u8(q8u16, d7u8, d1u8);
+ q9u16 = vmlal_u8(q9u16, d9u8, d1u8);
+ q10u16 = vmlal_u8(q10u16, d11u8, d1u8);
+
+ d26u8 = vqrshrn_n_u16(q6u16, 7);
+ d27u8 = vqrshrn_n_u16(q7u16, 7);
+ d28u8 = vqrshrn_n_u16(q8u16, 7);
+ d29u8 = vqrshrn_n_u16(q9u16, 7);
+ d30u8 = vqrshrn_n_u16(q10u16, 7);
+ }
+
+ // secondpass_filter
+ if (yoffset == 0) { // skip_2ndpass_filter
+ vst1_u8((uint8_t *)dst_ptr, d22u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d23u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d24u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d25u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d26u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d27u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d28u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d29u8);
+ } else {
+ d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
+ d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
+
+ q1u16 = vmull_u8(d22u8, d0u8);
+ q2u16 = vmull_u8(d23u8, d0u8);
+ q3u16 = vmull_u8(d24u8, d0u8);
+ q4u16 = vmull_u8(d25u8, d0u8);
+ q5u16 = vmull_u8(d26u8, d0u8);
+ q6u16 = vmull_u8(d27u8, d0u8);
+ q7u16 = vmull_u8(d28u8, d0u8);
+ q8u16 = vmull_u8(d29u8, d0u8);
+
+ q1u16 = vmlal_u8(q1u16, d23u8, d1u8);
+ q2u16 = vmlal_u8(q2u16, d24u8, d1u8);
+ q3u16 = vmlal_u8(q3u16, d25u8, d1u8);
+ q4u16 = vmlal_u8(q4u16, d26u8, d1u8);
+ q5u16 = vmlal_u8(q5u16, d27u8, d1u8);
+ q6u16 = vmlal_u8(q6u16, d28u8, d1u8);
+ q7u16 = vmlal_u8(q7u16, d29u8, d1u8);
+ q8u16 = vmlal_u8(q8u16, d30u8, d1u8);
+
+ d2u8 = vqrshrn_n_u16(q1u16, 7);
+ d3u8 = vqrshrn_n_u16(q2u16, 7);
+ d4u8 = vqrshrn_n_u16(q3u16, 7);
+ d5u8 = vqrshrn_n_u16(q4u16, 7);
+ d6u8 = vqrshrn_n_u16(q5u16, 7);
+ d7u8 = vqrshrn_n_u16(q6u16, 7);
+ d8u8 = vqrshrn_n_u16(q7u16, 7);
+ d9u8 = vqrshrn_n_u16(q8u16, 7);
+
+ vst1_u8((uint8_t *)dst_ptr, d2u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d3u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d4u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d5u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d6u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d7u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d8u8);
+ dst_ptr += dst_pitch;
+ vst1_u8((uint8_t *)dst_ptr, d9u8);
+ }
+ return;
+}
+
+void vp8_bilinear_predict16x16_neon(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ int i;
+ unsigned char tmp[272];
+ unsigned char *tmpp;
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
+ uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d16u8, d17u8, d18u8;
+ uint8x8_t d19u8, d20u8, d21u8;
+ uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8, q10u8;
+ uint8x16_t q11u8, q12u8, q13u8, q14u8, q15u8;
+ uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16, q6u16, q7u16, q8u16;
+ uint16x8_t q9u16, q10u16, q11u16, q12u16, q13u16, q14u16;
+
+ if (xoffset == 0) { // secondpass_bfilter16x16_only
+ d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
+ d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
+
+ q11u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ for (i = 4; i > 0; i--) {
+ q12u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q13u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q14u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ q15u8 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+
+ q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
+ q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
+ q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
+ q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
+ q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
+ q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
+ q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
+ q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);
+
+ q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8);
+ q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8);
+ q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8);
+ q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8);
+ q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8);
+ q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8);
+ q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8);
+ q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8);
+
+ d2u8 = vqrshrn_n_u16(q1u16, 7);
+ d3u8 = vqrshrn_n_u16(q2u16, 7);
+ d4u8 = vqrshrn_n_u16(q3u16, 7);
+ d5u8 = vqrshrn_n_u16(q4u16, 7);
+ d6u8 = vqrshrn_n_u16(q5u16, 7);
+ d7u8 = vqrshrn_n_u16(q6u16, 7);
+ d8u8 = vqrshrn_n_u16(q7u16, 7);
+ d9u8 = vqrshrn_n_u16(q8u16, 7);
+
+ q1u8 = vcombine_u8(d2u8, d3u8);
+ q2u8 = vcombine_u8(d4u8, d5u8);
+ q3u8 = vcombine_u8(d6u8, d7u8);
+ q4u8 = vcombine_u8(d8u8, d9u8);
+
+ q11u8 = q15u8;
+
+ vst1q_u8((uint8_t *)dst_ptr, q1u8);
+ dst_ptr += dst_pitch;
+ vst1q_u8((uint8_t *)dst_ptr, q2u8);
+ dst_ptr += dst_pitch;
+ vst1q_u8((uint8_t *)dst_ptr, q3u8);
+ dst_ptr += dst_pitch;
+ vst1q_u8((uint8_t *)dst_ptr, q4u8);
+ dst_ptr += dst_pitch;
+ }
+ return;
+ }
+
+ if (yoffset == 0) { // firstpass_bfilter16x16_only
+ d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
+ d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
+
+ for (i = 4; i > 0; i--) {
+ d2u8 = vld1_u8(src_ptr);
+ d3u8 = vld1_u8(src_ptr + 8);
+ d4u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+ d5u8 = vld1_u8(src_ptr);
+ d6u8 = vld1_u8(src_ptr + 8);
+ d7u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+ d8u8 = vld1_u8(src_ptr);
+ d9u8 = vld1_u8(src_ptr + 8);
+ d10u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+ d11u8 = vld1_u8(src_ptr);
+ d12u8 = vld1_u8(src_ptr + 8);
+ d13u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+
+ q7u16 = vmull_u8(d2u8, d0u8);
+ q8u16 = vmull_u8(d3u8, d0u8);
+ q9u16 = vmull_u8(d5u8, d0u8);
+ q10u16 = vmull_u8(d6u8, d0u8);
+ q11u16 = vmull_u8(d8u8, d0u8);
+ q12u16 = vmull_u8(d9u8, d0u8);
+ q13u16 = vmull_u8(d11u8, d0u8);
+ q14u16 = vmull_u8(d12u8, d0u8);
+
+ d2u8 = vext_u8(d2u8, d3u8, 1);
+ d5u8 = vext_u8(d5u8, d6u8, 1);
+ d8u8 = vext_u8(d8u8, d9u8, 1);
+ d11u8 = vext_u8(d11u8, d12u8, 1);
+
+ q7u16 = vmlal_u8(q7u16, d2u8, d1u8);
+ q9u16 = vmlal_u8(q9u16, d5u8, d1u8);
+ q11u16 = vmlal_u8(q11u16, d8u8, d1u8);
+ q13u16 = vmlal_u8(q13u16, d11u8, d1u8);
+
+ d3u8 = vext_u8(d3u8, d4u8, 1);
+ d6u8 = vext_u8(d6u8, d7u8, 1);
+ d9u8 = vext_u8(d9u8, d10u8, 1);
+ d12u8 = vext_u8(d12u8, d13u8, 1);
+
+ q8u16 = vmlal_u8(q8u16, d3u8, d1u8);
+ q10u16 = vmlal_u8(q10u16, d6u8, d1u8);
+ q12u16 = vmlal_u8(q12u16, d9u8, d1u8);
+ q14u16 = vmlal_u8(q14u16, d12u8, d1u8);
+
+ d14u8 = vqrshrn_n_u16(q7u16, 7);
+ d15u8 = vqrshrn_n_u16(q8u16, 7);
+ d16u8 = vqrshrn_n_u16(q9u16, 7);
+ d17u8 = vqrshrn_n_u16(q10u16, 7);
+ d18u8 = vqrshrn_n_u16(q11u16, 7);
+ d19u8 = vqrshrn_n_u16(q12u16, 7);
+ d20u8 = vqrshrn_n_u16(q13u16, 7);
+ d21u8 = vqrshrn_n_u16(q14u16, 7);
+
+ q7u8 = vcombine_u8(d14u8, d15u8);
+ q8u8 = vcombine_u8(d16u8, d17u8);
+ q9u8 = vcombine_u8(d18u8, d19u8);
+ q10u8 = vcombine_u8(d20u8, d21u8);
+
+ vst1q_u8((uint8_t *)dst_ptr, q7u8);
+ dst_ptr += dst_pitch;
+ vst1q_u8((uint8_t *)dst_ptr, q8u8);
+ dst_ptr += dst_pitch;
+ vst1q_u8((uint8_t *)dst_ptr, q9u8);
+ dst_ptr += dst_pitch;
+ vst1q_u8((uint8_t *)dst_ptr, q10u8);
+ dst_ptr += dst_pitch;
+ }
+ return;
+ }
+
+ d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
+ d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
+
+ d2u8 = vld1_u8(src_ptr);
+ d3u8 = vld1_u8(src_ptr + 8);
+ d4u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+ d5u8 = vld1_u8(src_ptr);
+ d6u8 = vld1_u8(src_ptr + 8);
+ d7u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+ d8u8 = vld1_u8(src_ptr);
+ d9u8 = vld1_u8(src_ptr + 8);
+ d10u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+ d11u8 = vld1_u8(src_ptr);
+ d12u8 = vld1_u8(src_ptr + 8);
+ d13u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+
+ // First Pass: output_height lines x output_width columns (17x16)
+ tmpp = tmp;
+ for (i = 3; i > 0; i--) {
+ q7u16 = vmull_u8(d2u8, d0u8);
+ q8u16 = vmull_u8(d3u8, d0u8);
+ q9u16 = vmull_u8(d5u8, d0u8);
+ q10u16 = vmull_u8(d6u8, d0u8);
+ q11u16 = vmull_u8(d8u8, d0u8);
+ q12u16 = vmull_u8(d9u8, d0u8);
+ q13u16 = vmull_u8(d11u8, d0u8);
+ q14u16 = vmull_u8(d12u8, d0u8);
+
+ d2u8 = vext_u8(d2u8, d3u8, 1);
+ d5u8 = vext_u8(d5u8, d6u8, 1);
+ d8u8 = vext_u8(d8u8, d9u8, 1);
+ d11u8 = vext_u8(d11u8, d12u8, 1);
+
+ q7u16 = vmlal_u8(q7u16, d2u8, d1u8);
+ q9u16 = vmlal_u8(q9u16, d5u8, d1u8);
+ q11u16 = vmlal_u8(q11u16, d8u8, d1u8);
+ q13u16 = vmlal_u8(q13u16, d11u8, d1u8);
+
+ d3u8 = vext_u8(d3u8, d4u8, 1);
+ d6u8 = vext_u8(d6u8, d7u8, 1);
+ d9u8 = vext_u8(d9u8, d10u8, 1);
+ d12u8 = vext_u8(d12u8, d13u8, 1);
+
+ q8u16 = vmlal_u8(q8u16, d3u8, d1u8);
+ q10u16 = vmlal_u8(q10u16, d6u8, d1u8);
+ q12u16 = vmlal_u8(q12u16, d9u8, d1u8);
+ q14u16 = vmlal_u8(q14u16, d12u8, d1u8);
+
+ d14u8 = vqrshrn_n_u16(q7u16, 7);
+ d15u8 = vqrshrn_n_u16(q8u16, 7);
+ d16u8 = vqrshrn_n_u16(q9u16, 7);
+ d17u8 = vqrshrn_n_u16(q10u16, 7);
+ d18u8 = vqrshrn_n_u16(q11u16, 7);
+ d19u8 = vqrshrn_n_u16(q12u16, 7);
+ d20u8 = vqrshrn_n_u16(q13u16, 7);
+ d21u8 = vqrshrn_n_u16(q14u16, 7);
+
+ d2u8 = vld1_u8(src_ptr);
+ d3u8 = vld1_u8(src_ptr + 8);
+ d4u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+ d5u8 = vld1_u8(src_ptr);
+ d6u8 = vld1_u8(src_ptr + 8);
+ d7u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+ d8u8 = vld1_u8(src_ptr);
+ d9u8 = vld1_u8(src_ptr + 8);
+ d10u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+ d11u8 = vld1_u8(src_ptr);
+ d12u8 = vld1_u8(src_ptr + 8);
+ d13u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+
+ q7u8 = vcombine_u8(d14u8, d15u8);
+ q8u8 = vcombine_u8(d16u8, d17u8);
+ q9u8 = vcombine_u8(d18u8, d19u8);
+ q10u8 = vcombine_u8(d20u8, d21u8);
+
+ vst1q_u8((uint8_t *)tmpp, q7u8);
+ tmpp += 16;
+ vst1q_u8((uint8_t *)tmpp, q8u8);
+ tmpp += 16;
+ vst1q_u8((uint8_t *)tmpp, q9u8);
+ tmpp += 16;
+ vst1q_u8((uint8_t *)tmpp, q10u8);
+ tmpp += 16;
+ }
+
+  // First-pass filtering for the remaining 5 lines
+ d14u8 = vld1_u8(src_ptr);
+ d15u8 = vld1_u8(src_ptr + 8);
+ d16u8 = vld1_u8(src_ptr + 16);
+ src_ptr += src_pixels_per_line;
+
+ q9u16 = vmull_u8(d2u8, d0u8);
+ q10u16 = vmull_u8(d3u8, d0u8);
+ q11u16 = vmull_u8(d5u8, d0u8);
+ q12u16 = vmull_u8(d6u8, d0u8);
+ q13u16 = vmull_u8(d8u8, d0u8);
+ q14u16 = vmull_u8(d9u8, d0u8);
+
+ d2u8 = vext_u8(d2u8, d3u8, 1);
+ d5u8 = vext_u8(d5u8, d6u8, 1);
+ d8u8 = vext_u8(d8u8, d9u8, 1);
+
+ q9u16 = vmlal_u8(q9u16, d2u8, d1u8);
+ q11u16 = vmlal_u8(q11u16, d5u8, d1u8);
+ q13u16 = vmlal_u8(q13u16, d8u8, d1u8);
+
+ d3u8 = vext_u8(d3u8, d4u8, 1);
+ d6u8 = vext_u8(d6u8, d7u8, 1);
+ d9u8 = vext_u8(d9u8, d10u8, 1);
+
+ q10u16 = vmlal_u8(q10u16, d3u8, d1u8);
+ q12u16 = vmlal_u8(q12u16, d6u8, d1u8);
+ q14u16 = vmlal_u8(q14u16, d9u8, d1u8);
+
+ q1u16 = vmull_u8(d11u8, d0u8);
+ q2u16 = vmull_u8(d12u8, d0u8);
+ q3u16 = vmull_u8(d14u8, d0u8);
+ q4u16 = vmull_u8(d15u8, d0u8);
+
+ d11u8 = vext_u8(d11u8, d12u8, 1);
+ d14u8 = vext_u8(d14u8, d15u8, 1);
+
+ q1u16 = vmlal_u8(q1u16, d11u8, d1u8);
+ q3u16 = vmlal_u8(q3u16, d14u8, d1u8);
+
+ d12u8 = vext_u8(d12u8, d13u8, 1);
+ d15u8 = vext_u8(d15u8, d16u8, 1);
+
+ q2u16 = vmlal_u8(q2u16, d12u8, d1u8);
+ q4u16 = vmlal_u8(q4u16, d15u8, d1u8);
+
+ d10u8 = vqrshrn_n_u16(q9u16, 7);
+ d11u8 = vqrshrn_n_u16(q10u16, 7);
+ d12u8 = vqrshrn_n_u16(q11u16, 7);
+ d13u8 = vqrshrn_n_u16(q12u16, 7);
+ d14u8 = vqrshrn_n_u16(q13u16, 7);
+ d15u8 = vqrshrn_n_u16(q14u16, 7);
+ d16u8 = vqrshrn_n_u16(q1u16, 7);
+ d17u8 = vqrshrn_n_u16(q2u16, 7);
+ d18u8 = vqrshrn_n_u16(q3u16, 7);
+ d19u8 = vqrshrn_n_u16(q4u16, 7);
+
+ q5u8 = vcombine_u8(d10u8, d11u8);
+ q6u8 = vcombine_u8(d12u8, d13u8);
+ q7u8 = vcombine_u8(d14u8, d15u8);
+ q8u8 = vcombine_u8(d16u8, d17u8);
+ q9u8 = vcombine_u8(d18u8, d19u8);
+
+ vst1q_u8((uint8_t *)tmpp, q5u8);
+ tmpp += 16;
+ vst1q_u8((uint8_t *)tmpp, q6u8);
+ tmpp += 16;
+ vst1q_u8((uint8_t *)tmpp, q7u8);
+ tmpp += 16;
+ vst1q_u8((uint8_t *)tmpp, q8u8);
+ tmpp += 16;
+ vst1q_u8((uint8_t *)tmpp, q9u8);
+
+ // secondpass_filter
+ d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
+ d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
+
+ tmpp = tmp;
+ q11u8 = vld1q_u8(tmpp);
+ tmpp += 16;
+ for (i = 4; i > 0; i--) {
+ q12u8 = vld1q_u8(tmpp);
+ tmpp += 16;
+ q13u8 = vld1q_u8(tmpp);
+ tmpp += 16;
+ q14u8 = vld1q_u8(tmpp);
+ tmpp += 16;
+ q15u8 = vld1q_u8(tmpp);
+ tmpp += 16;
+
+ q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
+ q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
+ q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
+ q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
+ q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
+ q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
+ q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
+ q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);
+
+ q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8);
+ q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8);
+ q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8);
+ q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8);
+ q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8);
+ q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8);
+ q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8);
+ q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8);
+
+ d2u8 = vqrshrn_n_u16(q1u16, 7);
+ d3u8 = vqrshrn_n_u16(q2u16, 7);
+ d4u8 = vqrshrn_n_u16(q3u16, 7);
+ d5u8 = vqrshrn_n_u16(q4u16, 7);
+ d6u8 = vqrshrn_n_u16(q5u16, 7);
+ d7u8 = vqrshrn_n_u16(q6u16, 7);
+ d8u8 = vqrshrn_n_u16(q7u16, 7);
+ d9u8 = vqrshrn_n_u16(q8u16, 7);
+
+ q1u8 = vcombine_u8(d2u8, d3u8);
+ q2u8 = vcombine_u8(d4u8, d5u8);
+ q3u8 = vcombine_u8(d6u8, d7u8);
+ q4u8 = vcombine_u8(d8u8, d9u8);
+
+ q11u8 = q15u8;
+
+ vst1q_u8((uint8_t *)dst_ptr, q1u8);
+ dst_ptr += dst_pitch;
+ vst1q_u8((uint8_t *)dst_ptr, q2u8);
+ dst_ptr += dst_pitch;
+ vst1q_u8((uint8_t *)dst_ptr, q3u8);
+ dst_ptr += dst_pitch;
+ vst1q_u8((uint8_t *)dst_ptr, q4u8);
+ dst_ptr += dst_pitch;
+ }
+ return;
+}
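
All four predictors above implement VP8's two-tap bilinear sub-pel filter: a
horizontal pass with bifilter4_coeff[xoffset] followed by a vertical pass with
bifilter4_coeff[yoffset], each tap pair summing to 128 so results are rounded
and narrowed with a 7-bit shift. A scalar sketch of one horizontal-pass row
(illustrative only, not the library's C fallback; it reuses the coefficient
table defined at the top of the file):

/* One row of the first pass: matches vmull/vmlal + vqrshrn_n_u16(x, 7).
 * Because the taps sum to 128, the maximum sum is 255 * 128 and
 * (sum + 64) >> 7 always fits in a byte without clamping. */
static void bilinear_first_pass_row(const unsigned char *src,
                                    unsigned char *dst, int width,
                                    int xoffset) {
  const int f0 = bifilter4_coeff[xoffset][0];
  const int f1 = bifilter4_coeff[xoffset][1];
  int c;
  for (c = 0; c < width; ++c)
    dst[c] = (unsigned char)((src[c] * f0 + src[c + 1] * f1 + 64) >> 7);
}
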
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/copymem_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/copymem_neon.c
new file mode 100644
index 0000000000..c89b47d628
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/copymem_neon.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp8_rtcd.h"
+
+void vp8_copy_mem8x4_neon(unsigned char *src, int src_stride,
+ unsigned char *dst, int dst_stride) {
+ uint8x8_t vtmp;
+ int r;
+
+ for (r = 0; r < 4; ++r) {
+ vtmp = vld1_u8(src);
+ vst1_u8(dst, vtmp);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void vp8_copy_mem8x8_neon(unsigned char *src, int src_stride,
+ unsigned char *dst, int dst_stride) {
+ uint8x8_t vtmp;
+ int r;
+
+ for (r = 0; r < 8; ++r) {
+ vtmp = vld1_u8(src);
+ vst1_u8(dst, vtmp);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void vp8_copy_mem16x16_neon(unsigned char *src, int src_stride,
+ unsigned char *dst, int dst_stride) {
+ int r;
+ uint8x16_t qtmp;
+
+ for (r = 0; r < 16; ++r) {
+ qtmp = vld1q_u8(src);
+ vst1q_u8(dst, qtmp);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c
new file mode 100644
index 0000000000..d12c3a8392
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp8_rtcd.h"
+
+void vp8_dc_only_idct_add_neon(int16_t input_dc, unsigned char *pred_ptr,
+ int pred_stride, unsigned char *dst_ptr,
+ int dst_stride) {
+ int i;
+ uint16_t a1 = ((input_dc + 4) >> 3);
+ uint32x2_t d2u32 = vdup_n_u32(0);
+ uint8x8_t d2u8;
+ uint16x8_t q1u16;
+ uint16x8_t qAdd;
+
+ qAdd = vdupq_n_u16(a1);
+
+ for (i = 0; i < 2; ++i) {
+ d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 0);
+ pred_ptr += pred_stride;
+ d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 1);
+ pred_ptr += pred_stride;
+
+ q1u16 = vaddw_u8(qAdd, vreinterpret_u8_u32(d2u32));
+ d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
+
+ vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0);
+ dst_ptr += dst_stride;
+ vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1);
+ dst_ptr += dst_stride;
+ }
+}
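
What the intrinsics above compute, in scalar form: the DC coefficient is
rounded through the final IDCT stage ((dc + 4) >> 3) and added to every pixel
of the 4x4 prediction, with vqmovun_s16 supplying the unsigned-8 saturation.
A sketch of that equivalent (helper names are illustrative):

static unsigned char clamp_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

static void dc_only_idct_add_sketch(short input_dc, const unsigned char *pred,
                                    int pred_stride, unsigned char *dst,
                                    int dst_stride) {
  const int a1 = (input_dc + 4) >> 3;
  int r, c;
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) dst[c] = clamp_u8(pred[c] + a1);
    pred += pred_stride;
    dst += dst_stride;
  }
}
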
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/dequant_idct_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/dequant_idct_neon.c
new file mode 100644
index 0000000000..5445f2965a
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/dequant_idct_neon.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp8_rtcd.h"
+
+static const int16_t cospi8sqrt2minus1 = 20091;
+// 35468 exceeds INT16_MAX and gets converted to a negative number. Because of
+// the way it is used in vqdmulh, where the result is doubled, it can be divided
+// by 2 beforehand. This saves compensating for the negative value as well as
+// shifting the result.
+static const int16_t sinpi8sqrt2 = 35468 >> 1;
+
+void vp8_dequant_idct_add_neon(int16_t *input, int16_t *dq, unsigned char *dst,
+ int stride) {
+ unsigned char *dst0;
+ int32x2_t d14, d15;
+ int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
+ int16x8_t q1, q2, q3, q4, q5, q6;
+ int16x8_t qEmpty = vdupq_n_s16(0);
+ int32x2x2_t d2tmp0, d2tmp1;
+ int16x4x2_t d2tmp2, d2tmp3;
+
+ d14 = d15 = vdup_n_s32(0);
+
+ // load input
+ q3 = vld1q_s16(input);
+ vst1q_s16(input, qEmpty);
+ input += 8;
+ q4 = vld1q_s16(input);
+ vst1q_s16(input, qEmpty);
+
+ // load dq
+ q5 = vld1q_s16(dq);
+ dq += 8;
+ q6 = vld1q_s16(dq);
+
+ // load src from dst
+ dst0 = dst;
+ d14 = vld1_lane_s32((const int32_t *)dst0, d14, 0);
+ dst0 += stride;
+ d14 = vld1_lane_s32((const int32_t *)dst0, d14, 1);
+ dst0 += stride;
+ d15 = vld1_lane_s32((const int32_t *)dst0, d15, 0);
+ dst0 += stride;
+ d15 = vld1_lane_s32((const int32_t *)dst0, d15, 1);
+
+ q1 = vreinterpretq_s16_u16(
+ vmulq_u16(vreinterpretq_u16_s16(q3), vreinterpretq_u16_s16(q5)));
+ q2 = vreinterpretq_s16_u16(
+ vmulq_u16(vreinterpretq_u16_s16(q4), vreinterpretq_u16_s16(q6)));
+
+ d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2));
+ d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2));
+
+ q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2));
+
+ q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
+ q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
+
+ q4 = vshrq_n_s16(q4, 1);
+
+ q4 = vqaddq_s16(q4, q2);
+
+ d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
+ d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
+
+ d2 = vqadd_s16(d12, d11);
+ d3 = vqadd_s16(d13, d10);
+ d4 = vqsub_s16(d13, d10);
+ d5 = vqsub_s16(d12, d11);
+
+ d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
+ d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
+ d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
+ vreinterpret_s16_s32(d2tmp1.val[0]));
+ d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
+ vreinterpret_s16_s32(d2tmp1.val[1]));
+
+ // loop 2
+ q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]);
+
+ q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
+ q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
+
+ d12 = vqadd_s16(d2tmp2.val[0], d2tmp3.val[0]);
+ d13 = vqsub_s16(d2tmp2.val[0], d2tmp3.val[0]);
+
+ q4 = vshrq_n_s16(q4, 1);
+
+ q4 = vqaddq_s16(q4, q2);
+
+ d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
+ d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
+
+ d2 = vqadd_s16(d12, d11);
+ d3 = vqadd_s16(d13, d10);
+ d4 = vqsub_s16(d13, d10);
+ d5 = vqsub_s16(d12, d11);
+
+ d2 = vrshr_n_s16(d2, 3);
+ d3 = vrshr_n_s16(d3, 3);
+ d4 = vrshr_n_s16(d4, 3);
+ d5 = vrshr_n_s16(d5, 3);
+
+ d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
+ d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
+ d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
+ vreinterpret_s16_s32(d2tmp1.val[0]));
+ d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
+ vreinterpret_s16_s32(d2tmp1.val[1]));
+
+ q1 = vcombine_s16(d2tmp2.val[0], d2tmp2.val[1]);
+ q2 = vcombine_s16(d2tmp3.val[0], d2tmp3.val[1]);
+
+ q1 = vreinterpretq_s16_u16(
+ vaddw_u8(vreinterpretq_u16_s16(q1), vreinterpret_u8_s32(d14)));
+ q2 = vreinterpretq_s16_u16(
+ vaddw_u8(vreinterpretq_u16_s16(q2), vreinterpret_u8_s32(d15)));
+
+ d14 = vreinterpret_s32_u8(vqmovun_s16(q1));
+ d15 = vreinterpret_s32_u8(vqmovun_s16(q2));
+
+ dst0 = dst;
+ vst1_lane_s32((int32_t *)dst0, d14, 0);
+ dst0 += stride;
+ vst1_lane_s32((int32_t *)dst0, d14, 1);
+ dst0 += stride;
+ vst1_lane_s32((int32_t *)dst0, d15, 0);
+ dst0 += stride;
+ vst1_lane_s32((int32_t *)dst0, d15, 1);
+ return;
+}
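
The constant trick described in the comment above sinpi8sqrt2 rests on the
vqdmulh semantics: vqdmulhq_n_s16(x, c) returns sat16((2 * x * c) >> 16). A
scalar model of why the pre-shifted constant works (saturation, which only
triggers for x = c = -32768, is ignored here):

/* vqdmulh doubles the product, so storing 35468 >> 1 = 17734 yields exactly
 * (x * 35468) >> 16. For cospi8sqrt2minus1 = 20091 the constant already fits
 * in int16, and the extra factor of two is undone by the vshrq_n_s16(q4, 1)
 * that follows each multiply above. */
static short vqdmulh_model(short x, short c) {
  return (short)(((int)x * c * 2) >> 16);
}
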
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/dequantizeb_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/dequantizeb_neon.c
new file mode 100644
index 0000000000..791aaea2ae
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/dequantizeb_neon.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/blockd.h"
+
+void vp8_dequantize_b_neon(BLOCKD *d, short *DQC) {
+ int16x8x2_t qQ, qDQC, qDQ;
+
+ qQ = vld2q_s16(d->qcoeff);
+ qDQC = vld2q_s16(DQC);
+
+ qDQ.val[0] = vmulq_s16(qQ.val[0], qDQC.val[0]);
+ qDQ.val[1] = vmulq_s16(qQ.val[1], qDQC.val[1]);
+
+ vst2q_s16(d->dqcoeff, qDQ);
+}
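+
+// The vld2q/vst2q pairs above de-interleave and re-interleave the 16
+// coefficients, but since the multiply is purely elementwise the
+// interleaving cancels out: the net effect is a plain per-coefficient
+// product. A minimal scalar sketch of the same operation (the shipped C
+// fallback is vp8_dequantize_b_c in vp8/common/dequantize.c), kept out of
+// the build:
+#if 0
+static void dequantize_b_sketch(const short *qcoeff, const short *DQC,
+                                short *dqcoeff) {
+  int i;
+  for (i = 0; i < 16; ++i) dqcoeff[i] = qcoeff[i] * DQC[i];
+}
+#endif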
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/idct_blk_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/idct_blk_neon.c
new file mode 100644
index 0000000000..5c26ce67a4
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/idct_blk_neon.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp8_rtcd.h"
+
+static void idct_dequant_0_2x_neon(int16_t *q, int16_t dq, unsigned char *dst,
+ int stride) {
+ unsigned char *dst0;
+ int i, a0, a1;
+ int16x8x2_t q2Add;
+ int32x2_t d2s32 = vdup_n_s32(0), d4s32 = vdup_n_s32(0);
+ uint8x8_t d2u8, d4u8;
+ uint16x8_t q1u16, q2u16;
+
+ a0 = ((q[0] * dq) + 4) >> 3;
+ a1 = ((q[16] * dq) + 4) >> 3;
+ q[0] = q[16] = 0;
+ q2Add.val[0] = vdupq_n_s16((int16_t)a0);
+ q2Add.val[1] = vdupq_n_s16((int16_t)a1);
+
+ for (i = 0; i < 2; i++, dst += 4) {
+ dst0 = dst;
+ d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 0);
+ dst0 += stride;
+ d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 1);
+ dst0 += stride;
+ d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 0);
+ dst0 += stride;
+ d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 1);
+
+ q1u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
+ vreinterpret_u8_s32(d2s32));
+ q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
+ vreinterpret_u8_s32(d4s32));
+
+ d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
+ d4u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16));
+
+ d2s32 = vreinterpret_s32_u8(d2u8);
+ d4s32 = vreinterpret_s32_u8(d4u8);
+
+ dst0 = dst;
+ vst1_lane_s32((int32_t *)dst0, d2s32, 0);
+ dst0 += stride;
+ vst1_lane_s32((int32_t *)dst0, d2s32, 1);
+ dst0 += stride;
+ vst1_lane_s32((int32_t *)dst0, d4s32, 0);
+ dst0 += stride;
+ vst1_lane_s32((int32_t *)dst0, d4s32, 1);
+ }
+}
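+
+// When at most the DC coefficient is coded, the whole 4x4 inverse transform
+// collapses to adding one rounded constant to every pixel, which is what the
+// function above does for two adjacent blocks at once. A minimal sketch of
+// the single-block case (cf. vp8_dc_only_idct_add_c), kept out of the build:
+#if 0
+static void dc_only_add_sketch(short q0, short dq, unsigned char *dst,
+                               int stride) {
+  int r, c;
+  const int a = ((q0 * dq) + 4) >> 3;  // dequantized DC through IDCT+rounding
+  for (r = 0; r < 4; ++r) {
+    for (c = 0; c < 4; ++c) {
+      const int v = dst[c] + a;
+      dst[c] = (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v);
+    }
+    dst += stride;
+  }
+}
+#endif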
+
+static const int16_t cospi8sqrt2minus1 = 20091;
+// sinpi8sqrt2 is stored pre-shifted: 17734 == 35468 (0x8a8c) >> 1. The
+// lowest bit of 0x8a8c is 0, so halving loses nothing, and it compensates
+// for the doubling vqdmulh performs.
+static const int16_t sinpi8sqrt2 = 17734;
+
+static void idct_dequant_full_2x_neon(int16_t *q, int16_t *dq,
+ unsigned char *dst, int stride) {
+ unsigned char *dst0, *dst1;
+ int32x2_t d28, d29, d30, d31;
+ int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
+ int16x8_t qEmpty = vdupq_n_s16(0);
+ int32x4x2_t q2tmp0, q2tmp1;
+ int16x8x2_t q2tmp2, q2tmp3;
+ int16x4_t dLow0, dLow1, dHigh0, dHigh1;
+
+ d28 = d29 = d30 = d31 = vdup_n_s32(0);
+
+ // load dq
+ q0 = vld1q_s16(dq);
+ dq += 8;
+ q1 = vld1q_s16(dq);
+
+ // load q
+ q2 = vld1q_s16(q);
+ vst1q_s16(q, qEmpty);
+ q += 8;
+ q3 = vld1q_s16(q);
+ vst1q_s16(q, qEmpty);
+ q += 8;
+ q4 = vld1q_s16(q);
+ vst1q_s16(q, qEmpty);
+ q += 8;
+ q5 = vld1q_s16(q);
+ vst1q_s16(q, qEmpty);
+
+ // load src from dst
+ dst0 = dst;
+ dst1 = dst + 4;
+ d28 = vld1_lane_s32((const int32_t *)dst0, d28, 0);
+ dst0 += stride;
+ d28 = vld1_lane_s32((const int32_t *)dst1, d28, 1);
+ dst1 += stride;
+ d29 = vld1_lane_s32((const int32_t *)dst0, d29, 0);
+ dst0 += stride;
+ d29 = vld1_lane_s32((const int32_t *)dst1, d29, 1);
+ dst1 += stride;
+
+ d30 = vld1_lane_s32((const int32_t *)dst0, d30, 0);
+ dst0 += stride;
+ d30 = vld1_lane_s32((const int32_t *)dst1, d30, 1);
+ dst1 += stride;
+ d31 = vld1_lane_s32((const int32_t *)dst0, d31, 0);
+ d31 = vld1_lane_s32((const int32_t *)dst1, d31, 1);
+
+ q2 = vmulq_s16(q2, q0);
+ q3 = vmulq_s16(q3, q1);
+ q4 = vmulq_s16(q4, q0);
+ q5 = vmulq_s16(q5, q1);
+
+ // vswp
+ dLow0 = vget_low_s16(q2);
+ dHigh0 = vget_high_s16(q2);
+ dLow1 = vget_low_s16(q4);
+ dHigh1 = vget_high_s16(q4);
+ q2 = vcombine_s16(dLow0, dLow1);
+ q4 = vcombine_s16(dHigh0, dHigh1);
+
+ dLow0 = vget_low_s16(q3);
+ dHigh0 = vget_high_s16(q3);
+ dLow1 = vget_low_s16(q5);
+ dHigh1 = vget_high_s16(q5);
+ q3 = vcombine_s16(dLow0, dLow1);
+ q5 = vcombine_s16(dHigh0, dHigh1);
+
+ q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2);
+ q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
+ q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1);
+ q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);
+
+ q10 = vqaddq_s16(q2, q3);
+ q11 = vqsubq_s16(q2, q3);
+
+ q8 = vshrq_n_s16(q8, 1);
+ q9 = vshrq_n_s16(q9, 1);
+
+ q4 = vqaddq_s16(q4, q8);
+ q5 = vqaddq_s16(q5, q9);
+
+ q2 = vqsubq_s16(q6, q5);
+ q3 = vqaddq_s16(q7, q4);
+
+ q4 = vqaddq_s16(q10, q3);
+ q5 = vqaddq_s16(q11, q2);
+ q6 = vqsubq_s16(q11, q2);
+ q7 = vqsubq_s16(q10, q3);
+
+ q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
+ q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
+ q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
+ vreinterpretq_s16_s32(q2tmp1.val[0]));
+ q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
+ vreinterpretq_s16_s32(q2tmp1.val[1]));
+
+ // loop 2
+ q8 = vqdmulhq_n_s16(q2tmp2.val[1], sinpi8sqrt2);
+ q9 = vqdmulhq_n_s16(q2tmp3.val[1], sinpi8sqrt2);
+ q10 = vqdmulhq_n_s16(q2tmp2.val[1], cospi8sqrt2minus1);
+ q11 = vqdmulhq_n_s16(q2tmp3.val[1], cospi8sqrt2minus1);
+
+ q2 = vqaddq_s16(q2tmp2.val[0], q2tmp3.val[0]);
+ q3 = vqsubq_s16(q2tmp2.val[0], q2tmp3.val[0]);
+
+ q10 = vshrq_n_s16(q10, 1);
+ q11 = vshrq_n_s16(q11, 1);
+
+ q10 = vqaddq_s16(q2tmp2.val[1], q10);
+ q11 = vqaddq_s16(q2tmp3.val[1], q11);
+
+ q8 = vqsubq_s16(q8, q11);
+ q9 = vqaddq_s16(q9, q10);
+
+ q4 = vqaddq_s16(q2, q9);
+ q5 = vqaddq_s16(q3, q8);
+ q6 = vqsubq_s16(q3, q8);
+ q7 = vqsubq_s16(q2, q9);
+
+ q4 = vrshrq_n_s16(q4, 3);
+ q5 = vrshrq_n_s16(q5, 3);
+ q6 = vrshrq_n_s16(q6, 3);
+ q7 = vrshrq_n_s16(q7, 3);
+
+ q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
+ q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
+ q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
+ vreinterpretq_s16_s32(q2tmp1.val[0]));
+ q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
+ vreinterpretq_s16_s32(q2tmp1.val[1]));
+
+ q4 = vreinterpretq_s16_u16(
+ vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[0]), vreinterpret_u8_s32(d28)));
+ q5 = vreinterpretq_s16_u16(
+ vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[1]), vreinterpret_u8_s32(d29)));
+ q6 = vreinterpretq_s16_u16(
+ vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[0]), vreinterpret_u8_s32(d30)));
+ q7 = vreinterpretq_s16_u16(
+ vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[1]), vreinterpret_u8_s32(d31)));
+
+ d28 = vreinterpret_s32_u8(vqmovun_s16(q4));
+ d29 = vreinterpret_s32_u8(vqmovun_s16(q5));
+ d30 = vreinterpret_s32_u8(vqmovun_s16(q6));
+ d31 = vreinterpret_s32_u8(vqmovun_s16(q7));
+
+ dst0 = dst;
+ dst1 = dst + 4;
+ vst1_lane_s32((int32_t *)dst0, d28, 0);
+ dst0 += stride;
+ vst1_lane_s32((int32_t *)dst1, d28, 1);
+ dst1 += stride;
+ vst1_lane_s32((int32_t *)dst0, d29, 0);
+ dst0 += stride;
+ vst1_lane_s32((int32_t *)dst1, d29, 1);
+ dst1 += stride;
+
+ vst1_lane_s32((int32_t *)dst0, d30, 0);
+ dst0 += stride;
+ vst1_lane_s32((int32_t *)dst1, d30, 1);
+ dst1 += stride;
+ vst1_lane_s32((int32_t *)dst0, d31, 0);
+ vst1_lane_s32((int32_t *)dst1, d31, 1);
+}
+
+void vp8_dequant_idct_add_y_block_neon(short *q, short *dq, unsigned char *dst,
+ int stride, char *eobs) {
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ if (((short *)eobs)[0]) {
+ if (((short *)eobs)[0] & 0xfefe)
+ idct_dequant_full_2x_neon(q, dq, dst, stride);
+ else
+ idct_dequant_0_2x_neon(q, dq[0], dst, stride);
+ }
+
+ if (((short *)eobs)[1]) {
+ if (((short *)eobs)[1] & 0xfefe)
+ idct_dequant_full_2x_neon(q + 32, dq, dst + 8, stride);
+ else
+ idct_dequant_0_2x_neon(q + 32, dq[0], dst + 8, stride);
+ }
+ q += 64;
+ dst += 4 * stride;
+ eobs += 4;
+ }
+}
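+
+// The dispatch above reads two per-block end-of-block counts at once through
+// a short: a nonzero short means at least one of the two blocks has coded
+// coefficients, and masking with 0xfefe clears the low bit of each byte, so
+// the masked value is nonzero exactly when some block has an eob greater
+// than 1 and needs the full IDCT. Otherwise every coded block holds only its
+// DC coefficient and the DC shortcut suffices. A scalar sketch of the
+// equivalent per-byte test, kept out of the build:
+#if 0
+static void eob_pair_dispatch_sketch(const char *eobs, int *any, int *full) {
+  const unsigned char e0 = (unsigned char)eobs[0];
+  const unsigned char e1 = (unsigned char)eobs[1];
+  *any = (e0 | e1) != 0;            // ((short *)eobs)[0] != 0
+  *full = ((e0 | e1) & 0xfe) != 0;  // ((short *)eobs)[0] & 0xfefe
+}
+#endif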
+
+void vp8_dequant_idct_add_uv_block_neon(short *q, short *dq,
+ unsigned char *dst_u,
+ unsigned char *dst_v, int stride,
+ char *eobs) {
+ if (((short *)eobs)[0]) {
+ if (((short *)eobs)[0] & 0xfefe)
+ idct_dequant_full_2x_neon(q, dq, dst_u, stride);
+ else
+ idct_dequant_0_2x_neon(q, dq[0], dst_u, stride);
+ }
+
+ q += 32;
+ dst_u += 4 * stride;
+
+ if (((short *)eobs)[1]) {
+ if (((short *)eobs)[1] & 0xfefe)
+ idct_dequant_full_2x_neon(q, dq, dst_u, stride);
+ else
+ idct_dequant_0_2x_neon(q, dq[0], dst_u, stride);
+ }
+
+ q += 32;
+
+ if (((short *)eobs)[2]) {
+ if (((short *)eobs)[2] & 0xfefe)
+ idct_dequant_full_2x_neon(q, dq, dst_v, stride);
+ else
+ idct_dequant_0_2x_neon(q, dq[0], dst_v, stride);
+ }
+
+ q += 32;
+ dst_v += 4 * stride;
+
+ if (((short *)eobs)[3]) {
+ if (((short *)eobs)[3] & 0xfefe)
+ idct_dequant_full_2x_neon(q, dq, dst_v, stride);
+ else
+ idct_dequant_0_2x_neon(q, dq[0], dst_v, stride);
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/iwalsh_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/iwalsh_neon.c
new file mode 100644
index 0000000000..91600bfc00
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/iwalsh_neon.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp8_rtcd.h"
+
+void vp8_short_inv_walsh4x4_neon(int16_t *input, int16_t *mb_dqcoeff) {
+ int16x8_t q0s16, q1s16, q2s16, q3s16;
+ int16x4_t d4s16, d5s16, d6s16, d7s16;
+ int16x4x2_t v2tmp0, v2tmp1;
+ int32x2x2_t v2tmp2, v2tmp3;
+ int16x8_t qAdd3;
+
+ q0s16 = vld1q_s16(input);
+ q1s16 = vld1q_s16(input + 8);
+
+ // 1st for loop
+ d4s16 = vadd_s16(vget_low_s16(q0s16), vget_high_s16(q1s16));
+ d6s16 = vadd_s16(vget_high_s16(q0s16), vget_low_s16(q1s16));
+ d5s16 = vsub_s16(vget_low_s16(q0s16), vget_high_s16(q1s16));
+ d7s16 = vsub_s16(vget_high_s16(q0s16), vget_low_s16(q1s16));
+
+ q2s16 = vcombine_s16(d4s16, d5s16);
+ q3s16 = vcombine_s16(d6s16, d7s16);
+
+ q0s16 = vaddq_s16(q2s16, q3s16);
+ q1s16 = vsubq_s16(q2s16, q3s16);
+
+ v2tmp2 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(q0s16)),
+ vreinterpret_s32_s16(vget_low_s16(q1s16)));
+ v2tmp3 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(q0s16)),
+ vreinterpret_s32_s16(vget_high_s16(q1s16)));
+ v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),
+ vreinterpret_s16_s32(v2tmp3.val[0]));
+ v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),
+ vreinterpret_s16_s32(v2tmp3.val[1]));
+
+ // 2nd for loop
+ d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
+ d6s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
+ d5s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);
+ d7s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
+ q2s16 = vcombine_s16(d4s16, d5s16);
+ q3s16 = vcombine_s16(d6s16, d7s16);
+
+ qAdd3 = vdupq_n_s16(3);
+
+ q0s16 = vaddq_s16(q2s16, q3s16);
+ q1s16 = vsubq_s16(q2s16, q3s16);
+
+ q0s16 = vaddq_s16(q0s16, qAdd3);
+ q1s16 = vaddq_s16(q1s16, qAdd3);
+
+ q0s16 = vshrq_n_s16(q0s16, 3);
+ q1s16 = vshrq_n_s16(q1s16, 3);
+
+ // store
+ vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 0);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 0);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 0);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 0);
+ mb_dqcoeff += 16;
+
+ vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 1);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 1);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 1);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 1);
+ mb_dqcoeff += 16;
+
+ vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 2);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 2);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 2);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 2);
+ mb_dqcoeff += 16;
+
+ vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 3);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 3);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 3);
+ mb_dqcoeff += 16;
+ vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 3);
+ mb_dqcoeff += 16;
+ return;
+}
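+
+// The stride-16 stores above scatter the 16 inverse-WHT outputs to the DC
+// position (coefficient 0) of each of the macroblock's sixteen 4x4 luma
+// blocks, whose 16-entry dqcoeff arrays are laid out back to back. A minimal
+// scalar sketch of the transform plus scatter (cf. vp8_short_inv_walsh4x4_c),
+// kept out of the build:
+#if 0
+static void inv_walsh_scatter_sketch(const short *input, short *mb_dqcoeff) {
+  int i, out[16];
+
+  for (i = 0; i < 4; ++i) {  // columns
+    const int a1 = input[i] + input[i + 12];
+    const int b1 = input[i + 4] + input[i + 8];
+    const int c1 = input[i + 4] - input[i + 8];
+    const int d1 = input[i] - input[i + 12];
+    out[i] = a1 + b1;
+    out[i + 4] = c1 + d1;
+    out[i + 8] = a1 - b1;
+    out[i + 12] = d1 - c1;
+  }
+  for (i = 0; i < 4; ++i) {  // rows, with the +3, >>3 rounding
+    const int a1 = out[4 * i] + out[4 * i + 3];
+    const int b1 = out[4 * i + 1] + out[4 * i + 2];
+    const int c1 = out[4 * i + 1] - out[4 * i + 2];
+    const int d1 = out[4 * i] - out[4 * i + 3];
+    out[4 * i + 0] = (a1 + b1 + 3) >> 3;
+    out[4 * i + 1] = (c1 + d1 + 3) >> 3;
+    out[4 * i + 2] = (a1 - b1 + 3) >> 3;
+    out[4 * i + 3] = (d1 - c1 + 3) >> 3;
+  }
+  for (i = 0; i < 16; ++i) mb_dqcoeff[i * 16] = (short)out[i];  // DC scatter
+}
+#endif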
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c
new file mode 100644
index 0000000000..df983b23a3
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
+
+static INLINE void vp8_loop_filter_simple_horizontal_edge_neon(
+ unsigned char *s, int p, const unsigned char *blimit) {
+ uint8_t *sp;
+ uint8x16_t qblimit, q0u8;
+ uint8x16_t q5u8, q6u8, q7u8, q8u8, q9u8, q10u8, q14u8, q15u8;
+ int16x8_t q2s16, q3s16, q13s16;
+ int8x8_t d8s8, d9s8;
+ int8x16_t q2s8, q3s8, q4s8, q10s8, q11s8, q14s8;
+
+ qblimit = vdupq_n_u8(*blimit);
+
+ sp = s - (p << 1);
+ q5u8 = vld1q_u8(sp);
+ sp += p;
+ q6u8 = vld1q_u8(sp);
+ sp += p;
+ q7u8 = vld1q_u8(sp);
+ sp += p;
+ q8u8 = vld1q_u8(sp);
+
+ q15u8 = vabdq_u8(q6u8, q7u8);
+ q14u8 = vabdq_u8(q5u8, q8u8);
+
+ q15u8 = vqaddq_u8(q15u8, q15u8);
+ q14u8 = vshrq_n_u8(q14u8, 1);
+ q0u8 = vdupq_n_u8(0x80);
+ q13s16 = vdupq_n_s16(3);
+ q15u8 = vqaddq_u8(q15u8, q14u8);
+
+ q5u8 = veorq_u8(q5u8, q0u8);
+ q6u8 = veorq_u8(q6u8, q0u8);
+ q7u8 = veorq_u8(q7u8, q0u8);
+ q8u8 = veorq_u8(q8u8, q0u8);
+
+ q15u8 = vcgeq_u8(qblimit, q15u8);
+
+ q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7u8)),
+ vget_low_s8(vreinterpretq_s8_u8(q6u8)));
+ q3s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7u8)),
+ vget_high_s8(vreinterpretq_s8_u8(q6u8)));
+
+ q4s8 = vqsubq_s8(vreinterpretq_s8_u8(q5u8), vreinterpretq_s8_u8(q8u8));
+
+ q2s16 = vmulq_s16(q2s16, q13s16);
+ q3s16 = vmulq_s16(q3s16, q13s16);
+
+ q10u8 = vdupq_n_u8(3);
+ q9u8 = vdupq_n_u8(4);
+
+ q2s16 = vaddw_s8(q2s16, vget_low_s8(q4s8));
+ q3s16 = vaddw_s8(q3s16, vget_high_s8(q4s8));
+
+ d8s8 = vqmovn_s16(q2s16);
+ d9s8 = vqmovn_s16(q3s16);
+ q4s8 = vcombine_s8(d8s8, d9s8);
+
+ q14s8 = vandq_s8(q4s8, vreinterpretq_s8_u8(q15u8));
+
+ q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q10u8));
+ q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q9u8));
+ q2s8 = vshrq_n_s8(q2s8, 3);
+ q3s8 = vshrq_n_s8(q3s8, 3);
+
+ q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6u8), q2s8);
+ q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7u8), q3s8);
+
+ q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
+ q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
+
+ vst1q_u8(s, q7u8);
+ s -= p;
+ vst1q_u8(s, q6u8);
+ return;
+}
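+
+// The math above is the VP8 "simple" filter: the edge is filtered when
+// |p0 - q0| * 2 + |p1 - q1| / 2 <= blimit, and then p0/q0 are nudged by a
+// clamped 3 * (q0 - p0) + (p1 - q1) term, rounded by +4 on the q0 side and
+// +3 on the p0 side. A scalar sketch for one pixel column (the NEON version
+// runs 16 columns at once), kept out of the build:
+#if 0
+static signed char clamp_s8(int v) {
+  return (signed char)(v < -128 ? -128 : v > 127 ? 127 : v);
+}
+
+static void simple_filter_sketch(unsigned char *s, int p,
+                                 unsigned char blimit) {
+  const int p1 = s[-2 * p], p0 = s[-p], q0 = s[0], q1 = s[p];
+  const int d = (p0 > q0 ? p0 - q0 : q0 - p0) * 2 +
+                (p1 > q1 ? p1 - q1 : q1 - p1) / 2;
+  if (d <= blimit) {
+    // Bias to the signed range, as the XORs with 0x80 do above.
+    const signed char ps1 = (signed char)(p1 ^ 0x80);
+    const signed char ps0 = (signed char)(p0 ^ 0x80);
+    const signed char qs0 = (signed char)(q0 ^ 0x80);
+    const signed char qs1 = (signed char)(q1 ^ 0x80);
+    const signed char f = clamp_s8(3 * (qs0 - ps0) + clamp_s8(ps1 - qs1));
+    const signed char f1 = clamp_s8(f + 4) >> 3;  // rounds the q0 update
+    const signed char f2 = clamp_s8(f + 3) >> 3;  // rounds the p0 update
+    s[0] = (unsigned char)(clamp_s8(qs0 - f1) ^ 0x80);
+    s[-p] = (unsigned char)(clamp_s8(ps0 + f2) ^ 0x80);
+  }
+}
+#endif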
+
+void vp8_loop_filter_bhs_neon(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ y_ptr += y_stride * 4;
+ vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
+ y_ptr += y_stride * 4;
+ vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
+ y_ptr += y_stride * 4;
+ vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
+ return;
+}
+
+void vp8_loop_filter_mbhs_neon(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
+ return;
+}
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c
new file mode 100644
index 0000000000..fbc83ae290
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
+#include "vpx_ports/arm.h"
+
+#ifdef VPX_INCOMPATIBLE_GCC
+static INLINE void write_2x4(unsigned char *dst, int pitch,
+ const uint8x8x2_t result) {
+ /*
+ * uint8x8x2_t result
+ 00 01 02 03 | 04 05 06 07
+ 10 11 12 13 | 14 15 16 17
+ ---
+ * after vtrn_u8
+ 00 10 02 12 | 04 14 06 16
+ 01 11 03 13 | 05 15 07 17
+ */
+ const uint8x8x2_t r01_u8 = vtrn_u8(result.val[0], result.val[1]);
+ const uint16x4_t x_0_4 = vreinterpret_u16_u8(r01_u8.val[0]);
+ const uint16x4_t x_1_5 = vreinterpret_u16_u8(r01_u8.val[1]);
+ vst1_lane_u16((uint16_t *)dst, x_0_4, 0);
+ dst += pitch;
+ vst1_lane_u16((uint16_t *)dst, x_1_5, 0);
+ dst += pitch;
+ vst1_lane_u16((uint16_t *)dst, x_0_4, 1);
+ dst += pitch;
+ vst1_lane_u16((uint16_t *)dst, x_1_5, 1);
+ dst += pitch;
+ vst1_lane_u16((uint16_t *)dst, x_0_4, 2);
+ dst += pitch;
+ vst1_lane_u16((uint16_t *)dst, x_1_5, 2);
+ dst += pitch;
+ vst1_lane_u16((uint16_t *)dst, x_0_4, 3);
+ dst += pitch;
+ vst1_lane_u16((uint16_t *)dst, x_1_5, 3);
+}
+
+static INLINE void write_2x8(unsigned char *dst, int pitch,
+ const uint8x8x2_t result,
+ const uint8x8x2_t result2) {
+ write_2x4(dst, pitch, result);
+ dst += pitch * 8;
+ write_2x4(dst, pitch, result2);
+}
+#else
+static INLINE void write_2x8(unsigned char *dst, int pitch,
+ const uint8x8x2_t result,
+ const uint8x8x2_t result2) {
+ vst2_lane_u8(dst, result, 0);
+ dst += pitch;
+ vst2_lane_u8(dst, result, 1);
+ dst += pitch;
+ vst2_lane_u8(dst, result, 2);
+ dst += pitch;
+ vst2_lane_u8(dst, result, 3);
+ dst += pitch;
+ vst2_lane_u8(dst, result, 4);
+ dst += pitch;
+ vst2_lane_u8(dst, result, 5);
+ dst += pitch;
+ vst2_lane_u8(dst, result, 6);
+ dst += pitch;
+ vst2_lane_u8(dst, result, 7);
+ dst += pitch;
+
+ vst2_lane_u8(dst, result2, 0);
+ dst += pitch;
+ vst2_lane_u8(dst, result2, 1);
+ dst += pitch;
+ vst2_lane_u8(dst, result2, 2);
+ dst += pitch;
+ vst2_lane_u8(dst, result2, 3);
+ dst += pitch;
+ vst2_lane_u8(dst, result2, 4);
+ dst += pitch;
+ vst2_lane_u8(dst, result2, 5);
+ dst += pitch;
+ vst2_lane_u8(dst, result2, 6);
+ dst += pitch;
+ vst2_lane_u8(dst, result2, 7);
+}
+#endif // VPX_INCOMPATIBLE_GCC
+
+#ifdef VPX_INCOMPATIBLE_GCC
+static INLINE uint8x8x4_t read_4x8(unsigned char *src, int pitch) {
+ uint8x8x4_t x;
+ const uint8x8_t a = vld1_u8(src);
+ const uint8x8_t b = vld1_u8(src + pitch * 1);
+ const uint8x8_t c = vld1_u8(src + pitch * 2);
+ const uint8x8_t d = vld1_u8(src + pitch * 3);
+ const uint8x8_t e = vld1_u8(src + pitch * 4);
+ const uint8x8_t f = vld1_u8(src + pitch * 5);
+ const uint8x8_t g = vld1_u8(src + pitch * 6);
+ const uint8x8_t h = vld1_u8(src + pitch * 7);
+ const uint32x2x2_t r04_u32 =
+ vtrn_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(e));
+ const uint32x2x2_t r15_u32 =
+ vtrn_u32(vreinterpret_u32_u8(b), vreinterpret_u32_u8(f));
+ const uint32x2x2_t r26_u32 =
+ vtrn_u32(vreinterpret_u32_u8(c), vreinterpret_u32_u8(g));
+ const uint32x2x2_t r37_u32 =
+ vtrn_u32(vreinterpret_u32_u8(d), vreinterpret_u32_u8(h));
+ const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]),
+ vreinterpret_u16_u32(r26_u32.val[0]));
+ const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]),
+ vreinterpret_u16_u32(r37_u32.val[0]));
+ const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
+ vreinterpret_u8_u16(r13_u16.val[0]));
+ const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
+ vreinterpret_u8_u16(r13_u16.val[1]));
+ /*
+ * after vtrn_u32
+ 00 01 02 03 | 40 41 42 43
+ 10 11 12 13 | 50 51 52 53
+ 20 21 22 23 | 60 61 62 63
+ 30 31 32 33 | 70 71 72 73
+ ---
+ * after vtrn_u16
+ 00 01 20 21 | 40 41 60 61
+ 02 03 22 23 | 42 43 62 63
+ 10 11 30 31 | 50 51 70 71
+ 12 13 32 33 | 52 53 72 73
+
+ 00 01 20 21 | 40 41 60 61
+ 10 11 30 31 | 50 51 70 71
+ 02 03 22 23 | 42 43 62 63
+ 12 13 32 33 | 52 53 72 73
+ ---
+ * after vtrn_u8
+ 00 10 20 30 | 40 50 60 70
+ 01 11 21 31 | 41 51 61 71
+ 02 12 22 32 | 42 52 62 72
+ 03 13 23 33 | 43 53 63 73
+ */
+ x.val[0] = r01_u8.val[0];
+ x.val[1] = r01_u8.val[1];
+ x.val[2] = r23_u8.val[0];
+ x.val[3] = r23_u8.val[1];
+
+ return x;
+}
+#else
+static INLINE uint8x8x4_t read_4x8(unsigned char *src, int pitch) {
+ uint8x8x4_t x;
+ x.val[0] = x.val[1] = x.val[2] = x.val[3] = vdup_n_u8(0);
+ x = vld4_lane_u8(src, x, 0);
+ src += pitch;
+ x = vld4_lane_u8(src, x, 1);
+ src += pitch;
+ x = vld4_lane_u8(src, x, 2);
+ src += pitch;
+ x = vld4_lane_u8(src, x, 3);
+ src += pitch;
+ x = vld4_lane_u8(src, x, 4);
+ src += pitch;
+ x = vld4_lane_u8(src, x, 5);
+ src += pitch;
+ x = vld4_lane_u8(src, x, 6);
+ src += pitch;
+ x = vld4_lane_u8(src, x, 7);
+ return x;
+}
+#endif // VPX_INCOMPATIBLE_GCC
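+
+// Both read_4x8() variants gather the same transposed 4-wide, 8-tall patch:
+// x.val[j] ends up holding byte j of each of the eight rows, i.e. column j
+// of the patch. A scalar sketch of the load they implement (illustrative
+// only), kept out of the build:
+#if 0
+static void read_4x8_sketch(const unsigned char *src, int pitch,
+                            unsigned char val[4][8]) {
+  int r, j;
+  for (r = 0; r < 8; ++r) {
+    for (j = 0; j < 4; ++j) val[j][r] = src[r * pitch + j];
+  }
+}
+#endif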
+
+static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
+ unsigned char *s, int p, const unsigned char *blimit) {
+ unsigned char *src1;
+ uint8x16_t qblimit, q0u8;
+ uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
+ int16x8_t q2s16, q13s16, q11s16;
+ int8x8_t d28s8, d29s8;
+ int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
+ uint8x8x4_t d0u8x4; // d6, d7, d8, d9
+ uint8x8x4_t d1u8x4; // d10, d11, d12, d13
+ uint8x8x2_t d2u8x2; // d12, d13
+ uint8x8x2_t d3u8x2; // d14, d15
+
+ qblimit = vdupq_n_u8(*blimit);
+
+ src1 = s - 2;
+ d0u8x4 = read_4x8(src1, p);
+ src1 += p * 8;
+ d1u8x4 = read_4x8(src1, p);
+
+ q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]); // d6 d10
+ q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]); // d8 d12
+ q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]); // d7 d11
+ q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]); // d9 d13
+
+ q15u8 = vabdq_u8(q5u8, q4u8);
+ q14u8 = vabdq_u8(q3u8, q6u8);
+
+ q15u8 = vqaddq_u8(q15u8, q15u8);
+ q14u8 = vshrq_n_u8(q14u8, 1);
+ q0u8 = vdupq_n_u8(0x80);
+ q11s16 = vdupq_n_s16(3);
+ q15u8 = vqaddq_u8(q15u8, q14u8);
+
+ q3u8 = veorq_u8(q3u8, q0u8);
+ q4u8 = veorq_u8(q4u8, q0u8);
+ q5u8 = veorq_u8(q5u8, q0u8);
+ q6u8 = veorq_u8(q6u8, q0u8);
+
+ q15u8 = vcgeq_u8(qblimit, q15u8);
+
+ q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
+ vget_low_s8(vreinterpretq_s8_u8(q5u8)));
+ q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
+ vget_high_s8(vreinterpretq_s8_u8(q5u8)));
+
+ q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8), vreinterpretq_s8_u8(q6u8));
+
+ q2s16 = vmulq_s16(q2s16, q11s16);
+ q13s16 = vmulq_s16(q13s16, q11s16);
+
+ q11u8 = vdupq_n_u8(3);
+ q12u8 = vdupq_n_u8(4);
+
+ q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
+ q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));
+
+ d28s8 = vqmovn_s16(q2s16);
+ d29s8 = vqmovn_s16(q13s16);
+ q14s8 = vcombine_s8(d28s8, d29s8);
+
+ q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));
+
+ q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
+ q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
+ q2s8 = vshrq_n_s8(q2s8, 3);
+ q14s8 = vshrq_n_s8(q3s8, 3);
+
+ q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);
+ q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);
+
+ q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
+ q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
+
+ d2u8x2.val[0] = vget_low_u8(q6u8); // d12
+ d2u8x2.val[1] = vget_low_u8(q7u8); // d14
+ d3u8x2.val[0] = vget_high_u8(q6u8); // d13
+ d3u8x2.val[1] = vget_high_u8(q7u8); // d15
+
+ src1 = s - 1;
+ write_2x8(src1, p, d2u8x2, d3u8x2);
+}
+
+void vp8_loop_filter_bvs_neon(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ y_ptr += 4;
+ vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
+ y_ptr += 4;
+ vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
+ y_ptr += 4;
+ vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
+ return;
+}
+
+void vp8_loop_filter_mbvs_neon(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
+ return;
+}
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c
new file mode 100644
index 0000000000..fafaf2d451
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vpx_config.h"
+#include "vp8/common/arm/loopfilter_arm.h"
+
+static INLINE void vp8_mbloop_filter_neon(uint8x16_t qblimit, // mblimit
+ uint8x16_t qlimit, // limit
+ uint8x16_t qthresh, // thresh
+ uint8x16_t q3, // p3
+ uint8x16_t q4, // p2
+ uint8x16_t q5, // p1
+ uint8x16_t q6, // p0
+ uint8x16_t q7, // q0
+ uint8x16_t q8, // q1
+ uint8x16_t q9, // q2
+ uint8x16_t q10, // q3
+ uint8x16_t *q4r, // p2
+ uint8x16_t *q5r, // p1
+ uint8x16_t *q6r, // p0
+ uint8x16_t *q7r, // q0
+ uint8x16_t *q8r, // q1
+ uint8x16_t *q9r) { // q2
+ uint8x16_t q0u8, q1u8, q11u8, q12u8, q13u8, q14u8, q15u8;
+ int16x8_t q0s16, q2s16, q11s16, q12s16, q13s16, q14s16, q15s16;
+ int8x16_t q1s8, q6s8, q7s8, q2s8, q11s8, q13s8;
+ uint16x8_t q0u16, q11u16, q12u16, q13u16, q14u16, q15u16;
+ int8x16_t q0s8, q12s8, q14s8, q15s8;
+ int8x8_t d0, d1, d2, d3, d4, d5, d24, d25, d28, d29;
+
+ q11u8 = vabdq_u8(q3, q4);
+ q12u8 = vabdq_u8(q4, q5);
+ q13u8 = vabdq_u8(q5, q6);
+ q14u8 = vabdq_u8(q8, q7);
+ q1u8 = vabdq_u8(q9, q8);
+ q0u8 = vabdq_u8(q10, q9);
+
+ q11u8 = vmaxq_u8(q11u8, q12u8);
+ q12u8 = vmaxq_u8(q13u8, q14u8);
+ q1u8 = vmaxq_u8(q1u8, q0u8);
+ q15u8 = vmaxq_u8(q11u8, q12u8);
+
+ q12u8 = vabdq_u8(q6, q7);
+
+ // vp8_hevmask
+ q13u8 = vcgtq_u8(q13u8, qthresh);
+ q14u8 = vcgtq_u8(q14u8, qthresh);
+ q15u8 = vmaxq_u8(q15u8, q1u8);
+
+ q15u8 = vcgeq_u8(qlimit, q15u8);
+
+ q1u8 = vabdq_u8(q5, q8);
+ q12u8 = vqaddq_u8(q12u8, q12u8);
+
+ // vp8_filter() function
+ // convert to signed
+ q0u8 = vdupq_n_u8(0x80);
+ q9 = veorq_u8(q9, q0u8);
+ q8 = veorq_u8(q8, q0u8);
+ q7 = veorq_u8(q7, q0u8);
+ q6 = veorq_u8(q6, q0u8);
+ q5 = veorq_u8(q5, q0u8);
+ q4 = veorq_u8(q4, q0u8);
+
+ q1u8 = vshrq_n_u8(q1u8, 1);
+ q12u8 = vqaddq_u8(q12u8, q1u8);
+
+ q14u8 = vorrq_u8(q13u8, q14u8);
+ q12u8 = vcgeq_u8(qblimit, q12u8);
+
+ q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
+ vget_low_s8(vreinterpretq_s8_u8(q6)));
+ q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
+ vget_high_s8(vreinterpretq_s8_u8(q6)));
+
+ q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8));
+
+ q11s16 = vdupq_n_s16(3);
+ q2s16 = vmulq_s16(q2s16, q11s16);
+ q13s16 = vmulq_s16(q13s16, q11s16);
+
+ q15u8 = vandq_u8(q15u8, q12u8);
+
+ q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
+ q13s16 = vaddw_s8(q13s16, vget_high_s8(q1s8));
+
+ q12u8 = vdupq_n_u8(3);
+ q11u8 = vdupq_n_u8(4);
+ // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
+ d2 = vqmovn_s16(q2s16);
+ d3 = vqmovn_s16(q13s16);
+ q1s8 = vcombine_s8(d2, d3);
+ q1s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q15u8));
+ q13s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
+
+ q2s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q11u8));
+ q13s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q12u8));
+ q2s8 = vshrq_n_s8(q2s8, 3);
+ q13s8 = vshrq_n_s8(q13s8, 3);
+
+ q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8);
+ q6s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q13s8);
+
+ q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
+
+ q0u16 = q11u16 = q12u16 = q13u16 = q14u16 = q15u16 = vdupq_n_u16(63);
+ d5 = vdup_n_s8(9);
+ d4 = vdup_n_s8(18);
+
+ q0s16 = vmlal_s8(vreinterpretq_s16_u16(q0u16), vget_low_s8(q1s8), d5);
+ q11s16 = vmlal_s8(vreinterpretq_s16_u16(q11u16), vget_high_s8(q1s8), d5);
+ d5 = vdup_n_s8(27);
+ q12s16 = vmlal_s8(vreinterpretq_s16_u16(q12u16), vget_low_s8(q1s8), d4);
+ q13s16 = vmlal_s8(vreinterpretq_s16_u16(q13u16), vget_high_s8(q1s8), d4);
+ q14s16 = vmlal_s8(vreinterpretq_s16_u16(q14u16), vget_low_s8(q1s8), d5);
+ q15s16 = vmlal_s8(vreinterpretq_s16_u16(q15u16), vget_high_s8(q1s8), d5);
+
+ d0 = vqshrn_n_s16(q0s16, 7);
+ d1 = vqshrn_n_s16(q11s16, 7);
+ d24 = vqshrn_n_s16(q12s16, 7);
+ d25 = vqshrn_n_s16(q13s16, 7);
+ d28 = vqshrn_n_s16(q14s16, 7);
+ d29 = vqshrn_n_s16(q15s16, 7);
+
+ q0s8 = vcombine_s8(d0, d1);
+ q12s8 = vcombine_s8(d24, d25);
+ q14s8 = vcombine_s8(d28, d29);
+
+ q11s8 = vqsubq_s8(vreinterpretq_s8_u8(q9), q0s8);
+ q0s8 = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8);
+ q13s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q12s8);
+ q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8);
+ q15s8 = vqsubq_s8(q7s8, q14s8);
+ q14s8 = vqaddq_s8(q6s8, q14s8);
+
+ q1u8 = vdupq_n_u8(0x80);
+ *q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8);
+ *q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8);
+ *q7r = veorq_u8(vreinterpretq_u8_s8(q15s8), q1u8);
+ *q6r = veorq_u8(vreinterpretq_u8_s8(q14s8), q1u8);
+ *q5r = veorq_u8(vreinterpretq_u8_s8(q12s8), q1u8);
+ *q4r = veorq_u8(vreinterpretq_u8_s8(q0s8), q1u8);
+ return;
+}
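+
+// The vdupq_n_u16(63) bias plus the vmlal_s8/vqshrn_n_s16(..., 7) pairs
+// above compute the macroblock filter's wide taps: roughly 27/128, 18/128
+// and 9/128 (a 3:2:1 taper) of the masked filter value w, rounded. A scalar
+// sketch of those taps (cf. vp8_mbfilter_c in
+// vp8/common/loopfilter_filters.c), kept out of the build:
+#if 0
+static signed char clamp_s8(int v) {
+  return (signed char)(v < -128 ? -128 : v > 127 ? 127 : v);
+}
+
+static void mb_wide_taps_sketch(int w, signed char *u27, signed char *u18,
+                                signed char *u9) {
+  *u27 = clamp_s8((63 + w * 27) >> 7);  // applied to p0 (+) and q0 (-)
+  *u18 = clamp_s8((63 + w * 18) >> 7);  // applied to p1 (+) and q1 (-)
+  *u9 = clamp_s8((63 + w * 9) >> 7);    // applied to p2 (+) and q2 (-)
+}
+#endif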
+
+void vp8_mbloop_filter_horizontal_edge_y_neon(unsigned char *src, int pitch,
+ unsigned char blimit,
+ unsigned char limit,
+ unsigned char thresh) {
+ uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+ uint8x16_t q5, q6, q7, q8, q9, q10;
+
+ qblimit = vdupq_n_u8(blimit);
+ qlimit = vdupq_n_u8(limit);
+ qthresh = vdupq_n_u8(thresh);
+
+ src -= (pitch << 2);
+
+ q3 = vld1q_u8(src);
+ src += pitch;
+ q4 = vld1q_u8(src);
+ src += pitch;
+ q5 = vld1q_u8(src);
+ src += pitch;
+ q6 = vld1q_u8(src);
+ src += pitch;
+ q7 = vld1q_u8(src);
+ src += pitch;
+ q8 = vld1q_u8(src);
+ src += pitch;
+ q9 = vld1q_u8(src);
+ src += pitch;
+ q10 = vld1q_u8(src);
+
+ vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
+ q10, &q4, &q5, &q6, &q7, &q8, &q9);
+
+ src -= (pitch * 6);
+ vst1q_u8(src, q4);
+ src += pitch;
+ vst1q_u8(src, q5);
+ src += pitch;
+ vst1q_u8(src, q6);
+ src += pitch;
+ vst1q_u8(src, q7);
+ src += pitch;
+ vst1q_u8(src, q8);
+ src += pitch;
+ vst1q_u8(src, q9);
+ return;
+}
+
+void vp8_mbloop_filter_horizontal_edge_uv_neon(unsigned char *u, int pitch,
+ unsigned char blimit,
+ unsigned char limit,
+ unsigned char thresh,
+ unsigned char *v) {
+ uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+ uint8x16_t q5, q6, q7, q8, q9, q10;
+ uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
+ uint8x8_t d15, d16, d17, d18, d19, d20, d21;
+
+ qblimit = vdupq_n_u8(blimit);
+ qlimit = vdupq_n_u8(limit);
+ qthresh = vdupq_n_u8(thresh);
+
+ u -= (pitch << 2);
+ v -= (pitch << 2);
+
+ d6 = vld1_u8(u);
+ u += pitch;
+ d7 = vld1_u8(v);
+ v += pitch;
+ d8 = vld1_u8(u);
+ u += pitch;
+ d9 = vld1_u8(v);
+ v += pitch;
+ d10 = vld1_u8(u);
+ u += pitch;
+ d11 = vld1_u8(v);
+ v += pitch;
+ d12 = vld1_u8(u);
+ u += pitch;
+ d13 = vld1_u8(v);
+ v += pitch;
+ d14 = vld1_u8(u);
+ u += pitch;
+ d15 = vld1_u8(v);
+ v += pitch;
+ d16 = vld1_u8(u);
+ u += pitch;
+ d17 = vld1_u8(v);
+ v += pitch;
+ d18 = vld1_u8(u);
+ u += pitch;
+ d19 = vld1_u8(v);
+ v += pitch;
+ d20 = vld1_u8(u);
+ d21 = vld1_u8(v);
+
+ q3 = vcombine_u8(d6, d7);
+ q4 = vcombine_u8(d8, d9);
+ q5 = vcombine_u8(d10, d11);
+ q6 = vcombine_u8(d12, d13);
+ q7 = vcombine_u8(d14, d15);
+ q8 = vcombine_u8(d16, d17);
+ q9 = vcombine_u8(d18, d19);
+ q10 = vcombine_u8(d20, d21);
+
+ vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
+ q10, &q4, &q5, &q6, &q7, &q8, &q9);
+
+ u -= (pitch * 6);
+ v -= (pitch * 6);
+ vst1_u8(u, vget_low_u8(q4));
+ u += pitch;
+ vst1_u8(v, vget_high_u8(q4));
+ v += pitch;
+ vst1_u8(u, vget_low_u8(q5));
+ u += pitch;
+ vst1_u8(v, vget_high_u8(q5));
+ v += pitch;
+ vst1_u8(u, vget_low_u8(q6));
+ u += pitch;
+ vst1_u8(v, vget_high_u8(q6));
+ v += pitch;
+ vst1_u8(u, vget_low_u8(q7));
+ u += pitch;
+ vst1_u8(v, vget_high_u8(q7));
+ v += pitch;
+ vst1_u8(u, vget_low_u8(q8));
+ u += pitch;
+ vst1_u8(v, vget_high_u8(q8));
+ v += pitch;
+ vst1_u8(u, vget_low_u8(q9));
+ vst1_u8(v, vget_high_u8(q9));
+ return;
+}
+
+void vp8_mbloop_filter_vertical_edge_y_neon(unsigned char *src, int pitch,
+ unsigned char blimit,
+ unsigned char limit,
+ unsigned char thresh) {
+ unsigned char *s1, *s2;
+ uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+ uint8x16_t q5, q6, q7, q8, q9, q10;
+ uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
+ uint8x8_t d15, d16, d17, d18, d19, d20, d21;
+ uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
+ uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
+ uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
+
+ qblimit = vdupq_n_u8(blimit);
+ qlimit = vdupq_n_u8(limit);
+ qthresh = vdupq_n_u8(thresh);
+
+ s1 = src - 4;
+ s2 = s1 + 8 * pitch;
+ d6 = vld1_u8(s1);
+ s1 += pitch;
+ d7 = vld1_u8(s2);
+ s2 += pitch;
+ d8 = vld1_u8(s1);
+ s1 += pitch;
+ d9 = vld1_u8(s2);
+ s2 += pitch;
+ d10 = vld1_u8(s1);
+ s1 += pitch;
+ d11 = vld1_u8(s2);
+ s2 += pitch;
+ d12 = vld1_u8(s1);
+ s1 += pitch;
+ d13 = vld1_u8(s2);
+ s2 += pitch;
+ d14 = vld1_u8(s1);
+ s1 += pitch;
+ d15 = vld1_u8(s2);
+ s2 += pitch;
+ d16 = vld1_u8(s1);
+ s1 += pitch;
+ d17 = vld1_u8(s2);
+ s2 += pitch;
+ d18 = vld1_u8(s1);
+ s1 += pitch;
+ d19 = vld1_u8(s2);
+ s2 += pitch;
+ d20 = vld1_u8(s1);
+ d21 = vld1_u8(s2);
+
+ q3 = vcombine_u8(d6, d7);
+ q4 = vcombine_u8(d8, d9);
+ q5 = vcombine_u8(d10, d11);
+ q6 = vcombine_u8(d12, d13);
+ q7 = vcombine_u8(d14, d15);
+ q8 = vcombine_u8(d16, d17);
+ q9 = vcombine_u8(d18, d19);
+ q10 = vcombine_u8(d20, d21);
+
+ q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
+ q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
+ q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
+ q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
+
+ q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
+ vreinterpretq_u16_u32(q2tmp2.val[0]));
+ q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
+ vreinterpretq_u16_u32(q2tmp3.val[0]));
+ q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
+ vreinterpretq_u16_u32(q2tmp2.val[1]));
+ q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
+ vreinterpretq_u16_u32(q2tmp3.val[1]));
+
+ q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
+ vreinterpretq_u8_u16(q2tmp5.val[0]));
+ q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
+ vreinterpretq_u8_u16(q2tmp5.val[1]));
+ q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
+ vreinterpretq_u8_u16(q2tmp7.val[0]));
+ q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
+ vreinterpretq_u8_u16(q2tmp7.val[1]));
+
+ q3 = q2tmp8.val[0];
+ q4 = q2tmp8.val[1];
+ q5 = q2tmp9.val[0];
+ q6 = q2tmp9.val[1];
+ q7 = q2tmp10.val[0];
+ q8 = q2tmp10.val[1];
+ q9 = q2tmp11.val[0];
+ q10 = q2tmp11.val[1];
+
+ vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
+ q10, &q4, &q5, &q6, &q7, &q8, &q9);
+
+ q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
+ q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
+ q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
+ q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
+
+ q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
+ vreinterpretq_u16_u32(q2tmp2.val[0]));
+ q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
+ vreinterpretq_u16_u32(q2tmp3.val[0]));
+ q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
+ vreinterpretq_u16_u32(q2tmp2.val[1]));
+ q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
+ vreinterpretq_u16_u32(q2tmp3.val[1]));
+
+ q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
+ vreinterpretq_u8_u16(q2tmp5.val[0]));
+ q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
+ vreinterpretq_u8_u16(q2tmp5.val[1]));
+ q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
+ vreinterpretq_u8_u16(q2tmp7.val[0]));
+ q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
+ vreinterpretq_u8_u16(q2tmp7.val[1]));
+
+ q3 = q2tmp8.val[0];
+ q4 = q2tmp8.val[1];
+ q5 = q2tmp9.val[0];
+ q6 = q2tmp9.val[1];
+ q7 = q2tmp10.val[0];
+ q8 = q2tmp10.val[1];
+ q9 = q2tmp11.val[0];
+ q10 = q2tmp11.val[1];
+
+ s1 -= 7 * pitch;
+ s2 -= 7 * pitch;
+
+ vst1_u8(s1, vget_low_u8(q3));
+ s1 += pitch;
+ vst1_u8(s2, vget_high_u8(q3));
+ s2 += pitch;
+ vst1_u8(s1, vget_low_u8(q4));
+ s1 += pitch;
+ vst1_u8(s2, vget_high_u8(q4));
+ s2 += pitch;
+ vst1_u8(s1, vget_low_u8(q5));
+ s1 += pitch;
+ vst1_u8(s2, vget_high_u8(q5));
+ s2 += pitch;
+ vst1_u8(s1, vget_low_u8(q6));
+ s1 += pitch;
+ vst1_u8(s2, vget_high_u8(q6));
+ s2 += pitch;
+ vst1_u8(s1, vget_low_u8(q7));
+ s1 += pitch;
+ vst1_u8(s2, vget_high_u8(q7));
+ s2 += pitch;
+ vst1_u8(s1, vget_low_u8(q8));
+ s1 += pitch;
+ vst1_u8(s2, vget_high_u8(q8));
+ s2 += pitch;
+ vst1_u8(s1, vget_low_u8(q9));
+ s1 += pitch;
+ vst1_u8(s2, vget_high_u8(q9));
+ s2 += pitch;
+ vst1_u8(s1, vget_low_u8(q10));
+ vst1_u8(s2, vget_high_u8(q10));
+ return;
+}
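+
+// The vtrnq cascades above (32-bit, then 16-bit, then 8-bit lanes) are
+// in-register 8x8 byte transposes, run on q registers so the low and high
+// halves transpose two 8x8 tiles at once: rows go in, columns come out, the
+// vertical edge is filtered as if it were horizontal, and the second cascade
+// transposes the result back. A scalar model of the index shuffle, assuming
+// row r of one tile sits in register r, kept out of the build:
+#if 0
+static void vtrn_cascade_sketch(unsigned char m[8][8]) {
+  int s, r, c;
+  for (s = 4; s >= 1; s >>= 1) {  // s=4: vtrnq_u32, s=2: vtrnq_u16, s=1: u8
+    for (r = 0; r < 8; ++r) {
+      for (c = 0; c < 8; ++c) {
+        if (!(r & s) && (c & s)) {  // swap s-wide blocks across the diagonal
+          const unsigned char t = m[r][c];
+          m[r][c] = m[r ^ s][c ^ s];
+          m[r ^ s][c ^ s] = t;
+        }
+      }
+    }
+  }
+  // m now holds the transpose of its original contents.
+}
+#endif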
+
+void vp8_mbloop_filter_vertical_edge_uv_neon(unsigned char *u, int pitch,
+ unsigned char blimit,
+ unsigned char limit,
+ unsigned char thresh,
+ unsigned char *v) {
+ unsigned char *us, *ud;
+ unsigned char *vs, *vd;
+ uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+ uint8x16_t q5, q6, q7, q8, q9, q10;
+ uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
+ uint8x8_t d15, d16, d17, d18, d19, d20, d21;
+ uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
+ uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
+ uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
+
+ qblimit = vdupq_n_u8(blimit);
+ qlimit = vdupq_n_u8(limit);
+ qthresh = vdupq_n_u8(thresh);
+
+ us = u - 4;
+ vs = v - 4;
+ d6 = vld1_u8(us);
+ us += pitch;
+ d7 = vld1_u8(vs);
+ vs += pitch;
+ d8 = vld1_u8(us);
+ us += pitch;
+ d9 = vld1_u8(vs);
+ vs += pitch;
+ d10 = vld1_u8(us);
+ us += pitch;
+ d11 = vld1_u8(vs);
+ vs += pitch;
+ d12 = vld1_u8(us);
+ us += pitch;
+ d13 = vld1_u8(vs);
+ vs += pitch;
+ d14 = vld1_u8(us);
+ us += pitch;
+ d15 = vld1_u8(vs);
+ vs += pitch;
+ d16 = vld1_u8(us);
+ us += pitch;
+ d17 = vld1_u8(vs);
+ vs += pitch;
+ d18 = vld1_u8(us);
+ us += pitch;
+ d19 = vld1_u8(vs);
+ vs += pitch;
+ d20 = vld1_u8(us);
+ d21 = vld1_u8(vs);
+
+ q3 = vcombine_u8(d6, d7);
+ q4 = vcombine_u8(d8, d9);
+ q5 = vcombine_u8(d10, d11);
+ q6 = vcombine_u8(d12, d13);
+ q7 = vcombine_u8(d14, d15);
+ q8 = vcombine_u8(d16, d17);
+ q9 = vcombine_u8(d18, d19);
+ q10 = vcombine_u8(d20, d21);
+
+ q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
+ q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
+ q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
+ q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
+
+ q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
+ vreinterpretq_u16_u32(q2tmp2.val[0]));
+ q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
+ vreinterpretq_u16_u32(q2tmp3.val[0]));
+ q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
+ vreinterpretq_u16_u32(q2tmp2.val[1]));
+ q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
+ vreinterpretq_u16_u32(q2tmp3.val[1]));
+
+ q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
+ vreinterpretq_u8_u16(q2tmp5.val[0]));
+ q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
+ vreinterpretq_u8_u16(q2tmp5.val[1]));
+ q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
+ vreinterpretq_u8_u16(q2tmp7.val[0]));
+ q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
+ vreinterpretq_u8_u16(q2tmp7.val[1]));
+
+ q3 = q2tmp8.val[0];
+ q4 = q2tmp8.val[1];
+ q5 = q2tmp9.val[0];
+ q6 = q2tmp9.val[1];
+ q7 = q2tmp10.val[0];
+ q8 = q2tmp10.val[1];
+ q9 = q2tmp11.val[0];
+ q10 = q2tmp11.val[1];
+
+ vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
+ q10, &q4, &q5, &q6, &q7, &q8, &q9);
+
+ q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
+ q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
+ q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
+ q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
+
+ q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
+ vreinterpretq_u16_u32(q2tmp2.val[0]));
+ q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
+ vreinterpretq_u16_u32(q2tmp3.val[0]));
+ q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
+ vreinterpretq_u16_u32(q2tmp2.val[1]));
+ q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
+ vreinterpretq_u16_u32(q2tmp3.val[1]));
+
+ q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
+ vreinterpretq_u8_u16(q2tmp5.val[0]));
+ q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
+ vreinterpretq_u8_u16(q2tmp5.val[1]));
+ q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
+ vreinterpretq_u8_u16(q2tmp7.val[0]));
+ q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
+ vreinterpretq_u8_u16(q2tmp7.val[1]));
+
+ q3 = q2tmp8.val[0];
+ q4 = q2tmp8.val[1];
+ q5 = q2tmp9.val[0];
+ q6 = q2tmp9.val[1];
+ q7 = q2tmp10.val[0];
+ q8 = q2tmp10.val[1];
+ q9 = q2tmp11.val[0];
+ q10 = q2tmp11.val[1];
+
+ ud = u - 4;
+ vst1_u8(ud, vget_low_u8(q3));
+ ud += pitch;
+ vst1_u8(ud, vget_low_u8(q4));
+ ud += pitch;
+ vst1_u8(ud, vget_low_u8(q5));
+ ud += pitch;
+ vst1_u8(ud, vget_low_u8(q6));
+ ud += pitch;
+ vst1_u8(ud, vget_low_u8(q7));
+ ud += pitch;
+ vst1_u8(ud, vget_low_u8(q8));
+ ud += pitch;
+ vst1_u8(ud, vget_low_u8(q9));
+ ud += pitch;
+ vst1_u8(ud, vget_low_u8(q10));
+
+ vd = v - 4;
+ vst1_u8(vd, vget_high_u8(q3));
+ vd += pitch;
+ vst1_u8(vd, vget_high_u8(q4));
+ vd += pitch;
+ vst1_u8(vd, vget_high_u8(q5));
+ vd += pitch;
+ vst1_u8(vd, vget_high_u8(q6));
+ vd += pitch;
+ vst1_u8(vd, vget_high_u8(q7));
+ vd += pitch;
+ vst1_u8(vd, vget_high_u8(q8));
+ vd += pitch;
+ vst1_u8(vd, vget_high_u8(q9));
+ vd += pitch;
+ vst1_u8(vd, vget_high_u8(q10));
+ return;
+}
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c
new file mode 100644
index 0000000000..2724ca236b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp8_rtcd.h"
+
+static const int16_t cospi8sqrt2minus1 = 20091;
+// 35468 exceeds INT16_MAX and gets converted to a negative number. Because of
+// the way it is used in vqdmulh, where the result is doubled, it can be divided
+// by 2 beforehand. This saves compensating for the negative value as well as
+// shifting the result.
+static const int16_t sinpi8sqrt2 = 35468 >> 1;
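+// An illustrative self-check of that identity, kept out of the build:
+// vqdmulh doubles its product, so for every int16 x,
+//   vqdmulh(x, 35468 >> 1) == (2 * x * 17734) >> 16 == (x * 35468) >> 16,
+// which is the scalar IDCT's multiply; |x| * 35468 < 2^31, so the saturating
+// doubling never actually saturates.
+#if 0
+static int16_t sqdmulh_model(int16_t a, int16_t b) {
+  return (int16_t)((2 * (int32_t)a * (int32_t)b) >> 16);
+}
+
+static int check_preshift(void) {
+  int32_t x;
+  for (x = -32768; x <= 32767; ++x) {
+    if (sqdmulh_model((int16_t)x, 35468 >> 1) != ((x * 35468) >> 16)) return 0;
+  }
+  return 1;  // the two formulations agree for all int16 inputs
+}
+#endif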
+
+void vp8_short_idct4x4llm_neon(int16_t *input, unsigned char *pred_ptr,
+ int pred_stride, unsigned char *dst_ptr,
+ int dst_stride) {
+ int i;
+ uint32x2_t d6u32 = vdup_n_u32(0);
+ uint8x8_t d1u8;
+ int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
+ uint16x8_t q1u16;
+ int16x8_t q1s16, q2s16, q3s16, q4s16;
+ int32x2x2_t v2tmp0, v2tmp1;
+ int16x4x2_t v2tmp2, v2tmp3;
+
+ d2 = vld1_s16(input);
+ d3 = vld1_s16(input + 4);
+ d4 = vld1_s16(input + 8);
+ d5 = vld1_s16(input + 12);
+
+ // 1st for loop
+ q1s16 = vcombine_s16(d2, d4); // d3/d4 swapped: pair rows 0 and 2
+ q2s16 = vcombine_s16(d3, d5); // pair rows 1 and 3
+
+ q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
+ q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);
+
+ d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // a1
+ d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // b1
+
+ q4s16 = vshrq_n_s16(q4s16, 1);
+
+ q4s16 = vqaddq_s16(q4s16, q2s16);
+
+ d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16)); // c1
+ d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16)); // d1
+
+ d2 = vqadd_s16(d12, d11);
+ d3 = vqadd_s16(d13, d10);
+ d4 = vqsub_s16(d13, d10);
+ d5 = vqsub_s16(d12, d11);
+
+ v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
+ v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
+ v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
+ vreinterpret_s16_s32(v2tmp1.val[0]));
+ v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
+ vreinterpret_s16_s32(v2tmp1.val[1]));
+
+ // 2nd for loop
+ q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]);
+ q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]);
+
+ q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
+ q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);
+
+ d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // a1
+ d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // b1
+
+ q4s16 = vshrq_n_s16(q4s16, 1);
+
+ q4s16 = vqaddq_s16(q4s16, q2s16);
+
+ d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16)); // c1
+ d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16)); // d1
+
+ d2 = vqadd_s16(d12, d11);
+ d3 = vqadd_s16(d13, d10);
+ d4 = vqsub_s16(d13, d10);
+ d5 = vqsub_s16(d12, d11);
+
+ d2 = vrshr_n_s16(d2, 3);
+ d3 = vrshr_n_s16(d3, 3);
+ d4 = vrshr_n_s16(d4, 3);
+ d5 = vrshr_n_s16(d5, 3);
+
+ v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
+ v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
+ v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
+ vreinterpret_s16_s32(v2tmp1.val[0]));
+ v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
+ vreinterpret_s16_s32(v2tmp1.val[1]));
+
+ q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]);
+ q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]);
+
+ // dc_only_idct_add
+ for (i = 0; i < 2; i++, q1s16 = q2s16) {
+ d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 0);
+ pred_ptr += pred_stride;
+ d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 1);
+ pred_ptr += pred_stride;
+
+ q1u16 = vaddw_u8(vreinterpretq_u16_s16(q1s16), vreinterpret_u8_u32(d6u32));
+ d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
+
+ vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0);
+ dst_ptr += dst_stride;
+ vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1);
+ dst_ptr += dst_stride;
+ }
+ return;
+}
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/sixtappredict_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/sixtappredict_neon.c
new file mode 100644
index 0000000000..ee3c281f0f
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/sixtappredict_neon.c
@@ -0,0 +1,1729 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <string.h>
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
+#include "vpx_dsp/arm/mem_neon.h"
+#include "vpx_ports/mem.h"
+
+static const int8_t vp8_sub_pel_filters[8][8] = {
+ { 0, 0, 128, 0, 0, 0, 0, 0 }, /* note that 1/8 pel positions are */
+ { 0, -6, 123, 12, -1, 0, 0, 0 }, /* just as per alpha -0.5 bicubic */
+ { 2, -11, 108, 36, -8, 1, 0, 0 }, /* New 1/4 pel 6 tap filter */
+ { 0, -9, 93, 50, -6, 0, 0, 0 },
+ { 3, -16, 77, 77, -16, 3, 0, 0 }, /* New 1/2 pel 6 tap filter */
+ { 0, -6, 50, 93, -9, 0, 0, 0 },
+ { 1, -8, 36, 108, -11, 2, 0, 0 }, /* New 1/4 pel 6 tap filter */
+ { 0, -1, 12, 123, -6, 0, 0, 0 },
+};
+
+// This table is derived from vp8/common/filter.c:vp8_sub_pel_filters.
+// Apply abs() to all the values. Elements 0, 2, 3, and 5 are always positive.
+// Elements 1 and 4 are either 0 or negative. The code accounts for this with
+// multiply/accumulates which either add or subtract as needed. The other
+// functions will be updated to use this table later.
+// It is also expanded to 8 elements to allow loading into 64 bit neon
+// registers.
+static const uint8_t abs_filters[8][8] = {
+ { 0, 0, 128, 0, 0, 0, 0, 0 }, { 0, 6, 123, 12, 1, 0, 0, 0 },
+ { 2, 11, 108, 36, 8, 1, 0, 0 }, { 0, 9, 93, 50, 6, 0, 0, 0 },
+ { 3, 16, 77, 77, 16, 3, 0, 0 }, { 0, 6, 50, 93, 9, 0, 0, 0 },
+ { 1, 8, 36, 108, 11, 2, 0, 0 }, { 0, 1, 12, 123, 6, 0, 0, 0 },
+};
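+
+// The unsigned table works because vmlal_u8/vmlsl_u8 pairs add the
+// always-positive taps (0, 2, 3 and 5) and subtract the two negated ones
+// (1 and 4), reproducing the signed six-tap sum before the vqrshrun_n_s16
+// calls below round by 64 and shift by 7. A scalar sketch of one output
+// pixel under this convention (src points two pixels left of the output),
+// kept out of the build:
+#if 0
+static unsigned char sixtap_pixel_sketch(const unsigned char *src,
+                                         const uint8_t f[8]) {
+  int sum = 0;
+  sum += f[0] * src[0];
+  sum -= f[1] * src[1];  // stored as abs(), so subtract
+  sum += f[2] * src[2];
+  sum += f[3] * src[3];
+  sum -= f[4] * src[4];  // stored as abs(), so subtract
+  sum += f[5] * src[5];
+  sum = (sum + 64) >> 7;  // VP8_FILTER_SHIFT == 7, round to nearest
+  return (unsigned char)(sum < 0 ? 0 : sum > 255 ? 255 : sum);
+}
+// e.g. dst[x] = sixtap_pixel_sketch(src + x - 2, abs_filters[xoffset]);
+#endif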
+
+static INLINE uint8x8_t load_and_shift(const unsigned char *a) {
+ return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));
+}
+
+static INLINE void filter_add_accumulate(const uint8x16_t a, const uint8x16_t b,
+ const uint8x8_t filter, uint16x8_t *c,
+ uint16x8_t *d) {
+ const uint32x2x2_t a_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a)),
+ vreinterpret_u32_u8(vget_high_u8(a)));
+ const uint32x2x2_t b_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b)),
+ vreinterpret_u32_u8(vget_high_u8(b)));
+ *c = vmlal_u8(*c, vreinterpret_u8_u32(a_shuf.val[0]), filter);
+ *d = vmlal_u8(*d, vreinterpret_u8_u32(b_shuf.val[0]), filter);
+}
+
+static INLINE void filter_sub_accumulate(const uint8x16_t a, const uint8x16_t b,
+ const uint8x8_t filter, uint16x8_t *c,
+ uint16x8_t *d) {
+ const uint32x2x2_t a_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a)),
+ vreinterpret_u32_u8(vget_high_u8(a)));
+ const uint32x2x2_t b_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b)),
+ vreinterpret_u32_u8(vget_high_u8(b)));
+ *c = vmlsl_u8(*c, vreinterpret_u8_u32(a_shuf.val[0]), filter);
+ *d = vmlsl_u8(*d, vreinterpret_u8_u32(b_shuf.val[0]), filter);
+}
+
+static INLINE void yonly4x4(const unsigned char *src, int src_stride,
+ int filter_offset, unsigned char *dst,
+ int dst_stride) {
+ uint8x8_t a0, a1, a2, a3, a4, a5, a6, a7, a8;
+ uint8x8_t b0, b1, b2, b3, b4, b5, b6, b7, b8;
+ uint16x8_t c0, c1, c2, c3;
+ int16x8_t d0, d1;
+ uint8x8_t e0, e1;
+
+ const uint8x8_t filter = vld1_u8(abs_filters[filter_offset]);
+ const uint8x8_t filter0 = vdup_lane_u8(filter, 0);
+ const uint8x8_t filter1 = vdup_lane_u8(filter, 1);
+ const uint8x8_t filter2 = vdup_lane_u8(filter, 2);
+ const uint8x8_t filter3 = vdup_lane_u8(filter, 3);
+ const uint8x8_t filter4 = vdup_lane_u8(filter, 4);
+ const uint8x8_t filter5 = vdup_lane_u8(filter, 5);
+
+ src -= src_stride * 2;
+ // Shift the even rows to allow using 'vext' to combine the vectors. armv8
+ // has vcopy_lane which would be interesting. This started as just a
+ // horrible workaround for clang adding alignment hints to 32bit loads:
+ // https://llvm.org/bugs/show_bug.cgi?id=24421
+ // But it turns out to be almost identical to casting the loads.
+ a0 = load_and_shift(src);
+ src += src_stride;
+ a1 = vld1_u8(src);
+ src += src_stride;
+ a2 = load_and_shift(src);
+ src += src_stride;
+ a3 = vld1_u8(src);
+ src += src_stride;
+ a4 = load_and_shift(src);
+ src += src_stride;
+ a5 = vld1_u8(src);
+ src += src_stride;
+ a6 = load_and_shift(src);
+ src += src_stride;
+ a7 = vld1_u8(src);
+ src += src_stride;
+ a8 = vld1_u8(src);
+
+ // Combine the rows so we can operate on 8 at a time.
+ b0 = vext_u8(a0, a1, 4);
+ b2 = vext_u8(a2, a3, 4);
+ b4 = vext_u8(a4, a5, 4);
+ b6 = vext_u8(a6, a7, 4);
+ b8 = a8;
+
+ // To keep with the 8-at-a-time theme, combine *alternate* rows. This
+ // allows combining the odd rows with the even.
+ b1 = vext_u8(b0, b2, 4);
+ b3 = vext_u8(b2, b4, 4);
+ b5 = vext_u8(b4, b6, 4);
+ b7 = vext_u8(b6, b8, 4);
+
+ // Multiply and expand to 16 bits.
+ c0 = vmull_u8(b0, filter0);
+ c1 = vmull_u8(b2, filter0);
+ c2 = vmull_u8(b5, filter5);
+ c3 = vmull_u8(b7, filter5);
+
+ // Multiply, subtract and accumulate for filters 1 and 4 (the negative
+ // ones).
+ c0 = vmlsl_u8(c0, b4, filter4);
+ c1 = vmlsl_u8(c1, b6, filter4);
+ c2 = vmlsl_u8(c2, b1, filter1);
+ c3 = vmlsl_u8(c3, b3, filter1);
+
+ // Add more positive ones. vmlal should really return a signed type.
+ // It's doing signed math internally, as evidenced by the fact we can do
+ // subtractions followed by more additions. Ideally we could use
+ // vqmlal/sl but that instruction doesn't exist. Might be able to
+ // shoehorn vqdmlal/vqdmlsl in here but it would take some effort.
+ c0 = vmlal_u8(c0, b2, filter2);
+ c1 = vmlal_u8(c1, b4, filter2);
+ c2 = vmlal_u8(c2, b3, filter3);
+ c3 = vmlal_u8(c3, b5, filter3);
+
+ // Use signed saturation math because vmlsl may have left some negative
+ // numbers in there.
+ d0 = vqaddq_s16(vreinterpretq_s16_u16(c2), vreinterpretq_s16_u16(c0));
+ d1 = vqaddq_s16(vreinterpretq_s16_u16(c3), vreinterpretq_s16_u16(c1));
+
+ // Use signed again because numbers like -200 need to be saturated to 0.
+ e0 = vqrshrun_n_s16(d0, 7);
+ e1 = vqrshrun_n_s16(d1, 7);
+
+ store_unaligned_u8q(dst, dst_stride, vcombine_u8(e0, e1));
+}
+
+void vp8_sixtap_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset,
+ unsigned char *dst_ptr, int dst_pitch) {
+ uint8x16_t s0, s1, s2, s3, s4;
+ uint64x2_t s01, s23;
+ // Variables to hold src[] elements for the given filter[]
+ uint8x8_t s0_f5, s1_f5, s2_f5, s3_f5, s4_f5;
+ uint8x8_t s4_f1, s4_f2, s4_f3, s4_f4;
+ uint8x16_t s01_f0, s23_f0;
+ uint64x2_t s01_f3, s23_f3;
+ uint32x2x2_t s01_f3_q, s23_f3_q, s01_f5_q, s23_f5_q;
+ // Accumulator variables.
+ uint16x8_t d0123, d4567, d89;
+ uint16x8_t d0123_a, d4567_a, d89_a;
+ int16x8_t e0123, e4567, e89;
+ // Second pass intermediates.
+ uint8x8_t b0, b1, b2, b3, b4, b5, b6, b7, b8;
+ uint16x8_t c0, c1, c2, c3;
+ int16x8_t d0, d1;
+ uint8x8_t e0, e1;
+ uint8x8_t filter, filter0, filter1, filter2, filter3, filter4, filter5;
+
+ if (xoffset == 0) { // Second pass only.
+ yonly4x4(src_ptr, src_pixels_per_line, yoffset, dst_ptr, dst_pitch);
+ return;
+ }
+
+ if (yoffset == 0) { // First pass only.
+ src_ptr -= 2;
+ } else { // Add context for the second pass. 2 extra lines on top.
+ src_ptr -= 2 + (src_pixels_per_line * 2);
+ }
+
+ filter = vld1_u8(abs_filters[xoffset]);
+ filter0 = vdup_lane_u8(filter, 0);
+ filter1 = vdup_lane_u8(filter, 1);
+ filter2 = vdup_lane_u8(filter, 2);
+ filter3 = vdup_lane_u8(filter, 3);
+ filter4 = vdup_lane_u8(filter, 4);
+ filter5 = vdup_lane_u8(filter, 5);
+
+ // 2 bytes of context, 4 bytes of src values, 3 bytes of context, 7 bytes of
+ // garbage. So much effort for that last single bit.
+ // The low values of each pair are for filter0.
+ s0 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ s1 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ s2 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ s3 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+
+ // Shift to extract values for filter[5]
+ // If src[] is 0, this puts:
+ // 3 4 5 6 7 8 9 10 in s0_f5
+ // Can't use vshr.u64 because it crosses the double word boundary.
+ s0_f5 = vext_u8(vget_low_u8(s0), vget_high_u8(s0), 5);
+ s1_f5 = vext_u8(vget_low_u8(s1), vget_high_u8(s1), 5);
+ s2_f5 = vext_u8(vget_low_u8(s2), vget_high_u8(s2), 5);
+ s3_f5 = vext_u8(vget_low_u8(s3), vget_high_u8(s3), 5);
+
+ s01_f0 = vcombine_u8(vget_low_u8(s0), vget_low_u8(s1));
+ s23_f0 = vcombine_u8(vget_low_u8(s2), vget_low_u8(s3));
+
+ s01_f5_q = vzip_u32(vreinterpret_u32_u8(s0_f5), vreinterpret_u32_u8(s1_f5));
+ s23_f5_q = vzip_u32(vreinterpret_u32_u8(s2_f5), vreinterpret_u32_u8(s3_f5));
+ d0123 = vmull_u8(vreinterpret_u8_u32(s01_f5_q.val[0]), filter5);
+ d4567 = vmull_u8(vreinterpret_u8_u32(s23_f5_q.val[0]), filter5);
+
+ // Keep original src data as 64 bits to simplify shifting and extracting.
+ s01 = vreinterpretq_u64_u8(s01_f0);
+ s23 = vreinterpretq_u64_u8(s23_f0);
+
+ // 3 4 5 6 * filter0
+ filter_add_accumulate(s01_f0, s23_f0, filter0, &d0123, &d4567);
+
+ // Shift over one to use -1, 0, 1, 2 for filter1
+ // -1 0 1 2 * filter1
+ filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 8)),
+ vreinterpretq_u8_u64(vshrq_n_u64(s23, 8)), filter1,
+ &d0123, &d4567);
+
+ // 2 3 4 5 * filter4
+ filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 32)),
+ vreinterpretq_u8_u64(vshrq_n_u64(s23, 32)), filter4,
+ &d0123, &d4567);
+
+ // 0 1 2 3 * filter2
+ filter_add_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 16)),
+ vreinterpretq_u8_u64(vshrq_n_u64(s23, 16)), filter2,
+ &d0123, &d4567);
+
+ // 1 2 3 4 * filter3
+ s01_f3 = vshrq_n_u64(s01, 24);
+ s23_f3 = vshrq_n_u64(s23, 24);
+ s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)),
+ vreinterpret_u32_u64(vget_high_u64(s01_f3)));
+ s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)),
+ vreinterpret_u32_u64(vget_high_u64(s23_f3)));
+  // Accumulate into separate registers so that saturating addition can be
+  // used.
+ d0123_a = vmull_u8(vreinterpret_u8_u32(s01_f3_q.val[0]), filter3);
+ d4567_a = vmull_u8(vreinterpret_u8_u32(s23_f3_q.val[0]), filter3);
+
+ e0123 =
+ vqaddq_s16(vreinterpretq_s16_u16(d0123), vreinterpretq_s16_u16(d0123_a));
+ e4567 =
+ vqaddq_s16(vreinterpretq_s16_u16(d4567), vreinterpretq_s16_u16(d4567_a));
+
+ // Shift and narrow.
+ b0 = vqrshrun_n_s16(e0123, 7);
+ b2 = vqrshrun_n_s16(e4567, 7);
+
+ if (yoffset == 0) { // firstpass_filter4x4_only
+ store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(b0, b2));
+ return;
+ }
+
+ // Load additional context when doing both filters.
+ s0 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ s1 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ s2 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ s3 = vld1q_u8(src_ptr);
+ src_ptr += src_pixels_per_line;
+ s4 = vld1q_u8(src_ptr);
+
+ s0_f5 = vext_u8(vget_low_u8(s0), vget_high_u8(s0), 5);
+ s1_f5 = vext_u8(vget_low_u8(s1), vget_high_u8(s1), 5);
+ s2_f5 = vext_u8(vget_low_u8(s2), vget_high_u8(s2), 5);
+ s3_f5 = vext_u8(vget_low_u8(s3), vget_high_u8(s3), 5);
+ s4_f5 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 5);
+
+ // 3 4 5 6 * filter0
+ s01_f0 = vcombine_u8(vget_low_u8(s0), vget_low_u8(s1));
+ s23_f0 = vcombine_u8(vget_low_u8(s2), vget_low_u8(s3));
+
+ s01_f5_q = vzip_u32(vreinterpret_u32_u8(s0_f5), vreinterpret_u32_u8(s1_f5));
+ s23_f5_q = vzip_u32(vreinterpret_u32_u8(s2_f5), vreinterpret_u32_u8(s3_f5));
+  // This time there are 20 pixels to filter instead of 16, so an extra run
+  // with a doubleword register is needed.
+ d0123 = vmull_u8(vreinterpret_u8_u32(s01_f5_q.val[0]), filter5);
+ d4567 = vmull_u8(vreinterpret_u8_u32(s23_f5_q.val[0]), filter5);
+ d89 = vmull_u8(s4_f5, filter5);
+
+ // Save a copy as u64 for shifting.
+ s01 = vreinterpretq_u64_u8(s01_f0);
+ s23 = vreinterpretq_u64_u8(s23_f0);
+
+ filter_add_accumulate(s01_f0, s23_f0, filter0, &d0123, &d4567);
+ d89 = vmlal_u8(d89, vget_low_u8(s4), filter0);
+
+ filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 8)),
+ vreinterpretq_u8_u64(vshrq_n_u64(s23, 8)), filter1,
+ &d0123, &d4567);
+ s4_f1 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 1);
+ d89 = vmlsl_u8(d89, s4_f1, filter1);
+
+ filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 32)),
+ vreinterpretq_u8_u64(vshrq_n_u64(s23, 32)), filter4,
+ &d0123, &d4567);
+ s4_f4 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 4);
+ d89 = vmlsl_u8(d89, s4_f4, filter4);
+
+ filter_add_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 16)),
+ vreinterpretq_u8_u64(vshrq_n_u64(s23, 16)), filter2,
+ &d0123, &d4567);
+ s4_f2 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 2);
+ d89 = vmlal_u8(d89, s4_f2, filter2);
+
+ s01_f3 = vshrq_n_u64(s01, 24);
+ s23_f3 = vshrq_n_u64(s23, 24);
+ s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)),
+ vreinterpret_u32_u64(vget_high_u64(s01_f3)));
+ s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)),
+ vreinterpret_u32_u64(vget_high_u64(s23_f3)));
+ s4_f3 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 3);
+ d0123_a = vmull_u8(vreinterpret_u8_u32(s01_f3_q.val[0]), filter3);
+ d4567_a = vmull_u8(vreinterpret_u8_u32(s23_f3_q.val[0]), filter3);
+ d89_a = vmull_u8(s4_f3, filter3);
+
+ e0123 =
+ vqaddq_s16(vreinterpretq_s16_u16(d0123), vreinterpretq_s16_u16(d0123_a));
+ e4567 =
+ vqaddq_s16(vreinterpretq_s16_u16(d4567), vreinterpretq_s16_u16(d4567_a));
+ e89 = vqaddq_s16(vreinterpretq_s16_u16(d89), vreinterpretq_s16_u16(d89_a));
+
+ b4 = vqrshrun_n_s16(e0123, 7);
+ b6 = vqrshrun_n_s16(e4567, 7);
+ b8 = vqrshrun_n_s16(e89, 7);
+
+ // Second pass: 4x4
+ filter = vld1_u8(abs_filters[yoffset]);
+ filter0 = vdup_lane_u8(filter, 0);
+ filter1 = vdup_lane_u8(filter, 1);
+ filter2 = vdup_lane_u8(filter, 2);
+ filter3 = vdup_lane_u8(filter, 3);
+ filter4 = vdup_lane_u8(filter, 4);
+ filter5 = vdup_lane_u8(filter, 5);
+
+ b1 = vext_u8(b0, b2, 4);
+ b3 = vext_u8(b2, b4, 4);
+ b5 = vext_u8(b4, b6, 4);
+ b7 = vext_u8(b6, b8, 4);
+
+ c0 = vmull_u8(b0, filter0);
+ c1 = vmull_u8(b2, filter0);
+ c2 = vmull_u8(b5, filter5);
+ c3 = vmull_u8(b7, filter5);
+
+ c0 = vmlsl_u8(c0, b4, filter4);
+ c1 = vmlsl_u8(c1, b6, filter4);
+ c2 = vmlsl_u8(c2, b1, filter1);
+ c3 = vmlsl_u8(c3, b3, filter1);
+
+ c0 = vmlal_u8(c0, b2, filter2);
+ c1 = vmlal_u8(c1, b4, filter2);
+ c2 = vmlal_u8(c2, b3, filter3);
+ c3 = vmlal_u8(c3, b5, filter3);
+
+ d0 = vqaddq_s16(vreinterpretq_s16_u16(c2), vreinterpretq_s16_u16(c0));
+ d1 = vqaddq_s16(vreinterpretq_s16_u16(c3), vreinterpretq_s16_u16(c1));
+
+ e0 = vqrshrun_n_s16(d0, 7);
+ e1 = vqrshrun_n_s16(d1, 7);
+
+ store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1));
+}
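+
+// Editor's note: a hedged scalar reference for the horizontal pass above (an
+// illustrative assumption, not part of the original change). `src` points at
+// the pixel being predicted and `f` is one row of absolute-value taps; taps
+// 1 and 4 are subtracted, which is why the NEON code pairs vabs with
+// vmlsl_u8 for those two taps.
+static unsigned char scalar_sixtap_horizontal(const unsigned char *src,
+                                              const unsigned char *f) {
+  int sum = src[-2] * f[0] - src[-1] * f[1] + src[0] * f[2] +
+            src[1] * f[3] - src[2] * f[4] + src[3] * f[5];
+  sum = (sum + 64) >> 7;  // rounding shift, as in vqrshrun_n_s16(..., 7)
+  if (sum < 0) sum = 0;
+  if (sum > 255) sum = 255;
+  return (unsigned char)sum;
+}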
+
+void vp8_sixtap_predict8x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset,
+ unsigned char *dst_ptr, int dst_pitch) {
+ unsigned char *src;
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
+ uint8x8_t d22u8, d23u8, d24u8, d25u8, d26u8;
+ uint8x8_t d27u8, d28u8, d29u8, d30u8, d31u8;
+ int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
+ uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16;
+ uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16;
+ int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16;
+ int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16;
+ uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8;
+
+ if (xoffset == 0) { // secondpass_filter8x4_only
+ // load second_pass filter
+ dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
+ d0s8 = vdup_lane_s8(dtmps8, 0);
+ d1s8 = vdup_lane_s8(dtmps8, 1);
+ d2s8 = vdup_lane_s8(dtmps8, 2);
+ d3s8 = vdup_lane_s8(dtmps8, 3);
+ d4s8 = vdup_lane_s8(dtmps8, 4);
+ d5s8 = vdup_lane_s8(dtmps8, 5);
+ d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
+ d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
+ d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
+ d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
+ d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
+ d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
+
+ // load src data
+ src = src_ptr - src_pixels_per_line * 2;
+ d22u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d23u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d24u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d25u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d26u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d27u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d28u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d29u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d30u8 = vld1_u8(src);
+
+ q3u16 = vmull_u8(d22u8, d0u8);
+ q4u16 = vmull_u8(d23u8, d0u8);
+ q5u16 = vmull_u8(d24u8, d0u8);
+ q6u16 = vmull_u8(d25u8, d0u8);
+
+ q3u16 = vmlsl_u8(q3u16, d23u8, d1u8);
+ q4u16 = vmlsl_u8(q4u16, d24u8, d1u8);
+ q5u16 = vmlsl_u8(q5u16, d25u8, d1u8);
+ q6u16 = vmlsl_u8(q6u16, d26u8, d1u8);
+
+ q3u16 = vmlsl_u8(q3u16, d26u8, d4u8);
+ q4u16 = vmlsl_u8(q4u16, d27u8, d4u8);
+ q5u16 = vmlsl_u8(q5u16, d28u8, d4u8);
+ q6u16 = vmlsl_u8(q6u16, d29u8, d4u8);
+
+ q3u16 = vmlal_u8(q3u16, d24u8, d2u8);
+ q4u16 = vmlal_u8(q4u16, d25u8, d2u8);
+ q5u16 = vmlal_u8(q5u16, d26u8, d2u8);
+ q6u16 = vmlal_u8(q6u16, d27u8, d2u8);
+
+ q3u16 = vmlal_u8(q3u16, d27u8, d5u8);
+ q4u16 = vmlal_u8(q4u16, d28u8, d5u8);
+ q5u16 = vmlal_u8(q5u16, d29u8, d5u8);
+ q6u16 = vmlal_u8(q6u16, d30u8, d5u8);
+
+ q7u16 = vmull_u8(d25u8, d3u8);
+ q8u16 = vmull_u8(d26u8, d3u8);
+ q9u16 = vmull_u8(d27u8, d3u8);
+ q10u16 = vmull_u8(d28u8, d3u8);
+
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+
+ q7s16 = vqaddq_s16(q7s16, q3s16);
+ q8s16 = vqaddq_s16(q8s16, q4s16);
+ q9s16 = vqaddq_s16(q9s16, q5s16);
+ q10s16 = vqaddq_s16(q10s16, q6s16);
+
+ d6u8 = vqrshrun_n_s16(q7s16, 7);
+ d7u8 = vqrshrun_n_s16(q8s16, 7);
+ d8u8 = vqrshrun_n_s16(q9s16, 7);
+ d9u8 = vqrshrun_n_s16(q10s16, 7);
+
+ vst1_u8(dst_ptr, d6u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d7u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d8u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d9u8);
+ return;
+ }
+
+ // load first_pass filter
+ dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
+ d0s8 = vdup_lane_s8(dtmps8, 0);
+ d1s8 = vdup_lane_s8(dtmps8, 1);
+ d2s8 = vdup_lane_s8(dtmps8, 2);
+ d3s8 = vdup_lane_s8(dtmps8, 3);
+ d4s8 = vdup_lane_s8(dtmps8, 4);
+ d5s8 = vdup_lane_s8(dtmps8, 5);
+ d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
+ d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
+ d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
+ d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
+ d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
+ d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
+
+  // First pass: output_height lines x output_width columns (9x8)
+  if (yoffset == 0)  // firstpass_filter8x4_only
+ src = src_ptr - 2;
+ else
+ src = src_ptr - 2 - (src_pixels_per_line * 2);
+ q3u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q4u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q5u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q6u8 = vld1q_u8(src);
+
+ q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
+ q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
+ q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
+ q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
+
+ d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
+ d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
+ d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
+ d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
+
+ q7u16 = vmlsl_u8(q7u16, d28u8, d1u8);
+ q8u16 = vmlsl_u8(q8u16, d29u8, d1u8);
+ q9u16 = vmlsl_u8(q9u16, d30u8, d1u8);
+ q10u16 = vmlsl_u8(q10u16, d31u8, d1u8);
+
+ d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
+ d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
+ d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
+ d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
+
+ q7u16 = vmlsl_u8(q7u16, d28u8, d4u8);
+ q8u16 = vmlsl_u8(q8u16, d29u8, d4u8);
+ q9u16 = vmlsl_u8(q9u16, d30u8, d4u8);
+ q10u16 = vmlsl_u8(q10u16, d31u8, d4u8);
+
+ d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
+ d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
+ d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
+ d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
+
+ q7u16 = vmlal_u8(q7u16, d28u8, d2u8);
+ q8u16 = vmlal_u8(q8u16, d29u8, d2u8);
+ q9u16 = vmlal_u8(q9u16, d30u8, d2u8);
+ q10u16 = vmlal_u8(q10u16, d31u8, d2u8);
+
+ d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
+ d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
+ d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
+ d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
+
+ q7u16 = vmlal_u8(q7u16, d28u8, d5u8);
+ q8u16 = vmlal_u8(q8u16, d29u8, d5u8);
+ q9u16 = vmlal_u8(q9u16, d30u8, d5u8);
+ q10u16 = vmlal_u8(q10u16, d31u8, d5u8);
+
+ d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
+ d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
+ d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
+ d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
+
+ q3u16 = vmull_u8(d28u8, d3u8);
+ q4u16 = vmull_u8(d29u8, d3u8);
+ q5u16 = vmull_u8(d30u8, d3u8);
+ q6u16 = vmull_u8(d31u8, d3u8);
+
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+
+ q7s16 = vqaddq_s16(q7s16, q3s16);
+ q8s16 = vqaddq_s16(q8s16, q4s16);
+ q9s16 = vqaddq_s16(q9s16, q5s16);
+ q10s16 = vqaddq_s16(q10s16, q6s16);
+
+ d22u8 = vqrshrun_n_s16(q7s16, 7);
+ d23u8 = vqrshrun_n_s16(q8s16, 7);
+ d24u8 = vqrshrun_n_s16(q9s16, 7);
+ d25u8 = vqrshrun_n_s16(q10s16, 7);
+
+ if (yoffset == 0) { // firstpass_filter8x4_only
+ vst1_u8(dst_ptr, d22u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d23u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d24u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d25u8);
+ return;
+ }
+
+  // First pass on the remaining 5 lines of data
+ src += src_pixels_per_line;
+ q3u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q4u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q5u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q6u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q7u8 = vld1q_u8(src);
+
+ q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
+ q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
+ q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
+ q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
+ q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8);
+
+ d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
+ d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
+ d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
+ d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
+ d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1);
+
+ q8u16 = vmlsl_u8(q8u16, d27u8, d1u8);
+ q9u16 = vmlsl_u8(q9u16, d28u8, d1u8);
+ q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
+ q11u16 = vmlsl_u8(q11u16, d30u8, d1u8);
+ q12u16 = vmlsl_u8(q12u16, d31u8, d1u8);
+
+ d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
+ d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
+ d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
+ d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
+ d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4);
+
+ q8u16 = vmlsl_u8(q8u16, d27u8, d4u8);
+ q9u16 = vmlsl_u8(q9u16, d28u8, d4u8);
+ q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
+ q11u16 = vmlsl_u8(q11u16, d30u8, d4u8);
+ q12u16 = vmlsl_u8(q12u16, d31u8, d4u8);
+
+ d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
+ d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
+ d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
+ d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
+ d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2);
+
+ q8u16 = vmlal_u8(q8u16, d27u8, d2u8);
+ q9u16 = vmlal_u8(q9u16, d28u8, d2u8);
+ q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
+ q11u16 = vmlal_u8(q11u16, d30u8, d2u8);
+ q12u16 = vmlal_u8(q12u16, d31u8, d2u8);
+
+ d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
+ d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
+ d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
+ d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
+ d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5);
+
+ q8u16 = vmlal_u8(q8u16, d27u8, d5u8);
+ q9u16 = vmlal_u8(q9u16, d28u8, d5u8);
+ q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
+ q11u16 = vmlal_u8(q11u16, d30u8, d5u8);
+ q12u16 = vmlal_u8(q12u16, d31u8, d5u8);
+
+ d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
+ d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
+ d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
+ d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
+ d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3);
+
+ q3u16 = vmull_u8(d27u8, d3u8);
+ q4u16 = vmull_u8(d28u8, d3u8);
+ q5u16 = vmull_u8(d29u8, d3u8);
+ q6u16 = vmull_u8(d30u8, d3u8);
+ q7u16 = vmull_u8(d31u8, d3u8);
+
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+ q11s16 = vreinterpretq_s16_u16(q11u16);
+ q12s16 = vreinterpretq_s16_u16(q12u16);
+
+ q8s16 = vqaddq_s16(q8s16, q3s16);
+ q9s16 = vqaddq_s16(q9s16, q4s16);
+ q10s16 = vqaddq_s16(q10s16, q5s16);
+ q11s16 = vqaddq_s16(q11s16, q6s16);
+ q12s16 = vqaddq_s16(q12s16, q7s16);
+
+ d26u8 = vqrshrun_n_s16(q8s16, 7);
+ d27u8 = vqrshrun_n_s16(q9s16, 7);
+ d28u8 = vqrshrun_n_s16(q10s16, 7);
+ d29u8 = vqrshrun_n_s16(q11s16, 7);
+ d30u8 = vqrshrun_n_s16(q12s16, 7);
+
+ // Second pass: 8x4
+ dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
+ d0s8 = vdup_lane_s8(dtmps8, 0);
+ d1s8 = vdup_lane_s8(dtmps8, 1);
+ d2s8 = vdup_lane_s8(dtmps8, 2);
+ d3s8 = vdup_lane_s8(dtmps8, 3);
+ d4s8 = vdup_lane_s8(dtmps8, 4);
+ d5s8 = vdup_lane_s8(dtmps8, 5);
+ d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
+ d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
+ d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
+ d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
+ d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
+ d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
+
+ q3u16 = vmull_u8(d22u8, d0u8);
+ q4u16 = vmull_u8(d23u8, d0u8);
+ q5u16 = vmull_u8(d24u8, d0u8);
+ q6u16 = vmull_u8(d25u8, d0u8);
+
+ q3u16 = vmlsl_u8(q3u16, d23u8, d1u8);
+ q4u16 = vmlsl_u8(q4u16, d24u8, d1u8);
+ q5u16 = vmlsl_u8(q5u16, d25u8, d1u8);
+ q6u16 = vmlsl_u8(q6u16, d26u8, d1u8);
+
+ q3u16 = vmlsl_u8(q3u16, d26u8, d4u8);
+ q4u16 = vmlsl_u8(q4u16, d27u8, d4u8);
+ q5u16 = vmlsl_u8(q5u16, d28u8, d4u8);
+ q6u16 = vmlsl_u8(q6u16, d29u8, d4u8);
+
+ q3u16 = vmlal_u8(q3u16, d24u8, d2u8);
+ q4u16 = vmlal_u8(q4u16, d25u8, d2u8);
+ q5u16 = vmlal_u8(q5u16, d26u8, d2u8);
+ q6u16 = vmlal_u8(q6u16, d27u8, d2u8);
+
+ q3u16 = vmlal_u8(q3u16, d27u8, d5u8);
+ q4u16 = vmlal_u8(q4u16, d28u8, d5u8);
+ q5u16 = vmlal_u8(q5u16, d29u8, d5u8);
+ q6u16 = vmlal_u8(q6u16, d30u8, d5u8);
+
+ q7u16 = vmull_u8(d25u8, d3u8);
+ q8u16 = vmull_u8(d26u8, d3u8);
+ q9u16 = vmull_u8(d27u8, d3u8);
+ q10u16 = vmull_u8(d28u8, d3u8);
+
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+
+ q7s16 = vqaddq_s16(q7s16, q3s16);
+ q8s16 = vqaddq_s16(q8s16, q4s16);
+ q9s16 = vqaddq_s16(q9s16, q5s16);
+ q10s16 = vqaddq_s16(q10s16, q6s16);
+
+ d6u8 = vqrshrun_n_s16(q7s16, 7);
+ d7u8 = vqrshrun_n_s16(q8s16, 7);
+ d8u8 = vqrshrun_n_s16(q9s16, 7);
+ d9u8 = vqrshrun_n_s16(q10s16, 7);
+
+ vst1_u8(dst_ptr, d6u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d7u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d8u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d9u8);
+}
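+
+// Editor's note (illustrative assumption): why the vld1_s8 / vabs_s8 /
+// vmlsl_u8 pattern above is equivalent to using the signed tap table
+// directly. Taps 1 and 4 of vp8_sub_pel_filters are never positive, so
+// multiply-accumulating the absolute values and subtracting those two taps
+// reproduces the signed sum:
+static int scalar_signed_tap_sum(const unsigned char *s,
+                                 const signed char *f) {
+  int i, sum = 0;
+  for (i = 0; i < 6; ++i) sum += s[i] * f[i];
+  return sum;  // equals |f| multiply-accumulate with subtraction at i == 1, 4
+}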
+
+void vp8_sixtap_predict8x8_neon(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset,
+ unsigned char *dst_ptr, int dst_pitch) {
+ unsigned char *src, *tmpp;
+ unsigned char tmp[64];
+ int i;
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
+ uint8x8_t d18u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8, d25u8;
+ uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
+ int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
+ uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16;
+ uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16;
+ int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16;
+ int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16;
+ uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q9u8, q10u8, q11u8, q12u8;
+
+ if (xoffset == 0) { // secondpass_filter8x8_only
+ // load second_pass filter
+ dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
+ d0s8 = vdup_lane_s8(dtmps8, 0);
+ d1s8 = vdup_lane_s8(dtmps8, 1);
+ d2s8 = vdup_lane_s8(dtmps8, 2);
+ d3s8 = vdup_lane_s8(dtmps8, 3);
+ d4s8 = vdup_lane_s8(dtmps8, 4);
+ d5s8 = vdup_lane_s8(dtmps8, 5);
+ d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
+ d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
+ d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
+ d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
+ d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
+ d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
+
+ // load src data
+ src = src_ptr - src_pixels_per_line * 2;
+ d18u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d19u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d20u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d21u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d22u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d23u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d24u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d25u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d26u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d27u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d28u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d29u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d30u8 = vld1_u8(src);
+
+ for (i = 2; i > 0; i--) {
+ q3u16 = vmull_u8(d18u8, d0u8);
+ q4u16 = vmull_u8(d19u8, d0u8);
+ q5u16 = vmull_u8(d20u8, d0u8);
+ q6u16 = vmull_u8(d21u8, d0u8);
+
+ q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
+ q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
+ q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
+ q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
+
+ q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
+ q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
+ q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
+ q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
+
+ q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
+ q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
+ q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
+ q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
+
+ q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
+ q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
+ q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
+ q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
+
+ q7u16 = vmull_u8(d21u8, d3u8);
+ q8u16 = vmull_u8(d22u8, d3u8);
+ q9u16 = vmull_u8(d23u8, d3u8);
+ q10u16 = vmull_u8(d24u8, d3u8);
+
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+
+ q7s16 = vqaddq_s16(q7s16, q3s16);
+ q8s16 = vqaddq_s16(q8s16, q4s16);
+ q9s16 = vqaddq_s16(q9s16, q5s16);
+ q10s16 = vqaddq_s16(q10s16, q6s16);
+
+ d6u8 = vqrshrun_n_s16(q7s16, 7);
+ d7u8 = vqrshrun_n_s16(q8s16, 7);
+ d8u8 = vqrshrun_n_s16(q9s16, 7);
+ d9u8 = vqrshrun_n_s16(q10s16, 7);
+
+ d18u8 = d22u8;
+ d19u8 = d23u8;
+ d20u8 = d24u8;
+ d21u8 = d25u8;
+ d22u8 = d26u8;
+ d23u8 = d27u8;
+ d24u8 = d28u8;
+ d25u8 = d29u8;
+ d26u8 = d30u8;
+
+ vst1_u8(dst_ptr, d6u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d7u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d8u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d9u8);
+ dst_ptr += dst_pitch;
+ }
+ return;
+ }
+
+ // load first_pass filter
+ dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
+ d0s8 = vdup_lane_s8(dtmps8, 0);
+ d1s8 = vdup_lane_s8(dtmps8, 1);
+ d2s8 = vdup_lane_s8(dtmps8, 2);
+ d3s8 = vdup_lane_s8(dtmps8, 3);
+ d4s8 = vdup_lane_s8(dtmps8, 4);
+ d5s8 = vdup_lane_s8(dtmps8, 5);
+ d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
+ d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
+ d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
+ d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
+ d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
+ d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
+
+  // First pass: output_height lines x output_width columns (13x8)
+  if (yoffset == 0)  // firstpass_filter8x8_only
+ src = src_ptr - 2;
+ else
+ src = src_ptr - 2 - (src_pixels_per_line * 2);
+
+ tmpp = tmp;
+ for (i = 2; i > 0; i--) {
+ q3u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q4u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q5u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q6u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+
+ __builtin_prefetch(src);
+ __builtin_prefetch(src + src_pixels_per_line);
+ __builtin_prefetch(src + src_pixels_per_line * 2);
+
+ q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
+ q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
+ q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
+ q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
+
+ d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
+ d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
+ d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
+ d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
+
+ q7u16 = vmlsl_u8(q7u16, d28u8, d1u8);
+ q8u16 = vmlsl_u8(q8u16, d29u8, d1u8);
+ q9u16 = vmlsl_u8(q9u16, d30u8, d1u8);
+ q10u16 = vmlsl_u8(q10u16, d31u8, d1u8);
+
+ d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
+ d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
+ d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
+ d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
+
+ q7u16 = vmlsl_u8(q7u16, d28u8, d4u8);
+ q8u16 = vmlsl_u8(q8u16, d29u8, d4u8);
+ q9u16 = vmlsl_u8(q9u16, d30u8, d4u8);
+ q10u16 = vmlsl_u8(q10u16, d31u8, d4u8);
+
+ d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
+ d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
+ d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
+ d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
+
+ q7u16 = vmlal_u8(q7u16, d28u8, d2u8);
+ q8u16 = vmlal_u8(q8u16, d29u8, d2u8);
+ q9u16 = vmlal_u8(q9u16, d30u8, d2u8);
+ q10u16 = vmlal_u8(q10u16, d31u8, d2u8);
+
+ d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
+ d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
+ d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
+ d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
+
+ q7u16 = vmlal_u8(q7u16, d28u8, d5u8);
+ q8u16 = vmlal_u8(q8u16, d29u8, d5u8);
+ q9u16 = vmlal_u8(q9u16, d30u8, d5u8);
+ q10u16 = vmlal_u8(q10u16, d31u8, d5u8);
+
+ d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
+ d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
+ d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
+ d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
+
+ q3u16 = vmull_u8(d28u8, d3u8);
+ q4u16 = vmull_u8(d29u8, d3u8);
+ q5u16 = vmull_u8(d30u8, d3u8);
+ q6u16 = vmull_u8(d31u8, d3u8);
+
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+
+ q7s16 = vqaddq_s16(q7s16, q3s16);
+ q8s16 = vqaddq_s16(q8s16, q4s16);
+ q9s16 = vqaddq_s16(q9s16, q5s16);
+ q10s16 = vqaddq_s16(q10s16, q6s16);
+
+ d22u8 = vqrshrun_n_s16(q7s16, 7);
+ d23u8 = vqrshrun_n_s16(q8s16, 7);
+ d24u8 = vqrshrun_n_s16(q9s16, 7);
+ d25u8 = vqrshrun_n_s16(q10s16, 7);
+
+    if (yoffset == 0) {  // firstpass_filter8x8_only
+ vst1_u8(dst_ptr, d22u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d23u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d24u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d25u8);
+ dst_ptr += dst_pitch;
+ } else {
+ vst1_u8(tmpp, d22u8);
+ tmpp += 8;
+ vst1_u8(tmpp, d23u8);
+ tmpp += 8;
+ vst1_u8(tmpp, d24u8);
+ tmpp += 8;
+ vst1_u8(tmpp, d25u8);
+ tmpp += 8;
+ }
+ }
+ if (yoffset == 0) return;
+
+  // First pass on the remaining 5 lines of data
+ q3u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q4u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q5u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q6u8 = vld1q_u8(src);
+ src += src_pixels_per_line;
+ q7u8 = vld1q_u8(src);
+
+ q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
+ q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
+ q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
+ q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
+ q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8);
+
+ d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
+ d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
+ d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
+ d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
+ d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1);
+
+ q8u16 = vmlsl_u8(q8u16, d27u8, d1u8);
+ q9u16 = vmlsl_u8(q9u16, d28u8, d1u8);
+ q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
+ q11u16 = vmlsl_u8(q11u16, d30u8, d1u8);
+ q12u16 = vmlsl_u8(q12u16, d31u8, d1u8);
+
+ d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
+ d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
+ d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
+ d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
+ d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4);
+
+ q8u16 = vmlsl_u8(q8u16, d27u8, d4u8);
+ q9u16 = vmlsl_u8(q9u16, d28u8, d4u8);
+ q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
+ q11u16 = vmlsl_u8(q11u16, d30u8, d4u8);
+ q12u16 = vmlsl_u8(q12u16, d31u8, d4u8);
+
+ d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
+ d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
+ d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
+ d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
+ d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2);
+
+ q8u16 = vmlal_u8(q8u16, d27u8, d2u8);
+ q9u16 = vmlal_u8(q9u16, d28u8, d2u8);
+ q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
+ q11u16 = vmlal_u8(q11u16, d30u8, d2u8);
+ q12u16 = vmlal_u8(q12u16, d31u8, d2u8);
+
+ d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
+ d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
+ d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
+ d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
+ d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5);
+
+ q8u16 = vmlal_u8(q8u16, d27u8, d5u8);
+ q9u16 = vmlal_u8(q9u16, d28u8, d5u8);
+ q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
+ q11u16 = vmlal_u8(q11u16, d30u8, d5u8);
+ q12u16 = vmlal_u8(q12u16, d31u8, d5u8);
+
+ d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
+ d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
+ d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
+ d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
+ d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3);
+
+ q3u16 = vmull_u8(d27u8, d3u8);
+ q4u16 = vmull_u8(d28u8, d3u8);
+ q5u16 = vmull_u8(d29u8, d3u8);
+ q6u16 = vmull_u8(d30u8, d3u8);
+ q7u16 = vmull_u8(d31u8, d3u8);
+
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+ q11s16 = vreinterpretq_s16_u16(q11u16);
+ q12s16 = vreinterpretq_s16_u16(q12u16);
+
+ q8s16 = vqaddq_s16(q8s16, q3s16);
+ q9s16 = vqaddq_s16(q9s16, q4s16);
+ q10s16 = vqaddq_s16(q10s16, q5s16);
+ q11s16 = vqaddq_s16(q11s16, q6s16);
+ q12s16 = vqaddq_s16(q12s16, q7s16);
+
+ d26u8 = vqrshrun_n_s16(q8s16, 7);
+ d27u8 = vqrshrun_n_s16(q9s16, 7);
+ d28u8 = vqrshrun_n_s16(q10s16, 7);
+ d29u8 = vqrshrun_n_s16(q11s16, 7);
+ d30u8 = vqrshrun_n_s16(q12s16, 7);
+
+ // Second pass: 8x8
+ dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
+ d0s8 = vdup_lane_s8(dtmps8, 0);
+ d1s8 = vdup_lane_s8(dtmps8, 1);
+ d2s8 = vdup_lane_s8(dtmps8, 2);
+ d3s8 = vdup_lane_s8(dtmps8, 3);
+ d4s8 = vdup_lane_s8(dtmps8, 4);
+ d5s8 = vdup_lane_s8(dtmps8, 5);
+ d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
+ d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
+ d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
+ d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
+ d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
+ d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
+
+ tmpp = tmp;
+ q9u8 = vld1q_u8(tmpp);
+ tmpp += 16;
+ q10u8 = vld1q_u8(tmpp);
+ tmpp += 16;
+ q11u8 = vld1q_u8(tmpp);
+ tmpp += 16;
+ q12u8 = vld1q_u8(tmpp);
+
+ d18u8 = vget_low_u8(q9u8);
+ d19u8 = vget_high_u8(q9u8);
+ d20u8 = vget_low_u8(q10u8);
+ d21u8 = vget_high_u8(q10u8);
+ d22u8 = vget_low_u8(q11u8);
+ d23u8 = vget_high_u8(q11u8);
+ d24u8 = vget_low_u8(q12u8);
+ d25u8 = vget_high_u8(q12u8);
+
+ for (i = 2; i > 0; i--) {
+ q3u16 = vmull_u8(d18u8, d0u8);
+ q4u16 = vmull_u8(d19u8, d0u8);
+ q5u16 = vmull_u8(d20u8, d0u8);
+ q6u16 = vmull_u8(d21u8, d0u8);
+
+ q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
+ q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
+ q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
+ q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
+
+ q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
+ q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
+ q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
+ q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
+
+ q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
+ q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
+ q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
+ q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
+
+ q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
+ q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
+ q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
+ q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
+
+ q7u16 = vmull_u8(d21u8, d3u8);
+ q8u16 = vmull_u8(d22u8, d3u8);
+ q9u16 = vmull_u8(d23u8, d3u8);
+ q10u16 = vmull_u8(d24u8, d3u8);
+
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+
+ q7s16 = vqaddq_s16(q7s16, q3s16);
+ q8s16 = vqaddq_s16(q8s16, q4s16);
+ q9s16 = vqaddq_s16(q9s16, q5s16);
+ q10s16 = vqaddq_s16(q10s16, q6s16);
+
+ d6u8 = vqrshrun_n_s16(q7s16, 7);
+ d7u8 = vqrshrun_n_s16(q8s16, 7);
+ d8u8 = vqrshrun_n_s16(q9s16, 7);
+ d9u8 = vqrshrun_n_s16(q10s16, 7);
+
+ d18u8 = d22u8;
+ d19u8 = d23u8;
+ d20u8 = d24u8;
+ d21u8 = d25u8;
+ d22u8 = d26u8;
+ d23u8 = d27u8;
+ d24u8 = d28u8;
+ d25u8 = d29u8;
+ d26u8 = d30u8;
+
+ vst1_u8(dst_ptr, d6u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d7u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d8u8);
+ dst_ptr += dst_pitch;
+ vst1_u8(dst_ptr, d9u8);
+ dst_ptr += dst_pitch;
+ }
+}
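+
+// Editor's note: a scalar model (an assumption added for illustration) of
+// the register rotation in the vertical loops above. After producing 4
+// output rows, the 9-row filter window slides down by 4; the NEON code
+// expresses this by copying d22..d30 into d18..d26 before the next
+// iteration.
+static void scalar_slide_window_by_4(const unsigned char *rows[9]) {
+  int i;
+  for (i = 0; i < 5; ++i) rows[i] = rows[i + 4];  // keep the newest 5 rows
+}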
+
+void vp8_sixtap_predict16x16_neon(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ unsigned char *src, *src_tmp, *dst, *tmpp;
+ unsigned char tmp[336];
+ int i, j;
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
+ uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d18u8, d19u8;
+ uint8x8_t d20u8, d21u8, d22u8, d23u8, d24u8, d25u8, d26u8, d27u8;
+ uint8x8_t d28u8, d29u8, d30u8, d31u8;
+ int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
+ uint8x16_t q3u8, q4u8;
+ uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16, q8u16, q9u16, q10u16;
+ uint16x8_t q11u16, q12u16, q13u16, q15u16;
+ int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16, q8s16, q9s16, q10s16;
+ int16x8_t q11s16, q12s16, q13s16, q15s16;
+
+  if (xoffset == 0) {  // secondpass_filter16x16_only
+ // load second_pass filter
+ dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
+ d0s8 = vdup_lane_s8(dtmps8, 0);
+ d1s8 = vdup_lane_s8(dtmps8, 1);
+ d2s8 = vdup_lane_s8(dtmps8, 2);
+ d3s8 = vdup_lane_s8(dtmps8, 3);
+ d4s8 = vdup_lane_s8(dtmps8, 4);
+ d5s8 = vdup_lane_s8(dtmps8, 5);
+ d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
+ d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
+ d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
+ d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
+ d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
+ d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
+
+ // load src data
+ src_tmp = src_ptr - src_pixels_per_line * 2;
+ for (i = 0; i < 2; ++i) {
+ src = src_tmp + i * 8;
+ dst = dst_ptr + i * 8;
+ d18u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d19u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d20u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d21u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d22u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ for (j = 0; j < 4; ++j) {
+ d23u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d24u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d25u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+ d26u8 = vld1_u8(src);
+ src += src_pixels_per_line;
+
+ q3u16 = vmull_u8(d18u8, d0u8);
+ q4u16 = vmull_u8(d19u8, d0u8);
+ q5u16 = vmull_u8(d20u8, d0u8);
+ q6u16 = vmull_u8(d21u8, d0u8);
+
+ q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
+ q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
+ q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
+ q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
+
+ q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
+ q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
+ q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
+ q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
+
+ q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
+ q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
+ q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
+ q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
+
+ q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
+ q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
+ q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
+ q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
+
+ q7u16 = vmull_u8(d21u8, d3u8);
+ q8u16 = vmull_u8(d22u8, d3u8);
+ q9u16 = vmull_u8(d23u8, d3u8);
+ q10u16 = vmull_u8(d24u8, d3u8);
+
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+
+ q7s16 = vqaddq_s16(q7s16, q3s16);
+ q8s16 = vqaddq_s16(q8s16, q4s16);
+ q9s16 = vqaddq_s16(q9s16, q5s16);
+ q10s16 = vqaddq_s16(q10s16, q6s16);
+
+ d6u8 = vqrshrun_n_s16(q7s16, 7);
+ d7u8 = vqrshrun_n_s16(q8s16, 7);
+ d8u8 = vqrshrun_n_s16(q9s16, 7);
+ d9u8 = vqrshrun_n_s16(q10s16, 7);
+
+ d18u8 = d22u8;
+ d19u8 = d23u8;
+ d20u8 = d24u8;
+ d21u8 = d25u8;
+ d22u8 = d26u8;
+
+ vst1_u8(dst, d6u8);
+ dst += dst_pitch;
+ vst1_u8(dst, d7u8);
+ dst += dst_pitch;
+ vst1_u8(dst, d8u8);
+ dst += dst_pitch;
+ vst1_u8(dst, d9u8);
+ dst += dst_pitch;
+ }
+ }
+ return;
+ }
+
+ // load first_pass filter
+ dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
+ d0s8 = vdup_lane_s8(dtmps8, 0);
+ d1s8 = vdup_lane_s8(dtmps8, 1);
+ d2s8 = vdup_lane_s8(dtmps8, 2);
+ d3s8 = vdup_lane_s8(dtmps8, 3);
+ d4s8 = vdup_lane_s8(dtmps8, 4);
+ d5s8 = vdup_lane_s8(dtmps8, 5);
+ d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
+ d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
+ d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
+ d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
+ d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
+ d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
+
+  // First pass: output_height lines x output_width columns (21x16)
+  if (yoffset == 0) {  // firstpass_filter16x16_only
+ src = src_ptr - 2;
+ dst = dst_ptr;
+ for (i = 0; i < 8; ++i) {
+ d6u8 = vld1_u8(src);
+ d7u8 = vld1_u8(src + 8);
+ d8u8 = vld1_u8(src + 16);
+ src += src_pixels_per_line;
+ d9u8 = vld1_u8(src);
+ d10u8 = vld1_u8(src + 8);
+ d11u8 = vld1_u8(src + 16);
+ src += src_pixels_per_line;
+
+ __builtin_prefetch(src);
+ __builtin_prefetch(src + src_pixels_per_line);
+
+ q6u16 = vmull_u8(d6u8, d0u8);
+ q7u16 = vmull_u8(d7u8, d0u8);
+ q8u16 = vmull_u8(d9u8, d0u8);
+ q9u16 = vmull_u8(d10u8, d0u8);
+
+ d20u8 = vext_u8(d6u8, d7u8, 1);
+ d21u8 = vext_u8(d9u8, d10u8, 1);
+ d22u8 = vext_u8(d7u8, d8u8, 1);
+ d23u8 = vext_u8(d10u8, d11u8, 1);
+ d24u8 = vext_u8(d6u8, d7u8, 4);
+ d25u8 = vext_u8(d9u8, d10u8, 4);
+ d26u8 = vext_u8(d7u8, d8u8, 4);
+ d27u8 = vext_u8(d10u8, d11u8, 4);
+ d28u8 = vext_u8(d6u8, d7u8, 5);
+ d29u8 = vext_u8(d9u8, d10u8, 5);
+
+ q6u16 = vmlsl_u8(q6u16, d20u8, d1u8);
+ q8u16 = vmlsl_u8(q8u16, d21u8, d1u8);
+ q7u16 = vmlsl_u8(q7u16, d22u8, d1u8);
+ q9u16 = vmlsl_u8(q9u16, d23u8, d1u8);
+ q6u16 = vmlsl_u8(q6u16, d24u8, d4u8);
+ q8u16 = vmlsl_u8(q8u16, d25u8, d4u8);
+ q7u16 = vmlsl_u8(q7u16, d26u8, d4u8);
+ q9u16 = vmlsl_u8(q9u16, d27u8, d4u8);
+ q6u16 = vmlal_u8(q6u16, d28u8, d5u8);
+ q8u16 = vmlal_u8(q8u16, d29u8, d5u8);
+
+ d20u8 = vext_u8(d7u8, d8u8, 5);
+ d21u8 = vext_u8(d10u8, d11u8, 5);
+ d22u8 = vext_u8(d6u8, d7u8, 2);
+ d23u8 = vext_u8(d9u8, d10u8, 2);
+ d24u8 = vext_u8(d7u8, d8u8, 2);
+ d25u8 = vext_u8(d10u8, d11u8, 2);
+ d26u8 = vext_u8(d6u8, d7u8, 3);
+ d27u8 = vext_u8(d9u8, d10u8, 3);
+ d28u8 = vext_u8(d7u8, d8u8, 3);
+ d29u8 = vext_u8(d10u8, d11u8, 3);
+
+ q7u16 = vmlal_u8(q7u16, d20u8, d5u8);
+ q9u16 = vmlal_u8(q9u16, d21u8, d5u8);
+ q6u16 = vmlal_u8(q6u16, d22u8, d2u8);
+ q8u16 = vmlal_u8(q8u16, d23u8, d2u8);
+ q7u16 = vmlal_u8(q7u16, d24u8, d2u8);
+ q9u16 = vmlal_u8(q9u16, d25u8, d2u8);
+
+ q10u16 = vmull_u8(d26u8, d3u8);
+ q11u16 = vmull_u8(d27u8, d3u8);
+ q12u16 = vmull_u8(d28u8, d3u8);
+ q15u16 = vmull_u8(d29u8, d3u8);
+
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+ q11s16 = vreinterpretq_s16_u16(q11u16);
+ q12s16 = vreinterpretq_s16_u16(q12u16);
+ q15s16 = vreinterpretq_s16_u16(q15u16);
+
+ q6s16 = vqaddq_s16(q6s16, q10s16);
+ q8s16 = vqaddq_s16(q8s16, q11s16);
+ q7s16 = vqaddq_s16(q7s16, q12s16);
+ q9s16 = vqaddq_s16(q9s16, q15s16);
+
+ d6u8 = vqrshrun_n_s16(q6s16, 7);
+ d7u8 = vqrshrun_n_s16(q7s16, 7);
+ d8u8 = vqrshrun_n_s16(q8s16, 7);
+ d9u8 = vqrshrun_n_s16(q9s16, 7);
+
+ q3u8 = vcombine_u8(d6u8, d7u8);
+ q4u8 = vcombine_u8(d8u8, d9u8);
+ vst1q_u8(dst, q3u8);
+ dst += dst_pitch;
+ vst1q_u8(dst, q4u8);
+ dst += dst_pitch;
+ }
+ return;
+ }
+
+ src = src_ptr - 2 - src_pixels_per_line * 2;
+ tmpp = tmp;
+ for (i = 0; i < 7; ++i) {
+ d6u8 = vld1_u8(src);
+ d7u8 = vld1_u8(src + 8);
+ d8u8 = vld1_u8(src + 16);
+ src += src_pixels_per_line;
+ d9u8 = vld1_u8(src);
+ d10u8 = vld1_u8(src + 8);
+ d11u8 = vld1_u8(src + 16);
+ src += src_pixels_per_line;
+ d12u8 = vld1_u8(src);
+ d13u8 = vld1_u8(src + 8);
+    // Only 5 pixels are needed here, so start the load at an offset to
+    // avoid a potential out-of-bounds read.
+ d14u8 = vld1_u8(src + 13);
+ d14u8 = vext_u8(d14u8, d14u8, 3);
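+    // Editor's note: the 8-byte load starts at src + 13 so it ends at byte
+    // 20; rotating by 3 with vext then places bytes 16..20 (the five pixels
+    // actually used) in the low lanes of d14u8.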
+ src += src_pixels_per_line;
+
+ __builtin_prefetch(src);
+ __builtin_prefetch(src + src_pixels_per_line);
+ __builtin_prefetch(src + src_pixels_per_line * 2);
+
+ q8u16 = vmull_u8(d6u8, d0u8);
+ q9u16 = vmull_u8(d7u8, d0u8);
+ q10u16 = vmull_u8(d9u8, d0u8);
+ q11u16 = vmull_u8(d10u8, d0u8);
+ q12u16 = vmull_u8(d12u8, d0u8);
+ q13u16 = vmull_u8(d13u8, d0u8);
+
+ d28u8 = vext_u8(d6u8, d7u8, 1);
+ d29u8 = vext_u8(d9u8, d10u8, 1);
+ d30u8 = vext_u8(d12u8, d13u8, 1);
+ q8u16 = vmlsl_u8(q8u16, d28u8, d1u8);
+ q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
+ q12u16 = vmlsl_u8(q12u16, d30u8, d1u8);
+ d28u8 = vext_u8(d7u8, d8u8, 1);
+ d29u8 = vext_u8(d10u8, d11u8, 1);
+ d30u8 = vext_u8(d13u8, d14u8, 1);
+ q9u16 = vmlsl_u8(q9u16, d28u8, d1u8);
+ q11u16 = vmlsl_u8(q11u16, d29u8, d1u8);
+ q13u16 = vmlsl_u8(q13u16, d30u8, d1u8);
+
+ d28u8 = vext_u8(d6u8, d7u8, 4);
+ d29u8 = vext_u8(d9u8, d10u8, 4);
+ d30u8 = vext_u8(d12u8, d13u8, 4);
+ q8u16 = vmlsl_u8(q8u16, d28u8, d4u8);
+ q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
+ q12u16 = vmlsl_u8(q12u16, d30u8, d4u8);
+ d28u8 = vext_u8(d7u8, d8u8, 4);
+ d29u8 = vext_u8(d10u8, d11u8, 4);
+ d30u8 = vext_u8(d13u8, d14u8, 4);
+ q9u16 = vmlsl_u8(q9u16, d28u8, d4u8);
+ q11u16 = vmlsl_u8(q11u16, d29u8, d4u8);
+ q13u16 = vmlsl_u8(q13u16, d30u8, d4u8);
+
+ d28u8 = vext_u8(d6u8, d7u8, 5);
+ d29u8 = vext_u8(d9u8, d10u8, 5);
+ d30u8 = vext_u8(d12u8, d13u8, 5);
+ q8u16 = vmlal_u8(q8u16, d28u8, d5u8);
+ q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
+ q12u16 = vmlal_u8(q12u16, d30u8, d5u8);
+ d28u8 = vext_u8(d7u8, d8u8, 5);
+ d29u8 = vext_u8(d10u8, d11u8, 5);
+ d30u8 = vext_u8(d13u8, d14u8, 5);
+ q9u16 = vmlal_u8(q9u16, d28u8, d5u8);
+ q11u16 = vmlal_u8(q11u16, d29u8, d5u8);
+ q13u16 = vmlal_u8(q13u16, d30u8, d5u8);
+
+ d28u8 = vext_u8(d6u8, d7u8, 2);
+ d29u8 = vext_u8(d9u8, d10u8, 2);
+ d30u8 = vext_u8(d12u8, d13u8, 2);
+ q8u16 = vmlal_u8(q8u16, d28u8, d2u8);
+ q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
+ q12u16 = vmlal_u8(q12u16, d30u8, d2u8);
+ d28u8 = vext_u8(d7u8, d8u8, 2);
+ d29u8 = vext_u8(d10u8, d11u8, 2);
+ d30u8 = vext_u8(d13u8, d14u8, 2);
+ q9u16 = vmlal_u8(q9u16, d28u8, d2u8);
+ q11u16 = vmlal_u8(q11u16, d29u8, d2u8);
+ q13u16 = vmlal_u8(q13u16, d30u8, d2u8);
+
+ d28u8 = vext_u8(d6u8, d7u8, 3);
+ d29u8 = vext_u8(d9u8, d10u8, 3);
+ d30u8 = vext_u8(d12u8, d13u8, 3);
+ d15u8 = vext_u8(d7u8, d8u8, 3);
+ d31u8 = vext_u8(d10u8, d11u8, 3);
+ d6u8 = vext_u8(d13u8, d14u8, 3);
+ q4u16 = vmull_u8(d28u8, d3u8);
+ q5u16 = vmull_u8(d29u8, d3u8);
+ q6u16 = vmull_u8(d30u8, d3u8);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+ q12s16 = vreinterpretq_s16_u16(q12u16);
+ q8s16 = vqaddq_s16(q8s16, q4s16);
+ q10s16 = vqaddq_s16(q10s16, q5s16);
+ q12s16 = vqaddq_s16(q12s16, q6s16);
+
+ q6u16 = vmull_u8(d15u8, d3u8);
+ q7u16 = vmull_u8(d31u8, d3u8);
+ q3u16 = vmull_u8(d6u8, d3u8);
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q11s16 = vreinterpretq_s16_u16(q11u16);
+ q13s16 = vreinterpretq_s16_u16(q13u16);
+ q9s16 = vqaddq_s16(q9s16, q6s16);
+ q11s16 = vqaddq_s16(q11s16, q7s16);
+ q13s16 = vqaddq_s16(q13s16, q3s16);
+
+ d6u8 = vqrshrun_n_s16(q8s16, 7);
+ d7u8 = vqrshrun_n_s16(q9s16, 7);
+ d8u8 = vqrshrun_n_s16(q10s16, 7);
+ d9u8 = vqrshrun_n_s16(q11s16, 7);
+ d10u8 = vqrshrun_n_s16(q12s16, 7);
+ d11u8 = vqrshrun_n_s16(q13s16, 7);
+
+ vst1_u8(tmpp, d6u8);
+ tmpp += 8;
+ vst1_u8(tmpp, d7u8);
+ tmpp += 8;
+ vst1_u8(tmpp, d8u8);
+ tmpp += 8;
+ vst1_u8(tmpp, d9u8);
+ tmpp += 8;
+ vst1_u8(tmpp, d10u8);
+ tmpp += 8;
+ vst1_u8(tmpp, d11u8);
+ tmpp += 8;
+ }
+
+ // Second pass: 16x16
+ dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
+ d0s8 = vdup_lane_s8(dtmps8, 0);
+ d1s8 = vdup_lane_s8(dtmps8, 1);
+ d2s8 = vdup_lane_s8(dtmps8, 2);
+ d3s8 = vdup_lane_s8(dtmps8, 3);
+ d4s8 = vdup_lane_s8(dtmps8, 4);
+ d5s8 = vdup_lane_s8(dtmps8, 5);
+ d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
+ d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
+ d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
+ d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
+ d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
+ d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
+
+ for (i = 0; i < 2; ++i) {
+ dst = dst_ptr + 8 * i;
+ tmpp = tmp + 8 * i;
+ d18u8 = vld1_u8(tmpp);
+ tmpp += 16;
+ d19u8 = vld1_u8(tmpp);
+ tmpp += 16;
+ d20u8 = vld1_u8(tmpp);
+ tmpp += 16;
+ d21u8 = vld1_u8(tmpp);
+ tmpp += 16;
+ d22u8 = vld1_u8(tmpp);
+ tmpp += 16;
+ for (j = 0; j < 4; ++j) {
+ d23u8 = vld1_u8(tmpp);
+ tmpp += 16;
+ d24u8 = vld1_u8(tmpp);
+ tmpp += 16;
+ d25u8 = vld1_u8(tmpp);
+ tmpp += 16;
+ d26u8 = vld1_u8(tmpp);
+ tmpp += 16;
+
+ q3u16 = vmull_u8(d18u8, d0u8);
+ q4u16 = vmull_u8(d19u8, d0u8);
+ q5u16 = vmull_u8(d20u8, d0u8);
+ q6u16 = vmull_u8(d21u8, d0u8);
+
+ q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
+ q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
+ q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
+ q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
+
+ q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
+ q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
+ q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
+ q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
+
+ q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
+ q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
+ q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
+ q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
+
+ q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
+ q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
+ q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
+ q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
+
+ q7u16 = vmull_u8(d21u8, d3u8);
+ q8u16 = vmull_u8(d22u8, d3u8);
+ q9u16 = vmull_u8(d23u8, d3u8);
+ q10u16 = vmull_u8(d24u8, d3u8);
+
+ q3s16 = vreinterpretq_s16_u16(q3u16);
+ q4s16 = vreinterpretq_s16_u16(q4u16);
+ q5s16 = vreinterpretq_s16_u16(q5u16);
+ q6s16 = vreinterpretq_s16_u16(q6u16);
+ q7s16 = vreinterpretq_s16_u16(q7u16);
+ q8s16 = vreinterpretq_s16_u16(q8u16);
+ q9s16 = vreinterpretq_s16_u16(q9u16);
+ q10s16 = vreinterpretq_s16_u16(q10u16);
+
+ q7s16 = vqaddq_s16(q7s16, q3s16);
+ q8s16 = vqaddq_s16(q8s16, q4s16);
+ q9s16 = vqaddq_s16(q9s16, q5s16);
+ q10s16 = vqaddq_s16(q10s16, q6s16);
+
+ d6u8 = vqrshrun_n_s16(q7s16, 7);
+ d7u8 = vqrshrun_n_s16(q8s16, 7);
+ d8u8 = vqrshrun_n_s16(q9s16, 7);
+ d9u8 = vqrshrun_n_s16(q10s16, 7);
+
+ d18u8 = d22u8;
+ d19u8 = d23u8;
+ d20u8 = d24u8;
+ d21u8 = d25u8;
+ d22u8 = d26u8;
+
+ vst1_u8(dst, d6u8);
+ dst += dst_pitch;
+ vst1_u8(dst, d7u8);
+ dst += dst_pitch;
+ vst1_u8(dst, d8u8);
+ dst += dst_pitch;
+ vst1_u8(dst, d9u8);
+ dst += dst_pitch;
+ }
+ }
+}
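+
+// Editor's note (hypothetical names, added for illustration): the tmp[336]
+// buffer above is sized for the full two-pass case, 16 output rows plus 5
+// rows of vertical filter context, each 16 pixels wide.
+enum { SIXTAP_TMP_ROWS = 16 + 5, SIXTAP_TMP_SIZE = SIXTAP_TMP_ROWS * 16 };
+// SIXTAP_TMP_SIZE == 336, matching `unsigned char tmp[336]` above.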
diff --git a/media/libvpx/libvpx/vp8/common/arm/neon/vp8_loopfilter_neon.c b/media/libvpx/libvpx/vp8/common/arm/neon/vp8_loopfilter_neon.c
new file mode 100644
index 0000000000..ebc004a048
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/arm/neon/vp8_loopfilter_neon.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vpx_config.h"
+#include "vp8/common/arm/loopfilter_arm.h"
+#include "vpx_ports/arm.h"
+
+static INLINE void vp8_loop_filter_neon(uint8x16_t qblimit, // flimit
+ uint8x16_t qlimit, // limit
+ uint8x16_t qthresh, // thresh
+ uint8x16_t q3, // p3
+ uint8x16_t q4, // p2
+ uint8x16_t q5, // p1
+ uint8x16_t q6, // p0
+ uint8x16_t q7, // q0
+ uint8x16_t q8, // q1
+ uint8x16_t q9, // q2
+ uint8x16_t q10, // q3
+ uint8x16_t *q5r, // p1
+ uint8x16_t *q6r, // p0
+ uint8x16_t *q7r, // q0
+ uint8x16_t *q8r) { // q1
+ uint8x16_t q0u8, q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8;
+ int16x8_t q2s16, q11s16;
+ uint16x8_t q4u16;
+ int8x16_t q1s8, q2s8, q10s8, q11s8, q12s8, q13s8;
+ int8x8_t d2s8, d3s8;
+
+ q11u8 = vabdq_u8(q3, q4);
+ q12u8 = vabdq_u8(q4, q5);
+ q13u8 = vabdq_u8(q5, q6);
+ q14u8 = vabdq_u8(q8, q7);
+ q3 = vabdq_u8(q9, q8);
+ q4 = vabdq_u8(q10, q9);
+
+ q11u8 = vmaxq_u8(q11u8, q12u8);
+ q12u8 = vmaxq_u8(q13u8, q14u8);
+ q3 = vmaxq_u8(q3, q4);
+ q15u8 = vmaxq_u8(q11u8, q12u8);
+
+ q9 = vabdq_u8(q6, q7);
+
+ // vp8_hevmask
+ q13u8 = vcgtq_u8(q13u8, qthresh);
+ q14u8 = vcgtq_u8(q14u8, qthresh);
+ q15u8 = vmaxq_u8(q15u8, q3);
+
+ q2u8 = vabdq_u8(q5, q8);
+ q9 = vqaddq_u8(q9, q9);
+
+ q15u8 = vcgeq_u8(qlimit, q15u8);
+
+ // vp8_filter() function
+ // convert to signed
+ q10 = vdupq_n_u8(0x80);
+ q8 = veorq_u8(q8, q10);
+ q7 = veorq_u8(q7, q10);
+ q6 = veorq_u8(q6, q10);
+ q5 = veorq_u8(q5, q10);
+
+ q2u8 = vshrq_n_u8(q2u8, 1);
+ q9 = vqaddq_u8(q9, q2u8);
+
+ q10 = vdupq_n_u8(3);
+
+ q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
+ vget_low_s8(vreinterpretq_s8_u8(q6)));
+ q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
+ vget_high_s8(vreinterpretq_s8_u8(q6)));
+
+ q9 = vcgeq_u8(qblimit, q9);
+
+ q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8));
+
+ q14u8 = vorrq_u8(q13u8, q14u8);
+
+ q4u16 = vmovl_u8(vget_low_u8(q10));
+ q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16));
+ q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16));
+
+ q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8);
+ q15u8 = vandq_u8(q15u8, q9);
+
+ q1s8 = vreinterpretq_s8_u8(q1u8);
+ q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
+ q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8));
+
+ q9 = vdupq_n_u8(4);
+  // vp8_filter = clamp(vp8_filter + 3 * (qs0 - ps0))
+ d2s8 = vqmovn_s16(q2s16);
+ d3s8 = vqmovn_s16(q11s16);
+ q1s8 = vcombine_s8(d2s8, d3s8);
+ q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8);
+ q1s8 = vreinterpretq_s8_u8(q1u8);
+
+ q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q10));
+ q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9));
+ q2s8 = vshrq_n_s8(q2s8, 3);
+ q1s8 = vshrq_n_s8(q1s8, 3);
+
+ q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8);
+ q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8);
+
+ q1s8 = vrshrq_n_s8(q1s8, 1);
+ q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
+
+ q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
+ q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8);
+
+ q0u8 = vdupq_n_u8(0x80);
+ *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q0u8);
+ *q7r = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
+ *q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
+ *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q0u8);
+ return;
+}
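+
+// Editor's sketch (an assumption for illustration, not the patch's code) of
+// the core update applied above to the 0x80-biased, signed pixels. `mask`
+// and `hev` are the per-pixel filter and high-edge-variance flags.
+static int scalar_clamp8(int v) {
+  return v < -128 ? -128 : (v > 127 ? 127 : v);
+}
+
+static void scalar_vp8_filter(signed char *p1, signed char *p0,
+                              signed char *q0, signed char *q1, int mask,
+                              int hev) {
+  int a, f1, f2;
+  a = hev ? scalar_clamp8(*p1 - *q1) : 0;
+  a = mask ? scalar_clamp8(a + 3 * (*q0 - *p0)) : 0;
+  f1 = scalar_clamp8(a + 4) >> 3;  // applied to q0
+  f2 = scalar_clamp8(a + 3) >> 3;  // applied to p0
+  *q0 = (signed char)scalar_clamp8(*q0 - f1);
+  *p0 = (signed char)scalar_clamp8(*p0 + f2);
+  a = (f1 + 1) >> 1;  // vrshrq_n_s8(f1, 1)
+  if (!hev) {         // outer taps only move where hev is clear (vbicq_s8)
+    *q1 = (signed char)scalar_clamp8(*q1 - a);
+    *p1 = (signed char)scalar_clamp8(*p1 + a);
+  }
+}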
+
+void vp8_loop_filter_horizontal_edge_y_neon(unsigned char *src, int pitch,
+ unsigned char blimit,
+ unsigned char limit,
+ unsigned char thresh) {
+ uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+ uint8x16_t q5, q6, q7, q8, q9, q10;
+
+ qblimit = vdupq_n_u8(blimit);
+ qlimit = vdupq_n_u8(limit);
+ qthresh = vdupq_n_u8(thresh);
+ src -= (pitch << 2);
+
+ q3 = vld1q_u8(src);
+ src += pitch;
+ q4 = vld1q_u8(src);
+ src += pitch;
+ q5 = vld1q_u8(src);
+ src += pitch;
+ q6 = vld1q_u8(src);
+ src += pitch;
+ q7 = vld1q_u8(src);
+ src += pitch;
+ q8 = vld1q_u8(src);
+ src += pitch;
+ q9 = vld1q_u8(src);
+ src += pitch;
+ q10 = vld1q_u8(src);
+
+ vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
+ q10, &q5, &q6, &q7, &q8);
+
+ src -= (pitch * 5);
+ vst1q_u8(src, q5);
+ src += pitch;
+ vst1q_u8(src, q6);
+ src += pitch;
+ vst1q_u8(src, q7);
+ src += pitch;
+ vst1q_u8(src, q8);
+ return;
+}
+
+void vp8_loop_filter_horizontal_edge_uv_neon(unsigned char *u, int pitch,
+ unsigned char blimit,
+ unsigned char limit,
+ unsigned char thresh,
+ unsigned char *v) {
+ uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+ uint8x16_t q5, q6, q7, q8, q9, q10;
+ uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
+ uint8x8_t d15, d16, d17, d18, d19, d20, d21;
+
+ qblimit = vdupq_n_u8(blimit);
+ qlimit = vdupq_n_u8(limit);
+ qthresh = vdupq_n_u8(thresh);
+
+ u -= (pitch << 2);
+ v -= (pitch << 2);
+
+ d6 = vld1_u8(u);
+ u += pitch;
+ d7 = vld1_u8(v);
+ v += pitch;
+ d8 = vld1_u8(u);
+ u += pitch;
+ d9 = vld1_u8(v);
+ v += pitch;
+ d10 = vld1_u8(u);
+ u += pitch;
+ d11 = vld1_u8(v);
+ v += pitch;
+ d12 = vld1_u8(u);
+ u += pitch;
+ d13 = vld1_u8(v);
+ v += pitch;
+ d14 = vld1_u8(u);
+ u += pitch;
+ d15 = vld1_u8(v);
+ v += pitch;
+ d16 = vld1_u8(u);
+ u += pitch;
+ d17 = vld1_u8(v);
+ v += pitch;
+ d18 = vld1_u8(u);
+ u += pitch;
+ d19 = vld1_u8(v);
+ v += pitch;
+ d20 = vld1_u8(u);
+ d21 = vld1_u8(v);
+
+ q3 = vcombine_u8(d6, d7);
+ q4 = vcombine_u8(d8, d9);
+ q5 = vcombine_u8(d10, d11);
+ q6 = vcombine_u8(d12, d13);
+ q7 = vcombine_u8(d14, d15);
+ q8 = vcombine_u8(d16, d17);
+ q9 = vcombine_u8(d18, d19);
+ q10 = vcombine_u8(d20, d21);
+
+ vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
+ q10, &q5, &q6, &q7, &q8);
+
+ u -= (pitch * 5);
+ vst1_u8(u, vget_low_u8(q5));
+ u += pitch;
+ vst1_u8(u, vget_low_u8(q6));
+ u += pitch;
+ vst1_u8(u, vget_low_u8(q7));
+ u += pitch;
+ vst1_u8(u, vget_low_u8(q8));
+
+ v -= (pitch * 5);
+ vst1_u8(v, vget_high_u8(q5));
+ v += pitch;
+ vst1_u8(v, vget_high_u8(q6));
+ v += pitch;
+ vst1_u8(v, vget_high_u8(q7));
+ v += pitch;
+ vst1_u8(v, vget_high_u8(q8));
+ return;
+}
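+
+// Editor's note: in the UV variant above, each q register carries a U row in
+// its low half and the matching V row in its high half, so a single call to
+// vp8_loop_filter_neon filters both chroma planes at once.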
+
+static INLINE void write_4x8(unsigned char *dst, int pitch,
+ const uint8x8x4_t result) {
+#ifdef VPX_INCOMPATIBLE_GCC
+ /*
+ * uint8x8x4_t result
+ 00 01 02 03 | 04 05 06 07
+ 10 11 12 13 | 14 15 16 17
+ 20 21 22 23 | 24 25 26 27
+ 30 31 32 33 | 34 35 36 37
+ ---
+ * after vtrn_u16
+ 00 01 20 21 | 04 05 24 25
+ 02 03 22 23 | 06 07 26 27
+ 10 11 30 31 | 14 15 34 35
+ 12 13 32 33 | 16 17 36 37
+ ---
+ * after vtrn_u8
+ 00 10 20 30 | 04 14 24 34
+ 01 11 21 31 | 05 15 25 35
+ 02 12 22 32 | 06 16 26 36
+ 03 13 23 33 | 07 17 27 37
+ */
+ const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[0]),
+ vreinterpret_u16_u8(result.val[2]));
+ const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[1]),
+ vreinterpret_u16_u8(result.val[3]));
+ const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
+ vreinterpret_u8_u16(r13_u16.val[0]));
+ const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
+ vreinterpret_u8_u16(r13_u16.val[1]));
+ const uint32x2_t x_0_4 = vreinterpret_u32_u8(r01_u8.val[0]);
+ const uint32x2_t x_1_5 = vreinterpret_u32_u8(r01_u8.val[1]);
+ const uint32x2_t x_2_6 = vreinterpret_u32_u8(r23_u8.val[0]);
+ const uint32x2_t x_3_7 = vreinterpret_u32_u8(r23_u8.val[1]);
+ vst1_lane_u32((uint32_t *)dst, x_0_4, 0);
+ dst += pitch;
+ vst1_lane_u32((uint32_t *)dst, x_1_5, 0);
+ dst += pitch;
+ vst1_lane_u32((uint32_t *)dst, x_2_6, 0);
+ dst += pitch;
+ vst1_lane_u32((uint32_t *)dst, x_3_7, 0);
+ dst += pitch;
+ vst1_lane_u32((uint32_t *)dst, x_0_4, 1);
+ dst += pitch;
+ vst1_lane_u32((uint32_t *)dst, x_1_5, 1);
+ dst += pitch;
+ vst1_lane_u32((uint32_t *)dst, x_2_6, 1);
+ dst += pitch;
+ vst1_lane_u32((uint32_t *)dst, x_3_7, 1);
+#else
+ vst4_lane_u8(dst, result, 0);
+ dst += pitch;
+ vst4_lane_u8(dst, result, 1);
+ dst += pitch;
+ vst4_lane_u8(dst, result, 2);
+ dst += pitch;
+ vst4_lane_u8(dst, result, 3);
+ dst += pitch;
+ vst4_lane_u8(dst, result, 4);
+ dst += pitch;
+ vst4_lane_u8(dst, result, 5);
+ dst += pitch;
+ vst4_lane_u8(dst, result, 6);
+ dst += pitch;
+ vst4_lane_u8(dst, result, 7);
+#endif // VPX_INCOMPATIBLE_GCC
+}
+
+void vp8_loop_filter_vertical_edge_y_neon(unsigned char *src, int pitch,
+ unsigned char blimit,
+ unsigned char limit,
+ unsigned char thresh) {
+ unsigned char *s, *d;
+ uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+ uint8x16_t q5, q6, q7, q8, q9, q10;
+ uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
+ uint8x8_t d15, d16, d17, d18, d19, d20, d21;
+ uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
+ uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
+ uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
+ uint8x8x4_t q4ResultH, q4ResultL;
+
+ qblimit = vdupq_n_u8(blimit);
+ qlimit = vdupq_n_u8(limit);
+ qthresh = vdupq_n_u8(thresh);
+
+ s = src - 4;
+ d6 = vld1_u8(s);
+ s += pitch;
+ d8 = vld1_u8(s);
+ s += pitch;
+ d10 = vld1_u8(s);
+ s += pitch;
+ d12 = vld1_u8(s);
+ s += pitch;
+ d14 = vld1_u8(s);
+ s += pitch;
+ d16 = vld1_u8(s);
+ s += pitch;
+ d18 = vld1_u8(s);
+ s += pitch;
+ d20 = vld1_u8(s);
+ s += pitch;
+ d7 = vld1_u8(s);
+ s += pitch;
+ d9 = vld1_u8(s);
+ s += pitch;
+ d11 = vld1_u8(s);
+ s += pitch;
+ d13 = vld1_u8(s);
+ s += pitch;
+ d15 = vld1_u8(s);
+ s += pitch;
+ d17 = vld1_u8(s);
+ s += pitch;
+ d19 = vld1_u8(s);
+ s += pitch;
+ d21 = vld1_u8(s);
+
+ q3 = vcombine_u8(d6, d7);
+ q4 = vcombine_u8(d8, d9);
+ q5 = vcombine_u8(d10, d11);
+ q6 = vcombine_u8(d12, d13);
+ q7 = vcombine_u8(d14, d15);
+ q8 = vcombine_u8(d16, d17);
+ q9 = vcombine_u8(d18, d19);
+ q10 = vcombine_u8(d20, d21);
+
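+  /* Three-stage transpose (vtrn on 32-, 16-, then 8-bit lanes): the sixteen
+     8-pixel rows loaded above become eight 16-pixel columns, so the vertical
+     edge can be filtered with the same kernel as a horizontal edge. */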
+ q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
+ q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
+ q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
+ q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
+
+ q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
+ vreinterpretq_u16_u32(q2tmp2.val[0]));
+ q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
+ vreinterpretq_u16_u32(q2tmp3.val[0]));
+ q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
+ vreinterpretq_u16_u32(q2tmp2.val[1]));
+ q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
+ vreinterpretq_u16_u32(q2tmp3.val[1]));
+
+ q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
+ vreinterpretq_u8_u16(q2tmp5.val[0]));
+ q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
+ vreinterpretq_u8_u16(q2tmp5.val[1]));
+ q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
+ vreinterpretq_u8_u16(q2tmp7.val[0]));
+ q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
+ vreinterpretq_u8_u16(q2tmp7.val[1]));
+
+ q3 = q2tmp8.val[0];
+ q4 = q2tmp8.val[1];
+ q5 = q2tmp9.val[0];
+ q6 = q2tmp9.val[1];
+ q7 = q2tmp10.val[0];
+ q8 = q2tmp10.val[1];
+ q9 = q2tmp11.val[0];
+ q10 = q2tmp11.val[1];
+
+ vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
+ q10, &q5, &q6, &q7, &q8);
+
+ q4ResultL.val[0] = vget_low_u8(q5); // d10
+ q4ResultL.val[1] = vget_low_u8(q6); // d12
+ q4ResultL.val[2] = vget_low_u8(q7); // d14
+ q4ResultL.val[3] = vget_low_u8(q8); // d16
+ q4ResultH.val[0] = vget_high_u8(q5); // d11
+ q4ResultH.val[1] = vget_high_u8(q6); // d13
+ q4ResultH.val[2] = vget_high_u8(q7); // d15
+ q4ResultH.val[3] = vget_high_u8(q8); // d17
+
+ d = src - 2;
+ write_4x8(d, pitch, q4ResultL);
+ d += pitch * 8;
+ write_4x8(d, pitch, q4ResultH);
+}
+
+void vp8_loop_filter_vertical_edge_uv_neon(unsigned char *u, int pitch,
+ unsigned char blimit,
+ unsigned char limit,
+ unsigned char thresh,
+ unsigned char *v) {
+ unsigned char *us, *ud;
+ unsigned char *vs, *vd;
+ uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+ uint8x16_t q5, q6, q7, q8, q9, q10;
+ uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
+ uint8x8_t d15, d16, d17, d18, d19, d20, d21;
+ uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
+ uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
+ uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
+ uint8x8x4_t q4ResultH, q4ResultL;
+
+ qblimit = vdupq_n_u8(blimit);
+ qlimit = vdupq_n_u8(limit);
+ qthresh = vdupq_n_u8(thresh);
+
+ us = u - 4;
+ d6 = vld1_u8(us);
+ us += pitch;
+ d8 = vld1_u8(us);
+ us += pitch;
+ d10 = vld1_u8(us);
+ us += pitch;
+ d12 = vld1_u8(us);
+ us += pitch;
+ d14 = vld1_u8(us);
+ us += pitch;
+ d16 = vld1_u8(us);
+ us += pitch;
+ d18 = vld1_u8(us);
+ us += pitch;
+ d20 = vld1_u8(us);
+
+ vs = v - 4;
+ d7 = vld1_u8(vs);
+ vs += pitch;
+ d9 = vld1_u8(vs);
+ vs += pitch;
+ d11 = vld1_u8(vs);
+ vs += pitch;
+ d13 = vld1_u8(vs);
+ vs += pitch;
+ d15 = vld1_u8(vs);
+ vs += pitch;
+ d17 = vld1_u8(vs);
+ vs += pitch;
+ d19 = vld1_u8(vs);
+ vs += pitch;
+ d21 = vld1_u8(vs);
+
+ q3 = vcombine_u8(d6, d7);
+ q4 = vcombine_u8(d8, d9);
+ q5 = vcombine_u8(d10, d11);
+ q6 = vcombine_u8(d12, d13);
+ q7 = vcombine_u8(d14, d15);
+ q8 = vcombine_u8(d16, d17);
+ q9 = vcombine_u8(d18, d19);
+ q10 = vcombine_u8(d20, d21);
+
+ q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
+ q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
+ q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
+ q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
+
+ q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
+ vreinterpretq_u16_u32(q2tmp2.val[0]));
+ q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
+ vreinterpretq_u16_u32(q2tmp3.val[0]));
+ q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
+ vreinterpretq_u16_u32(q2tmp2.val[1]));
+ q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
+ vreinterpretq_u16_u32(q2tmp3.val[1]));
+
+ q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
+ vreinterpretq_u8_u16(q2tmp5.val[0]));
+ q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
+ vreinterpretq_u8_u16(q2tmp5.val[1]));
+ q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
+ vreinterpretq_u8_u16(q2tmp7.val[0]));
+ q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
+ vreinterpretq_u8_u16(q2tmp7.val[1]));
+
+ q3 = q2tmp8.val[0];
+ q4 = q2tmp8.val[1];
+ q5 = q2tmp9.val[0];
+ q6 = q2tmp9.val[1];
+ q7 = q2tmp10.val[0];
+ q8 = q2tmp10.val[1];
+ q9 = q2tmp11.val[0];
+ q10 = q2tmp11.val[1];
+
+ vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
+ q10, &q5, &q6, &q7, &q8);
+
+ q4ResultL.val[0] = vget_low_u8(q5); // d10
+ q4ResultL.val[1] = vget_low_u8(q6); // d12
+ q4ResultL.val[2] = vget_low_u8(q7); // d14
+ q4ResultL.val[3] = vget_low_u8(q8); // d16
+ ud = u - 2;
+ write_4x8(ud, pitch, q4ResultL);
+
+ q4ResultH.val[0] = vget_high_u8(q5); // d11
+ q4ResultH.val[1] = vget_high_u8(q6); // d13
+ q4ResultH.val[2] = vget_high_u8(q7); // d15
+ q4ResultH.val[3] = vget_high_u8(q8); // d17
+ vd = v - 2;
+ write_4x8(vd, pitch, q4ResultH);
+}
diff --git a/media/libvpx/libvpx/vp8/common/blockd.c b/media/libvpx/libvpx/vp8/common/blockd.c
new file mode 100644
index 0000000000..22905c10a6
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/blockd.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "blockd.h"
+#include "vpx_mem/vpx_mem.h"
+
+const unsigned char vp8_block2left[25] = { 0, 0, 0, 0, 1, 1, 1, 1, 2,
+ 2, 2, 2, 3, 3, 3, 3, 4, 4,
+ 5, 5, 6, 6, 7, 7, 8 };
+const unsigned char vp8_block2above[25] = { 0, 1, 2, 3, 0, 1, 2, 3, 0,
+ 1, 2, 3, 0, 1, 2, 3, 4, 5,
+ 4, 5, 6, 7, 6, 7, 8 };
diff --git a/media/libvpx/libvpx/vp8/common/blockd.h b/media/libvpx/libvpx/vp8/common/blockd.h
new file mode 100644
index 0000000000..405443449d
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/blockd.h
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_BLOCKD_H_
+#define VPX_VP8_COMMON_BLOCKD_H_
+
+void vpx_log(const char *format, ...);
+
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx_config.h"
+#include "vpx_scale/yv12config.h"
+#include "mv.h"
+#include "treecoder.h"
+#include "vpx_ports/mem.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*#define DCPRED 1*/
+#define DCPREDSIMTHRESH 0
+#define DCPREDCNTTHRESH 3
+
+#define MB_FEATURE_TREE_PROBS 3
+#define MAX_MB_SEGMENTS 4
+
+#define MAX_REF_LF_DELTAS 4
+#define MAX_MODE_LF_DELTAS 4
+
+/* Segment Feature Masks */
+#define SEGMENT_DELTADATA 0
+#define SEGMENT_ABSDATA 1
+
+typedef struct {
+ int r, c;
+} POS;
+
+#define PLANE_TYPE_Y_NO_DC 0
+#define PLANE_TYPE_Y2 1
+#define PLANE_TYPE_UV 2
+#define PLANE_TYPE_Y_WITH_DC 3
+
+typedef char ENTROPY_CONTEXT;
+typedef struct {
+ ENTROPY_CONTEXT y1[4];
+ ENTROPY_CONTEXT u[2];
+ ENTROPY_CONTEXT v[2];
+ ENTROPY_CONTEXT y2;
+} ENTROPY_CONTEXT_PLANES;
+
+extern const unsigned char vp8_block2left[25];
+extern const unsigned char vp8_block2above[25];
+
+#define VP8_COMBINEENTROPYCONTEXTS(Dest, A, B) Dest = (A) + (B)
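+/* Illustrative note (not from the original header): A and B are typically
+   the above/left ENTROPY_CONTEXT values for a block, e.g.
+     VP8_COMBINEENTROPYCONTEXTS(ctx, a->y1[0], l->y1[0]);
+   which yields the 0..2 coefficient context described in entropy.h. */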
+
+typedef enum { KEY_FRAME = 0, INTER_FRAME = 1 } FRAME_TYPE;
+
+typedef enum {
+ DC_PRED, /* average of above and left pixels */
+ V_PRED, /* vertical prediction */
+ H_PRED, /* horizontal prediction */
+ TM_PRED, /* Truemotion prediction */
+ B_PRED, /* block based prediction, each block has its own prediction mode */
+
+ NEARESTMV,
+ NEARMV,
+ ZEROMV,
+ NEWMV,
+ SPLITMV,
+
+ MB_MODE_COUNT
+} MB_PREDICTION_MODE;
+
+/* Macroblock level features */
+typedef enum {
+ MB_LVL_ALT_Q = 0, /* Use alternate Quantizer .... */
+ MB_LVL_ALT_LF = 1, /* Use alternate loop filter value... */
+ MB_LVL_MAX = 2 /* Number of MB level features supported */
+
+} MB_LVL_FEATURES;
+
+/* Segment Feature Masks */
+#define SEGMENT_ALTQ 0x01
+#define SEGMENT_ALT_LF 0x02
+
+#define VP8_YMODES (B_PRED + 1)
+#define VP8_UV_MODES (TM_PRED + 1)
+
+#define VP8_MVREFS (1 + SPLITMV - NEARESTMV)
+
+typedef enum {
+ B_DC_PRED, /* average of above and left pixels */
+ B_TM_PRED,
+
+ B_VE_PRED, /* vertical prediction */
+ B_HE_PRED, /* horizontal prediction */
+
+ B_LD_PRED,
+ B_RD_PRED,
+
+ B_VR_PRED,
+ B_VL_PRED,
+ B_HD_PRED,
+ B_HU_PRED,
+
+ LEFT4X4,
+ ABOVE4X4,
+ ZERO4X4,
+ NEW4X4,
+
+ B_MODE_COUNT
+} B_PREDICTION_MODE;
+
+#define VP8_BINTRAMODES (B_HU_PRED + 1) /* 10 */
+#define VP8_SUBMVREFS (1 + NEW4X4 - LEFT4X4)
+
+/* For keyframes, intra block modes are predicted by the (already decoded)
+ modes for the Y blocks to the left and above us; for interframes, there
+ is a single probability table. */
+
+union b_mode_info {
+ B_PREDICTION_MODE as_mode;
+ int_mv mv;
+};
+
+typedef enum {
+ INTRA_FRAME = 0,
+ LAST_FRAME = 1,
+ GOLDEN_FRAME = 2,
+ ALTREF_FRAME = 3,
+ MAX_REF_FRAMES = 4
+} MV_REFERENCE_FRAME;
+
+typedef struct {
+ uint8_t mode, uv_mode;
+ uint8_t ref_frame;
+ uint8_t is_4x4;
+ int_mv mv;
+
+ uint8_t partitioning;
+  /* 1 if this macroblock has no coefficients at all; 0 if coefficient
+     tokens must be decoded */
+ uint8_t mb_skip_coeff;
+ uint8_t need_to_clamp_mvs;
+ /* Which set of segmentation parameters should be used for this MB */
+ uint8_t segment_id;
+} MB_MODE_INFO;
+
+typedef struct modeinfo {
+ MB_MODE_INFO mbmi;
+ union b_mode_info bmi[16];
+} MODE_INFO;
+
+#if CONFIG_MULTI_RES_ENCODING
+/* The MB-level information that the higher-resolution encoder needs to store
+ */
+typedef struct {
+ MB_PREDICTION_MODE mode;
+ MV_REFERENCE_FRAME ref_frame;
+ int_mv mv;
+ int dissim; /* dissimilarity level of the macroblock */
+} LOWER_RES_MB_INFO;
+
+/* The frame-level information that the higher-resolution encoder needs to
+ * store */
+typedef struct {
+ FRAME_TYPE frame_type;
+ int is_frame_dropped;
+  // Set if the frame is dropped due to overshoot after encode_frame. This
+  // triggers a drop and resets rate control with Q forced to max for the
+  // following frame. The overshoot check is only done on the lowest stream,
+  // and if set it forces a drop on all spatial streams for the current frame.
+ int is_frame_dropped_overshoot_maxqp;
+ // The frame rate for the lowest resolution.
+ double low_res_framerate;
+  /* The frame number of each reference frame */
+ unsigned int low_res_ref_frames[MAX_REF_FRAMES];
+ // The video frame counter value for the key frame, for lowest resolution.
+ unsigned int key_frame_counter_value;
+ // Flags to signal skipped encoding of previous and base layer stream.
+ unsigned int skip_encoding_prev_stream;
+ unsigned int skip_encoding_base_stream;
+ LOWER_RES_MB_INFO *mb_info;
+} LOWER_RES_FRAME_INFO;
+#endif
+
+typedef struct blockd {
+ short *qcoeff;
+ short *dqcoeff;
+ unsigned char *predictor;
+ short *dequant;
+
+ int offset;
+ char *eob;
+
+ union b_mode_info bmi;
+} BLOCKD;
+
+typedef void (*vp8_subpix_fn_t)(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset,
+ unsigned char *dst_ptr, int dst_pitch);
+
+typedef struct macroblockd {
+ DECLARE_ALIGNED(16, unsigned char, predictor[384]);
+ DECLARE_ALIGNED(16, short, qcoeff[400]);
+ DECLARE_ALIGNED(16, short, dqcoeff[400]);
+ DECLARE_ALIGNED(16, char, eobs[25]);
+
+ DECLARE_ALIGNED(16, short, dequant_y1[16]);
+ DECLARE_ALIGNED(16, short, dequant_y1_dc[16]);
+ DECLARE_ALIGNED(16, short, dequant_y2[16]);
+ DECLARE_ALIGNED(16, short, dequant_uv[16]);
+
+ /* 16 Y blocks, 4 U, 4 V, 1 DC 2nd order block, each with 16 entries. */
+ BLOCKD block[25];
+ int fullpixel_mask;
+
+ YV12_BUFFER_CONFIG pre; /* Filtered copy of previous frame reconstruction */
+ YV12_BUFFER_CONFIG dst;
+
+ MODE_INFO *mode_info_context;
+ int mode_info_stride;
+
+ FRAME_TYPE frame_type;
+
+ int up_available;
+ int left_available;
+
+ unsigned char *recon_above[3];
+ unsigned char *recon_left[3];
+ int recon_left_stride[2];
+
+ /* Y,U,V,Y2 */
+ ENTROPY_CONTEXT_PLANES *above_context;
+ ENTROPY_CONTEXT_PLANES *left_context;
+
+ /* 0 indicates segmentation at MB level is not enabled. Otherwise the
+ * individual bits indicate which features are active. */
+ unsigned char segmentation_enabled;
+
+ /* 0 (do not update) 1 (update) the macroblock segmentation map. */
+ unsigned char update_mb_segmentation_map;
+
+ /* 0 (do not update) 1 (update) the macroblock segmentation feature data. */
+ unsigned char update_mb_segmentation_data;
+
+  /* 0 (SEGMENT_DELTADATA) or 1 (SEGMENT_ABSDATA): how the segment feature
+   * data is coded. */
+  unsigned char mb_segement_abs_delta;
+
+  /* Per-frame flags that define which MB-level features (such as quantizer
+   * or loop filter level) are enabled and, when enabled, the probabilities
+   * used to decode the per-MB flags in MB_MODE_INFO. */
+  /* Probability tree used to code the segment number */
+ vp8_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS];
+ /* Segment parameters */
+ signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
+
+ /* mode_based Loop filter adjustment */
+ unsigned char mode_ref_lf_delta_enabled;
+ unsigned char mode_ref_lf_delta_update;
+
+ /* Delta values have the range +/- MAX_LOOP_FILTER */
+ signed char
+ last_ref_lf_deltas[MAX_REF_LF_DELTAS]; /* 0 = Intra, Last, GF, ARF */
+ signed char ref_lf_deltas[MAX_REF_LF_DELTAS]; /* 0 = Intra, Last, GF, ARF */
+ /* 0 = BPRED, ZERO_MV, MV, SPLIT */
+ signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS];
+ signed char
+ mode_lf_deltas[MAX_MODE_LF_DELTAS]; /* 0 = BPRED, ZERO_MV, MV, SPLIT */
+
+ /* Distance of MB away from frame edges */
+ int mb_to_left_edge;
+ int mb_to_right_edge;
+ int mb_to_top_edge;
+ int mb_to_bottom_edge;
+
+ vp8_subpix_fn_t subpixel_predict;
+ vp8_subpix_fn_t subpixel_predict8x4;
+ vp8_subpix_fn_t subpixel_predict8x8;
+ vp8_subpix_fn_t subpixel_predict16x16;
+
+ void *current_bc;
+
+ int corrupted;
+
+ struct vpx_internal_error_info error_info;
+
+#if VPX_ARCH_X86 || VPX_ARCH_X86_64
+  /* This is an intermediate buffer currently used in sub-pixel motion search
+   * to keep a copy of the reference area. This buffer can also be used for
+   * other purposes.
+   */
+ DECLARE_ALIGNED(32, unsigned char, y_buf[22 * 32]);
+#endif
+} MACROBLOCKD;
+
+extern void vp8_build_block_doffsets(MACROBLOCKD *x);
+extern void vp8_setup_block_dptrs(MACROBLOCKD *x);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_BLOCKD_H_
diff --git a/media/libvpx/libvpx/vp8/common/coefupdateprobs.h b/media/libvpx/libvpx/vp8/common/coefupdateprobs.h
new file mode 100644
index 0000000000..b342096b55
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/coefupdateprobs.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_COEFUPDATEPROBS_H_
+#define VPX_VP8_COMMON_COEFUPDATEPROBS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Update probabilities for the nodes in the token entropy tree.
+ Generated file included by entropy.c */
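+
+/* Usage sketch (an assumption mirroring the VP8 bitstream layout, not code
+   from this header): for each tree node the decoder reads one bool against
+   the update probability; when it is set, a literal 8-bit replacement
+   probability follows.
+
+     if (read_bool(bc, vp8_coef_update_probs[t][b][c][n]))   // hypothetical
+       fc.coef_probs[t][b][c][n] = (vp8_prob)read_literal(bc, 8);
+*/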
+
+const vp8_prob vp8_coef_update_probs
+ [BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES] = {
+ {
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255 },
+ { 250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ },
+ {
+ {
+ { 217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255 },
+ { 234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ },
+ {
+ {
+ { 186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ },
+ {
+ {
+ { 248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255 },
+ { 248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ },
+ };
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_COEFUPDATEPROBS_H_
diff --git a/media/libvpx/libvpx/vp8/common/common.h b/media/libvpx/libvpx/vp8/common/common.h
new file mode 100644
index 0000000000..562569f9ab
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/common.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_COMMON_H_
+#define VPX_VP8_COMMON_COMMON_H_
+
+#include <assert.h>
+
+/* Interface header for common constant data structures and lookup tables */
+
+#include "vpx_mem/vpx_mem.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Only needed for fixed-size arrays; for structs, just assign. */
+
+#define vp8_copy(Dest, Src) \
+ do { \
+ assert(sizeof(Dest) == sizeof(Src)); \
+ memcpy(Dest, Src, sizeof(Src)); \
+ } while (0)
+
+/* Use this for variably-sized arrays. */
+
+#define vp8_copy_array(Dest, Src, N) \
+ do { \
+ assert(sizeof(*(Dest)) == sizeof(*(Src))); \
+ memcpy(Dest, Src, (N) * sizeof(*(Src))); \
+ } while (0)
+
+#define vp8_zero(Dest) memset(&(Dest), 0, sizeof(Dest))
+
+#define vp8_zero_array(Dest, N) memset(Dest, 0, (N) * sizeof(*(Dest)))
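+
+/* Illustrative usage (not part of the original header):
+
+     short dst[16], src[16];
+     vp8_copy(dst, src);           // whole fixed-size array, sizes checked
+     vp8_copy_array(dst, src, 8);  // first 8 elements of variably-sized pair
+     vp8_zero(dst);                // zero every element
+     vp8_zero_array(dst, 8);       // zero the first 8 elements
+*/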
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_COMMON_H_
diff --git a/media/libvpx/libvpx/vp8/common/context.c b/media/libvpx/libvpx/vp8/common/context.c
new file mode 100644
index 0000000000..3c624ae628
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/context.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "entropy.h"
+
+/* *** GENERATED FILE: DO NOT EDIT *** */
+
+#if 0
+int Contexts[vp8_coef_counter_dimen];
+
+const int default_contexts[vp8_coef_counter_dimen] =
+{
+ {
+ // Block Type ( 0 )
+ {
+ // Coeff Band ( 0 )
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ },
+ {
+ // Coeff Band ( 1 )
+ {30190, 26544, 225, 24, 4, 0, 0, 0, 0, 0, 0, 4171593,},
+ {26846, 25157, 1241, 130, 26, 6, 1, 0, 0, 0, 0, 149987,},
+ {10484, 9538, 1006, 160, 36, 18, 0, 0, 0, 0, 0, 15104,},
+ },
+ {
+ // Coeff Band ( 2 )
+ {25842, 40456, 1126, 83, 11, 2, 0, 0, 0, 0, 0, 0,},
+ {9338, 8010, 512, 73, 7, 3, 2, 0, 0, 0, 0, 43294,},
+ {1047, 751, 149, 31, 13, 6, 1, 0, 0, 0, 0, 879,},
+ },
+ {
+ // Coeff Band ( 3 )
+ {26136, 9826, 252, 13, 0, 0, 0, 0, 0, 0, 0, 0,},
+ {8134, 5574, 191, 14, 2, 0, 0, 0, 0, 0, 0, 35302,},
+ { 605, 677, 116, 9, 1, 0, 0, 0, 0, 0, 0, 611,},
+ },
+ {
+ // Coeff Band ( 4 )
+ {10263, 15463, 283, 17, 0, 0, 0, 0, 0, 0, 0, 0,},
+ {2773, 2191, 128, 9, 2, 2, 0, 0, 0, 0, 0, 10073,},
+ { 134, 125, 32, 4, 0, 2, 0, 0, 0, 0, 0, 50,},
+ },
+ {
+ // Coeff Band ( 5 )
+ {10483, 2663, 23, 1, 0, 0, 0, 0, 0, 0, 0, 0,},
+ {2137, 1251, 27, 1, 1, 0, 0, 0, 0, 0, 0, 14362,},
+ { 116, 156, 14, 2, 1, 0, 0, 0, 0, 0, 0, 190,},
+ },
+ {
+ // Coeff Band ( 6 )
+ {40977, 27614, 412, 28, 0, 0, 0, 0, 0, 0, 0, 0,},
+ {6113, 5213, 261, 22, 3, 0, 0, 0, 0, 0, 0, 26164,},
+ { 382, 312, 50, 14, 2, 0, 0, 0, 0, 0, 0, 345,},
+ },
+ {
+ // Coeff Band ( 7 )
+ { 0, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 319,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8,},
+ },
+ },
+ {
+ // Block Type ( 1 )
+ {
+ // Coeff Band ( 0 )
+ {3268, 19382, 1043, 250, 93, 82, 49, 26, 17, 8, 25, 82289,},
+ {8758, 32110, 5436, 1832, 827, 668, 420, 153, 24, 0, 3, 52914,},
+ {9337, 23725, 8487, 3954, 2107, 1836, 1069, 399, 59, 0, 0, 18620,},
+ },
+ {
+ // Coeff Band ( 1 )
+ {12419, 8420, 452, 62, 9, 1, 0, 0, 0, 0, 0, 0,},
+ {11715, 8705, 693, 92, 15, 7, 2, 0, 0, 0, 0, 53988,},
+ {7603, 8585, 2306, 778, 270, 145, 39, 5, 0, 0, 0, 9136,},
+ },
+ {
+ // Coeff Band ( 2 )
+ {15938, 14335, 1207, 184, 55, 13, 4, 1, 0, 0, 0, 0,},
+ {7415, 6829, 1138, 244, 71, 26, 7, 0, 0, 0, 0, 9980,},
+ {1580, 1824, 655, 241, 89, 46, 10, 2, 0, 0, 0, 429,},
+ },
+ {
+ // Coeff Band ( 3 )
+ {19453, 5260, 201, 19, 0, 0, 0, 0, 0, 0, 0, 0,},
+ {9173, 3758, 213, 22, 1, 1, 0, 0, 0, 0, 0, 9820,},
+ {1689, 1277, 276, 51, 17, 4, 0, 0, 0, 0, 0, 679,},
+ },
+ {
+ // Coeff Band ( 4 )
+ {12076, 10667, 620, 85, 19, 9, 5, 0, 0, 0, 0, 0,},
+ {4665, 3625, 423, 55, 19, 9, 0, 0, 0, 0, 0, 5127,},
+ { 415, 440, 143, 34, 20, 7, 2, 0, 0, 0, 0, 101,},
+ },
+ {
+ // Coeff Band ( 5 )
+ {12183, 4846, 115, 11, 1, 0, 0, 0, 0, 0, 0, 0,},
+ {4226, 3149, 177, 21, 2, 0, 0, 0, 0, 0, 0, 7157,},
+ { 375, 621, 189, 51, 11, 4, 1, 0, 0, 0, 0, 198,},
+ },
+ {
+ // Coeff Band ( 6 )
+ {61658, 37743, 1203, 94, 10, 3, 0, 0, 0, 0, 0, 0,},
+ {15514, 11563, 903, 111, 14, 5, 0, 0, 0, 0, 0, 25195,},
+ { 929, 1077, 291, 78, 14, 7, 1, 0, 0, 0, 0, 507,},
+ },
+ {
+ // Coeff Band ( 7 )
+ { 0, 990, 15, 3, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 0, 412, 13, 0, 0, 0, 0, 0, 0, 0, 0, 1641,},
+ { 0, 18, 7, 1, 0, 0, 0, 0, 0, 0, 0, 30,},
+ },
+ },
+ {
+ // Block Type ( 2 )
+ {
+ // Coeff Band ( 0 )
+ { 953, 24519, 628, 120, 28, 12, 4, 0, 0, 0, 0, 2248798,},
+ {1525, 25654, 2647, 617, 239, 143, 42, 5, 0, 0, 0, 66837,},
+ {1180, 11011, 3001, 1237, 532, 448, 239, 54, 5, 0, 0, 7122,},
+ },
+ {
+ // Coeff Band ( 1 )
+ {1356, 2220, 67, 10, 4, 1, 0, 0, 0, 0, 0, 0,},
+ {1450, 2544, 102, 18, 4, 3, 0, 0, 0, 0, 0, 57063,},
+ {1182, 2110, 470, 130, 41, 21, 0, 0, 0, 0, 0, 6047,},
+ },
+ {
+ // Coeff Band ( 2 )
+ { 370, 3378, 200, 30, 5, 4, 1, 0, 0, 0, 0, 0,},
+ { 293, 1006, 131, 29, 11, 0, 0, 0, 0, 0, 0, 5404,},
+ { 114, 387, 98, 23, 4, 8, 1, 0, 0, 0, 0, 236,},
+ },
+ {
+ // Coeff Band ( 3 )
+ { 579, 194, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 395, 213, 5, 1, 0, 0, 0, 0, 0, 0, 0, 4157,},
+ { 119, 122, 4, 0, 0, 0, 0, 0, 0, 0, 0, 300,},
+ },
+ {
+ // Coeff Band ( 4 )
+ { 38, 557, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 21, 114, 12, 1, 0, 0, 0, 0, 0, 0, 0, 427,},
+ { 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7,},
+ },
+ {
+ // Coeff Band ( 5 )
+ { 52, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 18, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 652,},
+ { 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30,},
+ },
+ {
+ // Coeff Band ( 6 )
+ { 640, 569, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 25, 77, 2, 0, 0, 0, 0, 0, 0, 0, 0, 517,},
+ { 4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,},
+ },
+ {
+ // Coeff Band ( 7 )
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ },
+ },
+ {
+ // Block Type ( 3 )
+ {
+ // Coeff Band ( 0 )
+ {2506, 20161, 2707, 767, 261, 178, 107, 30, 14, 3, 0, 100694,},
+ {8806, 36478, 8817, 3268, 1280, 850, 401, 114, 42, 0, 0, 58572,},
+ {11003, 27214, 11798, 5716, 2482, 2072, 1048, 175, 32, 0, 0, 19284,},
+ },
+ {
+ // Coeff Band ( 1 )
+ {9738, 11313, 959, 205, 70, 18, 11, 1, 0, 0, 0, 0,},
+ {12628, 15085, 1507, 273, 52, 19, 9, 0, 0, 0, 0, 54280,},
+ {10701, 15846, 5561, 1926, 813, 570, 249, 36, 0, 0, 0, 6460,},
+ },
+ {
+ // Coeff Band ( 2 )
+ {6781, 22539, 2784, 634, 182, 123, 20, 4, 0, 0, 0, 0,},
+ {6263, 11544, 2649, 790, 259, 168, 27, 5, 0, 0, 0, 20539,},
+ {3109, 4075, 2031, 896, 457, 386, 158, 29, 0, 0, 0, 1138,},
+ },
+ {
+ // Coeff Band ( 3 )
+ {11515, 4079, 465, 73, 5, 14, 2, 0, 0, 0, 0, 0,},
+ {9361, 5834, 650, 96, 24, 8, 4, 0, 0, 0, 0, 22181,},
+ {4343, 3974, 1360, 415, 132, 96, 14, 1, 0, 0, 0, 1267,},
+ },
+ {
+ // Coeff Band ( 4 )
+ {4787, 9297, 823, 168, 44, 12, 4, 0, 0, 0, 0, 0,},
+ {3619, 4472, 719, 198, 60, 31, 3, 0, 0, 0, 0, 8401,},
+ {1157, 1175, 483, 182, 88, 31, 8, 0, 0, 0, 0, 268,},
+ },
+ {
+ // Coeff Band ( 5 )
+ {8299, 1226, 32, 5, 1, 0, 0, 0, 0, 0, 0, 0,},
+ {3502, 1568, 57, 4, 1, 1, 0, 0, 0, 0, 0, 9811,},
+ {1055, 1070, 166, 29, 6, 1, 0, 0, 0, 0, 0, 527,},
+ },
+ {
+ // Coeff Band ( 6 )
+ {27414, 27927, 1989, 347, 69, 26, 0, 0, 0, 0, 0, 0,},
+ {5876, 10074, 1574, 341, 91, 24, 4, 0, 0, 0, 0, 21954,},
+ {1571, 2171, 778, 324, 124, 65, 16, 0, 0, 0, 0, 979,},
+ },
+ {
+ // Coeff Band ( 7 )
+ { 0, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ { 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 459,},
+ { 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13,},
+ },
+ },
+};
+
+// Update probabilities for the nodes in the token entropy tree.
+const vp8_prob tree_update_probs[vp8_coef_tree_dimen] =
+{
+ {
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255, },
+ {250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255, },
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ },
+ {
+ {
+ {217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255, },
+ {234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255, },
+ },
+ {
+ {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ },
+ {
+ {
+ {186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255, },
+ {251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255, },
+ },
+ {
+ {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ },
+ {
+ {
+ {248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255, },
+ {248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
+ },
+ },
+};
+#endif
diff --git a/media/libvpx/libvpx/vp8/common/debugmodes.c b/media/libvpx/libvpx/vp8/common/debugmodes.c
new file mode 100644
index 0000000000..27a97b260c
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/debugmodes.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include "blockd.h"
+
+void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols,
+ int frame) {
+ int mb_row;
+ int mb_col;
+ int mb_index = 0;
+ FILE *mvs = fopen("mvs.stt", "a");
+
+ /* print out the macroblock Y modes */
+ mb_index = 0;
+ fprintf(mvs, "Mb Modes for Frame %d\n", frame);
+
+ for (mb_row = 0; mb_row < rows; ++mb_row) {
+ for (mb_col = 0; mb_col < cols; ++mb_col) {
+ fprintf(mvs, "%2d ", mi[mb_index].mbmi.mode);
+
+ mb_index++;
+ }
+
+ fprintf(mvs, "\n");
+ mb_index++;
+ }
+
+ fprintf(mvs, "\n");
+
+ mb_index = 0;
+ fprintf(mvs, "Mb mv ref for Frame %d\n", frame);
+
+ for (mb_row = 0; mb_row < rows; ++mb_row) {
+ for (mb_col = 0; mb_col < cols; ++mb_col) {
+ fprintf(mvs, "%2d ", mi[mb_index].mbmi.ref_frame);
+
+ mb_index++;
+ }
+
+ fprintf(mvs, "\n");
+ mb_index++;
+ }
+
+ fprintf(mvs, "\n");
+
+ /* print out the macroblock UV modes */
+ mb_index = 0;
+ fprintf(mvs, "UV Modes for Frame %d\n", frame);
+
+ for (mb_row = 0; mb_row < rows; ++mb_row) {
+ for (mb_col = 0; mb_col < cols; ++mb_col) {
+ fprintf(mvs, "%2d ", mi[mb_index].mbmi.uv_mode);
+
+ mb_index++;
+ }
+
+ mb_index++;
+ fprintf(mvs, "\n");
+ }
+
+ fprintf(mvs, "\n");
+
+ /* print out the block modes */
+ fprintf(mvs, "Mbs for Frame %d\n", frame);
+ {
+ int b_row;
+
+ for (b_row = 0; b_row < 4 * rows; ++b_row) {
+ int b_col;
+ int bindex;
+
+ for (b_col = 0; b_col < 4 * cols; ++b_col) {
+ mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2);
+ bindex = (b_row & 3) * 4 + (b_col & 3);
+
+ if (mi[mb_index].mbmi.mode == B_PRED)
+ fprintf(mvs, "%2d ", mi[mb_index].bmi[bindex].as_mode);
+ else
+ fprintf(mvs, "xx ");
+ }
+
+ fprintf(mvs, "\n");
+ }
+ }
+ fprintf(mvs, "\n");
+
+ /* print out the macroblock mvs */
+ mb_index = 0;
+ fprintf(mvs, "MVs for Frame %d\n", frame);
+
+ for (mb_row = 0; mb_row < rows; ++mb_row) {
+ for (mb_col = 0; mb_col < cols; ++mb_col) {
+ fprintf(mvs, "%5d:%-5d", mi[mb_index].mbmi.mv.as_mv.row / 2,
+ mi[mb_index].mbmi.mv.as_mv.col / 2);
+
+ mb_index++;
+ }
+
+ mb_index++;
+ fprintf(mvs, "\n");
+ }
+
+ fprintf(mvs, "\n");
+
+ /* print out the block modes */
+ fprintf(mvs, "MVs for Frame %d\n", frame);
+ {
+ int b_row;
+
+ for (b_row = 0; b_row < 4 * rows; ++b_row) {
+ int b_col;
+ int bindex;
+
+ for (b_col = 0; b_col < 4 * cols; ++b_col) {
+ mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2);
+ bindex = (b_row & 3) * 4 + (b_col & 3);
+ fprintf(mvs, "%3d:%-3d ", mi[mb_index].bmi[bindex].mv.as_mv.row,
+ mi[mb_index].bmi[bindex].mv.as_mv.col);
+ }
+
+ fprintf(mvs, "\n");
+ }
+ }
+ fprintf(mvs, "\n");
+
+ fclose(mvs);
+}
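+
+/* Example invocation (illustrative; the field names assume the usual
+   VP8_COMMON layout and are not taken from this file):
+
+     vp8_print_modes_and_motion_vectors(cm->mi, cm->mb_rows, cm->mb_cols,
+                                        cm->current_video_frame);
+*/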
diff --git a/media/libvpx/libvpx/vp8/common/default_coef_probs.h b/media/libvpx/libvpx/vp8/common/default_coef_probs.h
new file mode 100644
index 0000000000..b25e4a45a3
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/default_coef_probs.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_DEFAULT_COEF_PROBS_H_
+#define VPX_VP8_COMMON_DEFAULT_COEF_PROBS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*Generated file, included by entropy.c*/
+
+static const vp8_prob default_coef_probs
+ [BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES] = {
+ { /* Block Type ( 0 ) */
+ { /* Coeff Band ( 0 )*/
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } },
+ { /* Coeff Band ( 1 )*/
+ { 253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128 },
+ { 189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128 },
+ { 106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128 } },
+ { /* Coeff Band ( 2 )*/
+ { 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128 },
+ { 181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128 },
+ { 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128 } },
+ { /* Coeff Band ( 3 )*/
+ { 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128 },
+ { 184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128 },
+ { 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128 } },
+ { /* Coeff Band ( 4 )*/
+ { 1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128 },
+ { 170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128 },
+ { 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128 } },
+ { /* Coeff Band ( 5 )*/
+ { 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128 },
+ { 207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128 },
+ { 102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128 } },
+ { /* Coeff Band ( 6 )*/
+ { 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128 },
+ { 177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128 },
+ { 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128 } },
+ { /* Coeff Band ( 7 )*/
+ { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } } },
+ { /* Block Type ( 1 ) */
+ { /* Coeff Band ( 0 )*/
+ { 198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62 },
+ { 131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1 },
+ { 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128 } },
+ { /* Coeff Band ( 1 )*/
+ { 1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128 },
+ { 184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128 },
+ { 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128 } },
+ { /* Coeff Band ( 2 )*/
+ { 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128 },
+ { 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128 },
+ { 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128 } },
+ { /* Coeff Band ( 3 )*/
+ { 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128 },
+ { 109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128 },
+ { 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128 } },
+ { /* Coeff Band ( 4 )*/
+ { 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128 },
+ { 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128 },
+ { 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128 } },
+ { /* Coeff Band ( 5 )*/
+ { 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128 },
+ { 124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128 },
+ { 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128 } },
+ { /* Coeff Band ( 6 )*/
+ { 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128 },
+ { 121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128 },
+ { 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128 } },
+ { /* Coeff Band ( 7 )*/
+ { 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128 },
+ { 203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128 } } },
+ { /* Block Type ( 2 ) */
+ { /* Coeff Band ( 0 )*/
+ { 253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128 },
+ { 175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128 },
+ { 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128 } },
+ { /* Coeff Band ( 1 )*/
+ { 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128 },
+ { 239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128 },
+ { 155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128 } },
+ { /* Coeff Band ( 2 )*/
+ { 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128 },
+ { 201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128 },
+ { 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128 } },
+ { /* Coeff Band ( 3 )*/
+ { 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128 },
+ { 141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128 } },
+ { /* Coeff Band ( 4 )*/
+ { 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128 },
+ { 149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 } },
+ { /* Coeff Band ( 5 )*/
+ { 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128 } },
+ { /* Coeff Band ( 6 )*/
+ { 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128 } },
+ { /* Coeff Band ( 7 )*/
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } } },
+ { /* Block Type ( 3 ) */
+ { /* Coeff Band ( 0 )*/
+ { 202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255 },
+ { 126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128 },
+ { 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128 } },
+ { /* Coeff Band ( 1 )*/
+ { 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128 },
+ { 166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128 },
+ { 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128 } },
+ { /* Coeff Band ( 2 )*/
+ { 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128 },
+ { 124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128 },
+ { 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128 } },
+ { /* Coeff Band ( 3 )*/
+ { 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128 },
+ { 149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128 },
+ { 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128 } },
+ { /* Coeff Band ( 4 )*/
+ { 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128 },
+ { 123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128 },
+ { 20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128 } },
+ { /* Coeff Band ( 5 )*/
+ { 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128 },
+ { 168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128 },
+ { 47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128 } },
+ { /* Coeff Band ( 6 )*/
+ { 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128 },
+ { 141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128 },
+ { 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128 } },
+ { /* Coeff Band ( 7 )*/
+ { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 } } }
+ };
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_DEFAULT_COEF_PROBS_H_
diff --git a/media/libvpx/libvpx/vp8/common/dequantize.c b/media/libvpx/libvpx/vp8/common/dequantize.c
new file mode 100644
index 0000000000..8a56ae6868
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/dequantize.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vp8/common/blockd.h"
+#include "vpx_mem/vpx_mem.h"
+
+void vp8_dequantize_b_c(BLOCKD *d, short *DQC) {
+ int i;
+ short *DQ = d->dqcoeff;
+ short *Q = d->qcoeff;
+
+ for (i = 0; i < 16; ++i) {
+ DQ[i] = Q[i] * DQC[i];
+ }
+}
+
+void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *dest,
+ int stride) {
+ int i;
+
+ for (i = 0; i < 16; ++i) {
+ input[i] = dq[i] * input[i];
+ }
+
+ vp8_short_idct4x4llm_c(input, dest, stride, dest, stride);
+
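+  /* Clear all 16 coefficients: 16 * sizeof(short) == 32 bytes. */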
+ memset(input, 0, 32);
+}
diff --git a/media/libvpx/libvpx/vp8/common/entropy.c b/media/libvpx/libvpx/vp8/common/entropy.c
new file mode 100644
index 0000000000..fc4a3539fd
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/entropy.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "entropy.h"
+#include "blockd.h"
+#include "onyxc_int.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "coefupdateprobs.h"
+
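+/* Explanatory note added for clarity: vp8_norm[r] is the left-shift needed
+   to renormalize a bool-decoder range r (1..255) back into [128, 255];
+   e.g. vp8_norm[1] == 7 and vp8_norm[200] == 0. */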
+DECLARE_ALIGNED(16, const unsigned char, vp8_norm[256]) = {
+ 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+DECLARE_ALIGNED(16, const unsigned char,
+ vp8_coef_bands[16]) = { 0, 1, 2, 3, 6, 4, 5, 6,
+ 6, 6, 6, 6, 6, 6, 6, 7 };
+
+DECLARE_ALIGNED(16, const unsigned char,
+ vp8_prev_token_class[MAX_ENTROPY_TOKENS]) = {
+ 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0
+};
+
+DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]) = {
+ 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15,
+};
+
+DECLARE_ALIGNED(16, const short,
+ vp8_default_inv_zig_zag[16]) = { 1, 2, 6, 7, 3, 5, 8, 13,
+ 4, 9, 12, 14, 10, 11, 15, 16 };
+
+/* vp8_default_zig_zag_mask generated with:
+
+ void vp8_init_scan_order_mask()
+ {
+ int i;
+
+ for (i = 0; i < 16; ++i)
+ {
+ vp8_default_zig_zag_mask[vp8_default_zig_zag1d[i]] = 1 << i;
+ }
+
+ }
+*/
+DECLARE_ALIGNED(16, const short, vp8_default_zig_zag_mask[16]) = {
+ 1, 2, 32, 64, 4, 16, 128, 4096, 8, 256, 2048, 8192, 512, 1024, 16384, -32768
+};
+
+const int vp8_mb_feature_data_bits[MB_LVL_MAX] = { 7, 6 };
+
+/* Array indices are identical to previously-existing CONTEXT_NODE indices;
+   the corresponding _CONTEXT_NODEs are noted per row below. */
+/* clang-format off */
+const vp8_tree_index vp8_coef_tree[22] = {
+ -DCT_EOB_TOKEN, 2, /* 0 = EOB */
+ -ZERO_TOKEN, 4, /* 1 = ZERO */
+ -ONE_TOKEN, 6, /* 2 = ONE */
+ 8, 12, /* 3 = LOW_VAL */
+ -TWO_TOKEN, 10, /* 4 = TWO */
+ -THREE_TOKEN, -FOUR_TOKEN, /* 5 = THREE */
+ 14, 16, /* 6 = HIGH_LOW */
+ -DCT_VAL_CATEGORY1, -DCT_VAL_CATEGORY2, /* 7 = CAT_ONE */
+ 18, 20, /* 8 = CAT_THREEFOUR */
+ -DCT_VAL_CATEGORY3, -DCT_VAL_CATEGORY4, /* 9 = CAT_THREE */
+ -DCT_VAL_CATEGORY5, -DCT_VAL_CATEGORY6 /* 10 = CAT_FIVE */
+};
+/* clang-format on */
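+
+/* Traversal sketch (an assumption in the style of the treereader helpers,
+   not part of this table's source): negative entries are leaf tokens,
+   non-negative entries index the next pair of children.
+
+     vp8_tree_index i = 0;
+     do {
+       i = vp8_coef_tree[i + read_bool(bc, probs[i >> 1])];  // hypothetical
+     } while (i > 0);
+     token = -i;
+*/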
+
+/* vp8_coef_encodings generated with:
+ vp8_tokens_from_tree(vp8_coef_encodings, vp8_coef_tree);
+*/
+vp8_token vp8_coef_encodings[MAX_ENTROPY_TOKENS] = {
+ { 2, 2 }, { 6, 3 }, { 28, 5 }, { 58, 6 }, { 59, 6 }, { 60, 6 },
+ { 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 }
+};
+
+/* Trees for extra bits. Probabilities are constant and
+ do not depend on previously encoded bits */
+
+static const vp8_prob Pcat1[] = { 159 };
+static const vp8_prob Pcat2[] = { 165, 145 };
+static const vp8_prob Pcat3[] = { 173, 148, 140 };
+static const vp8_prob Pcat4[] = { 176, 155, 140, 135 };
+static const vp8_prob Pcat5[] = { 180, 157, 141, 134, 130 };
+static const vp8_prob Pcat6[] = { 254, 254, 243, 230, 196, 177,
+ 153, 140, 133, 130, 129 };
+
+/* tree index tables generated with:
+
+ void init_bit_tree(vp8_tree_index *p, int n) {
+ int i = 0;
+
+ while (++i < n) {
+ p[0] = p[1] = i << 1;
+ p += 2;
+ }
+
+ p[0] = p[1] = 0;
+ }
+
+ void init_bit_trees() {
+ init_bit_tree(cat1, 1);
+ init_bit_tree(cat2, 2);
+ init_bit_tree(cat3, 3);
+ init_bit_tree(cat4, 4);
+ init_bit_tree(cat5, 5);
+ init_bit_tree(cat6, 11);
+ }
+*/
+
+static const vp8_tree_index cat1[2] = { 0, 0 };
+static const vp8_tree_index cat2[4] = { 2, 2, 0, 0 };
+static const vp8_tree_index cat3[6] = { 2, 2, 4, 4, 0, 0 };
+static const vp8_tree_index cat4[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const vp8_tree_index cat5[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const vp8_tree_index cat6[22] = { 2, 2, 4, 4, 6, 6, 8, 8,
+ 10, 10, 12, 12, 14, 14, 16, 16,
+ 18, 18, 20, 20, 0, 0 };
+
+const vp8_extra_bit_struct vp8_extra_bits[12] = {
+ { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, { 0, 0, 0, 2 },
+ { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { cat1, Pcat1, 1, 5 },
+ { cat2, Pcat2, 2, 7 }, { cat3, Pcat3, 3, 11 }, { cat4, Pcat4, 4, 19 },
+ { cat5, Pcat5, 5, 35 }, { cat6, Pcat6, 11, 67 }, { 0, 0, 0, 0 }
+};
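+
+/* Magnitude reconstruction sketch (assumed decoder side, not part of this
+   file): read Len extra bits MSB-first with the per-bit probabilities, then
+   add base_val.
+
+     const vp8_extra_bit_struct *e = &vp8_extra_bits[DCT_VAL_CATEGORY3];
+     int v = 0, b;
+     for (b = 0; b < e->Len; ++b)
+       v = (v << 1) | read_bool(bc, e->prob[b]);  // hypothetical bool reader
+     v += e->base_val;                            // 11..18 for category 3
+*/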
+
+#include "default_coef_probs.h"
+
+void vp8_default_coef_probs(VP8_COMMON *pc) {
+ memcpy(pc->fc.coef_probs, default_coef_probs, sizeof(default_coef_probs));
+}
diff --git a/media/libvpx/libvpx/vp8/common/entropy.h b/media/libvpx/libvpx/vp8/common/entropy.h
new file mode 100644
index 0000000000..fbdb7bcfca
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/entropy.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_ENTROPY_H_
+#define VPX_VP8_COMMON_ENTROPY_H_
+
+#include "treecoder.h"
+#include "blockd.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Coefficient token alphabet */
+
+#define ZERO_TOKEN 0 /* 0 Extra Bits 0+0 */
+#define ONE_TOKEN 1 /* 1 Extra Bits 0+1 */
+#define TWO_TOKEN 2 /* 2 Extra Bits 0+1 */
+#define THREE_TOKEN 3 /* 3 Extra Bits 0+1 */
+#define FOUR_TOKEN 4 /* 4 Extra Bits 0+1 */
+#define DCT_VAL_CATEGORY1 5 /* 5-6 Extra Bits 1+1 */
+#define DCT_VAL_CATEGORY2 6 /* 7-10 Extra Bits 2+1 */
+#define DCT_VAL_CATEGORY3 7 /* 11-18 Extra Bits 3+1 */
+#define DCT_VAL_CATEGORY4 8 /* 19-34 Extra Bits 4+1 */
+#define DCT_VAL_CATEGORY5 9 /* 35-66 Extra Bits 5+1 */
+#define DCT_VAL_CATEGORY6 10 /* 67+ Extra Bits 11+1 */
+#define DCT_EOB_TOKEN 11 /* EOB Extra Bits 0+0 */
+
+#define MAX_ENTROPY_TOKENS 12
+#define ENTROPY_NODES 11
+
+extern const vp8_tree_index vp8_coef_tree[];
+
+extern const struct vp8_token_struct vp8_coef_encodings[MAX_ENTROPY_TOKENS];
+
+typedef struct {
+ vp8_tree_p tree;
+ const vp8_prob *prob;
+ int Len;
+ int base_val;
+} vp8_extra_bit_struct;
+
+extern const vp8_extra_bit_struct
+ vp8_extra_bits[12]; /* indexed by token value */
+
+#define PROB_UPDATE_BASELINE_COST 7
+
+#define MAX_PROB 255
+#define DCT_MAX_VALUE 2048
+
+/* Coefficients are predicted via a 3-dimensional probability table. */
+
+/* Outside dimension. 0 = Y no DC, 1 = Y2, 2 = UV, 3 = Y with DC */
+
+#define BLOCK_TYPES 4
+
+/* Middle dimension is a coarsening of the coefficient's
+ position within the 4x4 DCT. */
+
+#define COEF_BANDS 8
+extern DECLARE_ALIGNED(16, const unsigned char, vp8_coef_bands[16]);
+
+/* Inside dimension is a 3-valued measure of nearby complexity, that is,
+   the extent to which nearby coefficients are nonzero.  For the first
+   coefficient (DC, unless block type is 0), we look at the (already encoded)
+   blocks above and to the left of the current block.  The context index is
+   then the number (0, 1, or 2) of these blocks having nonzero coefficients.
+   After decoding a coefficient, the measure is roughly the size of the
+   most recently decoded coefficient (0 for 0, 1 for 1, 2 for >1).
+   Note that the intuitive meaning of this measure changes as coefficients
+   are decoded.  For example, prior to the first token a zero means that the
+   neighboring blocks are empty, while after the first token, because of the
+   use of end-of-block, a zero means we just decoded a zero and hence
+   guarantees that a nonzero coefficient will appear later in this block.
+   This shift in meaning is harmless because the context also depends on the
+   coefficient band, and zigzag positions 0, 1, and 2 lie in distinct
+   bands. */
+
+/*# define DC_TOKEN_CONTEXTS 3*/ /* 00, 0!0, !0!0 */
+#define PREV_COEF_CONTEXTS 3
+
+extern DECLARE_ALIGNED(16, const unsigned char,
+ vp8_prev_token_class[MAX_ENTROPY_TOKENS]);
+
+extern const vp8_prob vp8_coef_update_probs[BLOCK_TYPES][COEF_BANDS]
+ [PREV_COEF_CONTEXTS][ENTROPY_NODES];
+
+struct VP8Common;
+void vp8_default_coef_probs(struct VP8Common *);
+
+extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]);
+extern DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]);
+extern DECLARE_ALIGNED(16, const short, vp8_default_zig_zag_mask[16]);
+extern const int vp8_mb_feature_data_bits[MB_LVL_MAX];
+
+void vp8_coef_tree_initialize(void);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_ENTROPY_H_
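
Putting the three dimensions together, a token's node probabilities are
looked up as coef_probs[block type][band][context]. A standalone sketch of
the lookup; the coef_probs array here is a hypothetical stand-in (static, so
zero-initialized) and the index values are chosen purely for illustration:

    #include <stdio.h>

    /* Dimension sizes copied from entropy.h. */
    #define DEMO_BLOCK_TYPES 4
    #define DEMO_COEF_BANDS 8
    #define DEMO_PREV_COEF_CONTEXTS 3
    #define DEMO_ENTROPY_NODES 11

    /* Hypothetical stand-in for a frame's coefficient probabilities. */
    static unsigned char coef_probs[DEMO_BLOCK_TYPES][DEMO_COEF_BANDS]
                                   [DEMO_PREV_COEF_CONTEXTS][DEMO_ENTROPY_NODES];

    int main(void) {
      int type = 3;    /* Y with DC */
      int band = 1;    /* coarsened zigzag position of the coefficient */
      int context = 2; /* both neighbours (or the prior coefficient) nonzero */

      /* The 11 node probabilities drive the binary walk of vp8_coef_tree. */
      const unsigned char *node_probs = coef_probs[type][band][context];
      printf("first node probability: %u\n", node_probs[0]);
      return 0;
    }
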
diff --git a/media/libvpx/libvpx/vp8/common/entropymode.c b/media/libvpx/libvpx/vp8/common/entropymode.c
new file mode 100644
index 0000000000..f61e0c2e2b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/entropymode.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#define USE_PREBUILT_TABLES
+
+#include "entropymode.h"
+#include "entropy.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp8_entropymodedata.h"
+
+int vp8_mv_cont(const int_mv *l, const int_mv *a) {
+ int lez = (l->as_int == 0);
+ int aez = (a->as_int == 0);
+ int lea = (l->as_int == a->as_int);
+
+ if (lea && lez) return SUBMVREF_LEFT_ABOVE_ZED;
+
+ if (lea) return SUBMVREF_LEFT_ABOVE_SAME;
+
+ if (aez) return SUBMVREF_ABOVE_ZED;
+
+ if (lez) return SUBMVREF_LEFT_ZED;
+
+ return SUBMVREF_NORMAL;
+}
+
+static const vp8_prob sub_mv_ref_prob[VP8_SUBMVREFS - 1] = { 180, 162, 25 };
+
+const vp8_prob vp8_sub_mv_ref_prob2[SUBMVREF_COUNT][VP8_SUBMVREFS - 1] = {
+ { 147, 136, 18 },
+ { 106, 145, 1 },
+ { 179, 121, 1 },
+ { 223, 1, 34 },
+ { 208, 1, 1 }
+};
+
+const vp8_mbsplit vp8_mbsplits[VP8_NUMMBSPLITS] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
+ { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 },
+ { 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
+};
+
+const int vp8_mbsplit_count[VP8_NUMMBSPLITS] = { 2, 2, 4, 16 };
+
+const vp8_prob vp8_mbsplit_probs[VP8_NUMMBSPLITS - 1] = { 110, 111, 150 };
+
+/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
+
+const vp8_tree_index vp8_bmode_tree[18] = /* INTRAMODECONTEXTNODE value */
+ {
+ -B_DC_PRED, 2, /* 0 = DC_NODE */
+ -B_TM_PRED, 4, /* 1 = TM_NODE */
+ -B_VE_PRED, 6, /* 2 = VE_NODE */
+ 8, 12, /* 3 = COM_NODE */
+ -B_HE_PRED, 10, /* 4 = HE_NODE */
+ -B_RD_PRED, -B_VR_PRED, /* 5 = RD_NODE */
+ -B_LD_PRED, 14, /* 6 = LD_NODE */
+ -B_VL_PRED, 16, /* 7 = VL_NODE */
+ -B_HD_PRED, -B_HU_PRED /* 8 = HD_NODE */
+ };
+
+/* Again, these trees use the same probability indices as their
+ explicitly-programmed predecessors. */
+
+const vp8_tree_index vp8_ymode_tree[8] = {
+ -DC_PRED, 2, 4, 6, -V_PRED, -H_PRED, -TM_PRED, -B_PRED
+};
+
+const vp8_tree_index vp8_kf_ymode_tree[8] = { -B_PRED, 2, 4,
+ 6, -DC_PRED, -V_PRED,
+ -H_PRED, -TM_PRED };
+
+const vp8_tree_index vp8_uv_mode_tree[6] = { -DC_PRED, 2, -V_PRED,
+ 4, -H_PRED, -TM_PRED };
+
+const vp8_tree_index vp8_mbsplit_tree[6] = { -3, 2, -2, 4, -0, -1 };
+
+const vp8_tree_index vp8_mv_ref_tree[8] = { -ZEROMV, 2, -NEARESTMV, 4,
+ -NEARMV, 6, -NEWMV, -SPLITMV };
+
+const vp8_tree_index vp8_sub_mv_ref_tree[6] = { -LEFT4X4, 2, -ABOVE4X4,
+ 4, -ZERO4X4, -NEW4X4 };
+
+const vp8_tree_index vp8_small_mvtree[14] = { 2, 8, 4, 6, -0, -1, -2,
+ -3, 10, 12, -4, -5, -6, -7 };
+
+void vp8_init_mbmode_probs(VP8_COMMON *x) {
+ memcpy(x->fc.ymode_prob, vp8_ymode_prob, sizeof(vp8_ymode_prob));
+ memcpy(x->fc.uv_mode_prob, vp8_uv_mode_prob, sizeof(vp8_uv_mode_prob));
+ memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob));
+}
+
+void vp8_default_bmode_probs(vp8_prob dest[VP8_BINTRAMODES - 1]) {
+ memcpy(dest, vp8_bmode_prob, sizeof(vp8_bmode_prob));
+}
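
Each row of vp8_mbsplits labels the 16 4x4 subblocks of a macroblock with a
partition index. A standalone sketch that recovers the first subblock of each
partition for the quarters split (split type 2); the result matches the
corresponding row of vp8_mbsplit_offset defined later in findnearmv.c:

    #include <stdio.h>

    /* Split type 2 from vp8_mbsplits: four 8x8 quarters. */
    static const int quarters[16] = { 0, 0, 1, 1, 0, 0, 1, 1,
                                      2, 2, 3, 3, 2, 2, 3, 3 };

    int main(void) {
      int part, sb;
      for (part = 0; part < 4; ++part) {
        for (sb = 0; sb < 16; ++sb) {
          if (quarters[sb] == part) {
            /* Matches row 2 of vp8_mbsplit_offset: { 0, 2, 8, 10, ... } */
            printf("partition %d starts at subblock %d\n", part, sb);
            break;
          }
        }
      }
      return 0;
    }
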
diff --git a/media/libvpx/libvpx/vp8/common/entropymode.h b/media/libvpx/libvpx/vp8/common/entropymode.h
new file mode 100644
index 0000000000..c772cece57
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/entropymode.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_ENTROPYMODE_H_
+#define VPX_VP8_COMMON_ENTROPYMODE_H_
+
+#include "onyxc_int.h"
+#include "treecoder.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ SUBMVREF_NORMAL,
+ SUBMVREF_LEFT_ZED,
+ SUBMVREF_ABOVE_ZED,
+ SUBMVREF_LEFT_ABOVE_SAME,
+ SUBMVREF_LEFT_ABOVE_ZED
+} sumvfref_t;
+
+typedef int vp8_mbsplit[16];
+
+#define VP8_NUMMBSPLITS 4
+
+extern const vp8_mbsplit vp8_mbsplits[VP8_NUMMBSPLITS];
+
+extern const int vp8_mbsplit_count[VP8_NUMMBSPLITS]; /* # of subsets */
+
+extern const vp8_prob vp8_mbsplit_probs[VP8_NUMMBSPLITS - 1];
+
+extern int vp8_mv_cont(const int_mv *l, const int_mv *a);
+#define SUBMVREF_COUNT 5
+extern const vp8_prob vp8_sub_mv_ref_prob2[SUBMVREF_COUNT][VP8_SUBMVREFS - 1];
+
+extern const unsigned int vp8_kf_default_bmode_counts[VP8_BINTRAMODES]
+ [VP8_BINTRAMODES]
+ [VP8_BINTRAMODES];
+
+extern const vp8_tree_index vp8_bmode_tree[];
+
+extern const vp8_tree_index vp8_ymode_tree[];
+extern const vp8_tree_index vp8_kf_ymode_tree[];
+extern const vp8_tree_index vp8_uv_mode_tree[];
+
+extern const vp8_tree_index vp8_mbsplit_tree[];
+extern const vp8_tree_index vp8_mv_ref_tree[];
+extern const vp8_tree_index vp8_sub_mv_ref_tree[];
+
+extern const struct vp8_token_struct vp8_bmode_encodings[VP8_BINTRAMODES];
+extern const struct vp8_token_struct vp8_ymode_encodings[VP8_YMODES];
+extern const struct vp8_token_struct vp8_kf_ymode_encodings[VP8_YMODES];
+extern const struct vp8_token_struct vp8_uv_mode_encodings[VP8_UV_MODES];
+extern const struct vp8_token_struct vp8_mbsplit_encodings[VP8_NUMMBSPLITS];
+
+/* Inter mode values do not start at zero */
+
+extern const struct vp8_token_struct vp8_mv_ref_encoding_array[VP8_MVREFS];
+extern const struct vp8_token_struct
+ vp8_sub_mv_ref_encoding_array[VP8_SUBMVREFS];
+
+extern const vp8_tree_index vp8_small_mvtree[];
+
+extern const struct vp8_token_struct vp8_small_mvencodings[8];
+
+/* Key frame default mode probs */
+extern const vp8_prob vp8_kf_bmode_prob[VP8_BINTRAMODES][VP8_BINTRAMODES]
+ [VP8_BINTRAMODES - 1];
+extern const vp8_prob vp8_kf_uv_mode_prob[VP8_UV_MODES - 1];
+extern const vp8_prob vp8_kf_ymode_prob[VP8_YMODES - 1];
+
+void vp8_init_mbmode_probs(VP8_COMMON *x);
+void vp8_default_bmode_probs(vp8_prob dest[VP8_BINTRAMODES - 1]);
+void vp8_kf_default_bmode_probs(
+ vp8_prob dest[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES - 1]);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_ENTROPYMODE_H_
diff --git a/media/libvpx/libvpx/vp8/common/entropymv.c b/media/libvpx/libvpx/vp8/common/entropymv.c
new file mode 100644
index 0000000000..fb4f0c889f
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/entropymv.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "entropymv.h"
+
+/* clang-format off */
+const MV_CONTEXT vp8_mv_update_probs[2] = {
+ { {
+ 237,
+ 246,
+ 253, 253, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 250, 250, 252, 254, 254
+ } },
+ { {
+ 231,
+ 243,
+ 245, 253, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 251, 251, 254, 254, 254
+ } }
+};
+/* clang-format on */
+
+const MV_CONTEXT vp8_default_mv_context[2] = {
+ { {
+ /* row */
+ 162, /* is short */
+ 128, /* sign */
+ 225, 146, 172, 147, 214, 39, 156, /* short tree */
+ 128, 129, 132, 75, 145, 178, 206, 239, 254, 254 /* long bits */
+ } },
+
+ { {
+ /* same for column */
+ 164, /* is short */
+      128, /* sign */
+      204, 170, 119, 235, 140, 230, 228, /* short tree */
+ 128, 130, 130, 74, 148, 180, 203, 236, 254, 254 /* long bits */
+
+ } }
+};
diff --git a/media/libvpx/libvpx/vp8/common/entropymv.h b/media/libvpx/libvpx/vp8/common/entropymv.h
new file mode 100644
index 0000000000..40039f5b2c
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/entropymv.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_ENTROPYMV_H_
+#define VPX_VP8_COMMON_ENTROPYMV_H_
+
+#include "treecoder.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum {
+ mv_max = 1023, /* max absolute value of a MV component */
+  MVvals = (2 * mv_max) + 1,     /* # possible values of a MV component */
+ mvfp_max = 255, /* max absolute value of a full pixel MV component */
+ MVfpvals = (2 * mvfp_max) + 1, /* # possible full pixel MV values */
+
+  mvlong_width = 10, /* Large MVs have 10-bit magnitudes */
+ mvnum_short = 8, /* magnitudes 0 through 7 */
+
+ /* probability offsets for coding each MV component */
+
+ mvpis_short = 0, /* short (<= 7) vs long (>= 8) */
+ MVPsign, /* sign for non-zero */
+ MVPshort, /* 8 short values = 7-position tree */
+
+ MVPbits = MVPshort + mvnum_short - 1, /* mvlong_width long value bits */
+ MVPcount = MVPbits + mvlong_width /* (with independent probabilities) */
+};
+
+typedef struct mv_context {
+ vp8_prob prob[MVPcount]; /* often come in row, col pairs */
+} MV_CONTEXT;
+
+extern const MV_CONTEXT vp8_mv_update_probs[2], vp8_default_mv_context[2];
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_ENTROPYMV_H_
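
The offsets in the enum above pack one MV component's probabilities into a
flat array of MVPcount entries, matching the 19-value rows of
vp8_default_mv_context. A standalone check of the implied layout:

    #include <stdio.h>

    int main(void) {
      /* Values as defined in entropymv.h. */
      const int mvnum_short = 8, mvlong_width = 10;
      const int mvpis_short = 0;                      /* 1 prob: short vs long */
      const int MVPsign = 1;                          /* 1 prob: sign */
      const int MVPshort = 2;                         /* 7 probs: 8-leaf tree */
      const int MVPbits = MVPshort + mvnum_short - 1; /* == 9 */
      const int MVPcount = MVPbits + mvlong_width;    /* == 19 */

      printf("is-short prob at [%d], sign prob at [%d]\n", mvpis_short, MVPsign);
      printf("short tree probs at [%d..%d], long bit probs at [%d..%d]\n",
             MVPshort, MVPbits - 1, MVPbits, MVPcount - 1);
      return 0;
    }
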
diff --git a/media/libvpx/libvpx/vp8/common/extend.c b/media/libvpx/libvpx/vp8/common/extend.c
new file mode 100644
index 0000000000..b52e9fe93c
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/extend.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "extend.h"
+#include "vpx_mem/vpx_mem.h"
+
+static void copy_and_extend_plane(
+ unsigned char *s, /* source */
+ int sp, /* source pitch */
+ unsigned char *d, /* destination */
+ int dp, /* destination pitch */
+ int h, /* height */
+ int w, /* width */
+ int et, /* extend top border */
+ int el, /* extend left border */
+ int eb, /* extend bottom border */
+ int er, /* extend right border */
+ int interleave_step) { /* step between pixels of the current plane */
+ int i, j;
+ unsigned char *src_ptr1, *src_ptr2;
+ unsigned char *dest_ptr1, *dest_ptr2;
+ int linesize;
+
+ if (interleave_step < 1) interleave_step = 1;
+
+ /* copy the left and right most columns out */
+ src_ptr1 = s;
+ src_ptr2 = s + (w - 1) * interleave_step;
+ dest_ptr1 = d - el;
+ dest_ptr2 = d + w;
+
+ for (i = 0; i < h; ++i) {
+ memset(dest_ptr1, src_ptr1[0], el);
+ if (interleave_step == 1) {
+ memcpy(dest_ptr1 + el, src_ptr1, w);
+ } else {
+ for (j = 0; j < w; j++) {
+ dest_ptr1[el + j] = src_ptr1[interleave_step * j];
+ }
+ }
+ memset(dest_ptr2, src_ptr2[0], er);
+ src_ptr1 += sp;
+ src_ptr2 += sp;
+ dest_ptr1 += dp;
+ dest_ptr2 += dp;
+ }
+
+ /* Now copy the top and bottom lines into each line of the respective
+ * borders
+ */
+ src_ptr1 = d - el;
+ src_ptr2 = d + dp * (h - 1) - el;
+ dest_ptr1 = d + dp * (-et) - el;
+  dest_ptr2 = d + dp * h - el;
+ linesize = el + er + w;
+
+ for (i = 0; i < et; ++i) {
+ memcpy(dest_ptr1, src_ptr1, linesize);
+ dest_ptr1 += dp;
+ }
+
+ for (i = 0; i < eb; ++i) {
+ memcpy(dest_ptr2, src_ptr2, linesize);
+ dest_ptr2 += dp;
+ }
+}
+
+void vp8_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst) {
+ int et = dst->border;
+ int el = dst->border;
+ int eb = dst->border + dst->y_height - src->y_height;
+ int er = dst->border + dst->y_width - src->y_width;
+
+ // detect nv12 colorspace
+ int chroma_step = src->v_buffer - src->u_buffer == 1 ? 2 : 1;
+
+ copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, src->y_height, src->y_width, et, el, eb,
+ er, 1);
+
+ et = dst->border >> 1;
+ el = dst->border >> 1;
+ eb = (dst->border >> 1) + dst->uv_height - src->uv_height;
+ er = (dst->border >> 1) + dst->uv_width - src->uv_width;
+
+ copy_and_extend_plane(src->u_buffer, src->uv_stride, dst->u_buffer,
+ dst->uv_stride, src->uv_height, src->uv_width, et, el,
+ eb, er, chroma_step);
+
+ copy_and_extend_plane(src->v_buffer, src->uv_stride, dst->v_buffer,
+ dst->uv_stride, src->uv_height, src->uv_width, et, el,
+ eb, er, chroma_step);
+}
+
+void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst, int srcy,
+ int srcx, int srch, int srcw) {
+ int et = dst->border;
+ int el = dst->border;
+ int eb = dst->border + dst->y_height - src->y_height;
+ int er = dst->border + dst->y_width - src->y_width;
+ int src_y_offset = srcy * src->y_stride + srcx;
+ int dst_y_offset = srcy * dst->y_stride + srcx;
+ int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1);
+ int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1);
+ // detect nv12 colorspace
+ int chroma_step = src->v_buffer - src->u_buffer == 1 ? 2 : 1;
+
+  /* If the side is not touching the border then don't extend. */
+ if (srcy) et = 0;
+ if (srcx) el = 0;
+ if (srcy + srch != src->y_height) eb = 0;
+ if (srcx + srcw != src->y_width) er = 0;
+
+ copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride,
+ dst->y_buffer + dst_y_offset, dst->y_stride, srch, srcw,
+ et, el, eb, er, 1);
+
+ et = (et + 1) >> 1;
+ el = (el + 1) >> 1;
+ eb = (eb + 1) >> 1;
+ er = (er + 1) >> 1;
+ srch = (srch + 1) >> 1;
+ srcw = (srcw + 1) >> 1;
+
+ copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride,
+ dst->u_buffer + dst_uv_offset, dst->uv_stride, srch,
+ srcw, et, el, eb, er, chroma_step);
+
+ copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride,
+ dst->v_buffer + dst_uv_offset, dst->uv_stride, srch,
+ srcw, et, el, eb, er, chroma_step);
+}
+
+/* note the extension is only for the last row, for intra prediction purposes */
+void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr,
+ unsigned char *UPtr, unsigned char *VPtr) {
+ int i;
+
+ YPtr += ybf->y_stride * 14;
+ UPtr += ybf->uv_stride * 6;
+ VPtr += ybf->uv_stride * 6;
+
+ for (i = 0; i < 4; ++i) {
+ YPtr[i] = YPtr[-1];
+ UPtr[i] = UPtr[-1];
+ VPtr[i] = VPtr[-1];
+ }
+
+ YPtr += ybf->y_stride;
+ UPtr += ybf->uv_stride;
+ VPtr += ybf->uv_stride;
+
+ for (i = 0; i < 4; ++i) {
+ YPtr[i] = YPtr[-1];
+ UPtr[i] = UPtr[-1];
+ VPtr[i] = VPtr[-1];
+ }
+}
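
copy_and_extend_plane replicates the outermost source pixels into the
destination border: edge columns first, then whole extended rows for the top
and bottom. A toy illustration of the same idea on a 2x2 plane with a 1-pixel
border (standalone; it does not call the libvpx function):

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      /* 2x2 source inside a 4x4 destination with a 1-pixel border. */
      unsigned char src[4] = { 1, 2, 3, 4 };
      unsigned char dst[4][4];
      int r, c;

      /* Copy the source into the centre. */
      for (r = 0; r < 2; ++r)
        for (c = 0; c < 2; ++c) dst[r + 1][c + 1] = src[r * 2 + c];

      /* Extend left/right by replicating the edge columns... */
      for (r = 1; r <= 2; ++r) {
        dst[r][0] = dst[r][1];
        dst[r][3] = dst[r][2];
      }
      /* ...then top/bottom by copying whole extended rows. */
      memcpy(dst[0], dst[1], 4);
      memcpy(dst[3], dst[2], 4);

      for (r = 0; r < 4; ++r) {
        for (c = 0; c < 4; ++c) printf("%d ", dst[r][c]);
        printf("\n");
      }
      return 0;
    }
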
diff --git a/media/libvpx/libvpx/vp8/common/extend.h b/media/libvpx/libvpx/vp8/common/extend.h
new file mode 100644
index 0000000000..586a38a4f3
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/extend.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_EXTEND_H_
+#define VPX_VP8_COMMON_EXTEND_H_
+
+#include "vpx_scale/yv12config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr,
+ unsigned char *UPtr, unsigned char *VPtr);
+void vp8_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst);
+void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst, int srcy,
+ int srcx, int srch, int srcw);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_EXTEND_H_
diff --git a/media/libvpx/libvpx/vp8/common/filter.c b/media/libvpx/libvpx/vp8/common/filter.c
new file mode 100644
index 0000000000..267498335c
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/filter.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include "./vp8_rtcd.h"
+#include "vp8/common/filter.h"
+
+DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[8][2]) = {
+ { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 },
+ { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 }
+};
+
+DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters[8][6]) = {
+
+ { 0, 0, 128, 0, 0,
+    0 }, /* 1/8-pel positions are the same as for an alpha = -0.5 bicubic */
+ { 0, -6, 123, 12, -1, 0 },
+ { 2, -11, 108, 36, -8, 1 }, /* New 1/4 pel 6 tap filter */
+ { 0, -9, 93, 50, -6, 0 },
+ { 3, -16, 77, 77, -16, 3 }, /* New 1/2 pel 6 tap filter */
+ { 0, -6, 50, 93, -9, 0 },
+ { 1, -8, 36, 108, -11, 2 }, /* New 1/4 pel 6 tap filter */
+ { 0, -1, 12, 123, -6, 0 },
+};
+
+static void filter_block2d_first_pass(unsigned char *src_ptr, int *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const short *vp8_filter) {
+ unsigned int i, j;
+ int Temp;
+
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) +
+ ((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) +
+ ((int)src_ptr[0] * vp8_filter[2]) +
+ ((int)src_ptr[pixel_step] * vp8_filter[3]) +
+ ((int)src_ptr[2 * pixel_step] * vp8_filter[4]) +
+ ((int)src_ptr[3 * pixel_step] * vp8_filter[5]) +
+ (VP8_FILTER_WEIGHT >> 1); /* Rounding */
+
+ /* Normalize back to 0-255 */
+ Temp = Temp >> VP8_FILTER_SHIFT;
+
+ if (Temp < 0) {
+ Temp = 0;
+ } else if (Temp > 255) {
+ Temp = 255;
+ }
+
+ output_ptr[j] = Temp;
+ src_ptr++;
+ }
+
+ /* Next row... */
+ src_ptr += src_pixels_per_line - output_width;
+ output_ptr += output_width;
+ }
+}
+
+static void filter_block2d_second_pass(int *src_ptr, unsigned char *output_ptr,
+ int output_pitch,
+ unsigned int src_pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const short *vp8_filter) {
+ unsigned int i, j;
+ int Temp;
+
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ /* Apply filter */
+ Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) +
+ ((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) +
+ ((int)src_ptr[0] * vp8_filter[2]) +
+ ((int)src_ptr[pixel_step] * vp8_filter[3]) +
+ ((int)src_ptr[2 * pixel_step] * vp8_filter[4]) +
+ ((int)src_ptr[3 * pixel_step] * vp8_filter[5]) +
+ (VP8_FILTER_WEIGHT >> 1); /* Rounding */
+
+ /* Normalize back to 0-255 */
+ Temp = Temp >> VP8_FILTER_SHIFT;
+
+ if (Temp < 0) {
+ Temp = 0;
+ } else if (Temp > 255) {
+ Temp = 255;
+ }
+
+ output_ptr[j] = (unsigned char)Temp;
+ src_ptr++;
+ }
+
+ /* Start next row */
+ src_ptr += src_pixels_per_line - output_width;
+ output_ptr += output_pitch;
+ }
+}
+
+static void filter_block2d(unsigned char *src_ptr, unsigned char *output_ptr,
+ unsigned int src_pixels_per_line, int output_pitch,
+ const short *HFilter, const short *VFilter) {
+ int FData[9 * 4]; /* Temp data buffer used in filtering */
+
+ /* First filter 1-D horizontally... */
+ filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData,
+ src_pixels_per_line, 1, 9, 4, HFilter);
+
+  /* then filter vertically... */
+ filter_block2d_second_pass(FData + 8, output_ptr, output_pitch, 4, 4, 4, 4,
+ VFilter);
+}
+
+void vp8_sixtap_predict4x4_c(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ const short *HFilter;
+ const short *VFilter;
+
+ HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */
+ VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */
+
+ filter_block2d(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter,
+ VFilter);
+}
+void vp8_sixtap_predict8x8_c(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ const short *HFilter;
+ const short *VFilter;
+ int FData[13 * 16]; /* Temp data buffer used in filtering */
+
+ HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */
+ VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */
+
+ /* First filter 1-D horizontally... */
+ filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData,
+ src_pixels_per_line, 1, 13, 8, HFilter);
+
+  /* then filter vertically... */
+ filter_block2d_second_pass(FData + 16, dst_ptr, dst_pitch, 8, 8, 8, 8,
+ VFilter);
+}
+
+void vp8_sixtap_predict8x4_c(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ const short *HFilter;
+ const short *VFilter;
+ int FData[13 * 16]; /* Temp data buffer used in filtering */
+
+ HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */
+ VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */
+
+ /* First filter 1-D horizontally... */
+ filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData,
+ src_pixels_per_line, 1, 9, 8, HFilter);
+
+  /* then filter vertically... */
+ filter_block2d_second_pass(FData + 16, dst_ptr, dst_pitch, 8, 8, 4, 8,
+ VFilter);
+}
+
+void vp8_sixtap_predict16x16_c(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ const short *HFilter;
+ const short *VFilter;
+ int FData[21 * 24]; /* Temp data buffer used in filtering */
+
+ HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */
+ VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */
+
+ /* First filter 1-D horizontally... */
+ filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData,
+ src_pixels_per_line, 1, 21, 16, HFilter);
+
+  /* then filter vertically... */
+ filter_block2d_second_pass(FData + 32, dst_ptr, dst_pitch, 16, 16, 16, 16,
+ VFilter);
+}
+
+/****************************************************************************
+ *
+ * ROUTINE : filter_block2d_bil_first_pass
+ *
+ * INPUTS : UINT8 *src_ptr : Pointer to source block.
+ * UINT32 src_stride : Stride of source block.
+ * UINT32 height : Block height.
+ * UINT32 width : Block width.
+ *                  INT16  *vp8_filter : Array of 2 bi-linear filter taps.
+ *
+ *  OUTPUTS       : UINT16 *dst_ptr : Pointer to filtered block.
+ *
+ * RETURNS : void
+ *
+ * FUNCTION : Applies a 1-D 2-tap bi-linear filter to the source block
+ * in the horizontal direction to produce the filtered output
+ *                  block. Used to implement the first pass of a 2-D separable filter.
+ *
+ *  SPECIAL NOTES : Produces UINT16 output to retain precision for the next pass.
+ * Two filter taps should sum to VP8_FILTER_WEIGHT.
+ *
+ ****************************************************************************/
+static void filter_block2d_bil_first_pass(
+ unsigned char *src_ptr, unsigned short *dst_ptr, unsigned int src_stride,
+ unsigned int height, unsigned int width, const short *vp8_filter) {
+ unsigned int i, j;
+
+ for (i = 0; i < height; ++i) {
+ for (j = 0; j < width; ++j) {
+ /* Apply bilinear filter */
+ dst_ptr[j] =
+ (((int)src_ptr[0] * vp8_filter[0]) +
+ ((int)src_ptr[1] * vp8_filter[1]) + (VP8_FILTER_WEIGHT / 2)) >>
+ VP8_FILTER_SHIFT;
+ src_ptr++;
+ }
+
+ /* Next row... */
+ src_ptr += src_stride - width;
+ dst_ptr += width;
+ }
+}
+
+/****************************************************************************
+ *
+ * ROUTINE : filter_block2d_bil_second_pass
+ *
+ *  INPUTS        : UINT16 *src_ptr : Pointer to source block.
+ * UINT32 dst_pitch : Destination block pitch.
+ * UINT32 height : Block height.
+ * UINT32 width : Block width.
+ *                  INT16  *vp8_filter : Array of 2 bi-linear filter taps.
+ *
+ *  OUTPUTS       : UINT8 *dst_ptr : Pointer to filtered block.
+ *
+ * RETURNS : void
+ *
+ * FUNCTION : Applies a 1-D 2-tap bi-linear filter to the source block
+ * in the vertical direction to produce the filtered output
+ *                  block. Used to implement the second pass of a 2-D
+ *                  separable filter.
+ *
+ *  SPECIAL NOTES : Requires 16-bit input as produced by
+ * filter_block2d_bil_first_pass.
+ * Two filter taps should sum to VP8_FILTER_WEIGHT.
+ *
+ ****************************************************************************/
+static void filter_block2d_bil_second_pass(unsigned short *src_ptr,
+ unsigned char *dst_ptr,
+ int dst_pitch, unsigned int height,
+ unsigned int width,
+ const short *vp8_filter) {
+ unsigned int i, j;
+ int Temp;
+
+ for (i = 0; i < height; ++i) {
+ for (j = 0; j < width; ++j) {
+ /* Apply filter */
+ Temp = ((int)src_ptr[0] * vp8_filter[0]) +
+ ((int)src_ptr[width] * vp8_filter[1]) + (VP8_FILTER_WEIGHT / 2);
+ dst_ptr[j] = (unsigned int)(Temp >> VP8_FILTER_SHIFT);
+ src_ptr++;
+ }
+
+ /* Next row... */
+ dst_ptr += dst_pitch;
+ }
+}
+
+/****************************************************************************
+ *
+ * ROUTINE : filter_block2d_bil
+ *
+ * INPUTS : UINT8 *src_ptr : Pointer to source block.
+ * UINT32 src_pitch : Stride of source block.
+ * UINT32 dst_pitch : Stride of destination block.
+ * INT32 *HFilter : Array of 2 horizontal filter
+ * taps.
+ * INT32 *VFilter : Array of 2 vertical filter taps.
+ * INT32 Width : Block width
+ * INT32 Height : Block height
+ *
+ *  OUTPUTS       : UINT8 *dst_ptr : Pointer to filtered block.
+ *
+ * RETURNS : void
+ *
+ * FUNCTION : 2-D filters an input block by applying a 2-tap
+ * bi-linear filter horizontally followed by a 2-tap
+ * bi-linear filter vertically on the result.
+ *
+ *  SPECIAL NOTES : The largest block size that can be handled here is 16x16.
+ *
+ ****************************************************************************/
+static void filter_block2d_bil(unsigned char *src_ptr, unsigned char *dst_ptr,
+ unsigned int src_pitch, unsigned int dst_pitch,
+ const short *HFilter, const short *VFilter,
+ int Width, int Height) {
+ unsigned short FData[17 * 16]; /* Temp data buffer used in filtering */
+
+ /* First filter 1-D horizontally... */
+ filter_block2d_bil_first_pass(src_ptr, FData, src_pitch, Height + 1, Width,
+ HFilter);
+
+ /* then 1-D vertically... */
+ filter_block2d_bil_second_pass(FData, dst_ptr, dst_pitch, Height, Width,
+ VFilter);
+}
+
+void vp8_bilinear_predict4x4_c(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ const short *HFilter;
+ const short *VFilter;
+
+ // This represents a copy and is not required to be handled by optimizations.
+ assert((xoffset | yoffset) != 0);
+
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
+ filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter,
+ VFilter, 4, 4);
+}
+
+void vp8_bilinear_predict8x8_c(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ const short *HFilter;
+ const short *VFilter;
+
+ assert((xoffset | yoffset) != 0);
+
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
+
+ filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter,
+ VFilter, 8, 8);
+}
+
+void vp8_bilinear_predict8x4_c(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ const short *HFilter;
+ const short *VFilter;
+
+ assert((xoffset | yoffset) != 0);
+
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
+
+ filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter,
+ VFilter, 8, 4);
+}
+
+void vp8_bilinear_predict16x16_c(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ const short *HFilter;
+ const short *VFilter;
+
+ assert((xoffset | yoffset) != 0);
+
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
+
+ filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter,
+ VFilter, 16, 16);
+}
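
All filters above are normalized so their taps sum to VP8_FILTER_WEIGHT
(128); each pass adds half that weight and shifts right by VP8_FILTER_SHIFT
(7), i.e. rounds to nearest. A worked standalone sketch of one output pixel
at the half-pel position, using the { 64, 64 } taps of
vp8_bilinear_filters[4]:

    #include <stdio.h>

    #define VP8_FILTER_WEIGHT 128
    #define VP8_FILTER_SHIFT 7

    int main(void) {
      /* Half-pel bilinear taps from vp8_bilinear_filters[4]. */
      const short taps[2] = { 64, 64 };
      const unsigned char a = 10, b = 11;

      /* (10*64 + 11*64 + 64) >> 7 == 1408 >> 7 == 11: 10.5 rounds up. */
      int out = (a * taps[0] + b * taps[1] + (VP8_FILTER_WEIGHT >> 1)) >>
                VP8_FILTER_SHIFT;
      printf("filtered pixel: %d\n", out);
      return 0;
    }
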
diff --git a/media/libvpx/libvpx/vp8/common/filter.h b/media/libvpx/libvpx/vp8/common/filter.h
new file mode 100644
index 0000000000..6acee22b21
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/filter.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_FILTER_H_
+#define VPX_VP8_COMMON_FILTER_H_
+
+#include "vpx_ports/mem.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define BLOCK_HEIGHT_WIDTH 4
+#define VP8_FILTER_WEIGHT 128
+#define VP8_FILTER_SHIFT 7
+
+extern DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[8][2]);
+extern DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters[8][6]);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_FILTER_H_
diff --git a/media/libvpx/libvpx/vp8/common/findnearmv.c b/media/libvpx/libvpx/vp8/common/findnearmv.c
new file mode 100644
index 0000000000..3b31923621
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/findnearmv.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "findnearmv.h"
+
+const unsigned char vp8_mbsplit_offset[4][16] = {
+ { 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 2, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
+};
+
+/* Predict motion vectors using those from already-decoded nearby blocks.
+ Note that we only consider one 4x4 subblock from each candidate 16x16
+ macroblock. */
+void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest,
+ int_mv *nearby, int_mv *best_mv, int near_mv_ref_cnts[4],
+ int refframe, int *ref_frame_sign_bias) {
+ const MODE_INFO *above = here - xd->mode_info_stride;
+ const MODE_INFO *left = here - 1;
+ const MODE_INFO *aboveleft = above - 1;
+ int_mv near_mvs[4];
+ int_mv *mv = near_mvs;
+ int *cntx = near_mv_ref_cnts;
+ enum { CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
+
+ /* Zero accumulators */
+ mv[0].as_int = mv[1].as_int = mv[2].as_int = 0;
+ near_mv_ref_cnts[0] = near_mv_ref_cnts[1] = near_mv_ref_cnts[2] =
+ near_mv_ref_cnts[3] = 0;
+
+ /* Process above */
+ if (above->mbmi.ref_frame != INTRA_FRAME) {
+ if (above->mbmi.mv.as_int) {
+ (++mv)->as_int = above->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, mv,
+ ref_frame_sign_bias);
+ ++cntx;
+ }
+
+ *cntx += 2;
+ }
+
+ /* Process left */
+ if (left->mbmi.ref_frame != INTRA_FRAME) {
+ if (left->mbmi.mv.as_int) {
+ int_mv this_mv;
+
+ this_mv.as_int = left->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, &this_mv,
+ ref_frame_sign_bias);
+
+ if (this_mv.as_int != mv->as_int) {
+ (++mv)->as_int = this_mv.as_int;
+ ++cntx;
+ }
+
+ *cntx += 2;
+ } else {
+ near_mv_ref_cnts[CNT_INTRA] += 2;
+ }
+ }
+
+ /* Process above left */
+ if (aboveleft->mbmi.ref_frame != INTRA_FRAME) {
+ if (aboveleft->mbmi.mv.as_int) {
+ int_mv this_mv;
+
+ this_mv.as_int = aboveleft->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe,
+ &this_mv, ref_frame_sign_bias);
+
+ if (this_mv.as_int != mv->as_int) {
+ (++mv)->as_int = this_mv.as_int;
+ ++cntx;
+ }
+
+ *cntx += 1;
+ } else {
+ near_mv_ref_cnts[CNT_INTRA] += 1;
+ }
+ }
+
+ /* If we have three distinct MV's ... */
+ if (near_mv_ref_cnts[CNT_SPLITMV]) {
+ /* See if above-left MV can be merged with NEAREST */
+ if (mv->as_int == near_mvs[CNT_NEAREST].as_int)
+ near_mv_ref_cnts[CNT_NEAREST] += 1;
+ }
+
+ near_mv_ref_cnts[CNT_SPLITMV] =
+ ((above->mbmi.mode == SPLITMV) + (left->mbmi.mode == SPLITMV)) * 2 +
+ (aboveleft->mbmi.mode == SPLITMV);
+
+ /* Swap near and nearest if necessary */
+ if (near_mv_ref_cnts[CNT_NEAR] > near_mv_ref_cnts[CNT_NEAREST]) {
+ int tmp;
+ tmp = near_mv_ref_cnts[CNT_NEAREST];
+ near_mv_ref_cnts[CNT_NEAREST] = near_mv_ref_cnts[CNT_NEAR];
+ near_mv_ref_cnts[CNT_NEAR] = tmp;
+ tmp = (int)near_mvs[CNT_NEAREST].as_int;
+ near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int;
+ near_mvs[CNT_NEAR].as_int = (uint32_t)tmp;
+ }
+
+ /* Use near_mvs[0] to store the "best" MV */
+ if (near_mv_ref_cnts[CNT_NEAREST] >= near_mv_ref_cnts[CNT_INTRA]) {
+ near_mvs[CNT_INTRA] = near_mvs[CNT_NEAREST];
+ }
+
+ /* Set up return values */
+ best_mv->as_int = near_mvs[0].as_int;
+ nearest->as_int = near_mvs[CNT_NEAREST].as_int;
+ nearby->as_int = near_mvs[CNT_NEAR].as_int;
+}
+
+static void invert_and_clamp_mvs(int_mv *inv, int_mv *src, MACROBLOCKD *xd) {
+ inv->as_mv.row = src->as_mv.row * -1;
+ inv->as_mv.col = src->as_mv.col * -1;
+ vp8_clamp_mv2(inv, xd);
+ vp8_clamp_mv2(src, xd);
+}
+
+int vp8_find_near_mvs_bias(MACROBLOCKD *xd, const MODE_INFO *here,
+ int_mv mode_mv_sb[2][MB_MODE_COUNT],
+ int_mv best_mv_sb[2], int cnt[4], int refframe,
+ int *ref_frame_sign_bias) {
+ int sign_bias = ref_frame_sign_bias[refframe];
+
+ vp8_find_near_mvs(xd, here, &mode_mv_sb[sign_bias][NEARESTMV],
+ &mode_mv_sb[sign_bias][NEARMV], &best_mv_sb[sign_bias], cnt,
+ refframe, ref_frame_sign_bias);
+
+ invert_and_clamp_mvs(&mode_mv_sb[!sign_bias][NEARESTMV],
+ &mode_mv_sb[sign_bias][NEARESTMV], xd);
+ invert_and_clamp_mvs(&mode_mv_sb[!sign_bias][NEARMV],
+ &mode_mv_sb[sign_bias][NEARMV], xd);
+ invert_and_clamp_mvs(&best_mv_sb[!sign_bias], &best_mv_sb[sign_bias], xd);
+
+ return sign_bias;
+}
+
+vp8_prob *vp8_mv_ref_probs(vp8_prob p[VP8_MVREFS - 1],
+ const int near_mv_ref_ct[4]) {
+ p[0] = vp8_mode_contexts[near_mv_ref_ct[0]][0];
+ p[1] = vp8_mode_contexts[near_mv_ref_ct[1]][1];
+ p[2] = vp8_mode_contexts[near_mv_ref_ct[2]][2];
+ p[3] = vp8_mode_contexts[near_mv_ref_ct[3]][3];
+ /* p[3] = vp8_mode_contexts[near_mv_ref_ct[1] + near_mv_ref_ct[2] +
+ near_mv_ref_ct[3]][3]; */
+ return p;
+}
diff --git a/media/libvpx/libvpx/vp8/common/findnearmv.h b/media/libvpx/libvpx/vp8/common/findnearmv.h
new file mode 100644
index 0000000000..d7db9544aa
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/findnearmv.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_FINDNEARMV_H_
+#define VPX_VP8_COMMON_FINDNEARMV_H_
+
+#include "./vpx_config.h"
+#include "mv.h"
+#include "blockd.h"
+#include "modecont.h"
+#include "treecoder.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static INLINE void mv_bias(int refmb_ref_frame_sign_bias, int refframe,
+ int_mv *mvp, const int *ref_frame_sign_bias) {
+ if (refmb_ref_frame_sign_bias != ref_frame_sign_bias[refframe]) {
+ mvp->as_mv.row *= -1;
+ mvp->as_mv.col *= -1;
+ }
+}
+
+#define LEFT_TOP_MARGIN (16 << 3)
+#define RIGHT_BOTTOM_MARGIN (16 << 3)
+static INLINE void vp8_clamp_mv2(int_mv *mv, const MACROBLOCKD *xd) {
+ if (mv->as_mv.col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN)) {
+ mv->as_mv.col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
+ } else if (mv->as_mv.col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN) {
+ mv->as_mv.col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
+ }
+
+ if (mv->as_mv.row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN)) {
+ mv->as_mv.row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
+ } else if (mv->as_mv.row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN) {
+ mv->as_mv.row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
+ }
+}
+
+static INLINE void vp8_clamp_mv(int_mv *mv, int mb_to_left_edge,
+ int mb_to_right_edge, int mb_to_top_edge,
+ int mb_to_bottom_edge) {
+ mv->as_mv.col =
+ (mv->as_mv.col < mb_to_left_edge) ? mb_to_left_edge : mv->as_mv.col;
+ mv->as_mv.col =
+ (mv->as_mv.col > mb_to_right_edge) ? mb_to_right_edge : mv->as_mv.col;
+ mv->as_mv.row =
+ (mv->as_mv.row < mb_to_top_edge) ? mb_to_top_edge : mv->as_mv.row;
+ mv->as_mv.row =
+ (mv->as_mv.row > mb_to_bottom_edge) ? mb_to_bottom_edge : mv->as_mv.row;
+}
+static INLINE unsigned int vp8_check_mv_bounds(int_mv *mv, int mb_to_left_edge,
+ int mb_to_right_edge,
+ int mb_to_top_edge,
+ int mb_to_bottom_edge) {
+ unsigned int need_to_clamp;
+ need_to_clamp = (mv->as_mv.col < mb_to_left_edge);
+ need_to_clamp |= (mv->as_mv.col > mb_to_right_edge);
+ need_to_clamp |= (mv->as_mv.row < mb_to_top_edge);
+ need_to_clamp |= (mv->as_mv.row > mb_to_bottom_edge);
+ return need_to_clamp;
+}
+
+void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest,
+ int_mv *nearby, int_mv *best_mv, int near_mv_ref_cnts[4],
+ int refframe, int *ref_frame_sign_bias);
+
+int vp8_find_near_mvs_bias(MACROBLOCKD *xd, const MODE_INFO *here,
+ int_mv mode_mv_sb[2][MB_MODE_COUNT],
+ int_mv best_mv_sb[2], int cnt[4], int refframe,
+ int *ref_frame_sign_bias);
+
+vp8_prob *vp8_mv_ref_probs(vp8_prob p[VP8_MVREFS - 1],
+ const int near_mv_ref_ct[4]);
+
+extern const unsigned char vp8_mbsplit_offset[4][16];
+
+static INLINE uint32_t left_block_mv(const MODE_INFO *cur_mb, int b) {
+ if (!(b & 3)) {
+ /* On L edge, get from MB to left of us */
+ --cur_mb;
+
+ if (cur_mb->mbmi.mode != SPLITMV) return cur_mb->mbmi.mv.as_int;
+ b += 4;
+ }
+
+ return (cur_mb->bmi + b - 1)->mv.as_int;
+}
+
+static INLINE uint32_t above_block_mv(const MODE_INFO *cur_mb, int b,
+ int mi_stride) {
+ if (!(b >> 2)) {
+ /* On top edge, get from MB above us */
+ cur_mb -= mi_stride;
+
+ if (cur_mb->mbmi.mode != SPLITMV) return cur_mb->mbmi.mv.as_int;
+ b += 16;
+ }
+
+ return (cur_mb->bmi + (b - 4))->mv.as_int;
+}
+static INLINE B_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb,
+ int b) {
+ if (!(b & 3)) {
+ /* On L edge, get from MB to left of us */
+ --cur_mb;
+ switch (cur_mb->mbmi.mode) {
+ case B_PRED: return (cur_mb->bmi + b + 3)->as_mode;
+ case DC_PRED: return B_DC_PRED;
+ case V_PRED: return B_VE_PRED;
+ case H_PRED: return B_HE_PRED;
+ case TM_PRED: return B_TM_PRED;
+ default: return B_DC_PRED;
+ }
+ }
+
+ return (cur_mb->bmi + b - 1)->as_mode;
+}
+
+static INLINE B_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb, int b,
+ int mi_stride) {
+ if (!(b >> 2)) {
+ /* On top edge, get from MB above us */
+ cur_mb -= mi_stride;
+
+ switch (cur_mb->mbmi.mode) {
+ case B_PRED: return (cur_mb->bmi + b + 12)->as_mode;
+ case DC_PRED: return B_DC_PRED;
+ case V_PRED: return B_VE_PRED;
+ case H_PRED: return B_HE_PRED;
+ case TM_PRED: return B_TM_PRED;
+ default: return B_DC_PRED;
+ }
+ }
+
+ return (cur_mb->bmi + b - 4)->as_mode;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_FINDNEARMV_H_
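
vp8_clamp_mv2 pins each MV component to the macroblock's signed distances
from the frame edges, widened by the 16-pel margins defined above. A
standalone restatement of that logic with made-up edge values; units follow
whatever the caller uses for the edge distances:

    #include <stdio.h>

    /* 16-pel margin in the same units as the edges, as in findnearmv.h. */
    #define MARGIN (16 << 3)

    static int clamp_component(int v, int lo_edge, int hi_edge) {
      if (v < lo_edge - MARGIN) return lo_edge - MARGIN;
      if (v > hi_edge + MARGIN) return hi_edge + MARGIN;
      return v;
    }

    int main(void) {
      /* Made-up distances to the left and right frame edges. */
      const int mb_to_left_edge = -256, mb_to_right_edge = 512;
      printf("%d\n", clamp_component(-1000, mb_to_left_edge, mb_to_right_edge));
      /* prints -384, i.e. mb_to_left_edge - MARGIN */
      return 0;
    }
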
diff --git a/media/libvpx/libvpx/vp8/common/generic/systemdependent.c b/media/libvpx/libvpx/vp8/common/generic/systemdependent.c
new file mode 100644
index 0000000000..71529bdfd8
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/generic/systemdependent.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#if VPX_ARCH_ARM
+#include "vpx_ports/arm.h"
+#elif VPX_ARCH_X86 || VPX_ARCH_X86_64
+#include "vpx_ports/x86.h"
+#elif VPX_ARCH_PPC
+#include "vpx_ports/ppc.h"
+#elif VPX_ARCH_MIPS
+#include "vpx_ports/mips.h"
+#elif VPX_ARCH_LOONGARCH
+#include "vpx_ports/loongarch.h"
+#endif
+#include "vp8/common/onyxc_int.h"
+#include "vp8/common/systemdependent.h"
+
+#if CONFIG_MULTITHREAD
+#if HAVE_UNISTD_H && !defined(__OS2__)
+#include <unistd.h>
+#elif defined(_WIN32)
+#include <windows.h>
+typedef void(WINAPI *PGNSI)(LPSYSTEM_INFO);
+#elif defined(__OS2__)
+#define INCL_DOS
+#define INCL_DOSSPINLOCK
+#include <os2.h>
+#endif
+#endif
+
+#if CONFIG_MULTITHREAD
+static int get_cpu_count(void) {
+ int core_count = 16;
+
+#if HAVE_UNISTD_H && !defined(__OS2__)
+#if defined(_SC_NPROCESSORS_ONLN)
+ core_count = (int)sysconf(_SC_NPROCESSORS_ONLN);
+#elif defined(_SC_NPROC_ONLN)
+ core_count = (int)sysconf(_SC_NPROC_ONLN);
+#endif
+#elif defined(_WIN32)
+ {
+#if _WIN32_WINNT >= 0x0501
+ SYSTEM_INFO sysinfo;
+ GetNativeSystemInfo(&sysinfo);
+#else
+ PGNSI pGNSI;
+ SYSTEM_INFO sysinfo;
+
+ /* Call GetNativeSystemInfo if supported or
+ * GetSystemInfo otherwise. */
+
+ pGNSI = (PGNSI)GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")),
+ "GetNativeSystemInfo");
+ if (pGNSI != NULL)
+ pGNSI(&sysinfo);
+ else
+ GetSystemInfo(&sysinfo);
+#endif
+
+ core_count = (int)sysinfo.dwNumberOfProcessors;
+ }
+#elif defined(__OS2__)
+ {
+ ULONG proc_id;
+ ULONG status;
+
+ core_count = 0;
+ for (proc_id = 1;; ++proc_id) {
+ if (DosGetProcessorStatus(proc_id, &status)) break;
+
+ if (status == PROC_ONLINE) core_count++;
+ }
+ }
+#else
+/* other platforms */
+#endif
+
+ return core_count > 0 ? core_count : 1;
+}
+#endif
+
+void vp8_machine_specific_config(VP8_COMMON *ctx) {
+#if CONFIG_MULTITHREAD
+ ctx->processor_core_count = get_cpu_count();
+#endif /* CONFIG_MULTITHREAD */
+
+#if VPX_ARCH_ARM
+ ctx->cpu_caps = arm_cpu_caps();
+#elif VPX_ARCH_X86 || VPX_ARCH_X86_64
+ ctx->cpu_caps = x86_simd_caps();
+#elif VPX_ARCH_PPC
+ ctx->cpu_caps = ppc_simd_caps();
+#elif VPX_ARCH_MIPS
+ ctx->cpu_caps = mips_cpu_caps();
+#elif VPX_ARCH_LOONGARCH
+ ctx->cpu_caps = loongarch_cpu_caps();
+#else
+ // generic-gnu targets.
+ ctx->cpu_caps = 0;
+#endif
+}
diff --git a/media/libvpx/libvpx/vp8/common/header.h b/media/libvpx/libvpx/vp8/common/header.h
new file mode 100644
index 0000000000..e64e241908
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/header.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_HEADER_H_
+#define VPX_VP8_COMMON_HEADER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* 24 bits total */
+typedef struct {
+ unsigned int type : 1;
+ unsigned int version : 3;
+ unsigned int show_frame : 1;
+
+ /* Allow 2^20 bytes = 8 megabits for first partition */
+
+ unsigned int first_partition_length_in_bytes : 19;
+
+#ifdef PACKET_TESTING
+ unsigned int frame_number;
+ unsigned int update_gold : 1;
+ unsigned int uses_gold : 1;
+ unsigned int update_last : 1;
+ unsigned int uses_last : 1;
+#endif
+
+} VP8_HEADER;
+
+#ifdef PACKET_TESTING
+#define VP8_HEADER_SIZE 8
+#else
+#define VP8_HEADER_SIZE 3
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_HEADER_H_
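
The 24-bit header maps onto the 3-byte frame tag at the start of every VP8
frame. A standalone sketch of pulling the fields out of raw bytes, following
the field widths above and assuming the little-endian bit packing of the VP8
spec (RFC 6386); the sample bytes are made up:

    #include <stdio.h>

    int main(void) {
      /* Hypothetical first three bytes of a VP8 frame. */
      const unsigned char data[3] = { 0x50, 0x01, 0x00 };
      unsigned int tag = data[0] | (data[1] << 8) | (data[2] << 16);

      unsigned int type = tag & 1; /* 0 = key frame */
      unsigned int version = (tag >> 1) & 7;
      unsigned int show_frame = (tag >> 4) & 1;
      unsigned int first_part_size = (tag >> 5) & 0x7FFFF;

      printf("type=%u version=%u show=%u first_partition=%u bytes\n", type,
             version, show_frame, first_part_size);
      return 0;
    }
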
diff --git a/media/libvpx/libvpx/vp8/common/idct_blk.c b/media/libvpx/libvpx/vp8/common/idct_blk.c
new file mode 100644
index 0000000000..ebe1774f56
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/idct_blk.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vpx_mem/vpx_mem.h"
+
+void vp8_dequant_idct_add_y_block_c(short *q, short *dq, unsigned char *dst,
+ int stride, char *eobs) {
+ int i, j;
+
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j) {
+ if (*eobs++ > 1) {
+ vp8_dequant_idct_add_c(q, dq, dst, stride);
+ } else {
+ vp8_dc_only_idct_add_c(q[0] * dq[0], dst, stride, dst, stride);
+ memset(q, 0, 2 * sizeof(q[0]));
+ }
+
+ q += 16;
+ dst += 4;
+ }
+
+ dst += 4 * stride - 16;
+ }
+}
+
+void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dst_u,
+ unsigned char *dst_v, int stride,
+ char *eobs) {
+ int i, j;
+
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 2; ++j) {
+ if (*eobs++ > 1) {
+ vp8_dequant_idct_add_c(q, dq, dst_u, stride);
+ } else {
+ vp8_dc_only_idct_add_c(q[0] * dq[0], dst_u, stride, dst_u, stride);
+ memset(q, 0, 2 * sizeof(q[0]));
+ }
+
+ q += 16;
+ dst_u += 4;
+ }
+
+ dst_u += 4 * stride - 8;
+ }
+
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 2; ++j) {
+ if (*eobs++ > 1) {
+ vp8_dequant_idct_add_c(q, dq, dst_v, stride);
+ } else {
+ vp8_dc_only_idct_add_c(q[0] * dq[0], dst_v, stride, dst_v, stride);
+ memset(q, 0, 2 * sizeof(q[0]));
+ }
+
+ q += 16;
+ dst_v += 4;
+ }
+
+ dst_v += 4 * stride - 8;
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/idctllm.c b/media/libvpx/libvpx/vp8/common/idctllm.c
new file mode 100644
index 0000000000..2f5adc0b40
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/idctllm.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+
+/****************************************************************************
+ * Notes:
+ *
+ * This implementation makes use of 16-bit fixed-point versions of two
+ * multiplicative constants:
+ * 1. sqrt(2) * cos(pi/8)
+ * 2. sqrt(2) * sin(pi/8)
+ * Because the first constant is bigger than 1, to maintain the same 16-bit
+ * fixed-point precision as the second one, we use the identity
+ * x * a = x + x * (a - 1)
+ * so
+ * x * sqrt(2) * cos(pi/8) = x + x * (sqrt(2) * cos(pi/8) - 1).
+ **************************************************************************/
+static const int cospi8sqrt2minus1 = 20091;
+static const int sinpi8sqrt2 = 35468;
+
+void vp8_short_idct4x4llm_c(short *input, unsigned char *pred_ptr,
+ int pred_stride, unsigned char *dst_ptr,
+ int dst_stride) {
+ int i;
+ int r, c;
+ int a1, b1, c1, d1;
+ short output[16];
+ short *ip = input;
+ short *op = output;
+ int temp1, temp2;
+ int shortpitch = 4;
+
+ for (i = 0; i < 4; ++i) {
+ a1 = ip[0] + ip[8];
+ b1 = ip[0] - ip[8];
+
+ temp1 = (ip[4] * sinpi8sqrt2) >> 16;
+ temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
+ c1 = temp1 - temp2;
+
+ temp1 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16);
+ temp2 = (ip[12] * sinpi8sqrt2) >> 16;
+ d1 = temp1 + temp2;
+
+ op[shortpitch * 0] = a1 + d1;
+ op[shortpitch * 3] = a1 - d1;
+
+ op[shortpitch * 1] = b1 + c1;
+ op[shortpitch * 2] = b1 - c1;
+
+ ip++;
+ op++;
+ }
+
+ ip = output;
+ op = output;
+
+ for (i = 0; i < 4; ++i) {
+ a1 = ip[0] + ip[2];
+ b1 = ip[0] - ip[2];
+
+ temp1 = (ip[1] * sinpi8sqrt2) >> 16;
+ temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16);
+ c1 = temp1 - temp2;
+
+ temp1 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16);
+ temp2 = (ip[3] * sinpi8sqrt2) >> 16;
+ d1 = temp1 + temp2;
+
+ op[0] = (a1 + d1 + 4) >> 3;
+ op[3] = (a1 - d1 + 4) >> 3;
+
+ op[1] = (b1 + c1 + 4) >> 3;
+ op[2] = (b1 - c1 + 4) >> 3;
+
+ ip += shortpitch;
+ op += shortpitch;
+ }
+
+ ip = output;
+ for (r = 0; r < 4; ++r) {
+ for (c = 0; c < 4; ++c) {
+ int a = ip[c] + pred_ptr[c];
+
+ if (a < 0) a = 0;
+
+ if (a > 255) a = 255;
+
+ dst_ptr[c] = (unsigned char)a;
+ }
+ ip += 4;
+ dst_ptr += dst_stride;
+ pred_ptr += pred_stride;
+ }
+}
+
+void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr,
+ int pred_stride, unsigned char *dst_ptr,
+ int dst_stride) {
+ int a1 = ((input_dc + 4) >> 3);
+ int r, c;
+
+ for (r = 0; r < 4; ++r) {
+ for (c = 0; c < 4; ++c) {
+ int a = a1 + pred_ptr[c];
+
+ if (a < 0) a = 0;
+
+ if (a > 255) a = 255;
+
+ dst_ptr[c] = (unsigned char)a;
+ }
+
+ dst_ptr += dst_stride;
+ pred_ptr += pred_stride;
+ }
+}
+
+void vp8_short_inv_walsh4x4_c(short *input, short *mb_dqcoeff) {
+ short output[16];
+ int i;
+ int a1, b1, c1, d1;
+ int a2, b2, c2, d2;
+ short *ip = input;
+ short *op = output;
+
+ for (i = 0; i < 4; ++i) {
+ a1 = ip[0] + ip[12];
+ b1 = ip[4] + ip[8];
+ c1 = ip[4] - ip[8];
+ d1 = ip[0] - ip[12];
+
+ op[0] = a1 + b1;
+ op[4] = c1 + d1;
+ op[8] = a1 - b1;
+ op[12] = d1 - c1;
+ ip++;
+ op++;
+ }
+
+ ip = output;
+ op = output;
+
+ for (i = 0; i < 4; ++i) {
+ a1 = ip[0] + ip[3];
+ b1 = ip[1] + ip[2];
+ c1 = ip[1] - ip[2];
+ d1 = ip[0] - ip[3];
+
+ a2 = a1 + b1;
+ b2 = c1 + d1;
+ c2 = a1 - b1;
+ d2 = d1 - c1;
+
+ op[0] = (a2 + 3) >> 3;
+ op[1] = (b2 + 3) >> 3;
+ op[2] = (c2 + 3) >> 3;
+ op[3] = (d2 + 3) >> 3;
+
+ ip += 4;
+ op += 4;
+ }
+
+ for (i = 0; i < 16; ++i) {
+ mb_dqcoeff[i * 16] = output[i];
+ }
+}
+
+void vp8_short_inv_walsh4x4_1_c(short *input, short *mb_dqcoeff) {
+ int i;
+ int a1;
+
+ a1 = ((input[0] + 3) >> 3);
+ for (i = 0; i < 16; ++i) {
+ mb_dqcoeff[i * 16] = a1;
+ }
+}
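
The two constants at the top of idctllm.c are 16-bit fixed-point (Q16)
encodings of the values described in the notes. A quick standalone check that
20091 and 35468 match the derivation in the comment (round-to-nearest is an
assumption here):

    #include <math.h>
    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    int main(void) {
      const double c = sqrt(2.0) * cos(M_PI / 8.0); /* ~1.30656 */
      const double s = sqrt(2.0) * sin(M_PI / 8.0); /* ~0.54120 */

      /* (c - 1) and s both fit in 16 bits of Q16 fraction. */
      printf("cospi8sqrt2minus1 = %d (table: 20091)\n",
             (int)round((c - 1.0) * 65536.0));
      printf("sinpi8sqrt2       = %d (table: 35468)\n",
             (int)round(s * 65536.0));
      return 0;
    }
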
diff --git a/media/libvpx/libvpx/vp8/common/invtrans.h b/media/libvpx/libvpx/vp8/common/invtrans.h
new file mode 100644
index 0000000000..aed7bb0600
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/invtrans.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_INVTRANS_H_
+#define VPX_VP8_COMMON_INVTRANS_H_
+
+#include "./vpx_config.h"
+#include "vp8_rtcd.h"
+#include "blockd.h"
+#include "onyxc_int.h"
+
+#if CONFIG_MULTITHREAD
+#include "vpx_mem/vpx_mem.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static void eob_adjust(char *eobs, short *diff) {
+  /* eob adjust: the idct can be skipped only if both the dc and eob are zero */
+ int js;
+ for (js = 0; js < 16; ++js) {
+ if ((eobs[js] == 0) && (diff[0] != 0)) eobs[js]++;
+ diff += 16;
+ }
+}
+
+static INLINE void vp8_inverse_transform_mby(MACROBLOCKD *xd) {
+ short *DQC = xd->dequant_y1;
+
+ if (xd->mode_info_context->mbmi.mode != SPLITMV) {
+ /* do 2nd order transform on the dc block */
+ if (xd->eobs[24] > 1) {
+ vp8_short_inv_walsh4x4(&xd->block[24].dqcoeff[0], xd->qcoeff);
+ } else {
+ vp8_short_inv_walsh4x4_1(&xd->block[24].dqcoeff[0], xd->qcoeff);
+ }
+ eob_adjust(xd->eobs, xd->qcoeff);
+
+ DQC = xd->dequant_y1_dc;
+ }
+ vp8_dequant_idct_add_y_block(xd->qcoeff, DQC, xd->dst.y_buffer,
+ xd->dst.y_stride, xd->eobs);
+}
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_INVTRANS_H_
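
eob_adjust bumps the recorded eob of any block whose eob is zero but whose DC
term became nonzero after the inverse WHT, so the per-block IDCT dispatch
cannot skip it. A standalone restatement of that adjustment on two blocks:

    #include <stdio.h>

    int main(void) {
      /* One eob and one 16-coefficient block per 4x4 sub-block; only two
         blocks are shown. */
      char eobs[2] = { 0, 0 };
      short diff[32] = { 0 };
      int i;

      diff[16] = 7; /* second block gained a DC term from the inverse WHT */

      for (i = 0; i < 2; ++i) {
        if (eobs[i] == 0 && diff[i * 16] != 0) eobs[i]++;
      }
      printf("eobs: %d %d\n", eobs[0], eobs[1]); /* prints "eobs: 0 1" */
      return 0;
    }
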
diff --git a/media/libvpx/libvpx/vp8/common/loongarch/idct_lsx.c b/media/libvpx/libvpx/vp8/common/loongarch/idct_lsx.c
new file mode 100644
index 0000000000..eee871eec4
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/loongarch/idct_lsx.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2022 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/blockd.h"
+#include "vpx_util/loongson_intrinsics.h"
+
+static const int32_t cospi8sqrt2minus1 = 20091;
+static const int32_t sinpi8sqrt2 = 35468;
+
+#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
+ do { \
+ __m128i tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
+ \
+ DUP2_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, tmp0_m, tmp1_m); \
+ DUP2_ARG2(__lsx_vilvh_h, in1, in0, in3, in2, tmp2_m, tmp3_m); \
+ DUP2_ARG2(__lsx_vilvl_w, tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2); \
+ DUP2_ARG2(__lsx_vilvh_w, tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3); \
+ } while (0)
+
+#define TRANSPOSE_TWO_4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \
+ do { \
+ __m128i s4_m, s5_m, s6_m, s7_m; \
+ \
+ TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, s4_m, s5_m, s6_m, s7_m); \
+ DUP2_ARG2(__lsx_vilvl_d, s6_m, s4_m, s7_m, s5_m, out0, out2); \
+ out1 = __lsx_vilvh_d(s6_m, s4_m); \
+ out3 = __lsx_vilvh_d(s7_m, s5_m); \
+ } while (0)
+
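+/* Sign-extend each halfword of in0 to a word (by interleaving it into the
+   high half and arithmetic-shifting back down), multiply by sinpi8sqrt2
+   keeping the high 16 bits of the product, and repack the results into
+   in1. */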
+#define EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in0, in1) \
+ do { \
+ __m128i zero_m = __lsx_vldi(0); \
+ __m128i tmp1_m, tmp2_m; \
+ __m128i sinpi8_sqrt2_m = __lsx_vreplgr2vr_w(sinpi8sqrt2); \
+ \
+ tmp1_m = __lsx_vilvl_h(in0, zero_m); \
+ tmp2_m = __lsx_vilvh_h(in0, zero_m); \
+ tmp1_m = __lsx_vsrai_w(tmp1_m, 16); \
+ tmp2_m = __lsx_vsrai_w(tmp2_m, 16); \
+ tmp1_m = __lsx_vmul_w(tmp1_m, sinpi8_sqrt2_m); \
+ tmp1_m = __lsx_vsrai_w(tmp1_m, 16); \
+ tmp2_m = __lsx_vmul_w(tmp2_m, sinpi8_sqrt2_m); \
+ tmp2_m = __lsx_vsrai_w(tmp2_m, 16); \
+ in1 = __lsx_vpickev_h(tmp2_m, tmp1_m); \
+ } while (0)
+
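+/* One 1-D pass of the VP8 4x4 idct on halfword lanes:
+   a1 = in0 + in2, b1 = in0 - in2,
+   c1 = in1 * sinpi8sqrt2 - (in3 + in3 * cospi8sqrt2minus1),
+   d1 = (in1 + in1 * cospi8sqrt2minus1) + in3 * sinpi8sqrt2,
+   followed by the output butterfly. */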
+#define VP8_IDCT_1D_H(in0, in1, in2, in3, out0, out1, out2, out3) \
+ do { \
+ __m128i a1_m, b1_m, c1_m, d1_m; \
+ __m128i c_tmp1_m, c_tmp2_m; \
+ __m128i d_tmp1_m, d_tmp2_m; \
+ __m128i const_cospi8sqrt2minus1_m; \
+ \
+ const_cospi8sqrt2minus1_m = __lsx_vreplgr2vr_h(cospi8sqrt2minus1); \
+ a1_m = __lsx_vadd_h(in0, in2); \
+ b1_m = __lsx_vsub_h(in0, in2); \
+ EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in1, c_tmp1_m); \
+ \
+ c_tmp2_m = __lsx_vmuh_h(in3, const_cospi8sqrt2minus1_m); \
+ c_tmp2_m = __lsx_vslli_h(c_tmp2_m, 1); \
+ c_tmp2_m = __lsx_vsrai_h(c_tmp2_m, 1); \
+ c_tmp2_m = __lsx_vadd_h(in3, c_tmp2_m); \
+ c1_m = __lsx_vsub_h(c_tmp1_m, c_tmp2_m); \
+ \
+ d_tmp1_m = __lsx_vmuh_h(in1, const_cospi8sqrt2minus1_m); \
+ d_tmp1_m = __lsx_vslli_h(d_tmp1_m, 1); \
+ d_tmp1_m = __lsx_vsrai_h(d_tmp1_m, 1); \
+ d_tmp1_m = __lsx_vadd_h(in1, d_tmp1_m); \
+ EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in3, d_tmp2_m); \
+ d1_m = __lsx_vadd_h(d_tmp1_m, d_tmp2_m); \
+ LSX_BUTTERFLY_4_H(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
+ } while (0)
+
+#define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) \
+ do { \
+ __m128i a1_m, b1_m, c1_m, d1_m; \
+ __m128i c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \
+ __m128i const_cospi8sqrt2minus1_m, sinpi8_sqrt2_m; \
+ \
+ const_cospi8sqrt2minus1_m = __lsx_vreplgr2vr_w(cospi8sqrt2minus1); \
+ sinpi8_sqrt2_m = __lsx_vreplgr2vr_w(sinpi8sqrt2); \
+ a1_m = __lsx_vadd_w(in0, in2); \
+ b1_m = __lsx_vsub_w(in0, in2); \
+ c_tmp1_m = __lsx_vmul_w(in1, sinpi8_sqrt2_m); \
+ c_tmp1_m = __lsx_vsrai_w(c_tmp1_m, 16); \
+ c_tmp2_m = __lsx_vmul_w(in3, const_cospi8sqrt2minus1_m); \
+ c_tmp2_m = __lsx_vsrai_w(c_tmp2_m, 16); \
+ c_tmp2_m = __lsx_vadd_w(in3, c_tmp2_m); \
+ c1_m = __lsx_vsub_w(c_tmp1_m, c_tmp2_m); \
+ d_tmp1_m = __lsx_vmul_w(in1, const_cospi8sqrt2minus1_m); \
+ d_tmp1_m = __lsx_vsrai_w(d_tmp1_m, 16); \
+ d_tmp1_m = __lsx_vadd_w(in1, d_tmp1_m); \
+ d_tmp2_m = __lsx_vmul_w(in3, sinpi8_sqrt2_m); \
+ d_tmp2_m = __lsx_vsrai_w(d_tmp2_m, 16); \
+ d1_m = __lsx_vadd_w(d_tmp1_m, d_tmp2_m); \
+ LSX_BUTTERFLY_4_W(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
+ } while (0)
+
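+/* Sign-extend the low/high four halfwords of in to words in out0/out1. */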
+#define UNPCK_SH_SW(in, out0, out1) \
+ do { \
+ out0 = __lsx_vsllwil_w_h(in, 0); \
+ out1 = __lsx_vexth_w_h(in); \
+ } while (0)
+
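+/* Add the rounded dc value ((in_dc + 4) >> 3, via a rounding shift) to a
+   4x4 block of predictor pixels, clamp to [0, 255] and store to dest. */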
+static void idct4x4_addconst_lsx(int16_t in_dc, uint8_t *pred,
+ int32_t pred_stride, uint8_t *dest,
+ int32_t dest_stride) {
+ __m128i vec, res0, res1, res2, res3, dst0, dst1;
+ __m128i pred0, pred1, pred2, pred3;
+ __m128i zero = __lsx_vldi(0);
+
+ int32_t pred_stride2 = pred_stride << 1;
+ int32_t pred_stride3 = pred_stride2 + pred_stride;
+
+ vec = __lsx_vreplgr2vr_h(in_dc);
+ vec = __lsx_vsrari_h(vec, 3);
+ pred0 = __lsx_vld(pred, 0);
+ DUP2_ARG2(__lsx_vldx, pred, pred_stride, pred, pred_stride2, pred1, pred2);
+ pred3 = __lsx_vldx(pred, pred_stride3);
+ DUP4_ARG2(__lsx_vilvl_b, zero, pred0, zero, pred1, zero, pred2, zero, pred3,
+ res0, res1, res2, res3);
+ DUP4_ARG2(__lsx_vadd_h, res0, vec, res1, vec, res2, vec, res3, vec, res0,
+ res1, res2, res3);
+ res0 = __lsx_vclip255_h(res0);
+ res1 = __lsx_vclip255_h(res1);
+ res2 = __lsx_vclip255_h(res2);
+ res3 = __lsx_vclip255_h(res3);
+
+ DUP2_ARG2(__lsx_vpickev_b, res1, res0, res3, res2, dst0, dst1);
+ dst0 = __lsx_vpickev_w(dst1, dst0);
+ __lsx_vstelm_w(dst0, dest, 0, 0);
+ dest += dest_stride;
+ __lsx_vstelm_w(dst0, dest, 0, 1);
+ dest += dest_stride;
+ __lsx_vstelm_w(dst0, dest, 0, 2);
+ dest += dest_stride;
+ __lsx_vstelm_w(dst0, dest, 0, 3);
+}
+
+void vp8_dc_only_idct_add_lsx(int16_t input_dc, uint8_t *pred_ptr,
+ int32_t pred_stride, uint8_t *dst_ptr,
+ int32_t dst_stride) {
+ idct4x4_addconst_lsx(input_dc, pred_ptr, pred_stride, dst_ptr, dst_stride);
+}
+
+static void dequant_idct4x4_addblk_2x_lsx(int16_t *input,
+ int16_t *dequant_input, uint8_t *dest,
+ int32_t dest_stride) {
+ __m128i dest0, dest1, dest2, dest3;
+ __m128i in0, in1, in2, in3, mul0, mul1, mul2, mul3, dequant_in0, dequant_in1;
+ __m128i hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3, res0, res1, res2, res3;
+ __m128i hz0l, hz1l, hz2l, hz3l, hz0r, hz1r, hz2r, hz3r;
+ __m128i vt0l, vt1l, vt2l, vt3l, vt0r, vt1r, vt2r, vt3r;
+ __m128i zero = __lsx_vldi(0);
+
+ int32_t dest_stride2 = dest_stride << 1;
+ int32_t dest_stride3 = dest_stride2 + dest_stride;
+
+ DUP4_ARG2(__lsx_vld, input, 0, input, 16, input, 32, input, 48, in0, in1, in2,
+ in3);
+ DUP2_ARG2(__lsx_vld, dequant_input, 0, dequant_input, 16, dequant_in0,
+ dequant_in1);
+
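+ /* Dequantize two horizontally adjacent 4x4 blocks at once; the 64-bit
+    even/odd picks gather matching rows of both blocks into one vector. */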
+ DUP4_ARG2(__lsx_vmul_h, in0, dequant_in0, in1, dequant_in1, in2, dequant_in0,
+ in3, dequant_in1, mul0, mul1, mul2, mul3);
+ DUP2_ARG2(__lsx_vpickev_d, mul2, mul0, mul3, mul1, in0, in2);
+ DUP2_ARG2(__lsx_vpickod_d, mul2, mul0, mul3, mul1, in1, in3);
+
+ VP8_IDCT_1D_H(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
+ TRANSPOSE_TWO_4x4_H(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
+ UNPCK_SH_SW(hz0, hz0r, hz0l);
+ UNPCK_SH_SW(hz1, hz1r, hz1l);
+ UNPCK_SH_SW(hz2, hz2r, hz2l);
+ UNPCK_SH_SW(hz3, hz3r, hz3l);
+ VP8_IDCT_1D_W(hz0l, hz1l, hz2l, hz3l, vt0l, vt1l, vt2l, vt3l);
+ DUP4_ARG2(__lsx_vsrari_w, vt0l, 3, vt1l, 3, vt2l, 3, vt3l, 3, vt0l, vt1l,
+ vt2l, vt3l);
+ VP8_IDCT_1D_W(hz0r, hz1r, hz2r, hz3r, vt0r, vt1r, vt2r, vt3r);
+ DUP4_ARG2(__lsx_vsrari_w, vt0r, 3, vt1r, 3, vt2r, 3, vt3r, 3, vt0r, vt1r,
+ vt2r, vt3r);
+ DUP4_ARG2(__lsx_vpickev_h, vt0l, vt0r, vt1l, vt1r, vt2l, vt2r, vt3l, vt3r,
+ vt0, vt1, vt2, vt3);
+ TRANSPOSE_TWO_4x4_H(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
+ dest0 = __lsx_vld(dest, 0);
+ DUP2_ARG2(__lsx_vldx, dest, dest_stride, dest, dest_stride2, dest1, dest2);
+ dest3 = __lsx_vldx(dest, dest_stride3);
+ DUP4_ARG2(__lsx_vilvl_b, zero, dest0, zero, dest1, zero, dest2, zero, dest3,
+ res0, res1, res2, res3);
+ DUP4_ARG2(__lsx_vadd_h, res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0,
+ res1, res2, res3);
+
+ res0 = __lsx_vclip255_h(res0);
+ res1 = __lsx_vclip255_h(res1);
+ res2 = __lsx_vclip255_h(res2);
+ res3 = __lsx_vclip255_h(res3);
+ DUP2_ARG2(__lsx_vpickev_b, res1, res0, res3, res2, vt0l, vt1l);
+
+ __lsx_vstelm_d(vt0l, dest, 0, 0);
+ __lsx_vstelm_d(vt0l, dest + dest_stride, 0, 1);
+ __lsx_vstelm_d(vt1l, dest + dest_stride2, 0, 0);
+ __lsx_vstelm_d(vt1l, dest + dest_stride3, 0, 1);
+
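+ /* Clear the coefficient buffer, as the scalar dequant/idct path does. */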
+ __lsx_vst(zero, input, 0);
+ __lsx_vst(zero, input, 16);
+ __lsx_vst(zero, input, 32);
+ __lsx_vst(zero, input, 48);
+}
+
+static void dequant_idct_addconst_2x_lsx(int16_t *input, int16_t *dequant_input,
+ uint8_t *dest, int32_t dest_stride) {
+ __m128i input_dc0, input_dc1, vec, res0, res1, res2, res3;
+ __m128i dest0, dest1, dest2, dest3;
+ __m128i zero = __lsx_vldi(0);
+ int32_t dest_stride2 = dest_stride << 1;
+ int32_t dest_stride3 = dest_stride2 + dest_stride;
+
+ input_dc0 = __lsx_vreplgr2vr_h(input[0] * dequant_input[0]);
+ input_dc1 = __lsx_vreplgr2vr_h(input[16] * dequant_input[0]);
+ DUP2_ARG2(__lsx_vsrari_h, input_dc0, 3, input_dc1, 3, input_dc0, input_dc1);
+ vec = __lsx_vpickev_d(input_dc1, input_dc0);
+ input[0] = 0;
+ input[16] = 0;
+ dest0 = __lsx_vld(dest, 0);
+ DUP2_ARG2(__lsx_vldx, dest, dest_stride, dest, dest_stride2, dest1, dest2);
+ dest3 = __lsx_vldx(dest, dest_stride3);
+ DUP4_ARG2(__lsx_vilvl_b, zero, dest0, zero, dest1, zero, dest2, zero, dest3,
+ res0, res1, res2, res3);
+ DUP4_ARG2(__lsx_vadd_h, res0, vec, res1, vec, res2, vec, res3, vec, res0,
+ res1, res2, res3);
+ res0 = __lsx_vclip255_h(res0);
+ res1 = __lsx_vclip255_h(res1);
+ res2 = __lsx_vclip255_h(res2);
+ res3 = __lsx_vclip255_h(res3);
+
+ DUP2_ARG2(__lsx_vpickev_b, res1, res0, res3, res2, res0, res1);
+ __lsx_vstelm_d(res0, dest, 0, 0);
+ __lsx_vstelm_d(res0, dest + dest_stride, 0, 1);
+ __lsx_vstelm_d(res1, dest + dest_stride2, 0, 0);
+ __lsx_vstelm_d(res1, dest + dest_stride3, 0, 1);
+}
+
+void vp8_dequant_idct_add_y_block_lsx(int16_t *q, int16_t *dq, uint8_t *dst,
+ int32_t stride, char *eobs) {
+ int16_t *eobs_h = (int16_t *)eobs;
+ uint8_t i;
+
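+ /* Each int16 in eobs_h packs the eob bytes of two adjacent 4x4 blocks.
+    Any bit of 0xfefe set means at least one block has eob > 1 and needs
+    the full idct; otherwise both blocks are dc-only. */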
+ for (i = 4; i--;) {
+ if (eobs_h[0]) {
+ if (eobs_h[0] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_lsx(q, dq, dst, stride);
+ } else {
+ dequant_idct_addconst_2x_lsx(q, dq, dst, stride);
+ }
+ }
+
+ q += 32;
+
+ if (eobs_h[1]) {
+ if (eobs_h[1] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_lsx(q, dq, dst + 8, stride);
+ } else {
+ dequant_idct_addconst_2x_lsx(q, dq, dst + 8, stride);
+ }
+ }
+
+ q += 32;
+ dst += (4 * stride);
+ eobs_h += 2;
+ }
+}
+
+void vp8_dequant_idct_add_uv_block_lsx(int16_t *q, int16_t *dq, uint8_t *dst_u,
+ uint8_t *dst_v, int32_t stride,
+ char *eobs) {
+ int16_t *eobs_h = (int16_t *)eobs;
+ if (eobs_h[0]) {
+ if (eobs_h[0] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_lsx(q, dq, dst_u, stride);
+ } else {
+ dequant_idct_addconst_2x_lsx(q, dq, dst_u, stride);
+ }
+ }
+
+ q += 32;
+ dst_u += (stride * 4);
+
+ if (eobs_h[1]) {
+ if (eobs_h[1] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_lsx(q, dq, dst_u, stride);
+ } else {
+ dequant_idct_addconst_2x_lsx(q, dq, dst_u, stride);
+ }
+ }
+
+ q += 32;
+
+ if (eobs_h[2]) {
+ if (eobs_h[2] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_lsx(q, dq, dst_v, stride);
+ } else {
+ dequant_idct_addconst_2x_lsx(q, dq, dst_v, stride);
+ }
+ }
+ q += 32;
+ dst_v += (stride * 4);
+
+ if (eobs_h[3]) {
+ if (eobs_h[3] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_lsx(q, dq, dst_v, stride);
+ } else {
+ dequant_idct_addconst_2x_lsx(q, dq, dst_v, stride);
+ }
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/loongarch/loopfilter_filters_lsx.c b/media/libvpx/libvpx/vp8/common/loongarch/loopfilter_filters_lsx.c
new file mode 100644
index 0000000000..79c3ea6dbb
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/loongarch/loopfilter_filters_lsx.c
@@ -0,0 +1,743 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Lu Wang <wanglu@loongson.cn>
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/loopfilter.h"
+#include "vpx_util/loongson_intrinsics.h"
+
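+/* VP8 inner-edge 4-tap loop filter on 16 byte lanes at a time: pixels are
+   biased to signed range with xor 0x80 and filtered with saturating adds;
+   the outer taps p1/q1 are only updated where hev (high edge variance) is
+   not set. */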
+#define VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev) \
+ do { \
+ __m128i p1_m, p0_m, q0_m, q1_m, filt, q0_sub_p0, t1, t2; \
+ const __m128i cnst4b = __lsx_vldi(4); \
+ const __m128i cnst3b = __lsx_vldi(3); \
+ \
+ p1_m = __lsx_vxori_b(p1, 0x80); \
+ p0_m = __lsx_vxori_b(p0, 0x80); \
+ q0_m = __lsx_vxori_b(q0, 0x80); \
+ q1_m = __lsx_vxori_b(q1, 0x80); \
+ \
+ filt = __lsx_vssub_b(p1_m, q1_m); \
+ filt = __lsx_vand_v(filt, hev); \
+ q0_sub_p0 = __lsx_vssub_b(q0_m, p0_m); \
+ filt = __lsx_vsadd_b(filt, q0_sub_p0); \
+ filt = __lsx_vsadd_b(filt, q0_sub_p0); \
+ filt = __lsx_vsadd_b(filt, q0_sub_p0); \
+ filt = __lsx_vand_v(filt, mask); \
+ t1 = __lsx_vsadd_b(filt, cnst4b); \
+ t1 = __lsx_vsra_b(t1, cnst3b); \
+ t2 = __lsx_vsadd_b(filt, cnst3b); \
+ t2 = __lsx_vsra_b(t2, cnst3b); \
+ q0_m = __lsx_vssub_b(q0_m, t1); \
+ q0 = __lsx_vxori_b(q0_m, 0x80); \
+ p0_m = __lsx_vsadd_b(p0_m, t2); \
+ p0 = __lsx_vxori_b(p0_m, 0x80); \
+ filt = __lsx_vsrari_b(t1, 1); \
+ hev = __lsx_vxori_b(hev, 0xff); \
+ filt = __lsx_vand_v(filt, hev); \
+ q1_m = __lsx_vssub_b(q1_m, filt); \
+ q1 = __lsx_vxori_b(q1_m, 0x80); \
+ p1_m = __lsx_vsadd_b(p1_m, filt); \
+ p1 = __lsx_vxori_b(p1_m, 0x80); \
+ } while (0)
+
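+/* VP8 macroblock-edge (strong) filter: the short-filter adjustment is
+   applied where hev is set, while the remaining pixels are corrected with
+   the cascaded (9 * w + 63) >> 7, (18 * w + 63) >> 7 and (27 * w + 63) >> 7
+   taps.  __lsx_vldi(1033) / __lsx_vldi(1087) replicate the halfword
+   constants 9 and 63. */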
+#define VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) \
+ do { \
+ __m128i p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \
+ __m128i u, filt, t1, t2, filt_sign, q0_sub_p0; \
+ __m128i filt_r, filt_l; \
+ __m128i temp0, temp1, temp2, temp3; \
+ const __m128i cnst4b = __lsx_vldi(4); \
+ const __m128i cnst3b = __lsx_vldi(3); \
+ const __m128i cnst9h = __lsx_vldi(1033); \
+ const __m128i cnst63h = __lsx_vldi(1087); \
+ \
+ p2_m = __lsx_vxori_b(p2, 0x80); \
+ p1_m = __lsx_vxori_b(p1, 0x80); \
+ p0_m = __lsx_vxori_b(p0, 0x80); \
+ q0_m = __lsx_vxori_b(q0, 0x80); \
+ q1_m = __lsx_vxori_b(q1, 0x80); \
+ q2_m = __lsx_vxori_b(q2, 0x80); \
+ \
+ filt = __lsx_vssub_b(p1_m, q1_m); \
+ q0_sub_p0 = __lsx_vssub_b(q0_m, p0_m); \
+ filt = __lsx_vsadd_b(filt, q0_sub_p0); \
+ filt = __lsx_vsadd_b(filt, q0_sub_p0); \
+ filt = __lsx_vsadd_b(filt, q0_sub_p0); \
+ filt = __lsx_vand_v(filt, mask); \
+ \
+ t2 = __lsx_vand_v(filt, hev); \
+ hev = __lsx_vxori_b(hev, 0xff); \
+ filt = __lsx_vand_v(hev, filt); \
+ t1 = __lsx_vsadd_b(t2, cnst4b); \
+ t1 = __lsx_vsra_b(t1, cnst3b); \
+ t2 = __lsx_vsadd_b(t2, cnst3b); \
+ t2 = __lsx_vsra_b(t2, cnst3b); \
+ q0_m = __lsx_vssub_b(q0_m, t1); \
+ p0_m = __lsx_vsadd_b(p0_m, t2); \
+ filt_sign = __lsx_vslti_b(filt, 0); \
+ filt_r = __lsx_vilvl_b(filt_sign, filt); \
+ filt_l = __lsx_vilvh_b(filt_sign, filt); \
+ temp0 = __lsx_vmul_h(filt_r, cnst9h); \
+ temp1 = __lsx_vadd_h(temp0, cnst63h); \
+ temp2 = __lsx_vmul_h(filt_l, cnst9h); \
+ temp3 = __lsx_vadd_h(temp2, cnst63h); \
+ \
+ u = __lsx_vssrani_b_h(temp3, temp1, 7); \
+ q2_m = __lsx_vssub_b(q2_m, u); \
+ p2_m = __lsx_vsadd_b(p2_m, u); \
+ q2 = __lsx_vxori_b(q2_m, 0x80); \
+ p2 = __lsx_vxori_b(p2_m, 0x80); \
+ \
+ temp1 = __lsx_vadd_h(temp1, temp0); \
+ temp3 = __lsx_vadd_h(temp3, temp2); \
+ \
+ u = __lsx_vssrani_b_h(temp3, temp1, 7); \
+ q1_m = __lsx_vssub_b(q1_m, u); \
+ p1_m = __lsx_vsadd_b(p1_m, u); \
+ q1 = __lsx_vxori_b(q1_m, 0x80); \
+ p1 = __lsx_vxori_b(p1_m, 0x80); \
+ \
+ temp1 = __lsx_vadd_h(temp1, temp0); \
+ temp3 = __lsx_vadd_h(temp3, temp2); \
+ \
+ u = __lsx_vssrani_b_h(temp3, temp1, 7); \
+ q0_m = __lsx_vssub_b(q0_m, u); \
+ p0_m = __lsx_vsadd_b(p0_m, u); \
+ q0 = __lsx_vxori_b(q0_m, 0x80); \
+ p0 = __lsx_vxori_b(p0_m, 0x80); \
+ } while (0)
+
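+/* Build the filter on/off mask from the limit/blimit thresholds and the
+   hev (high edge variance) flag from thresh, using unsigned absolute
+   differences of the eight pixels across the edge. */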
+#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
+ limit_in, b_limit_in, thresh_in, hev_out, mask_out, \
+ flat_out) \
+ do { \
+ __m128i p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \
+ __m128i p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \
+ \
+ p3_asub_p2_m = __lsx_vabsd_bu(p3_in, p2_in); \
+ p2_asub_p1_m = __lsx_vabsd_bu(p2_in, p1_in); \
+ p1_asub_p0_m = __lsx_vabsd_bu(p1_in, p0_in); \
+ q1_asub_q0_m = __lsx_vabsd_bu(q1_in, q0_in); \
+ q2_asub_q1_m = __lsx_vabsd_bu(q2_in, q1_in); \
+ q3_asub_q2_m = __lsx_vabsd_bu(q3_in, q2_in); \
+ p0_asub_q0_m = __lsx_vabsd_bu(p0_in, q0_in); \
+ p1_asub_q1_m = __lsx_vabsd_bu(p1_in, q1_in); \
+ flat_out = __lsx_vmax_bu(p1_asub_p0_m, q1_asub_q0_m); \
+ hev_out = __lsx_vslt_bu(thresh_in, flat_out); \
+ p0_asub_q0_m = __lsx_vsadd_bu(p0_asub_q0_m, p0_asub_q0_m); \
+ p1_asub_q1_m = __lsx_vsrli_b(p1_asub_q1_m, 1); \
+ p0_asub_q0_m = __lsx_vsadd_bu(p0_asub_q0_m, p1_asub_q1_m); \
+ mask_out = __lsx_vslt_bu(b_limit_in, p0_asub_q0_m); \
+ mask_out = __lsx_vmax_bu(flat_out, mask_out); \
+ p3_asub_p2_m = __lsx_vmax_bu(p3_asub_p2_m, p2_asub_p1_m); \
+ mask_out = __lsx_vmax_bu(p3_asub_p2_m, mask_out); \
+ q2_asub_q1_m = __lsx_vmax_bu(q2_asub_q1_m, q3_asub_q2_m); \
+ mask_out = __lsx_vmax_bu(q2_asub_q1_m, mask_out); \
+ mask_out = __lsx_vslt_bu(limit_in, mask_out); \
+ mask_out = __lsx_vxori_b(mask_out, 0xff); \
+ } while (0)
+
+#define VP8_ST6x1_B(in0, in0_idx, in1, in1_idx, pdst, stride) \
+ do { \
+ __lsx_vstelm_w(in0, pdst, 0, in0_idx); \
+ __lsx_vstelm_h(in1, pdst + stride, 0, in1_idx); \
+ } while (0)
+
+static void loop_filter_horizontal_4_dual_lsx(uint8_t *src, int32_t pitch,
+ const uint8_t *b_limit0_ptr,
+ const uint8_t *limit0_ptr,
+ const uint8_t *thresh0_ptr,
+ const uint8_t *b_limit1_ptr,
+ const uint8_t *limit1_ptr,
+ const uint8_t *thresh1_ptr) {
+ int32_t pitch_x2 = pitch << 1;
+ int32_t pitch_x3 = pitch_x2 + pitch;
+ int32_t pitch_x4 = pitch << 2;
+
+ __m128i mask, hev, flat;
+ __m128i thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+
+ DUP4_ARG2(__lsx_vldx, src, -pitch_x4, src, -pitch_x3, src, -pitch_x2, src,
+ -pitch, p3, p2, p1, p0);
+ q0 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, pitch, src, pitch_x2, q1, q2);
+ q3 = __lsx_vldx(src, pitch_x3);
+
+ thresh0 = __lsx_vldrepl_b(thresh0_ptr, 0);
+ thresh1 = __lsx_vldrepl_b(thresh1_ptr, 0);
+ thresh0 = __lsx_vilvl_d(thresh1, thresh0);
+
+ b_limit0 = __lsx_vldrepl_b(b_limit0_ptr, 0);
+ b_limit1 = __lsx_vldrepl_b(b_limit1_ptr, 0);
+ b_limit0 = __lsx_vilvl_d(b_limit1, b_limit0);
+
+ limit0 = __lsx_vldrepl_b(limit0_ptr, 0);
+ limit1 = __lsx_vldrepl_b(limit1_ptr, 0);
+ limit0 = __lsx_vilvl_d(limit1, limit0);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
+ mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ __lsx_vstx(p1, src, -pitch_x2);
+ __lsx_vstx(p0, src, -pitch);
+ __lsx_vst(q0, src, 0);
+ __lsx_vstx(q1, src, pitch);
+}
+
+static void loop_filter_vertical_4_dual_lsx(uint8_t *src, int32_t pitch,
+ const uint8_t *b_limit0_ptr,
+ const uint8_t *limit0_ptr,
+ const uint8_t *thresh0_ptr,
+ const uint8_t *b_limit1_ptr,
+ const uint8_t *limit1_ptr,
+ const uint8_t *thresh1_ptr) {
+ uint8_t *src_tmp0 = src - 4;
+ int32_t pitch_x2 = pitch << 1;
+ int32_t pitch_x3 = pitch_x2 + pitch;
+ int32_t pitch_x4 = pitch << 2;
+ __m128i mask, hev, flat;
+ __m128i thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7;
+ __m128i row8, row9, row10, row11, row12, row13, row14, row15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+
+ row0 = __lsx_vld(src_tmp0, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp0, pitch, src_tmp0, pitch_x2, row1, row2);
+ row3 = __lsx_vldx(src_tmp0, pitch_x3);
+ src_tmp0 += pitch_x4;
+ row4 = __lsx_vld(src_tmp0, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp0, pitch, src_tmp0, pitch_x2, row5, row6);
+ row7 = __lsx_vldx(src_tmp0, pitch_x3);
+ src_tmp0 += pitch_x4;
+
+ row8 = __lsx_vld(src_tmp0, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp0, pitch, src_tmp0, pitch_x2, row9, row10);
+ row11 = __lsx_vldx(src_tmp0, pitch_x3);
+ src_tmp0 += pitch_x4;
+ row12 = __lsx_vld(src_tmp0, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp0, pitch, src_tmp0, pitch_x2, row13, row14);
+ row15 = __lsx_vldx(src_tmp0, pitch_x3);
+
+ LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+ row9, row10, row11, row12, row13, row14, row15, p3, p2,
+ p1, p0, q0, q1, q2, q3);
+
+ thresh0 = __lsx_vldrepl_b(thresh0_ptr, 0);
+ thresh1 = __lsx_vldrepl_b(thresh1_ptr, 0);
+ thresh0 = __lsx_vilvl_d(thresh1, thresh0);
+
+ b_limit0 = __lsx_vldrepl_b(b_limit0_ptr, 0);
+ b_limit1 = __lsx_vldrepl_b(b_limit1_ptr, 0);
+ b_limit0 = __lsx_vilvl_d(b_limit1, b_limit0);
+
+ limit0 = __lsx_vldrepl_b(limit0_ptr, 0);
+ limit1 = __lsx_vldrepl_b(limit1_ptr, 0);
+ limit0 = __lsx_vilvl_d(limit1, limit0);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
+ mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ DUP2_ARG2(__lsx_vilvl_b, p0, p1, q1, q0, tmp0, tmp1);
+ tmp2 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp3 = __lsx_vilvh_h(tmp1, tmp0);
+ DUP2_ARG2(__lsx_vilvh_b, p0, p1, q1, q0, tmp0, tmp1);
+ tmp4 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp5 = __lsx_vilvh_h(tmp1, tmp0);
+
+ src -= 2;
+ __lsx_vstelm_w(tmp2, src, 0, 0);
+ src += pitch;
+ __lsx_vstelm_w(tmp2, src, 0, 1);
+ src += pitch;
+ __lsx_vstelm_w(tmp2, src, 0, 2);
+ src += pitch;
+ __lsx_vstelm_w(tmp2, src, 0, 3);
+ src += pitch;
+
+ __lsx_vstelm_w(tmp3, src, 0, 0);
+ src += pitch;
+ __lsx_vstelm_w(tmp3, src, 0, 1);
+ src += pitch;
+ __lsx_vstelm_w(tmp3, src, 0, 2);
+ src += pitch;
+ __lsx_vstelm_w(tmp3, src, 0, 3);
+ src += pitch;
+
+ __lsx_vstelm_w(tmp4, src, 0, 0);
+ src += pitch;
+ __lsx_vstelm_w(tmp4, src, 0, 1);
+ src += pitch;
+ __lsx_vstelm_w(tmp4, src, 0, 2);
+ src += pitch;
+ __lsx_vstelm_w(tmp4, src, 0, 3);
+ src += pitch;
+
+ __lsx_vstelm_w(tmp5, src, 0, 0);
+ src += pitch;
+ __lsx_vstelm_w(tmp5, src, 0, 1);
+ src += pitch;
+ __lsx_vstelm_w(tmp5, src, 0, 2);
+ src += pitch;
+ __lsx_vstelm_w(tmp5, src, 0, 3);
+}
+
+static void loop_filter_horizontal_edge_uv_lsx(uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch,
+ const uint8_t b_limit_in,
+ const uint8_t limit_in,
+ const uint8_t thresh_in) {
+ int32_t pitch_x2 = pitch << 1;
+ int32_t pitch_x3 = pitch_x2 + pitch;
+ int32_t pitch_x4 = pitch << 2;
+
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
+ __m128i p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
+
+ thresh = __lsx_vreplgr2vr_b(thresh_in);
+ limit = __lsx_vreplgr2vr_b(limit_in);
+ b_limit = __lsx_vreplgr2vr_b(b_limit_in);
+
+ DUP4_ARG2(__lsx_vldx, src_u, -pitch_x4, src_u, -pitch_x3, src_u, -pitch_x2,
+ src_u, -pitch, p3_u, p2_u, p1_u, p0_u);
+ q0_u = __lsx_vld(src_u, 0);
+ DUP2_ARG2(__lsx_vldx, src_u, pitch, src_u, pitch_x2, q1_u, q2_u);
+ q3_u = __lsx_vldx(src_u, pitch_x3);
+
+ DUP4_ARG2(__lsx_vldx, src_v, -pitch_x4, src_v, -pitch_x3, src_v, -pitch_x2,
+ src_v, -pitch, p3_v, p2_v, p1_v, p0_v);
+ q0_v = __lsx_vld(src_v, 0);
+ DUP2_ARG2(__lsx_vldx, src_v, pitch, src_v, pitch_x2, q1_v, q2_v);
+ q3_v = __lsx_vldx(src_v, pitch_x3);
+
+ /* Interleave u and v so the low (right) 8 bytes of each vector hold u
+    pixels and the high (left) 8 bytes hold v pixels. */
+ DUP4_ARG2(__lsx_vilvl_d, p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3,
+ p2, p1, p0);
+ DUP4_ARG2(__lsx_vilvl_d, q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0,
+ q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ __lsx_vstelm_d(q1, src_u + pitch, 0, 0);
+ __lsx_vstelm_d(q0, src_u, 0, 0);
+ __lsx_vstelm_d(p0, src_u - pitch, 0, 0);
+ __lsx_vstelm_d(p1, src_u - pitch_x2, 0, 0);
+
+ __lsx_vstelm_d(q1, src_v + pitch, 0, 1);
+ __lsx_vstelm_d(q0, src_v, 0, 1);
+ __lsx_vstelm_d(p0, src_v - pitch, 0, 1);
+ __lsx_vstelm_d(p1, src_v - pitch_x2, 0, 1);
+}
+
+static void loop_filter_vertical_edge_uv_lsx(uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch,
+ const uint8_t b_limit_in,
+ const uint8_t limit_in,
+ const uint8_t thresh_in) {
+ uint8_t *src_u_tmp, *src_v_tmp;
+ int32_t pitch_x2 = pitch << 1;
+ int32_t pitch_x3 = pitch_x2 + pitch;
+ int32_t pitch_x4 = pitch << 2;
+
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ __m128i row9, row10, row11, row12, row13, row14, row15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+
+ thresh = __lsx_vreplgr2vr_b(thresh_in);
+ limit = __lsx_vreplgr2vr_b(limit_in);
+ b_limit = __lsx_vreplgr2vr_b(b_limit_in);
+
+ src_u_tmp = src_u - 4;
+ row0 = __lsx_vld(src_u_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, src_u_tmp, pitch, src_u_tmp, pitch_x2, row1, row2);
+ row3 = __lsx_vldx(src_u_tmp, pitch_x3);
+ src_u_tmp += pitch_x4;
+ row4 = __lsx_vld(src_u_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, src_u_tmp, pitch, src_u_tmp, pitch_x2, row5, row6);
+ row7 = __lsx_vldx(src_u_tmp, pitch_x3);
+
+ src_v_tmp = src_v - 4;
+ row8 = __lsx_vld(src_v_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, src_v_tmp, pitch, src_v_tmp, pitch_x2, row9, row10);
+ row11 = __lsx_vldx(src_v_tmp, pitch_x3);
+ src_v_tmp += pitch_x4;
+ row12 = __lsx_vld(src_v_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, src_v_tmp, pitch, src_v_tmp, pitch_x2, row13, row14);
+ row15 = __lsx_vldx(src_v_tmp, pitch_x3);
+
+ LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+ row9, row10, row11, row12, row13, row14, row15, p3, p2,
+ p1, p0, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ DUP2_ARG2(__lsx_vilvl_b, p0, p1, q1, q0, tmp0, tmp1);
+ tmp2 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp3 = __lsx_vilvh_h(tmp1, tmp0);
+
+ tmp0 = __lsx_vilvh_b(p0, p1);
+ tmp1 = __lsx_vilvh_b(q1, q0);
+ tmp4 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp5 = __lsx_vilvh_h(tmp1, tmp0);
+
+ src_u_tmp += 2;
+ __lsx_vstelm_w(tmp2, src_u_tmp - pitch_x4, 0, 0);
+ __lsx_vstelm_w(tmp2, src_u_tmp - pitch_x3, 0, 1);
+ __lsx_vstelm_w(tmp2, src_u_tmp - pitch_x2, 0, 2);
+ __lsx_vstelm_w(tmp2, src_u_tmp - pitch, 0, 3);
+
+ __lsx_vstelm_w(tmp3, src_u_tmp, 0, 0);
+ __lsx_vstelm_w(tmp3, src_u_tmp + pitch, 0, 1);
+ __lsx_vstelm_w(tmp3, src_u_tmp + pitch_x2, 0, 2);
+ __lsx_vstelm_w(tmp3, src_u_tmp + pitch_x3, 0, 3);
+
+ src_v_tmp += 2;
+ __lsx_vstelm_w(tmp4, src_v_tmp - pitch_x4, 0, 0);
+ __lsx_vstelm_w(tmp4, src_v_tmp - pitch_x3, 0, 1);
+ __lsx_vstelm_w(tmp4, src_v_tmp - pitch_x2, 0, 2);
+ __lsx_vstelm_w(tmp4, src_v_tmp - pitch, 0, 3);
+
+ __lsx_vstelm_w(tmp5, src_v_tmp, 0, 0);
+ __lsx_vstelm_w(tmp5, src_v_tmp + pitch, 0, 1);
+ __lsx_vstelm_w(tmp5, src_v_tmp + pitch_x2, 0, 2);
+ __lsx_vstelm_w(tmp5, src_v_tmp + pitch_x3, 0, 3);
+}
+
+static inline void mbloop_filter_horizontal_edge_y_lsx(
+ uint8_t *src, int32_t pitch, const uint8_t b_limit_in,
+ const uint8_t limit_in, const uint8_t thresh_in) {
+ uint8_t *temp_src;
+ int32_t pitch_x2 = pitch << 1;
+ int32_t pitch_x3 = pitch_x2 + pitch;
+ int32_t pitch_x4 = pitch << 2;
+
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+
+ DUP2_ARG2(__lsx_vldrepl_b, &b_limit_in, 0, &limit_in, 0, b_limit, limit);
+ thresh = __lsx_vldrepl_b(&thresh_in, 0);
+
+ temp_src = src - pitch_x4;
+ DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2,
+ temp_src, pitch_x3, p3, p2, p1, p0);
+ temp_src += pitch_x4;
+ DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2,
+ temp_src, pitch_x3, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ temp_src = src - pitch_x3;
+ __lsx_vstx(p2, temp_src, 0);
+ __lsx_vstx(p1, temp_src, pitch);
+ __lsx_vstx(p0, temp_src, pitch_x2);
+ __lsx_vstx(q0, temp_src, pitch_x3);
+ temp_src += pitch_x4;
+ __lsx_vstx(q1, temp_src, 0);
+ __lsx_vstx(q2, temp_src, pitch);
+}
+
+static inline void mbloop_filter_horizontal_edge_uv_lsx(
+ uint8_t *src_u, uint8_t *src_v, int32_t pitch, const uint8_t b_limit_in,
+ const uint8_t limit_in, const uint8_t thresh_in) {
+ uint8_t *temp_src;
+ int32_t pitch_x2 = pitch << 1;
+ int32_t pitch_x3 = pitch_x2 + pitch;
+ int32_t pitch_x4 = pitch << 2;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
+ __m128i p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
+
+ DUP2_ARG2(__lsx_vldrepl_b, &b_limit_in, 0, &limit_in, 0, b_limit, limit);
+ thresh = __lsx_vldrepl_b(&thresh_in, 0);
+
+ temp_src = src_u - pitch_x4;
+ DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2,
+ temp_src, pitch_x3, p3_u, p2_u, p1_u, p0_u);
+ temp_src += pitch_x4;
+ DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2,
+ temp_src, pitch_x3, q0_u, q1_u, q2_u, q3_u);
+ temp_src = src_v - pitch_x4;
+ DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2,
+ temp_src, pitch_x3, p3_v, p2_v, p1_v, p0_v);
+ temp_src += pitch_x4;
+ DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2,
+ temp_src, pitch_x3, q0_v, q1_v, q2_v, q3_v);
+
+ DUP4_ARG2(__lsx_vilvl_d, p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3,
+ p2, p1, p0);
+ DUP4_ARG2(__lsx_vilvl_d, q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0,
+ q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ src_u -= pitch_x3;
+ __lsx_vstelm_d(p2, src_u, 0, 0);
+ __lsx_vstelm_d(p1, src_u + pitch, 0, 0);
+ __lsx_vstelm_d(p0, src_u + pitch_x2, 0, 0);
+ __lsx_vstelm_d(q0, src_u + pitch_x3, 0, 0);
+ src_u += pitch_x4;
+ __lsx_vstelm_d(q1, src_u, 0, 0);
+ src_u += pitch;
+ __lsx_vstelm_d(q2, src_u, 0, 0);
+
+ src_v -= pitch_x3;
+ __lsx_vstelm_d(p2, src_v, 0, 1);
+ __lsx_vstelm_d(p1, src_v + pitch, 0, 1);
+ __lsx_vstelm_d(p0, src_v + pitch_x2, 0, 1);
+ __lsx_vstelm_d(q0, src_v + pitch_x3, 0, 1);
+ src_v += pitch_x4;
+ __lsx_vstelm_d(q1, src_v, 0, 1);
+ src_v += pitch;
+ __lsx_vstelm_d(q2, src_v, 0, 1);
+}
+
+static inline void mbloop_filter_vertical_edge_y_lsx(uint8_t *src,
+ int32_t pitch,
+ const uint8_t b_limit_in,
+ const uint8_t limit_in,
+ const uint8_t thresh_in) {
+ uint8_t *temp_src;
+ int32_t pitch_x2 = pitch << 1;
+ int32_t pitch_x3 = pitch_x2 + pitch;
+ int32_t pitch_x4 = pitch << 2;
+
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ __m128i row9, row10, row11, row12, row13, row14, row15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ DUP2_ARG2(__lsx_vldrepl_b, &b_limit_in, 0, &limit_in, 0, b_limit, limit);
+ thresh = __lsx_vldrepl_b(&thresh_in, 0);
+ temp_src = src - 4;
+ DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2,
+ temp_src, pitch_x3, row0, row1, row2, row3);
+ temp_src += pitch_x4;
+ DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2,
+ temp_src, pitch_x3, row4, row5, row6, row7);
+ temp_src += pitch_x4;
+ DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2,
+ temp_src, pitch_x3, row8, row9, row10, row11);
+ temp_src += pitch_x4;
+ DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2,
+ temp_src, pitch_x3, row12, row13, row14, row15);
+ temp_src -= pitch_x4;
+ LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+ row9, row10, row11, row12, row13, row14, row15, p3, p2,
+ p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+ DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, tmp0, tmp1);
+ tmp3 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp4 = __lsx_vilvh_h(tmp1, tmp0);
+ DUP2_ARG2(__lsx_vilvh_b, p1, p2, q0, p0, tmp0, tmp1);
+ tmp6 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp7 = __lsx_vilvh_h(tmp1, tmp0);
+ tmp2 = __lsx_vilvl_b(q2, q1);
+ tmp5 = __lsx_vilvh_b(q2, q1);
+
+ temp_src = src - 3;
+ VP8_ST6x1_B(tmp3, 0, tmp2, 0, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp3, 1, tmp2, 1, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp3, 2, tmp2, 2, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp3, 3, tmp2, 3, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp4, 0, tmp2, 4, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp4, 1, tmp2, 5, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp4, 2, tmp2, 6, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp4, 3, tmp2, 7, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp6, 0, tmp5, 0, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp6, 1, tmp5, 1, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp6, 2, tmp5, 2, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp6, 3, tmp5, 3, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp7, 0, tmp5, 4, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp7, 1, tmp5, 5, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp7, 2, tmp5, 6, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_B(tmp7, 3, tmp5, 7, temp_src, 4);
+}
+
+static inline void mbloop_filter_vertical_edge_uv_lsx(
+ uint8_t *src_u, uint8_t *src_v, int32_t pitch, const uint8_t b_limit_in,
+ const uint8_t limit_in, const uint8_t thresh_in) {
+ int32_t pitch_x2 = pitch << 1;
+ int32_t pitch_x3 = pitch_x2 + pitch;
+ int32_t pitch_x4 = pitch << 2;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ __m128i row9, row10, row11, row12, row13, row14, row15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ DUP2_ARG2(__lsx_vldrepl_b, &b_limit_in, 0, &limit_in, 0, b_limit, limit);
+ thresh = __lsx_vldrepl_b(&thresh_in, 0);
+
+ src_u -= 4;
+ DUP4_ARG2(__lsx_vldx, src_u, 0, src_u, pitch, src_u, pitch_x2, src_u,
+ pitch_x3, row0, row1, row2, row3);
+ src_u += pitch_x4;
+ DUP4_ARG2(__lsx_vldx, src_u, 0, src_u, pitch, src_u, pitch_x2, src_u,
+ pitch_x3, row4, row5, row6, row7);
+ src_v -= 4;
+ DUP4_ARG2(__lsx_vldx, src_v, 0, src_v, pitch, src_v, pitch_x2, src_v,
+ pitch_x3, row8, row9, row10, row11);
+ src_v += pitch_x4;
+ DUP4_ARG2(__lsx_vldx, src_v, 0, src_v, pitch, src_v, pitch_x2, src_v,
+ pitch_x3, row12, row13, row14, row15);
+ LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+ row9, row10, row11, row12, row13, row14, row15, p3, p2,
+ p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, tmp0, tmp1);
+ tmp3 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp4 = __lsx_vilvh_h(tmp1, tmp0);
+ DUP2_ARG2(__lsx_vilvh_b, p1, p2, q0, p0, tmp0, tmp1);
+ tmp6 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp7 = __lsx_vilvh_h(tmp1, tmp0);
+ tmp2 = __lsx_vilvl_b(q2, q1);
+ tmp5 = __lsx_vilvh_b(q2, q1);
+
+ src_u += 1 - pitch_x4;
+ VP8_ST6x1_B(tmp3, 0, tmp2, 0, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_B(tmp3, 1, tmp2, 1, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_B(tmp3, 2, tmp2, 2, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_B(tmp3, 3, tmp2, 3, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_B(tmp4, 0, tmp2, 4, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_B(tmp4, 1, tmp2, 5, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_B(tmp4, 2, tmp2, 6, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_B(tmp4, 3, tmp2, 7, src_u, 4);
+
+ src_v += 1 - pitch_x4;
+ VP8_ST6x1_B(tmp6, 0, tmp5, 0, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_B(tmp6, 1, tmp5, 1, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_B(tmp6, 2, tmp5, 2, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_B(tmp6, 3, tmp5, 3, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_B(tmp7, 0, tmp5, 4, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_B(tmp7, 1, tmp5, 5, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_B(tmp7, 2, tmp5, 6, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_B(tmp7, 3, tmp5, 7, src_v, 4);
+}
+
+void vp8_loop_filter_mbh_lsx(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch_y, int32_t pitch_u_v,
+ loop_filter_info *lpf_info_ptr) {
+ mbloop_filter_horizontal_edge_y_lsx(src_y, pitch_y, *lpf_info_ptr->mblim,
+ *lpf_info_ptr->lim,
+ *lpf_info_ptr->hev_thr);
+ if (src_u) {
+ mbloop_filter_horizontal_edge_uv_lsx(
+ src_u, src_v, pitch_u_v, *lpf_info_ptr->mblim, *lpf_info_ptr->lim,
+ *lpf_info_ptr->hev_thr);
+ }
+}
+
+void vp8_loop_filter_mbv_lsx(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch_y, int32_t pitch_u_v,
+ loop_filter_info *lpf_info_ptr) {
+ mbloop_filter_vertical_edge_y_lsx(src_y, pitch_y, *lpf_info_ptr->mblim,
+ *lpf_info_ptr->lim, *lpf_info_ptr->hev_thr);
+ if (src_u) {
+ mbloop_filter_vertical_edge_uv_lsx(src_u, src_v, pitch_u_v,
+ *lpf_info_ptr->mblim, *lpf_info_ptr->lim,
+ *lpf_info_ptr->hev_thr);
+ }
+}
+
+void vp8_loop_filter_bh_lsx(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch_y, int32_t pitch_u_v,
+ loop_filter_info *lpf_info_ptr) {
+ loop_filter_horizontal_4_dual_lsx(src_y + 4 * pitch_y, pitch_y,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr);
+ loop_filter_horizontal_4_dual_lsx(src_y + 8 * pitch_y, pitch_y,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr);
+ loop_filter_horizontal_4_dual_lsx(src_y + 12 * pitch_y, pitch_y,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr);
+ if (src_u) {
+ loop_filter_horizontal_edge_uv_lsx(
+ src_u + (4 * pitch_u_v), src_v + (4 * pitch_u_v), pitch_u_v,
+ *lpf_info_ptr->blim, *lpf_info_ptr->lim, *lpf_info_ptr->hev_thr);
+ }
+}
+
+void vp8_loop_filter_bv_lsx(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch_y, int32_t pitch_u_v,
+ loop_filter_info *lpf_info_ptr) {
+ loop_filter_vertical_4_dual_lsx(src_y + 4, pitch_y, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr);
+ loop_filter_vertical_4_dual_lsx(src_y + 8, pitch_y, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr);
+ loop_filter_vertical_4_dual_lsx(src_y + 12, pitch_y, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr);
+ if (src_u) {
+ loop_filter_vertical_edge_uv_lsx(src_u + 4, src_v + 4, pitch_u_v,
+ *lpf_info_ptr->blim, *lpf_info_ptr->lim,
+ *lpf_info_ptr->hev_thr);
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/loongarch/sixtap_filter_lsx.c b/media/libvpx/libvpx/vp8/common/loongarch/sixtap_filter_lsx.c
new file mode 100644
index 0000000000..cd7ba54746
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/loongarch/sixtap_filter_lsx.c
@@ -0,0 +1,1903 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Lu Wang <wanglu@loongson.cn>
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/filter.h"
+#include "vpx_ports/mem.h"
+#include "vpx_util/loongson_intrinsics.h"
+
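+/* VP8 six-tap subpel filter coefficients for the seven fractional positions
+   (the full-pel position is handled separately), padded to eight taps for
+   aligned vector loads; rows with zero outer taps are effectively 4-tap. */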
+DECLARE_ALIGNED(16, static const int8_t, vp8_subpel_filters_lsx[7][8]) = {
+ { 0, -6, 123, 12, -1, 0, 0, 0 },
+ { 2, -11, 108, 36, -8, 1, 0, 0 }, /* New 1/4 pel 6 tap filter */
+ { 0, -9, 93, 50, -6, 0, 0, 0 },
+ { 3, -16, 77, 77, -16, 3, 0, 0 }, /* New 1/2 pel 6 tap filter */
+ { 0, -6, 50, 93, -9, 0, 0, 0 },
+ { 1, -8, 36, 108, -11, 2, 0, 0 }, /* New 1/4 pel 6 tap filter */
+ { 0, -1, 12, 123, -6, 0, 0, 0 },
+};
+
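+/* Byte-shuffle control vectors for __lsx_vshuf_b: each selects the sliding
+   pairs of neighbouring pixels consumed by the 2-tap dot products.  Control
+   bytes below 16 pick from one source register and 16..31 from the other,
+   so two 4-wide rows held in separate registers can be filtered together. */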
+static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = {
+ /* 8 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ /* 4 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
+ /* 4 width cases */
+ 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
+};
+
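+/* Sum of three signed-byte dot products: the 6-tap filter evaluated as
+   three 2-tap multiply-accumulates into halfword lanes. */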
+static INLINE __m128i dpadd_h3(__m128i in0, __m128i in1, __m128i in2,
+ __m128i coeff0, __m128i coeff1, __m128i coeff2) {
+ __m128i out0_m;
+
+ out0_m = __lsx_vdp2_h_b(in0, coeff0);
+ out0_m = __lsx_vdp2add_h_b(out0_m, in1, coeff1);
+ out0_m = __lsx_vdp2add_h_b(out0_m, in2, coeff2);
+
+ return out0_m;
+}
+
+static INLINE __m128i horiz_6tap_filt(__m128i src0, __m128i src1, __m128i mask0,
+ __m128i mask1, __m128i mask2,
+ __m128i filt_h0, __m128i filt_h1,
+ __m128i filt_h2) {
+ __m128i vec0_m, vec1_m, vec2_m;
+ __m128i hz_out_m;
+
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src1, src0, mask1, vec0_m,
+ vec1_m);
+ vec2_m = __lsx_vshuf_b(src1, src0, mask2);
+ hz_out_m = dpadd_h3(vec0_m, vec1_m, vec2_m, filt_h0, filt_h1, filt_h2);
+ hz_out_m = __lsx_vsrari_h(hz_out_m, VP8_FILTER_SHIFT);
+ hz_out_m = __lsx_vsat_h(hz_out_m, 7);
+
+ return hz_out_m;
+}
+
+static INLINE __m128i filt_4tap_dpadd_h(__m128i vec0, __m128i vec1,
+ __m128i filt0, __m128i filt1) {
+ __m128i tmp_m;
+
+ tmp_m = __lsx_vdp2_h_b(vec0, filt0);
+ tmp_m = __lsx_vdp2add_h_b(tmp_m, vec1, filt1);
+
+ return tmp_m;
+}
+
+static INLINE __m128i horiz_4tap_filt(__m128i src0, __m128i src1, __m128i mask0,
+ __m128i mask1, __m128i filt_h0,
+ __m128i filt_h1) {
+ __m128i vec0_m, vec1_m, hz_out_m;
+
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src1, src0, mask1, vec0_m,
+ vec1_m);
+ hz_out_m = filt_4tap_dpadd_h(vec0_m, vec1_m, filt_h0, filt_h1);
+ hz_out_m = __lsx_vsrari_h(hz_out_m, VP8_FILTER_SHIFT);
+ hz_out_m = __lsx_vsat_h(hz_out_m, 7);
+
+ return hz_out_m;
+}
+
+#define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
+ mask2, filt0, filt1, filt2, out0, out1) \
+ do { \
+ __m128i vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m; \
+ \
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src3, src2, mask0, vec0_m, \
+ vec1_m); \
+ DUP2_ARG2(__lsx_vdp2_h_b, vec0_m, filt0, vec1_m, filt0, out0, out1); \
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask1, src3, src2, mask1, vec2_m, \
+ vec3_m); \
+ DUP2_ARG3(__lsx_vdp2add_h_b, out0, vec2_m, filt1, out1, vec3_m, filt1, \
+ out0, out1); \
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask2, src3, src2, mask2, vec4_m, \
+ vec5_m); \
+ DUP2_ARG3(__lsx_vdp2add_h_b, out0, vec4_m, filt2, out1, vec5_m, filt2, \
+ out0, out1); \
+ } while (0)
+
+#define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
+ mask2, filt0, filt1, filt2, out0, out1, \
+ out2, out3) \
+ do { \
+ __m128i vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
+ \
+ DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, vec0_m, \
+ vec1_m); \
+ DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0, vec2_m, \
+ vec3_m); \
+ DUP4_ARG2(__lsx_vdp2_h_b, vec0_m, filt0, vec1_m, filt0, vec2_m, filt0, \
+ vec3_m, filt0, out0, out1, out2, out3); \
+ DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, vec0_m, \
+ vec1_m); \
+ DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1, vec2_m, \
+ vec3_m); \
+ DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2, vec4_m, \
+ vec5_m); \
+ DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask2, src3, src3, mask2, vec6_m, \
+ vec7_m); \
+ DUP4_ARG3(__lsx_vdp2add_h_b, out0, vec0_m, filt1, out1, vec1_m, filt1, \
+ out2, vec2_m, filt1, out3, vec3_m, filt1, out0, out1, out2, \
+ out3); \
+ DUP4_ARG3(__lsx_vdp2add_h_b, out0, vec4_m, filt2, out1, vec5_m, filt2, \
+ out2, vec6_m, filt2, out3, vec7_m, filt2, out0, out1, out2, \
+ out3); \
+ } while (0)
+
+#define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
+ filt0, filt1, out0, out1) \
+ do { \
+ __m128i vec0_m, vec1_m, vec2_m, vec3_m; \
+ \
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src3, src2, mask0, vec0_m, \
+ vec1_m); \
+ DUP2_ARG2(__lsx_vdp2_h_b, vec0_m, filt0, vec1_m, filt0, out0, out1); \
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask1, src3, src2, mask1, vec2_m, \
+ vec3_m); \
+ DUP2_ARG3(__lsx_vdp2add_h_b, out0, vec2_m, filt1, out1, vec3_m, filt1, \
+ out0, out1); \
+ } while (0)
+
+#define HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
+ filt0, filt1, out0, out1, out2, out3) \
+ do { \
+ __m128i vec0_m, vec1_m, vec2_m, vec3_m; \
+ \
+ DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, vec0_m, \
+ vec1_m); \
+ DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0, vec2_m, \
+ vec3_m); \
+ DUP4_ARG2(__lsx_vdp2_h_b, vec0_m, filt0, vec1_m, filt0, vec2_m, filt0, \
+ vec3_m, filt0, out0, out1, out2, out3); \
+ DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, vec0_m, \
+ vec1_m); \
+ DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1, vec2_m, \
+ vec3_m); \
+ DUP4_ARG3(__lsx_vdp2add_h_b, out0, vec0_m, filt1, out1, vec1_m, filt1, \
+ out2, vec2_m, filt1, out3, vec3_m, filt1, out0, out1, out2, \
+ out3); \
+ } while (0)
+
+static inline void common_hz_6t_4x4_lsx(uint8_t *RESTRICT src,
+ int32_t src_stride,
+ uint8_t *RESTRICT dst,
+ int32_t dst_stride,
+ const int8_t *filter) {
+ __m128i src0, src1, src2, src3, filt0, filt1, filt2;
+ __m128i mask0, mask1, mask2, out0, out1;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16);
+ src -= 2;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ src0 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2);
+ src3 = __lsx_vldx(src, src_stride_x3);
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0,
+ filt1, filt2, out0, out1);
+ out0 = __lsx_vssrarni_b_h(out1, out0, VP8_FILTER_SHIFT);
+ out0 = __lsx_vxori_b(out0, 128);
+
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+}
+
+static void common_hz_6t_4x8_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ __m128i src0, src1, src2, src3, filt0, filt1, filt2;
+ __m128i mask0, mask1, mask2, out0, out1, out2, out3;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride_x2 << 1;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16);
+ src -= 2;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ src0 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2);
+ src3 = __lsx_vldx(src, src_stride_x3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src += src_stride_x4;
+ HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0,
+ filt1, filt2, out0, out1);
+
+ src0 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2);
+ src3 = __lsx_vldx(src, src_stride_x3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0,
+ filt1, filt2, out2, out3);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2,
+ VP8_FILTER_SHIFT, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+ dst += dst_stride;
+
+ __lsx_vstelm_w(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out1, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out1, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out1, dst, 0, 3);
+}
+
+static void common_hz_6t_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ if (height == 4) {
+ common_hz_6t_4x4_lsx(src, src_stride, dst, dst_stride, filter);
+ } else if (height == 8) {
+ common_hz_6t_4x8_lsx(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_hz_6t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src3, filt0, filt1, filt2;
+ __m128i mask0, mask1, mask2, tmp0, tmp1;
+ __m128i filt, out0, out1, out2, out3;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0);
+ src -= 2;
+
+ filt = __lsx_vld(filter, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1);
+ filt2 = __lsx_vreplvei_h(filt, 2);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src += src_stride_x4;
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0,
+ filt1, filt2, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2,
+ VP8_FILTER_SHIFT, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+ __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1);
+ __lsx_vstelm_d(tmp1, dst + dst_stride_x2, 0, 0);
+ __lsx_vstelm_d(tmp1, dst + dst_stride_x3, 0, 1);
+ dst += dst_stride_x4;
+
+ for (loop_cnt = (height >> 2) - 1; loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src += src_stride_x4;
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2,
+ VP8_FILTER_SHIFT, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+ __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1);
+ __lsx_vstelm_d(tmp1, dst + dst_stride_x2, 0, 0);
+ __lsx_vstelm_d(tmp1, dst + dst_stride_x3, 0, 1);
+ dst += dst_stride_x4;
+ }
+}
+
+static void common_hz_6t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, filt0, filt1, filt2;
+ __m128i mask0, mask1, mask2, out;
+ __m128i filt, out0, out1, out2, out3, out4, out5, out6, out7;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0);
+ src -= 2;
+
+ filt = __lsx_vld(filter, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1);
+ filt2 = __lsx_vreplvei_h(filt, 2);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src0, src2, src4, src6);
+ src += 8;
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src1, src3, src5, src7);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src6, 128, src7, 128, src4,
+ src5, src6, src7);
+ src += src_stride_x4 - 8;
+
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+ HORIZ_6TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, mask2,
+ filt0, filt1, filt2, out4, out5, out6, out7);
+ DUP4_ARG2(__lsx_vsrari_h, out0, VP8_FILTER_SHIFT, out1, VP8_FILTER_SHIFT,
+ out2, VP8_FILTER_SHIFT, out3, VP8_FILTER_SHIFT, out0, out1, out2,
+ out3);
+ DUP4_ARG2(__lsx_vsrari_h, out4, VP8_FILTER_SHIFT, out5, VP8_FILTER_SHIFT,
+ out6, VP8_FILTER_SHIFT, out7, VP8_FILTER_SHIFT, out4, out5, out6,
+ out7);
+ DUP4_ARG2(__lsx_vsat_h, out0, 7, out1, 7, out2, 7, out3, 7, out0, out1,
+ out2, out3);
+ DUP4_ARG2(__lsx_vsat_h, out4, 7, out5, 7, out6, 7, out7, 7, out4, out5,
+ out6, out7);
+ out = __lsx_vpickev_b(out1, out0);
+ out = __lsx_vxori_b(out, 128);
+ __lsx_vst(out, dst, 0);
+ out = __lsx_vpickev_b(out3, out2);
+ out = __lsx_vxori_b(out, 128);
+ __lsx_vstx(out, dst, dst_stride);
+ out = __lsx_vpickev_b(out5, out4);
+ out = __lsx_vxori_b(out, 128);
+ __lsx_vstx(out, dst, dst_stride_x2);
+ out = __lsx_vpickev_b(out7, out6);
+ out = __lsx_vxori_b(out, 128);
+ __lsx_vstx(out, dst, dst_stride_x3);
+ dst += dst_stride_x4;
+ }
+}
+
+static void common_vt_6t_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
+ __m128i src87_r, src2110, src4332, src6554, src8776, filt0, filt1, filt2;
+ __m128i out0, out1;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+
+ DUP2_ARG2(__lsx_vldx, src, -src_stride_x2, src, -src_stride, src0, src1);
+ src2 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src3, src4);
+ src += src_stride_x3;
+
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src3, src2, src4, src3,
+ src10_r, src21_r, src32_r, src43_r);
+ DUP2_ARG2(__lsx_vilvl_d, src21_r, src10_r, src43_r, src32_r, src2110,
+ src4332);
+ DUP2_ARG2(__lsx_vxori_b, src2110, 128, src4332, 128, src2110, src4332);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ src5 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src6, src7);
+ src8 = __lsx_vldx(src, src_stride_x3);
+ src += src_stride_x4;
+
+ DUP4_ARG2(__lsx_vilvl_b, src5, src4, src6, src5, src7, src6, src8, src7,
+ src54_r, src65_r, src76_r, src87_r);
+ DUP2_ARG2(__lsx_vilvl_d, src65_r, src54_r, src87_r, src76_r, src6554,
+ src8776);
+ DUP2_ARG2(__lsx_vxori_b, src6554, 128, src8776, 128, src6554, src8776);
+ out0 = dpadd_h3(src2110, src4332, src6554, filt0, filt1, filt2);
+ out1 = dpadd_h3(src4332, src6554, src8776, filt0, filt1, filt2);
+
+ out0 = __lsx_vssrarni_b_h(out1, out0, VP8_FILTER_SHIFT);
+ out0 = __lsx_vxori_b(out0, 128);
+
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+ dst += dst_stride;
+
+ src2110 = src6554;
+ src4332 = src8776;
+ src4 = src8;
+ }
+}
+
+static void common_vt_6t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src3, src4, src7, src8, src9, src10;
+ __m128i src10_r, src32_r, src76_r, src98_r, src21_r, src43_r, src87_r;
+ __m128i src109_r, filt0, filt1, filt2;
+ __m128i tmp0, tmp1;
+ __m128i filt, out0_r, out1_r, out2_r, out3_r;
+
+ src -= src_stride_x2;
+ filt = __lsx_vld(filter, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1);
+ filt2 = __lsx_vreplvei_h(filt, 2);
+
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src0, src1, src2, src3);
+ src += src_stride_x4;
+ src4 = __lsx_vld(src, 0);
+ src += src_stride;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src2, src1, src4, src3,
+ src10_r, src32_r, src21_r, src43_r);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src7, src8, src9, src10);
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128, src7,
+ src8, src9, src10);
+ src += src_stride_x4;
+
+ DUP4_ARG2(__lsx_vilvl_b, src7, src4, src8, src7, src9, src8, src10, src9,
+ src76_r, src87_r, src98_r, src109_r);
+ out0_r = dpadd_h3(src10_r, src32_r, src76_r, filt0, filt1, filt2);
+ out1_r = dpadd_h3(src21_r, src43_r, src87_r, filt0, filt1, filt2);
+ out2_r = dpadd_h3(src32_r, src76_r, src98_r, filt0, filt1, filt2);
+ out3_r = dpadd_h3(src43_r, src87_r, src109_r, filt0, filt1, filt2);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1_r, out0_r, VP8_FILTER_SHIFT, out3_r,
+ out2_r, VP8_FILTER_SHIFT, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+ __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1);
+ __lsx_vstelm_d(tmp1, dst + dst_stride_x2, 0, 0);
+ __lsx_vstelm_d(tmp1, dst + dst_stride_x3, 0, 1);
+ dst += dst_stride_x4;
+
+ src10_r = src76_r;
+ src32_r = src98_r;
+ src21_r = src87_r;
+ src43_r = src109_r;
+ src4 = src10;
+ }
+}
+
+static void common_vt_6t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
+ __m128i src87_r, src10_l, src32_l, src54_l, src76_l, src21_l, src43_l;
+ __m128i src65_l, src87_l, filt0, filt1, filt2;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
+
+ src -= src_stride_x2;
+ filt = __lsx_vld(filter, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1);
+ filt2 = __lsx_vreplvei_h(filt, 2);
+
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src0, src1, src2, src3);
+ src += src_stride_x4;
+ src4 = __lsx_vldx(src, 0);
+ src += src_stride;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src4, src3, src2, src1,
+ src10_r, src32_r, src43_r, src21_r);
+ DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src4, src3, src2, src1,
+ src10_l, src32_l, src43_l, src21_l);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src5, src6, src7, src8);
+ src += src_stride_x4;
+
+ DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128, src5,
+ src6, src7, src8);
+ DUP4_ARG2(__lsx_vilvl_b, src5, src4, src6, src5, src7, src6, src8, src7,
+ src54_r, src65_r, src76_r, src87_r);
+ DUP4_ARG2(__lsx_vilvh_b, src5, src4, src6, src5, src7, src6, src8, src7,
+ src54_l, src65_l, src76_l, src87_l);
+ out0_r = dpadd_h3(src10_r, src32_r, src54_r, filt0, filt1, filt2);
+ out1_r = dpadd_h3(src21_r, src43_r, src65_r, filt0, filt1, filt2);
+ out2_r = dpadd_h3(src32_r, src54_r, src76_r, filt0, filt1, filt2);
+ out3_r = dpadd_h3(src43_r, src65_r, src87_r, filt0, filt1, filt2);
+ out0_l = dpadd_h3(src10_l, src32_l, src54_l, filt0, filt1, filt2);
+ out1_l = dpadd_h3(src21_l, src43_l, src65_l, filt0, filt1, filt2);
+ out2_l = dpadd_h3(src32_l, src54_l, src76_l, filt0, filt1, filt2);
+ out3_l = dpadd_h3(src43_l, src65_l, src87_l, filt0, filt1, filt2);
+ DUP4_ARG3(__lsx_vssrarni_b_h, out0_l, out0_r, VP8_FILTER_SHIFT, out1_l,
+ out1_r, VP8_FILTER_SHIFT, out2_l, out2_r, VP8_FILTER_SHIFT,
+ out3_l, out3_r, VP8_FILTER_SHIFT, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp2, 128, tmp3, 128, tmp0,
+ tmp1, tmp2, tmp3);
+ __lsx_vstx(tmp0, dst, 0);
+ __lsx_vstx(tmp1, dst, dst_stride);
+ __lsx_vstx(tmp2, dst, dst_stride_x2);
+ __lsx_vstx(tmp3, dst, dst_stride_x3);
+ dst += dst_stride_x4;
+
+ src10_r = src54_r;
+ src32_r = src76_r;
+ src21_r = src65_r;
+ src43_r = src87_r;
+ src10_l = src54_l;
+ src32_l = src76_l;
+ src21_l = src65_l;
+ src43_l = src87_l;
+ src4 = src8;
+ }
+}
+
+static void common_hv_6ht_6vt_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, tmp0, tmp1;
+ __m128i filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2;
+ __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ __m128i hz_out7, filt_vt0, filt_vt1, filt_vt2, out0, out1, out2, out3;
+ __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };
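+  /* shuff feeds __lsx_vshuf_b below: it splices the upper row held in the
+   * older horizontal-filter output onto the lower row of the newer one,
+   * rebuilding the in-between row that the pairwise row filtering skips. */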
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16);
+ src -= 2;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0,
+ filt_hz1);
+ filt_hz2 = __lsx_vldrepl_h(filter_horiz, 4);
+ DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0,
+ filt_vt1);
+ filt_vt2 = __lsx_vldrepl_h(filter_vert, 4);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ DUP2_ARG2(__lsx_vldx, src, -src_stride_x2, src, -src_stride, src0, src1);
+ src2 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src3, src4);
+ src += src_stride_x3;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+
+ hz_out0 = horiz_6tap_filt(src0, src1, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out2 = horiz_6tap_filt(src2, src3, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out1 = __lsx_vshuf_b(hz_out2, hz_out0, shuff);
+ hz_out3 = horiz_6tap_filt(src3, src4, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out3, hz_out2, out0, out1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ src5 = __lsx_vld(src, 0);
+ src6 = __lsx_vldx(src, src_stride);
+ src += src_stride_x2;
+
+ DUP2_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src5, src6);
+ hz_out5 = horiz_6tap_filt(src5, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out4 = __lsx_vshuf_b(hz_out5, hz_out3, shuff);
+
+ src7 = __lsx_vld(src, 0);
+ src8 = __lsx_vldx(src, src_stride);
+ src += src_stride_x2;
+
+ DUP2_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src7, src8);
+ hz_out7 = horiz_6tap_filt(src7, src8, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out6 = __lsx_vshuf_b(hz_out7, hz_out5, shuff);
+
+ out2 = __lsx_vpackev_b(hz_out5, hz_out4);
+ tmp0 = dpadd_h3(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ out3 = __lsx_vpackev_b(hz_out7, hz_out6);
+ tmp1 = dpadd_h3(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2);
+
+ tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7);
+ tmp0 = __lsx_vxori_b(tmp0, 128);
+ __lsx_vstelm_w(tmp0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp0, dst, 0, 3);
+ dst += dst_stride;
+
+ hz_out3 = hz_out7;
+ out0 = out2;
+ out1 = out3;
+ }
+}
+
+static void common_hv_6ht_6vt_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i filt_hz0, filt_hz1, filt_hz2;
+ __m128i mask0, mask1, mask2, vec0, vec1;
+ __m128i filt, filt_vt0, filt_vt1, filt_vt2;
+ __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ __m128i hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0);
+ src -= (2 + src_stride_x2);
+
+ filt = __lsx_vld(filter_horiz, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_hz0, filt_hz1);
+ filt_hz2 = __lsx_vreplvei_h(filt, 2);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src0, src1, src2, src3);
+ src += src_stride_x4;
+ src4 = __lsx_vldx(src, 0);
+ src += src_stride;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+
+ hz_out0 = horiz_6tap_filt(src0, src0, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out1 = horiz_6tap_filt(src1, src1, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out2 = horiz_6tap_filt(src2, src2, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out3 = horiz_6tap_filt(src3, src3, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out4 = horiz_6tap_filt(src4, src4, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ filt = __lsx_vld(filter_vert, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_vt0, filt_vt1);
+ filt_vt2 = __lsx_vreplvei_h(filt, 2);
+
+ DUP4_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out3, hz_out2, hz_out2,
+ hz_out1, hz_out4, hz_out3, out0, out1, out3, out4);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src5, src6, src7, src8);
+ src += src_stride_x4;
+
+ DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128, src5,
+ src6, src7, src8);
+ hz_out5 = horiz_6tap_filt(src5, src5, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out2 = __lsx_vpackev_b(hz_out5, hz_out4);
+ tmp0 = dpadd_h3(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out6 = horiz_6tap_filt(src6, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out5 = __lsx_vpackev_b(hz_out6, hz_out5);
+ tmp1 = dpadd_h3(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = horiz_6tap_filt(src7, src7, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out7 = __lsx_vpackev_b(hz_out7, hz_out6);
+ tmp2 = dpadd_h3(out1, out2, out7, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out8 = horiz_6tap_filt(src8, src8, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out6 = __lsx_vpackev_b(hz_out8, hz_out7);
+ tmp3 = dpadd_h3(out4, out5, out6, filt_vt0, filt_vt1, filt_vt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, VP8_FILTER_SHIFT, tmp3, tmp2,
+ VP8_FILTER_SHIFT, vec0, vec1);
+ DUP2_ARG2(__lsx_vxori_b, vec0, 128, vec1, 128, vec0, vec1);
+
+ __lsx_vstelm_d(vec0, dst, 0, 0);
+ __lsx_vstelm_d(vec0, dst + dst_stride, 0, 1);
+ __lsx_vstelm_d(vec1, dst + dst_stride_x2, 0, 0);
+ __lsx_vstelm_d(vec1, dst + dst_stride_x3, 0, 1);
+ dst += dst_stride_x4;
+
+ hz_out4 = hz_out8;
+ out0 = out2;
+ out1 = out7;
+ out3 = out5;
+ out4 = out6;
+ }
+}
+
+static void common_hv_6ht_6vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ common_hv_6ht_6vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ common_hv_6ht_6vt_8w_lsx(src + 8, src_stride, dst + 8, dst_stride,
+ filter_horiz, filter_vert, height);
+}
+
+static void common_hz_4t_4x4_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ __m128i src0, src1, src2, src3, filt0, filt1, mask0, mask1;
+ __m128i out0, out1;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16);
+ src -= 1;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ mask1 = __lsx_vaddi_bu(mask0, 2);
+
+ src0 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2);
+ src3 = __lsx_vldx(src, src_stride_x3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1,
+ out0, out1);
+
+ out0 = __lsx_vssrarni_b_h(out1, out0, VP8_FILTER_SHIFT);
+ out0 = __lsx_vxori_b(out0, 128);
+
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+}
+
+static void common_hz_4t_4x8_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ __m128i src0, src1, src2, src3, filt0, filt1, mask0, mask1;
+ __m128i out0, out1, out2, out3;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16);
+ src -= 1;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ mask1 = __lsx_vaddi_bu(mask0, 2);
+
+ src0 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2);
+ src3 = __lsx_vldx(src, src_stride_x3);
+ src += src_stride_x4;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1,
+ out0, out1);
+
+ src0 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src1, src2);
+ src3 = __lsx_vldx(src, src_stride_x3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1,
+ out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2,
+ VP8_FILTER_SHIFT, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+ dst += dst_stride;
+
+ __lsx_vstelm_w(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out1, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out1, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out1, dst, 0, 3);
+}
+
+static void common_hz_4t_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ if (height == 4) {
+ common_hz_4t_4x4_lsx(src, src_stride, dst, dst_stride, filter);
+ } else if (height == 8) {
+ common_hz_4t_4x8_lsx(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_hz_4t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src3, filt0, filt1, mask0, mask1;
+ __m128i tmp0, tmp1;
+ __m128i filt, out0, out1, out2, out3;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0);
+ src -= 1;
+
+ filt = __lsx_vld(filter, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1);
+ mask1 = __lsx_vaddi_bu(mask0, 2);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src0, src1, src2, src3);
+ src += src_stride_x4;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0,
+ filt1, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2,
+ VP8_FILTER_SHIFT, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+ __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1);
+ __lsx_vstelm_d(tmp1, dst + dst_stride_x2, 0, 0);
+ __lsx_vstelm_d(tmp1, dst + dst_stride_x3, 0, 1);
+ dst += dst_stride_x4;
+ }
+}
+
+static void common_hz_4t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+ __m128i filt0, filt1, mask0, mask1;
+ __m128i filt, out0, out1, out2, out3, out4, out5, out6, out7;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0);
+ src -= 1;
+
+ filt = __lsx_vld(filter, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1);
+ mask1 = __lsx_vaddi_bu(mask0, 2);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src0, src2, src4, src6);
+ src += 8;
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src1, src3, src5, src7);
+ src += src_stride_x4 - 8;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src6, 128, src7, 128, src4,
+ src5, src6, src7);
+ HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0,
+ filt1, out0, out1, out2, out3);
+ HORIZ_4TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, filt0,
+ filt1, out4, out5, out6, out7);
+ DUP4_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2,
+ VP8_FILTER_SHIFT, out5, out4, VP8_FILTER_SHIFT, out7, out6,
+ VP8_FILTER_SHIFT, out0, out1, out2, out3);
+ DUP4_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out2, 128, out3, 128, out0,
+ out1, out2, out3);
+ __lsx_vstx(out0, dst, 0);
+ __lsx_vstx(out1, dst, dst_stride);
+ __lsx_vstx(out2, dst, dst_stride_x2);
+ __lsx_vstx(out3, dst, dst_stride_x3);
+ dst += dst_stride_x4;
+ }
+}
+
+static void common_vt_4t_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ __m128i src0, src1, src2, src3, src4, src5;
+ __m128i src10_r, src32_r, src54_r, src21_r, src43_r, src65_r;
+ __m128i src2110, src4332, filt0, filt1, out0, out1;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ DUP2_ARG2(__lsx_vldx, src, -src_stride, src, src_stride, src0, src2);
+ src1 = __lsx_vld(src, 0);
+ src += src_stride_x2;
+
+ DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r);
+
+ src2110 = __lsx_vilvl_d(src21_r, src10_r);
+ src2110 = __lsx_vxori_b(src2110, 128);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ src3 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src4, src5);
+ src += src_stride_x3;
+ DUP2_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src32_r, src43_r);
+ src4332 = __lsx_vilvl_d(src43_r, src32_r);
+ src4332 = __lsx_vxori_b(src4332, 128);
+ out0 = filt_4tap_dpadd_h(src2110, src4332, filt0, filt1);
+
+ src2 = __lsx_vld(src, 0);
+ src += src_stride;
+ DUP2_ARG2(__lsx_vilvl_b, src5, src4, src2, src5, src54_r, src65_r);
+ src2110 = __lsx_vilvl_d(src65_r, src54_r);
+ src2110 = __lsx_vxori_b(src2110, 128);
+ out1 = filt_4tap_dpadd_h(src4332, src2110, filt0, filt1);
+ out0 = __lsx_vssrarni_b_h(out1, out0, VP8_FILTER_SHIFT);
+ out0 = __lsx_vxori_b(out0, 128);
+
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+ dst += dst_stride;
+ }
+}
+
+static void common_vt_4t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src7, src8, src9, src10;
+ __m128i src10_r, src72_r, src98_r, src21_r, src87_r, src109_r, filt0, filt1;
+ __m128i tmp0, tmp1;
+ __m128i filt, out0_r, out1_r, out2_r, out3_r;
+
+ src -= src_stride;
+ filt = __lsx_vld(filter, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1);
+
+ DUP2_ARG2(__lsx_vldx, src, 0, src, src_stride, src0, src1);
+ src2 = __lsx_vldx(src, src_stride_x2);
+ src += src_stride_x3;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src7, src8, src9, src10);
+ src += src_stride_x4;
+
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128, src7,
+ src8, src9, src10);
+ DUP4_ARG2(__lsx_vilvl_b, src7, src2, src8, src7, src9, src8, src10, src9,
+ src72_r, src87_r, src98_r, src109_r);
+ out0_r = filt_4tap_dpadd_h(src10_r, src72_r, filt0, filt1);
+ out1_r = filt_4tap_dpadd_h(src21_r, src87_r, filt0, filt1);
+ out2_r = filt_4tap_dpadd_h(src72_r, src98_r, filt0, filt1);
+ out3_r = filt_4tap_dpadd_h(src87_r, src109_r, filt0, filt1);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1_r, out0_r, VP8_FILTER_SHIFT, out3_r,
+ out2_r, VP8_FILTER_SHIFT, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+ __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1);
+ __lsx_vstelm_d(tmp1, dst + dst_stride_x2, 0, 0);
+ __lsx_vstelm_d(tmp1, dst + dst_stride_x3, 0, 1);
+ dst += dst_stride_x4;
+
+ src10_r = src98_r;
+ src21_r = src109_r;
+ src2 = src10;
+ }
+}
+
+static void common_vt_4t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6;
+ __m128i src10_r, src32_r, src54_r, src21_r, src43_r, src65_r, src10_l;
+ __m128i src32_l, src54_l, src21_l, src43_l, src65_l, filt0, filt1;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
+
+ src -= src_stride;
+ filt = __lsx_vld(filter, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1);
+
+ DUP2_ARG2(__lsx_vldx, src, 0, src, src_stride, src0, src1);
+ src2 = __lsx_vldx(src, src_stride_x2);
+ src += src_stride_x3;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r);
+ DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, src10_l, src21_l);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src3, src4, src5, src6);
+ src += src_stride_x4;
+
+ DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128, src3,
+ src4, src5, src6);
+ DUP4_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src5, src4, src6, src5,
+ src32_r, src43_r, src54_r, src65_r);
+ DUP4_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, src5, src4, src6, src5,
+ src32_l, src43_l, src54_l, src65_l);
+ out0_r = filt_4tap_dpadd_h(src10_r, src32_r, filt0, filt1);
+ out1_r = filt_4tap_dpadd_h(src21_r, src43_r, filt0, filt1);
+ out2_r = filt_4tap_dpadd_h(src32_r, src54_r, filt0, filt1);
+ out3_r = filt_4tap_dpadd_h(src43_r, src65_r, filt0, filt1);
+ out0_l = filt_4tap_dpadd_h(src10_l, src32_l, filt0, filt1);
+ out1_l = filt_4tap_dpadd_h(src21_l, src43_l, filt0, filt1);
+ out2_l = filt_4tap_dpadd_h(src32_l, src54_l, filt0, filt1);
+ out3_l = filt_4tap_dpadd_h(src43_l, src65_l, filt0, filt1);
+ DUP4_ARG3(__lsx_vssrarni_b_h, out0_l, out0_r, VP8_FILTER_SHIFT, out1_l,
+ out1_r, VP8_FILTER_SHIFT, out2_l, out2_r, VP8_FILTER_SHIFT,
+ out3_l, out3_r, VP8_FILTER_SHIFT, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp2, 128, tmp3, 128, tmp0,
+ tmp1, tmp2, tmp3);
+ __lsx_vstx(tmp0, dst, 0);
+ __lsx_vstx(tmp1, dst, dst_stride);
+ __lsx_vstx(tmp2, dst, dst_stride_x2);
+ __lsx_vstx(tmp3, dst, dst_stride_x3);
+ dst += dst_stride_x4;
+
+ src10_r = src54_r;
+ src21_r = src65_r;
+ src10_l = src54_l;
+ src21_l = src65_l;
+ src2 = src6;
+ }
+}
+
+static void common_hv_4ht_4vt_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ __m128i src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1;
+ __m128i mask0, mask1, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2;
+ __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5;
+ __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16);
+ src -= 1;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0,
+ filt_hz1);
+ mask1 = __lsx_vaddi_bu(mask0, 2);
+
+ src1 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, -src_stride, src, src_stride, src0, src2);
+ src += src_stride_x2;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ hz_out0 = horiz_4tap_filt(src0, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = horiz_4tap_filt(src1, src2, mask0, mask1, filt_hz0, filt_hz1);
+ vec0 = __lsx_vpackev_b(hz_out1, hz_out0);
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0,
+ filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ src3 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src4, src5);
+ src6 = __lsx_vldx(src, src_stride_x3);
+ src += src_stride_x4;
+
+ DUP2_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src3, src4);
+ hz_out3 = horiz_4tap_filt(src3, src4, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = __lsx_vshuf_b(hz_out3, hz_out1, shuff);
+ vec1 = __lsx_vpackev_b(hz_out3, hz_out2);
+ tmp0 = filt_4tap_dpadd_h(vec0, vec1, filt_vt0, filt_vt1);
+
+ DUP2_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src5, src6);
+ hz_out5 = horiz_4tap_filt(src5, src6, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out4 = __lsx_vshuf_b(hz_out5, hz_out3, shuff);
+ vec2 = __lsx_vpackev_b(hz_out5, hz_out4);
+ tmp1 = filt_4tap_dpadd_h(vec1, vec2, filt_vt0, filt_vt1);
+
+ tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7);
+ tmp0 = __lsx_vxori_b(tmp0, 128);
+ __lsx_vstelm_w(tmp0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp0, dst, 0, 3);
+ dst += dst_stride;
+
+ hz_out1 = hz_out5;
+ vec0 = vec2;
+ }
+}
+
+static inline void common_hv_4ht_4vt_8w_lsx(
+ uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst,
+ int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1;
+ __m128i mask0, mask1, out0, out1;
+ __m128i filt, filt_vt0, filt_vt1, tmp0, tmp1, tmp2, tmp3;
+ __m128i hz_out0, hz_out1, hz_out2, hz_out3;
+ __m128i vec0, vec1, vec2, vec3, vec4;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0);
+ src -= 1 + src_stride;
+
+ filt = __lsx_vld(filter_horiz, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_hz0, filt_hz1);
+ mask1 = __lsx_vaddi_bu(mask0, 2);
+
+ DUP2_ARG2(__lsx_vldx, src, 0, src, src_stride, src0, src1);
+ src2 = __lsx_vldx(src, src_stride_x2);
+ src += src_stride_x3;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ hz_out0 = horiz_4tap_filt(src0, src0, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = horiz_4tap_filt(src1, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = horiz_4tap_filt(src2, src2, mask0, mask1, filt_hz0, filt_hz1);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out2, hz_out1, vec0, vec2);
+
+ filt = __lsx_vld(filter_vert, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src3, src4, src5, src6);
+ src += src_stride_x4;
+
+ DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128, src3,
+ src4, src5, src6);
+ hz_out3 = horiz_4tap_filt(src3, src3, mask0, mask1, filt_hz0, filt_hz1);
+ vec1 = __lsx_vpackev_b(hz_out3, hz_out2);
+ tmp0 = filt_4tap_dpadd_h(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out0 = horiz_4tap_filt(src4, src4, mask0, mask1, filt_hz0, filt_hz1);
+ vec3 = __lsx_vpackev_b(hz_out0, hz_out3);
+ tmp1 = filt_4tap_dpadd_h(vec2, vec3, filt_vt0, filt_vt1);
+
+ hz_out1 = horiz_4tap_filt(src5, src5, mask0, mask1, filt_hz0, filt_hz1);
+ vec4 = __lsx_vpackev_b(hz_out1, hz_out0);
+ tmp2 = filt_4tap_dpadd_h(vec1, vec4, filt_vt0, filt_vt1);
+
+ hz_out2 = horiz_4tap_filt(src6, src6, mask0, mask1, filt_hz0, filt_hz1);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out0, hz_out3, hz_out2, hz_out1, vec0, vec1);
+ tmp3 = filt_4tap_dpadd_h(vec0, vec1, filt_vt0, filt_vt1);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ __lsx_vstelm_d(out0, dst + dst_stride, 0, 1);
+ __lsx_vstelm_d(out1, dst + dst_stride_x2, 0, 0);
+ __lsx_vstelm_d(out1, dst + dst_stride_x3, 0, 1);
+ dst += dst_stride_x4;
+
+ vec0 = vec4;
+ vec2 = vec1;
+ }
+}
+
+static void common_hv_4ht_4vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ common_hv_4ht_4vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ common_hv_4ht_4vt_8w_lsx(src + 8, src_stride, dst + 8, dst_stride,
+ filter_horiz, filter_vert, height);
+}
+
+static void common_hv_6ht_4vt_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ __m128i src0, src1, src2, src3, src4, src5, src6;
+ __m128i filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2;
+ __m128i filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2;
+ __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5;
+ __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16);
+ src -= 2;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0,
+ filt_hz1);
+ filt_hz2 = __lsx_vldrepl_h(filter_horiz, 4);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ src1 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, -src_stride, src, src_stride, src0, src2);
+ src += src_stride_x2;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+
+ hz_out0 = horiz_6tap_filt(src0, src1, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out1 = horiz_6tap_filt(src1, src2, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ vec0 = __lsx_vpackev_b(hz_out1, hz_out0);
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0,
+ filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ src3 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src4, src5);
+ src6 = __lsx_vldx(src, src_stride_x3);
+ src += src_stride_x4;
+ DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128, src3,
+ src4, src5, src6);
+
+ hz_out3 = horiz_6tap_filt(src3, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out2 = __lsx_vshuf_b(hz_out3, hz_out1, shuff);
+ vec1 = __lsx_vpackev_b(hz_out3, hz_out2);
+ tmp0 = filt_4tap_dpadd_h(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out5 = horiz_6tap_filt(src5, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out4 = __lsx_vshuf_b(hz_out5, hz_out3, shuff);
+ vec2 = __lsx_vpackev_b(hz_out5, hz_out4);
+ tmp1 = filt_4tap_dpadd_h(vec1, vec2, filt_vt0, filt_vt1);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp0, tmp0, 7, tmp1, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+
+ __lsx_vstelm_w(tmp0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp1, dst, 0, 1);
+ dst += dst_stride;
+
+ hz_out1 = hz_out5;
+ vec0 = vec2;
+ }
+}
+
+static inline void common_hv_6ht_4vt_8w_lsx(
+ uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst,
+ int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+
+ __m128i src0, src1, src2, src3, src4, src5, src6;
+ __m128i filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2;
+ __m128i filt, filt_vt0, filt_vt1, hz_out0, hz_out1, hz_out2, hz_out3;
+ __m128i tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3;
+ __m128i out0, out1;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0);
+ src -= (2 + src_stride);
+
+ filt = __lsx_vld(filter_horiz, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_hz0, filt_hz1);
+ filt_hz2 = __lsx_vreplvei_h(filt, 2);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ DUP2_ARG2(__lsx_vldx, src, 0, src, src_stride, src0, src1);
+ src2 = __lsx_vldx(src, src_stride_x2);
+ src += src_stride_x3;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ hz_out0 = horiz_6tap_filt(src0, src0, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out1 = horiz_6tap_filt(src1, src1, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out2 = horiz_6tap_filt(src2, src2, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out2, hz_out1, vec0, vec2);
+
+ filt = __lsx_vld(filter_vert, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src3, src4, src5, src6);
+ src += src_stride_x4;
+ DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128, src3,
+ src4, src5, src6);
+
+ hz_out3 = horiz_6tap_filt(src3, src3, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec1 = __lsx_vpackev_b(hz_out3, hz_out2);
+ tmp0 = filt_4tap_dpadd_h(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out0 = horiz_6tap_filt(src4, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec3 = __lsx_vpackev_b(hz_out0, hz_out3);
+ tmp1 = filt_4tap_dpadd_h(vec2, vec3, filt_vt0, filt_vt1);
+
+ hz_out1 = horiz_6tap_filt(src5, src5, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec0 = __lsx_vpackev_b(hz_out1, hz_out0);
+ tmp2 = filt_4tap_dpadd_h(vec1, vec0, filt_vt0, filt_vt1);
+
+ hz_out2 = horiz_6tap_filt(src6, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out0, hz_out3, hz_out2, hz_out1, vec1, vec2);
+ tmp3 = filt_4tap_dpadd_h(vec1, vec2, filt_vt0, filt_vt1);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ __lsx_vstelm_d(out0, dst + dst_stride, 0, 1);
+ __lsx_vstelm_d(out1, dst + dst_stride_x2, 0, 0);
+ __lsx_vstelm_d(out1, dst + dst_stride_x3, 0, 1);
+ dst += dst_stride_x4;
+ }
+}
+
+static void common_hv_6ht_4vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ common_hv_6ht_4vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ common_hv_6ht_4vt_8w_lsx(src + 8, src_stride, dst + 8, dst_stride,
+ filter_horiz, filter_vert, height);
+}
+
+static void common_hv_4ht_6vt_4w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i filt_hz0, filt_hz1, filt_vt0, filt_vt1, filt_vt2, mask0, mask1;
+ __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ __m128i hz_out7, tmp0, tmp1, out0, out1, out2, out3;
+ __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 16);
+
+ src -= 1;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0,
+ filt_hz1);
+ mask1 = __lsx_vaddi_bu(mask0, 2);
+
+ DUP4_ARG2(__lsx_vldx, src, -src_stride_x2, src, -src_stride, src, src_stride,
+ src, src_stride_x2, src0, src1, src3, src4);
+ src2 = __lsx_vld(src, 0);
+ src += src_stride_x3;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+ hz_out0 = horiz_4tap_filt(src0, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = horiz_4tap_filt(src2, src3, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out3 = horiz_4tap_filt(src3, src4, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = __lsx_vshuf_b(hz_out2, hz_out0, shuff);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out3, hz_out2, out0, out1);
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0,
+ filt_vt1);
+ filt_vt2 = __lsx_vldrepl_h(filter_vert, 4);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ src5 = __lsx_vld(src, 0);
+ DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_x2, src6, src7);
+ src8 = __lsx_vldx(src, src_stride_x3);
+ DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128, src5,
+ src6, src7, src8);
+ src += src_stride_x4;
+
+ hz_out5 = horiz_4tap_filt(src5, src6, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out4 = __lsx_vshuf_b(hz_out5, hz_out3, shuff);
+ out2 = __lsx_vpackev_b(hz_out5, hz_out4);
+ tmp0 = dpadd_h3(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = horiz_4tap_filt(src7, src8, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out6 = __lsx_vshuf_b(hz_out7, hz_out5, shuff);
+ out3 = __lsx_vpackev_b(hz_out7, hz_out6);
+ tmp1 = dpadd_h3(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2);
+
+ tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7);
+ tmp0 = __lsx_vxori_b(tmp0, 128);
+ __lsx_vstelm_w(tmp0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(tmp0, dst, 0, 3);
+ dst += dst_stride;
+
+ hz_out3 = hz_out7;
+ out0 = out2;
+ out1 = out3;
+ }
+}
+
+static inline void common_hv_4ht_6vt_8w_lsx(
+ uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst,
+ int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ int32_t src_stride_x2 = src_stride << 1;
+ int32_t src_stride_x3 = src_stride_x2 + src_stride;
+ int32_t src_stride_x4 = src_stride << 2;
+ int32_t dst_stride_x2 = dst_stride << 1;
+ int32_t dst_stride_x3 = dst_stride_x2 + dst_stride;
+ int32_t dst_stride_x4 = dst_stride << 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i filt_hz0, filt_hz1, mask0, mask1;
+ __m128i filt, filt_vt0, filt_vt1, filt_vt2, tmp0, tmp1, tmp2, tmp3;
+ __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ __m128i hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7;
+ __m128i vec0, vec1;
+
+ mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0);
+ src -= 1 + src_stride_x2;
+
+ filt = __lsx_vld(filter_horiz, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_hz0, filt_hz1);
+ mask1 = __lsx_vaddi_bu(mask0, 2);
+
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src0, src1, src2, src3);
+ src += src_stride_x4;
+ src4 = __lsx_vld(src, 0);
+ src += src_stride;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+ hz_out0 = horiz_4tap_filt(src0, src0, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = horiz_4tap_filt(src1, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = horiz_4tap_filt(src2, src2, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out3 = horiz_4tap_filt(src3, src3, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out4 = horiz_4tap_filt(src4, src4, mask0, mask1, filt_hz0, filt_hz1);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out3, hz_out2, out0, out1);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out2, hz_out1, hz_out4, hz_out3, out3, out4);
+
+ filt = __lsx_vld(filter_vert, 0);
+ DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_vt0, filt_vt1);
+ filt_vt2 = __lsx_vreplvei_h(filt, 2);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src,
+ src_stride_x3, src5, src6, src7, src8);
+ src += src_stride_x4;
+
+ DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128, src5,
+ src6, src7, src8);
+ hz_out5 = horiz_4tap_filt(src5, src5, mask0, mask1, filt_hz0, filt_hz1);
+ out2 = __lsx_vpackev_b(hz_out5, hz_out4);
+ tmp0 = dpadd_h3(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out6 = horiz_4tap_filt(src6, src6, mask0, mask1, filt_hz0, filt_hz1);
+ out5 = __lsx_vpackev_b(hz_out6, hz_out5);
+ tmp1 = dpadd_h3(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = horiz_4tap_filt(src7, src7, mask0, mask1, filt_hz0, filt_hz1);
+ out6 = __lsx_vpackev_b(hz_out7, hz_out6);
+ tmp2 = dpadd_h3(out1, out2, out6, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out8 = horiz_4tap_filt(src8, src8, mask0, mask1, filt_hz0, filt_hz1);
+ out7 = __lsx_vpackev_b(hz_out8, hz_out7);
+ tmp3 = dpadd_h3(out4, out5, out7, filt_vt0, filt_vt1, filt_vt2);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, vec0, vec1);
+ DUP2_ARG2(__lsx_vxori_b, vec0, 128, vec1, 128, vec0, vec1);
+ __lsx_vstelm_d(vec0, dst, 0, 0);
+ __lsx_vstelm_d(vec0, dst + dst_stride, 0, 1);
+ __lsx_vstelm_d(vec1, dst + dst_stride_x2, 0, 0);
+ __lsx_vstelm_d(vec1, dst + dst_stride_x3, 0, 1);
+ dst += dst_stride_x4;
+ hz_out4 = hz_out8;
+ out0 = out2;
+ out1 = out6;
+ out3 = out5;
+ out4 = out7;
+ }
+}
+
+static void common_hv_4ht_6vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ common_hv_4ht_6vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ common_hv_4ht_6vt_8w_lsx(src + 8, src_stride, dst + 8, dst_stride,
+ filter_horiz, filter_vert, height);
+}
+
+typedef void (*PVp8SixtapPredictFunc1)(
+ uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst,
+ int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert,
+ int32_t height);
+
+typedef void (*PVp8SixtapPredictFunc2)(uint8_t *RESTRICT src,
+ int32_t src_stride,
+ uint8_t *RESTRICT dst,
+ int32_t dst_stride, const int8_t *filter,
+ int32_t height);
+
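+/* Dispatch note: VP8 subpel offsets run 0..7 and vp8_subpel_filters_lsx is
+ * indexed by offset - 1.  The odd offsets (1, 3, 5, 7) have zero outer taps
+ * in the 6-tap kernel, so those cases pass filter + 1 and take the 4-tap
+ * paths; the even offsets (2, 4, 6) use the full 6-tap paths, and an offset
+ * of 0 drops that direction's filter entirely (both zero is a plain copy).
+ */
+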
+void vp8_sixtap_predict4x4_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_subpel_filters_lsx[xoffset - 1];
+ const int8_t *v_filter = vp8_subpel_filters_lsx[yoffset - 1];
+
+ static PVp8SixtapPredictFunc1 Predict4x4Funcs1[4] = {
+ common_hv_6ht_6vt_4w_lsx,
+ common_hv_6ht_4vt_4w_lsx,
+ common_hv_4ht_6vt_4w_lsx,
+ common_hv_4ht_4vt_4w_lsx,
+ };
+
+ static PVp8SixtapPredictFunc2 Predict4x4Funcs2[4] = { common_vt_6t_4w_lsx,
+ common_vt_4t_4w_lsx,
+ common_hz_6t_4w_lsx,
+ common_hz_4t_4w_lsx };
+ if (yoffset < 8 && xoffset < 8) {
+ if (yoffset) {
+ if (xoffset) {
+ switch (xoffset & 1) {
+ case 0:
+ switch (yoffset & 1) {
+ case 0:
+ Predict4x4Funcs1[0](src, src_stride, dst, dst_stride, h_filter,
+ v_filter, 4);
+ break;
+ case 1:
+ Predict4x4Funcs1[1](src, src_stride, dst, dst_stride, h_filter,
+ v_filter + 1, 4);
+ break;
+ }
+ break;
+
+ case 1:
+ switch (yoffset & 1) {
+ case 0:
+ Predict4x4Funcs1[2](src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter, 4);
+ break;
+
+ case 1:
+ Predict4x4Funcs1[3](src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter + 1, 4);
+ break;
+ }
+ break;
+ }
+ } else {
+ switch (yoffset & 1) {
+ case 0:
+ Predict4x4Funcs2[0](src, src_stride, dst, dst_stride, v_filter, 4);
+ break;
+
+ case 1:
+ Predict4x4Funcs2[1](src, src_stride, dst, dst_stride, v_filter + 1,
+ 4);
+ break;
+ }
+ }
+ } else {
+ switch (xoffset) {
+ case 0: {
+          __m128i tp0 = __lsx_vldi(0);
+
+          /* gather the four 4-byte rows into lanes 0..3 so the element
+           * stores below can write them back out */
+          tp0 = __lsx_vinsgr2vr_w(tp0, *(const int32_t *)src, 0);
+          src += src_stride;
+          tp0 = __lsx_vinsgr2vr_w(tp0, *(const int32_t *)src, 1);
+          src += src_stride;
+          tp0 = __lsx_vinsgr2vr_w(tp0, *(const int32_t *)src, 2);
+          src += src_stride;
+          tp0 = __lsx_vinsgr2vr_w(tp0, *(const int32_t *)src, 3);
+
+ __lsx_vstelm_w(tp0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(tp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(tp0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(tp0, dst, 0, 3);
+ break;
+ }
+ case 2:
+ case 4:
+ case 6:
+ Predict4x4Funcs2[2](src, src_stride, dst, dst_stride, h_filter, 4);
+ break;
+ }
+ switch (xoffset & 1) {
+ case 1:
+ Predict4x4Funcs2[3](src, src_stride, dst, dst_stride, h_filter + 1,
+ 4);
+ break;
+ }
+ }
+ }
+}
+
+void vp8_sixtap_predict8x8_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_subpel_filters_lsx[xoffset - 1];
+ const int8_t *v_filter = vp8_subpel_filters_lsx[yoffset - 1];
+
+ static PVp8SixtapPredictFunc1 Predict8x8Funcs1[4] = {
+ common_hv_6ht_6vt_8w_lsx,
+ common_hv_6ht_4vt_8w_lsx,
+ common_hv_4ht_6vt_8w_lsx,
+ common_hv_4ht_4vt_8w_lsx,
+ };
+
+ static PVp8SixtapPredictFunc2 Predict8x8Funcs2[4] = { common_vt_6t_8w_lsx,
+ common_vt_4t_8w_lsx,
+ common_hz_6t_8w_lsx,
+ common_hz_4t_8w_lsx };
+
+ if (yoffset < 8 && xoffset < 8) {
+ if (yoffset) {
+ if (xoffset) {
+ switch (xoffset & 1) {
+ case 0:
+ switch (yoffset & 1) {
+ case 0:
+ Predict8x8Funcs1[0](src, src_stride, dst, dst_stride, h_filter,
+ v_filter, 8);
+ break;
+
+ case 1:
+ Predict8x8Funcs1[1](src, src_stride, dst, dst_stride, h_filter,
+ v_filter + 1, 8);
+ break;
+ }
+ break;
+
+ case 1:
+ switch (yoffset & 1) {
+ case 0:
+ Predict8x8Funcs1[2](src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter, 8);
+ break;
+
+ case 1:
+ Predict8x8Funcs1[3](src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter + 1, 8);
+ break;
+ }
+ break;
+ }
+ } else {
+ switch (yoffset & 1) {
+ case 0:
+ Predict8x8Funcs2[0](src, src_stride, dst, dst_stride, v_filter, 8);
+ break;
+
+ case 1:
+ Predict8x8Funcs2[1](src, src_stride, dst, dst_stride, v_filter + 1,
+ 8);
+ break;
+ }
+ }
+ } else {
+ switch (xoffset & 1) {
+ case 1:
+ Predict8x8Funcs2[3](src, src_stride, dst, dst_stride, h_filter + 1,
+ 8);
+ break;
+ }
+ switch (xoffset) {
+ case 0: vp8_copy_mem8x8(src, src_stride, dst, dst_stride); break;
+ case 2:
+ case 4:
+ case 6:
+ Predict8x8Funcs2[2](src, src_stride, dst, dst_stride, h_filter, 8);
+ break;
+ }
+ }
+ }
+}
+
+void vp8_sixtap_predict16x16_lsx(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_subpel_filters_lsx[xoffset - 1];
+ const int8_t *v_filter = vp8_subpel_filters_lsx[yoffset - 1];
+
+ static PVp8SixtapPredictFunc1 Predict16x16Funcs1[4] = {
+ common_hv_6ht_6vt_16w_lsx,
+ common_hv_6ht_4vt_16w_lsx,
+ common_hv_4ht_6vt_16w_lsx,
+ common_hv_4ht_4vt_16w_lsx,
+ };
+
+ static PVp8SixtapPredictFunc2 Predict16x16Funcs2[4] = {
+ common_vt_6t_16w_lsx, common_vt_4t_16w_lsx, common_hz_6t_16w_lsx,
+ common_hz_4t_16w_lsx
+ };
+
+ if (yoffset < 8 && xoffset < 8) {
+ if (yoffset) {
+ if (xoffset) {
+ switch (xoffset & 1) {
+ case 0:
+ switch (yoffset & 1) {
+ case 0:
+ Predict16x16Funcs1[0](src, src_stride, dst, dst_stride,
+ h_filter, v_filter, 16);
+ break;
+
+ case 1:
+ Predict16x16Funcs1[1](src, src_stride, dst, dst_stride,
+ h_filter, v_filter + 1, 16);
+ break;
+ }
+ break;
+
+ case 1:
+ switch (yoffset & 1) {
+ case 0:
+ Predict16x16Funcs1[2](src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter, 16);
+ break;
+
+ case 1:
+ Predict16x16Funcs1[3](src, src_stride, dst, dst_stride,
+                                      h_filter + 1, v_filter + 1, 16);
+ break;
+ }
+ break;
+ }
+ } else {
+ switch (yoffset & 1) {
+ case 0:
+ Predict16x16Funcs2[0](src, src_stride, dst, dst_stride, v_filter,
+ 16);
+ break;
+
+ case 1:
+ Predict16x16Funcs2[1](src, src_stride, dst, dst_stride,
+ v_filter + 1, 16);
+ break;
+ }
+ }
+ } else {
+ switch (xoffset & 1) {
+ case 1:
+ Predict16x16Funcs2[3](src, src_stride, dst, dst_stride, h_filter + 1,
+ 16);
+ break;
+ }
+ switch (xoffset) {
+ case 0: vp8_copy_mem16x16(src, src_stride, dst, dst_stride); break;
+ case 2:
+ case 4:
+ case 6:
+ Predict16x16Funcs2[2](src, src_stride, dst, dst_stride, h_filter, 16);
+ break;
+ }
+ }
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/loopfilter.h b/media/libvpx/libvpx/vp8/common/loopfilter.h
new file mode 100644
index 0000000000..909e8df512
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/loopfilter.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_LOOPFILTER_H_
+#define VPX_VP8_COMMON_LOOPFILTER_H_
+
+#include "vpx_ports/mem.h"
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_LOOP_FILTER 63
+/* fraction of total macroblock rows to be used in fast filter level picking */
+/* has to be > 2 */
+#define PARTIAL_FRAME_FRACTION 8
+
+typedef enum { NORMAL_LOOPFILTER = 0, SIMPLE_LOOPFILTER = 1 } LOOPFILTERTYPE;
+
+#if VPX_ARCH_ARM
+#define SIMD_WIDTH 1
+#else
+#define SIMD_WIDTH 16
+#endif
+
+/* Need to align this structure so when it is declared and
+ * passed it can be loaded into vector registers.
+ */
+typedef struct {
+ DECLARE_ALIGNED(SIMD_WIDTH, unsigned char,
+ mblim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, unsigned char,
+ blim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, unsigned char,
+ lim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, hev_thr[4][SIMD_WIDTH]);
+ unsigned char lvl[4][4][4];
+ unsigned char hev_thr_lut[2][MAX_LOOP_FILTER + 1];
+ unsigned char mode_lf_lut[10];
+} loop_filter_info_n;
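+
+/* Each limit row above stores a single byte value replicated SIMD_WIDTH
+ * times, so a SIMD loop filter can load a threshold row straight into a
+ * vector register instead of broadcasting a scalar on every call.
+ */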
+
+typedef struct loop_filter_info {
+ const unsigned char *mblim;
+ const unsigned char *blim;
+ const unsigned char *lim;
+ const unsigned char *hev_thr;
+} loop_filter_info;
+
+typedef void loop_filter_uvfunction(unsigned char *u, /* source pointer */
+ int p, /* pitch */
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh,
+ unsigned char *v);
+
+/* assorted loopfilter functions which get used elsewhere */
+struct VP8Common;
+struct macroblockd;
+struct modeinfo;
+
+void vp8_loop_filter_init(struct VP8Common *cm);
+
+void vp8_loop_filter_frame_init(struct VP8Common *cm, struct macroblockd *mbd,
+ int default_filt_lvl);
+
+void vp8_loop_filter_frame(struct VP8Common *cm, struct macroblockd *mbd,
+ int frame_type);
+
+void vp8_loop_filter_partial_frame(struct VP8Common *cm,
+ struct macroblockd *mbd,
+ int default_filt_lvl);
+
+void vp8_loop_filter_frame_yonly(struct VP8Common *cm, struct macroblockd *mbd,
+ int default_filt_lvl);
+
+void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
+ int sharpness_lvl);
+
+void vp8_loop_filter_row_normal(struct VP8Common *cm,
+ struct modeinfo *mode_info_context, int mb_row,
+ int post_ystride, int post_uvstride,
+ unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr);
+
+void vp8_loop_filter_row_simple(struct VP8Common *cm,
+ struct modeinfo *mode_info_context, int mb_row,
+ int post_ystride, unsigned char *y_ptr);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_LOOPFILTER_H_
diff --git a/media/libvpx/libvpx/vp8/common/loopfilter_filters.c b/media/libvpx/libvpx/vp8/common/loopfilter_filters.c
new file mode 100644
index 0000000000..61a55d3c92
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/loopfilter_filters.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include "loopfilter.h"
+#include "onyxc_int.h"
+
+typedef unsigned char uc;
+
+static signed char vp8_signed_char_clamp(int t) {
+ t = (t < -128 ? -128 : t);
+ t = (t > 127 ? 127 : t);
+ return (signed char)t;
+}
+
+/* should we apply any filter at all? ( 11111111 yes, 00000000 no) */
+static signed char vp8_filter_mask(uc limit, uc blimit, uc p3, uc p2, uc p1,
+ uc p0, uc q0, uc q1, uc q2, uc q3) {
+ signed char mask = 0;
+ mask |= (abs(p3 - p2) > limit);
+ mask |= (abs(p2 - p1) > limit);
+ mask |= (abs(p1 - p0) > limit);
+ mask |= (abs(q1 - q0) > limit);
+ mask |= (abs(q2 - q1) > limit);
+ mask |= (abs(q3 - q2) > limit);
+ mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit);
+ return mask - 1;
+}
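+
+/* Worked example: limit = 8, blimit = 16 and pixels {p3..q3} =
+ * {80, 82, 84, 86, 88, 90, 92, 94} give per-tap deltas of 2 and a boundary
+ * term of |86 - 88| * 2 + |84 - 90| / 2 = 7, so no test fires, mask stays 0
+ * and the function returns -1 (all ones): the edge gets filtered.
+ */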
+
+/* is there a high-variance internal edge? ( 11111111 yes, 00000000 no) */
+static signed char vp8_hevmask(uc thresh, uc p1, uc p0, uc q0, uc q1) {
+ signed char hev = 0;
+ hev |= (abs(p1 - p0) > thresh) * -1;
+ hev |= (abs(q1 - q0) > thresh) * -1;
+ return hev;
+}
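+
+/* Example: thresh = 4 with p1 = 90, p0 = 84 makes |p1 - p0| = 6 > 4, so hev
+ * becomes -1 (all ones): vp8_filter then keeps its (ps1 - qs1) term and
+ * skips the later q1/p1 adjustment.
+ */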
+
+static void vp8_filter(signed char mask, uc hev, uc *op1, uc *op0, uc *oq0,
+ uc *oq1) {
+ signed char ps0, qs0;
+ signed char ps1, qs1;
+ signed char filter_value, Filter1, Filter2;
+ signed char u;
+
+ ps1 = (signed char)*op1 ^ 0x80;
+ ps0 = (signed char)*op0 ^ 0x80;
+ qs0 = (signed char)*oq0 ^ 0x80;
+ qs1 = (signed char)*oq1 ^ 0x80;
+
+ /* add outer taps if we have high edge variance */
+ filter_value = vp8_signed_char_clamp(ps1 - qs1);
+ filter_value &= hev;
+
+ /* inner taps */
+ filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0));
+ filter_value &= mask;
+
+ /* save bottom 3 bits so that we round one side +4 and the other +3
+ * if it equals 4 we'll set it to adjust by -1 to account for the fact
+ * we'd round it by 3 the other way
+ */
+ Filter1 = vp8_signed_char_clamp(filter_value + 4);
+ Filter2 = vp8_signed_char_clamp(filter_value + 3);
+ Filter1 >>= 3;
+ Filter2 >>= 3;
+ u = vp8_signed_char_clamp(qs0 - Filter1);
+ *oq0 = u ^ 0x80;
+ u = vp8_signed_char_clamp(ps0 + Filter2);
+ *op0 = u ^ 0x80;
+ filter_value = Filter1;
+
+ /* outer tap adjustments */
+ filter_value += 1;
+ filter_value >>= 1;
+ filter_value &= ~hev;
+
+ u = vp8_signed_char_clamp(qs1 - filter_value);
+ *oq1 = u ^ 0x80;
+ u = vp8_signed_char_clamp(ps1 + filter_value);
+ *op1 = u ^ 0x80;
+}
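+
+/* Rounding example: filter_value = 5 gives Filter1 = (5 + 4) >> 3 = 1
+ * (subtracted from q0) and Filter2 = (5 + 3) >> 3 = 1 (added to p0); the
+ * outer taps then move by (1 + 1) >> 1 = 1 when hev is 0.  At exactly 4 the
+ * two sides split: Filter1 = 1 but Filter2 = 0.
+ */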
+
+static void loop_filter_horizontal_edge_c(unsigned char *s, int p, /* pitch */
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh,
+ int count) {
+ int hev = 0; /* high edge variance */
+ signed char mask = 0;
+ int i = 0;
+
+ /* loop filter designed to work using chars so that we can make maximum use
+ * of 8 bit simd instructions.
+ */
+ do {
+ mask = vp8_filter_mask(limit[0], blimit[0], s[-4 * p], s[-3 * p], s[-2 * p],
+ s[-1 * p], s[0 * p], s[1 * p], s[2 * p], s[3 * p]);
+
+ hev = vp8_hevmask(thresh[0], s[-2 * p], s[-1 * p], s[0 * p], s[1 * p]);
+
+ vp8_filter(mask, hev, s - 2 * p, s - 1 * p, s, s + 1 * p);
+
+ ++s;
+ } while (++i < count * 8);
+}
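+
+/* For a horizontal edge the pitch p steps vertically, so s[-1 * p] and s[0]
+ * are the two pixels straddling the edge and ++s walks along it; the loop
+ * covers count * 8 columns (16 for luma, 8 for chroma). The vertical variant
+ * below swaps the roles: pixels are addressed at offsets -4..3 within a row
+ * and s += p steps down the edge.
+ */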
+
+static void loop_filter_vertical_edge_c(unsigned char *s, int p,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh,
+ int count) {
+ int hev = 0; /* high edge variance */
+ signed char mask = 0;
+ int i = 0;
+
+  /* The loop filter is designed to work on chars so that we can make
+   * maximum use of 8-bit SIMD instructions.
+   */
+ do {
+ mask = vp8_filter_mask(limit[0], blimit[0], s[-4], s[-3], s[-2], s[-1],
+ s[0], s[1], s[2], s[3]);
+
+ hev = vp8_hevmask(thresh[0], s[-2], s[-1], s[0], s[1]);
+
+ vp8_filter(mask, hev, s - 2, s - 1, s, s + 1);
+
+ s += p;
+ } while (++i < count * 8);
+}
+
+static void vp8_mbfilter(signed char mask, uc hev, uc *op2, uc *op1, uc *op0,
+ uc *oq0, uc *oq1, uc *oq2) {
+ signed char s, u;
+ signed char filter_value, Filter1, Filter2;
+ signed char ps2 = (signed char)*op2 ^ 0x80;
+ signed char ps1 = (signed char)*op1 ^ 0x80;
+ signed char ps0 = (signed char)*op0 ^ 0x80;
+ signed char qs0 = (signed char)*oq0 ^ 0x80;
+ signed char qs1 = (signed char)*oq1 ^ 0x80;
+ signed char qs2 = (signed char)*oq2 ^ 0x80;
+
+ /* add outer taps if we have high edge variance */
+ filter_value = vp8_signed_char_clamp(ps1 - qs1);
+ filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0));
+ filter_value &= mask;
+
+ Filter2 = filter_value;
+ Filter2 &= hev;
+
+ /* save bottom 3 bits so that we round one side +4 and the other +3 */
+ Filter1 = vp8_signed_char_clamp(Filter2 + 4);
+ Filter2 = vp8_signed_char_clamp(Filter2 + 3);
+ Filter1 >>= 3;
+ Filter2 >>= 3;
+ qs0 = vp8_signed_char_clamp(qs0 - Filter1);
+ ps0 = vp8_signed_char_clamp(ps0 + Filter2);
+
+ /* only apply wider filter if not high edge variance */
+ filter_value &= ~hev;
+ Filter2 = filter_value;
+
+  /* roughly 3/7 of the difference across the boundary */
+ u = vp8_signed_char_clamp((63 + Filter2 * 27) >> 7);
+ s = vp8_signed_char_clamp(qs0 - u);
+ *oq0 = s ^ 0x80;
+ s = vp8_signed_char_clamp(ps0 + u);
+ *op0 = s ^ 0x80;
+
+  /* roughly 2/7 of the difference across the boundary */
+ u = vp8_signed_char_clamp((63 + Filter2 * 18) >> 7);
+ s = vp8_signed_char_clamp(qs1 - u);
+ *oq1 = s ^ 0x80;
+ s = vp8_signed_char_clamp(ps1 + u);
+ *op1 = s ^ 0x80;
+
+  /* roughly 1/7 of the difference across the boundary */
+ u = vp8_signed_char_clamp((63 + Filter2 * 9) >> 7);
+ s = vp8_signed_char_clamp(qs2 - u);
+ *oq2 = s ^ 0x80;
+ s = vp8_signed_char_clamp(ps2 + u);
+ *op2 = s ^ 0x80;
+}
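+
+/* The wide-filter taps 27, 18 and 9 are 3/7, 2/7 and 1/7 of 63, so the
+ * correction tapers with distance from the edge. For example, with
+ * filter_value = 32:
+ *   (63 + 32 * 27) >> 7 = 927 >> 7 = 7   at p0/q0
+ *   (63 + 32 * 18) >> 7 = 639 >> 7 = 4   at p1/q1
+ *   (63 + 32 *  9) >> 7 = 351 >> 7 = 2   at p2/q2
+ */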
+
+static void mbloop_filter_horizontal_edge_c(unsigned char *s, int p,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh,
+ int count) {
+ signed char hev = 0; /* high edge variance */
+ signed char mask = 0;
+ int i = 0;
+
+  /* The loop filter is designed to work on chars so that we can make
+   * maximum use of 8-bit SIMD instructions.
+   */
+ do {
+ mask = vp8_filter_mask(limit[0], blimit[0], s[-4 * p], s[-3 * p], s[-2 * p],
+ s[-1 * p], s[0 * p], s[1 * p], s[2 * p], s[3 * p]);
+
+ hev = vp8_hevmask(thresh[0], s[-2 * p], s[-1 * p], s[0 * p], s[1 * p]);
+
+ vp8_mbfilter(mask, hev, s - 3 * p, s - 2 * p, s - 1 * p, s, s + 1 * p,
+ s + 2 * p);
+
+ ++s;
+ } while (++i < count * 8);
+}
+
+static void mbloop_filter_vertical_edge_c(unsigned char *s, int p,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh,
+ int count) {
+ signed char hev = 0; /* high edge variance */
+ signed char mask = 0;
+ int i = 0;
+
+ do {
+ mask = vp8_filter_mask(limit[0], blimit[0], s[-4], s[-3], s[-2], s[-1],
+ s[0], s[1], s[2], s[3]);
+
+ hev = vp8_hevmask(thresh[0], s[-2], s[-1], s[0], s[1]);
+
+ vp8_mbfilter(mask, hev, s - 3, s - 2, s - 1, s, s + 1, s + 2);
+
+ s += p;
+ } while (++i < count * 8);
+}
+
+/* should we apply any filter at all ( 11111111 yes, 00000000 no) */
+static signed char vp8_simple_filter_mask(uc blimit, uc p1, uc p0, uc q0,
+ uc q1) {
+ /* Why does this cause problems for win32?
+ * error C2143: syntax error : missing ';' before 'type'
+ * (void) limit;
+ */
+ signed char mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) * -1;
+ return mask;
+}
+
+static void vp8_simple_filter(signed char mask, uc *op1, uc *op0, uc *oq0,
+ uc *oq1) {
+ signed char filter_value, Filter1, Filter2;
+ signed char p1 = (signed char)*op1 ^ 0x80;
+ signed char p0 = (signed char)*op0 ^ 0x80;
+ signed char q0 = (signed char)*oq0 ^ 0x80;
+ signed char q1 = (signed char)*oq1 ^ 0x80;
+ signed char u;
+
+ filter_value = vp8_signed_char_clamp(p1 - q1);
+ filter_value = vp8_signed_char_clamp(filter_value + 3 * (q0 - p0));
+ filter_value &= mask;
+
+ /* save bottom 3 bits so that we round one side +4 and the other +3 */
+ Filter1 = vp8_signed_char_clamp(filter_value + 4);
+ Filter1 >>= 3;
+ u = vp8_signed_char_clamp(q0 - Filter1);
+ *oq0 = u ^ 0x80;
+
+ Filter2 = vp8_signed_char_clamp(filter_value + 3);
+ Filter2 >>= 3;
+ u = vp8_signed_char_clamp(p0 + Filter2);
+ *op0 = u ^ 0x80;
+}
+
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char *y_ptr,
+ int y_stride,
+ const unsigned char *blimit) {
+ signed char mask = 0;
+ int i = 0;
+
+ do {
+ mask = vp8_simple_filter_mask(blimit[0], y_ptr[-2 * y_stride],
+ y_ptr[-1 * y_stride], y_ptr[0 * y_stride],
+ y_ptr[1 * y_stride]);
+ vp8_simple_filter(mask, y_ptr - 2 * y_stride, y_ptr - 1 * y_stride, y_ptr,
+ y_ptr + 1 * y_stride);
+ ++y_ptr;
+ } while (++i < 16);
+}
+
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ signed char mask = 0;
+ int i = 0;
+
+ do {
+ mask = vp8_simple_filter_mask(blimit[0], y_ptr[-2], y_ptr[-1], y_ptr[0],
+ y_ptr[1]);
+ vp8_simple_filter(mask, y_ptr - 2, y_ptr - 1, y_ptr, y_ptr + 1);
+ y_ptr += y_stride;
+ } while (++i < 16);
+}
+
+/* Horizontal MB filtering */
+void vp8_loop_filter_mbh_c(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ mbloop_filter_horizontal_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 2);
+
+ if (u_ptr) {
+ mbloop_filter_horizontal_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+
+ if (v_ptr) {
+ mbloop_filter_horizontal_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+}
+
+/* Vertical MB Filtering */
+void vp8_loop_filter_mbv_c(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ mbloop_filter_vertical_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 2);
+
+ if (u_ptr) {
+ mbloop_filter_vertical_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+
+ if (v_ptr) {
+ mbloop_filter_vertical_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+}
+
+/* Horizontal B Filtering */
+void vp8_loop_filter_bh_c(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ loop_filter_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, lfi->blim,
+ lfi->lim, lfi->hev_thr, 2);
+ loop_filter_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, lfi->blim,
+ lfi->lim, lfi->hev_thr, 2);
+ loop_filter_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, lfi->blim,
+ lfi->lim, lfi->hev_thr, 2);
+
+ if (u_ptr) {
+ loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride, lfi->blim,
+ lfi->lim, lfi->hev_thr, 1);
+ }
+
+ if (v_ptr) {
+ loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, lfi->blim,
+ lfi->lim, lfi->hev_thr, 1);
+ }
+}
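+
+/* The three calls above filter the inner horizontal edges of the 16x16 luma
+ * macroblock, which lie 4, 8 and 12 rows below its top edge; each 8x8 chroma
+ * block has a single inner edge 4 rows in.
+ */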
+
+void vp8_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride,
+ blimit);
+ vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride,
+ blimit);
+ vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride,
+ blimit);
+}
+
+/* Vertical B Filtering */
+void vp8_loop_filter_bv_c(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ loop_filter_vertical_edge_c(y_ptr + 4, y_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr, 2);
+ loop_filter_vertical_edge_c(y_ptr + 8, y_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr, 2);
+ loop_filter_vertical_edge_c(y_ptr + 12, y_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr, 2);
+
+ if (u_ptr) {
+ loop_filter_vertical_edge_c(u_ptr + 4, uv_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+
+ if (v_ptr) {
+ loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+}
+
+void vp8_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ vp8_loop_filter_simple_vertical_edge_c(y_ptr + 4, y_stride, blimit);
+ vp8_loop_filter_simple_vertical_edge_c(y_ptr + 8, y_stride, blimit);
+ vp8_loop_filter_simple_vertical_edge_c(y_ptr + 12, y_stride, blimit);
+}
diff --git a/media/libvpx/libvpx/vp8/common/mbpitch.c b/media/libvpx/libvpx/vp8/common/mbpitch.c
new file mode 100644
index 0000000000..188b57f389
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mbpitch.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "blockd.h"
+
+void vp8_setup_block_dptrs(MACROBLOCKD *x) {
+ int r, c;
+
+ for (r = 0; r < 4; ++r) {
+ for (c = 0; c < 4; ++c) {
+ x->block[r * 4 + c].predictor = x->predictor + r * 4 * 16 + c * 4;
+ }
+ }
+
+ for (r = 0; r < 2; ++r) {
+ for (c = 0; c < 2; ++c) {
+ x->block[16 + r * 2 + c].predictor =
+ x->predictor + 256 + r * 4 * 8 + c * 4;
+ }
+ }
+
+ for (r = 0; r < 2; ++r) {
+ for (c = 0; c < 2; ++c) {
+ x->block[20 + r * 2 + c].predictor =
+ x->predictor + 320 + r * 4 * 8 + c * 4;
+ }
+ }
+
+ for (r = 0; r < 25; ++r) {
+ x->block[r].qcoeff = x->qcoeff + r * 16;
+ x->block[r].dqcoeff = x->dqcoeff + r * 16;
+ x->block[r].eob = x->eobs + r;
+ }
+}
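+
+/* The predictor buffer holds the 16x16 luma plane (256 bytes, stride 16)
+ * followed by the two 8x8 chroma planes (64 bytes each, stride 8) at offsets
+ * 256 and 320, so the 4x4 luma block at grid position (r, c) starts at
+ * r * 4 * 16 + c * 4; e.g. block 6 (r = 1, c = 2) starts at byte 72.
+ */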
+
+void vp8_build_block_doffsets(MACROBLOCKD *x) {
+ int block;
+
+ for (block = 0; block < 16; ++block) /* y blocks */
+ {
+ x->block[block].offset =
+ (block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4;
+ }
+
+ for (block = 16; block < 20; ++block) /* U and V blocks */
+ {
+ x->block[block + 4].offset = x->block[block].offset =
+ ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4;
+ }
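+
+  /* e.g. luma block 5 sits at grid position (1, 1), so its offset is
+   * 4 * dst.y_stride + 4; U block 17 and V block 21 share the offset
+   * 0 * dst.uv_stride + 4. */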
+}
diff --git a/media/libvpx/libvpx/vp8/common/mfqe.c b/media/libvpx/libvpx/vp8/common/mfqe.c
new file mode 100644
index 0000000000..1fe7363f17
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mfqe.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* MFQE: Multiframe Quality Enhancement
+ * In rate-limited situations keyframes may cause significant visual
+ * artifacts, commonly referred to as "popping." This file implements a
+ * postprocessing algorithm which blends data from the preceding frame when
+ * there is no motion and the Q of the previous frame is lower, which
+ * indicates that it is of higher quality.
+ */
+
+#include "./vp8_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "vp8/common/common.h"
+#include "vp8/common/postproc.h"
+#include "vpx_dsp/variance.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_scale/yv12config.h"
+
+#include <limits.h>
+#include <stdlib.h>
+
+static void filter_by_weight(unsigned char *src, int src_stride,
+ unsigned char *dst, int dst_stride, int block_size,
+ int src_weight) {
+ int dst_weight = (1 << MFQE_PRECISION) - src_weight;
+ int rounding_bit = 1 << (MFQE_PRECISION - 1);
+ int r, c;
+
+ for (r = 0; r < block_size; ++r) {
+ for (c = 0; c < block_size; ++c) {
+ dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) >>
+ MFQE_PRECISION;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
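+
+/* Worked example, assuming MFQE_PRECISION is 4 (so the weights sum to 16):
+ * with src_weight = 12, dst_weight = 4 and rounding_bit = 8,
+ *   src[c] = 200, dst[c] = 100  ->  (2400 + 400 + 8) >> 4 = 175
+ * i.e. a 3:1 blend in favor of the source frame.
+ */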
+
+void vp8_filter_by_weight16x16_c(unsigned char *src, int src_stride,
+ unsigned char *dst, int dst_stride,
+ int src_weight) {
+ filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight);
+}
+
+void vp8_filter_by_weight8x8_c(unsigned char *src, int src_stride,
+ unsigned char *dst, int dst_stride,
+ int src_weight) {
+ filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight);
+}
+
+void vp8_filter_by_weight4x4_c(unsigned char *src, int src_stride,
+ unsigned char *dst, int dst_stride,
+ int src_weight) {
+ filter_by_weight(src, src_stride, dst, dst_stride, 4, src_weight);
+}
+
+static void apply_ifactor(unsigned char *y_src, int y_src_stride,
+ unsigned char *y_dst, int y_dst_stride,
+ unsigned char *u_src, unsigned char *v_src,
+ int uv_src_stride, unsigned char *u_dst,
+ unsigned char *v_dst, int uv_dst_stride,
+ int block_size, int src_weight) {
+ if (block_size == 16) {
+ vp8_filter_by_weight16x16(y_src, y_src_stride, y_dst, y_dst_stride,
+ src_weight);
+ vp8_filter_by_weight8x8(u_src, uv_src_stride, u_dst, uv_dst_stride,
+ src_weight);
+ vp8_filter_by_weight8x8(v_src, uv_src_stride, v_dst, uv_dst_stride,
+ src_weight);
+ } else {
+ vp8_filter_by_weight8x8(y_src, y_src_stride, y_dst, y_dst_stride,
+ src_weight);
+ vp8_filter_by_weight4x4(u_src, uv_src_stride, u_dst, uv_dst_stride,
+ src_weight);
+ vp8_filter_by_weight4x4(v_src, uv_src_stride, v_dst, uv_dst_stride,
+ src_weight);
+ }
+}
+
+static unsigned int int_sqrt(unsigned int x) {
+ unsigned int y = x;
+ unsigned int guess;
+ int p = 1;
+ while (y >>= 1) p++;
+ p >>= 1;
+
+ guess = 0;
+ while (p >= 0) {
+ guess |= (1 << p);
+ if (x < guess * guess) guess -= (1 << p);
+ p--;
+ }
+  /* choose between guess and guess+1 */
+ return guess + (guess * guess + guess + 1 <= x);
+}
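+
+/* Example: int_sqrt(13). The bitwise search settles on guess = 3 (since
+ * 3 * 3 = 9 <= 13 < 16 = 4 * 4), and the final step rounds to the nearest
+ * root: 13 >= 3 * 3 + 3 + 1 = 13, so the function returns 4
+ * (sqrt(13) ~= 3.61).
+ */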
+
+#define USE_SSD
+static void multiframe_quality_enhance_block(
+    int blksize, /* currently the only supported values are 16 and 8 */
+ int qcurr, int qprev, unsigned char *y, unsigned char *u, unsigned char *v,
+ int y_stride, int uv_stride, unsigned char *yd, unsigned char *ud,
+ unsigned char *vd, int yd_stride, int uvd_stride) {
+ static const unsigned char VP8_ZEROS[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+ int uvblksize = blksize >> 1;
+ int qdiff = qcurr - qprev;
+
+ int i;
+ unsigned char *up;
+ unsigned char *udp;
+ unsigned char *vp;
+ unsigned char *vdp;
+
+ unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk;
+
+ if (blksize == 16) {
+ actd = (vpx_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
+ act = (vpx_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
+#ifdef USE_SSD
+ vpx_variance16x16(y, y_stride, yd, yd_stride, &sse);
+ sad = (sse + 128) >> 8;
+ vpx_variance8x8(u, uv_stride, ud, uvd_stride, &sse);
+ usad = (sse + 32) >> 6;
+ vpx_variance8x8(v, uv_stride, vd, uvd_stride, &sse);
+ vsad = (sse + 32) >> 6;
+#else
+ sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8;
+ usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6;
+ vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride) + 32) >> 6;
+#endif
+ } else {
+ actd = (vpx_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
+ act = (vpx_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
+#ifdef USE_SSD
+ vpx_variance8x8(y, y_stride, yd, yd_stride, &sse);
+ sad = (sse + 32) >> 6;
+ vpx_variance4x4(u, uv_stride, ud, uvd_stride, &sse);
+ usad = (sse + 8) >> 4;
+ vpx_variance4x4(v, uv_stride, vd, uvd_stride, &sse);
+ vsad = (sse + 8) >> 4;
+#else
+ sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6;
+ usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4;
+ vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4;
+#endif
+ }
+
+ actrisk = (actd > act * 5);
+
+  /* thr = qdiff/16 + log2(actd) + log4(qprev) */
+ thr = (qdiff >> 4);
+ while (actd >>= 1) thr++;
+ while (qprev >>= 2) thr++;
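+  /* e.g. qdiff = 32, actd = 16, qprev = 64 gives thr = 2 + 4 + 3 = 9;
+   * note that actd and qprev are consumed by the shifts above. */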
+
+#ifdef USE_SSD
+ thrsq = thr * thr;
+ if (sad < thrsq &&
+      /* additional checks for color mismatch and excessive addition of
+       * high frequencies */
+ 4 * usad < thrsq && 4 * vsad < thrsq && !actrisk)
+#else
+ if (sad < thr &&
+      /* additional checks for color mismatch and excessive addition of
+       * high frequencies */
+ 2 * usad < thr && 2 * vsad < thr && !actrisk)
+#endif
+ {
+ int ifactor;
+#ifdef USE_SSD
+    /* TODO: optimize this later to not need square root */
+ sad = int_sqrt(sad);
+#endif
+ ifactor = (sad << MFQE_PRECISION) / thr;
+ ifactor >>= (qdiff >> 5);
+
+ if (ifactor) {
+ apply_ifactor(y, y_stride, yd, yd_stride, u, v, uv_stride, ud, vd,
+ uvd_stride, blksize, ifactor);
+ }
+  } else { /* otherwise copy from the previous frame */
+ if (blksize == 16) {
+ vp8_copy_mem16x16(y, y_stride, yd, yd_stride);
+ vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride);
+ vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride);
+ } else {
+ vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
+ for (up = u, udp = ud, i = 0; i < uvblksize;
+ ++i, up += uv_stride, udp += uvd_stride) {
+ memcpy(udp, up, uvblksize);
+ }
+ for (vp = v, vdp = vd, i = 0; i < uvblksize;
+ ++i, vp += uv_stride, vdp += uvd_stride) {
+ memcpy(vdp, vp, uvblksize);
+ }
+ }
+ }
+}
+
+static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map) {
+ if (mode_info_context->mbmi.mb_skip_coeff) {
+ map[0] = map[1] = map[2] = map[3] = 1;
+ } else if (mode_info_context->mbmi.mode == SPLITMV) {
+ static int ndx[4][4] = {
+ { 0, 1, 4, 5 }, { 2, 3, 6, 7 }, { 8, 9, 12, 13 }, { 10, 11, 14, 15 }
+ };
+ int i, j;
+ vp8_zero(*map);
+ for (i = 0; i < 4; ++i) {
+ map[i] = 1;
+ for (j = 0; j < 4 && map[j]; ++j) {
+ map[i] &= (mode_info_context->bmi[ndx[i][j]].mv.as_mv.row <= 2 &&
+ mode_info_context->bmi[ndx[i][j]].mv.as_mv.col <= 2);
+ }
+ }
+ } else {
+ map[0] = map[1] = map[2] = map[3] =
+ (mode_info_context->mbmi.mode > B_PRED &&
+ abs(mode_info_context->mbmi.mv.as_mv.row) <= 2 &&
+ abs(mode_info_context->mbmi.mv.as_mv.col) <= 2);
+ }
+ return (map[0] + map[1] + map[2] + map[3]);
+}
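+
+/* map[] holds one flag per 8x8 quadrant of the macroblock; a quadrant
+ * qualifies when it is effectively static (coefficients skipped, or all of
+ * its motion vector components at most 2). The return value is the number of
+ * qualifying quadrants, which the caller uses to choose between whole-MB and
+ * per-quadrant enhancement.
+ */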
+
+void vp8_multiframe_quality_enhance(VP8_COMMON *cm) {
+ YV12_BUFFER_CONFIG *show = cm->frame_to_show;
+ YV12_BUFFER_CONFIG *dest = &cm->post_proc_buffer;
+
+ FRAME_TYPE frame_type = cm->frame_type;
+  /* Point at base of the MB MODE_INFO list, which holds motion vectors etc. */
+ const MODE_INFO *mode_info_context = cm->mi;
+ int mb_row;
+ int mb_col;
+ int totmap, map[4];
+ int qcurr = cm->base_qindex;
+ int qprev = cm->postproc_state.last_base_qindex;
+
+ unsigned char *y_ptr, *u_ptr, *v_ptr;
+ unsigned char *yd_ptr, *ud_ptr, *vd_ptr;
+
+ /* Set up the buffer pointers */
+ y_ptr = show->y_buffer;
+ u_ptr = show->u_buffer;
+ v_ptr = show->v_buffer;
+ yd_ptr = dest->y_buffer;
+ ud_ptr = dest->u_buffer;
+ vd_ptr = dest->v_buffer;
+
+  /* postprocess each macroblock */
+ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+ /* if motion is high there will likely be no benefit */
+ if (frame_type == INTER_FRAME) {
+ totmap = qualify_inter_mb(mode_info_context, map);
+ } else {
+ totmap = (frame_type == KEY_FRAME ? 4 : 0);
+ }
+ if (totmap) {
+ if (totmap < 4) {
+ int i, j;
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 2; ++j) {
+ if (map[i * 2 + j]) {
+ multiframe_quality_enhance_block(
+ 8, qcurr, qprev, y_ptr + 8 * (i * show->y_stride + j),
+ u_ptr + 4 * (i * show->uv_stride + j),
+ v_ptr + 4 * (i * show->uv_stride + j), show->y_stride,
+ show->uv_stride, yd_ptr + 8 * (i * dest->y_stride + j),
+ ud_ptr + 4 * (i * dest->uv_stride + j),
+ vd_ptr + 4 * (i * dest->uv_stride + j), dest->y_stride,
+ dest->uv_stride);
+ } else {
+              /* copy an 8x8 block */
+ int k;
+ unsigned char *up = u_ptr + 4 * (i * show->uv_stride + j);
+ unsigned char *udp = ud_ptr + 4 * (i * dest->uv_stride + j);
+ unsigned char *vp = v_ptr + 4 * (i * show->uv_stride + j);
+ unsigned char *vdp = vd_ptr + 4 * (i * dest->uv_stride + j);
+ vp8_copy_mem8x8(
+ y_ptr + 8 * (i * show->y_stride + j), show->y_stride,
+ yd_ptr + 8 * (i * dest->y_stride + j), dest->y_stride);
+ for (k = 0; k < 4; ++k, up += show->uv_stride,
+ udp += dest->uv_stride, vp += show->uv_stride,
+ vdp += dest->uv_stride) {
+ memcpy(udp, up, 4);
+ memcpy(vdp, vp, 4);
+ }
+ }
+ }
+ }
+ } else { /* totmap = 4 */
+ multiframe_quality_enhance_block(
+ 16, qcurr, qprev, y_ptr, u_ptr, v_ptr, show->y_stride,
+ show->uv_stride, yd_ptr, ud_ptr, vd_ptr, dest->y_stride,
+ dest->uv_stride);
+ }
+ } else {
+ vp8_copy_mem16x16(y_ptr, show->y_stride, yd_ptr, dest->y_stride);
+ vp8_copy_mem8x8(u_ptr, show->uv_stride, ud_ptr, dest->uv_stride);
+ vp8_copy_mem8x8(v_ptr, show->uv_stride, vd_ptr, dest->uv_stride);
+ }
+ y_ptr += 16;
+ u_ptr += 8;
+ v_ptr += 8;
+ yd_ptr += 16;
+ ud_ptr += 8;
+ vd_ptr += 8;
+ mode_info_context++; /* step to next MB */
+ }
+
+ y_ptr += show->y_stride * 16 - 16 * cm->mb_cols;
+ u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
+ v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
+ yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols;
+ ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;
+ vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;
+
+ mode_info_context++; /* Skip border mb */
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c b/media/libvpx/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c
new file mode 100644
index 0000000000..1cfd146189
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vpx_mem/vpx_mem.h"
+
+#if HAVE_DSPR2
+void vp8_dequant_idct_add_dspr2(short *input, short *dq, unsigned char *dest,
+ int stride) {
+ int i;
+
+ for (i = 0; i < 16; ++i) {
+ input[i] = dq[i] * input[i];
+ }
+
+ vp8_short_idct4x4llm_dspr2(input, dest, stride, dest, stride);
+
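+  /* clear all 16 coefficients: 16 * sizeof(short) == 32 bytes */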
+ memset(input, 0, 32);
+}
+
+#endif
diff --git a/media/libvpx/libvpx/vp8/common/mips/dspr2/filter_dspr2.c b/media/libvpx/libvpx/vp8/common/mips/dspr2/filter_dspr2.c
new file mode 100644
index 0000000000..b9da52084d
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/dspr2/filter_dspr2.c
@@ -0,0 +1,2767 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include "vp8_rtcd.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
+#define CROP_WIDTH 256
+unsigned char ff_cropTbl[256 + 2 * CROP_WIDTH];
+
+static const unsigned short sub_pel_filterss[8][3] = {
+ { 0, 0, 0 },
+ { 0, 0x0601, 0x7b0c },
+ { 0x0201, 0x0b08, 0x6c24 },
+ { 0, 0x0906, 0x5d32 },
+ { 0x0303, 0x1010, 0x4d4d },
+ { 0, 0x0609, 0x325d },
+ { 0x0102, 0x080b, 0x246c },
+ { 0, 0x0106, 0x0c7b },
+};
+
+static const int sub_pel_filters_int[8][3] = {
+ { 0, 0, 0 },
+ { 0x0000fffa, 0x007b000c, 0xffff0000 },
+ { 0x0002fff5, 0x006c0024, 0xfff80001 },
+ { 0x0000fff7, 0x005d0032, 0xfffa0000 },
+ { 0x0003fff0, 0x004d004d, 0xfff00003 },
+ { 0x0000fffa, 0x0032005d, 0xfff70000 },
+ { 0x0001fff8, 0x0024006c, 0xfff50002 },
+ { 0x0000ffff, 0x000c007b, 0xfffa0000 },
+};
+
+static const int sub_pel_filters_inv[8][3] = {
+ { 0, 0, 0 },
+ { 0xfffa0000, 0x000c007b, 0x0000ffff },
+ { 0xfff50002, 0x0024006c, 0x0001fff8 },
+ { 0xfff70000, 0x0032005d, 0x0000fffa },
+ { 0xfff00003, 0x004d004d, 0x0003fff0 },
+ { 0xfffa0000, 0x005d0032, 0x0000fff7 },
+ { 0xfff80001, 0x006c0024, 0x0002fff5 },
+ { 0xffff0000, 0x007b000c, 0x0000fffa },
+};
+
+/* clang-format off */
+static const int sub_pel_filters_int_tap_4[8][2] = {
+ { 0, 0},
+ { 0xfffa007b, 0x000cffff},
+ { 0, 0},
+ { 0xfff7005d, 0x0032fffa},
+ { 0, 0},
+ { 0xfffa0032, 0x005dfff7},
+ { 0, 0},
+ { 0xffff000c, 0x007bfffa},
+};
+
+
+static const int sub_pel_filters_inv_tap_4[8][2] = {
+ { 0, 0},
+ { 0x007bfffa, 0xffff000c},
+ { 0, 0},
+ { 0x005dfff7, 0xfffa0032},
+ { 0, 0},
+ { 0x0032fffa, 0xfff7005d},
+ { 0, 0},
+ { 0x000cffff, 0xfffa007b},
+};
+/* clang-format on */
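+
+/* Each 32-bit entry packs two signed 16-bit taps of VP8's 6-tap sub-pel
+ * filters so that dpa.w.ph can multiply-accumulate a pixel pair per
+ * instruction. For example, row 2 of sub_pel_filters_int,
+ * { 0x0002fff5, 0x006c0024, 0xfff80001 }, encodes the filter
+ * { 2, -11, 108, 36, -8, 1 }. The _inv tables hold the same pairs with the
+ * halves swapped, and the _tap_4 tables keep only the middle four taps for
+ * the offsets whose outer taps are zero.
+ */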
+
+inline void prefetch_load(unsigned char *src) {
+ __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src));
+}
+
+inline void prefetch_store(unsigned char *dst) {
+ __asm__ __volatile__("pref 1, 0(%[dst]) \n\t" : : [dst] "r"(dst));
+}
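+
+/* MIPS pref hint 0 prefetches for a load and hint 1 for a store; the two
+ * wrappers differ only in that hint field.
+ */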
+
+void dsputil_static_init(void) {
+ int i;
+
+ for (i = 0; i < 256; ++i) ff_cropTbl[i + CROP_WIDTH] = i;
+
+ for (i = 0; i < CROP_WIDTH; ++i) {
+ ff_cropTbl[i] = 0;
+ ff_cropTbl[i + CROP_WIDTH + 256] = 255;
+ }
+}
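+
+/* ff_cropTbl is a branch-free clamping table: cm = ff_cropTbl + CROP_WIDTH
+ * maps any index in [-256, 511] to the range 0..255, e.g. cm[-5] == 0,
+ * cm[100] == 100 and cm[300] == 255. The filter code below indexes it with
+ * lbux to clamp accumulator results in a single instruction.
+ */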
+
+void vp8_filter_block2d_first_pass_4(unsigned char *RESTRICT src_ptr,
+ unsigned char *RESTRICT dst_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int output_height, int xoffset,
+ int pitch) {
+ unsigned int i;
+ int Temp1, Temp2, Temp3, Temp4;
+
+ unsigned int vector4a = 64;
+ int vector1b, vector2b, vector3b;
+ unsigned int tp1, tp2, tn1, tn2;
+ unsigned int p1, p2, p3;
+ unsigned int n1, n2, n3;
+ unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+ vector3b = sub_pel_filters_inv[xoffset][2];
+
+ /* if (xoffset == 0) we don't need any filtering */
+ if (vector3b == 0) {
+ for (i = 0; i < output_height; ++i) {
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr + src_pixels_per_line);
+ dst_ptr[0] = src_ptr[0];
+ dst_ptr[1] = src_ptr[1];
+ dst_ptr[2] = src_ptr[2];
+ dst_ptr[3] = src_ptr[3];
+
+ /* next row... */
+ src_ptr += src_pixels_per_line;
+ dst_ptr += 4;
+ }
+ } else {
+ if (vector3b > 65536) {
+ /* 6 tap filter */
+
+ vector1b = sub_pel_filters_inv[xoffset][0];
+ vector2b = sub_pel_filters_inv[xoffset][1];
+
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr + src_pixels_per_line);
+
+ for (i = output_height; i--;) {
+      /* apply filter with vector pairs */
+ __asm__ __volatile__(
+ "ulw %[tp1], -2(%[src_ptr]) \n\t"
+ "ulw %[tp2], 2(%[src_ptr]) \n\t"
+
+ /* even 1. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[p1], %[tp1] \n\t"
+ "preceu.ph.qbl %[p2], %[tp1] \n\t"
+ "preceu.ph.qbr %[p3], %[tp2] \n\t"
+ "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
+
+ /* even 2. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[p1], %[tp2] \n\t"
+ "balign %[tp2], %[tp1], 3 \n\t"
+ "extp %[Temp1], $ac3, 9 \n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t"
+
+ /* odd 1. pixel */
+ "ulw %[tn2], 3(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[n1], %[tp2] \n\t"
+ "preceu.ph.qbl %[n2], %[tp2] \n\t"
+ "preceu.ph.qbr %[n3], %[tn2] \n\t"
+ "extp %[Temp3], $ac2, 9 \n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[n3], %[vector3b] \n\t"
+
+              /* odd 2. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[n1], %[tn2] \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t"
+ "extp %[Temp4], $ac2, 9 \n\t"
+
+ /* clamp */
+ "lbux %[tp1], %[Temp1](%[cm]) \n\t"
+ "lbux %[tn1], %[Temp2](%[cm]) \n\t"
+ "lbux %[tp2], %[Temp3](%[cm]) \n\t"
+ "lbux %[n2], %[Temp4](%[cm]) \n\t"
+
+ /* store bytes */
+ "sb %[tp1], 0(%[dst_ptr]) \n\t"
+ "sb %[tn1], 1(%[dst_ptr]) \n\t"
+ "sb %[tp2], 2(%[dst_ptr]) \n\t"
+ "sb %[n2], 3(%[dst_ptr]) \n\t"
+
+ : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
+ [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
+ [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3),
+ [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=&r"(Temp4)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector4a] "r"(vector4a), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr),
+ [vector3b] "r"(vector3b), [src_ptr] "r"(src_ptr));
+
+ /* Next row... */
+ src_ptr += src_pixels_per_line;
+ dst_ptr += pitch;
+ }
+ } else {
+ /* 4 tap filter */
+
+ vector1b = sub_pel_filters_inv_tap_4[xoffset][0];
+ vector2b = sub_pel_filters_inv_tap_4[xoffset][1];
+
+ for (i = output_height; i--;) {
+        /* apply filter with vector pairs */
+ __asm__ __volatile__(
+ "ulw %[tp1], -1(%[src_ptr]) \n\t"
+ "ulw %[tp2], 3(%[src_ptr]) \n\t"
+
+ /* even 1. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[p1], %[tp1] \n\t"
+ "preceu.ph.qbl %[p2], %[tp1] \n\t"
+ "preceu.ph.qbr %[p3], %[tp2] \n\t"
+ "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
+
+ /* even 2. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
+ "extp %[Temp1], $ac3, 9 \n\t"
+
+ /* odd 1. pixel */
+ "srl %[tn1], %[tp2], 8 \n\t"
+ "balign %[tp2], %[tp1], 3 \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[n1], %[tp2] \n\t"
+ "preceu.ph.qbl %[n2], %[tp2] \n\t"
+ "preceu.ph.qbr %[n3], %[tn1] \n\t"
+ "extp %[Temp3], $ac2, 9 \n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t"
+
+ /* odd 2. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t"
+ "extp %[Temp4], $ac2, 9 \n\t"
+
+ /* clamp and store results */
+ "lbux %[tp1], %[Temp1](%[cm]) \n\t"
+ "lbux %[tn1], %[Temp2](%[cm]) \n\t"
+ "lbux %[tp2], %[Temp3](%[cm]) \n\t"
+ "sb %[tp1], 0(%[dst_ptr]) \n\t"
+ "sb %[tn1], 1(%[dst_ptr]) \n\t"
+ "lbux %[n2], %[Temp4](%[cm]) \n\t"
+ "sb %[tp2], 2(%[dst_ptr]) \n\t"
+ "sb %[n2], 3(%[dst_ptr]) \n\t"
+
+ : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
+ [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [n1] "=&r"(n1),
+ [n2] "=&r"(n2), [n3] "=&r"(n3), [Temp1] "=&r"(Temp1),
+ [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector4a] "r"(vector4a), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr),
+ [src_ptr] "r"(src_ptr));
+ /* Next row... */
+ src_ptr += src_pixels_per_line;
+ dst_ptr += pitch;
+ }
+ }
+ }
+}
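+
+/* The asm blocks above compute four output pixels at a time: the even pair
+ * accumulates in $ac3/$ac2 while balign combines the unaligned ulw words to
+ * form the inputs one byte over for the odd phase. Each accumulator is
+ * seeded with the rounding constant 64 (vector4a) before the dpa.w.ph
+ * multiply-accumulates.
+ */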
+
+void vp8_filter_block2d_first_pass_8_all(unsigned char *RESTRICT src_ptr,
+ unsigned char *RESTRICT dst_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int output_height,
+ int xoffset, int pitch) {
+ unsigned int i;
+ int Temp1, Temp2, Temp3, Temp4;
+
+ unsigned int vector4a = 64;
+ unsigned int vector1b, vector2b, vector3b;
+ unsigned int tp1, tp2, tn1, tn2;
+ unsigned int p1, p2, p3, p4;
+ unsigned int n1, n2, n3, n4;
+
+ unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+ /* if (xoffset == 0) we don't need any filtering */
+ if (xoffset == 0) {
+ for (i = 0; i < output_height; ++i) {
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr + src_pixels_per_line);
+
+ dst_ptr[0] = src_ptr[0];
+ dst_ptr[1] = src_ptr[1];
+ dst_ptr[2] = src_ptr[2];
+ dst_ptr[3] = src_ptr[3];
+ dst_ptr[4] = src_ptr[4];
+ dst_ptr[5] = src_ptr[5];
+ dst_ptr[6] = src_ptr[6];
+ dst_ptr[7] = src_ptr[7];
+
+ /* next row... */
+ src_ptr += src_pixels_per_line;
+ dst_ptr += 8;
+ }
+ } else {
+ vector3b = sub_pel_filters_inv[xoffset][2];
+
+ if (vector3b > 65536) {
+ /* 6 tap filter */
+
+ vector1b = sub_pel_filters_inv[xoffset][0];
+ vector2b = sub_pel_filters_inv[xoffset][1];
+
+ for (i = output_height; i--;) {
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr + src_pixels_per_line);
+
+        /* apply filter with vector pairs */
+ __asm__ __volatile__(
+ "ulw %[tp1], -2(%[src_ptr]) \n\t"
+ "ulw %[tp2], 2(%[src_ptr]) \n\t"
+
+ /* even 1. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[p1], %[tp1] \n\t"
+ "preceu.ph.qbl %[p2], %[tp1] \n\t"
+ "preceu.ph.qbr %[p3], %[tp2] \n\t"
+ "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
+
+ /* even 2. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[p1], %[tp2] \n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t"
+
+ "balign %[tp2], %[tp1], 3 \n\t"
+ "extp %[Temp1], $ac3, 9 \n\t"
+ "ulw %[tn2], 3(%[src_ptr]) \n\t"
+
+ /* odd 1. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[n1], %[tp2] \n\t"
+ "preceu.ph.qbl %[n2], %[tp2] \n\t"
+ "preceu.ph.qbr %[n3], %[tn2] \n\t"
+ "extp %[Temp3], $ac2, 9 \n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[n3], %[vector3b] \n\t"
+
+ /* odd 2. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[n1], %[tn2] \n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t"
+ "ulw %[tp1], 6(%[src_ptr]) \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[p2], %[tp1] \n\t"
+ "extp %[Temp4], $ac2, 9 \n\t"
+
+ : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn2] "=&r"(tn2),
+ [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [n1] "=&r"(n1),
+ [n2] "=&r"(n2), [n3] "=&r"(n3), [Temp1] "=&r"(Temp1),
+ [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), [Temp4] "=r"(Temp4)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector4a] "r"(vector4a), [vector3b] "r"(vector3b),
+ [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ dst_ptr[0] = cm[Temp1];
+ dst_ptr[1] = cm[Temp2];
+ dst_ptr[2] = cm[Temp3];
+ dst_ptr[3] = cm[Temp4];
+
+ /* next 4 pixels */
+ __asm__ __volatile__(
+ /* even 3. pixel */
+ "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[p1], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[p2], %[vector3b] \n\t"
+
+ /* even 4. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[p4], %[tp1] \n\t"
+ "dpa.w.ph $ac2, %[p1], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t"
+
+ "ulw %[tn1], 7(%[src_ptr]) \n\t"
+ "extp %[Temp1], $ac3, 9 \n\t"
+
+ /* odd 3. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[n2], %[tn1] \n\t"
+ "dpa.w.ph $ac3, %[n3], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[n2], %[vector3b] \n\t"
+ "extp %[Temp3], $ac2, 9 \n\t"
+
+ /* odd 4. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[n4], %[tn1] \n\t"
+ "dpa.w.ph $ac2, %[n1], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[n4], %[vector3b] \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+ "extp %[Temp4], $ac2, 9 \n\t"
+
+ : [tn1] "=&r"(tn1), [n2] "=&r"(n2), [p4] "=&r"(p4), [n4] "=&r"(n4),
+ [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=r"(Temp4)
+ : [tp1] "r"(tp1), [vector1b] "r"(vector1b), [p2] "r"(p2),
+ [vector2b] "r"(vector2b), [n1] "r"(n1), [p1] "r"(p1),
+ [vector4a] "r"(vector4a), [vector3b] "r"(vector3b), [p3] "r"(p3),
+ [n3] "r"(n3), [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ dst_ptr[4] = cm[Temp1];
+ dst_ptr[5] = cm[Temp2];
+ dst_ptr[6] = cm[Temp3];
+ dst_ptr[7] = cm[Temp4];
+
+ src_ptr += src_pixels_per_line;
+ dst_ptr += pitch;
+ }
+ } else {
+ /* 4 tap filter */
+
+ vector1b = sub_pel_filters_inv_tap_4[xoffset][0];
+ vector2b = sub_pel_filters_inv_tap_4[xoffset][1];
+
+ for (i = output_height; i--;) {
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr + src_pixels_per_line);
+
+        /* apply filter with vector pairs */
+ __asm__ __volatile__(
+ "ulw %[tp1], -1(%[src_ptr]) \n\t"
+
+ /* even 1. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[p1], %[tp1] \n\t"
+ "preceu.ph.qbl %[p2], %[tp1] \n\t"
+ "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
+
+ "ulw %[tp2], 3(%[src_ptr]) \n\t"
+
+ /* even 2. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbr %[p3], %[tp2] \n\t"
+ "preceu.ph.qbl %[p4], %[tp2] \n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
+ "extp %[Temp1], $ac3, 9 \n\t"
+
+ "balign %[tp2], %[tp1], 3 \n\t"
+
+ /* odd 1. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[n1], %[tp2] \n\t"
+ "preceu.ph.qbl %[n2], %[tp2] \n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t"
+ "extp %[Temp3], $ac2, 9 \n\t"
+
+ "ulw %[tn2], 4(%[src_ptr]) \n\t"
+
+ /* odd 2. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbr %[n3], %[tn2] \n\t"
+ "preceu.ph.qbl %[n4], %[tn2] \n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t"
+ "ulw %[tp1], 7(%[src_ptr]) \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp4], $ac2, 9 \n\t"
+
+ : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn2] "=&r"(tn2),
+ [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
+ [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3), [n4] "=&r"(n4),
+ [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=r"(Temp4)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ dst_ptr[0] = cm[Temp1];
+ dst_ptr[1] = cm[Temp2];
+ dst_ptr[2] = cm[Temp3];
+ dst_ptr[3] = cm[Temp4];
+
+ /* next 4 pixels */
+ __asm__ __volatile__(
+ /* even 3. pixel */
+ "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[p4], %[vector2b] \n\t"
+
+ /* even 4. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbr %[p2], %[tp1] \n\t"
+ "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t"
+ "extp %[Temp1], $ac3, 9 \n\t"
+
+ /* odd 3. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "dpa.w.ph $ac3, %[n3], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[n4], %[vector2b] \n\t"
+ "ulw %[tn1], 8(%[src_ptr]) \n\t"
+ "extp %[Temp3], $ac2, 9 \n\t"
+
+ /* odd 4. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbr %[n2], %[tn1] \n\t"
+ "dpa.w.ph $ac2, %[n4], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector2b] \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+ "extp %[Temp4], $ac2, 9 \n\t"
+
+ : [tn1] "=&r"(tn1), [p2] "=&r"(p2), [n2] "=&r"(n2),
+ [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=r"(Temp4)
+ : [tp1] "r"(tp1), [p3] "r"(p3), [p4] "r"(p4),
+ [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr), [n3] "r"(n3),
+ [n4] "r"(n4));
+
+ /* clamp and store results */
+ dst_ptr[4] = cm[Temp1];
+ dst_ptr[5] = cm[Temp2];
+ dst_ptr[6] = cm[Temp3];
+ dst_ptr[7] = cm[Temp4];
+
+ /* next row... */
+ src_ptr += src_pixels_per_line;
+ dst_ptr += pitch;
+ }
+ }
+ }
+}
+
+void vp8_filter_block2d_first_pass16_6tap(unsigned char *RESTRICT src_ptr,
+ unsigned char *RESTRICT dst_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int output_height,
+ int xoffset, int pitch) {
+ unsigned int i;
+ int Temp1, Temp2, Temp3, Temp4;
+
+ unsigned int vector4a;
+ unsigned int vector1b, vector2b, vector3b;
+ unsigned int tp1, tp2, tn1, tn2;
+ unsigned int p1, p2, p3, p4;
+ unsigned int n1, n2, n3, n4;
+ unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+ vector1b = sub_pel_filters_inv[xoffset][0];
+ vector2b = sub_pel_filters_inv[xoffset][1];
+ vector3b = sub_pel_filters_inv[xoffset][2];
+ vector4a = 64;
+
+ for (i = output_height; i--;) {
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr + src_pixels_per_line);
+
+    /* apply filter with vector pairs */
+ __asm__ __volatile__(
+ "ulw %[tp1], -2(%[src_ptr]) \n\t"
+ "ulw %[tp2], 2(%[src_ptr]) \n\t"
+
+ /* even 1. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[p1], %[tp1] \n\t"
+ "preceu.ph.qbl %[p2], %[tp1] \n\t"
+ "preceu.ph.qbr %[p3], %[tp2] \n\t"
+ "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
+
+ /* even 2. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[p1], %[tp2] \n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t"
+
+ "balign %[tp2], %[tp1], 3 \n\t"
+ "ulw %[tn2], 3(%[src_ptr]) \n\t"
+ "extp %[Temp1], $ac3, 9 \n\t"
+
+ /* odd 1. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[n1], %[tp2] \n\t"
+ "preceu.ph.qbl %[n2], %[tp2] \n\t"
+ "preceu.ph.qbr %[n3], %[tn2] \n\t"
+ "extp %[Temp3], $ac2, 9 \n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[n3], %[vector3b] \n\t"
+
+ /* odd 2. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[n1], %[tn2] \n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t"
+ "ulw %[tp1], 6(%[src_ptr]) \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[p2], %[tp1] \n\t"
+ "extp %[Temp4], $ac2, 9 \n\t"
+
+ : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn2] "=&r"(tn2), [p1] "=&r"(p1),
+ [p2] "=&r"(p2), [p3] "=&r"(p3), [n1] "=&r"(n1), [n2] "=&r"(n2),
+ [n3] "=&r"(n3), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+ [Temp3] "=&r"(Temp3), [Temp4] "=r"(Temp4)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector4a] "r"(vector4a), [vector3b] "r"(vector3b),
+ [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ dst_ptr[0] = cm[Temp1];
+ dst_ptr[1] = cm[Temp2];
+ dst_ptr[2] = cm[Temp3];
+ dst_ptr[3] = cm[Temp4];
+
+ /* next 4 pixels */
+ __asm__ __volatile__(
+ /* even 3. pixel */
+ "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[p1], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[p2], %[vector3b] \n\t"
+
+ /* even 4. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[p4], %[tp1] \n\t"
+ "dpa.w.ph $ac2, %[p1], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t"
+ "ulw %[tn1], 7(%[src_ptr]) \n\t"
+ "extp %[Temp1], $ac3, 9 \n\t"
+
+ /* odd 3. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[n2], %[tn1] \n\t"
+ "dpa.w.ph $ac3, %[n3], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[n2], %[vector3b] \n\t"
+ "extp %[Temp3], $ac2, 9 \n\t"
+
+ /* odd 4. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[n4], %[tn1] \n\t"
+ "dpa.w.ph $ac2, %[n1], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[n4], %[vector3b] \n\t"
+ "ulw %[tp2], 10(%[src_ptr]) \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[p1], %[tp2] \n\t"
+ "extp %[Temp4], $ac2, 9 \n\t"
+
+ : [tn1] "=&r"(tn1), [tp2] "=&r"(tp2), [n2] "=&r"(n2), [p4] "=&r"(p4),
+ [n4] "=&r"(n4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+ [Temp3] "=&r"(Temp3), [Temp4] "=r"(Temp4), [p1] "+r"(p1)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), [tp1] "r"(tp1),
+ [n1] "r"(n1), [vector4a] "r"(vector4a), [p2] "r"(p2),
+ [vector3b] "r"(vector3b), [p3] "r"(p3), [n3] "r"(n3),
+ [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ dst_ptr[4] = cm[Temp1];
+ dst_ptr[5] = cm[Temp2];
+ dst_ptr[6] = cm[Temp3];
+ dst_ptr[7] = cm[Temp4];
+
+ /* next 4 pixels */
+ __asm__ __volatile__(
+ /* even 5. pixel */
+ "dpa.w.ph $ac3, %[p2], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[p4], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[p1], %[vector3b] \n\t"
+
+ /* even 6. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[p3], %[tp2] \n\t"
+ "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[p1], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[p3], %[vector3b] \n\t"
+
+ "ulw %[tn1], 11(%[src_ptr]) \n\t"
+ "extp %[Temp1], $ac3, 9 \n\t"
+
+ /* odd 5. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[n1], %[tn1] \n\t"
+ "dpa.w.ph $ac3, %[n2], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[n4], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector3b] \n\t"
+ "extp %[Temp3], $ac2, 9 \n\t"
+
+ /* odd 6. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[n3], %[tn1] \n\t"
+ "dpa.w.ph $ac2, %[n4], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[n1], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[n3], %[vector3b] \n\t"
+ "ulw %[tp1], 14(%[src_ptr]) \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[p4], %[tp1] \n\t"
+ "extp %[Temp4], $ac2, 9 \n\t"
+
+ : [tn1] "=&r"(tn1), [tp1] "=&r"(tp1), [n1] "=&r"(n1), [p3] "=&r"(p3),
+ [n3] "=&r"(n3), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
+ [Temp3] "=&r"(Temp3), [Temp4] "=r"(Temp4), [p4] "+r"(p4)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), [tp2] "r"(tp2),
+ [p2] "r"(p2), [n2] "r"(n2), [n4] "r"(n4), [p1] "r"(p1),
+ [src_ptr] "r"(src_ptr), [vector4a] "r"(vector4a),
+ [vector3b] "r"(vector3b));
+
+ /* clamp and store results */
+ dst_ptr[8] = cm[Temp1];
+ dst_ptr[9] = cm[Temp2];
+ dst_ptr[10] = cm[Temp3];
+ dst_ptr[11] = cm[Temp4];
+
+ /* next 4 pixels */
+ __asm__ __volatile__(
+ /* even 7. pixel */
+ "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[p3], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[p4], %[vector3b] \n\t"
+
+ /* even 8. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[p2], %[tp1] \n\t"
+ "dpa.w.ph $ac2, %[p3], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[p4], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector3b] \n\t"
+ "ulw %[tn1], 15(%[src_ptr]) \n\t"
+ "extp %[Temp1], $ac3, 9 \n\t"
+
+ /* odd 7. pixel */
+ "mtlo %[vector4a], $ac3 \n\t"
+ "preceu.ph.qbr %[n4], %[tn1] \n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
+ "dpa.w.ph $ac3, %[n3], %[vector2b] \n\t"
+ "dpa.w.ph $ac3, %[n4], %[vector3b] \n\t"
+ "extp %[Temp3], $ac2, 9 \n\t"
+
+ /* odd 8. pixel */
+ "mtlo %[vector4a], $ac2 \n\t"
+ "preceu.ph.qbl %[n2], %[tn1] \n\t"
+ "dpa.w.ph $ac2, %[n3], %[vector1b] \n\t"
+ "dpa.w.ph $ac2, %[n4], %[vector2b] \n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector3b] \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+ "extp %[Temp4], $ac2, 9 \n\t"
+
+ /* clamp and store results */
+ "lbux %[tp1], %[Temp1](%[cm]) \n\t"
+ "lbux %[tn1], %[Temp2](%[cm]) \n\t"
+ "lbux %[p2], %[Temp3](%[cm]) \n\t"
+ "sb %[tp1], 12(%[dst_ptr]) \n\t"
+ "sb %[tn1], 13(%[dst_ptr]) \n\t"
+ "lbux %[n2], %[Temp4](%[cm]) \n\t"
+ "sb %[p2], 14(%[dst_ptr]) \n\t"
+ "sb %[n2], 15(%[dst_ptr]) \n\t"
+
+ : [tn1] "=&r"(tn1), [p2] "=&r"(p2), [n2] "=&r"(n2), [n4] "=&r"(n4),
+ [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=r"(Temp4), [tp1] "+r"(tp1)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), [p4] "r"(p4),
+ [n1] "r"(n1), [p1] "r"(p1), [vector4a] "r"(vector4a),
+ [vector3b] "r"(vector3b), [p3] "r"(p3), [n3] "r"(n3),
+ [src_ptr] "r"(src_ptr), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
+
+ src_ptr += src_pixels_per_line;
+ dst_ptr += pitch;
+ }
+}
+
+void vp8_filter_block2d_first_pass16_0(unsigned char *RESTRICT src_ptr,
+ unsigned char *RESTRICT output_ptr,
+ unsigned int src_pixels_per_line) {
+ int Temp1, Temp2, Temp3, Temp4;
+ int i;
+
+  /* prefetch output_ptr data to cache memory */
+ prefetch_store(output_ptr + 32);
+
+ /* copy memory from src buffer to dst buffer */
+ for (i = 0; i < 7; ++i) {
+ __asm__ __volatile__(
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "ulw %[Temp3], 8(%[src_ptr]) \n\t"
+ "ulw %[Temp4], 12(%[src_ptr]) \n\t"
+ "sw %[Temp1], 0(%[output_ptr]) \n\t"
+ "sw %[Temp2], 4(%[output_ptr]) \n\t"
+ "sw %[Temp3], 8(%[output_ptr]) \n\t"
+ "sw %[Temp4], 12(%[output_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=&r"(Temp4), [src_ptr] "+r"(src_ptr)
+ : [src_pixels_per_line] "r"(src_pixels_per_line), [output_ptr] "r"(
+ output_ptr));
+
+ __asm__ __volatile__(
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "ulw %[Temp3], 8(%[src_ptr]) \n\t"
+ "ulw %[Temp4], 12(%[src_ptr]) \n\t"
+ "sw %[Temp1], 16(%[output_ptr]) \n\t"
+ "sw %[Temp2], 20(%[output_ptr]) \n\t"
+ "sw %[Temp3], 24(%[output_ptr]) \n\t"
+ "sw %[Temp4], 28(%[output_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=&r"(Temp4), [src_ptr] "+r"(src_ptr)
+ : [src_pixels_per_line] "r"(src_pixels_per_line), [output_ptr] "r"(
+ output_ptr));
+
+ __asm__ __volatile__(
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "ulw %[Temp3], 8(%[src_ptr]) \n\t"
+ "ulw %[Temp4], 12(%[src_ptr]) \n\t"
+ "sw %[Temp1], 32(%[output_ptr]) \n\t"
+ "sw %[Temp2], 36(%[output_ptr]) \n\t"
+ "sw %[Temp3], 40(%[output_ptr]) \n\t"
+ "sw %[Temp4], 44(%[output_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=&r"(Temp4), [src_ptr] "+r"(src_ptr)
+ : [src_pixels_per_line] "r"(src_pixels_per_line), [output_ptr] "r"(
+ output_ptr));
+
+ output_ptr += 48;
+ }
+}
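+
+/* Seven iterations of three 16-byte rows copy 21 rows at a 16-byte stride:
+ * the 16 block rows plus the 5 extra context rows a 6-tap vertical pass
+ * reads above and below the block.
+ */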
+
+void vp8_filter_block2d_first_pass16_4tap(
+ unsigned char *RESTRICT src_ptr, unsigned char *RESTRICT output_ptr,
+ unsigned int src_pixels_per_line, unsigned int output_width,
+ unsigned int output_height, int xoffset, int yoffset,
+ unsigned char *RESTRICT dst_ptr, int pitch) {
+ unsigned int i, j;
+ int Temp1, Temp2, Temp3, Temp4;
+
+ unsigned int vector4a;
+ int vector1b, vector2b;
+ unsigned int tp1, tp2, tp3, tn1;
+ unsigned int p1, p2, p3;
+ unsigned int n1, n2, n3;
+ unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+ vector4a = 64;
+
+ vector1b = sub_pel_filters_inv_tap_4[xoffset][0];
+ vector2b = sub_pel_filters_inv_tap_4[xoffset][1];
+
+  /* if (yoffset == 0) we don't need a temp buffer; data is stored in dst_ptr */
+ if (yoffset == 0) {
+ output_height -= 5;
+ src_ptr += (src_pixels_per_line + src_pixels_per_line);
+
+ for (i = output_height; i--;) {
+ __asm__ __volatile__("ulw %[tp3], -1(%[src_ptr]) \n\t"
+ : [tp3] "=&r"(tp3)
+ : [src_ptr] "r"(src_ptr));
+
+ /* processing 4 adjacent pixels */
+ for (j = 0; j < 16; j += 4) {
+        /* apply filter with vector pairs */
+ __asm__ __volatile__(
+ "ulw %[tp2], 3(%[src_ptr]) "
+ "\n\t"
+ "move %[tp1], %[tp3] "
+ "\n\t"
+
+ /* even 1. pixel */
+ "mtlo %[vector4a], $ac3 "
+ "\n\t"
+ "mthi $0, $ac3 "
+ "\n\t"
+ "move %[tp3], %[tp2] "
+ "\n\t"
+ "preceu.ph.qbr %[p1], %[tp1] "
+ "\n\t"
+ "preceu.ph.qbl %[p2], %[tp1] "
+ "\n\t"
+ "preceu.ph.qbr %[p3], %[tp2] "
+ "\n\t"
+ "dpa.w.ph $ac3, %[p1], %[vector1b] "
+ "\n\t"
+ "dpa.w.ph $ac3, %[p2], %[vector2b] "
+ "\n\t"
+
+ /* even 2. pixel */
+ "mtlo %[vector4a], $ac2 "
+ "\n\t"
+ "mthi $0, $ac2 "
+ "\n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector1b] "
+ "\n\t"
+ "dpa.w.ph $ac2, %[p3], %[vector2b] "
+ "\n\t"
+ "extr.w %[Temp1], $ac3, 7 "
+ "\n\t"
+
+ /* odd 1. pixel */
+ "ulw %[tn1], 4(%[src_ptr]) "
+ "\n\t"
+ "balign %[tp2], %[tp1], 3 "
+ "\n\t"
+ "mtlo %[vector4a], $ac3 "
+ "\n\t"
+ "mthi $0, $ac3 "
+ "\n\t"
+ "preceu.ph.qbr %[n1], %[tp2] "
+ "\n\t"
+ "preceu.ph.qbl %[n2], %[tp2] "
+ "\n\t"
+ "preceu.ph.qbr %[n3], %[tn1] "
+ "\n\t"
+ "extr.w %[Temp3], $ac2, 7 "
+ "\n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector1b] "
+ "\n\t"
+ "dpa.w.ph $ac3, %[n2], %[vector2b] "
+ "\n\t"
+
+ /* odd 2. pixel */
+ "mtlo %[vector4a], $ac2 "
+ "\n\t"
+ "mthi $0, $ac2 "
+ "\n\t"
+ "extr.w %[Temp2], $ac3, 7 "
+ "\n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector1b] "
+ "\n\t"
+ "dpa.w.ph $ac2, %[n3], %[vector2b] "
+ "\n\t"
+ "extr.w %[Temp4], $ac2, 7 "
+ "\n\t"
+
+ /* clamp and store results */
+ "lbux %[tp1], %[Temp1](%[cm]) "
+ "\n\t"
+ "lbux %[tn1], %[Temp2](%[cm]) "
+ "\n\t"
+ "lbux %[tp2], %[Temp3](%[cm]) "
+ "\n\t"
+ "sb %[tp1], 0(%[dst_ptr]) "
+ "\n\t"
+ "sb %[tn1], 1(%[dst_ptr]) "
+ "\n\t"
+ "lbux %[n2], %[Temp4](%[cm]) "
+ "\n\t"
+ "sb %[tp2], 2(%[dst_ptr]) "
+ "\n\t"
+ "sb %[n2], 3(%[dst_ptr]) "
+ "\n\t"
+
+ : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
+ [tn1] "=&r"(tn1), [p1] "=&r"(p1), [p2] "=&r"(p2), [n1] "=&r"(n1),
+ [n2] "=&r"(n2), [n3] "=&r"(n3), [Temp1] "=&r"(Temp1),
+ [Temp2] "=&r"(Temp2), [p3] "=&r"(p3), [Temp3] "=&r"(Temp3),
+ [Temp4] "=&r"(Temp4)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector4a] "r"(vector4a), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr),
+ [src_ptr] "r"(src_ptr));
+
+ src_ptr += 4;
+ }
+
+ /* Next row... */
+ src_ptr += src_pixels_per_line - 16;
+ dst_ptr += pitch;
+ }
+ } else {
+ for (i = output_height; i--;) {
+ /* processing 4 adjacent pixels */
+ for (j = 0; j < 16; j += 4) {
+        /* apply filter with vector pairs */
+ __asm__ __volatile__(
+ "ulw %[tp1], -1(%[src_ptr]) "
+ "\n\t"
+ "ulw %[tp2], 3(%[src_ptr]) "
+ "\n\t"
+
+ /* even 1. pixel */
+ "mtlo %[vector4a], $ac3 "
+ "\n\t"
+ "mthi $0, $ac3 "
+ "\n\t"
+ "preceu.ph.qbr %[p1], %[tp1] "
+ "\n\t"
+ "preceu.ph.qbl %[p2], %[tp1] "
+ "\n\t"
+ "preceu.ph.qbr %[p3], %[tp2] "
+ "\n\t"
+ "dpa.w.ph $ac3, %[p1], %[vector1b] "
+ "\n\t"
+ "dpa.w.ph $ac3, %[p2], %[vector2b] "
+ "\n\t"
+
+ /* even 2. pixel */
+ "mtlo %[vector4a], $ac2 "
+ "\n\t"
+ "mthi $0, $ac2 "
+ "\n\t"
+ "dpa.w.ph $ac2, %[p2], %[vector1b] "
+ "\n\t"
+ "dpa.w.ph $ac2, %[p3], %[vector2b] "
+ "\n\t"
+ "extr.w %[Temp1], $ac3, 7 "
+ "\n\t"
+
+ /* odd 1. pixel */
+ "ulw %[tn1], 4(%[src_ptr]) "
+ "\n\t"
+ "balign %[tp2], %[tp1], 3 "
+ "\n\t"
+ "mtlo %[vector4a], $ac3 "
+ "\n\t"
+ "mthi $0, $ac3 "
+ "\n\t"
+ "preceu.ph.qbr %[n1], %[tp2] "
+ "\n\t"
+ "preceu.ph.qbl %[n2], %[tp2] "
+ "\n\t"
+ "preceu.ph.qbr %[n3], %[tn1] "
+ "\n\t"
+ "extr.w %[Temp3], $ac2, 7 "
+ "\n\t"
+ "dpa.w.ph $ac3, %[n1], %[vector1b] "
+ "\n\t"
+ "dpa.w.ph $ac3, %[n2], %[vector2b] "
+ "\n\t"
+
+ /* odd 2. pixel */
+ "mtlo %[vector4a], $ac2 "
+ "\n\t"
+ "mthi $0, $ac2 "
+ "\n\t"
+ "extr.w %[Temp2], $ac3, 7 "
+ "\n\t"
+ "dpa.w.ph $ac2, %[n2], %[vector1b] "
+ "\n\t"
+ "dpa.w.ph $ac2, %[n3], %[vector2b] "
+ "\n\t"
+ "extr.w %[Temp4], $ac2, 7 "
+ "\n\t"
+
+ /* clamp and store results */
+ "lbux %[tp1], %[Temp1](%[cm]) "
+ "\n\t"
+ "lbux %[tn1], %[Temp2](%[cm]) "
+ "\n\t"
+ "lbux %[tp2], %[Temp3](%[cm]) "
+ "\n\t"
+ "sb %[tp1], 0(%[output_ptr]) "
+ "\n\t"
+ "sb %[tn1], 1(%[output_ptr]) "
+ "\n\t"
+ "lbux %[n2], %[Temp4](%[cm]) "
+ "\n\t"
+ "sb %[tp2], 2(%[output_ptr]) "
+ "\n\t"
+ "sb %[n2], 3(%[output_ptr]) "
+ "\n\t"
+
+ : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
+ [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [n1] "=&r"(n1),
+ [n2] "=&r"(n2), [n3] "=&r"(n3), [Temp1] "=&r"(Temp1),
+ [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector4a] "r"(vector4a), [cm] "r"(cm),
+ [output_ptr] "r"(output_ptr), [src_ptr] "r"(src_ptr));
+
+ src_ptr += 4;
+ }
+
+ /* next row... */
+ src_ptr += src_pixels_per_line;
+ output_ptr += output_width;
+ }
+ }
+}
+
+void vp8_filter_block2d_second_pass4(unsigned char *RESTRICT src_ptr,
+ unsigned char *RESTRICT output_ptr,
+ int output_pitch, int yoffset) {
+ unsigned int i;
+
+ int Temp1, Temp2, Temp3, Temp4;
+ unsigned int vector1b, vector2b, vector3b, vector4a;
+
+ unsigned char src_ptr_l2;
+ unsigned char src_ptr_l1;
+ unsigned char src_ptr_0;
+ unsigned char src_ptr_r1;
+ unsigned char src_ptr_r2;
+ unsigned char src_ptr_r3;
+
+ unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+ vector4a = 64;
+
+ /* load filter coefficients */
+ vector1b = sub_pel_filterss[yoffset][0];
+ vector2b = sub_pel_filterss[yoffset][2];
+ vector3b = sub_pel_filterss[yoffset][1];
+
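+  /* sub_pel_filterss packs the magnitudes of the 6-tap coefficients in
+   * pairs: [0] holds the two outer taps, [1] the two negative taps and [2]
+   * the two center taps; the outer and center pairs are accumulated with
+   * dpau (add) and the negative pair with dpsu (subtract). The intermediate
+   * buffer from the first pass is 4 pixels wide, so the byte offsets
+   * -8, -4, 0, 4, 8, 12 address six vertically adjacent pixels.
+   */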
+ if (vector1b) {
+ /* 6 tap filter */
+
+ for (i = 2; i--;) {
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr);
+
+ /* do not allow compiler to reorder instructions */
+ __asm__ __volatile__(
+ ".set noreorder \n\t"
+ :
+ :);
+
+      /* apply filter with vector pairs */
+ __asm__ __volatile__(
+ "lbu %[src_ptr_l2], -8(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 8(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 12(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -7(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 5(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 9(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 13(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp1], $ac2, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -6(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 6(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 10(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 14(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac0 \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -5(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 7(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 11(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 15(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac1 \n\t"
+ "extp %[Temp3], $ac0, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp4], $ac1, 9 \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=r"(Temp4), [src_ptr_l1] "=&r"(src_ptr_l1),
+ [src_ptr_0] "=&r"(src_ptr_0), [src_ptr_r1] "=&r"(src_ptr_r1),
+ [src_ptr_r2] "=&r"(src_ptr_r2), [src_ptr_l2] "=&r"(src_ptr_l2),
+ [src_ptr_r3] "=&r"(src_ptr_r3)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector3b] "r"(vector3b), [vector4a] "r"(vector4a),
+ [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ output_ptr[0] = cm[Temp1];
+ output_ptr[1] = cm[Temp2];
+ output_ptr[2] = cm[Temp3];
+ output_ptr[3] = cm[Temp4];
+
+ output_ptr += output_pitch;
+
+ /* apply filter with vectors pairs */
+ __asm__ __volatile__(
+ "lbu %[src_ptr_l2], -4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], 0(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 12(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 16(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], 1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 13(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 17(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp1], $ac2, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], 2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 10(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 14(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 18(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac0 \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], 3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 11(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 15(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 19(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac1 \n\t"
+ "extp %[Temp3], $ac0, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp4], $ac1, 9 \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=r"(Temp4), [src_ptr_l1] "=&r"(src_ptr_l1),
+ [src_ptr_0] "=&r"(src_ptr_0), [src_ptr_r1] "=&r"(src_ptr_r1),
+ [src_ptr_r2] "=&r"(src_ptr_r2), [src_ptr_l2] "=&r"(src_ptr_l2),
+ [src_ptr_r3] "=&r"(src_ptr_r3)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector3b] "r"(vector3b), [vector4a] "r"(vector4a),
+ [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ output_ptr[0] = cm[Temp1];
+ output_ptr[1] = cm[Temp2];
+ output_ptr[2] = cm[Temp3];
+ output_ptr[3] = cm[Temp4];
+
+ src_ptr += 8;
+ output_ptr += output_pitch;
+ }
+ } else {
+ /* 4 tap filter */
+
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr);
+
+ for (i = 2; i--;) {
+      /* do not allow assembler to reorder instructions */
+ __asm__ __volatile__(
+ ".set noreorder \n\t"
+ :
+ :);
+
+ /* apply filter with vectors pairs */
+ __asm__ __volatile__(
+ "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 8(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 5(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 9(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp1], $ac2, 9 \n\t"
+
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 6(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 10(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac0 \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 7(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 11(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac1 \n\t"
+ "extp %[Temp3], $ac0, 9 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp4], $ac1, 9 \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=r"(Temp4), [src_ptr_l1] "=&r"(src_ptr_l1),
+ [src_ptr_0] "=&r"(src_ptr_0), [src_ptr_r1] "=&r"(src_ptr_r1),
+ [src_ptr_r2] "=&r"(src_ptr_r2)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ output_ptr[0] = cm[Temp1];
+ output_ptr[1] = cm[Temp2];
+ output_ptr[2] = cm[Temp3];
+ output_ptr[3] = cm[Temp4];
+
+ output_ptr += output_pitch;
+
+ /* apply filter with vectors pairs */
+ __asm__ __volatile__(
+ "lbu %[src_ptr_l1], 0(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 12(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l1], 1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 13(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp1], $ac2, 9 \n\t"
+
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l1], 2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 10(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 14(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac0 \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l1], 3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 11(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 15(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac1 \n\t"
+ "extp %[Temp3], $ac0, 9 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp4], $ac1, 9 \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=r"(Temp4), [src_ptr_l1] "=&r"(src_ptr_l1),
+ [src_ptr_0] "=&r"(src_ptr_0), [src_ptr_r1] "=&r"(src_ptr_r1),
+ [src_ptr_r2] "=&r"(src_ptr_r2)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ output_ptr[0] = cm[Temp1];
+ output_ptr[1] = cm[Temp2];
+ output_ptr[2] = cm[Temp3];
+ output_ptr[3] = cm[Temp4];
+
+ src_ptr += 8;
+ output_ptr += output_pitch;
+ }
+ }
+}
+
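+/* Illustrative sketch (not original libvpx code) of the pack-and-dot-product
+   idiom used in the vertical passes, assuming the DSP ASE semantics:
+   "append rd, rs, 8" forms (rd << 8) | (rs & 0xff), and dpau.h.qbr /
+   dpsu.h.qbr then add / subtract the byte-wise dot product of the low 16 bits
+   of both operands, so each instruction applies one pair of 8-bit tap
+   magnitudes. */
+static int tap_pair_sketch(int acc, unsigned char pix_hi, unsigned char pix_lo,
+                           unsigned short taps, int subtract) {
+  unsigned int pixels = ((unsigned int)pix_hi << 8) | pix_lo; /* append */
+  int dot = (int)(pixels & 0xff) * (taps & 0xff) +
+            (int)((pixels >> 8) & 0xff) * ((taps >> 8) & 0xff);
+  return subtract ? acc - dot : acc + dot; /* dpsu.h.qbr / dpau.h.qbr */
+}
+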
+void vp8_filter_block2d_second_pass_8(unsigned char *RESTRICT src_ptr,
+ unsigned char *RESTRICT output_ptr,
+ int output_pitch,
+ unsigned int output_height,
+ unsigned int output_width,
+ unsigned int yoffset) {
+ unsigned int i;
+
+ int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6, Temp7, Temp8;
+ unsigned int vector1b, vector2b, vector3b, vector4a;
+
+ unsigned char src_ptr_l2;
+ unsigned char src_ptr_l1;
+ unsigned char src_ptr_0;
+ unsigned char src_ptr_r1;
+ unsigned char src_ptr_r2;
+ unsigned char src_ptr_r3;
+ unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+ (void)output_width;
+
+ vector4a = 64;
+
+ vector1b = sub_pel_filterss[yoffset][0];
+ vector2b = sub_pel_filterss[yoffset][2];
+ vector3b = sub_pel_filterss[yoffset][1];
+
+ if (vector1b) {
+ /* 6 tap filter */
+
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr);
+
+ for (i = output_height; i--;) {
+ /* apply filter with vectors pairs */
+ __asm__ __volatile__(
+ "lbu %[src_ptr_l2], -16(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -8(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 16(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 24(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -15(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -7(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 17(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 25(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp1], $ac2, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -14(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -6(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 10(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 18(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 26(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac0 \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -13(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -5(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 11(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 19(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 27(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac1 \n\t"
+ "extp %[Temp3], $ac0, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0),
+ [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2),
+ [src_ptr_l2] "=&r"(src_ptr_l2), [src_ptr_r3] "=&r"(src_ptr_r3)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector3b] "r"(vector3b), [vector4a] "r"(vector4a),
+ [src_ptr] "r"(src_ptr));
+
+ /* apply filter with vectors pairs */
+ __asm__ __volatile__(
+ "lbu %[src_ptr_l2], -12(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 12(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 20(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 28(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp4], $ac1, 9 \n\t"
+
+ "lbu %[src_ptr_l2], -11(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 13(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 21(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 29(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp5], $ac2, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -10(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 14(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 22(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 30(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac0 \n\t"
+ "extp %[Temp6], $ac3, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -9(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 15(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 23(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 31(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac1 \n\t"
+ "extp %[Temp7], $ac0, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp8], $ac1, 9 \n\t"
+
+ : [Temp4] "=&r"(Temp4), [Temp5] "=&r"(Temp5), [Temp6] "=&r"(Temp6),
+ [Temp7] "=&r"(Temp7), [Temp8] "=r"(Temp8),
+ [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0),
+ [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2),
+ [src_ptr_l2] "=&r"(src_ptr_l2), [src_ptr_r3] "=&r"(src_ptr_r3)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector3b] "r"(vector3b), [vector4a] "r"(vector4a),
+ [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ output_ptr[0] = cm[Temp1];
+ output_ptr[1] = cm[Temp2];
+ output_ptr[2] = cm[Temp3];
+ output_ptr[3] = cm[Temp4];
+ output_ptr[4] = cm[Temp5];
+ output_ptr[5] = cm[Temp6];
+ output_ptr[6] = cm[Temp7];
+ output_ptr[7] = cm[Temp8];
+
+ src_ptr += 8;
+ output_ptr += output_pitch;
+ }
+ } else {
+ /* 4 tap filter */
+
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr);
+
+ for (i = output_height; i--;) {
+ __asm__ __volatile__(
+ "lbu %[src_ptr_l1], -8(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 16(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+
+ : [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0),
+ [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr));
+
+ __asm__ __volatile__(
+ "lbu %[src_ptr_l1], -7(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 17(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp1], $ac2, 9 \n\t"
+
+ : [Temp1] "=r"(Temp1), [src_ptr_l1] "=&r"(src_ptr_l1),
+ [src_ptr_0] "=&r"(src_ptr_0), [src_ptr_r1] "=&r"(src_ptr_r1),
+ [src_ptr_r2] "=&r"(src_ptr_r2)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr));
+
+ src_ptr_l1 = src_ptr[-6];
+ src_ptr_0 = src_ptr[2];
+ src_ptr_r1 = src_ptr[10];
+ src_ptr_r2 = src_ptr[18];
+
+ __asm__ __volatile__(
+ "mtlo %[vector4a], $ac0 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp2], $ac3, 9 \n\t"
+
+ : [Temp2] "=r"(Temp2)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0),
+ [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2),
+ [vector4a] "r"(vector4a));
+
+ src_ptr_l1 = src_ptr[-5];
+ src_ptr_0 = src_ptr[3];
+ src_ptr_r1 = src_ptr[11];
+ src_ptr_r2 = src_ptr[19];
+
+ __asm__ __volatile__(
+ "mtlo %[vector4a], $ac1 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp3], $ac0, 9 \n\t"
+
+ : [Temp3] "=r"(Temp3)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0),
+ [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2),
+ [vector4a] "r"(vector4a));
+
+ src_ptr_l1 = src_ptr[-4];
+ src_ptr_0 = src_ptr[4];
+ src_ptr_r1 = src_ptr[12];
+ src_ptr_r2 = src_ptr[20];
+
+ __asm__ __volatile__(
+ "mtlo %[vector4a], $ac2 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp4], $ac1, 9 \n\t"
+
+ : [Temp4] "=r"(Temp4)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0),
+ [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2),
+ [vector4a] "r"(vector4a));
+
+ src_ptr_l1 = src_ptr[-3];
+ src_ptr_0 = src_ptr[5];
+ src_ptr_r1 = src_ptr[13];
+ src_ptr_r2 = src_ptr[21];
+
+ __asm__ __volatile__(
+ "mtlo %[vector4a], $ac3 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp5], $ac2, 9 \n\t"
+
+ : [Temp5] "=&r"(Temp5)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0),
+ [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2),
+ [vector4a] "r"(vector4a));
+
+ src_ptr_l1 = src_ptr[-2];
+ src_ptr_0 = src_ptr[6];
+ src_ptr_r1 = src_ptr[14];
+ src_ptr_r2 = src_ptr[22];
+
+ __asm__ __volatile__(
+ "mtlo %[vector4a], $ac0 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp6], $ac3, 9 \n\t"
+
+ : [Temp6] "=r"(Temp6)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0),
+ [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2),
+ [vector4a] "r"(vector4a));
+
+ src_ptr_l1 = src_ptr[-1];
+ src_ptr_0 = src_ptr[7];
+ src_ptr_r1 = src_ptr[15];
+ src_ptr_r2 = src_ptr[23];
+
+ __asm__ __volatile__(
+ "mtlo %[vector4a], $ac1 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp7], $ac0, 9 \n\t"
+ "extp %[Temp8], $ac1, 9 \n\t"
+
+ : [Temp7] "=&r"(Temp7), [Temp8] "=r"(Temp8)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0),
+ [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2),
+ [vector4a] "r"(vector4a));
+
+ /* clamp and store results */
+ output_ptr[0] = cm[Temp1];
+ output_ptr[1] = cm[Temp2];
+ output_ptr[2] = cm[Temp3];
+ output_ptr[3] = cm[Temp4];
+ output_ptr[4] = cm[Temp5];
+ output_ptr[5] = cm[Temp6];
+ output_ptr[6] = cm[Temp7];
+ output_ptr[7] = cm[Temp8];
+
+ src_ptr += 8;
+ output_ptr += output_pitch;
+ }
+ }
+}
+
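+/* Illustrative scalar sketch (not original libvpx code) of one vertical
+   output pixel in the pass above. The intermediate buffer is 8 bytes wide, so
+   the six taps sit at byte offsets -16, -8, 0, 8, 16 and 24 from the current
+   column; f1m and f4m are the magnitudes of the two negative taps, which the
+   asm subtracts via dpsu.h.qbr. The exact tap-to-pair mapping is an
+   assumption about the packed sub_pel_filterss layout. */
+static unsigned char vertical_pixel_sketch(const unsigned char *col, int width,
+                                           int f0, int f1m, int f2, int f3,
+                                           int f4m, int f5) {
+  unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+  int sum = 64; /* rounding bias seeded by "mtlo %[vector4a]" */
+  sum += f0 * col[-2 * width] + f5 * col[3 * width]; /* dpau with vector1b */
+  sum += f2 * col[0] + f3 * col[width];              /* dpau with vector2b */
+  sum -= f1m * col[-width] + f4m * col[2 * width];   /* dpsu with vector3b */
+  /* extp ..., 9 with DSPControl pos = 16 (set by the callers' wrdsp)
+     amounts to a shift right by 7 here */
+  return cm[sum >> 7];
+}
+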
+void vp8_filter_block2d_second_pass161(unsigned char *RESTRICT src_ptr,
+ unsigned char *RESTRICT output_ptr,
+ int output_pitch,
+ const unsigned short *vp8_filter) {
+ unsigned int i, j;
+
+ int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6, Temp7, Temp8;
+ unsigned int vector4a;
+ unsigned int vector1b, vector2b, vector3b;
+
+ unsigned char src_ptr_l2;
+ unsigned char src_ptr_l1;
+ unsigned char src_ptr_0;
+ unsigned char src_ptr_r1;
+ unsigned char src_ptr_r2;
+ unsigned char src_ptr_r3;
+ unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+ vector4a = 64;
+
+ vector1b = vp8_filter[0];
+ vector2b = vp8_filter[2];
+ vector3b = vp8_filter[1];
+
+ if (vector1b == 0) {
+ /* 4 tap filter */
+
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr + 16);
+
+ for (i = 16; i--;) {
+      /* partially unrolled inner loop: 8 output pixels per iteration */
+ for (j = 0; j < 16; j += 8) {
+ /* apply filter with vectors pairs */
+ __asm__ __volatile__(
+            "lbu %[src_ptr_l1], -16(%[src_ptr])           \n\t"
+            "lbu %[src_ptr_0], 0(%[src_ptr])              \n\t"
+            "lbu %[src_ptr_r1], 16(%[src_ptr])            \n\t"
+            "lbu %[src_ptr_r2], 32(%[src_ptr])            \n\t"
+            "mtlo %[vector4a], $ac2                       \n\t"
+            "append %[src_ptr_0], %[src_ptr_r1], 8        \n\t"
+            "append %[src_ptr_l1], %[src_ptr_r2], 8       \n\t"
+            "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b]   \n\t"
+            "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b]  \n\t"
+
+            "lbu %[src_ptr_l1], -15(%[src_ptr])           \n\t"
+            "lbu %[src_ptr_0], 1(%[src_ptr])              \n\t"
+            "lbu %[src_ptr_r1], 17(%[src_ptr])            \n\t"
+            "lbu %[src_ptr_r2], 33(%[src_ptr])            \n\t"
+            "mtlo %[vector4a], $ac3                       \n\t"
+            "extp %[Temp1], $ac2, 9                       \n\t"
+
+            "append %[src_ptr_0], %[src_ptr_r1], 8        \n\t"
+            "append %[src_ptr_l1], %[src_ptr_r2], 8       \n\t"
+            "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b]   \n\t"
+            "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b]  \n\t"
+
+            "lbu %[src_ptr_l1], -14(%[src_ptr])           \n\t"
+            "lbu %[src_ptr_0], 2(%[src_ptr])              \n\t"
+            "lbu %[src_ptr_r1], 18(%[src_ptr])            \n\t"
+            "lbu %[src_ptr_r2], 34(%[src_ptr])            \n\t"
+            "mtlo %[vector4a], $ac1                       \n\t"
+            "extp %[Temp2], $ac3, 9                       \n\t"
+
+            "append %[src_ptr_0], %[src_ptr_r1], 8        \n\t"
+            "append %[src_ptr_l1], %[src_ptr_r2], 8       \n\t"
+            "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b]   \n\t"
+            "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b]  \n\t"
+
+            "lbu %[src_ptr_l1], -13(%[src_ptr])           \n\t"
+            "lbu %[src_ptr_0], 3(%[src_ptr])              \n\t"
+            "lbu %[src_ptr_r1], 19(%[src_ptr])            \n\t"
+            "lbu %[src_ptr_r2], 35(%[src_ptr])            \n\t"
+            "mtlo %[vector4a], $ac3                       \n\t"
+            "extp %[Temp3], $ac1, 9                       \n\t"
+
+            "append %[src_ptr_0], %[src_ptr_r1], 8        \n\t"
+            "append %[src_ptr_l1], %[src_ptr_r2], 8       \n\t"
+            "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b]   \n\t"
+            "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b]  \n\t"
+
+            "lbu %[src_ptr_l1], -12(%[src_ptr])           \n\t"
+            "lbu %[src_ptr_0], 4(%[src_ptr])              \n\t"
+            "lbu %[src_ptr_r1], 20(%[src_ptr])            \n\t"
+            "lbu %[src_ptr_r2], 36(%[src_ptr])            \n\t"
+            "mtlo %[vector4a], $ac2                       \n\t"
+            "extp %[Temp4], $ac3, 9                       \n\t"
+
+            "append %[src_ptr_0], %[src_ptr_r1], 8        \n\t"
+            "append %[src_ptr_l1], %[src_ptr_r2], 8       \n\t"
+            "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b]   \n\t"
+            "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b]  \n\t"
+
+            "lbu %[src_ptr_l1], -11(%[src_ptr])           \n\t"
+            "lbu %[src_ptr_0], 5(%[src_ptr])              \n\t"
+            "lbu %[src_ptr_r1], 21(%[src_ptr])            \n\t"
+            "lbu %[src_ptr_r2], 37(%[src_ptr])            \n\t"
+            "mtlo %[vector4a], $ac3                       \n\t"
+            "extp %[Temp5], $ac2, 9                       \n\t"
+
+            "append %[src_ptr_0], %[src_ptr_r1], 8        \n\t"
+            "append %[src_ptr_l1], %[src_ptr_r2], 8       \n\t"
+            "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b]   \n\t"
+            "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b]  \n\t"
+
+            "lbu %[src_ptr_l1], -10(%[src_ptr])           \n\t"
+            "lbu %[src_ptr_0], 6(%[src_ptr])              \n\t"
+            "lbu %[src_ptr_r1], 22(%[src_ptr])            \n\t"
+            "lbu %[src_ptr_r2], 38(%[src_ptr])            \n\t"
+            "mtlo %[vector4a], $ac1                       \n\t"
+            "extp %[Temp6], $ac3, 9                       \n\t"
+
+            "append %[src_ptr_0], %[src_ptr_r1], 8        \n\t"
+            "append %[src_ptr_l1], %[src_ptr_r2], 8       \n\t"
+            "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b]   \n\t"
+            "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b]  \n\t"
+
+            "lbu %[src_ptr_l1], -9(%[src_ptr])            \n\t"
+            "lbu %[src_ptr_0], 7(%[src_ptr])              \n\t"
+            "lbu %[src_ptr_r1], 23(%[src_ptr])            \n\t"
+            "lbu %[src_ptr_r2], 39(%[src_ptr])            \n\t"
+            "mtlo %[vector4a], $ac3                       \n\t"
+            "extp %[Temp7], $ac1, 9                       \n\t"
+
+            "append %[src_ptr_0], %[src_ptr_r1], 8        \n\t"
+            "append %[src_ptr_l1], %[src_ptr_r2], 8       \n\t"
+            "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b]   \n\t"
+            "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b]  \n\t"
+            "extp %[Temp8], $ac3, 9                       \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=&r"(Temp4), [Temp5] "=&r"(Temp5), [Temp6] "=&r"(Temp6),
+ [Temp7] "=&r"(Temp7), [Temp8] "=r"(Temp8),
+ [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0),
+ [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2)
+ : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b),
+ [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ output_ptr[j] = cm[Temp1];
+ output_ptr[j + 1] = cm[Temp2];
+ output_ptr[j + 2] = cm[Temp3];
+ output_ptr[j + 3] = cm[Temp4];
+ output_ptr[j + 4] = cm[Temp5];
+ output_ptr[j + 5] = cm[Temp6];
+ output_ptr[j + 6] = cm[Temp7];
+ output_ptr[j + 7] = cm[Temp8];
+
+ src_ptr += 8;
+ }
+
+ output_ptr += output_pitch;
+ }
+ } else {
+    /* 6 tap filter */
+
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr + 16);
+
+    /* inner loop unrolled: two 8-pixel halves per 16-wide row */
+ for (i = 16; i--;) {
+ /* apply filter with vectors pairs */
+ __asm__ __volatile__(
+ "lbu %[src_ptr_l2], -32(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -16(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 16(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 32(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 48(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -31(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -15(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 17(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 33(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 49(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac0 \n\t"
+ "extp %[Temp1], $ac2, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -30(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -14(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 18(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 34(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 50(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac1 \n\t"
+ "extp %[Temp2], $ac0, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -29(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -13(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 19(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 35(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 51(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp3], $ac1, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -28(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -12(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 20(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 36(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 52(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+ "extp %[Temp4], $ac3, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -27(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -11(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 21(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 37(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 53(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac0 \n\t"
+ "extp %[Temp5], $ac2, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -26(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -10(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 22(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 38(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 54(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac1 \n\t"
+ "extp %[Temp6], $ac0, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -25(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -9(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 23(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 39(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 55(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp7], $ac1, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp8], $ac3, 9 \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=&r"(Temp4), [Temp5] "=&r"(Temp5), [Temp6] "=&r"(Temp6),
+ [Temp7] "=&r"(Temp7), [Temp8] "=r"(Temp8),
+ [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0),
+ [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2),
+ [src_ptr_l2] "=&r"(src_ptr_l2), [src_ptr_r3] "=&r"(src_ptr_r3)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector3b] "r"(vector3b), [vector4a] "r"(vector4a),
+ [src_ptr] "r"(src_ptr));
+
+ /* clamp and store results */
+ output_ptr[0] = cm[Temp1];
+ output_ptr[1] = cm[Temp2];
+ output_ptr[2] = cm[Temp3];
+ output_ptr[3] = cm[Temp4];
+ output_ptr[4] = cm[Temp5];
+ output_ptr[5] = cm[Temp6];
+ output_ptr[6] = cm[Temp7];
+ output_ptr[7] = cm[Temp8];
+
+ /* apply filter with vectors pairs */
+ __asm__ __volatile__(
+ "lbu %[src_ptr_l2], -24(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -8(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 8(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 24(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 40(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 56(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -23(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -7(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 9(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 25(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 41(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 57(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac0 \n\t"
+ "extp %[Temp1], $ac2, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -22(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -6(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 10(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 26(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 42(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 58(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac1 \n\t"
+ "extp %[Temp2], $ac0, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -21(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -5(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 11(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 27(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 43(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 59(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp3], $ac1, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -20(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 12(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 28(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 44(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 60(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac2 \n\t"
+ "extp %[Temp4], $ac3, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -19(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 13(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 29(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 45(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 61(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac0 \n\t"
+ "extp %[Temp5], $ac2, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -18(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 14(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 30(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 46(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 62(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac1 \n\t"
+ "extp %[Temp6], $ac0, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t"
+
+ "lbu %[src_ptr_l2], -17(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_0], 15(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r1], 31(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r2], 47(%[src_ptr]) \n\t"
+ "lbu %[src_ptr_r3], 63(%[src_ptr]) \n\t"
+ "mtlo %[vector4a], $ac3 \n\t"
+ "extp %[Temp7], $ac1, 9 \n\t"
+
+ "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t"
+ "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t"
+ "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t"
+ "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t"
+ "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t"
+ "extp %[Temp8], $ac3, 9 \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
+ [Temp4] "=&r"(Temp4), [Temp5] "=&r"(Temp5), [Temp6] "=&r"(Temp6),
+ [Temp7] "=&r"(Temp7), [Temp8] "=r"(Temp8),
+ [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0),
+ [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2),
+ [src_ptr_l2] "=&r"(src_ptr_l2), [src_ptr_r3] "=&r"(src_ptr_r3)
+ : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
+ [vector3b] "r"(vector3b), [vector4a] "r"(vector4a),
+ [src_ptr] "r"(src_ptr));
+
+ src_ptr += 16;
+ output_ptr[8] = cm[Temp1];
+ output_ptr[9] = cm[Temp2];
+ output_ptr[10] = cm[Temp3];
+ output_ptr[11] = cm[Temp4];
+ output_ptr[12] = cm[Temp5];
+ output_ptr[13] = cm[Temp6];
+ output_ptr[14] = cm[Temp7];
+ output_ptr[15] = cm[Temp8];
+
+ output_ptr += output_pitch;
+ }
+ }
+}
+
+void vp8_sixtap_predict4x4_dspr2(unsigned char *RESTRICT src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *RESTRICT dst_ptr,
+ int dst_pitch) {
+  unsigned char FData[9 * 4]; /* Temp data buffer used in filtering */
+ unsigned int pos = 16;
+
+  /* bit position for extract from accumulator */
+ __asm__ __volatile__("wrdsp %[pos], 1 \n\t"
+ :
+ : [pos] "r"(pos));
+
+ if (yoffset) {
+    /* First filter 1-D horizontally: 9 rows cover the 4 output rows plus the
+       two rows above and three below needed by the 6-tap vertical pass */
+ vp8_filter_block2d_first_pass_4(src_ptr - (2 * src_pixels_per_line), FData,
+ src_pixels_per_line, 9, xoffset, 4);
+    /* then filter vertically; FData + 8 skips the two rows of context */
+ vp8_filter_block2d_second_pass4(FData + 8, dst_ptr, dst_pitch, yoffset);
+ } else
+    /* if (yoffset == 0) vp8_filter_block2d_first_pass writes to dst_ptr */
+ vp8_filter_block2d_first_pass_4(src_ptr, dst_ptr, src_pixels_per_line, 4,
+ xoffset, dst_pitch);
+}
+
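+/* Illustrative helper (not original libvpx code): a 6-tap vertical filter
+   needs two extra rows above and three below the block, which is why the
+   horizontal pass above produces 9 rows for a 4-row block and the vertical
+   pass starts at FData + 8, i.e. two rows of 4 bytes into the buffer. */
+static int first_pass_rows_sketch(int block_height) {
+  return block_height + 5; /* e.g. 9 for 4x4, 13 for 8x8, 21 for 16x16 */
+}
+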
+void vp8_sixtap_predict8x8_dspr2(unsigned char *RESTRICT src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *RESTRICT dst_ptr,
+ int dst_pitch) {
+  unsigned char FData[13 * 8]; /* Temp data buffer used in filtering */
+ unsigned int pos, Temp1, Temp2;
+
+ pos = 16;
+
+  /* bit position for extract from accumulator */
+ __asm__ __volatile__("wrdsp %[pos], 1 \n\t"
+ :
+ : [pos] "r"(pos));
+
+ if (yoffset) {
+ src_ptr = src_ptr - (2 * src_pixels_per_line);
+
+ if (xoffset) /* filter 1-D horizontally... */
+ vp8_filter_block2d_first_pass_8_all(src_ptr, FData, src_pixels_per_line,
+ 13, xoffset, 8);
+
+ else {
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr + 2 * src_pixels_per_line);
+
+ __asm__ __volatile__(
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 0(%[FData]) \n\t"
+ "sw %[Temp2], 4(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 8(%[FData]) \n\t"
+ "sw %[Temp2], 12(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 16(%[FData]) \n\t"
+ "sw %[Temp2], 20(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 24(%[FData]) \n\t"
+ "sw %[Temp2], 28(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 32(%[FData]) \n\t"
+ "sw %[Temp2], 36(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 40(%[FData]) \n\t"
+ "sw %[Temp2], 44(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 48(%[FData]) \n\t"
+ "sw %[Temp2], 52(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 56(%[FData]) \n\t"
+ "sw %[Temp2], 60(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 64(%[FData]) \n\t"
+ "sw %[Temp2], 68(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 72(%[FData]) \n\t"
+ "sw %[Temp2], 76(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 80(%[FData]) \n\t"
+ "sw %[Temp2], 84(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 88(%[FData]) \n\t"
+ "sw %[Temp2], 92(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 96(%[FData]) \n\t"
+ "sw %[Temp2], 100(%[FData]) \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2)
+ : [FData] "r"(FData), [src_ptr] "r"(src_ptr),
+ [src_pixels_per_line] "r"(src_pixels_per_line));
+ }
+
+    /* then filter vertically; FData + 16 skips the two rows of context */
+ vp8_filter_block2d_second_pass_8(FData + 16, dst_ptr, dst_pitch, 8, 8,
+ yoffset);
+ }
+
+  /* if (yoffset == 0) vp8_filter_block2d_first_pass writes to dst_ptr */
+ else {
+ if (xoffset)
+ vp8_filter_block2d_first_pass_8_all(src_ptr, dst_ptr, src_pixels_per_line,
+ 8, xoffset, dst_pitch);
+
+ else {
+ /* copy from src buffer to dst buffer */
+ __asm__ __volatile__(
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 0(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 4(%[dst_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 8(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 12(%[dst_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 16(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 20(%[dst_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 24(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 28(%[dst_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 32(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 36(%[dst_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 40(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 44(%[dst_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 48(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 52(%[dst_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 56(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 60(%[dst_ptr]) \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2)
+ : [dst_ptr] "r"(dst_ptr), [src_ptr] "r"(src_ptr),
+ [src_pixels_per_line] "r"(src_pixels_per_line));
+ }
+ }
+}
+
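+/* Illustrative sketch (not original libvpx code) of the xoffset == 0 copy
+   above: "ulw" performs an unaligned word load from the source and "sw"
+   stores into the word-aligned local buffer, so each asm stanza moves one
+   8-byte row. A plain C equivalent: */
+static void copy_rows_8wide_sketch(const unsigned char *src, int src_stride,
+                                   unsigned char *dst, int rows) {
+  int r, c;
+  for (r = 0; r < rows; ++r) {
+    for (c = 0; c < 8; ++c) dst[r * 8 + c] = src[r * src_stride + c];
+  }
+}
+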
+void vp8_sixtap_predict8x4_dspr2(unsigned char *RESTRICT src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *RESTRICT dst_ptr,
+ int dst_pitch) {
+  unsigned char FData[9 * 8]; /* Temp data buffer used in filtering */
+ unsigned int pos, Temp1, Temp2;
+
+ pos = 16;
+
+  /* bit position for extract from accumulator */
+ __asm__ __volatile__("wrdsp %[pos], 1 \n\t"
+ :
+ : [pos] "r"(pos));
+
+ if (yoffset) {
+ src_ptr = src_ptr - (2 * src_pixels_per_line);
+
+ if (xoffset) /* filter 1-D horizontally... */
+ vp8_filter_block2d_first_pass_8_all(src_ptr, FData, src_pixels_per_line,
+ 9, xoffset, 8);
+
+ else {
+ /* prefetch src_ptr data to cache memory */
+ prefetch_load(src_ptr + 2 * src_pixels_per_line);
+
+ __asm__ __volatile__(
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 0(%[FData]) \n\t"
+ "sw %[Temp2], 4(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 8(%[FData]) \n\t"
+ "sw %[Temp2], 12(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 16(%[FData]) \n\t"
+ "sw %[Temp2], 20(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 24(%[FData]) \n\t"
+ "sw %[Temp2], 28(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 32(%[FData]) \n\t"
+ "sw %[Temp2], 36(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 40(%[FData]) \n\t"
+ "sw %[Temp2], 44(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 48(%[FData]) \n\t"
+ "sw %[Temp2], 52(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 56(%[FData]) \n\t"
+ "sw %[Temp2], 60(%[FData]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 64(%[FData]) \n\t"
+ "sw %[Temp2], 68(%[FData]) \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2)
+ : [FData] "r"(FData), [src_ptr] "r"(src_ptr),
+ [src_pixels_per_line] "r"(src_pixels_per_line));
+ }
+
+    /* then filter vertically; FData + 16 skips the two rows of context */
+ vp8_filter_block2d_second_pass_8(FData + 16, dst_ptr, dst_pitch, 4, 8,
+ yoffset);
+ }
+
+  /* if (yoffset == 0) vp8_filter_block2d_first_pass writes to dst_ptr */
+ else {
+ if (xoffset)
+ vp8_filter_block2d_first_pass_8_all(src_ptr, dst_ptr, src_pixels_per_line,
+ 4, xoffset, dst_pitch);
+
+ else {
+ /* copy from src buffer to dst buffer */
+ __asm__ __volatile__(
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 0(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 4(%[dst_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 8(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 12(%[dst_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 16(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 20(%[dst_ptr]) \n\t"
+ "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t"
+
+ "ulw %[Temp1], 0(%[src_ptr]) \n\t"
+ "ulw %[Temp2], 4(%[src_ptr]) \n\t"
+ "sw %[Temp1], 24(%[dst_ptr]) \n\t"
+ "sw %[Temp2], 28(%[dst_ptr]) \n\t"
+
+ : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2)
+ : [dst_ptr] "r"(dst_ptr), [src_ptr] "r"(src_ptr),
+ [src_pixels_per_line] "r"(src_pixels_per_line));
+ }
+ }
+}
+
+void vp8_sixtap_predict16x16_dspr2(unsigned char *RESTRICT src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *RESTRICT dst_ptr,
+ int dst_pitch) {
+ const unsigned short *VFilter;
+  unsigned char FData[21 * 16]; /* Temp data buffer used in filtering */
+ unsigned int pos;
+
+ VFilter = sub_pel_filterss[yoffset];
+
+ pos = 16;
+
+  /* bit position for extract from accumulator */
+ __asm__ __volatile__("wrdsp %[pos], 1 \n\t"
+ :
+ : [pos] "r"(pos));
+
+ if (yoffset) {
+ src_ptr = src_ptr - (2 * src_pixels_per_line);
+
+ switch (xoffset) {
+ /* filter 1-D horizontally... */
+ case 2:
+ case 4:
+ case 6:
+ /* 6 tap filter */
+ vp8_filter_block2d_first_pass16_6tap(
+ src_ptr, FData, src_pixels_per_line, 21, xoffset, 16);
+ break;
+
+ case 0:
+ /* only copy buffer */
+ vp8_filter_block2d_first_pass16_0(src_ptr, FData, src_pixels_per_line);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ /* 4 tap filter */
+ vp8_filter_block2d_first_pass16_4tap(
+ src_ptr, FData, src_pixels_per_line, 16, 21, xoffset, yoffset,
+ dst_ptr, dst_pitch);
+ break;
+ }
+
+    /* then filter vertically; FData + 32 skips the two rows of context */
+ vp8_filter_block2d_second_pass161(FData + 32, dst_ptr, dst_pitch, VFilter);
+ } else {
+    /* if (yoffset == 0) vp8_filter_block2d_first_pass writes to dst_ptr */
+ switch (xoffset) {
+ case 2:
+ case 4:
+ case 6:
+ /* 6 tap filter */
+ vp8_filter_block2d_first_pass16_6tap(
+ src_ptr, dst_ptr, src_pixels_per_line, 16, xoffset, dst_pitch);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ /* 4 tap filter */
+ vp8_filter_block2d_first_pass16_4tap(
+ src_ptr, dst_ptr, src_pixels_per_line, 16, 21, xoffset, yoffset,
+ dst_ptr, dst_pitch);
+ break;
+ }
+ }
+}
+
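+/* The switches above mirror the VP8 sub-pel filter table, in which the odd
+   eighth-pel offsets have zero outer taps. A compact sketch of the dispatch
+   (illustrative only, not a libvpx function): */
+static int sixtap_taps_for_offset_sketch(int offset) {
+  if (offset == 0) return 0;   /* integer position: plain copy */
+  return (offset & 1) ? 4 : 6; /* odd -> 4-tap, even -> 6-tap */
+}
+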
+#endif
diff --git a/media/libvpx/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c b/media/libvpx/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
new file mode 100644
index 0000000000..eae852d592
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+
+#if HAVE_DSPR2
+
+void vp8_dequant_idct_add_y_block_dspr2(short *q, short *dq, unsigned char *dst,
+ int stride, char *eobs) {
+ int i, j;
+
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j) {
+ if (*eobs++ > 1)
+ vp8_dequant_idct_add_dspr2(q, dq, dst, stride);
+ else {
+ vp8_dc_only_idct_add_dspr2(q[0] * dq[0], dst, stride, dst, stride);
+ ((int *)q)[0] = 0;
+ }
+
+ q += 16;
+ dst += 4;
+ }
+
+ dst += 4 * stride - 16;
+ }
+}
+
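+/* Illustrative scalar sketch (not original libvpx code) of the eob short-cut
+   above: when at most the DC coefficient is present (*eobs <= 1), the 4x4
+   inverse transform collapses to adding one rounded DC term to every pixel,
+   and ((int *)q)[0] = 0 clears the first two 16-bit coefficients in one
+   store. */
+static void dc_only_add_sketch(short dc, unsigned char *dst, int stride) {
+  int r, c;
+  int a1 = (dc + 4) >> 3; /* same rounding as the full transform */
+  for (r = 0; r < 4; ++r, dst += stride) {
+    for (c = 0; c < 4; ++c) {
+      int v = dst[c] + a1;
+      dst[c] = (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
+    }
+  }
+}
+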
+void vp8_dequant_idct_add_uv_block_dspr2(short *q, short *dq,
+ unsigned char *dst_u,
+ unsigned char *dst_v, int stride,
+ char *eobs) {
+ int i, j;
+
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 2; ++j) {
+ if (*eobs++ > 1)
+ vp8_dequant_idct_add_dspr2(q, dq, dst_u, stride);
+ else {
+ vp8_dc_only_idct_add_dspr2(q[0] * dq[0], dst_u, stride, dst_u, stride);
+ ((int *)q)[0] = 0;
+ }
+
+ q += 16;
+ dst_u += 4;
+ }
+
+ dst_u += 4 * stride - 8;
+ }
+
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 2; ++j) {
+ if (*eobs++ > 1)
+ vp8_dequant_idct_add_dspr2(q, dq, dst_v, stride);
+ else {
+ vp8_dc_only_idct_add_dspr2(q[0] * dq[0], dst_v, stride, dst_v, stride);
+ ((int *)q)[0] = 0;
+ }
+
+ q += 16;
+ dst_v += 4;
+ }
+
+ dst_v += 4 * stride - 8;
+ }
+}
+
+#endif
diff --git a/media/libvpx/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c b/media/libvpx/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c
new file mode 100644
index 0000000000..9163ffad1e
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8_rtcd.h"
+
+#if HAVE_DSPR2
+#define CROP_WIDTH 256
+
+/******************************************************************************
+ * Notes:
+ *
+ * This implementation makes use of 16-bit fixed-point versions of two
+ * multiplicative constants:
+ * 1. sqrt(2) * cos(pi/8)
+ * 2. sqrt(2) * sin(pi/8)
+ * Since the first constant is greater than 1, to maintain the same 16-bit
+ * fixed-point precision as the second one, we use the identity
+ * x * a = x + x * (a - 1)
+ * so
+ * x * sqrt(2) * cos(pi/8) = x + x * (sqrt(2) * cos(pi/8) - 1).
+ ****************************************************************************/
+extern unsigned char ff_cropTbl[256 + 2 * CROP_WIDTH];
+static const int cospi8sqrt2minus1 = 20091;
+static const int sinpi8sqrt2 = 35468;
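+
+/* A quick numeric check of the note above (illustrative, not original libvpx
+   code): sqrt(2) * cos(pi/8) ~= 1.30656 and 20091 / 65536 ~= 0.30656, so
+   x + ((x * 20091) >> 16) approximates x * sqrt(2) * cos(pi/8), while
+   35468 / 65536 ~= 0.54120 encodes sqrt(2) * sin(pi/8) directly, since that
+   constant is below 1. */
+static int mul_sqrt2_cospi8_sketch(int x) {
+  return x + ((x * cospi8sqrt2minus1) >> 16); /* x * ~1.30656 in Q16 */
+}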
+
+inline void prefetch_load_short(short *src) {
+ __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src));
+}
+
+void vp8_short_idct4x4llm_dspr2(short *input, unsigned char *pred_ptr,
+ int pred_stride, unsigned char *dst_ptr,
+ int dst_stride) {
+ int r, c;
+ int a1, b1, c1, d1;
+ short output[16];
+ short *ip = input;
+ short *op = output;
+ int temp1, temp2;
+ int shortpitch = 4;
+
+ int c2, d2;
+ int temp3, temp4;
+ unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+ /* prepare data for load */
+ prefetch_load_short(ip + 8);
+
+ /* first loop is unrolled */
+ a1 = ip[0] + ip[8];
+ b1 = ip[0] - ip[8];
+
+ temp1 = (ip[4] * sinpi8sqrt2) >> 16;
+ temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
+ c1 = temp1 - temp2;
+
+ temp1 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16);
+ temp2 = (ip[12] * sinpi8sqrt2) >> 16;
+ d1 = temp1 + temp2;
+
+ temp3 = (ip[5] * sinpi8sqrt2) >> 16;
+ temp4 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16);
+ c2 = temp3 - temp4;
+
+ temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16);
+ temp4 = (ip[13] * sinpi8sqrt2) >> 16;
+ d2 = temp3 + temp4;
+
+ op[0] = a1 + d1;
+ op[12] = a1 - d1;
+ op[4] = b1 + c1;
+ op[8] = b1 - c1;
+
+ a1 = ip[1] + ip[9];
+ b1 = ip[1] - ip[9];
+
+ op[1] = a1 + d2;
+ op[13] = a1 - d2;
+ op[5] = b1 + c2;
+ op[9] = b1 - c2;
+
+ a1 = ip[2] + ip[10];
+ b1 = ip[2] - ip[10];
+
+ temp1 = (ip[6] * sinpi8sqrt2) >> 16;
+ temp2 = ip[14] + ((ip[14] * cospi8sqrt2minus1) >> 16);
+ c1 = temp1 - temp2;
+
+ temp1 = ip[6] + ((ip[6] * cospi8sqrt2minus1) >> 16);
+ temp2 = (ip[14] * sinpi8sqrt2) >> 16;
+ d1 = temp1 + temp2;
+
+ temp3 = (ip[7] * sinpi8sqrt2) >> 16;
+ temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16);
+ c2 = temp3 - temp4;
+
+ temp3 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16);
+ temp4 = (ip[15] * sinpi8sqrt2) >> 16;
+ d2 = temp3 + temp4;
+
+ op[2] = a1 + d1;
+ op[14] = a1 - d1;
+ op[6] = b1 + c1;
+ op[10] = b1 - c1;
+
+ a1 = ip[3] + ip[11];
+ b1 = ip[3] - ip[11];
+
+ op[3] = a1 + d2;
+ op[15] = a1 - d2;
+ op[7] = b1 + c2;
+ op[11] = b1 - c2;
+
+ ip = output;
+
+ /* prepare data for load */
+ prefetch_load_short(ip + shortpitch);
+
+ /* second loop is unrolled */
+ a1 = ip[0] + ip[2];
+ b1 = ip[0] - ip[2];
+
+ temp1 = (ip[1] * sinpi8sqrt2) >> 16;
+ temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16);
+ c1 = temp1 - temp2;
+
+ temp1 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16);
+ temp2 = (ip[3] * sinpi8sqrt2) >> 16;
+ d1 = temp1 + temp2;
+
+ temp3 = (ip[5] * sinpi8sqrt2) >> 16;
+ temp4 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16);
+ c2 = temp3 - temp4;
+
+ temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16);
+ temp4 = (ip[7] * sinpi8sqrt2) >> 16;
+ d2 = temp3 + temp4;
+
+ op[0] = (a1 + d1 + 4) >> 3;
+ op[3] = (a1 - d1 + 4) >> 3;
+ op[1] = (b1 + c1 + 4) >> 3;
+ op[2] = (b1 - c1 + 4) >> 3;
+
+ a1 = ip[4] + ip[6];
+ b1 = ip[4] - ip[6];
+
+ op[4] = (a1 + d2 + 4) >> 3;
+ op[7] = (a1 - d2 + 4) >> 3;
+ op[5] = (b1 + c2 + 4) >> 3;
+ op[6] = (b1 - c2 + 4) >> 3;
+
+ a1 = ip[8] + ip[10];
+ b1 = ip[8] - ip[10];
+
+ temp1 = (ip[9] * sinpi8sqrt2) >> 16;
+ temp2 = ip[11] + ((ip[11] * cospi8sqrt2minus1) >> 16);
+ c1 = temp1 - temp2;
+
+ temp1 = ip[9] + ((ip[9] * cospi8sqrt2minus1) >> 16);
+ temp2 = (ip[11] * sinpi8sqrt2) >> 16;
+ d1 = temp1 + temp2;
+
+ temp3 = (ip[13] * sinpi8sqrt2) >> 16;
+ temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16);
+ c2 = temp3 - temp4;
+
+ temp3 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16);
+ temp4 = (ip[15] * sinpi8sqrt2) >> 16;
+ d2 = temp3 + temp4;
+
+ op[8] = (a1 + d1 + 4) >> 3;
+ op[11] = (a1 - d1 + 4) >> 3;
+ op[9] = (b1 + c1 + 4) >> 3;
+ op[10] = (b1 - c1 + 4) >> 3;
+
+ a1 = ip[12] + ip[14];
+ b1 = ip[12] - ip[14];
+
+ op[12] = (a1 + d2 + 4) >> 3;
+ op[15] = (a1 - d2 + 4) >> 3;
+ op[13] = (b1 + c2 + 4) >> 3;
+ op[14] = (b1 - c2 + 4) >> 3;
+
+ ip = output;
+
+ for (r = 0; r < 4; ++r) {
+ for (c = 0; c < 4; ++c) {
+ short a = ip[c] + pred_ptr[c];
+ dst_ptr[c] = cm[a];
+ }
+
+ ip += 4;
+ dst_ptr += dst_stride;
+ pred_ptr += pred_stride;
+ }
+}
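+
+/* Note: the two unrolled passes above apply the 1-D VP8 inverse transform
+ * first down the columns and then across the rows, with the final
+ * (x + 4) >> 3 providing the rounding shift; ff_cropTbl + CROP_WIDTH is
+ * used as a clamp-to-[0, 255] lookup when adding the prediction. */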
+
+void vp8_dc_only_idct_add_dspr2(short input_dc, unsigned char *pred_ptr,
+ int pred_stride, unsigned char *dst_ptr,
+ int dst_stride) {
+ int a1;
+ int i, absa1;
+ int t2, vector_a1, vector_a;
+
+ /* a1 = ((input_dc + 4) >> 3); */
+ __asm__ __volatile__(
+ "addi %[a1], %[input_dc], 4 \n\t"
+ "sra %[a1], %[a1], 3 \n\t"
+ : [a1] "=r"(a1)
+ : [input_dc] "r"(input_dc));
+
+ if (a1 < 0) {
+    /* use quad-byte operations;
+     * input and output memory are four-byte aligned
+     */
+ __asm__ __volatile__(
+ "abs %[absa1], %[a1] \n\t"
+ "replv.qb %[vector_a1], %[absa1] \n\t"
+ : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1)
+ : [a1] "r"(a1));
+
+    /* use (a1 - pred_ptr[c]) instead of (a1 + pred_ptr[c]) */
+ for (i = 4; i--;) {
+ __asm__ __volatile__(
+ "lw %[t2], 0(%[pred_ptr]) \n\t"
+ "add %[pred_ptr], %[pred_ptr], %[pred_stride] \n\t"
+ "subu_s.qb %[vector_a], %[t2], %[vector_a1] \n\t"
+ "sw %[vector_a], 0(%[dst_ptr]) \n\t"
+ "add %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
+ : [t2] "=&r"(t2), [vector_a] "=&r"(vector_a),
+ [dst_ptr] "+&r"(dst_ptr), [pred_ptr] "+&r"(pred_ptr)
+ : [dst_stride] "r"(dst_stride), [pred_stride] "r"(pred_stride),
+ [vector_a1] "r"(vector_a1));
+ }
+ } else {
+    /* use quad-byte operations;
+     * input and output memory are four-byte aligned
+     */
+ __asm__ __volatile__("replv.qb %[vector_a1], %[a1] \n\t"
+ : [vector_a1] "=r"(vector_a1)
+ : [a1] "r"(a1));
+
+ for (i = 4; i--;) {
+ __asm__ __volatile__(
+ "lw %[t2], 0(%[pred_ptr]) \n\t"
+ "add %[pred_ptr], %[pred_ptr], %[pred_stride] \n\t"
+ "addu_s.qb %[vector_a], %[vector_a1], %[t2] \n\t"
+ "sw %[vector_a], 0(%[dst_ptr]) \n\t"
+ "add %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
+ : [t2] "=&r"(t2), [vector_a] "=&r"(vector_a),
+ [dst_ptr] "+&r"(dst_ptr), [pred_ptr] "+&r"(pred_ptr)
+ : [dst_stride] "r"(dst_stride), [pred_stride] "r"(pred_stride),
+ [vector_a1] "r"(vector_a1));
+ }
+ }
+}
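+
+/* Note: addu_s.qb/subu_s.qb saturate per unsigned byte, so a negative DC
+ * offset cannot be added directly; the a1 < 0 branch replicates |a1| and
+ * subtracts it instead, producing the same clamped result that adding the
+ * signed value would. */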
+
+void vp8_short_inv_walsh4x4_dspr2(short *input, short *mb_dqcoeff) {
+ short output[16];
+ int i;
+ int a1, b1, c1, d1;
+ int a2, b2, c2, d2;
+ short *ip = input;
+ short *op = output;
+
+ prefetch_load_short(ip);
+
+ for (i = 4; i--;) {
+ a1 = ip[0] + ip[12];
+ b1 = ip[4] + ip[8];
+ c1 = ip[4] - ip[8];
+ d1 = ip[0] - ip[12];
+
+ op[0] = a1 + b1;
+ op[4] = c1 + d1;
+ op[8] = a1 - b1;
+ op[12] = d1 - c1;
+
+ ip++;
+ op++;
+ }
+
+ ip = output;
+ op = output;
+
+ prefetch_load_short(ip);
+
+ for (i = 4; i--;) {
+ a1 = ip[0] + ip[3] + 3;
+ b1 = ip[1] + ip[2];
+ c1 = ip[1] - ip[2];
+ d1 = ip[0] - ip[3] + 3;
+
+ a2 = a1 + b1;
+ b2 = d1 + c1;
+ c2 = a1 - b1;
+ d2 = d1 - c1;
+
+ op[0] = a2 >> 3;
+ op[1] = b2 >> 3;
+ op[2] = c2 >> 3;
+ op[3] = d2 >> 3;
+
+ ip += 4;
+ op += 4;
+ }
+
+ for (i = 0; i < 16; ++i) {
+ mb_dqcoeff[i * 16] = output[i];
+ }
+}
+
+void vp8_short_inv_walsh4x4_1_dspr2(short *input, short *mb_dqcoeff) {
+ int a1;
+
+ a1 = ((input[0] + 3) >> 3);
+
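+  /* mb_dqcoeff holds 16 contiguous 4x4 blocks of 16 shorts each, so the
+   * DC slot of block n is mb_dqcoeff[n * 16]; the stores below walk the
+   * byte offsets 0, 32, ..., 480. */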
+ __asm__ __volatile__(
+ "sh %[a1], 0(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 32(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 64(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 96(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 128(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 160(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 192(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 224(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 256(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 288(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 320(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 352(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 384(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 416(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 448(%[mb_dqcoeff]) \n\t"
+ "sh %[a1], 480(%[mb_dqcoeff]) \n\t"
+
+ :
+ : [a1] "r"(a1), [mb_dqcoeff] "r"(mb_dqcoeff));
+}
+
+#endif
diff --git a/media/libvpx/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c b/media/libvpx/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c
new file mode 100644
index 0000000000..e44ae29278
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vpx/vpx_integer.h"
+
+#if HAVE_DSPR2
+inline void prefetch_load_int(unsigned char *src) {
+ __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src));
+}
+
+__inline void vp8_copy_mem16x16_dspr2(unsigned char *RESTRICT src,
+ int src_stride,
+ unsigned char *RESTRICT dst,
+ int dst_stride) {
+ int r;
+ unsigned int a0, a1, a2, a3;
+
+ for (r = 16; r--;) {
+ /* load src data in cache memory */
+ prefetch_load_int(src + src_stride);
+
+    /* unaligned loads (ulw) from src; dst is assumed word-aligned for sw */
+ __asm__ __volatile__(
+ "ulw %[a0], 0(%[src]) \n\t"
+ "ulw %[a1], 4(%[src]) \n\t"
+ "ulw %[a2], 8(%[src]) \n\t"
+ "ulw %[a3], 12(%[src]) \n\t"
+ "sw %[a0], 0(%[dst]) \n\t"
+ "sw %[a1], 4(%[dst]) \n\t"
+ "sw %[a2], 8(%[dst]) \n\t"
+ "sw %[a3], 12(%[dst]) \n\t"
+ : [a0] "=&r"(a0), [a1] "=&r"(a1), [a2] "=&r"(a2), [a3] "=&r"(a3)
+ : [src] "r"(src), [dst] "r"(dst));
+
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
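+
+/* Note: each row is moved as whole 32-bit words; ulw tolerates an
+ * unaligned src, while dst is assumed word-aligned for the plain sw
+ * stores, and the prefetch primes the next row while this one copies. */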
+
+__inline void vp8_copy_mem8x8_dspr2(unsigned char *RESTRICT src, int src_stride,
+ unsigned char *RESTRICT dst,
+ int dst_stride) {
+ int r;
+ unsigned int a0, a1;
+
+ /* load src data in cache memory */
+ prefetch_load_int(src + src_stride);
+
+ for (r = 8; r--;) {
+    /* unaligned loads (ulw) from src; dst is assumed word-aligned for sw */
+ __asm__ __volatile__(
+ "ulw %[a0], 0(%[src]) \n\t"
+ "ulw %[a1], 4(%[src]) \n\t"
+ "sw %[a0], 0(%[dst]) \n\t"
+ "sw %[a1], 4(%[dst]) \n\t"
+ : [a0] "=&r"(a0), [a1] "=&r"(a1)
+ : [src] "r"(src), [dst] "r"(dst));
+
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+__inline void vp8_copy_mem8x4_dspr2(unsigned char *RESTRICT src, int src_stride,
+ unsigned char *RESTRICT dst,
+ int dst_stride) {
+ int r;
+ unsigned int a0, a1;
+
+ /* load src data in cache memory */
+ prefetch_load_int(src + src_stride);
+
+ for (r = 4; r--;) {
+    /* unaligned loads (ulw) from src; dst is assumed word-aligned for sw */
+ __asm__ __volatile__(
+ "ulw %[a0], 0(%[src]) \n\t"
+ "ulw %[a1], 4(%[src]) \n\t"
+ "sw %[a0], 0(%[dst]) \n\t"
+ "sw %[a1], 4(%[dst]) \n\t"
+ : [a0] "=&r"(a0), [a1] "=&r"(a1)
+ : [src] "r"(src), [dst] "r"(dst));
+
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+#endif
diff --git a/media/libvpx/libvpx/vp8/common/mips/dspr2/vp8_loopfilter_filters_dspr2.c b/media/libvpx/libvpx/vp8/common/mips/dspr2/vp8_loopfilter_filters_dspr2.c
new file mode 100644
index 0000000000..21446fb413
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/dspr2/vp8_loopfilter_filters_dspr2.c
@@ -0,0 +1,2401 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include "vp8_rtcd.h"
+#include "vp8/common/onyxc_int.h"
+
+#if HAVE_DSPR2
+typedef unsigned char uc;
+
+/* prefetch data for load */
+inline void prefetch_load_lf(unsigned char *src) {
+ __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src));
+}
+
+/* prefetch data for store */
+inline void prefetch_store_lf(unsigned char *dst) {
+ __asm__ __volatile__("pref 1, 0(%[dst]) \n\t" : : [dst] "r"(dst));
+}
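+
+/* The pref hints used above are 0 (data expected to be loaded) and 1
+ * (data expected to be stored); both are pure performance hints with no
+ * architectural side effects. */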
+
+/* process 4 pixels at the same time;
+ * compute hev and mask in the same function
+ */
+static __inline void vp8_filter_mask_vec_mips(
+ uint32_t limit, uint32_t flimit, uint32_t p1, uint32_t p0, uint32_t p3,
+ uint32_t p2, uint32_t q0, uint32_t q1, uint32_t q2, uint32_t q3,
+ uint32_t thresh, uint32_t *hev, uint32_t *mask) {
+ uint32_t c, r, r3, r_k;
+ uint32_t s1, s2, s3;
+ uint32_t ones = 0xFFFFFFFF;
+ uint32_t hev1;
+
+ __asm__ __volatile__(
+ /* mask |= (abs(p3 - p2) > limit) */
+ "subu_s.qb %[c], %[p3], %[p2] \n\t"
+ "subu_s.qb %[r_k], %[p2], %[p3] \n\t"
+ "or %[r_k], %[r_k], %[c] \n\t"
+ "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t"
+ "or %[r], $0, %[c] \n\t"
+
+ /* mask |= (abs(p2 - p1) > limit) */
+ "subu_s.qb %[c], %[p2], %[p1] \n\t"
+ "subu_s.qb %[r_k], %[p1], %[p2] \n\t"
+ "or %[r_k], %[r_k], %[c] \n\t"
+ "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t"
+ "or %[r], %[r], %[c] \n\t"
+
+ /* mask |= (abs(p1 - p0) > limit)
+ * hev |= (abs(p1 - p0) > thresh)
+ */
+ "subu_s.qb %[c], %[p1], %[p0] \n\t"
+ "subu_s.qb %[r_k], %[p0], %[p1] \n\t"
+ "or %[r_k], %[r_k], %[c] \n\t"
+ "cmpgu.lt.qb %[c], %[thresh], %[r_k] \n\t"
+ "or %[r3], $0, %[c] \n\t"
+ "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t"
+ "or %[r], %[r], %[c] \n\t"
+
+ /* mask |= (abs(q1 - q0) > limit)
+ * hev |= (abs(q1 - q0) > thresh)
+ */
+ "subu_s.qb %[c], %[q1], %[q0] \n\t"
+ "subu_s.qb %[r_k], %[q0], %[q1] \n\t"
+ "or %[r_k], %[r_k], %[c] \n\t"
+ "cmpgu.lt.qb %[c], %[thresh], %[r_k] \n\t"
+ "or %[r3], %[r3], %[c] \n\t"
+ "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t"
+ "or %[r], %[r], %[c] \n\t"
+
+ /* mask |= (abs(q2 - q1) > limit) */
+ "subu_s.qb %[c], %[q2], %[q1] \n\t"
+ "subu_s.qb %[r_k], %[q1], %[q2] \n\t"
+ "or %[r_k], %[r_k], %[c] \n\t"
+ "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t"
+ "or %[r], %[r], %[c] \n\t"
+ "sll %[r3], %[r3], 24 \n\t"
+
+ /* mask |= (abs(q3 - q2) > limit) */
+ "subu_s.qb %[c], %[q3], %[q2] \n\t"
+ "subu_s.qb %[r_k], %[q2], %[q3] \n\t"
+ "or %[r_k], %[r_k], %[c] \n\t"
+ "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t"
+ "or %[r], %[r], %[c] \n\t"
+
+ : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r3] "=&r"(r3)
+ : [limit] "r"(limit), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1),
+ [p0] "r"(p0), [q1] "r"(q1), [q0] "r"(q0), [q2] "r"(q2), [q3] "r"(q3),
+ [thresh] "r"(thresh));
+
+ __asm__ __volatile__(
+ /* abs(p0 - q0) */
+ "subu_s.qb %[c], %[p0], %[q0] \n\t"
+ "subu_s.qb %[r_k], %[q0], %[p0] \n\t"
+ "wrdsp %[r3] \n\t"
+ "or %[s1], %[r_k], %[c] \n\t"
+
+ /* abs(p1 - q1) */
+ "subu_s.qb %[c], %[p1], %[q1] \n\t"
+ "addu_s.qb %[s3], %[s1], %[s1] \n\t"
+ "pick.qb %[hev1], %[ones], $0 \n\t"
+ "subu_s.qb %[r_k], %[q1], %[p1] \n\t"
+ "or %[s2], %[r_k], %[c] \n\t"
+
+ /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > flimit * 2 + limit */
+ "shrl.qb %[s2], %[s2], 1 \n\t"
+ "addu_s.qb %[s1], %[s2], %[s3] \n\t"
+ "cmpgu.lt.qb %[c], %[flimit], %[s1] \n\t"
+ "or %[r], %[r], %[c] \n\t"
+ "sll %[r], %[r], 24 \n\t"
+
+ "wrdsp %[r] \n\t"
+ "pick.qb %[s2], $0, %[ones] \n\t"
+
+ : [c] "=&r"(c), [r_k] "=&r"(r_k), [s1] "=&r"(s1), [hev1] "=&r"(hev1),
+ [s2] "=&r"(s2), [r] "+r"(r), [s3] "=&r"(s3)
+ : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [r3] "r"(r3), [q1] "r"(q1),
+ [ones] "r"(ones), [flimit] "r"(flimit));
+
+ *hev = hev1;
+ *mask = s2;
+}
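+
+/* For reference, a scalar sketch of the per-pixel test the vectorized code
+ * above implements, modeled on vp8's generic C loop filter (this assumes
+ * the flimit argument arrives already combined as flimit * 2 + limit, per
+ * the comment in the asm):
+ *
+ *   filt = (abs(p3 - p2) > limit) | (abs(p2 - p1) > limit) |
+ *          (abs(p1 - p0) > limit) | (abs(q1 - q0) > limit) |
+ *          (abs(q2 - q1) > limit) | (abs(q3 - q2) > limit) |
+ *          (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > flimit);
+ *   hev  = (abs(p1 - p0) > thresh) | (abs(q1 - q0) > thresh);
+ *
+ * The wrdsp/pick.qb pairs widen these booleans to bytes: *mask is 0xFF
+ * where filt is false (filtering allowed) and *hev is 0xFF where the
+ * high-edge-variance threshold is exceeded. */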
+
+/* inputs & outputs are quad-byte vectors */
+static __inline void vp8_filter_mips(uint32_t mask, uint32_t hev, uint32_t *ps1,
+ uint32_t *ps0, uint32_t *qs0,
+ uint32_t *qs1) {
+ int32_t vp8_filter_l, vp8_filter_r;
+ int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r;
+ int32_t subr_r, subr_l;
+ uint32_t t1, t2, HWM, t3;
+ uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r;
+
+ int32_t vps1, vps0, vqs0, vqs1;
+ int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r;
+ uint32_t N128;
+
+ N128 = 0x80808080;
+ t1 = 0x03000300;
+ t2 = 0x04000400;
+ t3 = 0x01000100;
+ HWM = 0xFF00FF00;
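+
+  /* N128 toggles each byte between unsigned and signed representation;
+   * t1, t2 and t3 hold the filter's +3, +4 and +1 rounding constants in
+   * the high byte of each halfword, matching the halfword-pair layout
+   * selected by the HWM mask below. */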
+
+ vps0 = (*ps0) ^ N128;
+ vps1 = (*ps1) ^ N128;
+ vqs0 = (*qs0) ^ N128;
+ vqs1 = (*qs1) ^ N128;
+
+  /* use halfword pairs instead of quad-bytes to keep enough precision */
+ vps0_l = vps0 & HWM;
+ vps0_r = vps0 << 8;
+ vps0_r = vps0_r & HWM;
+
+ vps1_l = vps1 & HWM;
+ vps1_r = vps1 << 8;
+ vps1_r = vps1_r & HWM;
+
+ vqs0_l = vqs0 & HWM;
+ vqs0_r = vqs0 << 8;
+ vqs0_r = vqs0_r & HWM;
+
+ vqs1_l = vqs1 & HWM;
+ vqs1_r = vqs1 << 8;
+ vqs1_r = vqs1_r & HWM;
+
+ mask_l = mask & HWM;
+ mask_r = mask << 8;
+ mask_r = mask_r & HWM;
+
+ hev_l = hev & HWM;
+ hev_r = hev << 8;
+ hev_r = hev_r & HWM;
+
+ __asm__ __volatile__(
+ /* vp8_filter = vp8_signed_char_clamp(ps1 - qs1); */
+ "subq_s.ph %[vp8_filter_l], %[vps1_l], %[vqs1_l] \n\t"
+ "subq_s.ph %[vp8_filter_r], %[vps1_r], %[vqs1_r] \n\t"
+
+ /* qs0 - ps0 */
+ "subq_s.ph %[subr_l], %[vqs0_l], %[vps0_l] \n\t"
+ "subq_s.ph %[subr_r], %[vqs0_r], %[vps0_r] \n\t"
+
+ /* vp8_filter &= hev; */
+ "and %[vp8_filter_l], %[vp8_filter_l], %[hev_l] \n\t"
+ "and %[vp8_filter_r], %[vp8_filter_r], %[hev_r] \n\t"
+
+ /* vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0)); */
+ "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t"
+ "xor %[invhev_l], %[hev_l], %[HWM] \n\t"
+ "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t"
+ "xor %[invhev_r], %[hev_r], %[HWM] \n\t"
+ "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t"
+
+ /* vp8_filter &= mask; */
+ "and %[vp8_filter_l], %[vp8_filter_l], %[mask_l] \n\t"
+ "and %[vp8_filter_r], %[vp8_filter_r], %[mask_r] \n\t"
+
+ : [vp8_filter_l] "=&r"(vp8_filter_l), [vp8_filter_r] "=&r"(vp8_filter_r),
+ [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r),
+ [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r)
+
+ : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l),
+ [vps1_r] "r"(vps1_r), [vqs0_l] "r"(vqs0_l), [vqs0_r] "r"(vqs0_r),
+ [vqs1_l] "r"(vqs1_l), [vqs1_r] "r"(vqs1_r), [mask_l] "r"(mask_l),
+ [mask_r] "r"(mask_r), [hev_l] "r"(hev_l), [hev_r] "r"(hev_r),
+ [HWM] "r"(HWM));
+
+ /* save bottom 3 bits so that we round one side +4 and the other +3 */
+ __asm__ __volatile__(
+      /* Filter1 = vp8_signed_char_clamp(vp8_filter + 4) >> 3; */
+ "addq_s.ph %[Filter1_l], %[vp8_filter_l], %[t2] \n\t"
+ "addq_s.ph %[Filter1_r], %[vp8_filter_r], %[t2] \n\t"
+
+      /* Filter2 = vp8_signed_char_clamp(vp8_filter + 3) >> 3; */
+ "addq_s.ph %[Filter2_l], %[vp8_filter_l], %[t1] \n\t"
+ "addq_s.ph %[Filter2_r], %[vp8_filter_r], %[t1] \n\t"
+ "shra.ph %[Filter1_r], %[Filter1_r], 3 \n\t"
+ "shra.ph %[Filter1_l], %[Filter1_l], 3 \n\t"
+
+ "shra.ph %[Filter2_l], %[Filter2_l], 3 \n\t"
+ "shra.ph %[Filter2_r], %[Filter2_r], 3 \n\t"
+
+ "and %[Filter1_l], %[Filter1_l], %[HWM] \n\t"
+ "and %[Filter1_r], %[Filter1_r], %[HWM] \n\t"
+
+ /* vps0 = vp8_signed_char_clamp(ps0 + Filter2); */
+ "addq_s.ph %[vps0_l], %[vps0_l], %[Filter2_l] \n\t"
+ "addq_s.ph %[vps0_r], %[vps0_r], %[Filter2_r] \n\t"
+
+ /* vqs0 = vp8_signed_char_clamp(qs0 - Filter1); */
+ "subq_s.ph %[vqs0_l], %[vqs0_l], %[Filter1_l] \n\t"
+ "subq_s.ph %[vqs0_r], %[vqs0_r], %[Filter1_r] \n\t"
+
+ : [Filter1_l] "=&r"(Filter1_l), [Filter1_r] "=&r"(Filter1_r),
+ [Filter2_l] "=&r"(Filter2_l), [Filter2_r] "=&r"(Filter2_r),
+ [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
+ [vqs0_r] "+r"(vqs0_r)
+
+ : [t1] "r"(t1), [t2] "r"(t2), [vp8_filter_l] "r"(vp8_filter_l),
+ [vp8_filter_r] "r"(vp8_filter_r), [HWM] "r"(HWM));
+
+ __asm__ __volatile__(
+      /* vp8_filter = (vp8_filter + 1) >> 1 */
+ "addqh.ph %[Filter1_l], %[Filter1_l], %[t3] \n\t"
+ "addqh.ph %[Filter1_r], %[Filter1_r], %[t3] \n\t"
+
+ /* vp8_filter &= ~hev; */
+ "and %[Filter1_l], %[Filter1_l], %[invhev_l] \n\t"
+ "and %[Filter1_r], %[Filter1_r], %[invhev_r] \n\t"
+
+ /* vps1 = vp8_signed_char_clamp(ps1 + vp8_filter); */
+ "addq_s.ph %[vps1_l], %[vps1_l], %[Filter1_l] \n\t"
+ "addq_s.ph %[vps1_r], %[vps1_r], %[Filter1_r] \n\t"
+
+ /* vqs1 = vp8_signed_char_clamp(qs1 - vp8_filter); */
+ "subq_s.ph %[vqs1_l], %[vqs1_l], %[Filter1_l] \n\t"
+ "subq_s.ph %[vqs1_r], %[vqs1_r], %[Filter1_r] \n\t"
+
+ : [Filter1_l] "+r"(Filter1_l), [Filter1_r] "+r"(Filter1_r),
+ [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l),
+ [vqs1_r] "+r"(vqs1_r)
+
+ : [t3] "r"(t3), [invhev_l] "r"(invhev_l), [invhev_r] "r"(invhev_r));
+
+ /* Create quad-bytes from halfword pairs */
+ vqs0_l = vqs0_l & HWM;
+ vqs1_l = vqs1_l & HWM;
+ vps0_l = vps0_l & HWM;
+ vps1_l = vps1_l & HWM;
+
+ __asm__ __volatile__(
+ "shrl.ph %[vqs0_r], %[vqs0_r], 8 \n\t"
+ "shrl.ph %[vps0_r], %[vps0_r], 8 \n\t"
+ "shrl.ph %[vqs1_r], %[vqs1_r], 8 \n\t"
+ "shrl.ph %[vps1_r], %[vps1_r], 8 \n\t"
+
+ : [vps1_r] "+r"(vps1_r), [vqs1_r] "+r"(vqs1_r), [vps0_r] "+r"(vps0_r),
+ [vqs0_r] "+r"(vqs0_r)
+ :);
+
+ vqs0 = vqs0_l | vqs0_r;
+ vqs1 = vqs1_l | vqs1_r;
+ vps0 = vps0_l | vps0_r;
+ vps1 = vps1_l | vps1_r;
+
+ *ps0 = vps0 ^ N128;
+ *ps1 = vps1 ^ N128;
+ *qs0 = vqs0 ^ N128;
+ *qs1 = vqs1 ^ N128;
+}
+
+void vp8_loop_filter_horizontal_edge_mips(unsigned char *s, int p,
+ unsigned int flimit,
+ unsigned int limit,
+ unsigned int thresh, int count) {
+ uint32_t mask;
+ uint32_t hev;
+ uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+ unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
+ (void)count;
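+
+  /* count is unused here: the Y plane's horizontal edge is always 16
+   * pixels wide and is handled as four unrolled 4-pixel groups. */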
+
+ mask = 0;
+ hev = 0;
+ p1 = 0;
+ p2 = 0;
+ p3 = 0;
+ p4 = 0;
+
+ /* prefetch data for store */
+ prefetch_store_lf(s);
+
+ /* loop filter designed to work using chars so that we can make maximum use
+ * of 8 bit simd instructions.
+ */
+
+ sm1 = s - (p << 2);
+ s0 = s - p - p - p;
+ s1 = s - p - p;
+ s2 = s - p;
+ s3 = s;
+ s4 = s + p;
+ s5 = s + p + p;
+ s6 = s + p + p + p;
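+
+  /* sm1, s0, s1, s2 are the four rows above the edge and s3, s4, s5, s6
+   * the rows at and below it; only the two rows on either side (s1..s4)
+   * are written back by the filter. */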
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p1 = *((uint32_t *)(s1));
+ p2 = *((uint32_t *)(s2));
+ p3 = *((uint32_t *)(s3));
+ p4 = *((uint32_t *)(s4));
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ pm1 = *((uint32_t *)(sm1));
+ p0 = *((uint32_t *)(s0));
+ p5 = *((uint32_t *)(s5));
+ p6 = *((uint32_t *)(s6));
+
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+ /* unpack processed 4x4 neighborhood */
+ *((uint32_t *)s1) = p1;
+ *((uint32_t *)s2) = p2;
+ *((uint32_t *)s3) = p3;
+ *((uint32_t *)s4) = p4;
+ }
+ }
+
+ sm1 += 4;
+ s0 += 4;
+ s1 += 4;
+ s2 += 4;
+ s3 += 4;
+ s4 += 4;
+ s5 += 4;
+ s6 += 4;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p1 = *((uint32_t *)(s1));
+ p2 = *((uint32_t *)(s2));
+ p3 = *((uint32_t *)(s3));
+ p4 = *((uint32_t *)(s4));
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ pm1 = *((uint32_t *)(sm1));
+ p0 = *((uint32_t *)(s0));
+ p5 = *((uint32_t *)(s5));
+ p6 = *((uint32_t *)(s6));
+
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+ /* unpack processed 4x4 neighborhood */
+ *((uint32_t *)s1) = p1;
+ *((uint32_t *)s2) = p2;
+ *((uint32_t *)s3) = p3;
+ *((uint32_t *)s4) = p4;
+ }
+ }
+
+ sm1 += 4;
+ s0 += 4;
+ s1 += 4;
+ s2 += 4;
+ s3 += 4;
+ s4 += 4;
+ s5 += 4;
+ s6 += 4;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p1 = *((uint32_t *)(s1));
+ p2 = *((uint32_t *)(s2));
+ p3 = *((uint32_t *)(s3));
+ p4 = *((uint32_t *)(s4));
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ pm1 = *((uint32_t *)(sm1));
+ p0 = *((uint32_t *)(s0));
+ p5 = *((uint32_t *)(s5));
+ p6 = *((uint32_t *)(s6));
+
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+ /* unpack processed 4x4 neighborhood */
+ *((uint32_t *)s1) = p1;
+ *((uint32_t *)s2) = p2;
+ *((uint32_t *)s3) = p3;
+ *((uint32_t *)s4) = p4;
+ }
+ }
+
+ sm1 += 4;
+ s0 += 4;
+ s1 += 4;
+ s2 += 4;
+ s3 += 4;
+ s4 += 4;
+ s5 += 4;
+ s6 += 4;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p1 = *((uint32_t *)(s1));
+ p2 = *((uint32_t *)(s2));
+ p3 = *((uint32_t *)(s3));
+ p4 = *((uint32_t *)(s4));
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ pm1 = *((uint32_t *)(sm1));
+ p0 = *((uint32_t *)(s0));
+ p5 = *((uint32_t *)(s5));
+ p6 = *((uint32_t *)(s6));
+
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+ /* unpack processed 4x4 neighborhood */
+ *((uint32_t *)s1) = p1;
+ *((uint32_t *)s2) = p2;
+ *((uint32_t *)s3) = p3;
+ *((uint32_t *)s4) = p4;
+ }
+ }
+}
+
+void vp8_loop_filter_uvhorizontal_edge_mips(unsigned char *s, int p,
+ unsigned int flimit,
+ unsigned int limit,
+ unsigned int thresh, int count) {
+ uint32_t mask;
+ uint32_t hev;
+ uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+ unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
+ (void)count;
+
+ mask = 0;
+ hev = 0;
+ p1 = 0;
+ p2 = 0;
+ p3 = 0;
+ p4 = 0;
+
+ /* loop filter designed to work using chars so that we can make maximum use
+ * of 8 bit simd instructions.
+ */
+
+ sm1 = s - (p << 2);
+ s0 = s - p - p - p;
+ s1 = s - p - p;
+ s2 = s - p;
+ s3 = s;
+ s4 = s + p;
+ s5 = s + p + p;
+ s6 = s + p + p + p;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p1 = *((uint32_t *)(s1));
+ p2 = *((uint32_t *)(s2));
+ p3 = *((uint32_t *)(s3));
+ p4 = *((uint32_t *)(s4));
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ pm1 = *((uint32_t *)(sm1));
+ p0 = *((uint32_t *)(s0));
+ p5 = *((uint32_t *)(s5));
+ p6 = *((uint32_t *)(s6));
+
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+ /* unpack processed 4x4 neighborhood */
+ *((uint32_t *)s1) = p1;
+ *((uint32_t *)s2) = p2;
+ *((uint32_t *)s3) = p3;
+ *((uint32_t *)s4) = p4;
+ }
+ }
+
+ sm1 += 4;
+ s0 += 4;
+ s1 += 4;
+ s2 += 4;
+ s3 += 4;
+ s4 += 4;
+ s5 += 4;
+ s6 += 4;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p1 = *((uint32_t *)(s1));
+ p2 = *((uint32_t *)(s2));
+ p3 = *((uint32_t *)(s3));
+ p4 = *((uint32_t *)(s4));
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ pm1 = *((uint32_t *)(sm1));
+ p0 = *((uint32_t *)(s0));
+ p5 = *((uint32_t *)(s5));
+ p6 = *((uint32_t *)(s6));
+
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+ /* unpack processed 4x4 neighborhood */
+ *((uint32_t *)s1) = p1;
+ *((uint32_t *)s2) = p2;
+ *((uint32_t *)s3) = p3;
+ *((uint32_t *)s4) = p4;
+ }
+ }
+}
+
+void vp8_loop_filter_vertical_edge_mips(unsigned char *s, int p,
+ const unsigned int flimit,
+ const unsigned int limit,
+ const unsigned int thresh, int count) {
+ int i;
+ uint32_t mask, hev;
+ uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+ unsigned char *s1, *s2, *s3, *s4;
+ uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+
+ hev = 0;
+ mask = 0;
+ i = 0;
+ pm1 = 0;
+ p0 = 0;
+ p1 = 0;
+ p2 = 0;
+ p3 = 0;
+ p4 = 0;
+ p5 = 0;
+ p6 = 0;
+
+ /* loop filter designed to work using chars so that we can make maximum use
+ * of 8 bit simd instructions.
+ */
+
+  /* apply filter on 4 pixels at the same time */
+ do {
+ /* prefetch data for store */
+ prefetch_store_lf(s + p);
+
+ s1 = s;
+ s2 = s + p;
+ s3 = s2 + p;
+ s4 = s3 + p;
+ s = s4 + p;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p2 = *((uint32_t *)(s1 - 4));
+ p6 = *((uint32_t *)(s1));
+ p1 = *((uint32_t *)(s2 - 4));
+ p5 = *((uint32_t *)(s2));
+ p0 = *((uint32_t *)(s3 - 4));
+ p4 = *((uint32_t *)(s3));
+ pm1 = *((uint32_t *)(s4 - 4));
+ p3 = *((uint32_t *)(s4));
+
+ /* transpose pm1, p0, p1, p2 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t"
+ "precr.qb.ph %[prim2], %[p2], %[p1] \n\t"
+ "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t"
+ "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t"
+
+ "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p2], %[p1], %[sec3] \n\t"
+ "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t"
+ "append %[p1], %[sec3], 16 \n\t"
+ "append %[pm1], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0),
+ [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* transpose p3, p4, p5, p6 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t"
+ "precr.qb.ph %[prim2], %[p6], %[p5] \n\t"
+ "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t"
+ "precr.qb.ph %[prim4], %[p4], %[p3] \n\t"
+
+ "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p6], %[p5], %[sec3] \n\t"
+ "precrq.ph.w %[p4], %[p3], %[sec4] \n\t"
+ "append %[p5], %[sec3], 16 \n\t"
+ "append %[p3], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4),
+ [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
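+    /* Note: the two precrq/precr sequences above amount to a pair of 4x4
+     * byte transposes, so pm1..p2 and p3..p6 now each hold one column of
+     * the loaded block and the same quad-byte filter used for horizontal
+     * edges can process this vertical edge. */
+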
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+      /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+ /* unpack processed 4x4 neighborhood
+ * don't use transpose on output data
+ * because memory isn't aligned
+ */
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s4]) \n\t"
+ "sb %[p3], 0(%[s4]) \n\t"
+ "sb %[p2], -1(%[s4]) \n\t"
+ "sb %[p1], -2(%[s4]) \n\t"
+ :
+ : [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4), [p2] "r"(p2),
+ [p1] "r"(p1));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s3]) \n\t"
+ "sb %[p3], 0(%[s3]) \n\t"
+ "sb %[p2], -1(%[s3]) \n\t"
+ "sb %[p1], -2(%[s3]) \n\t"
+ : [p1] "+r"(p1)
+ : [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), [p2] "r"(p2));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s2]) \n\t"
+ "sb %[p3], 0(%[s2]) \n\t"
+ "sb %[p2], -1(%[s2]) \n\t"
+ "sb %[p1], -2(%[s2]) \n\t"
+ :
+ : [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), [p2] "r"(p2),
+ [p1] "r"(p1));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s1]) \n\t"
+ "sb %[p3], 0(%[s1]) \n\t"
+ "sb %[p2], -1(%[s1]) \n\t"
+ "sb %[p1], -2(%[s1]) \n\t"
+ :
+ : [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), [p2] "r"(p2),
+ [p1] "r"(p1));
+ }
+ }
+
+ s1 = s;
+ s2 = s + p;
+ s3 = s2 + p;
+ s4 = s3 + p;
+ s = s4 + p;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p2 = *((uint32_t *)(s1 - 4));
+ p6 = *((uint32_t *)(s1));
+ p1 = *((uint32_t *)(s2 - 4));
+ p5 = *((uint32_t *)(s2));
+ p0 = *((uint32_t *)(s3 - 4));
+ p4 = *((uint32_t *)(s3));
+ pm1 = *((uint32_t *)(s4 - 4));
+ p3 = *((uint32_t *)(s4));
+
+ /* transpose pm1, p0, p1, p2 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t"
+ "precr.qb.ph %[prim2], %[p2], %[p1] \n\t"
+ "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t"
+ "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t"
+
+ "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p2], %[p1], %[sec3] \n\t"
+ "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t"
+ "append %[p1], %[sec3], 16 \n\t"
+ "append %[pm1], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0),
+ [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* transpose p3, p4, p5, p6 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t"
+ "precr.qb.ph %[prim2], %[p6], %[p5] \n\t"
+ "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t"
+ "precr.qb.ph %[prim4], %[p4], %[p3] \n\t"
+
+ "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p6], %[p5], %[sec3] \n\t"
+ "precrq.ph.w %[p4], %[p3], %[sec4] \n\t"
+ "append %[p5], %[sec3], 16 \n\t"
+ "append %[p3], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4),
+ [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+      /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+ /* unpack processed 4x4 neighborhood
+ * don't use transpose on output data
+ * because memory isn't aligned
+ */
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s4]) \n\t"
+ "sb %[p3], 0(%[s4]) \n\t"
+ "sb %[p2], -1(%[s4]) \n\t"
+ "sb %[p1], -2(%[s4]) \n\t"
+ :
+ : [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4), [p2] "r"(p2),
+ [p1] "r"(p1));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s3]) \n\t"
+ "sb %[p3], 0(%[s3]) \n\t"
+ "sb %[p2], -1(%[s3]) \n\t"
+ "sb %[p1], -2(%[s3]) \n\t"
+ : [p1] "+r"(p1)
+ : [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), [p2] "r"(p2));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s2]) \n\t"
+ "sb %[p3], 0(%[s2]) \n\t"
+ "sb %[p2], -1(%[s2]) \n\t"
+ "sb %[p1], -2(%[s2]) \n\t"
+ :
+ : [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), [p2] "r"(p2),
+ [p1] "r"(p1));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s1]) \n\t"
+ "sb %[p3], 0(%[s1]) \n\t"
+ "sb %[p2], -1(%[s1]) \n\t"
+ "sb %[p1], -2(%[s1]) \n\t"
+ :
+ : [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), [p2] "r"(p2),
+ [p1] "r"(p1));
+ }
+ }
+
+ i += 8;
+  } while (i < count);
+}
+
+void vp8_loop_filter_uvvertical_edge_mips(unsigned char *s, int p,
+ unsigned int flimit,
+ unsigned int limit,
+ unsigned int thresh, int count) {
+ uint32_t mask, hev;
+ uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+ unsigned char *s1, *s2, *s3, *s4;
+ uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+ (void)count;
+
+ /* loop filter designed to work using chars so that we can make maximum use
+ * of 8 bit simd instructions.
+ */
+
+  /* apply filter on 4 pixels at the same time */
+
+ s1 = s;
+ s2 = s + p;
+ s3 = s2 + p;
+ s4 = s3 + p;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p2 = *((uint32_t *)(s1 - 4));
+ p6 = *((uint32_t *)(s1));
+ p1 = *((uint32_t *)(s2 - 4));
+ p5 = *((uint32_t *)(s2));
+ p0 = *((uint32_t *)(s3 - 4));
+ p4 = *((uint32_t *)(s3));
+ pm1 = *((uint32_t *)(s4 - 4));
+ p3 = *((uint32_t *)(s4));
+
+ /* transpose pm1, p0, p1, p2 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t"
+ "precr.qb.ph %[prim2], %[p2], %[p1] \n\t"
+ "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t"
+ "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t"
+
+ "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p2], %[p1], %[sec3] \n\t"
+ "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t"
+ "append %[p1], %[sec3], 16 \n\t"
+ "append %[pm1], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0),
+ [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* transpose p3, p4, p5, p6 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t"
+ "precr.qb.ph %[prim2], %[p6], %[p5] \n\t"
+ "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t"
+ "precr.qb.ph %[prim4], %[p4], %[p3] \n\t"
+
+ "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p6], %[p5], %[sec3] \n\t"
+ "precrq.ph.w %[p4], %[p3], %[sec4] \n\t"
+ "append %[p5], %[sec3], 16 \n\t"
+ "append %[p3], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4),
+ [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+ /* unpack processed 4x4 neighborhood
+ * don't use transpose on output data
+ * because memory isn't aligned
+ */
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s4]) \n\t"
+ "sb %[p3], 0(%[s4]) \n\t"
+ "sb %[p2], -1(%[s4]) \n\t"
+ "sb %[p1], -2(%[s4]) \n\t"
+ :
+ :
+ [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4), [p2] "r"(p2), [p1] "r"(p1));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s3]) \n\t"
+ "sb %[p3], 0(%[s3]) \n\t"
+ "sb %[p2], -1(%[s3]) \n\t"
+ "sb %[p1], -2(%[s3]) \n\t"
+ : [p1] "+r"(p1)
+ : [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), [p2] "r"(p2));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s2]) \n\t"
+ "sb %[p3], 0(%[s2]) \n\t"
+ "sb %[p2], -1(%[s2]) \n\t"
+ "sb %[p1], -2(%[s2]) \n\t"
+ :
+ :
+ [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), [p2] "r"(p2), [p1] "r"(p1));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s1]) \n\t"
+ "sb %[p3], 0(%[s1]) \n\t"
+ "sb %[p2], -1(%[s1]) \n\t"
+ "sb %[p1], -2(%[s1]) \n\t"
+ :
+ :
+ [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), [p2] "r"(p2), [p1] "r"(p1));
+ }
+ }
+
+ s1 = s4 + p;
+ s2 = s1 + p;
+ s3 = s2 + p;
+ s4 = s3 + p;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p2 = *((uint32_t *)(s1 - 4));
+ p6 = *((uint32_t *)(s1));
+ p1 = *((uint32_t *)(s2 - 4));
+ p5 = *((uint32_t *)(s2));
+ p0 = *((uint32_t *)(s3 - 4));
+ p4 = *((uint32_t *)(s3));
+ pm1 = *((uint32_t *)(s4 - 4));
+ p3 = *((uint32_t *)(s4));
+
+ /* transpose pm1, p0, p1, p2 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t"
+ "precr.qb.ph %[prim2], %[p2], %[p1] \n\t"
+ "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t"
+ "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t"
+
+ "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p2], %[p1], %[sec3] \n\t"
+ "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t"
+ "append %[p1], %[sec3], 16 \n\t"
+ "append %[pm1], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0),
+ [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* transpose p3, p4, p5, p6 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t"
+ "precr.qb.ph %[prim2], %[p6], %[p5] \n\t"
+ "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t"
+ "precr.qb.ph %[prim4], %[p4], %[p3] \n\t"
+
+ "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p6], %[p5], %[sec3] \n\t"
+ "precrq.ph.w %[p4], %[p3], %[sec4] \n\t"
+ "append %[p5], %[sec3], 16 \n\t"
+ "append %[p3], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4),
+ [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+ /* unpack processed 4x4 neighborhood
+ * don't use transpose on output data
+ * because memory isn't aligned
+ */
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s4]) \n\t"
+ "sb %[p3], 0(%[s4]) \n\t"
+ "sb %[p2], -1(%[s4]) \n\t"
+ "sb %[p1], -2(%[s4]) \n\t"
+ :
+ :
+ [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4), [p2] "r"(p2), [p1] "r"(p1));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s3]) \n\t"
+ "sb %[p3], 0(%[s3]) \n\t"
+ "sb %[p2], -1(%[s3]) \n\t"
+ "sb %[p1], -2(%[s3]) \n\t"
+ : [p1] "+r"(p1)
+ : [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), [p2] "r"(p2));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s2]) \n\t"
+ "sb %[p3], 0(%[s2]) \n\t"
+ "sb %[p2], -1(%[s2]) \n\t"
+ "sb %[p1], -2(%[s2]) \n\t"
+ :
+ :
+ [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), [p2] "r"(p2), [p1] "r"(p1));
+
+ __asm__ __volatile__(
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p4], 1(%[s1]) \n\t"
+ "sb %[p3], 0(%[s1]) \n\t"
+ "sb %[p2], -1(%[s1]) \n\t"
+ "sb %[p1], -2(%[s1]) \n\t"
+ :
+ :
+ [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), [p2] "r"(p2), [p1] "r"(p1));
+ }
+ }
+}
+
+/* inputs & outputs are quad-byte vectors */
+static __inline void vp8_mbfilter_mips(uint32_t mask, uint32_t hev,
+ uint32_t *ps2, uint32_t *ps1,
+ uint32_t *ps0, uint32_t *qs0,
+ uint32_t *qs1, uint32_t *qs2) {
+ int32_t vps2, vps1, vps0, vqs0, vqs1, vqs2;
+ int32_t vps2_l, vps1_l, vps0_l, vqs0_l, vqs1_l, vqs2_l;
+ int32_t vps2_r, vps1_r, vps0_r, vqs0_r, vqs1_r, vqs2_r;
+ uint32_t HWM, vp8_filter_l, vp8_filter_r, mask_l, mask_r, hev_l, hev_r,
+ subr_r, subr_l;
+ uint32_t Filter2_l, Filter2_r, t1, t2, Filter1_l, Filter1_r, invhev_l,
+ invhev_r;
+ uint32_t N128, R63;
+ uint32_t u1_l, u1_r, u2_l, u2_r, u3_l, u3_r;
+
+ R63 = 0x003F003F;
+ HWM = 0xFF00FF00;
+ N128 = 0x80808080;
+ t1 = 0x03000300;
+ t2 = 0x04000400;
+
+ vps0 = (*ps0) ^ N128;
+ vps1 = (*ps1) ^ N128;
+ vps2 = (*ps2) ^ N128;
+ vqs0 = (*qs0) ^ N128;
+ vqs1 = (*qs1) ^ N128;
+ vqs2 = (*qs2) ^ N128;
+
+  /* use halfword pairs instead of quad-bytes to keep enough precision */
+ vps0_l = vps0 & HWM;
+ vps0_r = vps0 << 8;
+ vps0_r = vps0_r & HWM;
+
+ vqs0_l = vqs0 & HWM;
+ vqs0_r = vqs0 << 8;
+ vqs0_r = vqs0_r & HWM;
+
+ vps1_l = vps1 & HWM;
+ vps1_r = vps1 << 8;
+ vps1_r = vps1_r & HWM;
+
+ vqs1_l = vqs1 & HWM;
+ vqs1_r = vqs1 << 8;
+ vqs1_r = vqs1_r & HWM;
+
+ vqs2_l = vqs2 & HWM;
+ vqs2_r = vqs2 << 8;
+ vqs2_r = vqs2_r & HWM;
+
+ __asm__ __volatile__(
+ /* qs0 - ps0 */
+ "subq_s.ph %[subr_l], %[vqs0_l], %[vps0_l] \n\t"
+ "subq_s.ph %[subr_r], %[vqs0_r], %[vps0_r] \n\t"
+
+ /* vp8_filter = vp8_signed_char_clamp(ps1 - qs1); */
+ "subq_s.ph %[vp8_filter_l], %[vps1_l], %[vqs1_l] \n\t"
+ "subq_s.ph %[vp8_filter_r], %[vps1_r], %[vqs1_r] \n\t"
+
+ : [vp8_filter_l] "=&r"(vp8_filter_l), [vp8_filter_r] "=r"(vp8_filter_r),
+ [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r)
+ : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l),
+ [vps1_r] "r"(vps1_r), [vqs0_l] "r"(vqs0_l), [vqs0_r] "r"(vqs0_r),
+ [vqs1_l] "r"(vqs1_l), [vqs1_r] "r"(vqs1_r));
+
+ vps2_l = vps2 & HWM;
+ vps2_r = vps2 << 8;
+ vps2_r = vps2_r & HWM;
+
+ /* add outer taps if we have high edge variance */
+ __asm__ __volatile__(
+ /* vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0)); */
+ "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t"
+ "and %[mask_l], %[HWM], %[mask] \n\t"
+ "sll %[mask_r], %[mask], 8 \n\t"
+ "and %[mask_r], %[HWM], %[mask_r] \n\t"
+ "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t"
+ "and %[hev_l], %[HWM], %[hev] \n\t"
+ "sll %[hev_r], %[hev], 8 \n\t"
+ "and %[hev_r], %[HWM], %[hev_r] \n\t"
+ "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t"
+ "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t"
+
+ /* vp8_filter &= mask; */
+ "and %[vp8_filter_l], %[vp8_filter_l], %[mask_l] \n\t"
+ "and %[vp8_filter_r], %[vp8_filter_r], %[mask_r] \n\t"
+
+ /* Filter2 = vp8_filter & hev; */
+ "and %[Filter2_l], %[vp8_filter_l], %[hev_l] \n\t"
+ "and %[Filter2_r], %[vp8_filter_r], %[hev_r] \n\t"
+
+ : [vp8_filter_l] "+r"(vp8_filter_l), [vp8_filter_r] "+r"(vp8_filter_r),
+ [hev_l] "=&r"(hev_l), [hev_r] "=&r"(hev_r), [mask_l] "=&r"(mask_l),
+ [mask_r] "=&r"(mask_r), [Filter2_l] "=&r"(Filter2_l),
+ [Filter2_r] "=&r"(Filter2_r)
+ : [subr_l] "r"(subr_l), [subr_r] "r"(subr_r), [HWM] "r"(HWM),
+ [hev] "r"(hev), [mask] "r"(mask));
+
+ /* save bottom 3 bits so that we round one side +4 and the other +3 */
+ __asm__ __volatile__(
+      /* Filter1 = vp8_signed_char_clamp(Filter2 + 4) >> 3; */
+ "addq_s.ph %[Filter1_l], %[Filter2_l], %[t2] \n\t"
+ "xor %[invhev_l], %[hev_l], %[HWM] \n\t"
+ "addq_s.ph %[Filter1_r], %[Filter2_r], %[t2] \n\t"
+
+      /* Filter2 = vp8_signed_char_clamp(Filter2 + 3) >> 3; */
+ "addq_s.ph %[Filter2_l], %[Filter2_l], %[t1] \n\t"
+ "addq_s.ph %[Filter2_r], %[Filter2_r], %[t1] \n\t"
+
+ "shra.ph %[Filter1_l], %[Filter1_l], 3 \n\t"
+ "shra.ph %[Filter1_r], %[Filter1_r], 3 \n\t"
+
+ "shra.ph %[Filter2_l], %[Filter2_l], 3 \n\t"
+ "shra.ph %[Filter2_r], %[Filter2_r], 3 \n\t"
+ "and %[Filter1_l], %[Filter1_l], %[HWM] \n\t"
+ "and %[Filter1_r], %[Filter1_r], %[HWM] \n\t"
+ "xor %[invhev_r], %[hev_r], %[HWM] \n\t"
+
+ /* qs0 = vp8_signed_char_clamp(qs0 - Filter1); */
+ "subq_s.ph %[vqs0_l], %[vqs0_l], %[Filter1_l] \n\t"
+ "subq_s.ph %[vqs0_r], %[vqs0_r], %[Filter1_r] \n\t"
+
+ /* ps0 = vp8_signed_char_clamp(ps0 + Filter2); */
+ "addq_s.ph %[vps0_l], %[vps0_l], %[Filter2_l] \n\t"
+ "addq_s.ph %[vps0_r], %[vps0_r], %[Filter2_r] \n\t"
+
+ : [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r),
+ [Filter1_l] "=&r"(Filter1_l), [Filter1_r] "=&r"(Filter1_r),
+ [Filter2_l] "+r"(Filter2_l), [Filter2_r] "+r"(Filter2_r),
+ [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
+ [vqs0_r] "+r"(vqs0_r)
+ : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM), [hev_l] "r"(hev_l),
+ [hev_r] "r"(hev_r));
+
+ /* only apply wider filter if not high edge variance */
+ __asm__ __volatile__(
+ /* vp8_filter &= ~hev; */
+ "and %[Filter2_l], %[vp8_filter_l], %[invhev_l] \n\t"
+ "and %[Filter2_r], %[vp8_filter_r], %[invhev_r] \n\t"
+
+ "shra.ph %[Filter2_l], %[Filter2_l], 8 \n\t"
+ "shra.ph %[Filter2_r], %[Filter2_r], 8 \n\t"
+
+ : [Filter2_l] "=&r"(Filter2_l), [Filter2_r] "=&r"(Filter2_r)
+ : [vp8_filter_l] "r"(vp8_filter_l), [vp8_filter_r] "r"(vp8_filter_r),
+ [invhev_l] "r"(invhev_l), [invhev_r] "r"(invhev_r));
+
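+  /* Note: the wider filter applies u = vp8_signed_char_clamp(
+   * (63 + Filter2 * k) >> 7) with k = 27, 18, 9 for the pixel pairs
+   * nearest to farthest from the edge.  Below, 9 * Filter2 is formed as
+   * (Filter2 << 3) + Filter2, doubled into 18 * Filter2, and their sum
+   * gives 27 * Filter2; R63 = 0x003F003F supplies the +63 rounding in
+   * each halfword. */
+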
+ /* roughly 3/7th difference across boundary */
+ __asm__ __volatile__(
+ "shll.ph %[u3_l], %[Filter2_l], 3 \n\t"
+ "shll.ph %[u3_r], %[Filter2_r], 3 \n\t"
+
+ "addq.ph %[u3_l], %[u3_l], %[Filter2_l] \n\t"
+ "addq.ph %[u3_r], %[u3_r], %[Filter2_r] \n\t"
+
+ "shll.ph %[u2_l], %[u3_l], 1 \n\t"
+ "shll.ph %[u2_r], %[u3_r], 1 \n\t"
+
+ "addq.ph %[u1_l], %[u3_l], %[u2_l] \n\t"
+ "addq.ph %[u1_r], %[u3_r], %[u2_r] \n\t"
+
+ "addq.ph %[u2_l], %[u2_l], %[R63] \n\t"
+ "addq.ph %[u2_r], %[u2_r], %[R63] \n\t"
+
+ "addq.ph %[u3_l], %[u3_l], %[R63] \n\t"
+ "addq.ph %[u3_r], %[u3_r], %[R63] \n\t"
+
+ /* vp8_signed_char_clamp((63 + Filter2 * 27) >> 7)
+ * vp8_signed_char_clamp((63 + Filter2 * 18) >> 7)
+ */
+ "addq.ph %[u1_l], %[u1_l], %[R63] \n\t"
+ "addq.ph %[u1_r], %[u1_r], %[R63] \n\t"
+ "shra.ph %[u1_l], %[u1_l], 7 \n\t"
+ "shra.ph %[u1_r], %[u1_r], 7 \n\t"
+ "shra.ph %[u2_l], %[u2_l], 7 \n\t"
+ "shra.ph %[u2_r], %[u2_r], 7 \n\t"
+ "shll.ph %[u1_l], %[u1_l], 8 \n\t"
+ "shll.ph %[u1_r], %[u1_r], 8 \n\t"
+ "shll.ph %[u2_l], %[u2_l], 8 \n\t"
+ "shll.ph %[u2_r], %[u2_r], 8 \n\t"
+
+ /* vqs0 = vp8_signed_char_clamp(qs0 - u); */
+ "subq_s.ph %[vqs0_l], %[vqs0_l], %[u1_l] \n\t"
+ "subq_s.ph %[vqs0_r], %[vqs0_r], %[u1_r] \n\t"
+
+ /* vps0 = vp8_signed_char_clamp(ps0 + u); */
+ "addq_s.ph %[vps0_l], %[vps0_l], %[u1_l] \n\t"
+ "addq_s.ph %[vps0_r], %[vps0_r], %[u1_r] \n\t"
+
+ : [u1_l] "=&r"(u1_l), [u1_r] "=&r"(u1_r), [u2_l] "=&r"(u2_l),
+ [u2_r] "=&r"(u2_r), [u3_l] "=&r"(u3_l), [u3_r] "=&r"(u3_r),
+ [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l),
+ [vqs0_r] "+r"(vqs0_r)
+ : [R63] "r"(R63), [Filter2_l] "r"(Filter2_l), [Filter2_r] "r"(Filter2_r));
+
+ __asm__ __volatile__(
+ /* vqs1 = vp8_signed_char_clamp(qs1 - u); */
+ "subq_s.ph %[vqs1_l], %[vqs1_l], %[u2_l] \n\t"
+ "addq_s.ph %[vps1_l], %[vps1_l], %[u2_l] \n\t"
+
+ /* vps1 = vp8_signed_char_clamp(ps1 + u); */
+ "addq_s.ph %[vps1_r], %[vps1_r], %[u2_r] \n\t"
+ "subq_s.ph %[vqs1_r], %[vqs1_r], %[u2_r] \n\t"
+
+ : [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l),
+ [vqs1_r] "+r"(vqs1_r)
+ : [u2_l] "r"(u2_l), [u2_r] "r"(u2_r));
+
+ /* roughly 1/7th difference across boundary */
+ __asm__ __volatile__(
+ /* u = vp8_signed_char_clamp((63 + Filter2 * 9) >> 7); */
+ "shra.ph %[u3_l], %[u3_l], 7 \n\t"
+ "shra.ph %[u3_r], %[u3_r], 7 \n\t"
+ "shll.ph %[u3_l], %[u3_l], 8 \n\t"
+ "shll.ph %[u3_r], %[u3_r], 8 \n\t"
+
+ /* vqs2 = vp8_signed_char_clamp(qs2 - u); */
+ "subq_s.ph %[vqs2_l], %[vqs2_l], %[u3_l] \n\t"
+ "subq_s.ph %[vqs2_r], %[vqs2_r], %[u3_r] \n\t"
+
+ /* vps2 = vp8_signed_char_clamp(ps2 + u); */
+ "addq_s.ph %[vps2_l], %[vps2_l], %[u3_l] \n\t"
+ "addq_s.ph %[vps2_r], %[vps2_r], %[u3_r] \n\t"
+
+ : [u3_l] "+r"(u3_l), [u3_r] "+r"(u3_r), [vps2_l] "+r"(vps2_l),
+ [vps2_r] "+r"(vps2_r), [vqs2_l] "+r"(vqs2_l), [vqs2_r] "+r"(vqs2_r)
+ :);
+
+ /* Create quad-bytes from halfword pairs */
+ __asm__ __volatile__(
+ "and %[vqs0_l], %[vqs0_l], %[HWM] \n\t"
+ "shrl.ph %[vqs0_r], %[vqs0_r], 8 \n\t"
+
+ "and %[vps0_l], %[vps0_l], %[HWM] \n\t"
+ "shrl.ph %[vps0_r], %[vps0_r], 8 \n\t"
+
+ "and %[vqs1_l], %[vqs1_l], %[HWM] \n\t"
+ "shrl.ph %[vqs1_r], %[vqs1_r], 8 \n\t"
+
+ "and %[vps1_l], %[vps1_l], %[HWM] \n\t"
+ "shrl.ph %[vps1_r], %[vps1_r], 8 \n\t"
+
+ "and %[vqs2_l], %[vqs2_l], %[HWM] \n\t"
+ "shrl.ph %[vqs2_r], %[vqs2_r], 8 \n\t"
+
+ "and %[vps2_l], %[vps2_l], %[HWM] \n\t"
+ "shrl.ph %[vps2_r], %[vps2_r], 8 \n\t"
+
+ "or %[vqs0_r], %[vqs0_l], %[vqs0_r] \n\t"
+ "or %[vps0_r], %[vps0_l], %[vps0_r] \n\t"
+ "or %[vqs1_r], %[vqs1_l], %[vqs1_r] \n\t"
+ "or %[vps1_r], %[vps1_l], %[vps1_r] \n\t"
+ "or %[vqs2_r], %[vqs2_l], %[vqs2_r] \n\t"
+ "or %[vps2_r], %[vps2_l], %[vps2_r] \n\t"
+
+ : [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l),
+ [vqs1_r] "+r"(vqs1_r), [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r),
+ [vqs0_l] "+r"(vqs0_l), [vqs0_r] "+r"(vqs0_r), [vqs2_l] "+r"(vqs2_l),
+ [vqs2_r] "+r"(vqs2_r), [vps2_r] "+r"(vps2_r), [vps2_l] "+r"(vps2_l)
+ : [HWM] "r"(HWM));
+
+ *ps0 = vps0_r ^ N128;
+ *ps1 = vps1_r ^ N128;
+ *ps2 = vps2_r ^ N128;
+ *qs0 = vqs0_r ^ N128;
+ *qs1 = vqs1_r ^ N128;
+ *qs2 = vqs2_r ^ N128;
+}
+
+void vp8_mbloop_filter_horizontal_edge_mips(unsigned char *s, int p,
+ unsigned int flimit,
+ unsigned int limit,
+ unsigned int thresh, int count) {
+ int i;
+ uint32_t mask, hev;
+ uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+ unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
+
+ mask = 0;
+ hev = 0;
+ i = 0;
+ p1 = 0;
+ p2 = 0;
+ p3 = 0;
+ p4 = 0;
+
+ /* loop filter designed to work using chars so that we can make maximum use
+ * of 8 bit simd instructions.
+ */
+
+ sm1 = s - (p << 2);
+ s0 = s - p - p - p;
+ s1 = s - p - p;
+ s2 = s - p;
+ s3 = s;
+ s4 = s + p;
+ s5 = s + p + p;
+ s6 = s + p + p + p;
+
+ /* prefetch data for load */
+ prefetch_load_lf(s + p);
+
+  /* apply filter on 4 pixels at the same time */
+ do {
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p1 = *((uint32_t *)(s1));
+ p2 = *((uint32_t *)(s2));
+ p3 = *((uint32_t *)(s3));
+ p4 = *((uint32_t *)(s4));
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ pm1 = *((uint32_t *)(sm1));
+ p0 = *((uint32_t *)(s0));
+ p5 = *((uint32_t *)(s5));
+ p6 = *((uint32_t *)(s6));
+
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+      /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+ /* unpack processed 4x4 neighborhood
+ * memory is 4 byte aligned
+ */
+ *((uint32_t *)s0) = p0;
+ *((uint32_t *)s1) = p1;
+ *((uint32_t *)s2) = p2;
+ *((uint32_t *)s3) = p3;
+ *((uint32_t *)s4) = p4;
+ *((uint32_t *)s5) = p5;
+ }
+ }
+
+ sm1 += 4;
+ s0 += 4;
+ s1 += 4;
+ s2 += 4;
+ s3 += 4;
+ s4 += 4;
+ s5 += 4;
+ s6 += 4;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p1 = *((uint32_t *)(s1));
+ p2 = *((uint32_t *)(s2));
+ p3 = *((uint32_t *)(s3));
+ p4 = *((uint32_t *)(s4));
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ pm1 = *((uint32_t *)(sm1));
+ p0 = *((uint32_t *)(s0));
+ p5 = *((uint32_t *)(s5));
+ p6 = *((uint32_t *)(s6));
+
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+      /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+ /* unpack processed 4x4 neighborhood
+ * memory is 4 byte aligned
+ */
+ *((uint32_t *)s0) = p0;
+ *((uint32_t *)s1) = p1;
+ *((uint32_t *)s2) = p2;
+ *((uint32_t *)s3) = p3;
+ *((uint32_t *)s4) = p4;
+ *((uint32_t *)s5) = p5;
+ }
+ }
+
+ sm1 += 4;
+ s0 += 4;
+ s1 += 4;
+ s2 += 4;
+ s3 += 4;
+ s4 += 4;
+ s5 += 4;
+ s6 += 4;
+
+ i += 8;
+  } while (i < count);
+}
+
+void vp8_mbloop_filter_uvhorizontal_edge_mips(unsigned char *s, int p,
+ unsigned int flimit,
+ unsigned int limit,
+ unsigned int thresh, int count) {
+ uint32_t mask, hev;
+ uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+ unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
+ (void)count;
+
+ mask = 0;
+ hev = 0;
+ p1 = 0;
+ p2 = 0;
+ p3 = 0;
+ p4 = 0;
+
+ /* loop filter designed to work using chars so that we can make maximum use
+ * of 8 bit simd instructions.
+ */
+
+ sm1 = s - (p << 2);
+ s0 = s - p - p - p;
+ s1 = s - p - p;
+ s2 = s - p;
+ s3 = s;
+ s4 = s + p;
+ s5 = s + p + p;
+ s6 = s + p + p + p;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p1 = *((uint32_t *)(s1));
+ p2 = *((uint32_t *)(s2));
+ p3 = *((uint32_t *)(s3));
+ p4 = *((uint32_t *)(s4));
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ pm1 = *((uint32_t *)(sm1));
+ p0 = *((uint32_t *)(s0));
+ p5 = *((uint32_t *)(s5));
+ p6 = *((uint32_t *)(s6));
+
+    /* compute the mask; if it is zero, filtering is not needed */
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+ if (mask) {
+ /* filtering */
+ vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+      /* write back the processed 4x4 neighborhood
+ * memory is 4 byte aligned
+ */
+ *((uint32_t *)s0) = p0;
+ *((uint32_t *)s1) = p1;
+ *((uint32_t *)s2) = p2;
+ *((uint32_t *)s3) = p3;
+ *((uint32_t *)s4) = p4;
+ *((uint32_t *)s5) = p5;
+ }
+ }
+
+ sm1 += 4;
+ s0 += 4;
+ s1 += 4;
+ s2 += 4;
+ s3 += 4;
+ s4 += 4;
+ s5 += 4;
+ s6 += 4;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p1 = *((uint32_t *)(s1));
+ p2 = *((uint32_t *)(s2));
+ p3 = *((uint32_t *)(s3));
+ p4 = *((uint32_t *)(s4));
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ pm1 = *((uint32_t *)(sm1));
+ p0 = *((uint32_t *)(s0));
+ p5 = *((uint32_t *)(s5));
+ p6 = *((uint32_t *)(s6));
+
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+      /* write back the processed 4x4 neighborhood
+ * memory is 4 byte aligned
+ */
+ *((uint32_t *)s0) = p0;
+ *((uint32_t *)s1) = p1;
+ *((uint32_t *)s2) = p2;
+ *((uint32_t *)s3) = p3;
+ *((uint32_t *)s4) = p4;
+ *((uint32_t *)s5) = p5;
+ }
+ }
+}
+
+void vp8_mbloop_filter_vertical_edge_mips(unsigned char *s, int p,
+ unsigned int flimit,
+ unsigned int limit,
+ unsigned int thresh, int count) {
+ int i;
+ uint32_t mask, hev;
+ uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+ unsigned char *s1, *s2, *s3, *s4;
+ uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+
+ mask = 0;
+ hev = 0;
+ i = 0;
+ pm1 = 0;
+ p0 = 0;
+ p1 = 0;
+ p2 = 0;
+ p3 = 0;
+ p4 = 0;
+ p5 = 0;
+ p6 = 0;
+
+ /* loop filter designed to work using chars so that we can make maximum use
+   * of 8-bit SIMD instructions.
+ */
+
+  /* apply filter on 4 pixels at the same time */
+ do {
+ s1 = s;
+ s2 = s + p;
+ s3 = s2 + p;
+ s4 = s3 + p;
+ s = s4 + p;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p2 = *((uint32_t *)(s1 - 4));
+ p6 = *((uint32_t *)(s1));
+ p1 = *((uint32_t *)(s2 - 4));
+ p5 = *((uint32_t *)(s2));
+ p0 = *((uint32_t *)(s3 - 4));
+ p4 = *((uint32_t *)(s3));
+ pm1 = *((uint32_t *)(s4 - 4));
+ p3 = *((uint32_t *)(s4));
+
+ /* transpose pm1, p0, p1, p2 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t"
+ "precr.qb.ph %[prim2], %[p2], %[p1] \n\t"
+ "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t"
+ "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t"
+
+ "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p2], %[p1], %[sec3] \n\t"
+ "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t"
+ "append %[p1], %[sec3], 16 \n\t"
+ "append %[pm1], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0),
+ [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* transpose p3, p4, p5, p6 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t"
+ "precr.qb.ph %[prim2], %[p6], %[p5] \n\t"
+ "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t"
+ "precr.qb.ph %[prim4], %[p4], %[p3] \n\t"
+
+ "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p6], %[p5], %[sec3] \n\t"
+ "precrq.ph.w %[p4], %[p3], %[sec4] \n\t"
+ "append %[p5], %[sec3], 16 \n\t"
+ "append %[p3], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4),
+ [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+      /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+ /* don't use transpose on output data
+ * because memory isn't aligned
+ */
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s4]) \n\t"
+ "sb %[p4], 1(%[s4]) \n\t"
+ "sb %[p3], 0(%[s4]) \n\t"
+ "sb %[p2], -1(%[s4]) \n\t"
+ "sb %[p1], -2(%[s4]) \n\t"
+ "sb %[p0], -3(%[s4]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+
+ __asm__ __volatile__(
+ "srl %[p5], %[p5], 8 \n\t"
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ "srl %[p0], %[p0], 8 \n\t"
+ : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2),
+ [p1] "+r"(p1), [p0] "+r"(p0)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s3]) \n\t"
+ "sb %[p4], 1(%[s3]) \n\t"
+ "sb %[p3], 0(%[s3]) \n\t"
+ "sb %[p2], -1(%[s3]) \n\t"
+ "sb %[p1], -2(%[s3]) \n\t"
+ "sb %[p0], -3(%[s3]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+
+ __asm__ __volatile__(
+ "srl %[p5], %[p5], 8 \n\t"
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ "srl %[p0], %[p0], 8 \n\t"
+ : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2),
+ [p1] "+r"(p1), [p0] "+r"(p0)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s2]) \n\t"
+ "sb %[p4], 1(%[s2]) \n\t"
+ "sb %[p3], 0(%[s2]) \n\t"
+ "sb %[p2], -1(%[s2]) \n\t"
+ "sb %[p1], -2(%[s2]) \n\t"
+ "sb %[p0], -3(%[s2]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+
+ __asm__ __volatile__(
+ "srl %[p5], %[p5], 8 \n\t"
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ "srl %[p0], %[p0], 8 \n\t"
+ : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2),
+ [p1] "+r"(p1), [p0] "+r"(p0)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s1]) \n\t"
+ "sb %[p4], 1(%[s1]) \n\t"
+ "sb %[p3], 0(%[s1]) \n\t"
+ "sb %[p2], -1(%[s1]) \n\t"
+ "sb %[p1], -2(%[s1]) \n\t"
+ "sb %[p0], -3(%[s1]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+ }
+ }
+
+ i += 4;
+  } while (i < count);
+}
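
The precrq.qb.ph/precr.qb.ph sequences above amount to an in-register 4x4 byte
transpose, letting the vertical edge reuse the packed horizontal filter. A
plain C sketch of the same data movement, assuming little-endian lane order
(hypothetical helper, not part of the patch):

#include <stdint.h>

/* in[r] holds row r with the pixel for column c in byte lane c;
 * out[c] receives column c with the pixel for row r in byte lane r. */
static void transpose_4x4_bytes(const uint32_t in[4], uint32_t out[4]) {
  int r, c;
  for (c = 0; c < 4; ++c) {
    uint32_t col = 0;
    for (r = 0; r < 4; ++r) {
      col |= (uint32_t)((in[r] >> (8 * c)) & 0xff) << (8 * r);
    }
    out[c] = col;
  }
}
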
+
+void vp8_mbloop_filter_uvvertical_edge_mips(unsigned char *s, int p,
+ unsigned int flimit,
+ unsigned int limit,
+ unsigned int thresh, int count) {
+ uint32_t mask, hev;
+ uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+ unsigned char *s1, *s2, *s3, *s4;
+ uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+ (void)count;
+
+ mask = 0;
+ hev = 0;
+ pm1 = 0;
+ p0 = 0;
+ p1 = 0;
+ p2 = 0;
+ p3 = 0;
+ p4 = 0;
+ p5 = 0;
+ p6 = 0;
+
+ /* loop filter designed to work using chars so that we can make maximum use
+   * of 8-bit SIMD instructions.
+ */
+
+  /* apply filter on 4 pixels at the same time */
+
+ s1 = s;
+ s2 = s + p;
+ s3 = s2 + p;
+ s4 = s3 + p;
+
+ /* prefetch data for load */
+ prefetch_load_lf(s + 2 * p);
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p2 = *((uint32_t *)(s1 - 4));
+ p6 = *((uint32_t *)(s1));
+ p1 = *((uint32_t *)(s2 - 4));
+ p5 = *((uint32_t *)(s2));
+ p0 = *((uint32_t *)(s3 - 4));
+ p4 = *((uint32_t *)(s3));
+ pm1 = *((uint32_t *)(s4 - 4));
+ p3 = *((uint32_t *)(s4));
+
+ /* transpose pm1, p0, p1, p2 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t"
+ "precr.qb.ph %[prim2], %[p2], %[p1] \n\t"
+ "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t"
+ "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t"
+
+ "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p2], %[p1], %[sec3] \n\t"
+ "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t"
+ "append %[p1], %[sec3], 16 \n\t"
+ "append %[pm1], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0),
+ [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* transpose p3, p4, p5, p6 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t"
+ "precr.qb.ph %[prim2], %[p6], %[p5] \n\t"
+ "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t"
+ "precr.qb.ph %[prim4], %[p4], %[p3] \n\t"
+
+ "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p6], %[p5], %[sec3] \n\t"
+ "precrq.ph.w %[p4], %[p3], %[sec4] \n\t"
+ "append %[p5], %[sec3], 16 \n\t"
+ "append %[p3], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4),
+ [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+ /* don't use transpose on output data
+ * because memory isn't aligned
+ */
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s4]) \n\t"
+ "sb %[p4], 1(%[s4]) \n\t"
+ "sb %[p3], 0(%[s4]) \n\t"
+ "sb %[p2], -1(%[s4]) \n\t"
+ "sb %[p1], -2(%[s4]) \n\t"
+ "sb %[p0], -3(%[s4]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+
+ __asm__ __volatile__(
+ "srl %[p5], %[p5], 8 \n\t"
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ "srl %[p0], %[p0], 8 \n\t"
+ : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2),
+ [p1] "+r"(p1), [p0] "+r"(p0)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s3]) \n\t"
+ "sb %[p4], 1(%[s3]) \n\t"
+ "sb %[p3], 0(%[s3]) \n\t"
+ "sb %[p2], -1(%[s3]) \n\t"
+ "sb %[p1], -2(%[s3]) \n\t"
+ "sb %[p0], -3(%[s3]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+
+ __asm__ __volatile__(
+ "srl %[p5], %[p5], 8 \n\t"
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ "srl %[p0], %[p0], 8 \n\t"
+ : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2),
+ [p1] "+r"(p1), [p0] "+r"(p0)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s2]) \n\t"
+ "sb %[p4], 1(%[s2]) \n\t"
+ "sb %[p3], 0(%[s2]) \n\t"
+ "sb %[p2], -1(%[s2]) \n\t"
+ "sb %[p1], -2(%[s2]) \n\t"
+ "sb %[p0], -3(%[s2]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+
+ __asm__ __volatile__(
+ "srl %[p5], %[p5], 8 \n\t"
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ "srl %[p0], %[p0], 8 \n\t"
+ : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2),
+ [p1] "+r"(p1), [p0] "+r"(p0)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s1]) \n\t"
+ "sb %[p4], 1(%[s1]) \n\t"
+ "sb %[p3], 0(%[s1]) \n\t"
+ "sb %[p2], -1(%[s1]) \n\t"
+ "sb %[p1], -2(%[s1]) \n\t"
+ "sb %[p0], -3(%[s1]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+ }
+ }
+
+ s1 = s4 + p;
+ s2 = s1 + p;
+ s3 = s2 + p;
+ s4 = s3 + p;
+
+ /* load quad-byte vectors
+ * memory is 4 byte aligned
+ */
+ p2 = *((uint32_t *)(s1 - 4));
+ p6 = *((uint32_t *)(s1));
+ p1 = *((uint32_t *)(s2 - 4));
+ p5 = *((uint32_t *)(s2));
+ p0 = *((uint32_t *)(s3 - 4));
+ p4 = *((uint32_t *)(s3));
+ pm1 = *((uint32_t *)(s4 - 4));
+ p3 = *((uint32_t *)(s4));
+
+ /* transpose pm1, p0, p1, p2 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t"
+ "precr.qb.ph %[prim2], %[p2], %[p1] \n\t"
+ "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t"
+ "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t"
+
+ "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p2], %[p1], %[sec3] \n\t"
+ "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t"
+ "append %[p1], %[sec3], 16 \n\t"
+ "append %[pm1], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0),
+ [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* transpose p3, p4, p5, p6 */
+ __asm__ __volatile__(
+ "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t"
+ "precr.qb.ph %[prim2], %[p6], %[p5] \n\t"
+ "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t"
+ "precr.qb.ph %[prim4], %[p4], %[p3] \n\t"
+
+ "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t"
+ "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t"
+ "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t"
+ "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t"
+
+ "precrq.ph.w %[p6], %[p5], %[sec3] \n\t"
+ "precrq.ph.w %[p4], %[p3], %[sec4] \n\t"
+ "append %[p5], %[sec3], 16 \n\t"
+ "append %[p3], %[sec4], 16 \n\t"
+
+ : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3),
+ [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4),
+ [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4)
+ :);
+
+ /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+ * mask will be zero and filtering is not needed
+ */
+ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) {
+ vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+ thresh, &hev, &mask);
+
+    /* if mask == 0, filtering is not needed */
+ if (mask) {
+ /* filtering */
+ vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+ /* don't use transpose on output data
+ * because memory isn't aligned
+ */
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s4]) \n\t"
+ "sb %[p4], 1(%[s4]) \n\t"
+ "sb %[p3], 0(%[s4]) \n\t"
+ "sb %[p2], -1(%[s4]) \n\t"
+ "sb %[p1], -2(%[s4]) \n\t"
+ "sb %[p0], -3(%[s4]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+
+ __asm__ __volatile__(
+ "srl %[p5], %[p5], 8 \n\t"
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ "srl %[p0], %[p0], 8 \n\t"
+ : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2),
+ [p1] "+r"(p1), [p0] "+r"(p0)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s3]) \n\t"
+ "sb %[p4], 1(%[s3]) \n\t"
+ "sb %[p3], 0(%[s3]) \n\t"
+ "sb %[p2], -1(%[s3]) \n\t"
+ "sb %[p1], -2(%[s3]) \n\t"
+ "sb %[p0], -3(%[s3]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+
+ __asm__ __volatile__(
+ "srl %[p5], %[p5], 8 \n\t"
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ "srl %[p0], %[p0], 8 \n\t"
+ : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2),
+ [p1] "+r"(p1), [p0] "+r"(p0)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s2]) \n\t"
+ "sb %[p4], 1(%[s2]) \n\t"
+ "sb %[p3], 0(%[s2]) \n\t"
+ "sb %[p2], -1(%[s2]) \n\t"
+ "sb %[p1], -2(%[s2]) \n\t"
+ "sb %[p0], -3(%[s2]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+
+ __asm__ __volatile__(
+ "srl %[p5], %[p5], 8 \n\t"
+ "srl %[p4], %[p4], 8 \n\t"
+ "srl %[p3], %[p3], 8 \n\t"
+ "srl %[p2], %[p2], 8 \n\t"
+ "srl %[p1], %[p1], 8 \n\t"
+ "srl %[p0], %[p0], 8 \n\t"
+ : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2),
+ [p1] "+r"(p1), [p0] "+r"(p0)
+ :);
+
+ __asm__ __volatile__(
+ "sb %[p5], 2(%[s1]) \n\t"
+ "sb %[p4], 1(%[s1]) \n\t"
+ "sb %[p3], 0(%[s1]) \n\t"
+ "sb %[p2], -1(%[s1]) \n\t"
+ "sb %[p1], -2(%[s1]) \n\t"
+ "sb %[p0], -3(%[s1]) \n\t"
+ :
+ : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1),
+ [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0));
+ }
+ }
+}
+
+/* Horizontal MB filtering */
+void vp8_loop_filter_mbh_dspr2(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride,
+ int uv_stride, loop_filter_info *lfi) {
+ unsigned int thresh_vec, flimit_vec, limit_vec;
+ unsigned char thresh, flimit, limit, flimit_temp;
+
+  /* use direct values instead of pointers */
+ limit = *(lfi->lim);
+ flimit_temp = *(lfi->mblim);
+ thresh = *(lfi->hev_thr);
+ flimit = flimit_temp;
+
+  /* create quad-byte vectors */
+ __asm__ __volatile__(
+ "replv.qb %[thresh_vec], %[thresh] \n\t"
+ "replv.qb %[flimit_vec], %[flimit] \n\t"
+ "replv.qb %[limit_vec], %[limit] \n\t"
+ : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
+ [limit_vec] "=r"(limit_vec)
+ : [thresh] "r"(thresh), [flimit] "r"(flimit), [limit] "r"(limit));
+
+ vp8_mbloop_filter_horizontal_edge_mips(y_ptr, y_stride, flimit_vec, limit_vec,
+ thresh_vec, 16);
+
+ if (u_ptr) {
+ vp8_mbloop_filter_uvhorizontal_edge_mips(u_ptr, uv_stride, flimit_vec,
+ limit_vec, thresh_vec, 0);
+ }
+
+ if (v_ptr) {
+ vp8_mbloop_filter_uvhorizontal_edge_mips(v_ptr, uv_stride, flimit_vec,
+ limit_vec, thresh_vec, 0);
+ }
+}
+
+/* Vertical MB filtering */
+void vp8_loop_filter_mbv_dspr2(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride,
+ int uv_stride, loop_filter_info *lfi) {
+ unsigned int thresh_vec, flimit_vec, limit_vec;
+ unsigned char thresh, flimit, limit, flimit_temp;
+
+  /* use direct values instead of pointers */
+ limit = *(lfi->lim);
+ flimit_temp = *(lfi->mblim);
+ thresh = *(lfi->hev_thr);
+ flimit = flimit_temp;
+
+  /* create quad-byte vectors */
+ __asm__ __volatile__(
+ "replv.qb %[thresh_vec], %[thresh] \n\t"
+ "replv.qb %[flimit_vec], %[flimit] \n\t"
+ "replv.qb %[limit_vec], %[limit] \n\t"
+ : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
+ [limit_vec] "=r"(limit_vec)
+ : [thresh] "r"(thresh), [flimit] "r"(flimit), [limit] "r"(limit));
+
+ vp8_mbloop_filter_vertical_edge_mips(y_ptr, y_stride, flimit_vec, limit_vec,
+ thresh_vec, 16);
+
+ if (u_ptr)
+ vp8_mbloop_filter_uvvertical_edge_mips(u_ptr, uv_stride, flimit_vec,
+ limit_vec, thresh_vec, 0);
+
+ if (v_ptr)
+ vp8_mbloop_filter_uvvertical_edge_mips(v_ptr, uv_stride, flimit_vec,
+ limit_vec, thresh_vec, 0);
+}
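
In both wrappers, replv.qb broadcasts one threshold byte into every lane of
the filter vectors. The portable equivalent is a single multiply; a sketch
with a hypothetical helper name:

#include <stdint.h>

/* Scalar model of replv.qb: replicate a byte into all four lanes. */
static uint32_t broadcast_byte(uint8_t b) {
  return (uint32_t)b * 0x01010101u;
}
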
+
+/* Horizontal B filtering */
+void vp8_loop_filter_bh_dspr2(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ unsigned int thresh_vec, flimit_vec, limit_vec;
+ unsigned char thresh, flimit, limit, flimit_temp;
+
+  /* use direct values instead of pointers */
+ limit = *(lfi->lim);
+ flimit_temp = *(lfi->blim);
+ thresh = *(lfi->hev_thr);
+ flimit = flimit_temp;
+
+  /* create quad-byte vectors */
+ __asm__ __volatile__(
+ "replv.qb %[thresh_vec], %[thresh] \n\t"
+ "replv.qb %[flimit_vec], %[flimit] \n\t"
+ "replv.qb %[limit_vec], %[limit] \n\t"
+ : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
+ [limit_vec] "=r"(limit_vec)
+ : [thresh] "r"(thresh), [flimit] "r"(flimit), [limit] "r"(limit));
+
+ vp8_loop_filter_horizontal_edge_mips(y_ptr + 4 * y_stride, y_stride,
+ flimit_vec, limit_vec, thresh_vec, 16);
+ vp8_loop_filter_horizontal_edge_mips(y_ptr + 8 * y_stride, y_stride,
+ flimit_vec, limit_vec, thresh_vec, 16);
+ vp8_loop_filter_horizontal_edge_mips(y_ptr + 12 * y_stride, y_stride,
+ flimit_vec, limit_vec, thresh_vec, 16);
+
+ if (u_ptr)
+ vp8_loop_filter_uvhorizontal_edge_mips(
+ u_ptr + 4 * uv_stride, uv_stride, flimit_vec, limit_vec, thresh_vec, 0);
+
+ if (v_ptr)
+ vp8_loop_filter_uvhorizontal_edge_mips(
+ v_ptr + 4 * uv_stride, uv_stride, flimit_vec, limit_vec, thresh_vec, 0);
+}
+
+/* Vertical B filtering */
+void vp8_loop_filter_bv_dspr2(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ unsigned int thresh_vec, flimit_vec, limit_vec;
+ unsigned char thresh, flimit, limit, flimit_temp;
+
+  /* use direct values instead of pointers */
+ limit = *(lfi->lim);
+ flimit_temp = *(lfi->blim);
+ thresh = *(lfi->hev_thr);
+ flimit = flimit_temp;
+
+  /* create quad-byte vectors */
+ __asm__ __volatile__(
+ "replv.qb %[thresh_vec], %[thresh] \n\t"
+ "replv.qb %[flimit_vec], %[flimit] \n\t"
+ "replv.qb %[limit_vec], %[limit] \n\t"
+ : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec),
+ [limit_vec] "=r"(limit_vec)
+ : [thresh] "r"(thresh), [flimit] "r"(flimit), [limit] "r"(limit));
+
+ vp8_loop_filter_vertical_edge_mips(y_ptr + 4, y_stride, flimit_vec, limit_vec,
+ thresh_vec, 16);
+ vp8_loop_filter_vertical_edge_mips(y_ptr + 8, y_stride, flimit_vec, limit_vec,
+ thresh_vec, 16);
+ vp8_loop_filter_vertical_edge_mips(y_ptr + 12, y_stride, flimit_vec,
+ limit_vec, thresh_vec, 16);
+
+ if (u_ptr)
+ vp8_loop_filter_uvvertical_edge_mips(u_ptr + 4, uv_stride, flimit_vec,
+ limit_vec, thresh_vec, 0);
+
+ if (v_ptr)
+ vp8_loop_filter_uvvertical_edge_mips(v_ptr + 4, uv_stride, flimit_vec,
+ limit_vec, thresh_vec, 0);
+}
+
+#endif
diff --git a/media/libvpx/libvpx/vp8/common/mips/mmi/copymem_mmi.c b/media/libvpx/libvpx/vp8/common/mips/mmi/copymem_mmi.c
new file mode 100644
index 0000000000..86a32aa9ef
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/mmi/copymem_mmi.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vpx_ports/asmdefs_mmi.h"
+
+#define COPY_MEM_16X2 \
+ "gsldlc1 %[ftmp0], 0x07(%[src]) \n\t" \
+ "gsldrc1 %[ftmp0], 0x00(%[src]) \n\t" \
+ "ldl %[tmp0], 0x0f(%[src]) \n\t" \
+ "ldr %[tmp0], 0x08(%[src]) \n\t" \
+ MMI_ADDU(%[src], %[src], %[src_stride]) \
+ "gssdlc1 %[ftmp0], 0x07(%[dst]) \n\t" \
+ "gssdrc1 %[ftmp0], 0x00(%[dst]) \n\t" \
+ "sdl %[tmp0], 0x0f(%[dst]) \n\t" \
+ "sdr %[tmp0], 0x08(%[dst]) \n\t" \
+ MMI_ADDU(%[dst], %[dst], %[dst_stride]) \
+ "gsldlc1 %[ftmp1], 0x07(%[src]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x00(%[src]) \n\t" \
+ "ldl %[tmp1], 0x0f(%[src]) \n\t" \
+ "ldr %[tmp1], 0x08(%[src]) \n\t" \
+ MMI_ADDU(%[src], %[src], %[src_stride]) \
+ "gssdlc1 %[ftmp1], 0x07(%[dst]) \n\t" \
+ "gssdrc1 %[ftmp1], 0x00(%[dst]) \n\t" \
+ "sdl %[tmp1], 0x0f(%[dst]) \n\t" \
+ "sdr %[tmp1], 0x08(%[dst]) \n\t" \
+ MMI_ADDU(%[dst], %[dst], %[dst_stride])
+
+#define COPY_MEM_8X2 \
+ "gsldlc1 %[ftmp0], 0x07(%[src]) \n\t" \
+ "gsldrc1 %[ftmp0], 0x00(%[src]) \n\t" \
+ MMI_ADDU(%[src], %[src], %[src_stride]) \
+ "ldl %[tmp0], 0x07(%[src]) \n\t" \
+ "ldr %[tmp0], 0x00(%[src]) \n\t" \
+ MMI_ADDU(%[src], %[src], %[src_stride]) \
+ \
+ "gssdlc1 %[ftmp0], 0x07(%[dst]) \n\t" \
+ "gssdrc1 %[ftmp0], 0x00(%[dst]) \n\t" \
+ MMI_ADDU(%[dst], %[dst], %[dst_stride]) \
+ "sdl %[tmp0], 0x07(%[dst]) \n\t" \
+ "sdr %[tmp0], 0x00(%[dst]) \n\t" \
+ MMI_ADDU(%[dst], %[dst], %[dst_stride])
+
+void vp8_copy_mem16x16_mmi(unsigned char *src, int src_stride,
+ unsigned char *dst, int dst_stride) {
+ double ftmp[2];
+ uint64_t tmp[2];
+ uint8_t loop_count = 4;
+
+ /* clang-format off */
+ __asm__ volatile (
+ "1: \n\t"
+ COPY_MEM_16X2
+ COPY_MEM_16X2
+ MMI_ADDIU(%[loop_count], %[loop_count], -0x01)
+ "bnez %[loop_count], 1b \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]),
+ [loop_count]"+&r"(loop_count),
+ [dst]"+&r"(dst), [src]"+&r"(src)
+ : [src_stride]"r"((mips_reg)src_stride),
+ [dst_stride]"r"((mips_reg)dst_stride)
+ : "memory"
+ );
+ /* clang-format on */
+}
+
+void vp8_copy_mem8x8_mmi(unsigned char *src, int src_stride, unsigned char *dst,
+ int dst_stride) {
+ double ftmp[2];
+ uint64_t tmp[1];
+ uint8_t loop_count = 4;
+
+ /* clang-format off */
+ __asm__ volatile (
+ "1: \n\t"
+ COPY_MEM_8X2
+ MMI_ADDIU(%[loop_count], %[loop_count], -0x01)
+ "bnez %[loop_count], 1b \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [tmp0]"=&r"(tmp[0]), [loop_count]"+&r"(loop_count),
+ [dst]"+&r"(dst), [src]"+&r"(src)
+ : [src_stride]"r"((mips_reg)src_stride),
+ [dst_stride]"r"((mips_reg)dst_stride)
+ : "memory"
+ );
+ /* clang-format on */
+}
+
+void vp8_copy_mem8x4_mmi(unsigned char *src, int src_stride, unsigned char *dst,
+ int dst_stride) {
+ double ftmp[2];
+ uint64_t tmp[1];
+
+ /* clang-format off */
+ __asm__ volatile (
+ COPY_MEM_8X2
+ COPY_MEM_8X2
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [tmp0]"=&r"(tmp[0]),
+ [dst]"+&r"(dst), [src]"+&r"(src)
+ : [src_stride]"r"((mips_reg)src_stride),
+ [dst_stride]"r"((mips_reg)dst_stride)
+ : "memory"
+ );
+ /* clang-format on */
+}
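
Each COPY_MEM_16X2 expansion above moves two 16-byte rows with unaligned
8-byte loads and stores (gsldlc1/gsldrc1 pairs plus ldl/ldr for the upper
half). As a point of reference, a scalar sketch of the same 16x16 copy:

#include <string.h>

/* Plain C reference for the 16x16 block copy (one row per memcpy). */
static void copy_mem16x16_ref(const unsigned char *src, int src_stride,
                              unsigned char *dst, int dst_stride) {
  int r;
  for (r = 0; r < 16; ++r) {
    memcpy(dst, src, 16);
    src += src_stride;
    dst += dst_stride;
  }
}
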
diff --git a/media/libvpx/libvpx/vp8/common/mips/mmi/dequantize_mmi.c b/media/libvpx/libvpx/vp8/common/mips/mmi/dequantize_mmi.c
new file mode 100644
index 0000000000..b9330a6663
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/mmi/dequantize_mmi.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/blockd.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/asmdefs_mmi.h"
+
+void vp8_dequantize_b_mmi(BLOCKD *d, int16_t *DQC) {
+ double ftmp[8];
+
+ __asm__ volatile(
+ "gsldlc1 %[ftmp0], 0x07(%[qcoeff]) \n\t"
+ "gsldrc1 %[ftmp0], 0x00(%[qcoeff]) \n\t"
+ "gsldlc1 %[ftmp1], 0x0f(%[qcoeff]) \n\t"
+ "gsldrc1 %[ftmp1], 0x08(%[qcoeff]) \n\t"
+ "gsldlc1 %[ftmp2], 0x17(%[qcoeff]) \n\t"
+ "gsldrc1 %[ftmp2], 0x10(%[qcoeff]) \n\t"
+ "gsldlc1 %[ftmp3], 0x1f(%[qcoeff]) \n\t"
+ "gsldrc1 %[ftmp3], 0x18(%[qcoeff]) \n\t"
+
+ "gsldlc1 %[ftmp4], 0x07(%[DQC]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[DQC]) \n\t"
+ "gsldlc1 %[ftmp5], 0x0f(%[DQC]) \n\t"
+ "gsldrc1 %[ftmp5], 0x08(%[DQC]) \n\t"
+ "gsldlc1 %[ftmp6], 0x17(%[DQC]) \n\t"
+ "gsldrc1 %[ftmp6], 0x10(%[DQC]) \n\t"
+ "gsldlc1 %[ftmp7], 0x1f(%[DQC]) \n\t"
+ "gsldrc1 %[ftmp7], 0x18(%[DQC]) \n\t"
+
+ "pmullh %[ftmp0], %[ftmp0], %[ftmp4] \n\t"
+ "pmullh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
+ "pmullh %[ftmp2], %[ftmp2], %[ftmp6] \n\t"
+ "pmullh %[ftmp3], %[ftmp3], %[ftmp7] \n\t"
+
+ "gssdlc1 %[ftmp0], 0x07(%[dqcoeff]) \n\t"
+ "gssdrc1 %[ftmp0], 0x00(%[dqcoeff]) \n\t"
+ "gssdlc1 %[ftmp1], 0x0f(%[dqcoeff]) \n\t"
+ "gssdrc1 %[ftmp1], 0x08(%[dqcoeff]) \n\t"
+ "gssdlc1 %[ftmp2], 0x17(%[dqcoeff]) \n\t"
+ "gssdrc1 %[ftmp2], 0x10(%[dqcoeff]) \n\t"
+ "gssdlc1 %[ftmp3], 0x1f(%[dqcoeff]) \n\t"
+ "gssdrc1 %[ftmp3], 0x18(%[dqcoeff]) \n\t"
+ : [ftmp0] "=&f"(ftmp[0]), [ftmp1] "=&f"(ftmp[1]), [ftmp2] "=&f"(ftmp[2]),
+ [ftmp3] "=&f"(ftmp[3]), [ftmp4] "=&f"(ftmp[4]), [ftmp5] "=&f"(ftmp[5]),
+ [ftmp6] "=&f"(ftmp[6]), [ftmp7] "=&f"(ftmp[7])
+ : [dqcoeff] "r"(d->dqcoeff), [qcoeff] "r"(d->qcoeff), [DQC] "r"(DQC)
+ : "memory");
+}
+
+void vp8_dequant_idct_add_mmi(int16_t *input, int16_t *dq, unsigned char *dest,
+ int stride) {
+ double ftmp[8];
+
+ __asm__ volatile(
+ "gsldlc1 %[ftmp0], 0x07(%[dq]) \n\t"
+ "gsldrc1 %[ftmp0], 0x00(%[dq]) \n\t"
+ "gsldlc1 %[ftmp1], 0x0f(%[dq]) \n\t"
+ "gsldrc1 %[ftmp1], 0x08(%[dq]) \n\t"
+ "gsldlc1 %[ftmp2], 0x17(%[dq]) \n\t"
+ "gsldrc1 %[ftmp2], 0x10(%[dq]) \n\t"
+ "gsldlc1 %[ftmp3], 0x1f(%[dq]) \n\t"
+ "gsldrc1 %[ftmp3], 0x18(%[dq]) \n\t"
+
+ "gsldlc1 %[ftmp4], 0x07(%[input]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[input]) \n\t"
+ "gsldlc1 %[ftmp5], 0x0f(%[input]) \n\t"
+ "gsldrc1 %[ftmp5], 0x08(%[input]) \n\t"
+ "gsldlc1 %[ftmp6], 0x17(%[input]) \n\t"
+ "gsldrc1 %[ftmp6], 0x10(%[input]) \n\t"
+ "gsldlc1 %[ftmp7], 0x1f(%[input]) \n\t"
+ "gsldrc1 %[ftmp7], 0x18(%[input]) \n\t"
+
+ "pmullh %[ftmp0], %[ftmp0], %[ftmp4] \n\t"
+ "pmullh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
+ "pmullh %[ftmp2], %[ftmp2], %[ftmp6] \n\t"
+ "pmullh %[ftmp3], %[ftmp3], %[ftmp7] \n\t"
+
+ "gssdlc1 %[ftmp0], 0x07(%[input]) \n\t"
+ "gssdrc1 %[ftmp0], 0x00(%[input]) \n\t"
+ "gssdlc1 %[ftmp1], 0x0f(%[input]) \n\t"
+ "gssdrc1 %[ftmp1], 0x08(%[input]) \n\t"
+ "gssdlc1 %[ftmp2], 0x17(%[input]) \n\t"
+ "gssdrc1 %[ftmp2], 0x10(%[input]) \n\t"
+ "gssdlc1 %[ftmp3], 0x1f(%[input]) \n\t"
+ "gssdrc1 %[ftmp3], 0x18(%[input]) \n\t"
+ : [ftmp0] "=&f"(ftmp[0]), [ftmp1] "=&f"(ftmp[1]), [ftmp2] "=&f"(ftmp[2]),
+ [ftmp3] "=&f"(ftmp[3]), [ftmp4] "=&f"(ftmp[4]), [ftmp5] "=&f"(ftmp[5]),
+ [ftmp6] "=&f"(ftmp[6]), [ftmp7] "=&f"(ftmp[7])
+ : [dq] "r"(dq), [input] "r"(input)
+ : "memory");
+
+ vp8_short_idct4x4llm_mmi(input, dest, stride, dest, stride);
+
+ __asm__ volatile(
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "gssdlc1 %[ftmp0], 0x07(%[input]) \n\t"
+ "gssdrc1 %[ftmp0], 0x00(%[input]) \n\t"
+ "sdl $0, 0x0f(%[input]) \n\t"
+ "sdr $0, 0x08(%[input]) \n\t"
+ "gssdlc1 %[ftmp0], 0x17(%[input]) \n\t"
+ "gssdrc1 %[ftmp0], 0x10(%[input]) \n\t"
+ "sdl $0, 0x1f(%[input]) \n\t"
+ "sdr $0, 0x18(%[input]) \n\t"
+ : [ftmp0] "=&f"(ftmp[0])
+ : [input] "r"(input)
+ : "memory");
+}
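
Both routines above reduce to an element-wise 16-bit multiply over the
block's 16 coefficients; the four pmullh instructions each handle four lanes.
A scalar sketch of that core step (hypothetical helper name):

#include <stdint.h>

/* Scalar model of the dequantization core: scale each quantized
 * coefficient by its per-position dequantization factor. */
static void dequant_block(const int16_t *q, const int16_t *factor,
                          int16_t *out) {
  int i;
  for (i = 0; i < 16; ++i) out[i] = q[i] * factor[i];
}
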
diff --git a/media/libvpx/libvpx/vp8/common/mips/mmi/idct_blk_mmi.c b/media/libvpx/libvpx/vp8/common/mips/mmi/idct_blk_mmi.c
new file mode 100644
index 0000000000..4fd6854c52
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/mmi/idct_blk_mmi.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vpx_mem/vpx_mem.h"
+
+void vp8_dequant_idct_add_y_block_mmi(int16_t *q, int16_t *dq, uint8_t *dst,
+ int stride, char *eobs) {
+ int i, j;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ if (*eobs++ > 1) {
+ vp8_dequant_idct_add_mmi(q, dq, dst, stride);
+ } else {
+ vp8_dc_only_idct_add_mmi(q[0] * dq[0], dst, stride, dst, stride);
+ memset(q, 0, 2 * sizeof(q[0]));
+ }
+
+ q += 16;
+ dst += 4;
+ }
+
+ dst += 4 * stride - 16;
+ }
+}
+
+void vp8_dequant_idct_add_uv_block_mmi(int16_t *q, int16_t *dq, uint8_t *dst_u,
+ uint8_t *dst_v, int stride, char *eobs) {
+ int i, j;
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++) {
+ if (*eobs++ > 1) {
+ vp8_dequant_idct_add_mmi(q, dq, dst_u, stride);
+ } else {
+ vp8_dc_only_idct_add_mmi(q[0] * dq[0], dst_u, stride, dst_u, stride);
+ memset(q, 0, 2 * sizeof(q[0]));
+ }
+
+ q += 16;
+ dst_u += 4;
+ }
+
+ dst_u += 4 * stride - 8;
+ }
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++) {
+ if (*eobs++ > 1) {
+ vp8_dequant_idct_add_mmi(q, dq, dst_v, stride);
+ } else {
+ vp8_dc_only_idct_add_mmi(q[0] * dq[0], dst_v, stride, dst_v, stride);
+ memset(q, 0, 2 * sizeof(q[0]));
+ }
+
+ q += 16;
+ dst_v += 4;
+ }
+
+ dst_v += 4 * stride - 8;
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/mips/mmi/idctllm_mmi.c b/media/libvpx/libvpx/vp8/common/mips/mmi/idctllm_mmi.c
new file mode 100644
index 0000000000..a35689dd30
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/mmi/idctllm_mmi.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vpx_ports/mem.h"
+#include "vpx_ports/asmdefs_mmi.h"
+
+#define TRANSPOSE_4H \
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
+ MMI_LI(%[tmp0], 0x93) \
+ "mtc1 %[tmp0], %[ftmp10] \n\t" \
+ "punpcklhw %[ftmp5], %[ftmp1], %[ftmp0] \n\t" \
+ "punpcklhw %[ftmp9], %[ftmp2], %[ftmp0] \n\t" \
+ "pshufh %[ftmp9], %[ftmp9], %[ftmp10] \n\t" \
+ "por %[ftmp5], %[ftmp5], %[ftmp9] \n\t" \
+ "punpckhhw %[ftmp6], %[ftmp1], %[ftmp0] \n\t" \
+ "punpckhhw %[ftmp9], %[ftmp2], %[ftmp0] \n\t" \
+ "pshufh %[ftmp9], %[ftmp9], %[ftmp10] \n\t" \
+ "por %[ftmp6], %[ftmp6], %[ftmp9] \n\t" \
+ "punpcklhw %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \
+ "punpcklhw %[ftmp9], %[ftmp4], %[ftmp0] \n\t" \
+ "pshufh %[ftmp9], %[ftmp9], %[ftmp10] \n\t" \
+ "por %[ftmp7], %[ftmp7], %[ftmp9] \n\t" \
+ "punpckhhw %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \
+ "punpckhhw %[ftmp9], %[ftmp4], %[ftmp0] \n\t" \
+ "pshufh %[ftmp9], %[ftmp9], %[ftmp10] \n\t" \
+ "por %[ftmp8], %[ftmp8], %[ftmp9] \n\t" \
+ "punpcklwd %[ftmp1], %[ftmp5], %[ftmp7] \n\t" \
+ "punpckhwd %[ftmp2], %[ftmp5], %[ftmp7] \n\t" \
+ "punpcklwd %[ftmp3], %[ftmp6], %[ftmp8] \n\t" \
+ "punpckhwd %[ftmp4], %[ftmp6], %[ftmp8] \n\t"
+
+void vp8_short_idct4x4llm_mmi(int16_t *input, unsigned char *pred_ptr,
+ int pred_stride, unsigned char *dst_ptr,
+ int dst_stride) {
+ double ftmp[12];
+ uint64_t tmp[1];
+ double ff_ph_04, ff_ph_4e7b, ff_ph_22a3;
+
+ __asm__ volatile (
+ "dli %[tmp0], 0x0004000400040004 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_04] \n\t"
+ "dli %[tmp0], 0x4e7b4e7b4e7b4e7b \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_4e7b] \n\t"
+ "dli %[tmp0], 0x22a322a322a322a3 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_22a3] \n\t"
+ MMI_LI(%[tmp0], 0x02)
+ "dmtc1 %[tmp0], %[ftmp11] \n\t"
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+
+ "gsldlc1 %[ftmp1], 0x07(%[ip]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[ip]) \n\t"
+ "gsldlc1 %[ftmp2], 0x0f(%[ip]) \n\t"
+ "gsldrc1 %[ftmp2], 0x08(%[ip]) \n\t"
+ "gsldlc1 %[ftmp3], 0x17(%[ip]) \n\t"
+ "gsldrc1 %[ftmp3], 0x10(%[ip]) \n\t"
+ "gsldlc1 %[ftmp4], 0x1f(%[ip]) \n\t"
+ "gsldrc1 %[ftmp4], 0x18(%[ip]) \n\t"
+
+ // ip[0...3] + ip[8...11]
+ "paddh %[ftmp5], %[ftmp1], %[ftmp3] \n\t"
+ // ip[0...3] - ip[8...11]
+ "psubh %[ftmp6], %[ftmp1], %[ftmp3] \n\t"
+ // (ip[12...15] * sinpi8sqrt2) >> 16
+ "psllh %[ftmp9], %[ftmp4], %[ftmp11] \n\t"
+ "pmulhh %[ftmp7], %[ftmp9], %[ff_ph_22a3] \n\t"
+ // (ip[ 4... 7] * sinpi8sqrt2) >> 16
+ "psllh %[ftmp9], %[ftmp2], %[ftmp11] \n\t"
+ "pmulhh %[ftmp8], %[ftmp9], %[ff_ph_22a3] \n\t"
+ // ip[ 4... 7] + ((ip[ 4... 7] * cospi8sqrt2minus1) >> 16)
+ "pmulhh %[ftmp9], %[ftmp2], %[ff_ph_4e7b] \n\t"
+ "paddh %[ftmp9], %[ftmp9], %[ftmp2] \n\t"
+ // ip[12...15] + ((ip[12...15] * cospi8sqrt2minus1) >> 16)
+ "pmulhh %[ftmp10], %[ftmp4], %[ff_ph_4e7b] \n\t"
+ "paddh %[ftmp10], %[ftmp10], %[ftmp4] \n\t"
+
+ "paddh %[ftmp1], %[ftmp5], %[ftmp7] \n\t"
+ "paddh %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
+ "paddh %[ftmp2], %[ftmp6], %[ftmp8] \n\t"
+ "psubh %[ftmp2], %[ftmp2], %[ftmp10] \n\t"
+ "psubh %[ftmp3], %[ftmp6], %[ftmp8] \n\t"
+ "paddh %[ftmp3], %[ftmp3], %[ftmp10] \n\t"
+ "psubh %[ftmp4], %[ftmp5], %[ftmp7] \n\t"
+ "psubh %[ftmp4], %[ftmp4], %[ftmp9] \n\t"
+
+ TRANSPOSE_4H
+ // a
+ "paddh %[ftmp5], %[ftmp1], %[ftmp3] \n\t"
+ // b
+ "psubh %[ftmp6], %[ftmp1], %[ftmp3] \n\t"
+ // c
+ "psllh %[ftmp9], %[ftmp2], %[ftmp11] \n\t"
+ "pmulhh %[ftmp9], %[ftmp9], %[ff_ph_22a3] \n\t"
+ "psubh %[ftmp7], %[ftmp9], %[ftmp4] \n\t"
+ "pmulhh %[ftmp10], %[ftmp4], %[ff_ph_4e7b] \n\t"
+ "psubh %[ftmp7], %[ftmp7], %[ftmp10] \n\t"
+ // d
+ "psllh %[ftmp9], %[ftmp4], %[ftmp11] \n\t"
+ "pmulhh %[ftmp9], %[ftmp9], %[ff_ph_22a3] \n\t"
+ "paddh %[ftmp8], %[ftmp9], %[ftmp2] \n\t"
+ "pmulhh %[ftmp10], %[ftmp2], %[ff_ph_4e7b] \n\t"
+ "paddh %[ftmp8], %[ftmp8], %[ftmp10] \n\t"
+
+ MMI_LI(%[tmp0], 0x03)
+ "mtc1 %[tmp0], %[ftmp11] \n\t"
+ // a + d
+ "paddh %[ftmp1], %[ftmp5], %[ftmp8] \n\t"
+ "paddh %[ftmp1], %[ftmp1], %[ff_ph_04] \n\t"
+ "psrah %[ftmp1], %[ftmp1], %[ftmp11] \n\t"
+ // b + c
+ "paddh %[ftmp2], %[ftmp6], %[ftmp7] \n\t"
+ "paddh %[ftmp2], %[ftmp2], %[ff_ph_04] \n\t"
+ "psrah %[ftmp2], %[ftmp2], %[ftmp11] \n\t"
+ // b - c
+ "psubh %[ftmp3], %[ftmp6], %[ftmp7] \n\t"
+ "paddh %[ftmp3], %[ftmp3], %[ff_ph_04] \n\t"
+ "psrah %[ftmp3], %[ftmp3], %[ftmp11] \n\t"
+ // a - d
+ "psubh %[ftmp4], %[ftmp5], %[ftmp8] \n\t"
+ "paddh %[ftmp4], %[ftmp4], %[ff_ph_04] \n\t"
+ "psrah %[ftmp4], %[ftmp4], %[ftmp11] \n\t"
+
+ TRANSPOSE_4H
+#if _MIPS_SIM == _ABIO32
+ "ulw %[tmp0], 0x00(%[pred_prt]) \n\t"
+ "mtc1 %[tmp0], %[ftmp5] \n\t"
+#else
+ "gslwlc1 %[ftmp5], 0x03(%[pred_ptr]) \n\t"
+ "gslwrc1 %[ftmp5], 0x00(%[pred_ptr]) \n\t"
+#endif
+ "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
+ "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
+ "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
+ "gsswlc1 %[ftmp1], 0x03(%[dst_ptr]) \n\t"
+ "gsswrc1 %[ftmp1], 0x00(%[dst_ptr]) \n\t"
+ MMI_ADDU(%[pred_ptr], %[pred_ptr], %[pred_stride])
+ MMI_ADDU(%[dst_ptr], %[dst_ptr], %[dst_stride])
+
+#if _MIPS_SIM == _ABIO32
+ "ulw %[tmp0], 0x00(%[pred_prt]) \n\t"
+ "mtc1 %[tmp0], %[ftmp6] \n\t"
+#else
+ "gslwlc1 %[ftmp6], 0x03(%[pred_ptr]) \n\t"
+ "gslwrc1 %[ftmp6], 0x00(%[pred_ptr]) \n\t"
+#endif
+ "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
+ "paddh %[ftmp2], %[ftmp2], %[ftmp6] \n\t"
+ "packushb %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
+ "gsswlc1 %[ftmp2], 0x03(%[dst_ptr]) \n\t"
+ "gsswrc1 %[ftmp2], 0x00(%[dst_ptr]) \n\t"
+ MMI_ADDU(%[pred_ptr], %[pred_ptr], %[pred_stride])
+ MMI_ADDU(%[dst_ptr], %[dst_ptr], %[dst_stride])
+
+#if _MIPS_SIM == _ABIO32
+ "ulw %[tmp0], 0x00(%[pred_prt]) \n\t"
+ "mtc1 %[tmp0], %[ftmp7] \n\t"
+#else
+ "gslwlc1 %[ftmp7], 0x03(%[pred_ptr]) \n\t"
+ "gslwrc1 %[ftmp7], 0x00(%[pred_ptr]) \n\t"
+#endif
+ "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t"
+ "paddh %[ftmp3], %[ftmp3], %[ftmp7] \n\t"
+ "packushb %[ftmp3], %[ftmp3], %[ftmp0] \n\t"
+ "gsswlc1 %[ftmp3], 0x03(%[dst_ptr]) \n\t"
+ "gsswrc1 %[ftmp3], 0x00(%[dst_ptr]) \n\t"
+ MMI_ADDU(%[pred_ptr], %[pred_ptr], %[pred_stride])
+ MMI_ADDU(%[dst_ptr], %[dst_ptr], %[dst_stride])
+
+#if _MIPS_SIM == _ABIO32
+ "ulw %[tmp0], 0x00(%[pred_prt]) \n\t"
+ "mtc1 %[tmp0], %[ftmp8] \n\t"
+#else
+ "gslwlc1 %[ftmp8], 0x03(%[pred_ptr]) \n\t"
+ "gslwrc1 %[ftmp8], 0x00(%[pred_ptr]) \n\t"
+#endif
+ "punpcklbh %[ftmp8], %[ftmp8], %[ftmp0] \n\t"
+ "paddh %[ftmp4], %[ftmp4], %[ftmp8] \n\t"
+ "packushb %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
+ "gsswlc1 %[ftmp4], 0x03(%[dst_ptr]) \n\t"
+ "gsswrc1 %[ftmp4], 0x00(%[dst_ptr]) \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), [ftmp2]"=&f"(ftmp[2]),
+ [ftmp3]"=&f"(ftmp[3]), [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), [ftmp8]"=&f"(ftmp[8]),
+ [ftmp9]"=&f"(ftmp[9]), [ftmp10]"=&f"(ftmp[10]),
+ [ftmp11]"=&f"(ftmp[11]), [tmp0]"=&r"(tmp[0]),
+ [pred_ptr]"+&r"(pred_ptr), [dst_ptr]"+&r"(dst_ptr),
+ [ff_ph_4e7b]"=&f"(ff_ph_4e7b), [ff_ph_04]"=&f"(ff_ph_04),
+ [ff_ph_22a3]"=&f"(ff_ph_22a3)
+ : [ip]"r"(input),
+ [pred_stride]"r"((mips_reg)pred_stride),
+ [dst_stride]"r"((mips_reg)dst_stride)
+ : "memory"
+ );
+}
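
The constants deserve a note: pmulhh keeps only the high 16 bits of a signed
16x16 product, and sinpi8sqrt2 = 35468 does not fit in a signed 16-bit
multiplier. The code therefore shifts the coefficient left by two (psllh with
ftmp11 = 2) and multiplies by 35468 / 4 = 8867 (0x22a3), while
cospi8sqrt2minus1 = 20091 (0x4e7b) fits directly. A scalar sketch, valid
while the 16-bit shift does not wrap (coefficient magnitude below 2^13):

#include <stdint.h>

static int16_t mul_sinpi8sqrt2(int16_t x) {
  /* (x << 2) * 8867 >> 16 == (x * 35468) >> 16 */
  return (int16_t)((((int32_t)x << 2) * 8867) >> 16);
}

static int16_t mul_cospi8sqrt2minus1(int16_t x) {
  return (int16_t)(((int32_t)x * 20091) >> 16);
}
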
+
+void vp8_dc_only_idct_add_mmi(int16_t input_dc, unsigned char *pred_ptr,
+ int pred_stride, unsigned char *dst_ptr,
+ int dst_stride) {
+ int a0 = ((input_dc + 4) >> 3);
+ double a1, ftmp[5];
+ int low32;
+
+ __asm__ volatile (
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "dmtc1 %[a0], %[a1] \n\t"
+ "pshufh %[a1], %[a1], %[ftmp0] \n\t"
+ "ulw %[low32], 0x00(%[pred_ptr]) \n\t"
+ "mtc1 %[low32], %[ftmp1] \n\t"
+ "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t"
+ "paddsh %[ftmp2], %[ftmp2], %[a1] \n\t"
+ "packushb %[ftmp1], %[ftmp2], %[ftmp0] \n\t"
+ "gsswlc1 %[ftmp1], 0x03(%[dst_ptr]) \n\t"
+ "gsswrc1 %[ftmp1], 0x00(%[dst_ptr]) \n\t"
+
+ MMI_ADDU(%[pred_ptr], %[pred_ptr], %[pred_stride])
+ MMI_ADDU(%[dst_ptr], %[dst_ptr], %[dst_stride])
+ "ulw %[low32], 0x00(%[pred_ptr]) \n\t"
+ "mtc1 %[low32], %[ftmp1] \n\t"
+ "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t"
+ "paddsh %[ftmp2], %[ftmp2], %[a1] \n\t"
+ "packushb %[ftmp1], %[ftmp2], %[ftmp0] \n\t"
+ "gsswlc1 %[ftmp1], 0x03(%[dst_ptr]) \n\t"
+ "gsswrc1 %[ftmp1], 0x00(%[dst_ptr]) \n\t"
+
+ MMI_ADDU(%[pred_ptr], %[pred_ptr], %[pred_stride])
+ MMI_ADDU(%[dst_ptr], %[dst_ptr], %[dst_stride])
+ "ulw %[low32], 0x00(%[pred_ptr]) \n\t"
+ "mtc1 %[low32], %[ftmp1] \n\t"
+ "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t"
+ "paddsh %[ftmp2], %[ftmp2], %[a1] \n\t"
+ "packushb %[ftmp1], %[ftmp2], %[ftmp0] \n\t"
+ "gsswlc1 %[ftmp1], 0x03(%[dst_ptr]) \n\t"
+ "gsswrc1 %[ftmp1], 0x00(%[dst_ptr]) \n\t"
+
+ MMI_ADDU(%[pred_ptr], %[pred_ptr], %[pred_stride])
+ MMI_ADDU(%[dst_ptr], %[dst_ptr], %[dst_stride])
+ "ulw %[low32], 0x00(%[pred_ptr]) \n\t"
+ "mtc1 %[low32], %[ftmp1] \n\t"
+ "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t"
+ "paddsh %[ftmp2], %[ftmp2], %[a1] \n\t"
+ "packushb %[ftmp1], %[ftmp2], %[ftmp0] \n\t"
+ "gsswlc1 %[ftmp1], 0x03(%[dst_ptr]) \n\t"
+ "gsswrc1 %[ftmp1], 0x00(%[dst_ptr]) \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), [ftmp2]"=&f"(ftmp[2]),
+ [ftmp3]"=&f"(ftmp[3]), [ftmp4]"=&f"(ftmp[4]), [low32]"=&r"(low32),
+ [dst_ptr]"+&r"(dst_ptr), [pred_ptr]"+&r"(pred_ptr), [a1]"=&f"(a1)
+ : [dst_stride]"r"((mips_reg)dst_stride),
+ [pred_stride]"r"((mips_reg)pred_stride), [a0]"r"(a0)
+ : "memory"
+ );
+}
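
The DC-only path rounds the single surviving coefficient once and adds it to
every predictor pixel (pshufh broadcast, paddsh, packushb saturation). A
scalar sketch of the same operation (hypothetical helper names):

static unsigned char clamp_255(int v) {
  return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

/* Scalar model: one rounded DC term added to each pixel of the 4x4. */
static void dc_only_idct_add_ref(short input_dc, const unsigned char *pred,
                                 int pred_stride, unsigned char *dst,
                                 int dst_stride) {
  int a = (input_dc + 4) >> 3;
  int r, c;
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) dst[c] = clamp_255(pred[c] + a);
    pred += pred_stride;
    dst += dst_stride;
  }
}
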
+
+void vp8_short_inv_walsh4x4_mmi(int16_t *input, int16_t *mb_dqcoeff) {
+ int i;
+ int16_t output[16];
+ double ff_ph_03, ftmp[12];
+ uint64_t tmp[1];
+
+ __asm__ volatile (
+ "dli %[tmp0], 0x0003000300030003 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_03] \n\t"
+ MMI_LI(%[tmp0], 0x03)
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "dmtc1 %[tmp0], %[ftmp11] \n\t"
+ "gsldlc1 %[ftmp1], 0x07(%[ip]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[ip]) \n\t"
+ "gsldlc1 %[ftmp2], 0x0f(%[ip]) \n\t"
+ "gsldrc1 %[ftmp2], 0x08(%[ip]) \n\t"
+ "gsldlc1 %[ftmp3], 0x17(%[ip]) \n\t"
+ "gsldrc1 %[ftmp3], 0x10(%[ip]) \n\t"
+ "gsldlc1 %[ftmp4], 0x1f(%[ip]) \n\t"
+ "gsldrc1 %[ftmp4], 0x18(%[ip]) \n\t"
+ "paddh %[ftmp5], %[ftmp1], %[ftmp2] \n\t"
+ "psubh %[ftmp6], %[ftmp1], %[ftmp2] \n\t"
+ "paddh %[ftmp7], %[ftmp3], %[ftmp4] \n\t"
+ "psubh %[ftmp8], %[ftmp3], %[ftmp4] \n\t"
+
+ "paddh %[ftmp1], %[ftmp5], %[ftmp7] \n\t"
+ "psubh %[ftmp2], %[ftmp5], %[ftmp7] \n\t"
+ "psubh %[ftmp3], %[ftmp6], %[ftmp8] \n\t"
+ "paddh %[ftmp4], %[ftmp6], %[ftmp8] \n\t"
+
+ TRANSPOSE_4H
+ // a
+ "paddh %[ftmp5], %[ftmp1], %[ftmp4] \n\t"
+ // d
+ "psubh %[ftmp6], %[ftmp1], %[ftmp4] \n\t"
+ // b
+ "paddh %[ftmp7], %[ftmp2], %[ftmp3] \n\t"
+ // c
+ "psubh %[ftmp8], %[ftmp2], %[ftmp3] \n\t"
+
+ "paddh %[ftmp1], %[ftmp5], %[ftmp7] \n\t"
+ "paddh %[ftmp2], %[ftmp6], %[ftmp8] \n\t"
+ "psubh %[ftmp3], %[ftmp5], %[ftmp7] \n\t"
+ "psubh %[ftmp4], %[ftmp6], %[ftmp8] \n\t"
+
+ "paddh %[ftmp1], %[ftmp1], %[ff_ph_03] \n\t"
+ "psrah %[ftmp1], %[ftmp1], %[ftmp11] \n\t"
+ "paddh %[ftmp2], %[ftmp2], %[ff_ph_03] \n\t"
+ "psrah %[ftmp2], %[ftmp2], %[ftmp11] \n\t"
+ "paddh %[ftmp3], %[ftmp3], %[ff_ph_03] \n\t"
+ "psrah %[ftmp3], %[ftmp3], %[ftmp11] \n\t"
+ "paddh %[ftmp4], %[ftmp4], %[ff_ph_03] \n\t"
+ "psrah %[ftmp4], %[ftmp4], %[ftmp11] \n\t"
+
+ TRANSPOSE_4H
+ "gssdlc1 %[ftmp1], 0x07(%[op]) \n\t"
+ "gssdrc1 %[ftmp1], 0x00(%[op]) \n\t"
+ "gssdlc1 %[ftmp2], 0x0f(%[op]) \n\t"
+ "gssdrc1 %[ftmp2], 0x08(%[op]) \n\t"
+ "gssdlc1 %[ftmp3], 0x17(%[op]) \n\t"
+ "gssdrc1 %[ftmp3], 0x10(%[op]) \n\t"
+ "gssdlc1 %[ftmp4], 0x1f(%[op]) \n\t"
+ "gssdrc1 %[ftmp4], 0x18(%[op]) \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), [ftmp2]"=&f"(ftmp[2]),
+ [ftmp3]"=&f"(ftmp[3]), [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), [ftmp8]"=&f"(ftmp[8]),
+ [ftmp9]"=&f"(ftmp[9]), [ftmp10]"=&f"(ftmp[10]),
+ [ftmp11]"=&f"(ftmp[11]), [tmp0]"=&r"(tmp[0]), [ff_ph_03]"=&f"(ff_ph_03)
+ : [ip]"r"(input), [op]"r"(output)
+ : "memory"
+ );
+
+ for (i = 0; i < 16; i++) {
+ mb_dqcoeff[i * 16] = output[i];
+ }
+}
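
The routine performs vertical butterflies, a transpose, horizontal
butterflies with +3 rounding and a shift by 3, then scatters one DC value per
4x4 subblock into mb_dqcoeff. A scalar sketch of the same two passes, using
the same operand grouping as the asm (hypothetical helper name):

#include <stdint.h>

static void inv_walsh4x4_sketch(const int16_t in[4][4],
                                int16_t *mb_dqcoeff) {
  int32_t t[4][4];
  int r, c, i;
  for (c = 0; c < 4; ++c) { /* vertical butterflies */
    int32_t s0 = in[0][c] + in[1][c], s1 = in[0][c] - in[1][c];
    int32_t s2 = in[2][c] + in[3][c], s3 = in[2][c] - in[3][c];
    t[0][c] = s0 + s2;
    t[1][c] = s0 - s2;
    t[2][c] = s1 - s3;
    t[3][c] = s1 + s3;
  }
  for (r = 0; r < 4; ++r) { /* horizontal butterflies + rounding */
    int32_t a = t[r][0] + t[r][3], d = t[r][0] - t[r][3];
    int32_t b = t[r][1] + t[r][2], e = t[r][1] - t[r][2];
    t[r][0] = (a + b + 3) >> 3;
    t[r][1] = (d + e + 3) >> 3;
    t[r][2] = (a - b + 3) >> 3;
    t[r][3] = (d - e + 3) >> 3;
  }
  /* one DC per 4x4 subblock: every 16th position of mb_dqcoeff */
  for (i = 0; i < 16; ++i) mb_dqcoeff[i * 16] = (int16_t)t[i / 4][i % 4];
}
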
diff --git a/media/libvpx/libvpx/vp8/common/mips/mmi/loopfilter_filters_mmi.c b/media/libvpx/libvpx/vp8/common/mips/mmi/loopfilter_filters_mmi.c
new file mode 100644
index 0000000000..a07a7e3b41
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/mmi/loopfilter_filters_mmi.c
@@ -0,0 +1,1415 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_dsp_rtcd.h"
+#include "vp8/common/loopfilter.h"
+#include "vp8/common/onyxc_int.h"
+#include "vpx_ports/asmdefs_mmi.h"
+
+void vp8_loop_filter_horizontal_edge_mmi(
+ unsigned char *src_ptr, int src_pixel_step, const unsigned char *blimit,
+ const unsigned char *limit, const unsigned char *thresh, int count) {
+ uint64_t tmp[1];
+ mips_reg addr[2];
+ double ftmp[12];
+ double ff_ph_01, ff_pb_fe, ff_pb_80, ff_pb_04, ff_pb_03;
+ /* clang-format off */
+ __asm__ volatile (
+ "dli %[tmp0], 0x0001000100010001 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_01] \n\t"
+ "dli %[tmp0], 0xfefefefefefefefe \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_fe] \n\t"
+ "dli %[tmp0], 0x8080808080808080 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_80] \n\t"
+ "dli %[tmp0], 0x0404040404040404 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_04] \n\t"
+ "dli %[tmp0], 0x0303030303030303 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_03] \n\t"
+ "1: \n\t"
+ "gsldlc1 %[ftmp10], 0x07(%[limit]) \n\t"
+ "gsldrc1 %[ftmp10], 0x00(%[limit]) \n\t"
+
+ MMI_ADDU(%[addr0], %[src_ptr], %[src_pixel_step])
+
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step_x4])
+ "gsldlc1 %[ftmp1], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[addr1]) \n\t"
+
+ MMI_SUBU(%[addr1], %[addr0], %[src_pixel_step_x4])
+ "gsldlc1 %[ftmp3], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp3], 0x00(%[addr1]) \n\t"
+ "pasubub %[ftmp0], %[ftmp1], %[ftmp3] \n\t"
+ "psubusb %[ftmp0], %[ftmp0], %[ftmp10] \n\t"
+
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step_x2])
+ "gsldlc1 %[ftmp4], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[addr1]) \n\t"
+ "pasubub %[ftmp1], %[ftmp3], %[ftmp4] \n\t"
+ "psubusb %[ftmp1], %[ftmp1], %[ftmp10] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp5], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp5], 0x00(%[addr1]) \n\t"
+ "pasubub %[ftmp9], %[ftmp4], %[ftmp5] \n\t"
+ "psubusb %[ftmp1], %[ftmp9], %[ftmp10] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+
+ "gsldlc1 %[ftmp6], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp6], 0x00(%[src_ptr]) \n\t"
+
+ "gsldlc1 %[ftmp7], 0x07(%[addr0]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[addr0]) \n\t"
+ "pasubub %[ftmp11], %[ftmp7], %[ftmp6] \n\t"
+ "psubusb %[ftmp1], %[ftmp11], %[ftmp10] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+
+ MMI_ADDU(%[addr1], %[src_ptr], %[src_pixel_step_x2])
+ "gsldlc1 %[ftmp8], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp8], 0x00(%[addr1]) \n\t"
+ "pasubub %[ftmp1], %[ftmp8], %[ftmp7] \n\t"
+ "psubusb %[ftmp1], %[ftmp1], %[ftmp10] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+
+ MMI_ADDU(%[addr1], %[addr0], %[src_pixel_step_x2])
+ "gsldlc1 %[ftmp2], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[addr1]) \n\t"
+ "pasubub %[ftmp1], %[ftmp2], %[ftmp8] \n\t"
+ "psubusb %[ftmp1], %[ftmp1], %[ftmp10] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+
+ "pasubub %[ftmp1], %[ftmp5], %[ftmp6] \n\t"
+ "paddusb %[ftmp1], %[ftmp1], %[ftmp1] \n\t"
+ "pasubub %[ftmp2], %[ftmp4], %[ftmp7] \n\t"
+ "pand %[ftmp2], %[ftmp2], %[ff_pb_fe] \n\t"
+ "dli %[tmp0], 0x01 \n\t"
+ "dmtc1 %[tmp0], %[ftmp10] \n\t"
+ "psrlh %[ftmp2], %[ftmp2], %[ftmp10] \n\t"
+ "paddusb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
+ "gsldlc1 %[ftmp10], 0x07(%[blimit]) \n\t"
+ "gsldrc1 %[ftmp10], 0x00(%[blimit]) \n\t"
+ "psubusb %[ftmp1], %[ftmp1], %[ftmp10] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+ "pxor %[ftmp10], %[ftmp10], %[ftmp10] \n\t"
+ "pcmpeqb %[ftmp0], %[ftmp0], %[ftmp10] \n\t"
+
+ "gsldlc1 %[ftmp10], 0x07(%[thresh]) \n\t"
+ "gsldrc1 %[ftmp10], 0x00(%[thresh]) \n\t"
+ "psubusb %[ftmp1], %[ftmp9], %[ftmp10] \n\t"
+ "psubusb %[ftmp2], %[ftmp11], %[ftmp10] \n\t"
+ "paddb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
+ "pxor %[ftmp2], %[ftmp2], %[ftmp2] \n\t"
+ "pcmpeqb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
+ "pcmpeqb %[ftmp2], %[ftmp2], %[ftmp2] \n\t"
+ "pxor %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
+
+ "pxor %[ftmp4], %[ftmp4], %[ff_pb_80] \n\t"
+ "pxor %[ftmp5], %[ftmp5], %[ff_pb_80] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ff_pb_80] \n\t"
+ "pxor %[ftmp7], %[ftmp7], %[ff_pb_80] \n\t"
+
+ "psubsb %[ftmp2], %[ftmp4], %[ftmp7] \n\t"
+ "pand %[ftmp2], %[ftmp2], %[ftmp1] \n\t"
+ "psubsb %[ftmp3], %[ftmp6], %[ftmp5] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp3] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp3] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp3] \n\t"
+ "pand %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
+
+ "paddsb %[ftmp8], %[ftmp2], %[ff_pb_03] \n\t"
+ "paddsb %[ftmp9], %[ftmp2], %[ff_pb_04] \n\t"
+
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "pxor %[ftmp11], %[ftmp11], %[ftmp11] \n\t"
+ "punpcklbh %[ftmp0], %[ftmp0], %[ftmp8] \n\t"
+ "punpckhbh %[ftmp11], %[ftmp11], %[ftmp8] \n\t"
+
+ "dli %[tmp0], 0x0b \n\t"
+ "dmtc1 %[tmp0], %[ftmp10] \n\t"
+ "psrah %[ftmp0], %[ftmp0], %[ftmp10] \n\t"
+ "psrah %[ftmp11], %[ftmp11], %[ftmp10] \n\t"
+ "packsshb %[ftmp8], %[ftmp0], %[ftmp11] \n\t"
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp0], %[ftmp0], %[ftmp9] \n\t"
+ "psrah %[ftmp0], %[ftmp0], %[ftmp10] \n\t"
+ "pxor %[ftmp11], %[ftmp11], %[ftmp11] \n\t"
+ "punpckhbh %[ftmp9], %[ftmp11], %[ftmp9] \n\t"
+ "psrah %[ftmp9], %[ftmp9], %[ftmp10] \n\t"
+ "paddsh %[ftmp11], %[ftmp0], %[ff_ph_01] \n\t"
+ "packsshb %[ftmp0], %[ftmp0], %[ftmp9] \n\t"
+ "paddsh %[ftmp9], %[ftmp9], %[ff_ph_01] \n\t"
+
+ "dli %[tmp0], 0x01 \n\t"
+ "dmtc1 %[tmp0], %[ftmp10] \n\t"
+ "psrah %[ftmp11], %[ftmp11], %[ftmp10] \n\t"
+ "psrah %[ftmp9], %[ftmp9], %[ftmp10] \n\t"
+ "packsshb %[ftmp11], %[ftmp11], %[ftmp9] \n\t"
+ "pandn %[ftmp1], %[ftmp1], %[ftmp11] \n\t"
+ "paddsb %[ftmp5], %[ftmp5], %[ftmp8] \n\t"
+ "pxor %[ftmp5], %[ftmp5], %[ff_pb_80] \n\t"
+
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp5], 0x07(%[addr1]) \n\t"
+ "gssdrc1 %[ftmp5], 0x00(%[addr1]) \n\t"
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step_x2])
+ "paddsb %[ftmp4], %[ftmp4], %[ftmp1] \n\t"
+ "pxor %[ftmp4], %[ftmp4], %[ff_pb_80] \n\t"
+ "gssdlc1 %[ftmp4], 0x07(%[addr1]) \n\t"
+ "gssdrc1 %[ftmp4], 0x00(%[addr1]) \n\t"
+
+ "psubsb %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ff_pb_80] \n\t"
+ "gssdlc1 %[ftmp6], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp6], 0x00(%[src_ptr]) \n\t"
+
+ "psubsb %[ftmp7], %[ftmp7], %[ftmp1] \n\t"
+ "pxor %[ftmp7], %[ftmp7], %[ff_pb_80] \n\t"
+ "gssdlc1 %[ftmp7], 0x07(%[addr0]) \n\t"
+ "gssdrc1 %[ftmp7], 0x00(%[addr0]) \n\t"
+
+ "addiu %[count], %[count], -0x01 \n\t"
+ MMI_ADDIU(%[src_ptr], %[src_ptr], 0x08)
+ "bnez %[count], 1b \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
+ [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
+ [tmp0]"=&r"(tmp[0]),
+ [addr0]"=&r"(addr[0]), [addr1]"=&r"(addr[1]),
+ [src_ptr]"+&r"(src_ptr), [count]"+&r"(count),
+ [ff_ph_01]"=&f"(ff_ph_01), [ff_pb_fe]"=&f"(ff_pb_fe),
+ [ff_pb_80]"=&f"(ff_pb_80), [ff_pb_04]"=&f"(ff_pb_04),
+ [ff_pb_03]"=&f"(ff_pb_03)
+ : [limit]"r"(limit), [blimit]"r"(blimit),
+ [thresh]"r"(thresh),
+ [src_pixel_step]"r"((mips_reg)src_pixel_step),
+ [src_pixel_step_x2]"r"((mips_reg)(src_pixel_step<<1)),
+ [src_pixel_step_x4]"r"((mips_reg)(src_pixel_step<<2))
+ : "memory"
+ );
+ /* clang-format on */
+}
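
The pasubub/psubusb/por chain above accumulates, per byte lane, the six
neighbour deltas against limit plus the combined edge delta against blimit;
pcmpeqb then turns an all-zero accumulator into an all-ones mask. A per-pixel
sketch, modeled on the generic C filter mask:

#include <stdlib.h>

/* Scalar model of the per-pixel filter mask: returns -1 (all ones)
 * when every delta is within its limit, 0 otherwise. */
static signed char filter_mask_sketch(unsigned char limit,
                                      unsigned char blimit, unsigned char p3,
                                      unsigned char p2, unsigned char p1,
                                      unsigned char p0, unsigned char q0,
                                      unsigned char q1, unsigned char q2,
                                      unsigned char q3) {
  signed char mask = 0;
  mask |= (abs(p3 - p2) > limit);
  mask |= (abs(p2 - p1) > limit);
  mask |= (abs(p1 - p0) > limit);
  mask |= (abs(q1 - q0) > limit);
  mask |= (abs(q2 - q1) > limit);
  mask |= (abs(q3 - q2) > limit);
  mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit);
  return mask - 1;
}
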
+
+void vp8_loop_filter_vertical_edge_mmi(unsigned char *src_ptr,
+ int src_pixel_step,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh, int count) {
+ uint64_t tmp[1];
+ mips_reg addr[2];
+ double ftmp[13];
+ double ff_pb_fe, ff_ph_01, ff_pb_03, ff_pb_04, ff_pb_80;
+
+ /* clang-format off */
+ __asm__ volatile (
+ "dli %[tmp0], 0xfefefefefefefefe \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_fe] \n\t"
+ "dli %[tmp0], 0x0001000100010001 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_01] \n\t"
+ "dli %[tmp0], 0x0303030303030303 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_03] \n\t"
+ "dli %[tmp0], 0x0404040404040404 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_04] \n\t"
+ "dli %[tmp0], 0x8080808080808080 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_80] \n\t"
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x02)
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[tmp0])
+ MMI_SUBU(%[src_ptr], %[src_ptr], 0x04)
+
+ "1: \n\t"
+ MMI_ADDU(%[addr0], %[src_ptr], %[src_pixel_step])
+
+ MMI_SLL (%[tmp0], %[src_pixel_step], 0x01)
+ MMI_ADDU(%[addr1], %[src_ptr], %[tmp0])
+ "gsldlc1 %[ftmp11], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp11], 0x00(%[addr1]) \n\t"
+ MMI_ADDU(%[addr1], %[addr0], %[tmp0])
+ "gsldlc1 %[ftmp12], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp12], 0x00(%[addr1]) \n\t"
+ "punpcklbh %[ftmp1], %[ftmp11], %[ftmp12] \n\t"
+ "punpckhbh %[ftmp2], %[ftmp11], %[ftmp12] \n\t"
+
+ "gsldlc1 %[ftmp11], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp11], 0x00(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp12], 0x07(%[addr0]) \n\t"
+ "gsldrc1 %[ftmp12], 0x00(%[addr0]) \n\t"
+ "punpcklbh %[ftmp3], %[ftmp11], %[ftmp12] \n\t"
+ "punpckhbh %[ftmp4], %[ftmp11], %[ftmp12] \n\t"
+
+ "punpcklhw %[ftmp5], %[ftmp4], %[ftmp2] \n\t"
+ "punpckhhw %[ftmp6], %[ftmp4], %[ftmp2] \n\t"
+ "punpcklhw %[ftmp7], %[ftmp3], %[ftmp1] \n\t"
+ "punpckhhw %[ftmp8], %[ftmp3], %[ftmp1] \n\t"
+
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x01)
+ MMI_SUBU(%[addr1], %[src_ptr], %[tmp0])
+ "gsldlc1 %[ftmp11], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp11], 0x00(%[addr1]) \n\t"
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp12], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp12], 0x00(%[addr1]) \n\t"
+ "punpcklbh %[ftmp9], %[ftmp11], %[ftmp12] \n\t"
+ "punpckhbh %[ftmp10], %[ftmp11], %[ftmp12] \n\t"
+
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x02)
+ MMI_SUBU(%[addr1], %[src_ptr], %[tmp0])
+ "gsldlc1 %[ftmp11], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp11], 0x00(%[addr1]) \n\t"
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x02)
+ MMI_SUBU(%[addr1], %[addr0], %[tmp0])
+ "gsldlc1 %[ftmp12], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp12], 0x00(%[addr1]) \n\t"
+ "punpcklbh %[ftmp0], %[ftmp11], %[ftmp12] \n\t"
+ "punpckhbh %[ftmp11], %[ftmp11], %[ftmp12] \n\t"
+
+ "punpcklhw %[ftmp1], %[ftmp11], %[ftmp10] \n\t"
+ "punpckhhw %[ftmp2], %[ftmp11], %[ftmp10] \n\t"
+ "punpcklhw %[ftmp3], %[ftmp0], %[ftmp9] \n\t"
+ "punpckhhw %[ftmp4], %[ftmp0], %[ftmp9] \n\t"
+
+ /* ftmp9:q0 ftmp10:q1 */
+ "punpcklwd %[ftmp9], %[ftmp1], %[ftmp5] \n\t"
+ "punpckhwd %[ftmp10], %[ftmp1], %[ftmp5] \n\t"
+ /* ftmp11:q2 ftmp12:q3 */
+ "punpcklwd %[ftmp11], %[ftmp2], %[ftmp6] \n\t"
+ "punpckhwd %[ftmp12], %[ftmp2], %[ftmp6] \n\t"
+ /* ftmp1:p3 ftmp2:p2 */
+ "punpcklwd %[ftmp1], %[ftmp3], %[ftmp7] \n\t"
+ "punpckhwd %[ftmp2], %[ftmp3], %[ftmp7] \n\t"
+ /* ftmp5:p1 ftmp6:p0 */
+ "punpcklwd %[ftmp5], %[ftmp4], %[ftmp8] \n\t"
+ "punpckhwd %[ftmp6], %[ftmp4], %[ftmp8] \n\t"
+
+ "gsldlc1 %[ftmp8], 0x07(%[limit]) \n\t"
+ "gsldrc1 %[ftmp8], 0x00(%[limit]) \n\t"
+
+ /* abs (q3-q2) */
+ "pasubub %[ftmp7], %[ftmp12], %[ftmp11] \n\t"
+ "psubusb %[ftmp0], %[ftmp7], %[ftmp8] \n\t"
+ /* abs (q2-q1) */
+ "pasubub %[ftmp7], %[ftmp11], %[ftmp10] \n\t"
+ "psubusb %[ftmp7], %[ftmp7], %[ftmp8] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ /* ftmp3: abs(q1-q0) */
+ "pasubub %[ftmp3], %[ftmp10], %[ftmp9] \n\t"
+ "psubusb %[ftmp7], %[ftmp3], %[ftmp8] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ /* ftmp4: abs(p1-p0) */
+ "pasubub %[ftmp4], %[ftmp5], %[ftmp6] \n\t"
+ "psubusb %[ftmp7], %[ftmp4], %[ftmp8] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ /* abs (p2-p1) */
+ "pasubub %[ftmp7], %[ftmp2], %[ftmp5] \n\t"
+ "psubusb %[ftmp7], %[ftmp7], %[ftmp8] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ /* abs (p3-p2) */
+ "pasubub %[ftmp7], %[ftmp1], %[ftmp2] \n\t"
+ "psubusb %[ftmp7], %[ftmp7], %[ftmp8] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+
+ "gsldlc1 %[ftmp8], 0x07(%[blimit]) \n\t"
+ "gsldrc1 %[ftmp8], 0x00(%[blimit]) \n\t"
+
+ /* abs (p0-q0) */
+ "pasubub %[ftmp11], %[ftmp9], %[ftmp6] \n\t"
+ "paddusb %[ftmp11], %[ftmp11], %[ftmp11] \n\t"
+ /* abs (p1-q1) */
+ "pasubub %[ftmp12], %[ftmp10], %[ftmp5] \n\t"
+ "pand %[ftmp12], %[ftmp12], %[ff_pb_fe] \n\t"
+ "dli %[tmp0], 0x01 \n\t"
+ "dmtc1 %[tmp0], %[ftmp1] \n\t"
+ "psrlh %[ftmp12], %[ftmp12], %[ftmp1] \n\t"
+ "paddusb %[ftmp1], %[ftmp11], %[ftmp12] \n\t"
+ "psubusb %[ftmp1], %[ftmp1], %[ftmp8] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+ "pxor %[ftmp1], %[ftmp1], %[ftmp1] \n\t"
+ /* ftmp0:mask */
+ "pcmpeqb %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+
+ "gsldlc1 %[ftmp8], 0x07(%[thresh]) \n\t"
+ "gsldrc1 %[ftmp8], 0x00(%[thresh]) \n\t"
+
+ /* ftmp3: abs(q1-q0) ftmp4: abs(p1-p0) */
+ "psubusb %[ftmp4], %[ftmp4], %[ftmp8] \n\t"
+ "psubusb %[ftmp3], %[ftmp3], %[ftmp8] \n\t"
+ "por %[ftmp2], %[ftmp4], %[ftmp3] \n\t"
+ "pcmpeqb %[ftmp2], %[ftmp2], %[ftmp1] \n\t"
+ "pcmpeqb %[ftmp1], %[ftmp1], %[ftmp1] \n\t"
+ /* ftmp1:hev */
+ "pxor %[ftmp1], %[ftmp2], %[ftmp1] \n\t"
+
+ "pxor %[ftmp10], %[ftmp10], %[ff_pb_80] \n\t"
+ "pxor %[ftmp9], %[ftmp9], %[ff_pb_80] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ff_pb_80] \n\t"
+ "pxor %[ftmp5], %[ftmp5], %[ff_pb_80] \n\t"
+
+ "psubsb %[ftmp2], %[ftmp5], %[ftmp10] \n\t"
+ "pand %[ftmp2], %[ftmp2], %[ftmp1] \n\t"
+ "psubsb %[ftmp3], %[ftmp9], %[ftmp6] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp3] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp3] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp3] \n\t"
+ /* ftmp2:filter_value */
+ "pand %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
+
+ "paddsb %[ftmp11], %[ftmp2], %[ff_pb_04] \n\t"
+ "paddsb %[ftmp12], %[ftmp2], %[ff_pb_03] \n\t"
+
+ "dli %[tmp0], 0x0b \n\t"
+ "dmtc1 %[tmp0], %[ftmp7] \n\t"
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "pxor %[ftmp8], %[ftmp8], %[ftmp8] \n\t"
+ "punpcklbh %[ftmp0], %[ftmp0], %[ftmp12] \n\t"
+ "punpckhbh %[ftmp8], %[ftmp8], %[ftmp12] \n\t"
+ "psrah %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ "psrah %[ftmp8], %[ftmp8], %[ftmp7] \n\t"
+ "packsshb %[ftmp12], %[ftmp0], %[ftmp8] \n\t"
+
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "pxor %[ftmp8], %[ftmp8], %[ftmp8] \n\t"
+ "punpcklbh %[ftmp0], %[ftmp0], %[ftmp11] \n\t"
+ "punpckhbh %[ftmp8], %[ftmp8], %[ftmp11] \n\t"
+ "psrah %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ "psrah %[ftmp8], %[ftmp8], %[ftmp7] \n\t"
+ "packsshb %[ftmp11], %[ftmp0], %[ftmp8] \n\t"
+
+ "psubsb %[ftmp9], %[ftmp9], %[ftmp11] \n\t"
+ "pxor %[ftmp9], %[ftmp9], %[ff_pb_80] \n\t"
+ "paddsb %[ftmp6], %[ftmp6], %[ftmp12] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ff_pb_80] \n\t"
+ "paddsh %[ftmp0], %[ftmp0], %[ff_ph_01] \n\t"
+ "paddsh %[ftmp8], %[ftmp8], %[ff_ph_01] \n\t"
+
+ "dli %[tmp0], 0x01 \n\t"
+ "dmtc1 %[tmp0], %[ftmp7] \n\t"
+ "psrah %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ "psrah %[ftmp8], %[ftmp8], %[ftmp7] \n\t"
+ "packsshb %[ftmp2], %[ftmp0], %[ftmp8] \n\t"
+ "pandn %[ftmp2], %[ftmp1], %[ftmp2] \n\t"
+ "psubsb %[ftmp10], %[ftmp10], %[ftmp2] \n\t"
+ "pxor %[ftmp10], %[ftmp10], %[ff_pb_80] \n\t"
+ "paddsb %[ftmp5], %[ftmp5], %[ftmp2] \n\t"
+ "pxor %[ftmp5], %[ftmp5], %[ff_pb_80] \n\t"
+
+ /* ftmp5: *op1 ; ftmp6: *op0 */
+ "punpcklbh %[ftmp2], %[ftmp5], %[ftmp6] \n\t"
+ "punpckhbh %[ftmp1], %[ftmp5], %[ftmp6] \n\t"
+ /* ftmp9: *oq0 ; ftmp10: *oq1 */
+ "punpcklbh %[ftmp4], %[ftmp9], %[ftmp10] \n\t"
+ "punpckhbh %[ftmp3], %[ftmp9], %[ftmp10] \n\t"
+ "punpckhhw %[ftmp6], %[ftmp2], %[ftmp4] \n\t"
+ "punpcklhw %[ftmp2], %[ftmp2], %[ftmp4] \n\t"
+ "punpckhhw %[ftmp5], %[ftmp1], %[ftmp3] \n\t"
+ "punpcklhw %[ftmp1], %[ftmp1], %[ftmp3] \n\t"
+
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x02)
+ MMI_SUBU(%[addr1], %[src_ptr], %[tmp0])
+ "gsswlc1 %[ftmp2], 0x05(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp2], 0x02(%[addr1]) \n\t"
+
+ "li %[tmp0], 0x20 \n\t"
+ "mtc1 %[tmp0], %[ftmp9] \n\t"
+ "ssrld %[ftmp2], %[ftmp2], %[ftmp9] \n\t"
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x02)
+ MMI_SUBU(%[addr1], %[addr0], %[tmp0])
+ "gsswlc1 %[ftmp2], 0x05(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp2], 0x02(%[addr1]) \n\t"
+
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x01)
+ MMI_SUBU(%[addr1], %[src_ptr], %[tmp0])
+ "gsswlc1 %[ftmp6], 0x05(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp6], 0x02(%[addr1]) \n\t"
+
+ "ssrld %[ftmp6], %[ftmp6], %[ftmp9] \n\t"
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step])
+ "gsswlc1 %[ftmp6], 0x05(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp6], 0x02(%[addr1]) \n\t"
+ "gsswlc1 %[ftmp1], 0x05(%[src_ptr]) \n\t"
+ "gsswrc1 %[ftmp1], 0x02(%[src_ptr]) \n\t"
+
+ "ssrld %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
+ "gsswlc1 %[ftmp1], 0x05(%[addr0]) \n\t"
+ "gsswrc1 %[ftmp1], 0x02(%[addr0]) \n\t"
+ MMI_ADDU(%[addr1], %[addr0], %[src_pixel_step])
+ "gsswlc1 %[ftmp5], 0x05(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp5], 0x02(%[addr1]) \n\t"
+
+ "ssrld %[ftmp5], %[ftmp5], %[ftmp9] \n\t"
+ MMI_ADDU(%[addr1], %[addr0], %[tmp0])
+ "gsswlc1 %[ftmp5], 0x05(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp5], 0x02(%[addr1]) \n\t"
+
+ MMI_ADDIU(%[count], %[count], -0x01)
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x03)
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[tmp0])
+ "bnez %[count], 1b \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
+ [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
+ [ftmp12]"=&f"(ftmp[12]), [tmp0]"=&r"(tmp[0]),
+ [addr0]"=&r"(addr[0]), [addr1]"=&r"(addr[1]),
+ [src_ptr]"+&r"(src_ptr), [count]"+&r"(count),
+ [ff_ph_01]"=&f"(ff_ph_01), [ff_pb_03]"=&f"(ff_pb_03),
+ [ff_pb_04]"=&f"(ff_pb_04), [ff_pb_80]"=&f"(ff_pb_80),
+ [ff_pb_fe]"=&f"(ff_pb_fe)
+ : [limit]"r"(limit), [blimit]"r"(blimit),
+ [thresh]"r"(thresh),
+ [src_pixel_step]"r"((mips_reg)src_pixel_step)
+ : "memory"
+ );
+ /* clang-format on */
+}
+
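+/* A scalar sketch (reference only, compiled out; assumes abs() from
+ * <stdlib.h>) of the mask/hev tests the loop-filter code above computes
+ * eight byte lanes at a time. It mirrors the generic C filter-mask logic
+ * in vp8/common/loopfilter_filters.c; the names here are illustrative. */
+#if 0
+static int vp8_filter_mask_sketch(int limit, int blimit, int thresh, int p3,
+                                  int p2, int p1, int p0, int q0, int q1,
+                                  int q2, int q3, int *hev) {
+  /* Filter only when every neighbor delta is within limit and the
+   * combined edge delta is within blimit. */
+  int mask = abs(p3 - p2) <= limit && abs(p2 - p1) <= limit &&
+             abs(p1 - p0) <= limit && abs(q1 - q0) <= limit &&
+             abs(q2 - q1) <= limit && abs(q3 - q2) <= limit &&
+             abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit;
+  /* High edge variance selects the stronger adjustment of p1/q1. */
+  *hev = abs(p1 - p0) > thresh || abs(q1 - q0) > thresh;
+  return mask; /* the vector code materializes this as an all-ones byte */
+}
+#endif
+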
+/* clang-format off */
+#define VP8_MBLOOP_HPSRAB \
+ "punpcklbh %[ftmp10], %[ftmp10], %[ftmp0] \n\t" \
+ "punpckhbh %[ftmp11], %[ftmp11], %[ftmp0] \n\t" \
+ "psrah %[ftmp10], %[ftmp10], %[ftmp9] \n\t" \
+ "psrah %[ftmp11], %[ftmp11], %[ftmp9] \n\t" \
+ "packsshb %[ftmp0], %[ftmp10], %[ftmp11] \n\t"
+
+#define VP8_MBLOOP_HPSRAB_ADD(reg) \
+ "punpcklbh %[ftmp1], %[ftmp0], %[ftmp12] \n\t" \
+ "punpckhbh %[ftmp2], %[ftmp0], %[ftmp12] \n\t" \
+ "pmulhh %[ftmp1], %[ftmp1], " #reg " \n\t" \
+ "pmulhh %[ftmp2], %[ftmp2], " #reg " \n\t" \
+ "paddh %[ftmp1], %[ftmp1], %[ff_ph_003f] \n\t" \
+ "paddh %[ftmp2], %[ftmp2], %[ff_ph_003f] \n\t" \
+ "psrah %[ftmp1], %[ftmp1], %[ftmp9] \n\t" \
+ "psrah %[ftmp2], %[ftmp2], %[ftmp9] \n\t" \
+ "packsshb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
+/* clang-format on */
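+
+/* VP8_MBLOOP_HPSRAB widens the filter bytes into the high halves of
+   halfwords and shifts arithmetically by 11, i.e. a per-byte signed >> 3.
+   VP8_MBLOOP_HPSRAB_ADD widens the same way, then multiplies with pmulhh
+   against a constant whose multiplier sits in the high byte: 0x1b00,
+   0x1200 and 0x0900 are 27, 18 and 9 scaled by 256, so
+   ((f << 8) * (c << 8)) >> 16 == c * f. With the 0x003f bias and the
+   arithmetic >> 7 this yields the macroblock taps (27 * f + 63) >> 7,
+   (18 * f + 63) >> 7 and (9 * f + 63) >> 7, saturated by packsshb. */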
+
+void vp8_mbloop_filter_horizontal_edge_mmi(
+ unsigned char *src_ptr, int src_pixel_step, const unsigned char *blimit,
+ const unsigned char *limit, const unsigned char *thresh, int count) {
+ uint64_t tmp[1];
+ double ftmp[13];
+ double ff_pb_fe, ff_pb_80, ff_pb_04, ff_pb_03, ff_ph_003f, ff_ph_0900,
+ ff_ph_1200, ff_ph_1b00;
+
+ /* clang-format off */
+ __asm__ volatile (
+ "dli %[tmp0], 0xfefefefefefefefe \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_fe] \n\t"
+ "dli %[tmp0], 0x8080808080808080 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_80] \n\t"
+ "dli %[tmp0], 0x0404040404040404 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_04] \n\t"
+ "dli %[tmp0], 0x0303030303030303 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_03] \n\t"
+ "dli %[tmp0], 0x003f003f003f003f \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_003f] \n\t"
+ "dli %[tmp0], 0x0900090009000900 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_0900] \n\t"
+ "dli %[tmp0], 0x1200120012001200 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_1200] \n\t"
+ "dli %[tmp0], 0x1b001b001b001b00 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_1b00] \n\t"
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x02)
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[tmp0])
+ "1: \n\t"
+ "gsldlc1 %[ftmp9], 0x07(%[limit]) \n\t"
+ "gsldrc1 %[ftmp9], 0x00(%[limit]) \n\t"
+ /* ftmp1: p3 */
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+ /* ftmp3: p2 */
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp3], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp3], 0x00(%[src_ptr]) \n\t"
+ /* ftmp4: p1 */
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp4], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[src_ptr]) \n\t"
+ /* ftmp5: p0 */
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp5], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp5], 0x00(%[src_ptr]) \n\t"
+ /* ftmp6: q0 */
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp6], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp6], 0x00(%[src_ptr]) \n\t"
+ /* ftmp7: q1 */
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp7], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[src_ptr]) \n\t"
+ /* ftmp8: q2 */
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp8], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp8], 0x00(%[src_ptr]) \n\t"
+ /* ftmp2: q3 */
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp2], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[src_ptr]) \n\t"
+
+ "gsldlc1 %[ftmp12], 0x07(%[blimit]) \n\t"
+ "gsldrc1 %[ftmp12], 0x00(%[blimit]) \n\t"
+
+ "pasubub %[ftmp0], %[ftmp1], %[ftmp3] \n\t"
+ "psubusb %[ftmp0], %[ftmp0], %[ftmp9] \n\t"
+ "pasubub %[ftmp1], %[ftmp3], %[ftmp4] \n\t"
+ "psubusb %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+ "pasubub %[ftmp10], %[ftmp4], %[ftmp5] \n\t"
+ "psubusb %[ftmp1], %[ftmp10], %[ftmp9] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+ "pasubub %[ftmp11], %[ftmp7], %[ftmp6] \n\t"
+ "psubusb %[ftmp1], %[ftmp11], %[ftmp9] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+ "pasubub %[ftmp1], %[ftmp8], %[ftmp7] \n\t"
+ "psubusb %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+ "pasubub %[ftmp1], %[ftmp2], %[ftmp8] \n\t"
+ "psubusb %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+
+ "pasubub %[ftmp1], %[ftmp5], %[ftmp6] \n\t"
+ "paddusb %[ftmp1], %[ftmp1], %[ftmp1] \n\t"
+ "pasubub %[ftmp2], %[ftmp4], %[ftmp7] \n\t"
+ "pand %[ftmp2], %[ftmp2], %[ff_pb_fe] \n\t"
+ "dli %[tmp0], 0x01 \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "psrlh %[ftmp2], %[ftmp2], %[ftmp9] \n\t"
+ "paddusb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
+ "psubusb %[ftmp1], %[ftmp1], %[ftmp12] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+ "pxor %[ftmp9], %[ftmp9], %[ftmp9] \n\t"
+ /* ftmp0: mask */
+ "pcmpeqb %[ftmp0], %[ftmp0], %[ftmp9] \n\t"
+
+ "gsldlc1 %[ftmp9], 0x07(%[thresh]) \n\t"
+ "gsldrc1 %[ftmp9], 0x00(%[thresh]) \n\t"
+ "psubusb %[ftmp1], %[ftmp10], %[ftmp9] \n\t"
+ "psubusb %[ftmp2], %[ftmp11], %[ftmp9] \n\t"
+ "paddb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
+ "pxor %[ftmp2], %[ftmp2], %[ftmp2] \n\t"
+ "pcmpeqb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
+ "pcmpeqb %[ftmp2], %[ftmp2], %[ftmp2] \n\t"
+ /* ftmp1: hev */
+ "pxor %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
+
+ "pxor %[ftmp4], %[ftmp4], %[ff_pb_80] \n\t"
+ "pxor %[ftmp5], %[ftmp5], %[ff_pb_80] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ff_pb_80] \n\t"
+ "pxor %[ftmp7], %[ftmp7], %[ff_pb_80] \n\t"
+ "psubsb %[ftmp2], %[ftmp4], %[ftmp7] \n\t"
+ "psubsb %[ftmp9], %[ftmp6], %[ftmp5] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp9] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp9] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp9] \n\t"
+ "pand %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
+ "pandn %[ftmp12], %[ftmp1], %[ftmp2] \n\t"
+ "pand %[ftmp2], %[ftmp2], %[ftmp1] \n\t"
+
+ "dli %[tmp0], 0x0b \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "paddsb %[ftmp0], %[ftmp2], %[ff_pb_03] \n\t"
+ VP8_MBLOOP_HPSRAB
+ "paddsb %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
+ "paddsb %[ftmp0], %[ftmp2], %[ff_pb_04] \n\t"
+ VP8_MBLOOP_HPSRAB
+ "psubsb %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
+
+ "dli %[tmp0], 0x07 \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+
+ VP8_MBLOOP_HPSRAB_ADD(%[ff_ph_1b00])
+ "psubsb %[ftmp6], %[ftmp6], %[ftmp1] \n\t"
+ "paddsb %[ftmp5], %[ftmp5], %[ftmp1] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ff_pb_80] \n\t"
+ "pxor %[ftmp5], %[ftmp5], %[ff_pb_80] \n\t"
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x02)
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[tmp0])
+ "gssdlc1 %[ftmp5], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp5], 0x00(%[src_ptr]) \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp6], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp6], 0x00(%[src_ptr]) \n\t"
+
+ VP8_MBLOOP_HPSRAB_ADD(%[ff_ph_1200])
+ "paddsb %[ftmp4], %[ftmp4], %[ftmp1] \n\t"
+ "psubsb %[ftmp7], %[ftmp7], %[ftmp1] \n\t"
+ "pxor %[ftmp4], %[ftmp4], %[ff_pb_80] \n\t"
+ "pxor %[ftmp7], %[ftmp7], %[ff_pb_80] \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp7], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp7], 0x00(%[src_ptr]) \n\t"
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[tmp0])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp4], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp4], 0x00(%[src_ptr]) \n\t"
+
+ VP8_MBLOOP_HPSRAB_ADD(%[ff_ph_0900])
+ "pxor %[ftmp3], %[ftmp3], %[ff_pb_80] \n\t"
+ "pxor %[ftmp8], %[ftmp8], %[ff_pb_80] \n\t"
+ "paddsb %[ftmp3], %[ftmp3], %[ftmp1] \n\t"
+ "psubsb %[ftmp8], %[ftmp8], %[ftmp1] \n\t"
+ "pxor %[ftmp3], %[ftmp3], %[ff_pb_80] \n\t"
+ "pxor %[ftmp8], %[ftmp8], %[ff_pb_80] \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[tmp0])
+ "gssdlc1 %[ftmp8], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp8], 0x00(%[src_ptr]) \n\t"
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[tmp0])
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp3], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp3], 0x00(%[src_ptr]) \n\t"
+
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ MMI_ADDIU(%[src_ptr], %[src_ptr], 0x08)
+ "addiu %[count], %[count], -0x01 \n\t"
+ "bnez %[count], 1b \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
+ [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
+ [ftmp12]"=&f"(ftmp[12]), [tmp0]"=&r"(tmp[0]),
+ [src_ptr]"+&r"(src_ptr), [count]"+&r"(count),
+ [ff_pb_fe]"=&f"(ff_pb_fe), [ff_pb_80]"=&f"(ff_pb_80),
+ [ff_pb_04]"=&f"(ff_pb_04), [ff_pb_03]"=&f"(ff_pb_03),
+ [ff_ph_0900]"=&f"(ff_ph_0900), [ff_ph_1b00]"=&f"(ff_ph_1b00),
+ [ff_ph_1200]"=&f"(ff_ph_1200), [ff_ph_003f]"=&f"(ff_ph_003f)
+ : [limit]"r"(limit), [blimit]"r"(blimit),
+ [thresh]"r"(thresh),
+ [src_pixel_step]"r"((mips_reg)src_pixel_step)
+ : "memory"
+ );
+ /* clang-format on */
+}
+
+/* clang-format off */
+#define VP8_MBLOOP_VPSRAB_ADDH \
+ "pxor %[ftmp7], %[ftmp7], %[ftmp7] \n\t" \
+ "pxor %[ftmp8], %[ftmp8], %[ftmp8] \n\t" \
+ "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t" \
+ "punpckhbh %[ftmp8], %[ftmp8], %[ftmp0] \n\t"
+
+#define VP8_MBLOOP_VPSRAB_ADDT \
+ "paddh %[ftmp7], %[ftmp7], %[ff_ph_003f] \n\t" \
+ "paddh %[ftmp8], %[ftmp8], %[ff_ph_003f] \n\t" \
+ "psrah %[ftmp7], %[ftmp7], %[ftmp12] \n\t" \
+ "psrah %[ftmp8], %[ftmp8], %[ftmp12] \n\t" \
+ "packsshb %[ftmp3], %[ftmp7], %[ftmp8] \n\t"
+/* clang-format on */
+
+void vp8_mbloop_filter_vertical_edge_mmi(
+ unsigned char *src_ptr, int src_pixel_step, const unsigned char *blimit,
+ const unsigned char *limit, const unsigned char *thresh, int count) {
+ mips_reg tmp[1];
+ DECLARE_ALIGNED(8, const uint64_t, srct[2]);
+ double ftmp[14];
+ double ff_ph_003f, ff_ph_0900, ff_pb_fe, ff_pb_80, ff_pb_04, ff_pb_03;
+
+ /* clang-format off */
+ __asm__ volatile (
+ "dli %[tmp0], 0x003f003f003f003f \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_003f] \n\t"
+ "dli %[tmp0], 0x0900090009000900 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_0900] \n\t"
+ "dli %[tmp0], 0xfefefefefefefefe \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_fe] \n\t"
+ "dli %[tmp0], 0x8080808080808080 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_80] \n\t"
+ "dli %[tmp0], 0x0404040404040404 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_04] \n\t"
+ "dli %[tmp0], 0x0303030303030303 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_03] \n\t"
+ MMI_SUBU(%[src_ptr], %[src_ptr], 0x04)
+
+ "1: \n\t"
+ "gsldlc1 %[ftmp5], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp5], 0x00(%[src_ptr]) \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp6], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp6], 0x00(%[src_ptr]) \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp7], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[src_ptr]) \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp8], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp8], 0x00(%[src_ptr]) \n\t"
+
+ "punpcklbh %[ftmp11], %[ftmp5], %[ftmp6] \n\t"
+ "punpckhbh %[ftmp12], %[ftmp5], %[ftmp6] \n\t"
+ "punpcklbh %[ftmp9], %[ftmp7], %[ftmp8] \n\t"
+ "punpckhbh %[ftmp10], %[ftmp7], %[ftmp8] \n\t"
+
+ "punpcklhw %[ftmp1], %[ftmp12], %[ftmp10] \n\t"
+ "punpckhhw %[ftmp2], %[ftmp12], %[ftmp10] \n\t"
+ "punpcklhw %[ftmp3], %[ftmp11], %[ftmp9] \n\t"
+ "punpckhhw %[ftmp4], %[ftmp11], %[ftmp9] \n\t"
+
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp5], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp5], 0x00(%[src_ptr]) \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp6], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp6], 0x00(%[src_ptr]) \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp7], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[src_ptr]) \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp8], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp8], 0x00(%[src_ptr]) \n\t"
+
+ "punpcklbh %[ftmp11], %[ftmp5], %[ftmp6] \n\t"
+ "punpckhbh %[ftmp12], %[ftmp5], %[ftmp6] \n\t"
+ "punpcklbh %[ftmp9], %[ftmp7], %[ftmp8] \n\t"
+ "punpckhbh %[ftmp10], %[ftmp7], %[ftmp8] \n\t"
+
+ "punpcklhw %[ftmp5], %[ftmp12], %[ftmp10] \n\t"
+ "punpckhhw %[ftmp6], %[ftmp12], %[ftmp10] \n\t"
+ "punpcklhw %[ftmp7], %[ftmp11], %[ftmp9] \n\t"
+ "punpckhhw %[ftmp8], %[ftmp11], %[ftmp9] \n\t"
+
+ "gsldlc1 %[ftmp13], 0x07(%[limit]) \n\t"
+ "gsldrc1 %[ftmp13], 0x00(%[limit]) \n\t"
+ /* ftmp9:q0 ftmp10:q1 */
+ "punpcklwd %[ftmp9], %[ftmp1], %[ftmp5] \n\t"
+ "punpckhwd %[ftmp10], %[ftmp1], %[ftmp5] \n\t"
+ /* ftmp11:q2 ftmp12:q3 */
+ "punpcklwd %[ftmp11], %[ftmp2], %[ftmp6] \n\t"
+ "punpckhwd %[ftmp12], %[ftmp2], %[ftmp6] \n\t"
+ /* srct[0x00]: q3 */
+ "sdc1 %[ftmp12], 0x00(%[srct]) \n\t"
+ /* ftmp1:p3 ftmp2:p2 */
+ "punpcklwd %[ftmp1], %[ftmp3], %[ftmp7] \n\t"
+ "punpckhwd %[ftmp2], %[ftmp3], %[ftmp7] \n\t"
+ /* srct[0x08]: p3 */
+ "sdc1 %[ftmp1], 0x08(%[srct]) \n\t"
+ /* ftmp5:p1 ftmp6:p0 */
+ "punpcklwd %[ftmp5], %[ftmp4], %[ftmp8] \n\t"
+ "punpckhwd %[ftmp6], %[ftmp4], %[ftmp8] \n\t"
+
+ /* abs (q3-q2) */
+ "pasubub %[ftmp7], %[ftmp12], %[ftmp11] \n\t"
+ "psubusb %[ftmp0], %[ftmp7], %[ftmp13] \n\t"
+ /* abs (q2-q1) */
+ "pasubub %[ftmp7], %[ftmp11], %[ftmp10] \n\t"
+ "psubusb %[ftmp7], %[ftmp7], %[ftmp13] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ /* ftmp3: abs(q1-q0) */
+ "pasubub %[ftmp3], %[ftmp10], %[ftmp9] \n\t"
+ "psubusb %[ftmp7], %[ftmp3], %[ftmp13] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ /* ftmp4: abs(p1-p0) */
+ "pasubub %[ftmp4], %[ftmp5], %[ftmp6] \n\t"
+ "psubusb %[ftmp7], %[ftmp4], %[ftmp13] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ /* abs (p2-p1) */
+ "pasubub %[ftmp7], %[ftmp2], %[ftmp5] \n\t"
+ "psubusb %[ftmp7], %[ftmp7], %[ftmp13] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ /* abs (p3-p2) */
+ "pasubub %[ftmp7], %[ftmp1], %[ftmp2] \n\t"
+ "psubusb %[ftmp7], %[ftmp7], %[ftmp13] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+
+ "gsldlc1 %[ftmp13], 0x07(%[blimit]) \n\t"
+ "gsldrc1 %[ftmp13], 0x00(%[blimit]) \n\t"
+ "gsldlc1 %[ftmp7], 0x07(%[thresh]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[thresh]) \n\t"
+ /* abs (p0-q0) * 2 */
+ "pasubub %[ftmp1], %[ftmp9], %[ftmp6] \n\t"
+ "paddusb %[ftmp1], %[ftmp1], %[ftmp1] \n\t"
+ /* abs (p1-q1) / 2 */
+ "pasubub %[ftmp12], %[ftmp10], %[ftmp5] \n\t"
+ "pand %[ftmp12], %[ftmp12], %[ff_pb_fe] \n\t"
+ "dli %[tmp0], 0x01 \n\t"
+ "dmtc1 %[tmp0], %[ftmp8] \n\t"
+ "psrlh %[ftmp12], %[ftmp12], %[ftmp8] \n\t"
+ "paddusb %[ftmp12], %[ftmp1], %[ftmp12] \n\t"
+ "psubusb %[ftmp12], %[ftmp12], %[ftmp13] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp12] \n\t"
+ "pxor %[ftmp12], %[ftmp12], %[ftmp12] \n\t"
+ /* ftmp0: mask */
+ "pcmpeqb %[ftmp0], %[ftmp0], %[ftmp12] \n\t"
+
+ /* abs(p1-p0) - thresh */
+ "psubusb %[ftmp4], %[ftmp4], %[ftmp7] \n\t"
+ /* abs(q1-q0) - thresh */
+ "psubusb %[ftmp3], %[ftmp3], %[ftmp7] \n\t"
+ "por %[ftmp3], %[ftmp4], %[ftmp3] \n\t"
+ "pcmpeqb %[ftmp3], %[ftmp3], %[ftmp12] \n\t"
+ "pcmpeqb %[ftmp1], %[ftmp1], %[ftmp1] \n\t"
+ /* ftmp1: hev */
+ "pxor %[ftmp1], %[ftmp3], %[ftmp1] \n\t"
+
+ /* ftmp2:ps2, ftmp5:ps1, ftmp6:ps0, ftmp9:qs0, ftmp10:qs1, ftmp11:qs2 */
+ "pxor %[ftmp11], %[ftmp11], %[ff_pb_80] \n\t"
+ "pxor %[ftmp10], %[ftmp10], %[ff_pb_80] \n\t"
+ "pxor %[ftmp9], %[ftmp9], %[ff_pb_80] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ff_pb_80] \n\t"
+ "pxor %[ftmp5], %[ftmp5], %[ff_pb_80] \n\t"
+ "pxor %[ftmp2], %[ftmp2], %[ff_pb_80] \n\t"
+
+ "psubsb %[ftmp3], %[ftmp5], %[ftmp10] \n\t"
+ "psubsb %[ftmp4], %[ftmp9], %[ftmp6] \n\t"
+ "paddsb %[ftmp3], %[ftmp3], %[ftmp4] \n\t"
+ "paddsb %[ftmp3], %[ftmp3], %[ftmp4] \n\t"
+ "paddsb %[ftmp3], %[ftmp3], %[ftmp4] \n\t"
+ /* filter_value &= mask */
+ "pand %[ftmp0], %[ftmp0], %[ftmp3] \n\t"
+ /* Filter2 = filter_value & hev */
+ "pand %[ftmp3], %[ftmp1], %[ftmp0] \n\t"
+ /* filter_value &= ~hev */
+ "pandn %[ftmp0], %[ftmp1], %[ftmp0] \n\t"
+
+ "paddsb %[ftmp4], %[ftmp3], %[ff_pb_04] \n\t"
+ "dli %[tmp0], 0x0b \n\t"
+ "dmtc1 %[tmp0], %[ftmp12] \n\t"
+ "punpcklbh %[ftmp7], %[ftmp7], %[ftmp4] \n\t"
+ "punpckhbh %[ftmp8], %[ftmp8], %[ftmp4] \n\t"
+ "psrah %[ftmp7], %[ftmp7], %[ftmp12] \n\t"
+ "psrah %[ftmp8], %[ftmp8], %[ftmp12] \n\t"
+ "packsshb %[ftmp4], %[ftmp7], %[ftmp8] \n\t"
+ /* ftmp9: qs0 */
+ "psubsb %[ftmp9], %[ftmp9], %[ftmp4] \n\t"
+ "paddsb %[ftmp3], %[ftmp3], %[ff_pb_03] \n\t"
+ "punpcklbh %[ftmp7], %[ftmp7], %[ftmp3] \n\t"
+ "punpckhbh %[ftmp8], %[ftmp8], %[ftmp3] \n\t"
+ "psrah %[ftmp7], %[ftmp7], %[ftmp12] \n\t"
+ "psrah %[ftmp8], %[ftmp8], %[ftmp12] \n\t"
+ "packsshb %[ftmp3], %[ftmp7], %[ftmp8] \n\t"
+ /* ftmp6: ps0 */
+ "paddsb %[ftmp6], %[ftmp6], %[ftmp3] \n\t"
+
+ "dli %[tmp0], 0x07 \n\t"
+ "dmtc1 %[tmp0], %[ftmp12] \n\t"
+ VP8_MBLOOP_VPSRAB_ADDH
+ "paddh %[ftmp1], %[ff_ph_0900], %[ff_ph_0900] \n\t"
+ "paddh %[ftmp1], %[ftmp1], %[ff_ph_0900] \n\t"
+ "pmulhh %[ftmp7], %[ftmp7], %[ftmp1] \n\t"
+ "pmulhh %[ftmp8], %[ftmp8], %[ftmp1] \n\t"
+ VP8_MBLOOP_VPSRAB_ADDT
+ "psubsb %[ftmp4], %[ftmp9], %[ftmp3] \n\t"
+ /* ftmp9: oq0 */
+ "pxor %[ftmp9], %[ftmp4], %[ff_pb_80] \n\t"
+ "paddsb %[ftmp4], %[ftmp6], %[ftmp3] \n\t"
+ /* ftmp6: op0 */
+ "pxor %[ftmp6], %[ftmp4], %[ff_pb_80] \n\t"
+
+ VP8_MBLOOP_VPSRAB_ADDH
+ "paddh %[ftmp1], %[ff_ph_0900], %[ff_ph_0900] \n\t"
+ "pmulhh %[ftmp7], %[ftmp7], %[ftmp1] \n\t"
+ "pmulhh %[ftmp8], %[ftmp8], %[ftmp1] \n\t"
+ VP8_MBLOOP_VPSRAB_ADDT
+ "psubsb %[ftmp4], %[ftmp10], %[ftmp3] \n\t"
+ /* ftmp10: oq1 */
+ "pxor %[ftmp10], %[ftmp4], %[ff_pb_80] \n\t"
+ "paddsb %[ftmp4], %[ftmp5], %[ftmp3] \n\t"
+ /* ftmp5: op1 */
+ "pxor %[ftmp5], %[ftmp4], %[ff_pb_80] \n\t"
+
+ VP8_MBLOOP_VPSRAB_ADDH
+ "pmulhh %[ftmp7], %[ftmp7], %[ff_ph_0900] \n\t"
+ "pmulhh %[ftmp8], %[ftmp8], %[ff_ph_0900] \n\t"
+ VP8_MBLOOP_VPSRAB_ADDT
+ "psubsb %[ftmp4], %[ftmp11], %[ftmp3] \n\t"
+ /* ftmp11: oq2 */
+ "pxor %[ftmp11], %[ftmp4], %[ff_pb_80] \n\t"
+ "paddsb %[ftmp4], %[ftmp2], %[ftmp3] \n\t"
+ /* ftmp2: op2 */
+ "pxor %[ftmp2], %[ftmp4], %[ff_pb_80] \n\t"
+
+ "ldc1 %[ftmp12], 0x00(%[srct]) \n\t"
+ "ldc1 %[ftmp8], 0x08(%[srct]) \n\t"
+
+ "punpcklbh %[ftmp0], %[ftmp8], %[ftmp2] \n\t"
+ "punpckhbh %[ftmp1], %[ftmp8], %[ftmp2] \n\t"
+ "punpcklbh %[ftmp2], %[ftmp5], %[ftmp6] \n\t"
+ "punpckhbh %[ftmp3], %[ftmp5], %[ftmp6] \n\t"
+ "punpcklhw %[ftmp4], %[ftmp0], %[ftmp2] \n\t"
+ "punpckhhw %[ftmp5], %[ftmp0], %[ftmp2] \n\t"
+ "punpcklhw %[ftmp6], %[ftmp1], %[ftmp3] \n\t"
+ "punpckhhw %[ftmp7], %[ftmp1], %[ftmp3] \n\t"
+
+ "punpcklbh %[ftmp0], %[ftmp9], %[ftmp10] \n\t"
+ "punpckhbh %[ftmp1], %[ftmp9], %[ftmp10] \n\t"
+ "punpcklbh %[ftmp2], %[ftmp11], %[ftmp12] \n\t"
+ "punpckhbh %[ftmp3], %[ftmp11], %[ftmp12] \n\t"
+ "punpcklhw %[ftmp8], %[ftmp0], %[ftmp2] \n\t"
+ "punpckhhw %[ftmp9], %[ftmp0], %[ftmp2] \n\t"
+ "punpcklhw %[ftmp10], %[ftmp1], %[ftmp3] \n\t"
+ "punpckhhw %[ftmp11], %[ftmp1], %[ftmp3] \n\t"
+
+ "punpcklwd %[ftmp0], %[ftmp7], %[ftmp11] \n\t"
+ "punpckhwd %[ftmp1], %[ftmp7], %[ftmp11] \n\t"
+ "gssdlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp0], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x00(%[src_ptr]) \n\t"
+
+ "punpcklwd %[ftmp0], %[ftmp6], %[ftmp10] \n\t"
+ "punpckhwd %[ftmp1], %[ftmp6], %[ftmp10] \n\t"
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp0], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x00(%[src_ptr]) \n\t"
+
+ "punpcklwd %[ftmp1], %[ftmp5], %[ftmp9] \n\t"
+ "punpckhwd %[ftmp0], %[ftmp5], %[ftmp9] \n\t"
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp0], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x00(%[src_ptr]) \n\t"
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+
+ "punpcklwd %[ftmp1], %[ftmp4], %[ftmp8] \n\t"
+ "punpckhwd %[ftmp0], %[ftmp4], %[ftmp8] \n\t"
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp0], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x00(%[src_ptr]) \n\t"
+ MMI_SUBU(%[src_ptr], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+ "addiu %[count], %[count], -0x01 \n\t"
+
+ MMI_SLL(%[tmp0], %[src_pixel_step], 0x03)
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[tmp0])
+ "bnez %[count], 1b \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
+ [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
+ [ftmp12]"=&f"(ftmp[12]), [ftmp13]"=&f"(ftmp[13]),
+ [tmp0]"=&r"(tmp[0]), [src_ptr]"+&r"(src_ptr),
+ [count]"+&r"(count),
+ [ff_ph_003f]"=&f"(ff_ph_003f), [ff_ph_0900]"=&f"(ff_ph_0900),
+ [ff_pb_03]"=&f"(ff_pb_03), [ff_pb_04]"=&f"(ff_pb_04),
+ [ff_pb_80]"=&f"(ff_pb_80), [ff_pb_fe]"=&f"(ff_pb_fe)
+ : [limit]"r"(limit), [blimit]"r"(blimit),
+ [srct]"r"(srct), [thresh]"r"(thresh),
+ [src_pixel_step]"r"((mips_reg)src_pixel_step)
+ : "memory"
+ );
+ /* clang-format on */
+}
+
+/* clang-format off */
+#define VP8_SIMPLE_HPSRAB \
+ "psllh %[ftmp0], %[ftmp5], %[ftmp8] \n\t" \
+ "psrah %[ftmp0], %[ftmp0], %[ftmp9] \n\t" \
+ "psrlh %[ftmp0], %[ftmp0], %[ftmp8] \n\t" \
+ "psrah %[ftmp1], %[ftmp5], %[ftmp10] \n\t" \
+ "psllh %[ftmp1], %[ftmp1], %[ftmp8] \n\t" \
+ "por %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+/* clang-format on */
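+
+/* VP8_SIMPLE_HPSRAB is a per-byte signed >> 3 built from halfword shifts:
+   psllh/psrah/psrlh handle the even bytes and psrah/psllh the odd bytes,
+   OR-ed back together. */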
+
+void vp8_loop_filter_simple_horizontal_edge_mmi(unsigned char *src_ptr,
+ int src_pixel_step,
+ const unsigned char *blimit) {
+ uint64_t tmp[1], count = 2;
+ mips_reg addr[2];
+ double ftmp[12];
+ double ff_pb_fe, ff_pb_80, ff_pb_04, ff_pb_01;
+
+ /* clang-format off */
+ __asm__ volatile (
+ "dli %[tmp0], 0x0b \n\t"
+ "dmtc1 %[tmp0], %[ftmp10] \n\t"
+ "dli %[tmp0], 0x01 \n\t"
+ "dmtc1 %[tmp0], %[ftmp11] \n\t"
+ "dli %[tmp0], 0x08 \n\t"
+ "dmtc1 %[tmp0], %[ftmp8] \n\t"
+ "dli %[tmp0], 0x03 \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "dli %[tmp0], 0x0b \n\t"
+ "dmtc1 %[tmp0], %[ftmp10] \n\t"
+ "dli %[tmp0], 0x01 \n\t"
+ "dmtc1 %[tmp0], %[ftmp11] \n\t"
+ "dli %[tmp0], 0xfefefefefefefefe \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_fe] \n\t"
+ "dli %[tmp0], 0x8080808080808080 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_80] \n\t"
+ "dli %[tmp0], 0x0404040404040404 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_04] \n\t"
+ "dli %[tmp0], 0x0101010101010101 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_01] \n\t"
+
+ "1: \n\t"
+ "gsldlc1 %[ftmp3], 0x07(%[blimit]) \n\t"
+ "gsldrc1 %[ftmp3], 0x00(%[blimit]) \n\t"
+
+ MMI_ADDU(%[addr0], %[src_ptr], %[src_pixel_step])
+
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step_x2])
+ "gsldlc1 %[ftmp2], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[addr1]) \n\t"
+ "gsldlc1 %[ftmp7], 0x07(%[addr0]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[addr0]) \n\t"
+ "pasubub %[ftmp1], %[ftmp7], %[ftmp2] \n\t"
+ "pand %[ftmp1], %[ftmp1], %[ff_pb_fe] \n\t"
+ "psrlh %[ftmp1], %[ftmp1], %[ftmp11] \n\t"
+
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step])
+ "gsldlc1 %[ftmp6], 0x07(%[addr1]) \n\t"
+ "gsldrc1 %[ftmp6], 0x00(%[addr1]) \n\t"
+ "gsldlc1 %[ftmp0], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp0], 0x00(%[src_ptr]) \n\t"
+ "pasubub %[ftmp5], %[ftmp6], %[ftmp0] \n\t"
+ "paddusb %[ftmp5], %[ftmp5], %[ftmp5] \n\t"
+ "paddusb %[ftmp5], %[ftmp5], %[ftmp1] \n\t"
+ "psubusb %[ftmp5], %[ftmp5], %[ftmp3] \n\t"
+ "pxor %[ftmp3], %[ftmp3], %[ftmp3] \n\t"
+ "pcmpeqb %[ftmp5], %[ftmp5], %[ftmp3] \n\t"
+
+ "pxor %[ftmp2], %[ftmp2], %[ff_pb_80] \n\t"
+ "pxor %[ftmp7], %[ftmp7], %[ff_pb_80] \n\t"
+ "psubsb %[ftmp2], %[ftmp2], %[ftmp7] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ff_pb_80] \n\t"
+ "pxor %[ftmp3], %[ftmp0], %[ff_pb_80] \n\t"
+ "psubsb %[ftmp0], %[ftmp3], %[ftmp6] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
+ "paddsb %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
+ "pand %[ftmp5], %[ftmp5], %[ftmp2] \n\t"
+
+ "paddsb %[ftmp5], %[ftmp5], %[ff_pb_04] \n\t"
+ VP8_SIMPLE_HPSRAB
+ "psubsb %[ftmp3], %[ftmp3], %[ftmp0] \n\t"
+ "pxor %[ftmp3], %[ftmp3], %[ff_pb_80] \n\t"
+ "gssdlc1 %[ftmp3], 0x07(%[src_ptr]) \n\t"
+ "gssdrc1 %[ftmp3], 0x00(%[src_ptr]) \n\t"
+
+ "psubsb %[ftmp5], %[ftmp5], %[ff_pb_01] \n\t"
+ VP8_SIMPLE_HPSRAB
+ "paddsb %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ff_pb_80] \n\t"
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step])
+ "gssdlc1 %[ftmp6], 0x07(%[addr1]) \n\t"
+ "gssdrc1 %[ftmp6], 0x00(%[addr1]) \n\t"
+
+ "addiu %[count], %[count], -0x01 \n\t"
+ MMI_ADDIU(%[src_ptr], %[src_ptr], 0x08)
+ "bnez %[count], 1b \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
+ [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
+ [tmp0]"=&r"(tmp[0]),
+ [addr0]"=&r"(addr[0]), [addr1]"=&r"(addr[1]),
+ [src_ptr]"+&r"(src_ptr), [count]"+&r"(count),
+ [ff_pb_fe]"=&f"(ff_pb_fe), [ff_pb_80]"=&f"(ff_pb_80),
+ [ff_pb_04]"=&f"(ff_pb_04), [ff_pb_01]"=&f"(ff_pb_01)
+ : [blimit]"r"(blimit),
+ [src_pixel_step]"r"((mips_reg)src_pixel_step),
+ [src_pixel_step_x2]"r"((mips_reg)(src_pixel_step<<1))
+ : "memory"
+ );
+ /* clang-format on */
+}
+
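+/* Scalar sketch (reference only, compiled out; assumes abs() from
+ * <stdlib.h>) of one pixel of the simple filter above, mirroring the
+ * generic C path in vp8/common/loopfilter_filters.c. The XOR with 0x80
+ * converts to signed exactly as the ff_pb_80 constant does vectorially,
+ * and clamp8() stands for the signed-byte saturation that psubsb/paddsb
+ * give for free. Note the asm derives the "+3" case by subtracting
+ * ff_pb_01 from the already-saturated "+4" value. */
+#if 0
+static signed char clamp8(int v) {
+  return (signed char)(v < -128 ? -128 : (v > 127 ? 127 : v));
+}
+
+static void simple_filter_sketch(int blimit, unsigned char *op1,
+                                 unsigned char *op0, unsigned char *oq0,
+                                 unsigned char *oq1) {
+  signed char p1 = (signed char)(*op1 ^ 0x80);
+  signed char p0 = (signed char)(*op0 ^ 0x80);
+  signed char q0 = (signed char)(*oq0 ^ 0x80);
+  signed char q1 = (signed char)(*oq1 ^ 0x80);
+  int mask = (abs(*op0 - *oq0) * 2 + abs(*op1 - *oq1) / 2 <= blimit) ? ~0 : 0;
+  signed char f =
+      (signed char)(clamp8(clamp8(p1 - q1) + 3 * (q0 - p0)) & mask);
+  *oq0 = (unsigned char)(clamp8(q0 - (clamp8(f + 4) >> 3)) ^ 0x80);
+  *op0 = (unsigned char)(clamp8(p0 + (clamp8(f + 3) >> 3)) ^ 0x80);
+}
+#endif
+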
+void vp8_loop_filter_simple_vertical_edge_mmi(unsigned char *src_ptr,
+ int src_pixel_step,
+ const unsigned char *blimit) {
+ uint64_t tmp[1], count = 2;
+ mips_reg addr[2];
+ DECLARE_ALIGNED(8, const uint64_t, srct[2]);
+ double ftmp[12], ff_pb_fe, ff_pb_80, ff_pb_04, ff_pb_01;
+
+ /* clang-format off */
+ __asm__ volatile (
+ "dli %[tmp0], 0x08 \n\t"
+ "dmtc1 %[tmp0], %[ftmp8] \n\t"
+ "dli %[tmp0], 0x20 \n\t"
+ "dmtc1 %[tmp0], %[ftmp10] \n\t"
+ "dli %[tmp0], 0x08 \n\t"
+ "dmtc1 %[tmp0], %[ftmp8] \n\t"
+ "dli %[tmp0], 0x20 \n\t"
+ "dmtc1 %[tmp0], %[ftmp10] \n\t"
+ "dli %[tmp0], 0xfefefefefefefefe \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_fe] \n\t"
+ "dli %[tmp0], 0x8080808080808080 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_80] \n\t"
+ "dli %[tmp0], 0x0404040404040404 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_04] \n\t"
+ "dli %[tmp0], 0x0101010101010101 \n\t"
+ "dmtc1 %[tmp0], %[ff_pb_01] \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step_x4])
+ MMI_SUBU(%[src_ptr], %[src_ptr], 0x02)
+
+ "1: \n\t"
+ MMI_ADDU(%[addr0], %[src_ptr], %[src_pixel_step])
+ MMI_ADDU(%[addr1], %[addr0], %[src_pixel_step_x2])
+ "gslwlc1 %[ftmp0], 0x03(%[addr1]) \n\t"
+ "gslwrc1 %[ftmp0], 0x00(%[addr1]) \n\t"
+ MMI_ADDU(%[addr1], %[src_ptr], %[src_pixel_step_x2])
+ "gslwlc1 %[ftmp6], 0x03(%[addr1]) \n\t"
+ "gslwrc1 %[ftmp6], 0x00(%[addr1]) \n\t"
+ "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
+
+ MMI_ADDU(%[addr1], %[src_ptr], %[src_pixel_step])
+ "gslwlc1 %[ftmp0], 0x03(%[addr1]) \n\t"
+ "gslwrc1 %[ftmp0], 0x00(%[addr1]) \n\t"
+ "gslwlc1 %[ftmp4], 0x03(%[src_ptr]) \n\t"
+ "gslwrc1 %[ftmp4], 0x00(%[src_ptr]) \n\t"
+
+ "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
+ "punpckhhw %[ftmp5], %[ftmp4], %[ftmp6] \n\t"
+ "punpcklhw %[ftmp4], %[ftmp4], %[ftmp6] \n\t"
+
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step])
+ "gslwlc1 %[ftmp7], 0x03(%[addr1]) \n\t"
+ "gslwrc1 %[ftmp7], 0x00(%[addr1]) \n\t"
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step_x2])
+ "gslwlc1 %[ftmp6], 0x03(%[addr1]) \n\t"
+ "gslwrc1 %[ftmp6], 0x00(%[addr1]) \n\t"
+ "punpcklbh %[ftmp6], %[ftmp6], %[ftmp7] \n\t"
+
+ MMI_SUBU(%[addr1], %[addr0], %[src_pixel_step_x4])
+ "gslwlc1 %[ftmp1], 0x03(%[addr1]) \n\t"
+ "gslwrc1 %[ftmp1], 0x00(%[addr1]) \n\t"
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step_x4])
+ "gslwlc1 %[ftmp0], 0x03(%[addr1]) \n\t"
+ "gslwrc1 %[ftmp0], 0x00(%[addr1]) \n\t"
+ "punpcklbh %[ftmp0], %[ftmp0], %[ftmp1] \n\t"
+
+ "punpckhhw %[ftmp2], %[ftmp0], %[ftmp6] \n\t"
+ "punpcklhw %[ftmp0], %[ftmp0], %[ftmp6] \n\t"
+ "punpckhwd %[ftmp1], %[ftmp0], %[ftmp4] \n\t"
+ "punpcklwd %[ftmp0], %[ftmp0], %[ftmp4] \n\t"
+ "punpckhwd %[ftmp3], %[ftmp2], %[ftmp5] \n\t"
+ "punpcklwd %[ftmp2], %[ftmp2], %[ftmp5] \n\t"
+
+ "dli %[tmp0], 0x01 \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "pasubub %[ftmp6], %[ftmp3], %[ftmp0] \n\t"
+ "pand %[ftmp6], %[ftmp6], %[ff_pb_fe] \n\t"
+ "psrlh %[ftmp6], %[ftmp6], %[ftmp9] \n\t"
+ "pasubub %[ftmp5], %[ftmp1], %[ftmp2] \n\t"
+ "paddusb %[ftmp5], %[ftmp5], %[ftmp5] \n\t"
+ "paddusb %[ftmp5], %[ftmp5], %[ftmp6] \n\t"
+
+ "gsldlc1 %[ftmp7], 0x07(%[blimit]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[blimit]) \n\t"
+ "psubusb %[ftmp5], %[ftmp5], %[ftmp7] \n\t"
+ "pxor %[ftmp7], %[ftmp7], %[ftmp7] \n\t"
+ "pcmpeqb %[ftmp5], %[ftmp5], %[ftmp7] \n\t"
+
+ "sdc1 %[ftmp0], 0x00(%[srct]) \n\t"
+ "sdc1 %[ftmp3], 0x08(%[srct]) \n\t"
+
+ "pxor %[ftmp0], %[ftmp0], %[ff_pb_80] \n\t"
+ "pxor %[ftmp3], %[ftmp3], %[ff_pb_80] \n\t"
+ "psubsb %[ftmp0], %[ftmp0], %[ftmp3] \n\t"
+
+ "pxor %[ftmp6], %[ftmp1], %[ff_pb_80] \n\t"
+ "pxor %[ftmp3], %[ftmp2], %[ff_pb_80] \n\t"
+ "psubsb %[ftmp7], %[ftmp3], %[ftmp6] \n\t"
+ "paddsb %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ "paddsb %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ "paddsb %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ "pand %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
+ "paddsb %[ftmp5], %[ftmp5], %[ff_pb_04] \n\t"
+
+ "dli %[tmp0], 0x03 \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "psllh %[ftmp0], %[ftmp5], %[ftmp8] \n\t"
+ "psrah %[ftmp0], %[ftmp0], %[ftmp9] \n\t"
+ "psrlh %[ftmp0], %[ftmp0], %[ftmp8] \n\t"
+
+ "dli %[tmp0], 0x0b \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "psrah %[ftmp7], %[ftmp5], %[ftmp9] \n\t"
+ "psllh %[ftmp7], %[ftmp7], %[ftmp8] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp7] \n\t"
+ "psubsb %[ftmp3], %[ftmp3], %[ftmp0] \n\t"
+ "pxor %[ftmp3], %[ftmp3], %[ff_pb_80] \n\t"
+ "psubsb %[ftmp5], %[ftmp5], %[ff_pb_01] \n\t"
+
+ "dli %[tmp0], 0x03 \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "psllh %[ftmp0], %[ftmp5], %[ftmp8] \n\t"
+ "psrah %[ftmp0], %[ftmp0], %[ftmp9] \n\t"
+ "psrlh %[ftmp0], %[ftmp0], %[ftmp8] \n\t"
+
+ "dli %[tmp0], 0x0b \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "psrah %[ftmp5], %[ftmp5], %[ftmp9] \n\t"
+ "psllh %[ftmp5], %[ftmp5], %[ftmp8] \n\t"
+ "por %[ftmp0], %[ftmp0], %[ftmp5] \n\t"
+ "paddsb %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ff_pb_80] \n\t"
+
+ "ldc1 %[ftmp0], 0x00(%[srct]) \n\t"
+ "ldc1 %[ftmp4], 0x08(%[srct]) \n\t"
+
+ "punpckhbh %[ftmp1], %[ftmp0], %[ftmp6] \n\t"
+ "punpcklbh %[ftmp0], %[ftmp0], %[ftmp6] \n\t"
+ "punpcklbh %[ftmp2], %[ftmp3], %[ftmp4] \n\t"
+ "punpckhbh %[ftmp3], %[ftmp3], %[ftmp4] \n\t"
+
+ "punpckhhw %[ftmp6], %[ftmp0], %[ftmp2] \n\t"
+ "punpcklhw %[ftmp0], %[ftmp0], %[ftmp2] \n\t"
+
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step_x4])
+ "gsswlc1 %[ftmp0], 0x03(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp0], 0x00(%[addr1]) \n\t"
+ "punpckhhw %[ftmp5], %[ftmp1], %[ftmp3] \n\t"
+ "punpcklhw %[ftmp1], %[ftmp1], %[ftmp3] \n\t"
+
+ "ssrld %[ftmp0], %[ftmp0], %[ftmp10] \n\t"
+ MMI_SUBU(%[addr1], %[addr0], %[src_pixel_step_x4])
+ "gsswlc1 %[ftmp0], 0x03(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp0], 0x00(%[addr1]) \n\t"
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step_x2])
+ "gsswlc1 %[ftmp6], 0x03(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp6], 0x00(%[addr1]) \n\t"
+
+ "ssrld %[ftmp6], %[ftmp6], %[ftmp10] \n\t"
+ "gsswlc1 %[ftmp1], 0x03(%[src_ptr]) \n\t"
+ "gsswrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+
+ MMI_SUBU(%[addr1], %[src_ptr], %[src_pixel_step])
+ "gsswlc1 %[ftmp6], 0x03(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp6], 0x00(%[addr1]) \n\t"
+
+ MMI_ADDU(%[addr1], %[src_ptr], %[src_pixel_step_x2])
+ "gsswlc1 %[ftmp5], 0x03(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp5], 0x00(%[addr1]) \n\t"
+
+ "ssrld %[ftmp1], %[ftmp1], %[ftmp10] \n\t"
+ "gsswlc1 %[ftmp1], 0x03(%[addr0]) \n\t"
+ "gsswrc1 %[ftmp1], 0x00(%[addr0]) \n\t"
+
+ "ssrld %[ftmp5], %[ftmp5], %[ftmp10] \n\t"
+ MMI_ADDU(%[addr1], %[addr0], %[src_pixel_step_x2])
+ "gsswlc1 %[ftmp5], 0x03(%[addr1]) \n\t"
+ "gsswrc1 %[ftmp5], 0x00(%[addr1]) \n\t"
+
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixel_step_x8])
+ "addiu %[count], %[count], -0x01 \n\t"
+ "bnez %[count], 1b \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
+ [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
+ [tmp0]"=&r"(tmp[0]),
+ [addr0]"=&r"(addr[0]), [addr1]"=&r"(addr[1]),
+ [src_ptr]"+&r"(src_ptr), [count]"+&r"(count),
+ [ff_pb_fe]"=&f"(ff_pb_fe), [ff_pb_80]"=&f"(ff_pb_80),
+ [ff_pb_04]"=&f"(ff_pb_04), [ff_pb_01]"=&f"(ff_pb_01)
+ : [blimit]"r"(blimit), [srct]"r"(srct),
+ [src_pixel_step]"r"((mips_reg)src_pixel_step),
+ [src_pixel_step_x2]"r"((mips_reg)(src_pixel_step<<1)),
+ [src_pixel_step_x4]"r"((mips_reg)(src_pixel_step<<2)),
+ [src_pixel_step_x8]"r"((mips_reg)(src_pixel_step<<3))
+ : "memory"
+ );
+ /* clang-format on */
+}
+
+/* Horizontal MB filtering */
+void vp8_loop_filter_mbh_mmi(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ vp8_mbloop_filter_horizontal_edge_mmi(y_ptr, y_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 2);
+
+ if (u_ptr)
+ vp8_mbloop_filter_horizontal_edge_mmi(u_ptr, uv_stride, lfi->mblim,
+ lfi->lim, lfi->hev_thr, 1);
+
+ if (v_ptr)
+ vp8_mbloop_filter_horizontal_edge_mmi(v_ptr, uv_stride, lfi->mblim,
+ lfi->lim, lfi->hev_thr, 1);
+}
+
+/* Vertical MB Filtering */
+void vp8_loop_filter_mbv_mmi(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ vp8_mbloop_filter_vertical_edge_mmi(y_ptr, y_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 2);
+
+ if (u_ptr)
+ vp8_mbloop_filter_vertical_edge_mmi(u_ptr, uv_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+
+ if (v_ptr)
+ vp8_mbloop_filter_vertical_edge_mmi(v_ptr, uv_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+}
+
+/* Horizontal B Filtering */
+void vp8_loop_filter_bh_mmi(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ vp8_loop_filter_horizontal_edge_mmi(y_ptr + 4 * y_stride, y_stride, lfi->blim,
+ lfi->lim, lfi->hev_thr, 2);
+ vp8_loop_filter_horizontal_edge_mmi(y_ptr + 8 * y_stride, y_stride, lfi->blim,
+ lfi->lim, lfi->hev_thr, 2);
+ vp8_loop_filter_horizontal_edge_mmi(y_ptr + 12 * y_stride, y_stride,
+ lfi->blim, lfi->lim, lfi->hev_thr, 2);
+
+ if (u_ptr)
+ vp8_loop_filter_horizontal_edge_mmi(u_ptr + 4 * uv_stride, uv_stride,
+ lfi->blim, lfi->lim, lfi->hev_thr, 1);
+
+ if (v_ptr)
+ vp8_loop_filter_horizontal_edge_mmi(v_ptr + 4 * uv_stride, uv_stride,
+ lfi->blim, lfi->lim, lfi->hev_thr, 1);
+}
+
+/* Vertical B Filtering */
+void vp8_loop_filter_bv_mmi(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ vp8_loop_filter_vertical_edge_mmi(y_ptr + 4, y_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr, 2);
+ vp8_loop_filter_vertical_edge_mmi(y_ptr + 8, y_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr, 2);
+ vp8_loop_filter_vertical_edge_mmi(y_ptr + 12, y_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr, 2);
+
+ if (u_ptr)
+ vp8_loop_filter_vertical_edge_mmi(u_ptr + 4, uv_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr, 1);
+
+ if (v_ptr)
+ vp8_loop_filter_vertical_edge_mmi(v_ptr + 4, uv_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr, 1);
+}
+
+void vp8_loop_filter_bhs_mmi(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ vp8_loop_filter_simple_horizontal_edge_mmi(y_ptr + 4 * y_stride, y_stride,
+ blimit);
+ vp8_loop_filter_simple_horizontal_edge_mmi(y_ptr + 8 * y_stride, y_stride,
+ blimit);
+ vp8_loop_filter_simple_horizontal_edge_mmi(y_ptr + 12 * y_stride, y_stride,
+ blimit);
+}
+
+void vp8_loop_filter_bvs_mmi(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ vp8_loop_filter_simple_vertical_edge_mmi(y_ptr + 4, y_stride, blimit);
+ vp8_loop_filter_simple_vertical_edge_mmi(y_ptr + 8, y_stride, blimit);
+ vp8_loop_filter_simple_vertical_edge_mmi(y_ptr + 12, y_stride, blimit);
+}
diff --git a/media/libvpx/libvpx/vp8/common/mips/mmi/sixtap_filter_mmi.c b/media/libvpx/libvpx/vp8/common/mips/mmi/sixtap_filter_mmi.c
new file mode 100644
index 0000000000..b85f73fdff
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/mmi/sixtap_filter_mmi.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8/common/filter.h"
+#include "vpx_ports/asmdefs_mmi.h"
+
+DECLARE_ALIGNED(8, static const int16_t, vp8_six_tap_mmi[8][6 * 8]) = {
+ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0080, 0x0080, 0x0080, 0x0080, 0x0080, 0x0080, 0x0080, 0x0080,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 },
+ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa,
+ 0x007b, 0x007b, 0x007b, 0x007b, 0x007b, 0x007b, 0x007b, 0x007b,
+ 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 },
+ { 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002,
+ 0xfff5, 0xfff5, 0xfff5, 0xfff5, 0xfff5, 0xfff5, 0xfff5, 0xfff5,
+ 0x006c, 0x006c, 0x006c, 0x006c, 0x006c, 0x006c, 0x006c, 0x006c,
+ 0x0024, 0x0024, 0x0024, 0x0024, 0x0024, 0x0024, 0x0024, 0x0024,
+ 0xfff8, 0xfff8, 0xfff8, 0xfff8, 0xfff8, 0xfff8, 0xfff8, 0xfff8,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001 },
+ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0xfff7, 0xfff7, 0xfff7, 0xfff7, 0xfff7, 0xfff7, 0xfff7, 0xfff7,
+ 0x005d, 0x005d, 0x005d, 0x005d, 0x005d, 0x005d, 0x005d, 0x005d,
+ 0x0032, 0x0032, 0x0032, 0x0032, 0x0032, 0x0032, 0x0032, 0x0032,
+ 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 },
+ { 0x0003, 0x0003, 0x0003, 0x0003, 0x0003, 0x0003, 0x0003, 0x0003,
+ 0xfff0, 0xfff0, 0xfff0, 0xfff0, 0xfff0, 0xfff0, 0xfff0, 0xfff0,
+ 0x004d, 0x004d, 0x004d, 0x004d, 0x004d, 0x004d, 0x004d, 0x004d,
+ 0x004d, 0x004d, 0x004d, 0x004d, 0x004d, 0x004d, 0x004d, 0x004d,
+ 0xfff0, 0xfff0, 0xfff0, 0xfff0, 0xfff0, 0xfff0, 0xfff0, 0xfff0,
+ 0x0003, 0x0003, 0x0003, 0x0003, 0x0003, 0x0003, 0x0003, 0x0003 },
+ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa,
+ 0x0032, 0x0032, 0x0032, 0x0032, 0x0032, 0x0032, 0x0032, 0x0032,
+ 0x005d, 0x005d, 0x005d, 0x005d, 0x005d, 0x005d, 0x005d, 0x005d,
+ 0xfff7, 0xfff7, 0xfff7, 0xfff7, 0xfff7, 0xfff7, 0xfff7, 0xfff7,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 },
+ { 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0xfff8, 0xfff8, 0xfff8, 0xfff8, 0xfff8, 0xfff8, 0xfff8, 0xfff8,
+ 0x0024, 0x0024, 0x0024, 0x0024, 0x0024, 0x0024, 0x0024, 0x0024,
+ 0x006c, 0x006c, 0x006c, 0x006c, 0x006c, 0x006c, 0x006c, 0x006c,
+ 0xfff5, 0xfff5, 0xfff5, 0xfff5, 0xfff5, 0xfff5, 0xfff5, 0xfff5,
+ 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002 },
+ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
+ 0x007b, 0x007b, 0x007b, 0x007b, 0x007b, 0x007b, 0x007b, 0x007b,
+ 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa, 0xfffa,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }
+};
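+
+/* Reference-only sketch (compiled out) of the per-pixel sum the MMI code
+ * below computes four lanes at a time. Each row above stores one 6-tap
+ * kernel with every coefficient broadcast, so one 64-bit ldc1 at a
+ * 16-byte step loads four copies of a tap. The taps are Q7 fixed point
+ * summing to 128; ff_ph_40 (64) plus the arithmetic shift by 7 give
+ * round-to-nearest, and packushb supplies the 0..255 clamp. */
+#if 0
+static unsigned char sixtap_pixel_sketch(const unsigned char *src,
+                                         const int16_t *kernel) {
+  /* kernel[i * 8] is tap i; the window is src[-2] .. src[3]. */
+  int sum = 0, i;
+  for (i = 0; i < 6; ++i) sum += kernel[i * 8] * src[i - 2];
+  sum = (sum + 64) >> 7;
+  return (unsigned char)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
+}
+#endif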
+
+/* Horizontal filter: pixel_step is 1; output_height and output_width give
+   the size of the horizontal filtering output, and output_height is
+   always H + 5. */
+static INLINE void vp8_filter_block1d_h6_mmi(unsigned char *src_ptr,
+ uint16_t *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int output_height,
+ unsigned int output_width,
+ const int16_t *vp8_filter) {
+ uint64_t tmp[1];
+ double ff_ph_40;
+#if _MIPS_SIM == _ABIO32
+ register double fzero asm("$f0");
+ register double ftmp0 asm("$f2");
+ register double ftmp1 asm("$f4");
+ register double ftmp2 asm("$f6");
+ register double ftmp3 asm("$f8");
+ register double ftmp4 asm("$f10");
+ register double ftmp5 asm("$f12");
+ register double ftmp6 asm("$f14");
+ register double ftmp7 asm("$f16");
+ register double ftmp8 asm("$f18");
+ register double ftmp9 asm("$f20");
+ register double ftmp10 asm("$f22");
+ register double ftmp11 asm("$f24");
+#else
+ register double fzero asm("$f0");
+ register double ftmp0 asm("$f1");
+ register double ftmp1 asm("$f2");
+ register double ftmp2 asm("$f3");
+ register double ftmp3 asm("$f4");
+ register double ftmp4 asm("$f5");
+ register double ftmp5 asm("$f6");
+ register double ftmp6 asm("$f7");
+ register double ftmp7 asm("$f8");
+ register double ftmp8 asm("$f9");
+ register double ftmp9 asm("$f10");
+ register double ftmp10 asm("$f11");
+ register double ftmp11 asm("$f12");
+#endif // _MIPS_SIM == _ABIO32
+
+ /* clang-format off */
+ __asm__ volatile (
+ "dli %[tmp0], 0x0040004000400040 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_40] \n\t"
+ "ldc1 %[ftmp0], 0x00(%[vp8_filter]) \n\t"
+ "ldc1 %[ftmp1], 0x10(%[vp8_filter]) \n\t"
+ "ldc1 %[ftmp2], 0x20(%[vp8_filter]) \n\t"
+ "ldc1 %[ftmp3], 0x30(%[vp8_filter]) \n\t"
+ "ldc1 %[ftmp4], 0x40(%[vp8_filter]) \n\t"
+ "ldc1 %[ftmp5], 0x50(%[vp8_filter]) \n\t"
+ "pxor %[fzero], %[fzero], %[fzero] \n\t"
+ "dli %[tmp0], 0x07 \n\t"
+ "dmtc1 %[tmp0], %[ftmp7] \n\t"
+ "dli %[tmp0], 0x08 \n\t"
+ "dmtc1 %[tmp0], %[ftmp11] \n\t"
+
+ "1: \n\t"
+ "gsldlc1 %[ftmp9], 0x05(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp9], -0x02(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp10], 0x06(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp10], -0x01(%[src_ptr]) \n\t"
+
+ "punpcklbh %[ftmp6], %[ftmp9], %[fzero] \n\t"
+ "pmullh %[ftmp8], %[ftmp6], %[ftmp0] \n\t"
+
+ "punpckhbh %[ftmp6], %[ftmp9], %[fzero] \n\t"
+ "pmullh %[ftmp6], %[ftmp6], %[ftmp4] \n\t"
+ "paddsh %[ftmp8], %[ftmp8], %[ftmp6] \n\t"
+
+ "punpcklbh %[ftmp6], %[ftmp10], %[fzero] \n\t"
+ "pmullh %[ftmp6], %[ftmp6], %[ftmp1] \n\t"
+ "paddsh %[ftmp8], %[ftmp8], %[ftmp6] \n\t"
+
+ "punpckhbh %[ftmp6], %[ftmp10], %[fzero] \n\t"
+ "pmullh %[ftmp6], %[ftmp6], %[ftmp5] \n\t"
+ "paddsh %[ftmp8], %[ftmp8], %[ftmp6] \n\t"
+
+ "ssrld %[ftmp10], %[ftmp10], %[ftmp11] \n\t"
+ "punpcklbh %[ftmp6], %[ftmp10], %[fzero] \n\t"
+ "pmullh %[ftmp6], %[ftmp6], %[ftmp2] \n\t"
+ "paddsh %[ftmp8], %[ftmp8], %[ftmp6] \n\t"
+
+ "ssrld %[ftmp10], %[ftmp10], %[ftmp11] \n\t"
+ "punpcklbh %[ftmp6], %[ftmp10], %[fzero] \n\t"
+ "pmullh %[ftmp6], %[ftmp6], %[ftmp3] \n\t"
+ "paddsh %[ftmp8], %[ftmp8], %[ftmp6] \n\t"
+
+ "paddsh %[ftmp8], %[ftmp8], %[ff_ph_40] \n\t"
+ "psrah %[ftmp8], %[ftmp8], %[ftmp7] \n\t"
+ "packushb %[ftmp8], %[ftmp8], %[fzero] \n\t"
+ "punpcklbh %[ftmp8], %[ftmp8], %[fzero] \n\t"
+ "gssdlc1 %[ftmp8], 0x07(%[output_ptr]) \n\t"
+ "gssdrc1 %[ftmp8], 0x00(%[output_ptr]) \n\t"
+
+ "addiu %[output_height], %[output_height], -0x01 \n\t"
+ MMI_ADDU(%[output_ptr], %[output_ptr], %[output_width])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixels_per_line])
+ "bnez %[output_height], 1b \n\t"
+ : [fzero]"=&f"(fzero), [ftmp0]"=&f"(ftmp0),
+ [ftmp1]"=&f"(ftmp1), [ftmp2]"=&f"(ftmp2),
+ [ftmp3]"=&f"(ftmp3), [ftmp4]"=&f"(ftmp4),
+ [ftmp5]"=&f"(ftmp5), [ftmp6]"=&f"(ftmp6),
+ [ftmp7]"=&f"(ftmp7), [ftmp8]"=&f"(ftmp8),
+ [ftmp9]"=&f"(ftmp9), [ftmp10]"=&f"(ftmp10),
+ [ftmp11]"=&f"(ftmp11), [tmp0]"=&r"(tmp[0]),
+ [output_ptr]"+&r"(output_ptr), [output_height]"+&r"(output_height),
+ [src_ptr]"+&r"(src_ptr), [ff_ph_40]"=&f"(ff_ph_40)
+ : [src_pixels_per_line]"r"((mips_reg)src_pixels_per_line),
+ [vp8_filter]"r"(vp8_filter), [output_width]"r"(output_width)
+ : "memory"
+ );
+ /* clang-format on */
+}
+
+/* Vertical filter: pixel_step is always W */
+static INLINE void vp8_filter_block1dc_v6_mmi(
+ uint16_t *src_ptr, unsigned char *output_ptr, unsigned int output_height,
+ int output_pitch, unsigned int pixels_per_line, const int16_t *vp8_filter) {
+ double ff_ph_40;
+ uint64_t tmp[1];
+ mips_reg addr[1];
+
+#if _MIPS_SIM == _ABIO32
+ register double fzero asm("$f0");
+ register double ftmp0 asm("$f2");
+ register double ftmp1 asm("$f4");
+ register double ftmp2 asm("$f6");
+ register double ftmp3 asm("$f8");
+ register double ftmp4 asm("$f10");
+ register double ftmp5 asm("$f12");
+ register double ftmp6 asm("$f14");
+ register double ftmp7 asm("$f16");
+ register double ftmp8 asm("$f18");
+ register double ftmp9 asm("$f20");
+ register double ftmp10 asm("$f22");
+ register double ftmp11 asm("$f24");
+ register double ftmp12 asm("$f26");
+ register double ftmp13 asm("$f28");
+#else
+ register double fzero asm("$f0");
+ register double ftmp0 asm("$f1");
+ register double ftmp1 asm("$f2");
+ register double ftmp2 asm("$f3");
+ register double ftmp3 asm("$f4");
+ register double ftmp4 asm("$f5");
+ register double ftmp5 asm("$f6");
+ register double ftmp6 asm("$f7");
+ register double ftmp7 asm("$f8");
+ register double ftmp8 asm("$f9");
+ register double ftmp9 asm("$f10");
+ register double ftmp10 asm("$f11");
+ register double ftmp11 asm("$f12");
+ register double ftmp12 asm("$f13");
+ register double ftmp13 asm("$f14");
+#endif // _MIPS_SIM == _ABIO32
+
+ /* clang-format off */
+ __asm__ volatile (
+ "dli %[tmp0], 0x0040004000400040 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_40] \n\t"
+ "ldc1 %[ftmp0], 0x00(%[vp8_filter]) \n\t"
+ "ldc1 %[ftmp1], 0x10(%[vp8_filter]) \n\t"
+ "ldc1 %[ftmp2], 0x20(%[vp8_filter]) \n\t"
+ "ldc1 %[ftmp3], 0x30(%[vp8_filter]) \n\t"
+ "ldc1 %[ftmp4], 0x40(%[vp8_filter]) \n\t"
+ "ldc1 %[ftmp5], 0x50(%[vp8_filter]) \n\t"
+ "pxor %[fzero], %[fzero], %[fzero] \n\t"
+ "dli %[tmp0], 0x07 \n\t"
+ "dmtc1 %[tmp0], %[ftmp13] \n\t"
+
+ /* To make full use of the memory-load delay slot, the memory loads
+  * and the arithmetic below have been interleaved.
+  */
+ "1: \n\t"
+ "gsldlc1 %[ftmp6], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp6], 0x00(%[src_ptr]) \n\t"
+ MMI_ADDU(%[addr0], %[src_ptr], %[pixels_per_line])
+ "gsldlc1 %[ftmp7], 0x07(%[addr0]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[addr0]) \n\t"
+ MMI_ADDU(%[addr0], %[src_ptr], %[pixels_per_line_x2])
+ "gsldlc1 %[ftmp8], 0x07(%[addr0]) \n\t"
+ "gsldrc1 %[ftmp8], 0x00(%[addr0]) \n\t"
+
+ MMI_ADDU(%[addr0], %[src_ptr], %[pixels_per_line_x4])
+ "gsldlc1 %[ftmp9], 0x07(%[addr0]) \n\t"
+ "gsldrc1 %[ftmp9], 0x00(%[addr0]) \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[pixels_per_line])
+ MMI_ADDU(%[addr0], %[src_ptr], %[pixels_per_line_x2])
+ "gsldlc1 %[ftmp10], 0x07(%[addr0]) \n\t"
+ "gsldrc1 %[ftmp10], 0x00(%[addr0]) \n\t"
+ MMI_ADDU(%[addr0], %[src_ptr], %[pixels_per_line_x4])
+ "gsldlc1 %[ftmp11], 0x07(%[addr0]) \n\t"
+ "gsldrc1 %[ftmp11], 0x00(%[addr0]) \n\t"
+
+ "pmullh %[ftmp12], %[ftmp6], %[ftmp0] \n\t"
+
+ "pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t"
+ "paddsh %[ftmp12], %[ftmp12], %[ftmp7] \n\t"
+
+ "pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t"
+ "paddsh %[ftmp12], %[ftmp12], %[ftmp8] \n\t"
+
+ "pmullh %[ftmp9], %[ftmp9], %[ftmp4] \n\t"
+ "paddsh %[ftmp12], %[ftmp12], %[ftmp9] \n\t"
+
+ "pmullh %[ftmp10], %[ftmp10], %[ftmp3] \n\t"
+ "paddsh %[ftmp12], %[ftmp12], %[ftmp10] \n\t"
+
+ "pmullh %[ftmp11], %[ftmp11], %[ftmp5] \n\t"
+ "paddsh %[ftmp12], %[ftmp12], %[ftmp11] \n\t"
+
+ "paddsh %[ftmp12], %[ftmp12], %[ff_ph_40] \n\t"
+ "psrah %[ftmp12], %[ftmp12], %[ftmp13] \n\t"
+ "packushb %[ftmp12], %[ftmp12], %[fzero] \n\t"
+ "gsswlc1 %[ftmp12], 0x03(%[output_ptr]) \n\t"
+ "gsswrc1 %[ftmp12], 0x00(%[output_ptr]) \n\t"
+
+ MMI_ADDIU(%[output_height], %[output_height], -0x01)
+ MMI_ADDU(%[output_ptr], %[output_ptr], %[output_pitch])
+ "bnez %[output_height], 1b \n\t"
+ : [fzero]"=&f"(fzero), [ftmp0]"=&f"(ftmp0),
+ [ftmp1]"=&f"(ftmp1), [ftmp2]"=&f"(ftmp2),
+ [ftmp3]"=&f"(ftmp3), [ftmp4]"=&f"(ftmp4),
+ [ftmp5]"=&f"(ftmp5), [ftmp6]"=&f"(ftmp6),
+ [ftmp7]"=&f"(ftmp7), [ftmp8]"=&f"(ftmp8),
+ [ftmp9]"=&f"(ftmp9), [ftmp10]"=&f"(ftmp10),
+ [ftmp11]"=&f"(ftmp11), [ftmp12]"=&f"(ftmp12),
+ [ftmp13]"=&f"(ftmp13), [tmp0]"=&r"(tmp[0]),
+ [addr0]"=&r"(addr[0]), [src_ptr]"+&r"(src_ptr),
+ [output_ptr]"+&r"(output_ptr), [output_height]"+&r"(output_height),
+ [ff_ph_40]"=&f"(ff_ph_40)
+ : [pixels_per_line]"r"((mips_reg)pixels_per_line),
+ [pixels_per_line_x2]"r"((mips_reg)(pixels_per_line<<1)),
+ [pixels_per_line_x4]"r"((mips_reg)(pixels_per_line<<2)),
+ [vp8_filter]"r"(vp8_filter),
+ [output_pitch]"r"((mips_reg)output_pitch)
+ : "memory"
+ );
+ /* clang-format on */
+}
+
+/* When xoffset == 0 (or yoffset == 0), vp8_filter = { 0, 0, 128, 0, 0, 0 },
+   so vp8_filter_block1d_h6_mmi and vp8_filter_block1dc_v6_mmi can be
+   simplified to the filter0 variants below. */
+static INLINE void vp8_filter_block1d_h6_filter0_mmi(
+ unsigned char *src_ptr, uint16_t *output_ptr,
+ unsigned int src_pixels_per_line, unsigned int output_height,
+ unsigned int output_width) {
+#if _MIPS_SIM == _ABIO32
+ register double fzero asm("$f0");
+ register double ftmp0 asm("$f2");
+ register double ftmp1 asm("$f4");
+#else
+ register double fzero asm("$f0");
+ register double ftmp0 asm("$f1");
+ register double ftmp1 asm("$f2");
+#endif // _MIPS_SIM == _ABIO32
+
+ /* clang-format off */
+ __asm__ volatile (
+ "pxor %[fzero], %[fzero], %[fzero] \n\t"
+
+ "1: \n\t"
+ "gsldlc1 %[ftmp0], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp0], 0x00(%[src_ptr]) \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_pixels_per_line])
+
+ "punpcklbh %[ftmp1], %[ftmp0], %[fzero] \n\t"
+ "gssdlc1 %[ftmp1], 0x07(%[output_ptr]) \n\t"
+ "gssdrc1 %[ftmp1], 0x00(%[output_ptr]) \n\t"
+
+ "addiu %[output_height], %[output_height], -0x01 \n\t"
+ MMI_ADDU(%[output_ptr], %[output_ptr], %[output_width])
+ "bnez %[output_height], 1b \n\t"
+ : [fzero]"=&f"(fzero), [ftmp0]"=&f"(ftmp0),
+ [ftmp1]"=&f"(ftmp1), [src_ptr]"+&r"(src_ptr),
+ [output_ptr]"+&r"(output_ptr), [output_height]"+&r"(output_height)
+ : [src_pixels_per_line]"r"((mips_reg)src_pixels_per_line),
+ [output_width]"r"(output_width)
+ : "memory"
+ );
+ /* clang-format on */
+}
+
+static INLINE void vp8_filter_block1dc_v6_filter0_mmi(
+ uint16_t *src_ptr, unsigned char *output_ptr, unsigned int output_height,
+ int output_pitch, unsigned int pixels_per_line) {
+#if _MIPS_SIM == _ABIO32
+ register double fzero asm("$f0");
+ register double ftmp0 asm("$f2");
+ register double ftmp1 asm("$f4");
+#else
+ register double fzero asm("$f0");
+ register double ftmp0 asm("$f1");
+ register double ftmp1 asm("$f2");
+#endif // _MIPS_SIM == _ABIO32
+
+ /* clang-format off */
+ __asm__ volatile (
+ "pxor %[fzero], %[fzero], %[fzero] \n\t"
+
+ "1: \n\t"
+ "gsldlc1 %[ftmp0], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp0], 0x00(%[src_ptr]) \n\t"
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[pixels_per_line])
+ MMI_ADDIU(%[output_height], %[output_height], -0x01)
+ "packushb %[ftmp1], %[ftmp0], %[fzero] \n\t"
+ "gsswlc1 %[ftmp1], 0x03(%[output_ptr]) \n\t"
+ "gsswrc1 %[ftmp1], 0x00(%[output_ptr]) \n\t"
+
+ MMI_ADDU(%[output_ptr], %[output_ptr], %[output_pitch])
+ "bnez %[output_height], 1b \n\t"
+ : [fzero]"=&f"(fzero), [ftmp0]"=&f"(ftmp0),
+ [ftmp1]"=&f"(ftmp1), [src_ptr]"+&r"(src_ptr),
+ [output_ptr]"+&r"(output_ptr), [output_height]"+&r"(output_height)
+ : [pixels_per_line]"r"((mips_reg)pixels_per_line),
+ [output_pitch]"r"((mips_reg)output_pitch)
+ : "memory"
+ );
+ /* clang-format on */
+}
+
+#define sixtapNxM(n, m) \
+ void vp8_sixtap_predict##n##x##m##_mmi( \
+ unsigned char *src_ptr, int src_pixels_per_line, int xoffset, \
+ int yoffset, unsigned char *dst_ptr, int dst_pitch) { \
+ DECLARE_ALIGNED(16, uint16_t, \
+ FData2[(n + 5) * (n == 16 ? 24 : (n == 8 ? 16 : n))]); \
+ const int16_t *HFilter, *VFilter; \
+ int i, loop = n / 4; \
+ HFilter = vp8_six_tap_mmi[xoffset]; \
+ VFilter = vp8_six_tap_mmi[yoffset]; \
+ \
+ if (xoffset == 0) { \
+ for (i = 0; i < loop; ++i) { \
+ vp8_filter_block1d_h6_filter0_mmi( \
+ src_ptr - (2 * src_pixels_per_line) + i * 4, FData2 + i * 4, \
+ src_pixels_per_line, m + 5, n * 2); \
+ } \
+ } else { \
+ for (i = 0; i < loop; ++i) { \
+ vp8_filter_block1d_h6_mmi(src_ptr - (2 * src_pixels_per_line) + i * 4, \
+ FData2 + i * 4, src_pixels_per_line, m + 5, \
+ n * 2, HFilter); \
+ } \
+ } \
+ if (yoffset == 0) { \
+ for (i = 0; i < loop; ++i) { \
+ vp8_filter_block1dc_v6_filter0_mmi( \
+ FData2 + n * 2 + i * 4, dst_ptr + i * 4, m, dst_pitch, n * 2); \
+ } \
+ } else { \
+ for (i = 0; i < loop; ++i) { \
+ vp8_filter_block1dc_v6_mmi(FData2 + i * 4, dst_ptr + i * 4, m, \
+ dst_pitch, n * 2, VFilter); \
+ } \
+ } \
+ }
+
+sixtapNxM(4, 4);
+sixtapNxM(8, 8);
+sixtapNxM(8, 4);
+sixtapNxM(16, 16);
diff --git a/media/libvpx/libvpx/vp8/common/mips/msa/bilinear_filter_msa.c b/media/libvpx/libvpx/vp8/common/mips/msa/bilinear_filter_msa.c
new file mode 100644
index 0000000000..c7fb1ed33f
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/msa/bilinear_filter_msa.c
@@ -0,0 +1,797 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vpx_ports/mem.h"
+#include "vp8/common/filter.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+
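+/* Two-tap bilinear filters for subpel offsets 1..7; the taps of each pair
+   sum to 128 (1 << VP8_FILTER_SHIFT). Offset 0 needs no filtering and is
+   dispatched to the copy paths in the predict functions below. */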
+DECLARE_ALIGNED(16, static const int8_t, vp8_bilinear_filters_msa[7][2]) = {
+ { 112, 16 }, { 96, 32 }, { 80, 48 }, { 64, 64 },
+ { 48, 80 }, { 32, 96 }, { 16, 112 }
+};
+
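+/* VSHF.B shuffle-control vectors pairing horizontally adjacent bytes for
+   the 2-tap dot product; indices of 16 and above select bytes from the
+   second source operand, which lets the 4-wide cases gather two rows into
+   one register. */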
+static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = {
+ /* 8 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ /* 4 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
+ /* 4 width cases */
+ 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
+};
+
+static void common_hz_2t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ v16i8 src0, src1, src2, src3, mask;
+ v16u8 filt0, vec0, vec1, res0, res1;
+ v8u16 vec2, vec3, filt;
+
+ mask = LD_SB(&vp8_mc_filt_mask_arr[16]);
+
+ filt = LD_UH(filter);
+ filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3);
+ SRARI_H2_UH(vec2, vec3, VP8_FILTER_SHIFT);
+ PCKEV_B2_UB(vec2, vec2, vec3, vec3, res0, res1);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+}
+
+static void common_hz_2t_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ v16u8 vec0, vec1, vec2, vec3, filt0;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
+ v16i8 res0, res1, res2, res3;
+ v8u16 vec4, vec5, vec6, vec7, filt;
+
+ mask = LD_SB(&vp8_mc_filt_mask_arr[16]);
+
+ filt = LD_UH(filter);
+ filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5,
+ vec6, vec7);
+ SRARI_H4_UH(vec4, vec5, vec6, vec7, VP8_FILTER_SHIFT);
+ PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,
+ res3);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
+}
+
+static void common_hz_2t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ if (4 == height) {
+ common_hz_2t_4x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (8 == height) {
+ common_hz_2t_4x8_msa(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_hz_2t_8x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ v16u8 filt0;
+ v16i8 src0, src1, src2, src3, mask;
+ v8u16 vec0, vec1, vec2, vec3, filt;
+
+ mask = LD_SB(&vp8_mc_filt_mask_arr[0]);
+
+ filt = LD_UH(filter);
+ filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+ vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, src0, src1);
+ ST8x4_UB(src0, src1, dst, dst_stride);
+}
+
+static void common_hz_2t_8x8mult_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ v16u8 filt0;
+ v16i8 src0, src1, src2, src3, mask, out0, out1;
+ v8u16 vec0, vec1, vec2, vec3, filt;
+
+ mask = LD_SB(&vp8_mc_filt_mask_arr[0]);
+
+ filt = LD_UH(filter);
+ filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+ vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT);
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+ vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ if (16 == height) {
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+ vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+ vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst + 4 * dst_stride, dst_stride);
+ }
+}
+
+static void common_hz_2t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ if (4 == height) {
+ common_hz_2t_8x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else {
+ common_hz_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, height);
+ }
+}
+
+static void common_hz_2t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
+ v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt;
+
+ mask = LD_SB(&vp8_mc_filt_mask_arr[0]);
+
+ loop_cnt = (height >> 2) - 1;
+
+ filt = LD_UH(filter);
+ filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ src += (4 * src_stride);
+
+ VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
+ VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
+ out2, out3);
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
+ out6, out7);
+ SRARI_H4_UH(out0, out1, out2, out3, VP8_FILTER_SHIFT);
+ SRARI_H4_UH(out4, out5, out6, out7, VP8_FILTER_SHIFT);
+ PCKEV_ST_SB(out0, out1, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out2, out3, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out4, out5, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out6, out7, dst);
+ dst += dst_stride;
+
+ for (; loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ src += (4 * src_stride);
+
+ VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
+ VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
+ out2, out3);
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
+ out6, out7);
+ SRARI_H4_UH(out0, out1, out2, out3, VP8_FILTER_SHIFT);
+ SRARI_H4_UH(out4, out5, out6, out7, VP8_FILTER_SHIFT);
+ PCKEV_ST_SB(out0, out1, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out2, out3, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out4, out5, dst);
+ dst += dst_stride;
+ PCKEV_ST_SB(out6, out7, dst);
+ dst += dst_stride;
+ }
+}
+
+static void common_vt_2t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ v16i8 src0, src1, src2, src3, src4;
+ v16i8 src10_r, src32_r, src21_r, src43_r, src2110, src4332;
+ v16u8 filt0;
+ v8i16 filt;
+ v8u16 tmp0, tmp1;
+
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
+ src32_r, src43_r);
+ ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
+ DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT);
+ src2110 = __msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
+ ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride);
+}
+
+static void common_vt_2t_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r;
+ v16i8 src65_r, src87_r, src2110, src4332, src6554, src8776;
+ v8u16 tmp0, tmp1, tmp2, tmp3;
+ v16u8 filt0;
+ v8i16 filt;
+
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
+
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+
+ src8 = LD_SB(src);
+ src += src_stride;
+
+ ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
+ src32_r, src43_r);
+ ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,
+ src76_r, src87_r);
+ ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src87_r,
+ src76_r, src2110, src4332, src6554, src8776);
+ DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0,
+ tmp0, tmp1, tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, src2110, src4332);
+ ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride);
+ ST4x4_UB(src4332, src4332, 0, 1, 2, 3, dst + 4 * dst_stride, dst_stride);
+}
+
+static void common_vt_2t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ if (4 == height) {
+ common_vt_2t_4x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (8 == height) {
+ common_vt_2t_4x8_msa(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_vt_2t_8x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ v16u8 src0, src1, src2, src3, src4, vec0, vec1, vec2, vec3, filt0;
+ v16i8 out0, out1;
+ v8u16 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt;
+
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
+
+ LD_UB5(src, src_stride, src0, src1, src2, src3, src4);
+ ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1);
+ ILVR_B2_UB(src3, src2, src4, src3, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
+ tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+}
+
+static void common_vt_2t_8x8mult_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
+ v16i8 out0, out1;
+ v8u16 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt;
+
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
+
+ src0 = LD_UB(src);
+ src += src_stride;
+
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8);
+ src += (8 * src_stride);
+
+ ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,
+ vec3);
+ ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6,
+ vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
+ tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,
+ tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src0 = src8;
+ }
+}
+
+static void common_vt_2t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ if (4 == height) {
+ common_vt_2t_8x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else {
+ common_vt_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, height);
+ }
+}
+
+static void common_vt_2t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16u8 src0, src1, src2, src3, src4;
+ v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
+ v8u16 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt;
+
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
+
+ src0 = LD_UB(src);
+ src += src_stride;
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_UB4(src, src_stride, src1, src2, src3, src4);
+ src += (4 * src_stride);
+
+ ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
+ ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
+ DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT);
+ PCKEV_ST_SB(tmp0, tmp1, dst);
+ dst += dst_stride;
+
+ ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
+ ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
+ DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, VP8_FILTER_SHIFT);
+ PCKEV_ST_SB(tmp2, tmp3, dst);
+ dst += dst_stride;
+
+ DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT);
+ PCKEV_ST_SB(tmp0, tmp1, dst);
+ dst += dst_stride;
+
+ DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, VP8_FILTER_SHIFT);
+ PCKEV_ST_SB(tmp2, tmp3, dst);
+ dst += dst_stride;
+
+ src0 = src4;
+ }
+}
+
+static void common_hv_2ht_2vt_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert) {
+ v16i8 src0, src1, src2, src3, src4, mask;
+ v16u8 filt_vt, filt_hz, vec0, vec1, res0, res1;
+ v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, filt, tmp0, tmp1;
+
+ mask = LD_SB(&vp8_mc_filt_mask_arr[16]);
+
+ filt = LD_UH(filter_horiz);
+ filt_hz = (v16u8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_UH(filter_vert);
+ filt_vt = (v16u8)__msa_splati_h((v8i16)filt, 0);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out4 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out1 = (v8u16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8);
+ hz_out3 = (v8u16)__msa_pckod_d((v2i64)hz_out4, (v2i64)hz_out2);
+
+ ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT);
+ PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+}
+
+static void common_hv_2ht_2vt_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert) {
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask;
+ v16i8 res0, res1, res2, res3;
+ v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3;
+ v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ v8u16 hz_out7, hz_out8, vec4, vec5, vec6, vec7, filt;
+
+ mask = LD_SB(&vp8_mc_filt_mask_arr[16]);
+
+ filt = LD_UH(filter_horiz);
+ filt_hz = (v16u8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_UH(filter_vert);
+ filt_vt = (v16u8)__msa_splati_h((v8i16)filt, 0);
+
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+ src8 = LD_SB(src);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out4 = HORIZ_2TAP_FILT_UH(src4, src5, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out6 = HORIZ_2TAP_FILT_UH(src6, src7, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out8 = HORIZ_2TAP_FILT_UH(src8, src8, mask, filt_hz, VP8_FILTER_SHIFT);
+ SLDI_B3_UH(hz_out2, hz_out4, hz_out6, hz_out0, hz_out2, hz_out4, hz_out1,
+ hz_out3, hz_out5, 8);
+ hz_out7 = (v8u16)__msa_pckod_d((v2i64)hz_out8, (v2i64)hz_out6);
+
+ ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
+ ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, vec4,
+ vec5, vec6, vec7);
+ SRARI_H4_UH(vec4, vec5, vec6, vec7, VP8_FILTER_SHIFT);
+ PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,
+ res3);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
+}
+
+static void common_hv_2ht_2vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ if (4 == height) {
+ common_hv_2ht_2vt_4x4_msa(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert);
+ } else if (8 == height) {
+ common_hv_2ht_2vt_4x8_msa(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert);
+ }
+}
+
+static void common_hv_2ht_2vt_8x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert) {
+ v16i8 src0, src1, src2, src3, src4, mask, out0, out1;
+ v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3;
+ v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt;
+
+ mask = LD_SB(&vp8_mc_filt_mask_arr[0]);
+
+ filt = LD_SH(filter_horiz);
+ filt_hz = (v16u8)__msa_splati_h(filt, 0);
+ filt = LD_SH(filter_vert);
+ filt_vt = (v16u8)__msa_splati_h(filt, 0);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+ tmp0 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec1 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
+ tmp1 = __msa_dotp_u_h(vec1, filt_vt);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec2 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+ tmp2 = __msa_dotp_u_h(vec2, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec3 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
+ tmp3 = __msa_dotp_u_h(vec3, filt_vt);
+
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+}
+
+static void common_hv_2ht_2vt_8x8mult_msa(
+ uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst,
+ int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, mask, out0, out1;
+ v16u8 filt_hz, filt_vt, vec0;
+ v8u16 hz_out0, hz_out1, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ v8i16 filt;
+
+ mask = LD_SB(&vp8_mc_filt_mask_arr[0]);
+
+ filt = LD_SH(filter_horiz);
+ filt_hz = (v16u8)__msa_splati_h(filt, 0);
+ filt = LD_SH(filter_vert);
+ filt_vt = (v16u8)__msa_splati_h(filt, 0);
+
+ src0 = LD_SB(src);
+ src += src_stride;
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, VP8_FILTER_SHIFT);
+
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ LD_SB4(src, src_stride, src1, src2, src3, src4);
+ src += (4 * src_stride);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+ tmp1 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
+ tmp2 = __msa_dotp_u_h(vec0, filt_vt);
+
+ SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+ tmp3 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT);
+ LD_SB4(src, src_stride, src1, src2, src3, src4);
+ src += (4 * src_stride);
+ vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
+ tmp4 = __msa_dotp_u_h(vec0, filt_vt);
+
+ SRARI_H2_UH(tmp3, tmp4, VP8_FILTER_SHIFT);
+ PCKEV_B2_SB(tmp2, tmp1, tmp4, tmp3, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+ tmp5 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
+ tmp6 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+ tmp7 = __msa_dotp_u_h(vec0, filt_vt);
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT);
+ vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
+ tmp8 = __msa_dotp_u_h(vec0, filt_vt);
+
+ SRARI_H4_UH(tmp5, tmp6, tmp7, tmp8, VP8_FILTER_SHIFT);
+ PCKEV_B2_SB(tmp6, tmp5, tmp8, tmp7, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+static void common_hv_2ht_2vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ if (4 == height) {
+ common_hv_2ht_2vt_8x4_msa(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert);
+ } else {
+ common_hv_2ht_2vt_8x8mult_msa(src, src_stride, dst, dst_stride,
+ filter_horiz, filter_vert, height);
+ }
+}
+
+static void common_hv_2ht_2vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
+ v16u8 filt_hz, filt_vt, vec0, vec1;
+ v8u16 tmp1, tmp2, hz_out0, hz_out1, hz_out2, hz_out3;
+ v8i16 filt;
+
+ mask = LD_SB(&vp8_mc_filt_mask_arr[0]);
+
+ /* rearranging filter */
+ filt = LD_SH(filter_horiz);
+ filt_hz = (v16u8)__msa_splati_h(filt, 0);
+ filt = LD_SH(filter_vert);
+ filt_vt = (v16u8)__msa_splati_h(filt, 0);
+
+ LD_SB2(src, 8, src0, src1);
+ src += src_stride;
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out2 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ src += (4 * src_stride);
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out3 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT);
+ ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2);
+ SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT);
+ PCKEV_ST_SB(tmp1, tmp2, dst);
+ dst += dst_stride;
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out2 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, VP8_FILTER_SHIFT);
+ ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2);
+ SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT);
+ PCKEV_ST_SB(tmp1, tmp2, dst);
+ dst += dst_stride;
+
+ hz_out1 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out3 = HORIZ_2TAP_FILT_UH(src5, src5, mask, filt_hz, VP8_FILTER_SHIFT);
+ ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2);
+ SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT);
+ PCKEV_ST_SB(tmp1, tmp2, dst);
+ dst += dst_stride;
+
+ hz_out0 = HORIZ_2TAP_FILT_UH(src6, src6, mask, filt_hz, VP8_FILTER_SHIFT);
+ hz_out2 = HORIZ_2TAP_FILT_UH(src7, src7, mask, filt_hz, VP8_FILTER_SHIFT);
+ ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2);
+ SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT);
+ PCKEV_ST_SB(tmp1, tmp2, dst);
+ dst += dst_stride;
+ }
+}
+
+void vp8_bilinear_predict4x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1];
+ const int8_t *v_filter = vp8_bilinear_filters_msa[yoffset - 1];
+
+ if (yoffset) {
+ if (xoffset) {
+ common_hv_2ht_2vt_4w_msa(src, src_stride, dst, dst_stride, h_filter,
+ v_filter, 4);
+ } else {
+ common_vt_2t_4w_msa(src, src_stride, dst, dst_stride, v_filter, 4);
+ }
+ } else {
+ if (xoffset) {
+ common_hz_2t_4w_msa(src, src_stride, dst, dst_stride, h_filter, 4);
+ } else {
+ uint32_t tp0, tp1, tp2, tp3;
+
+ LW4(src, src_stride, tp0, tp1, tp2, tp3);
+ SW4(tp0, tp1, tp2, tp3, dst, dst_stride);
+ }
+ }
+}
+
+void vp8_bilinear_predict8x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1];
+ const int8_t *v_filter = vp8_bilinear_filters_msa[yoffset - 1];
+
+ if (yoffset) {
+ if (xoffset) {
+ common_hv_2ht_2vt_8w_msa(src, src_stride, dst, dst_stride, h_filter,
+ v_filter, 4);
+ } else {
+ common_vt_2t_8w_msa(src, src_stride, dst, dst_stride, v_filter, 4);
+ }
+ } else {
+ if (xoffset) {
+ common_hz_2t_8w_msa(src, src_stride, dst, dst_stride, h_filter, 4);
+ } else {
+ vp8_copy_mem8x4(src, src_stride, dst, dst_stride);
+ }
+ }
+}
+
+void vp8_bilinear_predict8x8_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1];
+ const int8_t *v_filter = vp8_bilinear_filters_msa[yoffset - 1];
+
+ if (yoffset) {
+ if (xoffset) {
+ common_hv_2ht_2vt_8w_msa(src, src_stride, dst, dst_stride, h_filter,
+ v_filter, 8);
+ } else {
+ common_vt_2t_8w_msa(src, src_stride, dst, dst_stride, v_filter, 8);
+ }
+ } else {
+ if (xoffset) {
+ common_hz_2t_8w_msa(src, src_stride, dst, dst_stride, h_filter, 8);
+ } else {
+ vp8_copy_mem8x8(src, src_stride, dst, dst_stride);
+ }
+ }
+}
+
+void vp8_bilinear_predict16x16_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1];
+ const int8_t *v_filter = vp8_bilinear_filters_msa[yoffset - 1];
+
+ if (yoffset) {
+ if (xoffset) {
+ common_hv_2ht_2vt_16w_msa(src, src_stride, dst, dst_stride, h_filter,
+ v_filter, 16);
+ } else {
+ common_vt_2t_16w_msa(src, src_stride, dst, dst_stride, v_filter, 16);
+ }
+ } else {
+ if (xoffset) {
+ common_hz_2t_16w_msa(src, src_stride, dst, dst_stride, h_filter, 16);
+ } else {
+ vp8_copy_mem16x16(src, src_stride, dst, dst_stride);
+ }
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/mips/msa/copymem_msa.c b/media/libvpx/libvpx/vp8/common/mips/msa/copymem_msa.c
new file mode 100644
index 0000000000..357c99b8b6
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/msa/copymem_msa.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+
+static void copy_8x4_msa(uint8_t *src, int32_t src_stride, uint8_t *dst,
+ int32_t dst_stride) {
+ uint64_t src0, src1, src2, src3;
+
+ LD4(src, src_stride, src0, src1, src2, src3);
+ SD4(src0, src1, src2, src3, dst, dst_stride);
+}
+
+static void copy_8x8_msa(uint8_t *src, int32_t src_stride, uint8_t *dst,
+ int32_t dst_stride) {
+ uint64_t src0, src1, src2, src3;
+
+ LD4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+ SD4(src0, src1, src2, src3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ LD4(src, src_stride, src0, src1, src2, src3);
+ SD4(src0, src1, src2, src3, dst, dst_stride);
+}
+
+static void copy_16x16_msa(uint8_t *src, int32_t src_stride, uint8_t *dst,
+ int32_t dst_stride) {
+ v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v16u8 src8, src9, src10, src11, src12, src13, src14, src15;
+
+ LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+ LD_UB8(src, src_stride, src8, src9, src10, src11, src12, src13, src14, src15);
+
+ ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride);
+ dst += (8 * dst_stride);
+ ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15, dst, dst_stride);
+}
+
+void vp8_copy_mem16x16_msa(uint8_t *src, int32_t src_stride, uint8_t *dst,
+ int32_t dst_stride) {
+ copy_16x16_msa(src, src_stride, dst, dst_stride);
+}
+
+void vp8_copy_mem8x8_msa(uint8_t *src, int32_t src_stride, uint8_t *dst,
+ int32_t dst_stride) {
+ copy_8x8_msa(src, src_stride, dst, dst_stride);
+}
+
+void vp8_copy_mem8x4_msa(uint8_t *src, int32_t src_stride, uint8_t *dst,
+ int32_t dst_stride) {
+ copy_8x4_msa(src, src_stride, dst, dst_stride);
+}
diff --git a/media/libvpx/libvpx/vp8/common/mips/msa/idct_msa.c b/media/libvpx/libvpx/vp8/common/mips/msa/idct_msa.c
new file mode 100644
index 0000000000..efad0c29f8
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/msa/idct_msa.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/blockd.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+
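+/* Q16 fixed-point transform constants: 20091 ~ (sqrt(2) * cos(pi / 8) - 1)
+   * 65536 and 35468 ~ sqrt(2) * sin(pi / 8) * 65536; x * sqrt(2) *
+   cos(pi / 8) is formed as x + ((x * 20091) >> 16), exactly as in the
+   reference vp8_short_idct4x4llm_c. */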
+static const int32_t cospi8sqrt2minus1 = 20091;
+static const int32_t sinpi8sqrt2 = 35468;
+
+#define TRANSPOSE_TWO_4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ v8i16 s4_m, s5_m, s6_m, s7_m; \
+ \
+ TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, s4_m, s5_m, s6_m, s7_m); \
+ ILVR_D2_SH(s6_m, s4_m, s7_m, s5_m, out0, out2); \
+ out1 = (v8i16)__msa_ilvl_d((v2i64)s6_m, (v2i64)s4_m); \
+ out3 = (v8i16)__msa_ilvl_d((v2i64)s7_m, (v2i64)s5_m); \
+ }
+
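+/* The interleave with zero leaves each 16-bit coefficient in the high half
+   of a 32-bit lane; the arithmetic >> 16 sign-extends it back down, and
+   the shift after the multiply keeps the integer part of the Q16
+   product. */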
+#define EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in) \
+ ({ \
+ v8i16 out_m; \
+ v8i16 zero_m = { 0 }; \
+ v4i32 tmp1_m, tmp2_m; \
+ v4i32 sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2); \
+ \
+ ILVRL_H2_SW(in, zero_m, tmp1_m, tmp2_m); \
+ tmp1_m >>= 16; \
+ tmp2_m >>= 16; \
+ tmp1_m = (tmp1_m * sinpi8_sqrt2_m) >> 16; \
+ tmp2_m = (tmp2_m * sinpi8_sqrt2_m) >> 16; \
+ out_m = __msa_pckev_h((v8i16)tmp2_m, (v8i16)tmp1_m); \
+ \
+ out_m; \
+ })
+
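+/* Both 1-D passes mirror the scalar vp8_short_idct4x4llm_c:
+     a = in0 + in2;  b = in0 - in2;
+     c = (in1 * sinpi8sqrt2 >> 16) - (in3 + (in3 * cospi8sqrt2minus1 >> 16));
+     d = (in1 + (in1 * cospi8sqrt2minus1 >> 16)) + (in3 * sinpi8sqrt2 >> 16);
+   BUTTERFLY_4 then emits a + d, b + c, b - c, a - d. */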
+#define VP8_IDCT_1D_H(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ v8i16 a1_m, b1_m, c1_m, d1_m; \
+ v8i16 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \
+ v8i16 const_cospi8sqrt2minus1_m; \
+ \
+ const_cospi8sqrt2minus1_m = __msa_fill_h(cospi8sqrt2minus1); \
+ a1_m = in0 + in2; \
+ b1_m = in0 - in2; \
+ c_tmp1_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in1); \
+ c_tmp2_m = __msa_mul_q_h(in3, const_cospi8sqrt2minus1_m); \
+ c_tmp2_m = c_tmp2_m >> 1; \
+ c_tmp2_m = in3 + c_tmp2_m; \
+ c1_m = c_tmp1_m - c_tmp2_m; \
+ d_tmp1_m = __msa_mul_q_h(in1, const_cospi8sqrt2minus1_m); \
+ d_tmp1_m = d_tmp1_m >> 1; \
+ d_tmp1_m = in1 + d_tmp1_m; \
+ d_tmp2_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in3); \
+ d1_m = d_tmp1_m + d_tmp2_m; \
+ BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
+ }
+
+#define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ v4i32 a1_m, b1_m, c1_m, d1_m; \
+ v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \
+ v4i32 const_cospi8sqrt2minus1_m, sinpi8_sqrt2_m; \
+ \
+ const_cospi8sqrt2minus1_m = __msa_fill_w(cospi8sqrt2minus1); \
+ sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2); \
+ a1_m = in0 + in2; \
+ b1_m = in0 - in2; \
+ c_tmp1_m = (in1 * sinpi8_sqrt2_m) >> 16; \
+ c_tmp2_m = in3 + ((in3 * const_cospi8sqrt2minus1_m) >> 16); \
+ c1_m = c_tmp1_m - c_tmp2_m; \
+ d_tmp1_m = in1 + ((in1 * const_cospi8sqrt2minus1_m) >> 16); \
+ d_tmp2_m = (in3 * sinpi8_sqrt2_m) >> 16; \
+ d1_m = d_tmp1_m + d_tmp2_m; \
+ BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
+ }
+
+static void idct4x4_addblk_msa(int16_t *input, uint8_t *pred,
+ int32_t pred_stride, uint8_t *dest,
+ int32_t dest_stride) {
+ v8i16 input0, input1;
+ v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
+ v4i32 res0, res1, res2, res3;
+ v16i8 zero = { 0 };
+ v16i8 pred0, pred1, pred2, pred3;
+
+ LD_SH2(input, 8, input0, input1);
+ UNPCK_SH_SW(input0, in0, in1);
+ UNPCK_SH_SW(input1, in2, in3);
+ VP8_IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
+ TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
+ VP8_IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3);
+ SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);
+ TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
+ LD_SB4(pred, pred_stride, pred0, pred1, pred2, pred3);
+ ILVR_B4_SW(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1,
+ res2, res3);
+ ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1, res2,
+ res3);
+ ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
+ res0 = CLIP_SW_0_255(res0);
+ res1 = CLIP_SW_0_255(res1);
+ res2 = CLIP_SW_0_255(res2);
+ res3 = CLIP_SW_0_255(res3);
+ PCKEV_B2_SW(res0, res1, res2, res3, vt0, vt1);
+ res0 = (v4i32)__msa_pckev_b((v16i8)vt0, (v16i8)vt1);
+ ST4x4_UB(res0, res0, 3, 2, 1, 0, dest, dest_stride);
+}
+
+static void idct4x4_addconst_msa(int16_t in_dc, uint8_t *pred,
+ int32_t pred_stride, uint8_t *dest,
+ int32_t dest_stride) {
+ v8i16 vec, res0, res1, res2, res3, dst0, dst1;
+ v16i8 zero = { 0 };
+ v16i8 pred0, pred1, pred2, pred3;
+
+ vec = __msa_fill_h(in_dc);
+ vec = __msa_srari_h(vec, 3);
+ LD_SB4(pred, pred_stride, pred0, pred1, pred2, pred3);
+ ILVR_B4_SH(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1,
+ res2, res3);
+ ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
+ CLIP_SH4_0_255(res0, res1, res2, res3);
+ PCKEV_B2_SH(res1, res0, res3, res2, dst0, dst1);
+ dst0 = (v8i16)__msa_pckev_w((v4i32)dst1, (v4i32)dst0);
+ ST4x4_UB(dst0, dst0, 0, 1, 2, 3, dest, dest_stride);
+}
+
+void vp8_short_inv_walsh4x4_msa(int16_t *input, int16_t *mb_dqcoeff) {
+ v8i16 input0, input1, tmp0, tmp1, tmp2, tmp3, out0, out1;
+ const v8i16 mask0 = { 0, 1, 2, 3, 8, 9, 10, 11 };
+ const v8i16 mask1 = { 4, 5, 6, 7, 12, 13, 14, 15 };
+ const v8i16 mask2 = { 0, 4, 8, 12, 1, 5, 9, 13 };
+ const v8i16 mask3 = { 3, 7, 11, 15, 2, 6, 10, 14 };
+
+ LD_SH2(input, 8, input0, input1);
+ input1 = (v8i16)__msa_sldi_b((v16i8)input1, (v16i8)input1, 8);
+ tmp0 = input0 + input1;
+ tmp1 = input0 - input1;
+ VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3);
+ out0 = tmp2 + tmp3;
+ out1 = tmp2 - tmp3;
+ VSHF_H2_SH(out0, out1, out0, out1, mask2, mask3, input0, input1);
+ tmp0 = input0 + input1;
+ tmp1 = input0 - input1;
+ VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3);
+ tmp0 = tmp2 + tmp3;
+ tmp1 = tmp2 - tmp3;
+ ADD2(tmp0, 3, tmp1, 3, out0, out1);
+ out0 >>= 3;
+ out1 >>= 3;
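+  /* Scatter the 16 inverse-WHT results to the DC slot of each Y block's
+     dqcoeff (consecutive blocks are 16 coefficients apart). */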
+ mb_dqcoeff[0] = __msa_copy_s_h(out0, 0);
+ mb_dqcoeff[16] = __msa_copy_s_h(out0, 4);
+ mb_dqcoeff[32] = __msa_copy_s_h(out1, 0);
+ mb_dqcoeff[48] = __msa_copy_s_h(out1, 4);
+ mb_dqcoeff[64] = __msa_copy_s_h(out0, 1);
+ mb_dqcoeff[80] = __msa_copy_s_h(out0, 5);
+ mb_dqcoeff[96] = __msa_copy_s_h(out1, 1);
+ mb_dqcoeff[112] = __msa_copy_s_h(out1, 5);
+ mb_dqcoeff[128] = __msa_copy_s_h(out0, 2);
+ mb_dqcoeff[144] = __msa_copy_s_h(out0, 6);
+ mb_dqcoeff[160] = __msa_copy_s_h(out1, 2);
+ mb_dqcoeff[176] = __msa_copy_s_h(out1, 6);
+ mb_dqcoeff[192] = __msa_copy_s_h(out0, 3);
+ mb_dqcoeff[208] = __msa_copy_s_h(out0, 7);
+ mb_dqcoeff[224] = __msa_copy_s_h(out1, 3);
+ mb_dqcoeff[240] = __msa_copy_s_h(out1, 7);
+}
+
+static void dequant_idct4x4_addblk_msa(int16_t *input, int16_t *dequant_input,
+ uint8_t *dest, int32_t dest_stride) {
+ v8i16 input0, input1, dequant_in0, dequant_in1, mul0, mul1;
+ v8i16 in0, in1, in2, in3, hz0_h, hz1_h, hz2_h, hz3_h;
+ v16u8 dest0, dest1, dest2, dest3;
+ v4i32 hz0_w, hz1_w, hz2_w, hz3_w, vt0, vt1, vt2, vt3, res0, res1, res2, res3;
+ v2i64 zero = { 0 };
+
+ LD_SH2(input, 8, input0, input1);
+ LD_SH2(dequant_input, 8, dequant_in0, dequant_in1);
+ MUL2(input0, dequant_in0, input1, dequant_in1, mul0, mul1);
+ PCKEV_D2_SH(zero, mul0, zero, mul1, in0, in2);
+ PCKOD_D2_SH(zero, mul0, zero, mul1, in1, in3);
+ VP8_IDCT_1D_H(in0, in1, in2, in3, hz0_h, hz1_h, hz2_h, hz3_h);
+ PCKEV_D2_SH(hz1_h, hz0_h, hz3_h, hz2_h, mul0, mul1);
+ UNPCK_SH_SW(mul0, hz0_w, hz1_w);
+ UNPCK_SH_SW(mul1, hz2_w, hz3_w);
+ TRANSPOSE4x4_SW_SW(hz0_w, hz1_w, hz2_w, hz3_w, hz0_w, hz1_w, hz2_w, hz3_w);
+ VP8_IDCT_1D_W(hz0_w, hz1_w, hz2_w, hz3_w, vt0, vt1, vt2, vt3);
+ SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);
+ TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
+ LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3);
+ ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1,
+ res2, res3);
+ ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1, res2,
+ res3);
+ ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
+ res0 = CLIP_SW_0_255(res0);
+ res1 = CLIP_SW_0_255(res1);
+ res2 = CLIP_SW_0_255(res2);
+ res3 = CLIP_SW_0_255(res3);
+ PCKEV_B2_SW(res0, res1, res2, res3, vt0, vt1);
+ res0 = (v4i32)__msa_pckev_b((v16i8)vt0, (v16i8)vt1);
+ ST4x4_UB(res0, res0, 3, 2, 1, 0, dest, dest_stride);
+}
+
+static void dequant_idct4x4_addblk_2x_msa(int16_t *input,
+ int16_t *dequant_input, uint8_t *dest,
+ int32_t dest_stride) {
+ v16u8 dest0, dest1, dest2, dest3;
+ v8i16 in0, in1, in2, in3, mul0, mul1, mul2, mul3, dequant_in0, dequant_in1;
+ v8i16 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3, res0, res1, res2, res3;
+ v4i32 hz0l, hz1l, hz2l, hz3l, hz0r, hz1r, hz2r, hz3r;
+ v4i32 vt0l, vt1l, vt2l, vt3l, vt0r, vt1r, vt2r, vt3r;
+ v16i8 zero = { 0 };
+
+ LD_SH4(input, 8, in0, in1, in2, in3);
+ LD_SH2(dequant_input, 8, dequant_in0, dequant_in1);
+ MUL4(in0, dequant_in0, in1, dequant_in1, in2, dequant_in0, in3, dequant_in1,
+ mul0, mul1, mul2, mul3);
+ PCKEV_D2_SH(mul2, mul0, mul3, mul1, in0, in2);
+ PCKOD_D2_SH(mul2, mul0, mul3, mul1, in1, in3);
+ VP8_IDCT_1D_H(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
+ TRANSPOSE_TWO_4x4_H(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
+ UNPCK_SH_SW(hz0, hz0r, hz0l);
+ UNPCK_SH_SW(hz1, hz1r, hz1l);
+ UNPCK_SH_SW(hz2, hz2r, hz2l);
+ UNPCK_SH_SW(hz3, hz3r, hz3l);
+ VP8_IDCT_1D_W(hz0l, hz1l, hz2l, hz3l, vt0l, vt1l, vt2l, vt3l);
+ SRARI_W4_SW(vt0l, vt1l, vt2l, vt3l, 3);
+ VP8_IDCT_1D_W(hz0r, hz1r, hz2r, hz3r, vt0r, vt1r, vt2r, vt3r);
+ SRARI_W4_SW(vt0r, vt1r, vt2r, vt3r, 3);
+ PCKEV_H4_SH(vt0l, vt0r, vt1l, vt1r, vt2l, vt2r, vt3l, vt3r, vt0, vt1, vt2,
+ vt3);
+ TRANSPOSE_TWO_4x4_H(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
+ LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3);
+ ILVR_B4_SH(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1,
+ res2, res3);
+ ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
+ CLIP_SH4_0_255(res0, res1, res2, res3);
+ PCKEV_B2_SW(res1, res0, res3, res2, vt0l, vt1l);
+ ST8x4_UB(vt0l, vt1l, dest, dest_stride);
+
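+  /* Zero the 32 coefficients (64 bytes) of the two blocks just
+     reconstructed. */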
+ __asm__ __volatile__(
+ "sw $zero, 0(%[input]) \n\t"
+ "sw $zero, 4(%[input]) \n\t"
+ "sw $zero, 8(%[input]) \n\t"
+ "sw $zero, 12(%[input]) \n\t"
+ "sw $zero, 16(%[input]) \n\t"
+ "sw $zero, 20(%[input]) \n\t"
+ "sw $zero, 24(%[input]) \n\t"
+ "sw $zero, 28(%[input]) \n\t"
+ "sw $zero, 32(%[input]) \n\t"
+ "sw $zero, 36(%[input]) \n\t"
+ "sw $zero, 40(%[input]) \n\t"
+ "sw $zero, 44(%[input]) \n\t"
+ "sw $zero, 48(%[input]) \n\t"
+ "sw $zero, 52(%[input]) \n\t"
+ "sw $zero, 56(%[input]) \n\t"
+ "sw $zero, 60(%[input]) \n\t" ::
+
+ [input] "r"(input));
+}
+
+static void dequant_idct_addconst_2x_msa(int16_t *input, int16_t *dequant_input,
+ uint8_t *dest, int32_t dest_stride) {
+ v8i16 input_dc0, input_dc1, vec, res0, res1, res2, res3;
+ v16u8 dest0, dest1, dest2, dest3;
+ v16i8 zero = { 0 };
+
+ input_dc0 = __msa_fill_h(input[0] * dequant_input[0]);
+ input_dc1 = __msa_fill_h(input[16] * dequant_input[0]);
+ SRARI_H2_SH(input_dc0, input_dc1, 3);
+ vec = (v8i16)__msa_pckev_d((v2i64)input_dc1, (v2i64)input_dc0);
+ input[0] = 0;
+ input[16] = 0;
+ LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3);
+ ILVR_B4_SH(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1,
+ res2, res3);
+ ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
+ CLIP_SH4_0_255(res0, res1, res2, res3);
+ PCKEV_B2_SH(res1, res0, res3, res2, res0, res1);
+ ST8x4_UB(res0, res1, dest, dest_stride);
+}
+
+void vp8_short_idct4x4llm_msa(int16_t *input, uint8_t *pred_ptr,
+ int32_t pred_stride, uint8_t *dst_ptr,
+ int32_t dst_stride) {
+ idct4x4_addblk_msa(input, pred_ptr, pred_stride, dst_ptr, dst_stride);
+}
+
+void vp8_dc_only_idct_add_msa(int16_t input_dc, uint8_t *pred_ptr,
+ int32_t pred_stride, uint8_t *dst_ptr,
+ int32_t dst_stride) {
+ idct4x4_addconst_msa(input_dc, pred_ptr, pred_stride, dst_ptr, dst_stride);
+}
+
+void vp8_dequantize_b_msa(BLOCKD *d, int16_t *DQC) {
+ v8i16 dqc0, dqc1, q0, q1, dq0, dq1;
+
+ LD_SH2(DQC, 8, dqc0, dqc1);
+ LD_SH2(d->qcoeff, 8, q0, q1);
+ MUL2(dqc0, q0, dqc1, q1, dq0, dq1);
+ ST_SH2(dq0, dq1, d->dqcoeff, 8);
+}
+
+void vp8_dequant_idct_add_msa(int16_t *input, int16_t *dq, uint8_t *dest,
+ int32_t stride) {
+ dequant_idct4x4_addblk_msa(input, dq, dest, stride);
+
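+  /* Zero the 16 coefficients (32 bytes) of the reconstructed block. */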
+ __asm__ __volatile__(
+ "sw $zero, 0(%[input]) \n\t"
+ "sw $zero, 4(%[input]) \n\t"
+ "sw $zero, 8(%[input]) \n\t"
+ "sw $zero, 12(%[input]) \n\t"
+ "sw $zero, 16(%[input]) \n\t"
+ "sw $zero, 20(%[input]) \n\t"
+ "sw $zero, 24(%[input]) \n\t"
+ "sw $zero, 28(%[input]) \n\t"
+
+ :
+ : [input] "r"(input));
+}
+
+void vp8_dequant_idct_add_y_block_msa(int16_t *q, int16_t *dq, uint8_t *dst,
+ int32_t stride, char *eobs) {
+ int16_t *eobs_h = (int16_t *)eobs;
+ uint8_t i;
+
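+  /* eobs carries one end-of-block count per 4x4 block, read two blocks at
+     a time: a byte of 0 means skip, 1 means only the DC coefficient
+     survived (a constant add is enough), and any bit of 0xfe means AC
+     coefficients are present and the full IDCT is needed. */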
+ for (i = 4; i--;) {
+ if (eobs_h[0]) {
+ if (eobs_h[0] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_msa(q, dq, dst, stride);
+ } else {
+ dequant_idct_addconst_2x_msa(q, dq, dst, stride);
+ }
+ }
+
+ q += 32;
+
+ if (eobs_h[1]) {
+ if (eobs_h[1] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_msa(q, dq, dst + 8, stride);
+ } else {
+ dequant_idct_addconst_2x_msa(q, dq, dst + 8, stride);
+ }
+ }
+
+ q += 32;
+ dst += (4 * stride);
+ eobs_h += 2;
+ }
+}
+
+void vp8_dequant_idct_add_uv_block_msa(int16_t *q, int16_t *dq, uint8_t *dst_u,
+ uint8_t *dst_v, int32_t stride,
+ char *eobs) {
+ int16_t *eobs_h = (int16_t *)eobs;
+
+ if (eobs_h[0]) {
+ if (eobs_h[0] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_msa(q, dq, dst_u, stride);
+ } else {
+ dequant_idct_addconst_2x_msa(q, dq, dst_u, stride);
+ }
+ }
+
+ q += 32;
+ dst_u += (stride * 4);
+
+ if (eobs_h[1]) {
+ if (eobs_h[1] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_msa(q, dq, dst_u, stride);
+ } else {
+ dequant_idct_addconst_2x_msa(q, dq, dst_u, stride);
+ }
+ }
+
+ q += 32;
+
+ if (eobs_h[2]) {
+ if (eobs_h[2] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_msa(q, dq, dst_v, stride);
+ } else {
+ dequant_idct_addconst_2x_msa(q, dq, dst_v, stride);
+ }
+ }
+
+ q += 32;
+ dst_v += (stride * 4);
+
+ if (eobs_h[3]) {
+ if (eobs_h[3] & 0xfefe) {
+ dequant_idct4x4_addblk_2x_msa(q, dq, dst_v, stride);
+ } else {
+ dequant_idct_addconst_2x_msa(q, dq, dst_v, stride);
+ }
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/mips/msa/loopfilter_filters_msa.c b/media/libvpx/libvpx/vp8/common/mips/msa/loopfilter_filters_msa.c
new file mode 100644
index 0000000000..98a4fc09a3
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/msa/loopfilter_filters_msa.c
@@ -0,0 +1,709 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/loopfilter.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+
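+/* Simple-filter mask: the edge is filtered where
+   2 * |p0 - q0| + |p1 - q1| / 2 <= b_limit, computed with saturating byte
+   arithmetic. */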
+#define VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask) \
+ { \
+ v16u8 p1_a_sub_q1, p0_a_sub_q0; \
+ \
+ p0_a_sub_q0 = __msa_asub_u_b(p0, q0); \
+ p1_a_sub_q1 = __msa_asub_u_b(p1, q1); \
+ p1_a_sub_q1 = (v16u8)__msa_srli_b((v16i8)p1_a_sub_q1, 1); \
+ p0_a_sub_q0 = __msa_adds_u_b(p0_a_sub_q0, p0_a_sub_q0); \
+ mask = __msa_adds_u_b(p0_a_sub_q0, p1_a_sub_q1); \
+ mask = ((v16u8)mask <= b_limit); \
+ }
+
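+/* Normal inner-edge filter: w = clip(clip(p1 - q1) & hev + 3 * (q0 - p0))
+   & mask; q0 -= (w + 4) >> 3 and p0 += (w + 3) >> 3, and where hev is not
+   set, p1/q1 move by the rounded half of the q0 adjustment. */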
+#define VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev) \
+ { \
+ v16i8 p1_m, p0_m, q0_m, q1_m, filt, q0_sub_p0, t1, t2; \
+ const v16i8 cnst4b = __msa_ldi_b(4); \
+ const v16i8 cnst3b = __msa_ldi_b(3); \
+ \
+ p1_m = (v16i8)__msa_xori_b(p1, 0x80); \
+ p0_m = (v16i8)__msa_xori_b(p0, 0x80); \
+ q0_m = (v16i8)__msa_xori_b(q0, 0x80); \
+ q1_m = (v16i8)__msa_xori_b(q1, 0x80); \
+ \
+ filt = __msa_subs_s_b(p1_m, q1_m); \
+ filt &= hev; \
+ q0_sub_p0 = __msa_subs_s_b(q0_m, p0_m); \
+ filt = __msa_adds_s_b(filt, q0_sub_p0); \
+ filt = __msa_adds_s_b(filt, q0_sub_p0); \
+ filt = __msa_adds_s_b(filt, q0_sub_p0); \
+ filt &= mask; \
+ t1 = __msa_adds_s_b(filt, cnst4b); \
+ t1 >>= cnst3b; \
+ t2 = __msa_adds_s_b(filt, cnst3b); \
+ t2 >>= cnst3b; \
+ q0_m = __msa_subs_s_b(q0_m, t1); \
+ q0 = __msa_xori_b((v16u8)q0_m, 0x80); \
+ p0_m = __msa_adds_s_b(p0_m, t2); \
+ p0 = __msa_xori_b((v16u8)p0_m, 0x80); \
+ filt = __msa_srari_b(t1, 1); \
+ hev = __msa_xori_b(hev, 0xff); \
+ filt &= hev; \
+ q1_m = __msa_subs_s_b(q1_m, filt); \
+ q1 = __msa_xori_b((v16u8)q1_m, 0x80); \
+ p1_m = __msa_adds_s_b(p1_m, filt); \
+ p1 = __msa_xori_b((v16u8)p1_m, 0x80); \
+ }
+
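+/* Simple-loopfilter variant of the above: no hev handling, and only
+   p0/q0 are adjusted. */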
+#define VP8_SIMPLE_FILT(p1_in, p0_in, q0_in, q1_in, mask) \
+ { \
+ v16i8 p1_m, p0_m, q0_m, q1_m, filt, filt1, filt2; \
+ v16i8 q0_sub_p0; \
+ const v16i8 cnst4b = __msa_ldi_b(4); \
+ const v16i8 cnst3b = __msa_ldi_b(3); \
+ \
+ p1_m = (v16i8)__msa_xori_b(p1_in, 0x80); \
+ p0_m = (v16i8)__msa_xori_b(p0_in, 0x80); \
+ q0_m = (v16i8)__msa_xori_b(q0_in, 0x80); \
+ q1_m = (v16i8)__msa_xori_b(q1_in, 0x80); \
+ \
+ filt = __msa_subs_s_b(p1_m, q1_m); \
+ q0_sub_p0 = __msa_subs_s_b(q0_m, p0_m); \
+ filt = __msa_adds_s_b(filt, q0_sub_p0); \
+ filt = __msa_adds_s_b(filt, q0_sub_p0); \
+ filt = __msa_adds_s_b(filt, q0_sub_p0); \
+ filt &= mask; \
+ filt1 = __msa_adds_s_b(filt, cnst4b); \
+ filt1 >>= cnst3b; \
+ filt2 = __msa_adds_s_b(filt, cnst3b); \
+ filt2 >>= cnst3b; \
+ q0_m = __msa_subs_s_b(q0_m, filt1); \
+ p0_m = __msa_adds_s_b(p0_m, filt2); \
+ q0_in = __msa_xori_b((v16u8)q0_m, 0x80); \
+ p0_in = __msa_xori_b((v16u8)p0_m, 0x80); \
+ }
+
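+/* Macroblock (wide) filter: high-variance pixels get the normal 4-tap
+   adjustment; elsewhere u = (9 * w + 63) >> 7, (18 * w + 63) >> 7 and
+   (27 * w + 63) >> 7 are applied to the p2/q2, p1/q1 and p0/q0 pairs. */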
+#define VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) \
+ { \
+ v16i8 p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \
+ v16i8 u, filt, t1, t2, filt_sign, q0_sub_p0; \
+ v8i16 filt_r, filt_l, u_r, u_l; \
+ v8i16 temp0, temp1, temp2, temp3; \
+ const v16i8 cnst4b = __msa_ldi_b(4); \
+ const v16i8 cnst3b = __msa_ldi_b(3); \
+ const v8i16 cnst9h = __msa_ldi_h(9); \
+ const v8i16 cnst63h = __msa_ldi_h(63); \
+ \
+ p2_m = (v16i8)__msa_xori_b(p2, 0x80); \
+ p1_m = (v16i8)__msa_xori_b(p1, 0x80); \
+ p0_m = (v16i8)__msa_xori_b(p0, 0x80); \
+ q0_m = (v16i8)__msa_xori_b(q0, 0x80); \
+ q1_m = (v16i8)__msa_xori_b(q1, 0x80); \
+ q2_m = (v16i8)__msa_xori_b(q2, 0x80); \
+ \
+ filt = __msa_subs_s_b(p1_m, q1_m); \
+ q0_sub_p0 = __msa_subs_s_b(q0_m, p0_m); \
+ filt = __msa_adds_s_b(filt, q0_sub_p0); \
+ filt = __msa_adds_s_b(filt, q0_sub_p0); \
+ filt = __msa_adds_s_b(filt, q0_sub_p0); \
+ filt &= mask; \
+ \
+ t2 = filt & hev; \
+ hev = __msa_xori_b(hev, 0xff); \
+ filt &= hev; \
+ t1 = __msa_adds_s_b(t2, cnst4b); \
+ t1 >>= cnst3b; \
+ t2 = __msa_adds_s_b(t2, cnst3b); \
+ t2 >>= cnst3b; \
+ q0_m = __msa_subs_s_b(q0_m, t1); \
+ p0_m = __msa_adds_s_b(p0_m, t2); \
+ filt_sign = __msa_clti_s_b(filt, 0); \
+ ILVRL_B2_SH(filt_sign, filt, filt_r, filt_l); \
+ temp0 = filt_r * cnst9h; \
+ temp1 = temp0 + cnst63h; \
+ temp2 = filt_l * cnst9h; \
+ temp3 = temp2 + cnst63h; \
+ \
+ u_r = temp1 >> 7; \
+ u_r = __msa_sat_s_h(u_r, 7); \
+ u_l = temp3 >> 7; \
+ u_l = __msa_sat_s_h(u_l, 7); \
+ u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r); \
+ q2_m = __msa_subs_s_b(q2_m, u); \
+ p2_m = __msa_adds_s_b(p2_m, u); \
+ q2 = __msa_xori_b((v16u8)q2_m, 0x80); \
+ p2 = __msa_xori_b((v16u8)p2_m, 0x80); \
+ \
+ temp1 += temp0; \
+ temp3 += temp2; \
+ \
+ u_r = temp1 >> 7; \
+ u_r = __msa_sat_s_h(u_r, 7); \
+ u_l = temp3 >> 7; \
+ u_l = __msa_sat_s_h(u_l, 7); \
+ u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r); \
+ q1_m = __msa_subs_s_b(q1_m, u); \
+ p1_m = __msa_adds_s_b(p1_m, u); \
+ q1 = __msa_xori_b((v16u8)q1_m, 0x80); \
+ p1 = __msa_xori_b((v16u8)p1_m, 0x80); \
+ \
+ temp1 += temp0; \
+ temp3 += temp2; \
+ \
+ u_r = temp1 >> 7; \
+ u_r = __msa_sat_s_h(u_r, 7); \
+ u_l = temp3 >> 7; \
+ u_l = __msa_sat_s_h(u_l, 7); \
+ u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r); \
+ q0_m = __msa_subs_s_b(q0_m, u); \
+ p0_m = __msa_adds_s_b(p0_m, u); \
+ q0 = __msa_xori_b((v16u8)q0_m, 0x80); \
+ p0 = __msa_xori_b((v16u8)p0_m, 0x80); \
+ }
+
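+/* Builds the normal filter mask (neighbour deltas within limit and
+   2 * |p0 - q0| + |p1 - q1| / 2 within b_limit) and the high-edge-variance
+   flag (|p1 - p0| or |q1 - q0| above thresh). */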
+#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
+ limit_in, b_limit_in, thresh_in, hev_out, mask_out, \
+ flat_out) \
+ { \
+ v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \
+ v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \
+ \
+ p3_asub_p2_m = __msa_asub_u_b((p3_in), (p2_in)); \
+ p2_asub_p1_m = __msa_asub_u_b((p2_in), (p1_in)); \
+ p1_asub_p0_m = __msa_asub_u_b((p1_in), (p0_in)); \
+ q1_asub_q0_m = __msa_asub_u_b((q1_in), (q0_in)); \
+ q2_asub_q1_m = __msa_asub_u_b((q2_in), (q1_in)); \
+ q3_asub_q2_m = __msa_asub_u_b((q3_in), (q2_in)); \
+ p0_asub_q0_m = __msa_asub_u_b((p0_in), (q0_in)); \
+ p1_asub_q1_m = __msa_asub_u_b((p1_in), (q1_in)); \
+ flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m); \
+ hev_out = (thresh_in) < (v16u8)flat_out; \
+ p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m); \
+ p1_asub_q1_m >>= 1; \
+ p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m); \
+ mask_out = (b_limit_in) < p0_asub_q0_m; \
+ mask_out = __msa_max_u_b(flat_out, mask_out); \
+ p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m); \
+ mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out); \
+ q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m); \
+ mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out); \
+ mask_out = (limit_in) < (v16u8)mask_out; \
+ mask_out = __msa_xori_b(mask_out, 0xff); \
+ }
+
+#define VP8_ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) \
+ { \
+ uint16_t tmp0_h; \
+ uint32_t tmp0_w; \
+ \
+ tmp0_w = __msa_copy_u_w((v4i32)in0, in0_idx); \
+ tmp0_h = __msa_copy_u_h((v8i16)in1, in1_idx); \
+ SW(tmp0_w, pdst); \
+ SH(tmp0_h, pdst + stride); \
+ }
+
+static void loop_filter_horizontal_4_dual_msa(uint8_t *src, int32_t pitch,
+ const uint8_t *b_limit0_ptr,
+ const uint8_t *limit0_ptr,
+ const uint8_t *thresh0_ptr,
+ const uint8_t *b_limit1_ptr,
+ const uint8_t *limit1_ptr,
+ const uint8_t *thresh1_ptr) {
+ v16u8 mask, hev, flat;
+ v16u8 thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+
+ LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);
+ thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr);
+ thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr);
+ thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);
+
+ b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr);
+ b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr);
+ b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);
+
+ limit0 = (v16u8)__msa_fill_b(*limit0_ptr);
+ limit1 = (v16u8)__msa_fill_b(*limit1_ptr);
+ limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
+ mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch);
+}
+
+static void loop_filter_vertical_4_dual_msa(uint8_t *src, int32_t pitch,
+ const uint8_t *b_limit0_ptr,
+ const uint8_t *limit0_ptr,
+ const uint8_t *thresh0_ptr,
+ const uint8_t *b_limit1_ptr,
+ const uint8_t *limit1_ptr,
+ const uint8_t *thresh1_ptr) {
+ v16u8 mask, hev, flat;
+ v16u8 thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
+ v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+
+ LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
+ LD_UB8(src - 4 + (8 * pitch), pitch, row8, row9, row10, row11, row12, row13,
+ row14, row15);
+ TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+ row9, row10, row11, row12, row13, row14, row15, p3, p2,
+ p1, p0, q0, q1, q2, q3);
+
+ thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr);
+ thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr);
+ thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);
+
+ b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr);
+ b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr);
+ b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);
+
+ limit0 = (v16u8)__msa_fill_b(*limit0_ptr);
+ limit1 = (v16u8)__msa_fill_b(*limit1_ptr);
+ limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
+ mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+ ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
+ ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5);
+
+ src -= 2;
+ ST4x8_UB(tmp2, tmp3, src, pitch);
+ src += (8 * pitch);
+ ST4x8_UB(tmp4, tmp5, src, pitch);
+}
+
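+/* Macroblock-edge horizontal filter for the 16-pixel-wide luma plane: eight
+   rows around the edge are loaded and up to three pixels on each side
+   (p2..q2) may be rewritten. */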
+static void mbloop_filter_horizontal_edge_y_msa(uint8_t *src, int32_t pitch,
+ const uint8_t b_limit_in,
+ const uint8_t limit_in,
+ const uint8_t thresh_in) {
+ uint8_t *temp_src;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+
+ b_limit = (v16u8)__msa_fill_b(b_limit_in);
+ limit = (v16u8)__msa_fill_b(limit_in);
+ thresh = (v16u8)__msa_fill_b(thresh_in);
+ temp_src = src - (pitch << 2);
+ LD_UB8(temp_src, pitch, p3, p2, p1, p0, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+ temp_src = src - 3 * pitch;
+ ST_UB4(p2, p1, p0, q0, temp_src, pitch);
+ temp_src += (4 * pitch);
+ ST_UB2(q1, q2, temp_src, pitch);
+}
+
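+/* Chroma macroblock-edge horizontal filter: the 8-pixel-wide u and v rows
+   are packed into one 16-byte vector (u in the low half, v in the high
+   half) so both planes are filtered in a single pass, then split back out
+   with 64-bit element copies. */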
+static void mbloop_filter_horizontal_edge_uv_msa(uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch,
+ const uint8_t b_limit_in,
+ const uint8_t limit_in,
+ const uint8_t thresh_in) {
+ uint8_t *temp_src;
+ uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+ v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
+ v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
+
+ b_limit = (v16u8)__msa_fill_b(b_limit_in);
+ limit = (v16u8)__msa_fill_b(limit_in);
+ thresh = (v16u8)__msa_fill_b(thresh_in);
+
+ temp_src = src_u - (pitch << 2);
+ LD_UB8(temp_src, pitch, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u);
+ temp_src = src_v - (pitch << 2);
+ LD_UB8(temp_src, pitch, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v);
+
+ ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0);
+ ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ p2_d = __msa_copy_u_d((v2i64)p2, 0);
+ p1_d = __msa_copy_u_d((v2i64)p1, 0);
+ p0_d = __msa_copy_u_d((v2i64)p0, 0);
+ q0_d = __msa_copy_u_d((v2i64)q0, 0);
+ q1_d = __msa_copy_u_d((v2i64)q1, 0);
+ q2_d = __msa_copy_u_d((v2i64)q2, 0);
+ src_u -= (pitch * 3);
+ SD4(p2_d, p1_d, p0_d, q0_d, src_u, pitch);
+ src_u += 4 * pitch;
+ SD(q1_d, src_u);
+ src_u += pitch;
+ SD(q2_d, src_u);
+
+ p2_d = __msa_copy_u_d((v2i64)p2, 1);
+ p1_d = __msa_copy_u_d((v2i64)p1, 1);
+ p0_d = __msa_copy_u_d((v2i64)p0, 1);
+ q0_d = __msa_copy_u_d((v2i64)q0, 1);
+ q1_d = __msa_copy_u_d((v2i64)q1, 1);
+ q2_d = __msa_copy_u_d((v2i64)q2, 1);
+ src_v -= (pitch * 3);
+ SD4(p2_d, p1_d, p0_d, q0_d, src_v, pitch);
+ src_v += 4 * pitch;
+ SD(q1_d, src_v);
+ src_v += pitch;
+ SD(q2_d, src_v);
+}
+
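+/* Luma macroblock-edge vertical filter: 16 rows are transposed into pixel
+   vectors, filtered, and written back one row at a time with 6-byte stores
+   since p2..q2 can all change. */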
+static void mbloop_filter_vertical_edge_y_msa(uint8_t *src, int32_t pitch,
+ const uint8_t b_limit_in,
+ const uint8_t limit_in,
+ const uint8_t thresh_in) {
+ uint8_t *temp_src;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+ v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ v16u8 row9, row10, row11, row12, row13, row14, row15;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ b_limit = (v16u8)__msa_fill_b(b_limit_in);
+ limit = (v16u8)__msa_fill_b(limit_in);
+ thresh = (v16u8)__msa_fill_b(thresh_in);
+ temp_src = src - 4;
+ LD_UB8(temp_src, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
+ temp_src += (8 * pitch);
+ LD_UB8(temp_src, pitch, row8, row9, row10, row11, row12, row13, row14, row15);
+ TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+ row9, row10, row11, row12, row13, row14, row15, p3, p2,
+ p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+ ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
+ ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7);
+ ILVRL_B2_SH(q2, q1, tmp2, tmp5);
+
+ temp_src = src - 3;
+ VP8_ST6x1_UB(tmp3, 0, tmp2, 0, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp3, 1, tmp2, 1, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp3, 2, tmp2, 2, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp3, 3, tmp2, 3, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp4, 0, tmp2, 4, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp4, 1, tmp2, 5, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp4, 2, tmp2, 6, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp4, 3, tmp2, 7, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4);
+ temp_src += pitch;
+ VP8_ST6x1_UB(tmp7, 3, tmp5, 7, temp_src, 4);
+}
+
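+/* Chroma macroblock-edge vertical filter: 8 u rows and 8 v rows share one
+   16x8 transpose; results are written back per plane with 6-byte stores. */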
+static void mbloop_filter_vertical_edge_uv_msa(uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch,
+ const uint8_t b_limit_in,
+ const uint8_t limit_in,
+ const uint8_t thresh_in) {
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+ v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ v16u8 row9, row10, row11, row12, row13, row14, row15;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ b_limit = (v16u8)__msa_fill_b(b_limit_in);
+ limit = (v16u8)__msa_fill_b(limit_in);
+ thresh = (v16u8)__msa_fill_b(thresh_in);
+
+ LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
+ LD_UB8(src_v - 4, pitch, row8, row9, row10, row11, row12, row13, row14,
+ row15);
+ TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+ row9, row10, row11, row12, row13, row14, row15, p3, p2,
+ p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
+ ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7);
+ ILVRL_B2_SH(q2, q1, tmp2, tmp5);
+
+ src_u -= 3;
+ VP8_ST6x1_UB(tmp3, 0, tmp2, 0, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp3, 1, tmp2, 1, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp3, 2, tmp2, 2, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp3, 3, tmp2, 3, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp4, 0, tmp2, 4, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp4, 1, tmp2, 5, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp4, 2, tmp2, 6, src_u, 4);
+ src_u += pitch;
+ VP8_ST6x1_UB(tmp4, 3, tmp2, 7, src_u, 4);
+
+ src_v -= 3;
+ VP8_ST6x1_UB(tmp6, 0, tmp5, 0, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp6, 1, tmp5, 1, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp6, 2, tmp5, 2, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp6, 3, tmp5, 3, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp7, 0, tmp5, 4, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp7, 1, tmp5, 5, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp7, 2, tmp5, 6, src_v, 4);
+ src_v += pitch;
+ VP8_ST6x1_UB(tmp7, 3, tmp5, 7, src_v, 4);
+}
+
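+/* Simple-filter horizontal edge: only the b_limit check is applied and only
+   p0/q0 are adjusted, matching VP8's simple loop filter mode. */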
+void vp8_loop_filter_simple_horizontal_edge_msa(uint8_t *src, int32_t pitch,
+ const uint8_t *b_limit_ptr) {
+ v16u8 p1, p0, q1, q0;
+ v16u8 mask, b_limit;
+
+ b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
+ LD_UB4(src - (pitch << 1), pitch, p1, p0, q0, q1);
+ VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask);
+ VP8_SIMPLE_FILT(p1, p0, q0, q1, mask);
+ ST_UB2(p0, q0, (src - pitch), pitch);
+}
+
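+/* Simple-filter vertical edge: 16 rows of 4 pixels are transposed, filtered,
+   and the two changed pixels per row are stored back as 2x4 blocks. */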
+void vp8_loop_filter_simple_vertical_edge_msa(uint8_t *src, int32_t pitch,
+ const uint8_t *b_limit_ptr) {
+ uint8_t *temp_src;
+ v16u8 p1, p0, q1, q0;
+ v16u8 mask, b_limit;
+ v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ v16u8 row9, row10, row11, row12, row13, row14, row15;
+ v8i16 tmp0, tmp1;
+
+ b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
+ temp_src = src - 2;
+ LD_UB8(temp_src, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
+ temp_src += (8 * pitch);
+ LD_UB8(temp_src, pitch, row8, row9, row10, row11, row12, row13, row14, row15);
+ TRANSPOSE16x4_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+ row9, row10, row11, row12, row13, row14, row15, p1, p0,
+ q0, q1);
+ VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask);
+ VP8_SIMPLE_FILT(p1, p0, q0, q1, mask);
+ ILVRL_B2_SH(q0, p0, tmp1, tmp0);
+
+ src -= 1;
+ ST2x4_UB(tmp1, 0, src, pitch);
+ src += 4 * pitch;
+ ST2x4_UB(tmp1, 4, src, pitch);
+ src += 4 * pitch;
+ ST2x4_UB(tmp0, 0, src, pitch);
+ src += 4 * pitch;
+ ST2x4_UB(tmp0, 4, src, pitch);
+ src += 4 * pitch;
+}
+
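+/* Chroma 4-pixel (B) horizontal edge filter: u and v are packed side by
+   side as in the macroblock variant, but only p1..q1 may change. */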
+static void loop_filter_horizontal_edge_uv_msa(uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch,
+ const uint8_t b_limit_in,
+ const uint8_t limit_in,
+ const uint8_t thresh_in) {
+ uint64_t p1_d, p0_d, q0_d, q1_d;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+ v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
+ v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
+
+ thresh = (v16u8)__msa_fill_b(thresh_in);
+ limit = (v16u8)__msa_fill_b(limit_in);
+ b_limit = (v16u8)__msa_fill_b(b_limit_in);
+
+ src_u = src_u - (pitch << 2);
+ LD_UB8(src_u, pitch, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u);
+ src_u += (5 * pitch);
+ src_v = src_v - (pitch << 2);
+ LD_UB8(src_v, pitch, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v);
+ src_v += (5 * pitch);
+
+  /* The right (low) 8 elements of each packed vector hold u pixels and
+     the left (high) 8 elements hold v pixels. */
+ ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0);
+ ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ p1_d = __msa_copy_u_d((v2i64)p1, 0);
+ p0_d = __msa_copy_u_d((v2i64)p0, 0);
+ q0_d = __msa_copy_u_d((v2i64)q0, 0);
+ q1_d = __msa_copy_u_d((v2i64)q1, 0);
+ SD4(q1_d, q0_d, p0_d, p1_d, src_u, (-pitch));
+
+ p1_d = __msa_copy_u_d((v2i64)p1, 1);
+ p0_d = __msa_copy_u_d((v2i64)p0, 1);
+ q0_d = __msa_copy_u_d((v2i64)q0, 1);
+ q1_d = __msa_copy_u_d((v2i64)q1, 1);
+ SD4(q1_d, q0_d, p0_d, p1_d, src_v, (-pitch));
+}
+
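+/* Chroma 4-pixel (B) vertical edge filter: one 16x8 transpose covers both
+   planes; the four changed pixels per row go back with 4x4 stores. */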
+static void loop_filter_vertical_edge_uv_msa(uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch,
+ const uint8_t b_limit_in,
+ const uint8_t limit_in,
+ const uint8_t thresh_in) {
+ uint8_t *temp_src_u, *temp_src_v;
+ v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
+ v16u8 mask, hev, flat, thresh, limit, b_limit;
+ v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ v16u8 row9, row10, row11, row12, row13, row14, row15;
+ v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+
+ thresh = (v16u8)__msa_fill_b(thresh_in);
+ limit = (v16u8)__msa_fill_b(limit_in);
+ b_limit = (v16u8)__msa_fill_b(b_limit_in);
+
+ LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
+ LD_UB8(src_v - 4, pitch, row8, row9, row10, row11, row12, row13, row14,
+ row15);
+ TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
+ row9, row10, row11, row12, row13, row14, row15, p3, p2,
+ p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
+ mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+ ILVR_B2_SW(p0, p1, q1, q0, tmp0, tmp1);
+ ILVRL_H2_SW(tmp1, tmp0, tmp2, tmp3);
+ tmp0 = (v4i32)__msa_ilvl_b((v16i8)p0, (v16i8)p1);
+ tmp1 = (v4i32)__msa_ilvl_b((v16i8)q1, (v16i8)q0);
+ ILVRL_H2_SW(tmp1, tmp0, tmp4, tmp5);
+
+ temp_src_u = src_u - 2;
+ ST4x4_UB(tmp2, tmp2, 0, 1, 2, 3, temp_src_u, pitch);
+ temp_src_u += 4 * pitch;
+ ST4x4_UB(tmp3, tmp3, 0, 1, 2, 3, temp_src_u, pitch);
+
+ temp_src_v = src_v - 2;
+ ST4x4_UB(tmp4, tmp4, 0, 1, 2, 3, temp_src_v, pitch);
+ temp_src_v += 4 * pitch;
+ ST4x4_UB(tmp5, tmp5, 0, 1, 2, 3, temp_src_v, pitch);
+}
+
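+/* Public entry points matching the vp8 RTCD loop filter hooks; chroma is
+   filtered only when a u plane is present. */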
+void vp8_loop_filter_mbh_msa(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch_y, int32_t pitch_u_v,
+ loop_filter_info *lpf_info_ptr) {
+ mbloop_filter_horizontal_edge_y_msa(src_y, pitch_y, *lpf_info_ptr->mblim,
+ *lpf_info_ptr->lim,
+ *lpf_info_ptr->hev_thr);
+ if (src_u) {
+ mbloop_filter_horizontal_edge_uv_msa(
+ src_u, src_v, pitch_u_v, *lpf_info_ptr->mblim, *lpf_info_ptr->lim,
+ *lpf_info_ptr->hev_thr);
+ }
+}
+
+void vp8_loop_filter_mbv_msa(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch_y, int32_t pitch_u_v,
+ loop_filter_info *lpf_info_ptr) {
+ mbloop_filter_vertical_edge_y_msa(src_y, pitch_y, *lpf_info_ptr->mblim,
+ *lpf_info_ptr->lim, *lpf_info_ptr->hev_thr);
+ if (src_u) {
+ mbloop_filter_vertical_edge_uv_msa(src_u, src_v, pitch_u_v,
+ *lpf_info_ptr->mblim, *lpf_info_ptr->lim,
+ *lpf_info_ptr->hev_thr);
+ }
+}
+
+void vp8_loop_filter_bh_msa(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch_y, int32_t pitch_u_v,
+ loop_filter_info *lpf_info_ptr) {
+ loop_filter_horizontal_4_dual_msa(src_y + 4 * pitch_y, pitch_y,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr);
+ loop_filter_horizontal_4_dual_msa(src_y + 8 * pitch_y, pitch_y,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr);
+ loop_filter_horizontal_4_dual_msa(src_y + 12 * pitch_y, pitch_y,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr);
+ if (src_u) {
+ loop_filter_horizontal_edge_uv_msa(
+ src_u + (4 * pitch_u_v), src_v + (4 * pitch_u_v), pitch_u_v,
+ *lpf_info_ptr->blim, *lpf_info_ptr->lim, *lpf_info_ptr->hev_thr);
+ }
+}
+
+void vp8_loop_filter_bv_msa(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v,
+ int32_t pitch_y, int32_t pitch_u_v,
+ loop_filter_info *lpf_info_ptr) {
+ loop_filter_vertical_4_dual_msa(src_y + 4, pitch_y, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr);
+ loop_filter_vertical_4_dual_msa(src_y + 8, pitch_y, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr);
+ loop_filter_vertical_4_dual_msa(src_y + 12, pitch_y, lpf_info_ptr->blim,
+ lpf_info_ptr->lim, lpf_info_ptr->hev_thr,
+ lpf_info_ptr->blim, lpf_info_ptr->lim,
+ lpf_info_ptr->hev_thr);
+ if (src_u) {
+ loop_filter_vertical_edge_uv_msa(src_u + 4, src_v + 4, pitch_u_v,
+ *lpf_info_ptr->blim, *lpf_info_ptr->lim,
+ *lpf_info_ptr->hev_thr);
+ }
+}
+
+void vp8_loop_filter_bhs_msa(uint8_t *src_y, int32_t pitch_y,
+ const uint8_t *b_limit_ptr) {
+ vp8_loop_filter_simple_horizontal_edge_msa(src_y + (4 * pitch_y), pitch_y,
+ b_limit_ptr);
+ vp8_loop_filter_simple_horizontal_edge_msa(src_y + (8 * pitch_y), pitch_y,
+ b_limit_ptr);
+ vp8_loop_filter_simple_horizontal_edge_msa(src_y + (12 * pitch_y), pitch_y,
+ b_limit_ptr);
+}
+
+void vp8_loop_filter_bvs_msa(uint8_t *src_y, int32_t pitch_y,
+ const uint8_t *b_limit_ptr) {
+ vp8_loop_filter_simple_vertical_edge_msa(src_y + 4, pitch_y, b_limit_ptr);
+ vp8_loop_filter_simple_vertical_edge_msa(src_y + 8, pitch_y, b_limit_ptr);
+ vp8_loop_filter_simple_vertical_edge_msa(src_y + 12, pitch_y, b_limit_ptr);
+}
diff --git a/media/libvpx/libvpx/vp8/common/mips/msa/mfqe_msa.c b/media/libvpx/libvpx/vp8/common/mips/msa/mfqe_msa.c
new file mode 100644
index 0000000000..9aac95b2fa
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/msa/mfqe_msa.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/postproc.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+
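+/* Weighted 8x8 blend used by MFQE:
+   dst = (src * src_weight + dst * dst_weight + round) >> MFQE_PRECISION,
+   where dst_weight is chosen so the two weights sum to 1 << MFQE_PRECISION.
+   Rows are processed two at a time as packed 64-bit halves. */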
+static void filter_by_weight8x8_msa(uint8_t *src_ptr, int32_t src_stride,
+ uint8_t *dst_ptr, int32_t dst_stride,
+ int32_t src_weight) {
+ int32_t dst_weight = (1 << MFQE_PRECISION) - src_weight;
+ int32_t row;
+ uint64_t src0_d, src1_d, dst0_d, dst1_d;
+ v16i8 src0 = { 0 };
+ v16i8 src1 = { 0 };
+ v16i8 dst0 = { 0 };
+ v16i8 dst1 = { 0 };
+ v8i16 src_wt, dst_wt, res_h_r, res_h_l, src_r, src_l, dst_r, dst_l;
+
+ src_wt = __msa_fill_h(src_weight);
+ dst_wt = __msa_fill_h(dst_weight);
+
+ for (row = 2; row--;) {
+ LD2(src_ptr, src_stride, src0_d, src1_d);
+ src_ptr += (2 * src_stride);
+ LD2(dst_ptr, dst_stride, dst0_d, dst1_d);
+ INSERT_D2_SB(src0_d, src1_d, src0);
+ INSERT_D2_SB(dst0_d, dst1_d, dst0);
+
+ LD2(src_ptr, src_stride, src0_d, src1_d);
+ src_ptr += (2 * src_stride);
+ LD2((dst_ptr + 2 * dst_stride), dst_stride, dst0_d, dst1_d);
+ INSERT_D2_SB(src0_d, src1_d, src1);
+ INSERT_D2_SB(dst0_d, dst1_d, dst1);
+
+ UNPCK_UB_SH(src0, src_r, src_l);
+ UNPCK_UB_SH(dst0, dst_r, dst_l);
+ res_h_r = (src_r * src_wt);
+ res_h_r += (dst_r * dst_wt);
+ res_h_l = (src_l * src_wt);
+ res_h_l += (dst_l * dst_wt);
+ SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION);
+ dst0 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r);
+ ST8x2_UB(dst0, dst_ptr, dst_stride);
+ dst_ptr += (2 * dst_stride);
+
+ UNPCK_UB_SH(src1, src_r, src_l);
+ UNPCK_UB_SH(dst1, dst_r, dst_l);
+ res_h_r = (src_r * src_wt);
+ res_h_r += (dst_r * dst_wt);
+ res_h_l = (src_l * src_wt);
+ res_h_l += (dst_l * dst_wt);
+ SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION);
+ dst1 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r);
+ ST8x2_UB(dst1, dst_ptr, dst_stride);
+ dst_ptr += (2 * dst_stride);
+ }
+}
+
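+/* 16x16 variant of the weighted blend: four full 16-byte rows per loop
+   iteration, each split into low/high 8-pixel halves for the 16-bit
+   multiply-accumulate. */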
+static void filter_by_weight16x16_msa(uint8_t *src_ptr, int32_t src_stride,
+ uint8_t *dst_ptr, int32_t dst_stride,
+ int32_t src_weight) {
+ int32_t dst_weight = (1 << MFQE_PRECISION) - src_weight;
+ int32_t row;
+ v16i8 src0, src1, src2, src3;
+ v16i8 dst0, dst1, dst2, dst3;
+ v8i16 src_wt, dst_wt;
+ v8i16 res_h_r, res_h_l;
+ v8i16 src_r, src_l, dst_r, dst_l;
+
+ src_wt = __msa_fill_h(src_weight);
+ dst_wt = __msa_fill_h(dst_weight);
+
+ for (row = 4; row--;) {
+ LD_SB4(src_ptr, src_stride, src0, src1, src2, src3);
+ src_ptr += (4 * src_stride);
+ LD_SB4(dst_ptr, dst_stride, dst0, dst1, dst2, dst3);
+
+ UNPCK_UB_SH(src0, src_r, src_l);
+ UNPCK_UB_SH(dst0, dst_r, dst_l);
+ res_h_r = (src_r * src_wt);
+ res_h_r += (dst_r * dst_wt);
+ res_h_l = (src_l * src_wt);
+ res_h_l += (dst_l * dst_wt);
+ SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION);
+ PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr);
+ dst_ptr += dst_stride;
+
+ UNPCK_UB_SH(src1, src_r, src_l);
+ UNPCK_UB_SH(dst1, dst_r, dst_l);
+ res_h_r = (src_r * src_wt);
+ res_h_r += (dst_r * dst_wt);
+ res_h_l = (src_l * src_wt);
+ res_h_l += (dst_l * dst_wt);
+ SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION);
+ PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr);
+ dst_ptr += dst_stride;
+
+ UNPCK_UB_SH(src2, src_r, src_l);
+ UNPCK_UB_SH(dst2, dst_r, dst_l);
+ res_h_r = (src_r * src_wt);
+ res_h_r += (dst_r * dst_wt);
+ res_h_l = (src_l * src_wt);
+ res_h_l += (dst_l * dst_wt);
+ SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION);
+ PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr);
+ dst_ptr += dst_stride;
+
+ UNPCK_UB_SH(src3, src_r, src_l);
+ UNPCK_UB_SH(dst3, dst_r, dst_l);
+ res_h_r = (src_r * src_wt);
+ res_h_r += (dst_r * dst_wt);
+ res_h_l = (src_l * src_wt);
+ res_h_l += (dst_l * dst_wt);
+ SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION);
+ PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr);
+ dst_ptr += dst_stride;
+ }
+}
+
+void vp8_filter_by_weight16x16_msa(uint8_t *src_ptr, int32_t src_stride,
+ uint8_t *dst_ptr, int32_t dst_stride,
+ int32_t src_weight) {
+ filter_by_weight16x16_msa(src_ptr, src_stride, dst_ptr, dst_stride,
+ src_weight);
+}
+
+void vp8_filter_by_weight8x8_msa(uint8_t *src_ptr, int32_t src_stride,
+ uint8_t *dst_ptr, int32_t dst_stride,
+ int32_t src_weight) {
+ filter_by_weight8x8_msa(src_ptr, src_stride, dst_ptr, dst_stride, src_weight);
+}
diff --git a/media/libvpx/libvpx/vp8/common/mips/msa/sixtap_filter_msa.c b/media/libvpx/libvpx/vp8/common/mips/msa/sixtap_filter_msa.c
new file mode 100644
index 0000000000..3a1bb7cd57
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/msa/sixtap_filter_msa.c
@@ -0,0 +1,1738 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vpx_ports/mem.h"
+#include "vp8/common/filter.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+
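+/* Six-tap taps for the seven non-zero subpel positions; a zero offset is a
+   plain copy handled elsewhere, so the table is indexed by offset - 1.
+   Rows whose outer taps are zero take the cheaper 4-tap paths below. */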
+DECLARE_ALIGNED(16, static const int8_t, vp8_subpel_filters_msa[7][8]) = {
+ { 0, -6, 123, 12, -1, 0, 0, 0 },
+ { 2, -11, 108, 36, -8, 1, 0, 0 }, /* New 1/4 pel 6 tap filter */
+ { 0, -9, 93, 50, -6, 0, 0, 0 },
+ { 3, -16, 77, 77, -16, 3, 0, 0 }, /* New 1/2 pel 6 tap filter */
+ { 0, -6, 50, 93, -9, 0, 0, 0 },
+ { 1, -8, 36, 108, -11, 2, 0, 0 }, /* New 1/4 pel 6 tap filter */
+ { 0, -1, 12, 123, -6, 0, 0, 0 },
+};
+
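+/* Shuffle masks that gather the sliding byte-pair windows feeding the
+   dot-product filters: the first row serves 8-wide blocks, the next two
+   serve 4-wide blocks where two rows share one register (indices >= 16
+   select from the second source vector). */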
+static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = {
+ /* 8 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ /* 4 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
+ /* 4 width cases */
+ 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
+};
+
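+/* Apply the 6-tap horizontal filter to one row (or row pair): three
+   shuffles gather the tap inputs, three dot-product accumulations apply
+   the taps, then the sum is rounded by VP8_FILTER_SHIFT and saturated to
+   the signed 8-bit range. */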
+#define HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_h0, filt_h1, \
+ filt_h2) \
+ ({ \
+ v16i8 _6tap_vec0_m, _6tap_vec1_m, _6tap_vec2_m; \
+ v8i16 _6tap_out_m; \
+ \
+ VSHF_B3_SB(src0, src1, src0, src1, src0, src1, mask0, mask1, mask2, \
+ _6tap_vec0_m, _6tap_vec1_m, _6tap_vec2_m); \
+ _6tap_out_m = DPADD_SH3_SH(_6tap_vec0_m, _6tap_vec1_m, _6tap_vec2_m, \
+ filt_h0, filt_h1, filt_h2); \
+ \
+ _6tap_out_m = __msa_srari_h(_6tap_out_m, VP8_FILTER_SHIFT); \
+ _6tap_out_m = __msa_sat_s_h(_6tap_out_m, 7); \
+ \
+ _6tap_out_m; \
+ })
+
+#define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
+ mask2, filt0, filt1, filt2, out0, out1) \
+ { \
+ v16i8 _6tap_4wid_vec0_m, _6tap_4wid_vec1_m, _6tap_4wid_vec2_m, \
+ _6tap_4wid_vec3_m, _6tap_4wid_vec4_m, _6tap_4wid_vec5_m; \
+ \
+ VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, _6tap_4wid_vec0_m, \
+ _6tap_4wid_vec1_m); \
+ DOTP_SB2_SH(_6tap_4wid_vec0_m, _6tap_4wid_vec1_m, filt0, filt0, out0, \
+ out1); \
+ VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, _6tap_4wid_vec2_m, \
+ _6tap_4wid_vec3_m); \
+ DPADD_SB2_SH(_6tap_4wid_vec2_m, _6tap_4wid_vec3_m, filt1, filt1, out0, \
+ out1); \
+ VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, _6tap_4wid_vec4_m, \
+ _6tap_4wid_vec5_m); \
+ DPADD_SB2_SH(_6tap_4wid_vec4_m, _6tap_4wid_vec5_m, filt2, filt2, out0, \
+ out1); \
+ }
+
+#define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
+ mask2, filt0, filt1, filt2, out0, out1, \
+ out2, out3) \
+ { \
+ v16i8 _6tap_8wid_vec0_m, _6tap_8wid_vec1_m, _6tap_8wid_vec2_m, \
+ _6tap_8wid_vec3_m, _6tap_8wid_vec4_m, _6tap_8wid_vec5_m, \
+ _6tap_8wid_vec6_m, _6tap_8wid_vec7_m; \
+ \
+ VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, _6tap_8wid_vec0_m, \
+ _6tap_8wid_vec1_m); \
+ VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, _6tap_8wid_vec2_m, \
+ _6tap_8wid_vec3_m); \
+ DOTP_SB4_SH(_6tap_8wid_vec0_m, _6tap_8wid_vec1_m, _6tap_8wid_vec2_m, \
+ _6tap_8wid_vec3_m, filt0, filt0, filt0, filt0, out0, out1, \
+ out2, out3); \
+ VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, _6tap_8wid_vec0_m, \
+ _6tap_8wid_vec1_m); \
+ VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, _6tap_8wid_vec2_m, \
+ _6tap_8wid_vec3_m); \
+ VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, _6tap_8wid_vec4_m, \
+ _6tap_8wid_vec5_m); \
+ VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, _6tap_8wid_vec6_m, \
+ _6tap_8wid_vec7_m); \
+ DPADD_SB4_SH(_6tap_8wid_vec0_m, _6tap_8wid_vec1_m, _6tap_8wid_vec2_m, \
+ _6tap_8wid_vec3_m, filt1, filt1, filt1, filt1, out0, out1, \
+ out2, out3); \
+ DPADD_SB4_SH(_6tap_8wid_vec4_m, _6tap_8wid_vec5_m, _6tap_8wid_vec6_m, \
+ _6tap_8wid_vec7_m, filt2, filt2, filt2, filt2, out0, out1, \
+ out2, out3); \
+ }
+
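+/* 4-tap variants: for subpel positions whose 6-tap filter has zero outer
+   taps (see the table above), only the middle four taps contribute, so a
+   shorter 4-tap path suffices. */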
+#define FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1) \
+ ({ \
+ v8i16 _4tap_dpadd_tmp0; \
+ \
+ _4tap_dpadd_tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \
+ _4tap_dpadd_tmp0 = \
+ __msa_dpadd_s_h(_4tap_dpadd_tmp0, (v16i8)vec1, (v16i8)filt1); \
+ \
+ _4tap_dpadd_tmp0; \
+ })
+
+#define HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_h0, filt_h1) \
+ ({ \
+ v16i8 _4tap_vec0_m, _4tap_vec1_m; \
+ v8i16 _4tap_out_m; \
+ \
+ VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, _4tap_vec0_m, \
+ _4tap_vec1_m); \
+ _4tap_out_m = \
+ FILT_4TAP_DPADD_S_H(_4tap_vec0_m, _4tap_vec1_m, filt_h0, filt_h1); \
+ \
+ _4tap_out_m = __msa_srari_h(_4tap_out_m, VP8_FILTER_SHIFT); \
+ _4tap_out_m = __msa_sat_s_h(_4tap_out_m, 7); \
+ \
+ _4tap_out_m; \
+ })
+
+#define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
+ filt0, filt1, out0, out1) \
+ { \
+ v16i8 _4tap_4wid_vec0_m, _4tap_4wid_vec1_m, _4tap_4wid_vec2_m, \
+ _4tap_4wid_vec3_m; \
+ \
+ VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, _4tap_4wid_vec0_m, \
+ _4tap_4wid_vec1_m); \
+ DOTP_SB2_SH(_4tap_4wid_vec0_m, _4tap_4wid_vec1_m, filt0, filt0, out0, \
+ out1); \
+ VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, _4tap_4wid_vec2_m, \
+ _4tap_4wid_vec3_m); \
+ DPADD_SB2_SH(_4tap_4wid_vec2_m, _4tap_4wid_vec3_m, filt1, filt1, out0, \
+ out1); \
+ }
+
+#define HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
+ filt0, filt1, out0, out1, out2, out3) \
+ { \
+ v16i8 _4tap_8wid_vec0_m, _4tap_8wid_vec1_m, _4tap_8wid_vec2_m, \
+ _4tap_8wid_vec3_m; \
+ \
+ VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, _4tap_8wid_vec0_m, \
+ _4tap_8wid_vec1_m); \
+ VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, _4tap_8wid_vec2_m, \
+ _4tap_8wid_vec3_m); \
+ DOTP_SB4_SH(_4tap_8wid_vec0_m, _4tap_8wid_vec1_m, _4tap_8wid_vec2_m, \
+ _4tap_8wid_vec3_m, filt0, filt0, filt0, filt0, out0, out1, \
+ out2, out3); \
+ VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, _4tap_8wid_vec0_m, \
+ _4tap_8wid_vec1_m); \
+ VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, _4tap_8wid_vec2_m, \
+ _4tap_8wid_vec3_m); \
+ DPADD_SB4_SH(_4tap_8wid_vec0_m, _4tap_8wid_vec1_m, _4tap_8wid_vec2_m, \
+ _4tap_8wid_vec3_m, filt1, filt1, filt1, filt1, out0, out1, \
+ out2, out3); \
+ }
+
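+/* Horizontal-only 6-tap prediction. Source bytes are biased by 128
+   (XORI ... 128) so signed dot products can be used on unsigned pixels;
+   PCKEV_XORI128_UB removes the bias again while packing the result. */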
+static void common_hz_6t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2;
+ v16u8 mask0, mask1, mask2, out;
+ v8i16 filt, out0, out1;
+
+ mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]);
+ src -= 2;
+
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0,
+ filt1, filt2, out0, out1);
+ SRARI_H2_SH(out0, out1, VP8_FILTER_SHIFT);
+ SAT_SH2_SH(out0, out1, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+}
+
+static void common_hz_6t_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2;
+ v16u8 mask0, mask1, mask2, out;
+ v8i16 filt, out0, out1, out2, out3;
+
+ mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]);
+ src -= 2;
+
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ src += (4 * src_stride);
+ HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0,
+ filt1, filt2, out0, out1);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0,
+ filt1, filt2, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+}
+
+static void common_hz_6t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ if (4 == height) {
+ common_hz_6t_4x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (8 == height) {
+ common_hz_6t_4x8_msa(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_hz_6t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2;
+ v16u8 mask0, mask1, mask2, tmp0, tmp1;
+ v8i16 filt, out0, out1, out2, out3;
+
+ mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]);
+ src -= 2;
+
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ src += (4 * src_stride);
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0,
+ filt1, filt2, out0, out1, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ tmp0 = PCKEV_XORI128_UB(out0, out1);
+ tmp1 = PCKEV_XORI128_UB(out2, out3);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ for (loop_cnt = (height >> 2) - 1; loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ src += (4 * src_stride);
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ tmp0 = PCKEV_XORI128_UB(out0, out1);
+ tmp1 = PCKEV_XORI128_UB(out2, out3);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+static void common_hz_6t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, filt0, filt1, filt2;
+ v16u8 mask0, mask1, mask2, out;
+ v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7;
+
+ mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]);
+ src -= 2;
+
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (4 * src_stride);
+
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+ HORIZ_6TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, mask2,
+ filt0, filt1, filt2, out4, out5, out6, out7);
+ SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT);
+ SRARI_H4_SH(out4, out5, out6, out7, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out4, out5, out6, out7, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out4, out5);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out6, out7);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ }
+}
+
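+/* Vertical 6-tap prediction: adjacent rows are interleaved column-wise so
+   each vertical filter becomes a dot product, and the interleaved row
+   pairs are carried across loop iterations. */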
+static void common_vt_6t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
+ v16i8 src87_r, src2110, src4332, src6554, src8776, filt0, filt1, filt2;
+ v16u8 out;
+ v8i16 filt, out10, out32;
+
+ src -= (2 * src_stride);
+
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
+ src32_r, src43_r);
+ ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
+ XORI_B2_128_SB(src2110, src4332);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ src += (4 * src_stride);
+
+ ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,
+ src76_r, src87_r);
+ ILVR_D2_SB(src65_r, src54_r, src87_r, src76_r, src6554, src8776);
+ XORI_B2_128_SB(src6554, src8776);
+ out10 = DPADD_SH3_SH(src2110, src4332, src6554, filt0, filt1, filt2);
+ out32 = DPADD_SH3_SH(src4332, src6554, src8776, filt0, filt1, filt2);
+ SRARI_H2_SH(out10, out32, VP8_FILTER_SHIFT);
+ SAT_SH2_SH(out10, out32, 7);
+ out = PCKEV_XORI128_UB(out10, out32);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src2110 = src6554;
+ src4332 = src8776;
+ src4 = src8;
+ }
+}
+
+static void common_vt_6t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src7, src8, src9, src10;
+ v16i8 src10_r, src32_r, src76_r, src98_r, src21_r, src43_r, src87_r;
+ v16i8 src109_r, filt0, filt1, filt2;
+ v16u8 tmp0, tmp1;
+ v8i16 filt, out0_r, out1_r, out2_r, out3_r;
+
+ src -= (2 * src_stride);
+
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ ILVR_B4_SB(src1, src0, src3, src2, src2, src1, src4, src3, src10_r, src32_r,
+ src21_r, src43_r);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
+ XORI_B4_128_SB(src7, src8, src9, src10);
+ src += (4 * src_stride);
+
+ ILVR_B4_SB(src7, src4, src8, src7, src9, src8, src10, src9, src76_r,
+ src87_r, src98_r, src109_r);
+ out0_r = DPADD_SH3_SH(src10_r, src32_r, src76_r, filt0, filt1, filt2);
+ out1_r = DPADD_SH3_SH(src21_r, src43_r, src87_r, filt0, filt1, filt2);
+ out2_r = DPADD_SH3_SH(src32_r, src76_r, src98_r, filt0, filt1, filt2);
+ out3_r = DPADD_SH3_SH(src43_r, src87_r, src109_r, filt0, filt1, filt2);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ tmp0 = PCKEV_XORI128_UB(out0_r, out1_r);
+ tmp1 = PCKEV_XORI128_UB(out2_r, out3_r);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src10_r = src76_r;
+ src32_r = src98_r;
+ src21_r = src87_r;
+ src43_r = src109_r;
+ src4 = src10;
+ }
+}
+
+static void common_vt_6t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
+ v16i8 src87_r, src10_l, src32_l, src54_l, src76_l, src21_l, src43_l;
+ v16i8 src65_l, src87_l, filt0, filt1, filt2;
+ v16u8 tmp0, tmp1, tmp2, tmp3;
+ v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l, filt;
+
+ src -= (2 * src_stride);
+
+ filt = LD_SH(filter);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2);
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ ILVR_B4_SB(src1, src0, src3, src2, src4, src3, src2, src1, src10_r, src32_r,
+ src43_r, src21_r);
+ ILVL_B4_SB(src1, src0, src3, src2, src4, src3, src2, src1, src10_l, src32_l,
+ src43_l, src21_l);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src5, src6, src7, src8);
+ ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,
+ src76_r, src87_r);
+ ILVL_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_l, src65_l,
+ src76_l, src87_l);
+ out0_r = DPADD_SH3_SH(src10_r, src32_r, src54_r, filt0, filt1, filt2);
+ out1_r = DPADD_SH3_SH(src21_r, src43_r, src65_r, filt0, filt1, filt2);
+ out2_r = DPADD_SH3_SH(src32_r, src54_r, src76_r, filt0, filt1, filt2);
+ out3_r = DPADD_SH3_SH(src43_r, src65_r, src87_r, filt0, filt1, filt2);
+ out0_l = DPADD_SH3_SH(src10_l, src32_l, src54_l, filt0, filt1, filt2);
+ out1_l = DPADD_SH3_SH(src21_l, src43_l, src65_l, filt0, filt1, filt2);
+ out2_l = DPADD_SH3_SH(src32_l, src54_l, src76_l, filt0, filt1, filt2);
+ out3_l = DPADD_SH3_SH(src43_l, src65_l, src87_l, filt0, filt1, filt2);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT);
+ SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
+ PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, out3_r,
+ tmp0, tmp1, tmp2, tmp3);
+ XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
+ ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src10_r = src54_r;
+ src32_r = src76_r;
+ src21_r = src65_r;
+ src43_r = src87_r;
+ src10_l = src54_l;
+ src32_l = src76_l;
+ src21_l = src65_l;
+ src43_l = src87_l;
+ src4 = src8;
+ }
+}
+
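+/* Combined 6-tap horizontal + 6-tap vertical prediction: five priming rows
+   are filtered horizontally up front, then each iteration filters the new
+   rows and feeds the running window into the vertical taps. */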
+static void common_hv_6ht_6vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 filt_hz0, filt_hz1, filt_hz2;
+ v16u8 mask0, mask1, mask2, out;
+ v8i16 tmp0, tmp1;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ v8i16 hz_out7, filt, filt_vt0, filt_vt1, filt_vt2, out0, out1, out2, out3;
+
+ mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]);
+ src -= (2 + 2 * src_stride);
+
+ filt = LD_SH(filter_horiz);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2);
+ filt = LD_SH(filter_vert);
+ SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out2 = HORIZ_6TAP_FILT(src2, src3, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out1 = (v8i16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8);
+ hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB2(src, src_stride, src5, src6);
+ src += (2 * src_stride);
+
+ XORI_B2_128_SB(src5, src6);
+ hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8);
+
+ LD_SB2(src, src_stride, src7, src8);
+ src += (2 * src_stride);
+
+ XORI_B2_128_SB(src7, src8);
+ hz_out7 = HORIZ_6TAP_FILT(src7, src8, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8);
+
+ out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
+ tmp1 = DPADD_SH3_SH(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2);
+
+ SRARI_H2_SH(tmp0, tmp1, 7);
+ SAT_SH2_SH(tmp0, tmp1, 7);
+ out = PCKEV_XORI128_UB(tmp0, tmp1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out3 = hz_out7;
+ out0 = out2;
+ out1 = out3;
+ }
+}
+
+static void common_hv_6ht_6vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 filt_hz0, filt_hz1, filt_hz2;
+ v16u8 mask0, mask1, mask2, vec0, vec1;
+ v8i16 filt, filt_vt0, filt_vt1, filt_vt2;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ v8i16 hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7;
+ v8i16 tmp0, tmp1, tmp2, tmp3;
+
+ mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]);
+ src -= (2 + 2 * src_stride);
+
+ filt = LD_SH(filter_horiz);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out4 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2);
+
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1);
+ ILVEV_B2_SH(hz_out1, hz_out2, hz_out3, hz_out4, out3, out4);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src5, src6, src7, src8);
+ hz_out5 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out6 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out5 = (v8i16)__msa_ilvev_b((v16i8)hz_out6, (v16i8)hz_out5);
+ tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = HORIZ_6TAP_FILT(src7, src7, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
+ tmp2 = DPADD_SH3_SH(out1, out2, out7, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out8 = HORIZ_6TAP_FILT(src8, src8, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out6 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7);
+ tmp3 = DPADD_SH3_SH(out4, out5, out6, filt_vt0, filt_vt1, filt_vt2);
+
+ SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ vec0 = PCKEV_XORI128_UB(tmp0, tmp1);
+ vec1 = PCKEV_XORI128_UB(tmp2, tmp3);
+ ST8x4_UB(vec0, vec1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out4 = hz_out8;
+ out0 = out2;
+ out1 = out7;
+ out3 = out5;
+ out4 = out6;
+ }
+}
+
+static void common_hv_6ht_6vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ int32_t multiple8_cnt;
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ common_hv_6ht_6vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ src += 8;
+ dst += 8;
+ }
+}
+
+static void common_hz_4t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1;
+ v8i16 filt, out0, out1;
+ v16u8 out;
+
+ mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]);
+ src -= 1;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1,
+ out0, out1);
+ SRARI_H2_SH(out0, out1, VP8_FILTER_SHIFT);
+ SAT_SH2_SH(out0, out1, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+}
+
+static void common_hz_4t_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter) {
+ v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1;
+ v16u8 out;
+ v8i16 filt, out0, out1, out2, out3;
+
+ mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]);
+ src -= 1;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1,
+ out0, out1);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1,
+ out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+}
+
+static void common_hz_4t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ if (4 == height) {
+ common_hz_4t_4x4_msa(src, src_stride, dst, dst_stride, filter);
+ } else if (8 == height) {
+ common_hz_4t_4x8_msa(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_hz_4t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1;
+ v16u8 tmp0, tmp1;
+ v8i16 filt, out0, out1, out2, out3;
+
+ mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]);
+ src -= 1;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0,
+ filt1, out0, out1, out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ tmp0 = PCKEV_XORI128_UB(out0, out1);
+ tmp1 = PCKEV_XORI128_UB(out2, out3);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+static void common_hz_4t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v16i8 filt0, filt1, mask0, mask1;
+ v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7;
+ v16u8 out;
+
+ mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]);
+ src -= 1;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ mask1 = mask0 + 2;
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ src += (4 * src_stride);
+
+ XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
+ HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0,
+ filt1, out0, out1, out2, out3);
+ HORIZ_4TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, filt0,
+ filt1, out4, out5, out6, out7);
+ SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT);
+ SRARI_H4_SH(out4, out5, out6, out7, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ SAT_SH4_SH(out4, out5, out6, out7, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out4, out5);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ out = PCKEV_XORI128_UB(out6, out7);
+ ST_UB(out, dst);
+ dst += dst_stride;
+ }
+}
+
+static void common_vt_4t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5;
+ v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r;
+ v16i8 src2110, src4332, filt0, filt1;
+ v8i16 filt, out10, out32;
+ v16u8 out;
+
+ src -= src_stride;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
+
+ src2110 = (v16i8)__msa_ilvr_d((v2i64)src21_r, (v2i64)src10_r);
+ src2110 = (v16i8)__msa_xori_b((v16u8)src2110, 128);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB3(src, src_stride, src3, src4, src5);
+ src += (3 * src_stride);
+ ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
+ src4332 = (v16i8)__msa_ilvr_d((v2i64)src43_r, (v2i64)src32_r);
+ src4332 = (v16i8)__msa_xori_b((v16u8)src4332, 128);
+ out10 = FILT_4TAP_DPADD_S_H(src2110, src4332, filt0, filt1);
+
+ src2 = LD_SB(src);
+ src += (src_stride);
+ ILVR_B2_SB(src5, src4, src2, src5, src54_r, src65_r);
+ src2110 = (v16i8)__msa_ilvr_d((v2i64)src65_r, (v2i64)src54_r);
+ src2110 = (v16i8)__msa_xori_b((v16u8)src2110, 128);
+ out32 = FILT_4TAP_DPADD_S_H(src4332, src2110, filt0, filt1);
+ SRARI_H2_SH(out10, out32, VP8_FILTER_SHIFT);
+ SAT_SH2_SH(out10, out32, 7);
+ out = PCKEV_XORI128_UB(out10, out32);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+static void common_vt_4t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src7, src8, src9, src10;
+ v16i8 src10_r, src72_r, src98_r, src21_r, src87_r, src109_r, filt0, filt1;
+ v16u8 tmp0, tmp1;
+ v8i16 filt, out0_r, out1_r, out2_r, out3_r;
+
+ src -= src_stride;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src7, src8, src9, src10);
+ ILVR_B4_SB(src7, src2, src8, src7, src9, src8, src10, src9, src72_r,
+ src87_r, src98_r, src109_r);
+ out0_r = FILT_4TAP_DPADD_S_H(src10_r, src72_r, filt0, filt1);
+ out1_r = FILT_4TAP_DPADD_S_H(src21_r, src87_r, filt0, filt1);
+ out2_r = FILT_4TAP_DPADD_S_H(src72_r, src98_r, filt0, filt1);
+ out3_r = FILT_4TAP_DPADD_S_H(src87_r, src109_r, filt0, filt1);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ tmp0 = PCKEV_XORI128_UB(out0_r, out1_r);
+ tmp1 = PCKEV_XORI128_UB(out2_r, out3_r);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src10_r = src98_r;
+ src21_r = src109_r;
+ src2 = src10;
+ }
+}
+
+static void common_vt_4t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6;
+ v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r, src10_l;
+ v16i8 src32_l, src54_l, src21_l, src43_l, src65_l, filt0, filt1;
+ v16u8 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
+
+ src -= src_stride;
+
+ filt = LD_SH(filter);
+ SPLATI_H2_SB(filt, 0, 1, filt0, filt1);
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
+ ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src3, src4, src5, src6);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src3, src4, src5, src6);
+ ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, src32_r, src43_r,
+ src54_r, src65_r);
+ ILVL_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, src32_l, src43_l,
+ src54_l, src65_l);
+ out0_r = FILT_4TAP_DPADD_S_H(src10_r, src32_r, filt0, filt1);
+ out1_r = FILT_4TAP_DPADD_S_H(src21_r, src43_r, filt0, filt1);
+ out2_r = FILT_4TAP_DPADD_S_H(src32_r, src54_r, filt0, filt1);
+ out3_r = FILT_4TAP_DPADD_S_H(src43_r, src65_r, filt0, filt1);
+ out0_l = FILT_4TAP_DPADD_S_H(src10_l, src32_l, filt0, filt1);
+ out1_l = FILT_4TAP_DPADD_S_H(src21_l, src43_l, filt0, filt1);
+ out2_l = FILT_4TAP_DPADD_S_H(src32_l, src54_l, filt0, filt1);
+ out3_l = FILT_4TAP_DPADD_S_H(src43_l, src65_l, filt0, filt1);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT);
+ SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, VP8_FILTER_SHIFT);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
+ PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, out3_r,
+ tmp0, tmp1, tmp2, tmp3);
+ XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
+ ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ src10_r = src54_r;
+ src21_r = src65_r;
+ src10_l = src54_l;
+ src21_l = src65_l;
+ src2 = src6;
+ }
+}
+
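+/* Combined 4-tap horizontal + 4-tap vertical prediction, primed with three
+   rows since the vertical window is only four rows tall. */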
+static void common_hv_4ht_4vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1;
+ v16u8 mask0, mask1, out;
+ v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5;
+
+ mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]);
+ src -= (1 + 1 * src_stride);
+
+ filt = LD_SH(filter_horiz);
+ SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = HORIZ_4TAP_FILT(src1, src2, mask0, mask1, filt_hz0, filt_hz1);
+ vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src3, src4, src5, src6);
+ src += (4 * src_stride);
+
+ XORI_B2_128_SB(src3, src4);
+ hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = (v8i16)__msa_sldi_b((v16i8)hz_out3, (v16i8)hz_out1, 8);
+ vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ XORI_B2_128_SB(src5, src6);
+ hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8);
+ vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1);
+
+ SRARI_H2_SH(tmp0, tmp1, 7);
+ SAT_SH2_SH(tmp0, tmp1, 7);
+ out = PCKEV_XORI128_UB(tmp0, tmp1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out1 = hz_out5;
+ vec0 = vec2;
+ }
+}
+
+static void common_hv_4ht_4vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1;
+ v16u8 mask0, mask1, out0, out1;
+ v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, tmp2, tmp3;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3;
+ v8i16 vec0, vec1, vec2, vec3, vec4;
+
+ mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]);
+ src -= (1 + 1 * src_stride);
+
+ filt = LD_SH(filter_horiz);
+ SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1);
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out1, hz_out2, vec0, vec2);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src3, src4, src5, src6);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src3, src4, src5, src6);
+ hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1);
+ vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out0 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1);
+ vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out3);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1);
+
+ hz_out1 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1);
+ vec4 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+ tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec4, filt_vt0, filt_vt1);
+
+ hz_out2 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1);
+ ILVEV_B2_SH(hz_out3, hz_out0, hz_out1, hz_out2, vec0, vec1);
+ tmp3 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ out0 = PCKEV_XORI128_UB(tmp0, tmp1);
+ out1 = PCKEV_XORI128_UB(tmp2, tmp3);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ vec0 = vec4;
+ vec2 = vec1;
+ }
+}
+
+static void common_hv_4ht_4vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ int32_t multiple8_cnt;
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ common_hv_4ht_4vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ src += 8;
+ dst += 8;
+ }
+}
+
+static void common_hv_6ht_4vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6;
+ v16i8 filt_hz0, filt_hz1, filt_hz2;
+ v16u8 res0, res1, mask0, mask1, mask2;
+ v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5;
+
+ mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]);
+ src -= (2 + 1 * src_stride);
+
+ filt = LD_SH(filter_horiz);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out1 = HORIZ_6TAP_FILT(src1, src2, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src3, src4, src5, src6);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src3, src4, src5, src6);
+ hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out2 = (v8i16)__msa_sldi_b((v16i8)hz_out3, (v16i8)hz_out1, 8);
+ vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8);
+ vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1);
+
+ SRARI_H2_SH(tmp0, tmp1, 7);
+ SAT_SH2_SH(tmp0, tmp1, 7);
+ PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1);
+ XORI_B2_128_UB(res0, res1);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out1 = hz_out5;
+ vec0 = vec2;
+ }
+}
+
+static void common_hv_6ht_4vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6;
+ v16i8 filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2;
+ v8i16 filt, filt_vt0, filt_vt1, hz_out0, hz_out1, hz_out2, hz_out3;
+ v8i16 tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3;
+ v16u8 out0, out1;
+
+ mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]);
+ src -= (2 + src_stride);
+
+ filt = LD_SH(filter_horiz);
+ SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+ hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0, filt_hz1,
+ filt_hz2);
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out1, hz_out2, vec0, vec2);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src3, src4, src5, src6);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src3, src4, src5, src6);
+
+ hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out0 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out3);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1);
+
+ hz_out1 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+ tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec0, filt_vt0, filt_vt1);
+
+ hz_out2 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ ILVEV_B2_SH(hz_out3, hz_out0, hz_out1, hz_out2, vec1, vec2);
+ tmp3 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1);
+
+ SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ out0 = PCKEV_XORI128_UB(tmp0, tmp1);
+ out1 = PCKEV_XORI128_UB(tmp2, tmp3);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+static void common_hv_6ht_4vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ int32_t multiple8_cnt;
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ common_hv_6ht_4vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ src += 8;
+ dst += 8;
+ }
+}
+
+static void common_hv_4ht_6vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 filt_hz0, filt_hz1, mask0, mask1;
+ v16u8 out;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ v8i16 hz_out7, tmp0, tmp1, out0, out1, out2, out3;
+ v8i16 filt, filt_vt0, filt_vt1, filt_vt2;
+
+ mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]);
+
+ src -= (1 + 2 * src_stride);
+
+ filt = LD_SH(filter_horiz);
+ SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = HORIZ_4TAP_FILT(src2, src3, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = (v8i16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8);
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ XORI_B4_128_SB(src5, src6, src7, src8);
+ src += (4 * src_stride);
+
+ hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8);
+ out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = HORIZ_4TAP_FILT(src7, src8, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8);
+ out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
+ tmp1 = DPADD_SH3_SH(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2);
+
+ SRARI_H2_SH(tmp0, tmp1, 7);
+ SAT_SH2_SH(tmp0, tmp1, 7);
+ out = PCKEV_XORI128_UB(tmp0, tmp1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out3 = hz_out7;
+ out0 = out2;
+ out1 = out3;
+ }
+}
+
+static void common_hv_4ht_6vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16i8 filt_hz0, filt_hz1, mask0, mask1;
+ v8i16 filt, filt_vt0, filt_vt1, filt_vt2, tmp0, tmp1, tmp2, tmp3;
+ v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ v8i16 hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7;
+ v16u8 vec0, vec1;
+
+ mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]);
+ src -= (1 + 2 * src_stride);
+
+ filt = LD_SH(filter_horiz);
+ SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1);
+
+ mask1 = mask0 + 2;
+
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1);
+ hz_out4 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1);
+ ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1);
+ ILVEV_B2_SH(hz_out1, hz_out2, hz_out3, hz_out4, out3, out4);
+
+ filt = LD_SH(filter_vert);
+ SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ src += (4 * src_stride);
+
+ XORI_B4_128_SB(src5, src6, src7, src8);
+
+ hz_out5 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1);
+ out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out6 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1);
+ out5 = (v8i16)__msa_ilvev_b((v16i8)hz_out6, (v16i8)hz_out5);
+ tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = HORIZ_4TAP_FILT(src7, src7, mask0, mask1, filt_hz0, filt_hz1);
+ out6 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
+ tmp2 = DPADD_SH3_SH(out1, out2, out6, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out8 = HORIZ_4TAP_FILT(src8, src8, mask0, mask1, filt_hz0, filt_hz1);
+ out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7);
+ tmp3 = DPADD_SH3_SH(out4, out5, out7, filt_vt0, filt_vt1, filt_vt2);
+
+ SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+ vec0 = PCKEV_XORI128_UB(tmp0, tmp1);
+ vec1 = PCKEV_XORI128_UB(tmp2, tmp3);
+ ST8x4_UB(vec0, vec1, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ hz_out4 = hz_out8;
+ out0 = out2;
+ out1 = out6;
+ out3 = out5;
+ out4 = out7;
+ }
+}
+
+static void common_hv_4ht_6vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ uint8_t *RESTRICT dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height) {
+ int32_t multiple8_cnt;
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ common_hv_4ht_6vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ src += 8;
+ dst += 8;
+ }
+}
+
+void vp8_sixtap_predict4x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1];
+ const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1];
+
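+  /* Note: VP8's six-tap subpel filters have zero outer taps at the odd
+     offsets (1, 3, 5, 7), so those cases dispatch to 4-tap paths with the
+     filter pointer advanced past the leading zero tap (h_filter + 1,
+     v_filter + 1). For offset 0 the pointer computed above is out of
+     range, but that branch only copies pixels and never dereferences
+     it. */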
+ if (yoffset) {
+ if (xoffset) {
+ switch (xoffset) {
+ case 2:
+ case 4:
+ case 6:
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_hv_6ht_6vt_4w_msa(src, src_stride, dst, dst_stride,
+ h_filter, v_filter, 4);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hv_6ht_4vt_4w_msa(src, src_stride, dst, dst_stride,
+ h_filter, v_filter + 1, 4);
+ break;
+ }
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_hv_4ht_6vt_4w_msa(src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter, 4);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hv_4ht_4vt_4w_msa(src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter + 1, 4);
+ break;
+ }
+ break;
+ }
+ } else {
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_vt_6t_4w_msa(src, src_stride, dst, dst_stride, v_filter, 4);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_vt_4t_4w_msa(src, src_stride, dst, dst_stride, v_filter + 1,
+ 4);
+ break;
+ }
+ }
+ } else {
+ switch (xoffset) {
+ case 0: {
+ uint32_t tp0, tp1, tp2, tp3;
+
+ LW4(src, src_stride, tp0, tp1, tp2, tp3);
+ SW4(tp0, tp1, tp2, tp3, dst, dst_stride);
+ break;
+ }
+ case 2:
+ case 4:
+ case 6:
+ common_hz_6t_4w_msa(src, src_stride, dst, dst_stride, h_filter, 4);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hz_4t_4w_msa(src, src_stride, dst, dst_stride, h_filter + 1, 4);
+ break;
+ }
+ }
+}
+
+void vp8_sixtap_predict8x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1];
+ const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1];
+
+ if (yoffset) {
+ if (xoffset) {
+ switch (xoffset) {
+ case 2:
+ case 4:
+ case 6:
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_hv_6ht_6vt_8w_msa(src, src_stride, dst, dst_stride,
+ h_filter, v_filter, 4);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hv_6ht_4vt_8w_msa(src, src_stride, dst, dst_stride,
+ h_filter, v_filter + 1, 4);
+ break;
+ }
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_hv_4ht_6vt_8w_msa(src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter, 4);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hv_4ht_4vt_8w_msa(src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter + 1, 4);
+ break;
+ }
+ break;
+ }
+ } else {
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_vt_6t_8w_msa(src, src_stride, dst, dst_stride, v_filter, 4);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_vt_4t_8w_msa(src, src_stride, dst, dst_stride, v_filter + 1,
+ 4);
+ break;
+ }
+ }
+ } else {
+ switch (xoffset) {
+ case 0: vp8_copy_mem8x4(src, src_stride, dst, dst_stride); break;
+ case 2:
+ case 4:
+ case 6:
+ common_hz_6t_8w_msa(src, src_stride, dst, dst_stride, h_filter, 4);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hz_4t_8w_msa(src, src_stride, dst, dst_stride, h_filter + 1, 4);
+ break;
+ }
+ }
+}
+
+void vp8_sixtap_predict8x8_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1];
+ const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1];
+
+ if (yoffset) {
+ if (xoffset) {
+ switch (xoffset) {
+ case 2:
+ case 4:
+ case 6:
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_hv_6ht_6vt_8w_msa(src, src_stride, dst, dst_stride,
+ h_filter, v_filter, 8);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hv_6ht_4vt_8w_msa(src, src_stride, dst, dst_stride,
+ h_filter, v_filter + 1, 8);
+ break;
+ }
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_hv_4ht_6vt_8w_msa(src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter, 8);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hv_4ht_4vt_8w_msa(src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter + 1, 8);
+ break;
+ }
+ break;
+ }
+ } else {
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_vt_6t_8w_msa(src, src_stride, dst, dst_stride, v_filter, 8);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_vt_4t_8w_msa(src, src_stride, dst, dst_stride, v_filter + 1,
+ 8);
+ break;
+ }
+ }
+ } else {
+ switch (xoffset) {
+ case 0: vp8_copy_mem8x8(src, src_stride, dst, dst_stride); break;
+ case 2:
+ case 4:
+ case 6:
+ common_hz_6t_8w_msa(src, src_stride, dst, dst_stride, h_filter, 8);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hz_4t_8w_msa(src, src_stride, dst, dst_stride, h_filter + 1, 8);
+ break;
+ }
+ }
+}
+
+void vp8_sixtap_predict16x16_msa(uint8_t *RESTRICT src, int32_t src_stride,
+ int32_t xoffset, int32_t yoffset,
+ uint8_t *RESTRICT dst, int32_t dst_stride) {
+ const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1];
+ const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1];
+
+ if (yoffset) {
+ if (xoffset) {
+ switch (xoffset) {
+ case 2:
+ case 4:
+ case 6:
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_hv_6ht_6vt_16w_msa(src, src_stride, dst, dst_stride,
+ h_filter, v_filter, 16);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hv_6ht_4vt_16w_msa(src, src_stride, dst, dst_stride,
+ h_filter, v_filter + 1, 16);
+ break;
+ }
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_hv_4ht_6vt_16w_msa(src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter, 16);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hv_4ht_4vt_16w_msa(src, src_stride, dst, dst_stride,
+ h_filter + 1, v_filter + 1, 16);
+ break;
+ }
+ break;
+ }
+ } else {
+ switch (yoffset) {
+ case 2:
+ case 4:
+ case 6:
+ common_vt_6t_16w_msa(src, src_stride, dst, dst_stride, v_filter, 16);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_vt_4t_16w_msa(src, src_stride, dst, dst_stride, v_filter + 1,
+ 16);
+ break;
+ }
+ }
+ } else {
+ switch (xoffset) {
+ case 0: vp8_copy_mem16x16(src, src_stride, dst, dst_stride); break;
+ case 2:
+ case 4:
+ case 6:
+ common_hz_6t_16w_msa(src, src_stride, dst, dst_stride, h_filter, 16);
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ common_hz_4t_16w_msa(src, src_stride, dst, dst_stride, h_filter + 1,
+ 16);
+ break;
+ }
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/mips/msa/vp8_macros_msa.h b/media/libvpx/libvpx/vp8/common/mips/msa/vp8_macros_msa.h
new file mode 100644
index 0000000000..7cb3c98690
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mips/msa/vp8_macros_msa.h
@@ -0,0 +1,1762 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_MIPS_MSA_VP8_MACROS_MSA_H_
+#define VPX_VP8_COMMON_MIPS_MSA_VP8_MACROS_MSA_H_
+
+#include <msa.h>
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+
+#define LD_B(RTYPE, psrc) *((const RTYPE *)(psrc))
+#define LD_UB(...) LD_B(v16u8, __VA_ARGS__)
+#define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
+
+#define LD_H(RTYPE, psrc) *((const RTYPE *)(psrc))
+#define LD_UH(...) LD_H(v8u16, __VA_ARGS__)
+#define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
+
+#define LD_W(RTYPE, psrc) *((const RTYPE *)(psrc))
+#define LD_UW(...) LD_W(v4u32, __VA_ARGS__)
+#define LD_SW(...) LD_W(v4i32, __VA_ARGS__)
+
+#define ST_B(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
+#define ST_UB(...) ST_B(v16u8, __VA_ARGS__)
+#define ST_SB(...) ST_B(v16i8, __VA_ARGS__)
+
+#define ST_H(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
+#define ST_UH(...) ST_H(v8u16, __VA_ARGS__)
+#define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
+
+#define ST_W(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
+#define ST_SW(...) ST_W(v4i32, __VA_ARGS__)
+
+#if (__mips_isa_rev >= 6)
+#define LW(psrc) \
+ ({ \
+ const uint8_t *psrc_m = (const uint8_t *)(psrc); \
+ uint32_t val_m; \
+ \
+ asm volatile("lw %[val_m], %[psrc_m] \n\t" \
+ \
+ : [val_m] "=r"(val_m) \
+ : [psrc_m] "m"(*psrc_m)); \
+ \
+ val_m; \
+ })
+
+#if (__mips == 64)
+#define LD(psrc) \
+ ({ \
+ const uint8_t *psrc_m = (const uint8_t *)(psrc); \
+ uint64_t val_m = 0; \
+ \
+ asm volatile("ld %[val_m], %[psrc_m] \n\t" \
+ \
+ : [val_m] "=r"(val_m) \
+ : [psrc_m] "m"(*psrc_m)); \
+ \
+ val_m; \
+ })
+#else // !(__mips == 64)
+#define LD(psrc) \
+ ({ \
+ const uint8_t *psrc_ld = (const uint8_t *)(psrc); \
+ uint32_t val0_m, val1_m; \
+ uint64_t val_m = 0; \
+ \
+ val0_m = LW(psrc_ld); \
+ val1_m = LW(psrc_ld + 4); \
+ \
+ val_m = (uint64_t)(val1_m); \
+ val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \
+ val_m = (uint64_t)(val_m | (uint64_t)val0_m); \
+ \
+ val_m; \
+ })
+#endif // (__mips == 64)
+
+#define SH(val, pdst) \
+ { \
+ uint8_t *pdst_m = (uint8_t *)(pdst); \
+ const uint16_t val_m = (val); \
+ \
+ asm volatile("sh %[val_m], %[pdst_m] \n\t" \
+ \
+ : [pdst_m] "=m"(*pdst_m) \
+ : [val_m] "r"(val_m)); \
+ }
+
+#define SW(val, pdst) \
+ { \
+ uint8_t *pdst_m = (uint8_t *)(pdst); \
+ const uint32_t val_m = (val); \
+ \
+ asm volatile("sw %[val_m], %[pdst_m] \n\t" \
+ \
+ : [pdst_m] "=m"(*pdst_m) \
+ : [val_m] "r"(val_m)); \
+ }
+
+#define SD(val, pdst) \
+ { \
+ uint8_t *pdst_m = (uint8_t *)(pdst); \
+ const uint64_t val_m = (val); \
+ \
+ asm volatile("sd %[val_m], %[pdst_m] \n\t" \
+ \
+ : [pdst_m] "=m"(*pdst_m) \
+ : [val_m] "r"(val_m)); \
+ }
+#else // !(__mips_isa_rev >= 6)
+#define LW(psrc) \
+ ({ \
+ const uint8_t *psrc_m = (const uint8_t *)(psrc); \
+ uint32_t val_m; \
+ \
+ asm volatile( \
+ "lwr %[val_m], 0(%[psrc_m]) \n\t" \
+ "lwl %[val_m], 3(%[psrc_m]) \n\t" \
+ : [val_m] "=&r"(val_m) \
+ : [psrc_m] "r"(psrc_m)); \
+ \
+ val_m; \
+ })
+
+#if (__mips == 64)
+#define LD(psrc) \
+ ({ \
+ const uint8_t *psrc_m = (const uint8_t *)(psrc); \
+ uint64_t val_m = 0; \
+ \
+ asm volatile( \
+ "ldr %[val_m], 0(%[psrc_m]) \n\t" \
+ "ldl %[val_m], 7(%[psrc_m]) \n\t" \
+ : [val_m] "=&r"(val_m) \
+ : [psrc_m] "r"(psrc_m)); \
+ \
+ val_m; \
+ })
+#else // !(__mips == 64)
+#define LD(psrc) \
+ ({ \
+ const uint8_t *psrc_m1 = (const uint8_t *)(psrc); \
+ uint32_t val0_m, val1_m; \
+ uint64_t val_m = 0; \
+ \
+ val0_m = LW(psrc_m1); \
+ val1_m = LW(psrc_m1 + 4); \
+ \
+ val_m = (uint64_t)(val1_m); \
+ val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \
+ val_m = (uint64_t)(val_m | (uint64_t)val0_m); \
+ \
+ val_m; \
+ })
+#endif // (__mips == 64)
+#define SH(val, pdst) \
+ { \
+ uint8_t *pdst_m = (uint8_t *)(pdst); \
+ const uint16_t val_m = (val); \
+ \
+ asm volatile("ush %[val_m], %[pdst_m] \n\t" \
+ \
+ : [pdst_m] "=m"(*pdst_m) \
+ : [val_m] "r"(val_m)); \
+ }
+
+#define SW(val, pdst) \
+ { \
+ uint8_t *pdst_m = (uint8_t *)(pdst); \
+ const uint32_t val_m = (val); \
+ \
+ asm volatile("usw %[val_m], %[pdst_m] \n\t" \
+ \
+ : [pdst_m] "=m"(*pdst_m) \
+ : [val_m] "r"(val_m)); \
+ }
+
+#define SD(val, pdst) \
+ { \
+ uint8_t *pdst_m1 = (uint8_t *)(pdst); \
+ uint32_t val0_m, val1_m; \
+ \
+ val0_m = (uint32_t)((val)&0x00000000FFFFFFFF); \
+ val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \
+ \
+ SW(val0_m, pdst_m1); \
+ SW(val1_m, pdst_m1 + 4); \
+ }
+#endif // (__mips_isa_rev >= 6)
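+
+/* Usage sketch: the GP load/store wrappers above tolerate unaligned
+   addresses on both pre-R6 (lwr/lwl, ldr/ldl, ush/usw) and R6 cores.
+   Assuming hypothetical byte pointers p and q, an unaligned 64-bit copy
+   is simply:
+
+     uint64_t v = LD(p);
+     SD(v, q);
+*/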
+
+/* Description : Load 4 words with stride
+ Arguments : Inputs - psrc, stride
+ Outputs - out0, out1, out2, out3
+ Details : Load word in 'out0' from (psrc)
+ Load word in 'out1' from (psrc + stride)
+ Load word in 'out2' from (psrc + 2 * stride)
+ Load word in 'out3' from (psrc + 3 * stride)
+*/
+#define LW4(psrc, stride, out0, out1, out2, out3) \
+ { \
+ out0 = LW((psrc)); \
+ out1 = LW((psrc) + stride); \
+ out2 = LW((psrc) + 2 * stride); \
+ out3 = LW((psrc) + 3 * stride); \
+ }
+
+/* Description : Load double words with stride
+ Arguments : Inputs - psrc, stride
+ Outputs - out0, out1
+ Details : Load double word in 'out0' from (psrc)
+ Load double word in 'out1' from (psrc + stride)
+*/
+#define LD2(psrc, stride, out0, out1) \
+ { \
+ out0 = LD((psrc)); \
+ out1 = LD((psrc) + stride); \
+ }
+#define LD4(psrc, stride, out0, out1, out2, out3) \
+ { \
+ LD2((psrc), stride, out0, out1); \
+ LD2((psrc) + 2 * stride, stride, out2, out3); \
+ }
+
+/* Description : Store 4 words with stride
+ Arguments : Inputs - in0, in1, in2, in3, pdst, stride
+ Details : Store word from 'in0' to (pdst)
+ Store word from 'in1' to (pdst + stride)
+ Store word from 'in2' to (pdst + 2 * stride)
+ Store word from 'in3' to (pdst + 3 * stride)
+*/
+#define SW4(in0, in1, in2, in3, pdst, stride) \
+ { \
+ SW(in0, (pdst)); \
+ SW(in1, (pdst) + stride); \
+ SW(in2, (pdst) + 2 * stride); \
+ SW(in3, (pdst) + 3 * stride); \
+ }
+
+/* Description : Store 4 double words with stride
+ Arguments : Inputs - in0, in1, in2, in3, pdst, stride
+ Details : Store double word from 'in0' to (pdst)
+ Store double word from 'in1' to (pdst + stride)
+ Store double word from 'in2' to (pdst + 2 * stride)
+ Store double word from 'in3' to (pdst + 3 * stride)
+*/
+#define SD4(in0, in1, in2, in3, pdst, stride) \
+ { \
+ SD(in0, (pdst)); \
+ SD(in1, (pdst) + stride); \
+ SD(in2, (pdst) + 2 * stride); \
+ SD(in3, (pdst) + 3 * stride); \
+ }
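+
+/* Usage sketch (hypothetical src/dst pointers and strides): move a
+   4-row block through GP registers, 4 bytes per row via LW4/SW4 (as the
+   xoffset == 0 path of vp8_sixtap_predict4x4_msa does) or 8 bytes per
+   row via LD4/SD4:
+
+     uint32_t w0, w1, w2, w3;
+     LW4(src, stride, w0, w1, w2, w3);
+     SW4(w0, w1, w2, w3, dst, stride);
+*/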
+
+/* Description : Load vectors with 16 byte elements with stride
+ Arguments : Inputs - psrc, stride
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Load 16 byte elements in 'out0' from (psrc)
+ Load 16 byte elements in 'out1' from (psrc + stride)
+*/
+#define LD_B2(RTYPE, psrc, stride, out0, out1) \
+ { \
+ out0 = LD_B(RTYPE, (psrc)); \
+ out1 = LD_B(RTYPE, (psrc) + stride); \
+ }
+#define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__)
+#define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
+
+#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) \
+ { \
+ LD_B2(RTYPE, (psrc), stride, out0, out1); \
+ out2 = LD_B(RTYPE, (psrc) + 2 * stride); \
+ }
+#define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__)
+#define LD_SB3(...) LD_B3(v16i8, __VA_ARGS__)
+
+#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \
+ { \
+ LD_B2(RTYPE, (psrc), stride, out0, out1); \
+ LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
+ }
+#define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__)
+#define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
+
+#define LD_B5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) \
+ { \
+ LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
+ out4 = LD_B(RTYPE, (psrc) + 4 * stride); \
+ }
+#define LD_UB5(...) LD_B5(v16u8, __VA_ARGS__)
+#define LD_SB5(...) LD_B5(v16i8, __VA_ARGS__)
+
+#define LD_B8(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6, \
+ out7) \
+ { \
+ LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
+ LD_B4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \
+ }
+#define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__)
+#define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
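+
+/* Usage sketch, mirroring how the MSA filters fetch their working set:
+   three start-up rows, then four rows per loop iteration (hypothetical
+   src/src_stride):
+
+     v16i8 r0, r1, r2, r3, r4, r5, r6;
+     LD_SB3(src, src_stride, r0, r1, r2);
+     src += 3 * src_stride;
+     LD_SB4(src, src_stride, r3, r4, r5, r6);
+*/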
+
+/* Description : Load vectors with 8 halfword elements with stride
+ Arguments : Inputs - psrc, stride
+ Outputs - out0, out1
+ Details : Load 8 halfword elements in 'out0' from (psrc)
+ Load 8 halfword elements in 'out1' from (psrc + stride)
+*/
+#define LD_H2(RTYPE, psrc, stride, out0, out1) \
+ { \
+ out0 = LD_H(RTYPE, (psrc)); \
+ out1 = LD_H(RTYPE, (psrc) + (stride)); \
+ }
+#define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
+
+#define LD_H4(RTYPE, psrc, stride, out0, out1, out2, out3) \
+ { \
+ LD_H2(RTYPE, (psrc), stride, out0, out1); \
+ LD_H2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
+ }
+#define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)
+
+/* Description : Load 2 vectors of signed word elements with stride
+ Arguments : Inputs - psrc, stride
+ Outputs - out0, out1
+ Return Type - signed word
+*/
+#define LD_SW2(psrc, stride, out0, out1) \
+ { \
+ out0 = LD_SW((psrc)); \
+ out1 = LD_SW((psrc) + stride); \
+ }
+
+/* Description : Store vectors of 16 byte elements with stride
+ Arguments : Inputs - in0, in1, pdst, stride
+ Details : Store 16 byte elements from 'in0' to (pdst)
+ Store 16 byte elements from 'in1' to (pdst + stride)
+*/
+#define ST_B2(RTYPE, in0, in1, pdst, stride) \
+ { \
+ ST_B(RTYPE, in0, (pdst)); \
+ ST_B(RTYPE, in1, (pdst) + stride); \
+ }
+#define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__)
+
+#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \
+ { \
+ ST_B2(RTYPE, in0, in1, (pdst), stride); \
+ ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
+ }
+#define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
+#define ST_SB4(...) ST_B4(v16i8, __VA_ARGS__)
+
+#define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
+ { \
+ ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
+ ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
+ }
+#define ST_UB8(...) ST_B8(v16u8, __VA_ARGS__)
+
+/* Description : Store vectors of 8 halfword elements with stride
+ Arguments : Inputs - in0, in1, pdst, stride
+ Details : Store 8 halfword elements from 'in0' to (pdst)
+ Store 8 halfword elements from 'in1' to (pdst + stride)
+*/
+#define ST_H2(RTYPE, in0, in1, pdst, stride) \
+ { \
+ ST_H(RTYPE, in0, (pdst)); \
+ ST_H(RTYPE, in1, (pdst) + stride); \
+ }
+#define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
+
+/* Description : Store vectors of word elements with stride
+ Arguments : Inputs - in0, in1, pdst, stride
+ Details : Store 4 word elements from 'in0' to (pdst)
+ Store 4 word elements from 'in1' to (pdst + stride)
+*/
+#define ST_SW2(in0, in1, pdst, stride) \
+ { \
+ ST_SW(in0, (pdst)); \
+ ST_SW(in1, (pdst) + stride); \
+ }
+
+/* Description : Store 2x4 byte block to destination memory from input vector
+ Arguments : Inputs - in, stidx, pdst, stride
+ Details : Index 'stidx' halfword element from 'in' vector is copied to
+ the GP register and stored to (pdst)
+ Index 'stidx+1' halfword element from 'in' vector is copied to
+ the GP register and stored to (pdst + stride)
+ Index 'stidx+2' halfword element from 'in' vector is copied to
+ the GP register and stored to (pdst + 2 * stride)
+ Index 'stidx+3' halfword element from 'in' vector is copied to
+ the GP register and stored to (pdst + 3 * stride)
+*/
+#define ST2x4_UB(in, stidx, pdst, stride) \
+ { \
+ uint16_t out0_m, out1_m, out2_m, out3_m; \
+ uint8_t *pblk_2x4_m = (uint8_t *)(pdst); \
+ \
+ out0_m = __msa_copy_u_h((v8i16)in, (stidx)); \
+ out1_m = __msa_copy_u_h((v8i16)in, (stidx + 1)); \
+ out2_m = __msa_copy_u_h((v8i16)in, (stidx + 2)); \
+ out3_m = __msa_copy_u_h((v8i16)in, (stidx + 3)); \
+ \
+ SH(out0_m, pblk_2x4_m); \
+ SH(out1_m, pblk_2x4_m + stride); \
+ SH(out2_m, pblk_2x4_m + 2 * stride); \
+ SH(out3_m, pblk_2x4_m + 3 * stride); \
+ }
+
+/* Description : Store 4x4 byte block to destination memory from input vector
+ Arguments : Inputs - in0, in1, pdst, stride
+ Details : 'Idx0' word element from input vector 'in0' is copied to the
+ GP register and stored to (pdst)
+ 'Idx1' word element from input vector 'in0' is copied to the
+ GP register and stored to (pdst + stride)
+                 'Idx2' word element from input vector 'in1' is copied to the
+                 GP register and stored to (pdst + 2 * stride)
+                 'Idx3' word element from input vector 'in1' is copied to the
+                 GP register and stored to (pdst + 3 * stride)
+*/
+#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \
+ { \
+ uint32_t out0_m, out1_m, out2_m, out3_m; \
+ uint8_t *pblk_4x4_m = (uint8_t *)(pdst); \
+ \
+ out0_m = __msa_copy_u_w((v4i32)in0, idx0); \
+ out1_m = __msa_copy_u_w((v4i32)in0, idx1); \
+ out2_m = __msa_copy_u_w((v4i32)in1, idx2); \
+ out3_m = __msa_copy_u_w((v4i32)in1, idx3); \
+ \
+ SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \
+ }
+#define ST4x8_UB(in0, in1, pdst, stride) \
+ { \
+ uint8_t *pblk_4x8 = (uint8_t *)(pdst); \
+ \
+ ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \
+ ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \
+ }
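+
+/* Usage sketch, as in the 4-wide filter loops of sixtap_filter_msa.c:
+   two filtered rows are packed per byte vector, then scattered as four
+   32-bit stores (tmp0/tmp1 are v8i16 filter outputs; dst/dst_stride are
+   hypothetical):
+
+     v16u8 out = PCKEV_XORI128_UB(tmp0, tmp1);
+     ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+*/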
+
+/* Description : Store 8x1 byte block to destination memory from input vector
+ Arguments : Inputs - in, pdst
+ Details : Index 0 double word element from 'in' vector is copied to the
+ GP register and stored to (pdst)
+*/
+#define ST8x1_UB(in, pdst) \
+ { \
+ uint64_t out0_m; \
+ \
+ out0_m = __msa_copy_u_d((v2i64)in, 0); \
+ SD(out0_m, pdst); \
+ }
+
+/* Description : Store 8x2 byte block to destination memory from input vector
+ Arguments : Inputs - in, pdst, stride
+ Details : Index 0 double word element from 'in' vector is copied to the
+ GP register and stored to (pdst)
+ Index 1 double word element from 'in' vector is copied to the
+ GP register and stored to (pdst + stride)
+*/
+#define ST8x2_UB(in, pdst, stride) \
+ { \
+ uint64_t out0_m, out1_m; \
+ uint8_t *pblk_8x2_m = (uint8_t *)(pdst); \
+ \
+ out0_m = __msa_copy_u_d((v2i64)in, 0); \
+ out1_m = __msa_copy_u_d((v2i64)in, 1); \
+ \
+ SD(out0_m, pblk_8x2_m); \
+ SD(out1_m, pblk_8x2_m + stride); \
+ }
+
+/* Description : Store 8x4 byte block to destination memory from input
+ vectors
+ Arguments : Inputs - in0, in1, pdst, stride
+ Details : Index 0 double word element from 'in0' vector is copied to the
+ GP register and stored to (pdst)
+ Index 1 double word element from 'in0' vector is copied to the
+ GP register and stored to (pdst + stride)
+ Index 0 double word element from 'in1' vector is copied to the
+ GP register and stored to (pdst + 2 * stride)
+ Index 1 double word element from 'in1' vector is copied to the
+ GP register and stored to (pdst + 3 * stride)
+*/
+#define ST8x4_UB(in0, in1, pdst, stride) \
+ { \
+ uint64_t out0_m, out1_m, out2_m, out3_m; \
+ uint8_t *pblk_8x4_m = (uint8_t *)(pdst); \
+ \
+ out0_m = __msa_copy_u_d((v2i64)in0, 0); \
+ out1_m = __msa_copy_u_d((v2i64)in0, 1); \
+ out2_m = __msa_copy_u_d((v2i64)in1, 0); \
+ out3_m = __msa_copy_u_d((v2i64)in1, 1); \
+ \
+ SD4(out0_m, out1_m, out2_m, out3_m, pblk_8x4_m, stride); \
+ }
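+
+/* Usage sketch, mirroring the tail of the 8-wide filter loops: round,
+   saturate, pack to bytes, and store four rows (tmp0..tmp3 are v8i16
+   filter outputs; dst/dst_stride are hypothetical):
+
+     SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+     SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7);
+     out0 = PCKEV_XORI128_UB(tmp0, tmp1);
+     out1 = PCKEV_XORI128_UB(tmp2, tmp3);
+     ST8x4_UB(out0, out1, dst, dst_stride);
+*/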
+
+/* Description : Immediate number of elements to slide with zero
+ Arguments : Inputs - in0, in1, slide_val
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+   Details     : Byte elements of 'in0' are slid down by 'slide_val'
+                 positions; the vacated top positions are filled with
+                 zeros from 'zero_m'
+*/
+#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val) \
+ { \
+ v16i8 zero_m = { 0 }; \
+ \
+ out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \
+ out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \
+ }
+#define SLDI_B2_0_UB(...) SLDI_B2_0(v16u8, __VA_ARGS__)
+
+/* Description : Immediate number of elements to slide
+ Arguments : Inputs - in0_0, in0_1, in1_0, in1_1, slide_val
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+   Details     : Byte elements of 'in1_0' are slid down by 'slide_val'
+                 positions; the vacated top positions are filled with the
+                 low bytes of 'in0_0'
+*/
+#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) \
+ { \
+ out0 = (RTYPE)__msa_sldi_b((v16i8)in0_0, (v16i8)in1_0, slide_val); \
+ out1 = (RTYPE)__msa_sldi_b((v16i8)in0_1, (v16i8)in1_1, slide_val); \
+ }
+
+#define SLDI_B3(RTYPE, in0_0, in0_1, in0_2, in1_0, in1_1, in1_2, out0, out1, \
+ out2, slide_val) \
+ { \
+ SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val); \
+ out2 = (RTYPE)__msa_sldi_b((v16i8)in0_2, (v16i8)in1_2, slide_val); \
+ }
+#define SLDI_B3_UH(...) SLDI_B3(v8u16, __VA_ARGS__)
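+
+/* Usage sketch: the 4-wide two-dimensional filters use __msa_sldi_b
+   directly to stitch the upper half of one horizontal output to the
+   lower half of the next, reconstructing the intermediate row:
+
+     hz_out2 = (v8i16)__msa_sldi_b((v16i8)hz_out3, (v16i8)hz_out1, 8);
+*/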
+
+/* Description : Shuffle byte vector elements as per mask vector
+ Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Byte elements from 'in0' & 'in1' are copied selectively to
+ 'out0' as per control vector 'mask0'
+*/
+#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \
+ out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
+ }
+#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
+#define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
+#define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
+
+#define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \
+ out0, out1, out2) \
+ { \
+ VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \
+ out2 = (RTYPE)__msa_vshf_b((v16i8)mask2, (v16i8)in5, (v16i8)in4); \
+ }
+#define VSHF_B3_SB(...) VSHF_B3(v16i8, __VA_ARGS__)
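+
+/* Usage sketch (a sketch of how the HORIZ_*TAP_FILT helpers are put
+   together, not a verbatim copy): mask0/mask1 from vp8_mc_filt_mask_arr
+   select adjacent byte pairs so each shuffle output feeds one
+   dot-product tap pair:
+
+     v16i8 vec0, vec1;
+     VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
+*/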
+
+/* Description : Shuffle halfword vector elements as per mask vector
+ Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : halfword elements from 'in0' & 'in1' are copied selectively to
+ 'out0' as per control vector 'mask0'
+*/
+#define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_vshf_h((v8i16)mask0, (v8i16)in1, (v8i16)in0); \
+ out1 = (RTYPE)__msa_vshf_h((v8i16)mask1, (v8i16)in3, (v8i16)in2); \
+ }
+#define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__)
+
+/* Description : Dot product of byte vector elements
+ Arguments : Inputs - mult0, mult1, cnst0, cnst1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Unsigned byte elements from 'mult0' are multiplied with
+ unsigned byte elements from 'cnst0' producing a result
+ twice the size of input i.e. unsigned halfword.
+                 The multiplication results of adjacent odd-even elements
+                 are added together and written to the 'out0' vector
+*/
+#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_dotp_u_h((v16u8)mult0, (v16u8)cnst0); \
+ out1 = (RTYPE)__msa_dotp_u_h((v16u8)mult1, (v16u8)cnst1); \
+ }
+#define DOTP_UB2_UH(...) DOTP_UB2(v8u16, __VA_ARGS__)
+
+#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
+ cnst3, out0, out1, out2, out3) \
+ { \
+ DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
+ DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
+ }
+#define DOTP_UB4_UH(...) DOTP_UB4(v8u16, __VA_ARGS__)
+
+/* Description : Dot product of byte vector elements
+ Arguments : Inputs - mult0, mult1, cnst0, cnst1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Signed byte elements from 'mult0' are multiplied with
+ signed byte elements from 'cnst0' producing a result
+ twice the size of input i.e. signed halfword.
+                 The multiplication results of adjacent odd-even elements
+                 are added together and written to the 'out0' vector
+*/
+#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \
+ out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \
+ }
+#define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)
+
+#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
+ cnst3, out0, out1, out2, out3) \
+ { \
+ DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
+ DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
+ }
+#define DOTP_SB4_SH(...) DOTP_SB4(v8i16, __VA_ARGS__)
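+
+/* Usage sketch: with the source bytes biased by 128 (the XORI_B*_128
+   calls in the filters), pixels and taps both live in the signed byte
+   domain and one dot product covers two taps per output halfword:
+
+     v8i16 sum0, sum1;
+     DOTP_SB2_SH(vec0, vec1, filt0, filt0, sum0, sum1);
+*/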
+
+/* Description : Dot product of halfword vector elements
+ Arguments : Inputs - mult0, mult1, cnst0, cnst1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Signed halfword elements from 'mult0' are multiplied with
+ signed halfword elements from 'cnst0' producing a result
+ twice the size of input i.e. signed word.
+                 The multiplication results of adjacent odd-even elements
+                 are added together and written to the 'out0' vector
+*/
+#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \
+ out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \
+ }
+
+#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
+ cnst3, out0, out1, out2, out3) \
+ { \
+ DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
+ DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
+ }
+#define DOTP_SH4_SW(...) DOTP_SH4(v4i32, __VA_ARGS__)
+
+/* Description : Dot product of word vector elements
+ Arguments : Inputs - mult0, mult1, cnst0, cnst1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Signed word elements from 'mult0' are multiplied with
+ signed word elements from 'cnst0' producing a result
+ twice the size of input i.e. signed double word.
+                 The multiplication results of adjacent odd-even elements
+                 are added together and written to the 'out0' vector
+*/
+#define DOTP_SW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_dotp_s_d((v4i32)mult0, (v4i32)cnst0); \
+ out1 = (RTYPE)__msa_dotp_s_d((v4i32)mult1, (v4i32)cnst1); \
+ }
+#define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
+
+/* Description : Dot product & addition of byte vector elements
+ Arguments : Inputs - mult0, mult1, cnst0, cnst1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Signed byte elements from 'mult0' are multiplied with
+ signed byte elements from 'cnst0' producing a result
+ twice the size of input i.e. signed halfword.
+                 The multiplication results of adjacent odd-even elements
+                 are added to the 'out0' vector
+*/
+#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_dpadd_s_h((v8i16)out0, (v16i8)mult0, (v16i8)cnst0); \
+ out1 = (RTYPE)__msa_dpadd_s_h((v8i16)out1, (v16i8)mult1, (v16i8)cnst1); \
+ }
+#define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__)
+
+#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
+ cnst3, out0, out1, out2, out3) \
+ { \
+ DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
+ DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
+ }
+#define DPADD_SB4_SH(...) DPADD_SB4(v8i16, __VA_ARGS__)
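+
+/* Usage sketch: the FILT_4TAP_DPADD_S_H helper used throughout the
+   filters follows this pattern: one dot product for the first tap pair,
+   then a dpadd accumulating the second pair:
+
+     v8i16 sum = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0);
+     sum = __msa_dpadd_s_h(sum, (v16i8)vec1, (v16i8)filt1);
+*/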
+
+/* Description : Dot product & addition of halfword vector elements
+ Arguments : Inputs - mult0, mult1, cnst0, cnst1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Signed halfword elements from 'mult0' are multiplied with
+ signed halfword elements from 'cnst0' producing a result
+ twice the size of input i.e. signed word.
+                 The multiplication results of adjacent odd-even elements
+                 are added to the 'out0' vector
+*/
+#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \
+ out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \
+ }
+#define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)
+
+#define DPADD_SH4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
+ cnst3, out0, out1, out2, out3) \
+ { \
+ DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
+ DPADD_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
+ }
+#define DPADD_SH4_SW(...) DPADD_SH4(v4i32, __VA_ARGS__)
+
+/* Description : Dot product & addition of double word vector elements
+ Arguments : Inputs - mult0, mult1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+   Details     : Each signed word element from 'mult0' is multiplied with
+                 itself, producing an intermediate result twice its size,
+                 i.e. signed double word
+                 The multiplication results of adjacent odd-even elements
+                 are added to the 'out0' vector
+*/
+#define DPADD_SD2(RTYPE, mult0, mult1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \
+ out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \
+ }
+#define DPADD_SD2_SD(...) DPADD_SD2(v2i64, __VA_ARGS__)
+
+/* Description : Clips all signed halfword elements of input vector
+ between 0 & 255
+ Arguments : Input - in
+ Output - out_m
+ Return Type - signed halfword
+*/
+#define CLIP_SH_0_255(in) \
+ ({ \
+ v8i16 max_m = __msa_ldi_h(255); \
+ v8i16 out_m; \
+ \
+ out_m = __msa_maxi_s_h((v8i16)in, 0); \
+ out_m = __msa_min_s_h((v8i16)max_m, (v8i16)out_m); \
+ out_m; \
+ })
+#define CLIP_SH2_0_255(in0, in1) \
+ { \
+ in0 = CLIP_SH_0_255(in0); \
+ in1 = CLIP_SH_0_255(in1); \
+ }
+#define CLIP_SH4_0_255(in0, in1, in2, in3) \
+ { \
+ CLIP_SH2_0_255(in0, in1); \
+ CLIP_SH2_0_255(in2, in3); \
+ }
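+
+/* Usage sketch (res is a hypothetical v8i16 of prediction-plus-residual
+   sums): clamp to the displayable 8-bit range before packing to bytes:
+
+     res = CLIP_SH_0_255(res);
+*/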
+
+/* Description : Clips all signed word elements of input vector
+ between 0 & 255
+ Arguments : Input - in
+ Output - out_m
+ Return Type - signed word
+*/
+#define CLIP_SW_0_255(in) \
+ ({ \
+ v4i32 max_m = __msa_ldi_w(255); \
+ v4i32 out_m; \
+ \
+ out_m = __msa_maxi_s_w((v4i32)in, 0); \
+ out_m = __msa_min_s_w((v4i32)max_m, (v4i32)out_m); \
+ out_m; \
+ })
+
+/* Description : Horizontal addition of 4 signed word elements of input vector
+ Arguments : Input - in (signed word vector)
+ Output - sum_m (i32 sum)
+ Return Type - signed word (GP)
+ Details : 4 signed word elements of 'in' vector are added together and
+ the resulting integer sum is returned
+*/
+#define HADD_SW_S32(in) \
+ ({ \
+ v2i64 res0_m, res1_m; \
+ int32_t sum_m; \
+ \
+ res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in); \
+ res1_m = __msa_splati_d(res0_m, 1); \
+ res0_m = res0_m + res1_m; \
+ sum_m = __msa_copy_s_w((v4i32)res0_m, 0); \
+ sum_m; \
+ })
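+
+/* Usage sketch (acc is a hypothetical v4i32 accumulator, e.g. a running
+   sum of squared differences): reduce to a scalar for the caller:
+
+     int32_t total = HADD_SW_S32(acc);
+*/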
+
+/* Description : Horizontal addition of 8 unsigned halfword elements
+ Arguments : Inputs - in (unsigned halfword vector)
+ Outputs - sum_m (u32 sum)
+ Return Type - unsigned word
+ Details : 8 unsigned halfword elements of input vector are added
+ together and the resulting integer sum is returned
+*/
+#define HADD_UH_U32(in) \
+ ({ \
+ v4u32 res_m; \
+ v2u64 res0_m, res1_m; \
+ uint32_t sum_m; \
+ \
+ res_m = __msa_hadd_u_w((v8u16)in, (v8u16)in); \
+ res0_m = __msa_hadd_u_d(res_m, res_m); \
+ res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1); \
+ res0_m = res0_m + res1_m; \
+ sum_m = __msa_copy_u_w((v4i32)res0_m, 0); \
+ sum_m; \
+ })
+
+/* Description : Horizontal addition of unsigned byte vector elements
+ Arguments : Inputs - in0, in1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+   Details     : Each unsigned odd byte element from 'in0' is added to the
+                 adjacent even element (pairwise) and the halfword result
+                 is written to 'out0'
+*/
+#define HADD_UB2(RTYPE, in0, in1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_hadd_u_h((v16u8)in0, (v16u8)in0); \
+ out1 = (RTYPE)__msa_hadd_u_h((v16u8)in1, (v16u8)in1); \
+ }
+#define HADD_UB2_UH(...) HADD_UB2(v8u16, __VA_ARGS__)
+
+/* Description : Horizontal subtraction of unsigned byte vector elements
+ Arguments : Inputs - in0, in1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+   Details     : Each even unsigned byte element from 'in0' is subtracted
+                 from the adjacent odd element (odd minus even) and the
+                 halfword result is written to 'out0'
+*/
+#define HSUB_UB2(RTYPE, in0, in1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \
+ out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \
+ }
+#define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)
+
+/* Description : Horizontal subtraction of signed halfword vector elements
+ Arguments : Inputs - in0, in1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+   Details     : Each even signed halfword element from 'in0' is subtracted
+                 from the adjacent odd element (odd minus even) and the
+                 word result is written to 'out0'
+*/
+#define HSUB_UH2(RTYPE, in0, in1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_hsub_s_w((v8i16)in0, (v8i16)in0); \
+ out1 = (RTYPE)__msa_hsub_s_w((v8i16)in1, (v8i16)in1); \
+ }
+#define HSUB_UH2_SW(...) HSUB_UH2(v4i32, __VA_ARGS__)
+
+/* Description : Set element n of output vector to GPR value
+   Arguments   : Inputs - in0, in1
+                 Output - out
+   Return Type - as per RTYPE
+   Details     : Set element 0 in vector 'out' to the value in 'in0' and
+                 element 1 to the value in 'in1'
+*/
+#define INSERT_D2(RTYPE, in0, in1, out) \
+ { \
+ out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
+ out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
+ }
+#define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
+
+/* Description : Interleave even byte elements from vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Even byte elements of 'in0' and 'in1' are interleaved
+ and written to 'out0'
+*/
+#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \
+ out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \
+ }
+#define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
+#define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
+#define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
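+
+/* Usage sketch, as in the vertical passes of the two-dimensional
+   filters: even-byte interleaving pairs two adjacent horizontal output
+   rows ahead of the halfword dot product:
+
+     vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
+*/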
+
+/* Description : Interleave even halfword elements from vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Even halfword elements of 'in0' and 'in1' are interleaved
+ and written to 'out0'
+*/
+#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \
+ out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \
+ }
+#define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
+#define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
+
+/* Description : Interleave even word elements from vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Even word elements of 'in0' and 'in1' are interleaved
+ and written to 'out0'
+*/
+#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \
+ out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \
+ }
+#define ILVEV_W2_SD(...) ILVEV_W2(v2i64, __VA_ARGS__)
+
+/* Description : Interleave even double word elements from vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Even double word elements of 'in0' and 'in1' are interleaved
+ and written to 'out0'
+*/
+#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0); \
+ out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2); \
+ }
+#define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
+
+/* Description : Interleave left half of byte elements from vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Left half of byte elements of 'in0' and 'in1' are interleaved
+ and written to 'out0'.
+*/
+#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
+ out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3); \
+ }
+#define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
+#define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
+#define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)
+
+#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+ out2, out3) \
+ { \
+ ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
+ ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
+ }
+#define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__)
+#define ILVL_B4_SH(...) ILVL_B4(v8i16, __VA_ARGS__)
+
+/* Description : Interleave left half of halfword elements from vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Left half of halfword elements of 'in0' and 'in1' are
+ interleaved and written to 'out0'.
+*/
+#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
+ out1 = (RTYPE)__msa_ilvl_h((v8i16)in2, (v8i16)in3); \
+ }
+#define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__)
+#define ILVL_H2_SW(...) ILVL_H2(v4i32, __VA_ARGS__)
+
+/* Description : Interleave left half of word elements from vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Left half of word elements of 'in0' and 'in1' are interleaved
+ and written to 'out0'.
+*/
+#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
+ out1 = (RTYPE)__msa_ilvl_w((v4i32)in2, (v4i32)in3); \
+ }
+#define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__)
+
+/* Description : Interleave right half of byte elements from vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+   Details     : Right half of byte elements of 'in0' and 'in1' are interleaved
+                 and written to 'out0'.
+*/
+#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
+ out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \
+ }
+#define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
+#define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
+#define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
+#define ILVR_B2_SW(...) ILVR_B2(v4i32, __VA_ARGS__)
+
+#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+ out2, out3) \
+ { \
+ ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
+ ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
+ }
+#define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
+#define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
+#define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
+#define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
+#define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__)
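+
+/* Usage sketch, as in the 16-wide vertical filter: each pair of source
+   rows is split into right/left byte interleaves so the 16 columns can
+   be filtered as two 8-column halves (compare ILVL_B4_SB above):
+
+     ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, src32_r,
+                src43_r, src54_r, src65_r);
+*/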
+
+/* Description : Interleave right half of halfword elements from vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Right half of halfword elements of 'in0' and 'in1' are
+ interleaved and written to 'out0'.
+*/
+#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
+ out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \
+ }
+#define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
+#define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)
+
+#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+ out2, out3) \
+ { \
+ ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
+ ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
+ }
+#define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
+#define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__)
+
+#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
+ out1 = (RTYPE)__msa_ilvr_w((v4i32)in2, (v4i32)in3); \
+ }
+#define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__)
+
+/* Description : Interleave right half of double word elements from vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Right half of double word elements of 'in0' and 'in1' are
+ interleaved and written to 'out0'.
+*/
+#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1)); \
+ out1 = (RTYPE)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3)); \
+ }
+#define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
+#define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
+#define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)
+
+#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+ out2, out3) \
+ { \
+ ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
+ ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
+ }
+#define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
+#define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)
+
+/* Description : Interleave both left and right half of input vectors
+ Arguments : Inputs - in0, in1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Right half of byte elements from 'in0' and 'in1' are
+ interleaved and written to 'out0'
+*/
+#define ILVRL_B2(RTYPE, in0, in1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
+ out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
+ }
+#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
+#define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
+#define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
+#define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
+
+#define ILVRL_H2(RTYPE, in0, in1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
+ out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
+ }
+#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
+#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
+
+#define ILVRL_W2(RTYPE, in0, in1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
+ out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
+ }
+#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
+#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
+#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
+
+/* Description : Maximum values between signed elements of vector and
+ 5-bit signed immediate value are copied to the output vector
+ Arguments : Inputs - in0, in1, in2, in3, max_val
+ Outputs - in place operation
+ Return Type - as per RTYPE
+ Details : Maximum of signed halfword element values from 'in0' and
+ 'max_val' are written in place
+*/
+#define MAXI_SH2(RTYPE, in0, in1, max_val) \
+ { \
+ in0 = (RTYPE)__msa_maxi_s_h((v8i16)in0, (max_val)); \
+ in1 = (RTYPE)__msa_maxi_s_h((v8i16)in1, (max_val)); \
+ }
+#define MAXI_SH2_SH(...) MAXI_SH2(v8i16, __VA_ARGS__)
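+/* Editor's illustrative note (not upstream): a common use is clamping to a
+ * lower bound, e.g. MAXI_SH2_SH(sum0, sum1, 0) replaces every negative
+ * halfword element with 0.
+ */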
+
+/* Description : Saturate the halfword element values to the max
+ unsigned value of (sat_val + 1) bits
+ The element data width remains unchanged
+ Arguments : Inputs - in0, in1, sat_val
+ Outputs - in place operation
+ Return Type - as per RTYPE
+ Details : Each unsigned halfword element from 'in0' is saturated to the
+ value generated with (sat_val + 1) bit range.
+ The results are written in place
+*/
+#define SAT_UH2(RTYPE, in0, in1, sat_val) \
+ { \
+ in0 = (RTYPE)__msa_sat_u_h((v8u16)in0, sat_val); \
+ in1 = (RTYPE)__msa_sat_u_h((v8u16)in1, sat_val); \
+ }
+#define SAT_UH2_SH(...) SAT_UH2(v8i16, __VA_ARGS__)
+
+/* Description : Saturate the halfword element values to the max
+ signed value of (sat_val + 1) bits
+ The element data width remains unchanged
+ Arguments : Inputs - in0, in1, sat_val
+ Outputs - in place operation
+ Return Type - as per RTYPE
+ Details : Each signed halfword element from 'in0' is saturated to the
+ value generated with (sat_val + 1) bit range
+ The results are written in place
+*/
+#define SAT_SH2(RTYPE, in0, in1, sat_val) \
+ { \
+ in0 = (RTYPE)__msa_sat_s_h((v8i16)in0, sat_val); \
+ in1 = (RTYPE)__msa_sat_s_h((v8i16)in1, sat_val); \
+ }
+#define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__)
+
+#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \
+ { \
+ SAT_SH2(RTYPE, in0, in1, sat_val); \
+ SAT_SH2(RTYPE, in2, in3, sat_val); \
+ }
+#define SAT_SH4_SH(...) SAT_SH4(v8i16, __VA_ARGS__)
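+/* Editor's illustrative note (not upstream): e.g. SAT_SH2_SH(v0, v1, 7)
+ * saturates each signed halfword to the (7 + 1)-bit signed range
+ * [-128, 127], and SAT_UH2_SH(v0, v1, 7) to the unsigned range [0, 255].
+ */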
+
+/* Description : Indexed halfword element values are replicated to all
+ elements in output vector
+ Arguments : Inputs - in, idx0, idx1
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : 'idx0' element value from 'in' vector is replicated to all
+ elements in 'out0' vector
+ Valid index range for halfword operation is 0-7
+*/
+#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_splati_h((v8i16)in, idx0); \
+ out1 = (RTYPE)__msa_splati_h((v8i16)in, idx1); \
+ }
+#define SPLATI_H2_SB(...) SPLATI_H2(v16i8, __VA_ARGS__)
+#define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__)
+
+#define SPLATI_H3(RTYPE, in, idx0, idx1, idx2, out0, out1, out2) \
+ { \
+ SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \
+ out2 = (RTYPE)__msa_splati_h((v8i16)in, idx2); \
+ }
+#define SPLATI_H3_SB(...) SPLATI_H3(v16i8, __VA_ARGS__)
+#define SPLATI_H3_SH(...) SPLATI_H3(v8i16, __VA_ARGS__)
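+/* Editor's illustrative note (not upstream): typically used to broadcast
+ * filter taps, e.g. after loading the six 6-tap coefficients into one
+ * halfword vector 'filt', SPLATI_H3_SB(filt, 0, 1, 2, f0, f1, f2) yields
+ * one vector per coefficient pair.
+ */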
+
+/* Description : Indexed word element values are replicated to all
+ elements in output vector
+ Arguments : Inputs - in, stidx
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : 'stidx' element value from 'in' vector is replicated to all
+ elements in 'out0' vector
+ 'stidx + 1' element value from 'in' vector is replicated to all
+ elements in 'out1' vector
+ Valid index range for word operation is 0-3
+*/
+#define SPLATI_W2(RTYPE, in, stidx, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_splati_w((v4i32)in, stidx); \
+ out1 = (RTYPE)__msa_splati_w((v4i32)in, (stidx + 1)); \
+ }
+#define SPLATI_W2_SW(...) SPLATI_W2(v4i32, __VA_ARGS__)
+
+/* Description : Pack even byte elements of vector pairs
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Even byte elements of 'in0' are copied to the left half of
+ 'out0' & even byte elements of 'in1' are copied to the right
+ half of 'out0'.
+*/
+#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \
+ out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \
+ }
+#define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
+#define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
+#define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
+#define PCKEV_B2_SW(...) PCKEV_B2(v4i32, __VA_ARGS__)
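+/* Editor's illustrative note (not upstream): with halfword data, the even
+ * bytes are the low bytes of each element, so e.g.
+ * PCKEV_B2_SB(res1, res0, res3, res2, out0, out1);
+ * narrows four halfword vectors of byte-range results into two byte vectors.
+ */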
+
+#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+ out2, out3) \
+ { \
+ PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
+ PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
+ }
+#define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
+#define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
+#define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
+
+/* Description : Pack even halfword elements of vector pairs
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Even halfword elements of 'in0' are copied to the left half of
+ 'out0' & even halfword elements of 'in1' are copied to the
+ right half of 'out0'.
+*/
+#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \
+ out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \
+ }
+#define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
+
+#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
+ out2, out3) \
+ { \
+ PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
+ PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
+ }
+#define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__)
+
+/* Description : Pack even double word elements of vector pairs
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Even double word elements of 'in0' are copied to the left half
+ of 'out0' & even double word elements of 'in1' are copied to the right
+ half of 'out0'.
+*/
+#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_pckev_d((v2i64)in0, (v2i64)in1); \
+ out1 = (RTYPE)__msa_pckev_d((v2i64)in2, (v2i64)in3); \
+ }
+#define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__)
+#define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__)
+
+/* Description : Pack odd double word elements of vector pairs
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Return Type - as per RTYPE
+ Details : Odd double word elements of 'in0' are copied to the left half
+ of 'out0' & odd double word elements of 'in1' are copied to
+ the right half of 'out0'.
+*/
+#define PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = (RTYPE)__msa_pckod_d((v2i64)in0, (v2i64)in1); \
+ out1 = (RTYPE)__msa_pckod_d((v2i64)in2, (v2i64)in3); \
+ }
+#define PCKOD_D2_UB(...) PCKOD_D2(v16u8, __VA_ARGS__)
+#define PCKOD_D2_SH(...) PCKOD_D2(v8i16, __VA_ARGS__)
+
+/* Description : Each byte element is logically xor'ed with immediate 128
+ Arguments : Inputs - in0, in1
+ Outputs - in place operation
+ Return Type - as per RTYPE
+ Details : Each unsigned byte element from input vector 'in0' is
+ logically xor'ed with 128 and the result is stored in-place.
+*/
+#define XORI_B2_128(RTYPE, in0, in1) \
+ { \
+ in0 = (RTYPE)__msa_xori_b((v16u8)in0, 128); \
+ in1 = (RTYPE)__msa_xori_b((v16u8)in1, 128); \
+ }
+#define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__)
+#define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__)
+
+#define XORI_B3_128(RTYPE, in0, in1, in2) \
+ { \
+ XORI_B2_128(RTYPE, in0, in1); \
+ in2 = (RTYPE)__msa_xori_b((v16u8)in2, 128); \
+ }
+#define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__)
+
+#define XORI_B4_128(RTYPE, in0, in1, in2, in3) \
+ { \
+ XORI_B2_128(RTYPE, in0, in1); \
+ XORI_B2_128(RTYPE, in2, in3); \
+ }
+#define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__)
+#define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__)
+
+#define XORI_B5_128(RTYPE, in0, in1, in2, in3, in4) \
+ { \
+ XORI_B3_128(RTYPE, in0, in1, in2); \
+ XORI_B2_128(RTYPE, in3, in4); \
+ }
+#define XORI_B5_128_SB(...) XORI_B5_128(v16i8, __VA_ARGS__)
+
+#define XORI_B8_128(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7) \
+ { \
+ XORI_B4_128(RTYPE, in0, in1, in2, in3); \
+ XORI_B4_128(RTYPE, in4, in5, in6, in7); \
+ }
+#define XORI_B8_128_SB(...) XORI_B8_128(v16i8, __VA_ARGS__)
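+/* Editor's illustrative note (not upstream): XOR with 128 flips the sign
+ * bit, mapping unsigned pixels [0, 255] to signed bytes [-128, 127] so the
+ * filters can use signed dot products; PCKEV_XORI128_UB further below
+ * reverses the mapping.
+ */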
+
+/* Description : Shift left all elements of vector (generic for all data types)
+ Arguments : Inputs - in0, in1, in2, in3, shift
+ Outputs - in place operation
+ Return Type - as per input vector RTYPE
+ Details : Each element of vector 'in0' is left shifted by 'shift' and
+ the result is written in-place.
+*/
+#define SLLI_4V(in0, in1, in2, in3, shift) \
+ { \
+ in0 = in0 << shift; \
+ in1 = in1 << shift; \
+ in2 = in2 << shift; \
+ in3 = in3 << shift; \
+ }
+
+/* Description : Arithmetic shift right all elements of vector
+ (generic for all data types)
+ Arguments : Inputs - in0, in1, in2, in3, shift
+ Outputs - in place operation
+ Return Type - as per input vector RTYPE
+ Details : Each element of vector 'in0' is right shifted by 'shift' and
+ the result is written in-place. 'shift' is a GP variable.
+*/
+#define SRA_4V(in0, in1, in2, in3, shift) \
+ { \
+ in0 = in0 >> shift; \
+ in1 = in1 >> shift; \
+ in2 = in2 >> shift; \
+ in3 = in3 >> shift; \
+ }
+
+/* Description : Shift right arithmetic rounded words
+ Arguments : Inputs - in0, in1, shift
+ Outputs - in place operation
+ Return Type - as per RTYPE
+ Details : Each element of vector 'in0' is shifted right arithmetically by
+ the number of bits in the corresponding element in the vector
+ 'shift'. The last discarded bit is added to shifted value for
+ rounding and the result is written in-place.
+ 'shift' is a vector.
+*/
+#define SRAR_W2(RTYPE, in0, in1, shift) \
+ { \
+ in0 = (RTYPE)__msa_srar_w((v4i32)in0, (v4i32)shift); \
+ in1 = (RTYPE)__msa_srar_w((v4i32)in1, (v4i32)shift); \
+ }
+
+#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \
+ { \
+ SRAR_W2(RTYPE, in0, in1, shift); \
+ SRAR_W2(RTYPE, in2, in3, shift); \
+ }
+#define SRAR_W4_SW(...) SRAR_W4(v4i32, __VA_ARGS__)
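+/* Editor's illustrative note (not upstream): for shift > 0 the rounded
+ * shift is equivalent to out = (in + (1 << (shift - 1))) >> shift,
+ * e.g. 7 >> 2 truncates to 1 while srar(7, 2) = (7 + 2) >> 2 = 2.
+ */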
+
+/* Description : Shift right arithmetic rounded (immediate)
+ Arguments : Inputs - in0, in1, shift
+ Outputs - in place operation
+ Return Type - as per RTYPE
+ Details : Each element of vector 'in0' is shifted right arithmetically by
+ the value in 'shift'. The last discarded bit is added to the
+ shifted value for rounding and the result is written in-place.
+ 'shift' is an immediate value.
+*/
+#define SRARI_H2(RTYPE, in0, in1, shift) \
+ { \
+ in0 = (RTYPE)__msa_srari_h((v8i16)in0, shift); \
+ in1 = (RTYPE)__msa_srari_h((v8i16)in1, shift); \
+ }
+#define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__)
+#define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__)
+
+#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \
+ { \
+ SRARI_H2(RTYPE, in0, in1, shift); \
+ SRARI_H2(RTYPE, in2, in3, shift); \
+ }
+#define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__)
+#define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__)
+
+#define SRARI_W2(RTYPE, in0, in1, shift) \
+ { \
+ in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \
+ in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \
+ }
+
+#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \
+ { \
+ SRARI_W2(RTYPE, in0, in1, shift); \
+ SRARI_W2(RTYPE, in2, in3, shift); \
+ }
+#define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
+
+/* Description : Multiplication of pairs of vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Details : Each element of 'in0' is multiplied by the corresponding
+ element of 'in1' and the result is written to 'out0'
+*/
+#define MUL2(in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = in0 * in1; \
+ out1 = in2 * in3; \
+ }
+#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
+ { \
+ MUL2(in0, in1, in2, in3, out0, out1); \
+ MUL2(in4, in5, in6, in7, out2, out3); \
+ }
+
+/* Description : Addition of 2 pairs of vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Details : Each element of 'in0' is added to the corresponding element
+ of 'in1' and the result is written to 'out0'.
+*/
+#define ADD2(in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = in0 + in1; \
+ out1 = in2 + in3; \
+ }
+#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
+ { \
+ ADD2(in0, in1, in2, in3, out0, out1); \
+ ADD2(in4, in5, in6, in7, out2, out3); \
+ }
+
+/* Description : Subtraction of 2 pairs of vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1
+ Details : Each element of 'in1' is subtracted from the corresponding
+ element of 'in0' and the result is written to 'out0'.
+*/
+#define SUB2(in0, in1, in2, in3, out0, out1) \
+ { \
+ out0 = in0 - in1; \
+ out1 = in2 - in3; \
+ }
+#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
+ { \
+ out0 = in0 - in1; \
+ out1 = in2 - in3; \
+ out2 = in4 - in5; \
+ out3 = in6 - in7; \
+ }
+
+/* Description : Sign extend halfword elements from right half of the vector
+ Arguments : Input - in (halfword vector)
+ Output - out (sign extended word vector)
+ Return Type - signed word
+ Details : Sign bit of halfword elements from input vector 'in' is
+ extracted and interleaved with the same vector 'in' to generate
+ 4 word elements keeping sign intact
+*/
+#define UNPCK_R_SH_SW(in, out) \
+ { \
+ v8i16 sign_m; \
+ \
+ sign_m = __msa_clti_s_h((v8i16)in, 0); \
+ out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in); \
+ }
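+/* Editor's illustrative note (not upstream): __msa_clti_s_h(in, 0) is all
+ * ones (0xFFFF) for negative elements and 0 otherwise, so interleaving it
+ * above each halfword reproduces two's-complement sign extension, e.g. the
+ * halfword -3 (0xFFFD) becomes the word 0xFFFFFFFD.
+ */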
+
+/* Description : Zero extend unsigned byte elements to halfword elements
+ Arguments : Input - in (unsigned byte vector)
+ Outputs - out0, out1 (unsigned halfword vectors)
+ Return Type - signed halfword
+ Details : Zero extended right half of vector is returned in 'out0'
+ Zero extended left half of vector is returned in 'out1'
+*/
+#define UNPCK_UB_SH(in, out0, out1) \
+ { \
+ v16i8 zero_m = { 0 }; \
+ \
+ ILVRL_B2_SH(zero_m, in, out0, out1); \
+ }
+
+/* Description : Sign extend halfword elements from input vector and return
+ the result in pair of vectors
+ Arguments : Input - in (halfword vector)
+ Outputs - out0, out1 (sign extended word vectors)
+ Return Type - signed word
+ Details : Sign bit of halfword elements from input vector 'in' is
+ extracted and interleaved right with the same vector 'in' to
+ generate 4 signed word elements in 'out0'
+ Then interleaved left with the same vector 'in' to
+ generate 4 signed word elements in 'out1'
+*/
+#define UNPCK_SH_SW(in, out0, out1) \
+ { \
+ v8i16 tmp_m; \
+ \
+ tmp_m = __msa_clti_s_h((v8i16)in, 0); \
+ ILVRL_H2_SW(tmp_m, in, out0, out1); \
+ }
+
+/* Description : Butterfly of 4 input vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1, out2, out3
+ Details : Add/subtract stage: out0 = in0 + in3, out1 = in1 + in2,
+ out2 = in1 - in2, out3 = in0 - in3
+*/
+#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ out0 = in0 + in3; \
+ out1 = in1 + in2; \
+ \
+ out2 = in1 - in2; \
+ out3 = in0 - in3; \
+ }
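+/* Editor's illustrative note (not upstream): this is the add/sub stage used
+ * by the 4x4 transforms, e.g. inputs (1, 2, 3, 4) produce
+ * (1+4, 2+3, 2-3, 1-4) = (5, 5, -1, -3).
+ */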
+
+/* Description : Transpose input 8x8 byte block
+ Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
+ Outputs - out0, out1, out2, out3, out4, out5, out6, out7
+ Return Type - as per RTYPE
+*/
+#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, \
+ out1, out2, out3, out4, out5, out6, out7) \
+ { \
+ v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
+ v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
+ \
+ ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5, tmp0_m, tmp1_m, tmp2_m, \
+ tmp3_m); \
+ ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m); \
+ ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m); \
+ ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2); \
+ ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6); \
+ SLDI_B2_0(RTYPE, out0, out2, out1, out3, 8); \
+ SLDI_B2_0(RTYPE, out4, out6, out5, out7, 8); \
+ }
+#define TRANSPOSE8x8_UB_UB(...) TRANSPOSE8x8_UB(v16u8, __VA_ARGS__)
+
+/* Description : Transpose 16x4 block into 4x16 with byte elements in vectors
+ Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7,
+ in8, in9, in10, in11, in12, in13, in14, in15
+ Outputs - out0, out1, out2, out3
+ Return Type - unsigned byte
+*/
+#define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \
+ in10, in11, in12, in13, in14, in15, out0, out1, \
+ out2, out3) \
+ { \
+ v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
+ \
+ ILVEV_W2_SD(in0, in4, in8, in12, tmp0_m, tmp1_m); \
+ out1 = (v16u8)__msa_ilvev_d(tmp1_m, tmp0_m); \
+ \
+ ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m); \
+ out3 = (v16u8)__msa_ilvev_d(tmp1_m, tmp0_m); \
+ \
+ ILVEV_W2_SD(in2, in6, in10, in14, tmp0_m, tmp1_m); \
+ \
+ tmp2_m = __msa_ilvev_d(tmp1_m, tmp0_m); \
+ ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \
+ \
+ tmp3_m = __msa_ilvev_d(tmp1_m, tmp0_m); \
+ ILVEV_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m); \
+ out0 = (v16u8)__msa_ilvev_h((v8i16)tmp1_m, (v8i16)tmp0_m); \
+ out2 = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \
+ \
+ tmp0_m = (v2i64)__msa_ilvod_b((v16i8)out3, (v16i8)out1); \
+ tmp1_m = (v2i64)__msa_ilvod_b((v16i8)tmp3_m, (v16i8)tmp2_m); \
+ out1 = (v16u8)__msa_ilvev_h((v8i16)tmp1_m, (v8i16)tmp0_m); \
+ out3 = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \
+ }
+
+/* Description : Transpose 16x8 block into 8x16 with byte elements in vectors
+ Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7,
+ in8, in9, in10, in11, in12, in13, in14, in15
+ Outputs - out0, out1, out2, out3, out4, out5, out6, out7
+ Return Type - unsigned byte
+*/
+#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \
+ in10, in11, in12, in13, in14, in15, out0, out1, \
+ out2, out3, out4, out5, out6, out7) \
+ { \
+ v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
+ v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
+ \
+ ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \
+ ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
+ ILVEV_D2_UB(in4, in12, in5, in13, out3, out2); \
+ ILVEV_D2_UB(in6, in14, in7, in15, out1, out0); \
+ \
+ tmp0_m = (v16u8)__msa_ilvev_b((v16i8)out6, (v16i8)out7); \
+ tmp4_m = (v16u8)__msa_ilvod_b((v16i8)out6, (v16i8)out7); \
+ tmp1_m = (v16u8)__msa_ilvev_b((v16i8)out4, (v16i8)out5); \
+ tmp5_m = (v16u8)__msa_ilvod_b((v16i8)out4, (v16i8)out5); \
+ out5 = (v16u8)__msa_ilvev_b((v16i8)out2, (v16i8)out3); \
+ tmp6_m = (v16u8)__msa_ilvod_b((v16i8)out2, (v16i8)out3); \
+ out7 = (v16u8)__msa_ilvev_b((v16i8)out0, (v16i8)out1); \
+ tmp7_m = (v16u8)__msa_ilvod_b((v16i8)out0, (v16i8)out1); \
+ \
+ ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \
+ out0 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
+ out4 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
+ \
+ tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \
+ tmp3_m = (v16u8)__msa_ilvod_h((v8i16)out7, (v8i16)out5); \
+ out2 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
+ out6 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
+ \
+ ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \
+ out1 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
+ out5 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
+ \
+ tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m); \
+ tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \
+ out3 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
+ out7 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
+ }
+
+/* Description : Transpose 4x4 block with half word elements in vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1, out2, out3
+ Return Type - signed halfword
+*/
+#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ v8i16 s0_m, s1_m; \
+ \
+ ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \
+ ILVRL_W2_SH(s1_m, s0_m, out0, out2); \
+ out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
+ out3 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out2); \
+ }
+
+/* Description : Transpose 8x4 block with half word elements in vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1, out2, out3
+ Return Type - signed halfword
+*/
+#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
+ \
+ ILVR_H2_SH(in1, in0, in3, in2, tmp0_m, tmp1_m); \
+ ILVL_H2_SH(in1, in0, in3, in2, tmp2_m, tmp3_m); \
+ ILVR_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2); \
+ ILVL_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3); \
+ }
+
+/* Description : Transpose 4x4 block with word elements in vectors
+ Arguments : Inputs - in0, in1, in2, in3
+ Outputs - out0, out1, out2, out3
+ Return Type - signed word
+*/
+#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ v4i32 s0_m, s1_m, s2_m, s3_m; \
+ \
+ ILVRL_W2_SW(in1, in0, s0_m, s1_m); \
+ ILVRL_W2_SW(in3, in2, s2_m, s3_m); \
+ \
+ out0 = (v4i32)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m); \
+ out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m); \
+ out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m); \
+ out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m); \
+ }
+
+/* Description : Dot product and addition of 3 signed halfword input vectors
+ Arguments : Inputs - in0, in1, in2, coeff0, coeff1, coeff2
+ Output - out0_m
+ Return Type - signed halfword
+ Details : Dot product of 'in0' with 'coeff0'
+ Dot product of 'in1' with 'coeff1'
+ Dot product of 'in2' with 'coeff2'
+ Addition of all the 3 vector results
+ out0_m = (in0 * coeff0) + (in1 * coeff1) + (in2 * coeff2)
+*/
+#define DPADD_SH3_SH(in0, in1, in2, coeff0, coeff1, coeff2) \
+ ({ \
+ v8i16 tmp1_m; \
+ v8i16 out0_m; \
+ \
+ out0_m = __msa_dotp_s_h((v16i8)in0, (v16i8)coeff0); \
+ out0_m = __msa_dpadd_s_h(out0_m, (v16i8)in1, (v16i8)coeff1); \
+ tmp1_m = __msa_dotp_s_h((v16i8)in2, (v16i8)coeff2); \
+ out0_m = __msa_adds_s_h(out0_m, tmp1_m); \
+ \
+ out0_m; \
+ })
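+/* Editor's illustrative sketch (hypothetical names, not upstream): the 6-tap
+ * subpel filter becomes three 2-tap dot products,
+ * v8i16 sum = DPADD_SH3_SH(src0, src1, src2, filt0, filt1, filt2);
+ * where each srcN holds byte pairs matching the coefficient pair in filtN.
+ */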
+
+/* Description : Pack even elements of input vectors & xor with 128
+ Arguments : Inputs - in0, in1
+ Output - out_m
+ Return Type - unsigned byte
+ Details : Signed byte even elements from 'in0' and 'in1' are packed
+ together in one vector and the resulting vector is xor'ed with
+ 128 to shift the range from signed to unsigned byte
+*/
+#define PCKEV_XORI128_UB(in0, in1) \
+ ({ \
+ v16u8 out_m; \
+ out_m = (v16u8)__msa_pckev_b((v16i8)in1, (v16i8)in0); \
+ out_m = (v16u8)__msa_xori_b((v16u8)out_m, 128); \
+ out_m; \
+ })
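+/* Editor's illustrative note (not upstream): this is the inverse of the
+ * XORI_B*_128 step above, e.g.
+ * v16u8 out = PCKEV_XORI128_UB(res0, res1);
+ * packs two signed result vectors into bytes and returns them to the
+ * unsigned pixel range.
+ */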
+
+/* Description : Pack even byte elements and store byte vector in destination
+ memory
+ Arguments : Inputs - in0, in1, pdst
+*/
+#define PCKEV_ST_SB(in0, in1, pdst) \
+ { \
+ v16i8 tmp_m; \
+ tmp_m = __msa_pckev_b((v16i8)in1, (v16i8)in0); \
+ ST_SB(tmp_m, (pdst)); \
+ }
+
+/* Description : Horizontal 2 tap filter kernel code
+ Arguments : Inputs - in0, in1, mask, coeff, shift
+*/
+#define HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift) \
+ ({ \
+ v16i8 tmp0_m; \
+ v8u16 tmp1_m; \
+ \
+ tmp0_m = __msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0); \
+ tmp1_m = __msa_dotp_u_h((v16u8)tmp0_m, (v16u8)coeff); \
+ tmp1_m = (v8u16)__msa_srari_h((v8i16)tmp1_m, shift); \
+ \
+ tmp1_m; \
+ })
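+/* Editor's illustrative sketch (hypothetical arguments, not upstream): used
+ * by the bilinear predictors; 'mask' gathers adjacent pixel pairs, the
+ * unsigned dot product applies the 2-tap kernel and srari rounds, e.g.
+ * v8u16 out = HORIZ_2TAP_FILT_UH(src0, src0, mask, coeff, VP8_FILTER_SHIFT);
+ * with VP8_FILTER_SHIFT defined in vp8/common/filter.h.
+ */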
+#endif // VPX_VP8_COMMON_MIPS_MSA_VP8_MACROS_MSA_H_
diff --git a/media/libvpx/libvpx/vp8/common/modecont.c b/media/libvpx/libvpx/vp8/common/modecont.c
new file mode 100644
index 0000000000..bab410374f
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/modecont.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "entropy.h"
+
+const int vp8_mode_contexts[6][4] = {
+ { /* 0 */
+ 7, 1, 1, 143 },
+ { /* 1 */
+ 14, 18, 14, 107 },
+ { /* 2 */
+ 135, 64, 57, 68 },
+ { /* 3 */
+ 60, 56, 128, 65 },
+ { /* 4 */
+ 159, 134, 128, 34 },
+ { /* 5 */
+ 234, 188, 128, 28 },
+};
diff --git a/media/libvpx/libvpx/vp8/common/modecont.h b/media/libvpx/libvpx/vp8/common/modecont.h
new file mode 100644
index 0000000000..031f74f2ff
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/modecont.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_MODECONT_H_
+#define VPX_VP8_COMMON_MODECONT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern const int vp8_mode_contexts[6][4];
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_MODECONT_H_
diff --git a/media/libvpx/libvpx/vp8/common/mv.h b/media/libvpx/libvpx/vp8/common/mv.h
new file mode 100644
index 0000000000..4cde12f201
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/mv.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_MV_H_
+#define VPX_VP8_COMMON_MV_H_
+#include "vpx/vpx_integer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ short row;
+ short col;
+} MV;
+
+typedef union int_mv {
+ uint32_t as_int;
+ MV as_mv;
+} int_mv; /* facilitates faster equality tests and copies */
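+/* Editor's illustrative note (not upstream): e.g. two motion vectors can be
+ * compared in one integer operation, if (a.as_int == b.as_int) { ... },
+ * instead of comparing row and col separately. */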
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_MV_H_
diff --git a/media/libvpx/libvpx/vp8/common/onyx.h b/media/libvpx/libvpx/vp8/common/onyx.h
new file mode 100644
index 0000000000..8c35e433e7
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/onyx.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_ONYX_H_
+#define VPX_VP8_COMMON_ONYX_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "vpx_config.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx/vp8cx.h"
+#include "vpx/vpx_encoder.h"
+#include "vpx_scale/yv12config.h"
+#include "ppflags.h"
+
+struct VP8_COMP;
+
+/* Create/destroy static data structures. */
+
+typedef enum {
+ USAGE_LOCAL_FILE_PLAYBACK = 0x0,
+ USAGE_STREAM_FROM_SERVER = 0x1,
+ USAGE_CONSTRAINED_QUALITY = 0x2,
+ USAGE_CONSTANT_QUALITY = 0x3
+} END_USAGE;
+
+typedef enum {
+ MODE_REALTIME = 0x0,
+ MODE_GOODQUALITY = 0x1,
+ MODE_BESTQUALITY = 0x2,
+ MODE_FIRSTPASS = 0x3,
+ MODE_SECONDPASS = 0x4,
+ MODE_SECONDPASS_BEST = 0x5
+} MODE;
+
+typedef enum {
+ FRAMEFLAGS_KEY = 1,
+ FRAMEFLAGS_GOLDEN = 2,
+ FRAMEFLAGS_ALTREF = 4
+} FRAMETYPE_FLAGS;
+
+#include <assert.h>
+static INLINE void Scale2Ratio(int mode, int *hr, int *hs) {
+ switch (mode) {
+ case VP8E_NORMAL:
+ *hr = 1;
+ *hs = 1;
+ break;
+ case VP8E_FOURFIVE:
+ *hr = 4;
+ *hs = 5;
+ break;
+ case VP8E_THREEFIVE:
+ *hr = 3;
+ *hs = 5;
+ break;
+ case VP8E_ONETWO:
+ *hr = 1;
+ *hs = 2;
+ break;
+ default:
+ *hr = 1;
+ *hs = 1;
+ assert(0);
+ break;
+ }
+}
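+/* Editor's illustrative note (not upstream): Scale2Ratio(VP8E_FOURFIVE, &hr,
+ * &hs) yields hr/hs = 4/5, i.e. the dimension is internally scaled to
+ * four fifths of its input size. */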
+
+typedef struct {
+ /* 4 versions of bitstream defined:
+ * 0 best quality/slowest decode, 3 lowest quality/fastest decode
+ */
+ int Version;
+ int Width;
+ int Height;
+ struct vpx_rational timebase;
+ unsigned int target_bandwidth; /* kilobits per second */
+
+ /* Parameter used for applying denoiser.
+ * For temporal denoiser: noise_sensitivity = 0 means off,
+ * noise_sensitivity = 1 means temporal denoiser on for Y channel only,
+ * noise_sensitivity = 2 means temporal denoiser on for all channels.
+ * noise_sensitivity = 3 means aggressive denoising mode.
+ * noise_sensitivity >= 4 means adaptive denoising mode.
+ * Temporal denoiser is enabled via the configuration option:
+ * CONFIG_TEMPORAL_DENOISING.
+ * For spatial denoiser: noise_sensitivity controls the amount of
+ * pre-processing blur: noise_sensitivity = 0 means off.
+ * Spatial denoiser invoked under !CONFIG_TEMPORAL_DENOISING.
+ */
+ int noise_sensitivity;
+
+ /* parameter used for sharpening output: recommended value 0 */
+ int Sharpness;
+ int cpu_used;
+ unsigned int rc_max_intra_bitrate_pct;
+ /* percent of rate boost for golden frame in CBR mode. */
+ unsigned int gf_cbr_boost_pct;
+ unsigned int screen_content_mode;
+
+ /* mode ->
+ *(0)=Realtime/Live Encoding. This mode is optimized for realtime
+ * encoding (for example, capturing a television signal or feed
+ * from a live camera). ( speed setting controls how fast )
+ *(1)=Good Quality Fast Encoding. The encoder balances quality with
+ * the amount of time it takes to encode the output. ( speed
+ * setting controls how fast )
+ *(2)=One Pass - Best Quality. The encoder places priority on the
+ * quality of the output over encoding speed. The output is
+ * compressed at the highest possible quality. This option takes
+ * the longest amount of time to encode. ( speed setting ignored
+ * )
+ *(3)=Two Pass - First Pass. The encoder generates a file of
+ * statistics for use in the second encoding pass. ( speed
+ * setting controls how fast )
+ *(4)=Two Pass - Second Pass. The encoder uses the statistics that
+ * were generated in the first encoding pass to create the
+ * compressed output. ( speed setting controls how fast )
+ *(5)=Two Pass - Second Pass Best. The encoder uses the statistics
+ * that were generated in the first encoding pass to create the
+ * compressed output using the highest possible quality, and
+ * taking a longer amount of time to encode. ( speed setting
+ * ignored )
+ */
+ int Mode;
+
+ /* Key Framing Operations */
+ int auto_key; /* automatically detect cut scenes */
+ int key_freq; /* maximum distance to key frame. */
+
+ /* lagged compression (if allow_lag == 0 lag_in_frames is ignored) */
+ int allow_lag;
+ int lag_in_frames; /* how many frames lag before we start encoding */
+
+ /*
+ * DATARATE CONTROL OPTIONS
+ */
+
+ int end_usage; /* vbr or cbr */
+
+ /* buffer targeting aggressiveness */
+ int under_shoot_pct;
+ int over_shoot_pct;
+
+ /* buffering parameters */
+ int64_t starting_buffer_level;
+ int64_t optimal_buffer_level;
+ int64_t maximum_buffer_size;
+
+ int64_t starting_buffer_level_in_ms;
+ int64_t optimal_buffer_level_in_ms;
+ int64_t maximum_buffer_size_in_ms;
+
+ /* controlling quality */
+ int fixed_q;
+ int worst_allowed_q;
+ int best_allowed_q;
+ int cq_level;
+
+ /* allow internal resizing */
+ int allow_spatial_resampling;
+ int resample_down_water_mark;
+ int resample_up_water_mark;
+
+ /* allow internal frame rate alterations */
+ int allow_df;
+ int drop_frames_water_mark;
+
+ /* two pass datarate control */
+ int two_pass_vbrbias;
+ int two_pass_vbrmin_section;
+ int two_pass_vbrmax_section;
+
+ /*
+ * END DATARATE CONTROL OPTIONS
+ */
+
+ /* these parameters are not to be used in the final build; don't use! */
+ int play_alternate;
+ int alt_freq;
+ int alt_q;
+ int key_q;
+ int gold_q;
+
+ int multi_threaded; /* how many threads to run the encoder on */
+ int token_partitions; /* how many token partitions to create */
+
+ /* early breakout threshold: for video conf recommend 800 */
+ int encode_breakout;
+
+ /* Bitfield defining the error resiliency features to enable.
+ * Can provide decodable frames after losses in previous
+ * frames and decodable partitions after losses in the same frame.
+ */
+ unsigned int error_resilient_mode;
+
+ int arnr_max_frames;
+ int arnr_strength;
+ int arnr_type;
+
+ vpx_fixed_buf_t two_pass_stats_in;
+ struct vpx_codec_pkt_list *output_pkt_list;
+
+ vp8e_tuning tuning;
+
+ /* Temporal scaling parameters */
+ unsigned int number_of_layers;
+ unsigned int target_bitrate[VPX_TS_MAX_PERIODICITY];
+ unsigned int rate_decimator[VPX_TS_MAX_PERIODICITY];
+ unsigned int periodicity;
+ unsigned int layer_id[VPX_TS_MAX_PERIODICITY];
+
+#if CONFIG_MULTI_RES_ENCODING
+ /* Number of total resolutions encoded */
+ unsigned int mr_total_resolutions;
+
+ /* Current encoder ID */
+ unsigned int mr_encoder_id;
+
+ /* Down-sampling factor */
+ vpx_rational_t mr_down_sampling_factor;
+
+ /* Memory location to store low-resolution encoder's mode info */
+ void *mr_low_res_mode_info;
+#endif
+} VP8_CONFIG;
+
+void vp8_initialize(void);
+
+struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf);
+void vp8_remove_compressor(struct VP8_COMP **comp);
+
+void vp8_init_config(struct VP8_COMP *onyx, VP8_CONFIG *oxcf);
+void vp8_change_config(struct VP8_COMP *cpi, VP8_CONFIG *oxcf);
+
+int vp8_receive_raw_frame(struct VP8_COMP *cpi, unsigned int frame_flags,
+ YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ int64_t end_time);
+int vp8_get_compressed_data(struct VP8_COMP *cpi, unsigned int *frame_flags,
+ size_t *size, unsigned char *dest,
+ unsigned char *dest_end, int64_t *time_stamp,
+ int64_t *time_end, int flush);
+int vp8_get_preview_raw_frame(struct VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
+ vp8_ppflags_t *flags);
+
+int vp8_use_as_reference(struct VP8_COMP *cpi, int ref_frame_flags);
+int vp8_update_reference(struct VP8_COMP *cpi, int ref_frame_flags);
+int vp8_get_reference(struct VP8_COMP *cpi,
+ enum vpx_ref_frame_type ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+int vp8_set_reference(struct VP8_COMP *cpi,
+ enum vpx_ref_frame_type ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+int vp8_update_entropy(struct VP8_COMP *cpi, int update);
+int vp8_set_roimap(struct VP8_COMP *cpi, unsigned char *map, unsigned int rows,
+ unsigned int cols, int delta_q[4], int delta_lf[4],
+ unsigned int threshold[4]);
+int vp8_set_active_map(struct VP8_COMP *cpi, unsigned char *map,
+ unsigned int rows, unsigned int cols);
+int vp8_set_internal_size(struct VP8_COMP *cpi, VPX_SCALING_MODE horiz_mode,
+ VPX_SCALING_MODE vert_mode);
+int vp8_get_quantizer(struct VP8_COMP *cpi);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VPX_VP8_COMMON_ONYX_H_
diff --git a/media/libvpx/libvpx/vp8/common/onyxc_int.h b/media/libvpx/libvpx/vp8/common/onyxc_int.h
new file mode 100644
index 0000000000..ef8d007620
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/onyxc_int.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_ONYXC_INT_H_
+#define VPX_VP8_COMMON_ONYXC_INT_H_
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "loopfilter.h"
+#include "entropymv.h"
+#include "entropy.h"
+#if CONFIG_POSTPROC
+#include "postproc.h"
+#endif
+
+/*#ifdef PACKET_TESTING*/
+#include "header.h"
+/*#endif*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MINQ 0
+#define MAXQ 127
+#define QINDEX_RANGE (MAXQ + 1)
+
+#define NUM_YV12_BUFFERS 4
+
+#define MAX_PARTITIONS 9
+
+typedef struct frame_contexts {
+ vp8_prob bmode_prob[VP8_BINTRAMODES - 1];
+ vp8_prob ymode_prob[VP8_YMODES - 1]; /* interframe intra mode probs */
+ vp8_prob uv_mode_prob[VP8_UV_MODES - 1];
+ vp8_prob sub_mv_ref_prob[VP8_SUBMVREFS - 1];
+ vp8_prob coef_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES];
+ MV_CONTEXT mvc[2];
+} FRAME_CONTEXT;
+
+typedef enum {
+ ONE_PARTITION = 0,
+ TWO_PARTITION = 1,
+ FOUR_PARTITION = 2,
+ EIGHT_PARTITION = 3
+} TOKEN_PARTITION;
+
+typedef enum {
+ RECON_CLAMP_REQUIRED = 0,
+ RECON_CLAMP_NOTREQUIRED = 1
+} CLAMP_TYPE;
+
+typedef struct VP8Common {
+ struct vpx_internal_error_info error;
+
+ DECLARE_ALIGNED(16, short, Y1dequant[QINDEX_RANGE][2]);
+ DECLARE_ALIGNED(16, short, Y2dequant[QINDEX_RANGE][2]);
+ DECLARE_ALIGNED(16, short, UVdequant[QINDEX_RANGE][2]);
+
+ int Width;
+ int Height;
+ int horiz_scale;
+ int vert_scale;
+
+ CLAMP_TYPE clamp_type;
+
+ YV12_BUFFER_CONFIG *frame_to_show;
+
+ YV12_BUFFER_CONFIG yv12_fb[NUM_YV12_BUFFERS];
+ int fb_idx_ref_cnt[NUM_YV12_BUFFERS];
+ int new_fb_idx, lst_fb_idx, gld_fb_idx, alt_fb_idx;
+
+ YV12_BUFFER_CONFIG temp_scale_frame;
+
+#if CONFIG_POSTPROC
+ YV12_BUFFER_CONFIG post_proc_buffer;
+ YV12_BUFFER_CONFIG post_proc_buffer_int;
+ int post_proc_buffer_int_used;
+ unsigned char *pp_limits_buffer; /* post-processing filter coefficients */
+#endif
+
+ FRAME_TYPE
+ last_frame_type; /* Save last frame's frame type for motion search. */
+ FRAME_TYPE frame_type;
+
+ int show_frame;
+
+ int frame_flags;
+ int MBs;
+ int mb_rows;
+ int mb_cols;
+ int mode_info_stride;
+
+ /* profile settings */
+ int mb_no_coeff_skip;
+ int no_lpf;
+ int use_bilinear_mc_filter;
+ int full_pixel;
+
+ int base_qindex;
+
+ int y1dc_delta_q;
+ int y2dc_delta_q;
+ int y2ac_delta_q;
+ int uvdc_delta_q;
+ int uvac_delta_q;
+
+ /* We allocate a MODE_INFO struct for each macroblock, together with
+ an extra row on top and column on the left to simplify prediction. */
+
+ MODE_INFO *mip; /* Base of allocated array */
+ MODE_INFO *mi; /* Corresponds to upper left visible macroblock */
+#if CONFIG_ERROR_CONCEALMENT
+ MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
+ MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
+#endif
+ /* MODE_INFO for the last decoded frame to show */
+ MODE_INFO *show_frame_mi;
+ LOOPFILTERTYPE filter_type;
+
+ loop_filter_info_n lf_info;
+
+ int filter_level;
+ int last_sharpness_level;
+ int sharpness_level;
+
+ int refresh_last_frame; /* Two state 0 = NO, 1 = YES */
+ int refresh_golden_frame; /* Two state 0 = NO, 1 = YES */
+ int refresh_alt_ref_frame; /* Two state 0 = NO, 1 = YES */
+
+ int copy_buffer_to_gf; /* 0 none, 1 Last to GF, 2 ARF to GF */
+ int copy_buffer_to_arf; /* 0 none, 1 Last to ARF, 2 GF to ARF */
+
+ int refresh_entropy_probs; /* Two state 0 = NO, 1 = YES */
+
+ int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */
+
+ /* Y,U,V,Y2 */
+ ENTROPY_CONTEXT_PLANES *above_context; /* row of context for each plane */
+ ENTROPY_CONTEXT_PLANES left_context; /* (up to) 4 contexts "" */
+
+ FRAME_CONTEXT lfc; /* last frame entropy */
+ FRAME_CONTEXT fc; /* this frame entropy */
+
+ unsigned int current_video_frame;
+
+ int version;
+
+ TOKEN_PARTITION multi_token_partition;
+
+#ifdef PACKET_TESTING
+ VP8_HEADER oh;
+#endif
+
+#if CONFIG_MULTITHREAD
+ int processor_core_count;
+#endif
+#if CONFIG_POSTPROC
+ struct postproc_state postproc_state;
+#endif
+ int cpu_caps;
+} VP8_COMMON;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_ONYXC_INT_H_
diff --git a/media/libvpx/libvpx/vp8/common/onyxd.h b/media/libvpx/libvpx/vp8/common/onyxd.h
new file mode 100644
index 0000000000..e4e81aaac5
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/onyxd.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_ONYXD_H_
+#define VPX_VP8_COMMON_ONYXD_H_
+
+/* Create/destroy static data structures. */
+#ifdef __cplusplus
+extern "C" {
+#endif
+#include "vpx_scale/yv12config.h"
+#include "ppflags.h"
+#include "vpx_ports/mem.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8.h"
+
+struct VP8D_COMP;
+struct VP8Common;
+
+typedef struct {
+ int Width;
+ int Height;
+ int Version;
+ int postprocess;
+ int max_threads;
+ int error_concealment;
+} VP8D_CONFIG;
+
+typedef enum { VP8D_OK = 0 } VP8D_SETTING;
+
+void vp8dx_initialize(void);
+
+void vp8dx_set_setting(struct VP8D_COMP *comp, VP8D_SETTING oxst, int x);
+
+int vp8dx_get_setting(struct VP8D_COMP *comp, VP8D_SETTING oxst);
+
+int vp8dx_receive_compressed_data(struct VP8D_COMP *pbi, int64_t time_stamp);
+int vp8dx_get_raw_frame(struct VP8D_COMP *pbi, YV12_BUFFER_CONFIG *sd,
+ int64_t *time_stamp, int64_t *time_end_stamp,
+ vp8_ppflags_t *flags);
+int vp8dx_references_buffer(struct VP8Common *oci, int ref_frame);
+
+vpx_codec_err_t vp8dx_get_reference(struct VP8D_COMP *pbi,
+ enum vpx_ref_frame_type ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+vpx_codec_err_t vp8dx_set_reference(struct VP8D_COMP *pbi,
+ enum vpx_ref_frame_type ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+int vp8dx_get_quantizer(const struct VP8D_COMP *pbi);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VPX_VP8_COMMON_ONYXD_H_
diff --git a/media/libvpx/libvpx/vp8/common/postproc.c b/media/libvpx/libvpx/vp8/common/postproc.c
new file mode 100644
index 0000000000..c03b16b2f5
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/postproc.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vpx_dsp_rtcd.h"
+#include "vp8_rtcd.h"
+#include "vpx_dsp/postproc.h"
+#include "vpx_ports/system_state.h"
+#include "vpx_scale_rtcd.h"
+#include "vpx_scale/yv12config.h"
+#include "postproc.h"
+#include "common.h"
+#include "vpx_scale/vpx_scale.h"
+#include "systemdependent.h"
+
+#include <limits.h>
+#include <math.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+/* clang-format off */
+#define RGB_TO_YUV(t) \
+ (unsigned char)((0.257 * (float)(t >> 16)) + \
+ (0.504 * (float)(t >> 8 & 0xff)) + \
+ (0.098 * (float)(t & 0xff)) + 16), \
+ (unsigned char)(-(0.148 * (float)(t >> 16)) - \
+ (0.291 * (float)(t >> 8 & 0xff)) + \
+ (0.439 * (float)(t & 0xff)) + 128), \
+ (unsigned char)((0.439 * (float)(t >> 16)) - \
+ (0.368 * (float)(t >> 8 & 0xff)) - \
+ (0.071 * (float)(t & 0xff)) + 128)
+/* clang-format on */
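+/* Editor's illustrative note (not upstream): expands a packed 0xRRGGBB value
+ * into a Y, U, V triple using BT.601 studio-swing equations, e.g.
+ * RGB_TO_YUV(0xFFFFFF) evaluates to 235, 128, 128 (video-range white). */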
+
+extern void vp8_blit_text(const char *msg, unsigned char *address,
+ const int pitch);
+extern void vp8_blit_line(int x0, int x1, int y0, int y1, unsigned char *image,
+ const int pitch);
+/***********************************************************************************************************
+ */
+#if CONFIG_POSTPROC
+static int q2mbl(int x) {
+ if (x < 20) x = 20;
+
+ x = 50 + (x - 50) * 10 / 8;
+ return x * x / 3;
+}
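+/* Editor's illustrative note (not upstream): maps a quantizer to the
+ * macroblock post-proc threshold; q below 20 is clamped up first, and e.g.
+ * q2mbl(63) = 66 * 66 / 3 = 1452. */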
+
+static void vp8_de_mblock(YV12_BUFFER_CONFIG *post, int q) {
+ vpx_mbpost_proc_across_ip(post->y_buffer, post->y_stride, post->y_height,
+ post->y_width, q2mbl(q));
+ vpx_mbpost_proc_down(post->y_buffer, post->y_stride, post->y_height,
+ post->y_width, q2mbl(q));
+}
+
+void vp8_deblock(VP8_COMMON *cm, YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *post, int q) {
+ double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065;
+ int ppl = (int)(level + .5);
+
+ const MODE_INFO *mode_info_context = cm->mi;
+ int mbr, mbc;
+
+ /* The pixel thresholds are adjusted according to whether or not the
+ * macroblock is a skipped block. */
+ unsigned char *ylimits = cm->pp_limits_buffer;
+ unsigned char *uvlimits = cm->pp_limits_buffer + 16 * cm->mb_cols;
+
+ if (ppl > 0) {
+ for (mbr = 0; mbr < cm->mb_rows; ++mbr) {
+ unsigned char *ylptr = ylimits;
+ unsigned char *uvlptr = uvlimits;
+ for (mbc = 0; mbc < cm->mb_cols; ++mbc) {
+ unsigned char mb_ppl;
+
+ if (mode_info_context->mbmi.mb_skip_coeff) {
+ mb_ppl = (unsigned char)ppl >> 1;
+ } else {
+ mb_ppl = (unsigned char)ppl;
+ }
+
+ memset(ylptr, mb_ppl, 16);
+ memset(uvlptr, mb_ppl, 8);
+
+ ylptr += 16;
+ uvlptr += 8;
+ mode_info_context++;
+ }
+ mode_info_context++;
+
+ vpx_post_proc_down_and_across_mb_row(
+ source->y_buffer + 16 * mbr * source->y_stride,
+ post->y_buffer + 16 * mbr * post->y_stride, source->y_stride,
+ post->y_stride, source->y_width, ylimits, 16);
+
+ vpx_post_proc_down_and_across_mb_row(
+ source->u_buffer + 8 * mbr * source->uv_stride,
+ post->u_buffer + 8 * mbr * post->uv_stride, source->uv_stride,
+ post->uv_stride, source->uv_width, uvlimits, 8);
+ vpx_post_proc_down_and_across_mb_row(
+ source->v_buffer + 8 * mbr * source->uv_stride,
+ post->v_buffer + 8 * mbr * post->uv_stride, source->uv_stride,
+ post->uv_stride, source->uv_width, uvlimits, 8);
+ }
+ } else {
+ vp8_yv12_copy_frame(source, post);
+ }
+}
+
+void vp8_de_noise(VP8_COMMON *cm, YV12_BUFFER_CONFIG *source, int q,
+ int uvfilter) {
+ int mbr;
+ double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065;
+ int ppl = (int)(level + .5);
+ int mb_rows = cm->mb_rows;
+ int mb_cols = cm->mb_cols;
+ unsigned char *limits = cm->pp_limits_buffer;
+
+ memset(limits, (unsigned char)ppl, 16 * mb_cols);
+
+ /* TODO: The original code doesn't filter the 2 outer rows and columns. */
+ for (mbr = 0; mbr < mb_rows; ++mbr) {
+ vpx_post_proc_down_and_across_mb_row(
+ source->y_buffer + 16 * mbr * source->y_stride,
+ source->y_buffer + 16 * mbr * source->y_stride, source->y_stride,
+ source->y_stride, source->y_width, limits, 16);
+ if (uvfilter == 1) {
+ vpx_post_proc_down_and_across_mb_row(
+ source->u_buffer + 8 * mbr * source->uv_stride,
+ source->u_buffer + 8 * mbr * source->uv_stride, source->uv_stride,
+ source->uv_stride, source->uv_width, limits, 8);
+ vpx_post_proc_down_and_across_mb_row(
+ source->v_buffer + 8 * mbr * source->uv_stride,
+ source->v_buffer + 8 * mbr * source->uv_stride, source->uv_stride,
+ source->uv_stride, source->uv_width, limits, 8);
+ }
+ }
+}
+#endif // CONFIG_POSTPROC
+
+#if CONFIG_POSTPROC
+int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest,
+ vp8_ppflags_t *ppflags) {
+ int q = oci->filter_level * 10 / 6;
+ int flags = ppflags->post_proc_flag;
+ int deblock_level = ppflags->deblocking_level;
+ int noise_level = ppflags->noise_level;
+
+ if (!oci->frame_to_show) return -1;
+
+ if (q > 63) q = 63;
+
+ if (!flags) {
+ *dest = *oci->frame_to_show;
+
+ /* handle problem with extending borders */
+ dest->y_width = oci->Width;
+ dest->y_height = oci->Height;
+ dest->uv_height = dest->y_height / 2;
+ oci->postproc_state.last_base_qindex = oci->base_qindex;
+ oci->postproc_state.last_frame_valid = 1;
+ return 0;
+ }
+ if (flags & VP8D_ADDNOISE) {
+ if (!oci->postproc_state.generated_noise) {
+ oci->postproc_state.generated_noise = vpx_calloc(
+ oci->Width + 256, sizeof(*oci->postproc_state.generated_noise));
+ if (!oci->postproc_state.generated_noise) return 1;
+ }
+ }
+
+ /* Allocate post_proc_buffer_int if needed */
+ if ((flags & VP8D_MFQE) && !oci->post_proc_buffer_int_used) {
+ if ((flags & VP8D_DEBLOCK) || (flags & VP8D_DEMACROBLOCK)) {
+ int width = (oci->Width + 15) & ~15;
+ int height = (oci->Height + 15) & ~15;
+
+ if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer_int, width, height,
+ VP8BORDERINPIXELS)) {
+ vpx_internal_error(&oci->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate MFQE framebuffer");
+ }
+
+ oci->post_proc_buffer_int_used = 1;
+
+ /* ensure that the postproc buffer is initialized so that post proc
+ * doesn't pull random data in from the edge
+ */
+ memset((&oci->post_proc_buffer_int)->buffer_alloc, 128,
+ (&oci->post_proc_buffer)->frame_size);
+ }
+ }
+
+ vpx_clear_system_state();
+
+ if ((flags & VP8D_MFQE) && oci->postproc_state.last_frame_valid &&
+ oci->current_video_frame > 10 &&
+ oci->postproc_state.last_base_qindex < 60 &&
+ oci->base_qindex - oci->postproc_state.last_base_qindex >= 20) {
+ vp8_multiframe_quality_enhance(oci);
+ if (((flags & VP8D_DEBLOCK) || (flags & VP8D_DEMACROBLOCK)) &&
+ oci->post_proc_buffer_int_used) {
+ vp8_yv12_copy_frame(&oci->post_proc_buffer, &oci->post_proc_buffer_int);
+ if (flags & VP8D_DEMACROBLOCK) {
+ vp8_deblock(oci, &oci->post_proc_buffer_int, &oci->post_proc_buffer,
+ q + (deblock_level - 5) * 10);
+ vp8_de_mblock(&oci->post_proc_buffer, q + (deblock_level - 5) * 10);
+ } else if (flags & VP8D_DEBLOCK) {
+ vp8_deblock(oci, &oci->post_proc_buffer_int, &oci->post_proc_buffer, q);
+ }
+ }
+ /* Move partially towards the base q of the previous frame */
+ oci->postproc_state.last_base_qindex =
+ (3 * oci->postproc_state.last_base_qindex + oci->base_qindex) >> 2;
+ } else if (flags & VP8D_DEMACROBLOCK) {
+ vp8_deblock(oci, oci->frame_to_show, &oci->post_proc_buffer,
+ q + (deblock_level - 5) * 10);
+ vp8_de_mblock(&oci->post_proc_buffer, q + (deblock_level - 5) * 10);
+
+ oci->postproc_state.last_base_qindex = oci->base_qindex;
+ } else if (flags & VP8D_DEBLOCK) {
+ vp8_deblock(oci, oci->frame_to_show, &oci->post_proc_buffer, q);
+ oci->postproc_state.last_base_qindex = oci->base_qindex;
+ } else {
+ vp8_yv12_copy_frame(oci->frame_to_show, &oci->post_proc_buffer);
+ oci->postproc_state.last_base_qindex = oci->base_qindex;
+ }
+ oci->postproc_state.last_frame_valid = 1;
+
+ if (flags & VP8D_ADDNOISE) {
+ if (oci->postproc_state.last_q != q ||
+ oci->postproc_state.last_noise != noise_level) {
+ double sigma;
+ struct postproc_state *ppstate = &oci->postproc_state;
+ vpx_clear_system_state();
+ sigma = noise_level + .5 + .6 * q / 63.0;
+ ppstate->clamp =
+ vpx_setup_noise(sigma, ppstate->generated_noise, oci->Width + 256);
+ ppstate->last_q = q;
+ ppstate->last_noise = noise_level;
+ }
+
+ vpx_plane_add_noise(
+ oci->post_proc_buffer.y_buffer, oci->postproc_state.generated_noise,
+ oci->postproc_state.clamp, oci->postproc_state.clamp,
+ oci->post_proc_buffer.y_width, oci->post_proc_buffer.y_height,
+ oci->post_proc_buffer.y_stride);
+ }
+
+ *dest = oci->post_proc_buffer;
+
+ /* handle problem with extending borders */
+ dest->y_width = oci->Width;
+ dest->y_height = oci->Height;
+ dest->uv_height = dest->y_height / 2;
+ return 0;
+}
+#endif
diff --git a/media/libvpx/libvpx/vp8/common/postproc.h b/media/libvpx/libvpx/vp8/common/postproc.h
new file mode 100644
index 0000000000..492c52aef6
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/postproc.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_POSTPROC_H_
+#define VPX_VP8_COMMON_POSTPROC_H_
+
+#include "vpx_ports/mem.h"
+struct postproc_state {
+ int last_q;
+ int last_noise;
+ int last_base_qindex;
+ int last_frame_valid;
+ int clamp;
+ int8_t *generated_noise;
+};
+#include "onyxc_int.h"
+#include "ppflags.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+int vp8_post_proc_frame(struct VP8Common *oci, YV12_BUFFER_CONFIG *dest,
+ vp8_ppflags_t *ppflags);
+
+void vp8_de_noise(struct VP8Common *cm, YV12_BUFFER_CONFIG *source, int q,
+ int uvfilter);
+
+void vp8_deblock(struct VP8Common *cm, YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *post, int q);
+
+#define MFQE_PRECISION 4
+
+void vp8_multiframe_quality_enhance(struct VP8Common *cm);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_POSTPROC_H_
diff --git a/media/libvpx/libvpx/vp8/common/ppflags.h b/media/libvpx/libvpx/vp8/common/ppflags.h
new file mode 100644
index 0000000000..bdf08734b9
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/ppflags.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_PPFLAGS_H_
+#define VPX_VP8_COMMON_PPFLAGS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+enum {
+ VP8D_NOFILTERING = 0,
+ VP8D_DEBLOCK = 1 << 0,
+ VP8D_DEMACROBLOCK = 1 << 1,
+ VP8D_ADDNOISE = 1 << 2,
+ VP8D_MFQE = 1 << 3
+};
+
+typedef struct {
+ int post_proc_flag;
+ int deblocking_level;
+ int noise_level;
+ int display_ref_frame_flag;
+ int display_mb_modes_flag;
+ int display_b_modes_flag;
+ int display_mv_flag;
+} vp8_ppflags_t;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_PPFLAGS_H_
diff --git a/media/libvpx/libvpx/vp8/common/quant_common.c b/media/libvpx/libvpx/vp8/common/quant_common.c
new file mode 100644
index 0000000000..e290eec92b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/quant_common.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "quant_common.h"
+
+static const int dc_qlookup[QINDEX_RANGE] = {
+ 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17,
+ 17, 18, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 25, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 76, 77, 78, 79, 80, 81, 82, 83,
+ 84, 85, 86, 87, 88, 89, 91, 93, 95, 96, 98, 100, 101, 102, 104,
+ 106, 108, 110, 112, 114, 116, 118, 122, 124, 126, 128, 130, 132, 134, 136,
+ 138, 140, 143, 145, 148, 151, 154, 157,
+};
+
+static const int ac_qlookup[QINDEX_RANGE] = {
+ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60, 62, 64, 66, 68,
+ 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98,
+ 100, 102, 104, 106, 108, 110, 112, 114, 116, 119, 122, 125, 128, 131, 134,
+ 137, 140, 143, 146, 149, 152, 155, 158, 161, 164, 167, 170, 173, 177, 181,
+ 185, 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 234, 239, 245,
+ 249, 254, 259, 264, 269, 274, 279, 284,
+};
+
+int vp8_dc_quant(int QIndex, int Delta) {
+ int retval;
+
+ QIndex = QIndex + Delta;
+
+ if (QIndex > 127) {
+ QIndex = 127;
+ } else if (QIndex < 0) {
+ QIndex = 0;
+ }
+
+ retval = dc_qlookup[QIndex];
+ return retval;
+}
+
+int vp8_dc2quant(int QIndex, int Delta) {
+ int retval;
+
+ QIndex = QIndex + Delta;
+
+ if (QIndex > 127) {
+ QIndex = 127;
+ } else if (QIndex < 0) {
+ QIndex = 0;
+ }
+
+ retval = dc_qlookup[QIndex] * 2;
+ return retval;
+}
+int vp8_dc_uv_quant(int QIndex, int Delta) {
+ int retval;
+
+ QIndex = QIndex + Delta;
+
+ if (QIndex > 127) {
+ QIndex = 127;
+ } else if (QIndex < 0) {
+ QIndex = 0;
+ }
+
+ retval = dc_qlookup[QIndex];
+
+ if (retval > 132) retval = 132;
+
+ return retval;
+}
+
+int vp8_ac_yquant(int QIndex) {
+ int retval;
+
+ if (QIndex > 127) {
+ QIndex = 127;
+ } else if (QIndex < 0) {
+ QIndex = 0;
+ }
+
+ retval = ac_qlookup[QIndex];
+ return retval;
+}
+
+int vp8_ac2quant(int QIndex, int Delta) {
+ int retval;
+
+ QIndex = QIndex + Delta;
+
+ if (QIndex > 127) {
+ QIndex = 127;
+ } else if (QIndex < 0) {
+ QIndex = 0;
+ }
+
+ /* For all x in [0..284], x*155/100 is bitwise equal to (x*101581) >> 16.
+ * The smallest precision for that is '(x*6349) >> 12' but 16 is a good
+ * word size. */
+ retval = (ac_qlookup[QIndex] * 101581) >> 16;
+
+ if (retval < 8) retval = 8;
+
+ return retval;
+}
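+/* Editor's check of the identity above (not upstream): at the table maximum
+ * x = 284, x * 155 / 100 = 440 and (284 * 101581) >> 16 = 28849004 >> 16 =
+ * 440, so the two agree at the extreme as claimed. */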
+int vp8_ac_uv_quant(int QIndex, int Delta) {
+ int retval;
+
+ QIndex = QIndex + Delta;
+
+ if (QIndex > 127) {
+ QIndex = 127;
+ } else if (QIndex < 0) {
+ QIndex = 0;
+ }
+
+ retval = ac_qlookup[QIndex];
+ return retval;
+}
diff --git a/media/libvpx/libvpx/vp8/common/quant_common.h b/media/libvpx/libvpx/vp8/common/quant_common.h
new file mode 100644
index 0000000000..049840a272
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/quant_common.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_QUANT_COMMON_H_
+#define VPX_VP8_COMMON_QUANT_COMMON_H_
+
+#include "string.h"
+#include "blockd.h"
+#include "onyxc_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int vp8_ac_yquant(int QIndex);
+extern int vp8_dc_quant(int QIndex, int Delta);
+extern int vp8_dc2quant(int QIndex, int Delta);
+extern int vp8_ac2quant(int QIndex, int Delta);
+extern int vp8_dc_uv_quant(int QIndex, int Delta);
+extern int vp8_ac_uv_quant(int QIndex, int Delta);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_QUANT_COMMON_H_
diff --git a/media/libvpx/libvpx/vp8/common/reconinter.c b/media/libvpx/libvpx/vp8/common/reconinter.c
new file mode 100644
index 0000000000..2cb0709318
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/reconinter.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <string.h>
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vpx/vpx_integer.h"
+#include "blockd.h"
+#include "reconinter.h"
+#if CONFIG_RUNTIME_CPU_DETECT
+#include "onyxc_int.h"
+#endif
+
+void vp8_copy_mem16x16_c(unsigned char *src, int src_stride, unsigned char *dst,
+ int dst_stride) {
+ int r;
+
+ for (r = 0; r < 16; ++r) {
+ memcpy(dst, src, 16);
+
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void vp8_copy_mem8x8_c(unsigned char *src, int src_stride, unsigned char *dst,
+ int dst_stride) {
+ int r;
+
+ for (r = 0; r < 8; ++r) {
+ memcpy(dst, src, 8);
+
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void vp8_copy_mem8x4_c(unsigned char *src, int src_stride, unsigned char *dst,
+ int dst_stride) {
+ int r;
+
+ for (r = 0; r < 4; ++r) {
+ memcpy(dst, src, 8);
+
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
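+/* Motion vectors are stored internally in 1/8-pel units: mv >> 3 selects the
+   integer pixel offset into the reference buffer and mv & 7 the sub-pixel
+   phase handed to the interpolation filter. */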
+void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre,
+ int pre_stride, vp8_subpix_fn_t sppf) {
+ int r;
+ unsigned char *pred_ptr = d->predictor;
+ unsigned char *ptr;
+ ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
+ (d->bmi.mv.as_mv.col >> 3);
+
+ if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
+ sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7,
+ pred_ptr, pitch);
+ } else {
+ for (r = 0; r < 4; ++r) {
+ pred_ptr[0] = ptr[0];
+ pred_ptr[1] = ptr[1];
+ pred_ptr[2] = ptr[2];
+ pred_ptr[3] = ptr[3];
+ pred_ptr += pitch;
+ ptr += pre_stride;
+ }
+ }
+}
+
+static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d,
+ unsigned char *dst, int dst_stride,
+ unsigned char *base_pre, int pre_stride) {
+ unsigned char *ptr;
+ ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
+ (d->bmi.mv.as_mv.col >> 3);
+
+ if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
+ x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7,
+ d->bmi.mv.as_mv.row & 7, dst, dst_stride);
+ } else {
+ vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride);
+ }
+}
+
+static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d,
+ unsigned char *dst, int dst_stride,
+ unsigned char *base_pre, int pre_stride) {
+ unsigned char *ptr;
+ ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
+ (d->bmi.mv.as_mv.col >> 3);
+
+ if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
+ x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7,
+ d->bmi.mv.as_mv.row & 7, dst, dst_stride);
+ } else {
+ vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride);
+ }
+}
+
+static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst,
+ int dst_stride, unsigned char *base_pre,
+ int pre_stride, vp8_subpix_fn_t sppf) {
+ int r;
+ unsigned char *ptr;
+ ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
+ (d->bmi.mv.as_mv.col >> 3);
+
+ if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
+ sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst,
+ dst_stride);
+ } else {
+ for (r = 0; r < 4; ++r) {
+ dst[0] = ptr[0];
+ dst[1] = ptr[1];
+ dst[2] = ptr[2];
+ dst[3] = ptr[3];
+ dst += dst_stride;
+ ptr += pre_stride;
+ }
+ }
+}
+
+/*encoder only*/
+void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x) {
+ unsigned char *uptr, *vptr;
+ unsigned char *upred_ptr = &x->predictor[256];
+ unsigned char *vpred_ptr = &x->predictor[320];
+
+ int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
+ int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
+ int offset;
+ int pre_stride = x->pre.uv_stride;
+
+ /* calc uv motion vectors */
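+  /* An arithmetic right shift of the sign bit gives -1 for negative values
+     and 0 otherwise, so '1 | sign' adds +1 or -1 before the truncating
+     divide: the luma MV is halved with rounding away from zero. */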
+ mv_row += 1 | (mv_row >> (sizeof(int) * CHAR_BIT - 1));
+ mv_col += 1 | (mv_col >> (sizeof(int) * CHAR_BIT - 1));
+ mv_row /= 2;
+ mv_col /= 2;
+ mv_row &= x->fullpixel_mask;
+ mv_col &= x->fullpixel_mask;
+
+ offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
+ uptr = x->pre.u_buffer + offset;
+ vptr = x->pre.v_buffer + offset;
+
+ if ((mv_row | mv_col) & 7) {
+ x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr,
+ 8);
+ x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr,
+ 8);
+ } else {
+ vp8_copy_mem8x8(uptr, pre_stride, upred_ptr, 8);
+ vp8_copy_mem8x8(vptr, pre_stride, vpred_ptr, 8);
+ }
+}
+
+/*encoder only*/
+void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) {
+ int i, j;
+ int pre_stride = x->pre.uv_stride;
+ unsigned char *base_pre;
+
+ /* build uv mvs */
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 2; ++j) {
+ int yoffset = i * 8 + j * 2;
+ int uoffset = 16 + i * 2 + j;
+ int voffset = 20 + i * 2 + j;
+
+ int temp;
+
+ temp = x->block[yoffset].bmi.mv.as_mv.row +
+ x->block[yoffset + 1].bmi.mv.as_mv.row +
+ x->block[yoffset + 4].bmi.mv.as_mv.row +
+ x->block[yoffset + 5].bmi.mv.as_mv.row;
+
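+      /* The chroma MV is the average of the four luma MVs, halved again for
+         the half-resolution plane, i.e. sum / 8, rounded to nearest with
+         ties away from zero via the sign-dependent +/-4 bias below. */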
+ temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
+
+ x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;
+
+ temp = x->block[yoffset].bmi.mv.as_mv.col +
+ x->block[yoffset + 1].bmi.mv.as_mv.col +
+ x->block[yoffset + 4].bmi.mv.as_mv.col +
+ x->block[yoffset + 5].bmi.mv.as_mv.col;
+
+ temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
+
+ x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
+
+ x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
+ }
+ }
+
+ base_pre = x->pre.u_buffer;
+ for (i = 16; i < 20; i += 2) {
+ BLOCKD *d0 = &x->block[i];
+ BLOCKD *d1 = &x->block[i + 1];
+
+ if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
+ build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
+ } else {
+ vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride,
+ x->subpixel_predict);
+ vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride,
+ x->subpixel_predict);
+ }
+ }
+
+ base_pre = x->pre.v_buffer;
+ for (i = 20; i < 24; i += 2) {
+ BLOCKD *d0 = &x->block[i];
+ BLOCKD *d1 = &x->block[i + 1];
+
+ if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
+ build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
+ } else {
+ vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride,
+ x->subpixel_predict);
+ vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride,
+ x->subpixel_predict);
+ }
+ }
+}
+
+/*encoder only*/
+void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x, unsigned char *dst_y,
+ int dst_ystride) {
+ unsigned char *ptr_base;
+ unsigned char *ptr;
+ int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
+ int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
+ int pre_stride = x->pre.y_stride;
+
+ ptr_base = x->pre.y_buffer;
+ ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
+
+ if ((mv_row | mv_col) & 7) {
+ x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y,
+ dst_ystride);
+ } else {
+ vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
+ }
+}
+
+static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
+ /* If the MV points so far into the UMV border that no visible pixels
+ * are used for reconstruction, the subpel part of the MV can be
+ * discarded and the MV limited to 16 pixels with equivalent results.
+ *
+ * This limit kicks in at 19 pixels for the top and left edges: the
+ * 16 pixels plus the 3 filter taps right of the central pixel needed
+ * when sub-pel filtering. The bottom and right edges use 16 pixels
+ * plus the 2 pixels left of the central pixel needed when filtering.
+ */
+ if (mv->col < (xd->mb_to_left_edge - (19 << 3))) {
+ mv->col = xd->mb_to_left_edge - (16 << 3);
+ } else if (mv->col > xd->mb_to_right_edge + (18 << 3)) {
+ mv->col = xd->mb_to_right_edge + (16 << 3);
+ }
+
+ if (mv->row < (xd->mb_to_top_edge - (19 << 3))) {
+ mv->row = xd->mb_to_top_edge - (16 << 3);
+ } else if (mv->row > xd->mb_to_bottom_edge + (18 << 3)) {
+ mv->row = xd->mb_to_bottom_edge + (16 << 3);
+ }
+}
+
+/* A version of the above function for chroma block MVs. */
+static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
+ mv->col = (2 * mv->col < (xd->mb_to_left_edge - (19 << 3)))
+ ? (xd->mb_to_left_edge - (16 << 3)) >> 1
+ : mv->col;
+ mv->col = (2 * mv->col > xd->mb_to_right_edge + (18 << 3))
+ ? (xd->mb_to_right_edge + (16 << 3)) >> 1
+ : mv->col;
+
+ mv->row = (2 * mv->row < (xd->mb_to_top_edge - (19 << 3)))
+ ? (xd->mb_to_top_edge - (16 << 3)) >> 1
+ : mv->row;
+ mv->row = (2 * mv->row > xd->mb_to_bottom_edge + (18 << 3))
+ ? (xd->mb_to_bottom_edge + (16 << 3)) >> 1
+ : mv->row;
+}
+
+void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, unsigned char *dst_y,
+ unsigned char *dst_u,
+ unsigned char *dst_v, int dst_ystride,
+ int dst_uvstride) {
+ int offset;
+ unsigned char *ptr;
+ unsigned char *uptr, *vptr;
+
+ int_mv _16x16mv;
+
+ unsigned char *ptr_base = x->pre.y_buffer;
+ int pre_stride = x->pre.y_stride;
+
+ _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;
+
+ if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
+ clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
+ }
+
+ ptr = ptr_base + (_16x16mv.as_mv.row >> 3) * pre_stride +
+ (_16x16mv.as_mv.col >> 3);
+
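+  /* The row and column components are packed as two 16-bit fields in as_int;
+     0x00070007 masks the three fractional (1/8-pel) bits of both at once, so
+     this tests whether either component needs sub-pixel filtering. */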
+ if (_16x16mv.as_int & 0x00070007) {
+ x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7,
+ _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
+ } else {
+ vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
+ }
+
+ /* calc uv motion vectors */
+ _16x16mv.as_mv.row +=
+ 1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1));
+ _16x16mv.as_mv.col +=
+ 1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1));
+ _16x16mv.as_mv.row /= 2;
+ _16x16mv.as_mv.col /= 2;
+ _16x16mv.as_mv.row &= x->fullpixel_mask;
+ _16x16mv.as_mv.col &= x->fullpixel_mask;
+
+ if (2 * _16x16mv.as_mv.col < (x->mb_to_left_edge - (19 << 3)) ||
+ 2 * _16x16mv.as_mv.col > x->mb_to_right_edge + (18 << 3) ||
+ 2 * _16x16mv.as_mv.row < (x->mb_to_top_edge - (19 << 3)) ||
+ 2 * _16x16mv.as_mv.row > x->mb_to_bottom_edge + (18 << 3)) {
+ return;
+ }
+
+ pre_stride >>= 1;
+ offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
+ uptr = x->pre.u_buffer + offset;
+ vptr = x->pre.v_buffer + offset;
+
+ if (_16x16mv.as_int & 0x00070007) {
+ x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7,
+ _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
+ x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7,
+ _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
+ } else {
+ vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
+ vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
+ }
+}
+
+static void build_inter4x4_predictors_mb(MACROBLOCKD *x) {
+ int i;
+ unsigned char *base_dst = x->dst.y_buffer;
+ unsigned char *base_pre = x->pre.y_buffer;
+
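+  /* Partitionings 0..2 (16x8, 8x16 and 8x8) can be built from four 8x8
+     predictions anchored at blocks 0, 2, 8 and 10; only the 4x4 partitioning
+     (3) needs the per-pair handling in the else branch. */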
+ if (x->mode_info_context->mbmi.partitioning < 3) {
+ BLOCKD *b;
+ int dst_stride = x->dst.y_stride;
+
+ x->block[0].bmi = x->mode_info_context->bmi[0];
+ x->block[2].bmi = x->mode_info_context->bmi[2];
+ x->block[8].bmi = x->mode_info_context->bmi[8];
+ x->block[10].bmi = x->mode_info_context->bmi[10];
+ if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
+ clamp_mv_to_umv_border(&x->block[0].bmi.mv.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[2].bmi.mv.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[8].bmi.mv.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x);
+ }
+
+ b = &x->block[0];
+ build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
+ dst_stride);
+ b = &x->block[2];
+ build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
+ dst_stride);
+ b = &x->block[8];
+ build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
+ dst_stride);
+ b = &x->block[10];
+ build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
+ dst_stride);
+ } else {
+ for (i = 0; i < 16; i += 2) {
+ BLOCKD *d0 = &x->block[i];
+ BLOCKD *d1 = &x->block[i + 1];
+ int dst_stride = x->dst.y_stride;
+
+ x->block[i + 0].bmi = x->mode_info_context->bmi[i + 0];
+ x->block[i + 1].bmi = x->mode_info_context->bmi[i + 1];
+ if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
+ clamp_mv_to_umv_border(&x->block[i + 0].bmi.mv.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[i + 1].bmi.mv.as_mv, x);
+ }
+
+ if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
+ build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
+ base_pre, dst_stride);
+ } else {
+ build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride,
+ base_pre, dst_stride, x->subpixel_predict);
+ build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride,
+ base_pre, dst_stride, x->subpixel_predict);
+ }
+ }
+ }
+ base_dst = x->dst.u_buffer;
+ base_pre = x->pre.u_buffer;
+ for (i = 16; i < 20; i += 2) {
+ BLOCKD *d0 = &x->block[i];
+ BLOCKD *d1 = &x->block[i + 1];
+ int dst_stride = x->dst.uv_stride;
+
+ /* Note: uv mvs already clamped in build_4x4uvmvs() */
+
+ if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
+ build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
+ base_pre, dst_stride);
+ } else {
+ build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre,
+ dst_stride, x->subpixel_predict);
+ build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre,
+ dst_stride, x->subpixel_predict);
+ }
+ }
+
+ base_dst = x->dst.v_buffer;
+ base_pre = x->pre.v_buffer;
+ for (i = 20; i < 24; i += 2) {
+ BLOCKD *d0 = &x->block[i];
+ BLOCKD *d1 = &x->block[i + 1];
+ int dst_stride = x->dst.uv_stride;
+
+ /* Note: uv mvs already clamped in build_4x4uvmvs() */
+
+ if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
+ build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
+ base_pre, dst_stride);
+ } else {
+ build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre,
+ dst_stride, x->subpixel_predict);
+ build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre,
+ dst_stride, x->subpixel_predict);
+ }
+ }
+}
+
+static void build_4x4uvmvs(MACROBLOCKD *x) {
+ int i, j;
+
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 2; ++j) {
+ int yoffset = i * 8 + j * 2;
+ int uoffset = 16 + i * 2 + j;
+ int voffset = 20 + i * 2 + j;
+
+ int temp;
+
+ temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row +
+ x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row +
+ x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row +
+ x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;
+
+ temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
+
+ x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;
+
+ temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col +
+ x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col +
+ x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col +
+ x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;
+
+ temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
+
+ x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
+
+ if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
+ clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);
+ }
+
+ x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
+ }
+ }
+}
+
+void vp8_build_inter_predictors_mb(MACROBLOCKD *xd) {
+ if (xd->mode_info_context->mbmi.mode != SPLITMV) {
+ vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
+ xd->dst.v_buffer, xd->dst.y_stride,
+ xd->dst.uv_stride);
+ } else {
+ build_4x4uvmvs(xd);
+ build_inter4x4_predictors_mb(xd);
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/common/reconinter.h b/media/libvpx/libvpx/vp8/common/reconinter.h
new file mode 100644
index 0000000000..974e7ce754
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/reconinter.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_RECONINTER_H_
+#define VPX_VP8_COMMON_RECONINTER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp8_build_inter_predictors_mb(MACROBLOCKD *xd);
+void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, unsigned char *dst_y,
+ unsigned char *dst_u,
+ unsigned char *dst_v, int dst_ystride,
+ int dst_uvstride);
+
+void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x, unsigned char *dst_y,
+ int dst_ystride);
+void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre,
+ int pre_stride, vp8_subpix_fn_t sppf);
+
+void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x);
+void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_RECONINTER_H_
diff --git a/media/libvpx/libvpx/vp8/common/reconintra.c b/media/libvpx/libvpx/vp8/common/reconintra.c
new file mode 100644
index 0000000000..8e2094da87
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/reconintra.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "./vp8_rtcd.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/vpx_once.h"
+#include "blockd.h"
+#include "vp8/common/reconintra.h"
+#include "vp8/common/reconintra4x4.h"
+
+enum {
+ SIZE_16,
+ SIZE_8,
+ NUM_SIZES,
+};
+
+typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left);
+
+static intra_pred_fn pred[4][NUM_SIZES];
+static intra_pred_fn dc_pred[2][2][NUM_SIZES];
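+/* dc_pred is indexed as [left_available][up_available]: when a neighbor is
+   missing, the corresponding DC variant (128, top-only or left-only) is used
+   instead of the full DC predictor. */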
+
+static void vp8_init_intra_predictors_internal(void) {
+#define INIT_SIZE(sz) \
+ pred[V_PRED][SIZE_##sz] = vpx_v_predictor_##sz##x##sz; \
+ pred[H_PRED][SIZE_##sz] = vpx_h_predictor_##sz##x##sz; \
+ pred[TM_PRED][SIZE_##sz] = vpx_tm_predictor_##sz##x##sz; \
+ \
+ dc_pred[0][0][SIZE_##sz] = vpx_dc_128_predictor_##sz##x##sz; \
+ dc_pred[0][1][SIZE_##sz] = vpx_dc_top_predictor_##sz##x##sz; \
+ dc_pred[1][0][SIZE_##sz] = vpx_dc_left_predictor_##sz##x##sz; \
+ dc_pred[1][1][SIZE_##sz] = vpx_dc_predictor_##sz##x##sz
+
+ INIT_SIZE(16);
+ INIT_SIZE(8);
+ vp8_init_intra4x4_predictors_internal();
+}
+
+void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x, unsigned char *yabove_row,
+ unsigned char *yleft, int left_stride,
+ unsigned char *ypred_ptr, int y_stride) {
+ MB_PREDICTION_MODE mode = x->mode_info_context->mbmi.mode;
+ DECLARE_ALIGNED(16, uint8_t, yleft_col[16]);
+ int i;
+ intra_pred_fn fn;
+
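+  /* The left neighbors live in a strided image; gather them into a
+     contiguous, aligned column as the vpx_dsp predictors expect. */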
+ for (i = 0; i < 16; ++i) {
+ yleft_col[i] = yleft[i * left_stride];
+ }
+
+ if (mode == DC_PRED) {
+ fn = dc_pred[x->left_available][x->up_available][SIZE_16];
+ } else {
+ fn = pred[mode][SIZE_16];
+ }
+
+ fn(ypred_ptr, y_stride, yabove_row, yleft_col);
+}
+
+void vp8_build_intra_predictors_mbuv_s(
+ MACROBLOCKD *x, unsigned char *uabove_row, unsigned char *vabove_row,
+ unsigned char *uleft, unsigned char *vleft, int left_stride,
+ unsigned char *upred_ptr, unsigned char *vpred_ptr, int pred_stride) {
+ MB_PREDICTION_MODE uvmode = x->mode_info_context->mbmi.uv_mode;
+#if HAVE_VSX
+  /* The PowerPC implementation uses "vec_vsx_ld" to read 16 bytes from
+ uleft_col and vleft_col. Play it safe by reserving enough stack
+ space here. */
+ unsigned char uleft_col[16];
+ unsigned char vleft_col[16];
+#else
+ unsigned char uleft_col[8];
+ unsigned char vleft_col[8];
+#endif
+ int i;
+ intra_pred_fn fn;
+
+ for (i = 0; i < 8; ++i) {
+ uleft_col[i] = uleft[i * left_stride];
+ vleft_col[i] = vleft[i * left_stride];
+ }
+
+ if (uvmode == DC_PRED) {
+ fn = dc_pred[x->left_available][x->up_available][SIZE_8];
+ } else {
+ fn = pred[uvmode][SIZE_8];
+ }
+
+ fn(upred_ptr, pred_stride, uabove_row, uleft_col);
+ fn(vpred_ptr, pred_stride, vabove_row, vleft_col);
+}
+
+void vp8_init_intra_predictors(void) {
+ once(vp8_init_intra_predictors_internal);
+}
diff --git a/media/libvpx/libvpx/vp8/common/reconintra.h b/media/libvpx/libvpx/vp8/common/reconintra.h
new file mode 100644
index 0000000000..029ac00a24
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/reconintra.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_RECONINTRA_H_
+#define VPX_VP8_COMMON_RECONINTRA_H_
+
+#include "vp8/common/blockd.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x, unsigned char *yabove_row,
+ unsigned char *yleft, int left_stride,
+ unsigned char *ypred_ptr, int y_stride);
+
+void vp8_build_intra_predictors_mbuv_s(
+ MACROBLOCKD *x, unsigned char *uabove_row, unsigned char *vabove_row,
+ unsigned char *uleft, unsigned char *vleft, int left_stride,
+ unsigned char *upred_ptr, unsigned char *vpred_ptr, int pred_stride);
+
+void vp8_init_intra_predictors(void);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_RECONINTRA_H_
diff --git a/media/libvpx/libvpx/vp8/common/reconintra4x4.c b/media/libvpx/libvpx/vp8/common/reconintra4x4.c
new file mode 100644
index 0000000000..be936df5e0
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/reconintra4x4.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "vp8_rtcd.h"
+#include "blockd.h"
+#include "reconintra4x4.h"
+#include "vp8/common/common.h"
+#include "vpx_ports/compiler_attributes.h"
+
+typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left);
+
+static intra_pred_fn pred[10];
+
+void vp8_init_intra4x4_predictors_internal(void) {
+ pred[B_DC_PRED] = vpx_dc_predictor_4x4;
+ pred[B_TM_PRED] = vpx_tm_predictor_4x4;
+ pred[B_VE_PRED] = vpx_ve_predictor_4x4;
+ pred[B_HE_PRED] = vpx_he_predictor_4x4;
+ pred[B_LD_PRED] = vpx_d45e_predictor_4x4;
+ pred[B_RD_PRED] = vpx_d135_predictor_4x4;
+ pred[B_VR_PRED] = vpx_d117_predictor_4x4;
+ pred[B_VL_PRED] = vpx_d63e_predictor_4x4;
+ pred[B_HD_PRED] = vpx_d153_predictor_4x4;
+ pred[B_HU_PRED] = vpx_d207_predictor_4x4;
+}
+
+void vp8_intra4x4_predict(unsigned char *above, unsigned char *yleft,
+ int left_stride, B_PREDICTION_MODE b_mode,
+ unsigned char *dst, int dst_stride,
+ unsigned char top_left) {
+/* The PowerPC implementation uses "vec_vsx_ld" to read 16 bytes from
+   Above (aka, Aboveb + 4). Play it safe by reserving enough stack
+   space here. Similarly for "Left". */
+#if HAVE_VSX
+ unsigned char Aboveb[20];
+#else
+ unsigned char Aboveb[12];
+#endif
+ unsigned char *Above = Aboveb + 4;
+#if HAVE_NEON
+  // Neon intrinsics cannot load just 32 bits (4 8-bit values). Instead, the
+  // load over-reads but does not use the extra 4 values.
+ unsigned char Left[8];
+#if VPX_WITH_ASAN
+ // Silence an 'uninitialized read' warning. Although uninitialized values are
+ // indeed read, they are not used.
+ vp8_zero_array(Left, 8);
+#endif // VPX_WITH_ASAN
+#elif HAVE_VSX
+ unsigned char Left[16];
+#else
+ unsigned char Left[4];
+#endif // HAVE_NEON
+
+ Left[0] = yleft[0];
+ Left[1] = yleft[left_stride];
+ Left[2] = yleft[2 * left_stride];
+ Left[3] = yleft[3 * left_stride];
+ memcpy(Above, above, 8);
+ Above[-1] = top_left;
+
+ pred[b_mode](dst, dst_stride, Above, Left);
+}
diff --git a/media/libvpx/libvpx/vp8/common/reconintra4x4.h b/media/libvpx/libvpx/vp8/common/reconintra4x4.h
new file mode 100644
index 0000000000..3618ec5cbe
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/reconintra4x4.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_RECONINTRA4X4_H_
+#define VPX_VP8_COMMON_RECONINTRA4X4_H_
+#include "vp8/common/blockd.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
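+/* Replicate the four pixels above-right of the macroblock into rows 3, 7 and
+   11 of the column just right of it, so that the rightmost 4x4 subblocks in
+   the lower subblock rows read valid above-right samples. */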
+static INLINE void intra_prediction_down_copy(MACROBLOCKD *xd,
+ unsigned char *above_right_src) {
+ int dst_stride = xd->dst.y_stride;
+ unsigned char *above_right_dst = xd->dst.y_buffer - dst_stride + 16;
+
+ unsigned int *src_ptr = (unsigned int *)above_right_src;
+ unsigned int *dst_ptr0 = (unsigned int *)(above_right_dst + 4 * dst_stride);
+ unsigned int *dst_ptr1 = (unsigned int *)(above_right_dst + 8 * dst_stride);
+ unsigned int *dst_ptr2 = (unsigned int *)(above_right_dst + 12 * dst_stride);
+
+ *dst_ptr0 = *src_ptr;
+ *dst_ptr1 = *src_ptr;
+ *dst_ptr2 = *src_ptr;
+}
+
+void vp8_intra4x4_predict(unsigned char *above, unsigned char *yleft,
+ int left_stride, B_PREDICTION_MODE b_mode,
+ unsigned char *dst, int dst_stride,
+ unsigned char top_left);
+
+void vp8_init_intra4x4_predictors_internal(void);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_RECONINTRA4X4_H_
diff --git a/media/libvpx/libvpx/vp8/common/rtcd.c b/media/libvpx/libvpx/vp8/common/rtcd.c
new file mode 100644
index 0000000000..09a0e2b4b3
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/rtcd.c
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "./vpx_config.h"
+#define RTCD_C
+#include "./vp8_rtcd.h"
+#include "vpx_ports/vpx_once.h"
+
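+/* vp8_rtcd() may be called from multiple threads; once() guarantees the
+   function-pointer table is initialized exactly one time. */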
+void vp8_rtcd() { once(setup_rtcd_internal); }
diff --git a/media/libvpx/libvpx/vp8/common/rtcd_defs.pl b/media/libvpx/libvpx/vp8/common/rtcd_defs.pl
new file mode 100644
index 0000000000..739a612847
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/rtcd_defs.pl
@@ -0,0 +1,250 @@
+##
+## Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+sub vp8_common_forward_decls() {
+print <<EOF
+/*
+ * VP8
+ */
+
+struct blockd;
+struct macroblockd;
+struct loop_filter_info;
+
+/* Encoder forward decls */
+struct block;
+struct macroblock;
+struct variance_vtable;
+union int_mv;
+struct yv12_buffer_config;
+EOF
+}
+forward_decls qw/vp8_common_forward_decls/;
+
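+#
+# add_proto declares an RTCD entry point (its name and C signature);
+# specialize lists the instruction-set variants that exist for it. The
+# default variant symbol is <name>_<isa>; the $vp8_..._<isa>= assignments
+# below remap entries whose implementations use a different symbol name.
+#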
+#
+# Dequant
+#
+add_proto qw/void vp8_dequantize_b/, "struct blockd*, short *DQC";
+specialize qw/vp8_dequantize_b mmx neon msa mmi/;
+
+add_proto qw/void vp8_dequant_idct_add/, "short *input, short *dq, unsigned char *dest, int stride";
+specialize qw/vp8_dequant_idct_add mmx neon dspr2 msa mmi/;
+
+add_proto qw/void vp8_dequant_idct_add_y_block/, "short *q, short *dq, unsigned char *dst, int stride, char *eobs";
+specialize qw/vp8_dequant_idct_add_y_block sse2 neon dspr2 msa mmi lsx/;
+
+add_proto qw/void vp8_dequant_idct_add_uv_block/, "short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs";
+specialize qw/vp8_dequant_idct_add_uv_block sse2 neon dspr2 msa mmi lsx/;
+
+#
+# Loopfilter
+#
+add_proto qw/void vp8_loop_filter_mbv/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi";
+specialize qw/vp8_loop_filter_mbv sse2 neon dspr2 msa mmi lsx/;
+
+add_proto qw/void vp8_loop_filter_bv/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi";
+specialize qw/vp8_loop_filter_bv sse2 neon dspr2 msa mmi lsx/;
+
+add_proto qw/void vp8_loop_filter_mbh/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi";
+specialize qw/vp8_loop_filter_mbh sse2 neon dspr2 msa mmi lsx/;
+
+add_proto qw/void vp8_loop_filter_bh/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi";
+specialize qw/vp8_loop_filter_bh sse2 neon dspr2 msa mmi lsx/;
+
+
+add_proto qw/void vp8_loop_filter_simple_mbv/, "unsigned char *y_ptr, int y_stride, const unsigned char *blimit";
+specialize qw/vp8_loop_filter_simple_mbv sse2 neon msa mmi/;
+$vp8_loop_filter_simple_mbv_c=vp8_loop_filter_simple_vertical_edge_c;
+$vp8_loop_filter_simple_mbv_sse2=vp8_loop_filter_simple_vertical_edge_sse2;
+$vp8_loop_filter_simple_mbv_neon=vp8_loop_filter_mbvs_neon;
+$vp8_loop_filter_simple_mbv_msa=vp8_loop_filter_simple_vertical_edge_msa;
+$vp8_loop_filter_simple_mbv_mmi=vp8_loop_filter_simple_vertical_edge_mmi;
+
+add_proto qw/void vp8_loop_filter_simple_mbh/, "unsigned char *y_ptr, int y_stride, const unsigned char *blimit";
+specialize qw/vp8_loop_filter_simple_mbh sse2 neon msa mmi/;
+$vp8_loop_filter_simple_mbh_c=vp8_loop_filter_simple_horizontal_edge_c;
+$vp8_loop_filter_simple_mbh_sse2=vp8_loop_filter_simple_horizontal_edge_sse2;
+$vp8_loop_filter_simple_mbh_neon=vp8_loop_filter_mbhs_neon;
+$vp8_loop_filter_simple_mbh_msa=vp8_loop_filter_simple_horizontal_edge_msa;
+$vp8_loop_filter_simple_mbh_mmi=vp8_loop_filter_simple_horizontal_edge_mmi;
+
+add_proto qw/void vp8_loop_filter_simple_bv/, "unsigned char *y_ptr, int y_stride, const unsigned char *blimit";
+specialize qw/vp8_loop_filter_simple_bv sse2 neon msa mmi/;
+$vp8_loop_filter_simple_bv_c=vp8_loop_filter_bvs_c;
+$vp8_loop_filter_simple_bv_sse2=vp8_loop_filter_bvs_sse2;
+$vp8_loop_filter_simple_bv_neon=vp8_loop_filter_bvs_neon;
+$vp8_loop_filter_simple_bv_msa=vp8_loop_filter_bvs_msa;
+$vp8_loop_filter_simple_bv_mmi=vp8_loop_filter_bvs_mmi;
+
+add_proto qw/void vp8_loop_filter_simple_bh/, "unsigned char *y_ptr, int y_stride, const unsigned char *blimit";
+specialize qw/vp8_loop_filter_simple_bh sse2 neon msa mmi/;
+$vp8_loop_filter_simple_bh_c=vp8_loop_filter_bhs_c;
+$vp8_loop_filter_simple_bh_sse2=vp8_loop_filter_bhs_sse2;
+$vp8_loop_filter_simple_bh_neon=vp8_loop_filter_bhs_neon;
+$vp8_loop_filter_simple_bh_msa=vp8_loop_filter_bhs_msa;
+$vp8_loop_filter_simple_bh_mmi=vp8_loop_filter_bhs_mmi;
+
+#
+# IDCT
+#
+#idct16
+add_proto qw/void vp8_short_idct4x4llm/, "short *input, unsigned char *pred_ptr, int pred_stride, unsigned char *dst_ptr, int dst_stride";
+specialize qw/vp8_short_idct4x4llm mmx neon dspr2 msa mmi/;
+
+#iwalsh1
+add_proto qw/void vp8_short_inv_walsh4x4_1/, "short *input, short *mb_dqcoeff";
+specialize qw/vp8_short_inv_walsh4x4_1 dspr2/;
+
+#iwalsh16
+add_proto qw/void vp8_short_inv_walsh4x4/, "short *input, short *mb_dqcoeff";
+specialize qw/vp8_short_inv_walsh4x4 sse2 neon dspr2 msa mmi/;
+
+#idct1_scalar_add
+add_proto qw/void vp8_dc_only_idct_add/, "short input_dc, unsigned char *pred_ptr, int pred_stride, unsigned char *dst_ptr, int dst_stride";
+specialize qw/vp8_dc_only_idct_add mmx neon dspr2 msa mmi lsx/;
+
+#
+# RECON
+#
+add_proto qw/void vp8_copy_mem16x16/, "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride";
+specialize qw/vp8_copy_mem16x16 sse2 neon dspr2 msa mmi/;
+
+add_proto qw/void vp8_copy_mem8x8/, "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride";
+specialize qw/vp8_copy_mem8x8 mmx neon dspr2 msa mmi/;
+
+add_proto qw/void vp8_copy_mem8x4/, "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride";
+specialize qw/vp8_copy_mem8x4 mmx neon dspr2 msa mmi/;
+
+#
+# Postproc
+#
+if (vpx_config("CONFIG_POSTPROC") eq "yes") {
+
+ add_proto qw/void vp8_blend_mb_inner/, "unsigned char *y, unsigned char *u, unsigned char *v, int y_1, int u_1, int v_1, int alpha, int stride";
+
+ add_proto qw/void vp8_blend_mb_outer/, "unsigned char *y, unsigned char *u, unsigned char *v, int y_1, int u_1, int v_1, int alpha, int stride";
+
+ add_proto qw/void vp8_blend_b/, "unsigned char *y, unsigned char *u, unsigned char *v, int y_1, int u_1, int v_1, int alpha, int stride";
+
+ add_proto qw/void vp8_filter_by_weight16x16/, "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride, int src_weight";
+ specialize qw/vp8_filter_by_weight16x16 sse2 msa/;
+
+ add_proto qw/void vp8_filter_by_weight8x8/, "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride, int src_weight";
+ specialize qw/vp8_filter_by_weight8x8 sse2 msa/;
+
+ add_proto qw/void vp8_filter_by_weight4x4/, "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride, int src_weight";
+}
+
+#
+# Subpixel
+#
+add_proto qw/void vp8_sixtap_predict16x16/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
+specialize qw/vp8_sixtap_predict16x16 sse2 ssse3 neon dspr2 msa mmi lsx/;
+
+add_proto qw/void vp8_sixtap_predict8x8/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
+specialize qw/vp8_sixtap_predict8x8 sse2 ssse3 neon dspr2 msa mmi lsx/;
+
+add_proto qw/void vp8_sixtap_predict8x4/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
+specialize qw/vp8_sixtap_predict8x4 sse2 ssse3 neon dspr2 msa mmi/;
+
+add_proto qw/void vp8_sixtap_predict4x4/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
+specialize qw/vp8_sixtap_predict4x4 mmx ssse3 neon dspr2 msa mmi lsx/;
+
+add_proto qw/void vp8_bilinear_predict16x16/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
+specialize qw/vp8_bilinear_predict16x16 sse2 ssse3 neon msa/;
+
+add_proto qw/void vp8_bilinear_predict8x8/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
+specialize qw/vp8_bilinear_predict8x8 sse2 ssse3 neon msa/;
+
+add_proto qw/void vp8_bilinear_predict8x4/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
+specialize qw/vp8_bilinear_predict8x4 sse2 neon msa/;
+
+add_proto qw/void vp8_bilinear_predict4x4/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
+specialize qw/vp8_bilinear_predict4x4 sse2 neon msa/;
+
+#
+# Encoder functions below this point.
+#
+if (vpx_config("CONFIG_VP8_ENCODER") eq "yes") {
+
+#
+# Block copy
+#
+add_proto qw/void vp8_copy32xn/, "const unsigned char *src_ptr, int src_stride, unsigned char *dst_ptr, int dst_stride, int height";
+specialize qw/vp8_copy32xn sse2 sse3/;
+
+#
+# Forward DCT
+#
+add_proto qw/void vp8_short_fdct4x4/, "short *input, short *output, int pitch";
+specialize qw/vp8_short_fdct4x4 sse2 neon msa mmi lsx/;
+
+add_proto qw/void vp8_short_fdct8x4/, "short *input, short *output, int pitch";
+specialize qw/vp8_short_fdct8x4 sse2 neon msa mmi lsx/;
+
+add_proto qw/void vp8_short_walsh4x4/, "short *input, short *output, int pitch";
+specialize qw/vp8_short_walsh4x4 sse2 neon msa mmi/;
+
+#
+# Quantizer
+#
+add_proto qw/void vp8_regular_quantize_b/, "struct block *, struct blockd *";
+specialize qw/vp8_regular_quantize_b sse2 sse4_1 msa mmi lsx/;
+
+add_proto qw/void vp8_fast_quantize_b/, "struct block *, struct blockd *";
+specialize qw/vp8_fast_quantize_b sse2 ssse3 neon msa mmi/;
+
+#
+# Block subtraction
+#
+add_proto qw/int vp8_block_error/, "short *coeff, short *dqcoeff";
+specialize qw/vp8_block_error sse2 msa lsx/;
+
+add_proto qw/int vp8_mbblock_error/, "struct macroblock *mb, int dc";
+specialize qw/vp8_mbblock_error sse2 msa lsx/;
+
+add_proto qw/int vp8_mbuverror/, "struct macroblock *mb";
+specialize qw/vp8_mbuverror sse2 msa/;
+
+#
+# Motion search
+#
+add_proto qw/int vp8_refining_search_sad/, "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, int error_per_bit, int search_range, struct variance_vtable *fn_ptr, int *mvcost[2], union int_mv *center_mv";
+specialize qw/vp8_refining_search_sad sse2 msa/;
+$vp8_refining_search_sad_sse2=vp8_refining_search_sadx4;
+$vp8_refining_search_sad_msa=vp8_refining_search_sadx4;
+
+add_proto qw/int vp8_diamond_search_sad/, "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, union int_mv *best_mv, int search_param, int sad_per_bit, int *num00, struct variance_vtable *fn_ptr, int *mvcost[2], union int_mv *center_mv";
+specialize qw/vp8_diamond_search_sad sse2 msa lsx/;
+$vp8_diamond_search_sad_sse2=vp8_diamond_search_sadx4;
+$vp8_diamond_search_sad_msa=vp8_diamond_search_sadx4;
+$vp8_diamond_search_sad_lsx=vp8_diamond_search_sadx4;
+
+#
+# Alt-ref Noise Reduction (ARNR)
+#
+if (vpx_config("CONFIG_REALTIME_ONLY") ne "yes") {
+ add_proto qw/void vp8_temporal_filter_apply/, "unsigned char *frame1, unsigned int stride, unsigned char *frame2, unsigned int block_size, int strength, int filter_weight, unsigned int *accumulator, unsigned short *count";
+ specialize qw/vp8_temporal_filter_apply sse2 msa/;
+}
+
+#
+# Denoiser filter
+#
+if (vpx_config("CONFIG_TEMPORAL_DENOISING") eq "yes") {
+ add_proto qw/int vp8_denoiser_filter/, "unsigned char *mc_running_avg_y, int mc_avg_y_stride, unsigned char *running_avg_y, int avg_y_stride, unsigned char *sig, int sig_stride, unsigned int motion_magnitude, int increase_denoising";
+ specialize qw/vp8_denoiser_filter sse2 neon msa/;
+ add_proto qw/int vp8_denoiser_filter_uv/, "unsigned char *mc_running_avg, int mc_avg_stride, unsigned char *running_avg, int avg_stride, unsigned char *sig, int sig_stride, unsigned int motion_magnitude, int increase_denoising";
+ specialize qw/vp8_denoiser_filter_uv sse2 neon msa/;
+}
+
+# End of encoder only functions
+}
+1;
diff --git a/media/libvpx/libvpx/vp8/common/setupintrarecon.c b/media/libvpx/libvpx/vp8/common/setupintrarecon.c
new file mode 100644
index 0000000000..dc8a8aae96
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/setupintrarecon.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "setupintrarecon.h"
+#include "vpx_mem/vpx_mem.h"
+
+void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf) {
+ int i;
+
+  /* set up the new frame for intra coded blocks */
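+  /* Per the VP8 spec, samples above the frame default to 127 and samples
+     left of the frame to 129. The +5 covers the top-left corner pixel plus
+     the four above-right pixels past the end of the row. */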
+ memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
+ for (i = 0; i < ybf->y_height; ++i) {
+ ybf->y_buffer[ybf->y_stride * i - 1] = (unsigned char)129;
+ }
+
+ memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+ for (i = 0; i < ybf->uv_height; ++i) {
+ ybf->u_buffer[ybf->uv_stride * i - 1] = (unsigned char)129;
+ }
+
+ memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+ for (i = 0; i < ybf->uv_height; ++i) {
+ ybf->v_buffer[ybf->uv_stride * i - 1] = (unsigned char)129;
+ }
+}
+
+void vp8_setup_intra_recon_top_line(YV12_BUFFER_CONFIG *ybf) {
+ memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
+ memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+ memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+}
diff --git a/media/libvpx/libvpx/vp8/common/setupintrarecon.h b/media/libvpx/libvpx/vp8/common/setupintrarecon.h
new file mode 100644
index 0000000000..903a536aed
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/setupintrarecon.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_SETUPINTRARECON_H_
+#define VPX_VP8_COMMON_SETUPINTRARECON_H_
+
+#include "./vpx_config.h"
+#include "vpx_scale/yv12config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf);
+extern void vp8_setup_intra_recon_top_line(YV12_BUFFER_CONFIG *ybf);
+
+static INLINE void setup_intra_recon_left(unsigned char *y_buffer,
+ unsigned char *u_buffer,
+ unsigned char *v_buffer, int y_stride,
+ int uv_stride) {
+ int i;
+
+ for (i = 0; i < 16; ++i) y_buffer[y_stride * i] = (unsigned char)129;
+
+ for (i = 0; i < 8; ++i) u_buffer[uv_stride * i] = (unsigned char)129;
+
+ for (i = 0; i < 8; ++i) v_buffer[uv_stride * i] = (unsigned char)129;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_SETUPINTRARECON_H_
diff --git a/media/libvpx/libvpx/vp8/common/swapyv12buffer.c b/media/libvpx/libvpx/vp8/common/swapyv12buffer.c
new file mode 100644
index 0000000000..5ff21e94a8
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/swapyv12buffer.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "swapyv12buffer.h"
+
+void vp8_swap_yv12_buffer(YV12_BUFFER_CONFIG *new_frame,
+ YV12_BUFFER_CONFIG *last_frame) {
+ unsigned char *temp;
+
+ temp = last_frame->buffer_alloc;
+ last_frame->buffer_alloc = new_frame->buffer_alloc;
+ new_frame->buffer_alloc = temp;
+
+ temp = last_frame->y_buffer;
+ last_frame->y_buffer = new_frame->y_buffer;
+ new_frame->y_buffer = temp;
+
+ temp = last_frame->u_buffer;
+ last_frame->u_buffer = new_frame->u_buffer;
+ new_frame->u_buffer = temp;
+
+ temp = last_frame->v_buffer;
+ last_frame->v_buffer = new_frame->v_buffer;
+ new_frame->v_buffer = temp;
+}
diff --git a/media/libvpx/libvpx/vp8/common/swapyv12buffer.h b/media/libvpx/libvpx/vp8/common/swapyv12buffer.h
new file mode 100644
index 0000000000..e37c471f63
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/swapyv12buffer.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_SWAPYV12BUFFER_H_
+#define VPX_VP8_COMMON_SWAPYV12BUFFER_H_
+
+#include "vpx_scale/yv12config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp8_swap_yv12_buffer(YV12_BUFFER_CONFIG *new_frame,
+ YV12_BUFFER_CONFIG *last_frame);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_SWAPYV12BUFFER_H_
diff --git a/media/libvpx/libvpx/vp8/common/systemdependent.h b/media/libvpx/libvpx/vp8/common/systemdependent.h
new file mode 100644
index 0000000000..83a5513aae
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/systemdependent.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_SYSTEMDEPENDENT_H_
+#define VPX_VP8_COMMON_SYSTEMDEPENDENT_H_
+
+#include "vpx_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP8Common;
+void vp8_machine_specific_config(struct VP8Common *);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_SYSTEMDEPENDENT_H_
diff --git a/media/libvpx/libvpx/vp8/common/threading.h b/media/libvpx/libvpx/vp8/common/threading.h
new file mode 100644
index 0000000000..1cfb9fec51
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/threading.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_THREADING_H_
+#define VPX_VP8_COMMON_THREADING_H_
+
+#include "./vpx_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if CONFIG_OS_SUPPORT && CONFIG_MULTITHREAD
+
+/* Thread management macros */
+#if defined(_WIN32) && !HAVE_PTHREAD_H
+/* Win32 */
+#include <process.h>
+#include <windows.h>
+#if defined(__GNUC__) && \
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
+#define THREAD_FUNCTION \
+ __attribute__((force_align_arg_pointer)) unsigned int __stdcall
+#else
+#define THREAD_FUNCTION unsigned int __stdcall
+#endif
+#define THREAD_FUNCTION_RETURN DWORD
+#define THREAD_SPECIFIC_INDEX DWORD
+#define pthread_t HANDLE
+#define pthread_attr_t DWORD
+#define pthread_detach(thread) \
+ if (thread != NULL) CloseHandle(thread)
+#define thread_sleep(nms) Sleep(nms)
+#define pthread_cancel(thread) terminate_thread(thread, 0)
+#define ts_key_create(ts_key, destructor) \
+ { ts_key = TlsAlloc(); };
+#define pthread_getspecific(ts_key) TlsGetValue(ts_key)
+#define pthread_setspecific(ts_key, value) TlsSetValue(ts_key, (void *)value)
+#define pthread_self() GetCurrentThreadId()
+
+#elif defined(__OS2__)
+/* OS/2 */
+#define INCL_DOS
+#include <os2.h>
+
+#include <stdlib.h>
+#define THREAD_FUNCTION void *
+#define THREAD_FUNCTION_RETURN void *
+#define THREAD_SPECIFIC_INDEX PULONG
+#define pthread_t TID
+#define pthread_attr_t ULONG
+#define pthread_detach(thread) 0
+#define thread_sleep(nms) DosSleep(nms)
+#define pthread_cancel(thread) DosKillThread(thread)
+#define ts_key_create(ts_key, destructor) \
+ DosAllocThreadLocalMemory(1, &(ts_key));
+#define pthread_getspecific(ts_key) ((void *)(*(ts_key)))
+#define pthread_setspecific(ts_key, value) (*(ts_key) = (ULONG)(value))
+#define pthread_self() _gettid()
+#else
+#ifdef __APPLE__
+#include <mach/mach_init.h>
+#include <mach/semaphore.h>
+#include <mach/task.h>
+#include <time.h>
+#include <unistd.h>
+
+#else
+#include <semaphore.h>
+#endif
+
+#include <pthread.h>
+/* pthreads */
+/* Nearly everything is already defined */
+#define THREAD_FUNCTION void *
+#define THREAD_FUNCTION_RETURN void *
+#define THREAD_SPECIFIC_INDEX pthread_key_t
+#define ts_key_create(ts_key, destructor) \
+ pthread_key_create(&(ts_key), destructor);
+#endif
+
+/* Synchronization macros: Win32 and Pthreads */
+#if defined(_WIN32) && !HAVE_PTHREAD_H
+#define sem_t HANDLE
+#define pause(voidpara) __asm PAUSE
+#define sem_init(sem, sem_attr1, sem_init_value) \
+ (int)((*sem = CreateSemaphore(NULL, 0, 32768, NULL)) == NULL)
+#define sem_wait(sem) \
+ (int)(WAIT_OBJECT_0 != WaitForSingleObject(*sem, INFINITE))
+#define sem_post(sem) ReleaseSemaphore(*sem, 1, NULL)
+#define sem_destroy(sem) \
+ if (*sem) ((int)(CloseHandle(*sem)) == TRUE)
+#define thread_sleep(nms) Sleep(nms)
+
+#elif defined(__OS2__)
+typedef struct {
+ HEV event;
+ HMTX wait_mutex;
+ HMTX count_mutex;
+ int count;
+} sem_t;
+
+static inline int sem_init(sem_t *sem, int pshared, unsigned int value) {
+ DosCreateEventSem(NULL, &sem->event, pshared ? DC_SEM_SHARED : 0,
+ value > 0 ? TRUE : FALSE);
+ DosCreateMutexSem(NULL, &sem->wait_mutex, 0, FALSE);
+ DosCreateMutexSem(NULL, &sem->count_mutex, 0, FALSE);
+
+ sem->count = value;
+
+ return 0;
+}
+
+static inline int sem_wait(sem_t *sem) {
+ DosRequestMutexSem(sem->wait_mutex, -1);
+
+ DosWaitEventSem(sem->event, -1);
+
+ DosRequestMutexSem(sem->count_mutex, -1);
+
+ sem->count--;
+ if (sem->count == 0) {
+ ULONG post_count;
+
+ DosResetEventSem(sem->event, &post_count);
+ }
+
+ DosReleaseMutexSem(sem->count_mutex);
+
+ DosReleaseMutexSem(sem->wait_mutex);
+
+ return 0;
+}
+
+static inline int sem_post(sem_t *sem) {
+ DosRequestMutexSem(sem->count_mutex, -1);
+
+ if (sem->count < 32768) {
+ sem->count++;
+ DosPostEventSem(sem->event);
+ }
+
+ DosReleaseMutexSem(sem->count_mutex);
+
+ return 0;
+}
+
+static inline int sem_destroy(sem_t *sem) {
+ DosCloseEventSem(sem->event);
+ DosCloseMutexSem(sem->wait_mutex);
+ DosCloseMutexSem(sem->count_mutex);
+
+ return 0;
+}
+
+#define thread_sleep(nms) DosSleep(nms)
+
+#else
+
+#ifdef __APPLE__
+#define sem_t semaphore_t
+#define sem_init(X, Y, Z) \
+ semaphore_create(mach_task_self(), X, SYNC_POLICY_FIFO, Z)
+#define sem_wait(sem) (semaphore_wait(*sem))
+#define sem_post(sem) semaphore_signal(*sem)
+#define sem_destroy(sem) semaphore_destroy(mach_task_self(), *sem)
+#else
+#include <unistd.h>
+#include <sched.h>
+#endif /* __APPLE__ */
+/* Not Windows. Assume pthreads */
+
+/* thread_sleep implementation: a no-op on Unix-like systems, sched_yield()
+   elsewhere. */
+#if defined(__unix__) || defined(__APPLE__)
+#define thread_sleep(nms)
+/* {struct timespec ts;ts.tv_sec=0;
+ ts.tv_nsec = 1000*nms;nanosleep(&ts, NULL);} */
+#else
+#define thread_sleep(nms) sched_yield();
+#endif /* __unix__ || __APPLE__ */
+
+#endif
+
+#if VPX_ARCH_X86 || VPX_ARCH_X86_64
+#include "vpx_ports/x86.h"
+#else
+#define x86_pause_hint()
+#endif
+
+#include "vpx_util/vpx_thread.h"
+#include "vpx_util/vpx_atomics.h"
+
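+/* Busy-wait until the thread decoding the row above has advanced to at least
+   nsync macroblocks past our column, pausing briefly between polls. */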
+static INLINE void vp8_atomic_spin_wait(
+ int mb_col, const vpx_atomic_int *last_row_current_mb_col,
+ const int nsync) {
+ while (mb_col > (vpx_atomic_load_acquire(last_row_current_mb_col) - nsync)) {
+ x86_pause_hint();
+ thread_sleep(0);
+ }
+}
+
+#endif /* CONFIG_OS_SUPPORT && CONFIG_MULTITHREAD */
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_THREADING_H_
diff --git a/media/libvpx/libvpx/vp8/common/treecoder.c b/media/libvpx/libvpx/vp8/common/treecoder.c
new file mode 100644
index 0000000000..f1e78f4321
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/treecoder.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "vp8/common/treecoder.h"
+#include "vpx/vpx_integer.h"
+
+static void tree2tok(struct vp8_token_struct *const p, vp8_tree t, int i, int v,
+ int L) {
+ v += v;
+ ++L;
+
+ do {
+ const vp8_tree_index j = t[i++];
+
+ if (j <= 0) {
+ p[-j].value = v;
+ p[-j].Len = L;
+ } else {
+ tree2tok(p, t, j, v, L);
+ }
+ } while (++v & 1);
+}
+
+void vp8_tokens_from_tree(struct vp8_token_struct *p, vp8_tree t) {
+ tree2tok(p, t, 0, 0, 0);
+}
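+/* Illustrative example: for a 3-symbol tree { -SYM0, 2, -SYM1, -SYM2 }
+   (with SYM0..SYM2 = 0..2), node 0 emits bit 0 for symbol 0 and descends
+   to node 2 on bit 1, where symbols 1 and 2 take bits 0 and 1. The
+   resulting { value, Len } pairs are { 0, 1 }, { 2, 2 }, { 3, 2 }, i.e.
+   codewords 0, 10 and 11. */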
+
+void vp8_tokens_from_tree_offset(struct vp8_token_struct *p, vp8_tree t,
+ int offset) {
+ tree2tok(p - offset, t, 0, 0, 0);
+}
+
+static void branch_counts(int n, /* n = size of alphabet */
+ vp8_token tok[/* n */], vp8_tree tree,
+ unsigned int branch_ct[/* n-1 */][2],
+ const unsigned int num_events[/* n */]) {
+ const int tree_len = n - 1;
+ int t = 0;
+
+ assert(tree_len);
+
+ do {
+ branch_ct[t][0] = branch_ct[t][1] = 0;
+ } while (++t < tree_len);
+
+ t = 0;
+
+ do {
+ int L = tok[t].Len;
+ const int enc = tok[t].value;
+ const unsigned int ct = num_events[t];
+
+ vp8_tree_index i = 0;
+
+ do {
+ const int b = (enc >> --L) & 1;
+ const int j = i >> 1;
+ assert(j < tree_len && 0 <= L);
+
+ branch_ct[j][b] += ct;
+ i = tree[i + b];
+ } while (i > 0);
+
+ assert(!L);
+ } while (++t < n);
+}
+
+void vp8_tree_probs_from_distribution(int n, /* n = size of alphabet */
+ vp8_token tok[/* n */], vp8_tree tree,
+ vp8_prob probs[/* n-1 */],
+ unsigned int branch_ct[/* n-1 */][2],
+ const unsigned int num_events[/* n */],
+ unsigned int Pfactor, int Round) {
+ const int tree_len = n - 1;
+ int t = 0;
+
+ branch_counts(n, tok, tree, branch_ct, num_events);
+
+ do {
+ const unsigned int *const c = branch_ct[t];
+ const unsigned int tot = c[0] + c[1];
+
+ if (tot) {
+ const unsigned int p =
+ (unsigned int)(((uint64_t)c[0] * Pfactor) + (Round ? tot >> 1 : 0)) /
+ tot;
+ probs[t] = p < 256 ? (p ? p : 1) : 255; /* agree w/old version for now */
+ } else {
+ probs[t] = vp8_prob_half;
+ }
+ } while (++t < tree_len);
+}
diff --git a/media/libvpx/libvpx/vp8/common/treecoder.h b/media/libvpx/libvpx/vp8/common/treecoder.h
new file mode 100644
index 0000000000..d7d8d0ead0
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/treecoder.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_TREECODER_H_
+#define VPX_VP8_COMMON_TREECODER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned char vp8bc_index_t; /* probability index */
+
+typedef unsigned char vp8_prob;
+
+#define vp8_prob_half ((vp8_prob)128)
+
+typedef signed char vp8_tree_index;
+struct bool_coder_spec;
+
+typedef struct bool_coder_spec bool_coder_spec;
+typedef struct bool_writer bool_writer;
+typedef struct bool_reader bool_reader;
+
+typedef const bool_coder_spec c_bool_coder_spec;
+typedef const bool_writer c_bool_writer;
+typedef const bool_reader c_bool_reader;
+
+#define vp8_complement(x) (255 - (x))
+
+/* We build coding trees compactly in arrays.
+ Each node of the tree is a pair of vp8_tree_indices.
+ Array index often references a corresponding probability table.
+ Index <= 0 means done encoding/decoding and value = -Index,
+ Index > 0 means need another bit, specification at index.
+ Nonnegative indices are always even; processing begins at node 0. */
+
+typedef const vp8_tree_index vp8_tree[], *vp8_tree_p;
+
+typedef const struct vp8_token_struct {
+ int value;
+ int Len;
+} vp8_token;
+
+/* Construct encoding array from tree. */
+
+void vp8_tokens_from_tree(struct vp8_token_struct *, vp8_tree);
+void vp8_tokens_from_tree_offset(struct vp8_token_struct *, vp8_tree,
+ int offset);
+
+/* Convert an array of token occurrence counts into a table of probabilities
+   for the associated binary encoding tree. Also writes the count of branches
+   taken for each node of the tree; this facilitates decisions about
+   probability updates. */
+
+void vp8_tree_probs_from_distribution(int n, /* n = size of alphabet */
+ vp8_token tok[/* n */], vp8_tree tree,
+ vp8_prob probs[/* n-1 */],
+ unsigned int branch_ct[/* n-1 */][2],
+ const unsigned int num_events[/* n */],
+ unsigned int Pfactor, int Round);
+
+/* Variant of above using coder spec rather than hardwired 8-bit probs. */
+
+void vp8bc_tree_probs_from_distribution(int n, /* n = size of alphabet */
+ vp8_token tok[/* n */], vp8_tree tree,
+ vp8_prob probs[/* n-1 */],
+ unsigned int branch_ct[/* n-1 */][2],
+ const unsigned int num_events[/* n */],
+ c_bool_coder_spec *s);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_TREECODER_H_
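As a concrete check of the probability arithmetic in
vp8_tree_probs_from_distribution (counts invented for illustration):

    /* Suppose branch t went 300 times to child 0 and 100 times to child 1,
     * with Pfactor = 256 and Round = 1:
     *   tot      = 300 + 100 = 400
     *   p        = (300 * 256 + 400 / 2) / 400 = 77000 / 400 = 192
     *   probs[t] = 192, i.e. P(child 0) ~ 192/256 = 0.75.
     * A zero result is clamped to 1 and anything >= 256 to 255, so the
     * value always fits a vp8_prob. */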
diff --git a/media/libvpx/libvpx/vp8/common/vp8_entropymodedata.h b/media/libvpx/libvpx/vp8/common/vp8_entropymodedata.h
new file mode 100644
index 0000000000..3fc942e050
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/vp8_entropymodedata.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_VP8_ENTROPYMODEDATA_H_
+#define VPX_VP8_COMMON_VP8_ENTROPYMODEDATA_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Generated file, included by entropymode.c */
+
+const struct vp8_token_struct vp8_bmode_encodings[VP8_BINTRAMODES] = {
+ { 0, 1 }, { 2, 2 }, { 6, 3 }, { 28, 5 }, { 30, 5 },
+ { 58, 6 }, { 59, 6 }, { 62, 6 }, { 126, 7 }, { 127, 7 }
+};
+
+const struct vp8_token_struct vp8_ymode_encodings[VP8_YMODES] = {
+ { 0, 1 }, { 4, 3 }, { 5, 3 }, { 6, 3 }, { 7, 3 }
+};
+
+const struct vp8_token_struct vp8_kf_ymode_encodings[VP8_YMODES] = {
+ { 4, 3 }, { 5, 3 }, { 6, 3 }, { 7, 3 }, { 0, 1 }
+};
+
+const struct vp8_token_struct vp8_uv_mode_encodings[VP8_UV_MODES] = {
+ { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
+};
+
+const struct vp8_token_struct vp8_mbsplit_encodings[VP8_NUMMBSPLITS] = {
+ { 6, 3 }, { 7, 3 }, { 2, 2 }, { 0, 1 }
+};
+
+const struct vp8_token_struct vp8_mv_ref_encoding_array[VP8_MVREFS] = {
+ { 2, 2 }, { 6, 3 }, { 0, 1 }, { 14, 4 }, { 15, 4 }
+};
+
+const struct vp8_token_struct vp8_sub_mv_ref_encoding_array[VP8_SUBMVREFS] = {
+ { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
+};
+
+const struct vp8_token_struct vp8_small_mvencodings[8] = {
+ { 0, 3 }, { 1, 3 }, { 2, 3 }, { 3, 3 }, { 4, 3 }, { 5, 3 }, { 6, 3 }, { 7, 3 }
+};
+
+const vp8_prob vp8_ymode_prob[VP8_YMODES - 1] = { 112, 86, 140, 37 };
+
+const vp8_prob vp8_kf_ymode_prob[VP8_YMODES - 1] = { 145, 156, 163, 128 };
+
+const vp8_prob vp8_uv_mode_prob[VP8_UV_MODES - 1] = { 162, 101, 204 };
+
+const vp8_prob vp8_kf_uv_mode_prob[VP8_UV_MODES - 1] = { 142, 114, 183 };
+
+const vp8_prob vp8_bmode_prob[VP8_BINTRAMODES - 1] = { 120, 90, 79, 133, 87,
+ 85, 80, 111, 151 };
+
+const vp8_prob
+ vp8_kf_bmode_prob[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES - 1] = {
+ { { 231, 120, 48, 89, 115, 113, 120, 152, 112 },
+ { 152, 179, 64, 126, 170, 118, 46, 70, 95 },
+ { 175, 69, 143, 80, 85, 82, 72, 155, 103 },
+ { 56, 58, 10, 171, 218, 189, 17, 13, 152 },
+ { 144, 71, 10, 38, 171, 213, 144, 34, 26 },
+ { 114, 26, 17, 163, 44, 195, 21, 10, 173 },
+ { 121, 24, 80, 195, 26, 62, 44, 64, 85 },
+ { 170, 46, 55, 19, 136, 160, 33, 206, 71 },
+ { 63, 20, 8, 114, 114, 208, 12, 9, 226 },
+ { 81, 40, 11, 96, 182, 84, 29, 16, 36 } },
+ { { 134, 183, 89, 137, 98, 101, 106, 165, 148 },
+ { 72, 187, 100, 130, 157, 111, 32, 75, 80 },
+ { 66, 102, 167, 99, 74, 62, 40, 234, 128 },
+ { 41, 53, 9, 178, 241, 141, 26, 8, 107 },
+ { 104, 79, 12, 27, 217, 255, 87, 17, 7 },
+ { 74, 43, 26, 146, 73, 166, 49, 23, 157 },
+ { 65, 38, 105, 160, 51, 52, 31, 115, 128 },
+ { 87, 68, 71, 44, 114, 51, 15, 186, 23 },
+ { 47, 41, 14, 110, 182, 183, 21, 17, 194 },
+ { 66, 45, 25, 102, 197, 189, 23, 18, 22 } },
+ { { 88, 88, 147, 150, 42, 46, 45, 196, 205 },
+ { 43, 97, 183, 117, 85, 38, 35, 179, 61 },
+ { 39, 53, 200, 87, 26, 21, 43, 232, 171 },
+ { 56, 34, 51, 104, 114, 102, 29, 93, 77 },
+ { 107, 54, 32, 26, 51, 1, 81, 43, 31 },
+ { 39, 28, 85, 171, 58, 165, 90, 98, 64 },
+ { 34, 22, 116, 206, 23, 34, 43, 166, 73 },
+ { 68, 25, 106, 22, 64, 171, 36, 225, 114 },
+ { 34, 19, 21, 102, 132, 188, 16, 76, 124 },
+ { 62, 18, 78, 95, 85, 57, 50, 48, 51 } },
+ { { 193, 101, 35, 159, 215, 111, 89, 46, 111 },
+ { 60, 148, 31, 172, 219, 228, 21, 18, 111 },
+ { 112, 113, 77, 85, 179, 255, 38, 120, 114 },
+ { 40, 42, 1, 196, 245, 209, 10, 25, 109 },
+ { 100, 80, 8, 43, 154, 1, 51, 26, 71 },
+ { 88, 43, 29, 140, 166, 213, 37, 43, 154 },
+ { 61, 63, 30, 155, 67, 45, 68, 1, 209 },
+ { 142, 78, 78, 16, 255, 128, 34, 197, 171 },
+ { 41, 40, 5, 102, 211, 183, 4, 1, 221 },
+ { 51, 50, 17, 168, 209, 192, 23, 25, 82 } },
+ { { 125, 98, 42, 88, 104, 85, 117, 175, 82 },
+ { 95, 84, 53, 89, 128, 100, 113, 101, 45 },
+ { 75, 79, 123, 47, 51, 128, 81, 171, 1 },
+ { 57, 17, 5, 71, 102, 57, 53, 41, 49 },
+ { 115, 21, 2, 10, 102, 255, 166, 23, 6 },
+ { 38, 33, 13, 121, 57, 73, 26, 1, 85 },
+ { 41, 10, 67, 138, 77, 110, 90, 47, 114 },
+ { 101, 29, 16, 10, 85, 128, 101, 196, 26 },
+ { 57, 18, 10, 102, 102, 213, 34, 20, 43 },
+ { 117, 20, 15, 36, 163, 128, 68, 1, 26 } },
+ { { 138, 31, 36, 171, 27, 166, 38, 44, 229 },
+ { 67, 87, 58, 169, 82, 115, 26, 59, 179 },
+ { 63, 59, 90, 180, 59, 166, 93, 73, 154 },
+ { 40, 40, 21, 116, 143, 209, 34, 39, 175 },
+ { 57, 46, 22, 24, 128, 1, 54, 17, 37 },
+ { 47, 15, 16, 183, 34, 223, 49, 45, 183 },
+ { 46, 17, 33, 183, 6, 98, 15, 32, 183 },
+ { 65, 32, 73, 115, 28, 128, 23, 128, 205 },
+ { 40, 3, 9, 115, 51, 192, 18, 6, 223 },
+ { 87, 37, 9, 115, 59, 77, 64, 21, 47 } },
+ { { 104, 55, 44, 218, 9, 54, 53, 130, 226 },
+ { 64, 90, 70, 205, 40, 41, 23, 26, 57 },
+ { 54, 57, 112, 184, 5, 41, 38, 166, 213 },
+ { 30, 34, 26, 133, 152, 116, 10, 32, 134 },
+ { 75, 32, 12, 51, 192, 255, 160, 43, 51 },
+ { 39, 19, 53, 221, 26, 114, 32, 73, 255 },
+ { 31, 9, 65, 234, 2, 15, 1, 118, 73 },
+ { 88, 31, 35, 67, 102, 85, 55, 186, 85 },
+ { 56, 21, 23, 111, 59, 205, 45, 37, 192 },
+ { 55, 38, 70, 124, 73, 102, 1, 34, 98 } },
+ { { 102, 61, 71, 37, 34, 53, 31, 243, 192 },
+ { 69, 60, 71, 38, 73, 119, 28, 222, 37 },
+ { 68, 45, 128, 34, 1, 47, 11, 245, 171 },
+ { 62, 17, 19, 70, 146, 85, 55, 62, 70 },
+ { 75, 15, 9, 9, 64, 255, 184, 119, 16 },
+ { 37, 43, 37, 154, 100, 163, 85, 160, 1 },
+ { 63, 9, 92, 136, 28, 64, 32, 201, 85 },
+ { 86, 6, 28, 5, 64, 255, 25, 248, 1 },
+ { 56, 8, 17, 132, 137, 255, 55, 116, 128 },
+ { 58, 15, 20, 82, 135, 57, 26, 121, 40 } },
+ { { 164, 50, 31, 137, 154, 133, 25, 35, 218 },
+ { 51, 103, 44, 131, 131, 123, 31, 6, 158 },
+ { 86, 40, 64, 135, 148, 224, 45, 183, 128 },
+ { 22, 26, 17, 131, 240, 154, 14, 1, 209 },
+ { 83, 12, 13, 54, 192, 255, 68, 47, 28 },
+ { 45, 16, 21, 91, 64, 222, 7, 1, 197 },
+ { 56, 21, 39, 155, 60, 138, 23, 102, 213 },
+ { 85, 26, 85, 85, 128, 128, 32, 146, 171 },
+ { 18, 11, 7, 63, 144, 171, 4, 4, 246 },
+ { 35, 27, 10, 146, 174, 171, 12, 26, 128 } },
+ { { 190, 80, 35, 99, 180, 80, 126, 54, 45 },
+ { 85, 126, 47, 87, 176, 51, 41, 20, 32 },
+ { 101, 75, 128, 139, 118, 146, 116, 128, 85 },
+ { 56, 41, 15, 176, 236, 85, 37, 9, 62 },
+ { 146, 36, 19, 30, 171, 255, 97, 27, 20 },
+ { 71, 30, 17, 119, 118, 255, 17, 18, 138 },
+ { 101, 38, 60, 138, 55, 70, 43, 26, 142 },
+ { 138, 45, 61, 62, 219, 1, 81, 188, 64 },
+ { 32, 41, 20, 117, 151, 142, 20, 21, 163 },
+ { 112, 19, 12, 61, 195, 128, 48, 4, 24 } }
+ };
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_VP8_ENTROPYMODEDATA_H_
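Each { value, Len } pair above is a code word read most-significant bit
first; a small sketch of emitting one (function name illustrative, assumes
<stdio.h>):

    #include <stdio.h>

    /* e.g. { 4, 3 } from vp8_ymode_encodings prints "100". */
    static void print_token_bits(const struct vp8_token_struct *tok) {
      int i;
      for (i = tok->Len - 1; i >= 0; --i) {
        putchar('0' + ((tok->value >> i) & 1));
      }
    }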
diff --git a/media/libvpx/libvpx/vp8/common/vp8_loopfilter.c b/media/libvpx/libvpx/vp8/common/vp8_loopfilter.c
new file mode 100644
index 0000000000..9c9e5f351b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/vp8_loopfilter.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "loopfilter.h"
+#include "onyxc_int.h"
+#include "vpx_mem/vpx_mem.h"
+
+static void lf_init_lut(loop_filter_info_n *lfi) {
+ int filt_lvl;
+
+ for (filt_lvl = 0; filt_lvl <= MAX_LOOP_FILTER; ++filt_lvl) {
+ if (filt_lvl >= 40) {
+ lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 2;
+ lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 3;
+ } else if (filt_lvl >= 20) {
+ lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 1;
+ lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 2;
+ } else if (filt_lvl >= 15) {
+ lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 1;
+ lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 1;
+ } else {
+ lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 0;
+ lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 0;
+ }
+ }
+
+ lfi->mode_lf_lut[DC_PRED] = 1;
+ lfi->mode_lf_lut[V_PRED] = 1;
+ lfi->mode_lf_lut[H_PRED] = 1;
+ lfi->mode_lf_lut[TM_PRED] = 1;
+ lfi->mode_lf_lut[B_PRED] = 0;
+
+ lfi->mode_lf_lut[ZEROMV] = 1;
+ lfi->mode_lf_lut[NEARESTMV] = 2;
+ lfi->mode_lf_lut[NEARMV] = 2;
+ lfi->mode_lf_lut[NEWMV] = 2;
+ lfi->mode_lf_lut[SPLITMV] = 3;
+}
+
+void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
+ int sharpness_lvl) {
+ int i;
+
+ /* For each possible value for the loop filter fill out limits */
+ for (i = 0; i <= MAX_LOOP_FILTER; ++i) {
+ int filt_lvl = i;
+ int block_inside_limit = 0;
+
+    /* Set loop filter parameters that control sharpness. */
+ block_inside_limit = filt_lvl >> (sharpness_lvl > 0);
+ block_inside_limit = block_inside_limit >> (sharpness_lvl > 4);
+
+ if (sharpness_lvl > 0) {
+ if (block_inside_limit > (9 - sharpness_lvl)) {
+ block_inside_limit = (9 - sharpness_lvl);
+ }
+ }
+
+ if (block_inside_limit < 1) block_inside_limit = 1;
+
+ memset(lfi->lim[i], block_inside_limit, SIMD_WIDTH);
+ memset(lfi->blim[i], (2 * filt_lvl + block_inside_limit), SIMD_WIDTH);
+ memset(lfi->mblim[i], (2 * (filt_lvl + 2) + block_inside_limit),
+ SIMD_WIDTH);
+ }
+}
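+
+/* Worked example for filt_lvl = 32 (follows directly from the shifts and
+ * clamp above):
+ *   sharpness 0: lim = 32, blim = 2*32 + 32 = 96, mblim = 2*(32+2) + 32 = 100
+ *   sharpness 5: lim = (32 >> 1) >> 1 = 8, clamped to 9 - 5 = 4, so
+ *                blim = 2*32 + 4 = 68, mblim = 2*(32+2) + 4 = 72 */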
+
+void vp8_loop_filter_init(VP8_COMMON *cm) {
+ loop_filter_info_n *lfi = &cm->lf_info;
+ int i;
+
+  /* init limits for given sharpness */
+ vp8_loop_filter_update_sharpness(lfi, cm->sharpness_level);
+ cm->last_sharpness_level = cm->sharpness_level;
+
+ /* init LUT for lvl and hev thr picking */
+ lf_init_lut(lfi);
+
+ /* init hev threshold const vectors */
+ for (i = 0; i < 4; ++i) {
+ memset(lfi->hev_thr[i], i, SIMD_WIDTH);
+ }
+}
+
+void vp8_loop_filter_frame_init(VP8_COMMON *cm, MACROBLOCKD *mbd,
+ int default_filt_lvl) {
+ int seg, /* segment number */
+ ref, /* index in ref_lf_deltas */
+ mode; /* index in mode_lf_deltas */
+
+ loop_filter_info_n *lfi = &cm->lf_info;
+
+ /* update limits if sharpness has changed */
+ if (cm->last_sharpness_level != cm->sharpness_level) {
+ vp8_loop_filter_update_sharpness(lfi, cm->sharpness_level);
+ cm->last_sharpness_level = cm->sharpness_level;
+ }
+
+ for (seg = 0; seg < MAX_MB_SEGMENTS; ++seg) {
+ int lvl_seg = default_filt_lvl;
+ int lvl_ref, lvl_mode;
+
+ /* Note the baseline filter values for each segment */
+ if (mbd->segmentation_enabled) {
+ if (mbd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
+ lvl_seg = mbd->segment_feature_data[MB_LVL_ALT_LF][seg];
+ } else { /* Delta Value */
+ lvl_seg += mbd->segment_feature_data[MB_LVL_ALT_LF][seg];
+ }
+ lvl_seg = (lvl_seg > 0) ? ((lvl_seg > 63) ? 63 : lvl_seg) : 0;
+ }
+
+ if (!mbd->mode_ref_lf_delta_enabled) {
+ /* we could get rid of this if we assume that deltas are set to
+ * zero when not in use; encoder always uses deltas
+ */
+ memset(lfi->lvl[seg][0], lvl_seg, 4 * 4);
+ continue;
+ }
+
+ /* INTRA_FRAME */
+ ref = INTRA_FRAME;
+
+ /* Apply delta for reference frame */
+ lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref];
+
+ /* Apply delta for Intra modes */
+ mode = 0; /* B_PRED */
+ /* Only the split mode BPRED has a further special case */
+ lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
+ /* clamp */
+ lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0;
+
+ lfi->lvl[seg][ref][mode] = lvl_mode;
+
+ mode = 1; /* all the rest of Intra modes */
+ /* clamp */
+ lvl_mode = (lvl_ref > 0) ? (lvl_ref > 63 ? 63 : lvl_ref) : 0;
+ lfi->lvl[seg][ref][mode] = lvl_mode;
+
+ /* LAST, GOLDEN, ALT */
+ for (ref = 1; ref < MAX_REF_FRAMES; ++ref) {
+ /* Apply delta for reference frame */
+ lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref];
+
+ /* Apply delta for Inter modes */
+ for (mode = 1; mode < 4; ++mode) {
+ lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
+ /* clamp */
+ lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0;
+
+ lfi->lvl[seg][ref][mode] = lvl_mode;
+ }
+ }
+ }
+}
+
+void vp8_loop_filter_row_normal(VP8_COMMON *cm, MODE_INFO *mode_info_context,
+ int mb_row, int post_ystride, int post_uvstride,
+ unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr) {
+ int mb_col;
+ int filter_level;
+ loop_filter_info_n *lfi_n = &cm->lf_info;
+ loop_filter_info lfi;
+ FRAME_TYPE frame_type = cm->frame_type;
+
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+ int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
+ mode_info_context->mbmi.mode != SPLITMV &&
+ mode_info_context->mbmi.mb_skip_coeff);
+
+ const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
+ const int seg = mode_info_context->mbmi.segment_id;
+ const int ref_frame = mode_info_context->mbmi.ref_frame;
+
+ filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
+
+ if (filter_level) {
+ const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level];
+ lfi.mblim = lfi_n->mblim[filter_level];
+ lfi.blim = lfi_n->blim[filter_level];
+ lfi.lim = lfi_n->lim[filter_level];
+ lfi.hev_thr = lfi_n->hev_thr[hev_index];
+
+ if (mb_col > 0)
+ vp8_loop_filter_mbv(y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride,
+ &lfi);
+
+ if (!skip_lf)
+ vp8_loop_filter_bv(y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride,
+ &lfi);
+
+ /* don't apply across umv border */
+ if (mb_row > 0)
+ vp8_loop_filter_mbh(y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride,
+ &lfi);
+
+ if (!skip_lf)
+ vp8_loop_filter_bh(y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride,
+ &lfi);
+ }
+
+ y_ptr += 16;
+ u_ptr += 8;
+ v_ptr += 8;
+
+ mode_info_context++; /* step to next MB */
+ }
+}
+
+void vp8_loop_filter_row_simple(VP8_COMMON *cm, MODE_INFO *mode_info_context,
+ int mb_row, int post_ystride,
+ unsigned char *y_ptr) {
+ int mb_col;
+ int filter_level;
+ loop_filter_info_n *lfi_n = &cm->lf_info;
+
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+ int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
+ mode_info_context->mbmi.mode != SPLITMV &&
+ mode_info_context->mbmi.mb_skip_coeff);
+
+ const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
+ const int seg = mode_info_context->mbmi.segment_id;
+ const int ref_frame = mode_info_context->mbmi.ref_frame;
+
+ filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
+
+ if (filter_level) {
+ if (mb_col > 0)
+ vp8_loop_filter_simple_mbv(y_ptr, post_ystride,
+ lfi_n->mblim[filter_level]);
+
+ if (!skip_lf)
+ vp8_loop_filter_simple_bv(y_ptr, post_ystride,
+ lfi_n->blim[filter_level]);
+
+ /* don't apply across umv border */
+ if (mb_row > 0)
+ vp8_loop_filter_simple_mbh(y_ptr, post_ystride,
+ lfi_n->mblim[filter_level]);
+
+ if (!skip_lf)
+ vp8_loop_filter_simple_bh(y_ptr, post_ystride,
+ lfi_n->blim[filter_level]);
+ }
+
+ y_ptr += 16;
+
+ mode_info_context++; /* step to next MB */
+ }
+}
+
+void vp8_loop_filter_frame(VP8_COMMON *cm, MACROBLOCKD *mbd, int frame_type) {
+ YV12_BUFFER_CONFIG *post = cm->frame_to_show;
+ loop_filter_info_n *lfi_n = &cm->lf_info;
+ loop_filter_info lfi;
+
+ int mb_row;
+ int mb_col;
+ int mb_rows = cm->mb_rows;
+ int mb_cols = cm->mb_cols;
+
+ int filter_level;
+
+ unsigned char *y_ptr, *u_ptr, *v_ptr;
+
+ /* Point at base of Mb MODE_INFO list */
+ const MODE_INFO *mode_info_context = cm->mi;
+ int post_y_stride = post->y_stride;
+ int post_uv_stride = post->uv_stride;
+
+ /* Initialize the loop filter for this frame. */
+ vp8_loop_filter_frame_init(cm, mbd, cm->filter_level);
+
+ /* Set up the buffer pointers */
+ y_ptr = post->y_buffer;
+ u_ptr = post->u_buffer;
+ v_ptr = post->v_buffer;
+
+ /* vp8_filter each macro block */
+ if (cm->filter_type == NORMAL_LOOPFILTER) {
+ for (mb_row = 0; mb_row < mb_rows; ++mb_row) {
+ for (mb_col = 0; mb_col < mb_cols; ++mb_col) {
+ int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
+ mode_info_context->mbmi.mode != SPLITMV &&
+ mode_info_context->mbmi.mb_skip_coeff);
+
+ const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
+ const int seg = mode_info_context->mbmi.segment_id;
+ const int ref_frame = mode_info_context->mbmi.ref_frame;
+
+ filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
+
+ if (filter_level) {
+ const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level];
+ lfi.mblim = lfi_n->mblim[filter_level];
+ lfi.blim = lfi_n->blim[filter_level];
+ lfi.lim = lfi_n->lim[filter_level];
+ lfi.hev_thr = lfi_n->hev_thr[hev_index];
+
+ if (mb_col > 0)
+ vp8_loop_filter_mbv(y_ptr, u_ptr, v_ptr, post_y_stride,
+ post_uv_stride, &lfi);
+
+ if (!skip_lf)
+ vp8_loop_filter_bv(y_ptr, u_ptr, v_ptr, post_y_stride,
+ post_uv_stride, &lfi);
+
+ /* don't apply across umv border */
+ if (mb_row > 0)
+ vp8_loop_filter_mbh(y_ptr, u_ptr, v_ptr, post_y_stride,
+ post_uv_stride, &lfi);
+
+ if (!skip_lf)
+ vp8_loop_filter_bh(y_ptr, u_ptr, v_ptr, post_y_stride,
+ post_uv_stride, &lfi);
+ }
+
+ y_ptr += 16;
+ u_ptr += 8;
+ v_ptr += 8;
+
+ mode_info_context++; /* step to next MB */
+ }
+ y_ptr += post_y_stride * 16 - post->y_width;
+ u_ptr += post_uv_stride * 8 - post->uv_width;
+ v_ptr += post_uv_stride * 8 - post->uv_width;
+
+ mode_info_context++; /* Skip border mb */
+ }
+ } else { /* SIMPLE_LOOPFILTER */
+ for (mb_row = 0; mb_row < mb_rows; ++mb_row) {
+ for (mb_col = 0; mb_col < mb_cols; ++mb_col) {
+ int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
+ mode_info_context->mbmi.mode != SPLITMV &&
+ mode_info_context->mbmi.mb_skip_coeff);
+
+ const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
+ const int seg = mode_info_context->mbmi.segment_id;
+ const int ref_frame = mode_info_context->mbmi.ref_frame;
+
+ filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
+ if (filter_level) {
+ const unsigned char *mblim = lfi_n->mblim[filter_level];
+ const unsigned char *blim = lfi_n->blim[filter_level];
+
+ if (mb_col > 0)
+ vp8_loop_filter_simple_mbv(y_ptr, post_y_stride, mblim);
+
+ if (!skip_lf) vp8_loop_filter_simple_bv(y_ptr, post_y_stride, blim);
+
+ /* don't apply across umv border */
+ if (mb_row > 0)
+ vp8_loop_filter_simple_mbh(y_ptr, post_y_stride, mblim);
+
+ if (!skip_lf) vp8_loop_filter_simple_bh(y_ptr, post_y_stride, blim);
+ }
+
+ y_ptr += 16;
+ u_ptr += 8;
+ v_ptr += 8;
+
+ mode_info_context++; /* step to next MB */
+ }
+ y_ptr += post_y_stride * 16 - post->y_width;
+ u_ptr += post_uv_stride * 8 - post->uv_width;
+ v_ptr += post_uv_stride * 8 - post->uv_width;
+
+ mode_info_context++; /* Skip border mb */
+ }
+ }
+}
+
+void vp8_loop_filter_frame_yonly(VP8_COMMON *cm, MACROBLOCKD *mbd,
+ int default_filt_lvl) {
+ YV12_BUFFER_CONFIG *post = cm->frame_to_show;
+
+ unsigned char *y_ptr;
+ int mb_row;
+ int mb_col;
+
+ loop_filter_info_n *lfi_n = &cm->lf_info;
+ loop_filter_info lfi;
+
+ int filter_level;
+ FRAME_TYPE frame_type = cm->frame_type;
+
+ /* Point at base of Mb MODE_INFO list */
+ const MODE_INFO *mode_info_context = cm->mi;
+
+#if 0
+ if(default_filt_lvl == 0) /* no filter applied */
+ return;
+#endif
+
+ /* Initialize the loop filter for this frame. */
+ vp8_loop_filter_frame_init(cm, mbd, default_filt_lvl);
+
+ /* Set up the buffer pointers */
+ y_ptr = post->y_buffer;
+
+ /* vp8_filter each macro block */
+ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+ int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
+ mode_info_context->mbmi.mode != SPLITMV &&
+ mode_info_context->mbmi.mb_skip_coeff);
+
+ const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
+ const int seg = mode_info_context->mbmi.segment_id;
+ const int ref_frame = mode_info_context->mbmi.ref_frame;
+
+ filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
+
+ if (filter_level) {
+ if (cm->filter_type == NORMAL_LOOPFILTER) {
+ const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level];
+ lfi.mblim = lfi_n->mblim[filter_level];
+ lfi.blim = lfi_n->blim[filter_level];
+ lfi.lim = lfi_n->lim[filter_level];
+ lfi.hev_thr = lfi_n->hev_thr[hev_index];
+
+ if (mb_col > 0)
+ vp8_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
+
+ if (!skip_lf)
+ vp8_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
+
+ /* don't apply across umv border */
+ if (mb_row > 0)
+ vp8_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
+
+ if (!skip_lf)
+ vp8_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ } else {
+ if (mb_col > 0)
+ vp8_loop_filter_simple_mbv(y_ptr, post->y_stride,
+ lfi_n->mblim[filter_level]);
+
+ if (!skip_lf)
+ vp8_loop_filter_simple_bv(y_ptr, post->y_stride,
+ lfi_n->blim[filter_level]);
+
+ /* don't apply across umv border */
+ if (mb_row > 0)
+ vp8_loop_filter_simple_mbh(y_ptr, post->y_stride,
+ lfi_n->mblim[filter_level]);
+
+ if (!skip_lf)
+ vp8_loop_filter_simple_bh(y_ptr, post->y_stride,
+ lfi_n->blim[filter_level]);
+ }
+ }
+
+ y_ptr += 16;
+ mode_info_context++; /* step to next MB */
+ }
+
+ y_ptr += post->y_stride * 16 - post->y_width;
+ mode_info_context++; /* Skip border mb */
+ }
+}
+
+void vp8_loop_filter_partial_frame(VP8_COMMON *cm, MACROBLOCKD *mbd,
+ int default_filt_lvl) {
+ YV12_BUFFER_CONFIG *post = cm->frame_to_show;
+
+ unsigned char *y_ptr;
+ int mb_row;
+ int mb_col;
+ int mb_cols = post->y_width >> 4;
+ int mb_rows = post->y_height >> 4;
+
+ int linestocopy;
+
+ loop_filter_info_n *lfi_n = &cm->lf_info;
+ loop_filter_info lfi;
+
+ int filter_level;
+ FRAME_TYPE frame_type = cm->frame_type;
+
+ const MODE_INFO *mode_info_context;
+
+#if 0
+ if(default_filt_lvl == 0) /* no filter applied */
+ return;
+#endif
+
+ /* Initialize the loop filter for this frame. */
+ vp8_loop_filter_frame_init(cm, mbd, default_filt_lvl);
+
+ /* number of MB rows to use in partial filtering */
+ linestocopy = mb_rows / PARTIAL_FRAME_FRACTION;
+ linestocopy = linestocopy ? linestocopy << 4 : 16; /* 16 lines per MB */
+
+ /* Set up the buffer pointers; partial image starts at ~middle of frame */
+ y_ptr = post->y_buffer + ((post->y_height >> 5) * 16) * post->y_stride;
+ mode_info_context = cm->mi + (post->y_height >> 5) * (mb_cols + 1);
+
+ /* vp8_filter each macro block */
+ for (mb_row = 0; mb_row < (linestocopy >> 4); ++mb_row) {
+ for (mb_col = 0; mb_col < mb_cols; ++mb_col) {
+ int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
+ mode_info_context->mbmi.mode != SPLITMV &&
+ mode_info_context->mbmi.mb_skip_coeff);
+
+ const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
+ const int seg = mode_info_context->mbmi.segment_id;
+ const int ref_frame = mode_info_context->mbmi.ref_frame;
+
+ filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
+
+ if (filter_level) {
+ if (cm->filter_type == NORMAL_LOOPFILTER) {
+ const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level];
+ lfi.mblim = lfi_n->mblim[filter_level];
+ lfi.blim = lfi_n->blim[filter_level];
+ lfi.lim = lfi_n->lim[filter_level];
+ lfi.hev_thr = lfi_n->hev_thr[hev_index];
+
+ if (mb_col > 0)
+ vp8_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
+
+ if (!skip_lf)
+ vp8_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi);
+
+ vp8_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
+
+ if (!skip_lf)
+ vp8_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi);
+ } else {
+ if (mb_col > 0)
+ vp8_loop_filter_simple_mbv(y_ptr, post->y_stride,
+ lfi_n->mblim[filter_level]);
+
+ if (!skip_lf)
+ vp8_loop_filter_simple_bv(y_ptr, post->y_stride,
+ lfi_n->blim[filter_level]);
+
+ vp8_loop_filter_simple_mbh(y_ptr, post->y_stride,
+ lfi_n->mblim[filter_level]);
+
+ if (!skip_lf)
+ vp8_loop_filter_simple_bh(y_ptr, post->y_stride,
+ lfi_n->blim[filter_level]);
+ }
+ }
+
+ y_ptr += 16;
+ mode_info_context += 1; /* step to next MB */
+ }
+
+ y_ptr += post->y_stride * 16 - post->y_width;
+ mode_info_context += 1; /* Skip border mb */
+ }
+}
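To make the partial-frame arithmetic concrete (a sketch assuming
PARTIAL_FRAME_FRACTION is 8, as defined in loopfilter.h; the 480-line frame
is an invented example):

    /* post->y_height = 480 -> mb_rows = 480 >> 4 = 30
     * linestocopy    = (30 / 8) << 4 = 48 lines (3 MB rows)
     * start row      = (480 >> 5) * 16 = 240, roughly the vertical middle,
     * so the partial pass filters rows 240..287 only. */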
diff --git a/media/libvpx/libvpx/vp8/common/vp8_skin_detection.c b/media/libvpx/libvpx/vp8/common/vp8_skin_detection.c
new file mode 100644
index 0000000000..6739efa5fe
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/vp8_skin_detection.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8/common/alloccommon.h"
+#include "vp8/common/vp8_skin_detection.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_util/vpx_write_yuv_frame.h"
+
+static int avg_2x2(const uint8_t *s, int p) {
+ int i, j;
+ int sum = 0;
+ for (i = 0; i < 2; ++i, s += p) {
+ for (j = 0; j < 2; ++j) {
+ sum += s[j];
+ }
+ }
+ return (sum + 2) >> 2;
+}
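+
+// Rounding check: the +2 bias gives round-to-nearest. For example, pixels
+// { 1, 2, 2, 2 } sum to 7 and (7 + 2) >> 2 = 2 (7/4 = 1.75 rounds up),
+// whereas a plain sum >> 2 would truncate to 1.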
+
+int vp8_compute_skin_block(const uint8_t *y, const uint8_t *u, const uint8_t *v,
+ int stride, int strideuv,
+ SKIN_DETECTION_BLOCK_SIZE bsize, int consec_zeromv,
+ int curr_motion_magn) {
+  // No skin if the block has had zero/small motion for a long run of frames.
+ if (consec_zeromv > 60 && curr_motion_magn == 0) {
+ return 0;
+ } else {
+ int motion = 1;
+ if (consec_zeromv > 25 && curr_motion_magn == 0) motion = 0;
+ if (bsize == SKIN_16X16) {
+ // Take the average of center 2x2 pixels.
+ const int ysource = avg_2x2(y + 7 * stride + 7, stride);
+ const int usource = avg_2x2(u + 3 * strideuv + 3, strideuv);
+ const int vsource = avg_2x2(v + 3 * strideuv + 3, strideuv);
+ return vpx_skin_pixel(ysource, usource, vsource, motion);
+ } else {
+ int num_skin = 0;
+ int i, j;
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++) {
+ // Take the average of center 2x2 pixels.
+ const int ysource = avg_2x2(y + 3 * stride + 3, stride);
+ const int usource = avg_2x2(u + strideuv + 1, strideuv);
+ const int vsource = avg_2x2(v + strideuv + 1, strideuv);
+ num_skin += vpx_skin_pixel(ysource, usource, vsource, motion);
+ if (num_skin >= 2) return 1;
+ y += 8;
+ u += 4;
+ v += 4;
+ }
+ y += (stride << 3) - 16;
+ u += (strideuv << 2) - 8;
+ v += (strideuv << 2) - 8;
+ }
+
+ return 0;
+ }
+ }
+}
+
+#ifdef OUTPUT_YUV_SKINMAP
+// For viewing skin map on input source.
+void vp8_compute_skin_map(VP8_COMP *const cpi, FILE *yuv_skinmap_file) {
+ int i, j, mb_row, mb_col, num_bl;
+ VP8_COMMON *const cm = &cpi->common;
+ uint8_t *y;
+ const uint8_t *src_y = cpi->Source->y_buffer;
+ const int src_ystride = cpi->Source->y_stride;
+ int offset = 0;
+
+ YV12_BUFFER_CONFIG skinmap;
+ memset(&skinmap, 0, sizeof(skinmap));
+ if (vp8_yv12_alloc_frame_buffer(&skinmap, cm->Width, cm->Height,
+ VP8BORDERINPIXELS) < 0) {
+ vpx_free_frame_buffer(&skinmap);
+ return;
+ }
+ memset(skinmap.buffer_alloc, 128, skinmap.frame_size);
+ y = skinmap.y_buffer;
+ // Loop through blocks and set skin map based on center pixel of block.
+ // Set y to white for skin block, otherwise set to source with gray scale.
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 1) {
+ num_bl = 0;
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col += 1) {
+ const int is_skin = cpi->skin_map[offset++];
+ for (i = 0; i < 16; i++) {
+ for (j = 0; j < 16; j++) {
+ y[i * src_ystride + j] = is_skin ? 255 : src_y[i * src_ystride + j];
+ }
+ }
+ num_bl++;
+ y += 16;
+ src_y += 16;
+ }
+ y += (src_ystride << 4) - (num_bl << 4);
+ src_y += (src_ystride << 4) - (num_bl << 4);
+ }
+ vpx_write_yuv_frame(yuv_skinmap_file, &skinmap);
+ vpx_free_frame_buffer(&skinmap);
+}
+#endif // OUTPUT_YUV_SKINMAP
diff --git a/media/libvpx/libvpx/vp8/common/vp8_skin_detection.h b/media/libvpx/libvpx/vp8/common/vp8_skin_detection.h
new file mode 100644
index 0000000000..ef0e4ae4fe
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/vp8_skin_detection.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_COMMON_VP8_SKIN_DETECTION_H_
+#define VPX_VP8_COMMON_VP8_SKIN_DETECTION_H_
+
+#include "vp8/encoder/onyx_int.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_dsp/skin_detection.h"
+#include "vpx_scale/yv12config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP8_COMP;
+
+typedef enum {
+  // Skin detection based on 8x8 blocks. If two of them are identified as
+  // skin, the macroblock is marked as skin.
+ SKIN_8X8,
+ // Skin detection based on 16x16 block.
+ SKIN_16X16
+} SKIN_DETECTION_BLOCK_SIZE;
+
+int vp8_compute_skin_block(const uint8_t *y, const uint8_t *u, const uint8_t *v,
+ int stride, int strideuv,
+ SKIN_DETECTION_BLOCK_SIZE bsize, int consec_zeromv,
+ int curr_motion_magn);
+
+#ifdef OUTPUT_YUV_SKINMAP
+// For viewing skin map on input source.
+void vp8_compute_skin_map(struct VP8_COMP *const cpi, FILE *yuv_skinmap_file);
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_COMMON_VP8_SKIN_DETECTION_H_
diff --git a/media/libvpx/libvpx/vp8/common/x86/bilinear_filter_sse2.c b/media/libvpx/libvpx/vp8/common/x86/bilinear_filter_sse2.c
new file mode 100644
index 0000000000..ff6cbbd68c
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/bilinear_filter_sse2.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <xmmintrin.h>
+
+#include "./vp8_rtcd.h"
+#include "./vpx_config.h"
+#include "vp8/common/filter.h"
+#include "vpx_dsp/x86/mem_sse2.h"
+#include "vpx_ports/mem.h"
+
+static INLINE void horizontal_16x16(uint8_t *src, const int stride,
+ uint16_t *dst, const int xoffset) {
+ int h;
+ const __m128i zero = _mm_setzero_si128();
+
+ if (xoffset == 0) {
+ for (h = 0; h < 17; ++h) {
+ const __m128i a = _mm_loadu_si128((__m128i *)src);
+ const __m128i a_lo = _mm_unpacklo_epi8(a, zero);
+ const __m128i a_hi = _mm_unpackhi_epi8(a, zero);
+ _mm_store_si128((__m128i *)dst, a_lo);
+ _mm_store_si128((__m128i *)(dst + 8), a_hi);
+ src += stride;
+ dst += 16;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i hfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][0]);
+ const __m128i hfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][1]);
+
+ for (h = 0; h < 17; ++h) {
+ const __m128i a = _mm_loadu_si128((__m128i *)src);
+ const __m128i a_lo = _mm_unpacklo_epi8(a, zero);
+ const __m128i a_hi = _mm_unpackhi_epi8(a, zero);
+ const __m128i a_lo_filtered = _mm_mullo_epi16(a_lo, hfilter_0);
+ const __m128i a_hi_filtered = _mm_mullo_epi16(a_hi, hfilter_0);
+
+ const __m128i b = _mm_loadu_si128((__m128i *)(src + 1));
+ const __m128i b_lo = _mm_unpacklo_epi8(b, zero);
+ const __m128i b_hi = _mm_unpackhi_epi8(b, zero);
+ const __m128i b_lo_filtered = _mm_mullo_epi16(b_lo, hfilter_1);
+ const __m128i b_hi_filtered = _mm_mullo_epi16(b_hi, hfilter_1);
+
+ const __m128i sum_lo = _mm_add_epi16(a_lo_filtered, b_lo_filtered);
+ const __m128i sum_hi = _mm_add_epi16(a_hi_filtered, b_hi_filtered);
+
+ const __m128i compensated_lo = _mm_add_epi16(sum_lo, round_factor);
+ const __m128i compensated_hi = _mm_add_epi16(sum_hi, round_factor);
+
+ const __m128i shifted_lo =
+ _mm_srai_epi16(compensated_lo, VP8_FILTER_SHIFT);
+ const __m128i shifted_hi =
+ _mm_srai_epi16(compensated_hi, VP8_FILTER_SHIFT);
+
+ _mm_store_si128((__m128i *)dst, shifted_lo);
+ _mm_store_si128((__m128i *)(dst + 8), shifted_hi);
+ src += stride;
+ dst += 16;
+ }
+ }
+}
+
+static INLINE void vertical_16x16(uint16_t *src, uint8_t *dst, const int stride,
+ const int yoffset) {
+ int h;
+
+ if (yoffset == 0) {
+ for (h = 0; h < 16; ++h) {
+ const __m128i row_lo = _mm_load_si128((__m128i *)src);
+ const __m128i row_hi = _mm_load_si128((__m128i *)(src + 8));
+ const __m128i packed = _mm_packus_epi16(row_lo, row_hi);
+ _mm_store_si128((__m128i *)dst, packed);
+ src += 16;
+ dst += stride;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i vfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][0]);
+ const __m128i vfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][1]);
+
+ __m128i row_0_lo = _mm_load_si128((__m128i *)src);
+ __m128i row_0_hi = _mm_load_si128((__m128i *)(src + 8));
+ src += 16;
+ for (h = 0; h < 16; ++h) {
+ const __m128i row_0_lo_filtered = _mm_mullo_epi16(row_0_lo, vfilter_0);
+ const __m128i row_0_hi_filtered = _mm_mullo_epi16(row_0_hi, vfilter_0);
+
+ const __m128i row_1_lo = _mm_load_si128((__m128i *)src);
+ const __m128i row_1_hi = _mm_load_si128((__m128i *)(src + 8));
+ const __m128i row_1_lo_filtered = _mm_mullo_epi16(row_1_lo, vfilter_1);
+ const __m128i row_1_hi_filtered = _mm_mullo_epi16(row_1_hi, vfilter_1);
+
+ const __m128i sum_lo =
+ _mm_add_epi16(row_0_lo_filtered, row_1_lo_filtered);
+ const __m128i sum_hi =
+ _mm_add_epi16(row_0_hi_filtered, row_1_hi_filtered);
+
+ const __m128i compensated_lo = _mm_add_epi16(sum_lo, round_factor);
+ const __m128i compensated_hi = _mm_add_epi16(sum_hi, round_factor);
+
+ const __m128i shifted_lo =
+ _mm_srai_epi16(compensated_lo, VP8_FILTER_SHIFT);
+ const __m128i shifted_hi =
+ _mm_srai_epi16(compensated_hi, VP8_FILTER_SHIFT);
+
+ const __m128i packed = _mm_packus_epi16(shifted_lo, shifted_hi);
+ _mm_store_si128((__m128i *)dst, packed);
+ row_0_lo = row_1_lo;
+ row_0_hi = row_1_hi;
+ src += 16;
+ dst += stride;
+ }
+ }
+}
+
+void vp8_bilinear_predict16x16_sse2(uint8_t *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, uint8_t *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, uint16_t, FData[16 * 17]);
+
+ assert((xoffset | yoffset) != 0);
+
+ horizontal_16x16(src_ptr, src_pixels_per_line, FData, xoffset);
+
+ vertical_16x16(FData, dst_ptr, dst_pitch, yoffset);
+}
+
+static INLINE void horizontal_8xN(uint8_t *src, const int stride, uint16_t *dst,
+ const int xoffset, const int height) {
+ int h;
+ const __m128i zero = _mm_setzero_si128();
+
+ if (xoffset == 0) {
+ for (h = 0; h < height; ++h) {
+ const __m128i a = _mm_loadl_epi64((__m128i *)src);
+ const __m128i a_u16 = _mm_unpacklo_epi8(a, zero);
+ _mm_store_si128((__m128i *)dst, a_u16);
+ src += stride;
+ dst += 8;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i hfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][0]);
+ const __m128i hfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][1]);
+
+    // Filter horizontally. Rather than load the whole array and transpose,
+    // load 16 values (overreading) and shift to set up the second value. Do
+    // an "extra" row (the 9th for 8x8, the 5th for 8x4) so the vertical pass
+    // has the necessary context.
+ for (h = 0; h < height; ++h) {
+ const __m128i a = _mm_loadu_si128((__m128i *)src);
+ const __m128i b = _mm_srli_si128(a, 1);
+ const __m128i a_u16 = _mm_unpacklo_epi8(a, zero);
+ const __m128i b_u16 = _mm_unpacklo_epi8(b, zero);
+ const __m128i a_filtered = _mm_mullo_epi16(a_u16, hfilter_0);
+ const __m128i b_filtered = _mm_mullo_epi16(b_u16, hfilter_1);
+ const __m128i sum = _mm_add_epi16(a_filtered, b_filtered);
+ const __m128i compensated = _mm_add_epi16(sum, round_factor);
+ const __m128i shifted = _mm_srai_epi16(compensated, VP8_FILTER_SHIFT);
+ _mm_store_si128((__m128i *)dst, shifted);
+ src += stride;
+ dst += 8;
+ }
+ }
+}
+
+static INLINE void vertical_8xN(uint16_t *src, uint8_t *dst, const int stride,
+ const int yoffset, const int height) {
+ int h;
+
+ if (yoffset == 0) {
+ for (h = 0; h < height; ++h) {
+ const __m128i row = _mm_load_si128((__m128i *)src);
+ const __m128i packed = _mm_packus_epi16(row, row);
+ _mm_storel_epi64((__m128i *)dst, packed);
+ src += 8;
+ dst += stride;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i vfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][0]);
+ const __m128i vfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][1]);
+
+ __m128i row_0 = _mm_load_si128((__m128i *)src);
+ src += 8;
+ for (h = 0; h < height; ++h) {
+ const __m128i row_1 = _mm_load_si128((__m128i *)src);
+ const __m128i row_0_filtered = _mm_mullo_epi16(row_0, vfilter_0);
+ const __m128i row_1_filtered = _mm_mullo_epi16(row_1, vfilter_1);
+ const __m128i sum = _mm_add_epi16(row_0_filtered, row_1_filtered);
+ const __m128i compensated = _mm_add_epi16(sum, round_factor);
+ const __m128i shifted = _mm_srai_epi16(compensated, VP8_FILTER_SHIFT);
+ const __m128i packed = _mm_packus_epi16(shifted, shifted);
+ _mm_storel_epi64((__m128i *)dst, packed);
+ row_0 = row_1;
+ src += 8;
+ dst += stride;
+ }
+ }
+}
+
+void vp8_bilinear_predict8x8_sse2(uint8_t *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, uint8_t *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, uint16_t, FData[8 * 9]);
+
+ assert((xoffset | yoffset) != 0);
+
+ horizontal_8xN(src_ptr, src_pixels_per_line, FData, xoffset, 9);
+
+ vertical_8xN(FData, dst_ptr, dst_pitch, yoffset, 8);
+}
+
+void vp8_bilinear_predict8x4_sse2(uint8_t *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, uint8_t *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, uint16_t, FData[8 * 5]);
+
+ assert((xoffset | yoffset) != 0);
+
+ horizontal_8xN(src_ptr, src_pixels_per_line, FData, xoffset, 5);
+
+ vertical_8xN(FData, dst_ptr, dst_pitch, yoffset, 4);
+}
+
+static INLINE void horizontal_4x4(uint8_t *src, const int stride, uint16_t *dst,
+ const int xoffset) {
+ int h;
+ const __m128i zero = _mm_setzero_si128();
+
+ if (xoffset == 0) {
+ for (h = 0; h < 5; ++h) {
+ const __m128i a = load_unaligned_u32(src);
+ const __m128i a_u16 = _mm_unpacklo_epi8(a, zero);
+ _mm_storel_epi64((__m128i *)dst, a_u16);
+ src += stride;
+ dst += 4;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i hfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][0]);
+ const __m128i hfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][1]);
+
+ for (h = 0; h < 5; ++h) {
+ const __m128i a = load_unaligned_u32(src);
+ const __m128i b = load_unaligned_u32(src + 1);
+ const __m128i a_u16 = _mm_unpacklo_epi8(a, zero);
+ const __m128i b_u16 = _mm_unpacklo_epi8(b, zero);
+ const __m128i a_filtered = _mm_mullo_epi16(a_u16, hfilter_0);
+ const __m128i b_filtered = _mm_mullo_epi16(b_u16, hfilter_1);
+ const __m128i sum = _mm_add_epi16(a_filtered, b_filtered);
+ const __m128i compensated = _mm_add_epi16(sum, round_factor);
+ const __m128i shifted = _mm_srai_epi16(compensated, VP8_FILTER_SHIFT);
+ _mm_storel_epi64((__m128i *)dst, shifted);
+ src += stride;
+ dst += 4;
+ }
+ }
+}
+
+static INLINE void vertical_4x4(uint16_t *src, uint8_t *dst, const int stride,
+ const int yoffset) {
+ int h;
+
+ if (yoffset == 0) {
+ for (h = 0; h < 4; h += 2) {
+ const __m128i row = _mm_load_si128((__m128i *)src);
+ __m128i packed = _mm_packus_epi16(row, row);
+ store_unaligned_u32(dst, packed);
+ dst += stride;
+ packed = _mm_srli_si128(packed, 4);
+ store_unaligned_u32(dst, packed);
+ dst += stride;
+ src += 8;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i vfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][0]);
+ const __m128i vfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][1]);
+
+ for (h = 0; h < 4; h += 2) {
+ const __m128i row_0 = _mm_load_si128((__m128i *)src);
+ const __m128i row_1 = _mm_loadu_si128((__m128i *)(src + 4));
+ const __m128i row_0_filtered = _mm_mullo_epi16(row_0, vfilter_0);
+ const __m128i row_1_filtered = _mm_mullo_epi16(row_1, vfilter_1);
+ const __m128i sum = _mm_add_epi16(row_0_filtered, row_1_filtered);
+ const __m128i compensated = _mm_add_epi16(sum, round_factor);
+ const __m128i shifted = _mm_srai_epi16(compensated, VP8_FILTER_SHIFT);
+ __m128i packed = _mm_packus_epi16(shifted, shifted);
+ storeu_int32(dst, _mm_cvtsi128_si32(packed));
+ packed = _mm_srli_si128(packed, 4);
+ dst += stride;
+ storeu_int32(dst, _mm_cvtsi128_si32(packed));
+ dst += stride;
+ src += 8;
+ }
+ }
+}
+
+void vp8_bilinear_predict4x4_sse2(uint8_t *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, uint8_t *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, uint16_t, FData[4 * 5]);
+
+ assert((xoffset | yoffset) != 0);
+
+ horizontal_4x4(src_ptr, src_pixels_per_line, FData, xoffset);
+
+ vertical_4x4(FData, dst_ptr, dst_pitch, yoffset);
+}
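For comparison with the intrinsics above, a scalar sketch of the two-tap
bilinear kernel these SSE2 paths vectorize (assumes VP8_FILTER_SHIFT == 7 and
the vp8_bilinear_filters table from vp8/common/filter.c; the 4x4 size and
function name are illustrative):

    static void bilinear_4x4_scalar(const uint8_t *src, int stride, int xoffset,
                                    int yoffset, uint8_t *dst, int dst_pitch) {
      uint16_t tmp[4 * 5]; /* width x (height + 1) first-pass buffer */
      const int round = 1 << (VP8_FILTER_SHIFT - 1);
      int r, c;
      /* First pass: filter horizontally over height + 1 rows. */
      for (r = 0; r < 5; ++r) {
        for (c = 0; c < 4; ++c) {
          tmp[r * 4 + c] = (src[c] * vp8_bilinear_filters[xoffset][0] +
                            src[c + 1] * vp8_bilinear_filters[xoffset][1] +
                            round) >> VP8_FILTER_SHIFT;
        }
        src += stride;
      }
      /* Second pass: filter vertically, then saturate to 8 bits as the
       * packus instructions do. Taps are non-negative, so clamp high only. */
      for (r = 0; r < 4; ++r) {
        for (c = 0; c < 4; ++c) {
          const int p = (tmp[r * 4 + c] * vp8_bilinear_filters[yoffset][0] +
                         tmp[(r + 1) * 4 + c] * vp8_bilinear_filters[yoffset][1] +
                         round) >> VP8_FILTER_SHIFT;
          dst[c] = (uint8_t)(p > 255 ? 255 : p);
        }
        dst += dst_pitch;
      }
    }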
diff --git a/media/libvpx/libvpx/vp8/common/x86/dequantize_mmx.asm b/media/libvpx/libvpx/vp8/common/x86/dequantize_mmx.asm
new file mode 100644
index 0000000000..0a269e15f7
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/dequantize_mmx.asm
@@ -0,0 +1,259 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+SECTION .text
+
+;void vp8_dequantize_b_impl_mmx(short *sq, short *dq, short *q)
+globalsym(vp8_dequantize_b_impl_mmx)
+sym(vp8_dequantize_b_impl_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 3
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;sq
+ mov rdi, arg(1) ;dq
+ mov rax, arg(2) ;q
+
+ movq mm1, [rsi]
+        pmullw mm1, [rax+0] ; dq[0..3] = sq[0..3] * q[0..3]
+ movq [rdi], mm1
+
+ movq mm1, [rsi+8]
+        pmullw mm1, [rax+8] ; dq[4..7] = sq[4..7] * q[4..7]
+ movq [rdi+8], mm1
+
+ movq mm1, [rsi+16]
+        pmullw mm1, [rax+16] ; dq[8..11] = sq[8..11] * q[8..11]
+ movq [rdi+16], mm1
+
+ movq mm1, [rsi+24]
+        pmullw mm1, [rax+24] ; dq[12..15] = sq[12..15] * q[12..15]
+ movq [rdi+24], mm1
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void dequant_idct_add_mmx(
+;short *input, 0
+;short *dq, 1
+;unsigned char *dest, 2
+;int stride) 3
+globalsym(vp8_dequant_idct_add_mmx)
+sym(vp8_dequant_idct_add_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ GET_GOT rbx
+ push rdi
+ ; end prolog
+
+ mov rax, arg(0) ;input
+ mov rdx, arg(1) ;dq
+
+
+ movq mm0, [rax ]
+ pmullw mm0, [rdx]
+
+ movq mm1, [rax +8]
+ pmullw mm1, [rdx +8]
+
+ movq mm2, [rax+16]
+ pmullw mm2, [rdx+16]
+
+ movq mm3, [rax+24]
+ pmullw mm3, [rdx+24]
+
+ mov rdx, arg(2) ;dest
+
+ pxor mm7, mm7
+
+
+ movq [rax], mm7
+ movq [rax+8], mm7
+
+ movq [rax+16],mm7
+ movq [rax+24],mm7
+
+
+ movsxd rdi, dword ptr arg(3) ;stride
+
+ psubw mm0, mm2 ; b1= 0-2
+ paddw mm2, mm2 ;
+
+ movq mm5, mm1
+ paddw mm2, mm0 ; a1 =0+2
+
+ pmulhw mm5, [GLOBAL(x_s1sqr2)];
+ paddw mm5, mm1 ; ip1 * sin(pi/8) * sqrt(2)
+
+ movq mm7, mm3 ;
+ pmulhw mm7, [GLOBAL(x_c1sqr2less1)];
+
+ paddw mm7, mm3 ; ip3 * cos(pi/8) * sqrt(2)
+ psubw mm7, mm5 ; c1
+
+ movq mm5, mm1
+ movq mm4, mm3
+
+ pmulhw mm5, [GLOBAL(x_c1sqr2less1)]
+ paddw mm5, mm1
+
+ pmulhw mm3, [GLOBAL(x_s1sqr2)]
+ paddw mm3, mm4
+
+ paddw mm3, mm5 ; d1
+ movq mm6, mm2 ; a1
+
+ movq mm4, mm0 ; b1
+ paddw mm2, mm3 ;0
+
+ paddw mm4, mm7 ;1
+ psubw mm0, mm7 ;2
+
+ psubw mm6, mm3 ;3
+
+ movq mm1, mm2 ; 03 02 01 00
+ movq mm3, mm4 ; 23 22 21 20
+
+ punpcklwd mm1, mm0 ; 11 01 10 00
+ punpckhwd mm2, mm0 ; 13 03 12 02
+
+ punpcklwd mm3, mm6 ; 31 21 30 20
+ punpckhwd mm4, mm6 ; 33 23 32 22
+
+ movq mm0, mm1 ; 11 01 10 00
+ movq mm5, mm2 ; 13 03 12 02
+
+ punpckldq mm0, mm3 ; 30 20 10 00
+ punpckhdq mm1, mm3 ; 31 21 11 01
+
+ punpckldq mm2, mm4 ; 32 22 12 02
+ punpckhdq mm5, mm4 ; 33 23 13 03
+
+ movq mm3, mm5 ; 33 23 13 03
+
+ psubw mm0, mm2 ; b1= 0-2
+ paddw mm2, mm2 ;
+
+ movq mm5, mm1
+ paddw mm2, mm0 ; a1 =0+2
+
+ pmulhw mm5, [GLOBAL(x_s1sqr2)];
+ paddw mm5, mm1 ; ip1 * sin(pi/8) * sqrt(2)
+
+ movq mm7, mm3 ;
+ pmulhw mm7, [GLOBAL(x_c1sqr2less1)];
+
+ paddw mm7, mm3 ; ip3 * cos(pi/8) * sqrt(2)
+ psubw mm7, mm5 ; c1
+
+ movq mm5, mm1
+ movq mm4, mm3
+
+ pmulhw mm5, [GLOBAL(x_c1sqr2less1)]
+ paddw mm5, mm1
+
+ pmulhw mm3, [GLOBAL(x_s1sqr2)]
+ paddw mm3, mm4
+
+ paddw mm3, mm5 ; d1
+ paddw mm0, [GLOBAL(fours)]
+
+ paddw mm2, [GLOBAL(fours)]
+ movq mm6, mm2 ; a1
+
+ movq mm4, mm0 ; b1
+ paddw mm2, mm3 ;0
+
+ paddw mm4, mm7 ;1
+ psubw mm0, mm7 ;2
+
+ psubw mm6, mm3 ;3
+ psraw mm2, 3
+
+ psraw mm0, 3
+ psraw mm4, 3
+
+ psraw mm6, 3
+
+ movq mm1, mm2 ; 03 02 01 00
+ movq mm3, mm4 ; 23 22 21 20
+
+ punpcklwd mm1, mm0 ; 11 01 10 00
+ punpckhwd mm2, mm0 ; 13 03 12 02
+
+ punpcklwd mm3, mm6 ; 31 21 30 20
+ punpckhwd mm4, mm6 ; 33 23 32 22
+
+ movq mm0, mm1 ; 11 01 10 00
+ movq mm5, mm2 ; 13 03 12 02
+
+ punpckldq mm0, mm3 ; 30 20 10 00
+ punpckhdq mm1, mm3 ; 31 21 11 01
+
+ punpckldq mm2, mm4 ; 32 22 12 02
+ punpckhdq mm5, mm4 ; 33 23 13 03
+
+ pxor mm7, mm7
+
+ movd mm4, [rdx]
+ punpcklbw mm4, mm7
+ paddsw mm0, mm4
+ packuswb mm0, mm7
+ movd [rdx], mm0
+
+ movd mm4, [rdx+rdi]
+ punpcklbw mm4, mm7
+ paddsw mm1, mm4
+ packuswb mm1, mm7
+ movd [rdx+rdi], mm1
+
+ movd mm4, [rdx+2*rdi]
+ punpcklbw mm4, mm7
+ paddsw mm2, mm4
+ packuswb mm2, mm7
+ movd [rdx+rdi*2], mm2
+
+ add rdx, rdi
+
+ movd mm4, [rdx+2*rdi]
+ punpcklbw mm4, mm7
+ paddsw mm5, mm4
+ packuswb mm5, mm7
+ movd [rdx+rdi*2], mm5
+
+ ; begin epilog
+ pop rdi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+x_s1sqr2:
+ times 4 dw 0x8A8C
+align 16
+x_c1sqr2less1:
+ times 4 dw 0x4E7B
+align 16
+fours:
+ times 4 dw 0x0004
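The routine above amounts to sixteen element-wise short multiplies; an
equivalent C rendering (names mirror the assembly arguments):

    void dequantize_b_sketch(const short *sq, short *dq, const short *q) {
      int i;
      /* The four movq/pmullw/movq triples above equal this loop. */
      for (i = 0; i < 16; ++i) dq[i] = sq[i] * q[i];
    }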
diff --git a/media/libvpx/libvpx/vp8/common/x86/idct_blk_mmx.c b/media/libvpx/libvpx/vp8/common/x86/idct_blk_mmx.c
new file mode 100644
index 0000000000..fd804b1ca4
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/idct_blk_mmx.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vp8/common/blockd.h"
+#include "vpx_mem/vpx_mem.h"
+
+extern void vp8_dequantize_b_impl_mmx(short *sq, short *dq, short *q);
+
+void vp8_dequantize_b_mmx(BLOCKD *d, short *DQC) {
+ short *sq = (short *)d->qcoeff;
+ short *dq = (short *)d->dqcoeff;
+
+ vp8_dequantize_b_impl_mmx(sq, dq, DQC);
+}
diff --git a/media/libvpx/libvpx/vp8/common/x86/idct_blk_sse2.c b/media/libvpx/libvpx/vp8/common/x86/idct_blk_sse2.c
new file mode 100644
index 0000000000..897ed5b652
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/idct_blk_sse2.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+
+void vp8_idct_dequant_0_2x_sse2(short *q, short *dq, unsigned char *dst,
+ int dst_stride);
+void vp8_idct_dequant_full_2x_sse2(short *q, short *dq, unsigned char *dst,
+ int dst_stride);
+
+void vp8_dequant_idct_add_y_block_sse2(short *q, short *dq, unsigned char *dst,
+ int stride, char *eobs) {
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ if (((short *)(eobs))[0]) {
+ if (((short *)(eobs))[0] & 0xfefe) {
+ vp8_idct_dequant_full_2x_sse2(q, dq, dst, stride);
+ } else {
+ vp8_idct_dequant_0_2x_sse2(q, dq, dst, stride);
+ }
+ }
+ if (((short *)(eobs))[1]) {
+ if (((short *)(eobs))[1] & 0xfefe) {
+ vp8_idct_dequant_full_2x_sse2(q + 32, dq, dst + 8, stride);
+ } else {
+ vp8_idct_dequant_0_2x_sse2(q + 32, dq, dst + 8, stride);
+ }
+ }
+ q += 64;
+ dst += stride * 4;
+ eobs += 4;
+ }
+}
+
+void vp8_dequant_idct_add_uv_block_sse2(short *q, short *dq,
+ unsigned char *dst_u,
+ unsigned char *dst_v, int stride,
+ char *eobs) {
+ if (((short *)(eobs))[0]) {
+ if (((short *)(eobs))[0] & 0xfefe) {
+ vp8_idct_dequant_full_2x_sse2(q, dq, dst_u, stride);
+ } else {
+ vp8_idct_dequant_0_2x_sse2(q, dq, dst_u, stride);
+ }
+ }
+ q += 32;
+ dst_u += stride * 4;
+
+ if (((short *)(eobs))[1]) {
+ if (((short *)(eobs))[1] & 0xfefe) {
+ vp8_idct_dequant_full_2x_sse2(q, dq, dst_u, stride);
+ } else {
+ vp8_idct_dequant_0_2x_sse2(q, dq, dst_u, stride);
+ }
+ }
+ q += 32;
+
+ if (((short *)(eobs))[2]) {
+ if (((short *)(eobs))[2] & 0xfefe) {
+ vp8_idct_dequant_full_2x_sse2(q, dq, dst_v, stride);
+ } else {
+ vp8_idct_dequant_0_2x_sse2(q, dq, dst_v, stride);
+ }
+ }
+ q += 32;
+ dst_v += stride * 4;
+
+ if (((short *)(eobs))[3]) {
+ if (((short *)(eobs))[3] & 0xfefe) {
+ vp8_idct_dequant_full_2x_sse2(q, dq, dst_v, stride);
+ } else {
+ vp8_idct_dequant_0_2x_sse2(q, dq, dst_v, stride);
+ }
+ }
+}
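The recurring 0xfefe test reads two adjacent per-4x4 eob bytes at once: byte
values 0 and 1 (empty or DC-only blocks) set no bits under 0xfe, so the mask
is nonzero exactly when either block has more than one coefficient. Restated
as a scalar predicate:

    /* pair = eobs[0] | (eobs[1] << 8)
     * pair != 0            -> at least one of the two blocks has coefficients
     * (pair & 0xfefe) != 0 -> some block has eob > 1: run the full IDCT
     * otherwise            -> both blocks are DC-only: take the cheap path */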
diff --git a/media/libvpx/libvpx/vp8/common/x86/idctllm_mmx.asm b/media/libvpx/libvpx/vp8/common/x86/idctllm_mmx.asm
new file mode 100644
index 0000000000..6cea86fe03
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/idctllm_mmx.asm
@@ -0,0 +1,296 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+; /****************************************************************************
+; * Notes:
+; *
+; * This implementation makes use of 16-bit fixed-point versions of two
+; * multiply constants:
+; * 1. sqrt(2) * cos (pi/8)
+; * 2. sqrt(2) * sin (pi/8)
+; * Because the first constant is bigger than 1, to maintain the same 16-bit
+; * fixed-point precision as the second one, we use the trick
+; * x * a = x + x*(a-1)
+; * so
+; * x * sqrt(2) * cos (pi/8) = x + x * (sqrt(2) *cos(pi/8)-1).
+; *
+; * The second constant's 16-bit version is 35468, which is bigger than
+; * 32768, so in a signed 16-bit multiply it becomes a negative number:
+; * (x * (unsigned)35468 >> 16) = x * (signed)35468 >> 16 + x
+; *
+; **************************************************************************/
+
+SECTION .text
+
+;void vp8_short_idct4x4llm_mmx(short *input, unsigned char *pred,
+;int pitch, unsigned char *dest,int stride)
+globalsym(vp8_short_idct4x4llm_mmx)
+sym(vp8_short_idct4x4llm_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rax, arg(0) ;input
+ mov rsi, arg(1) ;pred
+
+ movq mm0, [rax ]
+ movq mm1, [rax+ 8]
+ movq mm2, [rax+16]
+ movq mm3, [rax+24]
+
+%if 0
+ pxor mm7, mm7
+ movq [rax], mm7
+ movq [rax+8], mm7
+ movq [rax+16],mm7
+ movq [rax+24],mm7
+%endif
+ movsxd rax, dword ptr arg(2) ;pitch
+ mov rdx, arg(3) ;dest
+ movsxd rdi, dword ptr arg(4) ;stride
+
+
+ psubw mm0, mm2 ; b1= 0-2
+ paddw mm2, mm2 ;
+
+ movq mm5, mm1
+ paddw mm2, mm0 ; a1 =0+2
+
+ pmulhw mm5, [GLOBAL(x_s1sqr2)];
+ paddw mm5, mm1 ; ip1 * sin(pi/8) * sqrt(2)
+
+ movq mm7, mm3 ;
+ pmulhw mm7, [GLOBAL(x_c1sqr2less1)];
+
+ paddw mm7, mm3 ; ip3 * cos(pi/8) * sqrt(2)
+ psubw mm7, mm5 ; c1
+
+ movq mm5, mm1
+ movq mm4, mm3
+
+ pmulhw mm5, [GLOBAL(x_c1sqr2less1)]
+ paddw mm5, mm1
+
+ pmulhw mm3, [GLOBAL(x_s1sqr2)]
+ paddw mm3, mm4
+
+ paddw mm3, mm5 ; d1
+ movq mm6, mm2 ; a1
+
+ movq mm4, mm0 ; b1
+ paddw mm2, mm3 ;0
+
+ paddw mm4, mm7 ;1
+ psubw mm0, mm7 ;2
+
+ psubw mm6, mm3 ;3
+
+ movq mm1, mm2 ; 03 02 01 00
+ movq mm3, mm4 ; 23 22 21 20
+
+ punpcklwd mm1, mm0 ; 11 01 10 00
+ punpckhwd mm2, mm0 ; 13 03 12 02
+
+ punpcklwd mm3, mm6 ; 31 21 30 20
+ punpckhwd mm4, mm6 ; 33 23 32 22
+
+ movq mm0, mm1 ; 11 01 10 00
+ movq mm5, mm2 ; 13 03 12 02
+
+ punpckldq mm0, mm3 ; 30 20 10 00
+ punpckhdq mm1, mm3 ; 31 21 11 01
+
+ punpckldq mm2, mm4 ; 32 22 12 02
+ punpckhdq mm5, mm4 ; 33 23 13 03
+
+ movq mm3, mm5 ; 33 23 13 03
+
+ psubw mm0, mm2 ; b1= 0-2
+ paddw mm2, mm2 ;
+
+ movq mm5, mm1
+ paddw mm2, mm0 ; a1 =0+2
+
+ pmulhw mm5, [GLOBAL(x_s1sqr2)];
+ paddw mm5, mm1 ; ip1 * sin(pi/8) * sqrt(2)
+
+ movq mm7, mm3 ;
+ pmulhw mm7, [GLOBAL(x_c1sqr2less1)];
+
+ paddw mm7, mm3 ; ip3 * cos(pi/8) * sqrt(2)
+ psubw mm7, mm5 ; c1
+
+ movq mm5, mm1
+ movq mm4, mm3
+
+ pmulhw mm5, [GLOBAL(x_c1sqr2less1)]
+ paddw mm5, mm1
+
+ pmulhw mm3, [GLOBAL(x_s1sqr2)]
+ paddw mm3, mm4
+
+ paddw mm3, mm5 ; d1
+ paddw mm0, [GLOBAL(fours)]
+
+ paddw mm2, [GLOBAL(fours)]
+ movq mm6, mm2 ; a1
+
+ movq mm4, mm0 ; b1
+ paddw mm2, mm3 ;0
+
+ paddw mm4, mm7 ;1
+ psubw mm0, mm7 ;2
+
+ psubw mm6, mm3 ;3
+ psraw mm2, 3
+
+ psraw mm0, 3
+ psraw mm4, 3
+
+ psraw mm6, 3
+
+ movq mm1, mm2 ; 03 02 01 00
+ movq mm3, mm4 ; 23 22 21 20
+
+ punpcklwd mm1, mm0 ; 11 01 10 00
+ punpckhwd mm2, mm0 ; 13 03 12 02
+
+ punpcklwd mm3, mm6 ; 31 21 30 20
+ punpckhwd mm4, mm6 ; 33 23 32 22
+
+ movq mm0, mm1 ; 11 01 10 00
+ movq mm5, mm2 ; 13 03 12 02
+
+ punpckldq mm0, mm3 ; 30 20 10 00
+ punpckhdq mm1, mm3 ; 31 21 11 01
+
+ punpckldq mm2, mm4 ; 32 22 12 02
+ punpckhdq mm5, mm4 ; 33 23 13 03
+
+ pxor mm7, mm7
+
+ movd mm4, [rsi]
+ punpcklbw mm4, mm7
+ paddsw mm0, mm4
+ packuswb mm0, mm7
+ movd [rdx], mm0
+
+ movd mm4, [rsi+rax]
+ punpcklbw mm4, mm7
+ paddsw mm1, mm4
+ packuswb mm1, mm7
+ movd [rdx+rdi], mm1
+
+ movd mm4, [rsi+2*rax]
+ punpcklbw mm4, mm7
+ paddsw mm2, mm4
+ packuswb mm2, mm7
+ movd [rdx+rdi*2], mm2
+
+ add rdx, rdi
+ add rsi, rax
+
+ movd mm4, [rsi+2*rax]
+ punpcklbw mm4, mm7
+ paddsw mm5, mm4
+ packuswb mm5, mm7
+ movd [rdx+rdi*2], mm5
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp8_dc_only_idct_add_mmx(
+;short input_dc,
+;unsigned char *pred_ptr,
+;int pred_stride,
+;unsigned char *dst_ptr,
+;int stride)
+globalsym(vp8_dc_only_idct_add_mmx)
+sym(vp8_dc_only_idct_add_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ GET_GOT rbx
+ ; end prolog
+
+ movd mm5, arg(0) ;input_dc
+ mov rax, arg(1) ;pred_ptr
+ movsxd rdx, dword ptr arg(2) ;pred_stride
+
+ pxor mm0, mm0
+
+ paddw mm5, [GLOBAL(fours)]
+ lea rcx, [rdx + rdx*2]
+
+ psraw mm5, 3
+
+ punpcklwd mm5, mm5
+
+ punpckldq mm5, mm5
+
+ movd mm1, [rax]
+ movd mm2, [rax+rdx]
+ movd mm3, [rax+2*rdx]
+ movd mm4, [rax+rcx]
+
+ mov rax, arg(3) ;d -- destination
+ movsxd rdx, dword ptr arg(4) ;dst_stride
+
+ punpcklbw mm1, mm0
+ paddsw mm1, mm5
+ packuswb mm1, mm0 ; pack and unpack to saturate
+ lea rcx, [rdx + rdx*2]
+
+ punpcklbw mm2, mm0
+ paddsw mm2, mm5
+ packuswb mm2, mm0 ; pack and unpack to saturate
+
+ punpcklbw mm3, mm0
+ paddsw mm3, mm5
+ packuswb mm3, mm0 ; pack and unpack to saturate
+
+ punpcklbw mm4, mm0
+ paddsw mm4, mm5
+ packuswb mm4, mm0 ; pack and unpack to saturate
+
+ movd [rax], mm1
+ movd [rax+rdx], mm2
+ movd [rax+2*rdx], mm3
+ movd [rax+rcx], mm4
+
+ ; begin epilog
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+x_s1sqr2:
+ times 4 dw 0x8A8C
+align 16
+x_c1sqr2less1:
+ times 4 dw 0x4E7B
+align 16
+fours:
+ times 4 dw 0x0004
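+
+; Note on the constants: pmulhw keeps the high 16 bits of the signed
+; product, i.e. (x * k) >> 16. x_c1sqr2less1 = 0x4E7B = 20091 is
+; (sqrt(2) * cos(pi/8) - 1) * 65536; the full multiplier is >= 1.0 and
+; will not fit in Q16, so the code adds x back after the pmulhw.
+; x_s1sqr2 = 0x8A8C = 35468 is sqrt(2) * sin(pi/8) * 65536, which is
+; negative as a signed word, so here too a paddw of x compensates:
+; ((x * (35468 - 65536)) >> 16) + x = (x * 35468) >> 16. These match
+; sinpi8sqrt2 and cospi8sqrt2minus1 in the C reference
+; vp8_short_idct4x4llm_c.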
diff --git a/media/libvpx/libvpx/vp8/common/x86/idctllm_sse2.asm b/media/libvpx/libvpx/vp8/common/x86/idctllm_sse2.asm
new file mode 100644
index 0000000000..bb79d2da3b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/idctllm_sse2.asm
@@ -0,0 +1,710 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp8_idct_dequant_0_2x_sse2
+; (
+; short *qcoeff - 0
+; short *dequant - 1
+; unsigned char *dst - 2
+; int dst_stride - 3
+; )
+
+SECTION .text
+
+globalsym(vp8_idct_dequant_0_2x_sse2)
+sym(vp8_idct_dequant_0_2x_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ GET_GOT rbx
+ ; end prolog
+
+ mov rdx, arg(1) ; dequant
+ mov rax, arg(0) ; qcoeff
+
+ movd xmm4, [rax]
+ movd xmm5, [rdx]
+
+ pinsrw xmm4, [rax+32], 4
+ pinsrw xmm5, [rdx], 4
+
+ pmullw xmm4, xmm5
+
+ ; Zero out xmm5 for use in unpacking
+ pxor xmm5, xmm5
+
+ ; clear coeffs
+ movd [rax], xmm5
+ movd [rax+32], xmm5
+;pshufb
+ mov rax, arg(2) ; dst
+ movsxd rdx, dword ptr arg(3) ; dst_stride
+
+ pshuflw xmm4, xmm4, 00000000b
+ pshufhw xmm4, xmm4, 00000000b
+
+ lea rcx, [rdx + rdx*2]
+ paddw xmm4, [GLOBAL(fours)]
+
+ psraw xmm4, 3
+
+ movq xmm0, [rax]
+ movq xmm1, [rax+rdx]
+ movq xmm2, [rax+2*rdx]
+ movq xmm3, [rax+rcx]
+
+ punpcklbw xmm0, xmm5
+ punpcklbw xmm1, xmm5
+ punpcklbw xmm2, xmm5
+ punpcklbw xmm3, xmm5
+
+
+ ; Add to predict buffer
+ paddw xmm0, xmm4
+ paddw xmm1, xmm4
+ paddw xmm2, xmm4
+ paddw xmm3, xmm4
+
+ ; pack up before storing
+ packuswb xmm0, xmm5
+ packuswb xmm1, xmm5
+ packuswb xmm2, xmm5
+ packuswb xmm3, xmm5
+
+ ; store blocks back out
+ movq [rax], xmm0
+ movq [rax + rdx], xmm1
+
+ lea rax, [rax + 2*rdx]
+
+ movq [rax], xmm2
+ movq [rax + rdx], xmm3
+
+ ; begin epilog
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
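+
+; When at most the dc coefficient is present the 4x4 inverse transform
+; collapses to one value, so the routine above just computes
+; (dc * dequant[0] + 4) >> 3 for each of the two horizontally adjacent
+; blocks (qcoeff[0] and qcoeff[16]) and adds it to every pixel of the
+; corresponding 4x4 in dst.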
+
+;void vp8_idct_dequant_full_2x_sse2
+; (
+; short *qcoeff - 0
+; short *dequant - 1
+; unsigned char *dst - 2
+; int dst_stride - 3
+; )
+globalsym(vp8_idct_dequant_full_2x_sse2)
+sym(vp8_idct_dequant_full_2x_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ; full inverse transform of two adjacent blocks;
+ ; qcoeff is loaded (and then cleared) below
+ mov rax, arg(0) ; qcoeff
+ mov rdx, arg(1) ; dequant
+ mov rdi, arg(2) ; dst
+
+
+ ; Zero out xmm7 for use in unpacking
+ pxor xmm7, xmm7
+
+
+ ; note the transpose of xmm1 and xmm2, necessary for shuffle
+ ; to spit out sensible data
+ movdqa xmm0, [rax]
+ movdqa xmm2, [rax+16]
+ movdqa xmm1, [rax+32]
+ movdqa xmm3, [rax+48]
+
+ ; Clear out coeffs
+ movdqa [rax], xmm7
+ movdqa [rax+16], xmm7
+ movdqa [rax+32], xmm7
+ movdqa [rax+48], xmm7
+
+ ; dequantize qcoeff buffer
+ pmullw xmm0, [rdx]
+ pmullw xmm2, [rdx+16]
+ pmullw xmm1, [rdx]
+ pmullw xmm3, [rdx+16]
+ movsxd rdx, dword ptr arg(3) ; dst_stride
+
+ ; repack so block 0 row x and block 1 row x are together
+ movdqa xmm4, xmm0
+ punpckldq xmm0, xmm1
+ punpckhdq xmm4, xmm1
+
+ pshufd xmm0, xmm0, 11011000b
+ pshufd xmm1, xmm4, 11011000b
+
+ movdqa xmm4, xmm2
+ punpckldq xmm2, xmm3
+ punpckhdq xmm4, xmm3
+
+ pshufd xmm2, xmm2, 11011000b
+ pshufd xmm3, xmm4, 11011000b
+
+ ; first pass
+ psubw xmm0, xmm2 ; b1 = 0-2
+ paddw xmm2, xmm2 ;
+
+ movdqa xmm5, xmm1
+ paddw xmm2, xmm0 ; a1 = 0+2
+
+ pmulhw xmm5, [GLOBAL(x_s1sqr2)]
+ lea rcx, [rdx + rdx*2] ;dst_stride * 3
+ paddw xmm5, xmm1 ; ip1 * sin(pi/8) * sqrt(2)
+
+ movdqa xmm7, xmm3
+ pmulhw xmm7, [GLOBAL(x_c1sqr2less1)]
+
+ paddw xmm7, xmm3 ; ip3 * cos(pi/8) * sqrt(2)
+ psubw xmm7, xmm5 ; c1
+
+ movdqa xmm5, xmm1
+ movdqa xmm4, xmm3
+
+ pmulhw xmm5, [GLOBAL(x_c1sqr2less1)]
+ paddw xmm5, xmm1
+
+ pmulhw xmm3, [GLOBAL(x_s1sqr2)]
+ paddw xmm3, xmm4
+
+ paddw xmm3, xmm5 ; d1
+ movdqa xmm6, xmm2 ; a1
+
+ movdqa xmm4, xmm0 ; b1
+ paddw xmm2, xmm3 ;0
+
+ paddw xmm4, xmm7 ;1
+ psubw xmm0, xmm7 ;2
+
+ psubw xmm6, xmm3 ;3
+
+ ; transpose for the second pass
+ movdqa xmm7, xmm2 ; 103 102 101 100 003 002 001 000
+ punpcklwd xmm2, xmm0 ; 007 003 006 002 005 001 004 000
+ punpckhwd xmm7, xmm0 ; 107 103 106 102 105 101 104 100
+
+ movdqa xmm5, xmm4 ; 111 110 109 108 011 010 009 008
+ punpcklwd xmm4, xmm6 ; 015 011 014 010 013 009 012 008
+ punpckhwd xmm5, xmm6 ; 115 111 114 110 113 109 112 108
+
+
+ movdqa xmm1, xmm2 ; 007 003 006 002 005 001 004 000
+ punpckldq xmm2, xmm4 ; 013 009 005 001 012 008 004 000
+ punpckhdq xmm1, xmm4 ; 015 011 007 003 014 010 006 002
+
+ movdqa xmm6, xmm7 ; 107 103 106 102 105 101 104 100
+ punpckldq xmm7, xmm5 ; 113 109 105 101 112 108 104 100
+ punpckhdq xmm6, xmm5 ; 115 111 107 103 114 110 106 102
+
+
+ movdqa xmm5, xmm2 ; 013 009 005 001 012 008 004 000
+ punpckldq xmm2, xmm7 ; 112 108 012 008 104 100 004 000
+ punpckhdq xmm5, xmm7 ; 113 109 013 009 105 101 005 001
+
+ movdqa xmm7, xmm1 ; 015 011 007 003 014 010 006 002
+ punpckldq xmm1, xmm6 ; 114 110 014 010 106 102 006 002
+ punpckhdq xmm7, xmm6 ; 115 111 015 011 107 103 007 003
+
+ pshufd xmm0, xmm2, 11011000b
+ pshufd xmm2, xmm1, 11011000b
+
+ pshufd xmm1, xmm5, 11011000b
+ pshufd xmm3, xmm7, 11011000b
+
+ ; second pass
+ psubw xmm0, xmm2 ; b1 = 0-2
+ paddw xmm2, xmm2
+
+ movdqa xmm5, xmm1
+ paddw xmm2, xmm0 ; a1 = 0+2
+
+ pmulhw xmm5, [GLOBAL(x_s1sqr2)]
+ paddw xmm5, xmm1 ; ip1 * sin(pi/8) * sqrt(2)
+
+ movdqa xmm7, xmm3
+ pmulhw xmm7, [GLOBAL(x_c1sqr2less1)]
+
+ paddw xmm7, xmm3 ; ip3 * cos(pi/8) * sqrt(2)
+ psubw xmm7, xmm5 ; c1
+
+ movdqa xmm5, xmm1
+ movdqa xmm4, xmm3
+
+ pmulhw xmm5, [GLOBAL(x_c1sqr2less1)]
+ paddw xmm5, xmm1
+
+ pmulhw xmm3, [GLOBAL(x_s1sqr2)]
+ paddw xmm3, xmm4
+
+ paddw xmm3, xmm5 ; d1
+ paddw xmm0, [GLOBAL(fours)]
+
+ paddw xmm2, [GLOBAL(fours)]
+ movdqa xmm6, xmm2 ; a1
+
+ movdqa xmm4, xmm0 ; b1
+ paddw xmm2, xmm3 ;0
+
+ paddw xmm4, xmm7 ;1
+ psubw xmm0, xmm7 ;2
+
+ psubw xmm6, xmm3 ;3
+ psraw xmm2, 3
+
+ psraw xmm0, 3
+ psraw xmm4, 3
+
+ psraw xmm6, 3
+
+ ; transpose to save
+ movdqa xmm7, xmm2 ; 103 102 101 100 003 002 001 000
+ punpcklwd xmm2, xmm0 ; 007 003 006 002 005 001 004 000
+ punpckhwd xmm7, xmm0 ; 107 103 106 102 105 101 104 100
+
+ movdqa xmm5, xmm4 ; 111 110 109 108 011 010 009 008
+ punpcklwd xmm4, xmm6 ; 015 011 014 010 013 009 012 008
+ punpckhwd xmm5, xmm6 ; 115 111 114 110 113 109 112 108
+
+
+ movdqa xmm1, xmm2 ; 007 003 006 002 005 001 004 000
+ punpckldq xmm2, xmm4 ; 013 009 005 001 012 008 004 000
+ punpckhdq xmm1, xmm4 ; 015 011 007 003 014 010 006 002
+
+ movdqa xmm6, xmm7 ; 107 103 106 102 105 101 104 100
+ punpckldq xmm7, xmm5 ; 113 109 105 101 112 108 104 100
+ punpckhdq xmm6, xmm5 ; 115 111 107 103 114 110 106 102
+
+
+ movdqa xmm5, xmm2 ; 013 009 005 001 012 008 004 000
+ punpckldq xmm2, xmm7 ; 112 108 012 008 104 100 004 000
+ punpckhdq xmm5, xmm7 ; 113 109 013 009 105 101 005 001
+
+ movdqa xmm7, xmm1 ; 015 011 007 003 014 010 006 002
+ punpckldq xmm1, xmm6 ; 114 110 014 010 106 102 006 002
+ punpckhdq xmm7, xmm6 ; 115 111 015 011 107 103 007 003
+
+ pshufd xmm0, xmm2, 11011000b
+ pshufd xmm2, xmm1, 11011000b
+
+ pshufd xmm1, xmm5, 11011000b
+ pshufd xmm3, xmm7, 11011000b
+
+ pxor xmm7, xmm7
+
+ ; Load up predict blocks
+ movq xmm4, [rdi]
+ movq xmm5, [rdi+rdx]
+
+ punpcklbw xmm4, xmm7
+ punpcklbw xmm5, xmm7
+
+ paddw xmm0, xmm4
+ paddw xmm1, xmm5
+
+ movq xmm4, [rdi+2*rdx]
+ movq xmm5, [rdi+rcx]
+
+ punpcklbw xmm4, xmm7
+ punpcklbw xmm5, xmm7
+
+ paddw xmm2, xmm4
+ paddw xmm3, xmm5
+
+.finish:
+
+ ; pack up before storing
+ packuswb xmm0, xmm7
+ packuswb xmm1, xmm7
+ packuswb xmm2, xmm7
+ packuswb xmm3, xmm7
+
+ ; store blocks back out
+ movq [rdi], xmm0
+ movq [rdi + rdx], xmm1
+ movq [rdi + rdx*2], xmm2
+ movq [rdi + rcx], xmm3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp8_idct_dequant_dc_0_2x_sse2
+; (
+; short *qcoeff - 0
+; short *dequant - 1
+; unsigned char *dst - 2
+; int dst_stride - 3
+; short *dc - 4
+; )
+globalsym(vp8_idct_dequant_dc_0_2x_sse2)
+sym(vp8_idct_dequant_dc_0_2x_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ GET_GOT rbx
+ push rdi
+ ; end prolog
+
+ ; special case when 2 blocks have 0 or 1 coeffs:
+ ; dc comes from the separate dc buffer, so no need to load qcoeff
+ mov rax, arg(0) ; qcoeff
+
+ mov rdi, arg(2) ; dst
+ mov rdx, arg(4) ; dc
+
+ ; Zero out xmm5 for use in unpacking
+ pxor xmm5, xmm5
+
+ ; load up two 16-bit dc words (one doubleword)
+ movd xmm4, [rdx]
+
+ movsxd rdx, dword ptr arg(3) ; dst_stride
+ lea rcx, [rdx + rdx*2]
+ ; Load up predict blocks
+ movq xmm0, [rdi]
+ movq xmm1, [rdi+rdx*1]
+ movq xmm2, [rdi+rdx*2]
+ movq xmm3, [rdi+rcx]
+
+ ; Duplicate and expand dc across
+ punpcklwd xmm4, xmm4
+ punpckldq xmm4, xmm4
+
+ ; Rounding to dequant and downshift
+ paddw xmm4, [GLOBAL(fours)]
+ psraw xmm4, 3
+
+ ; Predict buffer needs to be expanded from bytes to words
+ punpcklbw xmm0, xmm5
+ punpcklbw xmm1, xmm5
+ punpcklbw xmm2, xmm5
+ punpcklbw xmm3, xmm5
+
+ ; Add to predict buffer
+ paddw xmm0, xmm4
+ paddw xmm1, xmm4
+ paddw xmm2, xmm4
+ paddw xmm3, xmm4
+
+ ; pack up before storing
+ packuswb xmm0, xmm5
+ packuswb xmm1, xmm5
+ packuswb xmm2, xmm5
+ packuswb xmm3, xmm5
+
+ ; store blocks back out
+ movq [rdi], xmm0
+ movq [rdi + rdx], xmm1
+ movq [rdi + rdx*2], xmm2
+ movq [rdi + rcx], xmm3
+
+ ; begin epilog
+ pop rdi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+;void vp8_idct_dequant_dc_full_2x_sse2
+; (
+; short *qcoeff - 0
+; short *dequant - 1
+; unsigned char *dst - 2
+; int dst_stride - 3
+; short *dc - 4
+; )
+globalsym(vp8_idct_dequant_dc_full_2x_sse2)
+sym(vp8_idct_dequant_dc_full_2x_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rdi
+ ; end prolog
+
+ ; full inverse transform of two adjacent blocks;
+ ; qcoeff is loaded (and then cleared) below, dc is inserted from arg(4)
+ mov rax, arg(0) ; qcoeff
+ mov rdx, arg(1) ; dequant
+
+ mov rdi, arg(2) ; dst
+
+ ; Zero out xmm7 for use in unpacking
+ pxor xmm7, xmm7
+
+
+ ; note the transpose of xmm1 and xmm2, necessary for shuffle
+ ; to spit out sensible data
+ movdqa xmm0, [rax]
+ movdqa xmm2, [rax+16]
+ movdqa xmm1, [rax+32]
+ movdqa xmm3, [rax+48]
+
+ ; Clear out coeffs
+ movdqa [rax], xmm7
+ movdqa [rax+16], xmm7
+ movdqa [rax+32], xmm7
+ movdqa [rax+48], xmm7
+
+ ; dequantize qcoeff buffer
+ pmullw xmm0, [rdx]
+ pmullw xmm2, [rdx+16]
+ pmullw xmm1, [rdx]
+ pmullw xmm3, [rdx+16]
+
+ ; DC component
+ mov rdx, arg(4)
+
+ ; repack so block 0 row x and block 1 row x are together
+ movdqa xmm4, xmm0
+ punpckldq xmm0, xmm1
+ punpckhdq xmm4, xmm1
+
+ pshufd xmm0, xmm0, 11011000b
+ pshufd xmm1, xmm4, 11011000b
+
+ movdqa xmm4, xmm2
+ punpckldq xmm2, xmm3
+ punpckhdq xmm4, xmm3
+
+ pshufd xmm2, xmm2, 11011000b
+ pshufd xmm3, xmm4, 11011000b
+
+ ; insert DC component
+ pinsrw xmm0, [rdx], 0
+ pinsrw xmm0, [rdx+2], 4
+
+ ; first pass
+ psubw xmm0, xmm2 ; b1 = 0-2
+ paddw xmm2, xmm2 ;
+
+ movdqa xmm5, xmm1
+ paddw xmm2, xmm0 ; a1 = 0+2
+
+ pmulhw xmm5, [GLOBAL(x_s1sqr2)]
+ paddw xmm5, xmm1 ; ip1 * sin(pi/8) * sqrt(2)
+
+ movdqa xmm7, xmm3
+ pmulhw xmm7, [GLOBAL(x_c1sqr2less1)]
+
+ paddw xmm7, xmm3 ; ip3 * cos(pi/8) * sqrt(2)
+ psubw xmm7, xmm5 ; c1
+
+ movdqa xmm5, xmm1
+ movdqa xmm4, xmm3
+
+ pmulhw xmm5, [GLOBAL(x_c1sqr2less1)]
+ paddw xmm5, xmm1
+
+ pmulhw xmm3, [GLOBAL(x_s1sqr2)]
+ paddw xmm3, xmm4
+
+ paddw xmm3, xmm5 ; d1
+ movdqa xmm6, xmm2 ; a1
+
+ movdqa xmm4, xmm0 ; b1
+ paddw xmm2, xmm3 ;0
+
+ paddw xmm4, xmm7 ;1
+ psubw xmm0, xmm7 ;2
+
+ psubw xmm6, xmm3 ;3
+
+ ; transpose for the second pass
+ movdqa xmm7, xmm2 ; 103 102 101 100 003 002 001 000
+ punpcklwd xmm2, xmm0 ; 007 003 006 002 005 001 004 000
+ punpckhwd xmm7, xmm0 ; 107 103 106 102 105 101 104 100
+
+ movdqa xmm5, xmm4 ; 111 110 109 108 011 010 009 008
+ punpcklwd xmm4, xmm6 ; 015 011 014 010 013 009 012 008
+ punpckhwd xmm5, xmm6 ; 115 111 114 110 113 109 112 108
+
+
+ movdqa xmm1, xmm2 ; 007 003 006 002 005 001 004 000
+ punpckldq xmm2, xmm4 ; 013 009 005 001 012 008 004 000
+ punpckhdq xmm1, xmm4 ; 015 011 007 003 014 010 006 002
+
+ movdqa xmm6, xmm7 ; 107 103 106 102 105 101 104 100
+ punpckldq xmm7, xmm5 ; 113 109 105 101 112 108 104 100
+ punpckhdq xmm6, xmm5 ; 115 111 107 103 114 110 106 102
+
+
+ movdqa xmm5, xmm2 ; 013 009 005 001 012 008 004 000
+ punpckldq xmm2, xmm7 ; 112 108 012 008 104 100 004 000
+ punpckhdq xmm5, xmm7 ; 113 109 013 009 105 101 005 001
+
+ movdqa xmm7, xmm1 ; 015 011 007 003 014 010 006 002
+ punpckldq xmm1, xmm6 ; 114 110 014 010 106 102 006 002
+ punpckhdq xmm7, xmm6 ; 115 111 015 011 107 103 007 003
+
+ pshufd xmm0, xmm2, 11011000b
+ pshufd xmm2, xmm1, 11011000b
+
+ pshufd xmm1, xmm5, 11011000b
+ pshufd xmm3, xmm7, 11011000b
+
+ ; second pass
+ psubw xmm0, xmm2 ; b1 = 0-2
+ paddw xmm2, xmm2
+
+ movdqa xmm5, xmm1
+ paddw xmm2, xmm0 ; a1 = 0+2
+
+ pmulhw xmm5, [GLOBAL(x_s1sqr2)]
+ paddw xmm5, xmm1 ; ip1 * sin(pi/8) * sqrt(2)
+
+ movdqa xmm7, xmm3
+ pmulhw xmm7, [GLOBAL(x_c1sqr2less1)]
+
+ paddw xmm7, xmm3 ; ip3 * cos(pi/8) * sqrt(2)
+ psubw xmm7, xmm5 ; c1
+
+ movdqa xmm5, xmm1
+ movdqa xmm4, xmm3
+
+ pmulhw xmm5, [GLOBAL(x_c1sqr2less1)]
+ paddw xmm5, xmm1
+
+ pmulhw xmm3, [GLOBAL(x_s1sqr2)]
+ paddw xmm3, xmm4
+
+ paddw xmm3, xmm5 ; d1
+ paddw xmm0, [GLOBAL(fours)]
+
+ paddw xmm2, [GLOBAL(fours)]
+ movdqa xmm6, xmm2 ; a1
+
+ movdqa xmm4, xmm0 ; b1
+ paddw xmm2, xmm3 ;0
+
+ paddw xmm4, xmm7 ;1
+ psubw xmm0, xmm7 ;2
+
+ psubw xmm6, xmm3 ;3
+ psraw xmm2, 3
+
+ psraw xmm0, 3
+ psraw xmm4, 3
+
+ psraw xmm6, 3
+
+ ; transpose to save
+ movdqa xmm7, xmm2 ; 103 102 101 100 003 002 001 000
+ punpcklwd xmm2, xmm0 ; 007 003 006 002 005 001 004 000
+ punpckhwd xmm7, xmm0 ; 107 103 106 102 105 101 104 100
+
+ movdqa xmm5, xmm4 ; 111 110 109 108 011 010 009 008
+ punpcklwd xmm4, xmm6 ; 015 011 014 010 013 009 012 008
+ punpckhwd xmm5, xmm6 ; 115 111 114 110 113 109 112 108
+
+
+ movdqa xmm1, xmm2 ; 007 003 006 002 005 001 004 000
+ punpckldq xmm2, xmm4 ; 013 009 005 001 012 008 004 000
+ punpckhdq xmm1, xmm4 ; 015 011 007 003 014 010 006 002
+
+ movdqa xmm6, xmm7 ; 107 103 106 102 105 101 104 100
+ punpckldq xmm7, xmm5 ; 113 109 105 101 112 108 104 100
+ punpckhdq xmm6, xmm5 ; 115 111 107 103 114 110 106 102
+
+
+ movdqa xmm5, xmm2 ; 013 009 005 001 012 008 004 000
+ punpckldq xmm2, xmm7 ; 112 108 012 008 104 100 004 000
+ punpckhdq xmm5, xmm7 ; 113 109 013 009 105 101 005 001
+
+ movdqa xmm7, xmm1 ; 015 011 007 003 014 010 006 002
+ punpckldq xmm1, xmm6 ; 114 110 014 010 106 102 006 002
+ punpckhdq xmm7, xmm6 ; 115 111 015 011 107 103 007 003
+
+ pshufd xmm0, xmm2, 11011000b
+ pshufd xmm2, xmm1, 11011000b
+
+ pshufd xmm1, xmm5, 11011000b
+ pshufd xmm3, xmm7, 11011000b
+
+ pxor xmm7, xmm7
+
+ ; Load up predict blocks
+ movsxd rdx, dword ptr arg(3) ; dst_stride
+ movq xmm4, [rdi]
+ movq xmm5, [rdi+rdx]
+ lea rcx, [rdx + rdx*2]
+
+ punpcklbw xmm4, xmm7
+ punpcklbw xmm5, xmm7
+
+ paddw xmm0, xmm4
+ paddw xmm1, xmm5
+
+ movq xmm4, [rdi+rdx*2]
+ movq xmm5, [rdi+rcx]
+
+ punpcklbw xmm4, xmm7
+ punpcklbw xmm5, xmm7
+
+ paddw xmm2, xmm4
+ paddw xmm3, xmm5
+
+.finish:
+
+ ; pack up before storing
+ packuswb xmm0, xmm7
+ packuswb xmm1, xmm7
+ packuswb xmm2, xmm7
+ packuswb xmm3, xmm7
+
+ ; reload the destination stride before writing out;
+ ; it doesn't need to persist
+ movsxd rdx, dword ptr arg(3) ; dst_stride
+
+ ; store blocks back out
+ movq [rdi], xmm0
+ movq [rdi + rdx], xmm1
+
+ lea rdi, [rdi + 2*rdx]
+
+ movq [rdi], xmm2
+ movq [rdi + rdx], xmm3
+
+
+ ; begin epilog
+ pop rdi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+fours:
+ times 8 dw 0x0004
+align 16
+x_s1sqr2:
+ times 8 dw 0x8A8C
+align 16
+x_c1sqr2less1:
+ times 8 dw 0x4E7B
diff --git a/media/libvpx/libvpx/vp8/common/x86/iwalsh_sse2.asm b/media/libvpx/libvpx/vp8/common/x86/iwalsh_sse2.asm
new file mode 100644
index 0000000000..56f37c3e0f
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/iwalsh_sse2.asm
@@ -0,0 +1,123 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+SECTION .text
+
+;void vp8_short_inv_walsh4x4_sse2(short *input, short *mb_dqcoeff)
+globalsym(vp8_short_inv_walsh4x4_sse2)
+sym(vp8_short_inv_walsh4x4_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 2
+ ; end prolog
+
+ mov rcx, arg(0)
+ mov rdx, arg(1)
+ mov rax, 30003h
+
+ movdqa xmm0, [rcx + 0] ;ip[4] ip[0]
+ movdqa xmm1, [rcx + 16] ;ip[12] ip[8]
+
+
+ pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
+ movdqa xmm3, xmm0 ;ip[4] ip[0]
+
+ paddw xmm0, xmm2 ;ip[4]+ip[8] ip[0]+ip[12] aka b1 a1
+ psubw xmm3, xmm2 ;ip[4]-ip[8] ip[0]-ip[12] aka c1 d1
+
+ movdqa xmm4, xmm0
+ punpcklqdq xmm0, xmm3 ;d1 a1
+ punpckhqdq xmm4, xmm3 ;c1 b1
+
+ movdqa xmm1, xmm4 ;c1 b1
+ paddw xmm4, xmm0 ;d1+c1 a1+b1 aka op[4] op[0]
+ psubw xmm0, xmm1 ;d1-c1 a1-b1 aka op[12] op[8]
+
+ ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ; 13 12 11 10 03 02 01 00
+ ;
+ ; 33 32 31 30 23 22 21 20
+ ;
+ movdqa xmm3, xmm4 ; 13 12 11 10 03 02 01 00
+ punpcklwd xmm4, xmm0 ; 23 03 22 02 21 01 20 00
+ punpckhwd xmm3, xmm0 ; 33 13 32 12 31 11 30 10
+ movdqa xmm1, xmm4 ; 23 03 22 02 21 01 20 00
+ punpcklwd xmm4, xmm3 ; 31 21 11 01 30 20 10 00
+ punpckhwd xmm1, xmm3 ; 33 23 13 03 32 22 12 02
+ ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ movd xmm0, eax
+ pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
+ movdqa xmm3, xmm4 ;ip[4] ip[0]
+
+ pshufd xmm0, xmm0, 0 ;03 03 03 03 03 03 03 03
+
+ paddw xmm4, xmm2 ;ip[4]+ip[8] ip[0]+ip[12] aka b1 a1
+ psubw xmm3, xmm2 ;ip[4]-ip[8] ip[0]-ip[12] aka c1 d1
+
+ movdqa xmm5, xmm4
+ punpcklqdq xmm4, xmm3 ;d1 a1
+ punpckhqdq xmm5, xmm3 ;c1 b1
+
+ movdqa xmm1, xmm5 ;c1 b1
+ paddw xmm5, xmm4 ;d1+c1 a1+b1 aka op[4] op[0]
+ psubw xmm4, xmm1 ;d1-c1 a1-b1 aka op[12] op[8]
+
+ paddw xmm5, xmm0
+ paddw xmm4, xmm0
+ psraw xmm5, 3
+ psraw xmm4, 3
+
+ movd eax, xmm5
+ movd ecx, xmm4
+ psrldq xmm5, 4
+ psrldq xmm4, 4
+ mov word ptr[rdx+32*0], ax
+ mov word ptr[rdx+32*2], cx
+ shr eax, 16
+ shr ecx, 16
+ mov word ptr[rdx+32*4], ax
+ mov word ptr[rdx+32*6], cx
+ movd eax, xmm5
+ movd ecx, xmm4
+ psrldq xmm5, 4
+ psrldq xmm4, 4
+ mov word ptr[rdx+32*8], ax
+ mov word ptr[rdx+32*10], cx
+ shr eax, 16
+ shr ecx, 16
+ mov word ptr[rdx+32*12], ax
+ mov word ptr[rdx+32*14], cx
+
+ movd eax, xmm5
+ movd ecx, xmm4
+ psrldq xmm5, 4
+ psrldq xmm4, 4
+ mov word ptr[rdx+32*1], ax
+ mov word ptr[rdx+32*3], cx
+ shr eax, 16
+ shr ecx, 16
+ mov word ptr[rdx+32*5], ax
+ mov word ptr[rdx+32*7], cx
+ movd eax, xmm5
+ movd ecx, xmm4
+ mov word ptr[rdx+32*9], ax
+ mov word ptr[rdx+32*11], cx
+ shr eax, 16
+ shr ecx, 16
+ mov word ptr[rdx+32*13], ax
+ mov word ptr[rdx+32*15], cx
+
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
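+
+; 30003h is two packed words of 3; broadcast to all eight lanes it
+; provides the (x + 3) >> 3 rounding of the inverse Walsh-Hadamard
+; transform. Each 16-bit result is then scattered into mb_dqcoeff at a
+; stride of 16 shorts (32 bytes), i.e. into the dc slot of each of the
+; 16 per-block coefficient arrays.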
diff --git a/media/libvpx/libvpx/vp8/common/x86/loopfilter_block_sse2_x86_64.asm b/media/libvpx/libvpx/vp8/common/x86/loopfilter_block_sse2_x86_64.asm
new file mode 100644
index 0000000000..8d12f5385d
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/loopfilter_block_sse2_x86_64.asm
@@ -0,0 +1,817 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%macro LF_ABS 2
+ ; %1 value not preserved
+ ; %2 value preserved
+ ; output in %1
+ movdqa scratch1, %2 ; v2
+
+ psubusb scratch1, %1 ; v2 - v1
+ psubusb %1, %2 ; v1 - v2
+ por %1, scratch1 ; abs(v2 - v1)
+%endmacro
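+
+; LF_ABS relies on unsigned saturating subtraction: psubusb clamps
+; negative differences to zero, so or-ing (v2 - v1) and (v1 - v2)
+; yields |v1 - v2| per byte without widening or a signed abs.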
+
+%macro LF_FILTER_HEV_MASK 8-9
+
+ LF_ABS %1, %2 ; abs(p3 - p2)
+ LF_ABS %2, %3 ; abs(p2 - p1)
+ pmaxub %1, %2 ; accumulate mask
+%if %0 == 8
+ movdqa scratch2, %3 ; save p1
+ LF_ABS scratch2, %4 ; abs(p1 - p0)
+%endif
+ LF_ABS %4, %5 ; abs(p0 - q0)
+ LF_ABS %5, %6 ; abs(q0 - q1)
+%if %0 == 8
+ pmaxub %5, scratch2 ; accumulate hev
+%else
+ pmaxub %5, %9
+%endif
+ pmaxub %1, %5 ; accumulate mask
+
+ LF_ABS %3, %6 ; abs(p1 - q1)
+ LF_ABS %6, %7 ; abs(q1 - q2)
+ pmaxub %1, %6 ; accumulate mask
+ LF_ABS %7, %8 ; abs(q2 - q3)
+ pmaxub %1, %7 ; accumulate mask
+
+ paddusb %4, %4 ; 2 * abs(p0 - q0)
+ pand %3, [GLOBAL(tfe)]
+ psrlw %3, 1 ; abs(p1 - q1) / 2
+ paddusb %4, %3 ; abs(p0 - q0) * 2 + abs(p1 - q1) / 2
+
+ psubusb %1, [limit]
+ psubusb %4, [blimit]
+ por %1, %4
+ pcmpeqb %1, zero ; mask
+
+ psubusb %5, [thresh]
+ pcmpeqb %5, zero ; ~hev
+%endmacro
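+
+; On exit %1 is the filter mask: 0xFF where every neighbouring
+; difference is within [limit] and 2*|p0-q0| + |p1-q1|/2 is within
+; [blimit]. %5 is ~hev: 0xFF where neither |p1-p0| nor |q1-q0|
+; exceeds [thresh], i.e. where the gentler outer-tap path applies.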
+
+%macro LF_FILTER 6
+ ; %1-%4: p1-q1
+ ; %5: mask
+ ; %6: hev
+
+ movdqa scratch2, %6 ; save hev
+
+ pxor %1, [GLOBAL(t80)] ; ps1
+ pxor %4, [GLOBAL(t80)] ; qs1
+ movdqa scratch1, %1
+ psubsb scratch1, %4 ; signed_char_clamp(ps1 - qs1)
+ pandn scratch2, scratch1 ; vp8_filter &= hev
+
+ pxor %2, [GLOBAL(t80)] ; ps0
+ pxor %3, [GLOBAL(t80)] ; qs0
+ movdqa scratch1, %3
+ psubsb scratch1, %2 ; qs0 - ps0
+ paddsb scratch2, scratch1 ; vp8_filter += (qs0 - ps0)
+ paddsb scratch2, scratch1 ; vp8_filter += (qs0 - ps0)
+ paddsb scratch2, scratch1 ; vp8_filter += (qs0 - ps0)
+ pand %5, scratch2 ; &= mask
+
+ movdqa scratch2, %5
+ paddsb %5, [GLOBAL(t4)] ; Filter1
+ paddsb scratch2, [GLOBAL(t3)] ; Filter2
+
+ ; Filter1 >> 3
+ movdqa scratch1, zero
+ pcmpgtb scratch1, %5
+ psrlw %5, 3
+ pand scratch1, [GLOBAL(te0)]
+ pand %5, [GLOBAL(t1f)]
+ por %5, scratch1
+
+ psubsb %3, %5 ; qs0 - Filter1
+ pxor %3, [GLOBAL(t80)]
+
+ ; Filter2 >> 3
+ movdqa scratch1, zero
+ pcmpgtb scratch1, scratch2
+ psrlw scratch2, 3
+ pand scratch1, [GLOBAL(te0)]
+ pand scratch2, [GLOBAL(t1f)]
+ por scratch2, scratch1
+
+ paddsb %2, scratch2 ; ps0 + Filter2
+ pxor %2, [GLOBAL(t80)]
+
+ ; outer tap adjustments
+ paddsb %5, [GLOBAL(t1)]
+ movdqa scratch1, zero
+ pcmpgtb scratch1, %5
+ psrlw %5, 1
+ pand scratch1, [GLOBAL(t80)]
+ pand %5, [GLOBAL(t7f)]
+ por %5, scratch1
+ pand %5, %6 ; vp8_filter &= ~hev
+
+ psubsb %4, %5 ; qs1 - vp8_filter
+ pxor %4, [GLOBAL(t80)]
+
+ paddsb %1, %5 ; ps1 + vp8_filter
+ pxor %1, [GLOBAL(t80)]
+%endmacro
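+
+; SSE2 has no per-byte arithmetic shift, so ">> 3" on signed bytes is
+; emulated above: psrlw shifts whole words, the t1f mask clears the
+; three bits that leak in from the neighbouring byte, and the sign
+; bits captured with pcmpgtb are re-inserted via the te0 mask (t80/t7f
+; play the same roles for the final ">> 1"). The arithmetic matches
+; the C filter: Filter1 = clamp(f + 4) >> 3, Filter2 = clamp(f + 3) >> 3,
+; and the outer taps get (Filter1 + 1) >> 1 where hev is not set.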
+
+SECTION .text
+
+;void vp8_loop_filter_bh_y_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh
+;)
+globalsym(vp8_loop_filter_bh_y_sse2)
+sym(vp8_loop_filter_bh_y_sse2):
+
+%if LIBVPX_YASM_WIN64
+ %define src rcx ; src_ptr
+ %define stride rdx ; src_pixel_step
+ %define blimit r8
+ %define limit r9
+ %define thresh r10
+
+ %define spp rax
+ %define stride3 r11
+ %define stride5 r12
+ %define stride7 r13
+
+ push rbp
+ mov rbp, rsp
+ SAVE_XMM 11
+ push r12
+ push r13
+ mov thresh, arg(4)
+%else
+ %define src rdi ; src_ptr
+ %define stride rsi ; src_pixel_step
+ %define blimit rdx
+ %define limit rcx
+ %define thresh r8
+
+ %define spp rax
+ %define stride3 r9
+ %define stride5 r10
+ %define stride7 r11
+%endif
+
+ %define scratch1 xmm5
+ %define scratch2 xmm6
+ %define zero xmm7
+
+ %define i0 [src]
+ %define i1 [spp]
+ %define i2 [src + 2 * stride]
+ %define i3 [spp + 2 * stride]
+ %define i4 [src + 4 * stride]
+ %define i5 [spp + 4 * stride]
+ %define i6 [src + 2 * stride3]
+ %define i7 [spp + 2 * stride3]
+ %define i8 [src + 8 * stride]
+ %define i9 [spp + 8 * stride]
+ %define i10 [src + 2 * stride5]
+ %define i11 [spp + 2 * stride5]
+ %define i12 [src + 4 * stride3]
+ %define i13 [spp + 4 * stride3]
+ %define i14 [src + 2 * stride7]
+ %define i15 [spp + 2 * stride7]
+
+ ; prep work
+ lea spp, [src + stride]
+ lea stride3, [stride + 2 * stride]
+ lea stride5, [stride3 + 2 * stride]
+ lea stride7, [stride3 + 4 * stride]
+ pxor zero, zero
+
+ ; load the first set into registers
+ movdqa xmm0, i0
+ movdqa xmm1, i1
+ movdqa xmm2, i2
+ movdqa xmm3, i3
+ movdqa xmm4, i4
+ movdqa xmm8, i5
+ movdqa xmm9, i6 ; q2, will contain abs(p1-p0)
+ movdqa xmm10, i7
+LF_FILTER_HEV_MASK xmm0, xmm1, xmm2, xmm3, xmm4, xmm8, xmm9, xmm10
+
+ movdqa xmm1, i2
+ movdqa xmm2, i3
+ movdqa xmm3, i4
+ movdqa xmm8, i5
+LF_FILTER xmm1, xmm2, xmm3, xmm8, xmm0, xmm4
+ movdqa i2, xmm1
+ movdqa i3, xmm2
+
+; second set
+ movdqa i4, xmm3
+ movdqa i5, xmm8
+
+ movdqa xmm0, i6
+ movdqa xmm1, i7
+ movdqa xmm2, i8
+ movdqa xmm4, i9
+ movdqa xmm10, i10 ; q2, will contain abs(p1-p0)
+ movdqa xmm11, i11
+LF_FILTER_HEV_MASK xmm3, xmm8, xmm0, xmm1, xmm2, xmm4, xmm10, xmm11, xmm9
+
+ movdqa xmm0, i6
+ movdqa xmm1, i7
+ movdqa xmm4, i8
+ movdqa xmm8, i9
+LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
+ movdqa i6, xmm0
+ movdqa i7, xmm1
+
+; last set
+ movdqa i8, xmm4
+ movdqa i9, xmm8
+
+ movdqa xmm0, i10
+ movdqa xmm1, i11
+ movdqa xmm2, i12
+ movdqa xmm3, i13
+ movdqa xmm9, i14 ; q2, will contain abs(p1-p0)
+ movdqa xmm11, i15
+LF_FILTER_HEV_MASK xmm4, xmm8, xmm0, xmm1, xmm2, xmm3, xmm9, xmm11, xmm10
+
+ movdqa xmm0, i10
+ movdqa xmm1, i11
+ movdqa xmm3, i12
+ movdqa xmm8, i13
+LF_FILTER xmm0, xmm1, xmm3, xmm8, xmm4, xmm2
+ movdqa i10, xmm0
+ movdqa i11, xmm1
+ movdqa i12, xmm3
+ movdqa i13, xmm8
+
+%if LIBVPX_YASM_WIN64
+ pop r13
+ pop r12
+ RESTORE_XMM
+ pop rbp
+%endif
+
+ ret
+
+
+;void vp8_loop_filter_bv_y_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh
+;)
+
+globalsym(vp8_loop_filter_bv_y_sse2)
+sym(vp8_loop_filter_bv_y_sse2):
+
+%if LIBVPX_YASM_WIN64
+ %define src rcx ; src_ptr
+ %define stride rdx ; src_pixel_step
+ %define blimit r8
+ %define limit r9
+ %define thresh r10
+
+ %define spp rax
+ %define stride3 r11
+ %define stride5 r12
+ %define stride7 r13
+
+ push rbp
+ mov rbp, rsp
+ SAVE_XMM 15
+ push r12
+ push r13
+ mov thresh, arg(4)
+%else
+ %define src rdi
+ %define stride rsi
+ %define blimit rdx
+ %define limit rcx
+ %define thresh r8
+
+ %define spp rax
+ %define stride3 r9
+ %define stride5 r10
+ %define stride7 r11
+%endif
+
+ %define scratch1 xmm5
+ %define scratch2 xmm6
+ %define zero xmm7
+
+ %define s0 [src]
+ %define s1 [spp]
+ %define s2 [src + 2 * stride]
+ %define s3 [spp + 2 * stride]
+ %define s4 [src + 4 * stride]
+ %define s5 [spp + 4 * stride]
+ %define s6 [src + 2 * stride3]
+ %define s7 [spp + 2 * stride3]
+ %define s8 [src + 8 * stride]
+ %define s9 [spp + 8 * stride]
+ %define s10 [src + 2 * stride5]
+ %define s11 [spp + 2 * stride5]
+ %define s12 [src + 4 * stride3]
+ %define s13 [spp + 4 * stride3]
+ %define s14 [src + 2 * stride7]
+ %define s15 [spp + 2 * stride7]
+
+ %define i0 [rsp]
+ %define i1 [rsp + 16]
+ %define i2 [rsp + 32]
+ %define i3 [rsp + 48]
+ %define i4 [rsp + 64]
+ %define i5 [rsp + 80]
+ %define i6 [rsp + 96]
+ %define i7 [rsp + 112]
+ %define i8 [rsp + 128]
+ %define i9 [rsp + 144]
+ %define i10 [rsp + 160]
+ %define i11 [rsp + 176]
+ %define i12 [rsp + 192]
+ %define i13 [rsp + 208]
+ %define i14 [rsp + 224]
+ %define i15 [rsp + 240]
+
+ ALIGN_STACK 16, rax
+
+ ; reserve stack space
+ %define temp_storage 0 ; size is 256 (16*16)
+ %define stack_size 256
+ sub rsp, stack_size
+
+ ; prep work
+ lea spp, [src + stride]
+ lea stride3, [stride + 2 * stride]
+ lea stride5, [stride3 + 2 * stride]
+ lea stride7, [stride3 + 4 * stride]
+
+ ; 8-f
+ movdqa xmm0, s8
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, s9 ; 80 90
+ punpckhbw xmm1, s9 ; 88 98
+
+ movdqa xmm2, s10
+ movdqa xmm3, xmm2
+ punpcklbw xmm2, s11 ; a0 b0
+ punpckhbw xmm3, s11 ; a8 b8
+
+ movdqa xmm4, xmm0
+ punpcklwd xmm0, xmm2 ; 80 90 a0 b0
+ punpckhwd xmm4, xmm2 ; 84 94 a4 b4
+
+ movdqa xmm2, xmm1
+ punpcklwd xmm1, xmm3 ; 88 98 a8 b8
+ punpckhwd xmm2, xmm3 ; 8c 9c ac bc
+
+ ; using xmm[0124]
+ ; work on next 4 rows
+
+ movdqa xmm3, s12
+ movdqa xmm5, xmm3
+ punpcklbw xmm3, s13 ; c0 d0
+ punpckhbw xmm5, s13 ; c8 d8
+
+ movdqa xmm6, s14
+ movdqa xmm7, xmm6
+ punpcklbw xmm6, s15 ; e0 f0
+ punpckhbw xmm7, s15 ; e8 f8
+
+ movdqa xmm8, xmm3
+ punpcklwd xmm3, xmm6 ; c0 d0 e0 f0
+ punpckhwd xmm8, xmm6 ; c4 d4 e4 f4
+
+ movdqa xmm6, xmm5
+ punpcklwd xmm5, xmm7 ; c8 d8 e8 f8
+ punpckhwd xmm6, xmm7 ; cc dc ec fc
+
+ ; pull the third and fourth sets together
+
+ movdqa xmm7, xmm0
+ punpckldq xmm0, xmm3 ; 80 90 a0 b0 c0 d0 e0 f0
+ punpckhdq xmm7, xmm3 ; 82 92 a2 b2 c2 d2 e2 f2
+
+ movdqa xmm3, xmm4
+ punpckldq xmm4, xmm8 ; 84 94 a4 b4 c4 d4 e4 f4
+ punpckhdq xmm3, xmm8 ; 86 96 a6 b6 c6 d6 e6 f6
+
+ movdqa xmm8, xmm1
+ punpckldq xmm1, xmm5 ; 88 98 a8 b8 c8 d8 e8 f8
+ punpckhdq xmm8, xmm5 ; 8a 9a aa ba ca da ea fa
+
+ movdqa xmm5, xmm2
+ punpckldq xmm2, xmm6 ; 8c 9c ac bc cc dc ec fc
+ punpckhdq xmm5, xmm6 ; 8e 9e ae be ce de ee fe
+
+ ; save the calculations. we only have 15 registers ...
+ movdqa i0, xmm0
+ movdqa i1, xmm7
+ movdqa i2, xmm4
+ movdqa i3, xmm3
+ movdqa i4, xmm1
+ movdqa i5, xmm8
+ movdqa i6, xmm2
+ movdqa i7, xmm5
+
+ ; 0-7
+ movdqa xmm0, s0
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, s1 ; 00 10
+ punpckhbw xmm1, s1 ; 08 18
+
+ movdqa xmm2, s2
+ movdqa xmm3, xmm2
+ punpcklbw xmm2, s3 ; 20 30
+ punpckhbw xmm3, s3 ; 28 38
+
+ movdqa xmm4, xmm0
+ punpcklwd xmm0, xmm2 ; 00 10 20 30
+ punpckhwd xmm4, xmm2 ; 04 14 24 34
+
+ movdqa xmm2, xmm1
+ punpcklwd xmm1, xmm3 ; 08 18 28 38
+ punpckhwd xmm2, xmm3 ; 0c 1c 2c 3c
+
+ ; using xmm[0124]
+ ; work on next 4 rows
+
+ movdqa xmm3, s4
+ movdqa xmm5, xmm3
+ punpcklbw xmm3, s5 ; 40 50
+ punpckhbw xmm5, s5 ; 48 58
+
+ movdqa xmm6, s6
+ movdqa xmm7, xmm6
+ punpcklbw xmm6, s7 ; 60 70
+ punpckhbw xmm7, s7 ; 68 78
+
+ movdqa xmm8, xmm3
+ punpcklwd xmm3, xmm6 ; 40 50 60 70
+ punpckhwd xmm8, xmm6 ; 44 54 64 74
+
+ movdqa xmm6, xmm5
+ punpcklwd xmm5, xmm7 ; 48 58 68 78
+ punpckhwd xmm6, xmm7 ; 4c 5c 6c 7c
+
+ ; pull the first two sets together
+
+ movdqa xmm7, xmm0
+ punpckldq xmm0, xmm3 ; 00 10 20 30 40 50 60 70
+ punpckhdq xmm7, xmm3 ; 02 12 22 32 42 52 62 72
+
+ movdqa xmm3, xmm4
+ punpckldq xmm4, xmm8 ; 04 14 24 34 44 54 64 74
+ punpckhdq xmm3, xmm8 ; 06 16 26 36 46 56 66 76
+
+ movdqa xmm8, xmm1
+ punpckldq xmm1, xmm5 ; 08 18 28 38 48 58 68 78
+ punpckhdq xmm8, xmm5 ; 0a 1a 2a 3a 4a 5a 6a 7a
+
+ movdqa xmm5, xmm2
+ punpckldq xmm2, xmm6 ; 0c 1c 2c 3c 4c 5c 6c 7c
+ punpckhdq xmm5, xmm6 ; 0e 1e 2e 3e 4e 5e 6e 7e
+ ; final combination
+
+ movdqa xmm6, xmm0
+ punpcklqdq xmm0, i0
+ punpckhqdq xmm6, i0
+
+ movdqa xmm9, xmm7
+ punpcklqdq xmm7, i1
+ punpckhqdq xmm9, i1
+
+ movdqa xmm10, xmm4
+ punpcklqdq xmm4, i2
+ punpckhqdq xmm10, i2
+
+ movdqa xmm11, xmm3
+ punpcklqdq xmm3, i3
+ punpckhqdq xmm11, i3
+
+ movdqa xmm12, xmm1
+ punpcklqdq xmm1, i4
+ punpckhqdq xmm12, i4
+
+ movdqa xmm13, xmm8
+ punpcklqdq xmm8, i5
+ punpckhqdq xmm13, i5
+
+ movdqa xmm14, xmm2
+ punpcklqdq xmm2, i6
+ punpckhqdq xmm14, i6
+
+ movdqa xmm15, xmm5
+ punpcklqdq xmm5, i7
+ punpckhqdq xmm15, i7
+
+ movdqa i0, xmm0
+ movdqa i1, xmm6
+ movdqa i2, xmm7
+ movdqa i3, xmm9
+ movdqa i4, xmm4
+ movdqa i5, xmm10
+ movdqa i6, xmm3
+ movdqa i7, xmm11
+ movdqa i8, xmm1
+ movdqa i9, xmm12
+ movdqa i10, xmm8
+ movdqa i11, xmm13
+ movdqa i12, xmm2
+ movdqa i13, xmm14
+ movdqa i14, xmm5
+ movdqa i15, xmm15
+
+; TRANSPOSED DATA AVAILABLE ON THE STACK
+
+ movdqa xmm12, xmm6
+ movdqa xmm13, xmm7
+
+ pxor zero, zero
+
+LF_FILTER_HEV_MASK xmm0, xmm12, xmm13, xmm9, xmm4, xmm10, xmm3, xmm11
+
+ movdqa xmm1, i2
+ movdqa xmm2, i3
+ movdqa xmm8, i4
+ movdqa xmm9, i5
+LF_FILTER xmm1, xmm2, xmm8, xmm9, xmm0, xmm4
+ movdqa i2, xmm1
+ movdqa i3, xmm2
+
+; second set
+ movdqa i4, xmm8
+ movdqa i5, xmm9
+
+ movdqa xmm0, i6
+ movdqa xmm1, i7
+ movdqa xmm2, i8
+ movdqa xmm4, i9
+ movdqa xmm10, i10 ; q2, will contain abs(p1-p0)
+ movdqa xmm11, i11
+LF_FILTER_HEV_MASK xmm8, xmm9, xmm0, xmm1, xmm2, xmm4, xmm10, xmm11, xmm3
+
+ movdqa xmm0, i6
+ movdqa xmm1, i7
+ movdqa xmm3, i8
+ movdqa xmm4, i9
+LF_FILTER xmm0, xmm1, xmm3, xmm4, xmm8, xmm2
+ movdqa i6, xmm0
+ movdqa i7, xmm1
+
+; last set
+ movdqa i8, xmm3
+ movdqa i9, xmm4
+
+ movdqa xmm0, i10
+ movdqa xmm1, i11
+ movdqa xmm2, i12
+ movdqa xmm8, i13
+ movdqa xmm9, i14 ; q2, will contain abs(p1-p0)
+ movdqa xmm11, i15
+LF_FILTER_HEV_MASK xmm3, xmm4, xmm0, xmm1, xmm2, xmm8, xmm9, xmm11, xmm10
+
+ movdqa xmm0, i10
+ movdqa xmm1, i11
+ movdqa xmm4, i12
+ movdqa xmm8, i13
+LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
+ movdqa i10, xmm0
+ movdqa i11, xmm1
+ movdqa i12, xmm4
+ movdqa i13, xmm8
+
+
+; RESHUFFLE AND WRITE OUT
+ ; 8-f
+ movdqa xmm0, i8
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, i9 ; 80 90
+ punpckhbw xmm1, i9 ; 88 98
+
+ movdqa xmm2, i10
+ movdqa xmm3, xmm2
+ punpcklbw xmm2, i11 ; a0 b0
+ punpckhbw xmm3, i11 ; a8 b8
+
+ movdqa xmm4, xmm0
+ punpcklwd xmm0, xmm2 ; 80 90 a0 b0
+ punpckhwd xmm4, xmm2 ; 84 94 a4 b4
+
+ movdqa xmm2, xmm1
+ punpcklwd xmm1, xmm3 ; 88 98 a8 b8
+ punpckhwd xmm2, xmm3 ; 8c 9c ac bc
+
+ ; using xmm[0124]
+ ; work on next 4 rows
+
+ movdqa xmm3, i12
+ movdqa xmm5, xmm3
+ punpcklbw xmm3, i13 ; c0 d0
+ punpckhbw xmm5, i13 ; c8 d8
+
+ movdqa xmm6, i14
+ movdqa xmm7, xmm6
+ punpcklbw xmm6, i15 ; e0 f0
+ punpckhbw xmm7, i15 ; e8 f8
+
+ movdqa xmm8, xmm3
+ punpcklwd xmm3, xmm6 ; c0 d0 e0 f0
+ punpckhwd xmm8, xmm6 ; c4 d4 e4 f4
+
+ movdqa xmm6, xmm5
+ punpcklwd xmm5, xmm7 ; c8 d8 e8 f8
+ punpckhwd xmm6, xmm7 ; cc dc ec fc
+
+ ; pull the third and fourth sets together
+
+ movdqa xmm7, xmm0
+ punpckldq xmm0, xmm3 ; 80 90 a0 b0 c0 d0 e0 f0
+ punpckhdq xmm7, xmm3 ; 82 92 a2 b2 c2 d2 e2 f2
+
+ movdqa xmm3, xmm4
+ punpckldq xmm4, xmm8 ; 84 94 a4 b4 c4 d4 e4 f4
+ punpckhdq xmm3, xmm8 ; 86 96 a6 b6 c6 d6 e6 f6
+
+ movdqa xmm8, xmm1
+ punpckldq xmm1, xmm5 ; 88 98 a8 b8 c8 d8 e8 f8
+ punpckhdq xmm8, xmm5 ; 8a 9a aa ba ca da ea fa
+
+ movdqa xmm5, xmm2
+ punpckldq xmm2, xmm6 ; 8c 9c ac bc cc dc ec fc
+ punpckhdq xmm5, xmm6 ; 8e 9e ae be ce de ee fe
+
+ ; save the calculations. we only have 15 registers ...
+ movdqa i8, xmm0
+ movdqa i9, xmm7
+ movdqa i10, xmm4
+ movdqa i11, xmm3
+ movdqa i12, xmm1
+ movdqa i13, xmm8
+ movdqa i14, xmm2
+ movdqa i15, xmm5
+
+ ; 0-7
+ movdqa xmm0, i0
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, i1 ; 00 10
+ punpckhbw xmm1, i1 ; 08 18
+
+ movdqa xmm2, i2
+ movdqa xmm3, xmm2
+ punpcklbw xmm2, i3 ; 20 30
+ punpckhbw xmm3, i3 ; 28 38
+
+ movdqa xmm4, xmm0
+ punpcklwd xmm0, xmm2 ; 00 10 20 30
+ punpckhwd xmm4, xmm2 ; 04 14 24 34
+
+ movdqa xmm2, xmm1
+ punpcklwd xmm1, xmm3 ; 08 18 28 38
+ punpckhwd xmm2, xmm3 ; 0c 1c 2c 3c
+
+ ; using xmm[0124]
+ ; work on next 4 rows
+
+ movdqa xmm3, i4
+ movdqa xmm5, xmm3
+ punpcklbw xmm3, i5 ; 40 50
+ punpckhbw xmm5, i5 ; 48 58
+
+ movdqa xmm6, i6
+ movdqa xmm7, xmm6
+ punpcklbw xmm6, i7 ; 60 70
+ punpckhbw xmm7, i7 ; 68 78
+
+ movdqa xmm8, xmm3
+ punpcklwd xmm3, xmm6 ; 40 50 60 70
+ punpckhwd xmm8, xmm6 ; 44 54 64 74
+
+ movdqa xmm6, xmm5
+ punpcklwd xmm5, xmm7 ; 48 58 68 78
+ punpckhwd xmm6, xmm7 ; 4c 5c 6c 7c
+
+ ; pull the first two sets together
+
+ movdqa xmm7, xmm0
+ punpckldq xmm0, xmm3 ; 00 10 20 30 40 50 60 70
+ punpckhdq xmm7, xmm3 ; 02 12 22 32 42 52 62 72
+
+ movdqa xmm3, xmm4
+ punpckldq xmm4, xmm8 ; 04 14 24 34 44 54 64 74
+ punpckhdq xmm3, xmm8 ; 06 16 26 36 46 56 66 76
+
+ movdqa xmm8, xmm1
+ punpckldq xmm1, xmm5 ; 08 18 28 38 48 58 68 78
+ punpckhdq xmm8, xmm5 ; 0a 1a 2a 3a 4a 5a 6a 7a
+
+ movdqa xmm5, xmm2
+ punpckldq xmm2, xmm6 ; 0c 1c 2c 3c 4c 5c 6c 7c
+ punpckhdq xmm5, xmm6 ; 0e 1e 2e 3e 4e 5e 6e 7e
+ ; final combination
+
+ movdqa xmm6, xmm0
+ punpcklqdq xmm0, i8
+ punpckhqdq xmm6, i8
+
+ movdqa xmm9, xmm7
+ punpcklqdq xmm7, i9
+ punpckhqdq xmm9, i9
+
+ movdqa xmm10, xmm4
+ punpcklqdq xmm4, i10
+ punpckhqdq xmm10, i10
+
+ movdqa xmm11, xmm3
+ punpcklqdq xmm3, i11
+ punpckhqdq xmm11, i11
+
+ movdqa xmm12, xmm1
+ punpcklqdq xmm1, i12
+ punpckhqdq xmm12, i12
+
+ movdqa xmm13, xmm8
+ punpcklqdq xmm8, i13
+ punpckhqdq xmm13, i13
+
+ movdqa xmm14, xmm2
+ punpcklqdq xmm2, i14
+ punpckhqdq xmm14, i14
+
+ movdqa xmm15, xmm5
+ punpcklqdq xmm5, i15
+ punpckhqdq xmm15, i15
+
+ movdqa s0, xmm0
+ movdqa s1, xmm6
+ movdqa s2, xmm7
+ movdqa s3, xmm9
+ movdqa s4, xmm4
+ movdqa s5, xmm10
+ movdqa s6, xmm3
+ movdqa s7, xmm11
+ movdqa s8, xmm1
+ movdqa s9, xmm12
+ movdqa s10, xmm8
+ movdqa s11, xmm13
+ movdqa s12, xmm2
+ movdqa s13, xmm14
+ movdqa s14, xmm5
+ movdqa s15, xmm15
+
+ ; free stack space
+ add rsp, stack_size
+
+ ; un-ALIGN_STACK
+ pop rsp
+
+%if LIBVPX_YASM_WIN64
+ pop r13
+ pop r12
+ RESTORE_XMM
+ pop rbp
+%endif
+
+ ret
+
+SECTION_RODATA
+align 16
+te0:
+ times 16 db 0xe0
+align 16
+t7f:
+ times 16 db 0x7f
+align 16
+tfe:
+ times 16 db 0xfe
+align 16
+t1f:
+ times 16 db 0x1f
+align 16
+t80:
+ times 16 db 0x80
+align 16
+t1:
+ times 16 db 0x01
+align 16
+t3:
+ times 16 db 0x03
+align 16
+t4:
+ times 16 db 0x04
diff --git a/media/libvpx/libvpx/vp8/common/x86/loopfilter_sse2.asm b/media/libvpx/libvpx/vp8/common/x86/loopfilter_sse2.asm
new file mode 100644
index 0000000000..ce5c313138
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/loopfilter_sse2.asm
@@ -0,0 +1,1642 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+%define _t0 0
+%define _t1 _t0 + 16
+%define _p3 _t1 + 16
+%define _p2 _p3 + 16
+%define _p1 _p2 + 16
+%define _p0 _p1 + 16
+%define _q0 _p0 + 16
+%define _q1 _q0 + 16
+%define _q2 _q1 + 16
+%define _q3 _q2 + 16
+%define lf_var_size 160
+
+; Use of pmaxub instead of psubusb to compute filter mask was seen
+; in ffvp8
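+; Accumulating the pairwise absolute differences with pmaxub and doing
+; a single psubusb against [limit] at the end is cheaper, since only
+; the largest difference can exceed the limit.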
+
+%macro LFH_FILTER_AND_HEV_MASK 1
+%if %1
+ movdqa xmm2, [rdi+2*rax] ; q3
+ movdqa xmm1, [rsi+2*rax] ; q2
+ movdqa xmm4, [rsi+rax] ; q1
+ movdqa xmm5, [rsi] ; q0
+ neg rax ; negate pitch to deal with above border
+%else
+ movlps xmm2, [rsi + rcx*2] ; q3
+ movlps xmm1, [rsi + rcx] ; q2
+ movlps xmm4, [rsi] ; q1
+ movlps xmm5, [rsi + rax] ; q0
+
+ movhps xmm2, [rdi + rcx*2]
+ movhps xmm1, [rdi + rcx]
+ movhps xmm4, [rdi]
+ movhps xmm5, [rdi + rax]
+
+ lea rsi, [rsi + rax*4]
+ lea rdi, [rdi + rax*4]
+
+ movdqa [rsp+_q2], xmm1 ; store q2
+ movdqa [rsp+_q1], xmm4 ; store q1
+%endif
+ movdqa xmm7, [rdx] ;limit
+
+ movdqa xmm6, xmm1 ; q2
+ movdqa xmm3, xmm4 ; q1
+
+ psubusb xmm1, xmm2 ; q2-=q3
+ psubusb xmm2, xmm6 ; q3-=q2
+
+ psubusb xmm4, xmm6 ; q1-=q2
+ psubusb xmm6, xmm3 ; q2-=q1
+
+ por xmm4, xmm6 ; abs(q2-q1)
+ por xmm1, xmm2 ; abs(q3-q2)
+
+ movdqa xmm0, xmm5 ; q0
+ pmaxub xmm1, xmm4
+
+ psubusb xmm5, xmm3 ; q0-=q1
+ psubusb xmm3, xmm0 ; q1-=q0
+
+ por xmm5, xmm3 ; abs(q0-q1)
+ movdqa [rsp+_t0], xmm5 ; save to t0
+
+ pmaxub xmm1, xmm5
+
+%if %1
+ movdqa xmm2, [rsi+4*rax] ; p3
+ movdqa xmm4, [rdi+4*rax] ; p2
+ movdqa xmm6, [rsi+2*rax] ; p1
+%else
+ movlps xmm2, [rsi + rax] ; p3
+ movlps xmm4, [rsi] ; p2
+ movlps xmm6, [rsi + rcx] ; p1
+
+ movhps xmm2, [rdi + rax]
+ movhps xmm4, [rdi]
+ movhps xmm6, [rdi + rcx]
+
+ movdqa [rsp+_p2], xmm4 ; store p2
+ movdqa [rsp+_p1], xmm6 ; store p1
+%endif
+
+ movdqa xmm5, xmm4 ; p2
+ movdqa xmm3, xmm6 ; p1
+
+ psubusb xmm4, xmm2 ; p2-=p3
+ psubusb xmm2, xmm5 ; p3-=p2
+
+ psubusb xmm3, xmm5 ; p1-=p2
+ pmaxub xmm1, xmm4 ; abs(p3 - p2)
+
+ psubusb xmm5, xmm6 ; p2-=p1
+ pmaxub xmm1, xmm2 ; abs(p3 - p2)
+
+ pmaxub xmm1, xmm5 ; abs(p2 - p1)
+ movdqa xmm2, xmm6 ; p1
+
+ pmaxub xmm1, xmm3 ; abs(p2 - p1)
+%if %1
+ movdqa xmm4, [rsi+rax] ; p0
+ movdqa xmm3, [rdi] ; q1
+%else
+ movlps xmm4, [rsi + rcx*2] ; p0
+ movhps xmm4, [rdi + rcx*2]
+ movdqa xmm3, [rsp+_q1] ; q1
+%endif
+
+ movdqa xmm5, xmm4 ; p0
+ psubusb xmm4, xmm6 ; p0-=p1
+
+ psubusb xmm6, xmm5 ; p1-=p0
+
+ por xmm6, xmm4 ; abs(p1 - p0)
+ mov rdx, arg(2) ; get blimit
+
+ movdqa [rsp+_t1], xmm6 ; save to t1
+
+ movdqa xmm4, xmm3 ; q1
+ pmaxub xmm1, xmm6
+
+ psubusb xmm3, xmm2 ; q1-=p1
+ psubusb xmm2, xmm4 ; p1-=q1
+
+ psubusb xmm1, xmm7
+ por xmm2, xmm3 ; abs(p1-q1)
+
+ movdqa xmm7, [rdx] ; blimit
+ mov rdx, arg(4) ; hev get thresh
+
+ movdqa xmm3, xmm0 ; q0
+ pand xmm2, [GLOBAL(tfe)] ; set lsb of each byte to zero
+
+ movdqa xmm6, xmm5 ; p0
+ psrlw xmm2, 1 ; abs(p1-q1)/2
+
+ psubusb xmm5, xmm3 ; p0-=q0
+ psubusb xmm3, xmm6 ; q0-=p0
+ por xmm5, xmm3 ; abs(p0 - q0)
+
+ paddusb xmm5, xmm5 ; abs(p0-q0)*2
+
+ movdqa xmm4, [rsp+_t0] ; hev get abs (q1 - q0)
+ movdqa xmm3, [rsp+_t1] ; get abs (p1 - p0)
+
+ paddusb xmm5, xmm2 ; abs (p0 - q0) *2 + abs(p1-q1)/2
+
+ movdqa xmm2, [rdx] ; hev
+
+ psubusb xmm5, xmm7 ; abs (p0 - q0) *2 + abs(p1-q1)/2 > blimit
+ psubusb xmm4, xmm2 ; hev
+
+ psubusb xmm3, xmm2 ; hev
+ por xmm1, xmm5
+
+ pxor xmm7, xmm7
+ paddb xmm4, xmm3 ; hev abs(q1 - q0) > thresh || abs(p1 - p0) > thresh
+
+ pcmpeqb xmm4, xmm5 ; hev
+ pcmpeqb xmm3, xmm3 ; hev
+
+ pcmpeqb xmm1, xmm7 ; mask xmm1
+ pxor xmm4, xmm3 ; hev
+%endmacro
+
+%macro B_FILTER 1
+ movdqa xmm3, [GLOBAL(t80)]
+%if %1 == 0
+ movdqa xmm2, [rsp+_p1] ; p1
+ movdqa xmm7, [rsp+_q1] ; q1
+%elif %1 == 1
+ movdqa xmm2, [rsi+2*rax] ; p1
+ movdqa xmm7, [rdi] ; q1
+%elif %1 == 2
+ movdqa xmm2, [rsp+_p1] ; p1
+ movdqa xmm6, [rsp+_p0] ; p0
+ movdqa xmm0, [rsp+_q0] ; q0
+ movdqa xmm7, [rsp+_q1] ; q1
+%endif
+
+ pxor xmm2, xmm3 ; p1 offset to convert to signed values
+ pxor xmm7, xmm3 ; q1 offset to convert to signed values
+
+ psubsb xmm2, xmm7 ; p1 - q1
+ pxor xmm6, xmm3 ; offset to convert to signed values
+
+ pand xmm2, xmm4 ; high var mask (hvm)(p1 - q1)
+ pxor xmm0, xmm3 ; offset to convert to signed values
+
+ movdqa xmm3, xmm0 ; q0
+ psubsb xmm0, xmm6 ; q0 - p0
+ paddsb xmm2, xmm0 ; 1 * (q0 - p0) + hvm(p1 - q1)
+ paddsb xmm2, xmm0 ; 2 * (q0 - p0) + hvm(p1 - q1)
+ paddsb xmm2, xmm0 ; 3 * (q0 - p0) + hvm(p1 - q1)
+ pand xmm1, xmm2 ; mask filter values we don't care about
+
+ movdqa xmm2, xmm1
+ paddsb xmm1, [GLOBAL(t4)] ; 3* (q0 - p0) + hvm(p1 - q1) + 4
+ paddsb xmm2, [GLOBAL(t3)] ; 3* (q0 - p0) + hvm(p1 - q1) + 3
+
+ punpckhbw xmm5, xmm2 ; axbxcxdx
+ punpcklbw xmm2, xmm2 ; exfxgxhx
+
+ punpcklbw xmm0, xmm1 ; exfxgxhx
+ psraw xmm5, 11 ; sign extended shift right by 3
+
+ punpckhbw xmm1, xmm1 ; axbxcxdx
+ psraw xmm2, 11 ; sign extended shift right by 3
+
+ packsswb xmm2, xmm5 ; (3* (q0 - p0) + hvm(p1 - q1) + 3) >> 3;
+ psraw xmm0, 11 ; sign extended shift right by 3
+
+ psraw xmm1, 11 ; sign extended shift right by 3
+ movdqa xmm5, xmm0 ; save results
+
+ packsswb xmm0, xmm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>3
+
+ paddsb xmm6, xmm2 ; p0+= p0 add
+
+ movdqa xmm2, [GLOBAL(ones)]
+ paddsw xmm5, xmm2
+ paddsw xmm1, xmm2
+ psraw xmm5, 1 ; partial shifted one more time for 2nd tap
+ psraw xmm1, 1 ; partial shifted one more time for 2nd tap
+ packsswb xmm5, xmm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>4
+ movdqa xmm2, [GLOBAL(t80)]
+
+%if %1 == 0
+ movdqa xmm1, [rsp+_p1] ; p1
+ lea rsi, [rsi + rcx*2]
+ lea rdi, [rdi + rcx*2]
+%elif %1 == 1
+ movdqa xmm1, [rsi+2*rax] ; p1
+%elif %1 == 2
+ movdqa xmm1, [rsp+_p1] ; p1
+%endif
+
+ pandn xmm4, xmm5 ; high edge variance additive
+ pxor xmm6, xmm2 ; unoffset
+
+ pxor xmm1, xmm2 ; reoffset
+ psubsb xmm3, xmm0 ; q0-= q0 add
+
+ paddsb xmm1, xmm4 ; p1+= p1 add
+ pxor xmm3, xmm2 ; unoffset
+
+ pxor xmm1, xmm2 ; unoffset
+ psubsb xmm7, xmm4 ; q1-= q1 add
+
+ pxor xmm7, xmm2 ; unoffset
+%if %1 == 0
+ movq [rsi], xmm6 ; p0
+ movhps [rdi], xmm6
+ movq [rsi + rax], xmm1 ; p1
+ movhps [rdi + rax], xmm1
+ movq [rsi + rcx], xmm3 ; q0
+ movhps [rdi + rcx], xmm3
+ movq [rsi + rcx*2], xmm7 ; q1
+ movhps [rdi + rcx*2], xmm7
+%elif %1 == 1
+ movdqa [rsi+rax], xmm6 ; write back
+ movdqa [rsi+2*rax], xmm1 ; write back
+ movdqa [rsi], xmm3 ; write back
+ movdqa [rdi], xmm7 ; write back
+%endif
+
+%endmacro
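+
+; B_FILTER widens bytes to words with a sign-extension trick: each
+; signed byte is placed in the high byte of a word (the low byte only
+; contributes noise below the shift), so psraw by 11 yields the byte
+; arithmetically shifted right by 3; packsswb then saturates the words
+; back down to bytes.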
+
+SECTION .text
+
+%if ABI_IS_32BIT
+
+;void vp8_loop_filter_horizontal_edge_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+;)
+globalsym(vp8_loop_filter_horizontal_edge_sse2)
+sym(vp8_loop_filter_horizontal_edge_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, lf_var_size
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixel_step
+
+ mov rdx, arg(3) ;limit
+
+ lea rdi, [rsi+rax] ; rdi points to row +1 for indirect addressing
+
+ ; calculate breakout conditions and high edge variance
+ LFH_FILTER_AND_HEV_MASK 1
+ ; filter and write back the result
+ B_FILTER 1
+
+ add rsp, lf_var_size
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+%endif
+
+;void vp8_loop_filter_horizontal_edge_uv_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; int count
+;)
+globalsym(vp8_loop_filter_horizontal_edge_uv_sse2)
+sym(vp8_loop_filter_horizontal_edge_uv_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, lf_var_size
+
+ mov rsi, arg(0) ; u
+ mov rdi, arg(5) ; v
+ movsxd rax, dword ptr arg(1) ; src_pixel_step
+ mov rcx, rax
+ neg rax ; negate pitch to deal with above border
+
+ mov rdx, arg(3) ;limit
+
+ lea rsi, [rsi + rcx]
+ lea rdi, [rdi + rcx]
+
+ ; calculate breakout conditions and high edge variance
+ LFH_FILTER_AND_HEV_MASK 0
+ ; filter and write back the result
+ B_FILTER 0
+
+ add rsp, lf_var_size
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+%macro MB_FILTER_AND_WRITEBACK 1
+ movdqa xmm3, [GLOBAL(t80)]
+%if %1 == 0
+ movdqa xmm2, [rsp+_p1] ; p1
+ movdqa xmm7, [rsp+_q1] ; q1
+%elif %1 == 1
+ movdqa xmm2, [rsi+2*rax] ; p1
+ movdqa xmm7, [rdi] ; q1
+
+ mov rcx, rax
+ neg rcx
+%elif %1 == 2
+ movdqa xmm2, [rsp+_p1] ; p1
+ movdqa xmm6, [rsp+_p0] ; p0
+ movdqa xmm0, [rsp+_q0] ; q0
+ movdqa xmm7, [rsp+_q1] ; q1
+%endif
+
+ pxor xmm2, xmm3 ; p1 offset to convert to signed values
+ pxor xmm7, xmm3 ; q1 offset to convert to signed values
+ pxor xmm6, xmm3 ; offset to convert to signed values
+ pxor xmm0, xmm3 ; offset to convert to signed values
+
+ psubsb xmm2, xmm7 ; p1 - q1
+
+ movdqa xmm3, xmm0 ; q0
+ psubsb xmm0, xmm6 ; q0 - p0
+ paddsb xmm2, xmm0 ; 1 * (q0 - p0) + (p1 - q1)
+ paddsb xmm2, xmm0 ; 2 * (q0 - p0) + (p1 - q1)
+ paddsb xmm2, xmm0 ; 3 * (q0 - p0) + (p1 - q1)
+ pand xmm1, xmm2 ; mask filter values we don't care about
+
+ movdqa xmm2, xmm1 ; vp8_filter
+
+ pand xmm2, xmm4 ; Filter2 = vp8_filter & hev
+ pxor xmm0, xmm0
+
+ pandn xmm4, xmm1 ; vp8_filter&=~hev
+ pxor xmm1, xmm1
+
+ punpcklbw xmm0, xmm4 ; Filter 2 (hi)
+ punpckhbw xmm1, xmm4 ; Filter 2 (lo)
+
+ movdqa xmm5, xmm2
+
+ movdqa xmm4, [GLOBAL(s9)]
+ paddsb xmm5, [GLOBAL(t3)] ; vp8_signed_char_clamp(Filter2 + 3)
+ paddsb xmm2, [GLOBAL(t4)] ; vp8_signed_char_clamp(Filter2 + 4)
+
+ pmulhw xmm1, xmm4 ; Filter 2 (lo) * 9
+ pmulhw xmm0, xmm4 ; Filter 2 (hi) * 9
+
+ punpckhbw xmm7, xmm5 ; axbxcxdx
+ punpcklbw xmm5, xmm5 ; exfxgxhx
+
+ psraw xmm7, 11 ; sign extended shift right by 3
+
+ psraw xmm5, 11 ; sign extended shift right by 3
+ punpckhbw xmm4, xmm2 ; axbxcxdx
+
+ punpcklbw xmm2, xmm2 ; exfxgxhx
+ psraw xmm4, 11 ; sign extended shift right by 3
+
+ packsswb xmm5, xmm7 ; Filter2 >>=3;
+ psraw xmm2, 11 ; sign extended shift right by 3
+
+ packsswb xmm2, xmm4 ; Filter1 >>=3;
+
+ paddsb xmm6, xmm5 ; ps0 = ps0 + Filter2
+
+ psubsb xmm3, xmm2 ; qs0 = qs0 - Filter1
+ movdqa xmm7, xmm1
+
+ movdqa xmm4, [GLOBAL(s63)]
+ movdqa xmm5, xmm0
+ movdqa xmm2, xmm5
+ paddw xmm0, xmm4 ; Filter 2 (hi) * 9 + 63
+ paddw xmm1, xmm4 ; Filter 2 (lo) * 9 + 63
+ movdqa xmm4, xmm7
+
+ paddw xmm5, xmm5 ; Filter 2 (hi) * 18
+
+ paddw xmm7, xmm7 ; Filter 2 (lo) * 18
+ paddw xmm5, xmm0 ; Filter 2 (hi) * 27 + 63
+
+ paddw xmm7, xmm1 ; Filter 2 (lo) * 27 + 63
+ paddw xmm2, xmm0 ; Filter 2 (hi) * 18 + 63
+ psraw xmm0, 7 ; (Filter 2 (hi) * 9 + 63) >> 7
+
+ paddw xmm4, xmm1 ; Filter 2 (lo) * 18 + 63
+ psraw xmm1, 7 ; (Filter 2 (lo) * 9 + 63) >> 7
+ psraw xmm2, 7 ; (Filter 2 (hi) * 18 + 63) >> 7
+
+ packsswb xmm0, xmm1 ; u1 = vp8_signed_char_clamp((63 + Filter2 * 9)>>7)
+
+ psraw xmm4, 7 ; (Filter 2 (lo) * 18 + 63) >> 7
+ psraw xmm5, 7 ; (Filter 2 (hi) * 27 + 63) >> 7
+ psraw xmm7, 7 ; (Filter 2 (lo) * 27 + 63) >> 7
+
+ packsswb xmm5, xmm7 ; u3 = vp8_signed_char_clamp((63 + Filter2 * 27)>>7)
+ packsswb xmm2, xmm4 ; u2 = vp8_signed_char_clamp((63 + Filter2 * 18)>>7)
+ movdqa xmm7, [GLOBAL(t80)]
+
+%if %1 == 0
+ movdqa xmm1, [rsp+_q1] ; q1
+ movdqa xmm4, [rsp+_p1] ; p1
+ lea rsi, [rsi+rcx*2]
+ lea rdi, [rdi+rcx*2]
+
+%elif %1 == 1
+ movdqa xmm1, [rdi] ; q1
+ movdqa xmm4, [rsi+rax*2] ; p1
+%elif %1 == 2
+ movdqa xmm4, [rsp+_p1] ; p1
+ movdqa xmm1, [rsp+_q1] ; q1
+%endif
+
+ pxor xmm1, xmm7
+ pxor xmm4, xmm7
+
+ psubsb xmm3, xmm5 ; sq = vp8_signed_char_clamp(qs0 - u3)
+ paddsb xmm6, xmm5 ; sp = vp8_signed_char_clamp(ps0 + u3)
+ psubsb xmm1, xmm2 ; sq = vp8_signed_char_clamp(qs1 - u2)
+ paddsb xmm4, xmm2 ; sp = vp8_signed_char_clamp(ps1 + u2)
+
+%if %1 == 1
+ movdqa xmm2, [rdi+rax*4] ; p2
+ movdqa xmm5, [rdi+rcx] ; q2
+%else
+ movdqa xmm2, [rsp+_p2] ; p2
+ movdqa xmm5, [rsp+_q2] ; q2
+%endif
+
+ pxor xmm1, xmm7 ; *oq1 = sq^0x80;
+ pxor xmm4, xmm7 ; *op1 = sp^0x80;
+ pxor xmm2, xmm7
+ pxor xmm5, xmm7
+ paddsb xmm2, xmm0 ; sp = vp8_signed_char_clamp(ps2 + u)
+ psubsb xmm5, xmm0 ; sq = vp8_signed_char_clamp(qs2 - u)
+ pxor xmm2, xmm7 ; *op2 = sp^0x80;
+ pxor xmm5, xmm7 ; *oq2 = sq^0x80;
+ pxor xmm3, xmm7 ; *oq0 = sq^0x80
+ pxor xmm6, xmm7 ; *op0 = sp^0x80
+%if %1 == 0
+ movq [rsi], xmm6 ; p0
+ movhps [rdi], xmm6
+ movq [rsi + rcx], xmm3 ; q0
+ movhps [rdi + rcx], xmm3
+ lea rdx, [rcx + rcx*2]
+ movq [rsi+rcx*2], xmm1 ; q1
+ movhps [rdi+rcx*2], xmm1
+
+ movq [rsi + rax], xmm4 ; p1
+ movhps [rdi + rax], xmm4
+
+ movq [rsi+rax*2], xmm2 ; p2
+ movhps [rdi+rax*2], xmm2
+
+ movq [rsi+rdx], xmm5 ; q2
+ movhps [rdi+rdx], xmm5
+%elif %1 == 1
+ movdqa [rdi+rcx], xmm5 ; q2
+ movdqa [rdi], xmm1 ; q1
+ movdqa [rsi], xmm3 ; q0
+ movdqa [rsi+rax ], xmm6 ; p0
+ movdqa [rsi+rax*2], xmm4 ; p1
+ movdqa [rdi+rax*4], xmm2 ; p2
+%elif %1 == 2
+ movdqa [rsp+_p1], xmm4 ; p1
+ movdqa [rsp+_p0], xmm6 ; p0
+ movdqa [rsp+_q0], xmm3 ; q0
+ movdqa [rsp+_q1], xmm1 ; q1
+%endif
+
+%endmacro
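+
+; The 9/18/27 multipliers are the macroblock filter taps of the C
+; vp8_mbfilter: u = clamp((63 + Filter2 * 27) >> 7) adjusts p0/q0,
+; (63 + Filter2 * 18) >> 7 adjusts p1/q1 and (63 + Filter2 * 9) >> 7
+; adjusts p2/q2, with p-side values added and q-side values subtracted.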
+
+
+;void vp8_mbloop_filter_horizontal_edge_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+;)
+globalsym(vp8_mbloop_filter_horizontal_edge_sse2)
+sym(vp8_mbloop_filter_horizontal_edge_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, lf_var_size
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixel_step
+ mov rdx, arg(3) ;limit
+
+ lea rdi, [rsi+rax] ; rdi points to row +1 for indirect addressing
+
+ ; calculate breakout conditions and high edge variance
+ LFH_FILTER_AND_HEV_MASK 1
+ ; filter and write back the results
+ MB_FILTER_AND_WRITEBACK 1
+
+ add rsp, lf_var_size
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_mbloop_filter_horizontal_edge_uv_sse2
+;(
+; unsigned char *u,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; unsigned char *v
+;)
+globalsym(vp8_mbloop_filter_horizontal_edge_uv_sse2)
+sym(vp8_mbloop_filter_horizontal_edge_uv_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, lf_var_size
+
+ mov rsi, arg(0) ; u
+ mov rdi, arg(5) ; v
+ movsxd rax, dword ptr arg(1) ; src_pixel_step
+ mov rcx, rax
+ neg rax ; negate pitch to deal with above border
+ mov rdx, arg(3) ;limit
+
+ lea rsi, [rsi + rcx]
+ lea rdi, [rdi + rcx]
+
+ ; calculate breakout conditions and high edge variance
+ LFH_FILTER_AND_HEV_MASK 0
+ ; filter and write back the results
+ MB_FILTER_AND_WRITEBACK 0
+
+ add rsp, lf_var_size
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+%macro TRANSPOSE_16X8 2
+ movq xmm4, [rsi] ; xx xx xx xx xx xx xx xx 07 06 05 04 03 02 01 00
+ movq xmm1, [rdi] ; xx xx xx xx xx xx xx xx 17 16 15 14 13 12 11 10
+ movq xmm0, [rsi+2*rax] ; xx xx xx xx xx xx xx xx 27 26 25 24 23 22 21 20
+ movq xmm7, [rdi+2*rax] ; xx xx xx xx xx xx xx xx 37 36 35 34 33 32 31 30
+ movq xmm5, [rsi+4*rax] ; xx xx xx xx xx xx xx xx 47 46 45 44 43 42 41 40
+ movq xmm2, [rdi+4*rax] ; xx xx xx xx xx xx xx xx 57 56 55 54 53 52 51 50
+
+ punpcklbw xmm4, xmm1 ; 17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00
+
+ movq xmm1, [rdi+2*rcx] ; xx xx xx xx xx xx xx xx 77 76 75 74 73 72 71 70
+
+ movdqa xmm3, xmm4 ; 17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00
+ punpcklbw xmm0, xmm7 ; 37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20
+
+ movq xmm7, [rsi+2*rcx] ; xx xx xx xx xx xx xx xx 67 66 65 64 63 62 61 60
+
+ punpcklbw xmm5, xmm2 ; 57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40
+%if %1
+ lea rsi, [rsi+rax*8]
+ lea rdi, [rdi+rax*8]
+%else
+ mov rsi, arg(5) ; v_ptr
+%endif
+
+ movdqa xmm6, xmm5 ; 57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40
+ punpcklbw xmm7, xmm1 ; 77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60
+ punpcklwd xmm5, xmm7 ; 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
+ punpckhwd xmm6, xmm7 ; 77 67 57 47 76 66 56 46 75 65 55 45 74 64 54 44
+ punpcklwd xmm3, xmm0 ; 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
+
+%if %1 == 0
+ lea rdi, [rsi + rax - 4] ; rdi points to row +1 for indirect addressing
+ lea rsi, [rsi - 4]
+%endif
+
+ movdqa xmm2, xmm3 ; 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
+ punpckhwd xmm4, xmm0 ; 37 27 17 07 36 26 16 06 35 25 15 05 34 24 14 04
+
+ movdqa xmm7, xmm4 ; 37 27 17 07 36 26 16 06 35 25 15 05 34 24 14 04
+ punpckhdq xmm3, xmm5 ; 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
+
+ punpckhdq xmm7, xmm6 ; 77 67 57 47 37 27 17 07 76 66 56 46 36 26 16 06
+
+ punpckldq xmm4, xmm6 ; 75 65 55 45 35 25 15 05 74 64 54 44 34 24 14 04
+
+ punpckldq xmm2, xmm5 ; 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
+
+ movdqa [rsp+_t0], xmm2 ; save to free XMM2
+
+ movq xmm2, [rsi] ; xx xx xx xx xx xx xx xx 87 86 85 84 83 82 81 80
+ movq xmm6, [rdi] ; xx xx xx xx xx xx xx xx 97 96 95 94 93 92 91 90
+ movq xmm0, [rsi+2*rax] ; xx xx xx xx xx xx xx xx a7 a6 a5 a4 a3 a2 a1 a0
+ movq xmm5, [rdi+2*rax] ; xx xx xx xx xx xx xx xx b7 b6 b5 b4 b3 b2 b1 b0
+ movq xmm1, [rsi+4*rax] ; xx xx xx xx xx xx xx xx c7 c6 c5 c4 c3 c2 c1 c0
+
+ punpcklbw xmm2, xmm6 ; 97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80
+
+ movq xmm6, [rdi+4*rax] ; xx xx xx xx xx xx xx xx d7 d6 d5 d4 d3 d2 d1 d0
+
+ punpcklbw xmm0, xmm5 ; b7 a7 b6 a6 b5 a5 b4 a4 b3 a3 b2 a2 b1 a1 b0 a0
+
+ movq xmm5, [rsi+2*rcx] ; xx xx xx xx xx xx xx xx e7 e6 e5 e4 e3 e2 e1 e0
+
+ punpcklbw xmm1, xmm6 ; d7 c7 d6 c6 d5 c5 d4 c4 d3 c3 d2 c2 d1 c1 d0 c0
+
+ movq xmm6, [rdi+2*rcx] ; xx xx xx xx xx xx xx xx f7 f6 f5 f4 f3 f2 f1 f0
+
+ punpcklbw xmm5, xmm6 ; f7 e7 f6 e6 f5 e5 f4 e4 f3 e3 f2 e2 f1 e1 f0 e0
+
+ movdqa xmm6, xmm1 ;
+ punpckhwd xmm6, xmm5 ; f7 e7 d7 c7 f6 e6 d6 c6 f5 e5 d5 c5 f4 e4 d4 c4
+
+ punpcklwd xmm1, xmm5 ; f3 e3 d3 c3 f2 e2 d2 c2 f1 e1 d1 c1 f0 e0 d0 c0
+ movdqa xmm5, xmm2 ; 97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80
+
+ punpcklwd xmm5, xmm0 ; b3 a3 93 83 b2 a2 92 82 b1 a1 91 81 b0 a0 90 80
+
+ punpckhwd xmm2, xmm0 ; b7 a7 97 87 b6 a6 96 86 b5 a5 95 85 b4 a4 94 84
+
+ movdqa xmm0, xmm5
+ punpckldq xmm0, xmm1 ; f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
+
+ punpckhdq xmm5, xmm1 ; f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
+ movdqa xmm1, xmm2 ; b7 a7 97 87 b6 a6 96 86 b5 a5 95 85 b4 a4 94 84
+
+ punpckldq xmm1, xmm6 ; f5 e5 d5 c5 b5 a5 95 85 f4 e4 d4 c4 b4 a4 94 84
+
+ punpckhdq xmm2, xmm6 ; f7 e7 d7 c7 b7 a7 97 87 f6 e6 d6 c6 b6 a6 96 86
+ movdqa xmm6, xmm7 ; 77 67 57 47 37 27 17 07 76 66 56 46 36 26 16 06
+
+ punpcklqdq xmm6, xmm2 ; f6 e6 d6 c6 b6 a6 96 86 76 66 56 46 36 26 16 06
+
+ punpckhqdq xmm7, xmm2 ; f7 e7 d7 c7 b7 a7 97 87 77 67 57 47 37 27 17 07
+
+%if %2 == 0
+ movdqa [rsp+_q3], xmm7 ; save 7
+ movdqa [rsp+_q2], xmm6 ; save 6
+%endif
+ movdqa xmm2, xmm3 ; 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
+ punpckhqdq xmm3, xmm5 ; f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
+ punpcklqdq xmm2, xmm5 ; f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+ movdqa [rsp+_p1], xmm2 ; save 2
+
+ movdqa xmm5, xmm4 ; 75 65 55 45 35 25 15 05 74 64 54 44 34 24 14 04
+ punpcklqdq xmm4, xmm1 ; f4 e4 d4 c4 b4 a4 94 84 74 64 54 44 34 24 14 04
+ movdqa [rsp+_p0], xmm3 ; save 3
+
+ punpckhqdq xmm5, xmm1 ; f5 e5 d5 c5 b5 a5 95 85 75 65 55 45 35 25 15 05
+
+ movdqa [rsp+_q0], xmm4 ; save 4
+ movdqa [rsp+_q1], xmm5 ; save 5
+ movdqa xmm1, [rsp+_t0]
+
+ movdqa xmm2, xmm1 ;
+ punpckhqdq xmm1, xmm0 ; f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
+ punpcklqdq xmm2, xmm0 ; f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
+
+%if %2 == 0
+ movdqa [rsp+_p2], xmm1
+ movdqa [rsp+_p3], xmm2
+%endif
+
+%endmacro
+
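+; A behavioral C model of what TRANSPOSE_16X8 computes (a minimal sketch,
+; not how the SIMD code gets there): 16 rows of 8 source bytes become 8
+; rows of 16 bytes, so output row k holds column k of every input row.
+; The punpckl/h cascade above achieves this by interleaving at byte, word,
+; dword and finally qword granularity.
+;
+;   /* hypothetical reference; in[16][8] is the gathered source block */
+;   static void transpose_16x8_ref(const unsigned char in[16][8],
+;                                  unsigned char out[8][16]) {
+;     int r, c;
+;     for (r = 0; r < 16; ++r)
+;       for (c = 0; c < 8; ++c) out[c][r] = in[r][c];
+;   }
+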
+%macro LFV_FILTER_MASK_HEV_MASK 0
+ movdqa xmm0, xmm6 ; q2
+ psubusb xmm0, xmm7 ; q2-q3
+
+ psubusb xmm7, xmm6 ; q3-q2
+ movdqa xmm4, xmm5 ; q1
+
+ por xmm7, xmm0 ; abs (q3-q2)
+ psubusb xmm4, xmm6 ; q1-q2
+
+ movdqa xmm0, xmm1
+ psubusb xmm6, xmm5 ; q2-q1
+
+ por xmm6, xmm4 ; abs (q2-q1)
+ psubusb xmm0, xmm2 ; p2 - p3;
+
+ psubusb xmm2, xmm1 ; p3 - p2;
+ por xmm0, xmm2 ; abs(p2-p3)
+
+ movdqa xmm5, [rsp+_p1] ; p1
+ pmaxub xmm0, xmm7
+
+ movdqa xmm2, xmm5 ; p1
+ psubusb xmm5, xmm1 ; p1-p2
+ psubusb xmm1, xmm2 ; p2-p1
+
+ movdqa xmm7, xmm3 ; p0
+ psubusb xmm7, xmm2 ; p0-p1
+
+ por xmm1, xmm5 ; abs(p2-p1)
+ pmaxub xmm0, xmm6
+
+ pmaxub xmm0, xmm1
+ movdqa xmm1, xmm2 ; p1
+
+ psubusb xmm2, xmm3 ; p1-p0
+
+ por xmm2, xmm7 ; abs(p1-p0)
+
+ pmaxub xmm0, xmm2
+
+ movdqa xmm5, [rsp+_q0] ; q0
+ movdqa xmm7, [rsp+_q1] ; q1
+
+ mov rdx, arg(3) ; limit
+
+ movdqa xmm6, xmm5 ; q0
+ movdqa xmm4, xmm7 ; q1
+
+ psubusb xmm5, xmm7 ; q0-q1
+ psubusb xmm7, xmm6 ; q1-q0
+
+ por xmm7, xmm5 ; abs(q1-q0)
+
+ pmaxub xmm0, xmm7
+
+ psubusb xmm0, [rdx] ; limit
+
+ mov rdx, arg(2) ; blimit
+ movdqa xmm5, xmm4 ; q1
+
+ psubusb xmm5, xmm1 ; q1-=p1
+ psubusb xmm1, xmm4 ; p1-=q1
+
+ por xmm5, xmm1 ; abs(p1-q1)
+ movdqa xmm1, xmm3 ; p0
+
+ pand xmm5, [GLOBAL(tfe)] ; set lsb of each byte to zero
+ psubusb xmm1, xmm6 ; p0-q0
+
+ movdqa xmm4, [rdx] ; blimit
+ mov rdx, arg(4) ; get thresh
+
+ psrlw xmm5, 1 ; abs(p1-q1)/2
+ psubusb xmm6, xmm3 ; q0-p0
+
+ por xmm1, xmm6 ; abs(q0-p0)
+ paddusb xmm1, xmm1 ; abs(q0-p0)*2
+ movdqa xmm3, [rdx]
+
+ paddusb xmm1, xmm5 ; abs (p0 - q0) *2 + abs(p1-q1)/2
+ psubusb xmm2, xmm3 ; abs(p1 - p0) > thresh
+
+ psubusb xmm7, xmm3 ; abs(q1 - q0) > thresh
+
+ psubusb xmm1, xmm4 ; abs (p0 - q0) *2 + abs(p1-q1)/2 > blimit
+ por xmm2, xmm7 ; abs(q1 - q0) > thresh || abs(p1 - p0) > thresh
+
+ por xmm1, xmm0 ; mask
+ pcmpeqb xmm2, xmm0
+
+ pxor xmm0, xmm0
+ pcmpeqb xmm4, xmm4
+
+ pcmpeqb xmm1, xmm0
+ pxor xmm4, xmm2
+%endmacro
+
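+; Per-pixel C semantics of the mask and hev values produced above (this
+; mirrors the scalar vp8_filter_mask()/vp8_hevmask() logic in
+; vp8/common/loopfilter_filters.c; the SIMD version evaluates all 16
+; lanes at once, with pairs of unsigned saturating subtracts standing in
+; for abs()):
+;
+;   #include <stdlib.h>
+;   static int filter_mask(int limit, int blimit, int p3, int p2, int p1,
+;                          int p0, int q0, int q1, int q2, int q3) {
+;     return abs(p3 - p2) <= limit && abs(p2 - p1) <= limit &&
+;            abs(p1 - p0) <= limit && abs(q1 - q0) <= limit &&
+;            abs(q2 - q1) <= limit && abs(q3 - q2) <= limit &&
+;            abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit;
+;   }
+;   static int hev_mask(int thresh, int p1, int p0, int q0, int q1) {
+;     return abs(p1 - p0) > thresh || abs(q1 - q0) > thresh;
+;   }
+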
+%macro BV_TRANSPOSE 0
+ ; xmm1 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+ ; xmm6 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
+ ; xmm3 = f4 e4 d4 c4 b4 a4 94 84 74 64 54 44 34 24 14 04
+ ; xmm7 = f5 e5 d5 c5 b5 a5 95 85 75 65 55 45 35 25 15 05
+ movdqa xmm2, xmm1 ; f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+ punpcklbw xmm2, xmm6 ; 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
+
+ movdqa xmm4, xmm3 ; f4 e4 d4 c4 b4 a4 94 84 74 64 54 44 34 24 14 04
+ punpckhbw xmm1, xmm6 ; f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
+
+ punpcklbw xmm4, xmm7 ; 75 74 65 64 55 54 45 44 35 34 25 24 15 14 05 04
+
+ punpckhbw xmm3, xmm7 ; f5 f4 e5 e4 d5 d4 c5 c4 b5 b4 a5 a4 95 94 85 84
+
+ movdqa xmm6, xmm2 ; 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
+ punpcklwd xmm2, xmm4 ; 35 34 33 32 25 24 23 22 15 14 13 12 05 04 03 02
+
+ punpckhwd xmm6, xmm4 ; 75 74 73 72 65 64 63 62 55 54 53 52 45 44 43 42
+ movdqa xmm5, xmm1 ; f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
+
+ punpcklwd xmm1, xmm3 ; b5 b4 b3 b2 a5 a4 a3 a2 95 94 93 92 85 84 83 82
+
+ punpckhwd xmm5, xmm3 ; f5 f4 f3 f2 e5 e4 e3 e2 d5 d4 d3 d2 c5 c4 c3 c2
+ ; xmm2 = 35 34 33 32 25 24 23 22 15 14 13 12 05 04 03 02
+ ; xmm6 = 75 74 73 72 65 64 63 62 55 54 53 52 45 44 43 42
+ ; xmm1 = b5 b4 b3 b2 a5 a4 a3 a2 95 94 93 92 85 84 83 82
+ ; xmm5 = f5 f4 f3 f2 e5 e4 e3 e2 d5 d4 d3 d2 c5 c4 c3 c2
+%endmacro
+
+%macro BV_WRITEBACK 2
+ movd [rsi+2], %1
+ movd [rsi+4*rax+2], %2
+ psrldq %1, 4
+ psrldq %2, 4
+ movd [rdi+2], %1
+ movd [rdi+4*rax+2], %2
+ psrldq %1, 4
+ psrldq %2, 4
+ movd [rsi+2*rax+2], %1
+ movd [rsi+2*rcx+2], %2
+ psrldq %1, 4
+ psrldq %2, 4
+ movd [rdi+2*rax+2], %1
+ movd [rdi+2*rcx+2], %2
+%endmacro
+
+%if ABI_IS_32BIT
+
+;void vp8_loop_filter_vertical_edge_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh
+;)
+globalsym(vp8_loop_filter_vertical_edge_sse2)
+sym(vp8_loop_filter_vertical_edge_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, lf_var_size
+
+ mov rsi, arg(0) ; src_ptr
+ movsxd rax, dword ptr arg(1) ; src_pixel_step
+
+ lea rsi, [rsi - 4]
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+ lea rcx, [rax*2+rax]
+
+ ;transpose 16x8 to 8x16, and store the 8-line result on stack.
+ TRANSPOSE_16X8 1, 1
+
+ ; calculate filter mask and high edge variance
+ LFV_FILTER_MASK_HEV_MASK
+
+ ; start work on filters
+ B_FILTER 2
+
+ ; transpose and write back - only work on q1, q0, p0, p1
+ BV_TRANSPOSE
+ ; store 16-line result
+
+ lea rdx, [rax]
+ neg rdx
+
+ BV_WRITEBACK xmm1, xmm5
+
+ lea rsi, [rsi+rdx*8]
+ lea rdi, [rdi+rdx*8]
+ BV_WRITEBACK xmm2, xmm6
+
+ add rsp, lf_var_size
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+%endif
+
+;void vp8_loop_filter_vertical_edge_uv_sse2
+;(
+; unsigned char *u,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; unsigned char *v
+;)
+globalsym(vp8_loop_filter_vertical_edge_uv_sse2)
+sym(vp8_loop_filter_vertical_edge_uv_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, lf_var_size
+
+ mov rsi, arg(0) ; u_ptr
+ movsxd rax, dword ptr arg(1) ; src_pixel_step
+
+ lea rsi, [rsi - 4]
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+ lea rcx, [rax+2*rax]
+
+ ;transpose 16x8 to 8x16, and store the 8-line result on stack.
+ TRANSPOSE_16X8 0, 1
+
+ ; calculate filter mask and high edge variance
+ LFV_FILTER_MASK_HEV_MASK
+
+ ; start work on filters
+ B_FILTER 2
+
+ ; transpose and write back - only work on q1, q0, p0, p1
+ BV_TRANSPOSE
+
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+
+ ; store 16-line result
+ BV_WRITEBACK xmm1, xmm5
+
+ mov rsi, arg(0) ; u_ptr
+ lea rsi, [rsi - 4]
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+ BV_WRITEBACK xmm2, xmm6
+
+ add rsp, lf_var_size
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+%macro MBV_TRANSPOSE 0
+ movdqa xmm0, [rsp+_p3] ; f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
+ movdqa xmm1, xmm0 ; f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
+
+ punpcklbw xmm0, xmm2 ; 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
+ punpckhbw xmm1, xmm2 ; f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
+
+ movdqa xmm7, [rsp+_p1] ; f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+ movdqa xmm6, xmm7 ; f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+
+ punpcklbw xmm7, [rsp+_p0] ; 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
+ punpckhbw xmm6, [rsp+_p0] ; f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
+
+ movdqa xmm3, xmm0 ; 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
+ punpcklwd xmm0, xmm7 ; 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
+
+ punpckhwd xmm3, xmm7 ; 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
+ movdqa xmm4, xmm1 ; f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
+
+ punpcklwd xmm1, xmm6 ; b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
+ punpckhwd xmm4, xmm6 ; f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
+
+ movdqa xmm7, [rsp+_q0] ; f4 e4 d4 c4 b4 a4 94 84 74 64 54 44 34 24 14 04
+ punpcklbw xmm7, [rsp+_q1] ; 75 74 65 64 55 54 45 44 35 34 25 24 15 14 05 04
+
+ movdqa xmm6, xmm5 ; f6 e6 d6 c6 b6 a6 96 86 76 66 56 46 36 26 16 06
+ punpcklbw xmm6, [rsp+_q3] ; 77 76 67 66 57 56 47 46 37 36 27 26 17 16 07 06
+
+ movdqa xmm2, xmm7 ; 75 74 65 64 55 54 45 44 35 34 25 24 15 14 05 04
+ punpcklwd xmm7, xmm6 ; 37 36 35 34 27 26 25 24 17 16 15 14 07 06 05 04
+
+ punpckhwd xmm2, xmm6 ; 77 76 75 74 67 66 65 64 57 56 55 54 47 46 45 44
+ movdqa xmm6, xmm0 ; 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
+
+ punpckldq xmm0, xmm7 ; 17 16 15 14 13 12 11 10 07 06 05 04 03 02 01 00
+ punpckhdq xmm6, xmm7 ; 37 36 35 34 33 32 31 30 27 26 25 24 23 22 21 20
+%endmacro
+
+%macro MBV_WRITEBACK_1 0
+ movq [rsi], xmm0
+ movhps [rdi], xmm0
+
+ movq [rsi+2*rax], xmm6
+ movhps [rdi+2*rax], xmm6
+
+ movdqa xmm0, xmm3 ; 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
+ punpckldq xmm0, xmm2 ; 57 56 55 54 53 52 51 50 47 46 45 44 43 42 41 40
+ punpckhdq xmm3, xmm2 ; 77 76 75 74 73 72 71 70 67 66 65 64 63 62 61 60
+
+ movq [rsi+4*rax], xmm0
+ movhps [rdi+4*rax], xmm0
+
+ movq [rsi+2*rcx], xmm3
+ movhps [rdi+2*rcx], xmm3
+
+ movdqa xmm7, [rsp+_q0] ; f4 e4 d4 c4 b4 a4 94 84 74 64 54 44 34 24 14 04
+ punpckhbw xmm7, [rsp+_q1] ; f5 f4 e5 e4 d5 d4 c5 c4 b5 b4 a5 a4 95 94 85 84
+ punpckhbw xmm5, [rsp+_q3] ; f7 f6 e7 e6 d7 d6 c7 c6 b7 b6 a7 a6 97 96 87 86
+
+ movdqa xmm0, xmm7
+ punpcklwd xmm0, xmm5 ; b7 b6 b5 b4 a7 a6 a5 a4 97 96 95 94 87 86 85 84
+ punpckhwd xmm7, xmm5 ; f7 f6 f5 f4 e7 e6 e5 e4 d7 d6 d5 d4 c7 c6 c5 c4
+
+ movdqa xmm5, xmm1 ; b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
+ punpckldq xmm1, xmm0 ; 97 96 95 94 93 92 91 90 87 86 85 84 83 82 81 80
+ punpckhdq xmm5, xmm0 ; b7 b6 b5 b4 b3 b2 b1 b0 a7 a6 a5 a4 a3 a2 a1 a0
+%endmacro
+
+%macro MBV_WRITEBACK_2 0
+ movq [rsi], xmm1
+ movhps [rdi], xmm1
+
+ movq [rsi+2*rax], xmm5
+ movhps [rdi+2*rax], xmm5
+
+ movdqa xmm1, xmm4 ; f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
+ punpckldq xmm1, xmm7 ; d7 d6 d5 d4 d3 d2 d1 d0 c7 c6 c5 c4 c3 c2 c1 c0
+ punpckhdq xmm4, xmm7 ; f7 f6 f5 f4 f3 f2 f1 f0 e7 e6 e5 e4 e3 e2 e1 e0
+
+ movq [rsi+4*rax], xmm1
+ movhps [rdi+4*rax], xmm1
+
+ movq [rsi+2*rcx], xmm4
+ movhps [rdi+2*rcx], xmm4
+%endmacro
+
+
+;void vp8_mbloop_filter_vertical_edge_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh
+;)
+globalsym(vp8_mbloop_filter_vertical_edge_sse2)
+sym(vp8_mbloop_filter_vertical_edge_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, lf_var_size
+
+ mov rsi, arg(0) ; src_ptr
+ movsxd rax, dword ptr arg(1) ; src_pixel_step
+
+ lea rsi, [rsi - 4]
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+ lea rcx, [rax*2+rax]
+
+ ; Transpose
+ TRANSPOSE_16X8 1, 0
+
+ ; calculate filter mask and high edge variance
+ LFV_FILTER_MASK_HEV_MASK
+
+ neg rax
+ ; start work on filters
+ MB_FILTER_AND_WRITEBACK 2
+
+ lea rsi, [rsi+rax*8]
+ lea rdi, [rdi+rax*8]
+
+ ; transpose and write back
+ MBV_TRANSPOSE
+
+ neg rax
+
+ MBV_WRITEBACK_1
+
+
+ lea rsi, [rsi+rax*8]
+ lea rdi, [rdi+rax*8]
+ MBV_WRITEBACK_2
+
+ add rsp, lf_var_size
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_mbloop_filter_vertical_edge_uv_sse2
+;(
+; unsigned char *u,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; unsigned char *v
+;)
+globalsym(vp8_mbloop_filter_vertical_edge_uv_sse2)
+sym(vp8_mbloop_filter_vertical_edge_uv_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, lf_var_size
+
+ mov rsi, arg(0) ; u_ptr
+ movsxd rax, dword ptr arg(1) ; src_pixel_step
+
+ lea rsi, [rsi - 4]
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+ lea rcx, [rax+2*rax]
+
+ ; Transpose
+ TRANSPOSE_16X8 0, 0
+
+ ; calculate filter mask and high edge variance
+ LFV_FILTER_MASK_HEV_MASK
+
+ ; start work on filters
+ MB_FILTER_AND_WRITEBACK 2
+
+ ; transpose and write back
+ MBV_TRANSPOSE
+
+ mov rsi, arg(0) ;u_ptr
+ lea rsi, [rsi - 4]
+ lea rdi, [rsi + rax]
+ MBV_WRITEBACK_1
+ mov rsi, arg(5) ;v_ptr
+ lea rsi, [rsi - 4]
+ lea rdi, [rsi + rax]
+ MBV_WRITEBACK_2
+
+ add rsp, lf_var_size
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_loop_filter_simple_horizontal_edge_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit
+;)
+globalsym(vp8_loop_filter_simple_horizontal_edge_sse2)
+sym(vp8_loop_filter_simple_horizontal_edge_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 3
+ SAVE_XMM 7
+ GET_GOT rbx
+ ; end prolog
+
+ mov rcx, arg(0) ;src_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixel_step
+ movdqa xmm6, [GLOBAL(tfe)]
+ lea rdx, [rcx + rax]
+ neg rax
+
+ ; calculate mask
+ movdqa xmm0, [rdx] ; q1
+ mov rdx, arg(2) ;blimit
+ movdqa xmm1, [rcx+2*rax] ; p1
+
+ movdqa xmm2, xmm1
+ movdqa xmm3, xmm0
+
+ psubusb xmm0, xmm1 ; q1-=p1
+ psubusb xmm1, xmm3 ; p1-=q1
+ por xmm1, xmm0 ; abs(p1-q1)
+ pand xmm1, xmm6 ; set lsb of each byte to zero
+ psrlw xmm1, 1 ; abs(p1-q1)/2
+
+ movdqa xmm7, XMMWORD PTR [rdx]
+
+ movdqa xmm5, [rcx+rax] ; p0
+ movdqa xmm4, [rcx] ; q0
+ movdqa xmm0, xmm4 ; q0
+ movdqa xmm6, xmm5 ; p0
+ psubusb xmm5, xmm4 ; p0-=q0
+ psubusb xmm4, xmm6 ; q0-=p0
+ por xmm5, xmm4 ; abs(p0 - q0)
+
+ movdqa xmm4, [GLOBAL(t80)]
+
+ paddusb xmm5, xmm5 ; abs(p0-q0)*2
+ paddusb xmm5, xmm1 ; abs (p0 - q0) *2 + abs(p1-q1)/2
+ psubusb xmm5, xmm7 ; abs(p0 - q0) *2 + abs(p1-q1)/2 > blimit
+ pxor xmm7, xmm7
+ pcmpeqb xmm5, xmm7
+
+
+ ; start work on filters
+ pxor xmm2, xmm4 ; p1 offset to convert to signed values
+ pxor xmm3, xmm4 ; q1 offset to convert to signed values
+ psubsb xmm2, xmm3 ; p1 - q1
+
+ pxor xmm6, xmm4 ; offset to convert to signed values
+ pxor xmm0, xmm4 ; offset to convert to signed values
+ movdqa xmm3, xmm0 ; q0
+ psubsb xmm0, xmm6 ; q0 - p0
+ paddsb xmm2, xmm0 ; p1 - q1 + 1 * (q0 - p0)
+ paddsb xmm2, xmm0 ; p1 - q1 + 2 * (q0 - p0)
+ paddsb xmm2, xmm0 ; p1 - q1 + 3 * (q0 - p0)
+ pand xmm5, xmm2 ; mask filter values we don't care about
+
+ movdqa xmm0, xmm5
+ paddsb xmm5, [GLOBAL(t3)] ; 3 * (q0 - p0) + (p1 - q1) + 3
+ paddsb xmm0, [GLOBAL(t4)] ; 3 * (q0 - p0) + (p1 - q1) + 4
+
+ movdqa xmm1, [GLOBAL(te0)]
+ movdqa xmm2, [GLOBAL(t1f)]
+
+; pxor xmm7, xmm7
+ pcmpgtb xmm7, xmm0 ;save sign
+ pand xmm7, xmm1 ;preserve the upper 3 bits
+ psrlw xmm0, 3
+ pand xmm0, xmm2 ;clear out upper 3 bits
+ por xmm0, xmm7 ;add sign
+ psubsb xmm3, xmm0 ; q0-= q0sz add
+
+ pxor xmm7, xmm7
+ pcmpgtb xmm7, xmm5 ;save sign
+ pand xmm7, xmm1 ;preserve the upper 3 bits
+ psrlw xmm5, 3
+ pand xmm5, xmm2 ;clear out upper 3 bits
+ por xmm5, xmm7 ;add sign
+ paddsb xmm6, xmm5 ; p0+= p0 add
+
+ pxor xmm3, xmm4 ; unoffset
+ movdqa [rcx], xmm3 ; write back
+
+ pxor xmm6, xmm4 ; unoffset
+ movdqa [rcx+rax], xmm6 ; write back
+
+ ; begin epilog
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
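+; Per-pixel C semantics of the filter applied above (the signed-clamp
+; form of the VP8 simple loop filter: t80 flips bytes into signed range,
+; paddsb/psubsb supply the clamps, and the pcmpgtb/te0/t1f sequence
+; emulates a per-byte arithmetic >>3, which SSE2 does not provide):
+;
+;   static signed char clamp8(int v) {
+;     return (signed char)(v < -128 ? -128 : v > 127 ? 127 : v);
+;   }
+;   /* p1, p0, q0, q1 have already been XORed with 0x80 (made signed);
+;      mask is 0 or -1 per pixel */
+;   static void simple_filter(int mask, signed char p1, signed char *p0,
+;                             signed char *q0, signed char q1) {
+;     int f = clamp8(p1 - q1);
+;     f = clamp8(f + 3 * (*q0 - *p0)) & mask;
+;     *q0 = clamp8(*q0 - (clamp8(f + 4) >> 3));
+;     *p0 = clamp8(*p0 + (clamp8(f + 3) >> 3));
+;   }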
+
+;void vp8_loop_filter_simple_vertical_edge_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit
+;)
+globalsym(vp8_loop_filter_simple_vertical_edge_sse2)
+sym(vp8_loop_filter_simple_vertical_edge_sse2):
+ push rbp ; save old base pointer value.
+ mov rbp, rsp ; set new base pointer value.
+ SHADOW_ARGS_TO_STACK 3
+ SAVE_XMM 7
+ GET_GOT rbx ; save callee-saved reg
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 32 ; reserve 32 bytes
+ %define t0 [rsp + 0] ;__declspec(align(16)) char t0[16];
+ %define t1 [rsp + 16] ;__declspec(align(16)) char t1[16];
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixel_step
+
+ lea rsi, [rsi - 2 ]
+ lea rdi, [rsi + rax]
+ lea rdx, [rsi + rax*4]
+ lea rcx, [rdx + rax]
+
+ movd xmm0, [rsi] ; (high 96 bits unused) 03 02 01 00
+ movd xmm1, [rdx] ; (high 96 bits unused) 43 42 41 40
+ movd xmm2, [rdi] ; 13 12 11 10
+ movd xmm3, [rcx] ; 53 52 51 50
+ punpckldq xmm0, xmm1 ; (high 64 bits unused) 43 42 41 40 03 02 01 00
+ punpckldq xmm2, xmm3 ; 53 52 51 50 13 12 11 10
+
+ movd xmm4, [rsi + rax*2] ; 23 22 21 20
+ movd xmm5, [rdx + rax*2] ; 63 62 61 60
+ movd xmm6, [rdi + rax*2] ; 33 32 31 30
+ movd xmm7, [rcx + rax*2] ; 73 72 71 70
+ punpckldq xmm4, xmm5 ; 63 62 61 60 23 22 21 20
+ punpckldq xmm6, xmm7 ; 73 72 71 70 33 32 31 30
+
+ punpcklbw xmm0, xmm2 ; 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00
+ punpcklbw xmm4, xmm6 ; 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20
+
+ movdqa xmm1, xmm0
+ punpcklwd xmm0, xmm4 ; 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
+ punpckhwd xmm1, xmm4 ; 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
+
+ movdqa xmm2, xmm0
+ punpckldq xmm0, xmm1 ; 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
+ punpckhdq xmm2, xmm1 ; 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
+
+ lea rsi, [rsi + rax*8]
+ lea rdi, [rsi + rax]
+ lea rdx, [rsi + rax*4]
+ lea rcx, [rdx + rax]
+
+ movd xmm4, [rsi] ; 83 82 81 80
+ movd xmm1, [rdx] ; c3 c2 c1 c0
+ movd xmm6, [rdi] ; 93 92 91 90
+ movd xmm3, [rcx] ; d3 d2 d1 d0
+ punpckldq xmm4, xmm1 ; c3 c2 c1 c0 83 82 81 80
+ punpckldq xmm6, xmm3 ; d3 d2 d1 d0 93 92 91 90
+
+ movd xmm1, [rsi + rax*2] ; a3 a2 a1 a0
+ movd xmm5, [rdx + rax*2] ; e3 e2 e1 e0
+ movd xmm3, [rdi + rax*2] ; b3 b2 b1 b0
+ movd xmm7, [rcx + rax*2] ; f3 f2 f1 f0
+ punpckldq xmm1, xmm5 ; e3 e2 e1 e0 a3 a2 a1 a0
+ punpckldq xmm3, xmm7 ; f3 f2 f1 f0 b3 b2 b1 b0
+
+ punpcklbw xmm4, xmm6 ; d3 c3 d2 c2 d1 c1 d0 c0 93 83 92 82 91 81 90 80
+ punpcklbw xmm1, xmm3 ; f3 e3 f2 e2 f1 e1 f0 e0 b3 a3 b2 a2 b1 a1 b0 a0
+
+ movdqa xmm7, xmm4
+ punpcklwd xmm4, xmm1 ; b3 a3 93 83 b2 a2 92 82 b1 a1 91 81 b0 a0 90 80
+ punpckhwd xmm7, xmm1 ; f3 e3 d3 c3 f2 e2 d2 c2 f1 e1 d1 c1 f0 e0 d0 c0
+
+ movdqa xmm6, xmm4
+ punpckldq xmm4, xmm7 ; f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
+ punpckhdq xmm6, xmm7 ; f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
+
+ movdqa xmm1, xmm0
+ movdqa xmm3, xmm2
+
+ punpcklqdq xmm0, xmm4 ; p1 f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
+ punpckhqdq xmm1, xmm4 ; p0 f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
+ punpcklqdq xmm2, xmm6 ; q0 f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+ punpckhqdq xmm3, xmm6 ; q1 f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
+
+ mov rdx, arg(2) ;blimit
+
+ ; calculate mask
+ movdqa xmm6, xmm0 ; p1
+ movdqa xmm7, xmm3 ; q1
+ psubusb xmm7, xmm0 ; q1-=p1
+ psubusb xmm6, xmm3 ; p1-=q1
+ por xmm6, xmm7 ; abs(p1-q1)
+ pand xmm6, [GLOBAL(tfe)] ; set lsb of each byte to zero
+ psrlw xmm6, 1 ; abs(p1-q1)/2
+
+ movdqa xmm7, [rdx]
+
+ movdqa xmm5, xmm1 ; p0
+ movdqa xmm4, xmm2 ; q0
+ psubusb xmm5, xmm2 ; p0-=q0
+ psubusb xmm4, xmm1 ; q0-=p0
+ por xmm5, xmm4 ; abs(p0 - q0)
+ paddusb xmm5, xmm5 ; abs(p0-q0)*2
+ paddusb xmm5, xmm6 ; abs (p0 - q0) *2 + abs(p1-q1)/2
+
+ movdqa xmm4, [GLOBAL(t80)]
+
+ psubusb xmm5, xmm7 ; abs(p0 - q0) *2 + abs(p1-q1)/2 > blimit
+ pxor xmm7, xmm7
+ pcmpeqb xmm5, xmm7 ; mm5 = mask
+
+ ; start work on filters
+ movdqa t0, xmm0
+ movdqa t1, xmm3
+
+ pxor xmm0, xmm4 ; p1 offset to convert to signed values
+ pxor xmm3, xmm4 ; q1 offset to convert to signed values
+ psubsb xmm0, xmm3 ; p1 - q1
+
+ pxor xmm1, xmm4 ; offset to convert to signed values
+ pxor xmm2, xmm4 ; offset to convert to signed values
+
+ movdqa xmm3, xmm2 ; offset (signed) q0
+ psubsb xmm2, xmm1 ; q0 - p0
+ paddsb xmm0, xmm2 ; p1 - q1 + 1 * (q0 - p0)
+ paddsb xmm0, xmm2 ; p1 - q1 + 2 * (q0 - p0)
+ paddsb xmm0, xmm2 ; p1 - q1 + 3 * (q0 - p0)
+ pand xmm5, xmm0 ; mask filter values we don't care about
+
+ movdqa xmm0, xmm5
+ paddsb xmm5, [GLOBAL(t3)] ; 3 * (q0 - p0) + (p1 - q1) + 3
+ paddsb xmm0, [GLOBAL(t4)] ; 3 * (q0 - p0) + (p1 - q1) + 4
+
+ movdqa xmm6, [GLOBAL(te0)]
+ movdqa xmm2, [GLOBAL(t1f)]
+
+; pxor xmm7, xmm7
+ pcmpgtb xmm7, xmm0 ;save sign
+ pand xmm7, xmm6 ;preserve the upper 3 bits
+ psrlw xmm0, 3
+ pand xmm0, xmm2 ;clear out upper 3 bits
+ por xmm0, xmm7 ;add sign
+ psubsb xmm3, xmm0 ; q0-= q0sz add
+
+ pxor xmm7, xmm7
+ pcmpgtb xmm7, xmm5 ;save sign
+ pand xmm7, xmm6 ;preserve the upper 3 bits
+ psrlw xmm5, 3
+ pand xmm5, xmm2 ;clear out upper 3 bits
+ por xmm5, xmm7 ;add sign
+ paddsb xmm1, xmm5 ; p0+= p0 add
+
+ pxor xmm3, xmm4 ; unoffset q0
+ pxor xmm1, xmm4 ; unoffset p0
+
+ movdqa xmm0, t0 ; p1
+ movdqa xmm4, t1 ; q1
+
+ ; write out order: xmm0 xmm2 xmm1 xmm3
+ lea rdx, [rsi + rax*4]
+
+ ; transpose back to write out
+ ; p1 f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
+ ; p0 f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
+ ; q0 f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+ ; q1 f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
+ movdqa xmm6, xmm0
+ punpcklbw xmm0, xmm1 ; 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
+ punpckhbw xmm6, xmm1 ; f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
+
+ movdqa xmm5, xmm3
+ punpcklbw xmm3, xmm4 ; 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
+ punpckhbw xmm5, xmm4 ; f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
+
+ movdqa xmm2, xmm0
+ punpcklwd xmm0, xmm3 ; 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
+ punpckhwd xmm2, xmm3 ; 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
+
+ movdqa xmm3, xmm6
+ punpcklwd xmm6, xmm5 ; b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
+ punpckhwd xmm3, xmm5 ; f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
+
+ movd [rsi], xmm6 ; write the second 8-line result
+ movd [rdx], xmm3
+ psrldq xmm6, 4
+ psrldq xmm3, 4
+ movd [rdi], xmm6
+ movd [rcx], xmm3
+ psrldq xmm6, 4
+ psrldq xmm3, 4
+ movd [rsi + rax*2], xmm6
+ movd [rdx + rax*2], xmm3
+ psrldq xmm6, 4
+ psrldq xmm3, 4
+ movd [rdi + rax*2], xmm6
+ movd [rcx + rax*2], xmm3
+
+ neg rax
+ lea rsi, [rsi + rax*8]
+ neg rax
+ lea rdi, [rsi + rax]
+ lea rdx, [rsi + rax*4]
+ lea rcx, [rdx + rax]
+
+ movd [rsi], xmm0 ; write the first 8-line result
+ movd [rdx], xmm2
+ psrldq xmm0, 4
+ psrldq xmm2, 4
+ movd [rdi], xmm0
+ movd [rcx], xmm2
+ psrldq xmm0, 4
+ psrldq xmm2, 4
+ movd [rsi + rax*2], xmm0
+ movd [rdx + rax*2], xmm2
+ psrldq xmm0, 4
+ psrldq xmm2, 4
+ movd [rdi + rax*2], xmm0
+ movd [rcx + rax*2], xmm2
+
+ add rsp, 32
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+tfe:
+ times 16 db 0xfe
+align 16
+t80:
+ times 16 db 0x80
+align 16
+t1s:
+ times 16 db 0x01
+align 16
+t3:
+ times 16 db 0x03
+align 16
+t4:
+ times 16 db 0x04
+align 16
+ones:
+ times 8 dw 0x0001
+align 16
+s9:
+ times 8 dw 0x0900
+align 16
+s63:
+ times 8 dw 0x003f
+align 16
+te0:
+ times 16 db 0xe0
+align 16
+t1f:
+ times 16 db 0x1f
diff --git a/media/libvpx/libvpx/vp8/common/x86/loopfilter_x86.c b/media/libvpx/libvpx/vp8/common/x86/loopfilter_x86.c
new file mode 100644
index 0000000000..cfa13a2ddb
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/loopfilter_x86.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8/common/loopfilter.h"
+
+#define prototype_loopfilter(sym) \
+ void sym(unsigned char *src, int pitch, const unsigned char *blimit, \
+ const unsigned char *limit, const unsigned char *thresh, int count)
+
+#define prototype_loopfilter_nc(sym) \
+ void sym(unsigned char *src, int pitch, const unsigned char *blimit, \
+ const unsigned char *limit, const unsigned char *thresh)
+
+#define prototype_simple_loopfilter(sym) \
+ void sym(unsigned char *y, int ystride, const unsigned char *blimit)
+
+#if HAVE_SSE2 && VPX_ARCH_X86_64
+prototype_loopfilter(vp8_loop_filter_bv_y_sse2);
+prototype_loopfilter(vp8_loop_filter_bh_y_sse2);
+#else
+prototype_loopfilter_nc(vp8_loop_filter_vertical_edge_sse2);
+prototype_loopfilter_nc(vp8_loop_filter_horizontal_edge_sse2);
+#endif
+prototype_loopfilter_nc(vp8_mbloop_filter_vertical_edge_sse2);
+prototype_loopfilter_nc(vp8_mbloop_filter_horizontal_edge_sse2);
+
+extern loop_filter_uvfunction vp8_loop_filter_horizontal_edge_uv_sse2;
+extern loop_filter_uvfunction vp8_loop_filter_vertical_edge_uv_sse2;
+extern loop_filter_uvfunction vp8_mbloop_filter_horizontal_edge_uv_sse2;
+extern loop_filter_uvfunction vp8_mbloop_filter_vertical_edge_uv_sse2;
+
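+/* The "B" (sub-block) filters below operate on the interior 4-pixel
+ * edges of a macroblock: rows/columns 4, 8 and 12 for 16x16 luma and
+ * 4 for 8x8 chroma, while the "MB" filters handle the macroblock
+ * boundary itself. */
+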
+/* Horizontal MB filtering */
+#if HAVE_SSE2
+void vp8_loop_filter_mbh_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ vp8_mbloop_filter_horizontal_edge_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr);
+
+ if (u_ptr) {
+ vp8_mbloop_filter_horizontal_edge_uv_sse2(u_ptr, uv_stride, lfi->mblim,
+ lfi->lim, lfi->hev_thr, v_ptr);
+ }
+}
+
+/* Vertical MB Filtering */
+void vp8_loop_filter_mbv_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+ vp8_mbloop_filter_vertical_edge_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim,
+ lfi->hev_thr);
+
+ if (u_ptr) {
+ vp8_mbloop_filter_vertical_edge_uv_sse2(u_ptr, uv_stride, lfi->mblim,
+ lfi->lim, lfi->hev_thr, v_ptr);
+ }
+}
+
+/* Horizontal B Filtering */
+void vp8_loop_filter_bh_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+#if VPX_ARCH_X86_64
+ vp8_loop_filter_bh_y_sse2(y_ptr, y_stride, lfi->blim, lfi->lim, lfi->hev_thr,
+ 2);
+#else
+ vp8_loop_filter_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride,
+ lfi->blim, lfi->lim, lfi->hev_thr);
+ vp8_loop_filter_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride,
+ lfi->blim, lfi->lim, lfi->hev_thr);
+ vp8_loop_filter_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride,
+ lfi->blim, lfi->lim, lfi->hev_thr);
+#endif
+
+ if (u_ptr) {
+ vp8_loop_filter_horizontal_edge_uv_sse2(u_ptr + 4 * uv_stride, uv_stride,
+ lfi->blim, lfi->lim, lfi->hev_thr,
+ v_ptr + 4 * uv_stride);
+ }
+}
+
+void vp8_loop_filter_bhs_sse2(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride,
+ blimit);
+ vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride,
+ blimit);
+ vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride,
+ blimit);
+}
+
+/* Vertical B Filtering */
+void vp8_loop_filter_bv_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
+ unsigned char *v_ptr, int y_stride, int uv_stride,
+ loop_filter_info *lfi) {
+#if VPX_ARCH_X86_64
+ vp8_loop_filter_bv_y_sse2(y_ptr, y_stride, lfi->blim, lfi->lim, lfi->hev_thr,
+ 2);
+#else
+ vp8_loop_filter_vertical_edge_sse2(y_ptr + 4, y_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr);
+ vp8_loop_filter_vertical_edge_sse2(y_ptr + 8, y_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr);
+ vp8_loop_filter_vertical_edge_sse2(y_ptr + 12, y_stride, lfi->blim, lfi->lim,
+ lfi->hev_thr);
+#endif
+
+ if (u_ptr) {
+ vp8_loop_filter_vertical_edge_uv_sse2(u_ptr + 4, uv_stride, lfi->blim,
+ lfi->lim, lfi->hev_thr, v_ptr + 4);
+ }
+}
+
+void vp8_loop_filter_bvs_sse2(unsigned char *y_ptr, int y_stride,
+ const unsigned char *blimit) {
+ vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 4, y_stride, blimit);
+ vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 8, y_stride, blimit);
+ vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 12, y_stride, blimit);
+}
+
+#endif
diff --git a/media/libvpx/libvpx/vp8/common/x86/mfqe_sse2.asm b/media/libvpx/libvpx/vp8/common/x86/mfqe_sse2.asm
new file mode 100644
index 0000000000..3ec2a99ec2
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/mfqe_sse2.asm
@@ -0,0 +1,289 @@
+;
+; Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+SECTION .text
+
+;void vp8_filter_by_weight16x16_sse2
+;(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride,
+; int src_weight
+;)
+globalsym(vp8_filter_by_weight16x16_sse2)
+sym(vp8_filter_by_weight16x16_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 6
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movd xmm0, arg(4) ; src_weight
+ pshuflw xmm0, xmm0, 0x0 ; replicate to all low words
+ punpcklqdq xmm0, xmm0 ; replicate to all hi words
+
+ movdqa xmm1, [GLOBAL(tMFQE)]
+ psubw xmm1, xmm0 ; dst_weight
+
+ mov rax, arg(0) ; src
+ mov rsi, arg(1) ; src_stride
+ mov rdx, arg(2) ; dst
+ mov rdi, arg(3) ; dst_stride
+
+ mov rcx, 16 ; loop count
+ pxor xmm6, xmm6
+
+.combine:
+ movdqa xmm2, [rax]
+ movdqa xmm4, [rdx]
+ add rax, rsi
+
+ ; src * src_weight
+ movdqa xmm3, xmm2
+ punpcklbw xmm2, xmm6
+ punpckhbw xmm3, xmm6
+ pmullw xmm2, xmm0
+ pmullw xmm3, xmm0
+
+ ; dst * dst_weight
+ movdqa xmm5, xmm4
+ punpcklbw xmm4, xmm6
+ punpckhbw xmm5, xmm6
+ pmullw xmm4, xmm1
+ pmullw xmm5, xmm1
+
+ ; sum, round and shift
+ paddw xmm2, xmm4
+ paddw xmm3, xmm5
+ paddw xmm2, [GLOBAL(tMFQE_round)]
+ paddw xmm3, [GLOBAL(tMFQE_round)]
+ psrlw xmm2, 4
+ psrlw xmm3, 4
+
+ packuswb xmm2, xmm3
+ movdqa [rdx], xmm2
+ add rdx, rdi
+
+ dec rcx
+ jnz .combine
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+
+ ret
+
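+; Per-pixel C semantics of the blend above (MFQE_PRECISION is 4, so the
+; two weights sum to 16 == tMFQE and tMFQE_round == 8; compare
+; filter_by_weight() in vp8/common/mfqe.c):
+;
+;   /* minimal sketch; src_weight is assumed to lie in [0, 16] */
+;   static unsigned char blend(unsigned char src, unsigned char dst,
+;                              int src_weight) {
+;     int dst_weight = 16 - src_weight;
+;     return (unsigned char)((src * src_weight + dst * dst_weight + 8) >> 4);
+;   }
+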
+;void vp8_filter_by_weight8x8_sse2
+;(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride,
+; int src_weight
+;)
+globalsym(vp8_filter_by_weight8x8_sse2)
+sym(vp8_filter_by_weight8x8_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movd xmm0, arg(4) ; src_weight
+ pshuflw xmm0, xmm0, 0x0 ; replicate to all low words
+ punpcklqdq xmm0, xmm0 ; replicate to all hi words
+
+ movdqa xmm1, [GLOBAL(tMFQE)]
+ psubw xmm1, xmm0 ; dst_weight
+
+ mov rax, arg(0) ; src
+ mov rsi, arg(1) ; src_stride
+ mov rdx, arg(2) ; dst
+ mov rdi, arg(3) ; dst_stride
+
+ mov rcx, 8 ; loop count
+ pxor xmm4, xmm4
+
+.combine:
+ movq xmm2, [rax]
+ movq xmm3, [rdx]
+ add rax, rsi
+
+ ; src * src_weight
+ punpcklbw xmm2, xmm4
+ pmullw xmm2, xmm0
+
+ ; dst * dst_weight
+ punpcklbw xmm3, xmm4
+ pmullw xmm3, xmm1
+
+ ; sum, round and shift
+ paddw xmm2, xmm3
+ paddw xmm2, [GLOBAL(tMFQE_round)]
+ psrlw xmm2, 4
+
+ packuswb xmm2, xmm4
+ movq [rdx], xmm2
+ add rdx, rdi
+
+ dec rcx
+ jnz .combine
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+
+ ret
+
+;void vp8_variance_and_sad_16x16_sse2 | arg
+;(
+; unsigned char *src1, 0
+; int stride1, 1
+; unsigned char *src2, 2
+; int stride2, 3
+; unsigned int *variance, 4
+; unsigned int *sad, 5
+;)
+globalsym(vp8_variance_and_sad_16x16_sse2)
+sym(vp8_variance_and_sad_16x16_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rax, arg(0) ; src1
+ mov rcx, arg(1) ; stride1
+ mov rdx, arg(2) ; src2
+ mov rdi, arg(3) ; stride2
+
+ mov rsi, 16 ; block height
+
+ ; Prep accumulator registers
+ pxor xmm3, xmm3 ; SAD
+ pxor xmm4, xmm4 ; sum of src2
+ pxor xmm5, xmm5 ; sum of src2^2
+
+ ; Because we're working with the actual output frames
+ ; we can't depend on any kind of data alignment.
+.accumulate:
+ movdqa xmm0, [rax] ; src1
+ movdqa xmm1, [rdx] ; src2
+ add rax, rcx ; src1 + stride1
+ add rdx, rdi ; src2 + stride2
+
+ ; SAD(src1, src2)
+ psadbw xmm0, xmm1
+ paddusw xmm3, xmm0
+
+ ; SUM(src2)
+ pxor xmm2, xmm2
+ psadbw xmm2, xmm1 ; sum src2 by misusing SAD against 0
+ paddusw xmm4, xmm2
+
+ ; pmaddubsw would be ideal if it took two unsigned values. Instead,
+ ; it expects one signed and one unsigned value, so we zero-extend
+ ; the bytes and operate on words.
+ pxor xmm2, xmm2
+ movdqa xmm0, xmm1
+ punpcklbw xmm0, xmm2
+ punpckhbw xmm1, xmm2
+ pmaddwd xmm0, xmm0
+ pmaddwd xmm1, xmm1
+ paddd xmm5, xmm0
+ paddd xmm5, xmm1
+
+ sub rsi, 1
+ jnz .accumulate
+
+ ; phaddd only operates on adjacent double words.
+ ; Finalize SAD and store
+ movdqa xmm0, xmm3
+ psrldq xmm0, 8
+ paddusw xmm0, xmm3
+ paddd xmm0, [GLOBAL(t128)]
+ psrld xmm0, 8
+
+ mov rax, arg(5)
+ movd [rax], xmm0
+
+ ; Accumulate sum of src2
+ movdqa xmm0, xmm4
+ psrldq xmm0, 8
+ paddusw xmm0, xmm4
+ ; Square src2. Ignore high value
+ pmuludq xmm0, xmm0
+ psrld xmm0, 8
+
+ ; phaddw could be used to sum adjacent values, but we want all of the
+ ; values summed. Promote to double words, accumulate, shift and sum.
+ pxor xmm2, xmm2
+ movdqa xmm1, xmm5
+ punpckldq xmm1, xmm2
+ punpckhdq xmm5, xmm2
+ paddd xmm1, xmm5
+ movdqa xmm2, xmm1
+ psrldq xmm1, 8
+ paddd xmm1, xmm2
+
+ psubd xmm1, xmm0
+
+ ; (variance + 128) >> 8
+ paddd xmm1, [GLOBAL(t128)]
+ psrld xmm1, 8
+ mov rax, arg(4)
+
+ movd [rax], xmm1
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
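+; C semantics of the two outputs computed above for a 16x16 block
+; (N = 256 pixels, both normalized with round-to-nearest; a minimal
+; sketch of the math that ignores the row strides):
+;
+;   unsigned int i, sad = 0, sum = 0, sse = 0;
+;   for (i = 0; i < 256; ++i) {
+;     sad += abs(src1[i] - src2[i]);
+;     sum += src2[i];              /* stats are taken on src2 only */
+;     sse += src2[i] * src2[i];
+;   }
+;   *sad_out      = (sad + 128) >> 8;
+;   *variance_out = (sse - ((sum * sum) >> 8) + 128) >> 8;
+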
+SECTION_RODATA
+align 16
+t128:
+%ifndef __NASM_VER__
+ ddq 128
+%elif CONFIG_BIG_ENDIAN
+ dq 0, 128
+%else
+ dq 128, 0
+%endif
+align 16
+tMFQE: ; 1 << MFQE_PRECISION
+ times 8 dw 0x10
+align 16
+tMFQE_round: ; 1 << (MFQE_PRECISION - 1)
+ times 8 dw 0x08
+
diff --git a/media/libvpx/libvpx/vp8/common/x86/recon_mmx.asm b/media/libvpx/libvpx/vp8/common/x86/recon_mmx.asm
new file mode 100644
index 0000000000..01cf066837
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/recon_mmx.asm
@@ -0,0 +1,120 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+SECTION .text
+
+;void copy_mem8x8_mmx(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride
+; )
+globalsym(vp8_copy_mem8x8_mmx)
+sym(vp8_copy_mem8x8_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src;
+ movq mm0, [rsi]
+
+ movsxd rax, dword ptr arg(1) ;src_stride;
+ mov rdi, arg(2) ;dst;
+
+ movq mm1, [rsi+rax]
+ movq mm2, [rsi+rax*2]
+
+ movsxd rcx, dword ptr arg(3) ;dst_stride
+ lea rsi, [rsi+rax*2]
+
+ movq [rdi], mm0
+ add rsi, rax
+
+ movq [rdi+rcx], mm1
+ movq [rdi+rcx*2], mm2
+
+
+ lea rdi, [rdi+rcx*2]
+ movq mm3, [rsi]
+
+ add rdi, rcx
+ movq mm4, [rsi+rax]
+
+ movq mm5, [rsi+rax*2]
+ movq [rdi], mm3
+
+ lea rsi, [rsi+rax*2]
+ movq [rdi+rcx], mm4
+
+ movq [rdi+rcx*2], mm5
+ lea rdi, [rdi+rcx*2]
+
+ movq mm0, [rsi+rax]
+ movq mm1, [rsi+rax*2]
+
+ movq [rdi+rcx], mm0
+ movq [rdi+rcx*2],mm1
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void copy_mem8x4_mmx(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride
+; )
+globalsym(vp8_copy_mem8x4_mmx)
+sym(vp8_copy_mem8x4_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src;
+ movq mm0, [rsi]
+
+ movsxd rax, dword ptr arg(1) ;src_stride;
+ mov rdi, arg(2) ;dst;
+
+ movq mm1, [rsi+rax]
+ movq mm2, [rsi+rax*2]
+
+ movsxd rcx, dword ptr arg(3) ;dst_stride
+ lea rsi, [rsi+rax*2]
+
+ movq [rdi], mm0
+ movq [rdi+rcx], mm1
+
+ movq [rdi+rcx*2], mm2
+ lea rdi, [rdi+rcx*2]
+
+ movq mm3, [rsi+rax]
+ movq [rdi+rcx], mm3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/media/libvpx/libvpx/vp8/common/x86/recon_sse2.asm b/media/libvpx/libvpx/vp8/common/x86/recon_sse2.asm
new file mode 100644
index 0000000000..17baf094ef
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/recon_sse2.asm
@@ -0,0 +1,118 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+SECTION .text
+
+;void copy_mem16x16_sse2(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride
+; )
+globalsym(vp8_copy_mem16x16_sse2)
+sym(vp8_copy_mem16x16_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src;
+ movdqu xmm0, [rsi]
+
+ movsxd rax, dword ptr arg(1) ;src_stride;
+ mov rdi, arg(2) ;dst;
+
+ movdqu xmm1, [rsi+rax]
+ movdqu xmm2, [rsi+rax*2]
+
+ movsxd rcx, dword ptr arg(3) ;dst_stride
+ lea rsi, [rsi+rax*2]
+
+ movdqa [rdi], xmm0
+ add rsi, rax
+
+ movdqa [rdi+rcx], xmm1
+ movdqa [rdi+rcx*2],xmm2
+
+ lea rdi, [rdi+rcx*2]
+ movdqu xmm3, [rsi]
+
+ add rdi, rcx
+ movdqu xmm4, [rsi+rax]
+
+ movdqu xmm5, [rsi+rax*2]
+ lea rsi, [rsi+rax*2]
+
+ movdqa [rdi], xmm3
+ add rsi, rax
+
+ movdqa [rdi+rcx], xmm4
+ movdqa [rdi+rcx*2],xmm5
+
+ lea rdi, [rdi+rcx*2]
+ movdqu xmm0, [rsi]
+
+ add rdi, rcx
+ movdqu xmm1, [rsi+rax]
+
+ movdqu xmm2, [rsi+rax*2]
+ lea rsi, [rsi+rax*2]
+
+ movdqa [rdi], xmm0
+ add rsi, rax
+
+ movdqa [rdi+rcx], xmm1
+
+ movdqa [rdi+rcx*2], xmm2
+ movdqu xmm3, [rsi]
+
+ movdqu xmm4, [rsi+rax]
+ lea rdi, [rdi+rcx*2]
+
+ add rdi, rcx
+ movdqu xmm5, [rsi+rax*2]
+
+ lea rsi, [rsi+rax*2]
+ movdqa [rdi], xmm3
+
+ add rsi, rax
+ movdqa [rdi+rcx], xmm4
+
+ movdqa [rdi+rcx*2],xmm5
+ movdqu xmm0, [rsi]
+
+ lea rdi, [rdi+rcx*2]
+ movdqu xmm1, [rsi+rax]
+
+ add rdi, rcx
+ movdqu xmm2, [rsi+rax*2]
+
+ lea rsi, [rsi+rax*2]
+ movdqa [rdi], xmm0
+
+ movdqa [rdi+rcx], xmm1
+ movdqa [rdi+rcx*2],xmm2
+
+ movdqu xmm3, [rsi+rax]
+ lea rdi, [rdi+rcx*2]
+
+ movdqa [rdi+rcx], xmm3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
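+
+; Note the asymmetry above: loads use movdqu while stores use movdqa. The
+; source is an arbitrary (possibly unaligned) position in a reference
+; frame, whereas the destination is assumed to be a 16-byte-aligned
+; prediction buffer, so only the loads pay the unaligned penalty.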
diff --git a/media/libvpx/libvpx/vp8/common/x86/subpixel_mmx.asm b/media/libvpx/libvpx/vp8/common/x86/subpixel_mmx.asm
new file mode 100644
index 0000000000..8f0f6fcc89
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/subpixel_mmx.asm
@@ -0,0 +1,270 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%define BLOCK_HEIGHT_WIDTH 4
+%define vp8_filter_weight 128
+%define VP8_FILTER_SHIFT 7
+
+SECTION .text
+
+;void vp8_filter_block1d_h6_mmx
+;(
+; unsigned char *src_ptr,
+; unsigned short *output_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned int pixel_step,
+; unsigned int output_height,
+; unsigned int output_width,
+; short * vp8_filter
+;)
+globalsym(vp8_filter_block1d_h6_mmx)
+sym(vp8_filter_block1d_h6_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rdx, arg(6) ;vp8_filter
+
+ movq mm1, [rdx + 16] ; do both the negative taps first!!!
+ movq mm2, [rdx + 32] ;
+ movq mm6, [rdx + 48] ;
+ movq mm7, [rdx + 64] ;
+
+ mov rdi, arg(1) ;output_ptr
+ mov rsi, arg(0) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;output_height
+ movsxd rax, dword ptr arg(5) ;output_width, used as the destination pitch
+ pxor mm0, mm0 ; mm0 = 00000000
+
+.nextrow:
+ movq mm3, [rsi-2] ; mm3 = p-2..p5
+ movq mm4, mm3 ; mm4 = p-2..p5
+ psrlq mm3, 8 ; mm3 = p-1..p5
+ punpcklbw mm3, mm0 ; mm3 = p-1..p2
+ pmullw mm3, mm1 ; mm3 *= kernel 1 modifiers.
+
+ movq mm5, mm4 ; mm5 = p-2..p5
+ punpckhbw mm4, mm0 ; mm4 = p2..p5
+ pmullw mm4, mm7 ; mm4 *= kernel 4 modifiers
+ paddsw mm3, mm4 ; mm3 += mm4
+
+ movq mm4, mm5 ; mm4 = p-2..p5;
+ psrlq mm5, 16 ; mm5 = p0..p5;
+ punpcklbw mm5, mm0 ; mm5 = p0..p3
+ pmullw mm5, mm2 ; mm5 *= kernel 2 modifiers
+ paddsw mm3, mm5 ; mm3 += mm5
+
+ movq mm5, mm4 ; mm5 = p-2..p5
+ psrlq mm4, 24 ; mm4 = p1..p5
+ punpcklbw mm4, mm0 ; mm4 = p1..p4
+ pmullw mm4, mm6 ; mm4 *= kernel 3 modifiers
+ paddsw mm3, mm4 ; mm3 += mm4
+
+ ; do outer positive taps
+ movd mm4, [rsi+3]
+ punpcklbw mm4, mm0 ; mm4 = p3..p6
+ pmullw mm4, [rdx+80] ; mm4 *= kernel 5 modifiers
+ paddsw mm3, mm4 ; mm3 += mm4
+
+ punpcklbw mm5, mm0 ; mm5 = p-2..p1
+ pmullw mm5, [rdx] ; mm5 *= kernel 0 modifiers
+ paddsw mm3, mm5 ; mm3 += mm5
+
+ paddsw mm3, [GLOBAL(rd)] ; mm3 += round value
+ psraw mm3, VP8_FILTER_SHIFT ; mm3 /= 128
+ packuswb mm3, mm0 ; pack and unpack to saturate
+ punpcklbw mm3, mm0 ;
+
+ movq [rdi], mm3 ; store the results in the destination
+
+%if ABI_IS_32BIT
+ add rsi, dword ptr arg(2) ;src_pixels_per_line ; next line
+ add rdi, rax;
+%else
+ movsxd r8, dword ptr arg(2) ;src_pixels_per_line
+ add rdi, rax;
+
+ add rsi, r8 ; next line
+%endif
+
+ dec rcx ; decrement count
+ jnz .nextrow ; next row
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
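+; Per-output-pixel C semantics of the 6-tap pass above (the taps are one
+; row of vp8_six_tap_x86 below; VP8_FILTER_SHIFT is 7 and rd holds the
+; rounding term 64):
+;
+;   static unsigned char clamp255(int v) {
+;     return (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v);
+;   }
+;   /* minimal sketch: src points at the output pixel position and
+;      taps[] is one 6-entry filter row */
+;   static unsigned char sixtap(const unsigned char *src, const short *taps) {
+;     int i, sum = 0;
+;     for (i = 0; i < 6; ++i) sum += src[i - 2] * taps[i];
+;     return clamp255((sum + 64) >> 7);
+;   }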
+
+;void vp8_filter_block1dc_v6_mmx
+;(
+; short *src_ptr,
+; unsigned char *output_ptr,
+; int output_pitch,
+; unsigned int pixels_per_line,
+; unsigned int pixel_step,
+; unsigned int output_height,
+; unsigned int output_width,
+; short * vp8_filter
+;)
+globalsym(vp8_filter_block1dc_v6_mmx)
+sym(vp8_filter_block1dc_v6_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movq mm5, [GLOBAL(rd)]
+ push rbx
+ mov rbx, arg(7) ;vp8_filter
+ movq mm1, [rbx + 16] ; do both the negative taps first!!!
+ movq mm2, [rbx + 32] ;
+ movq mm6, [rbx + 48] ;
+ movq mm7, [rbx + 64] ;
+
+ movsxd rdx, dword ptr arg(3) ;pixels_per_line
+ mov rdi, arg(1) ;output_ptr
+ mov rsi, arg(0) ;src_ptr
+ sub rsi, rdx
+ sub rsi, rdx
+ movsxd rcx, DWORD PTR arg(5) ;output_height
+ movsxd rax, DWORD PTR arg(2) ;output_pitch
+ pxor mm0, mm0 ; mm0 = 00000000
+
+
+.nextrow_cv:
+ movq mm3, [rsi+rdx] ; mm3 = p0..p8 = row -1
+ pmullw mm3, mm1 ; mm3 *= kernel 1 modifiers.
+
+
+ movq mm4, [rsi + 4*rdx] ; mm4 = p0..p3 = row 2
+ pmullw mm4, mm7 ; mm4 *= kernel 4 modifiers.
+ paddsw mm3, mm4 ; mm3 += mm4
+
+ movq mm4, [rsi + 2*rdx] ; mm4 = p0..p3 = row 0
+ pmullw mm4, mm2 ; mm4 *= kernel 2 modifiers.
+ paddsw mm3, mm4 ; mm3 += mm4
+
+ movq mm4, [rsi] ; mm4 = p0..p3 = row -2
+ pmullw mm4, [rbx] ; mm4 *= kernel 0 modifiers.
+ paddsw mm3, mm4 ; mm3 += mm4
+
+
+ add rsi, rdx ; move source forward 1 line to avoid 3 * pitch
+ movq mm4, [rsi + 2*rdx] ; mm4 = p0..p3 = row 1
+ pmullw mm4, mm6 ; mm4 *= kernel 3 modifiers.
+ paddsw mm3, mm4 ; mm3 += mm4
+
+ movq mm4, [rsi + 4*rdx] ; mm4 = p0..p3 = row 3
+ pmullw mm4, [rbx +80] ; mm4 *= kernel 5 modifiers.
+ paddsw mm3, mm4 ; mm3 += mm4
+
+
+ paddsw mm3, mm5 ; mm3 += round value
+ psraw mm3, VP8_FILTER_SHIFT ; mm3 /= 128
+ packuswb mm3, mm0 ; pack and saturate
+
+ movd [rdi],mm3 ; store the results in the destination
+ ; The subsequent iterations repeat 3 out of 4 of these reads. Since the
+ ; recon block should be in cache this shouldn't cost much, but it is
+ ; obviously avoidable.
+ lea rdi, [rdi+rax] ;
+ dec rcx ; decrement count
+ jnz .nextrow_cv ; next row
+
+ pop rbx
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+SECTION_RODATA
+align 16
+rd:
+ times 4 dw 0x40
+
+align 16
+global HIDDEN_DATA(sym(vp8_six_tap_x86))
+sym(vp8_six_tap_x86):
+ times 8 dw 0
+ times 8 dw 0
+ times 8 dw 128
+ times 8 dw 0
+ times 8 dw 0
+ times 8 dw 0
+
+ times 8 dw 0
+ times 8 dw -6
+ times 8 dw 123
+ times 8 dw 12
+ times 8 dw -1
+ times 8 dw 0
+
+ times 8 dw 2
+ times 8 dw -11
+ times 8 dw 108
+ times 8 dw 36
+ times 8 dw -8
+ times 8 dw 1
+
+ times 8 dw 0
+ times 8 dw -9
+ times 8 dw 93
+ times 8 dw 50
+ times 8 dw -6
+ times 8 dw 0
+
+ times 8 dw 3
+ times 8 dw -16
+ times 8 dw 77
+ times 8 dw 77
+ times 8 dw -16
+ times 8 dw 3
+
+ times 8 dw 0
+ times 8 dw -6
+ times 8 dw 50
+ times 8 dw 93
+ times 8 dw -9
+ times 8 dw 0
+
+ times 8 dw 1
+ times 8 dw -8
+ times 8 dw 36
+ times 8 dw 108
+ times 8 dw -11
+ times 8 dw 2
+
+ times 8 dw 0
+ times 8 dw -1
+ times 8 dw 12
+ times 8 dw 123
+ times 8 dw -6
+ times 8 dw 0
+
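+; The eight groups above are the 6-tap kernels for the eight subpel
+; positions (0/8 .. 7/8). Each tap is stored eight times so one aligned
+; load broadcasts it across all SIMD lanes, and every kernel's taps sum
+; to 128 (VP8_FILTER_WEIGHT), which the shift by VP8_FILTER_SHIFT undoes.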
+
diff --git a/media/libvpx/libvpx/vp8/common/x86/subpixel_sse2.asm b/media/libvpx/libvpx/vp8/common/x86/subpixel_sse2.asm
new file mode 100644
index 0000000000..94e14aed6c
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/subpixel_sse2.asm
@@ -0,0 +1,963 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%define BLOCK_HEIGHT_WIDTH 4
+%define VP8_FILTER_WEIGHT 128
+%define VP8_FILTER_SHIFT 7
+
+SECTION .text
+
+;/************************************************************************************
+; Notes: filter_block1d8_h6 applies a 6-tap filter horizontally to the input pixels. The
+; input pixel array has output_height rows. This routine assumes that output_height is an
+; even number. This function handles 8 pixels in the horizontal direction, calculating one
+; row per iteration to take advantage of the 128-bit operations.
+;*************************************************************************************/
+;void vp8_filter_block1d8_h6_sse2
+;(
+; unsigned char *src_ptr,
+; unsigned short *output_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned int pixel_step,
+; unsigned int output_height,
+; unsigned int output_width,
+; short *vp8_filter
+;)
+globalsym(vp8_filter_block1d8_h6_sse2)
+sym(vp8_filter_block1d8_h6_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rdx, arg(6) ;vp8_filter
+ mov rsi, arg(0) ;src_ptr
+
+ mov rdi, arg(1) ;output_ptr
+
+ movsxd rcx, dword ptr arg(4) ;output_height
+ movsxd rax, dword ptr arg(2) ;src_pixels_per_line ; Pitch for Source
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(5) ;output_width
+%endif
+ pxor xmm0, xmm0 ; clear xmm0 for unpack
+
+.filter_block1d8_h6_rowloop:
+ movq xmm3, MMWORD PTR [rsi - 2]
+ movq xmm1, MMWORD PTR [rsi + 6]
+
+ prefetcht2 [rsi+rax-2]
+
+ pslldq xmm1, 8
+ por xmm1, xmm3
+
+ movdqa xmm4, xmm1
+ movdqa xmm5, xmm1
+
+ movdqa xmm6, xmm1
+ movdqa xmm7, xmm1
+
+ punpcklbw xmm3, xmm0 ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
+ psrldq xmm4, 1 ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1
+
+ pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
+ punpcklbw xmm4, xmm0 ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1
+
+ psrldq xmm5, 2 ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
+ pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
+
+
+ punpcklbw xmm5, xmm0 ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
+ psrldq xmm6, 3 ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01
+
+ pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
+
+ punpcklbw xmm6, xmm0 ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
+ psrldq xmm7, 4 ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02
+
+ pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
+
+ punpcklbw xmm7, xmm0 ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
+ psrldq xmm1, 5 ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03
+
+
+ pmullw xmm7, [rdx+64] ; x[ 2] * h[ 2] ; Tap 5
+
+ punpcklbw xmm1, xmm0 ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
+ pmullw xmm1, [rdx+80] ; x[ 3] * h[ 3] ; Tap 6
+
+
+ paddsw xmm4, xmm7
+ paddsw xmm4, xmm5
+
+ paddsw xmm4, xmm3
+ paddsw xmm4, xmm6
+
+ paddsw xmm4, xmm1
+ paddsw xmm4, [GLOBAL(rd)]
+
+ psraw xmm4, 7
+
+ packuswb xmm4, xmm0
+ punpcklbw xmm4, xmm0
+
+ movdqa XMMWORD Ptr [rdi], xmm4
+ lea rsi, [rsi + rax]
+
+%if ABI_IS_32BIT
+ add rdi, DWORD Ptr arg(5) ;[output_width]
+%else
+ add rdi, r8
+%endif
+ dec rcx
+
+ jnz .filter_block1d8_h6_rowloop ; next row
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_filter_block1d16_h6_sse2
+;(
+; unsigned char *src_ptr,
+; unsigned short *output_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned int pixel_step,
+; unsigned int output_height,
+; unsigned int output_width,
+; short *vp8_filter
+;)
+;/************************************************************************************
+; Notes: filter_block1d16_h6 applies a 6-tap filter horizontally to the input pixels. The
+; input pixel array has output_height rows. This routine assumes that output_height is an
+; even number. This function handles 16 pixels in the horizontal direction (as two 8-pixel
+; halves), calculating one row per iteration to take advantage of the 128-bit operations.
+;*************************************************************************************/
+globalsym(vp8_filter_block1d16_h6_sse2)
+sym(vp8_filter_block1d16_h6_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rdx, arg(6) ;vp8_filter
+ mov rsi, arg(0) ;src_ptr
+
+ mov rdi, arg(1) ;output_ptr
+
+ movsxd rcx, dword ptr arg(4) ;output_height
+ movsxd rax, dword ptr arg(2) ;src_pixels_per_line ; Pitch for Source
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(5) ;output_width
+%endif
+
+ pxor xmm0, xmm0 ; clear xmm0 for unpack
+
+.filter_block1d16_h6_sse2_rowloop:
+ movq xmm3, MMWORD PTR [rsi - 2]
+ movq xmm1, MMWORD PTR [rsi + 6]
+
+ ; Load from 11 to avoid reading out of bounds.
+ movq xmm2, MMWORD PTR [rsi +11]
+ ; The lower bits are not cleared before 'or'ing with xmm1,
+ ; but that is OK because the values in the overlapping positions
+ ; are already equal to the ones in xmm1.
+ pslldq xmm2, 5
+
+ por xmm2, xmm1
+ prefetcht2 [rsi+rax-2]
+
+ pslldq xmm1, 8
+ por xmm1, xmm3
+
+ movdqa xmm4, xmm1
+ movdqa xmm5, xmm1
+
+ movdqa xmm6, xmm1
+ movdqa xmm7, xmm1
+
+ punpcklbw xmm3, xmm0 ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
+ psrldq xmm4, 1 ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1
+
+ pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
+ punpcklbw xmm4, xmm0 ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1
+
+ psrldq xmm5, 2 ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
+ pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
+
+
+ punpcklbw xmm5, xmm0 ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
+ psrldq xmm6, 3 ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01
+
+ pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
+
+ punpcklbw xmm6, xmm0 ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
+ psrldq xmm7, 4 ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02
+
+ pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
+
+ punpcklbw xmm7, xmm0 ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
+ psrldq xmm1, 5 ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03
+
+
+ pmullw xmm7, [rdx+64] ; x[ 2] * h[ 2] ; Tap 5
+
+ punpcklbw xmm1, xmm0 ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
+ pmullw xmm1, [rdx+80] ; x[ 3] * h[ 3] ; Tap 6
+
+ paddsw xmm4, xmm7
+ paddsw xmm4, xmm5
+
+ paddsw xmm4, xmm3
+ paddsw xmm4, xmm6
+
+ paddsw xmm4, xmm1
+ paddsw xmm4, [GLOBAL(rd)]
+
+ psraw xmm4, 7
+
+ packuswb xmm4, xmm0
+ punpcklbw xmm4, xmm0
+
+ movdqa XMMWORD Ptr [rdi], xmm4
+
+ movdqa xmm3, xmm2
+ movdqa xmm4, xmm2
+
+ movdqa xmm5, xmm2
+ movdqa xmm6, xmm2
+
+ movdqa xmm7, xmm2
+
+ punpcklbw xmm3, xmm0 ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
+ psrldq xmm4, 1 ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1
+
+ pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
+ punpcklbw xmm4, xmm0 ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1
+
+ psrldq xmm5, 2 ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
+ pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
+
+
+ punpcklbw xmm5, xmm0 ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
+ psrldq xmm6, 3 ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01
+
+ pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
+
+ punpcklbw xmm6, xmm0 ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
+ psrldq xmm7, 4 ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02
+
+ pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
+
+ punpcklbw xmm7, xmm0 ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
+ psrldq xmm2, 5 ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03
+
+ pmullw xmm7, [rdx+64] ; x[ 2] * h[ 2] ; Tap 5
+
+ punpcklbw xmm2, xmm0 ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
+ pmullw xmm2, [rdx+80] ; x[ 3] * h[ 3] ; Tap 6
+
+
+ paddsw xmm4, xmm7
+ paddsw xmm4, xmm5
+
+ paddsw xmm4, xmm3
+ paddsw xmm4, xmm6
+
+ paddsw xmm4, xmm2
+ paddsw xmm4, [GLOBAL(rd)]
+
+ psraw xmm4, 7
+
+ packuswb xmm4, xmm0
+ punpcklbw xmm4, xmm0
+
+ movdqa XMMWORD Ptr [rdi+16], xmm4
+
+ lea rsi, [rsi + rax]
+%if ABI_IS_32BIT
+ add rdi, DWORD Ptr arg(5) ;[output_width]
+%else
+ add rdi, r8
+%endif
+
+ dec rcx
+ jnz .filter_block1d16_h6_sse2_rowloop ; next row
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_filter_block1d8_v6_sse2
+;(
+; short *src_ptr,
+; unsigned char *output_ptr,
+; int dst_pitch,
+; unsigned int pixels_per_line,
+; unsigned int pixel_step,
+; unsigned int output_height,
+; unsigned int output_width,
+; short * vp8_filter
+;)
+;/************************************************************************************
+; Notes: filter_block1d8_v6 applies a 6-tap filter vertically to the input pixels. The
+; input pixel array has output_height rows.
+;*************************************************************************************/
+globalsym(vp8_filter_block1d8_v6_sse2)
+sym(vp8_filter_block1d8_v6_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rax, arg(7) ;vp8_filter
+ movsxd rdx, dword ptr arg(3) ;pixels_per_line
+
+ mov rdi, arg(1) ;output_ptr
+ mov rsi, arg(0) ;src_ptr
+
+ sub rsi, rdx
+ sub rsi, rdx
+
+ movsxd rcx, DWORD PTR arg(5) ;[output_height]
+ pxor xmm0, xmm0 ; clear xmm0
+
+ movdqa xmm7, XMMWORD PTR [GLOBAL(rd)]
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(2) ; dst_pitch
+%endif
+
+.vp8_filter_block1d8_v6_sse2_loop:
+ movdqa xmm1, XMMWORD PTR [rsi]
+ pmullw xmm1, [rax]
+
+ movdqa xmm2, XMMWORD PTR [rsi + rdx]
+ pmullw xmm2, [rax + 16]
+
+ movdqa xmm3, XMMWORD PTR [rsi + rdx * 2]
+ pmullw xmm3, [rax + 32]
+
+ movdqa xmm5, XMMWORD PTR [rsi + rdx * 4]
+ pmullw xmm5, [rax + 64]
+
+ add rsi, rdx
+ movdqa xmm4, XMMWORD PTR [rsi + rdx * 2]
+
+ pmullw xmm4, [rax + 48]
+ movdqa xmm6, XMMWORD PTR [rsi + rdx * 4]
+
+ pmullw xmm6, [rax + 80]
+
+ paddsw xmm2, xmm5
+ paddsw xmm2, xmm3
+
+ paddsw xmm2, xmm1
+ paddsw xmm2, xmm4
+
+ paddsw xmm2, xmm6
+ paddsw xmm2, xmm7
+
+ psraw xmm2, 7
+ packuswb xmm2, xmm0 ; pack and saturate
+
+ movq QWORD PTR [rdi], xmm2 ; store the results in the destination
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(2) ;[dst_pitch]
+%else
+ add rdi, r8
+%endif
+ dec rcx ; decrement count
+ jnz .vp8_filter_block1d8_v6_sse2_loop ; next row
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_filter_block1d16_v6_sse2
+;(
+; unsigned short *src_ptr,
+; unsigned char *output_ptr,
+; int dst_pitch,
+; unsigned int pixels_per_line,
+; unsigned int pixel_step,
+; unsigned int output_height,
+; unsigned int output_width,
+; const short *vp8_filter
+;)
+;/************************************************************************************
+; Notes: filter_block1d16_v6 applies a 6-tap filter vertically to the input pixels. The
+; input pixel array has output_height rows.
+;*************************************************************************************/
+globalsym(vp8_filter_block1d16_v6_sse2)
+sym(vp8_filter_block1d16_v6_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rax, arg(7) ;vp8_filter
+ movsxd rdx, dword ptr arg(3) ;pixels_per_line
+
+ mov rdi, arg(1) ;output_ptr
+ mov rsi, arg(0) ;src_ptr
+
+ sub rsi, rdx
+ sub rsi, rdx
+
+ movsxd rcx, DWORD PTR arg(5) ;[output_height]
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(2) ; dst_pitch
+%endif
+
+.vp8_filter_block1d16_v6_sse2_loop:
+; The order for adding the six taps is 2 5 3 1 4 6. Read in the data in that order.
+ movdqa xmm1, XMMWORD PTR [rsi + rdx] ; line 2
+ movdqa xmm2, XMMWORD PTR [rsi + rdx + 16]
+ pmullw xmm1, [rax + 16]
+ pmullw xmm2, [rax + 16]
+
+ movdqa xmm3, XMMWORD PTR [rsi + rdx * 4] ; line 5
+ movdqa xmm4, XMMWORD PTR [rsi + rdx * 4 + 16]
+ pmullw xmm3, [rax + 64]
+ pmullw xmm4, [rax + 64]
+
+ movdqa xmm5, XMMWORD PTR [rsi + rdx * 2] ; line 3
+ movdqa xmm6, XMMWORD PTR [rsi + rdx * 2 + 16]
+ pmullw xmm5, [rax + 32]
+ pmullw xmm6, [rax + 32]
+
+ movdqa xmm7, XMMWORD PTR [rsi] ; line 1
+ movdqa xmm0, XMMWORD PTR [rsi + 16]
+ pmullw xmm7, [rax]
+ pmullw xmm0, [rax]
+
+ paddsw xmm1, xmm3
+ paddsw xmm2, xmm4
+ paddsw xmm1, xmm5
+ paddsw xmm2, xmm6
+ paddsw xmm1, xmm7
+ paddsw xmm2, xmm0
+
+ add rsi, rdx
+
+ movdqa xmm3, XMMWORD PTR [rsi + rdx * 2] ; line 4
+ movdqa xmm4, XMMWORD PTR [rsi + rdx * 2 + 16]
+ pmullw xmm3, [rax + 48]
+ pmullw xmm4, [rax + 48]
+
+ movdqa xmm5, XMMWORD PTR [rsi + rdx * 4] ; line 6
+ movdqa xmm6, XMMWORD PTR [rsi + rdx * 4 + 16]
+ pmullw xmm5, [rax + 80]
+ pmullw xmm6, [rax + 80]
+
+ movdqa xmm7, XMMWORD PTR [GLOBAL(rd)]
+ pxor xmm0, xmm0 ; clear xmm0
+
+ paddsw xmm1, xmm3
+ paddsw xmm2, xmm4
+ paddsw xmm1, xmm5
+ paddsw xmm2, xmm6
+
+ paddsw xmm1, xmm7
+ paddsw xmm2, xmm7
+
+ psraw xmm1, 7
+ psraw xmm2, 7
+
+ packuswb xmm1, xmm2 ; pack and saturate
+ movdqa XMMWORD PTR [rdi], xmm1 ; store the results in the destination
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(2) ;[dst_ptich]
+%else
+ add rdi, r8
+%endif
+ dec rcx ; decrement count
+ jnz .vp8_filter_block1d16_v6_sse2_loop ; next row
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_filter_block1d8_h6_only_sse2
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; int dst_ptich,
+; unsigned int output_height,
+; const short *vp8_filter
+;)
+; First-pass filter only when yoffset==0
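+; (filters horizontally and packs straight to byte output; no 16-bit
+; intermediate buffer is needed because there is no second pass)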
+globalsym(vp8_filter_block1d8_h6_only_sse2)
+sym(vp8_filter_block1d8_h6_only_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rdx, arg(5) ;vp8_filter
+ mov rsi, arg(0) ;src_ptr
+
+ mov rdi, arg(2) ;output_ptr
+
+ movsxd rcx, dword ptr arg(4) ;output_height
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line ; Pitch for Source
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(3) ;dst_ptich
+%endif
+ pxor xmm0, xmm0 ; clear xmm0 for unpack
+
+.filter_block1d8_h6_only_rowloop:
+ movq xmm3, MMWORD PTR [rsi - 2]
+ movq xmm1, MMWORD PTR [rsi + 6]
+
+ prefetcht2 [rsi+rax-2]
+
+ pslldq xmm1, 8
+ por xmm1, xmm3
+
+ movdqa xmm4, xmm1
+ movdqa xmm5, xmm1
+
+ movdqa xmm6, xmm1
+ movdqa xmm7, xmm1
+
+ punpcklbw xmm3, xmm0 ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
+ psrldq xmm4, 1 ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1
+
+ pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
+ punpcklbw xmm4, xmm0 ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1
+
+ psrldq xmm5, 2 ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
+ pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
+
+
+ punpcklbw xmm5, xmm0 ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
+ psrldq xmm6, 3 ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01
+
+ pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
+
+ punpcklbw xmm6, xmm0 ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
+ psrldq xmm7, 4 ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02
+
+ pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
+
+ punpcklbw xmm7, xmm0 ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
+ psrldq xmm1, 5 ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03
+
+
+ pmullw xmm7, [rdx+64] ; x[ 2] * h[ 2] ; Tap 5
+
+ punpcklbw xmm1, xmm0 ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
+ pmullw xmm1, [rdx+80] ; x[ 3] * h[ 3] ; Tap 6
+
+
+ paddsw xmm4, xmm7
+ paddsw xmm4, xmm5
+
+ paddsw xmm4, xmm3
+ paddsw xmm4, xmm6
+
+ paddsw xmm4, xmm1
+ paddsw xmm4, [GLOBAL(rd)]
+
+ psraw xmm4, 7
+
+ packuswb xmm4, xmm0
+
+ movq QWORD PTR [rdi], xmm4 ; store the results in the destination
+ lea rsi, [rsi + rax]
+
+%if ABI_IS_32BIT
+ add rdi, DWORD Ptr arg(3) ;dst_ptich
+%else
+ add rdi, r8
+%endif
+ dec rcx
+
+ jnz .filter_block1d8_h6_only_rowloop ; next row
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_filter_block1d16_h6_only_sse2
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; int dst_ptich,
+; unsigned int output_height,
+; const short *vp8_filter
+;)
+; First-pass filter only when yoffset==0
+globalsym(vp8_filter_block1d16_h6_only_sse2)
+sym(vp8_filter_block1d16_h6_only_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rdx, arg(5) ;vp8_filter
+ mov rsi, arg(0) ;src_ptr
+
+ mov rdi, arg(2) ;output_ptr
+
+ movsxd rcx, dword ptr arg(4) ;output_height
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line ; Pitch for Source
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(3) ;dst_ptich
+%endif
+
+ pxor xmm0, xmm0 ; clear xmm0 for unpack
+
+.filter_block1d16_h6_only_sse2_rowloop:
+ movq xmm3, MMWORD PTR [rsi - 2]
+ movq xmm1, MMWORD PTR [rsi + 6]
+
+ movq xmm2, MMWORD PTR [rsi +14]
+ pslldq xmm2, 8
+
+ por xmm2, xmm1
+ prefetcht2 [rsi+rax-2]
+
+ pslldq xmm1, 8
+ por xmm1, xmm3
+
+ movdqa xmm4, xmm1
+ movdqa xmm5, xmm1
+
+ movdqa xmm6, xmm1
+ movdqa xmm7, xmm1
+
+ punpcklbw xmm3, xmm0 ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
+ psrldq xmm4, 1 ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1
+
+ pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
+ punpcklbw xmm4, xmm0 ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1
+
+ psrldq xmm5, 2 ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
+ pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
+
+ punpcklbw xmm5, xmm0 ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
+ psrldq xmm6, 3 ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01
+
+ pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
+
+ punpcklbw xmm6, xmm0 ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
+ psrldq xmm7, 4 ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02
+
+ pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
+
+ punpcklbw xmm7, xmm0 ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
+ psrldq xmm1, 5 ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03
+
+ pmullw xmm7, [rdx+64] ; x[ 2] * h[ 2] ; Tap 5
+
+ punpcklbw xmm1, xmm0 ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
+ pmullw xmm1, [rdx+80] ; x[ 3] * h[ 3] ; Tap 6
+
+ paddsw xmm4, xmm7
+ paddsw xmm4, xmm5
+
+ paddsw xmm4, xmm3
+ paddsw xmm4, xmm6
+
+ paddsw xmm4, xmm1
+ paddsw xmm4, [GLOBAL(rd)]
+
+ psraw xmm4, 7
+
+ packuswb xmm4, xmm0 ; lower 8 bytes
+
+ movq QWORD Ptr [rdi], xmm4 ; store the results in the destination
+
+ movdqa xmm3, xmm2
+ movdqa xmm4, xmm2
+
+ movdqa xmm5, xmm2
+ movdqa xmm6, xmm2
+
+ movdqa xmm7, xmm2
+
+ punpcklbw xmm3, xmm0 ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
+ psrldq xmm4, 1 ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1
+
+ pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
+ punpcklbw xmm4, xmm0 ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1
+
+ psrldq xmm5, 2 ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
+ pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
+
+ punpcklbw xmm5, xmm0 ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
+ psrldq xmm6, 3 ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01
+
+ pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
+
+ punpcklbw xmm6, xmm0 ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
+ psrldq xmm7, 4 ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02
+
+ pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
+
+ punpcklbw xmm7, xmm0 ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
+ psrldq xmm2, 5 ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03
+
+ pmullw xmm7, [rdx+64] ; x[ 2] * h[ 2] ; Tap 5
+
+ punpcklbw xmm2, xmm0 ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
+ pmullw xmm2, [rdx+80] ; x[ 3] * h[ 3] ; Tap 6
+
+ paddsw xmm4, xmm7
+ paddsw xmm4, xmm5
+
+ paddsw xmm4, xmm3
+ paddsw xmm4, xmm6
+
+ paddsw xmm4, xmm2
+ paddsw xmm4, [GLOBAL(rd)]
+
+ psraw xmm4, 7
+
+ packuswb xmm4, xmm0 ; higher 8 bytes
+
+ movq QWORD Ptr [rdi+8], xmm4 ; store the results in the destination
+
+ lea rsi, [rsi + rax]
+%if ABI_IS_32BIT
+ add rdi, DWORD Ptr arg(3) ;dst_ptich
+%else
+ add rdi, r8
+%endif
+
+ dec rcx
+ jnz .filter_block1d16_h6_only_sse2_rowloop ; next row
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_filter_block1d8_v6_only_sse2
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; int dst_ptich,
+; unsigned int output_height,
+; const short *vp8_filter
+;)
+; Second-pass filter only when xoffset==0
+globalsym(vp8_filter_block1d8_v6_only_sse2)
+sym(vp8_filter_block1d8_v6_only_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+
+ movsxd rcx, dword ptr arg(4) ;output_height
+ movsxd rdx, dword ptr arg(1) ;src_pixels_per_line
+
+ mov rax, arg(5) ;vp8_filter
+
+ pxor xmm0, xmm0 ; clear xmm0
+
+ movdqa xmm7, XMMWORD PTR [GLOBAL(rd)]
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(3) ; dst_ptich
+%endif
+
+.vp8_filter_block1d8_v6_only_sse2_loop:
+ movq xmm1, MMWORD PTR [rsi]
+ movq xmm2, MMWORD PTR [rsi + rdx]
+ movq xmm3, MMWORD PTR [rsi + rdx * 2]
+ movq xmm5, MMWORD PTR [rsi + rdx * 4]
+ add rsi, rdx
+ movq xmm4, MMWORD PTR [rsi + rdx * 2]
+ movq xmm6, MMWORD PTR [rsi + rdx * 4]
+
+ punpcklbw xmm1, xmm0
+ pmullw xmm1, [rax]
+
+ punpcklbw xmm2, xmm0
+ pmullw xmm2, [rax + 16]
+
+ punpcklbw xmm3, xmm0
+ pmullw xmm3, [rax + 32]
+
+ punpcklbw xmm5, xmm0
+ pmullw xmm5, [rax + 64]
+
+ punpcklbw xmm4, xmm0
+ pmullw xmm4, [rax + 48]
+
+ punpcklbw xmm6, xmm0
+ pmullw xmm6, [rax + 80]
+
+ paddsw xmm2, xmm5
+ paddsw xmm2, xmm3
+
+ paddsw xmm2, xmm1
+ paddsw xmm2, xmm4
+
+ paddsw xmm2, xmm6
+ paddsw xmm2, xmm7
+
+ psraw xmm2, 7
+ packuswb xmm2, xmm0 ; pack and saturate
+
+ movq QWORD PTR [rdi], xmm2 ; store the results in the destination
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;[dst_ptich]
+%else
+ add rdi, r8
+%endif
+ dec rcx ; decrement count
+ jnz .vp8_filter_block1d8_v6_only_sse2_loop ; next row
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_unpack_block1d16_h6_sse2
+;(
+; unsigned char *src_ptr,
+; unsigned short *output_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned int output_height,
+; unsigned int output_width
+;)
+globalsym(vp8_unpack_block1d16_h6_sse2)
+sym(vp8_unpack_block1d16_h6_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(1) ;output_ptr
+
+ movsxd rcx, dword ptr arg(3) ;output_height
+ movsxd rax, dword ptr arg(2) ;src_pixels_per_line ; Pitch for Source
+
+ pxor xmm0, xmm0 ; clear xmm0 for unpack
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(4) ;output_width ; Pitch for Source
+%endif
+
+.unpack_block1d16_h6_sse2_rowloop:
+ movq xmm1, MMWORD PTR [rsi] ; 07 06 05 04 03 02 01 00
+ movq xmm3, MMWORD PTR [rsi+8] ; 0f 0e 0d 0c 0b 0a 09 08
+
+ punpcklbw xmm3, xmm0 ; unpack upper eight bytes to words
+ punpcklbw xmm1, xmm0 ; unpack lower eight bytes to words
+
+ movdqa XMMWORD Ptr [rdi], xmm1
+ movdqa XMMWORD Ptr [rdi + 16], xmm3
+
+ lea rsi, [rsi + rax]
+%if ABI_IS_32BIT
+ add rdi, DWORD Ptr arg(4) ;[output_width]
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .unpack_block1d16_h6_sse2_rowloop ; next row
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+SECTION_RODATA
+align 16
+rd:
+ times 8 dw 0x40
diff --git a/media/libvpx/libvpx/vp8/common/x86/subpixel_ssse3.asm b/media/libvpx/libvpx/vp8/common/x86/subpixel_ssse3.asm
new file mode 100644
index 0000000000..17247227db
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/subpixel_ssse3.asm
@@ -0,0 +1,1515 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%define BLOCK_HEIGHT_WIDTH 4
+%define VP8_FILTER_WEIGHT 128
+%define VP8_FILTER_SHIFT 7
+
+SECTION .text
+
+;/************************************************************************************
+; Notes: filter_block1d_h6 applies a 6-tap filter horizontally to the input pixels.
+; The input pixel array has output_height rows. This routine assumes that
+; output_height is an even number. This function handles 8 pixels in the horizontal
+; direction, calculating one row per iteration to take advantage of 128-bit
+; operations.
+;
+; This is an implementation of some of the SSE optimizations first seen in ffvp8.
+;
+;*************************************************************************************/
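+; The ssse3 path keeps the taps as signed byte pairs ((k0,k5), (k1,k3),
+; (k2,k4), each replicated eight times in the tables at the end of this
+; file), pairs up the matching source bytes with punpcklbw/pshufb, and lets
+; pmaddubsw do the multiply-and-pair-add in one step: three pmaddubsw plus
+; two saturating adds produce the full 6-tap sum per output pixel.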
+;void vp8_filter_block1d8_h6_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; unsigned int output_pitch,
+; unsigned int output_height,
+; unsigned int vp8_filter_index
+;)
+globalsym(vp8_filter_block1d8_h6_ssse3)
+sym(vp8_filter_block1d8_h6_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movsxd rdx, DWORD PTR arg(5) ;table index
+ xor rsi, rsi
+ shl rdx, 4
+
+ movdqa xmm7, [GLOBAL(rd)]
+
+ lea rax, [GLOBAL(k0_k5)]
+ add rax, rdx
+ mov rdi, arg(2) ;output_ptr
+
+ cmp esi, DWORD PTR [rax]
+ je vp8_filter_block1d8_h4_ssse3
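+; ([rax] is zero for the filter indices whose outer taps are zero, see
+; k0_k5 at the end of this file, so those take the cheaper 4-tap path)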
+
+ movdqa xmm4, XMMWORD PTR [rax] ;k0_k5
+ movdqa xmm5, XMMWORD PTR [rax+256] ;k2_k4
+ movdqa xmm6, XMMWORD PTR [rax+128] ;k1_k3
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rcx, dword ptr arg(4) ;output_height
+
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+
+ sub rdi, rdx
+;xmm3 free
+.filter_block1d8_h6_rowloop_ssse3:
+ movq xmm0, MMWORD PTR [rsi - 2] ; -2 -1 0 1 2 3 4 5
+
+ movq xmm2, MMWORD PTR [rsi + 3] ; 3 4 5 6 7 8 9 10
+
+ punpcklbw xmm0, xmm2 ; -2 3 -1 4 0 5 1 6 2 7 3 8 4 9 5 10
+
+ movdqa xmm1, xmm0
+ pmaddubsw xmm0, xmm4
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf2bfrom1)]
+
+ pshufb xmm2, [GLOBAL(shuf3bfrom1)]
+ pmaddubsw xmm1, xmm5
+
+ lea rdi, [rdi + rdx]
+ pmaddubsw xmm2, xmm6
+
+ lea rsi, [rsi + rax]
+ dec rcx
+
+ paddsw xmm0, xmm1
+ paddsw xmm2, xmm7
+
+ paddsw xmm0, xmm2
+
+ psraw xmm0, 7
+
+ packuswb xmm0, xmm0
+
+ movq MMWORD Ptr [rdi], xmm0
+ jnz .filter_block1d8_h6_rowloop_ssse3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+vp8_filter_block1d8_h4_ssse3:
+ movdqa xmm5, XMMWORD PTR [rax+256] ;k2_k4
+ movdqa xmm6, XMMWORD PTR [rax+128] ;k1_k3
+
+ movdqa xmm3, XMMWORD PTR [GLOBAL(shuf2bfrom1)]
+ movdqa xmm4, XMMWORD PTR [GLOBAL(shuf3bfrom1)]
+
+ mov rsi, arg(0) ;src_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rcx, dword ptr arg(4) ;output_height
+
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+
+ sub rdi, rdx
+
+.filter_block1d8_h4_rowloop_ssse3:
+ movq xmm0, MMWORD PTR [rsi - 2] ; -2 -1 0 1 2 3 4 5
+
+ movq xmm1, MMWORD PTR [rsi + 3] ; 3 4 5 6 7 8 9 10
+
+ punpcklbw xmm0, xmm1 ; -2 3 -1 4 0 5 1 6 2 7 3 8 4 9 5 10
+
+ movdqa xmm2, xmm0
+ pshufb xmm0, xmm3
+
+ pshufb xmm2, xmm4
+ pmaddubsw xmm0, xmm5
+
+ lea rdi, [rdi + rdx]
+ pmaddubsw xmm2, xmm6
+
+ lea rsi, [rsi + rax]
+ dec rcx
+
+ paddsw xmm0, xmm7
+
+ paddsw xmm0, xmm2
+
+ psraw xmm0, 7
+
+ packuswb xmm0, xmm0
+
+ movq MMWORD Ptr [rdi], xmm0
+
+ jnz .filter_block1d8_h4_rowloop_ssse3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+;void vp8_filter_block1d16_h6_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; unsigned int output_pitch,
+; unsigned int output_height,
+; unsigned int vp8_filter_index
+;)
+globalsym(vp8_filter_block1d16_h6_ssse3)
+sym(vp8_filter_block1d16_h6_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movsxd rdx, DWORD PTR arg(5) ;table index
+ xor rsi, rsi
+ shl rdx, 4 ;
+
+ lea rax, [GLOBAL(k0_k5)]
+ add rax, rdx
+
+ mov rdi, arg(2) ;output_ptr
+
+ mov rsi, arg(0) ;src_ptr
+
+ movdqa xmm4, XMMWORD PTR [rax] ;k0_k5
+ movdqa xmm5, XMMWORD PTR [rax+256] ;k2_k4
+ movdqa xmm6, XMMWORD PTR [rax+128] ;k1_k3
+
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rcx, dword ptr arg(4) ;output_height
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+
+.filter_block1d16_h6_rowloop_ssse3:
+ movq xmm0, MMWORD PTR [rsi - 2] ; -2 -1 0 1 2 3 4 5
+
+ movq xmm3, MMWORD PTR [rsi + 3] ; 3 4 5 6 7 8 9 10
+
+ punpcklbw xmm0, xmm3 ; -2 3 -1 4 0 5 1 6 2 7 3 8 4 9 5 10
+
+ movdqa xmm1, xmm0
+ pmaddubsw xmm0, xmm4
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf2bfrom1)]
+
+ pshufb xmm2, [GLOBAL(shuf3bfrom1)]
+ movq xmm3, MMWORD PTR [rsi + 6]
+
+ pmaddubsw xmm1, xmm5
+ movq xmm7, MMWORD PTR [rsi + 11]
+
+ pmaddubsw xmm2, xmm6
+ punpcklbw xmm3, xmm7
+
+ paddsw xmm0, xmm1
+ movdqa xmm1, xmm3
+
+ pmaddubsw xmm3, xmm4
+ paddsw xmm0, xmm2
+
+ movdqa xmm2, xmm1
+ paddsw xmm0, [GLOBAL(rd)]
+
+ pshufb xmm1, [GLOBAL(shuf2bfrom1)]
+ pshufb xmm2, [GLOBAL(shuf3bfrom1)]
+
+ psraw xmm0, 7
+ pmaddubsw xmm1, xmm5
+
+ pmaddubsw xmm2, xmm6
+ packuswb xmm0, xmm0
+
+ lea rsi, [rsi + rax]
+ paddsw xmm3, xmm1
+
+ paddsw xmm3, xmm2
+
+ paddsw xmm3, [GLOBAL(rd)]
+
+ psraw xmm3, 7
+
+ packuswb xmm3, xmm3
+
+ punpcklqdq xmm0, xmm3
+
+ movdqa XMMWORD Ptr [rdi], xmm0
+
+ lea rdi, [rdi + rdx]
+ dec rcx
+ jnz .filter_block1d16_h6_rowloop_ssse3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp8_filter_block1d4_h6_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; unsigned int output_pitch,
+; unsigned int output_height,
+; unsigned int vp8_filter_index
+;)
+globalsym(vp8_filter_block1d4_h6_ssse3)
+sym(vp8_filter_block1d4_h6_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movsxd rdx, DWORD PTR arg(5) ;table index
+ xor rsi, rsi
+ shl rdx, 4 ;
+
+ lea rax, [GLOBAL(k0_k5)]
+ add rax, rdx
+ movdqa xmm7, [GLOBAL(rd)]
+
+ cmp esi, DWORD PTR [rax]
+ je .vp8_filter_block1d4_h4_ssse3
+
+ movdqa xmm4, XMMWORD PTR [rax] ;k0_k5
+ movdqa xmm5, XMMWORD PTR [rax+256] ;k2_k4
+ movdqa xmm6, XMMWORD PTR [rax+128] ;k1_k3
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rcx, dword ptr arg(4) ;output_height
+
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+
+;xmm3 free
+.filter_block1d4_h6_rowloop_ssse3:
+ movdqu xmm0, XMMWORD PTR [rsi - 2]
+
+ movdqa xmm1, xmm0
+ pshufb xmm0, [GLOBAL(shuf1b)]
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf2b)]
+ pmaddubsw xmm0, xmm4
+ pshufb xmm2, [GLOBAL(shuf3b)]
+ pmaddubsw xmm1, xmm5
+
+;--
+ pmaddubsw xmm2, xmm6
+
+ lea rsi, [rsi + rax]
+;--
+ paddsw xmm0, xmm1
+ paddsw xmm0, xmm7
+ pxor xmm1, xmm1
+ paddsw xmm0, xmm2
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+
+ movd DWORD PTR [rdi], xmm0
+
+ add rdi, rdx
+ dec rcx
+ jnz .filter_block1d4_h6_rowloop_ssse3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+.vp8_filter_block1d4_h4_ssse3:
+ movdqa xmm5, XMMWORD PTR [rax+256] ;k2_k4
+ movdqa xmm6, XMMWORD PTR [rax+128] ;k1_k3
+ movdqa xmm0, XMMWORD PTR [GLOBAL(shuf2b)]
+ movdqa xmm3, XMMWORD PTR [GLOBAL(shuf3b)]
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rcx, dword ptr arg(4) ;output_height
+
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+
+.filter_block1d4_h4_rowloop_ssse3:
+ movdqu xmm1, XMMWORD PTR [rsi - 2]
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, xmm0 ;;[GLOBAL(shuf2b)]
+ pshufb xmm2, xmm3 ;;[GLOBAL(shuf3b)]
+ pmaddubsw xmm1, xmm5
+
+;--
+ pmaddubsw xmm2, xmm6
+
+ lea rsi, [rsi + rax]
+;--
+ paddsw xmm1, xmm7
+ paddsw xmm1, xmm2
+ psraw xmm1, 7
+ packuswb xmm1, xmm1
+
+ movd DWORD PTR [rdi], xmm1
+
+ add rdi, rdx
+ dec rcx
+ jnz .filter_block1d4_h4_rowloop_ssse3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+
+;void vp8_filter_block1d16_v6_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pitch,
+; unsigned char *output_ptr,
+; unsigned int out_pitch,
+; unsigned int output_height,
+; unsigned int vp8_filter_index
+;)
+globalsym(vp8_filter_block1d16_v6_ssse3)
+sym(vp8_filter_block1d16_v6_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movsxd rdx, DWORD PTR arg(5) ;table index
+ xor rsi, rsi
+ shl rdx, 4 ;
+
+ lea rax, [GLOBAL(k0_k5)]
+ add rax, rdx
+
+ cmp esi, DWORD PTR [rax]
+ je .vp8_filter_block1d16_v4_ssse3
+
+ movdqa xmm5, XMMWORD PTR [rax] ;k0_k5
+ movdqa xmm6, XMMWORD PTR [rax+256] ;k2_k4
+ movdqa xmm7, XMMWORD PTR [rax+128] ;k1_k3
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rdx, DWORD PTR arg(1) ;pixels_per_line
+ mov rdi, arg(2) ;output_ptr
+
+%if ABI_IS_32BIT=0
+ movsxd r8, DWORD PTR arg(3) ;out_pitch
+%endif
+ mov rax, rsi
+ movsxd rcx, DWORD PTR arg(4) ;output_height
+ add rax, rdx
+
+
+.vp8_filter_block1d16_v6_ssse3_loop:
+ movq xmm1, MMWORD PTR [rsi] ;A
+ movq xmm2, MMWORD PTR [rsi + rdx] ;B
+ movq xmm3, MMWORD PTR [rsi + rdx * 2] ;C
+ movq xmm4, MMWORD PTR [rax + rdx * 2] ;D
+ movq xmm0, MMWORD PTR [rsi + rdx * 4] ;E
+
+ punpcklbw xmm2, xmm4 ;B D
+ punpcklbw xmm3, xmm0 ;C E
+
+ movq xmm0, MMWORD PTR [rax + rdx * 4] ;F
+
+ pmaddubsw xmm3, xmm6
+ punpcklbw xmm1, xmm0 ;A F
+ pmaddubsw xmm2, xmm7
+ pmaddubsw xmm1, xmm5
+
+ paddsw xmm2, xmm3
+ paddsw xmm2, xmm1
+ paddsw xmm2, [GLOBAL(rd)]
+ psraw xmm2, 7
+ packuswb xmm2, xmm2
+
+ movq MMWORD PTR [rdi], xmm2 ;store the results
+
+ movq xmm1, MMWORD PTR [rsi + 8] ;A
+ movq xmm2, MMWORD PTR [rsi + rdx + 8] ;B
+ movq xmm3, MMWORD PTR [rsi + rdx * 2 + 8] ;C
+ movq xmm4, MMWORD PTR [rax + rdx * 2 + 8] ;D
+ movq xmm0, MMWORD PTR [rsi + rdx * 4 + 8] ;E
+
+ punpcklbw xmm2, xmm4 ;B D
+ punpcklbw xmm3, xmm0 ;C E
+
+ movq xmm0, MMWORD PTR [rax + rdx * 4 + 8] ;F
+ pmaddubsw xmm3, xmm6
+ punpcklbw xmm1, xmm0 ;A F
+ pmaddubsw xmm2, xmm7
+ pmaddubsw xmm1, xmm5
+
+ add rsi, rdx
+ add rax, rdx
+;--
+;--
+ paddsw xmm2, xmm3
+ paddsw xmm2, xmm1
+ paddsw xmm2, [GLOBAL(rd)]
+ psraw xmm2, 7
+ packuswb xmm2, xmm2
+
+ movq MMWORD PTR [rdi+8], xmm2
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;out_pitch
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .vp8_filter_block1d16_v6_ssse3_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+.vp8_filter_block1d16_v4_ssse3:
+ movdqa xmm6, XMMWORD PTR [rax+256] ;k2_k4
+ movdqa xmm7, XMMWORD PTR [rax+128] ;k1_k3
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rdx, DWORD PTR arg(1) ;pixels_per_line
+ mov rdi, arg(2) ;output_ptr
+
+%if ABI_IS_32BIT=0
+ movsxd r8, DWORD PTR arg(3) ;out_pitch
+%endif
+ mov rax, rsi
+ movsxd rcx, DWORD PTR arg(4) ;output_height
+ add rax, rdx
+
+.vp8_filter_block1d16_v4_ssse3_loop:
+ movq xmm2, MMWORD PTR [rsi + rdx] ;B
+ movq xmm3, MMWORD PTR [rsi + rdx * 2] ;C
+ movq xmm4, MMWORD PTR [rax + rdx * 2] ;D
+ movq xmm0, MMWORD PTR [rsi + rdx * 4] ;E
+
+ punpcklbw xmm2, xmm4 ;B D
+ punpcklbw xmm3, xmm0 ;C E
+
+ pmaddubsw xmm3, xmm6
+ pmaddubsw xmm2, xmm7
+ movq xmm5, MMWORD PTR [rsi + rdx + 8] ;B
+ movq xmm1, MMWORD PTR [rsi + rdx * 2 + 8] ;C
+ movq xmm4, MMWORD PTR [rax + rdx * 2 + 8] ;D
+ movq xmm0, MMWORD PTR [rsi + rdx * 4 + 8] ;E
+
+ paddsw xmm2, [GLOBAL(rd)]
+ paddsw xmm2, xmm3
+ psraw xmm2, 7
+ packuswb xmm2, xmm2
+
+ punpcklbw xmm5, xmm4 ;B D
+ punpcklbw xmm1, xmm0 ;C E
+
+ pmaddubsw xmm1, xmm6
+ pmaddubsw xmm5, xmm7
+
+ movdqa xmm4, [GLOBAL(rd)]
+ add rsi, rdx
+ add rax, rdx
+;--
+;--
+ paddsw xmm5, xmm1
+ paddsw xmm5, xmm4
+ psraw xmm5, 7
+ packuswb xmm5, xmm5
+
+ punpcklqdq xmm2, xmm5
+
+ movdqa XMMWORD PTR [rdi], xmm2
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;out_pitch
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .vp8_filter_block1d16_v4_ssse3_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp8_filter_block1d8_v6_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pitch,
+; unsigned char *output_ptr,
+; unsigned int out_pitch,
+; unsigned int output_height,
+; unsigned int vp8_filter_index
+;)
+globalsym(vp8_filter_block1d8_v6_ssse3)
+sym(vp8_filter_block1d8_v6_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movsxd rdx, DWORD PTR arg(5) ;table index
+ xor rsi, rsi
+ shl rdx, 4 ;
+
+ lea rax, [GLOBAL(k0_k5)]
+ add rax, rdx
+
+ movsxd rdx, DWORD PTR arg(1) ;pixels_per_line
+ mov rdi, arg(2) ;output_ptr
+%if ABI_IS_32BIT=0
+ movsxd r8, DWORD PTR arg(3) ; out_pitch
+%endif
+ movsxd rcx, DWORD PTR arg(4) ;[output_height]
+
+ cmp esi, DWORD PTR [rax]
+ je .vp8_filter_block1d8_v4_ssse3
+
+ movdqa xmm5, XMMWORD PTR [rax] ;k0_k5
+ movdqa xmm6, XMMWORD PTR [rax+256] ;k2_k4
+ movdqa xmm7, XMMWORD PTR [rax+128] ;k1_k3
+
+ mov rsi, arg(0) ;src_ptr
+
+ mov rax, rsi
+ add rax, rdx
+
+.vp8_filter_block1d8_v6_ssse3_loop:
+ movq xmm1, MMWORD PTR [rsi] ;A
+ movq xmm2, MMWORD PTR [rsi + rdx] ;B
+ movq xmm3, MMWORD PTR [rsi + rdx * 2] ;C
+ movq xmm4, MMWORD PTR [rax + rdx * 2] ;D
+ movq xmm0, MMWORD PTR [rsi + rdx * 4] ;E
+
+ punpcklbw xmm2, xmm4 ;B D
+ punpcklbw xmm3, xmm0 ;C E
+
+ movq xmm0, MMWORD PTR [rax + rdx * 4] ;F
+ movdqa xmm4, [GLOBAL(rd)]
+
+ pmaddubsw xmm3, xmm6
+ punpcklbw xmm1, xmm0 ;A F
+ pmaddubsw xmm2, xmm7
+ pmaddubsw xmm1, xmm5
+ add rsi, rdx
+ add rax, rdx
+;--
+;--
+ paddsw xmm2, xmm3
+ paddsw xmm2, xmm1
+ paddsw xmm2, xmm4
+ psraw xmm2, 7
+ packuswb xmm2, xmm2
+
+ movq MMWORD PTR [rdi], xmm2
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;[out_pitch]
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .vp8_filter_block1d8_v6_ssse3_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+.vp8_filter_block1d8_v4_ssse3:
+ movdqa xmm6, XMMWORD PTR [rax+256] ;k2_k4
+ movdqa xmm7, XMMWORD PTR [rax+128] ;k1_k3
+ movdqa xmm5, [GLOBAL(rd)]
+
+ mov rsi, arg(0) ;src_ptr
+
+ mov rax, rsi
+ add rax, rdx
+
+.vp8_filter_block1d8_v4_ssse3_loop:
+ movq xmm2, MMWORD PTR [rsi + rdx] ;B
+ movq xmm3, MMWORD PTR [rsi + rdx * 2] ;C
+ movq xmm4, MMWORD PTR [rax + rdx * 2] ;D
+ movq xmm0, MMWORD PTR [rsi + rdx * 4] ;E
+
+ punpcklbw xmm2, xmm4 ;B D
+ punpcklbw xmm3, xmm0 ;C E
+
+ pmaddubsw xmm3, xmm6
+ pmaddubsw xmm2, xmm7
+ add rsi, rdx
+ add rax, rdx
+;--
+;--
+ paddsw xmm2, xmm3
+ paddsw xmm2, xmm5
+ psraw xmm2, 7
+ packuswb xmm2, xmm2
+
+ movq MMWORD PTR [rdi], xmm2
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;[out_pitch]
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .vp8_filter_block1d8_v4_ssse3_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+;void vp8_filter_block1d4_v6_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pitch,
+; unsigned char *output_ptr,
+; unsigned int out_pitch,
+; unsigned int output_height,
+; unsigned int vp8_filter_index
+;)
+globalsym(vp8_filter_block1d4_v6_ssse3)
+sym(vp8_filter_block1d4_v6_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movsxd rdx, DWORD PTR arg(5) ;table index
+ xor rsi, rsi
+ shl rdx, 4 ;
+
+ lea rax, [GLOBAL(k0_k5)]
+ add rax, rdx
+
+ movsxd rdx, DWORD PTR arg(1) ;pixels_per_line
+ mov rdi, arg(2) ;output_ptr
+%if ABI_IS_32BIT=0
+ movsxd r8, DWORD PTR arg(3) ; out_pitch
+%endif
+ movsxd rcx, DWORD PTR arg(4) ;[output_height]
+
+ cmp esi, DWORD PTR [rax]
+ je .vp8_filter_block1d4_v4_ssse3
+
+ movq mm5, MMWORD PTR [rax] ;k0_k5
+ movq mm6, MMWORD PTR [rax+256] ;k2_k4
+ movq mm7, MMWORD PTR [rax+128] ;k1_k3
+
+ mov rsi, arg(0) ;src_ptr
+
+ mov rax, rsi
+ add rax, rdx
+
+.vp8_filter_block1d4_v6_ssse3_loop:
+ movd mm1, DWORD PTR [rsi] ;A
+ movd mm2, DWORD PTR [rsi + rdx] ;B
+ movd mm3, DWORD PTR [rsi + rdx * 2] ;C
+ movd mm4, DWORD PTR [rax + rdx * 2] ;D
+ movd mm0, DWORD PTR [rsi + rdx * 4] ;E
+
+ punpcklbw mm2, mm4 ;B D
+ punpcklbw mm3, mm0 ;C E
+
+ movd mm0, DWORD PTR [rax + rdx * 4] ;F
+
+ movq mm4, [GLOBAL(rd)]
+
+ pmaddubsw mm3, mm6
+ punpcklbw mm1, mm0 ;A F
+ pmaddubsw mm2, mm7
+ pmaddubsw mm1, mm5
+ add rsi, rdx
+ add rax, rdx
+;--
+;--
+ paddsw mm2, mm3
+ paddsw mm2, mm1
+ paddsw mm2, mm4
+ psraw mm2, 7
+ packuswb mm2, mm2
+
+ movd DWORD PTR [rdi], mm2
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;[out_pitch]
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .vp8_filter_block1d4_v6_ssse3_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+.vp8_filter_block1d4_v4_ssse3:
+ movq mm6, MMWORD PTR [rax+256] ;k2_k4
+ movq mm7, MMWORD PTR [rax+128] ;k1_k3
+ movq mm5, MMWORD PTR [GLOBAL(rd)]
+
+ mov rsi, arg(0) ;src_ptr
+
+ mov rax, rsi
+ add rax, rdx
+
+.vp8_filter_block1d4_v4_ssse3_loop:
+ movd mm2, DWORD PTR [rsi + rdx] ;B
+ movd mm3, DWORD PTR [rsi + rdx * 2] ;C
+ movd mm4, DWORD PTR [rax + rdx * 2] ;D
+ movd mm0, DWORD PTR [rsi + rdx * 4] ;E
+
+ punpcklbw mm2, mm4 ;B D
+ punpcklbw mm3, mm0 ;C E
+
+ pmaddubsw mm3, mm6
+ pmaddubsw mm2, mm7
+ add rsi, rdx
+ add rax, rdx
+;--
+;--
+ paddsw mm2, mm3
+ paddsw mm2, mm5
+ psraw mm2, 7
+ packuswb mm2, mm2
+
+ movd DWORD PTR [rdi], mm2
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;[out_pitch]
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .vp8_filter_block1d4_v4_ssse3_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp8_bilinear_predict16x16_ssse3
+;(
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; int xoffset,
+; int yoffset,
+; unsigned char *dst_ptr,
+; int dst_pitch
+;)
+globalsym(vp8_bilinear_predict16x16_ssse3)
+sym(vp8_bilinear_predict16x16_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
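+ ; dispatch on the subpel offsets: both nonzero -> full two-pass filter;
+ ; xoffset only -> .b16x16_fp_only (horizontal pass straight to dst);
+ ; yoffset only -> .b16x16_sp_only (vertical pass straight from src)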
+ lea rcx, [GLOBAL(vp8_bilinear_filters_ssse3)]
+ movsxd rax, dword ptr arg(2) ; xoffset
+
+ cmp rax, 0 ; skip first_pass filter if xoffset=0
+ je .b16x16_sp_only
+
+ shl rax, 4
+ lea rax, [rax + rcx] ; HFilter
+
+ mov rdi, arg(4) ; dst_ptr
+ mov rsi, arg(0) ; src_ptr
+ movsxd rdx, dword ptr arg(5) ; dst_pitch
+
+ movdqa xmm1, [rax]
+
+ movsxd rax, dword ptr arg(3) ; yoffset
+
+ cmp rax, 0 ; skip second_pass filter if yoffset=0
+ je .b16x16_fp_only
+
+ shl rax, 4
+ lea rax, [rax + rcx] ; VFilter
+
+ lea rcx, [rdi+rdx*8]
+ lea rcx, [rcx+rdx*8]
+ movsxd rdx, dword ptr arg(1) ; src_pixels_per_line
+
+ movdqa xmm2, [rax]
+
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(5) ; dst_pitch
+%endif
+ movq xmm3, [rsi] ; 00 01 02 03 04 05 06 07
+ movq xmm5, [rsi+1] ; 01 02 03 04 05 06 07 08
+
+ punpcklbw xmm3, xmm5 ; 00 01 01 02 02 03 03 04 04 05 05 06 06 07 07 08
+ movq xmm4, [rsi+8] ; 08 09 10 11 12 13 14 15
+
+ movq xmm5, [rsi+9] ; 09 10 11 12 13 14 15 16
+
+ lea rsi, [rsi + rdx] ; next line
+
+ pmaddubsw xmm3, xmm1 ; 00 02 04 06 08 10 12 14
+
+ punpcklbw xmm4, xmm5 ; 08 09 09 10 10 11 11 12 12 13 13 14 14 15 15 16
+ pmaddubsw xmm4, xmm1 ; 01 03 05 07 09 11 13 15
+
+ paddw xmm3, [GLOBAL(rd)] ; xmm3 += round value
+ psraw xmm3, VP8_FILTER_SHIFT ; xmm3 /= 128
+
+ paddw xmm4, [GLOBAL(rd)] ; xmm4 += round value
+ psraw xmm4, VP8_FILTER_SHIFT ; xmm4 /= 128
+
+ movdqa xmm7, xmm3
+ packuswb xmm7, xmm4 ; 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
+
+.next_row:
+ movq xmm6, [rsi] ; 00 01 02 03 04 05 06 07
+ movq xmm5, [rsi+1] ; 01 02 03 04 05 06 07 08
+
+ punpcklbw xmm6, xmm5
+ movq xmm4, [rsi+8] ; 08 09 10 11 12 13 14 15
+
+ movq xmm5, [rsi+9] ; 09 10 11 12 13 14 15 16
+ lea rsi, [rsi + rdx] ; next line
+
+ pmaddubsw xmm6, xmm1
+
+ punpcklbw xmm4, xmm5
+ pmaddubsw xmm4, xmm1
+
+ paddw xmm6, [GLOBAL(rd)] ; xmm6 += round value
+ psraw xmm6, VP8_FILTER_SHIFT ; xmm6 /= 128
+
+ paddw xmm4, [GLOBAL(rd)] ; xmm4 += round value
+ psraw xmm4, VP8_FILTER_SHIFT ; xmm4 /= 128
+
+ packuswb xmm6, xmm4
+ movdqa xmm5, xmm7
+
+ punpcklbw xmm5, xmm6
+ pmaddubsw xmm5, xmm2
+
+ punpckhbw xmm7, xmm6
+ pmaddubsw xmm7, xmm2
+
+ paddw xmm5, [GLOBAL(rd)] ; xmm5 += round value
+ psraw xmm5, VP8_FILTER_SHIFT ; xmm5 /= 128
+
+ paddw xmm7, [GLOBAL(rd)] ; xmm7 += round value
+ psraw xmm7, VP8_FILTER_SHIFT ; xmm7 /= 128
+
+ packuswb xmm5, xmm7
+ movdqa xmm7, xmm6
+
+ movdqa [rdi], xmm5 ; store the results in the destination
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(5) ; dst_pitch
+%else
+ add rdi, r8
+%endif
+
+ cmp rdi, rcx
+ jne .next_row
+
+ jmp .done
+
+.b16x16_sp_only:
+ movsxd rax, dword ptr arg(3) ; yoffset
+ shl rax, 4
+ lea rax, [rax + rcx] ; VFilter
+
+ mov rdi, arg(4) ; dst_ptr
+ mov rsi, arg(0) ; src_ptr
+ movsxd rdx, dword ptr arg(5) ; dst_pitch
+
+ movdqa xmm1, [rax] ; VFilter
+
+ lea rcx, [rdi+rdx*8]
+ lea rcx, [rcx+rdx*8]
+ movsxd rax, dword ptr arg(1) ; src_pixels_per_line
+
+ ; get the first horizontal line done
+ movq xmm4, [rsi] ; load row 0
+ movq xmm2, [rsi + 8] ; load row 0
+
+ lea rsi, [rsi + rax] ; next line
+.next_row_sp:
+ movq xmm3, [rsi] ; load row + 1
+ movq xmm5, [rsi + 8] ; load row + 1
+
+ punpcklbw xmm4, xmm3
+ punpcklbw xmm2, xmm5
+
+ pmaddubsw xmm4, xmm1
+ movq xmm7, [rsi + rax] ; load row + 2
+
+ pmaddubsw xmm2, xmm1
+ movq xmm6, [rsi + rax + 8] ; load row + 2
+
+ punpcklbw xmm3, xmm7
+ punpcklbw xmm5, xmm6
+
+ pmaddubsw xmm3, xmm1
+ paddw xmm4, [GLOBAL(rd)]
+
+ pmaddubsw xmm5, xmm1
+ paddw xmm2, [GLOBAL(rd)]
+
+ psraw xmm4, VP8_FILTER_SHIFT
+ psraw xmm2, VP8_FILTER_SHIFT
+
+ packuswb xmm4, xmm2
+ paddw xmm3, [GLOBAL(rd)]
+
+ movdqa [rdi], xmm4 ; store row 0
+ paddw xmm5, [GLOBAL(rd)]
+
+ psraw xmm3, VP8_FILTER_SHIFT
+ psraw xmm5, VP8_FILTER_SHIFT
+
+ packuswb xmm3, xmm5
+ movdqa xmm4, xmm7
+
+ movdqa [rdi + rdx],xmm3 ; store row 1
+ lea rsi, [rsi + 2*rax]
+
+ movdqa xmm2, xmm6
+ lea rdi, [rdi + 2*rdx]
+
+ cmp rdi, rcx
+ jne .next_row_sp
+
+ jmp .done
+
+.b16x16_fp_only:
+ lea rcx, [rdi+rdx*8]
+ lea rcx, [rcx+rdx*8]
+ movsxd rax, dword ptr arg(1) ; src_pixels_per_line
+
+.next_row_fp:
+ movq xmm2, [rsi] ; 00 01 02 03 04 05 06 07
+ movq xmm4, [rsi+1] ; 01 02 03 04 05 06 07 08
+
+ punpcklbw xmm2, xmm4
+ movq xmm3, [rsi+8] ; 08 09 10 11 12 13 14 15
+
+ pmaddubsw xmm2, xmm1
+ movq xmm4, [rsi+9] ; 09 10 11 12 13 14 15 16
+
+ lea rsi, [rsi + rax] ; next line
+ punpcklbw xmm3, xmm4
+
+ pmaddubsw xmm3, xmm1
+ movq xmm5, [rsi]
+
+ paddw xmm2, [GLOBAL(rd)]
+ movq xmm7, [rsi+1]
+
+ movq xmm6, [rsi+8]
+ psraw xmm2, VP8_FILTER_SHIFT
+
+ punpcklbw xmm5, xmm7
+ movq xmm7, [rsi+9]
+
+ paddw xmm3, [GLOBAL(rd)]
+ pmaddubsw xmm5, xmm1
+
+ psraw xmm3, VP8_FILTER_SHIFT
+ punpcklbw xmm6, xmm7
+
+ packuswb xmm2, xmm3
+ pmaddubsw xmm6, xmm1
+
+ movdqa [rdi], xmm2 ; store the results in the destination
+ paddw xmm5, [GLOBAL(rd)]
+
+ lea rdi, [rdi + rdx] ; dst_pitch
+ psraw xmm5, VP8_FILTER_SHIFT
+
+ paddw xmm6, [GLOBAL(rd)]
+ psraw xmm6, VP8_FILTER_SHIFT
+
+ packuswb xmm5, xmm6
+ lea rsi, [rsi + rax] ; next line
+
+ movdqa [rdi], xmm5 ; store the results in the destination
+ lea rdi, [rdi + rdx] ; dst_pitch
+
+ cmp rdi, rcx
+
+ jne .next_row_fp
+
+.done:
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp8_bilinear_predict8x8_ssse3
+;(
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; int xoffset,
+; int yoffset,
+; unsigned char *dst_ptr,
+; int dst_pitch
+;)
+globalsym(vp8_bilinear_predict8x8_ssse3)
+sym(vp8_bilinear_predict8x8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 144 ; reserve 144 bytes
+
+ lea rcx, [GLOBAL(vp8_bilinear_filters_ssse3)]
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rdx, dword ptr arg(1) ;src_pixels_per_line
+
+ ;Read in 9 lines of unaligned data and put them on the stack. This gives
+ ;a big performance boost.
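+ ;(144 bytes = 9 rows of 16 bytes: the 8 output rows of the second pass
+ ;plus the one extra row needed by the 2-tap vertical filter)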
+ movdqu xmm0, [rsi]
+ lea rax, [rdx + rdx*2]
+ movdqu xmm1, [rsi+rdx]
+ movdqu xmm2, [rsi+rdx*2]
+ add rsi, rax
+ movdqu xmm3, [rsi]
+ movdqu xmm4, [rsi+rdx]
+ movdqu xmm5, [rsi+rdx*2]
+ add rsi, rax
+ movdqu xmm6, [rsi]
+ movdqu xmm7, [rsi+rdx]
+
+ movdqa XMMWORD PTR [rsp], xmm0
+
+ movdqu xmm0, [rsi+rdx*2]
+
+ movdqa XMMWORD PTR [rsp+16], xmm1
+ movdqa XMMWORD PTR [rsp+32], xmm2
+ movdqa XMMWORD PTR [rsp+48], xmm3
+ movdqa XMMWORD PTR [rsp+64], xmm4
+ movdqa XMMWORD PTR [rsp+80], xmm5
+ movdqa XMMWORD PTR [rsp+96], xmm6
+ movdqa XMMWORD PTR [rsp+112], xmm7
+ movdqa XMMWORD PTR [rsp+128], xmm0
+
+ movsxd rax, dword ptr arg(2) ; xoffset
+ cmp rax, 0 ; skip first_pass filter if xoffset=0
+ je .b8x8_sp_only
+
+ shl rax, 4
+ add rax, rcx ; HFilter
+
+ mov rdi, arg(4) ; dst_ptr
+ movsxd rdx, dword ptr arg(5) ; dst_pitch
+
+ movdqa xmm0, [rax]
+
+ movsxd rax, dword ptr arg(3) ; yoffset
+ cmp rax, 0 ; skip second_pass filter if yoffset=0
+ je .b8x8_fp_only
+
+ shl rax, 4
+ lea rax, [rax + rcx] ; VFilter
+
+ lea rcx, [rdi+rdx*8]
+
+ movdqa xmm1, [rax]
+
+ ; get the first horizontal line done
+ movdqa xmm3, [rsp] ; 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
+ movdqa xmm5, xmm3 ; 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 xx
+
+ psrldq xmm5, 1
+ lea rsp, [rsp + 16] ; next line
+
+ punpcklbw xmm3, xmm5 ; 00 01 01 02 02 03 03 04 04 05 05 06 06 07 07 08
+ pmaddubsw xmm3, xmm0 ; 00 02 04 06 08 10 12 14
+
+ paddw xmm3, [GLOBAL(rd)] ; xmm3 += round value
+ psraw xmm3, VP8_FILTER_SHIFT ; xmm3 /= 128
+
+ movdqa xmm7, xmm3
+ packuswb xmm7, xmm7 ; 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
+
+.next_row:
+ movdqa xmm6, [rsp] ; 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
+ lea rsp, [rsp + 16] ; next line
+
+ movdqa xmm5, xmm6
+
+ psrldq xmm5, 1
+
+ punpcklbw xmm6, xmm5
+ pmaddubsw xmm6, xmm0
+
+ paddw xmm6, [GLOBAL(rd)] ; xmm6 += round value
+ psraw xmm6, VP8_FILTER_SHIFT ; xmm6 /= 128
+
+ packuswb xmm6, xmm6
+
+ punpcklbw xmm7, xmm6
+ pmaddubsw xmm7, xmm1
+
+ paddw xmm7, [GLOBAL(rd)] ; xmm7 += round value
+ psraw xmm7, VP8_FILTER_SHIFT ; xmm7 /= 128
+
+ packuswb xmm7, xmm7
+
+ movq [rdi], xmm7 ; store the results in the destination
+ lea rdi, [rdi + rdx]
+
+ movdqa xmm7, xmm6
+
+ cmp rdi, rcx
+ jne .next_row
+
+ jmp .done8x8
+
+.b8x8_sp_only:
+ movsxd rax, dword ptr arg(3) ; yoffset
+ shl rax, 4
+ lea rax, [rax + rcx] ; VFilter
+
+ mov rdi, arg(4) ;dst_ptr
+ movsxd rdx, dword ptr arg(5) ; dst_pitch
+
+ movdqa xmm0, [rax] ; VFilter
+
+ movq xmm1, XMMWORD PTR [rsp]
+ movq xmm2, XMMWORD PTR [rsp+16]
+
+ movq xmm3, XMMWORD PTR [rsp+32]
+ punpcklbw xmm1, xmm2
+
+ movq xmm4, XMMWORD PTR [rsp+48]
+ punpcklbw xmm2, xmm3
+
+ movq xmm5, XMMWORD PTR [rsp+64]
+ punpcklbw xmm3, xmm4
+
+ movq xmm6, XMMWORD PTR [rsp+80]
+ punpcklbw xmm4, xmm5
+
+ movq xmm7, XMMWORD PTR [rsp+96]
+ punpcklbw xmm5, xmm6
+
+ ; Because the source register (xmm0) is always treated as signed by
+ ; pmaddubsw, the constant '128' is treated as '-128'.
+ pmaddubsw xmm1, xmm0
+ pmaddubsw xmm2, xmm0
+
+ pmaddubsw xmm3, xmm0
+ pmaddubsw xmm4, xmm0
+
+ pmaddubsw xmm5, xmm0
+ punpcklbw xmm6, xmm7
+
+ pmaddubsw xmm6, xmm0
+ paddw xmm1, [GLOBAL(rd)]
+
+ paddw xmm2, [GLOBAL(rd)]
+ psraw xmm1, VP8_FILTER_SHIFT
+
+ paddw xmm3, [GLOBAL(rd)]
+ psraw xmm2, VP8_FILTER_SHIFT
+
+ paddw xmm4, [GLOBAL(rd)]
+ psraw xmm3, VP8_FILTER_SHIFT
+
+ paddw xmm5, [GLOBAL(rd)]
+ psraw xmm4, VP8_FILTER_SHIFT
+
+ paddw xmm6, [GLOBAL(rd)]
+ psraw xmm5, VP8_FILTER_SHIFT
+
+ psraw xmm6, VP8_FILTER_SHIFT
+
+ ; Because everything was multiplied by '-128' and came out negative, the
+ ; unsigned saturation truncates those values to 0, resulting in incorrect
+ ; handling of the xoffset == 0 && yoffset == 0 case.
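+ ; (so this path must not be reached with xoffset == 0 && yoffset == 0;
+ ; compare the copy fallbacks in the ssse3 sixtap stubs in vp8_asm_stubs.c)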
+ packuswb xmm1, xmm1
+
+ packuswb xmm2, xmm2
+ movq [rdi], xmm1
+
+ packuswb xmm3, xmm3
+ movq [rdi+rdx], xmm2
+
+ packuswb xmm4, xmm4
+ movq xmm1, XMMWORD PTR [rsp+112]
+
+ lea rdi, [rdi + 2*rdx]
+ movq xmm2, XMMWORD PTR [rsp+128]
+
+ packuswb xmm5, xmm5
+ movq [rdi], xmm3
+
+ packuswb xmm6, xmm6
+ movq [rdi+rdx], xmm4
+
+ lea rdi, [rdi + 2*rdx]
+ punpcklbw xmm7, xmm1
+
+ movq [rdi], xmm5
+ pmaddubsw xmm7, xmm0
+
+ movq [rdi+rdx], xmm6
+ punpcklbw xmm1, xmm2
+
+ pmaddubsw xmm1, xmm0
+ paddw xmm7, [GLOBAL(rd)]
+
+ psraw xmm7, VP8_FILTER_SHIFT
+ paddw xmm1, [GLOBAL(rd)]
+
+ psraw xmm1, VP8_FILTER_SHIFT
+ packuswb xmm7, xmm7
+
+ packuswb xmm1, xmm1
+ lea rdi, [rdi + 2*rdx]
+
+ movq [rdi], xmm7
+
+ movq [rdi+rdx], xmm1
+ lea rsp, [rsp + 144]
+
+ jmp .done8x8
+
+.b8x8_fp_only:
+ lea rcx, [rdi+rdx*8]
+
+.next_row_fp:
+ movdqa xmm1, XMMWORD PTR [rsp]
+ movdqa xmm3, XMMWORD PTR [rsp+16]
+
+ movdqa xmm2, xmm1
+ movdqa xmm5, XMMWORD PTR [rsp+32]
+
+ psrldq xmm2, 1
+ movdqa xmm7, XMMWORD PTR [rsp+48]
+
+ movdqa xmm4, xmm3
+ psrldq xmm4, 1
+
+ movdqa xmm6, xmm5
+ psrldq xmm6, 1
+
+ punpcklbw xmm1, xmm2
+ pmaddubsw xmm1, xmm0
+
+ punpcklbw xmm3, xmm4
+ pmaddubsw xmm3, xmm0
+
+ punpcklbw xmm5, xmm6
+ pmaddubsw xmm5, xmm0
+
+ movdqa xmm2, xmm7
+ psrldq xmm2, 1
+
+ punpcklbw xmm7, xmm2
+ pmaddubsw xmm7, xmm0
+
+ paddw xmm1, [GLOBAL(rd)]
+ psraw xmm1, VP8_FILTER_SHIFT
+
+ paddw xmm3, [GLOBAL(rd)]
+ psraw xmm3, VP8_FILTER_SHIFT
+
+ paddw xmm5, [GLOBAL(rd)]
+ psraw xmm5, VP8_FILTER_SHIFT
+
+ paddw xmm7, [GLOBAL(rd)]
+ psraw xmm7, VP8_FILTER_SHIFT
+
+ packuswb xmm1, xmm1
+ packuswb xmm3, xmm3
+
+ packuswb xmm5, xmm5
+ movq [rdi], xmm1
+
+ packuswb xmm7, xmm7
+ movq [rdi+rdx], xmm3
+
+ lea rdi, [rdi + 2*rdx]
+ movq [rdi], xmm5
+
+ lea rsp, [rsp + 4*16]
+ movq [rdi+rdx], xmm7
+
+ lea rdi, [rdi + 2*rdx]
+ cmp rdi, rcx
+
+ jne .next_row_fp
+
+ lea rsp, [rsp + 16]
+
+.done8x8:
+ ;add rsp, 144
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+shuf1b:
+ db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
+shuf2b:
+ db 2, 4, 3, 5, 4, 6, 5, 7, 6, 8, 7, 9, 8, 10, 9, 11
+shuf3b:
+ db 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 8, 7, 9, 8, 10
+
+align 16
+shuf2bfrom1:
+ db 4, 8, 6, 1, 8, 3, 1, 5, 3, 7, 5, 9, 7,11, 9,13
+align 16
+shuf3bfrom1:
+ db 2, 6, 4, 8, 6, 1, 8, 3, 1, 5, 3, 7, 5, 9, 7,11
+
+align 16
+rd:
+ times 8 dw 0x40
+
+align 16
+k0_k5:
+ times 8 db 0, 0 ;placeholder
+ times 8 db 0, 0
+ times 8 db 2, 1
+ times 8 db 0, 0
+ times 8 db 3, 3
+ times 8 db 0, 0
+ times 8 db 1, 2
+ times 8 db 0, 0
+k1_k3:
+ times 8 db 0, 0 ;placeholder
+ times 8 db -6, 12
+ times 8 db -11, 36
+ times 8 db -9, 50
+ times 8 db -16, 77
+ times 8 db -6, 93
+ times 8 db -8, 108
+ times 8 db -1, 123
+k2_k4:
+ times 8 db 128, 0 ;placeholder
+ times 8 db 123, -1
+ times 8 db 108, -8
+ times 8 db 93, -6
+ times 8 db 77, -16
+ times 8 db 50, -9
+ times 8 db 36, -11
+ times 8 db 12, -6
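+; Each row of the three tables above holds one subpel filter's byte-pair
+; taps replicated eight times, so a single movdqa broadcasts the pair; the
+; all-zero rows in k0_k5 mark the offsets whose outer taps vanish and take
+; the 4-tap paths.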
+align 16
+vp8_bilinear_filters_ssse3:
+ times 8 db 128, 0
+ times 8 db 112, 16
+ times 8 db 96, 32
+ times 8 db 80, 48
+ times 8 db 64, 64
+ times 8 db 48, 80
+ times 8 db 32, 96
+ times 8 db 16, 112
+
diff --git a/media/libvpx/libvpx/vp8/common/x86/vp8_asm_stubs.c b/media/libvpx/libvpx/vp8/common/x86/vp8_asm_stubs.c
new file mode 100644
index 0000000000..7fb83c2d5e
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/common/x86/vp8_asm_stubs.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vpx_ports/mem.h"
+
+extern const short vp8_six_tap_x86[8][6 * 8];
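+/* one row per subpel offset; each of the 6 taps is replicated 8 times so
+ * the SIMD code can broadcast it with a single aligned load */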
+
+extern void vp8_filter_block1d_h6_mmx(unsigned char *src_ptr,
+ unsigned short *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const short *vp8_filter);
+extern void vp8_filter_block1dc_v6_mmx(
+ unsigned short *src_ptr, unsigned char *output_ptr, int output_pitch,
+ unsigned int pixels_per_line, unsigned int pixel_step,
+ unsigned int output_height, unsigned int output_width,
+ const short *vp8_filter);
+extern void vp8_filter_block1d8_h6_sse2(unsigned char *src_ptr,
+ unsigned short *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const short *vp8_filter);
+extern void vp8_filter_block1d16_h6_sse2(unsigned char *src_ptr,
+ unsigned short *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const short *vp8_filter);
+extern void vp8_filter_block1d8_v6_sse2(
+ unsigned short *src_ptr, unsigned char *output_ptr, int dst_ptich,
+ unsigned int pixels_per_line, unsigned int pixel_step,
+ unsigned int output_height, unsigned int output_width,
+ const short *vp8_filter);
+extern void vp8_filter_block1d16_v6_sse2(
+ unsigned short *src_ptr, unsigned char *output_ptr, int dst_ptich,
+ unsigned int pixels_per_line, unsigned int pixel_step,
+ unsigned int output_height, unsigned int output_width,
+ const short *vp8_filter);
+extern void vp8_unpack_block1d16_h6_sse2(unsigned char *src_ptr,
+ unsigned short *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int output_height,
+ unsigned int output_width);
+extern void vp8_filter_block1d8_h6_only_sse2(unsigned char *src_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned char *output_ptr,
+ int dst_ptich,
+ unsigned int output_height,
+ const short *vp8_filter);
+extern void vp8_filter_block1d16_h6_only_sse2(unsigned char *src_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned char *output_ptr,
+ int dst_ptich,
+ unsigned int output_height,
+ const short *vp8_filter);
+extern void vp8_filter_block1d8_v6_only_sse2(unsigned char *src_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned char *output_ptr,
+ int dst_ptich,
+ unsigned int output_height,
+ const short *vp8_filter);
+
+#if HAVE_MMX
+void vp8_sixtap_predict4x4_mmx(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, unsigned short,
+ FData2[16 * 16]); /* Temporary data buffer used in filtering */
+ const short *HFilter, *VFilter;
+ HFilter = vp8_six_tap_x86[xoffset];
+ vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line), FData2,
+ src_pixels_per_line, 1, 9, 8, HFilter);
+ VFilter = vp8_six_tap_x86[yoffset];
+ vp8_filter_block1dc_v6_mmx(FData2 + 8, dst_ptr, dst_pitch, 8, 4, 4, 4,
+ VFilter);
+}
+#endif
+
+#if HAVE_SSE2
+void vp8_sixtap_predict16x16_sse2(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, unsigned short,
+ FData2[24 * 24]); /* Temporary data buffer used in filtering */
+
+ const short *HFilter, *VFilter;
+
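+ /* Two-pass scheme: the horizontal 6-tap pass filters 16 + 5 = 21 rows
+ * into FData2 (the vertical 6-tap pass needs two rows of context above
+ * the block and three below), and the vertical pass then reduces those
+ * to the 16 output rows. The xoffset/yoffset branches below skip
+ * whichever pass has a zero offset. */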
+ if (xoffset) {
+ if (yoffset) {
+ HFilter = vp8_six_tap_x86[xoffset];
+ vp8_filter_block1d16_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2,
+ src_pixels_per_line, 1, 21, 32, HFilter);
+ VFilter = vp8_six_tap_x86[yoffset];
+ vp8_filter_block1d16_v6_sse2(FData2 + 32, dst_ptr, dst_pitch, 32, 16, 16,
+ dst_pitch, VFilter);
+ } else {
+ /* First-pass only */
+ HFilter = vp8_six_tap_x86[xoffset];
+ vp8_filter_block1d16_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr,
+ dst_pitch, 16, HFilter);
+ }
+ } else {
+ /* Second-pass only */
+ VFilter = vp8_six_tap_x86[yoffset];
+ vp8_unpack_block1d16_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2,
+ src_pixels_per_line, 21, 32);
+ vp8_filter_block1d16_v6_sse2(FData2 + 32, dst_ptr, dst_pitch, 32, 16, 16,
+ dst_pitch, VFilter);
+ }
+}
+
+void vp8_sixtap_predict8x8_sse2(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset,
+ unsigned char *dst_ptr, int dst_pitch) {
+ DECLARE_ALIGNED(16, unsigned short,
+ FData2[256]); /* Temporary data buffer used in filtering */
+ const short *HFilter, *VFilter;
+
+ if (xoffset) {
+ if (yoffset) {
+ HFilter = vp8_six_tap_x86[xoffset];
+ vp8_filter_block1d8_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2,
+ src_pixels_per_line, 1, 13, 16, HFilter);
+ VFilter = vp8_six_tap_x86[yoffset];
+ vp8_filter_block1d8_v6_sse2(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 8,
+ dst_pitch, VFilter);
+ } else {
+ /* First-pass only */
+ HFilter = vp8_six_tap_x86[xoffset];
+ vp8_filter_block1d8_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr,
+ dst_pitch, 8, HFilter);
+ }
+ } else {
+ /* Second-pass only */
+ VFilter = vp8_six_tap_x86[yoffset];
+ vp8_filter_block1d8_v6_only_sse2(src_ptr - (2 * src_pixels_per_line),
+ src_pixels_per_line, dst_ptr, dst_pitch, 8,
+ VFilter);
+ }
+}
+
+void vp8_sixtap_predict8x4_sse2(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset,
+ unsigned char *dst_ptr, int dst_pitch) {
+ DECLARE_ALIGNED(16, unsigned short,
+ FData2[256]); /* Temporary data buffer used in filtering */
+ const short *HFilter, *VFilter;
+
+ if (xoffset) {
+ if (yoffset) {
+ HFilter = vp8_six_tap_x86[xoffset];
+ vp8_filter_block1d8_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2,
+ src_pixels_per_line, 1, 9, 16, HFilter);
+ VFilter = vp8_six_tap_x86[yoffset];
+ vp8_filter_block1d8_v6_sse2(FData2 + 16, dst_ptr, dst_pitch, 16, 8, 4,
+ dst_pitch, VFilter);
+ } else {
+ /* First-pass only */
+ HFilter = vp8_six_tap_x86[xoffset];
+ vp8_filter_block1d8_h6_only_sse2(src_ptr, src_pixels_per_line, dst_ptr,
+ dst_pitch, 4, HFilter);
+ }
+ } else {
+ /* Second-pass only */
+ VFilter = vp8_six_tap_x86[yoffset];
+ vp8_filter_block1d8_v6_only_sse2(src_ptr - (2 * src_pixels_per_line),
+ src_pixels_per_line, dst_ptr, dst_pitch, 4,
+ VFilter);
+ }
+}
+
+#endif
+
+#if HAVE_SSSE3
+
+extern void vp8_filter_block1d8_h6_ssse3(unsigned char *src_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned char *output_ptr,
+ unsigned int output_pitch,
+ unsigned int output_height,
+ unsigned int vp8_filter_index);
+
+extern void vp8_filter_block1d16_h6_ssse3(unsigned char *src_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned char *output_ptr,
+ unsigned int output_pitch,
+ unsigned int output_height,
+ unsigned int vp8_filter_index);
+
+extern void vp8_filter_block1d16_v6_ssse3(unsigned char *src_ptr,
+ unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ unsigned int vp8_filter_index);
+
+extern void vp8_filter_block1d8_v6_ssse3(unsigned char *src_ptr,
+ unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ unsigned int vp8_filter_index);
+
+extern void vp8_filter_block1d4_h6_ssse3(unsigned char *src_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned char *output_ptr,
+ unsigned int output_pitch,
+ unsigned int output_height,
+ unsigned int vp8_filter_index);
+
+extern void vp8_filter_block1d4_v6_ssse3(unsigned char *src_ptr,
+ unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ unsigned int vp8_filter_index);
+
+void vp8_sixtap_predict16x16_ssse3(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, unsigned char, FData2[24 * 24]);
+
+ if (xoffset) {
+ if (yoffset) {
+ vp8_filter_block1d16_h6_ssse3(src_ptr - (2 * src_pixels_per_line),
+ src_pixels_per_line, FData2, 16, 21,
+ xoffset);
+ vp8_filter_block1d16_v6_ssse3(FData2, 16, dst_ptr, dst_pitch, 16,
+ yoffset);
+ } else {
+ /* First-pass only */
+ vp8_filter_block1d16_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr,
+ dst_pitch, 16, xoffset);
+ }
+ } else {
+ if (yoffset) {
+ /* Second-pass only */
+ vp8_filter_block1d16_v6_ssse3(src_ptr - (2 * src_pixels_per_line),
+ src_pixels_per_line, dst_ptr, dst_pitch, 16,
+ yoffset);
+ } else {
+ /* The ssse3 second-pass-only function can't handle the (xoffset==0 &&
+ * yoffset==0) case correctly, so add a copy here to guarantee that the
+ * six-tap function handles all possible offsets. */
+ vp8_copy_mem16x16(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch);
+ }
+ }
+}
+
+void vp8_sixtap_predict8x8_ssse3(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, unsigned char, FData2[256]);
+
+ if (xoffset) {
+ if (yoffset) {
+ vp8_filter_block1d8_h6_ssse3(src_ptr - (2 * src_pixels_per_line),
+ src_pixels_per_line, FData2, 8, 13, xoffset);
+ vp8_filter_block1d8_v6_ssse3(FData2, 8, dst_ptr, dst_pitch, 8, yoffset);
+ } else {
+ vp8_filter_block1d8_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr,
+ dst_pitch, 8, xoffset);
+ }
+ } else {
+ if (yoffset) {
+ /* Second-pass only */
+ vp8_filter_block1d8_v6_ssse3(src_ptr - (2 * src_pixels_per_line),
+ src_pixels_per_line, dst_ptr, dst_pitch, 8,
+ yoffset);
+ } else {
+ /* The ssse3 second-pass-only function can't handle the (xoffset==0 &&
+ * yoffset==0) case correctly, so add a copy here to guarantee that the
+ * six-tap function handles all possible offsets. */
+ vp8_copy_mem8x8(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch);
+ }
+ }
+}
+
+void vp8_sixtap_predict8x4_ssse3(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, unsigned char, FData2[256]);
+
+ if (xoffset) {
+ if (yoffset) {
+ vp8_filter_block1d8_h6_ssse3(src_ptr - (2 * src_pixels_per_line),
+ src_pixels_per_line, FData2, 8, 9, xoffset);
+ vp8_filter_block1d8_v6_ssse3(FData2, 8, dst_ptr, dst_pitch, 4, yoffset);
+ } else {
+ /* First-pass only */
+ vp8_filter_block1d8_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr,
+ dst_pitch, 4, xoffset);
+ }
+ } else {
+ if (yoffset) {
+ /* Second-pass only */
+ vp8_filter_block1d8_v6_ssse3(src_ptr - (2 * src_pixels_per_line),
+ src_pixels_per_line, dst_ptr, dst_pitch, 4,
+ yoffset);
+ } else {
+ /* The ssse3 second-pass-only function can't handle the (xoffset==0 &&
+ * yoffset==0) case correctly, so add a copy here to guarantee that the
+ * six-tap function handles all possible offsets. */
+ vp8_copy_mem8x4(src_ptr, src_pixels_per_line, dst_ptr, dst_pitch);
+ }
+ }
+}
+
+void vp8_sixtap_predict4x4_ssse3(unsigned char *src_ptr,
+ int src_pixels_per_line, int xoffset,
+ int yoffset, unsigned char *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, unsigned char, FData2[4 * 9]);
+
+ if (xoffset) {
+ if (yoffset) {
+ vp8_filter_block1d4_h6_ssse3(src_ptr - (2 * src_pixels_per_line),
+ src_pixels_per_line, FData2, 4, 9, xoffset);
+ vp8_filter_block1d4_v6_ssse3(FData2, 4, dst_ptr, dst_pitch, 4, yoffset);
+ } else {
+ vp8_filter_block1d4_h6_ssse3(src_ptr, src_pixels_per_line, dst_ptr,
+ dst_pitch, 4, xoffset);
+ }
+ } else {
+ if (yoffset) {
+ vp8_filter_block1d4_v6_ssse3(src_ptr - (2 * src_pixels_per_line),
+ src_pixels_per_line, dst_ptr, dst_pitch, 4,
+ yoffset);
+ } else {
+ /* The ssse3 second-pass-only function can't handle the (xoffset==0 &&
+ * yoffset==0) case correctly, so add a copy here to guarantee that the
+ * six-tap function handles all possible offsets. */
+ int r;
+
+ for (r = 0; r < 4; ++r) {
+ dst_ptr[0] = src_ptr[0];
+ dst_ptr[1] = src_ptr[1];
+ dst_ptr[2] = src_ptr[2];
+ dst_ptr[3] = src_ptr[3];
+ dst_ptr += dst_pitch;
+ src_ptr += src_pixels_per_line;
+ }
+ }
+ }
+}
+
+#endif
diff --git a/media/libvpx/libvpx/vp8/decoder/dboolhuff.c b/media/libvpx/libvpx/vp8/decoder/dboolhuff.c
new file mode 100644
index 0000000000..11099c453c
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/dboolhuff.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "dboolhuff.h"
+#include "vp8/common/common.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+
+int vp8dx_start_decode(BOOL_DECODER *br, const unsigned char *source,
+ unsigned int source_sz, vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state) {
+ if (source_sz && !source) return 1;
+
+ // To simplify calling code, this function can be called with |source| == null
+ // and |source_sz| == 0. This and vp8dx_bool_decoder_fill() are essentially
+ // no-ops in this case.
+ // Work around a ubsan warning with a ternary to avoid adding 0 to null.
+ br->user_buffer_end = source ? source + source_sz : source;
+ br->user_buffer = source;
+ br->value = 0;
+ br->count = -8;
+ br->range = 255;
+ br->decrypt_cb = decrypt_cb;
+ br->decrypt_state = decrypt_state;
+
+ /* Populate the buffer */
+ vp8dx_bool_decoder_fill(br);
+
+ return 0;
+}
+
+void vp8dx_bool_decoder_fill(BOOL_DECODER *br) {
+ const unsigned char *bufptr = br->user_buffer;
+ VP8_BD_VALUE value = br->value;
+ int count = br->count;
+ int shift = VP8_BD_VALUE_SIZE - CHAR_BIT - (count + CHAR_BIT);
+ size_t bytes_left = br->user_buffer_end - bufptr;
+ size_t bits_left = bytes_left * CHAR_BIT;
+ int x = shift + CHAR_BIT - (int)bits_left;
+ int loop_end = 0;
+ unsigned char decrypted[sizeof(VP8_BD_VALUE) + 1];
+
+ if (br->decrypt_cb) {
+ size_t n = VPXMIN(sizeof(decrypted), bytes_left);
+ br->decrypt_cb(br->decrypt_state, bufptr, decrypted, (int)n);
+ bufptr = decrypted;
+ }
+
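+  /* If the remaining bytes cannot completely refill 'value', the stream is
+   * about to run dry: pad 'count' with VP8_LOTS_OF_BITS so vp8dx_bool_error()
+   * can later detect reads past the end, and stop the loop at bit 'x'. */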
+ if (x >= 0) {
+ count += VP8_LOTS_OF_BITS;
+ loop_end = x;
+ }
+
+ if (x < 0 || bits_left) {
+ while (shift >= loop_end) {
+ count += CHAR_BIT;
+ value |= (VP8_BD_VALUE)*bufptr << shift;
+ ++bufptr;
+ ++br->user_buffer;
+ shift -= CHAR_BIT;
+ }
+ }
+
+ br->value = value;
+ br->count = count;
+}
diff --git a/media/libvpx/libvpx/vp8/decoder/dboolhuff.h b/media/libvpx/libvpx/vp8/decoder/dboolhuff.h
new file mode 100644
index 0000000000..673b2fbd5d
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/dboolhuff.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_DECODER_DBOOLHUFF_H_
+#define VPX_VP8_DECODER_DBOOLHUFF_H_
+
+#include <stddef.h>
+#include <limits.h>
+
+#include "./vpx_config.h"
+#include "vpx_ports/compiler_attributes.h"
+#include "vpx_ports/mem.h"
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_integer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef size_t VP8_BD_VALUE;
+
+#define VP8_BD_VALUE_SIZE ((int)sizeof(VP8_BD_VALUE) * CHAR_BIT)
+
+/*This is meant to be a large, positive constant that can still be efficiently
+ loaded as an immediate (on platforms like ARM, for example).
+ Even relatively modest values like 100 would work fine.*/
+#define VP8_LOTS_OF_BITS (0x40000000)
+
+typedef struct {
+ const unsigned char *user_buffer_end;
+ const unsigned char *user_buffer;
+ VP8_BD_VALUE value;
+ int count;
+ unsigned int range;
+ vpx_decrypt_cb decrypt_cb;
+ void *decrypt_state;
+} BOOL_DECODER;
+
+DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
+
+int vp8dx_start_decode(BOOL_DECODER *br, const unsigned char *source,
+ unsigned int source_sz, vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state);
+
+void vp8dx_bool_decoder_fill(BOOL_DECODER *br);
+
+static VPX_NO_UNSIGNED_SHIFT_CHECK int vp8dx_decode_bool(BOOL_DECODER *br,
+ int probability) {
+ unsigned int bit = 0;
+ VP8_BD_VALUE value;
+ unsigned int split;
+ VP8_BD_VALUE bigsplit;
+ int count;
+ unsigned int range;
+
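+  /* Split the current range in proportion to 'probability' (out of 256); the
+   * decoded bit is 1 when 'value' falls in the upper sub-range. */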
+ split = 1 + (((br->range - 1) * probability) >> 8);
+
+ if (br->count < 0) vp8dx_bool_decoder_fill(br);
+
+ value = br->value;
+ count = br->count;
+
+ bigsplit = (VP8_BD_VALUE)split << (VP8_BD_VALUE_SIZE - 8);
+
+ range = split;
+
+ if (value >= bigsplit) {
+ range = br->range - split;
+ value = value - bigsplit;
+ bit = 1;
+ }
+
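+  /* Renormalize: vp8_norm gives the left-shift needed to bring the range
+   * byte back into [128, 255]; value and count track the same shift. */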
+ {
+ const unsigned char shift = vp8_norm[(unsigned char)range];
+ range <<= shift;
+ value <<= shift;
+ count -= shift;
+ }
+ br->value = value;
+ br->count = count;
+ br->range = range;
+
+ return bit;
+}
+
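+/* Reads a raw 'bits'-wide unsigned literal, most significant bit first; each
+ * bit is decoded with probability 128 out of 256, i.e. one half. */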
+static INLINE int vp8_decode_value(BOOL_DECODER *br, int bits) {
+ int z = 0;
+ int bit;
+
+ for (bit = bits - 1; bit >= 0; bit--) {
+ z |= (vp8dx_decode_bool(br, 0x80) << bit);
+ }
+
+ return z;
+}
+
+static INLINE int vp8dx_bool_error(BOOL_DECODER *br) {
+ /* Check if we have reached the end of the buffer.
+ *
+ * Variable 'count' stores the number of bits in the 'value' buffer, minus
+ * 8. The top byte is part of the algorithm, and the remainder is buffered
+ * to be shifted into it. So if count == 8, the top 16 bits of 'value' are
+ * occupied, 8 for the algorithm and 8 in the buffer.
+ *
+ * When reading a byte from the user's buffer, count is filled with 8 and
+ * one byte is filled into the value buffer. When we reach the end of the
+ * data, count is additionally filled with VP8_LOTS_OF_BITS. So when
+ * count == VP8_LOTS_OF_BITS - 1, the user's data has been exhausted.
+ */
+ if ((br->count > VP8_BD_VALUE_SIZE) && (br->count < VP8_LOTS_OF_BITS)) {
+ /* We have tried to decode bits after the end of
+ * stream was encountered.
+ */
+ return 1;
+ }
+
+ /* No error. */
+ return 0;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_DECODER_DBOOLHUFF_H_
diff --git a/media/libvpx/libvpx/vp8/decoder/decodeframe.c b/media/libvpx/libvpx/vp8/decoder/decodeframe.c
new file mode 100644
index 0000000000..1c1566766b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/decodeframe.c
@@ -0,0 +1,1271 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+#include "onyxd_int.h"
+#include "vp8/common/header.h"
+#include "vp8/common/reconintra4x4.h"
+#include "vp8/common/reconinter.h"
+#include "detokenize.h"
+#include "vp8/common/common.h"
+#include "vp8/common/invtrans.h"
+#include "vp8/common/alloccommon.h"
+#include "vp8/common/entropymode.h"
+#include "vp8/common/quant_common.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp8/common/reconintra.h"
+#include "vp8/common/setupintrarecon.h"
+
+#include "decodemv.h"
+#include "vp8/common/extend.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "error_concealment.h"
+#endif
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/threading.h"
+#include "decoderthreading.h"
+#include "dboolhuff.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+void vp8cx_init_de_quantizer(VP8D_COMP *pbi) {
+ int Q;
+ VP8_COMMON *const pc = &pbi->common;
+
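+  /* Build the per-Q-index dequantization tables: entry [0] holds the DC
+   * dequantization factor and entry [1] the AC factor for each plane type. */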
+ for (Q = 0; Q < QINDEX_RANGE; ++Q) {
+ pc->Y1dequant[Q][0] = (short)vp8_dc_quant(Q, pc->y1dc_delta_q);
+ pc->Y2dequant[Q][0] = (short)vp8_dc2quant(Q, pc->y2dc_delta_q);
+ pc->UVdequant[Q][0] = (short)vp8_dc_uv_quant(Q, pc->uvdc_delta_q);
+
+ pc->Y1dequant[Q][1] = (short)vp8_ac_yquant(Q);
+ pc->Y2dequant[Q][1] = (short)vp8_ac2quant(Q, pc->y2ac_delta_q);
+ pc->UVdequant[Q][1] = (short)vp8_ac_uv_quant(Q, pc->uvac_delta_q);
+ }
+}
+
+void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd) {
+ int i;
+ int QIndex;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ VP8_COMMON *const pc = &pbi->common;
+
+ /* Decide whether to use the default or alternate baseline Q value. */
+ if (xd->segmentation_enabled) {
+ /* Abs Value */
+ if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
+ QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
+
+ /* Delta Value */
+ } else {
+ QIndex = pc->base_qindex +
+ xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
+ }
+
+ QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ)
+ : 0; /* Clamp to valid range */
+ } else {
+ QIndex = pc->base_qindex;
+ }
+
+ /* Set up the macroblock dequant constants */
+ xd->dequant_y1_dc[0] = 1;
+ xd->dequant_y1[0] = pc->Y1dequant[QIndex][0];
+ xd->dequant_y2[0] = pc->Y2dequant[QIndex][0];
+ xd->dequant_uv[0] = pc->UVdequant[QIndex][0];
+
+ for (i = 1; i < 16; ++i) {
+ xd->dequant_y1_dc[i] = xd->dequant_y1[i] = pc->Y1dequant[QIndex][1];
+ xd->dequant_y2[i] = pc->Y2dequant[QIndex][1];
+ xd->dequant_uv[i] = pc->UVdequant[QIndex][1];
+ }
+}
+
+static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
+ unsigned int mb_idx) {
+ MB_PREDICTION_MODE mode;
+ int i;
+#if CONFIG_ERROR_CONCEALMENT
+ int corruption_detected = 0;
+#else
+ (void)mb_idx;
+#endif
+
+ if (xd->mode_info_context->mbmi.mb_skip_coeff) {
+ vp8_reset_mb_tokens_context(xd);
+ } else if (!vp8dx_bool_error(xd->current_bc)) {
+ int eobtotal;
+ eobtotal = vp8_decode_mb_tokens(pbi, xd);
+
+ /* Special case: Force the loopfilter to skip when eobtotal is zero */
+ xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal == 0);
+ }
+
+ mode = xd->mode_info_context->mbmi.mode;
+
+ if (xd->segmentation_enabled) vp8_mb_init_dequantizer(pbi, xd);
+
+#if CONFIG_ERROR_CONCEALMENT
+
+ if (pbi->ec_active) {
+ int throw_residual;
+ /* When we have independent partitions we can apply residual even
+ * though other partitions within the frame are corrupt.
+ */
+ throw_residual =
+ (!pbi->independent_partitions && pbi->frame_corrupt_residual);
+ throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));
+
+ if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) {
+ /* MB with corrupt residuals or corrupt mode/motion vectors.
+ * Better to use the predictor as reconstruction.
+ */
+ pbi->frame_corrupt_residual = 1;
+ memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
+
+ corruption_detected = 1;
+
+ /* force idct to be skipped for B_PRED and use the
+ * prediction only for reconstruction
+ * */
+ memset(xd->eobs, 0, 25);
+ }
+ }
+#endif
+
+ /* do prediction */
+ if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
+ vp8_build_intra_predictors_mbuv_s(
+ xd, xd->recon_above[1], xd->recon_above[2], xd->recon_left[1],
+ xd->recon_left[2], xd->recon_left_stride[1], xd->dst.u_buffer,
+ xd->dst.v_buffer, xd->dst.uv_stride);
+
+ if (mode != B_PRED) {
+ vp8_build_intra_predictors_mby_s(
+ xd, xd->recon_above[0], xd->recon_left[0], xd->recon_left_stride[0],
+ xd->dst.y_buffer, xd->dst.y_stride);
+ } else {
+ short *DQC = xd->dequant_y1;
+ int dst_stride = xd->dst.y_stride;
+
+ /* clear out residual eob info */
+ if (xd->mode_info_context->mbmi.mb_skip_coeff) memset(xd->eobs, 0, 25);
+
+ intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
+
+ for (i = 0; i < 16; ++i) {
+ BLOCKD *b = &xd->block[i];
+ unsigned char *dst = xd->dst.y_buffer + b->offset;
+ B_PREDICTION_MODE b_mode = xd->mode_info_context->bmi[i].as_mode;
+ unsigned char *Above = dst - dst_stride;
+ unsigned char *yleft = dst - 1;
+ int left_stride = dst_stride;
+ unsigned char top_left = Above[-1];
+
+ vp8_intra4x4_predict(Above, yleft, left_stride, b_mode, dst, dst_stride,
+ top_left);
+
+ if (xd->eobs[i]) {
+ if (xd->eobs[i] > 1) {
+ vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride);
+ } else {
+ vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0], dst, dst_stride, dst,
+ dst_stride);
+ memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
+ }
+ }
+ }
+ }
+ } else {
+ vp8_build_inter_predictors_mb(xd);
+ }
+
+#if CONFIG_ERROR_CONCEALMENT
+ if (corruption_detected) {
+ return;
+ }
+#endif
+
+ if (!xd->mode_info_context->mbmi.mb_skip_coeff) {
+ /* dequantization and idct */
+ if (mode != B_PRED) {
+ short *DQC = xd->dequant_y1;
+
+ if (mode != SPLITMV) {
+ BLOCKD *b = &xd->block[24];
+
+ /* do 2nd order transform on the dc block */
+ if (xd->eobs[24] > 1) {
+ vp8_dequantize_b(b, xd->dequant_y2);
+
+ vp8_short_inv_walsh4x4(&b->dqcoeff[0], xd->qcoeff);
+ memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
+ } else {
+ b->dqcoeff[0] = (short)(b->qcoeff[0] * xd->dequant_y2[0]);
+ vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], xd->qcoeff);
+ memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
+ }
+
+ /* override the dc dequant constant in order to preserve the
+ * dc components
+ */
+ DQC = xd->dequant_y1_dc;
+ }
+
+ vp8_dequant_idct_add_y_block(xd->qcoeff, DQC, xd->dst.y_buffer,
+ xd->dst.y_stride, xd->eobs);
+ }
+
+ vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
+ xd->dst.u_buffer, xd->dst.v_buffer,
+ xd->dst.uv_stride, xd->eobs + 16);
+ }
+}
+
+static int get_delta_q(vp8_reader *bc, int prev, int *q_update) {
+ int ret_val = 0;
+
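+  /* A delta-q update is coded as a flag bit, a 4-bit magnitude, and a sign
+   * bit. */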
+ if (vp8_read_bit(bc)) {
+ ret_val = vp8_read_literal(bc, 4);
+
+ if (vp8_read_bit(bc)) ret_val = -ret_val;
+ }
+
+ /* Trigger a quantizer update if the delta-q value has changed */
+ if (ret_val != prev) *q_update = 1;
+
+ return ret_val;
+}
+
+#ifdef PACKET_TESTING
+#include <stdio.h>
+FILE *vpxlog = 0;
+#endif
+
+static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf) {
+ int i;
+ unsigned char *src_ptr1;
+ unsigned char *dest_ptr1;
+
+ unsigned int Border;
+ int plane_stride;
+
+ /***********/
+ /* Y Plane */
+ /***********/
+ Border = ybf->border;
+ plane_stride = ybf->y_stride;
+ src_ptr1 = ybf->y_buffer - Border;
+ dest_ptr1 = src_ptr1 - (Border * plane_stride);
+
+ for (i = 0; i < (int)Border; ++i) {
+ memcpy(dest_ptr1, src_ptr1, plane_stride);
+ dest_ptr1 += plane_stride;
+ }
+
+ /***********/
+ /* U Plane */
+ /***********/
+ plane_stride = ybf->uv_stride;
+ Border /= 2;
+ src_ptr1 = ybf->u_buffer - Border;
+ dest_ptr1 = src_ptr1 - (Border * plane_stride);
+
+ for (i = 0; i < (int)(Border); ++i) {
+ memcpy(dest_ptr1, src_ptr1, plane_stride);
+ dest_ptr1 += plane_stride;
+ }
+
+ /***********/
+ /* V Plane */
+ /***********/
+
+ src_ptr1 = ybf->v_buffer - Border;
+ dest_ptr1 = src_ptr1 - (Border * plane_stride);
+
+ for (i = 0; i < (int)(Border); ++i) {
+ memcpy(dest_ptr1, src_ptr1, plane_stride);
+ dest_ptr1 += plane_stride;
+ }
+}
+
+static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf) {
+ int i;
+ unsigned char *src_ptr1, *src_ptr2;
+ unsigned char *dest_ptr2;
+
+ unsigned int Border;
+ int plane_stride;
+ int plane_height;
+
+ /***********/
+ /* Y Plane */
+ /***********/
+ Border = ybf->border;
+ plane_stride = ybf->y_stride;
+ plane_height = ybf->y_height;
+
+ src_ptr1 = ybf->y_buffer - Border;
+ src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
+ dest_ptr2 = src_ptr2 + plane_stride;
+
+ for (i = 0; i < (int)Border; ++i) {
+ memcpy(dest_ptr2, src_ptr2, plane_stride);
+ dest_ptr2 += plane_stride;
+ }
+
+ /***********/
+ /* U Plane */
+ /***********/
+ plane_stride = ybf->uv_stride;
+ plane_height = ybf->uv_height;
+ Border /= 2;
+
+ src_ptr1 = ybf->u_buffer - Border;
+ src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
+ dest_ptr2 = src_ptr2 + plane_stride;
+
+ for (i = 0; i < (int)(Border); ++i) {
+ memcpy(dest_ptr2, src_ptr2, plane_stride);
+ dest_ptr2 += plane_stride;
+ }
+
+ /***********/
+ /* V Plane */
+ /***********/
+
+ src_ptr1 = ybf->v_buffer - Border;
+ src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
+ dest_ptr2 = src_ptr2 + plane_stride;
+
+ for (i = 0; i < (int)(Border); ++i) {
+ memcpy(dest_ptr2, src_ptr2, plane_stride);
+ dest_ptr2 += plane_stride;
+ }
+}
+
+static void yv12_extend_frame_left_right_c(YV12_BUFFER_CONFIG *ybf,
+ unsigned char *y_src,
+ unsigned char *u_src,
+ unsigned char *v_src) {
+ int i;
+ unsigned char *src_ptr1, *src_ptr2;
+ unsigned char *dest_ptr1, *dest_ptr2;
+
+ unsigned int Border;
+ int plane_stride;
+ int plane_height;
+ int plane_width;
+
+ /***********/
+ /* Y Plane */
+ /***********/
+ Border = ybf->border;
+ plane_stride = ybf->y_stride;
+ plane_height = 16;
+ plane_width = ybf->y_width;
+
+ /* copy the left and right most columns out */
+ src_ptr1 = y_src;
+ src_ptr2 = src_ptr1 + plane_width - 1;
+ dest_ptr1 = src_ptr1 - Border;
+ dest_ptr2 = src_ptr2 + 1;
+
+ for (i = 0; i < plane_height; ++i) {
+ memset(dest_ptr1, src_ptr1[0], Border);
+ memset(dest_ptr2, src_ptr2[0], Border);
+ src_ptr1 += plane_stride;
+ src_ptr2 += plane_stride;
+ dest_ptr1 += plane_stride;
+ dest_ptr2 += plane_stride;
+ }
+
+ /***********/
+ /* U Plane */
+ /***********/
+ plane_stride = ybf->uv_stride;
+ plane_height = 8;
+ plane_width = ybf->uv_width;
+ Border /= 2;
+
+ /* copy the left and right most columns out */
+ src_ptr1 = u_src;
+ src_ptr2 = src_ptr1 + plane_width - 1;
+ dest_ptr1 = src_ptr1 - Border;
+ dest_ptr2 = src_ptr2 + 1;
+
+ for (i = 0; i < plane_height; ++i) {
+ memset(dest_ptr1, src_ptr1[0], Border);
+ memset(dest_ptr2, src_ptr2[0], Border);
+ src_ptr1 += plane_stride;
+ src_ptr2 += plane_stride;
+ dest_ptr1 += plane_stride;
+ dest_ptr2 += plane_stride;
+ }
+
+ /***********/
+ /* V Plane */
+ /***********/
+
+ /* copy the left and right most columns out */
+ src_ptr1 = v_src;
+ src_ptr2 = src_ptr1 + plane_width - 1;
+ dest_ptr1 = src_ptr1 - Border;
+ dest_ptr2 = src_ptr2 + 1;
+
+ for (i = 0; i < plane_height; ++i) {
+ memset(dest_ptr1, src_ptr1[0], Border);
+ memset(dest_ptr2, src_ptr2[0], Border);
+ src_ptr1 += plane_stride;
+ src_ptr2 += plane_stride;
+ dest_ptr1 += plane_stride;
+ dest_ptr2 += plane_stride;
+ }
+}
+
+static void decode_mb_rows(VP8D_COMP *pbi) {
+ VP8_COMMON *const pc = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ MODE_INFO *lf_mic = xd->mode_info_context;
+
+ int ibc = 0;
+ int num_part = 1 << pc->multi_token_partition;
+
+ int recon_yoffset, recon_uvoffset;
+ int mb_row, mb_col;
+ int mb_idx = 0;
+
+ YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];
+
+ int recon_y_stride = yv12_fb_new->y_stride;
+ int recon_uv_stride = yv12_fb_new->uv_stride;
+
+ unsigned char *ref_buffer[MAX_REF_FRAMES][3];
+ unsigned char *dst_buffer[3];
+ unsigned char *lf_dst[3];
+ unsigned char *eb_dst[3];
+ int i;
+ int ref_fb_corrupted[MAX_REF_FRAMES];
+
+ ref_fb_corrupted[INTRA_FRAME] = 0;
+
+ for (i = 1; i < MAX_REF_FRAMES; ++i) {
+ YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i];
+
+ ref_buffer[i][0] = this_fb->y_buffer;
+ ref_buffer[i][1] = this_fb->u_buffer;
+ ref_buffer[i][2] = this_fb->v_buffer;
+
+ ref_fb_corrupted[i] = this_fb->corrupted;
+ }
+
+ /* Set up the buffer pointers */
+ eb_dst[0] = lf_dst[0] = dst_buffer[0] = yv12_fb_new->y_buffer;
+ eb_dst[1] = lf_dst[1] = dst_buffer[1] = yv12_fb_new->u_buffer;
+ eb_dst[2] = lf_dst[2] = dst_buffer[2] = yv12_fb_new->v_buffer;
+
+ xd->up_available = 0;
+
+ /* Initialize the loop filter for this frame. */
+ if (pc->filter_level) vp8_loop_filter_frame_init(pc, xd, pc->filter_level);
+
+ vp8_setup_intra_recon_top_line(yv12_fb_new);
+
+ /* Decode the individual macro block */
+ for (mb_row = 0; mb_row < pc->mb_rows; ++mb_row) {
+ if (num_part > 1) {
+ xd->current_bc = &pbi->mbc[ibc];
+ ibc++;
+
+ if (ibc == num_part) ibc = 0;
+ }
+
+ recon_yoffset = mb_row * recon_y_stride * 16;
+ recon_uvoffset = mb_row * recon_uv_stride * 8;
+
+ /* reset contexts */
+ xd->above_context = pc->above_context;
+ memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ xd->left_available = 0;
+
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
+ xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
+
+ xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
+ xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
+ xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;
+
+ xd->recon_left[0] = xd->recon_above[0] - 1;
+ xd->recon_left[1] = xd->recon_above[1] - 1;
+ xd->recon_left[2] = xd->recon_above[2] - 1;
+
+ xd->recon_above[0] -= xd->dst.y_stride;
+ xd->recon_above[1] -= xd->dst.uv_stride;
+ xd->recon_above[2] -= xd->dst.uv_stride;
+
+ /* TODO: move to outside row loop */
+ xd->recon_left_stride[0] = xd->dst.y_stride;
+ xd->recon_left_stride[1] = xd->dst.uv_stride;
+
+ setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1],
+ xd->recon_left[2], xd->dst.y_stride,
+ xd->dst.uv_stride);
+
+ for (mb_col = 0; mb_col < pc->mb_cols; ++mb_col) {
+ /* Distance of Mb to the various image edges.
+ * These are specified to 8th pel as they are always compared to values
+ * that are in 1/8th pel units
+ */
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
+
+#if CONFIG_ERROR_CONCEALMENT
+ {
+ int corrupt_residual =
+ (!pbi->independent_partitions && pbi->frame_corrupt_residual) ||
+ vp8dx_bool_error(xd->current_bc);
+ if (pbi->ec_active &&
+ xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
+ corrupt_residual) {
+ /* We have an intra block with corrupt coefficients, better to
+ * conceal with an inter block. Interpolate MVs from neighboring
+ * MBs.
+ *
+ * Note that for the first mb with corrupt residual in a frame,
+ * we might not discover that before decoding the residual. That
+ * happens after this check, and therefore no inter concealment
+ * will be done.
+ */
+ vp8_interpolate_motion(xd, mb_row, mb_col, pc->mb_rows, pc->mb_cols);
+ }
+ }
+#endif
+
+ xd->dst.y_buffer = dst_buffer[0] + recon_yoffset;
+ xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
+ xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;
+
+ if (xd->mode_info_context->mbmi.ref_frame >= LAST_FRAME) {
+ const MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame;
+ xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset;
+ xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset;
+ xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset;
+ } else {
+ // ref_frame is INTRA_FRAME, pre buffer should not be used.
+ xd->pre.y_buffer = 0;
+ xd->pre.u_buffer = 0;
+ xd->pre.v_buffer = 0;
+ }
+
+ /* propagate errors from reference frames */
+ xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];
+
+ decode_macroblock(pbi, xd, mb_idx);
+
+ mb_idx++;
+ xd->left_available = 1;
+
+ /* check if the boolean decoder has suffered an error */
+ xd->corrupted |= vp8dx_bool_error(xd->current_bc);
+
+ xd->recon_above[0] += 16;
+ xd->recon_above[1] += 8;
+ xd->recon_above[2] += 8;
+ xd->recon_left[0] += 16;
+ xd->recon_left[1] += 8;
+ xd->recon_left[2] += 8;
+
+ recon_yoffset += 16;
+ recon_uvoffset += 8;
+
+ ++xd->mode_info_context; /* next mb */
+
+ xd->above_context++;
+ }
+
+ /* adjust to the next row of mbs */
+ vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8,
+ xd->dst.v_buffer + 8);
+
+ ++xd->mode_info_context; /* skip prediction column */
+ xd->up_available = 1;
+
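+    /* Loop filtering and border extension deliberately lag decoding by one
+     * row (two rows for extension), so the pixels they read below each row
+     * are already reconstructed; the final rows are flushed after the loop. */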
+ if (pc->filter_level) {
+ if (mb_row > 0) {
+ if (pc->filter_type == NORMAL_LOOPFILTER) {
+ vp8_loop_filter_row_normal(pc, lf_mic, mb_row - 1, recon_y_stride,
+ recon_uv_stride, lf_dst[0], lf_dst[1],
+ lf_dst[2]);
+ } else {
+ vp8_loop_filter_row_simple(pc, lf_mic, mb_row - 1, recon_y_stride,
+ lf_dst[0]);
+ }
+ if (mb_row > 1) {
+ yv12_extend_frame_left_right_c(yv12_fb_new, eb_dst[0], eb_dst[1],
+ eb_dst[2]);
+
+ eb_dst[0] += recon_y_stride * 16;
+ eb_dst[1] += recon_uv_stride * 8;
+ eb_dst[2] += recon_uv_stride * 8;
+ }
+
+ lf_dst[0] += recon_y_stride * 16;
+ lf_dst[1] += recon_uv_stride * 8;
+ lf_dst[2] += recon_uv_stride * 8;
+ lf_mic += pc->mb_cols;
+ lf_mic++; /* Skip border mb */
+ }
+ } else {
+ if (mb_row > 0) {
+ yv12_extend_frame_left_right_c(yv12_fb_new, eb_dst[0], eb_dst[1],
+ eb_dst[2]);
+ eb_dst[0] += recon_y_stride * 16;
+ eb_dst[1] += recon_uv_stride * 8;
+ eb_dst[2] += recon_uv_stride * 8;
+ }
+ }
+ }
+
+ if (pc->filter_level) {
+ if (pc->filter_type == NORMAL_LOOPFILTER) {
+ vp8_loop_filter_row_normal(pc, lf_mic, mb_row - 1, recon_y_stride,
+ recon_uv_stride, lf_dst[0], lf_dst[1],
+ lf_dst[2]);
+ } else {
+ vp8_loop_filter_row_simple(pc, lf_mic, mb_row - 1, recon_y_stride,
+ lf_dst[0]);
+ }
+
+ yv12_extend_frame_left_right_c(yv12_fb_new, eb_dst[0], eb_dst[1],
+ eb_dst[2]);
+ eb_dst[0] += recon_y_stride * 16;
+ eb_dst[1] += recon_uv_stride * 8;
+ eb_dst[2] += recon_uv_stride * 8;
+ }
+ yv12_extend_frame_left_right_c(yv12_fb_new, eb_dst[0], eb_dst[1], eb_dst[2]);
+ yv12_extend_frame_top_c(yv12_fb_new);
+ yv12_extend_frame_bottom_c(yv12_fb_new);
+}
+
+static unsigned int read_partition_size(VP8D_COMP *pbi,
+ const unsigned char *cx_size) {
+ unsigned char temp[3];
+ if (pbi->decrypt_cb) {
+ pbi->decrypt_cb(pbi->decrypt_state, cx_size, temp, 3);
+ cx_size = temp;
+ }
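+  /* The partition size is stored as a 24-bit little-endian value. */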
+ return cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16);
+}
+
+static int read_is_valid(const unsigned char *start, size_t len,
+ const unsigned char *end) {
+ return len != 0 && end > start && len <= (size_t)(end - start);
+}
+
+static unsigned int read_available_partition_size(
+ VP8D_COMP *pbi, const unsigned char *token_part_sizes,
+ const unsigned char *fragment_start,
+ const unsigned char *first_fragment_end, const unsigned char *fragment_end,
+ int i, int num_part) {
+ VP8_COMMON *pc = &pbi->common;
+ const unsigned char *partition_size_ptr = token_part_sizes + i * 3;
+ unsigned int partition_size = 0;
+ ptrdiff_t bytes_left = fragment_end - fragment_start;
+ if (bytes_left < 0) {
+ vpx_internal_error(
+ &pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt partition. No bytes left %d.",
+ (int)bytes_left);
+ }
+ /* Calculate the length of this partition. The last partition
+ * size is implicit. If the partition size can't be read, then
+ * either use the remaining data in the buffer (for EC mode)
+ * or throw an error.
+ */
+ if (i < num_part - 1) {
+ if (read_is_valid(partition_size_ptr, 3, first_fragment_end)) {
+ partition_size = read_partition_size(pbi, partition_size_ptr);
+ } else if (pbi->ec_active) {
+ partition_size = (unsigned int)bytes_left;
+ } else {
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated partition size data");
+ }
+ } else {
+ partition_size = (unsigned int)bytes_left;
+ }
+
+ /* Validate the calculated partition length. If the buffer
+ * described by the partition can't be fully read, then restrict
+ * it to the portion that can be (for EC mode) or throw an error.
+ */
+ if (!read_is_valid(fragment_start, partition_size, fragment_end)) {
+ if (pbi->ec_active) {
+ partition_size = (unsigned int)bytes_left;
+ } else {
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt partition "
+ "%d length",
+ i + 1);
+ }
+ }
+ return partition_size;
+}
+
+static void setup_token_decoder(VP8D_COMP *pbi,
+ const unsigned char *token_part_sizes) {
+ vp8_reader *bool_decoder = &pbi->mbc[0];
+ unsigned int partition_idx;
+ unsigned int fragment_idx;
+ unsigned int num_token_partitions;
+ const unsigned char *first_fragment_end =
+ pbi->fragments.ptrs[0] + pbi->fragments.sizes[0];
+
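+  /* pbi->mbc[8] is the bool decoder for the first (header/mode) partition;
+   * mbc[0..7] are initialized below, one per token partition. */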
+ TOKEN_PARTITION multi_token_partition =
+ (TOKEN_PARTITION)vp8_read_literal(&pbi->mbc[8], 2);
+ if (!vp8dx_bool_error(&pbi->mbc[8])) {
+ pbi->common.multi_token_partition = multi_token_partition;
+ }
+ num_token_partitions = 1 << pbi->common.multi_token_partition;
+
+ /* Check for partitions within the fragments and unpack the fragments
+ * so that each fragment pointer points to its corresponding partition. */
+ for (fragment_idx = 0; fragment_idx < pbi->fragments.count; ++fragment_idx) {
+ unsigned int fragment_size = pbi->fragments.sizes[fragment_idx];
+ const unsigned char *fragment_end =
+ pbi->fragments.ptrs[fragment_idx] + fragment_size;
+ /* Special case for handling the first partition since we have already
+ * read its size. */
+ if (fragment_idx == 0) {
+ /* Size of first partition + token partition sizes element */
+ ptrdiff_t ext_first_part_size = token_part_sizes -
+ pbi->fragments.ptrs[0] +
+ 3 * (num_token_partitions - 1);
+ if (fragment_size < (unsigned int)ext_first_part_size)
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME,
+ "Corrupted fragment size %d", fragment_size);
+ fragment_size -= (unsigned int)ext_first_part_size;
+ if (fragment_size > 0) {
+ pbi->fragments.sizes[0] = (unsigned int)ext_first_part_size;
+ /* The fragment contains an additional partition. Move to
+ * next. */
+ fragment_idx++;
+ pbi->fragments.ptrs[fragment_idx] =
+ pbi->fragments.ptrs[0] + pbi->fragments.sizes[0];
+ }
+ }
+ /* Split the chunk into partitions read from the bitstream */
+ while (fragment_size > 0) {
+ ptrdiff_t partition_size = read_available_partition_size(
+ pbi, token_part_sizes, pbi->fragments.ptrs[fragment_idx],
+ first_fragment_end, fragment_end, fragment_idx - 1,
+ num_token_partitions);
+ pbi->fragments.sizes[fragment_idx] = (unsigned int)partition_size;
+ if (fragment_size < (unsigned int)partition_size)
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME,
+ "Corrupted fragment size %d", fragment_size);
+ fragment_size -= (unsigned int)partition_size;
+ assert(fragment_idx <= num_token_partitions);
+ if (fragment_size > 0) {
+ /* The fragment contains an additional partition.
+ * Move to next. */
+ fragment_idx++;
+ pbi->fragments.ptrs[fragment_idx] =
+ pbi->fragments.ptrs[fragment_idx - 1] + partition_size;
+ }
+ }
+ }
+
+ pbi->fragments.count = num_token_partitions + 1;
+
+ for (partition_idx = 1; partition_idx < pbi->fragments.count;
+ ++partition_idx) {
+ if (vp8dx_start_decode(bool_decoder, pbi->fragments.ptrs[partition_idx],
+ pbi->fragments.sizes[partition_idx], pbi->decrypt_cb,
+ pbi->decrypt_state)) {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate bool decoder %d", partition_idx);
+ }
+
+ bool_decoder++;
+ }
+
+#if CONFIG_MULTITHREAD
+ /* Clamp number of decoder threads */
+ if (pbi->decoding_thread_count > num_token_partitions - 1) {
+ pbi->decoding_thread_count = num_token_partitions - 1;
+ }
+ if ((int)pbi->decoding_thread_count > pbi->common.mb_rows - 1) {
+ assert(pbi->common.mb_rows > 0);
+ pbi->decoding_thread_count = pbi->common.mb_rows - 1;
+ }
+#endif
+}
+
+static void init_frame(VP8D_COMP *pbi) {
+ VP8_COMMON *const pc = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ if (pc->frame_type == KEY_FRAME) {
+ /* Various keyframe initializations */
+ memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+
+ vp8_init_mbmode_probs(pc);
+
+ vp8_default_coef_probs(pc);
+
+    /* Reset the segment feature data to 0, with delta coding as the default
+     * state. */
+ memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
+ xd->mb_segement_abs_delta = SEGMENT_DELTADATA;
+
+    /* reset the mode ref deltas for the loop filter */
+ memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
+ memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
+
+ /* All buffers are implicitly updated on key frames. */
+ pc->refresh_golden_frame = 1;
+ pc->refresh_alt_ref_frame = 1;
+ pc->copy_buffer_to_gf = 0;
+ pc->copy_buffer_to_arf = 0;
+
+ /* Note that Golden and Altref modes cannot be used on a key frame so
+ * ref_frame_sign_bias[] is undefined and meaningless
+ */
+ pc->ref_frame_sign_bias[GOLDEN_FRAME] = 0;
+ pc->ref_frame_sign_bias[ALTREF_FRAME] = 0;
+ } else {
+    /* To enable choice of different interpolation filters */
+ if (!pc->use_bilinear_mc_filter) {
+ xd->subpixel_predict = vp8_sixtap_predict4x4;
+ xd->subpixel_predict8x4 = vp8_sixtap_predict8x4;
+ xd->subpixel_predict8x8 = vp8_sixtap_predict8x8;
+ xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
+ } else {
+ xd->subpixel_predict = vp8_bilinear_predict4x4;
+ xd->subpixel_predict8x4 = vp8_bilinear_predict8x4;
+ xd->subpixel_predict8x8 = vp8_bilinear_predict8x8;
+ xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
+ }
+
+ if (pbi->decoded_key_frame && pbi->ec_enabled && !pbi->ec_active) {
+ pbi->ec_active = 1;
+ }
+ }
+
+ xd->left_context = &pc->left_context;
+ xd->mode_info_context = pc->mi;
+ xd->frame_type = pc->frame_type;
+ xd->mode_info_context->mbmi.mode = DC_PRED;
+ xd->mode_info_stride = pc->mode_info_stride;
+ xd->corrupted = 0; /* init without corruption */
+
+ xd->fullpixel_mask = ~0;
+ if (pc->full_pixel) xd->fullpixel_mask = ~7;
+}
+
+int vp8_decode_frame(VP8D_COMP *pbi) {
+ vp8_reader *const bc = &pbi->mbc[8];
+ VP8_COMMON *const pc = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ const unsigned char *data = pbi->fragments.ptrs[0];
+ const unsigned int data_sz = pbi->fragments.sizes[0];
+ const unsigned char *data_end = data + data_sz;
+ ptrdiff_t first_partition_length_in_bytes;
+
+ int i, j, k, l;
+ const int *const mb_feature_data_bits = vp8_mb_feature_data_bits;
+ int corrupt_tokens = 0;
+ int prev_independent_partitions = pbi->independent_partitions;
+
+ YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];
+
+ /* start with no corruption of current frame */
+ xd->corrupted = 0;
+ yv12_fb_new->corrupted = 0;
+
+ if (data_end - data < 3) {
+ if (!pbi->ec_active) {
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet");
+ }
+
+ /* Declare the missing frame as an inter frame since it will
+ be handled as an inter frame when we have estimated its
+ motion vectors. */
+ pc->frame_type = INTER_FRAME;
+ pc->version = 0;
+ pc->show_frame = 1;
+ first_partition_length_in_bytes = 0;
+ } else {
+ unsigned char clear_buffer[10];
+ const unsigned char *clear = data;
+ if (pbi->decrypt_cb) {
+ int n = (int)VPXMIN(sizeof(clear_buffer), data_sz);
+ pbi->decrypt_cb(pbi->decrypt_state, data, clear_buffer, n);
+ clear = clear_buffer;
+ }
+
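+    /* Uncompressed 3-byte frame tag (RFC 6386, section 9.1): bit 0 is the
+     * frame type (0 = key frame), bits 1-3 the version, bit 4 the show_frame
+     * flag, and bits 5-23 the size of the first partition. */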
+ pc->frame_type = (FRAME_TYPE)(clear[0] & 1);
+ pc->version = (clear[0] >> 1) & 7;
+ pc->show_frame = (clear[0] >> 4) & 1;
+ first_partition_length_in_bytes =
+ (clear[0] | (clear[1] << 8) | (clear[2] << 16)) >> 5;
+
+ if (!pbi->ec_active && (data + first_partition_length_in_bytes > data_end ||
+ data + first_partition_length_in_bytes < data)) {
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt partition 0 length");
+ }
+
+ data += 3;
+ clear += 3;
+
+ vp8_setup_version(pc);
+
+ if (pc->frame_type == KEY_FRAME) {
+ /* vet via sync code */
+ /* When error concealment is enabled we should only check the sync
+ * code if we have enough bits available
+ */
+ if (data + 3 < data_end) {
+ if (clear[0] != 0x9d || clear[1] != 0x01 || clear[2] != 0x2a) {
+ vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame sync code");
+ }
+ }
+
+ /* If error concealment is enabled we should only parse the new size
+ * if we have enough data. Otherwise we will end up with the wrong
+ * size.
+ */
+ if (data + 6 < data_end) {
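+        /* Width and height are each a 14-bit value followed by a 2-bit
+         * upscale factor. */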
+ pc->Width = (clear[3] | (clear[4] << 8)) & 0x3fff;
+ pc->horiz_scale = clear[4] >> 6;
+ pc->Height = (clear[5] | (clear[6] << 8)) & 0x3fff;
+ pc->vert_scale = clear[6] >> 6;
+ data += 7;
+ } else if (!pbi->ec_active) {
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated key frame header");
+ } else {
+ /* Error concealment is active, clear the frame. */
+ data = data_end;
+ }
+ } else {
+ memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
+ memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
+ }
+ }
+ if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME)) {
+ return -1;
+ }
+
+ init_frame(pbi);
+
+ if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data),
+ pbi->decrypt_cb, pbi->decrypt_state)) {
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate bool decoder 0");
+ }
+ if (pc->frame_type == KEY_FRAME) {
+ (void)vp8_read_bit(bc); // colorspace
+ pc->clamp_type = (CLAMP_TYPE)vp8_read_bit(bc);
+ }
+
+ /* Is segmentation enabled */
+ xd->segmentation_enabled = (unsigned char)vp8_read_bit(bc);
+
+ if (xd->segmentation_enabled) {
+ /* Signal whether or not the segmentation map is being explicitly updated
+ * this frame. */
+ xd->update_mb_segmentation_map = (unsigned char)vp8_read_bit(bc);
+ xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc);
+
+ if (xd->update_mb_segmentation_data) {
+ xd->mb_segement_abs_delta = (unsigned char)vp8_read_bit(bc);
+
+ memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
+
+ /* For each segmentation feature (Quant and loop filter level) */
+ for (i = 0; i < MB_LVL_MAX; ++i) {
+ for (j = 0; j < MAX_MB_SEGMENTS; ++j) {
+ /* Frame level data */
+ if (vp8_read_bit(bc)) {
+ xd->segment_feature_data[i][j] =
+ (signed char)vp8_read_literal(bc, mb_feature_data_bits[i]);
+
+ if (vp8_read_bit(bc)) {
+ xd->segment_feature_data[i][j] = -xd->segment_feature_data[i][j];
+ }
+ } else {
+ xd->segment_feature_data[i][j] = 0;
+ }
+ }
+ }
+ }
+
+ if (xd->update_mb_segmentation_map) {
+ /* Which macro block level features are enabled */
+ memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
+
+ /* Read the probs used to decode the segment id for each macro block. */
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
+        /* If not explicitly set, the value defaults to 255 via the memset
+         * above */
+ if (vp8_read_bit(bc)) {
+ xd->mb_segment_tree_probs[i] = (vp8_prob)vp8_read_literal(bc, 8);
+ }
+ }
+ }
+ } else {
+ /* No segmentation updates on this frame */
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ }
+
+ /* Read the loop filter level and type */
+ pc->filter_type = (LOOPFILTERTYPE)vp8_read_bit(bc);
+ pc->filter_level = vp8_read_literal(bc, 6);
+ pc->sharpness_level = vp8_read_literal(bc, 3);
+
+ /* Read in loop filter deltas applied at the MB level based on mode or ref
+ * frame. */
+ xd->mode_ref_lf_delta_update = 0;
+ xd->mode_ref_lf_delta_enabled = (unsigned char)vp8_read_bit(bc);
+
+ if (xd->mode_ref_lf_delta_enabled) {
+ /* Do the deltas need to be updated */
+ xd->mode_ref_lf_delta_update = (unsigned char)vp8_read_bit(bc);
+
+ if (xd->mode_ref_lf_delta_update) {
+ /* Send update */
+ for (i = 0; i < MAX_REF_LF_DELTAS; ++i) {
+ if (vp8_read_bit(bc)) {
+ /*sign = vp8_read_bit( bc );*/
+ xd->ref_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6);
+
+ if (vp8_read_bit(bc)) { /* Apply sign */
+ xd->ref_lf_deltas[i] = xd->ref_lf_deltas[i] * -1;
+ }
+ }
+ }
+
+ /* Send update */
+ for (i = 0; i < MAX_MODE_LF_DELTAS; ++i) {
+ if (vp8_read_bit(bc)) {
+ /*sign = vp8_read_bit( bc );*/
+ xd->mode_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6);
+
+ if (vp8_read_bit(bc)) { /* Apply sign */
+ xd->mode_lf_deltas[i] = xd->mode_lf_deltas[i] * -1;
+ }
+ }
+ }
+ }
+ }
+
+ setup_token_decoder(pbi, data + first_partition_length_in_bytes);
+
+ xd->current_bc = &pbi->mbc[0];
+
+ /* Read the default quantizers. */
+ {
+ int Q, q_update;
+
+ Q = vp8_read_literal(bc, 7); /* AC 1st order Q = default */
+ pc->base_qindex = Q;
+ q_update = 0;
+ pc->y1dc_delta_q = get_delta_q(bc, pc->y1dc_delta_q, &q_update);
+ pc->y2dc_delta_q = get_delta_q(bc, pc->y2dc_delta_q, &q_update);
+ pc->y2ac_delta_q = get_delta_q(bc, pc->y2ac_delta_q, &q_update);
+ pc->uvdc_delta_q = get_delta_q(bc, pc->uvdc_delta_q, &q_update);
+ pc->uvac_delta_q = get_delta_q(bc, pc->uvac_delta_q, &q_update);
+
+ if (q_update) vp8cx_init_de_quantizer(pbi);
+
+ /* MB level dequantizer setup */
+ vp8_mb_init_dequantizer(pbi, &pbi->mb);
+ }
+
+ /* Determine if the golden frame or ARF buffer should be updated and how.
+ * For all non key frames the GF and ARF refresh flags and sign bias
+ * flags must be set explicitly.
+ */
+ if (pc->frame_type != KEY_FRAME) {
+ /* Should the GF or ARF be updated from the current frame */
+ pc->refresh_golden_frame = vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we shouldn't refresh golden if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_active && xd->corrupted) pc->refresh_golden_frame = 0;
+#endif
+
+ pc->refresh_alt_ref_frame = vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we shouldn't refresh altref if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_active && xd->corrupted) pc->refresh_alt_ref_frame = 0;
+#endif
+
+ /* Buffer to buffer copy flags. */
+ pc->copy_buffer_to_gf = 0;
+
+ if (!pc->refresh_golden_frame) {
+ pc->copy_buffer_to_gf = vp8_read_literal(bc, 2);
+ }
+
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we shouldn't copy to the golden if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_active && xd->corrupted) pc->copy_buffer_to_gf = 0;
+#endif
+
+ pc->copy_buffer_to_arf = 0;
+
+ if (!pc->refresh_alt_ref_frame) {
+ pc->copy_buffer_to_arf = vp8_read_literal(bc, 2);
+ }
+
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we shouldn't copy to the alt-ref if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_active && xd->corrupted) pc->copy_buffer_to_arf = 0;
+#endif
+
+ pc->ref_frame_sign_bias[GOLDEN_FRAME] = vp8_read_bit(bc);
+ pc->ref_frame_sign_bias[ALTREF_FRAME] = vp8_read_bit(bc);
+ }
+
+ pc->refresh_entropy_probs = vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we shouldn't refresh the probabilities if the bit is
+ * missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_active && xd->corrupted) pc->refresh_entropy_probs = 0;
+#endif
+ if (pc->refresh_entropy_probs == 0) {
+ memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
+ }
+
+ pc->refresh_last_frame = pc->frame_type == KEY_FRAME || vp8_read_bit(bc);
+
+#if CONFIG_ERROR_CONCEALMENT
+ /* Assume we should refresh the last frame if the bit is missing */
+ xd->corrupted |= vp8dx_bool_error(bc);
+ if (pbi->ec_active && xd->corrupted) pc->refresh_last_frame = 1;
+#endif
+
+ if (0) {
+ FILE *z = fopen("decodestats.stt", "a");
+ fprintf(z, "%6d F:%d,G:%d,A:%d,L:%d,Q:%d\n", pc->current_video_frame,
+ pc->frame_type, pc->refresh_golden_frame, pc->refresh_alt_ref_frame,
+ pc->refresh_last_frame, pc->base_qindex);
+ fclose(z);
+ }
+
+ {
+ pbi->independent_partitions = 1;
+
+ /* read coef probability tree */
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ for (l = 0; l < ENTROPY_NODES; ++l) {
+ vp8_prob *const p = pc->fc.coef_probs[i][j][k] + l;
+
+ if (vp8_read(bc, vp8_coef_update_probs[i][j][k][l])) {
+ *p = (vp8_prob)vp8_read_literal(bc, 8);
+ }
+ if (k > 0 && *p != pc->fc.coef_probs[i][j][k - 1][l]) {
+ pbi->independent_partitions = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* clear out the coeff buffer */
+ memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
+
+ vp8_decode_mode_mvs(pbi);
+
+#if CONFIG_ERROR_CONCEALMENT
+ if (pbi->ec_active &&
+ pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows) {
+ /* Motion vectors are missing in this frame. We will try to estimate
+ * them and then continue decoding the frame as usual */
+ vp8_estimate_missing_mvs(pbi);
+ }
+#endif
+
+ memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
+ pbi->frame_corrupt_residual = 0;
+
+#if CONFIG_MULTITHREAD
+ if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd) &&
+ pc->multi_token_partition != ONE_PARTITION) {
+ unsigned int thread;
+ if (vp8mt_decode_mb_rows(pbi, xd)) {
+ vp8_decoder_remove_threads(pbi);
+ pbi->restart_threads = 1;
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME, NULL);
+ }
+ vp8_yv12_extend_frame_borders(yv12_fb_new);
+ for (thread = 0; thread < pbi->decoding_thread_count; ++thread) {
+ corrupt_tokens |= pbi->mb_row_di[thread].mbd.corrupted;
+ }
+ } else
+#endif
+ {
+ decode_mb_rows(pbi);
+ corrupt_tokens |= xd->corrupted;
+ }
+
+ /* Collect information about decoder corruption. */
+ /* 1. Check first boolean decoder for errors. */
+ yv12_fb_new->corrupted = vp8dx_bool_error(bc);
+ /* 2. Check the macroblock information */
+ yv12_fb_new->corrupted |= corrupt_tokens;
+
+ if (!pbi->decoded_key_frame) {
+ if (pc->frame_type == KEY_FRAME && !yv12_fb_new->corrupted) {
+ pbi->decoded_key_frame = 1;
+ } else {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME,
+ "A stream must start with a complete key frame");
+ }
+ }
+
+ /* vpx_log("Decoder: Frame Decoded, Size Roughly:%d bytes
+ * \n",bc->pos+pbi->bc2.pos); */
+
+ if (pc->refresh_entropy_probs == 0) {
+ memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc));
+ pbi->independent_partitions = prev_independent_partitions;
+ }
+
+#ifdef PACKET_TESTING
+ {
+ FILE *f = fopen("decompressor.VP8", "ab");
+ unsigned int size = pbi->bc2.pos + pbi->bc.pos + 8;
+ fwrite((void *)&size, 4, 1, f);
+ fwrite((void *)pbi->Source, size, 1, f);
+ fclose(f);
+ }
+#endif
+
+ return 0;
+}
diff --git a/media/libvpx/libvpx/vp8/decoder/decodemv.c b/media/libvpx/libvpx/vp8/decoder/decodemv.c
new file mode 100644
index 0000000000..3f459d623f
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/decodemv.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "decodemv.h"
+#include "treereader.h"
+#include "vp8/common/entropymv.h"
+#include "vp8/common/entropymode.h"
+#include "onyxd_int.h"
+#include "vp8/common/findnearmv.h"
+
+static B_PREDICTION_MODE read_bmode(vp8_reader *bc, const vp8_prob *p) {
+ const int i = vp8_treed_read(bc, vp8_bmode_tree, p);
+
+ return (B_PREDICTION_MODE)i;
+}
+
+static MB_PREDICTION_MODE read_ymode(vp8_reader *bc, const vp8_prob *p) {
+ const int i = vp8_treed_read(bc, vp8_ymode_tree, p);
+
+ return (MB_PREDICTION_MODE)i;
+}
+
+static MB_PREDICTION_MODE read_kf_ymode(vp8_reader *bc, const vp8_prob *p) {
+ const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p);
+
+ return (MB_PREDICTION_MODE)i;
+}
+
+static MB_PREDICTION_MODE read_uv_mode(vp8_reader *bc, const vp8_prob *p) {
+ const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p);
+
+ return (MB_PREDICTION_MODE)i;
+}
+
+static void read_kf_modes(VP8D_COMP *pbi, MODE_INFO *mi) {
+ vp8_reader *const bc = &pbi->mbc[8];
+ const int mis = pbi->common.mode_info_stride;
+
+ mi->mbmi.ref_frame = INTRA_FRAME;
+ mi->mbmi.mode = read_kf_ymode(bc, vp8_kf_ymode_prob);
+
+ if (mi->mbmi.mode == B_PRED) {
+ int i = 0;
+ mi->mbmi.is_4x4 = 1;
+
+ do {
+ const B_PREDICTION_MODE A = above_block_mode(mi, i, mis);
+ const B_PREDICTION_MODE L = left_block_mode(mi, i);
+
+ mi->bmi[i].as_mode = read_bmode(bc, vp8_kf_bmode_prob[A][L]);
+ } while (++i < 16);
+ }
+
+ mi->mbmi.uv_mode = read_uv_mode(bc, vp8_kf_uv_mode_prob);
+}
+
+static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) {
+ const vp8_prob *const p = (const vp8_prob *)mvc;
+ int x = 0;
+
+ if (vp8_read(r, p[mvpis_short])) { /* Large */
+ int i = 0;
+
+ do {
+ x += vp8_read(r, p[MVPbits + i]) << i;
+ } while (++i < 3);
+
+ i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
+
+ do {
+ x += vp8_read(r, p[MVPbits + i]) << i;
+ } while (--i > 3);
+
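+    /* In the "large" branch the magnitude is at least 8, so when no bit
+     * above bit 3 is set, bit 3 must be 1 and is not coded explicitly. */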
+ if (!(x & 0xFFF0) || vp8_read(r, p[MVPbits + 3])) x += 8;
+ } else { /* small */
+ x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);
+ }
+
+ if (x && vp8_read(r, p[MVPsign])) x = -x;
+
+ return x;
+}
+
+static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) {
+ mv->row = (short)(read_mvcomponent(r, mvc) * 2);
+ mv->col = (short)(read_mvcomponent(r, ++mvc) * 2);
+}
+
+static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) {
+ int i = 0;
+
+ do {
+ const vp8_prob *up = vp8_mv_update_probs[i].prob;
+ vp8_prob *p = (vp8_prob *)(mvc + i);
+ vp8_prob *const pstop = p + MVPcount;
+
+ do {
+ if (vp8_read(bc, *up++)) {
+ const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);
+
+ *p = x ? x << 1 : 1;
+ }
+ } while (++p < pstop);
+ } while (++i < 2);
+}
+
+static const unsigned char mbsplit_fill_count[4] = { 8, 8, 4, 1 };
+static const unsigned char mbsplit_fill_offset[4][16] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
+ { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
+};
+
+static void mb_mode_mv_init(VP8D_COMP *pbi) {
+ vp8_reader *const bc = &pbi->mbc[8];
+ MV_CONTEXT *const mvc = pbi->common.fc.mvc;
+
+#if CONFIG_ERROR_CONCEALMENT
+ /* Default is that no macroblock is corrupt, therefore we initialize
+ * mvs_corrupt_from_mb to something very big, which we can be sure is
+ * outside the frame. */
+ pbi->mvs_corrupt_from_mb = UINT_MAX;
+#endif
+ /* Read the mb_no_coeff_skip flag */
+ pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc);
+
+ pbi->prob_skip_false = 0;
+ if (pbi->common.mb_no_coeff_skip) {
+ pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
+ }
+
+ if (pbi->common.frame_type != KEY_FRAME) {
+ pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8);
+ pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8);
+ pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8);
+
+ if (vp8_read_bit(bc)) {
+ int i = 0;
+
+ do {
+ pbi->common.fc.ymode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8);
+ } while (++i < 4);
+ }
+
+ if (vp8_read_bit(bc)) {
+ int i = 0;
+
+ do {
+ pbi->common.fc.uv_mode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8);
+ } while (++i < 3);
+ }
+
+ read_mvcontexts(bc, mvc);
+ }
+}
+
+const vp8_prob vp8_sub_mv_ref_prob3[8][VP8_SUBMVREFS - 1] = {
+ { 147, 136, 18 }, /* SUBMVREF_NORMAL */
+ { 223, 1, 34 }, /* SUBMVREF_LEFT_ABOVE_SAME */
+ { 106, 145, 1 }, /* SUBMVREF_LEFT_ZED */
+ { 208, 1, 1 }, /* SUBMVREF_LEFT_ABOVE_ZED */
+ { 179, 121, 1 }, /* SUBMVREF_ABOVE_ZED */
+ { 223, 1, 34 }, /* SUBMVREF_LEFT_ABOVE_SAME */
+ { 179, 121, 1 }, /* SUBMVREF_ABOVE_ZED */
+ { 208, 1, 1 } /* SUBMVREF_LEFT_ABOVE_ZED */
+};
+
+static const vp8_prob *get_sub_mv_ref_prob(const uint32_t left,
+ const uint32_t above) {
+ int lez = (left == 0);
+ int aez = (above == 0);
+ int lea = (left == above);
+ const vp8_prob *prob;
+
+ prob = vp8_sub_mv_ref_prob3[(aez << 2) | (lez << 1) | (lea)];
+
+ return prob;
+}
+
+static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi,
+ const MODE_INFO *left_mb, const MODE_INFO *above_mb,
+ MB_MODE_INFO *mbmi, int_mv best_mv,
+ MV_CONTEXT *const mvc, int mb_to_left_edge,
+ int mb_to_right_edge, int mb_to_top_edge,
+ int mb_to_bottom_edge) {
+ int s; /* split configuration (16x8, 8x16, 8x8, 4x4) */
+ /* number of partitions in the split configuration (see vp8_mbsplit_count) */
+ int num_p;
+ int j = 0;
+
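+  /* Read the split type from the mbsplit tree; the hard-wired probabilities
+   * 110, 111 and 150 match vp8_mbsplit_probs, with the tree read unrolled
+   * here. */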
+ s = 3;
+ num_p = 16;
+ if (vp8_read(bc, 110)) {
+ s = 2;
+ num_p = 4;
+ if (vp8_read(bc, 111)) {
+ s = vp8_read(bc, 150);
+ num_p = 2;
+ }
+ }
+
+ do /* for each subset j */
+ {
+ int_mv leftmv, abovemv;
+ int_mv blockmv;
+ int k; /* first block in subset j */
+
+ const vp8_prob *prob;
+ k = vp8_mbsplit_offset[s][j];
+
+ if (!(k & 3)) {
+ /* On L edge, get from MB to left of us */
+ if (left_mb->mbmi.mode != SPLITMV) {
+ leftmv.as_int = left_mb->mbmi.mv.as_int;
+ } else {
+ leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int;
+ }
+ } else {
+ leftmv.as_int = (mi->bmi + k - 1)->mv.as_int;
+ }
+
+ if (!(k >> 2)) {
+ /* On top edge, get from MB above us */
+ if (above_mb->mbmi.mode != SPLITMV) {
+ abovemv.as_int = above_mb->mbmi.mv.as_int;
+ } else {
+ abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int;
+ }
+ } else {
+ abovemv.as_int = (mi->bmi + k - 4)->mv.as_int;
+ }
+
+ prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int);
+
+ if (vp8_read(bc, prob[0])) {
+ if (vp8_read(bc, prob[1])) {
+ blockmv.as_int = 0;
+ if (vp8_read(bc, prob[2])) {
+ blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2;
+ blockmv.as_mv.row += best_mv.as_mv.row;
+ blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2;
+ blockmv.as_mv.col += best_mv.as_mv.col;
+ }
+ } else {
+ blockmv.as_int = abovemv.as_int;
+ }
+ } else {
+ blockmv.as_int = leftmv.as_int;
+ }
+
+ mbmi->need_to_clamp_mvs |=
+ vp8_check_mv_bounds(&blockmv, mb_to_left_edge, mb_to_right_edge,
+ mb_to_top_edge, mb_to_bottom_edge);
+
+ {
+ /* Fill (uniform) modes, mvs of jth subset.
+ Must do it here because ensuing subsets can
+ refer back to us via "left" or "above". */
+ const unsigned char *fill_offset;
+ unsigned int fill_count = mbsplit_fill_count[s];
+
+ fill_offset =
+ &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];
+
+ do {
+ mi->bmi[*fill_offset].mv.as_int = blockmv.as_int;
+ fill_offset++;
+ } while (--fill_count);
+ }
+
+ } while (++j < num_p);
+
+ mbmi->partitioning = s;
+}
+
+static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi,
+ MB_MODE_INFO *mbmi) {
+ vp8_reader *const bc = &pbi->mbc[8];
+ mbmi->ref_frame = (MV_REFERENCE_FRAME)vp8_read(bc, pbi->prob_intra);
+ if (mbmi->ref_frame) { /* inter MB */
+ enum { CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
+ int cnt[4];
+ int *cntx = cnt;
+ int_mv near_mvs[4];
+ int_mv *nmv = near_mvs;
+ const int mis = pbi->mb.mode_info_stride;
+ const MODE_INFO *above = mi - mis;
+ const MODE_INFO *left = mi - 1;
+ const MODE_INFO *aboveleft = above - 1;
+ int *ref_frame_sign_bias = pbi->common.ref_frame_sign_bias;
+
+ mbmi->need_to_clamp_mvs = 0;
+
+ if (vp8_read(bc, pbi->prob_last)) {
+ mbmi->ref_frame =
+ (MV_REFERENCE_FRAME)((int)(2 + vp8_read(bc, pbi->prob_gf)));
+ }
+
+ /* Zero accumulators */
+ nmv[0].as_int = nmv[1].as_int = nmv[2].as_int = 0;
+ cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;
+
+ /* Process above */
+ if (above->mbmi.ref_frame != INTRA_FRAME) {
+ if (above->mbmi.mv.as_int) {
+ (++nmv)->as_int = above->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], mbmi->ref_frame,
+ nmv, ref_frame_sign_bias);
+ ++cntx;
+ }
+
+ *cntx += 2;
+ }
+
+ /* Process left */
+ if (left->mbmi.ref_frame != INTRA_FRAME) {
+ if (left->mbmi.mv.as_int) {
+ int_mv this_mv;
+
+ this_mv.as_int = left->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], mbmi->ref_frame,
+ &this_mv, ref_frame_sign_bias);
+
+ if (this_mv.as_int != nmv->as_int) {
+ (++nmv)->as_int = this_mv.as_int;
+ ++cntx;
+ }
+
+ *cntx += 2;
+ } else {
+ cnt[CNT_INTRA] += 2;
+ }
+ }
+
+ /* Process above left */
+ if (aboveleft->mbmi.ref_frame != INTRA_FRAME) {
+ if (aboveleft->mbmi.mv.as_int) {
+ int_mv this_mv;
+
+ this_mv.as_int = aboveleft->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], mbmi->ref_frame,
+ &this_mv, ref_frame_sign_bias);
+
+ if (this_mv.as_int != nmv->as_int) {
+ (++nmv)->as_int = this_mv.as_int;
+ ++cntx;
+ }
+
+ *cntx += 1;
+ } else {
+ cnt[CNT_INTRA] += 1;
+ }
+ }
+
+ if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_INTRA]][0])) {
+      /* If we have three distinct MVs ... */
+ /* See if above-left MV can be merged with NEAREST */
+ cnt[CNT_NEAREST] += ((cnt[CNT_SPLITMV] > 0) &
+ (nmv->as_int == near_mvs[CNT_NEAREST].as_int));
+
+ /* Swap near and nearest if necessary */
+ if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
+ int tmp;
+ tmp = cnt[CNT_NEAREST];
+ cnt[CNT_NEAREST] = cnt[CNT_NEAR];
+ cnt[CNT_NEAR] = tmp;
+ tmp = (int)near_mvs[CNT_NEAREST].as_int;
+ near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int;
+ near_mvs[CNT_NEAR].as_int = (uint32_t)tmp;
+ }
+
+ if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
+ if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
+ int mb_to_top_edge;
+ int mb_to_bottom_edge;
+ int mb_to_left_edge;
+ int mb_to_right_edge;
+ MV_CONTEXT *const mvc = pbi->common.fc.mvc;
+ int near_index;
+
+ mb_to_top_edge = pbi->mb.mb_to_top_edge;
+ mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge;
+ mb_to_top_edge -= LEFT_TOP_MARGIN;
+ mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN;
+ mb_to_right_edge = pbi->mb.mb_to_right_edge;
+ mb_to_right_edge += RIGHT_BOTTOM_MARGIN;
+ mb_to_left_edge = pbi->mb.mb_to_left_edge;
+ mb_to_left_edge -= LEFT_TOP_MARGIN;
+
+ /* Use near_mvs[0] to store the "best" MV */
+ near_index = CNT_INTRA + (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]);
+
+ vp8_clamp_mv2(&near_mvs[near_index], &pbi->mb);
+
+ cnt[CNT_SPLITMV] =
+ ((above->mbmi.mode == SPLITMV) + (left->mbmi.mode == SPLITMV)) *
+ 2 +
+ (aboveleft->mbmi.mode == SPLITMV);
+
+ if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
+ decode_split_mv(bc, mi, left, above, mbmi, near_mvs[near_index],
+ mvc, mb_to_left_edge, mb_to_right_edge,
+ mb_to_top_edge, mb_to_bottom_edge);
+ mbmi->mv.as_int = mi->bmi[15].mv.as_int;
+ mbmi->mode = SPLITMV;
+ mbmi->is_4x4 = 1;
+ } else {
+ int_mv *const mbmi_mv = &mbmi->mv;
+ read_mv(bc, &mbmi_mv->as_mv, (const MV_CONTEXT *)mvc);
+ mbmi_mv->as_mv.row += near_mvs[near_index].as_mv.row;
+ mbmi_mv->as_mv.col += near_mvs[near_index].as_mv.col;
+
+ /* Don't need to check this on NEARMV and NEARESTMV
+ * modes since those modes clamp the MV. The NEWMV mode
+ * does not, so signal to the prediction stage whether
+ * special handling may be required.
+ */
+ mbmi->need_to_clamp_mvs =
+ vp8_check_mv_bounds(mbmi_mv, mb_to_left_edge, mb_to_right_edge,
+ mb_to_top_edge, mb_to_bottom_edge);
+ mbmi->mode = NEWMV;
+ }
+ } else {
+ mbmi->mode = NEARMV;
+ mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int;
+ vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
+ }
+ } else {
+ mbmi->mode = NEARESTMV;
+ mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int;
+ vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
+ }
+ } else {
+ mbmi->mode = ZEROMV;
+ mbmi->mv.as_int = 0;
+ }
+
+#if CONFIG_ERROR_CONCEALMENT
+ if (pbi->ec_enabled && (mbmi->mode != SPLITMV)) {
+ mi->bmi[0].mv.as_int = mi->bmi[1].mv.as_int = mi->bmi[2].mv.as_int =
+ mi->bmi[3].mv.as_int = mi->bmi[4].mv.as_int = mi->bmi[5].mv.as_int =
+ mi->bmi[6].mv.as_int = mi->bmi[7].mv.as_int =
+ mi->bmi[8].mv.as_int = mi->bmi[9].mv.as_int =
+ mi->bmi[10].mv.as_int = mi->bmi[11].mv.as_int =
+ mi->bmi[12].mv.as_int = mi->bmi[13].mv.as_int =
+ mi->bmi[14].mv.as_int = mi->bmi[15].mv.as_int =
+ mbmi->mv.as_int;
+ }
+#endif
+ } else {
+ /* required for left and above block mv */
+ mbmi->mv.as_int = 0;
+
+ /* MB is intra coded */
+ if ((mbmi->mode = read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED) {
+ int j = 0;
+ mbmi->is_4x4 = 1;
+ do {
+ mi->bmi[j].as_mode = read_bmode(bc, pbi->common.fc.bmode_prob);
+ } while (++j < 16);
+ }
+
+ mbmi->uv_mode = read_uv_mode(bc, pbi->common.fc.uv_mode_prob);
+ }
+}
+
+static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) {
+ /* Is segmentation enabled */
+ if (x->segmentation_enabled && x->update_mb_segmentation_map) {
+ /* If so then read the segment id. */
+ if (vp8_read(r, x->mb_segment_tree_probs[0])) {
+ mi->segment_id =
+ (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2]));
+ } else {
+ mi->segment_id =
+ (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1]));
+ }
+ }
+}
+
+static void decode_mb_mode_mvs(VP8D_COMP *pbi, MODE_INFO *mi) {
+  /* Read the macroblock segmentation map if it is being updated
+   * explicitly this frame; otherwise, on a key frame, all MBs default
+   * to segment 0.
+   */
+ if (pbi->mb.update_mb_segmentation_map) {
+ read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb);
+ } else if (pbi->common.frame_type == KEY_FRAME) {
+ mi->mbmi.segment_id = 0;
+ }
+
+ /* Read the macroblock coeff skip flag if this feature is in use,
+ * else default to 0 */
+ if (pbi->common.mb_no_coeff_skip) {
+ mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false);
+ } else {
+ mi->mbmi.mb_skip_coeff = 0;
+ }
+
+ mi->mbmi.is_4x4 = 0;
+ if (pbi->common.frame_type == KEY_FRAME) {
+ read_kf_modes(pbi, mi);
+ } else {
+ read_mb_modes_mv(pbi, mi, &mi->mbmi);
+ }
+}
+
+void vp8_decode_mode_mvs(VP8D_COMP *pbi) {
+ MODE_INFO *mi = pbi->common.mi;
+ int mb_row = -1;
+ int mb_to_right_edge_start;
+
+ mb_mode_mv_init(pbi);
+
+ pbi->mb.mb_to_top_edge = 0;
+ pbi->mb.mb_to_bottom_edge = ((pbi->common.mb_rows - 1) * 16) << 3;
+ mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3;
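+  /* Edge distances are kept in 1/8th-pel units: 16 pixels per MB,
+   * shifted left by 3. */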
+
+ while (++mb_row < pbi->common.mb_rows) {
+ int mb_col = -1;
+
+ pbi->mb.mb_to_left_edge = 0;
+ pbi->mb.mb_to_right_edge = mb_to_right_edge_start;
+
+ while (++mb_col < pbi->common.mb_cols) {
+#if CONFIG_ERROR_CONCEALMENT
+ int mb_num = mb_row * pbi->common.mb_cols + mb_col;
+#endif
+
+ decode_mb_mode_mvs(pbi, mi);
+
+#if CONFIG_ERROR_CONCEALMENT
+ /* look for corruption. set mvs_corrupt_from_mb to the current
+ * mb_num if the frame is corrupt from this macroblock. */
+ if (vp8dx_bool_error(&pbi->mbc[8]) &&
+ mb_num < (int)pbi->mvs_corrupt_from_mb) {
+ pbi->mvs_corrupt_from_mb = mb_num;
+ /* no need to continue since the partition is corrupt from
+ * here on.
+ */
+ return;
+ }
+#endif
+
+ pbi->mb.mb_to_left_edge -= (16 << 3);
+ pbi->mb.mb_to_right_edge -= (16 << 3);
+ mi++; /* next macroblock */
+ }
+ pbi->mb.mb_to_top_edge -= (16 << 3);
+ pbi->mb.mb_to_bottom_edge -= (16 << 3);
+
+ mi++; /* skip left predictor each row */
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/decoder/decodemv.h b/media/libvpx/libvpx/vp8/decoder/decodemv.h
new file mode 100644
index 0000000000..504e943d85
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/decodemv.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_DECODER_DECODEMV_H_
+#define VPX_VP8_DECODER_DECODEMV_H_
+
+#include "onyxd_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp8_decode_mode_mvs(VP8D_COMP *);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_DECODER_DECODEMV_H_
diff --git a/media/libvpx/libvpx/vp8/decoder/decoderthreading.h b/media/libvpx/libvpx/vp8/decoder/decoderthreading.h
new file mode 100644
index 0000000000..3d49bc8317
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/decoderthreading.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_DECODER_DECODERTHREADING_H_
+#define VPX_VP8_DECODER_DECODERTHREADING_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if CONFIG_MULTITHREAD
+int vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd);
+void vp8_decoder_remove_threads(VP8D_COMP *pbi);
+void vp8_decoder_create_threads(VP8D_COMP *pbi);
+void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows);
+void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows);
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_DECODER_DECODERTHREADING_H_
diff --git a/media/libvpx/libvpx/vp8/decoder/detokenize.c b/media/libvpx/libvpx/vp8/decoder/detokenize.c
new file mode 100644
index 0000000000..1c77873f0b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/detokenize.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8/common/blockd.h"
+#include "onyxd_int.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/compiler_attributes.h"
+#include "vpx_ports/mem.h"
+#include "detokenize.h"
+
+void vp8_reset_mb_tokens_context(MACROBLOCKD *x) {
+ ENTROPY_CONTEXT *a_ctx = ((ENTROPY_CONTEXT *)x->above_context);
+ ENTROPY_CONTEXT *l_ctx = ((ENTROPY_CONTEXT *)x->left_context);
+
+ memset(a_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1);
+ memset(l_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1);
+
+ /* Clear entropy contexts for Y2 blocks */
+ if (!x->mode_info_context->mbmi.is_4x4) {
+ a_ctx[8] = l_ctx[8] = 0;
+ }
+}
+
+/*
+ ------------------------------------------------------------------------------
+ Residual decoding (Paragraph 13.2 / 13.3)
+*/
+static const uint8_t kBands[16 + 1] = {
+ 0, 1, 2, 3, 6, 4, 5, 6, 6,
+ 6, 6, 6, 6, 6, 6, 7, 0 /* extra entry as sentinel */
+};
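+/* kBands maps a coefficient position (1..16) to its probability band;
+ * the sentinel 17th entry lets GetCoeffs() evaluate kBands[n] once n
+ * has been incremented to 16 without reading out of bounds. */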
+
+static const uint8_t kCat3[] = { 173, 148, 140, 0 };
+static const uint8_t kCat4[] = { 176, 155, 140, 135, 0 };
+static const uint8_t kCat5[] = { 180, 157, 141, 134, 130, 0 };
+static const uint8_t kCat6[] = { 254, 254, 243, 230, 196, 177,
+ 153, 140, 133, 130, 129, 0 };
+static const uint8_t *const kCat3456[] = { kCat3, kCat4, kCat5, kCat6 };
+static const uint8_t kZigzag[16] = { 0, 1, 4, 8, 5, 2, 3, 6,
+ 9, 12, 13, 10, 7, 11, 14, 15 };
+
+#define VP8GetBit vp8dx_decode_bool
+#define NUM_PROBAS 11
+#define NUM_CTX 3
+
+/* for const-casting */
+typedef const uint8_t (*ProbaArray)[NUM_CTX][NUM_PROBAS];
+
+// With corrupt / fuzzed streams the calculation of br->value may overflow. See
+// b/148271109.
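+//
+// GetSigned() is effectively a bool read at probability 128 (an even
+// split): it returns value_to_sign or -value_to_sign depending on the
+// decoded sign bit, then renormalizes the range/value pair by one bit.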
+static VPX_NO_UNSIGNED_OVERFLOW_CHECK int GetSigned(BOOL_DECODER *br,
+ int value_to_sign) {
+ int split = (br->range + 1) >> 1;
+ VP8_BD_VALUE bigsplit = (VP8_BD_VALUE)split << (VP8_BD_VALUE_SIZE - 8);
+ int v;
+
+ if (br->count < 0) vp8dx_bool_decoder_fill(br);
+
+ if (br->value < bigsplit) {
+ br->range = split;
+ v = value_to_sign;
+ } else {
+ br->range = br->range - split;
+ br->value = br->value - bigsplit;
+ v = -value_to_sign;
+ }
+ br->range += br->range;
+ br->value += br->value;
+ br->count--;
+
+ return v;
+}
+/*
+ Returns the position of the last non-zero coeff plus one
+ (and 0 if there's no coeff at all)
+*/
+static int GetCoeffs(BOOL_DECODER *br, ProbaArray prob, int ctx, int n,
+ int16_t *out) {
+ const uint8_t *p = prob[n][ctx];
+ if (!VP8GetBit(br, p[0])) { /* first EOB is more a 'CBP' bit. */
+ return 0;
+ }
+ while (1) {
+ ++n;
+ if (!VP8GetBit(br, p[1])) {
+ p = prob[kBands[n]][0];
+ } else { /* non zero coeff */
+ int v, j;
+ if (!VP8GetBit(br, p[2])) {
+ p = prob[kBands[n]][1];
+ v = 1;
+ } else {
+ if (!VP8GetBit(br, p[3])) {
+ if (!VP8GetBit(br, p[4])) {
+ v = 2;
+ } else {
+ v = 3 + VP8GetBit(br, p[5]);
+ }
+ } else {
+ if (!VP8GetBit(br, p[6])) {
+ if (!VP8GetBit(br, p[7])) {
+ v = 5 + VP8GetBit(br, 159);
+ } else {
+ v = 7 + 2 * VP8GetBit(br, 165);
+ v += VP8GetBit(br, 145);
+ }
+ } else {
+ const uint8_t *tab;
+ const int bit1 = VP8GetBit(br, p[8]);
+ const int bit0 = VP8GetBit(br, p[9 + bit1]);
+ const int cat = 2 * bit1 + bit0;
+ v = 0;
+ for (tab = kCat3456[cat]; *tab; ++tab) {
+ v += v + VP8GetBit(br, *tab);
+ }
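+            /* Add the category base value: 3 + (8 << cat) is 11, 19,
+             * 35 or 67 for cat3..cat6. */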
+ v += 3 + (8 << cat);
+ }
+ }
+ p = prob[kBands[n]][2];
+ }
+ j = kZigzag[n - 1];
+
+ out[j] = GetSigned(br, v);
+
+ if (n == 16 || !VP8GetBit(br, p[0])) { /* EOB */
+ return n;
+ }
+ }
+ if (n == 16) {
+ return 16;
+ }
+ }
+}
+
+int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *x) {
+ BOOL_DECODER *bc = x->current_bc;
+ const FRAME_CONTEXT *const fc = &dx->common.fc;
+ char *eobs = x->eobs;
+
+ int i;
+ int nonzeros;
+ int eobtotal = 0;
+
+ short *qcoeff_ptr;
+ ProbaArray coef_probs;
+ ENTROPY_CONTEXT *a_ctx = ((ENTROPY_CONTEXT *)x->above_context);
+ ENTROPY_CONTEXT *l_ctx = ((ENTROPY_CONTEXT *)x->left_context);
+ ENTROPY_CONTEXT *a;
+ ENTROPY_CONTEXT *l;
+ int skip_dc = 0;
+
+ qcoeff_ptr = &x->qcoeff[0];
+
+ if (!x->mode_info_context->mbmi.is_4x4) {
+ a = a_ctx + 8;
+ l = l_ctx + 8;
+
+ coef_probs = fc->coef_probs[1];
+
+ nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), 0, qcoeff_ptr + 24 * 16);
+ *a = *l = (nonzeros > 0);
+
+ eobs[24] = nonzeros;
+ eobtotal += nonzeros - 16;
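+    /* Each of the 16 Y blocks below adds skip_dc (1) to its eob count;
+     * the -16 here cancels that, so eobtotal stays zero for an
+     * all-empty macroblock. */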
+
+ coef_probs = fc->coef_probs[0];
+ skip_dc = 1;
+ } else {
+ coef_probs = fc->coef_probs[3];
+ skip_dc = 0;
+ }
+
+ for (i = 0; i < 16; ++i) {
+ a = a_ctx + (i & 3);
+ l = l_ctx + ((i & 0xc) >> 2);
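+    /* i & 3 is the block's column (above context) and (i & 0xc) >> 2
+     * its row (left context) within the 4x4 grid of Y subblocks. */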
+
+ nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), skip_dc, qcoeff_ptr);
+ *a = *l = (nonzeros > 0);
+
+ nonzeros += skip_dc;
+ eobs[i] = nonzeros;
+ eobtotal += nonzeros;
+ qcoeff_ptr += 16;
+ }
+
+ coef_probs = fc->coef_probs[2];
+
+ a_ctx += 4;
+ l_ctx += 4;
+ for (i = 16; i < 24; ++i) {
+ a = a_ctx + ((i > 19) << 1) + (i & 1);
+ l = l_ctx + ((i > 19) << 1) + ((i & 3) > 1);
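+    /* Blocks 16..19 are U and 20..23 are V: ((i > 19) << 1) steps from
+     * the U to the V contexts, while the low bits select the column
+     * (above) and row (left) within each 2x2 chroma grid. */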
+
+ nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), 0, qcoeff_ptr);
+ *a = *l = (nonzeros > 0);
+
+ eobs[i] = nonzeros;
+ eobtotal += nonzeros;
+ qcoeff_ptr += 16;
+ }
+
+ return eobtotal;
+}
diff --git a/media/libvpx/libvpx/vp8/decoder/detokenize.h b/media/libvpx/libvpx/vp8/decoder/detokenize.h
new file mode 100644
index 0000000000..410a431ba0
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/detokenize.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_DECODER_DETOKENIZE_H_
+#define VPX_VP8_DECODER_DETOKENIZE_H_
+
+#include "onyxd_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp8_reset_mb_tokens_context(MACROBLOCKD *x);
+int vp8_decode_mb_tokens(VP8D_COMP *, MACROBLOCKD *);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_DECODER_DETOKENIZE_H_
diff --git a/media/libvpx/libvpx/vp8/decoder/ec_types.h b/media/libvpx/libvpx/vp8/decoder/ec_types.h
new file mode 100644
index 0000000000..84feb269df
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/ec_types.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_DECODER_EC_TYPES_H_
+#define VPX_VP8_DECODER_EC_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_OVERLAPS 16
+
+/* The area (in Q6 pixel units) by which the block pointed to by bmi
+ * overlaps another block.
+ */
+typedef struct {
+ int overlap;
+ union b_mode_info *bmi;
+} OVERLAP_NODE;
+
+/* Structure to keep track of overlapping blocks on a block level. */
+typedef struct {
+ /* TODO(holmer): This array should be exchanged for a linked list */
+ OVERLAP_NODE overlaps[MAX_OVERLAPS];
+} B_OVERLAP;
+
+/* Structure used to hold all the overlaps of a macroblock. The overlaps
+ * of a macroblock are further divided into block overlaps.
+ */
+typedef struct {
+ B_OVERLAP overlaps[16];
+} MB_OVERLAP;
+
+/* Structure for keeping track of motion vectors and which reference frame they
+ * refer to. Used for motion vector interpolation.
+ */
+typedef struct {
+ MV mv;
+ MV_REFERENCE_FRAME ref_frame;
+} EC_BLOCK;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_DECODER_EC_TYPES_H_
diff --git a/media/libvpx/libvpx/vp8/decoder/error_concealment.c b/media/libvpx/libvpx/vp8/decoder/error_concealment.c
new file mode 100644
index 0000000000..85982e4de3
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/error_concealment.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "error_concealment.h"
+#include "onyxd_int.h"
+#include "decodemv.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/findnearmv.h"
+#include "vp8/common/common.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+
+#define FLOOR(x, q) ((x) & -(1 << (q)))
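+/* FLOOR(x, q) rounds x down to the nearest multiple of 2^q; with
+ * two's-complement arithmetic this also holds for negative x. */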
+
+#define NUM_NEIGHBORS 20
+
+typedef struct ec_position {
+ int row;
+ int col;
+} EC_POS;
+
+/*
+ * Regenerate the table in Matlab with:
+ * x = meshgrid((0:4), (0:4));
+ * y = meshgrid((0:4), (0:4))';
+ * W = round((1./(sqrt(x.^2 + y.^2))*2^7));
+ * W(1,1) = 0;
+ */
+static const int weights_q7[5][5] = { { 0, 128, 64, 43, 32 },
+ { 128, 91, 57, 40, 31 },
+ { 64, 57, 45, 36, 29 },
+ { 43, 40, 36, 30, 26 },
+ { 32, 31, 29, 26, 23 } };
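+/* weights_q7[r][c] is round(128 / sqrt(r^2 + c^2)) in Q7 for the
+ * row/column distance (r, c); the zero-distance entry [0][0] is 0. */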
+
+int vp8_alloc_overlap_lists(VP8D_COMP *pbi) {
+ if (pbi->overlaps != NULL) {
+ vpx_free(pbi->overlaps);
+ pbi->overlaps = NULL;
+ }
+
+ pbi->overlaps =
+ vpx_calloc(pbi->common.mb_rows * pbi->common.mb_cols, sizeof(MB_OVERLAP));
+
+ if (pbi->overlaps == NULL) return -1;
+
+ return 0;
+}
+
+void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi) {
+ vpx_free(pbi->overlaps);
+ pbi->overlaps = NULL;
+}
+
+/* Inserts a new overlap area value to the list of overlaps of a block */
+static void assign_overlap(OVERLAP_NODE *overlaps, union b_mode_info *bmi,
+ int overlap) {
+ int i;
+ if (overlap <= 0) return;
+ /* Find and assign to the next empty overlap node in the list of overlaps.
+ * Empty is defined as bmi == NULL */
+ for (i = 0; i < MAX_OVERLAPS; ++i) {
+ if (overlaps[i].bmi == NULL) {
+ overlaps[i].bmi = bmi;
+ overlaps[i].overlap = overlap;
+ break;
+ }
+ }
+}
+
+/* Calculates the overlap area between two 4x4 squares, where the first
+ * square has its upper-left corner at (b1_row, b1_col) and the second
+ * square has its upper-left corner at (b2_row, b2_col). Doesn't
+ * properly handle squares which do not overlap.
+ */
+static int block_overlap(int b1_row, int b1_col, int b2_row, int b2_col) {
+ const int int_top = VPXMAX(b1_row, b2_row); // top
+ const int int_left = VPXMAX(b1_col, b2_col); // left
+ /* Since each block is 4x4 pixels, adding 4 (Q3) to the left/top edge
+ * gives us the right/bottom edge.
+ */
+ const int int_right = VPXMIN(b1_col + (4 << 3), b2_col + (4 << 3)); // right
+ const int int_bottom =
+ VPXMIN(b1_row + (4 << 3), b2_row + (4 << 3)); // bottom
+ return (int_bottom - int_top) * (int_right - int_left);
+}
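+/* Example: two 4x4 blocks in the same columns whose rows differ by two
+ * pixels (16 in Q3) overlap over a 2x4 pixel area: 16 * 32 = 512 in Q6,
+ * i.e. 8 pixels of area. */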
+
+/* Calculates the overlap area for all blocks in a macroblock at position
+ * (mb_row, mb_col) in macroblocks, which are being overlapped by a given
+ * overlapping block at position (new_row, new_col) (in pixels, Q3). The
+ * first block being overlapped in the macroblock has position (first_blk_row,
+ * first_blk_col) in blocks relative to the upper-left corner of the image.
+ */
+static void calculate_overlaps_mb(B_OVERLAP *b_overlaps, union b_mode_info *bmi,
+ int new_row, int new_col, int mb_row,
+ int mb_col, int first_blk_row,
+ int first_blk_col) {
+ /* Find the blocks within this MB (defined by mb_row, mb_col) which are
+ * overlapped by bmi and calculate and assign overlap for each of those
+ * blocks. */
+
+  /* Block coordinates relative to the upper-left block */
+ const int rel_ol_blk_row = first_blk_row - mb_row * 4;
+ const int rel_ol_blk_col = first_blk_col - mb_col * 4;
+ /* If the block partly overlaps any previous MB, these coordinates
+ * can be < 0. We don't want to access blocks in previous MBs.
+ */
+ const int blk_idx = VPXMAX(rel_ol_blk_row, 0) * 4 + VPXMAX(rel_ol_blk_col, 0);
+ /* Upper left overlapping block */
+ B_OVERLAP *b_ol_ul = &(b_overlaps[blk_idx]);
+
+ /* Calculate and assign overlaps for all blocks in this MB
+ * which the motion compensated block overlaps
+ */
+ /* Avoid calculating overlaps for blocks in later MBs */
+ int end_row = VPXMIN(4 + mb_row * 4 - first_blk_row, 2);
+ int end_col = VPXMIN(4 + mb_col * 4 - first_blk_col, 2);
+ int row, col;
+
+ /* Check if new_row and new_col are evenly divisible by 4 (Q3),
+ * and if so we shouldn't check neighboring blocks
+ */
+ if (new_row >= 0 && (new_row & 0x1F) == 0) end_row = 1;
+ if (new_col >= 0 && (new_col & 0x1F) == 0) end_col = 1;
+
+ /* Check if the overlapping block partly overlaps a previous MB
+ * and if so, we're overlapping fewer blocks in this MB.
+ */
+ if (new_row < (mb_row * 16) << 3) end_row = 1;
+ if (new_col < (mb_col * 16) << 3) end_col = 1;
+
+ for (row = 0; row < end_row; ++row) {
+ for (col = 0; col < end_col; ++col) {
+ /* input in Q3, result in Q6 */
+ const int overlap =
+ block_overlap(new_row, new_col, (((first_blk_row + row) * 4) << 3),
+ (((first_blk_col + col) * 4) << 3));
+ assign_overlap(b_ol_ul[row * 4 + col].overlaps, bmi, overlap);
+ }
+ }
+}
+
+static void calculate_overlaps(MB_OVERLAP *overlap_ul, int mb_rows, int mb_cols,
+ union b_mode_info *bmi, int b_row, int b_col) {
+ MB_OVERLAP *mb_overlap;
+ int row, col, rel_row, rel_col;
+ int new_row, new_col;
+ int end_row, end_col;
+ int overlap_b_row, overlap_b_col;
+ int overlap_mb_row, overlap_mb_col;
+
+ /* mb subpixel position */
+ row = (4 * b_row) << 3; /* Q3 */
+ col = (4 * b_col) << 3; /* Q3 */
+
+ /* reverse compensate for motion */
+ new_row = row - bmi->mv.as_mv.row;
+ new_col = col - bmi->mv.as_mv.col;
+
+ if (new_row >= ((16 * mb_rows) << 3) || new_col >= ((16 * mb_cols) << 3)) {
+ /* the new block ended up outside the frame */
+ return;
+ }
+
+ if (new_row <= -32 || new_col <= -32) {
+ /* outside the frame */
+ return;
+ }
+ /* overlapping block's position in blocks */
+ overlap_b_row = FLOOR(new_row / 4, 3) >> 3;
+ overlap_b_col = FLOOR(new_col / 4, 3) >> 3;
+
+ /* overlapping block's MB position in MBs
+ * operations are done in Q3
+ */
+ overlap_mb_row = FLOOR((overlap_b_row << 3) / 4, 3) >> 3;
+ overlap_mb_col = FLOOR((overlap_b_col << 3) / 4, 3) >> 3;
+
+ end_row = VPXMIN(mb_rows - overlap_mb_row, 2);
+ end_col = VPXMIN(mb_cols - overlap_mb_col, 2);
+
+ /* Don't calculate overlap for MBs we don't overlap */
+ /* Check if the new block row starts at the last block row of the MB */
+ if (abs(new_row - ((16 * overlap_mb_row) << 3)) < ((3 * 4) << 3)) end_row = 1;
+ /* Check if the new block col starts at the last block col of the MB */
+ if (abs(new_col - ((16 * overlap_mb_col) << 3)) < ((3 * 4) << 3)) end_col = 1;
+
+ /* find the MB(s) this block is overlapping */
+ for (rel_row = 0; rel_row < end_row; ++rel_row) {
+ for (rel_col = 0; rel_col < end_col; ++rel_col) {
+ if (overlap_mb_row + rel_row < 0 || overlap_mb_col + rel_col < 0)
+ continue;
+ mb_overlap = overlap_ul + (overlap_mb_row + rel_row) * mb_cols +
+ overlap_mb_col + rel_col;
+
+ calculate_overlaps_mb(mb_overlap->overlaps, bmi, new_row, new_col,
+ overlap_mb_row + rel_row, overlap_mb_col + rel_col,
+ overlap_b_row + rel_row, overlap_b_col + rel_col);
+ }
+ }
+}
+
+/* Estimates a motion vector given the overlapping blocks' motion vectors.
+ * Filters out all overlapping blocks which do not refer to the correct
+ * reference frame type.
+ */
+static void estimate_mv(const OVERLAP_NODE *overlaps, union b_mode_info *bmi) {
+ int i;
+ int overlap_sum = 0;
+ int row_acc = 0;
+ int col_acc = 0;
+
+ bmi->mv.as_int = 0;
+ for (i = 0; i < MAX_OVERLAPS; ++i) {
+ if (overlaps[i].bmi == NULL) break;
+ col_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.col;
+ row_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.row;
+ overlap_sum += overlaps[i].overlap;
+ }
+ if (overlap_sum > 0) {
+ /* Q9 / Q6 = Q3 */
+ bmi->mv.as_mv.col = col_acc / overlap_sum;
+ bmi->mv.as_mv.row = row_acc / overlap_sum;
+ } else {
+ bmi->mv.as_mv.col = 0;
+ bmi->mv.as_mv.row = 0;
+ }
+}
+
+/* Estimates all motion vectors for a macroblock given the lists of
+ * overlaps for each block. Decides whether or not the MVs must be clamped.
+ */
+static void estimate_mb_mvs(const B_OVERLAP *block_overlaps, MODE_INFO *mi,
+ int mb_to_left_edge, int mb_to_right_edge,
+ int mb_to_top_edge, int mb_to_bottom_edge) {
+ int row, col;
+ int non_zero_count = 0;
+ MV *const filtered_mv = &(mi->mbmi.mv.as_mv);
+ union b_mode_info *const bmi = mi->bmi;
+ filtered_mv->col = 0;
+ filtered_mv->row = 0;
+ mi->mbmi.need_to_clamp_mvs = 0;
+ for (row = 0; row < 4; ++row) {
+ int this_b_to_top_edge = mb_to_top_edge + ((row * 4) << 3);
+ int this_b_to_bottom_edge = mb_to_bottom_edge - ((row * 4) << 3);
+ for (col = 0; col < 4; ++col) {
+ int i = row * 4 + col;
+ int this_b_to_left_edge = mb_to_left_edge + ((col * 4) << 3);
+ int this_b_to_right_edge = mb_to_right_edge - ((col * 4) << 3);
+      /* Estimate vectors for all blocks which are overlapped by this
+       * type; interpolate/extrapolate the rest of the block's MVs. */
+ estimate_mv(block_overlaps[i].overlaps, &(bmi[i]));
+ mi->mbmi.need_to_clamp_mvs |= vp8_check_mv_bounds(
+ &bmi[i].mv, this_b_to_left_edge, this_b_to_right_edge,
+ this_b_to_top_edge, this_b_to_bottom_edge);
+ if (bmi[i].mv.as_int != 0) {
+ ++non_zero_count;
+ filtered_mv->col += bmi[i].mv.as_mv.col;
+ filtered_mv->row += bmi[i].mv.as_mv.row;
+ }
+ }
+ }
+ if (non_zero_count > 0) {
+ filtered_mv->col /= non_zero_count;
+ filtered_mv->row /= non_zero_count;
+ }
+}
+
+static void calc_prev_mb_overlaps(MB_OVERLAP *overlaps, MODE_INFO *prev_mi,
+ int mb_row, int mb_col, int mb_rows,
+ int mb_cols) {
+ int sub_row;
+ int sub_col;
+ for (sub_row = 0; sub_row < 4; ++sub_row) {
+ for (sub_col = 0; sub_col < 4; ++sub_col) {
+ calculate_overlaps(overlaps, mb_rows, mb_cols,
+ &(prev_mi->bmi[sub_row * 4 + sub_col]),
+ 4 * mb_row + sub_row, 4 * mb_col + sub_col);
+ }
+ }
+}
+
+/* Estimate all missing motion vectors. This is the worker called by
+ * vp8_estimate_missing_mvs() below. */
+static void estimate_missing_mvs(MB_OVERLAP *overlaps, MODE_INFO *mi,
+ MODE_INFO *prev_mi, int mb_rows, int mb_cols,
+ unsigned int first_corrupt) {
+ int mb_row, mb_col;
+ memset(overlaps, 0, sizeof(MB_OVERLAP) * mb_rows * mb_cols);
+ /* First calculate the overlaps for all blocks */
+ for (mb_row = 0; mb_row < mb_rows; ++mb_row) {
+ for (mb_col = 0; mb_col < mb_cols; ++mb_col) {
+ /* We're only able to use blocks referring to the last frame
+ * when extrapolating new vectors.
+ */
+ if (prev_mi->mbmi.ref_frame == LAST_FRAME) {
+ calc_prev_mb_overlaps(overlaps, prev_mi, mb_row, mb_col, mb_rows,
+ mb_cols);
+ }
+ ++prev_mi;
+ }
+ ++prev_mi;
+ }
+
+ mb_row = first_corrupt / mb_cols;
+ mb_col = first_corrupt - mb_row * mb_cols;
+ mi += mb_row * (mb_cols + 1) + mb_col;
+ /* Go through all macroblocks in the current image with missing MVs
+ * and calculate new MVs using the overlaps.
+ */
+ for (; mb_row < mb_rows; ++mb_row) {
+ int mb_to_top_edge = -((mb_row * 16)) << 3;
+ int mb_to_bottom_edge = ((mb_rows - 1 - mb_row) * 16) << 3;
+ for (; mb_col < mb_cols; ++mb_col) {
+ int mb_to_left_edge = -((mb_col * 16) << 3);
+ int mb_to_right_edge = ((mb_cols - 1 - mb_col) * 16) << 3;
+ const B_OVERLAP *block_overlaps =
+ overlaps[mb_row * mb_cols + mb_col].overlaps;
+ mi->mbmi.ref_frame = LAST_FRAME;
+ mi->mbmi.mode = SPLITMV;
+ mi->mbmi.uv_mode = DC_PRED;
+ mi->mbmi.partitioning = 3;
+ mi->mbmi.segment_id = 0;
+ estimate_mb_mvs(block_overlaps, mi, mb_to_left_edge, mb_to_right_edge,
+ mb_to_top_edge, mb_to_bottom_edge);
+ ++mi;
+ }
+ mb_col = 0;
+ ++mi;
+ }
+}
+
+void vp8_estimate_missing_mvs(VP8D_COMP *pbi) {
+ VP8_COMMON *const pc = &pbi->common;
+ estimate_missing_mvs(pbi->overlaps, pc->mi, pc->prev_mi, pc->mb_rows,
+ pc->mb_cols, pbi->mvs_corrupt_from_mb);
+}
+
+static void assign_neighbor(EC_BLOCK *neighbor, MODE_INFO *mi, int block_idx) {
+ assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
+ neighbor->ref_frame = mi->mbmi.ref_frame;
+ neighbor->mv = mi->bmi[block_idx].mv.as_mv;
+}
+
+/* Finds the neighboring blocks of a macroblock. In the general case
+ * 20 blocks are found. If fewer blocks are found due to image
+ * boundaries, those positions in the EC_BLOCK array are left "empty".
+ * The neighbors are enumerated clockwise, starting with the upper-left
+ * neighbor; each subsequent element is the next neighbor along the
+ * border, and the last element is the neighbor below the first one.
+ */
+static void find_neighboring_blocks(MODE_INFO *mi, EC_BLOCK *neighbors,
+ int mb_row, int mb_col, int mb_rows,
+ int mb_cols, int mi_stride) {
+ int i = 0;
+ int j;
+ if (mb_row > 0) {
+ /* upper left */
+ if (mb_col > 0) assign_neighbor(&neighbors[i], mi - mi_stride - 1, 15);
+ ++i;
+ /* above */
+ for (j = 12; j < 16; ++j, ++i)
+ assign_neighbor(&neighbors[i], mi - mi_stride, j);
+ } else
+ i += 5;
+ if (mb_col < mb_cols - 1) {
+ /* upper right */
+ if (mb_row > 0) assign_neighbor(&neighbors[i], mi - mi_stride + 1, 12);
+ ++i;
+ /* right */
+ for (j = 0; j <= 12; j += 4, ++i) assign_neighbor(&neighbors[i], mi + 1, j);
+ } else
+ i += 5;
+ if (mb_row < mb_rows - 1) {
+ /* lower right */
+ if (mb_col < mb_cols - 1)
+ assign_neighbor(&neighbors[i], mi + mi_stride + 1, 0);
+ ++i;
+ /* below */
+ for (j = 0; j < 4; ++j, ++i)
+ assign_neighbor(&neighbors[i], mi + mi_stride, j);
+ } else
+ i += 5;
+ if (mb_col > 0) {
+ /* lower left */
+ if (mb_row < mb_rows - 1)
+ assign_neighbor(&neighbors[i], mi + mi_stride - 1, 4);
+ ++i;
+ /* left */
+ for (j = 3; j < 16; j += 4, ++i) {
+ assign_neighbor(&neighbors[i], mi - 1, j);
+ }
+ } else
+ i += 5;
+ assert(i == 20);
+}
+
+/* Interpolates all motion vectors for a macroblock from the neighboring blocks'
+ * motion vectors.
+ */
+static void interpolate_mvs(MACROBLOCKD *mb, EC_BLOCK *neighbors,
+ MV_REFERENCE_FRAME dom_ref_frame) {
+ int row, col, i;
+ MODE_INFO *const mi = mb->mode_info_context;
+  /* Table with the positions of the neighboring blocks relative to the
+   * upper-left block of the current MB, starting with the upper-left
+   * neighbor and proceeding clockwise.
+   */
+ const EC_POS neigh_pos[NUM_NEIGHBORS] = {
+ { -1, -1 }, { -1, 0 }, { -1, 1 }, { -1, 2 }, { -1, 3 }, { -1, 4 }, { 0, 4 },
+ { 1, 4 }, { 2, 4 }, { 3, 4 }, { 4, 4 }, { 4, 3 }, { 4, 2 }, { 4, 1 },
+ { 4, 0 }, { 4, -1 }, { 3, -1 }, { 2, -1 }, { 1, -1 }, { 0, -1 }
+ };
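+  /* The 20 entries trace the border of the 4x4 block grid clockwise:
+   * five along the top (starting above-left), five down the right
+   * side, five along the bottom and five up the left side, matching
+   * the order produced by find_neighboring_blocks(). */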
+ mi->mbmi.need_to_clamp_mvs = 0;
+ for (row = 0; row < 4; ++row) {
+ int mb_to_top_edge = mb->mb_to_top_edge + ((row * 4) << 3);
+ int mb_to_bottom_edge = mb->mb_to_bottom_edge - ((row * 4) << 3);
+ for (col = 0; col < 4; ++col) {
+ int mb_to_left_edge = mb->mb_to_left_edge + ((col * 4) << 3);
+ int mb_to_right_edge = mb->mb_to_right_edge - ((col * 4) << 3);
+ int w_sum = 0;
+ int mv_row_sum = 0;
+ int mv_col_sum = 0;
+ int_mv *const mv = &(mi->bmi[row * 4 + col].mv);
+ mv->as_int = 0;
+ for (i = 0; i < NUM_NEIGHBORS; ++i) {
+ /* Calculate the weighted sum of neighboring MVs referring
+ * to the dominant frame type.
+ */
+ const int w = weights_q7[abs(row - neigh_pos[i].row)]
+ [abs(col - neigh_pos[i].col)];
+ if (neighbors[i].ref_frame != dom_ref_frame) continue;
+ w_sum += w;
+ /* Q7 * Q3 = Q10 */
+ mv_row_sum += w * neighbors[i].mv.row;
+ mv_col_sum += w * neighbors[i].mv.col;
+ }
+ if (w_sum > 0) {
+ /* Avoid division by zero.
+ * Normalize with the sum of the coefficients
+ * Q3 = Q10 / Q7
+ */
+ mv->as_mv.row = mv_row_sum / w_sum;
+ mv->as_mv.col = mv_col_sum / w_sum;
+ mi->mbmi.need_to_clamp_mvs |=
+ vp8_check_mv_bounds(mv, mb_to_left_edge, mb_to_right_edge,
+ mb_to_top_edge, mb_to_bottom_edge);
+ }
+ }
+ }
+}
+
+void vp8_interpolate_motion(MACROBLOCKD *mb, int mb_row, int mb_col,
+ int mb_rows, int mb_cols) {
+ /* Find relevant neighboring blocks */
+ EC_BLOCK neighbors[NUM_NEIGHBORS];
+ int i;
+ /* Initialize the array. MAX_REF_FRAMES is interpreted as "doesn't exist" */
+ for (i = 0; i < NUM_NEIGHBORS; ++i) {
+ neighbors[i].ref_frame = MAX_REF_FRAMES;
+ neighbors[i].mv.row = neighbors[i].mv.col = 0;
+ }
+ find_neighboring_blocks(mb->mode_info_context, neighbors, mb_row, mb_col,
+ mb_rows, mb_cols, mb->mode_info_stride);
+ /* Interpolate MVs for the missing blocks from the surrounding
+ * blocks which refer to the last frame. */
+ interpolate_mvs(mb, neighbors, LAST_FRAME);
+
+ mb->mode_info_context->mbmi.ref_frame = LAST_FRAME;
+ mb->mode_info_context->mbmi.mode = SPLITMV;
+ mb->mode_info_context->mbmi.uv_mode = DC_PRED;
+ mb->mode_info_context->mbmi.partitioning = 3;
+ mb->mode_info_context->mbmi.segment_id = 0;
+}
diff --git a/media/libvpx/libvpx/vp8/decoder/error_concealment.h b/media/libvpx/libvpx/vp8/decoder/error_concealment.h
new file mode 100644
index 0000000000..608a79f189
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/error_concealment.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_DECODER_ERROR_CONCEALMENT_H_
+#define VPX_VP8_DECODER_ERROR_CONCEALMENT_H_
+
+#include "onyxd_int.h"
+#include "ec_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Allocate memory for the overlap lists */
+int vp8_alloc_overlap_lists(VP8D_COMP *pbi);
+
+/* Deallocate the overlap lists */
+void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi);
+
+/* Estimate all missing motion vectors. */
+void vp8_estimate_missing_mvs(VP8D_COMP *pbi);
+
+/* Functions for spatial MV interpolation */
+
+/* Interpolates all motion vectors for a macroblock mb at position
+ * (mb_row, mb_col). */
+void vp8_interpolate_motion(MACROBLOCKD *mb, int mb_row, int mb_col,
+ int mb_rows, int mb_cols);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_DECODER_ERROR_CONCEALMENT_H_
diff --git a/media/libvpx/libvpx/vp8/decoder/onyxd_if.c b/media/libvpx/libvpx/vp8/decoder/onyxd_if.c
new file mode 100644
index 0000000000..765d2ec83e
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/onyxd_if.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8/common/onyxc_int.h"
+#if CONFIG_POSTPROC
+#include "vp8/common/postproc.h"
+#endif
+#include "vp8/common/onyxd.h"
+#include "onyxd_int.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/alloccommon.h"
+#include "vp8/common/common.h"
+#include "vp8/common/loopfilter.h"
+#include "vp8/common/swapyv12buffer.h"
+#include "vp8/common/threading.h"
+#include "decoderthreading.h"
+#include <stdio.h>
+#include <assert.h>
+
+#include "vp8/common/quant_common.h"
+#include "vp8/common/reconintra.h"
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp8/common/systemdependent.h"
+#include "vpx_ports/system_state.h"
+#include "vpx_ports/vpx_once.h"
+#include "vpx_ports/vpx_timer.h"
+#include "detokenize.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "error_concealment.h"
+#endif
+#if VPX_ARCH_ARM
+#include "vpx_ports/arm.h"
+#endif
+
+extern void vp8_init_loop_filter(VP8_COMMON *cm);
+static int get_free_fb(VP8_COMMON *cm);
+static void ref_cnt_fb(int *buf, int *idx, int new_idx);
+
+static void initialize_dec(void) {
+ static volatile int init_done = 0;
+
+ if (!init_done) {
+ vpx_dsp_rtcd();
+ vp8_init_intra_predictors();
+ init_done = 1;
+ }
+}
+
+static void remove_decompressor(VP8D_COMP *pbi) {
+#if CONFIG_ERROR_CONCEALMENT
+ vp8_de_alloc_overlap_lists(pbi);
+#endif
+ vp8_remove_common(&pbi->common);
+ vpx_free(pbi);
+}
+
+static struct VP8D_COMP *create_decompressor(VP8D_CONFIG *oxcf) {
+ VP8D_COMP *pbi = vpx_memalign(32, sizeof(VP8D_COMP));
+
+ if (!pbi) return NULL;
+
+ memset(pbi, 0, sizeof(VP8D_COMP));
+
+ if (setjmp(pbi->common.error.jmp)) {
+ pbi->common.error.setjmp = 0;
+ remove_decompressor(pbi);
+ return 0;
+ }
+
+ pbi->common.error.setjmp = 1;
+
+ vp8_create_common(&pbi->common);
+
+ pbi->common.current_video_frame = 0;
+ pbi->ready_for_new_data = 1;
+
+  /* vp8cx_init_de_quantizer() is first called here. Add a check in
+   * frame_init_dequantizer() to avoid unnecessarily calling
+   * vp8cx_init_de_quantizer() for every frame.
+   */
+ vp8cx_init_de_quantizer(pbi);
+
+ vp8_loop_filter_init(&pbi->common);
+
+ pbi->common.error.setjmp = 0;
+
+#if CONFIG_ERROR_CONCEALMENT
+ pbi->ec_enabled = oxcf->error_concealment;
+ pbi->overlaps = NULL;
+#else
+ (void)oxcf;
+ pbi->ec_enabled = 0;
+#endif
+ /* Error concealment is activated after a key frame has been
+ * decoded without errors when error concealment is enabled.
+ */
+ pbi->ec_active = 0;
+
+ pbi->decoded_key_frame = 0;
+
+  /* Independent partitions are activated when a frame updates the
+ * token probability table to have equal probabilities over the
+ * PREV_COEF context.
+ */
+ pbi->independent_partitions = 0;
+
+ vp8_setup_block_dptrs(&pbi->mb);
+
+ once(initialize_dec);
+
+ return pbi;
+}
+
+vpx_codec_err_t vp8dx_get_reference(VP8D_COMP *pbi,
+ enum vpx_ref_frame_type ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP8_COMMON *cm = &pbi->common;
+ int ref_fb_idx;
+
+ if (ref_frame_flag == VP8_LAST_FRAME) {
+ ref_fb_idx = cm->lst_fb_idx;
+ } else if (ref_frame_flag == VP8_GOLD_FRAME) {
+ ref_fb_idx = cm->gld_fb_idx;
+ } else if (ref_frame_flag == VP8_ALTR_FRAME) {
+ ref_fb_idx = cm->alt_fb_idx;
+ } else {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ "Invalid reference frame");
+ return pbi->common.error.error_code;
+ }
+
+ if (cm->yv12_fb[ref_fb_idx].y_height != sd->y_height ||
+ cm->yv12_fb[ref_fb_idx].y_width != sd->y_width ||
+ cm->yv12_fb[ref_fb_idx].uv_height != sd->uv_height ||
+ cm->yv12_fb[ref_fb_idx].uv_width != sd->uv_width) {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ "Incorrect buffer dimensions");
+ } else
+ vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
+
+ return pbi->common.error.error_code;
+}
+
+vpx_codec_err_t vp8dx_set_reference(VP8D_COMP *pbi,
+ enum vpx_ref_frame_type ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP8_COMMON *cm = &pbi->common;
+ int *ref_fb_ptr = NULL;
+ int free_fb;
+
+ if (ref_frame_flag == VP8_LAST_FRAME) {
+ ref_fb_ptr = &cm->lst_fb_idx;
+ } else if (ref_frame_flag == VP8_GOLD_FRAME) {
+ ref_fb_ptr = &cm->gld_fb_idx;
+ } else if (ref_frame_flag == VP8_ALTR_FRAME) {
+ ref_fb_ptr = &cm->alt_fb_idx;
+ } else {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ "Invalid reference frame");
+ return pbi->common.error.error_code;
+ }
+
+ if (cm->yv12_fb[*ref_fb_ptr].y_height != sd->y_height ||
+ cm->yv12_fb[*ref_fb_ptr].y_width != sd->y_width ||
+ cm->yv12_fb[*ref_fb_ptr].uv_height != sd->uv_height ||
+ cm->yv12_fb[*ref_fb_ptr].uv_width != sd->uv_width) {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ "Incorrect buffer dimensions");
+ } else {
+ /* Find an empty frame buffer. */
+ free_fb = get_free_fb(cm);
+ /* Decrease fb_idx_ref_cnt since it will be increased again in
+ * ref_cnt_fb() below. */
+ cm->fb_idx_ref_cnt[free_fb]--;
+
+ /* Manage the reference counters and copy image. */
+ ref_cnt_fb(cm->fb_idx_ref_cnt, ref_fb_ptr, free_fb);
+ vp8_yv12_copy_frame(sd, &cm->yv12_fb[*ref_fb_ptr]);
+ }
+
+ return pbi->common.error.error_code;
+}
+
+static int get_free_fb(VP8_COMMON *cm) {
+ int i;
+ for (i = 0; i < NUM_YV12_BUFFERS; ++i) {
+ if (cm->fb_idx_ref_cnt[i] == 0) break;
+ }
+
+ assert(i < NUM_YV12_BUFFERS);
+ cm->fb_idx_ref_cnt[i] = 1;
+ return i;
+}
+
+static void ref_cnt_fb(int *buf, int *idx, int new_idx) {
+ if (buf[*idx] > 0) buf[*idx]--;
+
+ *idx = new_idx;
+
+ buf[new_idx]++;
+}
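+/* Example: if *idx refers to buffer 2 with a reference count of 1 and
+ * new_idx is 4, ref_cnt_fb() drops buffer 2's count to 0 (free for
+ * reuse) and points *idx at buffer 4, bumping that buffer's count. */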
+
+/* If any buffer copy / swapping is signalled it should be done here. */
+static int swap_frame_buffers(VP8_COMMON *cm) {
+ int err = 0;
+
+ /* The alternate reference frame or golden frame can be updated
+ * using the new, last, or golden/alt ref frame. If it
+ * is updated using the newly decoded frame it is a refresh.
+ * An update using the last or golden/alt ref frame is a copy.
+ */
+ if (cm->copy_buffer_to_arf) {
+ int new_fb = 0;
+
+ if (cm->copy_buffer_to_arf == 1) {
+ new_fb = cm->lst_fb_idx;
+ } else if (cm->copy_buffer_to_arf == 2) {
+ new_fb = cm->gld_fb_idx;
+ } else {
+ err = -1;
+ }
+
+ ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->alt_fb_idx, new_fb);
+ }
+
+ if (cm->copy_buffer_to_gf) {
+ int new_fb = 0;
+
+ if (cm->copy_buffer_to_gf == 1) {
+ new_fb = cm->lst_fb_idx;
+ } else if (cm->copy_buffer_to_gf == 2) {
+ new_fb = cm->alt_fb_idx;
+ } else {
+ err = -1;
+ }
+
+ ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->gld_fb_idx, new_fb);
+ }
+
+ if (cm->refresh_golden_frame) {
+ ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->gld_fb_idx, cm->new_fb_idx);
+ }
+
+ if (cm->refresh_alt_ref_frame) {
+ ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->alt_fb_idx, cm->new_fb_idx);
+ }
+
+ if (cm->refresh_last_frame) {
+ ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->lst_fb_idx, cm->new_fb_idx);
+
+ cm->frame_to_show = &cm->yv12_fb[cm->lst_fb_idx];
+ } else {
+ cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
+ }
+
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+
+ return err;
+}
+
+static int check_fragments_for_errors(VP8D_COMP *pbi) {
+ if (!pbi->ec_active && pbi->fragments.count <= 1 &&
+ pbi->fragments.sizes[0] == 0) {
+ VP8_COMMON *cm = &pbi->common;
+
+ /* If error concealment is disabled we won't signal missing frames
+ * to the decoder.
+ */
+ if (cm->fb_idx_ref_cnt[cm->lst_fb_idx] > 1) {
+      /* The last reference shares a buffer with another reference.
+       * Move it to its own buffer before setting it as corrupt,
+       * otherwise we would make multiple buffers corrupt.
+       */
+ const int prev_idx = cm->lst_fb_idx;
+ cm->fb_idx_ref_cnt[prev_idx]--;
+ cm->lst_fb_idx = get_free_fb(cm);
+ vp8_yv12_copy_frame(&cm->yv12_fb[prev_idx], &cm->yv12_fb[cm->lst_fb_idx]);
+ }
+ /* This is used to signal that we are missing frames.
+   * We do not know if the missing frame(s) were supposed to update
+   * any of the reference buffers, but we act conservatively and
+ * mark only the last buffer as corrupted.
+ */
+ cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
+
+ /* Signal that we have no frame to show. */
+ cm->show_frame = 0;
+
+ /* Nothing more to do. */
+ return 0;
+ }
+
+ return 1;
+}
+
+int vp8dx_receive_compressed_data(VP8D_COMP *pbi, int64_t time_stamp) {
+ VP8_COMMON *cm = &pbi->common;
+ int retcode = -1;
+
+ pbi->common.error.error_code = VPX_CODEC_OK;
+
+ retcode = check_fragments_for_errors(pbi);
+ if (retcode <= 0) return retcode;
+
+ cm->new_fb_idx = get_free_fb(cm);
+
+ /* setup reference frames for vp8_decode_frame */
+ pbi->dec_fb_ref[INTRA_FRAME] = &cm->yv12_fb[cm->new_fb_idx];
+ pbi->dec_fb_ref[LAST_FRAME] = &cm->yv12_fb[cm->lst_fb_idx];
+ pbi->dec_fb_ref[GOLDEN_FRAME] = &cm->yv12_fb[cm->gld_fb_idx];
+ pbi->dec_fb_ref[ALTREF_FRAME] = &cm->yv12_fb[cm->alt_fb_idx];
+
+ retcode = vp8_decode_frame(pbi);
+
+ if (retcode < 0) {
+ if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) {
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+ }
+
+ pbi->common.error.error_code = VPX_CODEC_ERROR;
+ // Propagate the error info.
+ if (pbi->mb.error_info.error_code != 0) {
+ pbi->common.error.error_code = pbi->mb.error_info.error_code;
+ memcpy(pbi->common.error.detail, pbi->mb.error_info.detail,
+ sizeof(pbi->mb.error_info.detail));
+ }
+ goto decode_exit;
+ }
+
+ if (swap_frame_buffers(cm)) {
+ pbi->common.error.error_code = VPX_CODEC_ERROR;
+ goto decode_exit;
+ }
+
+ vpx_clear_system_state();
+
+ if (cm->show_frame) {
+ cm->current_video_frame++;
+ cm->show_frame_mi = cm->mi;
+ }
+
+#if CONFIG_ERROR_CONCEALMENT
+ /* swap the mode infos to storage for future error concealment */
+ if (pbi->ec_enabled && pbi->common.prev_mi) {
+ MODE_INFO *tmp = pbi->common.prev_mi;
+ int row, col;
+ pbi->common.prev_mi = pbi->common.mi;
+ pbi->common.mi = tmp;
+
+ /* Propagate the segment_ids to the next frame */
+ for (row = 0; row < pbi->common.mb_rows; ++row) {
+ for (col = 0; col < pbi->common.mb_cols; ++col) {
+ const int i = row * pbi->common.mode_info_stride + col;
+ pbi->common.mi[i].mbmi.segment_id =
+ pbi->common.prev_mi[i].mbmi.segment_id;
+ }
+ }
+ }
+#endif
+
+ pbi->ready_for_new_data = 0;
+ pbi->last_time_stamp = time_stamp;
+
+decode_exit:
+ vpx_clear_system_state();
+ return retcode;
+}
+int vp8dx_get_raw_frame(VP8D_COMP *pbi, YV12_BUFFER_CONFIG *sd,
+ int64_t *time_stamp, int64_t *time_end_stamp,
+ vp8_ppflags_t *flags) {
+ int ret = -1;
+
+ if (pbi->ready_for_new_data == 1) return ret;
+
+  /* i.e., no raw frame to show */
+ if (pbi->common.show_frame == 0) return ret;
+
+ pbi->ready_for_new_data = 1;
+ *time_stamp = pbi->last_time_stamp;
+ *time_end_stamp = 0;
+
+#if CONFIG_POSTPROC
+ ret = vp8_post_proc_frame(&pbi->common, sd, flags);
+#else
+ (void)flags;
+
+ if (pbi->common.frame_to_show) {
+ *sd = *pbi->common.frame_to_show;
+ sd->y_width = pbi->common.Width;
+ sd->y_height = pbi->common.Height;
+ sd->uv_height = pbi->common.Height / 2;
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+#endif /*!CONFIG_POSTPROC*/
+ vpx_clear_system_state();
+ return ret;
+}
+
+/* This function as written isn't decoder-specific, but the encoder has
+ * much faster ways of computing this, so it's OK for it to live in a
+ * decoder-specific file.
+ */
+int vp8dx_references_buffer(VP8_COMMON *oci, int ref_frame) {
+ const MODE_INFO *mi = oci->mi;
+ int mb_row, mb_col;
+
+ for (mb_row = 0; mb_row < oci->mb_rows; ++mb_row) {
+ for (mb_col = 0; mb_col < oci->mb_cols; mb_col++, mi++) {
+ if (mi->mbmi.ref_frame == ref_frame) return 1;
+ }
+ mi++;
+ }
+ return 0;
+}
+
+int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf) {
+ /* decoder instance for single thread mode */
+ fb->pbi[0] = create_decompressor(oxcf);
+ if (!fb->pbi[0]) return VPX_CODEC_ERROR;
+
+#if CONFIG_MULTITHREAD
+ if (setjmp(fb->pbi[0]->common.error.jmp)) {
+ vp8_remove_decoder_instances(fb);
+ vp8_zero(fb->pbi);
+ vpx_clear_system_state();
+ return VPX_CODEC_ERROR;
+ }
+
+ fb->pbi[0]->common.error.setjmp = 1;
+ fb->pbi[0]->max_threads = oxcf->max_threads;
+ vp8_decoder_create_threads(fb->pbi[0]);
+ fb->pbi[0]->common.error.setjmp = 0;
+#endif
+ return VPX_CODEC_OK;
+}
+
+int vp8_remove_decoder_instances(struct frame_buffers *fb) {
+ VP8D_COMP *pbi = fb->pbi[0];
+
+ if (!pbi) return VPX_CODEC_ERROR;
+#if CONFIG_MULTITHREAD
+ vp8_decoder_remove_threads(pbi);
+#endif
+
+ /* decoder instance for single thread mode */
+ remove_decompressor(pbi);
+ return VPX_CODEC_OK;
+}
+
+int vp8dx_get_quantizer(const VP8D_COMP *pbi) {
+ return pbi->common.base_qindex;
+}
diff --git a/media/libvpx/libvpx/vp8/decoder/onyxd_int.h b/media/libvpx/libvpx/vp8/decoder/onyxd_int.h
new file mode 100644
index 0000000000..56500a8506
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/onyxd_int.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_DECODER_ONYXD_INT_H_
+#define VPX_VP8_DECODER_ONYXD_INT_H_
+
+#include <assert.h>
+
+#include "vpx_config.h"
+#include "vp8/common/onyxd.h"
+#include "treereader.h"
+#include "vp8/common/onyxc_int.h"
+#include "vp8/common/threading.h"
+
+#if CONFIG_ERROR_CONCEALMENT
+#include "ec_types.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ int ithread;
+ void *ptr1;
+ void *ptr2;
+} DECODETHREAD_DATA;
+
+typedef struct {
+ MACROBLOCKD mbd;
+} MB_ROW_DEC;
+
+typedef struct {
+ int enabled;
+ unsigned int count;
+ const unsigned char *ptrs[MAX_PARTITIONS];
+ unsigned int sizes[MAX_PARTITIONS];
+} FRAGMENT_DATA;
+
+#define MAX_FB_MT_DEC 32
+
+struct frame_buffers {
+  /* This struct will be populated with frame buffer management
+   * info in future commits. */
+
+ /* decoder instances */
+ struct VP8D_COMP *pbi[MAX_FB_MT_DEC];
+};
+
+typedef struct VP8D_COMP {
+ DECLARE_ALIGNED(16, MACROBLOCKD, mb);
+
+ YV12_BUFFER_CONFIG *dec_fb_ref[NUM_YV12_BUFFERS];
+
+ DECLARE_ALIGNED(16, VP8_COMMON, common);
+
+ /* the last partition will be used for the modes/mvs */
+ vp8_reader mbc[MAX_PARTITIONS];
+
+ VP8D_CONFIG oxcf;
+
+ FRAGMENT_DATA fragments;
+
+#if CONFIG_MULTITHREAD
+ /* variable for threading */
+
+ vpx_atomic_int b_multithreaded_rd;
+ int max_threads;
+ int current_mb_col_main;
+ unsigned int decoding_thread_count;
+ int allocated_decoding_thread_count;
+
+ int mt_baseline_filter_level[MAX_MB_SEGMENTS];
+ int sync_range;
+ /* Each row remembers its already decoded column. */
+ vpx_atomic_int *mt_current_mb_col;
+
+ unsigned char **mt_yabove_row; /* mb_rows x width */
+ unsigned char **mt_uabove_row;
+ unsigned char **mt_vabove_row;
+ unsigned char **mt_yleft_col; /* mb_rows x 16 */
+ unsigned char **mt_uleft_col; /* mb_rows x 8 */
+ unsigned char **mt_vleft_col; /* mb_rows x 8 */
+
+ MB_ROW_DEC *mb_row_di;
+ DECODETHREAD_DATA *de_thread_data;
+
+ pthread_t *h_decoding_thread;
+ sem_t *h_event_start_decoding;
+ sem_t h_event_end_decoding;
+/* end of threading data */
+#endif
+
+ int64_t last_time_stamp;
+ int ready_for_new_data;
+
+ vp8_prob prob_intra;
+ vp8_prob prob_last;
+ vp8_prob prob_gf;
+ vp8_prob prob_skip_false;
+
+#if CONFIG_ERROR_CONCEALMENT
+ MB_OVERLAP *overlaps;
+ /* the mb num from which modes and mvs (first partition) are corrupt */
+ unsigned int mvs_corrupt_from_mb;
+#endif
+ int ec_enabled;
+ int ec_active;
+ int decoded_key_frame;
+ int independent_partitions;
+ int frame_corrupt_residual;
+
+ vpx_decrypt_cb decrypt_cb;
+ void *decrypt_state;
+#if CONFIG_MULTITHREAD
+ // Restart threads on next frame if set to 1.
+ // This is set when error happens in multithreaded decoding and all threads
+ // are shut down.
+ int restart_threads;
+#endif
+} VP8D_COMP;
+
+void vp8cx_init_de_quantizer(VP8D_COMP *pbi);
+void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd);
+int vp8_decode_frame(VP8D_COMP *pbi);
+
+int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf);
+int vp8_remove_decoder_instances(struct frame_buffers *fb);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_DECODER_ONYXD_INT_H_
diff --git a/media/libvpx/libvpx/vp8/decoder/threading.c b/media/libvpx/libvpx/vp8/decoder/threading.c
new file mode 100644
index 0000000000..9ea6a4f34a
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/threading.c
@@ -0,0 +1,907 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#if !defined(_WIN32) && CONFIG_OS_SUPPORT == 1
+#include <unistd.h>
+#endif
+#include "onyxd_int.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/common.h"
+#include "vp8/common/threading.h"
+#include "vp8/common/loopfilter.h"
+#include "vp8/common/extend.h"
+#include "vpx_ports/vpx_timer.h"
+#include "decoderthreading.h"
+#include "detokenize.h"
+#include "vp8/common/reconintra4x4.h"
+#include "vp8/common/reconinter.h"
+#include "vp8/common/reconintra.h"
+#include "vp8/common/setupintrarecon.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "error_concealment.h"
+#endif
+
+#define CALLOC_ARRAY(p, n) \
+ CHECK_MEM_ERROR(&pbi->common.error, (p), vpx_calloc(sizeof(*(p)), (n)))
+#define CALLOC_ARRAY_ALIGNED(p, n, algn) \
+ do { \
+ CHECK_MEM_ERROR(&pbi->common.error, (p), \
+ vpx_memalign((algn), sizeof(*(p)) * (n))); \
+ memset((p), 0, (n) * sizeof(*(p))); \
+ } while (0)
+
+static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd,
+ MB_ROW_DEC *mbrd, int count) {
+ VP8_COMMON *const pc = &pbi->common;
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ MACROBLOCKD *mbd = &mbrd[i].mbd;
+ mbd->subpixel_predict = xd->subpixel_predict;
+ mbd->subpixel_predict8x4 = xd->subpixel_predict8x4;
+ mbd->subpixel_predict8x8 = xd->subpixel_predict8x8;
+ mbd->subpixel_predict16x16 = xd->subpixel_predict16x16;
+
+ mbd->frame_type = pc->frame_type;
+ mbd->pre = xd->pre;
+ mbd->dst = xd->dst;
+
+ mbd->segmentation_enabled = xd->segmentation_enabled;
+ mbd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
+ memcpy(mbd->segment_feature_data, xd->segment_feature_data,
+ sizeof(xd->segment_feature_data));
+
+ /*signed char ref_lf_deltas[MAX_REF_LF_DELTAS];*/
+ memcpy(mbd->ref_lf_deltas, xd->ref_lf_deltas, sizeof(xd->ref_lf_deltas));
+ /*signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];*/
+ memcpy(mbd->mode_lf_deltas, xd->mode_lf_deltas, sizeof(xd->mode_lf_deltas));
+ /*unsigned char mode_ref_lf_delta_enabled;
+ unsigned char mode_ref_lf_delta_update;*/
+ mbd->mode_ref_lf_delta_enabled = xd->mode_ref_lf_delta_enabled;
+ mbd->mode_ref_lf_delta_update = xd->mode_ref_lf_delta_update;
+
+ mbd->current_bc = &pbi->mbc[0];
+
+ memcpy(mbd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc));
+ memcpy(mbd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
+ memcpy(mbd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
+ memcpy(mbd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));
+
+ mbd->fullpixel_mask = ~0;
+
+ if (pc->full_pixel) mbd->fullpixel_mask = ~7;
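+    /* In full-pixel mode, clearing the three fractional Q3 bits with ~7
+     * snaps motion vectors to whole pixels. */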
+ }
+
+ for (i = 0; i < pc->mb_rows; ++i)
+ vpx_atomic_store_release(&pbi->mt_current_mb_col[i], -1);
+}
+
+static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
+ unsigned int mb_idx) {
+ MB_PREDICTION_MODE mode;
+ int i;
+#if CONFIG_ERROR_CONCEALMENT
+ int corruption_detected = 0;
+#else
+ (void)mb_idx;
+#endif
+
+ if (xd->mode_info_context->mbmi.mb_skip_coeff) {
+ vp8_reset_mb_tokens_context(xd);
+ } else if (!vp8dx_bool_error(xd->current_bc)) {
+ int eobtotal;
+ eobtotal = vp8_decode_mb_tokens(pbi, xd);
+
+ /* Special case: Force the loopfilter to skip when eobtotal is zero */
+ xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal == 0);
+ }
+
+ mode = xd->mode_info_context->mbmi.mode;
+
+ if (xd->segmentation_enabled) vp8_mb_init_dequantizer(pbi, xd);
+
+#if CONFIG_ERROR_CONCEALMENT
+
+ if (pbi->ec_active) {
+ int throw_residual;
+ /* When we have independent partitions we can apply residual even
+ * though other partitions within the frame are corrupt.
+ */
+ throw_residual =
+ (!pbi->independent_partitions && pbi->frame_corrupt_residual);
+ throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));
+
+ if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) {
+ /* MB with corrupt residuals or corrupt mode/motion vectors.
+ * Better to use the predictor as reconstruction.
+ */
+ pbi->frame_corrupt_residual = 1;
+ memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
+
+ corruption_detected = 1;
+
+ /* force idct to be skipped for B_PRED and use the
+ * prediction only for reconstruction
+ * */
+ memset(xd->eobs, 0, 25);
+ }
+ }
+#endif
+
+ /* do prediction */
+ if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
+ vp8_build_intra_predictors_mbuv_s(
+ xd, xd->recon_above[1], xd->recon_above[2], xd->recon_left[1],
+ xd->recon_left[2], xd->recon_left_stride[1], xd->dst.u_buffer,
+ xd->dst.v_buffer, xd->dst.uv_stride);
+
+ if (mode != B_PRED) {
+ vp8_build_intra_predictors_mby_s(
+ xd, xd->recon_above[0], xd->recon_left[0], xd->recon_left_stride[0],
+ xd->dst.y_buffer, xd->dst.y_stride);
+ } else {
+ short *DQC = xd->dequant_y1;
+ int dst_stride = xd->dst.y_stride;
+
+ /* clear out residual eob info */
+ if (xd->mode_info_context->mbmi.mb_skip_coeff) memset(xd->eobs, 0, 25);
+
+ intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
+
+ for (i = 0; i < 16; ++i) {
+ BLOCKD *b = &xd->block[i];
+ unsigned char *dst = xd->dst.y_buffer + b->offset;
+ B_PREDICTION_MODE b_mode = xd->mode_info_context->bmi[i].as_mode;
+ unsigned char *Above;
+ unsigned char *yleft;
+ int left_stride;
+ unsigned char top_left;
+
+        /* Caution: for some b_modes the predictor needs 8 pixels
+         * (4 above + 4 above-right). */
+ if (i < 4 && pbi->common.filter_level) {
+ Above = xd->recon_above[0] + b->offset;
+ } else {
+ Above = dst - dst_stride;
+ }
+
+ if (i % 4 == 0 && pbi->common.filter_level) {
+ yleft = xd->recon_left[0] + i;
+ left_stride = 1;
+ } else {
+ yleft = dst - 1;
+ left_stride = dst_stride;
+ }
+
+ if ((i == 4 || i == 8 || i == 12) && pbi->common.filter_level) {
+ top_left = *(xd->recon_left[0] + i - 1);
+ } else {
+ top_left = Above[-1];
+ }
+
+ vp8_intra4x4_predict(Above, yleft, left_stride, b_mode, dst, dst_stride,
+ top_left);
+
+ if (xd->eobs[i]) {
+ if (xd->eobs[i] > 1) {
+ vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride);
+ } else {
+ vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0], dst, dst_stride, dst,
+ dst_stride);
+ memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
+ }
+ }
+ }
+ }
+ } else {
+ vp8_build_inter_predictors_mb(xd);
+ }
+
+#if CONFIG_ERROR_CONCEALMENT
+ if (corruption_detected) {
+ return;
+ }
+#endif
+
+ if (!xd->mode_info_context->mbmi.mb_skip_coeff) {
+ /* dequantization and idct */
+ if (mode != B_PRED) {
+ short *DQC = xd->dequant_y1;
+
+ if (mode != SPLITMV) {
+ BLOCKD *b = &xd->block[24];
+
+ /* do 2nd order transform on the dc block */
+ if (xd->eobs[24] > 1) {
+ vp8_dequantize_b(b, xd->dequant_y2);
+
+ vp8_short_inv_walsh4x4(&b->dqcoeff[0], xd->qcoeff);
+ memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
+ } else {
+ b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
+ vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], xd->qcoeff);
+ memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
+ }
+
+ /* override the dc dequant constant in order to preserve the
+ * dc components
+ */
+ DQC = xd->dequant_y1_dc;
+ }
+
+ vp8_dequant_idct_add_y_block(xd->qcoeff, DQC, xd->dst.y_buffer,
+ xd->dst.y_stride, xd->eobs);
+ }
+
+ vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
+ xd->dst.u_buffer, xd->dst.v_buffer,
+ xd->dst.uv_stride, xd->eobs + 16);
+ }
+}
+
+static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
+ int start_mb_row) {
+ const vpx_atomic_int *last_row_current_mb_col;
+ vpx_atomic_int *current_mb_col;
+ int mb_row;
+ VP8_COMMON *pc = &pbi->common;
+ const int nsync = pbi->sync_range;
+ const vpx_atomic_int first_row_no_sync_above =
+ VPX_ATOMIC_INIT(pc->mb_cols + nsync);
+ int num_part = 1 << pbi->common.multi_token_partition;
+ int last_mb_row = start_mb_row;
+
+ YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];
+ YV12_BUFFER_CONFIG *yv12_fb_lst = pbi->dec_fb_ref[LAST_FRAME];
+
+ int recon_y_stride = yv12_fb_new->y_stride;
+ int recon_uv_stride = yv12_fb_new->uv_stride;
+
+ unsigned char *ref_buffer[MAX_REF_FRAMES][3];
+ unsigned char *dst_buffer[3];
+ int i;
+ int ref_fb_corrupted[MAX_REF_FRAMES];
+
+ ref_fb_corrupted[INTRA_FRAME] = 0;
+
+ for (i = 1; i < MAX_REF_FRAMES; ++i) {
+ YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i];
+
+ ref_buffer[i][0] = this_fb->y_buffer;
+ ref_buffer[i][1] = this_fb->u_buffer;
+ ref_buffer[i][2] = this_fb->v_buffer;
+
+ ref_fb_corrupted[i] = this_fb->corrupted;
+ }
+
+ dst_buffer[0] = yv12_fb_new->y_buffer;
+ dst_buffer[1] = yv12_fb_new->u_buffer;
+ dst_buffer[2] = yv12_fb_new->v_buffer;
+
+ xd->up_available = (start_mb_row != 0);
+
+ xd->mode_info_context = pc->mi + pc->mode_info_stride * start_mb_row;
+ xd->mode_info_stride = pc->mode_info_stride;
+
+ for (mb_row = start_mb_row; mb_row < pc->mb_rows;
+ mb_row += (pbi->decoding_thread_count + 1)) {
+ int recon_yoffset, recon_uvoffset;
+ int mb_col;
+ int filter_level;
+ loop_filter_info_n *lfi_n = &pc->lf_info;
+
+ /* save last row processed by this thread */
+ last_mb_row = mb_row;
+ /* select bool coder for current partition */
+ xd->current_bc = &pbi->mbc[mb_row % num_part];
+
+ if (mb_row > 0) {
+ last_row_current_mb_col = &pbi->mt_current_mb_col[mb_row - 1];
+ } else {
+ last_row_current_mb_col = &first_row_no_sync_above;
+ }
+
+ current_mb_col = &pbi->mt_current_mb_col[mb_row];
+
+ recon_yoffset = mb_row * recon_y_stride * 16;
+ recon_uvoffset = mb_row * recon_uv_stride * 8;
+
+ /* reset contexts */
+ xd->above_context = pc->above_context;
+ memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ xd->left_available = 0;
+
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
+ xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
+
+ if (pbi->common.filter_level) {
+ xd->recon_above[0] = pbi->mt_yabove_row[mb_row] + 0 * 16 + 32;
+ xd->recon_above[1] = pbi->mt_uabove_row[mb_row] + 0 * 8 + 16;
+ xd->recon_above[2] = pbi->mt_vabove_row[mb_row] + 0 * 8 + 16;
+
+ xd->recon_left[0] = pbi->mt_yleft_col[mb_row];
+ xd->recon_left[1] = pbi->mt_uleft_col[mb_row];
+ xd->recon_left[2] = pbi->mt_vleft_col[mb_row];
+
+      /* TODO: move outside the row loop */
+ xd->recon_left_stride[0] = 1;
+ xd->recon_left_stride[1] = 1;
+ } else {
+ xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
+ xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
+ xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;
+
+ xd->recon_left[0] = xd->recon_above[0] - 1;
+ xd->recon_left[1] = xd->recon_above[1] - 1;
+ xd->recon_left[2] = xd->recon_above[2] - 1;
+
+ xd->recon_above[0] -= xd->dst.y_stride;
+ xd->recon_above[1] -= xd->dst.uv_stride;
+ xd->recon_above[2] -= xd->dst.uv_stride;
+
+      /* TODO: move outside the row loop */
+ xd->recon_left_stride[0] = xd->dst.y_stride;
+ xd->recon_left_stride[1] = xd->dst.uv_stride;
+
+ setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1],
+ xd->recon_left[2], xd->dst.y_stride,
+ xd->dst.uv_stride);
+ }
+
+ for (mb_col = 0; mb_col < pc->mb_cols; ++mb_col) {
+ if (((mb_col - 1) % nsync) == 0) {
+ vpx_atomic_store_release(current_mb_col, mb_col - 1);
+ }
+
+ if (mb_row && !(mb_col & (nsync - 1))) {
+ vp8_atomic_spin_wait(mb_col, last_row_current_mb_col, nsync);
+ }
+
+ /* Distance of MB to the various image edges.
+ * These are specified to 8th pel as they are always
+ * compared to values that are in 1/8th pel units.
+ */
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
+
+#if CONFIG_ERROR_CONCEALMENT
+ {
+ int corrupt_residual =
+ (!pbi->independent_partitions && pbi->frame_corrupt_residual) ||
+ vp8dx_bool_error(xd->current_bc);
+ if (pbi->ec_active &&
+ (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
+ corrupt_residual) {
+ /* We have an intra block with corrupt
+ * coefficients, better to conceal with an inter
+ * block.
+ * Interpolate MVs from neighboring MBs
+ *
+ * Note that for the first mb with corrupt
+ * residual in a frame, we might not discover
+ * that before decoding the residual. That
+ * happens after this check, and therefore no
+ * inter concealment will be done.
+ */
+ vp8_interpolate_motion(xd, mb_row, mb_col, pc->mb_rows, pc->mb_cols);
+ }
+ }
+#endif
+
+ xd->dst.y_buffer = dst_buffer[0] + recon_yoffset;
+ xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
+ xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;
+
+ /* propagate errors from reference frames */
+ xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];
+
+ if (xd->corrupted) {
+        // Move the current decoding macroblock to the end of the row for all
+        // rows assigned to this thread, so that other threads won't be
+        // waiting.
+ for (; mb_row < pc->mb_rows;
+ mb_row += (pbi->decoding_thread_count + 1)) {
+ current_mb_col = &pbi->mt_current_mb_col[mb_row];
+ vpx_atomic_store_release(current_mb_col, pc->mb_cols + nsync);
+ }
+ vpx_internal_error(&xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Corrupted reference frame");
+ }
+
+ if (xd->mode_info_context->mbmi.ref_frame >= LAST_FRAME) {
+ const MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame;
+ xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset;
+ xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset;
+ xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset;
+ } else {
+ // ref_frame is INTRA_FRAME, pre buffer should not be used.
+ xd->pre.y_buffer = 0;
+ xd->pre.u_buffer = 0;
+ xd->pre.v_buffer = 0;
+ }
+ mt_decode_macroblock(pbi, xd, 0);
+
+ xd->left_available = 1;
+
+ /* check if the boolean decoder has suffered an error */
+ xd->corrupted |= vp8dx_bool_error(xd->current_bc);
+
+ xd->recon_above[0] += 16;
+ xd->recon_above[1] += 8;
+ xd->recon_above[2] += 8;
+
+ if (!pbi->common.filter_level) {
+ xd->recon_left[0] += 16;
+ xd->recon_left[1] += 8;
+ xd->recon_left[2] += 8;
+ }
+
+ if (pbi->common.filter_level) {
+ int skip_lf = (xd->mode_info_context->mbmi.mode != B_PRED &&
+ xd->mode_info_context->mbmi.mode != SPLITMV &&
+ xd->mode_info_context->mbmi.mb_skip_coeff);
+
+ const int mode_index =
+ lfi_n->mode_lf_lut[xd->mode_info_context->mbmi.mode];
+ const int seg = xd->mode_info_context->mbmi.segment_id;
+ const int ref_frame = xd->mode_info_context->mbmi.ref_frame;
+
+ filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
+
+ if (mb_row != pc->mb_rows - 1) {
+        /* Save the decoded MB's bottom pixel row for next-row decoding */
+ memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col * 16),
+ (xd->dst.y_buffer + 15 * recon_y_stride), 16);
+ memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col * 8),
+ (xd->dst.u_buffer + 7 * recon_uv_stride), 8);
+ memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col * 8),
+ (xd->dst.v_buffer + 7 * recon_uv_stride), 8);
+ }
+
+ /* save left_col for next MB decoding */
+ if (mb_col != pc->mb_cols - 1) {
+ MODE_INFO *next = xd->mode_info_context + 1;
+
+ if (next->mbmi.ref_frame == INTRA_FRAME) {
+ for (i = 0; i < 16; ++i) {
+ pbi->mt_yleft_col[mb_row][i] =
+ xd->dst.y_buffer[i * recon_y_stride + 15];
+ }
+ for (i = 0; i < 8; ++i) {
+ pbi->mt_uleft_col[mb_row][i] =
+ xd->dst.u_buffer[i * recon_uv_stride + 7];
+ pbi->mt_vleft_col[mb_row][i] =
+ xd->dst.v_buffer[i * recon_uv_stride + 7];
+ }
+ }
+ }
+
+ /* loopfilter on this macroblock. */
+ if (filter_level) {
+ if (pc->filter_type == NORMAL_LOOPFILTER) {
+ loop_filter_info lfi;
+ FRAME_TYPE frame_type = pc->frame_type;
+ const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level];
+ lfi.mblim = lfi_n->mblim[filter_level];
+ lfi.blim = lfi_n->blim[filter_level];
+ lfi.lim = lfi_n->lim[filter_level];
+ lfi.hev_thr = lfi_n->hev_thr[hev_index];
+
+ if (mb_col > 0)
+ vp8_loop_filter_mbv(xd->dst.y_buffer, xd->dst.u_buffer,
+ xd->dst.v_buffer, recon_y_stride,
+ recon_uv_stride, &lfi);
+
+ if (!skip_lf)
+ vp8_loop_filter_bv(xd->dst.y_buffer, xd->dst.u_buffer,
+ xd->dst.v_buffer, recon_y_stride,
+ recon_uv_stride, &lfi);
+
+ /* don't apply across umv border */
+ if (mb_row > 0)
+ vp8_loop_filter_mbh(xd->dst.y_buffer, xd->dst.u_buffer,
+ xd->dst.v_buffer, recon_y_stride,
+ recon_uv_stride, &lfi);
+
+ if (!skip_lf)
+ vp8_loop_filter_bh(xd->dst.y_buffer, xd->dst.u_buffer,
+ xd->dst.v_buffer, recon_y_stride,
+ recon_uv_stride, &lfi);
+ } else {
+ if (mb_col > 0)
+ vp8_loop_filter_simple_mbv(xd->dst.y_buffer, recon_y_stride,
+ lfi_n->mblim[filter_level]);
+
+ if (!skip_lf)
+ vp8_loop_filter_simple_bv(xd->dst.y_buffer, recon_y_stride,
+ lfi_n->blim[filter_level]);
+
+ /* don't apply across umv border */
+ if (mb_row > 0)
+ vp8_loop_filter_simple_mbh(xd->dst.y_buffer, recon_y_stride,
+ lfi_n->mblim[filter_level]);
+
+ if (!skip_lf)
+ vp8_loop_filter_simple_bh(xd->dst.y_buffer, recon_y_stride,
+ lfi_n->blim[filter_level]);
+ }
+ }
+ }
+
+ recon_yoffset += 16;
+ recon_uvoffset += 8;
+
+ ++xd->mode_info_context; /* next mb */
+
+ xd->above_context++;
+ }
+
+ /* adjust to the next row of mbs */
+ if (pbi->common.filter_level) {
+ if (mb_row != pc->mb_rows - 1) {
+ int lasty = yv12_fb_lst->y_width + VP8BORDERINPIXELS;
+ int lastuv = (yv12_fb_lst->y_width >> 1) + (VP8BORDERINPIXELS >> 1);
+
+ for (i = 0; i < 4; ++i) {
+ pbi->mt_yabove_row[mb_row + 1][lasty + i] =
+ pbi->mt_yabove_row[mb_row + 1][lasty - 1];
+ pbi->mt_uabove_row[mb_row + 1][lastuv + i] =
+ pbi->mt_uabove_row[mb_row + 1][lastuv - 1];
+ pbi->mt_vabove_row[mb_row + 1][lastuv + i] =
+ pbi->mt_vabove_row[mb_row + 1][lastuv - 1];
+ }
+ }
+ } else {
+ vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16,
+ xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
+ }
+
+ /* last MB of row is ready just after extension is done */
+ vpx_atomic_store_release(current_mb_col, mb_col + nsync);
+
+ ++xd->mode_info_context; /* skip prediction column */
+ xd->up_available = 1;
+
+    /* skip the rows decoded by the other threads */
+ xd->mode_info_context += xd->mode_info_stride * pbi->decoding_thread_count;
+ }
+
+ /* signal end of decoding of current thread for current frame */
+ if (last_mb_row + (int)pbi->decoding_thread_count + 1 >= pc->mb_rows)
+ sem_post(&pbi->h_event_end_decoding);
+}
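
The inter-row synchronization above follows a simple publish/spin-wait protocol: each thread periodically publishes the column it has reached for its row (vpx_atomic_store_release), and the thread working on the row below spin-waits (vp8_atomic_spin_wait) until that counter is at least nsync columns ahead of its own position. A minimal sketch of the same pattern using C11 atomics; the names here are illustrative, not the libvpx API:

#include <stdatomic.h>

enum { MAX_MB_ROWS = 1024 };                 /* illustrative bound */
static atomic_int row_progress[MAX_MB_ROWS]; /* one counter per MB row */

/* Producer (row r): publish progress every nsync columns. */
static void publish_progress(int r, int mb_col, int nsync) {
  if (((mb_col - 1) % nsync) == 0)
    atomic_store_explicit(&row_progress[r], mb_col - 1, memory_order_release);
}

/* Consumer (row r > 0): block until row r-1 is nsync columns ahead.
 * The real code only performs this check every nsync columns and
 * inserts a pause hint in the loop body. */
static void wait_for_row_above(int r, int mb_col, int nsync) {
  while (atomic_load_explicit(&row_progress[r - 1], memory_order_acquire) <
         mb_col + nsync) {
  }
}
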
+
+static THREAD_FUNCTION thread_decoding_proc(void *p_data) {
+ int ithread = ((DECODETHREAD_DATA *)p_data)->ithread;
+ VP8D_COMP *pbi = (VP8D_COMP *)(((DECODETHREAD_DATA *)p_data)->ptr1);
+ MB_ROW_DEC *mbrd = (MB_ROW_DEC *)(((DECODETHREAD_DATA *)p_data)->ptr2);
+ ENTROPY_CONTEXT_PLANES mb_row_left_context;
+
+ while (1) {
+ if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd) == 0) break;
+
+ if (sem_wait(&pbi->h_event_start_decoding[ithread]) == 0) {
+ if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd) == 0) {
+ break;
+ } else {
+ MACROBLOCKD *xd = &mbrd->mbd;
+ xd->left_context = &mb_row_left_context;
+ if (setjmp(xd->error_info.jmp)) {
+ xd->error_info.setjmp = 0;
+ // Signal the end of decoding for current thread.
+ sem_post(&pbi->h_event_end_decoding);
+ continue;
+ }
+ xd->error_info.setjmp = 1;
+ mt_decode_mb_rows(pbi, xd, ithread + 1);
+ }
+ }
+ }
+
+ return 0;
+}
+
+void vp8_decoder_create_threads(VP8D_COMP *pbi) {
+ int core_count = 0;
+ unsigned int ithread;
+
+ vpx_atomic_init(&pbi->b_multithreaded_rd, 0);
+ pbi->allocated_decoding_thread_count = 0;
+
+ /* limit decoding threads to the max number of token partitions */
+ core_count = (pbi->max_threads > 8) ? 8 : pbi->max_threads;
+
+ /* limit decoding threads to the available cores */
+ if (core_count > pbi->common.processor_core_count) {
+ core_count = pbi->common.processor_core_count;
+ }
+
+ if (core_count > 1) {
+ vpx_atomic_init(&pbi->b_multithreaded_rd, 1);
+ pbi->decoding_thread_count = core_count - 1;
+
+ CALLOC_ARRAY(pbi->h_decoding_thread, pbi->decoding_thread_count);
+ CALLOC_ARRAY(pbi->h_event_start_decoding, pbi->decoding_thread_count);
+ CALLOC_ARRAY_ALIGNED(pbi->mb_row_di, pbi->decoding_thread_count, 32);
+ CALLOC_ARRAY(pbi->de_thread_data, pbi->decoding_thread_count);
+
+ if (sem_init(&pbi->h_event_end_decoding, 0, 0)) {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to initialize semaphore");
+ }
+
+ for (ithread = 0; ithread < pbi->decoding_thread_count; ++ithread) {
+ if (sem_init(&pbi->h_event_start_decoding[ithread], 0, 0)) break;
+
+ vp8_setup_block_dptrs(&pbi->mb_row_di[ithread].mbd);
+
+ pbi->de_thread_data[ithread].ithread = ithread;
+ pbi->de_thread_data[ithread].ptr1 = (void *)pbi;
+ pbi->de_thread_data[ithread].ptr2 = (void *)&pbi->mb_row_di[ithread];
+
+ if (pthread_create(&pbi->h_decoding_thread[ithread], 0,
+ thread_decoding_proc, &pbi->de_thread_data[ithread])) {
+ sem_destroy(&pbi->h_event_start_decoding[ithread]);
+ break;
+ }
+ }
+
+ pbi->allocated_decoding_thread_count = ithread;
+ if (pbi->allocated_decoding_thread_count !=
+ (int)pbi->decoding_thread_count) {
+ /* the remainder of cleanup cases will be handled in
+ * vp8_decoder_remove_threads(). */
+ if (pbi->allocated_decoding_thread_count == 0) {
+ sem_destroy(&pbi->h_event_end_decoding);
+ }
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to create threads");
+ }
+ }
+}
+
+void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) {
+ int i;
+
+ vpx_free(pbi->mt_current_mb_col);
+ pbi->mt_current_mb_col = NULL;
+
+ /* Free above_row buffers. */
+ if (pbi->mt_yabove_row) {
+ for (i = 0; i < mb_rows; ++i) {
+ vpx_free(pbi->mt_yabove_row[i]);
+ pbi->mt_yabove_row[i] = NULL;
+ }
+ vpx_free(pbi->mt_yabove_row);
+ pbi->mt_yabove_row = NULL;
+ }
+
+ if (pbi->mt_uabove_row) {
+ for (i = 0; i < mb_rows; ++i) {
+ vpx_free(pbi->mt_uabove_row[i]);
+ pbi->mt_uabove_row[i] = NULL;
+ }
+ vpx_free(pbi->mt_uabove_row);
+ pbi->mt_uabove_row = NULL;
+ }
+
+ if (pbi->mt_vabove_row) {
+ for (i = 0; i < mb_rows; ++i) {
+ vpx_free(pbi->mt_vabove_row[i]);
+ pbi->mt_vabove_row[i] = NULL;
+ }
+ vpx_free(pbi->mt_vabove_row);
+ pbi->mt_vabove_row = NULL;
+ }
+
+ /* Free left_col buffers. */
+ if (pbi->mt_yleft_col) {
+ for (i = 0; i < mb_rows; ++i) {
+ vpx_free(pbi->mt_yleft_col[i]);
+ pbi->mt_yleft_col[i] = NULL;
+ }
+ vpx_free(pbi->mt_yleft_col);
+ pbi->mt_yleft_col = NULL;
+ }
+
+ if (pbi->mt_uleft_col) {
+ for (i = 0; i < mb_rows; ++i) {
+ vpx_free(pbi->mt_uleft_col[i]);
+ pbi->mt_uleft_col[i] = NULL;
+ }
+ vpx_free(pbi->mt_uleft_col);
+ pbi->mt_uleft_col = NULL;
+ }
+
+ if (pbi->mt_vleft_col) {
+ for (i = 0; i < mb_rows; ++i) {
+ vpx_free(pbi->mt_vleft_col[i]);
+ pbi->mt_vleft_col[i] = NULL;
+ }
+ vpx_free(pbi->mt_vleft_col);
+ pbi->mt_vleft_col = NULL;
+ }
+}
+
+void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows) {
+ VP8_COMMON *const pc = &pbi->common;
+ int i;
+ int uv_width;
+
+ if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd)) {
+ vp8mt_de_alloc_temp_buffers(pbi, prev_mb_rows);
+
+ /* our internal buffers are always multiples of 16 */
+ if ((width & 0xf) != 0) width += 16 - (width & 0xf);
+
+ if (width < 640) {
+ pbi->sync_range = 1;
+ } else if (width <= 1280) {
+ pbi->sync_range = 8;
+ } else if (width <= 2560) {
+ pbi->sync_range = 16;
+ } else {
+ pbi->sync_range = 32;
+ }
+
+ uv_width = width >> 1;
+
+ /* Allocate a vpx_atomic_int for each mb row. */
+ CHECK_MEM_ERROR(&pc->error, pbi->mt_current_mb_col,
+ vpx_malloc(sizeof(*pbi->mt_current_mb_col) * pc->mb_rows));
+ for (i = 0; i < pc->mb_rows; ++i)
+ vpx_atomic_init(&pbi->mt_current_mb_col[i], 0);
+
+ /* Allocate memory for above_row buffers. */
+ CALLOC_ARRAY(pbi->mt_yabove_row, pc->mb_rows);
+ for (i = 0; i < pc->mb_rows; ++i) {
+ CHECK_MEM_ERROR(&pc->error, pbi->mt_yabove_row[i],
+ vpx_memalign(16, sizeof(unsigned char) *
+ (width + (VP8BORDERINPIXELS << 1))));
+ vp8_zero_array(pbi->mt_yabove_row[i], width + (VP8BORDERINPIXELS << 1));
+ }
+
+ CALLOC_ARRAY(pbi->mt_uabove_row, pc->mb_rows);
+ for (i = 0; i < pc->mb_rows; ++i) {
+ CHECK_MEM_ERROR(&pc->error, pbi->mt_uabove_row[i],
+ vpx_memalign(16, sizeof(unsigned char) *
+ (uv_width + VP8BORDERINPIXELS)));
+ vp8_zero_array(pbi->mt_uabove_row[i], uv_width + VP8BORDERINPIXELS);
+ }
+
+ CALLOC_ARRAY(pbi->mt_vabove_row, pc->mb_rows);
+ for (i = 0; i < pc->mb_rows; ++i) {
+ CHECK_MEM_ERROR(&pc->error, pbi->mt_vabove_row[i],
+ vpx_memalign(16, sizeof(unsigned char) *
+ (uv_width + VP8BORDERINPIXELS)));
+ vp8_zero_array(pbi->mt_vabove_row[i], uv_width + VP8BORDERINPIXELS);
+ }
+
+ /* Allocate memory for left_col buffers. */
+ CALLOC_ARRAY(pbi->mt_yleft_col, pc->mb_rows);
+ for (i = 0; i < pc->mb_rows; ++i)
+ CHECK_MEM_ERROR(&pc->error, pbi->mt_yleft_col[i],
+ vpx_calloc(sizeof(unsigned char) * 16, 1));
+
+ CALLOC_ARRAY(pbi->mt_uleft_col, pc->mb_rows);
+ for (i = 0; i < pc->mb_rows; ++i)
+ CHECK_MEM_ERROR(&pc->error, pbi->mt_uleft_col[i],
+ vpx_calloc(sizeof(unsigned char) * 8, 1));
+
+ CALLOC_ARRAY(pbi->mt_vleft_col, pc->mb_rows);
+ for (i = 0; i < pc->mb_rows; ++i)
+ CHECK_MEM_ERROR(&pc->error, pbi->mt_vleft_col[i],
+ vpx_calloc(sizeof(unsigned char) * 8, 1));
+ }
+}
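
As a quick check of the sync_range ladder above (the helper name is made up; the logic mirrors the code):

/* Illustrative helper reproducing the sync_range selection above. */
static int sync_range_for_width(int width) {
  if ((width & 0xf) != 0) width += 16 - (width & 0xf); /* pad to 16 */
  if (width < 640) return 1;
  if (width <= 1280) return 8;
  if (width <= 2560) return 16;
  return 32;
}
/* sync_range_for_width(352)  == 1   (CIF)
 * sync_range_for_width(1280) == 8   (720p)
 * sync_range_for_width(1920) == 16  (1080p)
 * sync_range_for_width(3840) == 32  (4K) */
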
+
+void vp8_decoder_remove_threads(VP8D_COMP *pbi) {
+  /* Shut down the MB decoding threads. */
+ if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd)) {
+ int i;
+ vpx_atomic_store_release(&pbi->b_multithreaded_rd, 0);
+
+ /* allow all threads to exit */
+ for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) {
+ sem_post(&pbi->h_event_start_decoding[i]);
+ pthread_join(pbi->h_decoding_thread[i], NULL);
+ }
+
+ for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) {
+ sem_destroy(&pbi->h_event_start_decoding[i]);
+ }
+
+ if (pbi->allocated_decoding_thread_count) {
+ sem_destroy(&pbi->h_event_end_decoding);
+ }
+
+ vpx_free(pbi->h_decoding_thread);
+ pbi->h_decoding_thread = NULL;
+
+ vpx_free(pbi->h_event_start_decoding);
+ pbi->h_event_start_decoding = NULL;
+
+ vpx_free(pbi->mb_row_di);
+ pbi->mb_row_di = NULL;
+
+ vpx_free(pbi->de_thread_data);
+ pbi->de_thread_data = NULL;
+
+ vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows);
+ }
+}
+
+int vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
+ VP8_COMMON *pc = &pbi->common;
+ unsigned int i;
+ int j;
+
+ int filter_level = pc->filter_level;
+ YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];
+
+ if (filter_level) {
+    /* Set the above_row buffer to 127 for decoding the first MB row */
+ memset(pbi->mt_yabove_row[0] + VP8BORDERINPIXELS - 1, 127,
+ yv12_fb_new->y_width + 5);
+ memset(pbi->mt_uabove_row[0] + (VP8BORDERINPIXELS >> 1) - 1, 127,
+ (yv12_fb_new->y_width >> 1) + 5);
+ memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS >> 1) - 1, 127,
+ (yv12_fb_new->y_width >> 1) + 5);
+
+ for (j = 1; j < pc->mb_rows; ++j) {
+ memset(pbi->mt_yabove_row[j] + VP8BORDERINPIXELS - 1, (unsigned char)129,
+ 1);
+ memset(pbi->mt_uabove_row[j] + (VP8BORDERINPIXELS >> 1) - 1,
+ (unsigned char)129, 1);
+ memset(pbi->mt_vabove_row[j] + (VP8BORDERINPIXELS >> 1) - 1,
+ (unsigned char)129, 1);
+ }
+
+ /* Set left_col to 129 initially */
+ for (j = 0; j < pc->mb_rows; ++j) {
+ memset(pbi->mt_yleft_col[j], (unsigned char)129, 16);
+ memset(pbi->mt_uleft_col[j], (unsigned char)129, 8);
+ memset(pbi->mt_vleft_col[j], (unsigned char)129, 8);
+ }
+
+ /* Initialize the loop filter for this frame. */
+ vp8_loop_filter_frame_init(pc, &pbi->mb, filter_level);
+ } else {
+ vp8_setup_intra_recon_top_line(yv12_fb_new);
+ }
+
+ setup_decoding_thread_data(pbi, xd, pbi->mb_row_di,
+ pbi->decoding_thread_count);
+
+ for (i = 0; i < pbi->decoding_thread_count; ++i) {
+ sem_post(&pbi->h_event_start_decoding[i]);
+ }
+
+ if (setjmp(xd->error_info.jmp)) {
+ xd->error_info.setjmp = 0;
+ xd->corrupted = 1;
+ // Wait for other threads to finish. This prevents other threads decoding
+ // the current frame while the main thread starts decoding the next frame,
+ // which causes a data race.
+ for (i = 0; i < pbi->decoding_thread_count; ++i)
+ sem_wait(&pbi->h_event_end_decoding);
+ return -1;
+ }
+
+ xd->error_info.setjmp = 1;
+ mt_decode_mb_rows(pbi, xd, 0);
+
+ for (i = 0; i < pbi->decoding_thread_count + 1; ++i)
+ sem_wait(&pbi->h_event_end_decoding); /* add back for each frame */
+
+ return 0;
+}
diff --git a/media/libvpx/libvpx/vp8/decoder/treereader.h b/media/libvpx/libvpx/vp8/decoder/treereader.h
new file mode 100644
index 0000000000..4bf938a741
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/decoder/treereader.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_DECODER_TREEREADER_H_
+#define VPX_VP8_DECODER_TREEREADER_H_
+
+#include "./vpx_config.h"
+#include "vp8/common/treecoder.h"
+#include "dboolhuff.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef BOOL_DECODER vp8_reader;
+
+#define vp8_read vp8dx_decode_bool
+#define vp8_read_literal vp8_decode_value
+#define vp8_read_bit(R) vp8_read(R, vp8_prob_half)
+
+/* The intent of the tree data structure is to make decoding trivial. */
+
+static INLINE int vp8_treed_read(
+ vp8_reader *const r, /* !!! must return a 0 or 1 !!! */
+ vp8_tree t, const vp8_prob *const p) {
+ vp8_tree_index i = 0;
+
+ while ((i = t[i + vp8_read(r, p[i >> 1])]) > 0) {
+ }
+
+ return -i;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_DECODER_TREEREADER_H_
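
To make vp8_treed_read concrete: a tree table stores two entries per node; a positive entry is the index of the next node, a non-positive entry is a leaf whose negation is the decoded symbol, and p[i >> 1] supplies the branch probability at node index i. A hypothetical three-symbol tree (not one of libvpx's real tables):

/* Hypothetical 3-symbol tree, for illustration only. */
static const vp8_tree_index example_tree[4] = {
  0, 2,   /* node 0: bit 0 -> leaf (symbol 0); bit 1 -> node at index 2 */
  -1, -2  /* node 1: bit 0 -> symbol 1;        bit 1 -> symbol 2 */
};
/* Decoding reads vp8_read(r, p[0]) at the root and, on a 1 bit,
 * vp8_read(r, p[1]) at the second node; vp8_treed_read(r, example_tree, p)
 * then returns 0, 1 or 2. */
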
diff --git a/media/libvpx/libvpx/vp8/encoder/arm/neon/denoising_neon.c b/media/libvpx/libvpx/vp8/encoder/arm/neon/denoising_neon.c
new file mode 100644
index 0000000000..67267b8f3a
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/arm/neon/denoising_neon.c
@@ -0,0 +1,460 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "vp8/encoder/denoising.h"
+#include "vpx_mem/vpx_mem.h"
+#include "./vp8_rtcd.h"
+
+/*
+ * The filter function was modified to reduce the computational complexity.
+ *
+ * Step 1:
+ * Instead of applying tap coefficients for each pixel, we calculated the
+ * pixel adjustments vs. pixel diff value ahead of time.
+ * adjustment = filtered_value - current_raw
+ * = (filter_coefficient * diff + 128) >> 8
+ * where
+ * filter_coefficient = (255 << 8) / (256 + ((abs_diff * 330) >> 3));
+ * filter_coefficient += filter_coefficient /
+ * (3 + motion_magnitude_adjustment);
+ * filter_coefficient is clamped to 0 ~ 255.
+ *
+ * Step 2:
+ * The adjustment vs. diff curve becomes flat very quickly as diff increases.
+ * This allowed us to use only a few levels to approximate the curve without
+ * changing the filtering algorithm too much.
+ * The adjustments were further corrected by checking the motion magnitude.
+ * The levels used are:
+ * diff level adjustment w/o adjustment w/
+ * motion correction motion correction
+ * [-255, -16] 3 -6 -7
+ * [-15, -8] 2 -4 -5
+ * [-7, -4] 1 -3 -4
+ * [-3, 3] 0 diff diff
+ * [4, 7] 1 3 4
+ * [8, 15] 2 4 5
+ * [16, 255] 3 6 7
+ */
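
In plain C, the level table above reduces to the following per-pixel rule (a sketch with an illustrative name, not the NEON path below):

/* Sketch of the level table above; illustrative only. */
static int denoiser_adjustment(int diff, int apply_motion_correction) {
  const int abs_diff = diff < 0 ? -diff : diff;
  int adj;
  if (abs_diff <= 3) return diff; /* level 0: keep the diff itself */
  if (abs_diff <= 7)
    adj = 3; /* level 1 */
  else if (abs_diff <= 15)
    adj = 4; /* level 2 */
  else
    adj = 6; /* level 3 */
  if (apply_motion_correction) ++adj;
  return diff < 0 ? -adj : adj;
}
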
+
+int vp8_denoiser_filter_neon(unsigned char *mc_running_avg_y,
+ int mc_running_avg_y_stride,
+ unsigned char *running_avg_y,
+ int running_avg_y_stride, unsigned char *sig,
+ int sig_stride, unsigned int motion_magnitude,
+ int increase_denoising) {
+  /* If motion_magnitude is small, make the denoiser more aggressive by
+   * increasing the adjustment for each level: the level1 adjustment is
+   * increased while the deltas between levels stay the same.
+   */
+ int shift_inc =
+ (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
+ ? 1
+ : 0;
+ const uint8x16_t v_level1_adjustment = vmovq_n_u8(
+ (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
+ const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
+ const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
+ const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
+ const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
+ const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
+ int64x2_t v_sum_diff_total = vdupq_n_s64(0);
+
+ /* Go over lines. */
+ int r;
+ for (r = 0; r < 16; ++r) {
+ /* Load inputs. */
+ const uint8x16_t v_sig = vld1q_u8(sig);
+ const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
+
+ /* Calculate absolute difference and sign masks. */
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);
+
+    /* Figure out which level the absolute difference puts us in. */
+ const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
+ const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
+ const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
+
+ /* Calculate absolute adjustments for level 1, 2 and 3. */
+ const uint8x16_t v_level2_adjustment =
+ vandq_u8(v_level2_mask, v_delta_level_1_and_2);
+ const uint8x16_t v_level3_adjustment =
+ vandq_u8(v_level3_mask, v_delta_level_2_and_3);
+ const uint8x16_t v_level1and2_adjustment =
+ vaddq_u8(v_level1_adjustment, v_level2_adjustment);
+ const uint8x16_t v_level1and2and3_adjustment =
+ vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
+
+    /* Select the absolute adjustment: the raw absolute difference when in
+     * level 0, or the combined value for levels 1, 2 and 3.
+     */
+ const uint8x16_t v_abs_adjustment =
+ vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
+
+ /* Calculate positive and negative adjustments. Apply them to the signal
+ * and accumulate them. Adjustments are less than eight and the maximum
+ * sum of them (7 * 16) can fit in a signed char.
+ */
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+
+ uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
+ v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
+
+ /* Store results. */
+ vst1q_u8(running_avg_y, v_running_avg_y);
+
+ /* Sum all the accumulators to have the sum of all pixel differences
+ * for this macroblock.
+ */
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
+ vreinterpretq_s8_u8(v_neg_adjustment));
+
+ const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
+
+ const int32x4_t fedc_ba98_7654_3210 =
+ vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
+
+ const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
+
+ v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
+ }
+
+ /* Update pointers for next iteration. */
+ sig += sig_stride;
+ mc_running_avg_y += mc_running_avg_y_stride;
+ running_avg_y += running_avg_y_stride;
+ }
+
+  /* Too many adjustments => copy the block. */
+ {
+ int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
+ vget_low_s64(v_sum_diff_total));
+ int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
+ int sum_diff_thresh = SUM_DIFF_THRESHOLD;
+
+ if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
+ if (sum_diff > sum_diff_thresh) {
+ // Before returning to copy the block (i.e., apply no denoising),
+      // check if we can still apply some (weaker) temporal filtering to
+      // this block, which would otherwise not be denoised at all. The
+      // simplest approach is to apply an additional adjustment to
+      // running_avg_y to bring it closer to sig. The adjustment is capped
+      // by a maximum delta, and chosen such that in most cases the
+      // resulting sum_diff will be within the acceptable range given by
+      // sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over the
+ // threshold.
+ int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ const uint8x16_t k_delta = vmovq_n_u8(delta);
+ sig -= sig_stride * 16;
+ mc_running_avg_y -= mc_running_avg_y_stride * 16;
+ running_avg_y -= running_avg_y_stride * 16;
+ for (r = 0; r < 16; ++r) {
+ uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
+ const uint8x16_t v_sig = vld1q_u8(sig);
+ const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
+
+ /* Calculate absolute difference and sign masks. */
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_pos_mask =
+ vcltq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_neg_mask =
+ vcgtq_u8(v_sig, v_mc_running_avg_y);
+ // Clamp absolute difference to delta to get the adjustment.
+ const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));
+
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+
+ v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
+ v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);
+
+ /* Store results. */
+ vst1q_u8(running_avg_y, v_running_avg_y);
+
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
+ vreinterpretq_s8_u8(v_pos_adjustment));
+
+ const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
+ const int32x4_t fedc_ba98_7654_3210 =
+ vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
+ const int64x2_t fedcba98_76543210 =
+ vpaddlq_s32(fedc_ba98_7654_3210);
+
+ v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
+ }
+ /* Update pointers for next iteration. */
+ sig += sig_stride;
+ mc_running_avg_y += mc_running_avg_y_stride;
+ running_avg_y += running_avg_y_stride;
+ }
+ {
+ // Update the sum of all pixel differences of this MB.
+ x = vqadd_s64(vget_high_s64(v_sum_diff_total),
+ vget_low_s64(v_sum_diff_total));
+ sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
+
+ if (sum_diff > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+ }
+
+ /* Tell above level that block was filtered. */
+ running_avg_y -= running_avg_y_stride * 16;
+ sig -= sig_stride * 16;
+
+ vp8_copy_mem16x16(running_avg_y, running_avg_y_stride, sig, sig_stride);
+
+ return FILTER_BLOCK;
+}
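
The recovery pass above (entered when sum_diff first exceeds the threshold) is, per pixel, equivalent to this scalar rule, sketched with illustrative names: the correction magnitude is the absolute difference clamped to delta, applied in the direction that pulls running_avg_y back toward sig.

/* Scalar sketch of the delta-capped correction; illustrative only. */
static void cap_toward_sig(unsigned char *run, const unsigned char *sig,
                           const unsigned char *mc_avg, int n, int delta) {
  int k;
  for (k = 0; k < n; ++k) {
    int diff = mc_avg[k] - sig[k];
    int mag = diff < 0 ? -diff : diff;
    if (mag > delta) mag = delta; /* clamp to delta */
    if (diff > 0)                 /* first pass pushed run upward */
      run[k] = (unsigned char)(run[k] < mag ? 0 : run[k] - mag);
    else                          /* first pass pushed run downward */
      run[k] = (unsigned char)(run[k] + mag > 255 ? 255 : run[k] + mag);
  }
}
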
+
+int vp8_denoiser_filter_uv_neon(unsigned char *mc_running_avg,
+ int mc_running_avg_stride,
+ unsigned char *running_avg,
+ int running_avg_stride, unsigned char *sig,
+ int sig_stride, unsigned int motion_magnitude,
+ int increase_denoising) {
+  /* If motion_magnitude is small, make the denoiser more aggressive by
+   * increasing the adjustment for each level: the level1 adjustment is
+   * increased while the deltas between levels stay the same.
+   */
+ int shift_inc =
+ (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV)
+ ? 1
+ : 0;
+ const uint8x16_t v_level1_adjustment = vmovq_n_u8(
+ (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 4 + shift_inc : 3);
+
+ const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
+ const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
+ const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
+ const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
+ const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
+ int64x2_t v_sum_diff_total = vdupq_n_s64(0);
+ int r;
+
+ {
+ uint16x4_t v_sum_block = vdup_n_u16(0);
+
+    // Avoid denoising the color signal if it is close to the average level.
+ for (r = 0; r < 8; ++r) {
+ const uint8x8_t v_sig = vld1_u8(sig);
+ const uint16x4_t _76_54_32_10 = vpaddl_u8(v_sig);
+ v_sum_block = vqadd_u16(v_sum_block, _76_54_32_10);
+ sig += sig_stride;
+ }
+ sig -= sig_stride * 8;
+ {
+ const uint32x2_t _7654_3210 = vpaddl_u16(v_sum_block);
+ const uint64x1_t _76543210 = vpaddl_u32(_7654_3210);
+ const int sum_block = vget_lane_s32(vreinterpret_s32_u64(_76543210), 0);
+ if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) {
+ return COPY_BLOCK;
+ }
+ }
+ }
+
+ /* Go over lines. */
+ for (r = 0; r < 4; ++r) {
+ /* Load inputs. */
+ const uint8x8_t v_sig_lo = vld1_u8(sig);
+ const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]);
+ const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi);
+ const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg);
+ const uint8x8_t v_mc_running_avg_hi =
+ vld1_u8(&mc_running_avg[mc_running_avg_stride]);
+ const uint8x16_t v_mc_running_avg =
+ vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi);
+ /* Calculate absolute difference and sign masks. */
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg);
+ const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg);
+ const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg);
+
+    /* Figure out which level the absolute difference puts us in. */
+ const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
+ const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
+ const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
+
+ /* Calculate absolute adjustments for level 1, 2 and 3. */
+ const uint8x16_t v_level2_adjustment =
+ vandq_u8(v_level2_mask, v_delta_level_1_and_2);
+ const uint8x16_t v_level3_adjustment =
+ vandq_u8(v_level3_mask, v_delta_level_2_and_3);
+ const uint8x16_t v_level1and2_adjustment =
+ vaddq_u8(v_level1_adjustment, v_level2_adjustment);
+ const uint8x16_t v_level1and2and3_adjustment =
+ vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
+
+    /* Select the absolute adjustment: the raw absolute difference when in
+     * level 0, or the combined value for levels 1, 2 and 3.
+     */
+ const uint8x16_t v_abs_adjustment =
+ vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
+
+ /* Calculate positive and negative adjustments. Apply them to the signal
+ * and accumulate them. Adjustments are less than eight and the maximum
+ * sum of them (7 * 16) can fit in a signed char.
+ */
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+
+ uint8x16_t v_running_avg = vqaddq_u8(v_sig, v_pos_adjustment);
+ v_running_avg = vqsubq_u8(v_running_avg, v_neg_adjustment);
+
+ /* Store results. */
+ vst1_u8(running_avg, vget_low_u8(v_running_avg));
+ vst1_u8(&running_avg[running_avg_stride], vget_high_u8(v_running_avg));
+
+ /* Sum all the accumulators to have the sum of all pixel differences
+ * for this macroblock.
+ */
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
+ vreinterpretq_s8_u8(v_neg_adjustment));
+
+ const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
+
+ const int32x4_t fedc_ba98_7654_3210 =
+ vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
+
+ const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
+
+ v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
+ }
+
+ /* Update pointers for next iteration. */
+ sig += sig_stride * 2;
+ mc_running_avg += mc_running_avg_stride * 2;
+ running_avg += running_avg_stride * 2;
+ }
+
+  /* Too many adjustments => copy the block. */
+ {
+ int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
+ vget_low_s64(v_sum_diff_total));
+ int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
+ int sum_diff_thresh = SUM_DIFF_THRESHOLD_UV;
+ if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV;
+ if (sum_diff > sum_diff_thresh) {
+ // Before returning to copy the block (i.e., apply no denoising),
+      // check if we can still apply some (weaker) temporal filtering to
+      // this block, which would otherwise not be denoised at all. The
+      // simplest approach is to apply an additional adjustment to
+      // running_avg to bring it closer to sig. The adjustment is capped
+      // by a maximum delta, and chosen such that in most cases the
+      // resulting sum_diff will be within the acceptable range given by
+      // sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over the
+ // threshold.
+ int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ const uint8x16_t k_delta = vmovq_n_u8(delta);
+ sig -= sig_stride * 8;
+ mc_running_avg -= mc_running_avg_stride * 8;
+ running_avg -= running_avg_stride * 8;
+ for (r = 0; r < 4; ++r) {
+ const uint8x8_t v_sig_lo = vld1_u8(sig);
+ const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]);
+ const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi);
+ const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg);
+ const uint8x8_t v_mc_running_avg_hi =
+ vld1_u8(&mc_running_avg[mc_running_avg_stride]);
+ const uint8x16_t v_mc_running_avg =
+ vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi);
+ /* Calculate absolute difference and sign masks. */
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg);
+ const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg);
+ const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg);
+ // Clamp absolute difference to delta to get the adjustment.
+ const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));
+
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+ const uint8x8_t v_running_avg_lo = vld1_u8(running_avg);
+ const uint8x8_t v_running_avg_hi =
+ vld1_u8(&running_avg[running_avg_stride]);
+ uint8x16_t v_running_avg =
+ vcombine_u8(v_running_avg_lo, v_running_avg_hi);
+
+ v_running_avg = vqsubq_u8(v_running_avg, v_pos_adjustment);
+ v_running_avg = vqaddq_u8(v_running_avg, v_neg_adjustment);
+
+ /* Store results. */
+ vst1_u8(running_avg, vget_low_u8(v_running_avg));
+ vst1_u8(&running_avg[running_avg_stride],
+ vget_high_u8(v_running_avg));
+
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
+ vreinterpretq_s8_u8(v_pos_adjustment));
+
+ const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
+ const int32x4_t fedc_ba98_7654_3210 =
+ vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
+ const int64x2_t fedcba98_76543210 =
+ vpaddlq_s32(fedc_ba98_7654_3210);
+
+ v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
+ }
+ /* Update pointers for next iteration. */
+ sig += sig_stride * 2;
+ mc_running_avg += mc_running_avg_stride * 2;
+ running_avg += running_avg_stride * 2;
+ }
+ {
+ // Update the sum of all pixel differences of this MB.
+ x = vqadd_s64(vget_high_s64(v_sum_diff_total),
+ vget_low_s64(v_sum_diff_total));
+ sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
+
+ if (sum_diff > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+ }
+
+ /* Tell above level that block was filtered. */
+ running_avg -= running_avg_stride * 8;
+ sig -= sig_stride * 8;
+
+ vp8_copy_mem8x8(running_avg, running_avg_stride, sig, sig_stride);
+
+ return FILTER_BLOCK;
+}
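
The early-out at the top of this function is, in scalar form, just a mean test against mid-level chroma (a sketch; the helper name is made up, while SUM_DIFF_FROM_AVG_THRESH_UV is the constant from vp8/encoder/denoising.h used above):

/* Scalar sketch of the flat-chroma early-out above. */
static int uv_block_near_average(const unsigned char *sig, int stride) {
  int sum = 0, r, c, d;
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c) sum += sig[r * stride + c];
  d = sum - 128 * 8 * 8; /* distance from an all-128 (mid-grey) block */
  if (d < 0) d = -d;
  return d < SUM_DIFF_FROM_AVG_THRESH_UV; /* nonzero => skip denoising */
}
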
diff --git a/media/libvpx/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c b/media/libvpx/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c
new file mode 100644
index 0000000000..950c943343
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp8_rtcd.h"
+#include "vp8/encoder/block.h"
+
+static const uint16_t inv_zig_zag[16] = { 1, 2, 6, 7, 3, 5, 8, 13,
+ 4, 9, 12, 14, 10, 11, 15, 16 };
+
+void vp8_fast_quantize_b_neon(BLOCK *b, BLOCKD *d) {
+ const int16x8_t one_q = vdupq_n_s16(-1), z0 = vld1q_s16(b->coeff),
+ z1 = vld1q_s16(b->coeff + 8), round0 = vld1q_s16(b->round),
+ round1 = vld1q_s16(b->round + 8),
+ quant0 = vld1q_s16(b->quant_fast),
+ quant1 = vld1q_s16(b->quant_fast + 8),
+ dequant0 = vld1q_s16(d->dequant),
+ dequant1 = vld1q_s16(d->dequant + 8);
+ const uint16x8_t zig_zag0 = vld1q_u16(inv_zig_zag),
+ zig_zag1 = vld1q_u16(inv_zig_zag + 8);
+ int16x8_t x0, x1, sz0, sz1, y0, y1;
+ uint16x8_t eob0, eob1;
+#if !VPX_ARCH_AARCH64
+ uint16x4_t eob_d16;
+ uint32x2_t eob_d32;
+ uint32x4_t eob_q32;
+#endif // !VPX_ARCH_AARCH64
+
+ /* sign of z: z >> 15 */
+ sz0 = vshrq_n_s16(z0, 15);
+ sz1 = vshrq_n_s16(z1, 15);
+
+ /* x = abs(z) */
+ x0 = vabsq_s16(z0);
+ x1 = vabsq_s16(z1);
+
+ /* x += round */
+ x0 = vaddq_s16(x0, round0);
+ x1 = vaddq_s16(x1, round1);
+
+ /* y = 2 * (x * quant) >> 16 */
+ y0 = vqdmulhq_s16(x0, quant0);
+ y1 = vqdmulhq_s16(x1, quant1);
+
+ /* Compensate for doubling in vqdmulhq */
+ y0 = vshrq_n_s16(y0, 1);
+ y1 = vshrq_n_s16(y1, 1);
+
+ /* Restore sign bit */
+ y0 = veorq_s16(y0, sz0);
+ y1 = veorq_s16(y1, sz1);
+ x0 = vsubq_s16(y0, sz0);
+ x1 = vsubq_s16(y1, sz1);
+
+ /* find non-zero elements */
+ eob0 = vtstq_s16(x0, one_q);
+ eob1 = vtstq_s16(x1, one_q);
+
+ /* mask zig zag */
+ eob0 = vandq_u16(eob0, zig_zag0);
+ eob1 = vandq_u16(eob1, zig_zag1);
+
+ /* select the largest value */
+ eob0 = vmaxq_u16(eob0, eob1);
+#if VPX_ARCH_AARCH64
+ *d->eob = (int8_t)vmaxvq_u16(eob0);
+#else
+ eob_d16 = vmax_u16(vget_low_u16(eob0), vget_high_u16(eob0));
+ eob_q32 = vmovl_u16(eob_d16);
+ eob_d32 = vmax_u32(vget_low_u32(eob_q32), vget_high_u32(eob_q32));
+ eob_d32 = vpmax_u32(eob_d32, eob_d32);
+
+ vst1_lane_s8((int8_t *)d->eob, vreinterpret_s8_u32(eob_d32), 0);
+#endif // VPX_ARCH_AARCH64
+
+ /* qcoeff = x */
+ vst1q_s16(d->qcoeff, x0);
+ vst1q_s16(d->qcoeff + 8, x1);
+
+ /* dqcoeff = x * dequant */
+ vst1q_s16(d->dqcoeff, vmulq_s16(dequant0, x0));
+ vst1q_s16(d->dqcoeff + 8, vmulq_s16(dequant1, x1));
+}
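
Per lane, the sequence above is a sign-fold fast quantizer; a scalar sketch for orientation (note vqdmulhq_s16(a, b) computes (2*a*b) >> 16, which the extra >> 1 compensates for; saturation is ignored here):

/* Scalar equivalent of one quantizer lane; illustrative only. */
static short fast_quantize_lane(short z, short round, short quant,
                                short dequant, short *dqcoeff) {
  const short sz = z >> 15;                   /* 0 or -1: sign mask */
  short x = (short)(((z ^ sz) - sz) + round); /* abs(z) + round */
  short y = (short)(((int)x * quant) >> 16);  /* high half of product */
  x = (short)((y ^ sz) - sz);                 /* restore the sign */
  *dqcoeff = (short)(x * dequant);
  return x; /* qcoeff for this lane */
}
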
diff --git a/media/libvpx/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c b/media/libvpx/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c
new file mode 100644
index 0000000000..99dff6b520
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp8_rtcd.h"
+
+void vp8_short_fdct4x4_neon(int16_t *input, int16_t *output, int pitch) {
+ int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
+ int16x4_t d16s16, d17s16, d26s16, dEmptys16;
+ uint16x4_t d4u16;
+ int16x8_t q0s16, q1s16;
+ int32x4_t q9s32, q10s32, q11s32, q12s32;
+ int16x4x2_t v2tmp0, v2tmp1;
+ int32x2x2_t v2tmp2, v2tmp3;
+
+ d16s16 = vdup_n_s16(5352);
+ d17s16 = vdup_n_s16(2217);
+ q9s32 = vdupq_n_s32(14500);
+ q10s32 = vdupq_n_s32(7500);
+ q11s32 = vdupq_n_s32(12000);
+ q12s32 = vdupq_n_s32(51000);
+
+ // Part one
+ pitch >>= 1;
+ d0s16 = vld1_s16(input);
+ input += pitch;
+ d1s16 = vld1_s16(input);
+ input += pitch;
+ d2s16 = vld1_s16(input);
+ input += pitch;
+ d3s16 = vld1_s16(input);
+
+ v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16));
+ v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));
+ v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), // d0
+ vreinterpret_s16_s32(v2tmp3.val[0])); // d1
+ v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), // d2
+ vreinterpret_s16_s32(v2tmp3.val[1])); // d3
+
+ d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
+ d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
+ d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
+ d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);
+
+ d4s16 = vshl_n_s16(d4s16, 3);
+ d5s16 = vshl_n_s16(d5s16, 3);
+ d6s16 = vshl_n_s16(d6s16, 3);
+ d7s16 = vshl_n_s16(d7s16, 3);
+
+ d0s16 = vadd_s16(d4s16, d5s16);
+ d2s16 = vsub_s16(d4s16, d5s16);
+
+ q9s32 = vmlal_s16(q9s32, d7s16, d16s16);
+ q10s32 = vmlal_s16(q10s32, d7s16, d17s16);
+ q9s32 = vmlal_s16(q9s32, d6s16, d17s16);
+ q10s32 = vmlsl_s16(q10s32, d6s16, d16s16);
+
+ d1s16 = vshrn_n_s32(q9s32, 12);
+ d3s16 = vshrn_n_s32(q10s32, 12);
+
+ // Part two
+ v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16));
+ v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));
+ v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), // d0
+ vreinterpret_s16_s32(v2tmp3.val[0])); // d1
+ v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), // d2
+ vreinterpret_s16_s32(v2tmp3.val[1])); // d3
+
+ d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
+ d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
+ d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
+ d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);
+
+ d26s16 = vdup_n_s16(7);
+ d4s16 = vadd_s16(d4s16, d26s16);
+
+ d0s16 = vadd_s16(d4s16, d5s16);
+ d2s16 = vsub_s16(d4s16, d5s16);
+
+ q11s32 = vmlal_s16(q11s32, d7s16, d16s16);
+ q12s32 = vmlal_s16(q12s32, d7s16, d17s16);
+
+ dEmptys16 = vdup_n_s16(0);
+ d4u16 = vceq_s16(d7s16, dEmptys16);
+
+ d0s16 = vshr_n_s16(d0s16, 4);
+ d2s16 = vshr_n_s16(d2s16, 4);
+
+ q11s32 = vmlal_s16(q11s32, d6s16, d17s16);
+ q12s32 = vmlsl_s16(q12s32, d6s16, d16s16);
+
+ d4u16 = vmvn_u16(d4u16);
+ d1s16 = vshrn_n_s32(q11s32, 16);
+ d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d4u16));
+ d3s16 = vshrn_n_s32(q12s32, 16);
+
+ q0s16 = vcombine_s16(d0s16, d1s16);
+ q1s16 = vcombine_s16(d2s16, d3s16);
+
+ vst1q_s16(output, q0s16);
+ vst1q_s16(output + 8, q1s16);
+ return;
+}
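
For orientation, the constants loaded above (5352, 2217 and the rounders 14500/7500) implement the first pass of the VP8 4x4 forward DCT; per input row the scalar reference computes roughly the following (sketch):

#include <stdint.h>

/* Sketch of one first-pass row, mirroring the scalar reference. */
static void fdct4_first_pass_row(const int16_t *ip, int16_t *op) {
  const int a1 = (ip[0] + ip[3]) * 8;
  const int b1 = (ip[1] + ip[2]) * 8;
  const int c1 = (ip[1] - ip[2]) * 8;
  const int d1 = (ip[0] - ip[3]) * 8;
  op[0] = (int16_t)(a1 + b1);
  op[2] = (int16_t)(a1 - b1);
  op[1] = (int16_t)((c1 * 2217 + d1 * 5352 + 14500) >> 12);
  op[3] = (int16_t)((d1 * 2217 - c1 * 5352 + 7500) >> 12);
}

Part two above applies the same butterfly to columns with the 12000/51000 rounders, a +7 bias and >> 4 / >> 16 scaling; the vceq/vmvn/vsub sequence adds 1 to the second output wherever the corresponding difference term is nonzero.
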
+
+void vp8_short_fdct8x4_neon(int16_t *input, int16_t *output, int pitch) {
+ int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
+ int16x4_t d16s16, d17s16, d26s16, d27s16, d28s16, d29s16;
+ uint16x4_t d28u16, d29u16;
+ uint16x8_t q14u16;
+ int16x8_t q0s16, q1s16, q2s16, q3s16;
+ int16x8_t q11s16, q12s16, q13s16, q14s16, q15s16, qEmptys16;
+ int32x4_t q9s32, q10s32, q11s32, q12s32;
+ int16x8x2_t v2tmp0, v2tmp1;
+ int32x4x2_t v2tmp2, v2tmp3;
+
+ d16s16 = vdup_n_s16(5352);
+ d17s16 = vdup_n_s16(2217);
+ q9s32 = vdupq_n_s32(14500);
+ q10s32 = vdupq_n_s32(7500);
+
+ // Part one
+ pitch >>= 1;
+ q0s16 = vld1q_s16(input);
+ input += pitch;
+ q1s16 = vld1q_s16(input);
+ input += pitch;
+ q2s16 = vld1q_s16(input);
+ input += pitch;
+ q3s16 = vld1q_s16(input);
+
+ v2tmp2 =
+ vtrnq_s32(vreinterpretq_s32_s16(q0s16), vreinterpretq_s32_s16(q2s16));
+ v2tmp3 =
+ vtrnq_s32(vreinterpretq_s32_s16(q1s16), vreinterpretq_s32_s16(q3s16));
+ v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]), // q0
+ vreinterpretq_s16_s32(v2tmp3.val[0])); // q1
+ v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]), // q2
+ vreinterpretq_s16_s32(v2tmp3.val[1])); // q3
+
+ q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]);
+ q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]);
+ q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]);
+ q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]);
+
+ q11s16 = vshlq_n_s16(q11s16, 3);
+ q12s16 = vshlq_n_s16(q12s16, 3);
+ q13s16 = vshlq_n_s16(q13s16, 3);
+ q14s16 = vshlq_n_s16(q14s16, 3);
+
+ q0s16 = vaddq_s16(q11s16, q12s16);
+ q2s16 = vsubq_s16(q11s16, q12s16);
+
+ q11s32 = q9s32;
+ q12s32 = q10s32;
+
+ d26s16 = vget_low_s16(q13s16);
+ d27s16 = vget_high_s16(q13s16);
+ d28s16 = vget_low_s16(q14s16);
+ d29s16 = vget_high_s16(q14s16);
+
+ q9s32 = vmlal_s16(q9s32, d28s16, d16s16);
+ q10s32 = vmlal_s16(q10s32, d28s16, d17s16);
+ q11s32 = vmlal_s16(q11s32, d29s16, d16s16);
+ q12s32 = vmlal_s16(q12s32, d29s16, d17s16);
+
+ q9s32 = vmlal_s16(q9s32, d26s16, d17s16);
+ q10s32 = vmlsl_s16(q10s32, d26s16, d16s16);
+ q11s32 = vmlal_s16(q11s32, d27s16, d17s16);
+ q12s32 = vmlsl_s16(q12s32, d27s16, d16s16);
+
+ d2s16 = vshrn_n_s32(q9s32, 12);
+ d6s16 = vshrn_n_s32(q10s32, 12);
+ d3s16 = vshrn_n_s32(q11s32, 12);
+ d7s16 = vshrn_n_s32(q12s32, 12);
+ q1s16 = vcombine_s16(d2s16, d3s16);
+ q3s16 = vcombine_s16(d6s16, d7s16);
+
+ // Part two
+ q9s32 = vdupq_n_s32(12000);
+ q10s32 = vdupq_n_s32(51000);
+
+ v2tmp2 =
+ vtrnq_s32(vreinterpretq_s32_s16(q0s16), vreinterpretq_s32_s16(q2s16));
+ v2tmp3 =
+ vtrnq_s32(vreinterpretq_s32_s16(q1s16), vreinterpretq_s32_s16(q3s16));
+ v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]), // q0
+ vreinterpretq_s16_s32(v2tmp3.val[0])); // q1
+ v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]), // q2
+ vreinterpretq_s16_s32(v2tmp3.val[1])); // q3
+
+ q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]);
+ q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]);
+ q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]);
+ q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]);
+
+ q15s16 = vdupq_n_s16(7);
+ q11s16 = vaddq_s16(q11s16, q15s16);
+ q0s16 = vaddq_s16(q11s16, q12s16);
+ q1s16 = vsubq_s16(q11s16, q12s16);
+
+ q11s32 = q9s32;
+ q12s32 = q10s32;
+
+ d0s16 = vget_low_s16(q0s16);
+ d1s16 = vget_high_s16(q0s16);
+ d2s16 = vget_low_s16(q1s16);
+ d3s16 = vget_high_s16(q1s16);
+
+ d0s16 = vshr_n_s16(d0s16, 4);
+ d4s16 = vshr_n_s16(d1s16, 4);
+ d2s16 = vshr_n_s16(d2s16, 4);
+ d6s16 = vshr_n_s16(d3s16, 4);
+
+ d26s16 = vget_low_s16(q13s16);
+ d27s16 = vget_high_s16(q13s16);
+ d28s16 = vget_low_s16(q14s16);
+ d29s16 = vget_high_s16(q14s16);
+
+ q9s32 = vmlal_s16(q9s32, d28s16, d16s16);
+ q10s32 = vmlal_s16(q10s32, d28s16, d17s16);
+ q11s32 = vmlal_s16(q11s32, d29s16, d16s16);
+ q12s32 = vmlal_s16(q12s32, d29s16, d17s16);
+
+ q9s32 = vmlal_s16(q9s32, d26s16, d17s16);
+ q10s32 = vmlsl_s16(q10s32, d26s16, d16s16);
+ q11s32 = vmlal_s16(q11s32, d27s16, d17s16);
+ q12s32 = vmlsl_s16(q12s32, d27s16, d16s16);
+
+ d1s16 = vshrn_n_s32(q9s32, 16);
+ d3s16 = vshrn_n_s32(q10s32, 16);
+ d5s16 = vshrn_n_s32(q11s32, 16);
+ d7s16 = vshrn_n_s32(q12s32, 16);
+
+ qEmptys16 = vdupq_n_s16(0);
+ q14u16 = vceqq_s16(q14s16, qEmptys16);
+ q14u16 = vmvnq_u16(q14u16);
+
+ d28u16 = vget_low_u16(q14u16);
+ d29u16 = vget_high_u16(q14u16);
+ d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d28u16));
+ d5s16 = vsub_s16(d5s16, vreinterpret_s16_u16(d29u16));
+
+ q0s16 = vcombine_s16(d0s16, d1s16);
+ q1s16 = vcombine_s16(d2s16, d3s16);
+ q2s16 = vcombine_s16(d4s16, d5s16);
+ q3s16 = vcombine_s16(d6s16, d7s16);
+
+ vst1q_s16(output, q0s16);
+ vst1q_s16(output + 8, q1s16);
+ vst1q_s16(output + 16, q2s16);
+ vst1q_s16(output + 24, q3s16);
+ return;
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c b/media/libvpx/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c
new file mode 100644
index 0000000000..02056f2f90
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp8_rtcd.h"
+#include "vpx_ports/arm.h"
+
+#ifdef VPX_INCOMPATIBLE_GCC
+#include "./vp8_rtcd.h"
+void vp8_short_walsh4x4_neon(int16_t *input, int16_t *output, int pitch) {
+ vp8_short_walsh4x4_c(input, output, pitch);
+}
+#else
+void vp8_short_walsh4x4_neon(int16_t *input, int16_t *output, int pitch) {
+ uint16x4_t d16u16;
+ int16x8_t q0s16, q1s16;
+ int16x4_t dEmptys16, d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
+ int32x4_t qEmptys32, q0s32, q1s32, q2s32, q3s32, q8s32;
+ int32x4_t q9s32, q10s32, q11s32, q15s32;
+ uint32x4_t q8u32, q9u32, q10u32, q11u32;
+ int16x4x2_t v2tmp0, v2tmp1;
+ int32x2x2_t v2tmp2, v2tmp3;
+
+ dEmptys16 = vdup_n_s16(0);
+ qEmptys32 = vdupq_n_s32(0);
+ q15s32 = vdupq_n_s32(3);
+
+ d0s16 = vld1_s16(input);
+ input += pitch / 2;
+ d1s16 = vld1_s16(input);
+ input += pitch / 2;
+ d2s16 = vld1_s16(input);
+ input += pitch / 2;
+ d3s16 = vld1_s16(input);
+
+ v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16));
+ v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));
+ v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), // d0
+ vreinterpret_s16_s32(v2tmp3.val[0])); // d1
+ v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), // d2
+ vreinterpret_s16_s32(v2tmp3.val[1])); // d3
+
+ d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[0]);
+ d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[1]);
+ d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[1]);
+ d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[0]);
+
+ d4s16 = vshl_n_s16(d4s16, 2);
+ d5s16 = vshl_n_s16(d5s16, 2);
+ d6s16 = vshl_n_s16(d6s16, 2);
+ d7s16 = vshl_n_s16(d7s16, 2);
+
+ d16u16 = vceq_s16(d4s16, dEmptys16);
+ d16u16 = vmvn_u16(d16u16);
+
+ d0s16 = vadd_s16(d4s16, d5s16);
+ d3s16 = vsub_s16(d4s16, d5s16);
+ d1s16 = vadd_s16(d7s16, d6s16);
+ d2s16 = vsub_s16(d7s16, d6s16);
+
+ d0s16 = vsub_s16(d0s16, vreinterpret_s16_u16(d16u16));
+
+ // Second for-loop
+ v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));
+ v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16));
+ v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp3.val[1]), // d2
+ vreinterpret_s16_s32(v2tmp2.val[1])); // d3
+ v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp3.val[0]), // d0
+ vreinterpret_s16_s32(v2tmp2.val[0])); // d1
+
+ q8s32 = vaddl_s16(v2tmp1.val[0], v2tmp0.val[0]);
+ q9s32 = vaddl_s16(v2tmp1.val[1], v2tmp0.val[1]);
+ q10s32 = vsubl_s16(v2tmp1.val[1], v2tmp0.val[1]);
+ q11s32 = vsubl_s16(v2tmp1.val[0], v2tmp0.val[0]);
+
+ q0s32 = vaddq_s32(q8s32, q9s32);
+ q1s32 = vaddq_s32(q11s32, q10s32);
+ q2s32 = vsubq_s32(q11s32, q10s32);
+ q3s32 = vsubq_s32(q8s32, q9s32);
+
+ q8u32 = vcltq_s32(q0s32, qEmptys32);
+ q9u32 = vcltq_s32(q1s32, qEmptys32);
+ q10u32 = vcltq_s32(q2s32, qEmptys32);
+ q11u32 = vcltq_s32(q3s32, qEmptys32);
+
+ q8s32 = vreinterpretq_s32_u32(q8u32);
+ q9s32 = vreinterpretq_s32_u32(q9u32);
+ q10s32 = vreinterpretq_s32_u32(q10u32);
+ q11s32 = vreinterpretq_s32_u32(q11u32);
+
+ q0s32 = vsubq_s32(q0s32, q8s32);
+ q1s32 = vsubq_s32(q1s32, q9s32);
+ q2s32 = vsubq_s32(q2s32, q10s32);
+ q3s32 = vsubq_s32(q3s32, q11s32);
+
+ q8s32 = vaddq_s32(q0s32, q15s32);
+ q9s32 = vaddq_s32(q1s32, q15s32);
+ q10s32 = vaddq_s32(q2s32, q15s32);
+ q11s32 = vaddq_s32(q3s32, q15s32);
+
+ d0s16 = vshrn_n_s32(q8s32, 3);
+ d1s16 = vshrn_n_s32(q9s32, 3);
+ d2s16 = vshrn_n_s32(q10s32, 3);
+ d3s16 = vshrn_n_s32(q11s32, 3);
+
+ q0s16 = vcombine_s16(d0s16, d1s16);
+ q1s16 = vcombine_s16(d2s16, d3s16);
+
+ vst1q_s16(output, q0s16);
+ vst1q_s16(output + 8, q1s16);
+ return;
+}
+#endif // VPX_INCOMPATIBLE_GCC
diff --git a/media/libvpx/libvpx/vp8/encoder/bitstream.c b/media/libvpx/libvpx/vp8/encoder/bitstream.c
new file mode 100644
index 0000000000..190b013afd
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/bitstream.c
@@ -0,0 +1,1381 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8/common/header.h"
+#include "encodemv.h"
+#include "vp8/common/entropymode.h"
+#include "vp8/common/findnearmv.h"
+#include "mcomp.h"
+#include "vp8/common/systemdependent.h"
+#include <assert.h>
+#include <stdio.h>
+#include <limits.h>
+#include "vpx/vpx_encoder.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/compiler_attributes.h"
+#include "vpx_ports/system_state.h"
+#include "bitstream.h"
+
+#include "defaultcoefcounts.h"
+#include "vp8/common/common.h"
+
+const int vp8cx_base_skip_false_prob[128] = {
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 251, 248, 244, 240,
+ 236, 232, 229, 225, 221, 217, 213, 208, 204, 199, 194, 190, 187, 183, 179,
+ 175, 172, 168, 164, 160, 157, 153, 149, 145, 142, 138, 134, 130, 127, 124,
+ 120, 117, 114, 110, 107, 104, 101, 98, 95, 92, 89, 86, 83, 80, 77,
+ 74, 71, 68, 65, 62, 59, 56, 53, 50, 47, 44, 41, 38, 35, 32,
+ 30, 28, 26, 24, 22, 20, 18, 16,
+};
+
+#if defined(SECTIONBITS_OUTPUT)
+unsigned __int64 Sectionbits[500];
+#endif
+
+#ifdef MODE_STATS
+int count_mb_seg[4] = { 0, 0, 0, 0 };
+#endif
+
+static void update_mode(vp8_writer *const w, int n, vp8_token tok[/* n */],
+ vp8_tree tree, vp8_prob Pnew[/* n-1 */],
+ vp8_prob Pcur[/* n-1 */],
+ unsigned int bct[/* n-1 */][2],
+ const unsigned int num_events[/* n */]) {
+ unsigned int new_b = 0, old_b = 0;
+ int i = 0;
+
+ vp8_tree_probs_from_distribution(n--, tok, tree, Pnew, bct, num_events, 256,
+ 1);
+
+ do {
+ new_b += vp8_cost_branch(bct[i], Pnew[i]);
+ old_b += vp8_cost_branch(bct[i], Pcur[i]);
+ } while (++i < n);
+
+ if (new_b + (n << 8) < old_b) {
+ int j = 0;
+
+ vp8_write_bit(w, 1);
+
+ do {
+ const vp8_prob p = Pnew[j];
+
+ vp8_write_literal(w, Pcur[j] = p ? p : 1, 8);
+ } while (++j < n);
+ } else
+ vp8_write_bit(w, 0);
+}
+
+static void update_mbintra_mode_probs(VP8_COMP *cpi) {
+ VP8_COMMON *const x = &cpi->common;
+
+ vp8_writer *const w = cpi->bc;
+
+ {
+ vp8_prob Pnew[VP8_YMODES - 1];
+ unsigned int bct[VP8_YMODES - 1][2];
+
+ update_mode(w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree, Pnew,
+ x->fc.ymode_prob, bct, (unsigned int *)cpi->mb.ymode_count);
+ }
+ {
+ vp8_prob Pnew[VP8_UV_MODES - 1];
+ unsigned int bct[VP8_UV_MODES - 1][2];
+
+ update_mode(w, VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree, Pnew,
+ x->fc.uv_mode_prob, bct, (unsigned int *)cpi->mb.uv_mode_count);
+ }
+}
+
+static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
+ vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
+}
+
+static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
+ vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m);
+}
+
+static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p) {
+ vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m);
+}
+
+static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p) {
+ vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m);
+}
+
+static void write_split(vp8_writer *bc, int x) {
+ vp8_write_token(bc, vp8_mbsplit_tree, vp8_mbsplit_probs,
+ vp8_mbsplit_encodings + x);
+}
+
+void VPX_NO_UNSIGNED_SHIFT_CHECK vp8_pack_tokens(vp8_writer *w,
+ const TOKENEXTRA *p,
+ int xcount) {
+ const TOKENEXTRA *stop = p + xcount;
+ unsigned int split;
+ int shift;
+ int count = w->count;
+ unsigned int range = w->range;
+ unsigned int lowvalue = w->lowvalue;
+
+ while (p < stop) {
+ const int t = p->Token;
+ vp8_token *a = vp8_coef_encodings + t;
+ const vp8_extra_bit_struct *b = vp8_extra_bits + t;
+ int i = 0;
+ const unsigned char *pp = p->context_tree;
+ int v = a->value;
+ int n = a->Len;
+
+ if (p->skip_eob_node) {
+ n--;
+ i = 2;
+ }
+
+ do {
+ const int bb = (v >> --n) & 1;
+ split = 1 + (((range - 1) * pp[i >> 1]) >> 8);
+ i = vp8_coef_tree[i + bb];
+
+ if (bb) {
+ lowvalue += split;
+ range = range - split;
+ } else {
+ range = split;
+ }
+
+ shift = vp8_norm[range];
+ range <<= shift;
+ count += shift;
+
+ if (count >= 0) {
+ int offset = shift - count;
+
+ if ((lowvalue << (offset - 1)) & 0x80000000) {
+ int x = w->pos - 1;
+
+ while (x >= 0 && w->buffer[x] == 0xff) {
+ w->buffer[x] = (unsigned char)0;
+ x--;
+ }
+
+ w->buffer[x] += 1;
+ }
+
+ validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error);
+
+ w->buffer[w->pos++] = (lowvalue >> (24 - offset)) & 0xff;
+ shift = count;
+ lowvalue = (int)(((uint64_t)lowvalue << offset) & 0xffffff);
+ count -= 8;
+ }
+
+ lowvalue <<= shift;
+ } while (n);
+
+ if (b->base_val) {
+ const int e = p->Extra, L = b->Len;
+
+ if (L) {
+ const unsigned char *proba = b->prob;
+ const int v2 = e >> 1;
+ int n2 = L; /* number of bits in v2, assumed nonzero */
+ i = 0;
+
+ do {
+ const int bb = (v2 >> --n2) & 1;
+ split = 1 + (((range - 1) * proba[i >> 1]) >> 8);
+ i = b->tree[i + bb];
+
+ if (bb) {
+ lowvalue += split;
+ range = range - split;
+ } else {
+ range = split;
+ }
+
+ shift = vp8_norm[range];
+ range <<= shift;
+ count += shift;
+
+ if (count >= 0) {
+ int offset = shift - count;
+
+ if ((lowvalue << (offset - 1)) & 0x80000000) {
+ int x = w->pos - 1;
+
+ while (x >= 0 && w->buffer[x] == 0xff) {
+ w->buffer[x] = (unsigned char)0;
+ x--;
+ }
+
+ w->buffer[x] += 1;
+ }
+
+ validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error);
+
+ w->buffer[w->pos++] = (lowvalue >> (24 - offset)) & 0xff;
+ shift = count;
+ lowvalue = (int)(((uint64_t)lowvalue << offset) & 0xffffff);
+ count -= 8;
+ }
+
+ lowvalue <<= shift;
+ } while (n2);
+ }
+
+ {
+ split = (range + 1) >> 1;
+
+ if (e & 1) {
+ lowvalue += split;
+ range = range - split;
+ } else {
+ range = split;
+ }
+
+ range <<= 1;
+
+ if ((lowvalue & 0x80000000)) {
+ int x = w->pos - 1;
+
+ while (x >= 0 && w->buffer[x] == 0xff) {
+ w->buffer[x] = (unsigned char)0;
+ x--;
+ }
+
+ w->buffer[x] += 1;
+ }
+
+ lowvalue <<= 1;
+
+ if (!++count) {
+ count = -8;
+
+ validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error);
+
+ w->buffer[w->pos++] = (lowvalue >> 24);
+ lowvalue &= 0xffffff;
+ }
+ }
+ }
+
+ ++p;
+ }
+
+ w->count = count;
+ w->lowvalue = lowvalue;
+ w->range = range;
+}
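
The `while (x >= 0 && w->buffer[x] == 0xff)` loops above are the bool coder's carry propagation: when adding `split` overflows the 24-bit low value, the carry has to ripple backwards through any 0xff bytes already emitted. A standalone illustration of just that step (the buffer contents are made up):

#include <stdio.h>

int main(void) {
  /* Bytes already emitted, ending in a run of 0xff: 0x7f 0xff 0xff */
  unsigned char buffer[3] = { 0x7f, 0xff, 0xff };
  int x = 2; /* w->pos - 1: index of the last byte written */

  /* Same loop as in vp8_pack_tokens(): zero the 0xff run, then bump
   * the first byte that can absorb the carry. */
  while (x >= 0 && buffer[x] == 0xff) {
    buffer[x] = 0;
    x--;
  }
  buffer[x] += 1;

  printf("%02x %02x %02x\n", buffer[0], buffer[1], buffer[2]);
  /* Prints: 80 00 00 -- the carry rippled across both 0xff bytes */
  return 0;
}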
+
+static void write_partition_size(unsigned char *cx_data, int size) {
+ signed char csize;
+
+ csize = size & 0xff;
+ *cx_data = csize;
+ csize = (size >> 8) & 0xff;
+ *(cx_data + 1) = csize;
+ csize = (size >> 16) & 0xff;
+ *(cx_data + 2) = csize;
+}
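
write_partition_size() stores the length as three little-endian bytes. A minimal sketch of the inverse, the way a decoder would recover the 24-bit value (read_partition_size is a hypothetical helper, not part of libvpx):

#include <stdio.h>

/* Hypothetical inverse of write_partition_size() above. */
static int read_partition_size(const unsigned char *cx_data) {
  return cx_data[0] | (cx_data[1] << 8) | (cx_data[2] << 16);
}

int main(void) {
  unsigned char buf[3];
  buf[0] = 4660 & 0xff;        /* what write_partition_size(buf, 4660) */
  buf[1] = (4660 >> 8) & 0xff; /* would store, byte by byte */
  buf[2] = (4660 >> 16) & 0xff;
  printf("%d\n", read_partition_size(buf)); /* prints 4660 */
  return 0;
}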
+
+static void pack_tokens_into_partitions(VP8_COMP *cpi, unsigned char *cx_data,
+ unsigned char *cx_data_end,
+ int num_part) {
+ int i;
+ unsigned char *ptr = cx_data;
+ unsigned char *ptr_end = cx_data_end;
+ vp8_writer *w;
+
+ for (i = 0; i < num_part; ++i) {
+ int mb_row;
+
+ w = cpi->bc + i + 1;
+
+ vp8_start_encode(w, ptr, ptr_end);
+
+ for (mb_row = i; mb_row < cpi->common.mb_rows; mb_row += num_part) {
+ const TOKENEXTRA *p = cpi->tplist[mb_row].start;
+ const TOKENEXTRA *stop = cpi->tplist[mb_row].stop;
+ int tokens = (int)(stop - p);
+
+ vp8_pack_tokens(w, p, tokens);
+ }
+
+ vp8_stop_encode(w);
+ ptr += w->pos;
+ }
+}
+
+#if CONFIG_MULTITHREAD
+static void pack_mb_row_tokens(VP8_COMP *cpi, vp8_writer *w) {
+ int mb_row;
+
+ for (mb_row = 0; mb_row < cpi->common.mb_rows; ++mb_row) {
+ const TOKENEXTRA *p = cpi->tplist[mb_row].start;
+ const TOKENEXTRA *stop = cpi->tplist[mb_row].stop;
+ int tokens = (int)(stop - p);
+
+ vp8_pack_tokens(w, p, tokens);
+ }
+}
+#endif // CONFIG_MULTITHREAD
+
+static void write_mv_ref(vp8_writer *w, MB_PREDICTION_MODE m,
+ const vp8_prob *p) {
+ assert(NEARESTMV <= m && m <= SPLITMV);
+ vp8_write_token(w, vp8_mv_ref_tree, p,
+ vp8_mv_ref_encoding_array + (m - NEARESTMV));
+}
+
+static void write_sub_mv_ref(vp8_writer *w, B_PREDICTION_MODE m,
+ const vp8_prob *p) {
+ assert(LEFT4X4 <= m && m <= NEW4X4);
+ vp8_write_token(w, vp8_sub_mv_ref_tree, p,
+ vp8_sub_mv_ref_encoding_array + (m - LEFT4X4));
+}
+
+static void write_mv(vp8_writer *w, const MV *mv, const int_mv *ref,
+ const MV_CONTEXT *mvc) {
+ MV e;
+ e.row = mv->row - ref->as_mv.row;
+ e.col = mv->col - ref->as_mv.col;
+
+ vp8_encode_motion_vector(w, &e, mvc);
+}
+
+static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi,
+ const MACROBLOCKD *x) {
+ /* Encode the MB segment id. */
+ if (x->segmentation_enabled && x->update_mb_segmentation_map) {
+ switch (mi->segment_id) {
+ case 0:
+ vp8_write(w, 0, x->mb_segment_tree_probs[0]);
+ vp8_write(w, 0, x->mb_segment_tree_probs[1]);
+ break;
+ case 1:
+ vp8_write(w, 0, x->mb_segment_tree_probs[0]);
+ vp8_write(w, 1, x->mb_segment_tree_probs[1]);
+ break;
+ case 2:
+ vp8_write(w, 1, x->mb_segment_tree_probs[0]);
+ vp8_write(w, 0, x->mb_segment_tree_probs[2]);
+ break;
+ case 3:
+ vp8_write(w, 1, x->mb_segment_tree_probs[0]);
+ vp8_write(w, 1, x->mb_segment_tree_probs[2]);
+ break;
+
+ /* TRAP.. This should not happen */
+ default:
+ vp8_write(w, 0, x->mb_segment_tree_probs[0]);
+ vp8_write(w, 0, x->mb_segment_tree_probs[1]);
+ break;
+ }
+ }
+}
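
The switch above is a hand-unrolled walk of a fixed two-level tree: the first bit, coded with mb_segment_tree_probs[0], picks the half (segments 0-1 vs 2-3), and the second bit, coded with probs[1] or probs[2], picks the segment within it. A small standalone sketch of that mapping:

#include <stdio.h>

int main(void) {
  int id;
  for (id = 0; id < 4; ++id) {
    int bit0 = id >> 1; /* coded with mb_segment_tree_probs[0] */
    int bit1 = id & 1;  /* coded with probs[1] (left) or probs[2] */
    printf("segment %d -> bits %d,%d; second bit uses prob index %d\n",
           id, bit0, bit1, bit0 ? 2 : 1);
  }
  return 0;
}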
+void vp8_convert_rfct_to_prob(VP8_COMP *const cpi) {
+ const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
+ const int rf_intra = rfct[INTRA_FRAME];
+ const int rf_inter =
+ rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
+
+ /* Calculate the probabilities used to code the ref frame based on usage */
+ if (!(cpi->prob_intra_coded = rf_intra * 255 / (rf_intra + rf_inter))) {
+ cpi->prob_intra_coded = 1;
+ }
+
+ cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
+
+ if (!cpi->prob_last_coded) cpi->prob_last_coded = 1;
+
+ cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
+ ? (rfct[GOLDEN_FRAME] * 255) /
+ (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
+ : 128;
+
+ if (!cpi->prob_gf_coded) cpi->prob_gf_coded = 1;
+}
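
Each probability above is just scaled relative usage, count * 255 / total, clamped away from zero so no reference frame becomes impossible to code. A worked example with assumed counts:

#include <stdio.h>

int main(void) {
  /* Assumed count_mb_ref_frame_usage: intra, last, golden, altref */
  int rf_intra = 10, last = 60, golden = 20, altref = 10;
  int rf_inter = last + golden + altref;

  int prob_intra = rf_intra * 255 / (rf_intra + rf_inter); /* 25  */
  int prob_last = rf_inter ? last * 255 / rf_inter : 128;  /* 170 */
  int prob_gf = (golden + altref)
                    ? golden * 255 / (golden + altref)     /* 170 */
                    : 128;

  if (!prob_intra) prob_intra = 1;
  if (!prob_last) prob_last = 1;
  if (!prob_gf) prob_gf = 1;

  printf("intra %d last %d gf %d\n", prob_intra, prob_last, prob_gf);
  return 0;
}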
+
+static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
+ VP8_COMMON *const pc = &cpi->common;
+ vp8_writer *const w = cpi->bc;
+ const MV_CONTEXT *mvc = pc->fc.mvc;
+
+ MODE_INFO *m = pc->mi;
+ const int mis = pc->mode_info_stride;
+ int mb_row = -1;
+
+ int prob_skip_false = 0;
+
+ cpi->mb.partition_info = cpi->mb.pi;
+
+ vp8_convert_rfct_to_prob(cpi);
+
+ if (pc->mb_no_coeff_skip) {
+ int total_mbs = pc->mb_rows * pc->mb_cols;
+
+ prob_skip_false = (total_mbs - cpi->mb.skip_true_count) * 256 / total_mbs;
+
+ if (prob_skip_false <= 1) prob_skip_false = 1;
+
+ if (prob_skip_false > 255) prob_skip_false = 255;
+
+ cpi->prob_skip_false = prob_skip_false;
+ vp8_write_literal(w, prob_skip_false, 8);
+ }
+
+ vp8_write_literal(w, cpi->prob_intra_coded, 8);
+ vp8_write_literal(w, cpi->prob_last_coded, 8);
+ vp8_write_literal(w, cpi->prob_gf_coded, 8);
+
+ update_mbintra_mode_probs(cpi);
+
+ vp8_write_mvprobs(cpi);
+
+ while (++mb_row < pc->mb_rows) {
+ int mb_col = -1;
+
+ while (++mb_col < pc->mb_cols) {
+ const MB_MODE_INFO *const mi = &m->mbmi;
+ const MV_REFERENCE_FRAME rf = mi->ref_frame;
+ const MB_PREDICTION_MODE mode = mi->mode;
+
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+
+      /* Distance of MB to the various image edges.
+       * These are specified in 1/8th-pel units, as they are always
+       * compared to MV values that are in 1/8th-pel units.
+       */
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
+ xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
+
+ if (cpi->mb.e_mbd.update_mb_segmentation_map) {
+ write_mb_features(w, mi, &cpi->mb.e_mbd);
+ }
+
+ if (pc->mb_no_coeff_skip) {
+ vp8_encode_bool(w, m->mbmi.mb_skip_coeff, prob_skip_false);
+ }
+
+ if (rf == INTRA_FRAME) {
+ vp8_write(w, 0, cpi->prob_intra_coded);
+ write_ymode(w, mode, pc->fc.ymode_prob);
+
+ if (mode == B_PRED) {
+ int j = 0;
+
+ do {
+ write_bmode(w, m->bmi[j].as_mode, pc->fc.bmode_prob);
+ } while (++j < 16);
+ }
+
+ write_uv_mode(w, mi->uv_mode, pc->fc.uv_mode_prob);
+ } else { /* inter coded */
+ int_mv best_mv;
+ vp8_prob mv_ref_p[VP8_MVREFS - 1];
+
+ vp8_write(w, 1, cpi->prob_intra_coded);
+
+ if (rf == LAST_FRAME)
+ vp8_write(w, 0, cpi->prob_last_coded);
+ else {
+ vp8_write(w, 1, cpi->prob_last_coded);
+ vp8_write(w, (rf == GOLDEN_FRAME) ? 0 : 1, cpi->prob_gf_coded);
+ }
+
+ {
+ int_mv n1, n2;
+ int ct[4];
+
+ vp8_find_near_mvs(xd, m, &n1, &n2, &best_mv, ct, rf,
+ cpi->common.ref_frame_sign_bias);
+ vp8_clamp_mv2(&best_mv, xd);
+
+ vp8_mv_ref_probs(mv_ref_p, ct);
+ }
+
+ write_mv_ref(w, mode, mv_ref_p);
+
+ switch (mode) /* new, split require MVs */
+ {
+ case NEWMV: write_mv(w, &mi->mv.as_mv, &best_mv, mvc); break;
+
+ case SPLITMV: {
+ int j = 0;
+
+#ifdef MODE_STATS
+ ++count_mb_seg[mi->partitioning];
+#endif
+
+ write_split(w, mi->partitioning);
+
+ do {
+ B_PREDICTION_MODE blockmode;
+ int_mv blockmv;
+ const int *const L = vp8_mbsplits[mi->partitioning];
+ int k = -1; /* first block in subset j */
+ int mv_contz;
+ int_mv leftmv, abovemv;
+
+ blockmode = cpi->mb.partition_info->bmi[j].mode;
+ blockmv = cpi->mb.partition_info->bmi[j].mv;
+ while (j != L[++k]) {
+ assert(k < 16);
+ }
+ leftmv.as_int = left_block_mv(m, k);
+ abovemv.as_int = above_block_mv(m, k, mis);
+ mv_contz = vp8_mv_cont(&leftmv, &abovemv);
+
+ write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2[mv_contz]);
+
+ if (blockmode == NEW4X4) {
+ write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *)mvc);
+ }
+ } while (++j < cpi->mb.partition_info->count);
+ break;
+ }
+ default: break;
+ }
+ }
+
+ ++m;
+ cpi->mb.partition_info++;
+ }
+
+ ++m; /* skip L prediction border */
+ cpi->mb.partition_info++;
+ }
+}
+
+static void write_kfmodes(VP8_COMP *cpi) {
+ vp8_writer *const bc = cpi->bc;
+ const VP8_COMMON *const c = &cpi->common;
+ /* const */
+ MODE_INFO *m = c->mi;
+
+ int mb_row = -1;
+ int prob_skip_false = 0;
+
+ if (c->mb_no_coeff_skip) {
+ int total_mbs = c->mb_rows * c->mb_cols;
+
+ prob_skip_false = (total_mbs - cpi->mb.skip_true_count) * 256 / total_mbs;
+
+ if (prob_skip_false <= 1) prob_skip_false = 1;
+
+ if (prob_skip_false >= 255) prob_skip_false = 255;
+
+ cpi->prob_skip_false = prob_skip_false;
+ vp8_write_literal(bc, prob_skip_false, 8);
+ }
+
+ while (++mb_row < c->mb_rows) {
+ int mb_col = -1;
+
+ while (++mb_col < c->mb_cols) {
+ const int ym = m->mbmi.mode;
+
+ if (cpi->mb.e_mbd.update_mb_segmentation_map) {
+ write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd);
+ }
+
+ if (c->mb_no_coeff_skip) {
+ vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
+ }
+
+ kfwrite_ymode(bc, ym, vp8_kf_ymode_prob);
+
+ if (ym == B_PRED) {
+ const int mis = c->mode_info_stride;
+ int i = 0;
+
+ do {
+ const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
+ const B_PREDICTION_MODE L = left_block_mode(m, i);
+ const int bm = m->bmi[i].as_mode;
+
+ write_bmode(bc, bm, vp8_kf_bmode_prob[A][L]);
+ } while (++i < 16);
+ }
+
+ write_uv_mode(bc, (m++)->mbmi.uv_mode, vp8_kf_uv_mode_prob);
+ }
+
+ m++; /* skip L prediction border */
+ }
+}
+
+#if 0
+/* This function is used for debugging probability trees. */
+static void print_prob_tree(vp8_prob
+ coef_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES])
+{
+ /* print coef probability tree */
+ int i,j,k,l;
+ FILE* f = fopen("enc_tree_probs.txt", "a");
+ fprintf(f, "{\n");
+ for (i = 0; i < BLOCK_TYPES; ++i)
+ {
+ fprintf(f, " {\n");
+ for (j = 0; j < COEF_BANDS; ++j)
+ {
+ fprintf(f, " {\n");
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
+ {
+ fprintf(f, " {");
+ for (l = 0; l < ENTROPY_NODES; ++l)
+ {
+ fprintf(f, "%3u, ",
+ (unsigned int)(coef_probs [i][j][k][l]));
+ }
+ fprintf(f, " }\n");
+ }
+ fprintf(f, " }\n");
+ }
+ fprintf(f, " }\n");
+ }
+ fprintf(f, "}\n");
+ fclose(f);
+}
+#endif
+
+static void sum_probs_over_prev_coef_context(
+ const unsigned int probs[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS],
+ unsigned int *out) {
+ int i, j;
+ for (i = 0; i < MAX_ENTROPY_TOKENS; ++i) {
+ for (j = 0; j < PREV_COEF_CONTEXTS; ++j) {
+ const unsigned int tmp = out[i];
+ out[i] += probs[j][i];
+ /* check for wrap */
+ if (out[i] < tmp) out[i] = UINT_MAX;
+ }
+ }
+}
+
+static int prob_update_savings(const unsigned int *ct, const vp8_prob oldp,
+ const vp8_prob newp, const vp8_prob upd) {
+ const int old_b = vp8_cost_branch(ct, oldp);
+ const int new_b = vp8_cost_branch(ct, newp);
+ const int update_b = 8 + ((vp8_cost_one(upd) - vp8_cost_zero(upd)) >> 8);
+
+ return old_b - new_b - update_b;
+}
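
In words: updating pays off only when the branch, coded once per count in ct, gets cheaper by more than the 8-bit literal plus the extra cost of signalling "update" instead of "no update". A toy version using an idealized -log2 cost in place of the vp8_prob_cost table (counts and probabilities are assumed):

#include <math.h>
#include <stdio.h>

/* Idealized stand-in for vp8_cost_branch(): bits to code ct[0] zeros
 * and ct[1] ones when the zero-probability is p/256. */
static double branch_bits(const unsigned int ct[2], int p) {
  return ct[0] * -log2(p / 256.0) + ct[1] * -log2(1.0 - p / 256.0);
}

int main(void) {
  const unsigned int ct[2] = { 900, 100 }; /* assumed branch counts */
  double old_b = branch_bits(ct, 128);     /* stale 50/50 probability */
  double new_b = branch_bits(ct, 230);     /* probability fit to counts */
  double update_b = 8.0; /* 8-bit literal; the flag-cost term is omitted */

  printf("savings ~ %.0f bits\n", old_b - new_b - update_b);
  return 0;
}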
+
+static int independent_coef_context_savings(VP8_COMP *cpi) {
+ MACROBLOCK *const x = &cpi->mb;
+ int savings = 0;
+ int i = 0;
+ do {
+ int j = 0;
+ do {
+ int k = 0;
+ unsigned int prev_coef_count_sum[MAX_ENTROPY_TOKENS] = { 0 };
+ int prev_coef_savings[MAX_ENTROPY_TOKENS] = { 0 };
+ const unsigned int(*probs)[MAX_ENTROPY_TOKENS];
+ /* Calculate new probabilities given the constraint that
+ * they must be equal over the prev coef contexts
+ */
+
+ probs = (const unsigned int(*)[MAX_ENTROPY_TOKENS])x->coef_counts[i][j];
+
+ /* Reset to default probabilities at key frames */
+ if (cpi->common.frame_type == KEY_FRAME) {
+ probs = default_coef_counts[i][j];
+ }
+
+ sum_probs_over_prev_coef_context(probs, prev_coef_count_sum);
+
+ do {
+ /* at every context */
+
+ /* calc probs and branch cts for this frame only */
+ int t = 0; /* token/prob index */
+
+ vp8_tree_probs_from_distribution(
+ MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ cpi->frame_coef_probs[i][j][k], cpi->frame_branch_ct[i][j][k],
+ prev_coef_count_sum, 256, 1);
+
+ do {
+ const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t];
+ const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
+ const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t];
+ const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
+ const int s = prob_update_savings(ct, oldp, newp, upd);
+
+ if (cpi->common.frame_type != KEY_FRAME ||
+ (cpi->common.frame_type == KEY_FRAME && newp != oldp)) {
+ prev_coef_savings[t] += s;
+ }
+ } while (++t < ENTROPY_NODES);
+ } while (++k < PREV_COEF_CONTEXTS);
+ k = 0;
+ do {
+ /* We only update probabilities if we can save bits, except
+ * for key frames where we have to update all probabilities
+ * to get the equal probabilities across the prev coef
+ * contexts.
+ */
+ if (prev_coef_savings[k] > 0 || cpi->common.frame_type == KEY_FRAME) {
+ savings += prev_coef_savings[k];
+ }
+ } while (++k < ENTROPY_NODES);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES);
+ return savings;
+}
+
+static int default_coef_context_savings(VP8_COMP *cpi) {
+ MACROBLOCK *const x = &cpi->mb;
+ int savings = 0;
+ int i = 0;
+ do {
+ int j = 0;
+ do {
+ int k = 0;
+ do {
+ /* at every context */
+
+ /* calc probs and branch cts for this frame only */
+ int t = 0; /* token/prob index */
+
+ vp8_tree_probs_from_distribution(
+ MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ cpi->frame_coef_probs[i][j][k], cpi->frame_branch_ct[i][j][k],
+ x->coef_counts[i][j][k], 256, 1);
+
+ do {
+ const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t];
+ const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
+ const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t];
+ const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
+ const int s = prob_update_savings(ct, oldp, newp, upd);
+
+ if (s > 0) {
+ savings += s;
+ }
+ } while (++t < ENTROPY_NODES);
+ } while (++k < PREV_COEF_CONTEXTS);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES);
+ return savings;
+}
+
+void vp8_calc_ref_frame_costs(int *ref_frame_cost, int prob_intra,
+ int prob_last, int prob_garf) {
+ assert(prob_intra >= 0);
+ assert(prob_intra <= 255);
+ assert(prob_last >= 0);
+ assert(prob_last <= 255);
+ assert(prob_garf >= 0);
+ assert(prob_garf <= 255);
+ ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(prob_intra);
+ ref_frame_cost[LAST_FRAME] =
+ vp8_cost_one(prob_intra) + vp8_cost_zero(prob_last);
+ ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(prob_intra) +
+ vp8_cost_one(prob_last) +
+ vp8_cost_zero(prob_garf);
+ ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(prob_intra) +
+ vp8_cost_one(prob_last) +
+ vp8_cost_one(prob_garf);
+}
+
+int vp8_estimate_entropy_savings(VP8_COMP *cpi) {
+ int savings = 0;
+
+ const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
+ const int rf_intra = rfct[INTRA_FRAME];
+ const int rf_inter =
+ rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
+ int new_intra, new_last, new_garf, oldtotal, newtotal;
+ int ref_frame_cost[MAX_REF_FRAMES];
+
+ vpx_clear_system_state();
+
+ if (cpi->common.frame_type != KEY_FRAME) {
+ if (!(new_intra = rf_intra * 255 / (rf_intra + rf_inter))) new_intra = 1;
+
+ new_last = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
+
+ new_garf = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
+ ? (rfct[GOLDEN_FRAME] * 255) /
+ (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
+ : 128;
+
+ vp8_calc_ref_frame_costs(ref_frame_cost, new_intra, new_last, new_garf);
+
+ newtotal = rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] +
+ rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] +
+ rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] +
+ rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME];
+
+ /* old costs */
+ vp8_calc_ref_frame_costs(ref_frame_cost, cpi->prob_intra_coded,
+ cpi->prob_last_coded, cpi->prob_gf_coded);
+
+ oldtotal = rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] +
+ rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] +
+ rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] +
+ rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME];
+
+ savings += (oldtotal - newtotal) / 256;
+ }
+
+ if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
+ savings += independent_coef_context_savings(cpi);
+ } else {
+ savings += default_coef_context_savings(cpi);
+ }
+
+ return savings;
+}
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+int vp8_update_coef_context(VP8_COMP *cpi) {
+ int savings = 0;
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ /* Reset to default counts/probabilities at key frames */
+ vp8_copy(cpi->mb.coef_counts, default_coef_counts);
+ }
+
+ if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
+ savings += independent_coef_context_savings(cpi);
+ else
+ savings += default_coef_context_savings(cpi);
+
+ return savings;
+}
+#endif
+
+void vp8_update_coef_probs(VP8_COMP *cpi) {
+ int i = 0;
+#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ vp8_writer *const w = cpi->bc;
+#endif
+
+ vpx_clear_system_state();
+
+ do {
+ int j = 0;
+
+ do {
+ int k = 0;
+ int prev_coef_savings[ENTROPY_NODES] = { 0 };
+ if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ int t; /* token/prob index */
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+ const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t];
+ const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
+ const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t];
+ const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
+
+ prev_coef_savings[t] += prob_update_savings(ct, oldp, newp, upd);
+ }
+ }
+ k = 0;
+ }
+ do {
+ /* note: use result from vp8_estimate_entropy_savings, so no
+ * need to call vp8_tree_probs_from_distribution here.
+ */
+
+ /* at every context */
+
+ /* calc probs and branch cts for this frame only */
+ int t = 0; /* token/prob index */
+
+ do {
+ const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
+
+ vp8_prob *Pold = cpi->common.fc.coef_probs[i][j][k] + t;
+ const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
+
+ int s = prev_coef_savings[t];
+ int u = 0;
+
+ if (!(cpi->oxcf.error_resilient_mode &
+ VPX_ERROR_RESILIENT_PARTITIONS)) {
+ s = prob_update_savings(cpi->frame_branch_ct[i][j][k][t], *Pold,
+ newp, upd);
+ }
+
+ if (s > 0) u = 1;
+
+ /* Force updates on key frames if the new is different,
+ * so that we can be sure we end up with equal probabilities
+ * over the prev coef contexts.
+ */
+ if ((cpi->oxcf.error_resilient_mode &
+ VPX_ERROR_RESILIENT_PARTITIONS) &&
+ cpi->common.frame_type == KEY_FRAME && newp != *Pold) {
+ u = 1;
+ }
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ cpi->update_probs[i][j][k][t] = u;
+#else
+ vp8_write(w, u, upd);
+#endif
+
+ if (u) {
+ /* send/use new probability */
+
+ *Pold = newp;
+#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ vp8_write_literal(w, newp, 8);
+#endif
+ }
+
+ } while (++t < ENTROPY_NODES);
+
+ } while (++k < PREV_COEF_CONTEXTS);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES);
+}
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+static void pack_coef_probs(VP8_COMP *cpi) {
+ int i = 0;
+ vp8_writer *const w = cpi->bc;
+
+ do {
+ int j = 0;
+
+ do {
+ int k = 0;
+
+ do {
+ int t = 0; /* token/prob index */
+
+ do {
+ const vp8_prob newp = cpi->common.fc.coef_probs[i][j][k][t];
+ const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
+
+ const char u = cpi->update_probs[i][j][k][t];
+
+ vp8_write(w, u, upd);
+
+ if (u) {
+ /* send/use new probability */
+ vp8_write_literal(w, newp, 8);
+ }
+ } while (++t < ENTROPY_NODES);
+ } while (++k < PREV_COEF_CONTEXTS);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES);
+}
+#endif
+
+#ifdef PACKET_TESTING
+FILE *vpxlogc = 0;
+#endif
+
+static void put_delta_q(vp8_writer *bc, int delta_q) {
+ if (delta_q != 0) {
+ vp8_write_bit(bc, 1);
+ vp8_write_literal(bc, abs(delta_q), 4);
+
+ if (delta_q < 0)
+ vp8_write_bit(bc, 1);
+ else
+ vp8_write_bit(bc, 0);
+ } else
+ vp8_write_bit(bc, 0);
+}
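
put_delta_q() writes a presence flag, then a 4-bit magnitude (MSB first, matching vp8_write_literal) and a sign bit. A self-contained sketch that mirrors that layout into a plain bit array and inverts it; encode_delta_q/decode_delta_q are hypothetical stand-ins for the writer calls above:

#include <stdio.h>
#include <stdlib.h>

static int encode_delta_q(int *bits, int delta_q) {
  int n = 0, i;
  if (delta_q != 0) {
    bits[n++] = 1;                                  /* present flag */
    for (i = 3; i >= 0; --i) bits[n++] = (abs(delta_q) >> i) & 1;
    bits[n++] = delta_q < 0;                        /* sign */
  } else {
    bits[n++] = 0;
  }
  return n;
}

static int decode_delta_q(const int *bits) {
  int v = 0, i, n = 0;
  if (!bits[n++]) return 0;
  for (i = 0; i < 4; ++i) v = (v << 1) | bits[n++];
  return bits[n] ? -v : v;
}

int main(void) {
  int bits[8];
  encode_delta_q(bits, -5);
  printf("%d\n", decode_delta_q(bits)); /* prints -5 */
  return 0;
}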
+
+void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
+ unsigned char *dest_end, size_t *size) {
+ int i, j;
+ VP8_HEADER oh;
+ VP8_COMMON *const pc = &cpi->common;
+ vp8_writer *const bc = cpi->bc;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ int extra_bytes_packed = 0;
+
+ unsigned char *cx_data = dest;
+ unsigned char *cx_data_end = dest_end;
+ const int *mb_feature_data_bits;
+
+ oh.show_frame = (int)pc->show_frame;
+ oh.type = (int)pc->frame_type;
+ oh.version = pc->version;
+ oh.first_partition_length_in_bytes = 0;
+
+ mb_feature_data_bits = vp8_mb_feature_data_bits;
+
+ bc[0].error = &pc->error;
+
+ validate_buffer(cx_data, 3, cx_data_end, &cpi->common.error);
+ cx_data += 3;
+
+#if defined(SECTIONBITS_OUTPUT)
+ Sectionbits[active_section = 1] += sizeof(VP8_HEADER) * 8 * 256;
+#endif
+
+  /* Every key frame sends the start code, width, height, scale factor,
+   * clamp and color type.
+   */
+ if (oh.type == KEY_FRAME) {
+ int v;
+
+ validate_buffer(cx_data, 7, cx_data_end, &cpi->common.error);
+
+ /* Start / synch code */
+ cx_data[0] = 0x9D;
+ cx_data[1] = 0x01;
+ cx_data[2] = 0x2a;
+
+ /* Pack scale and frame size into 16 bits. Store it 8 bits at a time.
+ * https://tools.ietf.org/html/rfc6386
+ * 9.1. Uncompressed Data Chunk
+ * 16 bits : (2 bits Horizontal Scale << 14) | Width (14 bits)
+ * 16 bits : (2 bits Vertical Scale << 14) | Height (14 bits)
+ */
+ v = (pc->horiz_scale << 14) | pc->Width;
+ cx_data[3] = v & 0xff;
+ cx_data[4] = v >> 8;
+
+ v = (pc->vert_scale << 14) | pc->Height;
+ cx_data[5] = v & 0xff;
+ cx_data[6] = v >> 8;
+
+ extra_bytes_packed = 7;
+ cx_data += extra_bytes_packed;
+
+ vp8_start_encode(bc, cx_data, cx_data_end);
+
+ /* signal clr type */
+ vp8_write_bit(bc, 0);
+ vp8_write_bit(bc, pc->clamp_type);
+
+ } else {
+ vp8_start_encode(bc, cx_data, cx_data_end);
+ }
+
+ /* Signal whether or not Segmentation is enabled */
+ vp8_write_bit(bc, xd->segmentation_enabled);
+
+ /* Indicate which features are enabled */
+ if (xd->segmentation_enabled) {
+ /* Signal whether or not the segmentation map is being updated. */
+ vp8_write_bit(bc, xd->update_mb_segmentation_map);
+ vp8_write_bit(bc, xd->update_mb_segmentation_data);
+
+ if (xd->update_mb_segmentation_data) {
+ signed char Data;
+
+ vp8_write_bit(bc, xd->mb_segement_abs_delta);
+
+ /* For each segmentation feature (Quant and loop filter level) */
+ for (i = 0; i < MB_LVL_MAX; ++i) {
+ /* For each of the segments */
+ for (j = 0; j < MAX_MB_SEGMENTS; ++j) {
+ Data = xd->segment_feature_data[i][j];
+
+ /* Frame level data */
+ if (Data) {
+ vp8_write_bit(bc, 1);
+
+ if (Data < 0) {
+ Data = -Data;
+ vp8_write_literal(bc, Data, mb_feature_data_bits[i]);
+ vp8_write_bit(bc, 1);
+ } else {
+ vp8_write_literal(bc, Data, mb_feature_data_bits[i]);
+ vp8_write_bit(bc, 0);
+ }
+ } else
+ vp8_write_bit(bc, 0);
+ }
+ }
+ }
+
+ if (xd->update_mb_segmentation_map) {
+ /* Write the probs used to decode the segment id for each mb */
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
+ int Data = xd->mb_segment_tree_probs[i];
+
+ if (Data != 255) {
+ vp8_write_bit(bc, 1);
+ vp8_write_literal(bc, Data, 8);
+ } else
+ vp8_write_bit(bc, 0);
+ }
+ }
+ }
+
+ vp8_write_bit(bc, pc->filter_type);
+ vp8_write_literal(bc, pc->filter_level, 6);
+ vp8_write_literal(bc, pc->sharpness_level, 3);
+
+ /* Write out loop filter deltas applied at the MB level based on mode
+ * or ref frame (if they are enabled).
+ */
+ vp8_write_bit(bc, xd->mode_ref_lf_delta_enabled);
+
+ if (xd->mode_ref_lf_delta_enabled) {
+ /* Do the deltas need to be updated */
+ int send_update =
+ xd->mode_ref_lf_delta_update || cpi->oxcf.error_resilient_mode;
+
+ vp8_write_bit(bc, send_update);
+ if (send_update) {
+ int Data;
+
+ /* Send update */
+ for (i = 0; i < MAX_REF_LF_DELTAS; ++i) {
+ Data = xd->ref_lf_deltas[i];
+
+ /* Frame level data */
+ if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i] ||
+ cpi->oxcf.error_resilient_mode) {
+ xd->last_ref_lf_deltas[i] = xd->ref_lf_deltas[i];
+ vp8_write_bit(bc, 1);
+
+ if (Data > 0) {
+ vp8_write_literal(bc, (Data & 0x3F), 6);
+ vp8_write_bit(bc, 0); /* sign */
+ } else {
+ Data = -Data;
+ vp8_write_literal(bc, (Data & 0x3F), 6);
+ vp8_write_bit(bc, 1); /* sign */
+ }
+ } else
+ vp8_write_bit(bc, 0);
+ }
+
+ /* Send update */
+ for (i = 0; i < MAX_MODE_LF_DELTAS; ++i) {
+ Data = xd->mode_lf_deltas[i];
+
+ if (xd->mode_lf_deltas[i] != xd->last_mode_lf_deltas[i] ||
+ cpi->oxcf.error_resilient_mode) {
+ xd->last_mode_lf_deltas[i] = xd->mode_lf_deltas[i];
+ vp8_write_bit(bc, 1);
+
+ if (Data > 0) {
+ vp8_write_literal(bc, (Data & 0x3F), 6);
+ vp8_write_bit(bc, 0); /* sign */
+ } else {
+ Data = -Data;
+ vp8_write_literal(bc, (Data & 0x3F), 6);
+ vp8_write_bit(bc, 1); /* sign */
+ }
+ } else
+ vp8_write_bit(bc, 0);
+ }
+ }
+ }
+
+  /* signal here whether the multi-token partition is enabled */
+ vp8_write_literal(bc, pc->multi_token_partition, 2);
+
+ /* Frame Qbaseline quantizer index */
+ vp8_write_literal(bc, pc->base_qindex, 7);
+
+  /* Transmit DC, second-order and UV quantizer delta information */
+ put_delta_q(bc, pc->y1dc_delta_q);
+ put_delta_q(bc, pc->y2dc_delta_q);
+ put_delta_q(bc, pc->y2ac_delta_q);
+ put_delta_q(bc, pc->uvdc_delta_q);
+ put_delta_q(bc, pc->uvac_delta_q);
+
+ /* When there is a key frame all reference buffers are updated using
+ * the new key frame
+ */
+ if (pc->frame_type != KEY_FRAME) {
+ /* Should the GF or ARF be updated using the transmitted frame
+ * or buffer
+ */
+ vp8_write_bit(bc, pc->refresh_golden_frame);
+ vp8_write_bit(bc, pc->refresh_alt_ref_frame);
+
+ /* If not being updated from current frame should either GF or ARF
+ * be updated from another buffer
+ */
+ if (!pc->refresh_golden_frame)
+ vp8_write_literal(bc, pc->copy_buffer_to_gf, 2);
+
+ if (!pc->refresh_alt_ref_frame)
+ vp8_write_literal(bc, pc->copy_buffer_to_arf, 2);
+
+ /* Indicate reference frame sign bias for Golden and ARF frames
+ * (always 0 for last frame buffer)
+ */
+ vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
+ vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
+ }
+
+#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
+ if (pc->frame_type == KEY_FRAME) {
+ pc->refresh_entropy_probs = 1;
+ } else {
+ pc->refresh_entropy_probs = 0;
+ }
+ }
+#endif
+
+ vp8_write_bit(bc, pc->refresh_entropy_probs);
+
+ if (pc->frame_type != KEY_FRAME) vp8_write_bit(bc, pc->refresh_last_frame);
+
+ vpx_clear_system_state();
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ pack_coef_probs(cpi);
+#else
+ if (pc->refresh_entropy_probs == 0) {
+ /* save a copy for later refresh */
+ memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
+ }
+
+ vp8_update_coef_probs(cpi);
+#endif
+
+ /* Write out the mb_no_coeff_skip flag */
+ vp8_write_bit(bc, pc->mb_no_coeff_skip);
+
+ if (pc->frame_type == KEY_FRAME) {
+ write_kfmodes(cpi);
+ } else {
+ pack_inter_mode_mvs(cpi);
+ }
+
+ vp8_stop_encode(bc);
+
+ cx_data += bc->pos;
+
+ oh.first_partition_length_in_bytes = cpi->bc->pos;
+
+ /* update frame tag */
+ {
+    /* Pack partition size, show frame, version and frame type into 24 bits.
+ * Store it 8 bits at a time.
+ * https://tools.ietf.org/html/rfc6386
+ * 9.1. Uncompressed Data Chunk
+ * The uncompressed data chunk comprises a common (for key frames and
+ * interframes) 3-byte frame tag that contains four fields, as follows:
+ *
+ * 1. A 1-bit frame type (0 for key frames, 1 for interframes).
+ *
+ * 2. A 3-bit version number (0 - 3 are defined as four different
+ * profiles with different decoding complexity; other values may be
+ * defined for future variants of the VP8 data format).
+ *
+ * 3. A 1-bit show_frame flag (0 when current frame is not for display,
+ * 1 when current frame is for display).
+ *
+ * 4. A 19-bit field containing the size of the first data partition in
+ * bytes
+ */
+ int v = (oh.first_partition_length_in_bytes << 5) | (oh.show_frame << 4) |
+ (oh.version << 1) | oh.type;
+
+ dest[0] = v & 0xff;
+ dest[1] = (v >> 8) & 0xff;
+ dest[2] = v >> 16;
+ }
+
+ *size = VP8_HEADER_SIZE + extra_bytes_packed + cpi->bc->pos;
+
+ cpi->partition_sz[0] = (unsigned int)*size;
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ {
+ const int num_part = (1 << pc->multi_token_partition);
+ unsigned char *dp = cpi->partition_d[0] + cpi->partition_sz[0];
+
+ if (num_part > 1) {
+ /* write token part sizes (all but last) if more than 1 */
+ validate_buffer(dp, 3 * (num_part - 1), cpi->partition_d_end[0],
+ &pc->error);
+
+ cpi->partition_sz[0] += 3 * (num_part - 1);
+
+ for (i = 1; i < num_part; ++i) {
+ write_partition_size(dp, cpi->partition_sz[i]);
+ dp += 3;
+ }
+ }
+
+ if (!cpi->output_partition) {
+ /* concatenate partition buffers */
+ for (i = 0; i < num_part; ++i) {
+ memmove(dp, cpi->partition_d[i + 1], cpi->partition_sz[i + 1]);
+ cpi->partition_d[i + 1] = dp;
+ dp += cpi->partition_sz[i + 1];
+ }
+ }
+
+ /* update total size */
+ *size = 0;
+ for (i = 0; i < num_part + 1; ++i) {
+ *size += cpi->partition_sz[i];
+ }
+ }
+#else
+ if (pc->multi_token_partition != ONE_PARTITION) {
+ int num_part = 1 << pc->multi_token_partition;
+
+ /* partition size table at the end of first partition */
+ cpi->partition_sz[0] += 3 * (num_part - 1);
+ *size += 3 * (num_part - 1);
+
+ validate_buffer(cx_data, 3 * (num_part - 1), cx_data_end, &pc->error);
+
+ for (i = 1; i < num_part + 1; ++i) {
+ cpi->bc[i].error = &pc->error;
+ }
+
+ pack_tokens_into_partitions(cpi, cx_data + 3 * (num_part - 1), cx_data_end,
+ num_part);
+
+ for (i = 1; i < num_part; ++i) {
+ cpi->partition_sz[i] = cpi->bc[i].pos;
+ write_partition_size(cx_data, cpi->partition_sz[i]);
+ cx_data += 3;
+ *size += cpi->partition_sz[i]; /* add to total */
+ }
+
+ /* add last partition to total size */
+ cpi->partition_sz[i] = cpi->bc[i].pos;
+ *size += cpi->partition_sz[i];
+ } else {
+ bc[1].error = &pc->error;
+
+ vp8_start_encode(&cpi->bc[1], cx_data, cx_data_end);
+
+#if CONFIG_MULTITHREAD
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
+ pack_mb_row_tokens(cpi, &cpi->bc[1]);
+ } else {
+ vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count);
+ }
+#else
+ vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count);
+#endif // CONFIG_MULTITHREAD
+
+ vp8_stop_encode(&cpi->bc[1]);
+
+ *size += cpi->bc[1].pos;
+ cpi->partition_sz[1] = cpi->bc[1].pos;
+ }
+#endif
+}
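
As a cross-check on the tag layout quoted above, a standalone sketch that parses the 3-byte frame tag back into its RFC 6386 fields; the byte values are made up (a shown key frame, version 0, 18-byte first partition):

#include <stdio.h>

int main(void) {
  /* Illustrative tag bytes: key frame, version 0, shown, size 18 */
  const unsigned char dest[3] = { 0x50, 0x02, 0x00 };
  const int v = dest[0] | (dest[1] << 8) | (dest[2] << 16);

  printf("type %d\n", v & 1);                    /* 0: key frame   */
  printf("version %d\n", (v >> 1) & 7);          /* 3-bit profile  */
  printf("show_frame %d\n", (v >> 4) & 1);       /* 1: for display */
  printf("first part %d\n", (v >> 5) & 0x7ffff); /* 19 bits: 18    */
  return 0;
}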
diff --git a/media/libvpx/libvpx/vp8/encoder/bitstream.h b/media/libvpx/libvpx/vp8/encoder/bitstream.h
new file mode 100644
index 0000000000..ee3f3e4aab
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/bitstream.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_BITSTREAM_H_
+#define VPX_VP8_ENCODER_BITSTREAM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "vp8/encoder/treewriter.h"
+#include "vp8/encoder/tokenize.h"
+
+void vp8_pack_tokens(vp8_writer *w, const TOKENEXTRA *p, int xcount);
+void vp8_convert_rfct_to_prob(struct VP8_COMP *const cpi);
+void vp8_calc_ref_frame_costs(int *ref_frame_cost, int prob_intra,
+ int prob_last, int prob_garf);
+int vp8_estimate_entropy_savings(struct VP8_COMP *cpi);
+void vp8_update_coef_probs(struct VP8_COMP *cpi);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_BITSTREAM_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/block.h b/media/libvpx/libvpx/vp8/encoder/block.h
new file mode 100644
index 0000000000..1bc5ef75bc
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/block.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_BLOCK_H_
+#define VPX_VP8_ENCODER_BLOCK_H_
+
+#include "vp8/common/onyx.h"
+#include "vp8/common/blockd.h"
+#include "vp8/common/entropymv.h"
+#include "vp8/common/entropy.h"
+#include "vpx_ports/mem.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_MODES 20
+#define MAX_ERROR_BINS 1024
+
+/* motion search site */
+typedef struct {
+ MV mv;
+ int offset;
+} search_site;
+
+typedef struct block {
+ /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
+ short *src_diff;
+ short *coeff;
+
+ /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
+ short *quant;
+ short *quant_fast;
+ short *quant_shift;
+ short *zbin;
+ short *zrun_zbin_boost;
+ short *round;
+
+ /* Zbin Over Quant value */
+ short zbin_extra;
+
+ unsigned char **base_src;
+ int src;
+ int src_stride;
+} BLOCK;
+
+typedef struct {
+ int count;
+ struct {
+ B_PREDICTION_MODE mode;
+ int_mv mv;
+ } bmi[16];
+} PARTITION_INFO;
+
+typedef struct macroblock {
+ DECLARE_ALIGNED(16, short, src_diff[400]); /* 25 blocks Y,U,V,Y2 */
+ DECLARE_ALIGNED(16, short, coeff[400]); /* 25 blocks Y,U,V,Y2 */
+ DECLARE_ALIGNED(16, unsigned char, thismb[256]);
+
+ unsigned char *thismb_ptr;
+ /* 16 Y, 4 U, 4 V, 1 DC 2nd order block */
+ BLOCK block[25];
+
+ YV12_BUFFER_CONFIG src;
+
+ MACROBLOCKD e_mbd;
+ PARTITION_INFO *partition_info; /* work pointer */
+ PARTITION_INFO *pi; /* Corresponds to upper left visible macroblock */
+ PARTITION_INFO *pip; /* Base of allocated array */
+
+ int ref_frame_cost[MAX_REF_FRAMES];
+
+ search_site *ss;
+ int ss_count;
+ int searches_per_step;
+
+ int errorperbit;
+ int sadperbit16;
+ int sadperbit4;
+ int rddiv;
+ int rdmult;
+ unsigned int *mb_activity_ptr;
+ int *mb_norm_activity_ptr;
+ signed int act_zbin_adj;
+ signed int last_act_zbin_adj;
+
+ int *mvcost[2];
+ int *mvsadcost[2];
+ int (*mbmode_cost)[MB_MODE_COUNT];
+ int (*intra_uv_mode_cost)[MB_MODE_COUNT];
+ int (*bmode_costs)[10][10];
+ int *inter_bmode_costs;
+ int (*token_costs)[COEF_BANDS][PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
+
+ /* These define limits to motion vector components to prevent
+ * them from extending outside the UMV borders.
+ */
+ int mv_col_min;
+ int mv_col_max;
+ int mv_row_min;
+ int mv_row_max;
+
+ int skip;
+
+ unsigned int encode_breakout;
+
+ signed char *gf_active_ptr;
+
+ unsigned char *active_ptr;
+ MV_CONTEXT *mvc;
+
+ int optimize;
+ int q_index;
+ int is_skin;
+ int denoise_zeromv;
+
+#if CONFIG_TEMPORAL_DENOISING
+ int increase_denoising;
+ MB_PREDICTION_MODE best_sse_inter_mode;
+ int_mv best_sse_mv;
+ MV_REFERENCE_FRAME best_reference_frame;
+ MV_REFERENCE_FRAME best_zeromv_reference_frame;
+ unsigned char need_to_clamp_best_mvs;
+#endif
+
+ int skip_true_count;
+ unsigned int coef_counts[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [MAX_ENTROPY_TOKENS];
+ unsigned int MVcount[2][MVvals]; /* (row,col) MV cts this frame */
+ int ymode_count[VP8_YMODES]; /* intra MB type cts this frame */
+ int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
+ int64_t prediction_error;
+ int64_t intra_error;
+ int count_mb_ref_frame_usage[MAX_REF_FRAMES];
+
+ int rd_thresh_mult[MAX_MODES];
+ int rd_threshes[MAX_MODES];
+ unsigned int mbs_tested_so_far;
+ unsigned int mode_test_hit_counts[MAX_MODES];
+ int zbin_mode_boost_enabled;
+ int zbin_mode_boost;
+ int last_zbin_mode_boost;
+
+ int last_zbin_over_quant;
+ int zbin_over_quant;
+ int error_bins[MAX_ERROR_BINS];
+
+ void (*short_fdct4x4)(short *input, short *output, int pitch);
+ void (*short_fdct8x4)(short *input, short *output, int pitch);
+ void (*short_walsh4x4)(short *input, short *output, int pitch);
+ void (*quantize_b)(BLOCK *b, BLOCKD *d);
+
+ unsigned int mbs_zero_last_dot_suppress;
+ int zero_last_dot_suppress;
+} MACROBLOCK;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_BLOCK_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/boolhuff.c b/media/libvpx/libvpx/vp8/encoder/boolhuff.c
new file mode 100644
index 0000000000..819c2f22a0
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/boolhuff.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "boolhuff.h"
+
+#if defined(SECTIONBITS_OUTPUT)
+unsigned __int64 Sectionbits[500];
+
+#endif
+
+const unsigned int vp8_prob_cost[256] = {
+ 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129,
+ 1099, 1072, 1046, 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858,
+ 843, 829, 816, 803, 790, 778, 767, 755, 744, 733, 723, 713, 703,
+ 693, 684, 675, 666, 657, 649, 641, 633, 625, 617, 609, 602, 594,
+ 587, 580, 573, 567, 560, 553, 547, 541, 534, 528, 522, 516, 511,
+ 505, 499, 494, 488, 483, 477, 472, 467, 462, 457, 452, 447, 442,
+ 437, 433, 428, 424, 419, 415, 410, 406, 401, 397, 393, 389, 385,
+ 381, 377, 373, 369, 365, 361, 357, 353, 349, 346, 342, 338, 335,
+ 331, 328, 324, 321, 317, 314, 311, 307, 304, 301, 297, 294, 291,
+ 288, 285, 281, 278, 275, 272, 269, 266, 263, 260, 257, 255, 252,
+ 249, 246, 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216,
+ 214, 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184,
+ 181, 179, 177, 174, 172, 170, 168, 165, 163, 161, 159, 156, 154,
+ 152, 150, 148, 145, 143, 141, 139, 137, 135, 133, 131, 129, 127,
+ 125, 123, 121, 119, 117, 115, 113, 111, 109, 107, 105, 103, 101,
+ 99, 97, 95, 93, 92, 90, 88, 86, 84, 82, 81, 79, 77,
+ 75, 73, 72, 70, 68, 66, 65, 63, 61, 60, 58, 56, 55,
+ 53, 51, 50, 48, 46, 45, 43, 41, 40, 38, 37, 35, 33,
+ 32, 30, 29, 27, 25, 24, 22, 21, 19, 18, 16, 15, 13,
+ 12, 10, 9, 7, 6, 4, 3, 1, 1
+};
+
+void vp8_start_encode(BOOL_CODER *bc, unsigned char *source,
+ unsigned char *source_end) {
+ bc->lowvalue = 0;
+ bc->range = 255;
+ bc->count = -24;
+ bc->buffer = source;
+ bc->buffer_end = source_end;
+ bc->pos = 0;
+}
+
+void vp8_stop_encode(BOOL_CODER *bc) {
+ int i;
+
+ for (i = 0; i < 32; ++i) vp8_encode_bool(bc, 0, 128);
+}
+
+void vp8_encode_value(BOOL_CODER *bc, int data, int bits) {
+ int bit;
+
+ for (bit = bits - 1; bit >= 0; bit--) {
+ vp8_encode_bool(bc, (1 & (data >> bit)), 0x80);
+ }
+}
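
A usage sketch of this coder's API; demo() is hypothetical and assumes the file is built inside libvpx so that boolhuff.h and the vp8_norm table resolve:

#include <stdio.h>
#include <string.h>

#include "boolhuff.h"

/* Hypothetical demo -- not part of libvpx. Encode a few symbols, flush,
 * and report how many bytes were produced. */
void demo(void) {
  unsigned char buf[64];
  struct vpx_internal_error_info err;
  BOOL_CODER bc;

  memset(&err, 0, sizeof(err));
  vp8_start_encode(&bc, buf, buf + sizeof(buf));
  bc.error = &err; /* consulted only if the buffer would overflow */

  vp8_encode_bool(&bc, 1, 128); /* a 'one' at p(zero) = 128/256 */
  vp8_encode_bool(&bc, 0, 200); /* a 'zero' that the model expects */
  vp8_encode_value(&bc, 5, 3);  /* three raw bits, MSB first */
  vp8_stop_encode(&bc);         /* output now sits in buf[0..bc.pos) */

  printf("%u bytes written\n", bc.pos);
}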
diff --git a/media/libvpx/libvpx/vp8/encoder/boolhuff.h b/media/libvpx/libvpx/vp8/encoder/boolhuff.h
new file mode 100644
index 0000000000..a8c536b99c
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/boolhuff.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/****************************************************************************
+ *
+ * Module Title : boolhuff.h
+ *
+ * Description : Bool Coder header file.
+ *
+ ****************************************************************************/
+#ifndef VPX_VP8_ENCODER_BOOLHUFF_H_
+#define VPX_VP8_ENCODER_BOOLHUFF_H_
+
+#include "vpx_ports/mem.h"
+#include "vpx/internal/vpx_codec_internal.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ unsigned int lowvalue;
+ unsigned int range;
+ int count;
+ unsigned int pos;
+ unsigned char *buffer;
+ unsigned char *buffer_end;
+ struct vpx_internal_error_info *error;
+} BOOL_CODER;
+
+void vp8_start_encode(BOOL_CODER *bc, unsigned char *source,
+ unsigned char *source_end);
+
+void vp8_encode_value(BOOL_CODER *bc, int data, int bits);
+void vp8_stop_encode(BOOL_CODER *bc);
+extern const unsigned int vp8_prob_cost[256];
+
+DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
+
+static int validate_buffer(const unsigned char *start, size_t len,
+ const unsigned char *end,
+ struct vpx_internal_error_info *error) {
+ if (start + len > start && start + len < end) {
+ return 1;
+ } else {
+ vpx_internal_error(error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt partition ");
+ }
+
+ return 0;
+}
+static void vp8_encode_bool(BOOL_CODER *bc, int bit, int probability) {
+ unsigned int split;
+ int count = bc->count;
+ unsigned int range = bc->range;
+ unsigned int lowvalue = bc->lowvalue;
+ int shift;
+
+ split = 1 + (((range - 1) * probability) >> 8);
+
+ range = split;
+
+ if (bit) {
+ lowvalue += split;
+ range = bc->range - split;
+ }
+
+ shift = vp8_norm[range];
+
+ range <<= shift;
+ count += shift;
+
+ if (count >= 0) {
+ int offset = shift - count;
+
+ if ((lowvalue << (offset - 1)) & 0x80000000) {
+ int x = bc->pos - 1;
+
+ while (x >= 0 && bc->buffer[x] == 0xff) {
+ bc->buffer[x] = (unsigned char)0;
+ x--;
+ }
+
+ bc->buffer[x] += 1;
+ }
+
+ validate_buffer(bc->buffer + bc->pos, 1, bc->buffer_end, bc->error);
+ bc->buffer[bc->pos++] = (lowvalue >> (24 - offset) & 0xff);
+
+ shift = count;
+ lowvalue = (int)(((uint64_t)lowvalue << offset) & 0xffffff);
+ count -= 8;
+ }
+
+ lowvalue <<= shift;
+ bc->count = count;
+ bc->lowvalue = lowvalue;
+ bc->range = range;
+}
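
The first line of vp8_encode_bool() splits the current range in proportion to the zero-probability. A quick numeric check of that formula in isolation (values assumed):

#include <stdio.h>

int main(void) {
  unsigned int range = 255;       /* fresh coder state */
  unsigned int probability = 128; /* p(zero) = 50% */
  unsigned int split = 1 + (((range - 1) * probability) >> 8);

  printf("split = %u\n", split); /* 128: the zero symbol keeps [0,128) */
  return 0;
}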
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_BOOLHUFF_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/copy_c.c b/media/libvpx/libvpx/vp8/encoder/copy_c.c
new file mode 100644
index 0000000000..4746125245
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/copy_c.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "./vp8_rtcd.h"
+#include "vpx/vpx_integer.h"
+
+/* Copy 2 macroblocks to a buffer */
+void vp8_copy32xn_c(const unsigned char *src_ptr, int src_stride,
+ unsigned char *dst_ptr, int dst_stride, int height) {
+ int r;
+
+ for (r = 0; r < height; ++r) {
+ memcpy(dst_ptr, src_ptr, 32);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/dct.c b/media/libvpx/libvpx/vp8/encoder/dct.c
new file mode 100644
index 0000000000..7d214eafb0
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/dct.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "./vp8_rtcd.h"
+
+void vp8_short_fdct4x4_c(short *input, short *output, int pitch) {
+ int i;
+ int a1, b1, c1, d1;
+ short *ip = input;
+ short *op = output;
+
+ for (i = 0; i < 4; ++i) {
+ a1 = ((ip[0] + ip[3]) * 8);
+ b1 = ((ip[1] + ip[2]) * 8);
+ c1 = ((ip[1] - ip[2]) * 8);
+ d1 = ((ip[0] - ip[3]) * 8);
+
+ op[0] = a1 + b1;
+ op[2] = a1 - b1;
+
+ op[1] = (c1 * 2217 + d1 * 5352 + 14500) >> 12;
+ op[3] = (d1 * 2217 - c1 * 5352 + 7500) >> 12;
+
+ ip += pitch / 2;
+ op += 4;
+ }
+ ip = output;
+ op = output;
+ for (i = 0; i < 4; ++i) {
+ a1 = ip[0] + ip[12];
+ b1 = ip[4] + ip[8];
+ c1 = ip[4] - ip[8];
+ d1 = ip[0] - ip[12];
+
+ op[0] = (a1 + b1 + 7) >> 4;
+ op[8] = (a1 - b1 + 7) >> 4;
+
+ op[4] = ((c1 * 2217 + d1 * 5352 + 12000) >> 16) + (d1 != 0);
+ op[12] = (d1 * 2217 - c1 * 5352 + 51000) >> 16;
+
+ ip++;
+ op++;
+ }
+}
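
The constants 2217 and 5352 look opaque; they appear to be x4096 fixed-point values of sqrt(2)*sin(pi/8) and sqrt(2)*cos(pi/8), the rotation in the odd butterfly (my reading, not stated in the source). A quick check:

#include <math.h>
#include <stdio.h>

int main(void) {
  const double pi = acos(-1.0);
  printf("%.1f\n", sqrt(2.0) * sin(pi / 8.0) * 4096.0); /* ~2216.7 */
  printf("%.1f\n", sqrt(2.0) * cos(pi / 8.0) * 4096.0); /* ~5351.7 */
  return 0;
}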
+
+void vp8_short_fdct8x4_c(short *input, short *output, int pitch) {
+ vp8_short_fdct4x4_c(input, output, pitch);
+ vp8_short_fdct4x4_c(input + 4, output + 16, pitch);
+}
+
+void vp8_short_walsh4x4_c(short *input, short *output, int pitch) {
+ int i;
+ int a1, b1, c1, d1;
+ int a2, b2, c2, d2;
+ short *ip = input;
+ short *op = output;
+
+ for (i = 0; i < 4; ++i) {
+ a1 = ((ip[0] + ip[2]) * 4);
+ d1 = ((ip[1] + ip[3]) * 4);
+ c1 = ((ip[1] - ip[3]) * 4);
+ b1 = ((ip[0] - ip[2]) * 4);
+
+ op[0] = a1 + d1 + (a1 != 0);
+ op[1] = b1 + c1;
+ op[2] = b1 - c1;
+ op[3] = a1 - d1;
+ ip += pitch / 2;
+ op += 4;
+ }
+
+ ip = output;
+ op = output;
+
+ for (i = 0; i < 4; ++i) {
+ a1 = ip[0] + ip[8];
+ d1 = ip[4] + ip[12];
+ c1 = ip[4] - ip[12];
+ b1 = ip[0] - ip[8];
+
+ a2 = a1 + d1;
+ b2 = b1 + c1;
+ c2 = b1 - c1;
+ d2 = a1 - d1;
+
+ a2 += a2 < 0;
+ b2 += b2 < 0;
+ c2 += c2 < 0;
+ d2 += d2 < 0;
+
+ op[0] = (a2 + 3) >> 3;
+ op[4] = (b2 + 3) >> 3;
+ op[8] = (c2 + 3) >> 3;
+ op[12] = (d2 + 3) >> 3;
+
+ ip++;
+ op++;
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/dct_value_cost.h b/media/libvpx/libvpx/vp8/encoder/dct_value_cost.h
new file mode 100644
index 0000000000..0cd6cb4e65
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/dct_value_cost.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_DCT_VALUE_COST_H_
+#define VPX_VP8_ENCODER_DCT_VALUE_COST_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Generated file, included by tokenize.c */
+/* Values generated by fill_value_tokens() */
+
+static const short dct_value_cost[2048 * 2] = {
+ 8285, 8277, 8267, 8259, 8253, 8245, 8226, 8218, 8212, 8204, 8194, 8186, 8180,
+ 8172, 8150, 8142, 8136, 8128, 8118, 8110, 8104, 8096, 8077, 8069, 8063, 8055,
+ 8045, 8037, 8031, 8023, 7997, 7989, 7983, 7975, 7965, 7957, 7951, 7943, 7924,
+ 7916, 7910, 7902, 7892, 7884, 7878, 7870, 7848, 7840, 7834, 7826, 7816, 7808,
+ 7802, 7794, 7775, 7767, 7761, 7753, 7743, 7735, 7729, 7721, 7923, 7915, 7909,
+ 7901, 7891, 7883, 7877, 7869, 7850, 7842, 7836, 7828, 7818, 7810, 7804, 7796,
+ 7774, 7766, 7760, 7752, 7742, 7734, 7728, 7720, 7701, 7693, 7687, 7679, 7669,
+ 7661, 7655, 7647, 7621, 7613, 7607, 7599, 7589, 7581, 7575, 7567, 7548, 7540,
+ 7534, 7526, 7516, 7508, 7502, 7494, 7472, 7464, 7458, 7450, 7440, 7432, 7426,
+ 7418, 7399, 7391, 7385, 7377, 7367, 7359, 7353, 7345, 7479, 7471, 7465, 7457,
+ 7447, 7439, 7433, 7425, 7406, 7398, 7392, 7384, 7374, 7366, 7360, 7352, 7330,
+ 7322, 7316, 7308, 7298, 7290, 7284, 7276, 7257, 7249, 7243, 7235, 7225, 7217,
+ 7211, 7203, 7177, 7169, 7163, 7155, 7145, 7137, 7131, 7123, 7104, 7096, 7090,
+ 7082, 7072, 7064, 7058, 7050, 7028, 7020, 7014, 7006, 6996, 6988, 6982, 6974,
+ 6955, 6947, 6941, 6933, 6923, 6915, 6909, 6901, 7632, 7624, 7618, 7610, 7600,
+ 7592, 7586, 7578, 7559, 7551, 7545, 7537, 7527, 7519, 7513, 7505, 7483, 7475,
+ 7469, 7461, 7451, 7443, 7437, 7429, 7410, 7402, 7396, 7388, 7378, 7370, 7364,
+ 7356, 7330, 7322, 7316, 7308, 7298, 7290, 7284, 7276, 7257, 7249, 7243, 7235,
+ 7225, 7217, 7211, 7203, 7181, 7173, 7167, 7159, 7149, 7141, 7135, 7127, 7108,
+ 7100, 7094, 7086, 7076, 7068, 7062, 7054, 7188, 7180, 7174, 7166, 7156, 7148,
+ 7142, 7134, 7115, 7107, 7101, 7093, 7083, 7075, 7069, 7061, 7039, 7031, 7025,
+ 7017, 7007, 6999, 6993, 6985, 6966, 6958, 6952, 6944, 6934, 6926, 6920, 6912,
+ 6886, 6878, 6872, 6864, 6854, 6846, 6840, 6832, 6813, 6805, 6799, 6791, 6781,
+ 6773, 6767, 6759, 6737, 6729, 6723, 6715, 6705, 6697, 6691, 6683, 6664, 6656,
+ 6650, 6642, 6632, 6624, 6618, 6610, 6812, 6804, 6798, 6790, 6780, 6772, 6766,
+ 6758, 6739, 6731, 6725, 6717, 6707, 6699, 6693, 6685, 6663, 6655, 6649, 6641,
+ 6631, 6623, 6617, 6609, 6590, 6582, 6576, 6568, 6558, 6550, 6544, 6536, 6510,
+ 6502, 6496, 6488, 6478, 6470, 6464, 6456, 6437, 6429, 6423, 6415, 6405, 6397,
+ 6391, 6383, 6361, 6353, 6347, 6339, 6329, 6321, 6315, 6307, 6288, 6280, 6274,
+ 6266, 6256, 6248, 6242, 6234, 6368, 6360, 6354, 6346, 6336, 6328, 6322, 6314,
+ 6295, 6287, 6281, 6273, 6263, 6255, 6249, 6241, 6219, 6211, 6205, 6197, 6187,
+ 6179, 6173, 6165, 6146, 6138, 6132, 6124, 6114, 6106, 6100, 6092, 6066, 6058,
+ 6052, 6044, 6034, 6026, 6020, 6012, 5993, 5985, 5979, 5971, 5961, 5953, 5947,
+ 5939, 5917, 5909, 5903, 5895, 5885, 5877, 5871, 5863, 5844, 5836, 5830, 5822,
+ 5812, 5804, 5798, 5790, 6697, 6689, 6683, 6675, 6665, 6657, 6651, 6643, 6624,
+ 6616, 6610, 6602, 6592, 6584, 6578, 6570, 6548, 6540, 6534, 6526, 6516, 6508,
+ 6502, 6494, 6475, 6467, 6461, 6453, 6443, 6435, 6429, 6421, 6395, 6387, 6381,
+ 6373, 6363, 6355, 6349, 6341, 6322, 6314, 6308, 6300, 6290, 6282, 6276, 6268,
+ 6246, 6238, 6232, 6224, 6214, 6206, 6200, 6192, 6173, 6165, 6159, 6151, 6141,
+ 6133, 6127, 6119, 6253, 6245, 6239, 6231, 6221, 6213, 6207, 6199, 6180, 6172,
+ 6166, 6158, 6148, 6140, 6134, 6126, 6104, 6096, 6090, 6082, 6072, 6064, 6058,
+ 6050, 6031, 6023, 6017, 6009, 5999, 5991, 5985, 5977, 5951, 5943, 5937, 5929,
+ 5919, 5911, 5905, 5897, 5878, 5870, 5864, 5856, 5846, 5838, 5832, 5824, 5802,
+ 5794, 5788, 5780, 5770, 5762, 5756, 5748, 5729, 5721, 5715, 5707, 5697, 5689,
+ 5683, 5675, 5877, 5869, 5863, 5855, 5845, 5837, 5831, 5823, 5804, 5796, 5790,
+ 5782, 5772, 5764, 5758, 5750, 5728, 5720, 5714, 5706, 5696, 5688, 5682, 5674,
+ 5655, 5647, 5641, 5633, 5623, 5615, 5609, 5601, 5575, 5567, 5561, 5553, 5543,
+ 5535, 5529, 5521, 5502, 5494, 5488, 5480, 5470, 5462, 5456, 5448, 5426, 5418,
+ 5412, 5404, 5394, 5386, 5380, 5372, 5353, 5345, 5339, 5331, 5321, 5313, 5307,
+ 5299, 5433, 5425, 5419, 5411, 5401, 5393, 5387, 5379, 5360, 5352, 5346, 5338,
+ 5328, 5320, 5314, 5306, 5284, 5276, 5270, 5262, 5252, 5244, 5238, 5230, 5211,
+ 5203, 5197, 5189, 5179, 5171, 5165, 5157, 5131, 5123, 5117, 5109, 5099, 5091,
+ 5085, 5077, 5058, 5050, 5044, 5036, 5026, 5018, 5012, 5004, 4982, 4974, 4968,
+ 4960, 4950, 4942, 4936, 4928, 4909, 4901, 4895, 4887, 4877, 4869, 4863, 4855,
+ 5586, 5578, 5572, 5564, 5554, 5546, 5540, 5532, 5513, 5505, 5499, 5491, 5481,
+ 5473, 5467, 5459, 5437, 5429, 5423, 5415, 5405, 5397, 5391, 5383, 5364, 5356,
+ 5350, 5342, 5332, 5324, 5318, 5310, 5284, 5276, 5270, 5262, 5252, 5244, 5238,
+ 5230, 5211, 5203, 5197, 5189, 5179, 5171, 5165, 5157, 5135, 5127, 5121, 5113,
+ 5103, 5095, 5089, 5081, 5062, 5054, 5048, 5040, 5030, 5022, 5016, 5008, 5142,
+ 5134, 5128, 5120, 5110, 5102, 5096, 5088, 5069, 5061, 5055, 5047, 5037, 5029,
+ 5023, 5015, 4993, 4985, 4979, 4971, 4961, 4953, 4947, 4939, 4920, 4912, 4906,
+ 4898, 4888, 4880, 4874, 4866, 4840, 4832, 4826, 4818, 4808, 4800, 4794, 4786,
+ 4767, 4759, 4753, 4745, 4735, 4727, 4721, 4713, 4691, 4683, 4677, 4669, 4659,
+ 4651, 4645, 4637, 4618, 4610, 4604, 4596, 4586, 4578, 4572, 4564, 4766, 4758,
+ 4752, 4744, 4734, 4726, 4720, 4712, 4693, 4685, 4679, 4671, 4661, 4653, 4647,
+ 4639, 4617, 4609, 4603, 4595, 4585, 4577, 4571, 4563, 4544, 4536, 4530, 4522,
+ 4512, 4504, 4498, 4490, 4464, 4456, 4450, 4442, 4432, 4424, 4418, 4410, 4391,
+ 4383, 4377, 4369, 4359, 4351, 4345, 4337, 4315, 4307, 4301, 4293, 4283, 4275,
+ 4269, 4261, 4242, 4234, 4228, 4220, 4210, 4202, 4196, 4188, 4322, 4314, 4308,
+ 4300, 4290, 4282, 4276, 4268, 4249, 4241, 4235, 4227, 4217, 4209, 4203, 4195,
+ 4173, 4165, 4159, 4151, 4141, 4133, 4127, 4119, 4100, 4092, 4086, 4078, 4068,
+ 4060, 4054, 4046, 4020, 4012, 4006, 3998, 3988, 3980, 3974, 3966, 3947, 3939,
+ 3933, 3925, 3915, 3907, 3901, 3893, 3871, 3863, 3857, 3849, 3839, 3831, 3825,
+ 3817, 3798, 3790, 3784, 3776, 3766, 3758, 3752, 3744, 6697, 6689, 6683, 6675,
+ 6665, 6657, 6651, 6643, 6624, 6616, 6610, 6602, 6592, 6584, 6578, 6570, 6548,
+ 6540, 6534, 6526, 6516, 6508, 6502, 6494, 6475, 6467, 6461, 6453, 6443, 6435,
+ 6429, 6421, 6395, 6387, 6381, 6373, 6363, 6355, 6349, 6341, 6322, 6314, 6308,
+ 6300, 6290, 6282, 6276, 6268, 6246, 6238, 6232, 6224, 6214, 6206, 6200, 6192,
+ 6173, 6165, 6159, 6151, 6141, 6133, 6127, 6119, 6253, 6245, 6239, 6231, 6221,
+ 6213, 6207, 6199, 6180, 6172, 6166, 6158, 6148, 6140, 6134, 6126, 6104, 6096,
+ 6090, 6082, 6072, 6064, 6058, 6050, 6031, 6023, 6017, 6009, 5999, 5991, 5985,
+ 5977, 5951, 5943, 5937, 5929, 5919, 5911, 5905, 5897, 5878, 5870, 5864, 5856,
+ 5846, 5838, 5832, 5824, 5802, 5794, 5788, 5780, 5770, 5762, 5756, 5748, 5729,
+ 5721, 5715, 5707, 5697, 5689, 5683, 5675, 5877, 5869, 5863, 5855, 5845, 5837,
+ 5831, 5823, 5804, 5796, 5790, 5782, 5772, 5764, 5758, 5750, 5728, 5720, 5714,
+ 5706, 5696, 5688, 5682, 5674, 5655, 5647, 5641, 5633, 5623, 5615, 5609, 5601,
+ 5575, 5567, 5561, 5553, 5543, 5535, 5529, 5521, 5502, 5494, 5488, 5480, 5470,
+ 5462, 5456, 5448, 5426, 5418, 5412, 5404, 5394, 5386, 5380, 5372, 5353, 5345,
+ 5339, 5331, 5321, 5313, 5307, 5299, 5433, 5425, 5419, 5411, 5401, 5393, 5387,
+ 5379, 5360, 5352, 5346, 5338, 5328, 5320, 5314, 5306, 5284, 5276, 5270, 5262,
+ 5252, 5244, 5238, 5230, 5211, 5203, 5197, 5189, 5179, 5171, 5165, 5157, 5131,
+ 5123, 5117, 5109, 5099, 5091, 5085, 5077, 5058, 5050, 5044, 5036, 5026, 5018,
+ 5012, 5004, 4982, 4974, 4968, 4960, 4950, 4942, 4936, 4928, 4909, 4901, 4895,
+ 4887, 4877, 4869, 4863, 4855, 5586, 5578, 5572, 5564, 5554, 5546, 5540, 5532,
+ 5513, 5505, 5499, 5491, 5481, 5473, 5467, 5459, 5437, 5429, 5423, 5415, 5405,
+ 5397, 5391, 5383, 5364, 5356, 5350, 5342, 5332, 5324, 5318, 5310, 5284, 5276,
+ 5270, 5262, 5252, 5244, 5238, 5230, 5211, 5203, 5197, 5189, 5179, 5171, 5165,
+ 5157, 5135, 5127, 5121, 5113, 5103, 5095, 5089, 5081, 5062, 5054, 5048, 5040,
+ 5030, 5022, 5016, 5008, 5142, 5134, 5128, 5120, 5110, 5102, 5096, 5088, 5069,
+ 5061, 5055, 5047, 5037, 5029, 5023, 5015, 4993, 4985, 4979, 4971, 4961, 4953,
+ 4947, 4939, 4920, 4912, 4906, 4898, 4888, 4880, 4874, 4866, 4840, 4832, 4826,
+ 4818, 4808, 4800, 4794, 4786, 4767, 4759, 4753, 4745, 4735, 4727, 4721, 4713,
+ 4691, 4683, 4677, 4669, 4659, 4651, 4645, 4637, 4618, 4610, 4604, 4596, 4586,
+ 4578, 4572, 4564, 4766, 4758, 4752, 4744, 4734, 4726, 4720, 4712, 4693, 4685,
+ 4679, 4671, 4661, 4653, 4647, 4639, 4617, 4609, 4603, 4595, 4585, 4577, 4571,
+ 4563, 4544, 4536, 4530, 4522, 4512, 4504, 4498, 4490, 4464, 4456, 4450, 4442,
+ 4432, 4424, 4418, 4410, 4391, 4383, 4377, 4369, 4359, 4351, 4345, 4337, 4315,
+ 4307, 4301, 4293, 4283, 4275, 4269, 4261, 4242, 4234, 4228, 4220, 4210, 4202,
+ 4196, 4188, 4322, 4314, 4308, 4300, 4290, 4282, 4276, 4268, 4249, 4241, 4235,
+ 4227, 4217, 4209, 4203, 4195, 4173, 4165, 4159, 4151, 4141, 4133, 4127, 4119,
+ 4100, 4092, 4086, 4078, 4068, 4060, 4054, 4046, 4020, 4012, 4006, 3998, 3988,
+ 3980, 3974, 3966, 3947, 3939, 3933, 3925, 3915, 3907, 3901, 3893, 3871, 3863,
+ 3857, 3849, 3839, 3831, 3825, 3817, 3798, 3790, 3784, 3776, 3766, 3758, 3752,
+ 3744, 4651, 4643, 4637, 4629, 4619, 4611, 4605, 4597, 4578, 4570, 4564, 4556,
+ 4546, 4538, 4532, 4524, 4502, 4494, 4488, 4480, 4470, 4462, 4456, 4448, 4429,
+ 4421, 4415, 4407, 4397, 4389, 4383, 4375, 4349, 4341, 4335, 4327, 4317, 4309,
+ 4303, 4295, 4276, 4268, 4262, 4254, 4244, 4236, 4230, 4222, 4200, 4192, 4186,
+ 4178, 4168, 4160, 4154, 4146, 4127, 4119, 4113, 4105, 4095, 4087, 4081, 4073,
+ 4207, 4199, 4193, 4185, 4175, 4167, 4161, 4153, 4134, 4126, 4120, 4112, 4102,
+ 4094, 4088, 4080, 4058, 4050, 4044, 4036, 4026, 4018, 4012, 4004, 3985, 3977,
+ 3971, 3963, 3953, 3945, 3939, 3931, 3905, 3897, 3891, 3883, 3873, 3865, 3859,
+ 3851, 3832, 3824, 3818, 3810, 3800, 3792, 3786, 3778, 3756, 3748, 3742, 3734,
+ 3724, 3716, 3710, 3702, 3683, 3675, 3669, 3661, 3651, 3643, 3637, 3629, 3831,
+ 3823, 3817, 3809, 3799, 3791, 3785, 3777, 3758, 3750, 3744, 3736, 3726, 3718,
+ 3712, 3704, 3682, 3674, 3668, 3660, 3650, 3642, 3636, 3628, 3609, 3601, 3595,
+ 3587, 3577, 3569, 3563, 3555, 3529, 3521, 3515, 3507, 3497, 3489, 3483, 3475,
+ 3456, 3448, 3442, 3434, 3424, 3416, 3410, 3402, 3380, 3372, 3366, 3358, 3348,
+ 3340, 3334, 3326, 3307, 3299, 3293, 3285, 3275, 3267, 3261, 3253, 3387, 3379,
+ 3373, 3365, 3355, 3347, 3341, 3333, 3314, 3306, 3300, 3292, 3282, 3274, 3268,
+ 3260, 3238, 3230, 3224, 3216, 3206, 3198, 3192, 3184, 3165, 3157, 3151, 3143,
+ 3133, 3125, 3119, 3111, 3085, 3077, 3071, 3063, 3053, 3045, 3039, 3031, 3012,
+ 3004, 2998, 2990, 2980, 2972, 2966, 2958, 2936, 2928, 2922, 2914, 2904, 2896,
+ 2890, 2882, 2863, 2855, 2849, 2841, 2831, 2823, 2817, 2809, 3540, 3532, 3526,
+ 3518, 3508, 3500, 3494, 3486, 3467, 3459, 3453, 3445, 3435, 3427, 3421, 3413,
+ 3391, 3383, 3377, 3369, 3359, 3351, 3345, 3337, 3318, 3310, 3304, 3296, 3286,
+ 3278, 3272, 3264, 3238, 3230, 3224, 3216, 3206, 3198, 3192, 3184, 3165, 3157,
+ 3151, 3143, 3133, 3125, 3119, 3111, 3089, 3081, 3075, 3067, 3057, 3049, 3043,
+ 3035, 3016, 3008, 3002, 2994, 2984, 2976, 2970, 2962, 3096, 3088, 3082, 3074,
+ 3064, 3056, 3050, 3042, 3023, 3015, 3009, 3001, 2991, 2983, 2977, 2969, 2947,
+ 2939, 2933, 2925, 2915, 2907, 2901, 2893, 2874, 2866, 2860, 2852, 2842, 2834,
+ 2828, 2820, 2794, 2786, 2780, 2772, 2762, 2754, 2748, 2740, 2721, 2713, 2707,
+ 2699, 2689, 2681, 2675, 2667, 2645, 2637, 2631, 2623, 2613, 2605, 2599, 2591,
+ 2572, 2564, 2558, 2550, 2540, 2532, 2526, 2518, 2720, 2712, 2706, 2698, 2688,
+ 2680, 2674, 2666, 2647, 2639, 2633, 2625, 2615, 2607, 2601, 2593, 2571, 2563,
+ 2557, 2549, 2539, 2531, 2525, 2517, 2498, 2490, 2484, 2476, 2466, 2458, 2452,
+ 2444, 2418, 2410, 2404, 2396, 2386, 2378, 2372, 2364, 2345, 2337, 2331, 2323,
+ 2313, 2305, 2299, 2291, 2269, 2261, 2255, 2247, 2237, 2229, 2223, 2215, 2196,
+ 2188, 2182, 2174, 2164, 2156, 2150, 2142, 2276, 2268, 2262, 2254, 2244, 2236,
+ 2230, 2222, 2203, 2195, 2189, 2181, 2171, 2163, 2157, 2149, 2127, 2119, 2113,
+ 2105, 2095, 2087, 2081, 2073, 2054, 2046, 2040, 2032, 2022, 2014, 2008, 2000,
+ 1974, 1966, 1960, 1952, 1942, 1934, 1928, 1920, 1901, 1893, 1887, 1879, 1869,
+ 1861, 1855, 1847, 1825, 1817, 1811, 1803, 1793, 1785, 1779, 1771, 1752, 1744,
+ 1738, 1730, 1720, 1712, 1706, 1698, 1897, 1883, 1860, 1846, 1819, 1805, 1782,
+ 1768, 1723, 1709, 1686, 1672, 1645, 1631, 1608, 1594, 1574, 1560, 1537, 1523,
+ 1496, 1482, 1459, 1445, 1400, 1386, 1363, 1349, 1322, 1308, 1285, 1271, 1608,
+ 1565, 1535, 1492, 1446, 1403, 1373, 1330, 1312, 1269, 1239, 1196, 1150, 1107,
+ 1077, 1034, 1291, 1218, 1171, 1098, 1015, 942, 895, 822, 953, 850, 729,
+ 626, 618, 431, 257, 257, 257, 257, 0, 255, 255, 255, 255, 429,
+ 616, 624, 727, 848, 951, 820, 893, 940, 1013, 1096, 1169, 1216, 1289,
+ 1032, 1075, 1105, 1148, 1194, 1237, 1267, 1310, 1328, 1371, 1401, 1444, 1490,
+ 1533, 1563, 1606, 1269, 1283, 1306, 1320, 1347, 1361, 1384, 1398, 1443, 1457,
+ 1480, 1494, 1521, 1535, 1558, 1572, 1592, 1606, 1629, 1643, 1670, 1684, 1707,
+ 1721, 1766, 1780, 1803, 1817, 1844, 1858, 1881, 1895, 1696, 1704, 1710, 1718,
+ 1728, 1736, 1742, 1750, 1769, 1777, 1783, 1791, 1801, 1809, 1815, 1823, 1845,
+ 1853, 1859, 1867, 1877, 1885, 1891, 1899, 1918, 1926, 1932, 1940, 1950, 1958,
+ 1964, 1972, 1998, 2006, 2012, 2020, 2030, 2038, 2044, 2052, 2071, 2079, 2085,
+ 2093, 2103, 2111, 2117, 2125, 2147, 2155, 2161, 2169, 2179, 2187, 2193, 2201,
+ 2220, 2228, 2234, 2242, 2252, 2260, 2266, 2274, 2140, 2148, 2154, 2162, 2172,
+ 2180, 2186, 2194, 2213, 2221, 2227, 2235, 2245, 2253, 2259, 2267, 2289, 2297,
+ 2303, 2311, 2321, 2329, 2335, 2343, 2362, 2370, 2376, 2384, 2394, 2402, 2408,
+ 2416, 2442, 2450, 2456, 2464, 2474, 2482, 2488, 2496, 2515, 2523, 2529, 2537,
+ 2547, 2555, 2561, 2569, 2591, 2599, 2605, 2613, 2623, 2631, 2637, 2645, 2664,
+ 2672, 2678, 2686, 2696, 2704, 2710, 2718, 2516, 2524, 2530, 2538, 2548, 2556,
+ 2562, 2570, 2589, 2597, 2603, 2611, 2621, 2629, 2635, 2643, 2665, 2673, 2679,
+ 2687, 2697, 2705, 2711, 2719, 2738, 2746, 2752, 2760, 2770, 2778, 2784, 2792,
+ 2818, 2826, 2832, 2840, 2850, 2858, 2864, 2872, 2891, 2899, 2905, 2913, 2923,
+ 2931, 2937, 2945, 2967, 2975, 2981, 2989, 2999, 3007, 3013, 3021, 3040, 3048,
+ 3054, 3062, 3072, 3080, 3086, 3094, 2960, 2968, 2974, 2982, 2992, 3000, 3006,
+ 3014, 3033, 3041, 3047, 3055, 3065, 3073, 3079, 3087, 3109, 3117, 3123, 3131,
+ 3141, 3149, 3155, 3163, 3182, 3190, 3196, 3204, 3214, 3222, 3228, 3236, 3262,
+ 3270, 3276, 3284, 3294, 3302, 3308, 3316, 3335, 3343, 3349, 3357, 3367, 3375,
+ 3381, 3389, 3411, 3419, 3425, 3433, 3443, 3451, 3457, 3465, 3484, 3492, 3498,
+ 3506, 3516, 3524, 3530, 3538, 2807, 2815, 2821, 2829, 2839, 2847, 2853, 2861,
+ 2880, 2888, 2894, 2902, 2912, 2920, 2926, 2934, 2956, 2964, 2970, 2978, 2988,
+ 2996, 3002, 3010, 3029, 3037, 3043, 3051, 3061, 3069, 3075, 3083, 3109, 3117,
+ 3123, 3131, 3141, 3149, 3155, 3163, 3182, 3190, 3196, 3204, 3214, 3222, 3228,
+ 3236, 3258, 3266, 3272, 3280, 3290, 3298, 3304, 3312, 3331, 3339, 3345, 3353,
+ 3363, 3371, 3377, 3385, 3251, 3259, 3265, 3273, 3283, 3291, 3297, 3305, 3324,
+ 3332, 3338, 3346, 3356, 3364, 3370, 3378, 3400, 3408, 3414, 3422, 3432, 3440,
+ 3446, 3454, 3473, 3481, 3487, 3495, 3505, 3513, 3519, 3527, 3553, 3561, 3567,
+ 3575, 3585, 3593, 3599, 3607, 3626, 3634, 3640, 3648, 3658, 3666, 3672, 3680,
+ 3702, 3710, 3716, 3724, 3734, 3742, 3748, 3756, 3775, 3783, 3789, 3797, 3807,
+ 3815, 3821, 3829, 3627, 3635, 3641, 3649, 3659, 3667, 3673, 3681, 3700, 3708,
+ 3714, 3722, 3732, 3740, 3746, 3754, 3776, 3784, 3790, 3798, 3808, 3816, 3822,
+ 3830, 3849, 3857, 3863, 3871, 3881, 3889, 3895, 3903, 3929, 3937, 3943, 3951,
+ 3961, 3969, 3975, 3983, 4002, 4010, 4016, 4024, 4034, 4042, 4048, 4056, 4078,
+ 4086, 4092, 4100, 4110, 4118, 4124, 4132, 4151, 4159, 4165, 4173, 4183, 4191,
+ 4197, 4205, 4071, 4079, 4085, 4093, 4103, 4111, 4117, 4125, 4144, 4152, 4158,
+ 4166, 4176, 4184, 4190, 4198, 4220, 4228, 4234, 4242, 4252, 4260, 4266, 4274,
+ 4293, 4301, 4307, 4315, 4325, 4333, 4339, 4347, 4373, 4381, 4387, 4395, 4405,
+ 4413, 4419, 4427, 4446, 4454, 4460, 4468, 4478, 4486, 4492, 4500, 4522, 4530,
+ 4536, 4544, 4554, 4562, 4568, 4576, 4595, 4603, 4609, 4617, 4627, 4635, 4641,
+ 4649, 3742, 3750, 3756, 3764, 3774, 3782, 3788, 3796, 3815, 3823, 3829, 3837,
+ 3847, 3855, 3861, 3869, 3891, 3899, 3905, 3913, 3923, 3931, 3937, 3945, 3964,
+ 3972, 3978, 3986, 3996, 4004, 4010, 4018, 4044, 4052, 4058, 4066, 4076, 4084,
+ 4090, 4098, 4117, 4125, 4131, 4139, 4149, 4157, 4163, 4171, 4193, 4201, 4207,
+ 4215, 4225, 4233, 4239, 4247, 4266, 4274, 4280, 4288, 4298, 4306, 4312, 4320,
+ 4186, 4194, 4200, 4208, 4218, 4226, 4232, 4240, 4259, 4267, 4273, 4281, 4291,
+ 4299, 4305, 4313, 4335, 4343, 4349, 4357, 4367, 4375, 4381, 4389, 4408, 4416,
+ 4422, 4430, 4440, 4448, 4454, 4462, 4488, 4496, 4502, 4510, 4520, 4528, 4534,
+ 4542, 4561, 4569, 4575, 4583, 4593, 4601, 4607, 4615, 4637, 4645, 4651, 4659,
+ 4669, 4677, 4683, 4691, 4710, 4718, 4724, 4732, 4742, 4750, 4756, 4764, 4562,
+ 4570, 4576, 4584, 4594, 4602, 4608, 4616, 4635, 4643, 4649, 4657, 4667, 4675,
+ 4681, 4689, 4711, 4719, 4725, 4733, 4743, 4751, 4757, 4765, 4784, 4792, 4798,
+ 4806, 4816, 4824, 4830, 4838, 4864, 4872, 4878, 4886, 4896, 4904, 4910, 4918,
+ 4937, 4945, 4951, 4959, 4969, 4977, 4983, 4991, 5013, 5021, 5027, 5035, 5045,
+ 5053, 5059, 5067, 5086, 5094, 5100, 5108, 5118, 5126, 5132, 5140, 5006, 5014,
+ 5020, 5028, 5038, 5046, 5052, 5060, 5079, 5087, 5093, 5101, 5111, 5119, 5125,
+ 5133, 5155, 5163, 5169, 5177, 5187, 5195, 5201, 5209, 5228, 5236, 5242, 5250,
+ 5260, 5268, 5274, 5282, 5308, 5316, 5322, 5330, 5340, 5348, 5354, 5362, 5381,
+ 5389, 5395, 5403, 5413, 5421, 5427, 5435, 5457, 5465, 5471, 5479, 5489, 5497,
+ 5503, 5511, 5530, 5538, 5544, 5552, 5562, 5570, 5576, 5584, 4853, 4861, 4867,
+ 4875, 4885, 4893, 4899, 4907, 4926, 4934, 4940, 4948, 4958, 4966, 4972, 4980,
+ 5002, 5010, 5016, 5024, 5034, 5042, 5048, 5056, 5075, 5083, 5089, 5097, 5107,
+ 5115, 5121, 5129, 5155, 5163, 5169, 5177, 5187, 5195, 5201, 5209, 5228, 5236,
+ 5242, 5250, 5260, 5268, 5274, 5282, 5304, 5312, 5318, 5326, 5336, 5344, 5350,
+ 5358, 5377, 5385, 5391, 5399, 5409, 5417, 5423, 5431, 5297, 5305, 5311, 5319,
+ 5329, 5337, 5343, 5351, 5370, 5378, 5384, 5392, 5402, 5410, 5416, 5424, 5446,
+ 5454, 5460, 5468, 5478, 5486, 5492, 5500, 5519, 5527, 5533, 5541, 5551, 5559,
+ 5565, 5573, 5599, 5607, 5613, 5621, 5631, 5639, 5645, 5653, 5672, 5680, 5686,
+ 5694, 5704, 5712, 5718, 5726, 5748, 5756, 5762, 5770, 5780, 5788, 5794, 5802,
+ 5821, 5829, 5835, 5843, 5853, 5861, 5867, 5875, 5673, 5681, 5687, 5695, 5705,
+ 5713, 5719, 5727, 5746, 5754, 5760, 5768, 5778, 5786, 5792, 5800, 5822, 5830,
+ 5836, 5844, 5854, 5862, 5868, 5876, 5895, 5903, 5909, 5917, 5927, 5935, 5941,
+ 5949, 5975, 5983, 5989, 5997, 6007, 6015, 6021, 6029, 6048, 6056, 6062, 6070,
+ 6080, 6088, 6094, 6102, 6124, 6132, 6138, 6146, 6156, 6164, 6170, 6178, 6197,
+ 6205, 6211, 6219, 6229, 6237, 6243, 6251, 6117, 6125, 6131, 6139, 6149, 6157,
+ 6163, 6171, 6190, 6198, 6204, 6212, 6222, 6230, 6236, 6244, 6266, 6274, 6280,
+ 6288, 6298, 6306, 6312, 6320, 6339, 6347, 6353, 6361, 6371, 6379, 6385, 6393,
+ 6419, 6427, 6433, 6441, 6451, 6459, 6465, 6473, 6492, 6500, 6506, 6514, 6524,
+ 6532, 6538, 6546, 6568, 6576, 6582, 6590, 6600, 6608, 6614, 6622, 6641, 6649,
+ 6655, 6663, 6673, 6681, 6687, 6695, 3742, 3750, 3756, 3764, 3774, 3782, 3788,
+ 3796, 3815, 3823, 3829, 3837, 3847, 3855, 3861, 3869, 3891, 3899, 3905, 3913,
+ 3923, 3931, 3937, 3945, 3964, 3972, 3978, 3986, 3996, 4004, 4010, 4018, 4044,
+ 4052, 4058, 4066, 4076, 4084, 4090, 4098, 4117, 4125, 4131, 4139, 4149, 4157,
+ 4163, 4171, 4193, 4201, 4207, 4215, 4225, 4233, 4239, 4247, 4266, 4274, 4280,
+ 4288, 4298, 4306, 4312, 4320, 4186, 4194, 4200, 4208, 4218, 4226, 4232, 4240,
+ 4259, 4267, 4273, 4281, 4291, 4299, 4305, 4313, 4335, 4343, 4349, 4357, 4367,
+ 4375, 4381, 4389, 4408, 4416, 4422, 4430, 4440, 4448, 4454, 4462, 4488, 4496,
+ 4502, 4510, 4520, 4528, 4534, 4542, 4561, 4569, 4575, 4583, 4593, 4601, 4607,
+ 4615, 4637, 4645, 4651, 4659, 4669, 4677, 4683, 4691, 4710, 4718, 4724, 4732,
+ 4742, 4750, 4756, 4764, 4562, 4570, 4576, 4584, 4594, 4602, 4608, 4616, 4635,
+ 4643, 4649, 4657, 4667, 4675, 4681, 4689, 4711, 4719, 4725, 4733, 4743, 4751,
+ 4757, 4765, 4784, 4792, 4798, 4806, 4816, 4824, 4830, 4838, 4864, 4872, 4878,
+ 4886, 4896, 4904, 4910, 4918, 4937, 4945, 4951, 4959, 4969, 4977, 4983, 4991,
+ 5013, 5021, 5027, 5035, 5045, 5053, 5059, 5067, 5086, 5094, 5100, 5108, 5118,
+ 5126, 5132, 5140, 5006, 5014, 5020, 5028, 5038, 5046, 5052, 5060, 5079, 5087,
+ 5093, 5101, 5111, 5119, 5125, 5133, 5155, 5163, 5169, 5177, 5187, 5195, 5201,
+ 5209, 5228, 5236, 5242, 5250, 5260, 5268, 5274, 5282, 5308, 5316, 5322, 5330,
+ 5340, 5348, 5354, 5362, 5381, 5389, 5395, 5403, 5413, 5421, 5427, 5435, 5457,
+ 5465, 5471, 5479, 5489, 5497, 5503, 5511, 5530, 5538, 5544, 5552, 5562, 5570,
+ 5576, 5584, 4853, 4861, 4867, 4875, 4885, 4893, 4899, 4907, 4926, 4934, 4940,
+ 4948, 4958, 4966, 4972, 4980, 5002, 5010, 5016, 5024, 5034, 5042, 5048, 5056,
+ 5075, 5083, 5089, 5097, 5107, 5115, 5121, 5129, 5155, 5163, 5169, 5177, 5187,
+ 5195, 5201, 5209, 5228, 5236, 5242, 5250, 5260, 5268, 5274, 5282, 5304, 5312,
+ 5318, 5326, 5336, 5344, 5350, 5358, 5377, 5385, 5391, 5399, 5409, 5417, 5423,
+ 5431, 5297, 5305, 5311, 5319, 5329, 5337, 5343, 5351, 5370, 5378, 5384, 5392,
+ 5402, 5410, 5416, 5424, 5446, 5454, 5460, 5468, 5478, 5486, 5492, 5500, 5519,
+ 5527, 5533, 5541, 5551, 5559, 5565, 5573, 5599, 5607, 5613, 5621, 5631, 5639,
+ 5645, 5653, 5672, 5680, 5686, 5694, 5704, 5712, 5718, 5726, 5748, 5756, 5762,
+ 5770, 5780, 5788, 5794, 5802, 5821, 5829, 5835, 5843, 5853, 5861, 5867, 5875,
+ 5673, 5681, 5687, 5695, 5705, 5713, 5719, 5727, 5746, 5754, 5760, 5768, 5778,
+ 5786, 5792, 5800, 5822, 5830, 5836, 5844, 5854, 5862, 5868, 5876, 5895, 5903,
+ 5909, 5917, 5927, 5935, 5941, 5949, 5975, 5983, 5989, 5997, 6007, 6015, 6021,
+ 6029, 6048, 6056, 6062, 6070, 6080, 6088, 6094, 6102, 6124, 6132, 6138, 6146,
+ 6156, 6164, 6170, 6178, 6197, 6205, 6211, 6219, 6229, 6237, 6243, 6251, 6117,
+ 6125, 6131, 6139, 6149, 6157, 6163, 6171, 6190, 6198, 6204, 6212, 6222, 6230,
+ 6236, 6244, 6266, 6274, 6280, 6288, 6298, 6306, 6312, 6320, 6339, 6347, 6353,
+ 6361, 6371, 6379, 6385, 6393, 6419, 6427, 6433, 6441, 6451, 6459, 6465, 6473,
+ 6492, 6500, 6506, 6514, 6524, 6532, 6538, 6546, 6568, 6576, 6582, 6590, 6600,
+ 6608, 6614, 6622, 6641, 6649, 6655, 6663, 6673, 6681, 6687, 6695, 5788, 5796,
+ 5802, 5810, 5820, 5828, 5834, 5842, 5861, 5869, 5875, 5883, 5893, 5901, 5907,
+ 5915, 5937, 5945, 5951, 5959, 5969, 5977, 5983, 5991, 6010, 6018, 6024, 6032,
+ 6042, 6050, 6056, 6064, 6090, 6098, 6104, 6112, 6122, 6130, 6136, 6144, 6163,
+ 6171, 6177, 6185, 6195, 6203, 6209, 6217, 6239, 6247, 6253, 6261, 6271, 6279,
+ 6285, 6293, 6312, 6320, 6326, 6334, 6344, 6352, 6358, 6366, 6232, 6240, 6246,
+ 6254, 6264, 6272, 6278, 6286, 6305, 6313, 6319, 6327, 6337, 6345, 6351, 6359,
+ 6381, 6389, 6395, 6403, 6413, 6421, 6427, 6435, 6454, 6462, 6468, 6476, 6486,
+ 6494, 6500, 6508, 6534, 6542, 6548, 6556, 6566, 6574, 6580, 6588, 6607, 6615,
+ 6621, 6629, 6639, 6647, 6653, 6661, 6683, 6691, 6697, 6705, 6715, 6723, 6729,
+ 6737, 6756, 6764, 6770, 6778, 6788, 6796, 6802, 6810, 6608, 6616, 6622, 6630,
+ 6640, 6648, 6654, 6662, 6681, 6689, 6695, 6703, 6713, 6721, 6727, 6735, 6757,
+ 6765, 6771, 6779, 6789, 6797, 6803, 6811, 6830, 6838, 6844, 6852, 6862, 6870,
+ 6876, 6884, 6910, 6918, 6924, 6932, 6942, 6950, 6956, 6964, 6983, 6991, 6997,
+ 7005, 7015, 7023, 7029, 7037, 7059, 7067, 7073, 7081, 7091, 7099, 7105, 7113,
+ 7132, 7140, 7146, 7154, 7164, 7172, 7178, 7186, 7052, 7060, 7066, 7074, 7084,
+ 7092, 7098, 7106, 7125, 7133, 7139, 7147, 7157, 7165, 7171, 7179, 7201, 7209,
+ 7215, 7223, 7233, 7241, 7247, 7255, 7274, 7282, 7288, 7296, 7306, 7314, 7320,
+ 7328, 7354, 7362, 7368, 7376, 7386, 7394, 7400, 7408, 7427, 7435, 7441, 7449,
+ 7459, 7467, 7473, 7481, 7503, 7511, 7517, 7525, 7535, 7543, 7549, 7557, 7576,
+ 7584, 7590, 7598, 7608, 7616, 7622, 7630, 6899, 6907, 6913, 6921, 6931, 6939,
+ 6945, 6953, 6972, 6980, 6986, 6994, 7004, 7012, 7018, 7026, 7048, 7056, 7062,
+ 7070, 7080, 7088, 7094, 7102, 7121, 7129, 7135, 7143, 7153, 7161, 7167, 7175,
+ 7201, 7209, 7215, 7223, 7233, 7241, 7247, 7255, 7274, 7282, 7288, 7296, 7306,
+ 7314, 7320, 7328, 7350, 7358, 7364, 7372, 7382, 7390, 7396, 7404, 7423, 7431,
+ 7437, 7445, 7455, 7463, 7469, 7477, 7343, 7351, 7357, 7365, 7375, 7383, 7389,
+ 7397, 7416, 7424, 7430, 7438, 7448, 7456, 7462, 7470, 7492, 7500, 7506, 7514,
+ 7524, 7532, 7538, 7546, 7565, 7573, 7579, 7587, 7597, 7605, 7611, 7619, 7645,
+ 7653, 7659, 7667, 7677, 7685, 7691, 7699, 7718, 7726, 7732, 7740, 7750, 7758,
+ 7764, 7772, 7794, 7802, 7808, 7816, 7826, 7834, 7840, 7848, 7867, 7875, 7881,
+ 7889, 7899, 7907, 7913, 7921, 7719, 7727, 7733, 7741, 7751, 7759, 7765, 7773,
+ 7792, 7800, 7806, 7814, 7824, 7832, 7838, 7846, 7868, 7876, 7882, 7890, 7900,
+ 7908, 7914, 7922, 7941, 7949, 7955, 7963, 7973, 7981, 7987, 7995, 8021, 8029,
+ 8035, 8043, 8053, 8061, 8067, 8075, 8094, 8102, 8108, 8116, 8126, 8134, 8140,
+ 8148, 8170, 8178, 8184, 8192, 8202, 8210, 8216, 8224, 8243, 8251, 8257, 8265,
+ 8275
+};
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_DCT_VALUE_COST_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/dct_value_tokens.h b/media/libvpx/libvpx/vp8/encoder/dct_value_tokens.h
new file mode 100644
index 0000000000..5cc4505f09
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/dct_value_tokens.h
@@ -0,0 +1,848 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_DCT_VALUE_TOKENS_H_
+#define VPX_VP8_ENCODER_DCT_VALUE_TOKENS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Generated file, included by tokenize.c */
+/* Values generated by fill_value_tokens() */
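+/*
+ * A minimal sketch (not part of the upstream sources) of how one entry of
+ * this table can be derived, assuming the usual VP8 coefficient-token
+ * scheme: tokens 0..4 code magnitudes 0..4 directly, and tokens 5..10 are
+ * the extra-bit categories with base magnitudes 5, 7, 11, 19, 35 and 67.
+ * The Extra field packs the offset from the category base above the sign
+ * bit.  Illustrative only, hence compiled out:
+ */
+#if 0
+static TOKENVALUE sketch_value_token(int v) {
+  static const int base[11] = { 0, 1, 2, 3, 4, 5, 7, 11, 19, 35, 67 };
+  const int sign = v < 0;
+  const int a = sign ? -v : v;
+  TOKENVALUE t;
+  if (a <= 4) {
+    t.Token = a;    /* literal tokens 0..4 */
+    t.Extra = sign; /* only the sign bit */
+  } else {
+    int c = 10;
+    while (base[c] > a) --c; /* largest category whose base fits a */
+    t.Token = c;
+    t.Extra = ((a - base[c]) << 1) | sign; /* offset from base, then sign */
+  }
+  return t; /* e.g. v == -2048 -> { 10, 3963 }, the first entry below */
+}
+#endif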
+
+static const TOKENVALUE dct_value_tokens[2048 * 2] = {
+ { 10, 3963 }, { 10, 3961 }, { 10, 3959 }, { 10, 3957 }, { 10, 3955 },
+ { 10, 3953 }, { 10, 3951 }, { 10, 3949 }, { 10, 3947 }, { 10, 3945 },
+ { 10, 3943 }, { 10, 3941 }, { 10, 3939 }, { 10, 3937 }, { 10, 3935 },
+ { 10, 3933 }, { 10, 3931 }, { 10, 3929 }, { 10, 3927 }, { 10, 3925 },
+ { 10, 3923 }, { 10, 3921 }, { 10, 3919 }, { 10, 3917 }, { 10, 3915 },
+ { 10, 3913 }, { 10, 3911 }, { 10, 3909 }, { 10, 3907 }, { 10, 3905 },
+ { 10, 3903 }, { 10, 3901 }, { 10, 3899 }, { 10, 3897 }, { 10, 3895 },
+ { 10, 3893 }, { 10, 3891 }, { 10, 3889 }, { 10, 3887 }, { 10, 3885 },
+ { 10, 3883 }, { 10, 3881 }, { 10, 3879 }, { 10, 3877 }, { 10, 3875 },
+ { 10, 3873 }, { 10, 3871 }, { 10, 3869 }, { 10, 3867 }, { 10, 3865 },
+ { 10, 3863 }, { 10, 3861 }, { 10, 3859 }, { 10, 3857 }, { 10, 3855 },
+ { 10, 3853 }, { 10, 3851 }, { 10, 3849 }, { 10, 3847 }, { 10, 3845 },
+ { 10, 3843 }, { 10, 3841 }, { 10, 3839 }, { 10, 3837 }, { 10, 3835 },
+ { 10, 3833 }, { 10, 3831 }, { 10, 3829 }, { 10, 3827 }, { 10, 3825 },
+ { 10, 3823 }, { 10, 3821 }, { 10, 3819 }, { 10, 3817 }, { 10, 3815 },
+ { 10, 3813 }, { 10, 3811 }, { 10, 3809 }, { 10, 3807 }, { 10, 3805 },
+ { 10, 3803 }, { 10, 3801 }, { 10, 3799 }, { 10, 3797 }, { 10, 3795 },
+ { 10, 3793 }, { 10, 3791 }, { 10, 3789 }, { 10, 3787 }, { 10, 3785 },
+ { 10, 3783 }, { 10, 3781 }, { 10, 3779 }, { 10, 3777 }, { 10, 3775 },
+ { 10, 3773 }, { 10, 3771 }, { 10, 3769 }, { 10, 3767 }, { 10, 3765 },
+ { 10, 3763 }, { 10, 3761 }, { 10, 3759 }, { 10, 3757 }, { 10, 3755 },
+ { 10, 3753 }, { 10, 3751 }, { 10, 3749 }, { 10, 3747 }, { 10, 3745 },
+ { 10, 3743 }, { 10, 3741 }, { 10, 3739 }, { 10, 3737 }, { 10, 3735 },
+ { 10, 3733 }, { 10, 3731 }, { 10, 3729 }, { 10, 3727 }, { 10, 3725 },
+ { 10, 3723 }, { 10, 3721 }, { 10, 3719 }, { 10, 3717 }, { 10, 3715 },
+ { 10, 3713 }, { 10, 3711 }, { 10, 3709 }, { 10, 3707 }, { 10, 3705 },
+ { 10, 3703 }, { 10, 3701 }, { 10, 3699 }, { 10, 3697 }, { 10, 3695 },
+ { 10, 3693 }, { 10, 3691 }, { 10, 3689 }, { 10, 3687 }, { 10, 3685 },
+ { 10, 3683 }, { 10, 3681 }, { 10, 3679 }, { 10, 3677 }, { 10, 3675 },
+ { 10, 3673 }, { 10, 3671 }, { 10, 3669 }, { 10, 3667 }, { 10, 3665 },
+ { 10, 3663 }, { 10, 3661 }, { 10, 3659 }, { 10, 3657 }, { 10, 3655 },
+ { 10, 3653 }, { 10, 3651 }, { 10, 3649 }, { 10, 3647 }, { 10, 3645 },
+ { 10, 3643 }, { 10, 3641 }, { 10, 3639 }, { 10, 3637 }, { 10, 3635 },
+ { 10, 3633 }, { 10, 3631 }, { 10, 3629 }, { 10, 3627 }, { 10, 3625 },
+ { 10, 3623 }, { 10, 3621 }, { 10, 3619 }, { 10, 3617 }, { 10, 3615 },
+ { 10, 3613 }, { 10, 3611 }, { 10, 3609 }, { 10, 3607 }, { 10, 3605 },
+ { 10, 3603 }, { 10, 3601 }, { 10, 3599 }, { 10, 3597 }, { 10, 3595 },
+ { 10, 3593 }, { 10, 3591 }, { 10, 3589 }, { 10, 3587 }, { 10, 3585 },
+ { 10, 3583 }, { 10, 3581 }, { 10, 3579 }, { 10, 3577 }, { 10, 3575 },
+ { 10, 3573 }, { 10, 3571 }, { 10, 3569 }, { 10, 3567 }, { 10, 3565 },
+ { 10, 3563 }, { 10, 3561 }, { 10, 3559 }, { 10, 3557 }, { 10, 3555 },
+ { 10, 3553 }, { 10, 3551 }, { 10, 3549 }, { 10, 3547 }, { 10, 3545 },
+ { 10, 3543 }, { 10, 3541 }, { 10, 3539 }, { 10, 3537 }, { 10, 3535 },
+ { 10, 3533 }, { 10, 3531 }, { 10, 3529 }, { 10, 3527 }, { 10, 3525 },
+ { 10, 3523 }, { 10, 3521 }, { 10, 3519 }, { 10, 3517 }, { 10, 3515 },
+ { 10, 3513 }, { 10, 3511 }, { 10, 3509 }, { 10, 3507 }, { 10, 3505 },
+ { 10, 3503 }, { 10, 3501 }, { 10, 3499 }, { 10, 3497 }, { 10, 3495 },
+ { 10, 3493 }, { 10, 3491 }, { 10, 3489 }, { 10, 3487 }, { 10, 3485 },
+ { 10, 3483 }, { 10, 3481 }, { 10, 3479 }, { 10, 3477 }, { 10, 3475 },
+ { 10, 3473 }, { 10, 3471 }, { 10, 3469 }, { 10, 3467 }, { 10, 3465 },
+ { 10, 3463 }, { 10, 3461 }, { 10, 3459 }, { 10, 3457 }, { 10, 3455 },
+ { 10, 3453 }, { 10, 3451 }, { 10, 3449 }, { 10, 3447 }, { 10, 3445 },
+ { 10, 3443 }, { 10, 3441 }, { 10, 3439 }, { 10, 3437 }, { 10, 3435 },
+ { 10, 3433 }, { 10, 3431 }, { 10, 3429 }, { 10, 3427 }, { 10, 3425 },
+ { 10, 3423 }, { 10, 3421 }, { 10, 3419 }, { 10, 3417 }, { 10, 3415 },
+ { 10, 3413 }, { 10, 3411 }, { 10, 3409 }, { 10, 3407 }, { 10, 3405 },
+ { 10, 3403 }, { 10, 3401 }, { 10, 3399 }, { 10, 3397 }, { 10, 3395 },
+ { 10, 3393 }, { 10, 3391 }, { 10, 3389 }, { 10, 3387 }, { 10, 3385 },
+ { 10, 3383 }, { 10, 3381 }, { 10, 3379 }, { 10, 3377 }, { 10, 3375 },
+ { 10, 3373 }, { 10, 3371 }, { 10, 3369 }, { 10, 3367 }, { 10, 3365 },
+ { 10, 3363 }, { 10, 3361 }, { 10, 3359 }, { 10, 3357 }, { 10, 3355 },
+ { 10, 3353 }, { 10, 3351 }, { 10, 3349 }, { 10, 3347 }, { 10, 3345 },
+ { 10, 3343 }, { 10, 3341 }, { 10, 3339 }, { 10, 3337 }, { 10, 3335 },
+ { 10, 3333 }, { 10, 3331 }, { 10, 3329 }, { 10, 3327 }, { 10, 3325 },
+ { 10, 3323 }, { 10, 3321 }, { 10, 3319 }, { 10, 3317 }, { 10, 3315 },
+ { 10, 3313 }, { 10, 3311 }, { 10, 3309 }, { 10, 3307 }, { 10, 3305 },
+ { 10, 3303 }, { 10, 3301 }, { 10, 3299 }, { 10, 3297 }, { 10, 3295 },
+ { 10, 3293 }, { 10, 3291 }, { 10, 3289 }, { 10, 3287 }, { 10, 3285 },
+ { 10, 3283 }, { 10, 3281 }, { 10, 3279 }, { 10, 3277 }, { 10, 3275 },
+ { 10, 3273 }, { 10, 3271 }, { 10, 3269 }, { 10, 3267 }, { 10, 3265 },
+ { 10, 3263 }, { 10, 3261 }, { 10, 3259 }, { 10, 3257 }, { 10, 3255 },
+ { 10, 3253 }, { 10, 3251 }, { 10, 3249 }, { 10, 3247 }, { 10, 3245 },
+ { 10, 3243 }, { 10, 3241 }, { 10, 3239 }, { 10, 3237 }, { 10, 3235 },
+ { 10, 3233 }, { 10, 3231 }, { 10, 3229 }, { 10, 3227 }, { 10, 3225 },
+ { 10, 3223 }, { 10, 3221 }, { 10, 3219 }, { 10, 3217 }, { 10, 3215 },
+ { 10, 3213 }, { 10, 3211 }, { 10, 3209 }, { 10, 3207 }, { 10, 3205 },
+ { 10, 3203 }, { 10, 3201 }, { 10, 3199 }, { 10, 3197 }, { 10, 3195 },
+ { 10, 3193 }, { 10, 3191 }, { 10, 3189 }, { 10, 3187 }, { 10, 3185 },
+ { 10, 3183 }, { 10, 3181 }, { 10, 3179 }, { 10, 3177 }, { 10, 3175 },
+ { 10, 3173 }, { 10, 3171 }, { 10, 3169 }, { 10, 3167 }, { 10, 3165 },
+ { 10, 3163 }, { 10, 3161 }, { 10, 3159 }, { 10, 3157 }, { 10, 3155 },
+ { 10, 3153 }, { 10, 3151 }, { 10, 3149 }, { 10, 3147 }, { 10, 3145 },
+ { 10, 3143 }, { 10, 3141 }, { 10, 3139 }, { 10, 3137 }, { 10, 3135 },
+ { 10, 3133 }, { 10, 3131 }, { 10, 3129 }, { 10, 3127 }, { 10, 3125 },
+ { 10, 3123 }, { 10, 3121 }, { 10, 3119 }, { 10, 3117 }, { 10, 3115 },
+ { 10, 3113 }, { 10, 3111 }, { 10, 3109 }, { 10, 3107 }, { 10, 3105 },
+ { 10, 3103 }, { 10, 3101 }, { 10, 3099 }, { 10, 3097 }, { 10, 3095 },
+ { 10, 3093 }, { 10, 3091 }, { 10, 3089 }, { 10, 3087 }, { 10, 3085 },
+ { 10, 3083 }, { 10, 3081 }, { 10, 3079 }, { 10, 3077 }, { 10, 3075 },
+ { 10, 3073 }, { 10, 3071 }, { 10, 3069 }, { 10, 3067 }, { 10, 3065 },
+ { 10, 3063 }, { 10, 3061 }, { 10, 3059 }, { 10, 3057 }, { 10, 3055 },
+ { 10, 3053 }, { 10, 3051 }, { 10, 3049 }, { 10, 3047 }, { 10, 3045 },
+ { 10, 3043 }, { 10, 3041 }, { 10, 3039 }, { 10, 3037 }, { 10, 3035 },
+ { 10, 3033 }, { 10, 3031 }, { 10, 3029 }, { 10, 3027 }, { 10, 3025 },
+ { 10, 3023 }, { 10, 3021 }, { 10, 3019 }, { 10, 3017 }, { 10, 3015 },
+ { 10, 3013 }, { 10, 3011 }, { 10, 3009 }, { 10, 3007 }, { 10, 3005 },
+ { 10, 3003 }, { 10, 3001 }, { 10, 2999 }, { 10, 2997 }, { 10, 2995 },
+ { 10, 2993 }, { 10, 2991 }, { 10, 2989 }, { 10, 2987 }, { 10, 2985 },
+ { 10, 2983 }, { 10, 2981 }, { 10, 2979 }, { 10, 2977 }, { 10, 2975 },
+ { 10, 2973 }, { 10, 2971 }, { 10, 2969 }, { 10, 2967 }, { 10, 2965 },
+ { 10, 2963 }, { 10, 2961 }, { 10, 2959 }, { 10, 2957 }, { 10, 2955 },
+ { 10, 2953 }, { 10, 2951 }, { 10, 2949 }, { 10, 2947 }, { 10, 2945 },
+ { 10, 2943 }, { 10, 2941 }, { 10, 2939 }, { 10, 2937 }, { 10, 2935 },
+ { 10, 2933 }, { 10, 2931 }, { 10, 2929 }, { 10, 2927 }, { 10, 2925 },
+ { 10, 2923 }, { 10, 2921 }, { 10, 2919 }, { 10, 2917 }, { 10, 2915 },
+ { 10, 2913 }, { 10, 2911 }, { 10, 2909 }, { 10, 2907 }, { 10, 2905 },
+ { 10, 2903 }, { 10, 2901 }, { 10, 2899 }, { 10, 2897 }, { 10, 2895 },
+ { 10, 2893 }, { 10, 2891 }, { 10, 2889 }, { 10, 2887 }, { 10, 2885 },
+ { 10, 2883 }, { 10, 2881 }, { 10, 2879 }, { 10, 2877 }, { 10, 2875 },
+ { 10, 2873 }, { 10, 2871 }, { 10, 2869 }, { 10, 2867 }, { 10, 2865 },
+ { 10, 2863 }, { 10, 2861 }, { 10, 2859 }, { 10, 2857 }, { 10, 2855 },
+ { 10, 2853 }, { 10, 2851 }, { 10, 2849 }, { 10, 2847 }, { 10, 2845 },
+ { 10, 2843 }, { 10, 2841 }, { 10, 2839 }, { 10, 2837 }, { 10, 2835 },
+ { 10, 2833 }, { 10, 2831 }, { 10, 2829 }, { 10, 2827 }, { 10, 2825 },
+ { 10, 2823 }, { 10, 2821 }, { 10, 2819 }, { 10, 2817 }, { 10, 2815 },
+ { 10, 2813 }, { 10, 2811 }, { 10, 2809 }, { 10, 2807 }, { 10, 2805 },
+ { 10, 2803 }, { 10, 2801 }, { 10, 2799 }, { 10, 2797 }, { 10, 2795 },
+ { 10, 2793 }, { 10, 2791 }, { 10, 2789 }, { 10, 2787 }, { 10, 2785 },
+ { 10, 2783 }, { 10, 2781 }, { 10, 2779 }, { 10, 2777 }, { 10, 2775 },
+ { 10, 2773 }, { 10, 2771 }, { 10, 2769 }, { 10, 2767 }, { 10, 2765 },
+ { 10, 2763 }, { 10, 2761 }, { 10, 2759 }, { 10, 2757 }, { 10, 2755 },
+ { 10, 2753 }, { 10, 2751 }, { 10, 2749 }, { 10, 2747 }, { 10, 2745 },
+ { 10, 2743 }, { 10, 2741 }, { 10, 2739 }, { 10, 2737 }, { 10, 2735 },
+ { 10, 2733 }, { 10, 2731 }, { 10, 2729 }, { 10, 2727 }, { 10, 2725 },
+ { 10, 2723 }, { 10, 2721 }, { 10, 2719 }, { 10, 2717 }, { 10, 2715 },
+ { 10, 2713 }, { 10, 2711 }, { 10, 2709 }, { 10, 2707 }, { 10, 2705 },
+ { 10, 2703 }, { 10, 2701 }, { 10, 2699 }, { 10, 2697 }, { 10, 2695 },
+ { 10, 2693 }, { 10, 2691 }, { 10, 2689 }, { 10, 2687 }, { 10, 2685 },
+ { 10, 2683 }, { 10, 2681 }, { 10, 2679 }, { 10, 2677 }, { 10, 2675 },
+ { 10, 2673 }, { 10, 2671 }, { 10, 2669 }, { 10, 2667 }, { 10, 2665 },
+ { 10, 2663 }, { 10, 2661 }, { 10, 2659 }, { 10, 2657 }, { 10, 2655 },
+ { 10, 2653 }, { 10, 2651 }, { 10, 2649 }, { 10, 2647 }, { 10, 2645 },
+ { 10, 2643 }, { 10, 2641 }, { 10, 2639 }, { 10, 2637 }, { 10, 2635 },
+ { 10, 2633 }, { 10, 2631 }, { 10, 2629 }, { 10, 2627 }, { 10, 2625 },
+ { 10, 2623 }, { 10, 2621 }, { 10, 2619 }, { 10, 2617 }, { 10, 2615 },
+ { 10, 2613 }, { 10, 2611 }, { 10, 2609 }, { 10, 2607 }, { 10, 2605 },
+ { 10, 2603 }, { 10, 2601 }, { 10, 2599 }, { 10, 2597 }, { 10, 2595 },
+ { 10, 2593 }, { 10, 2591 }, { 10, 2589 }, { 10, 2587 }, { 10, 2585 },
+ { 10, 2583 }, { 10, 2581 }, { 10, 2579 }, { 10, 2577 }, { 10, 2575 },
+ { 10, 2573 }, { 10, 2571 }, { 10, 2569 }, { 10, 2567 }, { 10, 2565 },
+ { 10, 2563 }, { 10, 2561 }, { 10, 2559 }, { 10, 2557 }, { 10, 2555 },
+ { 10, 2553 }, { 10, 2551 }, { 10, 2549 }, { 10, 2547 }, { 10, 2545 },
+ { 10, 2543 }, { 10, 2541 }, { 10, 2539 }, { 10, 2537 }, { 10, 2535 },
+ { 10, 2533 }, { 10, 2531 }, { 10, 2529 }, { 10, 2527 }, { 10, 2525 },
+ { 10, 2523 }, { 10, 2521 }, { 10, 2519 }, { 10, 2517 }, { 10, 2515 },
+ { 10, 2513 }, { 10, 2511 }, { 10, 2509 }, { 10, 2507 }, { 10, 2505 },
+ { 10, 2503 }, { 10, 2501 }, { 10, 2499 }, { 10, 2497 }, { 10, 2495 },
+ { 10, 2493 }, { 10, 2491 }, { 10, 2489 }, { 10, 2487 }, { 10, 2485 },
+ { 10, 2483 }, { 10, 2481 }, { 10, 2479 }, { 10, 2477 }, { 10, 2475 },
+ { 10, 2473 }, { 10, 2471 }, { 10, 2469 }, { 10, 2467 }, { 10, 2465 },
+ { 10, 2463 }, { 10, 2461 }, { 10, 2459 }, { 10, 2457 }, { 10, 2455 },
+ { 10, 2453 }, { 10, 2451 }, { 10, 2449 }, { 10, 2447 }, { 10, 2445 },
+ { 10, 2443 }, { 10, 2441 }, { 10, 2439 }, { 10, 2437 }, { 10, 2435 },
+ { 10, 2433 }, { 10, 2431 }, { 10, 2429 }, { 10, 2427 }, { 10, 2425 },
+ { 10, 2423 }, { 10, 2421 }, { 10, 2419 }, { 10, 2417 }, { 10, 2415 },
+ { 10, 2413 }, { 10, 2411 }, { 10, 2409 }, { 10, 2407 }, { 10, 2405 },
+ { 10, 2403 }, { 10, 2401 }, { 10, 2399 }, { 10, 2397 }, { 10, 2395 },
+ { 10, 2393 }, { 10, 2391 }, { 10, 2389 }, { 10, 2387 }, { 10, 2385 },
+ { 10, 2383 }, { 10, 2381 }, { 10, 2379 }, { 10, 2377 }, { 10, 2375 },
+ { 10, 2373 }, { 10, 2371 }, { 10, 2369 }, { 10, 2367 }, { 10, 2365 },
+ { 10, 2363 }, { 10, 2361 }, { 10, 2359 }, { 10, 2357 }, { 10, 2355 },
+ { 10, 2353 }, { 10, 2351 }, { 10, 2349 }, { 10, 2347 }, { 10, 2345 },
+ { 10, 2343 }, { 10, 2341 }, { 10, 2339 }, { 10, 2337 }, { 10, 2335 },
+ { 10, 2333 }, { 10, 2331 }, { 10, 2329 }, { 10, 2327 }, { 10, 2325 },
+ { 10, 2323 }, { 10, 2321 }, { 10, 2319 }, { 10, 2317 }, { 10, 2315 },
+ { 10, 2313 }, { 10, 2311 }, { 10, 2309 }, { 10, 2307 }, { 10, 2305 },
+ { 10, 2303 }, { 10, 2301 }, { 10, 2299 }, { 10, 2297 }, { 10, 2295 },
+ { 10, 2293 }, { 10, 2291 }, { 10, 2289 }, { 10, 2287 }, { 10, 2285 },
+ { 10, 2283 }, { 10, 2281 }, { 10, 2279 }, { 10, 2277 }, { 10, 2275 },
+ { 10, 2273 }, { 10, 2271 }, { 10, 2269 }, { 10, 2267 }, { 10, 2265 },
+ { 10, 2263 }, { 10, 2261 }, { 10, 2259 }, { 10, 2257 }, { 10, 2255 },
+ { 10, 2253 }, { 10, 2251 }, { 10, 2249 }, { 10, 2247 }, { 10, 2245 },
+ { 10, 2243 }, { 10, 2241 }, { 10, 2239 }, { 10, 2237 }, { 10, 2235 },
+ { 10, 2233 }, { 10, 2231 }, { 10, 2229 }, { 10, 2227 }, { 10, 2225 },
+ { 10, 2223 }, { 10, 2221 }, { 10, 2219 }, { 10, 2217 }, { 10, 2215 },
+ { 10, 2213 }, { 10, 2211 }, { 10, 2209 }, { 10, 2207 }, { 10, 2205 },
+ { 10, 2203 }, { 10, 2201 }, { 10, 2199 }, { 10, 2197 }, { 10, 2195 },
+ { 10, 2193 }, { 10, 2191 }, { 10, 2189 }, { 10, 2187 }, { 10, 2185 },
+ { 10, 2183 }, { 10, 2181 }, { 10, 2179 }, { 10, 2177 }, { 10, 2175 },
+ { 10, 2173 }, { 10, 2171 }, { 10, 2169 }, { 10, 2167 }, { 10, 2165 },
+ { 10, 2163 }, { 10, 2161 }, { 10, 2159 }, { 10, 2157 }, { 10, 2155 },
+ { 10, 2153 }, { 10, 2151 }, { 10, 2149 }, { 10, 2147 }, { 10, 2145 },
+ { 10, 2143 }, { 10, 2141 }, { 10, 2139 }, { 10, 2137 }, { 10, 2135 },
+ { 10, 2133 }, { 10, 2131 }, { 10, 2129 }, { 10, 2127 }, { 10, 2125 },
+ { 10, 2123 }, { 10, 2121 }, { 10, 2119 }, { 10, 2117 }, { 10, 2115 },
+ { 10, 2113 }, { 10, 2111 }, { 10, 2109 }, { 10, 2107 }, { 10, 2105 },
+ { 10, 2103 }, { 10, 2101 }, { 10, 2099 }, { 10, 2097 }, { 10, 2095 },
+ { 10, 2093 }, { 10, 2091 }, { 10, 2089 }, { 10, 2087 }, { 10, 2085 },
+ { 10, 2083 }, { 10, 2081 }, { 10, 2079 }, { 10, 2077 }, { 10, 2075 },
+ { 10, 2073 }, { 10, 2071 }, { 10, 2069 }, { 10, 2067 }, { 10, 2065 },
+ { 10, 2063 }, { 10, 2061 }, { 10, 2059 }, { 10, 2057 }, { 10, 2055 },
+ { 10, 2053 }, { 10, 2051 }, { 10, 2049 }, { 10, 2047 }, { 10, 2045 },
+ { 10, 2043 }, { 10, 2041 }, { 10, 2039 }, { 10, 2037 }, { 10, 2035 },
+ { 10, 2033 }, { 10, 2031 }, { 10, 2029 }, { 10, 2027 }, { 10, 2025 },
+ { 10, 2023 }, { 10, 2021 }, { 10, 2019 }, { 10, 2017 }, { 10, 2015 },
+ { 10, 2013 }, { 10, 2011 }, { 10, 2009 }, { 10, 2007 }, { 10, 2005 },
+ { 10, 2003 }, { 10, 2001 }, { 10, 1999 }, { 10, 1997 }, { 10, 1995 },
+ { 10, 1993 }, { 10, 1991 }, { 10, 1989 }, { 10, 1987 }, { 10, 1985 },
+ { 10, 1983 }, { 10, 1981 }, { 10, 1979 }, { 10, 1977 }, { 10, 1975 },
+ { 10, 1973 }, { 10, 1971 }, { 10, 1969 }, { 10, 1967 }, { 10, 1965 },
+ { 10, 1963 }, { 10, 1961 }, { 10, 1959 }, { 10, 1957 }, { 10, 1955 },
+ { 10, 1953 }, { 10, 1951 }, { 10, 1949 }, { 10, 1947 }, { 10, 1945 },
+ { 10, 1943 }, { 10, 1941 }, { 10, 1939 }, { 10, 1937 }, { 10, 1935 },
+ { 10, 1933 }, { 10, 1931 }, { 10, 1929 }, { 10, 1927 }, { 10, 1925 },
+ { 10, 1923 }, { 10, 1921 }, { 10, 1919 }, { 10, 1917 }, { 10, 1915 },
+ { 10, 1913 }, { 10, 1911 }, { 10, 1909 }, { 10, 1907 }, { 10, 1905 },
+ { 10, 1903 }, { 10, 1901 }, { 10, 1899 }, { 10, 1897 }, { 10, 1895 },
+ { 10, 1893 }, { 10, 1891 }, { 10, 1889 }, { 10, 1887 }, { 10, 1885 },
+ { 10, 1883 }, { 10, 1881 }, { 10, 1879 }, { 10, 1877 }, { 10, 1875 },
+ { 10, 1873 }, { 10, 1871 }, { 10, 1869 }, { 10, 1867 }, { 10, 1865 },
+ { 10, 1863 }, { 10, 1861 }, { 10, 1859 }, { 10, 1857 }, { 10, 1855 },
+ { 10, 1853 }, { 10, 1851 }, { 10, 1849 }, { 10, 1847 }, { 10, 1845 },
+ { 10, 1843 }, { 10, 1841 }, { 10, 1839 }, { 10, 1837 }, { 10, 1835 },
+ { 10, 1833 }, { 10, 1831 }, { 10, 1829 }, { 10, 1827 }, { 10, 1825 },
+ { 10, 1823 }, { 10, 1821 }, { 10, 1819 }, { 10, 1817 }, { 10, 1815 },
+ { 10, 1813 }, { 10, 1811 }, { 10, 1809 }, { 10, 1807 }, { 10, 1805 },
+ { 10, 1803 }, { 10, 1801 }, { 10, 1799 }, { 10, 1797 }, { 10, 1795 },
+ { 10, 1793 }, { 10, 1791 }, { 10, 1789 }, { 10, 1787 }, { 10, 1785 },
+ { 10, 1783 }, { 10, 1781 }, { 10, 1779 }, { 10, 1777 }, { 10, 1775 },
+ { 10, 1773 }, { 10, 1771 }, { 10, 1769 }, { 10, 1767 }, { 10, 1765 },
+ { 10, 1763 }, { 10, 1761 }, { 10, 1759 }, { 10, 1757 }, { 10, 1755 },
+ { 10, 1753 }, { 10, 1751 }, { 10, 1749 }, { 10, 1747 }, { 10, 1745 },
+ { 10, 1743 }, { 10, 1741 }, { 10, 1739 }, { 10, 1737 }, { 10, 1735 },
+ { 10, 1733 }, { 10, 1731 }, { 10, 1729 }, { 10, 1727 }, { 10, 1725 },
+ { 10, 1723 }, { 10, 1721 }, { 10, 1719 }, { 10, 1717 }, { 10, 1715 },
+ { 10, 1713 }, { 10, 1711 }, { 10, 1709 }, { 10, 1707 }, { 10, 1705 },
+ { 10, 1703 }, { 10, 1701 }, { 10, 1699 }, { 10, 1697 }, { 10, 1695 },
+ { 10, 1693 }, { 10, 1691 }, { 10, 1689 }, { 10, 1687 }, { 10, 1685 },
+ { 10, 1683 }, { 10, 1681 }, { 10, 1679 }, { 10, 1677 }, { 10, 1675 },
+ { 10, 1673 }, { 10, 1671 }, { 10, 1669 }, { 10, 1667 }, { 10, 1665 },
+ { 10, 1663 }, { 10, 1661 }, { 10, 1659 }, { 10, 1657 }, { 10, 1655 },
+ { 10, 1653 }, { 10, 1651 }, { 10, 1649 }, { 10, 1647 }, { 10, 1645 },
+ { 10, 1643 }, { 10, 1641 }, { 10, 1639 }, { 10, 1637 }, { 10, 1635 },
+ { 10, 1633 }, { 10, 1631 }, { 10, 1629 }, { 10, 1627 }, { 10, 1625 },
+ { 10, 1623 }, { 10, 1621 }, { 10, 1619 }, { 10, 1617 }, { 10, 1615 },
+ { 10, 1613 }, { 10, 1611 }, { 10, 1609 }, { 10, 1607 }, { 10, 1605 },
+ { 10, 1603 }, { 10, 1601 }, { 10, 1599 }, { 10, 1597 }, { 10, 1595 },
+ { 10, 1593 }, { 10, 1591 }, { 10, 1589 }, { 10, 1587 }, { 10, 1585 },
+ { 10, 1583 }, { 10, 1581 }, { 10, 1579 }, { 10, 1577 }, { 10, 1575 },
+ { 10, 1573 }, { 10, 1571 }, { 10, 1569 }, { 10, 1567 }, { 10, 1565 },
+ { 10, 1563 }, { 10, 1561 }, { 10, 1559 }, { 10, 1557 }, { 10, 1555 },
+ { 10, 1553 }, { 10, 1551 }, { 10, 1549 }, { 10, 1547 }, { 10, 1545 },
+ { 10, 1543 }, { 10, 1541 }, { 10, 1539 }, { 10, 1537 }, { 10, 1535 },
+ { 10, 1533 }, { 10, 1531 }, { 10, 1529 }, { 10, 1527 }, { 10, 1525 },
+ { 10, 1523 }, { 10, 1521 }, { 10, 1519 }, { 10, 1517 }, { 10, 1515 },
+ { 10, 1513 }, { 10, 1511 }, { 10, 1509 }, { 10, 1507 }, { 10, 1505 },
+ { 10, 1503 }, { 10, 1501 }, { 10, 1499 }, { 10, 1497 }, { 10, 1495 },
+ { 10, 1493 }, { 10, 1491 }, { 10, 1489 }, { 10, 1487 }, { 10, 1485 },
+ { 10, 1483 }, { 10, 1481 }, { 10, 1479 }, { 10, 1477 }, { 10, 1475 },
+ { 10, 1473 }, { 10, 1471 }, { 10, 1469 }, { 10, 1467 }, { 10, 1465 },
+ { 10, 1463 }, { 10, 1461 }, { 10, 1459 }, { 10, 1457 }, { 10, 1455 },
+ { 10, 1453 }, { 10, 1451 }, { 10, 1449 }, { 10, 1447 }, { 10, 1445 },
+ { 10, 1443 }, { 10, 1441 }, { 10, 1439 }, { 10, 1437 }, { 10, 1435 },
+ { 10, 1433 }, { 10, 1431 }, { 10, 1429 }, { 10, 1427 }, { 10, 1425 },
+ { 10, 1423 }, { 10, 1421 }, { 10, 1419 }, { 10, 1417 }, { 10, 1415 },
+ { 10, 1413 }, { 10, 1411 }, { 10, 1409 }, { 10, 1407 }, { 10, 1405 },
+ { 10, 1403 }, { 10, 1401 }, { 10, 1399 }, { 10, 1397 }, { 10, 1395 },
+ { 10, 1393 }, { 10, 1391 }, { 10, 1389 }, { 10, 1387 }, { 10, 1385 },
+ { 10, 1383 }, { 10, 1381 }, { 10, 1379 }, { 10, 1377 }, { 10, 1375 },
+ { 10, 1373 }, { 10, 1371 }, { 10, 1369 }, { 10, 1367 }, { 10, 1365 },
+ { 10, 1363 }, { 10, 1361 }, { 10, 1359 }, { 10, 1357 }, { 10, 1355 },
+ { 10, 1353 }, { 10, 1351 }, { 10, 1349 }, { 10, 1347 }, { 10, 1345 },
+ { 10, 1343 }, { 10, 1341 }, { 10, 1339 }, { 10, 1337 }, { 10, 1335 },
+ { 10, 1333 }, { 10, 1331 }, { 10, 1329 }, { 10, 1327 }, { 10, 1325 },
+ { 10, 1323 }, { 10, 1321 }, { 10, 1319 }, { 10, 1317 }, { 10, 1315 },
+ { 10, 1313 }, { 10, 1311 }, { 10, 1309 }, { 10, 1307 }, { 10, 1305 },
+ { 10, 1303 }, { 10, 1301 }, { 10, 1299 }, { 10, 1297 }, { 10, 1295 },
+ { 10, 1293 }, { 10, 1291 }, { 10, 1289 }, { 10, 1287 }, { 10, 1285 },
+ { 10, 1283 }, { 10, 1281 }, { 10, 1279 }, { 10, 1277 }, { 10, 1275 },
+ { 10, 1273 }, { 10, 1271 }, { 10, 1269 }, { 10, 1267 }, { 10, 1265 },
+ { 10, 1263 }, { 10, 1261 }, { 10, 1259 }, { 10, 1257 }, { 10, 1255 },
+ { 10, 1253 }, { 10, 1251 }, { 10, 1249 }, { 10, 1247 }, { 10, 1245 },
+ { 10, 1243 }, { 10, 1241 }, { 10, 1239 }, { 10, 1237 }, { 10, 1235 },
+ { 10, 1233 }, { 10, 1231 }, { 10, 1229 }, { 10, 1227 }, { 10, 1225 },
+ { 10, 1223 }, { 10, 1221 }, { 10, 1219 }, { 10, 1217 }, { 10, 1215 },
+ { 10, 1213 }, { 10, 1211 }, { 10, 1209 }, { 10, 1207 }, { 10, 1205 },
+ { 10, 1203 }, { 10, 1201 }, { 10, 1199 }, { 10, 1197 }, { 10, 1195 },
+ { 10, 1193 }, { 10, 1191 }, { 10, 1189 }, { 10, 1187 }, { 10, 1185 },
+ { 10, 1183 }, { 10, 1181 }, { 10, 1179 }, { 10, 1177 }, { 10, 1175 },
+ { 10, 1173 }, { 10, 1171 }, { 10, 1169 }, { 10, 1167 }, { 10, 1165 },
+ { 10, 1163 }, { 10, 1161 }, { 10, 1159 }, { 10, 1157 }, { 10, 1155 },
+ { 10, 1153 }, { 10, 1151 }, { 10, 1149 }, { 10, 1147 }, { 10, 1145 },
+ { 10, 1143 }, { 10, 1141 }, { 10, 1139 }, { 10, 1137 }, { 10, 1135 },
+ { 10, 1133 }, { 10, 1131 }, { 10, 1129 }, { 10, 1127 }, { 10, 1125 },
+ { 10, 1123 }, { 10, 1121 }, { 10, 1119 }, { 10, 1117 }, { 10, 1115 },
+ { 10, 1113 }, { 10, 1111 }, { 10, 1109 }, { 10, 1107 }, { 10, 1105 },
+ { 10, 1103 }, { 10, 1101 }, { 10, 1099 }, { 10, 1097 }, { 10, 1095 },
+ { 10, 1093 }, { 10, 1091 }, { 10, 1089 }, { 10, 1087 }, { 10, 1085 },
+ { 10, 1083 }, { 10, 1081 }, { 10, 1079 }, { 10, 1077 }, { 10, 1075 },
+ { 10, 1073 }, { 10, 1071 }, { 10, 1069 }, { 10, 1067 }, { 10, 1065 },
+ { 10, 1063 }, { 10, 1061 }, { 10, 1059 }, { 10, 1057 }, { 10, 1055 },
+ { 10, 1053 }, { 10, 1051 }, { 10, 1049 }, { 10, 1047 }, { 10, 1045 },
+ { 10, 1043 }, { 10, 1041 }, { 10, 1039 }, { 10, 1037 }, { 10, 1035 },
+ { 10, 1033 }, { 10, 1031 }, { 10, 1029 }, { 10, 1027 }, { 10, 1025 },
+ { 10, 1023 }, { 10, 1021 }, { 10, 1019 }, { 10, 1017 }, { 10, 1015 },
+ { 10, 1013 }, { 10, 1011 }, { 10, 1009 }, { 10, 1007 }, { 10, 1005 },
+ { 10, 1003 }, { 10, 1001 }, { 10, 999 }, { 10, 997 }, { 10, 995 },
+ { 10, 993 }, { 10, 991 }, { 10, 989 }, { 10, 987 }, { 10, 985 },
+ { 10, 983 }, { 10, 981 }, { 10, 979 }, { 10, 977 }, { 10, 975 },
+ { 10, 973 }, { 10, 971 }, { 10, 969 }, { 10, 967 }, { 10, 965 },
+ { 10, 963 }, { 10, 961 }, { 10, 959 }, { 10, 957 }, { 10, 955 },
+ { 10, 953 }, { 10, 951 }, { 10, 949 }, { 10, 947 }, { 10, 945 },
+ { 10, 943 }, { 10, 941 }, { 10, 939 }, { 10, 937 }, { 10, 935 },
+ { 10, 933 }, { 10, 931 }, { 10, 929 }, { 10, 927 }, { 10, 925 },
+ { 10, 923 }, { 10, 921 }, { 10, 919 }, { 10, 917 }, { 10, 915 },
+ { 10, 913 }, { 10, 911 }, { 10, 909 }, { 10, 907 }, { 10, 905 },
+ { 10, 903 }, { 10, 901 }, { 10, 899 }, { 10, 897 }, { 10, 895 },
+ { 10, 893 }, { 10, 891 }, { 10, 889 }, { 10, 887 }, { 10, 885 },
+ { 10, 883 }, { 10, 881 }, { 10, 879 }, { 10, 877 }, { 10, 875 },
+ { 10, 873 }, { 10, 871 }, { 10, 869 }, { 10, 867 }, { 10, 865 },
+ { 10, 863 }, { 10, 861 }, { 10, 859 }, { 10, 857 }, { 10, 855 },
+ { 10, 853 }, { 10, 851 }, { 10, 849 }, { 10, 847 }, { 10, 845 },
+ { 10, 843 }, { 10, 841 }, { 10, 839 }, { 10, 837 }, { 10, 835 },
+ { 10, 833 }, { 10, 831 }, { 10, 829 }, { 10, 827 }, { 10, 825 },
+ { 10, 823 }, { 10, 821 }, { 10, 819 }, { 10, 817 }, { 10, 815 },
+ { 10, 813 }, { 10, 811 }, { 10, 809 }, { 10, 807 }, { 10, 805 },
+ { 10, 803 }, { 10, 801 }, { 10, 799 }, { 10, 797 }, { 10, 795 },
+ { 10, 793 }, { 10, 791 }, { 10, 789 }, { 10, 787 }, { 10, 785 },
+ { 10, 783 }, { 10, 781 }, { 10, 779 }, { 10, 777 }, { 10, 775 },
+ { 10, 773 }, { 10, 771 }, { 10, 769 }, { 10, 767 }, { 10, 765 },
+ { 10, 763 }, { 10, 761 }, { 10, 759 }, { 10, 757 }, { 10, 755 },
+ { 10, 753 }, { 10, 751 }, { 10, 749 }, { 10, 747 }, { 10, 745 },
+ { 10, 743 }, { 10, 741 }, { 10, 739 }, { 10, 737 }, { 10, 735 },
+ { 10, 733 }, { 10, 731 }, { 10, 729 }, { 10, 727 }, { 10, 725 },
+ { 10, 723 }, { 10, 721 }, { 10, 719 }, { 10, 717 }, { 10, 715 },
+ { 10, 713 }, { 10, 711 }, { 10, 709 }, { 10, 707 }, { 10, 705 },
+ { 10, 703 }, { 10, 701 }, { 10, 699 }, { 10, 697 }, { 10, 695 },
+ { 10, 693 }, { 10, 691 }, { 10, 689 }, { 10, 687 }, { 10, 685 },
+ { 10, 683 }, { 10, 681 }, { 10, 679 }, { 10, 677 }, { 10, 675 },
+ { 10, 673 }, { 10, 671 }, { 10, 669 }, { 10, 667 }, { 10, 665 },
+ { 10, 663 }, { 10, 661 }, { 10, 659 }, { 10, 657 }, { 10, 655 },
+ { 10, 653 }, { 10, 651 }, { 10, 649 }, { 10, 647 }, { 10, 645 },
+ { 10, 643 }, { 10, 641 }, { 10, 639 }, { 10, 637 }, { 10, 635 },
+ { 10, 633 }, { 10, 631 }, { 10, 629 }, { 10, 627 }, { 10, 625 },
+ { 10, 623 }, { 10, 621 }, { 10, 619 }, { 10, 617 }, { 10, 615 },
+ { 10, 613 }, { 10, 611 }, { 10, 609 }, { 10, 607 }, { 10, 605 },
+ { 10, 603 }, { 10, 601 }, { 10, 599 }, { 10, 597 }, { 10, 595 },
+ { 10, 593 }, { 10, 591 }, { 10, 589 }, { 10, 587 }, { 10, 585 },
+ { 10, 583 }, { 10, 581 }, { 10, 579 }, { 10, 577 }, { 10, 575 },
+ { 10, 573 }, { 10, 571 }, { 10, 569 }, { 10, 567 }, { 10, 565 },
+ { 10, 563 }, { 10, 561 }, { 10, 559 }, { 10, 557 }, { 10, 555 },
+ { 10, 553 }, { 10, 551 }, { 10, 549 }, { 10, 547 }, { 10, 545 },
+ { 10, 543 }, { 10, 541 }, { 10, 539 }, { 10, 537 }, { 10, 535 },
+ { 10, 533 }, { 10, 531 }, { 10, 529 }, { 10, 527 }, { 10, 525 },
+ { 10, 523 }, { 10, 521 }, { 10, 519 }, { 10, 517 }, { 10, 515 },
+ { 10, 513 }, { 10, 511 }, { 10, 509 }, { 10, 507 }, { 10, 505 },
+ { 10, 503 }, { 10, 501 }, { 10, 499 }, { 10, 497 }, { 10, 495 },
+ { 10, 493 }, { 10, 491 }, { 10, 489 }, { 10, 487 }, { 10, 485 },
+ { 10, 483 }, { 10, 481 }, { 10, 479 }, { 10, 477 }, { 10, 475 },
+ { 10, 473 }, { 10, 471 }, { 10, 469 }, { 10, 467 }, { 10, 465 },
+ { 10, 463 }, { 10, 461 }, { 10, 459 }, { 10, 457 }, { 10, 455 },
+ { 10, 453 }, { 10, 451 }, { 10, 449 }, { 10, 447 }, { 10, 445 },
+ { 10, 443 }, { 10, 441 }, { 10, 439 }, { 10, 437 }, { 10, 435 },
+ { 10, 433 }, { 10, 431 }, { 10, 429 }, { 10, 427 }, { 10, 425 },
+ { 10, 423 }, { 10, 421 }, { 10, 419 }, { 10, 417 }, { 10, 415 },
+ { 10, 413 }, { 10, 411 }, { 10, 409 }, { 10, 407 }, { 10, 405 },
+ { 10, 403 }, { 10, 401 }, { 10, 399 }, { 10, 397 }, { 10, 395 },
+ { 10, 393 }, { 10, 391 }, { 10, 389 }, { 10, 387 }, { 10, 385 },
+ { 10, 383 }, { 10, 381 }, { 10, 379 }, { 10, 377 }, { 10, 375 },
+ { 10, 373 }, { 10, 371 }, { 10, 369 }, { 10, 367 }, { 10, 365 },
+ { 10, 363 }, { 10, 361 }, { 10, 359 }, { 10, 357 }, { 10, 355 },
+ { 10, 353 }, { 10, 351 }, { 10, 349 }, { 10, 347 }, { 10, 345 },
+ { 10, 343 }, { 10, 341 }, { 10, 339 }, { 10, 337 }, { 10, 335 },
+ { 10, 333 }, { 10, 331 }, { 10, 329 }, { 10, 327 }, { 10, 325 },
+ { 10, 323 }, { 10, 321 }, { 10, 319 }, { 10, 317 }, { 10, 315 },
+ { 10, 313 }, { 10, 311 }, { 10, 309 }, { 10, 307 }, { 10, 305 },
+ { 10, 303 }, { 10, 301 }, { 10, 299 }, { 10, 297 }, { 10, 295 },
+ { 10, 293 }, { 10, 291 }, { 10, 289 }, { 10, 287 }, { 10, 285 },
+ { 10, 283 }, { 10, 281 }, { 10, 279 }, { 10, 277 }, { 10, 275 },
+ { 10, 273 }, { 10, 271 }, { 10, 269 }, { 10, 267 }, { 10, 265 },
+ { 10, 263 }, { 10, 261 }, { 10, 259 }, { 10, 257 }, { 10, 255 },
+ { 10, 253 }, { 10, 251 }, { 10, 249 }, { 10, 247 }, { 10, 245 },
+ { 10, 243 }, { 10, 241 }, { 10, 239 }, { 10, 237 }, { 10, 235 },
+ { 10, 233 }, { 10, 231 }, { 10, 229 }, { 10, 227 }, { 10, 225 },
+ { 10, 223 }, { 10, 221 }, { 10, 219 }, { 10, 217 }, { 10, 215 },
+ { 10, 213 }, { 10, 211 }, { 10, 209 }, { 10, 207 }, { 10, 205 },
+ { 10, 203 }, { 10, 201 }, { 10, 199 }, { 10, 197 }, { 10, 195 },
+ { 10, 193 }, { 10, 191 }, { 10, 189 }, { 10, 187 }, { 10, 185 },
+ { 10, 183 }, { 10, 181 }, { 10, 179 }, { 10, 177 }, { 10, 175 },
+ { 10, 173 }, { 10, 171 }, { 10, 169 }, { 10, 167 }, { 10, 165 },
+ { 10, 163 }, { 10, 161 }, { 10, 159 }, { 10, 157 }, { 10, 155 },
+ { 10, 153 }, { 10, 151 }, { 10, 149 }, { 10, 147 }, { 10, 145 },
+ { 10, 143 }, { 10, 141 }, { 10, 139 }, { 10, 137 }, { 10, 135 },
+ { 10, 133 }, { 10, 131 }, { 10, 129 }, { 10, 127 }, { 10, 125 },
+ { 10, 123 }, { 10, 121 }, { 10, 119 }, { 10, 117 }, { 10, 115 },
+ { 10, 113 }, { 10, 111 }, { 10, 109 }, { 10, 107 }, { 10, 105 },
+ { 10, 103 }, { 10, 101 }, { 10, 99 }, { 10, 97 }, { 10, 95 },
+ { 10, 93 }, { 10, 91 }, { 10, 89 }, { 10, 87 }, { 10, 85 },
+ { 10, 83 }, { 10, 81 }, { 10, 79 }, { 10, 77 }, { 10, 75 },
+ { 10, 73 }, { 10, 71 }, { 10, 69 }, { 10, 67 }, { 10, 65 },
+ { 10, 63 }, { 10, 61 }, { 10, 59 }, { 10, 57 }, { 10, 55 },
+ { 10, 53 }, { 10, 51 }, { 10, 49 }, { 10, 47 }, { 10, 45 },
+ { 10, 43 }, { 10, 41 }, { 10, 39 }, { 10, 37 }, { 10, 35 },
+ { 10, 33 }, { 10, 31 }, { 10, 29 }, { 10, 27 }, { 10, 25 },
+ { 10, 23 }, { 10, 21 }, { 10, 19 }, { 10, 17 }, { 10, 15 },
+ { 10, 13 }, { 10, 11 }, { 10, 9 }, { 10, 7 }, { 10, 5 },
+ { 10, 3 }, { 10, 1 }, { 9, 63 }, { 9, 61 }, { 9, 59 },
+ { 9, 57 }, { 9, 55 }, { 9, 53 }, { 9, 51 }, { 9, 49 },
+ { 9, 47 }, { 9, 45 }, { 9, 43 }, { 9, 41 }, { 9, 39 },
+ { 9, 37 }, { 9, 35 }, { 9, 33 }, { 9, 31 }, { 9, 29 },
+ { 9, 27 }, { 9, 25 }, { 9, 23 }, { 9, 21 }, { 9, 19 },
+ { 9, 17 }, { 9, 15 }, { 9, 13 }, { 9, 11 }, { 9, 9 },
+ { 9, 7 }, { 9, 5 }, { 9, 3 }, { 9, 1 }, { 8, 31 },
+ { 8, 29 }, { 8, 27 }, { 8, 25 }, { 8, 23 }, { 8, 21 },
+ { 8, 19 }, { 8, 17 }, { 8, 15 }, { 8, 13 }, { 8, 11 },
+ { 8, 9 }, { 8, 7 }, { 8, 5 }, { 8, 3 }, { 8, 1 },
+ { 7, 15 }, { 7, 13 }, { 7, 11 }, { 7, 9 }, { 7, 7 },
+ { 7, 5 }, { 7, 3 }, { 7, 1 }, { 6, 7 }, { 6, 5 },
+ { 6, 3 }, { 6, 1 }, { 5, 3 }, { 5, 1 }, { 4, 1 },
+ { 3, 1 }, { 2, 1 }, { 1, 1 }, { 0, 0 }, { 1, 0 },
+ { 2, 0 }, { 3, 0 }, { 4, 0 }, { 5, 0 }, { 5, 2 },
+ { 6, 0 }, { 6, 2 }, { 6, 4 }, { 6, 6 }, { 7, 0 },
+ { 7, 2 }, { 7, 4 }, { 7, 6 }, { 7, 8 }, { 7, 10 },
+ { 7, 12 }, { 7, 14 }, { 8, 0 }, { 8, 2 }, { 8, 4 },
+ { 8, 6 }, { 8, 8 }, { 8, 10 }, { 8, 12 }, { 8, 14 },
+ { 8, 16 }, { 8, 18 }, { 8, 20 }, { 8, 22 }, { 8, 24 },
+ { 8, 26 }, { 8, 28 }, { 8, 30 }, { 9, 0 }, { 9, 2 },
+ { 9, 4 }, { 9, 6 }, { 9, 8 }, { 9, 10 }, { 9, 12 },
+ { 9, 14 }, { 9, 16 }, { 9, 18 }, { 9, 20 }, { 9, 22 },
+ { 9, 24 }, { 9, 26 }, { 9, 28 }, { 9, 30 }, { 9, 32 },
+ { 9, 34 }, { 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 },
+ { 9, 44 }, { 9, 46 }, { 9, 48 }, { 9, 50 }, { 9, 52 },
+ { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 },
+ { 10, 0 }, { 10, 2 }, { 10, 4 }, { 10, 6 }, { 10, 8 },
+ { 10, 10 }, { 10, 12 }, { 10, 14 }, { 10, 16 }, { 10, 18 },
+ { 10, 20 }, { 10, 22 }, { 10, 24 }, { 10, 26 }, { 10, 28 },
+ { 10, 30 }, { 10, 32 }, { 10, 34 }, { 10, 36 }, { 10, 38 },
+ { 10, 40 }, { 10, 42 }, { 10, 44 }, { 10, 46 }, { 10, 48 },
+ { 10, 50 }, { 10, 52 }, { 10, 54 }, { 10, 56 }, { 10, 58 },
+ { 10, 60 }, { 10, 62 }, { 10, 64 }, { 10, 66 }, { 10, 68 },
+ { 10, 70 }, { 10, 72 }, { 10, 74 }, { 10, 76 }, { 10, 78 },
+ { 10, 80 }, { 10, 82 }, { 10, 84 }, { 10, 86 }, { 10, 88 },
+ { 10, 90 }, { 10, 92 }, { 10, 94 }, { 10, 96 }, { 10, 98 },
+ { 10, 100 }, { 10, 102 }, { 10, 104 }, { 10, 106 }, { 10, 108 },
+ { 10, 110 }, { 10, 112 }, { 10, 114 }, { 10, 116 }, { 10, 118 },
+ { 10, 120 }, { 10, 122 }, { 10, 124 }, { 10, 126 }, { 10, 128 },
+ { 10, 130 }, { 10, 132 }, { 10, 134 }, { 10, 136 }, { 10, 138 },
+ { 10, 140 }, { 10, 142 }, { 10, 144 }, { 10, 146 }, { 10, 148 },
+ { 10, 150 }, { 10, 152 }, { 10, 154 }, { 10, 156 }, { 10, 158 },
+ { 10, 160 }, { 10, 162 }, { 10, 164 }, { 10, 166 }, { 10, 168 },
+ { 10, 170 }, { 10, 172 }, { 10, 174 }, { 10, 176 }, { 10, 178 },
+ { 10, 180 }, { 10, 182 }, { 10, 184 }, { 10, 186 }, { 10, 188 },
+ { 10, 190 }, { 10, 192 }, { 10, 194 }, { 10, 196 }, { 10, 198 },
+ { 10, 200 }, { 10, 202 }, { 10, 204 }, { 10, 206 }, { 10, 208 },
+ { 10, 210 }, { 10, 212 }, { 10, 214 }, { 10, 216 }, { 10, 218 },
+ { 10, 220 }, { 10, 222 }, { 10, 224 }, { 10, 226 }, { 10, 228 },
+ { 10, 230 }, { 10, 232 }, { 10, 234 }, { 10, 236 }, { 10, 238 },
+ { 10, 240 }, { 10, 242 }, { 10, 244 }, { 10, 246 }, { 10, 248 },
+ { 10, 250 }, { 10, 252 }, { 10, 254 }, { 10, 256 }, { 10, 258 },
+ { 10, 260 }, { 10, 262 }, { 10, 264 }, { 10, 266 }, { 10, 268 },
+ { 10, 270 }, { 10, 272 }, { 10, 274 }, { 10, 276 }, { 10, 278 },
+ { 10, 280 }, { 10, 282 }, { 10, 284 }, { 10, 286 }, { 10, 288 },
+ { 10, 290 }, { 10, 292 }, { 10, 294 }, { 10, 296 }, { 10, 298 },
+ { 10, 300 }, { 10, 302 }, { 10, 304 }, { 10, 306 }, { 10, 308 },
+ { 10, 310 }, { 10, 312 }, { 10, 314 }, { 10, 316 }, { 10, 318 },
+ { 10, 320 }, { 10, 322 }, { 10, 324 }, { 10, 326 }, { 10, 328 },
+ { 10, 330 }, { 10, 332 }, { 10, 334 }, { 10, 336 }, { 10, 338 },
+ { 10, 340 }, { 10, 342 }, { 10, 344 }, { 10, 346 }, { 10, 348 },
+ { 10, 350 }, { 10, 352 }, { 10, 354 }, { 10, 356 }, { 10, 358 },
+ { 10, 360 }, { 10, 362 }, { 10, 364 }, { 10, 366 }, { 10, 368 },
+ { 10, 370 }, { 10, 372 }, { 10, 374 }, { 10, 376 }, { 10, 378 },
+ { 10, 380 }, { 10, 382 }, { 10, 384 }, { 10, 386 }, { 10, 388 },
+ { 10, 390 }, { 10, 392 }, { 10, 394 }, { 10, 396 }, { 10, 398 },
+ { 10, 400 }, { 10, 402 }, { 10, 404 }, { 10, 406 }, { 10, 408 },
+ { 10, 410 }, { 10, 412 }, { 10, 414 }, { 10, 416 }, { 10, 418 },
+ { 10, 420 }, { 10, 422 }, { 10, 424 }, { 10, 426 }, { 10, 428 },
+ { 10, 430 }, { 10, 432 }, { 10, 434 }, { 10, 436 }, { 10, 438 },
+ { 10, 440 }, { 10, 442 }, { 10, 444 }, { 10, 446 }, { 10, 448 },
+ { 10, 450 }, { 10, 452 }, { 10, 454 }, { 10, 456 }, { 10, 458 },
+ { 10, 460 }, { 10, 462 }, { 10, 464 }, { 10, 466 }, { 10, 468 },
+ { 10, 470 }, { 10, 472 }, { 10, 474 }, { 10, 476 }, { 10, 478 },
+ { 10, 480 }, { 10, 482 }, { 10, 484 }, { 10, 486 }, { 10, 488 },
+ { 10, 490 }, { 10, 492 }, { 10, 494 }, { 10, 496 }, { 10, 498 },
+ { 10, 500 }, { 10, 502 }, { 10, 504 }, { 10, 506 }, { 10, 508 },
+ { 10, 510 }, { 10, 512 }, { 10, 514 }, { 10, 516 }, { 10, 518 },
+ { 10, 520 }, { 10, 522 }, { 10, 524 }, { 10, 526 }, { 10, 528 },
+ { 10, 530 }, { 10, 532 }, { 10, 534 }, { 10, 536 }, { 10, 538 },
+ { 10, 540 }, { 10, 542 }, { 10, 544 }, { 10, 546 }, { 10, 548 },
+ { 10, 550 }, { 10, 552 }, { 10, 554 }, { 10, 556 }, { 10, 558 },
+ { 10, 560 }, { 10, 562 }, { 10, 564 }, { 10, 566 }, { 10, 568 },
+ { 10, 570 }, { 10, 572 }, { 10, 574 }, { 10, 576 }, { 10, 578 },
+ { 10, 580 }, { 10, 582 }, { 10, 584 }, { 10, 586 }, { 10, 588 },
+ { 10, 590 }, { 10, 592 }, { 10, 594 }, { 10, 596 }, { 10, 598 },
+ { 10, 600 }, { 10, 602 }, { 10, 604 }, { 10, 606 }, { 10, 608 },
+ { 10, 610 }, { 10, 612 }, { 10, 614 }, { 10, 616 }, { 10, 618 },
+ { 10, 620 }, { 10, 622 }, { 10, 624 }, { 10, 626 }, { 10, 628 },
+ { 10, 630 }, { 10, 632 }, { 10, 634 }, { 10, 636 }, { 10, 638 },
+ { 10, 640 }, { 10, 642 }, { 10, 644 }, { 10, 646 }, { 10, 648 },
+ { 10, 650 }, { 10, 652 }, { 10, 654 }, { 10, 656 }, { 10, 658 },
+ { 10, 660 }, { 10, 662 }, { 10, 664 }, { 10, 666 }, { 10, 668 },
+ { 10, 670 }, { 10, 672 }, { 10, 674 }, { 10, 676 }, { 10, 678 },
+ { 10, 680 }, { 10, 682 }, { 10, 684 }, { 10, 686 }, { 10, 688 },
+ { 10, 690 }, { 10, 692 }, { 10, 694 }, { 10, 696 }, { 10, 698 },
+ { 10, 700 }, { 10, 702 }, { 10, 704 }, { 10, 706 }, { 10, 708 },
+ { 10, 710 }, { 10, 712 }, { 10, 714 }, { 10, 716 }, { 10, 718 },
+ { 10, 720 }, { 10, 722 }, { 10, 724 }, { 10, 726 }, { 10, 728 },
+ { 10, 730 }, { 10, 732 }, { 10, 734 }, { 10, 736 }, { 10, 738 },
+ { 10, 740 }, { 10, 742 }, { 10, 744 }, { 10, 746 }, { 10, 748 },
+ { 10, 750 }, { 10, 752 }, { 10, 754 }, { 10, 756 }, { 10, 758 },
+ { 10, 760 }, { 10, 762 }, { 10, 764 }, { 10, 766 }, { 10, 768 },
+ { 10, 770 }, { 10, 772 }, { 10, 774 }, { 10, 776 }, { 10, 778 },
+ { 10, 780 }, { 10, 782 }, { 10, 784 }, { 10, 786 }, { 10, 788 },
+ { 10, 790 }, { 10, 792 }, { 10, 794 }, { 10, 796 }, { 10, 798 },
+ { 10, 800 }, { 10, 802 }, { 10, 804 }, { 10, 806 }, { 10, 808 },
+ { 10, 810 }, { 10, 812 }, { 10, 814 }, { 10, 816 }, { 10, 818 },
+ { 10, 820 }, { 10, 822 }, { 10, 824 }, { 10, 826 }, { 10, 828 },
+ { 10, 830 }, { 10, 832 }, { 10, 834 }, { 10, 836 }, { 10, 838 },
+ { 10, 840 }, { 10, 842 }, { 10, 844 }, { 10, 846 }, { 10, 848 },
+ { 10, 850 }, { 10, 852 }, { 10, 854 }, { 10, 856 }, { 10, 858 },
+ { 10, 860 }, { 10, 862 }, { 10, 864 }, { 10, 866 }, { 10, 868 },
+ { 10, 870 }, { 10, 872 }, { 10, 874 }, { 10, 876 }, { 10, 878 },
+ { 10, 880 }, { 10, 882 }, { 10, 884 }, { 10, 886 }, { 10, 888 },
+ { 10, 890 }, { 10, 892 }, { 10, 894 }, { 10, 896 }, { 10, 898 },
+ { 10, 900 }, { 10, 902 }, { 10, 904 }, { 10, 906 }, { 10, 908 },
+ { 10, 910 }, { 10, 912 }, { 10, 914 }, { 10, 916 }, { 10, 918 },
+ { 10, 920 }, { 10, 922 }, { 10, 924 }, { 10, 926 }, { 10, 928 },
+ { 10, 930 }, { 10, 932 }, { 10, 934 }, { 10, 936 }, { 10, 938 },
+ { 10, 940 }, { 10, 942 }, { 10, 944 }, { 10, 946 }, { 10, 948 },
+ { 10, 950 }, { 10, 952 }, { 10, 954 }, { 10, 956 }, { 10, 958 },
+ { 10, 960 }, { 10, 962 }, { 10, 964 }, { 10, 966 }, { 10, 968 },
+ { 10, 970 }, { 10, 972 }, { 10, 974 }, { 10, 976 }, { 10, 978 },
+ { 10, 980 }, { 10, 982 }, { 10, 984 }, { 10, 986 }, { 10, 988 },
+ { 10, 990 }, { 10, 992 }, { 10, 994 }, { 10, 996 }, { 10, 998 },
+ { 10, 1000 }, { 10, 1002 }, { 10, 1004 }, { 10, 1006 }, { 10, 1008 },
+ { 10, 1010 }, { 10, 1012 }, { 10, 1014 }, { 10, 1016 }, { 10, 1018 },
+ { 10, 1020 }, { 10, 1022 }, { 10, 1024 }, { 10, 1026 }, { 10, 1028 },
+ { 10, 1030 }, { 10, 1032 }, { 10, 1034 }, { 10, 1036 }, { 10, 1038 },
+ { 10, 1040 }, { 10, 1042 }, { 10, 1044 }, { 10, 1046 }, { 10, 1048 },
+ { 10, 1050 }, { 10, 1052 }, { 10, 1054 }, { 10, 1056 }, { 10, 1058 },
+ { 10, 1060 }, { 10, 1062 }, { 10, 1064 }, { 10, 1066 }, { 10, 1068 },
+ { 10, 1070 }, { 10, 1072 }, { 10, 1074 }, { 10, 1076 }, { 10, 1078 },
+ { 10, 1080 }, { 10, 1082 }, { 10, 1084 }, { 10, 1086 }, { 10, 1088 },
+ { 10, 1090 }, { 10, 1092 }, { 10, 1094 }, { 10, 1096 }, { 10, 1098 },
+ { 10, 1100 }, { 10, 1102 }, { 10, 1104 }, { 10, 1106 }, { 10, 1108 },
+ { 10, 1110 }, { 10, 1112 }, { 10, 1114 }, { 10, 1116 }, { 10, 1118 },
+ { 10, 1120 }, { 10, 1122 }, { 10, 1124 }, { 10, 1126 }, { 10, 1128 },
+ { 10, 1130 }, { 10, 1132 }, { 10, 1134 }, { 10, 1136 }, { 10, 1138 },
+ { 10, 1140 }, { 10, 1142 }, { 10, 1144 }, { 10, 1146 }, { 10, 1148 },
+ { 10, 1150 }, { 10, 1152 }, { 10, 1154 }, { 10, 1156 }, { 10, 1158 },
+ { 10, 1160 }, { 10, 1162 }, { 10, 1164 }, { 10, 1166 }, { 10, 1168 },
+ { 10, 1170 }, { 10, 1172 }, { 10, 1174 }, { 10, 1176 }, { 10, 1178 },
+ { 10, 1180 }, { 10, 1182 }, { 10, 1184 }, { 10, 1186 }, { 10, 1188 },
+ { 10, 1190 }, { 10, 1192 }, { 10, 1194 }, { 10, 1196 }, { 10, 1198 },
+ { 10, 1200 }, { 10, 1202 }, { 10, 1204 }, { 10, 1206 }, { 10, 1208 },
+ { 10, 1210 }, { 10, 1212 }, { 10, 1214 }, { 10, 1216 }, { 10, 1218 },
+ { 10, 1220 }, { 10, 1222 }, { 10, 1224 }, { 10, 1226 }, { 10, 1228 },
+ { 10, 1230 }, { 10, 1232 }, { 10, 1234 }, { 10, 1236 }, { 10, 1238 },
+ { 10, 1240 }, { 10, 1242 }, { 10, 1244 }, { 10, 1246 }, { 10, 1248 },
+ { 10, 1250 }, { 10, 1252 }, { 10, 1254 }, { 10, 1256 }, { 10, 1258 },
+ { 10, 1260 }, { 10, 1262 }, { 10, 1264 }, { 10, 1266 }, { 10, 1268 },
+ { 10, 1270 }, { 10, 1272 }, { 10, 1274 }, { 10, 1276 }, { 10, 1278 },
+ { 10, 1280 }, { 10, 1282 }, { 10, 1284 }, { 10, 1286 }, { 10, 1288 },
+ { 10, 1290 }, { 10, 1292 }, { 10, 1294 }, { 10, 1296 }, { 10, 1298 },
+ { 10, 1300 }, { 10, 1302 }, { 10, 1304 }, { 10, 1306 }, { 10, 1308 },
+ { 10, 1310 }, { 10, 1312 }, { 10, 1314 }, { 10, 1316 }, { 10, 1318 },
+ { 10, 1320 }, { 10, 1322 }, { 10, 1324 }, { 10, 1326 }, { 10, 1328 },
+ { 10, 1330 }, { 10, 1332 }, { 10, 1334 }, { 10, 1336 }, { 10, 1338 },
+ { 10, 1340 }, { 10, 1342 }, { 10, 1344 }, { 10, 1346 }, { 10, 1348 },
+ { 10, 1350 }, { 10, 1352 }, { 10, 1354 }, { 10, 1356 }, { 10, 1358 },
+ { 10, 1360 }, { 10, 1362 }, { 10, 1364 }, { 10, 1366 }, { 10, 1368 },
+ { 10, 1370 }, { 10, 1372 }, { 10, 1374 }, { 10, 1376 }, { 10, 1378 },
+ { 10, 1380 }, { 10, 1382 }, { 10, 1384 }, { 10, 1386 }, { 10, 1388 },
+ { 10, 1390 }, { 10, 1392 }, { 10, 1394 }, { 10, 1396 }, { 10, 1398 },
+ { 10, 1400 }, { 10, 1402 }, { 10, 1404 }, { 10, 1406 }, { 10, 1408 },
+ { 10, 1410 }, { 10, 1412 }, { 10, 1414 }, { 10, 1416 }, { 10, 1418 },
+ { 10, 1420 }, { 10, 1422 }, { 10, 1424 }, { 10, 1426 }, { 10, 1428 },
+ { 10, 1430 }, { 10, 1432 }, { 10, 1434 }, { 10, 1436 }, { 10, 1438 },
+ { 10, 1440 }, { 10, 1442 }, { 10, 1444 }, { 10, 1446 }, { 10, 1448 },
+ { 10, 1450 }, { 10, 1452 }, { 10, 1454 }, { 10, 1456 }, { 10, 1458 },
+ { 10, 1460 }, { 10, 1462 }, { 10, 1464 }, { 10, 1466 }, { 10, 1468 },
+ { 10, 1470 }, { 10, 1472 }, { 10, 1474 }, { 10, 1476 }, { 10, 1478 },
+ { 10, 1480 }, { 10, 1482 }, { 10, 1484 }, { 10, 1486 }, { 10, 1488 },
+ { 10, 1490 }, { 10, 1492 }, { 10, 1494 }, { 10, 1496 }, { 10, 1498 },
+ { 10, 1500 }, { 10, 1502 }, { 10, 1504 }, { 10, 1506 }, { 10, 1508 },
+ { 10, 1510 }, { 10, 1512 }, { 10, 1514 }, { 10, 1516 }, { 10, 1518 },
+ { 10, 1520 }, { 10, 1522 }, { 10, 1524 }, { 10, 1526 }, { 10, 1528 },
+ { 10, 1530 }, { 10, 1532 }, { 10, 1534 }, { 10, 1536 }, { 10, 1538 },
+ { 10, 1540 }, { 10, 1542 }, { 10, 1544 }, { 10, 1546 }, { 10, 1548 },
+ { 10, 1550 }, { 10, 1552 }, { 10, 1554 }, { 10, 1556 }, { 10, 1558 },
+ { 10, 1560 }, { 10, 1562 }, { 10, 1564 }, { 10, 1566 }, { 10, 1568 },
+ { 10, 1570 }, { 10, 1572 }, { 10, 1574 }, { 10, 1576 }, { 10, 1578 },
+ { 10, 1580 }, { 10, 1582 }, { 10, 1584 }, { 10, 1586 }, { 10, 1588 },
+ { 10, 1590 }, { 10, 1592 }, { 10, 1594 }, { 10, 1596 }, { 10, 1598 },
+ { 10, 1600 }, { 10, 1602 }, { 10, 1604 }, { 10, 1606 }, { 10, 1608 },
+ { 10, 1610 }, { 10, 1612 }, { 10, 1614 }, { 10, 1616 }, { 10, 1618 },
+ { 10, 1620 }, { 10, 1622 }, { 10, 1624 }, { 10, 1626 }, { 10, 1628 },
+ { 10, 1630 }, { 10, 1632 }, { 10, 1634 }, { 10, 1636 }, { 10, 1638 },
+ { 10, 1640 }, { 10, 1642 }, { 10, 1644 }, { 10, 1646 }, { 10, 1648 },
+ { 10, 1650 }, { 10, 1652 }, { 10, 1654 }, { 10, 1656 }, { 10, 1658 },
+ { 10, 1660 }, { 10, 1662 }, { 10, 1664 }, { 10, 1666 }, { 10, 1668 },
+ { 10, 1670 }, { 10, 1672 }, { 10, 1674 }, { 10, 1676 }, { 10, 1678 },
+ { 10, 1680 }, { 10, 1682 }, { 10, 1684 }, { 10, 1686 }, { 10, 1688 },
+ { 10, 1690 }, { 10, 1692 }, { 10, 1694 }, { 10, 1696 }, { 10, 1698 },
+ { 10, 1700 }, { 10, 1702 }, { 10, 1704 }, { 10, 1706 }, { 10, 1708 },
+ { 10, 1710 }, { 10, 1712 }, { 10, 1714 }, { 10, 1716 }, { 10, 1718 },
+ { 10, 1720 }, { 10, 1722 }, { 10, 1724 }, { 10, 1726 }, { 10, 1728 },
+ { 10, 1730 }, { 10, 1732 }, { 10, 1734 }, { 10, 1736 }, { 10, 1738 },
+ { 10, 1740 }, { 10, 1742 }, { 10, 1744 }, { 10, 1746 }, { 10, 1748 },
+ { 10, 1750 }, { 10, 1752 }, { 10, 1754 }, { 10, 1756 }, { 10, 1758 },
+ { 10, 1760 }, { 10, 1762 }, { 10, 1764 }, { 10, 1766 }, { 10, 1768 },
+ { 10, 1770 }, { 10, 1772 }, { 10, 1774 }, { 10, 1776 }, { 10, 1778 },
+ { 10, 1780 }, { 10, 1782 }, { 10, 1784 }, { 10, 1786 }, { 10, 1788 },
+ { 10, 1790 }, { 10, 1792 }, { 10, 1794 }, { 10, 1796 }, { 10, 1798 },
+ { 10, 1800 }, { 10, 1802 }, { 10, 1804 }, { 10, 1806 }, { 10, 1808 },
+ { 10, 1810 }, { 10, 1812 }, { 10, 1814 }, { 10, 1816 }, { 10, 1818 },
+ { 10, 1820 }, { 10, 1822 }, { 10, 1824 }, { 10, 1826 }, { 10, 1828 },
+ { 10, 1830 }, { 10, 1832 }, { 10, 1834 }, { 10, 1836 }, { 10, 1838 },
+ { 10, 1840 }, { 10, 1842 }, { 10, 1844 }, { 10, 1846 }, { 10, 1848 },
+ { 10, 1850 }, { 10, 1852 }, { 10, 1854 }, { 10, 1856 }, { 10, 1858 },
+ { 10, 1860 }, { 10, 1862 }, { 10, 1864 }, { 10, 1866 }, { 10, 1868 },
+ { 10, 1870 }, { 10, 1872 }, { 10, 1874 }, { 10, 1876 }, { 10, 1878 },
+ { 10, 1880 }, { 10, 1882 }, { 10, 1884 }, { 10, 1886 }, { 10, 1888 },
+ { 10, 1890 }, { 10, 1892 }, { 10, 1894 }, { 10, 1896 }, { 10, 1898 },
+ { 10, 1900 }, { 10, 1902 }, { 10, 1904 }, { 10, 1906 }, { 10, 1908 },
+ { 10, 1910 }, { 10, 1912 }, { 10, 1914 }, { 10, 1916 }, { 10, 1918 },
+ { 10, 1920 }, { 10, 1922 }, { 10, 1924 }, { 10, 1926 }, { 10, 1928 },
+ { 10, 1930 }, { 10, 1932 }, { 10, 1934 }, { 10, 1936 }, { 10, 1938 },
+ { 10, 1940 }, { 10, 1942 }, { 10, 1944 }, { 10, 1946 }, { 10, 1948 },
+ { 10, 1950 }, { 10, 1952 }, { 10, 1954 }, { 10, 1956 }, { 10, 1958 },
+ { 10, 1960 }, { 10, 1962 }, { 10, 1964 }, { 10, 1966 }, { 10, 1968 },
+ { 10, 1970 }, { 10, 1972 }, { 10, 1974 }, { 10, 1976 }, { 10, 1978 },
+ { 10, 1980 }, { 10, 1982 }, { 10, 1984 }, { 10, 1986 }, { 10, 1988 },
+ { 10, 1990 }, { 10, 1992 }, { 10, 1994 }, { 10, 1996 }, { 10, 1998 },
+ { 10, 2000 }, { 10, 2002 }, { 10, 2004 }, { 10, 2006 }, { 10, 2008 },
+ { 10, 2010 }, { 10, 2012 }, { 10, 2014 }, { 10, 2016 }, { 10, 2018 },
+ { 10, 2020 }, { 10, 2022 }, { 10, 2024 }, { 10, 2026 }, { 10, 2028 },
+ { 10, 2030 }, { 10, 2032 }, { 10, 2034 }, { 10, 2036 }, { 10, 2038 },
+ { 10, 2040 }, { 10, 2042 }, { 10, 2044 }, { 10, 2046 }, { 10, 2048 },
+ { 10, 2050 }, { 10, 2052 }, { 10, 2054 }, { 10, 2056 }, { 10, 2058 },
+ { 10, 2060 }, { 10, 2062 }, { 10, 2064 }, { 10, 2066 }, { 10, 2068 },
+ { 10, 2070 }, { 10, 2072 }, { 10, 2074 }, { 10, 2076 }, { 10, 2078 },
+ { 10, 2080 }, { 10, 2082 }, { 10, 2084 }, { 10, 2086 }, { 10, 2088 },
+ { 10, 2090 }, { 10, 2092 }, { 10, 2094 }, { 10, 2096 }, { 10, 2098 },
+ { 10, 2100 }, { 10, 2102 }, { 10, 2104 }, { 10, 2106 }, { 10, 2108 },
+ { 10, 2110 }, { 10, 2112 }, { 10, 2114 }, { 10, 2116 }, { 10, 2118 },
+ { 10, 2120 }, { 10, 2122 }, { 10, 2124 }, { 10, 2126 }, { 10, 2128 },
+ { 10, 2130 }, { 10, 2132 }, { 10, 2134 }, { 10, 2136 }, { 10, 2138 },
+ { 10, 2140 }, { 10, 2142 }, { 10, 2144 }, { 10, 2146 }, { 10, 2148 },
+ { 10, 2150 }, { 10, 2152 }, { 10, 2154 }, { 10, 2156 }, { 10, 2158 },
+ { 10, 2160 }, { 10, 2162 }, { 10, 2164 }, { 10, 2166 }, { 10, 2168 },
+ { 10, 2170 }, { 10, 2172 }, { 10, 2174 }, { 10, 2176 }, { 10, 2178 },
+ { 10, 2180 }, { 10, 2182 }, { 10, 2184 }, { 10, 2186 }, { 10, 2188 },
+ { 10, 2190 }, { 10, 2192 }, { 10, 2194 }, { 10, 2196 }, { 10, 2198 },
+ { 10, 2200 }, { 10, 2202 }, { 10, 2204 }, { 10, 2206 }, { 10, 2208 },
+ { 10, 2210 }, { 10, 2212 }, { 10, 2214 }, { 10, 2216 }, { 10, 2218 },
+ { 10, 2220 }, { 10, 2222 }, { 10, 2224 }, { 10, 2226 }, { 10, 2228 },
+ { 10, 2230 }, { 10, 2232 }, { 10, 2234 }, { 10, 2236 }, { 10, 2238 },
+ { 10, 2240 }, { 10, 2242 }, { 10, 2244 }, { 10, 2246 }, { 10, 2248 },
+ { 10, 2250 }, { 10, 2252 }, { 10, 2254 }, { 10, 2256 }, { 10, 2258 },
+ { 10, 2260 }, { 10, 2262 }, { 10, 2264 }, { 10, 2266 }, { 10, 2268 },
+ { 10, 2270 }, { 10, 2272 }, { 10, 2274 }, { 10, 2276 }, { 10, 2278 },
+ { 10, 2280 }, { 10, 2282 }, { 10, 2284 }, { 10, 2286 }, { 10, 2288 },
+ { 10, 2290 }, { 10, 2292 }, { 10, 2294 }, { 10, 2296 }, { 10, 2298 },
+ { 10, 2300 }, { 10, 2302 }, { 10, 2304 }, { 10, 2306 }, { 10, 2308 },
+ { 10, 2310 }, { 10, 2312 }, { 10, 2314 }, { 10, 2316 }, { 10, 2318 },
+ { 10, 2320 }, { 10, 2322 }, { 10, 2324 }, { 10, 2326 }, { 10, 2328 },
+ { 10, 2330 }, { 10, 2332 }, { 10, 2334 }, { 10, 2336 }, { 10, 2338 },
+ { 10, 2340 }, { 10, 2342 }, { 10, 2344 }, { 10, 2346 }, { 10, 2348 },
+ { 10, 2350 }, { 10, 2352 }, { 10, 2354 }, { 10, 2356 }, { 10, 2358 },
+ { 10, 2360 }, { 10, 2362 }, { 10, 2364 }, { 10, 2366 }, { 10, 2368 },
+ { 10, 2370 }, { 10, 2372 }, { 10, 2374 }, { 10, 2376 }, { 10, 2378 },
+ { 10, 2380 }, { 10, 2382 }, { 10, 2384 }, { 10, 2386 }, { 10, 2388 },
+ { 10, 2390 }, { 10, 2392 }, { 10, 2394 }, { 10, 2396 }, { 10, 2398 },
+ { 10, 2400 }, { 10, 2402 }, { 10, 2404 }, { 10, 2406 }, { 10, 2408 },
+ { 10, 2410 }, { 10, 2412 }, { 10, 2414 }, { 10, 2416 }, { 10, 2418 },
+ { 10, 2420 }, { 10, 2422 }, { 10, 2424 }, { 10, 2426 }, { 10, 2428 },
+ { 10, 2430 }, { 10, 2432 }, { 10, 2434 }, { 10, 2436 }, { 10, 2438 },
+ { 10, 2440 }, { 10, 2442 }, { 10, 2444 }, { 10, 2446 }, { 10, 2448 },
+ { 10, 2450 }, { 10, 2452 }, { 10, 2454 }, { 10, 2456 }, { 10, 2458 },
+ { 10, 2460 }, { 10, 2462 }, { 10, 2464 }, { 10, 2466 }, { 10, 2468 },
+ { 10, 2470 }, { 10, 2472 }, { 10, 2474 }, { 10, 2476 }, { 10, 2478 },
+ { 10, 2480 }, { 10, 2482 }, { 10, 2484 }, { 10, 2486 }, { 10, 2488 },
+ { 10, 2490 }, { 10, 2492 }, { 10, 2494 }, { 10, 2496 }, { 10, 2498 },
+ { 10, 2500 }, { 10, 2502 }, { 10, 2504 }, { 10, 2506 }, { 10, 2508 },
+ { 10, 2510 }, { 10, 2512 }, { 10, 2514 }, { 10, 2516 }, { 10, 2518 },
+ { 10, 2520 }, { 10, 2522 }, { 10, 2524 }, { 10, 2526 }, { 10, 2528 },
+ { 10, 2530 }, { 10, 2532 }, { 10, 2534 }, { 10, 2536 }, { 10, 2538 },
+ { 10, 2540 }, { 10, 2542 }, { 10, 2544 }, { 10, 2546 }, { 10, 2548 },
+ { 10, 2550 }, { 10, 2552 }, { 10, 2554 }, { 10, 2556 }, { 10, 2558 },
+ { 10, 2560 }, { 10, 2562 }, { 10, 2564 }, { 10, 2566 }, { 10, 2568 },
+ { 10, 2570 }, { 10, 2572 }, { 10, 2574 }, { 10, 2576 }, { 10, 2578 },
+ { 10, 2580 }, { 10, 2582 }, { 10, 2584 }, { 10, 2586 }, { 10, 2588 },
+ { 10, 2590 }, { 10, 2592 }, { 10, 2594 }, { 10, 2596 }, { 10, 2598 },
+ { 10, 2600 }, { 10, 2602 }, { 10, 2604 }, { 10, 2606 }, { 10, 2608 },
+ { 10, 2610 }, { 10, 2612 }, { 10, 2614 }, { 10, 2616 }, { 10, 2618 },
+ { 10, 2620 }, { 10, 2622 }, { 10, 2624 }, { 10, 2626 }, { 10, 2628 },
+ { 10, 2630 }, { 10, 2632 }, { 10, 2634 }, { 10, 2636 }, { 10, 2638 },
+ { 10, 2640 }, { 10, 2642 }, { 10, 2644 }, { 10, 2646 }, { 10, 2648 },
+ { 10, 2650 }, { 10, 2652 }, { 10, 2654 }, { 10, 2656 }, { 10, 2658 },
+ { 10, 2660 }, { 10, 2662 }, { 10, 2664 }, { 10, 2666 }, { 10, 2668 },
+ { 10, 2670 }, { 10, 2672 }, { 10, 2674 }, { 10, 2676 }, { 10, 2678 },
+ { 10, 2680 }, { 10, 2682 }, { 10, 2684 }, { 10, 2686 }, { 10, 2688 },
+ { 10, 2690 }, { 10, 2692 }, { 10, 2694 }, { 10, 2696 }, { 10, 2698 },
+ { 10, 2700 }, { 10, 2702 }, { 10, 2704 }, { 10, 2706 }, { 10, 2708 },
+ { 10, 2710 }, { 10, 2712 }, { 10, 2714 }, { 10, 2716 }, { 10, 2718 },
+ { 10, 2720 }, { 10, 2722 }, { 10, 2724 }, { 10, 2726 }, { 10, 2728 },
+ { 10, 2730 }, { 10, 2732 }, { 10, 2734 }, { 10, 2736 }, { 10, 2738 },
+ { 10, 2740 }, { 10, 2742 }, { 10, 2744 }, { 10, 2746 }, { 10, 2748 },
+ { 10, 2750 }, { 10, 2752 }, { 10, 2754 }, { 10, 2756 }, { 10, 2758 },
+ { 10, 2760 }, { 10, 2762 }, { 10, 2764 }, { 10, 2766 }, { 10, 2768 },
+ { 10, 2770 }, { 10, 2772 }, { 10, 2774 }, { 10, 2776 }, { 10, 2778 },
+ { 10, 2780 }, { 10, 2782 }, { 10, 2784 }, { 10, 2786 }, { 10, 2788 },
+ { 10, 2790 }, { 10, 2792 }, { 10, 2794 }, { 10, 2796 }, { 10, 2798 },
+ { 10, 2800 }, { 10, 2802 }, { 10, 2804 }, { 10, 2806 }, { 10, 2808 },
+ { 10, 2810 }, { 10, 2812 }, { 10, 2814 }, { 10, 2816 }, { 10, 2818 },
+ { 10, 2820 }, { 10, 2822 }, { 10, 2824 }, { 10, 2826 }, { 10, 2828 },
+ { 10, 2830 }, { 10, 2832 }, { 10, 2834 }, { 10, 2836 }, { 10, 2838 },
+ { 10, 2840 }, { 10, 2842 }, { 10, 2844 }, { 10, 2846 }, { 10, 2848 },
+ { 10, 2850 }, { 10, 2852 }, { 10, 2854 }, { 10, 2856 }, { 10, 2858 },
+ { 10, 2860 }, { 10, 2862 }, { 10, 2864 }, { 10, 2866 }, { 10, 2868 },
+ { 10, 2870 }, { 10, 2872 }, { 10, 2874 }, { 10, 2876 }, { 10, 2878 },
+ { 10, 2880 }, { 10, 2882 }, { 10, 2884 }, { 10, 2886 }, { 10, 2888 },
+ { 10, 2890 }, { 10, 2892 }, { 10, 2894 }, { 10, 2896 }, { 10, 2898 },
+ { 10, 2900 }, { 10, 2902 }, { 10, 2904 }, { 10, 2906 }, { 10, 2908 },
+ { 10, 2910 }, { 10, 2912 }, { 10, 2914 }, { 10, 2916 }, { 10, 2918 },
+ { 10, 2920 }, { 10, 2922 }, { 10, 2924 }, { 10, 2926 }, { 10, 2928 },
+ { 10, 2930 }, { 10, 2932 }, { 10, 2934 }, { 10, 2936 }, { 10, 2938 },
+ { 10, 2940 }, { 10, 2942 }, { 10, 2944 }, { 10, 2946 }, { 10, 2948 },
+ { 10, 2950 }, { 10, 2952 }, { 10, 2954 }, { 10, 2956 }, { 10, 2958 },
+ { 10, 2960 }, { 10, 2962 }, { 10, 2964 }, { 10, 2966 }, { 10, 2968 },
+ { 10, 2970 }, { 10, 2972 }, { 10, 2974 }, { 10, 2976 }, { 10, 2978 },
+ { 10, 2980 }, { 10, 2982 }, { 10, 2984 }, { 10, 2986 }, { 10, 2988 },
+ { 10, 2990 }, { 10, 2992 }, { 10, 2994 }, { 10, 2996 }, { 10, 2998 },
+ { 10, 3000 }, { 10, 3002 }, { 10, 3004 }, { 10, 3006 }, { 10, 3008 },
+ { 10, 3010 }, { 10, 3012 }, { 10, 3014 }, { 10, 3016 }, { 10, 3018 },
+ { 10, 3020 }, { 10, 3022 }, { 10, 3024 }, { 10, 3026 }, { 10, 3028 },
+ { 10, 3030 }, { 10, 3032 }, { 10, 3034 }, { 10, 3036 }, { 10, 3038 },
+ { 10, 3040 }, { 10, 3042 }, { 10, 3044 }, { 10, 3046 }, { 10, 3048 },
+ { 10, 3050 }, { 10, 3052 }, { 10, 3054 }, { 10, 3056 }, { 10, 3058 },
+ { 10, 3060 }, { 10, 3062 }, { 10, 3064 }, { 10, 3066 }, { 10, 3068 },
+ { 10, 3070 }, { 10, 3072 }, { 10, 3074 }, { 10, 3076 }, { 10, 3078 },
+ { 10, 3080 }, { 10, 3082 }, { 10, 3084 }, { 10, 3086 }, { 10, 3088 },
+ { 10, 3090 }, { 10, 3092 }, { 10, 3094 }, { 10, 3096 }, { 10, 3098 },
+ { 10, 3100 }, { 10, 3102 }, { 10, 3104 }, { 10, 3106 }, { 10, 3108 },
+ { 10, 3110 }, { 10, 3112 }, { 10, 3114 }, { 10, 3116 }, { 10, 3118 },
+ { 10, 3120 }, { 10, 3122 }, { 10, 3124 }, { 10, 3126 }, { 10, 3128 },
+ { 10, 3130 }, { 10, 3132 }, { 10, 3134 }, { 10, 3136 }, { 10, 3138 },
+ { 10, 3140 }, { 10, 3142 }, { 10, 3144 }, { 10, 3146 }, { 10, 3148 },
+ { 10, 3150 }, { 10, 3152 }, { 10, 3154 }, { 10, 3156 }, { 10, 3158 },
+ { 10, 3160 }, { 10, 3162 }, { 10, 3164 }, { 10, 3166 }, { 10, 3168 },
+ { 10, 3170 }, { 10, 3172 }, { 10, 3174 }, { 10, 3176 }, { 10, 3178 },
+ { 10, 3180 }, { 10, 3182 }, { 10, 3184 }, { 10, 3186 }, { 10, 3188 },
+ { 10, 3190 }, { 10, 3192 }, { 10, 3194 }, { 10, 3196 }, { 10, 3198 },
+ { 10, 3200 }, { 10, 3202 }, { 10, 3204 }, { 10, 3206 }, { 10, 3208 },
+ { 10, 3210 }, { 10, 3212 }, { 10, 3214 }, { 10, 3216 }, { 10, 3218 },
+ { 10, 3220 }, { 10, 3222 }, { 10, 3224 }, { 10, 3226 }, { 10, 3228 },
+ { 10, 3230 }, { 10, 3232 }, { 10, 3234 }, { 10, 3236 }, { 10, 3238 },
+ { 10, 3240 }, { 10, 3242 }, { 10, 3244 }, { 10, 3246 }, { 10, 3248 },
+ { 10, 3250 }, { 10, 3252 }, { 10, 3254 }, { 10, 3256 }, { 10, 3258 },
+ { 10, 3260 }, { 10, 3262 }, { 10, 3264 }, { 10, 3266 }, { 10, 3268 },
+ { 10, 3270 }, { 10, 3272 }, { 10, 3274 }, { 10, 3276 }, { 10, 3278 },
+ { 10, 3280 }, { 10, 3282 }, { 10, 3284 }, { 10, 3286 }, { 10, 3288 },
+ { 10, 3290 }, { 10, 3292 }, { 10, 3294 }, { 10, 3296 }, { 10, 3298 },
+ { 10, 3300 }, { 10, 3302 }, { 10, 3304 }, { 10, 3306 }, { 10, 3308 },
+ { 10, 3310 }, { 10, 3312 }, { 10, 3314 }, { 10, 3316 }, { 10, 3318 },
+ { 10, 3320 }, { 10, 3322 }, { 10, 3324 }, { 10, 3326 }, { 10, 3328 },
+ { 10, 3330 }, { 10, 3332 }, { 10, 3334 }, { 10, 3336 }, { 10, 3338 },
+ { 10, 3340 }, { 10, 3342 }, { 10, 3344 }, { 10, 3346 }, { 10, 3348 },
+ { 10, 3350 }, { 10, 3352 }, { 10, 3354 }, { 10, 3356 }, { 10, 3358 },
+ { 10, 3360 }, { 10, 3362 }, { 10, 3364 }, { 10, 3366 }, { 10, 3368 },
+ { 10, 3370 }, { 10, 3372 }, { 10, 3374 }, { 10, 3376 }, { 10, 3378 },
+ { 10, 3380 }, { 10, 3382 }, { 10, 3384 }, { 10, 3386 }, { 10, 3388 },
+ { 10, 3390 }, { 10, 3392 }, { 10, 3394 }, { 10, 3396 }, { 10, 3398 },
+ { 10, 3400 }, { 10, 3402 }, { 10, 3404 }, { 10, 3406 }, { 10, 3408 },
+ { 10, 3410 }, { 10, 3412 }, { 10, 3414 }, { 10, 3416 }, { 10, 3418 },
+ { 10, 3420 }, { 10, 3422 }, { 10, 3424 }, { 10, 3426 }, { 10, 3428 },
+ { 10, 3430 }, { 10, 3432 }, { 10, 3434 }, { 10, 3436 }, { 10, 3438 },
+ { 10, 3440 }, { 10, 3442 }, { 10, 3444 }, { 10, 3446 }, { 10, 3448 },
+ { 10, 3450 }, { 10, 3452 }, { 10, 3454 }, { 10, 3456 }, { 10, 3458 },
+ { 10, 3460 }, { 10, 3462 }, { 10, 3464 }, { 10, 3466 }, { 10, 3468 },
+ { 10, 3470 }, { 10, 3472 }, { 10, 3474 }, { 10, 3476 }, { 10, 3478 },
+ { 10, 3480 }, { 10, 3482 }, { 10, 3484 }, { 10, 3486 }, { 10, 3488 },
+ { 10, 3490 }, { 10, 3492 }, { 10, 3494 }, { 10, 3496 }, { 10, 3498 },
+ { 10, 3500 }, { 10, 3502 }, { 10, 3504 }, { 10, 3506 }, { 10, 3508 },
+ { 10, 3510 }, { 10, 3512 }, { 10, 3514 }, { 10, 3516 }, { 10, 3518 },
+ { 10, 3520 }, { 10, 3522 }, { 10, 3524 }, { 10, 3526 }, { 10, 3528 },
+ { 10, 3530 }, { 10, 3532 }, { 10, 3534 }, { 10, 3536 }, { 10, 3538 },
+ { 10, 3540 }, { 10, 3542 }, { 10, 3544 }, { 10, 3546 }, { 10, 3548 },
+ { 10, 3550 }, { 10, 3552 }, { 10, 3554 }, { 10, 3556 }, { 10, 3558 },
+ { 10, 3560 }, { 10, 3562 }, { 10, 3564 }, { 10, 3566 }, { 10, 3568 },
+ { 10, 3570 }, { 10, 3572 }, { 10, 3574 }, { 10, 3576 }, { 10, 3578 },
+ { 10, 3580 }, { 10, 3582 }, { 10, 3584 }, { 10, 3586 }, { 10, 3588 },
+ { 10, 3590 }, { 10, 3592 }, { 10, 3594 }, { 10, 3596 }, { 10, 3598 },
+ { 10, 3600 }, { 10, 3602 }, { 10, 3604 }, { 10, 3606 }, { 10, 3608 },
+ { 10, 3610 }, { 10, 3612 }, { 10, 3614 }, { 10, 3616 }, { 10, 3618 },
+ { 10, 3620 }, { 10, 3622 }, { 10, 3624 }, { 10, 3626 }, { 10, 3628 },
+ { 10, 3630 }, { 10, 3632 }, { 10, 3634 }, { 10, 3636 }, { 10, 3638 },
+ { 10, 3640 }, { 10, 3642 }, { 10, 3644 }, { 10, 3646 }, { 10, 3648 },
+ { 10, 3650 }, { 10, 3652 }, { 10, 3654 }, { 10, 3656 }, { 10, 3658 },
+ { 10, 3660 }, { 10, 3662 }, { 10, 3664 }, { 10, 3666 }, { 10, 3668 },
+ { 10, 3670 }, { 10, 3672 }, { 10, 3674 }, { 10, 3676 }, { 10, 3678 },
+ { 10, 3680 }, { 10, 3682 }, { 10, 3684 }, { 10, 3686 }, { 10, 3688 },
+ { 10, 3690 }, { 10, 3692 }, { 10, 3694 }, { 10, 3696 }, { 10, 3698 },
+ { 10, 3700 }, { 10, 3702 }, { 10, 3704 }, { 10, 3706 }, { 10, 3708 },
+ { 10, 3710 }, { 10, 3712 }, { 10, 3714 }, { 10, 3716 }, { 10, 3718 },
+ { 10, 3720 }, { 10, 3722 }, { 10, 3724 }, { 10, 3726 }, { 10, 3728 },
+ { 10, 3730 }, { 10, 3732 }, { 10, 3734 }, { 10, 3736 }, { 10, 3738 },
+ { 10, 3740 }, { 10, 3742 }, { 10, 3744 }, { 10, 3746 }, { 10, 3748 },
+ { 10, 3750 }, { 10, 3752 }, { 10, 3754 }, { 10, 3756 }, { 10, 3758 },
+ { 10, 3760 }, { 10, 3762 }, { 10, 3764 }, { 10, 3766 }, { 10, 3768 },
+ { 10, 3770 }, { 10, 3772 }, { 10, 3774 }, { 10, 3776 }, { 10, 3778 },
+ { 10, 3780 }, { 10, 3782 }, { 10, 3784 }, { 10, 3786 }, { 10, 3788 },
+ { 10, 3790 }, { 10, 3792 }, { 10, 3794 }, { 10, 3796 }, { 10, 3798 },
+ { 10, 3800 }, { 10, 3802 }, { 10, 3804 }, { 10, 3806 }, { 10, 3808 },
+ { 10, 3810 }, { 10, 3812 }, { 10, 3814 }, { 10, 3816 }, { 10, 3818 },
+ { 10, 3820 }, { 10, 3822 }, { 10, 3824 }, { 10, 3826 }, { 10, 3828 },
+ { 10, 3830 }, { 10, 3832 }, { 10, 3834 }, { 10, 3836 }, { 10, 3838 },
+ { 10, 3840 }, { 10, 3842 }, { 10, 3844 }, { 10, 3846 }, { 10, 3848 },
+ { 10, 3850 }, { 10, 3852 }, { 10, 3854 }, { 10, 3856 }, { 10, 3858 },
+ { 10, 3860 }, { 10, 3862 }, { 10, 3864 }, { 10, 3866 }, { 10, 3868 },
+ { 10, 3870 }, { 10, 3872 }, { 10, 3874 }, { 10, 3876 }, { 10, 3878 },
+ { 10, 3880 }, { 10, 3882 }, { 10, 3884 }, { 10, 3886 }, { 10, 3888 },
+ { 10, 3890 }, { 10, 3892 }, { 10, 3894 }, { 10, 3896 }, { 10, 3898 },
+ { 10, 3900 }, { 10, 3902 }, { 10, 3904 }, { 10, 3906 }, { 10, 3908 },
+ { 10, 3910 }, { 10, 3912 }, { 10, 3914 }, { 10, 3916 }, { 10, 3918 },
+ { 10, 3920 }, { 10, 3922 }, { 10, 3924 }, { 10, 3926 }, { 10, 3928 },
+ { 10, 3930 }, { 10, 3932 }, { 10, 3934 }, { 10, 3936 }, { 10, 3938 },
+ { 10, 3940 }, { 10, 3942 }, { 10, 3944 }, { 10, 3946 }, { 10, 3948 },
+ { 10, 3950 }, { 10, 3952 }, { 10, 3954 }, { 10, 3956 }, { 10, 3958 },
+ { 10, 3960 }
+};
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_DCT_VALUE_TOKENS_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/defaultcoefcounts.h b/media/libvpx/libvpx/vp8/encoder/defaultcoefcounts.h
new file mode 100644
index 0000000000..a3ab34c8a0
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/defaultcoefcounts.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_DEFAULTCOEFCOUNTS_H_
+#define VPX_VP8_ENCODER_DEFAULTCOEFCOUNTS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Generated file, included by entropy.c */
+
+static const unsigned int default_coef_counts
+ [BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS] = {
+
+ {
+ /* Block Type ( 0 ) */
+ {
+ /* Coeff Band ( 0 ) */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ },
+ {
+ /* Coeff Band ( 1 ) */
+ { 30190, 26544, 225, 24, 4, 0, 0, 0, 0, 0, 0, 4171593 },
+ { 26846, 25157, 1241, 130, 26, 6, 1, 0, 0, 0, 0, 149987 },
+ { 10484, 9538, 1006, 160, 36, 18, 0, 0, 0, 0, 0, 15104 },
+ },
+ {
+ /* Coeff Band ( 2 ) */
+ { 25842, 40456, 1126, 83, 11, 2, 0, 0, 0, 0, 0, 0 },
+ { 9338, 8010, 512, 73, 7, 3, 2, 0, 0, 0, 0, 43294 },
+ { 1047, 751, 149, 31, 13, 6, 1, 0, 0, 0, 0, 879 },
+ },
+ {
+ /* Coeff Band ( 3 ) */
+ { 26136, 9826, 252, 13, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 8134, 5574, 191, 14, 2, 0, 0, 0, 0, 0, 0, 35302 },
+ { 605, 677, 116, 9, 1, 0, 0, 0, 0, 0, 0, 611 },
+ },
+ {
+ /* Coeff Band ( 4 ) */
+ { 10263, 15463, 283, 17, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 2773, 2191, 128, 9, 2, 2, 0, 0, 0, 0, 0, 10073 },
+ { 134, 125, 32, 4, 0, 2, 0, 0, 0, 0, 0, 50 },
+ },
+ {
+ /* Coeff Band ( 5 ) */
+ { 10483, 2663, 23, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 2137, 1251, 27, 1, 1, 0, 0, 0, 0, 0, 0, 14362 },
+ { 116, 156, 14, 2, 1, 0, 0, 0, 0, 0, 0, 190 },
+ },
+ {
+ /* Coeff Band ( 6 ) */
+ { 40977, 27614, 412, 28, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 6113, 5213, 261, 22, 3, 0, 0, 0, 0, 0, 0, 26164 },
+ { 382, 312, 50, 14, 2, 0, 0, 0, 0, 0, 0, 345 },
+ },
+ {
+ /* Coeff Band ( 7 ) */
+ { 0, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 319 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 },
+ },
+ },
+ {
+ /* Block Type ( 1 ) */
+ {
+ /* Coeff Band ( 0 ) */
+ { 3268, 19382, 1043, 250, 93, 82, 49, 26, 17, 8, 25, 82289 },
+ { 8758, 32110, 5436, 1832, 827, 668, 420, 153, 24, 0, 3, 52914 },
+ { 9337, 23725, 8487, 3954, 2107, 1836, 1069, 399, 59, 0, 0,
+ 18620 },
+ },
+ {
+ /* Coeff Band ( 1 ) */
+ { 12419, 8420, 452, 62, 9, 1, 0, 0, 0, 0, 0, 0 },
+ { 11715, 8705, 693, 92, 15, 7, 2, 0, 0, 0, 0, 53988 },
+ { 7603, 8585, 2306, 778, 270, 145, 39, 5, 0, 0, 0, 9136 },
+ },
+ {
+ /* Coeff Band ( 2 ) */
+ { 15938, 14335, 1207, 184, 55, 13, 4, 1, 0, 0, 0, 0 },
+ { 7415, 6829, 1138, 244, 71, 26, 7, 0, 0, 0, 0, 9980 },
+ { 1580, 1824, 655, 241, 89, 46, 10, 2, 0, 0, 0, 429 },
+ },
+ {
+ /* Coeff Band ( 3 ) */
+ { 19453, 5260, 201, 19, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 9173, 3758, 213, 22, 1, 1, 0, 0, 0, 0, 0, 9820 },
+ { 1689, 1277, 276, 51, 17, 4, 0, 0, 0, 0, 0, 679 },
+ },
+ {
+ /* Coeff Band ( 4 ) */
+ { 12076, 10667, 620, 85, 19, 9, 5, 0, 0, 0, 0, 0 },
+ { 4665, 3625, 423, 55, 19, 9, 0, 0, 0, 0, 0, 5127 },
+ { 415, 440, 143, 34, 20, 7, 2, 0, 0, 0, 0, 101 },
+ },
+ {
+ /* Coeff Band ( 5 ) */
+ { 12183, 4846, 115, 11, 1, 0, 0, 0, 0, 0, 0, 0 },
+ { 4226, 3149, 177, 21, 2, 0, 0, 0, 0, 0, 0, 7157 },
+ { 375, 621, 189, 51, 11, 4, 1, 0, 0, 0, 0, 198 },
+ },
+ {
+ /* Coeff Band ( 6 ) */
+ { 61658, 37743, 1203, 94, 10, 3, 0, 0, 0, 0, 0, 0 },
+ { 15514, 11563, 903, 111, 14, 5, 0, 0, 0, 0, 0, 25195 },
+ { 929, 1077, 291, 78, 14, 7, 1, 0, 0, 0, 0, 507 },
+ },
+ {
+ /* Coeff Band ( 7 ) */
+ { 0, 990, 15, 3, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 412, 13, 0, 0, 0, 0, 0, 0, 0, 0, 1641 },
+ { 0, 18, 7, 1, 0, 0, 0, 0, 0, 0, 0, 30 },
+ },
+ },
+ {
+ /* Block Type ( 2 ) */
+ {
+ /* Coeff Band ( 0 ) */
+ { 953, 24519, 628, 120, 28, 12, 4, 0, 0, 0, 0, 2248798 },
+ { 1525, 25654, 2647, 617, 239, 143, 42, 5, 0, 0, 0, 66837 },
+ { 1180, 11011, 3001, 1237, 532, 448, 239, 54, 5, 0, 0, 7122 },
+ },
+ {
+ /* Coeff Band ( 1 ) */
+ { 1356, 2220, 67, 10, 4, 1, 0, 0, 0, 0, 0, 0 },
+ { 1450, 2544, 102, 18, 4, 3, 0, 0, 0, 0, 0, 57063 },
+ { 1182, 2110, 470, 130, 41, 21, 0, 0, 0, 0, 0, 6047 },
+ },
+ {
+ /* Coeff Band ( 2 ) */
+ { 370, 3378, 200, 30, 5, 4, 1, 0, 0, 0, 0, 0 },
+ { 293, 1006, 131, 29, 11, 0, 0, 0, 0, 0, 0, 5404 },
+ { 114, 387, 98, 23, 4, 8, 1, 0, 0, 0, 0, 236 },
+ },
+ {
+ /* Coeff Band ( 3 ) */
+ { 579, 194, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 395, 213, 5, 1, 0, 0, 0, 0, 0, 0, 0, 4157 },
+ { 119, 122, 4, 0, 0, 0, 0, 0, 0, 0, 0, 300 },
+ },
+ {
+ /* Coeff Band ( 4 ) */
+ { 38, 557, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 21, 114, 12, 1, 0, 0, 0, 0, 0, 0, 0, 427 },
+ { 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 },
+ },
+ {
+ /* Coeff Band ( 5 ) */
+ { 52, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 18, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 652 },
+ { 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
+ },
+ {
+ /* Coeff Band ( 6 ) */
+ { 640, 569, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 25, 77, 2, 0, 0, 0, 0, 0, 0, 0, 0, 517 },
+ { 4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 },
+ },
+ {
+ /* Coeff Band ( 7 ) */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ },
+ },
+ {
+ /* Block Type ( 3 ) */
+ {
+ /* Coeff Band ( 0 ) */
+ { 2506, 20161, 2707, 767, 261, 178, 107, 30, 14, 3, 0, 100694 },
+ { 8806, 36478, 8817, 3268, 1280, 850, 401, 114, 42, 0, 0, 58572 },
+ { 11003, 27214, 11798, 5716, 2482, 2072, 1048, 175, 32, 0, 0,
+ 19284 },
+ },
+ {
+ /* Coeff Band ( 1 ) */
+ { 9738, 11313, 959, 205, 70, 18, 11, 1, 0, 0, 0, 0 },
+ { 12628, 15085, 1507, 273, 52, 19, 9, 0, 0, 0, 0, 54280 },
+ { 10701, 15846, 5561, 1926, 813, 570, 249, 36, 0, 0, 0, 6460 },
+ },
+ {
+ /* Coeff Band ( 2 ) */
+ { 6781, 22539, 2784, 634, 182, 123, 20, 4, 0, 0, 0, 0 },
+ { 6263, 11544, 2649, 790, 259, 168, 27, 5, 0, 0, 0, 20539 },
+ { 3109, 4075, 2031, 896, 457, 386, 158, 29, 0, 0, 0, 1138 },
+ },
+ {
+ /* Coeff Band ( 3 ) */
+ { 11515, 4079, 465, 73, 5, 14, 2, 0, 0, 0, 0, 0 },
+ { 9361, 5834, 650, 96, 24, 8, 4, 0, 0, 0, 0, 22181 },
+ { 4343, 3974, 1360, 415, 132, 96, 14, 1, 0, 0, 0, 1267 },
+ },
+ {
+ /* Coeff Band ( 4 ) */
+ { 4787, 9297, 823, 168, 44, 12, 4, 0, 0, 0, 0, 0 },
+ { 3619, 4472, 719, 198, 60, 31, 3, 0, 0, 0, 0, 8401 },
+ { 1157, 1175, 483, 182, 88, 31, 8, 0, 0, 0, 0, 268 },
+ },
+ {
+ /* Coeff Band ( 5 ) */
+ { 8299, 1226, 32, 5, 1, 0, 0, 0, 0, 0, 0, 0 },
+ { 3502, 1568, 57, 4, 1, 1, 0, 0, 0, 0, 0, 9811 },
+ { 1055, 1070, 166, 29, 6, 1, 0, 0, 0, 0, 0, 527 },
+ },
+ {
+ /* Coeff Band ( 6 ) */
+ { 27414, 27927, 1989, 347, 69, 26, 0, 0, 0, 0, 0, 0 },
+ { 5876, 10074, 1574, 341, 91, 24, 4, 0, 0, 0, 0, 21954 },
+ { 1571, 2171, 778, 324, 124, 65, 16, 0, 0, 0, 0, 979 },
+ },
+ {
+ /* Coeff Band ( 7 ) */
+ { 0, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 459 },
+ { 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 },
+ },
+ },
+ };
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_DEFAULTCOEFCOUNTS_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/denoising.c b/media/libvpx/libvpx/vp8/encoder/denoising.c
new file mode 100644
index 0000000000..e54d1e9f4b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/denoising.c
@@ -0,0 +1,725 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+
+#include "denoising.h"
+
+#include "vp8/common/reconinter.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8_rtcd.h"
+
+static const unsigned int NOISE_MOTION_THRESHOLD = 25 * 25;
+/* SSE_DIFF_THRESHOLD is selected to give ~95% confidence, assuming
+ * var(noise) ~= 100.
+ */
+static const unsigned int SSE_DIFF_THRESHOLD = 16 * 16 * 20;
+static const unsigned int SSE_THRESHOLD = 16 * 16 * 40;
+static const unsigned int SSE_THRESHOLD_HIGH = 16 * 16 * 80;
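+
+/* For scale: a 16x16 block has 256 pixels, so SSE_THRESHOLD (16 * 16 * 40)
+ * corresponds to an average squared error of 40 per pixel, and
+ * SSE_DIFF_THRESHOLD (16 * 16 * 20) to an average per-pixel gap of 20
+ * between the zero-MV SSE and the best-MV SSE.
+ */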
+
+/*
+ * The filter function was modified to reduce the computational complexity.
+ * Step 1:
+ * Instead of applying tap coefficients for each pixel, we calculated the
+ * pixel adjustments vs. pixel diff value ahead of time.
+ * adjustment = filtered_value - current_raw
+ * = (filter_coefficient * diff + 128) >> 8
+ * where
+ * filter_coefficient = (255 << 8) / (256 + ((absdiff * 330) >> 3));
+ * filter_coefficient += filter_coefficient /
+ * (3 + motion_magnitude_adjustment);
+ * filter_coefficient is clamped to 0 ~ 255.
+ *
+ * Step 2:
+ * The adjustment vs. diff curve becomes flat very quickly as diff increases.
+ * This allowed us to use only a few levels to approximate the curve without
+ * changing the filtering algorithm too much.
+ * The adjustments were further corrected by checking the motion magnitude.
+ * The levels used are:
+ * diff          adjustment w/o motion correction   adjustment w/ motion correction
+ * [-255, -16]   -6                                 -7
+ * [-15, -8]     -4                                 -5
+ * [-7, -4]      -3                                 -4
+ * [-3, 3]       diff                               diff
+ * [4, 7]        3                                  4
+ * [8, 15]       4                                  5
+ * [16, 255]     6                                  7
+ */
+
+int vp8_denoiser_filter_c(unsigned char *mc_running_avg_y, int mc_avg_y_stride,
+ unsigned char *running_avg_y, int avg_y_stride,
+ unsigned char *sig, int sig_stride,
+ unsigned int motion_magnitude,
+ int increase_denoising) {
+ unsigned char *running_avg_y_start = running_avg_y;
+ unsigned char *sig_start = sig;
+ int sum_diff_thresh;
+ int r, c;
+ int sum_diff = 0;
+ int adj_val[3] = { 3, 4, 6 };
+ int shift_inc1 = 0;
+ int shift_inc2 = 1;
+ int col_sum[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+  /* If motion_magnitude is small, make the denoiser more aggressive by
+   * increasing the adjustment for each level. Add another increment for
+   * blocks that are labeled for increased denoising. */
+ if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) {
+ if (increase_denoising) {
+ shift_inc1 = 1;
+ shift_inc2 = 2;
+ }
+ adj_val[0] += shift_inc2;
+ adj_val[1] += shift_inc2;
+ adj_val[2] += shift_inc2;
+ }
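+  /* With the values above, adj_val becomes {4, 5, 7} for low-motion blocks,
+   * or {5, 6, 8} when increase_denoising is also set; high-motion blocks
+   * keep the base {3, 4, 6}. */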
+
+ for (r = 0; r < 16; ++r) {
+ for (c = 0; c < 16; ++c) {
+ int diff = 0;
+ int adjustment = 0;
+ int absdiff = 0;
+
+ diff = mc_running_avg_y[c] - sig[c];
+ absdiff = abs(diff);
+
+      // When |diff| <= 3 + shift_inc1, use the pixel value from the
+      // last denoised frame.
+ if (absdiff <= 3 + shift_inc1) {
+ running_avg_y[c] = mc_running_avg_y[c];
+ col_sum[c] += diff;
+ } else {
+ if (absdiff >= 4 + shift_inc1 && absdiff <= 7) {
+ adjustment = adj_val[0];
+ } else if (absdiff >= 8 && absdiff <= 15) {
+ adjustment = adj_val[1];
+ } else {
+ adjustment = adj_val[2];
+ }
+
+ if (diff > 0) {
+ if ((sig[c] + adjustment) > 255) {
+ running_avg_y[c] = 255;
+ } else {
+ running_avg_y[c] = sig[c] + adjustment;
+ }
+
+ col_sum[c] += adjustment;
+ } else {
+ if ((sig[c] - adjustment) < 0) {
+ running_avg_y[c] = 0;
+ } else {
+ running_avg_y[c] = sig[c] - adjustment;
+ }
+
+ col_sum[c] -= adjustment;
+ }
+ }
+ }
+
+ /* Update pointers for next iteration. */
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ for (c = 0; c < 16; ++c) {
+    // Below we clip the value in the same way the SSE code does.
+    // With the aggressive denoiser, the adj_val for each pixel can be
+    // at most 8 (the current maximum adjustment of the map).
+    // The SSE code calculates the sum of adj_val for the columns, so the
+    // sum can be up to 128 (16 rows). However, the value range in the SSE
+    // code is -128 ~ 127, which is why we apply the same clamp here in
+    // the C code.
+    // We don't do this for the UV denoiser, since there are only 8 rows,
+    // and the max adjustment is <= 8, so the sum of the columns will not
+    // exceed 64.
+ if (col_sum[c] >= 128) {
+ col_sum[c] = 127;
+ }
+ sum_diff += col_sum[c];
+ }
+
+ sum_diff_thresh = SUM_DIFF_THRESHOLD;
+ if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
+ if (abs(sum_diff) > sum_diff_thresh) {
+    // Before returning to copy the block (i.e., apply no denoising), check
+    // whether we can still apply some (weaker) temporal filtering to this
+    // block that would otherwise not be denoised at all. The simplest option
+    // is to apply an additional adjustment to running_avg_y to bring it
+    // closer to sig. The adjustment is capped by a maximum delta, and chosen
+    // such that in most cases the resulting sum_diff will be within the
+    // acceptable range given by sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over threshold.
+ int delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
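+    // For example, with the default sum_diff_thresh of 512 an abs(sum_diff)
+    // of 900 gives delta = ((900 - 512) >> 8) + 1 = 2; delta reaches 4 (so
+    // no adjustment is attempted) once abs(sum_diff) is at least
+    // sum_diff_thresh + 768.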
+ if (delta < 4) {
+ sig -= sig_stride * 16;
+ mc_running_avg_y -= mc_avg_y_stride * 16;
+ running_avg_y -= avg_y_stride * 16;
+ for (r = 0; r < 16; ++r) {
+ for (c = 0; c < 16; ++c) {
+ int diff = mc_running_avg_y[c] - sig[c];
+ int adjustment = abs(diff);
+ if (adjustment > delta) adjustment = delta;
+ if (diff > 0) {
+ // Bring denoised signal down.
+ if (running_avg_y[c] - adjustment < 0) {
+ running_avg_y[c] = 0;
+ } else {
+ running_avg_y[c] = running_avg_y[c] - adjustment;
+ }
+ col_sum[c] -= adjustment;
+ } else if (diff < 0) {
+ // Bring denoised signal up.
+ if (running_avg_y[c] + adjustment > 255) {
+ running_avg_y[c] = 255;
+ } else {
+ running_avg_y[c] = running_avg_y[c] + adjustment;
+ }
+ col_sum[c] += adjustment;
+ }
+ }
+ // TODO(marpan): Check here if abs(sum_diff) has gone below the
+ // threshold sum_diff_thresh, and if so, we can exit the row loop.
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ sum_diff = 0;
+ for (c = 0; c < 16; ++c) {
+ if (col_sum[c] >= 128) {
+ col_sum[c] = 127;
+ }
+ sum_diff += col_sum[c];
+ }
+
+ if (abs(sum_diff) > sum_diff_thresh) return COPY_BLOCK;
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+
+ vp8_copy_mem16x16(running_avg_y_start, avg_y_stride, sig_start, sig_stride);
+ return FILTER_BLOCK;
+}
+
+int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg, int mc_avg_stride,
+ unsigned char *running_avg, int avg_stride,
+ unsigned char *sig, int sig_stride,
+ unsigned int motion_magnitude,
+ int increase_denoising) {
+ unsigned char *running_avg_start = running_avg;
+ unsigned char *sig_start = sig;
+ int sum_diff_thresh;
+ int r, c;
+ int sum_diff = 0;
+ int sum_block = 0;
+ int adj_val[3] = { 3, 4, 6 };
+ int shift_inc1 = 0;
+ int shift_inc2 = 1;
+  /* If motion_magnitude is small, make the denoiser more aggressive by
+   * increasing the adjustment for each level. Add another increment for
+   * blocks that are labeled for increased denoising. */
+ if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) {
+ if (increase_denoising) {
+ shift_inc1 = 1;
+ shift_inc2 = 2;
+ }
+ adj_val[0] += shift_inc2;
+ adj_val[1] += shift_inc2;
+ adj_val[2] += shift_inc2;
+ }
+
+  // Avoid denoising the color signal if it's close to the average level.
+ for (r = 0; r < 8; ++r) {
+ for (c = 0; c < 8; ++c) {
+ sum_block += sig[c];
+ }
+ sig += sig_stride;
+ }
+ if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) {
+ return COPY_BLOCK;
+ }
+
+ sig -= sig_stride * 8;
+ for (r = 0; r < 8; ++r) {
+ for (c = 0; c < 8; ++c) {
+ int diff = 0;
+ int adjustment = 0;
+ int absdiff = 0;
+
+ diff = mc_running_avg[c] - sig[c];
+ absdiff = abs(diff);
+
+      // When |diff| <= 3 + shift_inc1, use the pixel value from the
+      // last denoised frame.
+ if (absdiff <= 3 + shift_inc1) {
+ running_avg[c] = mc_running_avg[c];
+ sum_diff += diff;
+ } else {
+ if (absdiff >= 4 && absdiff <= 7) {
+ adjustment = adj_val[0];
+ } else if (absdiff >= 8 && absdiff <= 15) {
+ adjustment = adj_val[1];
+ } else {
+ adjustment = adj_val[2];
+ }
+ if (diff > 0) {
+ if ((sig[c] + adjustment) > 255) {
+ running_avg[c] = 255;
+ } else {
+ running_avg[c] = sig[c] + adjustment;
+ }
+ sum_diff += adjustment;
+ } else {
+ if ((sig[c] - adjustment) < 0) {
+ running_avg[c] = 0;
+ } else {
+ running_avg[c] = sig[c] - adjustment;
+ }
+ sum_diff -= adjustment;
+ }
+ }
+ }
+ /* Update pointers for next iteration. */
+ sig += sig_stride;
+ mc_running_avg += mc_avg_stride;
+ running_avg += avg_stride;
+ }
+
+ sum_diff_thresh = SUM_DIFF_THRESHOLD_UV;
+ if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV;
+ if (abs(sum_diff) > sum_diff_thresh) {
+    // Before returning to copy the block (i.e., apply no denoising), check
+    // whether we can still apply some (weaker) temporal filtering to this
+    // block that would otherwise not be denoised at all. The simplest option
+    // is to apply an additional adjustment to running_avg to bring it
+    // closer to sig. The adjustment is capped by a maximum delta, and chosen
+    // such that in most cases the resulting sum_diff will be within the
+    // acceptable range given by sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over threshold.
+ int delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ sig -= sig_stride * 8;
+ mc_running_avg -= mc_avg_stride * 8;
+ running_avg -= avg_stride * 8;
+ for (r = 0; r < 8; ++r) {
+ for (c = 0; c < 8; ++c) {
+ int diff = mc_running_avg[c] - sig[c];
+ int adjustment = abs(diff);
+ if (adjustment > delta) adjustment = delta;
+ if (diff > 0) {
+ // Bring denoised signal down.
+ if (running_avg[c] - adjustment < 0) {
+ running_avg[c] = 0;
+ } else {
+ running_avg[c] = running_avg[c] - adjustment;
+ }
+ sum_diff -= adjustment;
+ } else if (diff < 0) {
+ // Bring denoised signal up.
+ if (running_avg[c] + adjustment > 255) {
+ running_avg[c] = 255;
+ } else {
+ running_avg[c] = running_avg[c] + adjustment;
+ }
+ sum_diff += adjustment;
+ }
+ }
+ // TODO(marpan): Check here if abs(sum_diff) has gone below the
+ // threshold sum_diff_thresh, and if so, we can exit the row loop.
+ sig += sig_stride;
+ mc_running_avg += mc_avg_stride;
+ running_avg += avg_stride;
+ }
+ if (abs(sum_diff) > sum_diff_thresh) return COPY_BLOCK;
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+
+ vp8_copy_mem8x8(running_avg_start, avg_stride, sig_start, sig_stride);
+ return FILTER_BLOCK;
+}
+
+void vp8_denoiser_set_parameters(VP8_DENOISER *denoiser, int mode) {
+ assert(mode > 0); // Denoiser is allocated only if mode > 0.
+ if (mode == 1) {
+ denoiser->denoiser_mode = kDenoiserOnYOnly;
+ } else if (mode == 2) {
+ denoiser->denoiser_mode = kDenoiserOnYUV;
+ } else if (mode == 3) {
+ denoiser->denoiser_mode = kDenoiserOnYUVAggressive;
+ } else {
+ denoiser->denoiser_mode = kDenoiserOnYUV;
+ }
+ if (denoiser->denoiser_mode != kDenoiserOnYUVAggressive) {
+ denoiser->denoise_pars.scale_sse_thresh = 1;
+ denoiser->denoise_pars.scale_motion_thresh = 8;
+ denoiser->denoise_pars.scale_increase_filter = 0;
+ denoiser->denoise_pars.denoise_mv_bias = 95;
+ denoiser->denoise_pars.pickmode_mv_bias = 100;
+ denoiser->denoise_pars.qp_thresh = 0;
+ denoiser->denoise_pars.consec_zerolast = UINT_MAX;
+ denoiser->denoise_pars.spatial_blur = 0;
+ } else {
+ denoiser->denoise_pars.scale_sse_thresh = 2;
+ denoiser->denoise_pars.scale_motion_thresh = 16;
+ denoiser->denoise_pars.scale_increase_filter = 1;
+ denoiser->denoise_pars.denoise_mv_bias = 60;
+ denoiser->denoise_pars.pickmode_mv_bias = 75;
+ denoiser->denoise_pars.qp_thresh = 80;
+ denoiser->denoise_pars.consec_zerolast = 15;
+ denoiser->denoise_pars.spatial_blur = 0;
+ }
+}
+
+int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height,
+ int num_mb_rows, int num_mb_cols, int mode) {
+ int i;
+ assert(denoiser);
+ denoiser->num_mb_cols = num_mb_cols;
+
+ for (i = 0; i < MAX_REF_FRAMES; ++i) {
+ denoiser->yv12_running_avg[i].flags = 0;
+
+ if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_running_avg[i]), width,
+ height, VP8BORDERINPIXELS) < 0) {
+ vp8_denoiser_free(denoiser);
+ return 1;
+ }
+ memset(denoiser->yv12_running_avg[i].buffer_alloc, 0,
+ denoiser->yv12_running_avg[i].frame_size);
+ }
+ denoiser->yv12_mc_running_avg.flags = 0;
+
+ if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_mc_running_avg), width,
+ height, VP8BORDERINPIXELS) < 0) {
+ vp8_denoiser_free(denoiser);
+ return 1;
+ }
+
+ memset(denoiser->yv12_mc_running_avg.buffer_alloc, 0,
+ denoiser->yv12_mc_running_avg.frame_size);
+
+ if (vp8_yv12_alloc_frame_buffer(&denoiser->yv12_last_source, width, height,
+ VP8BORDERINPIXELS) < 0) {
+ vp8_denoiser_free(denoiser);
+ return 1;
+ }
+ memset(denoiser->yv12_last_source.buffer_alloc, 0,
+ denoiser->yv12_last_source.frame_size);
+
+ denoiser->denoise_state = vpx_calloc((num_mb_rows * num_mb_cols), 1);
+ if (!denoiser->denoise_state) {
+ vp8_denoiser_free(denoiser);
+ return 1;
+ }
+ memset(denoiser->denoise_state, 0, (num_mb_rows * num_mb_cols));
+ vp8_denoiser_set_parameters(denoiser, mode);
+ denoiser->nmse_source_diff = 0;
+ denoiser->nmse_source_diff_count = 0;
+ denoiser->qp_avg = 0;
+ // QP threshold below which we can go up to aggressive mode.
+ denoiser->qp_threshold_up = 80;
+ // QP threshold above which we can go back down to normal mode.
+  // For now keep this second threshold high, so it is effectively unused.
+ denoiser->qp_threshold_down = 128;
+ // Bitrate thresholds and noise metric (nmse) thresholds for switching to
+ // aggressive mode.
+ // TODO(marpan): Adjust thresholds, including effect on resolution.
+ denoiser->bitrate_threshold = 400000; // (bits/sec).
+ denoiser->threshold_aggressive_mode = 80;
+ if (width * height > 1280 * 720) {
+ denoiser->bitrate_threshold = 3000000;
+ denoiser->threshold_aggressive_mode = 200;
+ } else if (width * height > 960 * 540) {
+ denoiser->bitrate_threshold = 1200000;
+ denoiser->threshold_aggressive_mode = 120;
+ } else if (width * height > 640 * 480) {
+ denoiser->bitrate_threshold = 600000;
+ denoiser->threshold_aggressive_mode = 100;
+ }
+ return 0;
+}
+
+void vp8_denoiser_free(VP8_DENOISER *denoiser) {
+ int i;
+ assert(denoiser);
+
+ for (i = 0; i < MAX_REF_FRAMES; ++i) {
+ vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_running_avg[i]);
+ }
+ vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_mc_running_avg);
+ vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_last_source);
+ vpx_free(denoiser->denoise_state);
+}
+
+void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, MACROBLOCK *x,
+ unsigned int best_sse, unsigned int zero_mv_sse,
+ int recon_yoffset, int recon_uvoffset,
+ loop_filter_info_n *lfi_n, int mb_row, int mb_col,
+ int block_index, int consec_zero_last)
+{
+ int mv_row;
+ int mv_col;
+ unsigned int motion_threshold;
+ unsigned int motion_magnitude2;
+ unsigned int sse_thresh;
+ int sse_diff_thresh = 0;
+  // Spatial loop filter: only applied selectively, based on the temporal
+  // filter state of the block relative to its top/left neighbors.
+ int apply_spatial_loop_filter = 1;
+ MV_REFERENCE_FRAME frame = x->best_reference_frame;
+ MV_REFERENCE_FRAME zero_frame = x->best_zeromv_reference_frame;
+
+ enum vp8_denoiser_decision decision = FILTER_BLOCK;
+ enum vp8_denoiser_decision decision_u = COPY_BLOCK;
+ enum vp8_denoiser_decision decision_v = COPY_BLOCK;
+
+ if (zero_frame) {
+ YV12_BUFFER_CONFIG *src = &denoiser->yv12_running_avg[frame];
+ YV12_BUFFER_CONFIG *dst = &denoiser->yv12_mc_running_avg;
+ YV12_BUFFER_CONFIG saved_pre, saved_dst;
+ MB_MODE_INFO saved_mbmi;
+ MACROBLOCKD *filter_xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &filter_xd->mode_info_context->mbmi;
+ int sse_diff = 0;
+ // Bias on zero motion vector sse.
+ const int zero_bias = denoiser->denoise_pars.denoise_mv_bias;
+ zero_mv_sse = (unsigned int)((int64_t)zero_mv_sse * zero_bias / 100);
+ sse_diff = (int)zero_mv_sse - (int)best_sse;
+
+ saved_mbmi = *mbmi;
+
+ /* Use the best MV for the compensation. */
+ mbmi->ref_frame = x->best_reference_frame;
+ mbmi->mode = x->best_sse_inter_mode;
+ mbmi->mv = x->best_sse_mv;
+ mbmi->need_to_clamp_mvs = x->need_to_clamp_best_mvs;
+ mv_col = x->best_sse_mv.as_mv.col;
+ mv_row = x->best_sse_mv.as_mv.row;
+    // Bias to zero_mv if there is only a small amount of motion.
+    // Note that sse_diff_thresh is initialized to zero, so this ensures
+    // we will always choose zero_mv for denoising if
+    // zero_mv_sse <= best_sse (i.e., sse_diff <= 0).
+ if ((unsigned int)(mv_row * mv_row + mv_col * mv_col) <=
+ NOISE_MOTION_THRESHOLD) {
+ sse_diff_thresh = (int)SSE_DIFF_THRESHOLD;
+ }
+
+ if (frame == INTRA_FRAME || sse_diff <= sse_diff_thresh) {
+ /*
+       * Handle intra blocks as referring to the last frame with zero motion
+       * and let the absolute pixel difference affect the filter factor.
+       * Also consider a small amount of motion as being a random walk due
+       * to noise, provided it doesn't give us a much bigger error.
+       * Note that any changes to the mode info only affect the
+       * denoising.
+ */
+ x->denoise_zeromv = 1;
+ mbmi->ref_frame = x->best_zeromv_reference_frame;
+
+ src = &denoiser->yv12_running_avg[zero_frame];
+
+ mbmi->mode = ZEROMV;
+ mbmi->mv.as_int = 0;
+ x->best_sse_inter_mode = ZEROMV;
+ x->best_sse_mv.as_int = 0;
+ best_sse = zero_mv_sse;
+ }
+
+ mv_row = x->best_sse_mv.as_mv.row;
+ mv_col = x->best_sse_mv.as_mv.col;
+ motion_magnitude2 = mv_row * mv_row + mv_col * mv_col;
+ motion_threshold =
+ denoiser->denoise_pars.scale_motion_thresh * NOISE_MOTION_THRESHOLD;
+
+ if (motion_magnitude2 <
+ denoiser->denoise_pars.scale_increase_filter * NOISE_MOTION_THRESHOLD) {
+ x->increase_denoising = 1;
+ }
+
+ sse_thresh = denoiser->denoise_pars.scale_sse_thresh * SSE_THRESHOLD;
+ if (x->increase_denoising) {
+ sse_thresh = denoiser->denoise_pars.scale_sse_thresh * SSE_THRESHOLD_HIGH;
+ }
+
+ if (best_sse > sse_thresh || motion_magnitude2 > motion_threshold) {
+ decision = COPY_BLOCK;
+ }
+
+    // If the block is considered skin, don't denoise if the block
+    // (1) is selected as non-zero motion for the current frame, or
+    // (2) has not been selected as ZERO_LAST mode for at least x past
+    // frames in a row.
+    // TODO(marpan): Parameter "x" should be varied with framerate.
+    // In particular, it should be reduced for layers (base layer/LAST).
+ if (x->is_skin && (consec_zero_last < 2 || motion_magnitude2 > 0)) {
+ decision = COPY_BLOCK;
+ }
+
+ if (decision == FILTER_BLOCK) {
+ saved_pre = filter_xd->pre;
+ saved_dst = filter_xd->dst;
+
+ /* Compensate the running average. */
+ filter_xd->pre.y_buffer = src->y_buffer + recon_yoffset;
+ filter_xd->pre.u_buffer = src->u_buffer + recon_uvoffset;
+ filter_xd->pre.v_buffer = src->v_buffer + recon_uvoffset;
+ /* Write the compensated running average to the destination buffer. */
+ filter_xd->dst.y_buffer = dst->y_buffer + recon_yoffset;
+ filter_xd->dst.u_buffer = dst->u_buffer + recon_uvoffset;
+ filter_xd->dst.v_buffer = dst->v_buffer + recon_uvoffset;
+
+ if (!x->skip) {
+ vp8_build_inter_predictors_mb(filter_xd);
+ } else {
+ vp8_build_inter16x16_predictors_mb(
+ filter_xd, filter_xd->dst.y_buffer, filter_xd->dst.u_buffer,
+ filter_xd->dst.v_buffer, filter_xd->dst.y_stride,
+ filter_xd->dst.uv_stride);
+ }
+ filter_xd->pre = saved_pre;
+ filter_xd->dst = saved_dst;
+ *mbmi = saved_mbmi;
+ }
+ } else {
+    // zero_frame should always be set for real-time mode, as the
+    // ZEROMV mode is always checked, so we should never reach this branch.
+    // If the ZEROMV mode is not checked, then we force no denoising (COPY).
+ decision = COPY_BLOCK;
+ }
+
+ if (decision == FILTER_BLOCK) {
+ unsigned char *mc_running_avg_y =
+ denoiser->yv12_mc_running_avg.y_buffer + recon_yoffset;
+ int mc_avg_y_stride = denoiser->yv12_mc_running_avg.y_stride;
+ unsigned char *running_avg_y =
+ denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset;
+ int avg_y_stride = denoiser->yv12_running_avg[INTRA_FRAME].y_stride;
+
+ /* Filter. */
+ decision = vp8_denoiser_filter(mc_running_avg_y, mc_avg_y_stride,
+ running_avg_y, avg_y_stride, x->thismb, 16,
+ motion_magnitude2, x->increase_denoising);
+ denoiser->denoise_state[block_index] =
+ motion_magnitude2 > 0 ? kFilterNonZeroMV : kFilterZeroMV;
+ // Only denoise UV for zero motion, and if y channel was denoised.
+ if (denoiser->denoiser_mode != kDenoiserOnYOnly && motion_magnitude2 == 0 &&
+ decision == FILTER_BLOCK) {
+ unsigned char *mc_running_avg_u =
+ denoiser->yv12_mc_running_avg.u_buffer + recon_uvoffset;
+ unsigned char *running_avg_u =
+ denoiser->yv12_running_avg[INTRA_FRAME].u_buffer + recon_uvoffset;
+ unsigned char *mc_running_avg_v =
+ denoiser->yv12_mc_running_avg.v_buffer + recon_uvoffset;
+ unsigned char *running_avg_v =
+ denoiser->yv12_running_avg[INTRA_FRAME].v_buffer + recon_uvoffset;
+ int mc_avg_uv_stride = denoiser->yv12_mc_running_avg.uv_stride;
+ int avg_uv_stride = denoiser->yv12_running_avg[INTRA_FRAME].uv_stride;
+ int signal_stride = x->block[16].src_stride;
+ decision_u = vp8_denoiser_filter_uv(
+ mc_running_avg_u, mc_avg_uv_stride, running_avg_u, avg_uv_stride,
+ x->block[16].src + *x->block[16].base_src, signal_stride,
+ motion_magnitude2, 0);
+ decision_v = vp8_denoiser_filter_uv(
+ mc_running_avg_v, mc_avg_uv_stride, running_avg_v, avg_uv_stride,
+ x->block[20].src + *x->block[20].base_src, signal_stride,
+ motion_magnitude2, 0);
+ }
+ }
+ if (decision == COPY_BLOCK) {
+ /* No filtering of this block; it differs too much from the predictor,
+ * or the motion vector magnitude is considered too big.
+ */
+ x->denoise_zeromv = 0;
+ vp8_copy_mem16x16(
+ x->thismb, 16,
+ denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset,
+ denoiser->yv12_running_avg[INTRA_FRAME].y_stride);
+ denoiser->denoise_state[block_index] = kNoFilter;
+ }
+ if (denoiser->denoiser_mode != kDenoiserOnYOnly) {
+ if (decision_u == COPY_BLOCK) {
+ vp8_copy_mem8x8(
+ x->block[16].src + *x->block[16].base_src, x->block[16].src_stride,
+ denoiser->yv12_running_avg[INTRA_FRAME].u_buffer + recon_uvoffset,
+ denoiser->yv12_running_avg[INTRA_FRAME].uv_stride);
+ }
+ if (decision_v == COPY_BLOCK) {
+ vp8_copy_mem8x8(
+ x->block[20].src + *x->block[20].base_src, x->block[16].src_stride,
+ denoiser->yv12_running_avg[INTRA_FRAME].v_buffer + recon_uvoffset,
+ denoiser->yv12_running_avg[INTRA_FRAME].uv_stride);
+ }
+ }
+ // Option to selectively deblock the denoised signal, for y channel only.
+ if (apply_spatial_loop_filter) {
+ loop_filter_info lfi;
+ int apply_filter_col = 0;
+ int apply_filter_row = 0;
+ int apply_filter = 0;
+ int y_stride = denoiser->yv12_running_avg[INTRA_FRAME].y_stride;
+ int uv_stride = denoiser->yv12_running_avg[INTRA_FRAME].uv_stride;
+
+ // Fix filter level to some nominal value for now.
+ int filter_level = 48;
+
+ int hev_index = lfi_n->hev_thr_lut[INTER_FRAME][filter_level];
+ lfi.mblim = lfi_n->mblim[filter_level];
+ lfi.blim = lfi_n->blim[filter_level];
+ lfi.lim = lfi_n->lim[filter_level];
+ lfi.hev_thr = lfi_n->hev_thr[hev_index];
+
+ // Apply filter if there is a difference in the denoiser filter state
+ // between the current and left/top block, or if non-zero motion vector
+ // is used for the motion-compensated filtering.
+ if (mb_col > 0) {
+ apply_filter_col =
+ !((denoiser->denoise_state[block_index] ==
+ denoiser->denoise_state[block_index - 1]) &&
+ denoiser->denoise_state[block_index] != kFilterNonZeroMV);
+ if (apply_filter_col) {
+ // Filter left vertical edge.
+ apply_filter = 1;
+ vp8_loop_filter_mbv(
+ denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset,
+ NULL, NULL, y_stride, uv_stride, &lfi);
+ }
+ }
+ if (mb_row > 0) {
+ apply_filter_row =
+ !((denoiser->denoise_state[block_index] ==
+ denoiser->denoise_state[block_index - denoiser->num_mb_cols]) &&
+ denoiser->denoise_state[block_index] != kFilterNonZeroMV);
+ if (apply_filter_row) {
+ // Filter top horizontal edge.
+ apply_filter = 1;
+ vp8_loop_filter_mbh(
+ denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset,
+ NULL, NULL, y_stride, uv_stride, &lfi);
+ }
+ }
+ if (apply_filter) {
+      // Update the signal block |x|. Pixel changes are only to the top
+      // and/or left boundary pixels: can we avoid the full block copy here?
+ vp8_copy_mem16x16(
+ denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset,
+ y_stride, x->thismb, 16);
+ }
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/denoising.h b/media/libvpx/libvpx/vp8/encoder/denoising.h
new file mode 100644
index 0000000000..51ae3b0ab3
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/denoising.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_DENOISING_H_
+#define VPX_VP8_ENCODER_DENOISING_H_
+
+#include "block.h"
+#include "vp8/common/loopfilter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SUM_DIFF_THRESHOLD 512
+#define SUM_DIFF_THRESHOLD_HIGH 600
+#define MOTION_MAGNITUDE_THRESHOLD (8 * 3)
+
+#define SUM_DIFF_THRESHOLD_UV (96) // (8 * 8 * 1.5)
+#define SUM_DIFF_THRESHOLD_HIGH_UV (8 * 8 * 2)
+#define SUM_DIFF_FROM_AVG_THRESH_UV (8 * 8 * 8)
+#define MOTION_MAGNITUDE_THRESHOLD_UV (8 * 3)
+
+#define MAX_GF_ARF_DENOISE_RANGE (8)
+
+enum vp8_denoiser_decision { COPY_BLOCK, FILTER_BLOCK };
+
+enum vp8_denoiser_filter_state { kNoFilter, kFilterZeroMV, kFilterNonZeroMV };
+
+enum vp8_denoiser_mode {
+ kDenoiserOff,
+ kDenoiserOnYOnly,
+ kDenoiserOnYUV,
+ kDenoiserOnYUVAggressive,
+ kDenoiserOnAdaptive
+};
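+
+/* vp8_denoiser_set_parameters() (in denoising.c) maps the encoder's integer
+ * denoiser-mode control onto this enum: 1 -> kDenoiserOnYOnly,
+ * 2 -> kDenoiserOnYUV, 3 -> kDenoiserOnYUVAggressive, and any other nonzero
+ * value falls back to kDenoiserOnYUV. */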
+
+typedef struct {
+ // Scale factor on sse threshold above which no denoising is done.
+ unsigned int scale_sse_thresh;
+ // Scale factor on motion magnitude threshold above which no
+ // denoising is done.
+ unsigned int scale_motion_thresh;
+ // Scale factor on motion magnitude below which we increase the strength of
+ // the temporal filter (in function vp8_denoiser_filter).
+ unsigned int scale_increase_filter;
+ // Scale factor to bias to ZEROMV for denoising.
+ unsigned int denoise_mv_bias;
+ // Scale factor to bias to ZEROMV for coding mode selection.
+ unsigned int pickmode_mv_bias;
+  // Quantizer threshold below which we use the segmentation map to switch off
+  // the loop filter for blocks that have been coded as ZEROMV-LAST for a
+  // certain number (consec_zerolast) of consecutive frames. Note that the
+  // delta-QP is set to 0 when the segmentation map is used to shut off the
+  // loop filter.
+ unsigned int qp_thresh;
+ // Threshold for number of consecutive frames for blocks coded as ZEROMV-LAST.
+ unsigned int consec_zerolast;
+ // Threshold for amount of spatial blur on Y channel. 0 means no spatial blur.
+ unsigned int spatial_blur;
+} denoise_params;
+
+typedef struct vp8_denoiser {
+ YV12_BUFFER_CONFIG yv12_running_avg[MAX_REF_FRAMES];
+ YV12_BUFFER_CONFIG yv12_mc_running_avg;
+ // TODO(marpan): Should remove yv12_last_source and use vp8_lookahead_peak.
+ YV12_BUFFER_CONFIG yv12_last_source;
+ unsigned char *denoise_state;
+ int num_mb_cols;
+ int denoiser_mode;
+ int threshold_aggressive_mode;
+ int nmse_source_diff;
+ int nmse_source_diff_count;
+ int qp_avg;
+ int qp_threshold_up;
+ int qp_threshold_down;
+ int bitrate_threshold;
+ denoise_params denoise_pars;
+} VP8_DENOISER;
+
+int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height,
+ int num_mb_rows, int num_mb_cols, int mode);
+
+void vp8_denoiser_free(VP8_DENOISER *denoiser);
+
+void vp8_denoiser_set_parameters(VP8_DENOISER *denoiser, int mode);
+
+void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, MACROBLOCK *x,
+ unsigned int best_sse, unsigned int zero_mv_sse,
+ int recon_yoffset, int recon_uvoffset,
+ loop_filter_info_n *lfi_n, int mb_row, int mb_col,
+ int block_index, int consec_zero_last);
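+
+/* Sketch of the expected call sequence; the dimensions, macroblock counts
+ * and mode value below are illustrative only, not taken from a real caller:
+ *
+ *   VP8_DENOISER denoiser;
+ *   if (!vp8_denoiser_allocate(&denoiser, 640, 480, 30, 40, 2)) {
+ *     // ... per macroblock, after mode selection:
+ *     // vp8_denoiser_denoise_mb(&denoiser, mb, best_sse, zero_mv_sse, ...);
+ *     vp8_denoiser_free(&denoiser);
+ *   }
+ */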
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_DENOISING_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/encodeframe.c b/media/libvpx/libvpx/vp8/encoder/encodeframe.c
new file mode 100644
index 0000000000..dc29945729
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/encodeframe.c
@@ -0,0 +1,1287 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "bitstream.h"
+#include "encodemb.h"
+#include "encodemv.h"
+#if CONFIG_MULTITHREAD
+#include "ethreading.h"
+#endif
+#include "vp8/common/common.h"
+#include "onyx_int.h"
+#include "vp8/common/extend.h"
+#include "vp8/common/entropymode.h"
+#include "vp8/common/quant_common.h"
+#include "segmentation.h"
+#include "vp8/common/setupintrarecon.h"
+#include "encodeintra.h"
+#include "vp8/common/reconinter.h"
+#include "rdopt.h"
+#include "pickinter.h"
+#include "vp8/common/findnearmv.h"
+#include <stdio.h>
+#include <limits.h>
+#include "vp8/common/invtrans.h"
+#include "vpx_ports/vpx_timer.h"
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+#include "bitstream.h"
+#endif
+#include "encodeframe.h"
+
+extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
+static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x);
+
+#ifdef MODE_STATS
+unsigned int inter_y_modes[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+unsigned int inter_uv_modes[4] = { 0, 0, 0, 0 };
+unsigned int inter_b_modes[15] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+unsigned int y_modes[5] = { 0, 0, 0, 0, 0 };
+unsigned int uv_modes[4] = { 0, 0, 0, 0 };
+unsigned int b_modes[14] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+#endif
+
+/* activity_avg must be positive, or flat regions could get a zero weight
+ * (infinite lambda), which confounds analysis.
+ * This also avoids the need for divide by zero checks in
+ * vp8_activity_masking().
+ */
+#define VP8_ACTIVITY_AVG_MIN (64)
+
+/* This is used as a reference when computing the source variance for the
+ * purposes of activity masking.
+ * Eventually this should be replaced by custom no-reference routines,
+ * which will be faster.
+ */
+static const unsigned char VP8_VAR_OFFS[16] = { 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128 };
+
+/* Original activity measure from Tim T's code. */
+static unsigned int tt_activity_measure(MACROBLOCK *x) {
+ unsigned int act;
+ unsigned int sse;
+ /* TODO: This could also be done over smaller areas (8x8), but that would
+ * require extensive changes elsewhere, as lambda is assumed to be fixed
+ * over an entire MB in most of the code.
+ * Another option is to compute four 8x8 variances, and pick a single
+ * lambda using a non-linear combination (e.g., the smallest, or second
+ * smallest, etc.).
+ */
+ act = vpx_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0,
+ &sse);
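+  /* Scale the variance-based estimate up by 16; the flatness thresholds
+   * below (5 << 12, 8 << 12) assume this scaled range.
+   */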
+ act = act << 4;
+
+ /* If the region is flat, lower the activity some more. */
+ if (act < 8 << 12) act = act < 5 << 12 ? act : 5 << 12;
+
+ return act;
+}
+
+/* Measure the activity of the current macroblock
+ * What we measure here is TBD, so it is abstracted into this function.
+ */
+#define ALT_ACT_MEASURE 1
+static unsigned int mb_activity_measure(MACROBLOCK *x, int mb_row, int mb_col) {
+ unsigned int mb_activity;
+
+ if (ALT_ACT_MEASURE) {
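+    /* use_dc_pred is true only for MBs in the first row or first column,
+     * excluding the top-left MB.
+     */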
+ int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
+
+ /* Or use an alternative. */
+ mb_activity = vp8_encode_intra(x, use_dc_pred);
+ } else {
+ /* Original activity measure from Tim T's code. */
+ mb_activity = tt_activity_measure(x);
+ }
+
+ if (mb_activity < VP8_ACTIVITY_AVG_MIN) mb_activity = VP8_ACTIVITY_AVG_MIN;
+
+ return mb_activity;
+}
+
+/* Calculate an "average" mb activity value for the frame */
+#define ACT_MEDIAN 0
+static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) {
+#if ACT_MEDIAN
+ /* Find median: Simple n^2 algorithm for experimentation */
+ {
+ unsigned int median;
+ unsigned int i, j;
+ unsigned int *sortlist;
+ unsigned int tmp;
+
+ /* Create a list to sort to */
+ CHECK_MEM_ERROR(&cpi->common.error, sortlist,
+ vpx_calloc(sizeof(unsigned int), cpi->common.MBs));
+
+ /* Copy map to sort list */
+ memcpy(sortlist, cpi->mb_activity_map,
+ sizeof(unsigned int) * cpi->common.MBs);
+
+ /* Ripple each value down to its correct position */
+ for (i = 1; i < cpi->common.MBs; ++i) {
+ for (j = i; j > 0; j--) {
+ if (sortlist[j] < sortlist[j - 1]) {
+ /* Swap values */
+ tmp = sortlist[j - 1];
+ sortlist[j - 1] = sortlist[j];
+ sortlist[j] = tmp;
+ } else
+ break;
+ }
+ }
+
+    /* Even number of MBs, so estimate the median as the mean of the two
+     * values either side of the midpoint. */
+ median = (1 + sortlist[cpi->common.MBs >> 1] +
+ sortlist[(cpi->common.MBs >> 1) + 1]) >>
+ 1;
+
+ cpi->activity_avg = median;
+
+ vpx_free(sortlist);
+ }
+#else
+ /* Simple mean for now */
+ cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
+#endif
+
+ if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) {
+ cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
+ }
+
+ /* Experimental code: return fixed value normalized for several clips */
+ if (ALT_ACT_MEASURE) cpi->activity_avg = 100000;
+}
+
+#define USE_ACT_INDEX 0
+#define OUTPUT_NORM_ACT_STATS 0
+
+#if USE_ACT_INDEX
+/* Calculate an activity index for each mb */
+static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) {
+ VP8_COMMON *const cm = &cpi->common;
+ int mb_row, mb_col;
+
+ int64_t act;
+ int64_t a;
+ int64_t b;
+
+#if OUTPUT_NORM_ACT_STATS
+ FILE *f = fopen("norm_act.stt", "a");
+ fprintf(f, "\n%12d\n", cpi->activity_avg);
+#endif
+
+ /* Reset pointers to start of activity map */
+ x->mb_activity_ptr = cpi->mb_activity_map;
+
+ /* Calculate normalized mb activity number. */
+ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+ /* for each macroblock col in image */
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+ /* Read activity from the map */
+ act = *(x->mb_activity_ptr);
+
+ /* Calculate a normalized activity number */
+ a = act + 4 * cpi->activity_avg;
+ b = 4 * act + cpi->activity_avg;
+
+ if (b >= a)
+ *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
+ else
+ *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
+
+#if OUTPUT_NORM_ACT_STATS
+ fprintf(f, " %6d", *(x->mb_activity_ptr));
+#endif
+ /* Increment activity map pointers */
+ x->mb_activity_ptr++;
+ }
+
+#if OUTPUT_NORM_ACT_STATS
+ fprintf(f, "\n");
+#endif
+ }
+
+#if OUTPUT_NORM_ACT_STATS
+ fclose(f);
+#endif
+}
+#endif
+
+/* Loop through all MBs: note the activity of each, compute the frame
+ * average, and calculate a normalized activity value for each MB.
+ */
+static void build_activity_map(VP8_COMP *cpi) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *xd = &x->e_mbd;
+ VP8_COMMON *const cm = &cpi->common;
+
+#if ALT_ACT_MEASURE
+ YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
+ int recon_yoffset;
+ int recon_y_stride = new_yv12->y_stride;
+#endif
+
+ int mb_row, mb_col;
+ unsigned int mb_activity;
+ int64_t activity_sum = 0;
+
+ /* for each macroblock row in image */
+ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+#if ALT_ACT_MEASURE
+ /* reset above block coeffs */
+ xd->up_available = (mb_row != 0);
+ recon_yoffset = (mb_row * recon_y_stride * 16);
+#endif
+ /* for each macroblock col in image */
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+#if ALT_ACT_MEASURE
+ xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
+ xd->left_available = (mb_col != 0);
+ recon_yoffset += 16;
+#endif
+ /* Copy current mb to a buffer */
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
+
+ /* measure activity */
+ mb_activity = mb_activity_measure(x, mb_row, mb_col);
+
+ /* Keep frame sum */
+ activity_sum += mb_activity;
+
+ /* Store MB level activity details. */
+ *x->mb_activity_ptr = mb_activity;
+
+ /* Increment activity map pointer */
+ x->mb_activity_ptr++;
+
+ /* adjust to the next column of source macroblocks */
+ x->src.y_buffer += 16;
+ }
+
+ /* adjust to the next row of mbs */
+ x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
+
+#if ALT_ACT_MEASURE
+ /* extend the recon for intra prediction */
+ vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8,
+ xd->dst.v_buffer + 8);
+#endif
+ }
+
+ /* Calculate an "average" MB activity */
+ calc_av_activity(cpi, activity_sum);
+
+#if USE_ACT_INDEX
+ /* Calculate an activity index number of each mb */
+ calc_activity_index(cpi, x);
+#endif
+}
+
+/* Macroblock activity masking */
+void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) {
+#if USE_ACT_INDEX
+ x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
+ x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
+ x->errorperbit += (x->errorperbit == 0);
+#else
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
+
+ /* Apply the masking to the RD multiplier. */
+ a = act + (2 * cpi->activity_avg);
+ b = (2 * act) + cpi->activity_avg;
+
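+  /* The ratio b/a = (2*act + avg) / (act + 2*avg) lies in (1/2, 2), so
+   * activity masking at most halves or doubles rdmult; act == avg leaves
+   * it unchanged.
+   */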
+ x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
+ x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
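+  /* Round a zero result up to 1; errorperbit is presumably assumed to be
+   * nonzero downstream.
+   */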
+ x->errorperbit += (x->errorperbit == 0);
+#endif
+
+ /* Activity based Zbin adjustment */
+ adjust_act_zbin(cpi, x);
+}
+
+static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
+ MACROBLOCK *x, MACROBLOCKD *xd, TOKENEXTRA **tp,
+ int *segment_counts, int *totalrate) {
+ int recon_yoffset, recon_uvoffset;
+ int mb_col;
+ int ref_fb_idx = cm->lst_fb_idx;
+ int dst_fb_idx = cm->new_fb_idx;
+ int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
+ int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
+ int map_index = (mb_row * cpi->common.mb_cols);
+
+#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ const int num_part = (1 << cm->multi_token_partition);
+ TOKENEXTRA *tp_start = cpi->tok;
+ vp8_writer *w;
+#endif
+
+#if CONFIG_MULTITHREAD
+ const int nsync = cpi->mt_sync_range;
+ vpx_atomic_int rightmost_col = VPX_ATOMIC_INIT(cm->mb_cols + nsync);
+ const vpx_atomic_int *last_row_current_mb_col;
+ vpx_atomic_int *current_mb_col = NULL;
+
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) {
+ current_mb_col = &cpi->mt_current_mb_col[mb_row];
+ }
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0 && mb_row != 0) {
+ last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
+ } else {
+ last_row_current_mb_col = &rightmost_col;
+ }
+#endif
+
+#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ if (num_part > 1)
+ w = &cpi->bc[1 + (mb_row % num_part)];
+ else
+ w = &cpi->bc[1];
+#endif
+
+ /* reset above block coeffs */
+ xd->above_context = cm->above_context;
+
+ xd->up_available = (mb_row != 0);
+ recon_yoffset = (mb_row * recon_y_stride * 16);
+ recon_uvoffset = (mb_row * recon_uv_stride * 8);
+
+ cpi->tplist[mb_row].start = *tp;
+ /* printf("Main mb_row = %d\n", mb_row); */
+
+  /* Distance of MB to the top & bottom edges, specified in 1/8th pel
+   * units, as they are always compared to values that are in 1/8th pel
+   * units.
+   */
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
+ xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
+
+ /* Set up limit values for vertical motion vector components
+ * to prevent them extending beyond the UMV borders
+ */
+ x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
+
+ /* Set the mb activity pointer to the start of the row. */
+ x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
+
+ /* for each macroblock col in image */
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ *tp = cpi->tok;
+#endif
+ /* Distance of Mb to the left & right edges, specified in
+ * 1/8th pel units as they are always compared to values
+ * that are in 1/8th pel units
+ */
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
+
+ /* Set up limit values for horizontal motion vector components
+ * to prevent them extending beyond the UMV borders
+ */
+ x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_col_max =
+ ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
+
+ xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
+ xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
+ xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
+ xd->left_available = (mb_col != 0);
+
+ x->rddiv = cpi->RDDIV;
+ x->rdmult = cpi->RDMULT;
+
+ /* Copy current mb to a buffer */
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
+
+#if CONFIG_MULTITHREAD
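+    /* Row-level sync: publish our progress every nsync columns and wait
+     * until the row above is at least nsync columns ahead.
+     */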
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) {
+ if (((mb_col - 1) % nsync) == 0) {
+ vpx_atomic_store_release(current_mb_col, mb_col - 1);
+ }
+
+ if (mb_row && !(mb_col & (nsync - 1))) {
+ vp8_atomic_spin_wait(mb_col, last_row_current_mb_col, nsync);
+ }
+ }
+#endif
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x);
+
+ /* Is segmentation enabled */
+ /* MB level adjustment to quantizer */
+ if (xd->segmentation_enabled) {
+ /* Code to set segment id in xd->mbmi.segment_id for current MB
+ * (with range checking)
+ */
+ if (cpi->segmentation_map[map_index + mb_col] <= 3) {
+ xd->mode_info_context->mbmi.segment_id =
+ cpi->segmentation_map[map_index + mb_col];
+ } else {
+ xd->mode_info_context->mbmi.segment_id = 0;
+ }
+
+ vp8cx_mb_init_quantizer(cpi, x, 1);
+ } else {
+ /* Set to Segment 0 by default */
+ xd->mode_info_context->mbmi.segment_id = 0;
+ }
+
+ x->active_ptr = cpi->active_map + map_index + mb_col;
+
+ if (cm->frame_type == KEY_FRAME) {
+ *totalrate += vp8cx_encode_intra_macroblock(cpi, x, tp);
+#ifdef MODE_STATS
+ y_modes[xd->mbmi.mode]++;
+#endif
+ } else {
+ *totalrate += vp8cx_encode_inter_macroblock(
+ cpi, x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col);
+
+#ifdef MODE_STATS
+ inter_y_modes[xd->mbmi.mode]++;
+
+ if (xd->mbmi.mode == SPLITMV) {
+ int b;
+
+ for (b = 0; b < xd->mbmi.partition_count; ++b) {
+ inter_b_modes[x->partition->bmi[b].mode]++;
+ }
+ }
+
+#endif
+
+ // Keep track of how many (consecutive) times a block is coded
+ // as ZEROMV_LASTREF, for base layer frames.
+      // Reset to 0 if it is coded as anything else.
+ if (cpi->current_layer == 0) {
+ if (xd->mode_info_context->mbmi.mode == ZEROMV &&
+ xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
+ // Increment, check for wrap-around.
+ if (cpi->consec_zero_last[map_index + mb_col] < 255) {
+ cpi->consec_zero_last[map_index + mb_col] += 1;
+ }
+ if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
+ cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
+ }
+ } else {
+ cpi->consec_zero_last[map_index + mb_col] = 0;
+ cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
+ }
+ if (x->zero_last_dot_suppress) {
+ cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
+ }
+ }
+
+      /* Special case code for cyclic refresh:
+       * If cyclic update is enabled, copy xd->mbmi.segment_id (which may
+       * have been updated based on mode during
+       * vp8cx_encode_inter_macroblock()) back into the global
+       * segmentation map.
+       */
+ if ((cpi->current_layer == 0) &&
+ (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)) {
+ cpi->segmentation_map[map_index + mb_col] =
+ xd->mode_info_context->mbmi.segment_id;
+
+        /* If the block has been refreshed, mark it as clean (the
+         * magnitude of the negative value influences how long it will
+         * be before we consider another refresh).
+         * Else, if it was coded (last frame 0,0) and has not already
+         * been refreshed, mark it as a candidate for cleanup next time
+         * (marked 0); otherwise mark it as dirty (1).
+         */
+ if (xd->mode_info_context->mbmi.segment_id) {
+ cpi->cyclic_refresh_map[map_index + mb_col] = -1;
+ } else if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
+ (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) {
+ if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
+ cpi->cyclic_refresh_map[map_index + mb_col] = 0;
+ }
+ } else {
+ cpi->cyclic_refresh_map[map_index + mb_col] = 1;
+ }
+ }
+ }
+
+ cpi->tplist[mb_row].stop = *tp;
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ /* pack tokens for this MB */
+ {
+ int tok_count = *tp - tp_start;
+ vp8_pack_tokens(w, tp_start, tok_count);
+ }
+#endif
+ /* Increment pointer into gf usage flags structure. */
+ x->gf_active_ptr++;
+
+ /* Increment the activity mask pointers. */
+ x->mb_activity_ptr++;
+
+ /* adjust to the next column of macroblocks */
+ x->src.y_buffer += 16;
+ x->src.u_buffer += 8;
+ x->src.v_buffer += 8;
+
+ recon_yoffset += 16;
+ recon_uvoffset += 8;
+
+ /* Keep track of segment usage */
+ segment_counts[xd->mode_info_context->mbmi.segment_id]++;
+
+ /* skip to next mb */
+ xd->mode_info_context++;
+ x->partition_info++;
+ xd->above_context++;
+ }
+
+ /* extend the recon for intra prediction */
+ vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16,
+ xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
+
+#if CONFIG_MULTITHREAD
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) {
+ vpx_atomic_store_release(current_mb_col,
+ vpx_atomic_load_acquire(&rightmost_col));
+ }
+#endif
+
+ /* this is to account for the border */
+ xd->mode_info_context++;
+ x->partition_info++;
+}
+
+static void init_encode_frame_mb_context(VP8_COMP *cpi) {
+ MACROBLOCK *const x = &cpi->mb;
+ VP8_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ /* GF active flags data structure */
+ x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
+
+ /* Activity map pointer */
+ x->mb_activity_ptr = cpi->mb_activity_map;
+
+ x->act_zbin_adj = 0;
+
+ x->partition_info = x->pi;
+
+ xd->mode_info_context = cm->mi;
+ xd->mode_info_stride = cm->mode_info_stride;
+
+ xd->frame_type = cm->frame_type;
+
+ /* reset intra mode contexts */
+ if (cm->frame_type == KEY_FRAME) vp8_init_mbmode_probs(cm);
+
+ /* Copy data over into macro block data structures. */
+ x->src = *cpi->Source;
+ xd->pre = cm->yv12_fb[cm->lst_fb_idx];
+ xd->dst = cm->yv12_fb[cm->new_fb_idx];
+
+ /* set up frame for intra coded blocks */
+ vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
+
+ vp8_build_block_offsets(x);
+
+ xd->mode_info_context->mbmi.mode = DC_PRED;
+ xd->mode_info_context->mbmi.uv_mode = DC_PRED;
+
+ xd->left_context = &cm->left_context;
+
+ x->mvc = cm->fc.mvc;
+
+ memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
+
+ /* Special case treatment when GF and ARF are not sensible options
+ * for reference
+ */
+ if (cpi->ref_frame_flags == VP8_LAST_FRAME) {
+ vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 255,
+ 128);
+ } else if ((cpi->oxcf.number_of_layers > 1) &&
+ (cpi->ref_frame_flags == VP8_GOLD_FRAME)) {
+ vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 255);
+ } else if ((cpi->oxcf.number_of_layers > 1) &&
+ (cpi->ref_frame_flags == VP8_ALTR_FRAME)) {
+ vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 1);
+ } else {
+ vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded,
+ cpi->prob_last_coded, cpi->prob_gf_coded);
+ }
+
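+  /* In full-pixel-only mode, mask off the three fractional bits of each
+   * motion vector component (MVs are in 1/8th pel units).
+   */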
+ xd->fullpixel_mask = ~0;
+ if (cm->full_pixel) xd->fullpixel_mask = ~7;
+
+ vp8_zero(x->coef_counts);
+ vp8_zero(x->ymode_count);
+ vp8_zero(x->uv_mode_count);
+ x->prediction_error = 0;
+ x->intra_error = 0;
+ vp8_zero(x->count_mb_ref_frame_usage);
+}
+
+#if CONFIG_MULTITHREAD
+static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread) {
+ int i = 0;
+ do {
+ int j = 0;
+ do {
+ int k = 0;
+ do {
+ /* at every context */
+
+ /* calc probs and branch cts for this frame only */
+ int t = 0; /* token/prob index */
+
+ do {
+ x->coef_counts[i][j][k][t] += x_thread->coef_counts[i][j][k][t];
+ } while (++t < ENTROPY_NODES);
+ } while (++k < PREV_COEF_CONTEXTS);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES);
+}
+#endif // CONFIG_MULTITHREAD
+
+void vp8_encode_frame(VP8_COMP *cpi) {
+ int mb_row;
+ MACROBLOCK *const x = &cpi->mb;
+ VP8_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ TOKENEXTRA *tp = cpi->tok;
+ int segment_counts[MAX_MB_SEGMENTS];
+ int totalrate;
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ BOOL_CODER *bc = &cpi->bc[1]; /* bc[0] is for control partition */
+ const int num_part = (1 << cm->multi_token_partition);
+#endif
+
+ memset(segment_counts, 0, sizeof(segment_counts));
+ totalrate = 0;
+
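+  /* Real-time compressor speed: a negative cpu_used pins the speed
+   * directly; otherwise it is adapted per frame.
+   */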
+ if (cpi->compressor_speed == 2) {
+ if (cpi->oxcf.cpu_used < 0) {
+ cpi->Speed = -(cpi->oxcf.cpu_used);
+ } else {
+ vp8_auto_select_speed(cpi);
+ }
+ }
+
+ /* Functions setup for all frame types so we can use MC in AltRef */
+ if (!cm->use_bilinear_mc_filter) {
+ xd->subpixel_predict = vp8_sixtap_predict4x4;
+ xd->subpixel_predict8x4 = vp8_sixtap_predict8x4;
+ xd->subpixel_predict8x8 = vp8_sixtap_predict8x8;
+ xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
+ } else {
+ xd->subpixel_predict = vp8_bilinear_predict4x4;
+ xd->subpixel_predict8x4 = vp8_bilinear_predict8x4;
+ xd->subpixel_predict8x8 = vp8_bilinear_predict8x8;
+ xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
+ }
+
+ cpi->mb.skip_true_count = 0;
+ cpi->tok_count = 0;
+
+#if 0
+ /* Experimental code */
+ cpi->frame_distortion = 0;
+ cpi->last_mb_distortion = 0;
+#endif
+
+ xd->mode_info_context = cm->mi;
+
+ vp8_zero(cpi->mb.MVcount);
+
+ vp8cx_frame_init_quantizer(cpi);
+
+ vp8_initialize_rd_consts(cpi, x,
+ vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
+
+ vp8cx_initialize_me_consts(cpi, cm->base_qindex);
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+ /* Initialize encode frame context. */
+ init_encode_frame_mb_context(cpi);
+
+ /* Build a frame level activity map */
+ build_activity_map(cpi);
+ }
+
+ /* re-init encode frame context. */
+ init_encode_frame_mb_context(cpi);
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ {
+ int i;
+ for (i = 0; i < num_part; ++i) {
+ vp8_start_encode(&bc[i], cpi->partition_d[i + 1],
+ cpi->partition_d_end[i + 1]);
+ bc[i].error = &cm->error;
+ }
+ }
+
+#endif
+
+ {
+ struct vpx_usec_timer emr_timer;
+ vpx_usec_timer_start(&emr_timer);
+
+#if CONFIG_MULTITHREAD
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
+ int i;
+
+ vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei,
+ cpi->encoding_thread_count);
+
+ for (i = 0; i < cm->mb_rows; ++i)
+ vpx_atomic_store_release(&cpi->mt_current_mb_col[i], -1);
+
+ for (i = 0; i < cpi->encoding_thread_count; ++i) {
+ sem_post(&cpi->h_event_start_encoding[i]);
+ }
+
+ for (mb_row = 0; mb_row < cm->mb_rows;
+ mb_row += (cpi->encoding_thread_count + 1)) {
+ vp8_zero(cm->left_context);
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ tp = cpi->tok;
+#else
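+        /* Point each row at its own slice of the token buffer; the
+         * 16 * 24 per-MB spacing presumably matches the worst case assumed
+         * when cpi->tok was allocated.
+         */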
+ tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
+#endif
+
+ encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
+
+ /* adjust to the next row of mbs */
+ x->src.y_buffer +=
+ 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) -
+ 16 * cm->mb_cols;
+ x->src.u_buffer +=
+ 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
+ 8 * cm->mb_cols;
+ x->src.v_buffer +=
+ 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
+ 8 * cm->mb_cols;
+
+ xd->mode_info_context +=
+ xd->mode_info_stride * cpi->encoding_thread_count;
+ x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
+ x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;
+ }
+ /* Wait for all the threads to finish. */
+ for (i = 0; i < cpi->encoding_thread_count; ++i) {
+ sem_wait(&cpi->h_event_end_encoding[i]);
+ }
+
+ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+ cpi->tok_count += (unsigned int)(cpi->tplist[mb_row].stop -
+ cpi->tplist[mb_row].start);
+ }
+
+      if (xd->segmentation_enabled) {
+        int j;
+
+        for (i = 0; i < cpi->encoding_thread_count; ++i) {
+          for (j = 0; j < 4; ++j) {
+            segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
+          }
+        }
+      }
+
+ for (i = 0; i < cpi->encoding_thread_count; ++i) {
+ int mode_count;
+ int c_idx;
+ totalrate += cpi->mb_row_ei[i].totalrate;
+
+ cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;
+
+ for (mode_count = 0; mode_count < VP8_YMODES; ++mode_count) {
+ cpi->mb.ymode_count[mode_count] +=
+ cpi->mb_row_ei[i].mb.ymode_count[mode_count];
+ }
+
+ for (mode_count = 0; mode_count < VP8_UV_MODES; ++mode_count) {
+ cpi->mb.uv_mode_count[mode_count] +=
+ cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
+ }
+
+ for (c_idx = 0; c_idx < MVvals; ++c_idx) {
+ cpi->mb.MVcount[0][c_idx] += cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
+ cpi->mb.MVcount[1][c_idx] += cpi->mb_row_ei[i].mb.MVcount[1][c_idx];
+ }
+
+ cpi->mb.prediction_error += cpi->mb_row_ei[i].mb.prediction_error;
+ cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;
+
+ for (c_idx = 0; c_idx < MAX_REF_FRAMES; ++c_idx) {
+ cpi->mb.count_mb_ref_frame_usage[c_idx] +=
+ cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
+ }
+
+ for (c_idx = 0; c_idx < MAX_ERROR_BINS; ++c_idx) {
+ cpi->mb.error_bins[c_idx] += cpi->mb_row_ei[i].mb.error_bins[c_idx];
+ }
+
+ /* add up counts for each thread */
+ sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
+ }
+
+ } else
+#endif // CONFIG_MULTITHREAD
+ {
+
+ /* for each macroblock row in image */
+ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+ vp8_zero(cm->left_context);
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ tp = cpi->tok;
+#endif
+
+ encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
+
+ /* adjust to the next row of mbs */
+ x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
+ x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
+ x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
+ }
+
+ cpi->tok_count = (unsigned int)(tp - cpi->tok);
+ }
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ {
+ int i;
+ for (i = 0; i < num_part; ++i) {
+ vp8_stop_encode(&bc[i]);
+ cpi->partition_sz[i + 1] = bc[i].pos;
+ }
+ }
+#endif
+
+ vpx_usec_timer_mark(&emr_timer);
+ cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
+ }
+
+ // Work out the segment probabilities if segmentation is enabled
+ // and needs to be updated
+ if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
+ int tot_count;
+ int i;
+
+ /* Set to defaults */
+ memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
+
+ tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] +
+ segment_counts[3];
+
+ if (tot_count) {
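+      /* Two-level segment tree: probs[0] splits {0,1} from {2,3},
+       * probs[1] picks 0 within {0,1}, probs[2] picks 2 within {2,3}.
+       */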
+ xd->mb_segment_tree_probs[0] =
+ ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;
+
+ tot_count = segment_counts[0] + segment_counts[1];
+
+ if (tot_count > 0) {
+ xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
+ }
+
+ tot_count = segment_counts[2] + segment_counts[3];
+
+ if (tot_count > 0) {
+ xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
+ }
+
+ /* Zero probabilities not allowed */
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
+ if (xd->mb_segment_tree_probs[i] == 0) xd->mb_segment_tree_probs[i] = 1;
+ }
+ }
+ }
+
+ /* projected_frame_size in units of BYTES */
+ cpi->projected_frame_size = totalrate >> 8;
+
+ /* Make a note of the percentage MBs coded Intra. */
+ if (cm->frame_type == KEY_FRAME) {
+ cpi->this_frame_percent_intra = 100;
+ } else {
+ int tot_modes;
+
+ tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] +
+ cpi->mb.count_mb_ref_frame_usage[LAST_FRAME] +
+ cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME] +
+ cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
+
+ if (tot_modes) {
+ cpi->this_frame_percent_intra =
+ cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
+ }
+ }
+
+#if !CONFIG_REALTIME_ONLY
+ /* Adjust the projected reference frame usage probability numbers to
+ * reflect what we have just seen. This may be useful when we make
+ * multiple iterations of the recode loop rather than continuing to use
+ * values from the previous frame.
+ */
+ if ((cm->frame_type != KEY_FRAME) &&
+ ((cpi->oxcf.number_of_layers > 1) ||
+ (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame))) {
+ vp8_convert_rfct_to_prob(cpi);
+ }
+#endif
+}
+void vp8_setup_block_ptrs(MACROBLOCK *x) {
+ int r, c;
+ int i;
+
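+  /* src_diff layout: 256 Y samples (16x16), then 64 U and 64 V samples
+   * (8x8 each) at offsets 256 and 320, and the 16 Y2 DC values at 384.
+   */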
+ for (r = 0; r < 4; ++r) {
+ for (c = 0; c < 4; ++c) {
+ x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
+ }
+ }
+
+ for (r = 0; r < 2; ++r) {
+ for (c = 0; c < 2; ++c) {
+ x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
+ }
+ }
+
+ for (r = 0; r < 2; ++r) {
+ for (c = 0; c < 2; ++c) {
+ x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
+ }
+ }
+
+ x->block[24].src_diff = x->src_diff + 384;
+
+ for (i = 0; i < 25; ++i) {
+ x->block[i].coeff = x->coeff + i * 16;
+ }
+}
+
+void vp8_build_block_offsets(MACROBLOCK *x) {
+ int block = 0;
+ int br, bc;
+
+ vp8_build_block_doffsets(&x->e_mbd);
+
+ /* y blocks */
+ x->thismb_ptr = &x->thismb[0];
+ for (br = 0; br < 4; ++br) {
+ for (bc = 0; bc < 4; ++bc) {
+ BLOCK *this_block = &x->block[block];
+ this_block->base_src = &x->thismb_ptr;
+ this_block->src_stride = 16;
+ this_block->src = 4 * br * 16 + 4 * bc;
+ ++block;
+ }
+ }
+
+ /* u blocks */
+ for (br = 0; br < 2; ++br) {
+ for (bc = 0; bc < 2; ++bc) {
+ BLOCK *this_block = &x->block[block];
+ this_block->base_src = &x->src.u_buffer;
+ this_block->src_stride = x->src.uv_stride;
+ this_block->src = 4 * br * this_block->src_stride + 4 * bc;
+ ++block;
+ }
+ }
+
+ /* v blocks */
+ for (br = 0; br < 2; ++br) {
+ for (bc = 0; bc < 2; ++bc) {
+ BLOCK *this_block = &x->block[block];
+ this_block->base_src = &x->src.v_buffer;
+ this_block->src_stride = x->src.uv_stride;
+ this_block->src = 4 * br * this_block->src_stride + 4 * bc;
+ ++block;
+ }
+ }
+}
+
+static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) {
+ const MACROBLOCKD *xd = &x->e_mbd;
+ const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
+ const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
+
+#ifdef MODE_STATS
+ const int is_key = cpi->common.frame_type == KEY_FRAME;
+
+ ++(is_key ? uv_modes : inter_uv_modes)[uvm];
+
+ if (m == B_PRED) {
+ unsigned int *const bct = is_key ? b_modes : inter_b_modes;
+
+ int b = 0;
+
+ do {
+ ++bct[xd->block[b].bmi.mode];
+ } while (++b < 16);
+ }
+
+#else
+ (void)cpi;
+#endif
+
+ ++x->ymode_count[m];
+ ++x->uv_mode_count[uvm];
+}
+
+/* Experimental stub function to create a per MB zbin adjustment based on
+ * some previously calculated measure of MB activity.
+ */
+static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) {
+#if USE_ACT_INDEX
+ x->act_zbin_adj = *(x->mb_activity_ptr);
+#else
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
+
+ /* Apply the masking to the RD multiplier. */
+ a = act + 4 * cpi->activity_avg;
+ b = 4 * act + cpi->activity_avg;
+
+ if (act > cpi->activity_avg) {
+ x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
+ } else {
+ x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
+ }
+#endif
+}
+
+int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
+ TOKENEXTRA **t) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ int rate;
+
+ if (cpi->sf.RD && cpi->compressor_speed != 2) {
+ vp8_rd_pick_intra_mode(x, &rate);
+ } else {
+ vp8_pick_intra_mode(x, &rate);
+ }
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+ adjust_act_zbin(cpi, x);
+ vp8_update_zbin_extra(cpi, x);
+ }
+
+ if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) {
+ vp8_encode_intra4x4mby(x);
+ } else {
+ vp8_encode_intra16x16mby(x);
+ }
+
+ vp8_encode_intra16x16mbuv(x);
+
+ sum_intra_stats(cpi, x);
+
+ vp8_tokenize_mb(cpi, x, t);
+
+ if (xd->mode_info_context->mbmi.mode != B_PRED) vp8_inverse_transform_mby(xd);
+
+ vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
+ xd->dst.u_buffer, xd->dst.v_buffer,
+ xd->dst.uv_stride, xd->eobs + 16);
+ return rate;
+}
+#ifdef SPEEDSTATS
+extern int cnt_pm;
+#endif
+
+extern void vp8_fix_contexts(MACROBLOCKD *x);
+
+int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
+ int recon_yoffset, int recon_uvoffset,
+ int mb_row, int mb_col) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int intra_error = 0;
+ int rate;
+ int distortion;
+
+ x->skip = 0;
+
+ if (xd->segmentation_enabled) {
+ x->encode_breakout =
+ cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
+ } else {
+ x->encode_breakout = cpi->oxcf.encode_breakout;
+ }
+
+#if CONFIG_TEMPORAL_DENOISING
+ /* Reset the best sse mode/mv for each macroblock. */
+ x->best_reference_frame = INTRA_FRAME;
+ x->best_zeromv_reference_frame = INTRA_FRAME;
+ x->best_sse_inter_mode = 0;
+ x->best_sse_mv.as_int = 0;
+ x->need_to_clamp_best_mvs = 0;
+#endif
+
+ if (cpi->sf.RD) {
+ int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
+
+ /* Are we using the fast quantizer for the mode selection? */
+ if (cpi->sf.use_fastquant_for_pick) {
+ x->quantize_b = vp8_fast_quantize_b;
+
+ /* the fast quantizer does not use zbin_extra, so
+ * do not recalculate */
+ x->zbin_mode_boost_enabled = 0;
+ }
+ vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
+ &distortion, &intra_error, mb_row, mb_col);
+
+ /* switch back to the regular quantizer for the encode */
+ if (cpi->sf.improved_quant) {
+ x->quantize_b = vp8_regular_quantize_b;
+ }
+
+    /* restore x->zbin_mode_boost_enabled */
+ x->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
+
+ } else {
+ vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
+ &distortion, &intra_error, mb_row, mb_col);
+ }
+
+ x->prediction_error += distortion;
+ x->intra_error += intra_error;
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+    /* Adjust the zbin based on this MB's activity. */
+ adjust_act_zbin(cpi, x);
+ }
+
+#if 0
+ /* Experimental RD code */
+ cpi->frame_distortion += distortion;
+ cpi->last_mb_distortion = distortion;
+#endif
+
+  /* MB level adjustment to quantizer setup */
+ if (xd->segmentation_enabled) {
+ /* If cyclic update enabled */
+ if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled) {
+ /* Clear segment_id back to 0 if not coded (last frame 0,0) */
+ if ((xd->mode_info_context->mbmi.segment_id == 1) &&
+ ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) ||
+ (xd->mode_info_context->mbmi.mode != ZEROMV))) {
+ xd->mode_info_context->mbmi.segment_id = 0;
+
+ /* segment_id changed, so update */
+ vp8cx_mb_init_quantizer(cpi, x, 1);
+ }
+ }
+ }
+
+ {
+ /* Experimental code.
+ * Special case for gf and arf zeromv modes, for 1 temporal layer.
+     * Increase zbin size to suppress noise.
+ */
+ x->zbin_mode_boost = 0;
+ if (x->zbin_mode_boost_enabled) {
+ if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
+ if (xd->mode_info_context->mbmi.mode == ZEROMV) {
+ if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME &&
+ cpi->oxcf.number_of_layers == 1) {
+ x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ } else {
+ x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ }
+ } else if (xd->mode_info_context->mbmi.mode == SPLITMV) {
+ x->zbin_mode_boost = 0;
+ } else {
+ x->zbin_mode_boost = MV_ZBIN_BOOST;
+ }
+ }
+ }
+
+    /* The fast quantizer doesn't use zbin_extra, so only update it when
+     * using the regular quantizer. */
+ if (cpi->sf.improved_quant) vp8_update_zbin_extra(cpi, x);
+ }
+
+ x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame]++;
+
+ if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
+ vp8_encode_intra16x16mbuv(x);
+
+ if (xd->mode_info_context->mbmi.mode == B_PRED) {
+ vp8_encode_intra4x4mby(x);
+ } else {
+ vp8_encode_intra16x16mby(x);
+ }
+
+ sum_intra_stats(cpi, x);
+ } else {
+ int ref_fb_idx;
+
+ if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
+ ref_fb_idx = cpi->common.lst_fb_idx;
+ } else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) {
+ ref_fb_idx = cpi->common.gld_fb_idx;
+ } else {
+ ref_fb_idx = cpi->common.alt_fb_idx;
+ }
+
+ xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
+ xd->pre.u_buffer =
+ cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
+ xd->pre.v_buffer =
+ cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
+
+ if (!x->skip) {
+ vp8_encode_inter16x16(x);
+ } else {
+ vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
+ xd->dst.v_buffer, xd->dst.y_stride,
+ xd->dst.uv_stride);
+ }
+ }
+
+ if (!x->skip) {
+ vp8_tokenize_mb(cpi, x, t);
+
+ if (xd->mode_info_context->mbmi.mode != B_PRED) {
+ vp8_inverse_transform_mby(xd);
+ }
+
+ vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
+ xd->dst.u_buffer, xd->dst.v_buffer,
+ xd->dst.uv_stride, xd->eobs + 16);
+ } else {
+ /* always set mb_skip_coeff as it is needed by the loopfilter */
+ xd->mode_info_context->mbmi.mb_skip_coeff = 1;
+
+ if (cpi->common.mb_no_coeff_skip) {
+ x->skip_true_count++;
+ vp8_fix_contexts(xd);
+ } else {
+ vp8_stuff_mb(cpi, x, t);
+ }
+ }
+
+ return rate;
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/encodeframe.h b/media/libvpx/libvpx/vp8/encoder/encodeframe.h
new file mode 100644
index 0000000000..cc8cf4d713
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/encodeframe.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VPX_VP8_ENCODER_ENCODEFRAME_H_
+#define VPX_VP8_ENCODER_ENCODEFRAME_H_
+
+#include "vp8/encoder/tokenize.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP8_COMP;
+struct macroblock;
+
+void vp8_activity_masking(struct VP8_COMP *cpi, MACROBLOCK *x);
+
+void vp8_build_block_offsets(struct macroblock *x);
+
+void vp8_setup_block_ptrs(struct macroblock *x);
+
+void vp8_encode_frame(struct VP8_COMP *cpi);
+
+int vp8cx_encode_inter_macroblock(struct VP8_COMP *cpi, struct macroblock *x,
+ TOKENEXTRA **t, int recon_yoffset,
+ int recon_uvoffset, int mb_row, int mb_col);
+
+int vp8cx_encode_intra_macroblock(struct VP8_COMP *cpi, struct macroblock *x,
+ TOKENEXTRA **t);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_ENCODEFRAME_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/encodeintra.c b/media/libvpx/libvpx/vp8/encoder/encodeintra.c
new file mode 100644
index 0000000000..7d448c0ea0
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/encodeintra.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "vp8/encoder/quantize.h"
+#include "vp8/common/reconintra.h"
+#include "vp8/common/reconintra4x4.h"
+#include "encodemb.h"
+#include "vp8/common/invtrans.h"
+#include "encodeintra.h"
+
+int vp8_encode_intra(MACROBLOCK *x, int use_dc_pred) {
+ int i;
+ int intra_pred_var = 0;
+
+ if (use_dc_pred) {
+ x->e_mbd.mode_info_context->mbmi.mode = DC_PRED;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
+ x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+
+ vp8_encode_intra16x16mby(x);
+
+ vp8_inverse_transform_mby(&x->e_mbd);
+ } else {
+ for (i = 0; i < 16; ++i) {
+ x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;
+ vp8_encode_intra4x4block(x, i);
+ }
+ }
+
+ intra_pred_var = vpx_get_mb_ss(x->src_diff);
+
+ return intra_pred_var;
+}
+
+void vp8_encode_intra4x4block(MACROBLOCK *x, int ib) {
+ BLOCKD *b = &x->e_mbd.block[ib];
+ BLOCK *be = &x->block[ib];
+ int dst_stride = x->e_mbd.dst.y_stride;
+ unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;
+ unsigned char *Above = dst - dst_stride;
+ unsigned char *yleft = dst - 1;
+ unsigned char top_left = Above[-1];
+
+ vp8_intra4x4_predict(Above, yleft, dst_stride, b->bmi.as_mode, b->predictor,
+ 16, top_left);
+
+ vp8_subtract_b(be, b, 16);
+
+ x->short_fdct4x4(be->src_diff, be->coeff, 32);
+
+ x->quantize_b(be, b);
+
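+  /* With at most one nonzero (DC) coefficient, the cheaper DC-only
+   * reconstruction suffices.
+   */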
+ if (*b->eob > 1) {
+ vp8_short_idct4x4llm(b->dqcoeff, b->predictor, 16, dst, dst_stride);
+ } else {
+ vp8_dc_only_idct_add(b->dqcoeff[0], b->predictor, 16, dst, dst_stride);
+ }
+}
+
+void vp8_encode_intra4x4mby(MACROBLOCK *mb) {
+ int i;
+
+ MACROBLOCKD *xd = &mb->e_mbd;
+ intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);
+
+ for (i = 0; i < 16; ++i) vp8_encode_intra4x4block(mb, i);
+ return;
+}
+
+void vp8_encode_intra16x16mby(MACROBLOCK *x) {
+ BLOCK *b = &x->block[0];
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
+ xd->dst.y_buffer - 1, xd->dst.y_stride,
+ xd->dst.y_buffer, xd->dst.y_stride);
+
+ vp8_subtract_mby(x->src_diff, *(b->base_src), b->src_stride, xd->dst.y_buffer,
+ xd->dst.y_stride);
+
+ vp8_transform_intra_mby(x);
+
+ vp8_quantize_mby(x);
+
+ if (x->optimize) vp8_optimize_mby(x);
+}
+
+void vp8_encode_intra16x16mbuv(MACROBLOCK *x) {
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ vp8_build_intra_predictors_mbuv_s(xd, xd->dst.u_buffer - xd->dst.uv_stride,
+ xd->dst.v_buffer - xd->dst.uv_stride,
+ xd->dst.u_buffer - 1, xd->dst.v_buffer - 1,
+ xd->dst.uv_stride, xd->dst.u_buffer,
+ xd->dst.v_buffer, xd->dst.uv_stride);
+
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->src.uv_stride, xd->dst.u_buffer, xd->dst.v_buffer,
+ xd->dst.uv_stride);
+
+ vp8_transform_mbuv(x);
+
+ vp8_quantize_mbuv(x);
+
+ if (x->optimize) vp8_optimize_mbuv(x);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/encodeintra.h b/media/libvpx/libvpx/vp8/encoder/encodeintra.h
new file mode 100644
index 0000000000..9a378abf49
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/encodeintra.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_ENCODEINTRA_H_
+#define VPX_VP8_ENCODER_ENCODEINTRA_H_
+#include "onyx_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int vp8_encode_intra(MACROBLOCK *x, int use_dc_pred);
+void vp8_encode_intra16x16mby(MACROBLOCK *x);
+void vp8_encode_intra16x16mbuv(MACROBLOCK *x);
+void vp8_encode_intra4x4mby(MACROBLOCK *mb);
+void vp8_encode_intra4x4block(MACROBLOCK *x, int ib);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_ENCODEINTRA_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/encodemb.c b/media/libvpx/libvpx/vp8/encoder/encodemb.c
new file mode 100644
index 0000000000..3fd8d5fabe
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/encodemb.c
@@ -0,0 +1,512 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_dsp_rtcd.h"
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "encodemb.h"
+#include "vp8/common/reconinter.h"
+#include "vp8/encoder/quantize.h"
+#include "tokenize.h"
+#include "vp8/common/invtrans.h"
+#include "vpx_mem/vpx_mem.h"
+#include "rdopt.h"
+
+void vp8_subtract_b(BLOCK *be, BLOCKD *bd, int pitch) {
+ unsigned char *src_ptr = (*(be->base_src) + be->src);
+ short *diff_ptr = be->src_diff;
+ unsigned char *pred_ptr = bd->predictor;
+ int src_stride = be->src_stride;
+
+ vpx_subtract_block(4, 4, diff_ptr, pitch, src_ptr, src_stride, pred_ptr,
+ pitch);
+}
+
+void vp8_subtract_mbuv(short *diff, unsigned char *usrc, unsigned char *vsrc,
+ int src_stride, unsigned char *upred,
+ unsigned char *vpred, int pred_stride) {
+ short *udiff = diff + 256;
+ short *vdiff = diff + 320;
+
+ vpx_subtract_block(8, 8, udiff, 8, usrc, src_stride, upred, pred_stride);
+ vpx_subtract_block(8, 8, vdiff, 8, vsrc, src_stride, vpred, pred_stride);
+}
+
+void vp8_subtract_mby(short *diff, unsigned char *src, int src_stride,
+ unsigned char *pred, int pred_stride) {
+ vpx_subtract_block(16, 16, diff, 16, src, src_stride, pred, pred_stride);
+}
+
+static void vp8_subtract_mb(MACROBLOCK *x) {
+ BLOCK *b = &x->block[0];
+
+ vp8_subtract_mby(x->src_diff, *(b->base_src), b->src_stride,
+ x->e_mbd.dst.y_buffer, x->e_mbd.dst.y_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->src.uv_stride, x->e_mbd.dst.u_buffer,
+ x->e_mbd.dst.v_buffer, x->e_mbd.dst.uv_stride);
+}
+
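+/* Gather the 16 per-block Y DC coefficients into block 24's source diff,
+ * ready for the 2nd order (WHT) transform.
+ */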
+static void build_dcblock(MACROBLOCK *x) {
+ short *src_diff_ptr = &x->src_diff[384];
+ int i;
+
+ for (i = 0; i < 16; ++i) {
+ src_diff_ptr[i] = x->coeff[i * 16];
+ }
+}
+
+void vp8_transform_mbuv(MACROBLOCK *x) {
+ int i;
+
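+  /* The pitch argument is in bytes of the short diff buffer: 16 for the
+   * 8-sample-wide chroma rows, versus 32 for the 16-wide luma rows.
+   */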
+ for (i = 16; i < 24; i += 2) {
+ x->short_fdct8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 16);
+ }
+}
+
+void vp8_transform_intra_mby(MACROBLOCK *x) {
+ int i;
+
+ for (i = 0; i < 16; i += 2) {
+ x->short_fdct8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 32);
+ }
+
+ /* build dc block from 16 y dc values */
+ build_dcblock(x);
+
+ /* do 2nd order transform on the dc block */
+ x->short_walsh4x4(&x->block[24].src_diff[0], &x->block[24].coeff[0], 8);
+}
+
+static void transform_mb(MACROBLOCK *x) {
+ int i;
+
+ for (i = 0; i < 16; i += 2) {
+ x->short_fdct8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 32);
+ }
+
+ /* build dc block from 16 y dc values */
+ if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) build_dcblock(x);
+
+ for (i = 16; i < 24; i += 2) {
+ x->short_fdct8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 16);
+ }
+
+ /* do 2nd order transform on the dc block */
+ if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
+ x->short_walsh4x4(&x->block[24].src_diff[0], &x->block[24].coeff[0], 8);
+ }
+}
+
+static void transform_mby(MACROBLOCK *x) {
+ int i;
+
+ for (i = 0; i < 16; i += 2) {
+ x->short_fdct8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 32);
+ }
+
+ /* build dc block from 16 y dc values */
+ if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
+ build_dcblock(x);
+ x->short_walsh4x4(&x->block[24].src_diff[0], &x->block[24].coeff[0], 8);
+ }
+}
+
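+/* Tie-breaker for equal RD costs: compares only the low byte of the rounded
+ * rate term; the DM and D arguments are unused.
+ */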
+#define RDTRUNC(RM, DM, R, D) ((128 + (R) * (RM)) & 0xFF)
+
+typedef struct vp8_token_state vp8_token_state;
+
+struct vp8_token_state {
+ int rate;
+ int error;
+ signed char next;
+ signed char token;
+ short qc;
+};
+
+/* TODO: experiments to find optimal multiplier values */
+#define Y1_RD_MULT 4
+#define UV_RD_MULT 2
+#define Y2_RD_MULT 16
+
+static const int plane_rd_mult[4] = { Y1_RD_MULT, Y2_RD_MULT, UV_RD_MULT,
+ Y1_RD_MULT };
+
+static void optimize_b(MACROBLOCK *mb, int ib, int type, ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l) {
+ BLOCK *b;
+ BLOCKD *d;
+ vp8_token_state tokens[17][2];
+ unsigned best_mask[2];
+ const short *dequant_ptr;
+ const short *coeff_ptr;
+ short *qcoeff_ptr;
+ short *dqcoeff_ptr;
+ int eob;
+ int i0;
+ int rc;
+ int x;
+ int sz = 0;
+ int next;
+ int rdmult;
+ int rddiv;
+ int final_eob;
+ int rd_cost0;
+ int rd_cost1;
+ int rate0;
+ int rate1;
+ int error0;
+ int error1;
+ int t0;
+ int t1;
+ int best;
+ int band;
+ int pt;
+ int i;
+ int err_mult = plane_rd_mult[type];
+
+ b = &mb->block[ib];
+ d = &mb->e_mbd.block[ib];
+
+ dequant_ptr = d->dequant;
+ coeff_ptr = b->coeff;
+ qcoeff_ptr = d->qcoeff;
+ dqcoeff_ptr = d->dqcoeff;
+ i0 = !type;
+ eob = *d->eob;
+
+ /* Now set up a Viterbi trellis to evaluate alternative roundings. */
+ rdmult = mb->rdmult * err_mult;
+ if (mb->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
+ rdmult = (rdmult * 9) >> 4;
+ }
+
+ rddiv = mb->rddiv;
+ best_mask[0] = best_mask[1] = 0;
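+  /* Two trellis states per coefficient position: tokens[i][0] keeps the
+   * quantized level as-is, tokens[i][1] tracks the path where its
+   * magnitude may be lowered by one.
+   */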
+ /* Initialize the sentinel node of the trellis. */
+ tokens[eob][0].rate = 0;
+ tokens[eob][0].error = 0;
+ tokens[eob][0].next = 16;
+ tokens[eob][0].token = DCT_EOB_TOKEN;
+ tokens[eob][0].qc = 0;
+ *(tokens[eob] + 1) = *(tokens[eob] + 0);
+ next = eob;
+ for (i = eob; i-- > i0;) {
+ int base_bits;
+ int d2;
+ int dx;
+
+ rc = vp8_default_zig_zag1d[i];
+ x = qcoeff_ptr[rc];
+ /* Only add a trellis state for non-zero coefficients. */
+ if (x) {
+ int shortcut = 0;
+ error0 = tokens[next][0].error;
+ error1 = tokens[next][1].error;
+ /* Evaluate the first possibility for this state. */
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+ t0 = (vp8_dct_value_tokens_ptr + x)->Token;
+ /* Consider both possible successor states. */
+ if (next < 16) {
+ band = vp8_coef_bands[i + 1];
+ pt = vp8_prev_token_class[t0];
+ rate0 += mb->token_costs[type][band][pt][tokens[next][0].token];
+ rate1 += mb->token_costs[type][band][pt][tokens[next][1].token];
+ }
+ rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
+ if (rd_cost0 == rd_cost1) {
+ rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
+ }
+ /* And pick the best. */
+ best = rd_cost1 < rd_cost0;
+ base_bits = *(vp8_dct_value_cost_ptr + x);
+ dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
+ d2 = dx * dx;
+ tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
+ tokens[i][0].error = d2 + (best ? error1 : error0);
+ tokens[i][0].next = next;
+ tokens[i][0].token = t0;
+ tokens[i][0].qc = x;
+ best_mask[0] |= best << i;
+ /* Evaluate the second possibility for this state. */
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+
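+      /* Lowering |x| by one is only worth trying when the dequantized
+       * value overshot the original coefficient by less than one
+       * quantizer step.
+       */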
+ if ((abs(x) * dequant_ptr[rc] > abs(coeff_ptr[rc])) &&
+ (abs(x) * dequant_ptr[rc] < abs(coeff_ptr[rc]) + dequant_ptr[rc])) {
+ shortcut = 1;
+ } else {
+ shortcut = 0;
+ }
+
+ if (shortcut) {
+ sz = -(x < 0);
+ x -= 2 * sz + 1;
+ }
+
+ /* Consider both possible successor states. */
+ if (!x) {
+ /* If we reduced this coefficient to zero, check to see if
+ * we need to move the EOB back here.
+ */
+ t0 =
+ tokens[next][0].token == DCT_EOB_TOKEN ? DCT_EOB_TOKEN : ZERO_TOKEN;
+ t1 =
+ tokens[next][1].token == DCT_EOB_TOKEN ? DCT_EOB_TOKEN : ZERO_TOKEN;
+ } else {
+ t0 = t1 = (vp8_dct_value_tokens_ptr + x)->Token;
+ }
+ if (next < 16) {
+ band = vp8_coef_bands[i + 1];
+ if (t0 != DCT_EOB_TOKEN) {
+ pt = vp8_prev_token_class[t0];
+ rate0 += mb->token_costs[type][band][pt][tokens[next][0].token];
+ }
+ if (t1 != DCT_EOB_TOKEN) {
+ pt = vp8_prev_token_class[t1];
+ rate1 += mb->token_costs[type][band][pt][tokens[next][1].token];
+ }
+ }
+
+ rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
+ if (rd_cost0 == rd_cost1) {
+ rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
+ }
+ /* And pick the best. */
+ best = rd_cost1 < rd_cost0;
+ base_bits = *(vp8_dct_value_cost_ptr + x);
+
+ if (shortcut) {
+ dx -= (dequant_ptr[rc] + sz) ^ sz;
+ d2 = dx * dx;
+ }
+ tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
+ tokens[i][1].error = d2 + (best ? error1 : error0);
+ tokens[i][1].next = next;
+ tokens[i][1].token = best ? t1 : t0;
+ tokens[i][1].qc = x;
+ best_mask[1] |= best << i;
+ /* Finally, make this the new head of the trellis. */
+ next = i;
+ }
+ /* There's no choice to make for a zero coefficient, so we don't
+ * add a new trellis node, but we do need to update the costs.
+ */
+ else {
+ band = vp8_coef_bands[i + 1];
+ t0 = tokens[next][0].token;
+ t1 = tokens[next][1].token;
+ /* Update the cost of each path if we're past the EOB token. */
+ if (t0 != DCT_EOB_TOKEN) {
+ tokens[next][0].rate += mb->token_costs[type][band][0][t0];
+ tokens[next][0].token = ZERO_TOKEN;
+ }
+ if (t1 != DCT_EOB_TOKEN) {
+ tokens[next][1].rate += mb->token_costs[type][band][0][t1];
+ tokens[next][1].token = ZERO_TOKEN;
+ }
+ /* Don't update next, because we didn't add a new node. */
+ }
+ }
+
+ /* Now pick the best path through the whole trellis. */
+ band = vp8_coef_bands[i + 1];
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+ error0 = tokens[next][0].error;
+ error1 = tokens[next][1].error;
+ t0 = tokens[next][0].token;
+ t1 = tokens[next][1].token;
+ rate0 += mb->token_costs[type][band][pt][t0];
+ rate1 += mb->token_costs[type][band][pt][t1];
+ rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
+ if (rd_cost0 == rd_cost1) {
+ rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
+ }
+ best = rd_cost1 < rd_cost0;
+ final_eob = i0 - 1;
+ for (i = next; i < eob; i = next) {
+ x = tokens[i][best].qc;
+ if (x) final_eob = i;
+ rc = vp8_default_zig_zag1d[i];
+ qcoeff_ptr[rc] = x;
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc];
+ next = tokens[i][best].next;
+ best = (best_mask[best] >> i) & 1;
+ }
+ final_eob++;
+
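+  /* The context records whether the block has nonzero coefficients; for Y
+   * blocks without DC (type 0) the scan starts at 1, so an eob of 1 still
+   * counts as empty.
+   */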
+ *a = *l = (final_eob != !type);
+ *d->eob = (char)final_eob;
+}
+static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type, ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l) {
+ int sum = 0;
+ int i;
+ BLOCKD *bd = &x->block[24];
+
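+  /* With both Y2 quantizer steps >= 35, any nonzero coefficient already
+   * dequantizes past the +/-35 bound used below, so the reset could never
+   * change anything.
+   */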
+ if (bd->dequant[0] >= 35 && bd->dequant[1] >= 35) return;
+
+ for (i = 0; i < (*bd->eob); ++i) {
+ int coef = bd->dqcoeff[vp8_default_zig_zag1d[i]];
+ sum += (coef >= 0) ? coef : -coef;
+ if (sum >= 35) return;
+ }
+  /**************************************************************************
+  Our inverse Hadamard transform is effectively a weighted sum of all 16
+  inputs, with each weight either 1 or -1, and a last-stage scaling of
+  (sum+3)>>3. The DC-only idct is (dc+4)>>3. So if all the sums are between
+  -35 and 29, the output after the inverse WHT and idct will be all zero. A
+  sum of absolute values smaller than 35 guarantees that all 16 different
+  (+1/-1) weighted sums in the WHT fall between -35 and +35.
+  **************************************************************************/
+ if (sum < 35) {
+ for (i = 0; i < (*bd->eob); ++i) {
+ int rc = vp8_default_zig_zag1d[i];
+ bd->qcoeff[rc] = 0;
+ bd->dqcoeff[rc] = 0;
+ }
+ *bd->eob = 0;
+ *a = *l = (*bd->eob != !type);
+ }
+}
+
+static void optimize_mb(MACROBLOCK *x) {
+ int b;
+ int type;
+ int has_2nd_order;
+
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+
+ memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+
+ has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
+ x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+ type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
+
+ for (b = 0; b < 16; ++b) {
+ optimize_b(x, b, type, ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ }
+
+ for (b = 16; b < 24; ++b) {
+ optimize_b(x, b, PLANE_TYPE_UV, ta + vp8_block2above[b],
+ tl + vp8_block2left[b]);
+ }
+
+ if (has_2nd_order) {
+ b = 24;
+ optimize_b(x, b, PLANE_TYPE_Y2, ta + vp8_block2above[b],
+ tl + vp8_block2left[b]);
+ check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2, ta + vp8_block2above[b],
+ tl + vp8_block2left[b]);
+ }
+}
+
+void vp8_optimize_mby(MACROBLOCK *x) {
+ int b;
+ int type;
+ int has_2nd_order;
+
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+
+ if (!x->e_mbd.above_context) return;
+
+ if (!x->e_mbd.left_context) return;
+
+ memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+
+ has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
+ x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+ type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
+
+ for (b = 0; b < 16; ++b) {
+ optimize_b(x, b, type, ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ }
+
+ if (has_2nd_order) {
+ b = 24;
+ optimize_b(x, b, PLANE_TYPE_Y2, ta + vp8_block2above[b],
+ tl + vp8_block2left[b]);
+ check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2, ta + vp8_block2above[b],
+ tl + vp8_block2left[b]);
+ }
+}
+
+void vp8_optimize_mbuv(MACROBLOCK *x) {
+ int b;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+
+ if (!x->e_mbd.above_context) return;
+
+ if (!x->e_mbd.left_context) return;
+
+ memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+
+ for (b = 16; b < 24; ++b) {
+ optimize_b(x, b, PLANE_TYPE_UV, ta + vp8_block2above[b],
+ tl + vp8_block2left[b]);
+ }
+}
+
+void vp8_encode_inter16x16(MACROBLOCK *x) {
+ vp8_build_inter_predictors_mb(&x->e_mbd);
+
+ vp8_subtract_mb(x);
+
+ transform_mb(x);
+
+ vp8_quantize_mb(x);
+
+ if (x->optimize) optimize_mb(x);
+}
+
+/* this function is used by the first pass only */
+void vp8_encode_inter16x16y(MACROBLOCK *x) {
+ BLOCK *b = &x->block[0];
+
+ vp8_build_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.dst.y_buffer,
+ x->e_mbd.dst.y_stride);
+
+ vp8_subtract_mby(x->src_diff, *(b->base_src), b->src_stride,
+ x->e_mbd.dst.y_buffer, x->e_mbd.dst.y_stride);
+
+ transform_mby(x);
+
+ vp8_quantize_mby(x);
+
+ vp8_inverse_transform_mby(&x->e_mbd);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/encodemb.h b/media/libvpx/libvpx/vp8/encoder/encodemb.h
new file mode 100644
index 0000000000..db577ddc10
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/encodemb.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_ENCODEMB_H_
+#define VPX_VP8_ENCODER_ENCODEMB_H_
+
+#include "onyx_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void vp8_encode_inter16x16(MACROBLOCK *x);
+
+void vp8_subtract_b(BLOCK *be, BLOCKD *bd, int pitch);
+void vp8_subtract_mbuv(short *diff, unsigned char *usrc, unsigned char *vsrc,
+ int src_stride, unsigned char *upred,
+ unsigned char *vpred, int pred_stride);
+void vp8_subtract_mby(short *diff, unsigned char *src, int src_stride,
+ unsigned char *pred, int pred_stride);
+
+void vp8_build_dcblock(MACROBLOCK *b);
+void vp8_transform_mb(MACROBLOCK *mb);
+void vp8_transform_mbuv(MACROBLOCK *x);
+void vp8_transform_intra_mby(MACROBLOCK *x);
+
+void vp8_optimize_mby(MACROBLOCK *x);
+void vp8_optimize_mbuv(MACROBLOCK *x);
+void vp8_encode_inter16x16y(MACROBLOCK *x);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_ENCODEMB_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/encodemv.c b/media/libvpx/libvpx/vp8/encoder/encodemv.c
new file mode 100644
index 0000000000..384bb29389
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/encodemv.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8/common/common.h"
+#include "encodemv.h"
+#include "vp8/common/entropymode.h"
+#include "vp8/common/systemdependent.h"
+#include "vpx_ports/system_state.h"
+
+#include <math.h>
+
+static void encode_mvcomponent(vp8_writer *const w, const int v,
+ const struct mv_context *mvc) {
+ const vp8_prob *p = mvc->prob;
+ const int x = v < 0 ? -v : v;
+
+ if (x < mvnum_short) { /* Small */
+ vp8_write(w, 0, p[mvpis_short]);
+ vp8_treed_write(w, vp8_small_mvtree, p + MVPshort, x, 3);
+
+ if (!x) return; /* no sign bit */
+ } else { /* Large */
+ int i = 0;
+
+ vp8_write(w, 1, p[mvpis_short]);
+
+ do {
+ vp8_write(w, (x >> i) & 1, p[MVPbits + i]);
+ } while (++i < 3);
+
+ i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
+
+ do {
+ vp8_write(w, (x >> i) & 1, p[MVPbits + i]);
+ } while (--i > 3);
+
+ if (x & 0xFFF0) vp8_write(w, (x >> 3) & 1, p[MVPbits + 3]);
+ }
+
+ vp8_write(w, v < 0, p[MVPsign]);
+}
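+
+/* Worked example of the long-vector bit order above (illustrative note, not
+ * part of the original change; assumes mvnum_short == 8 and mvlong_width ==
+ * 10 from vp8/common/entropymv.h). For x = 9: bits 0..2 are written in
+ * ascending order (1, 0, 0), bits 9..4 in descending order (all 0), and bit
+ * 3 is skipped because (x & 0xFFF0) == 0; the decoder infers bit 3 is 1,
+ * since a "long" magnitude with bits 4..9 clear must still be at least 8.
+ */
+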
+#if 0
+static int max_mv_r = 0;
+static int max_mv_c = 0;
+#endif
+void vp8_encode_motion_vector(vp8_writer *w, const MV *mv,
+ const MV_CONTEXT *mvc) {
+#if 0
+ {
+ if (abs(mv->row >> 1) > max_mv_r)
+ {
+ FILE *f = fopen("maxmv.stt", "a");
+ max_mv_r = abs(mv->row >> 1);
+ fprintf(f, "New Mv Row Max %6d\n", (mv->row >> 1));
+
+ if ((abs(mv->row) / 2) != max_mv_r)
+ fprintf(f, "MV Row conversion error %6d\n", abs(mv->row) / 2);
+
+ fclose(f);
+ }
+
+ if (abs(mv->col >> 1) > max_mv_c)
+ {
+ FILE *f = fopen("maxmv.stt", "a");
+ fprintf(f, "New Mv Col Max %6d\n", (mv->col >> 1));
+ max_mv_c = abs(mv->col >> 1);
+ fclose(f);
+ }
+ }
+#endif
+
+ encode_mvcomponent(w, mv->row >> 1, &mvc[0]);
+ encode_mvcomponent(w, mv->col >> 1, &mvc[1]);
+}
+
+static unsigned int cost_mvcomponent(const int v,
+ const struct mv_context *mvc) {
+ const vp8_prob *p = mvc->prob;
+ const int x = v;
+ unsigned int cost;
+
+ if (x < mvnum_short) {
+ cost = vp8_cost_zero(p[mvpis_short]) +
+ vp8_treed_cost(vp8_small_mvtree, p + MVPshort, x, 3);
+
+ if (!x) return cost;
+ } else {
+ int i = 0;
+ cost = vp8_cost_one(p[mvpis_short]);
+
+ do {
+ cost += vp8_cost_bit(p[MVPbits + i], (x >> i) & 1);
+
+ } while (++i < 3);
+
+ i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
+
+ do {
+ cost += vp8_cost_bit(p[MVPbits + i], (x >> i) & 1);
+
+ } while (--i > 3);
+
+ if (x & 0xFFF0) cost += vp8_cost_bit(p[MVPbits + 3], (x >> 3) & 1);
+ }
+
+ return cost; /* + vp8_cost_bit( p [MVPsign], v < 0); */
+}
+
+void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc,
+ int mvc_flag[2]) {
+ int i = 1;
+ unsigned int cost0 = 0;
+ unsigned int cost1 = 0;
+
+ vpx_clear_system_state();
+
+ i = 1;
+
+ if (mvc_flag[0]) {
+ mvcost[0][0] = cost_mvcomponent(0, &mvc[0]);
+
+ do {
+ cost0 = cost_mvcomponent(i, &mvc[0]);
+
+ mvcost[0][i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign]);
+ mvcost[0][-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign]);
+ } while (++i <= mv_max);
+ }
+
+ i = 1;
+
+ if (mvc_flag[1]) {
+ mvcost[1][0] = cost_mvcomponent(0, &mvc[1]);
+
+ do {
+ cost1 = cost_mvcomponent(i, &mvc[1]);
+
+ mvcost[1][i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign]);
+ mvcost[1][-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign]);
+ } while (++i <= mv_max);
+ }
+}
+
+/* Motion vector probability table update depends on benefit.
+ * Small correction allows for the fact that an update to an MV probability
+ * may have benefit in subsequent frames as well as the current one.
+ */
+#define MV_PROB_UPDATE_CORRECTION -1
+
+static void calc_prob(vp8_prob *p, const unsigned int ct[2]) {
+ const unsigned int tot = ct[0] + ct[1];
+
+ if (tot) {
+ const vp8_prob x = ((ct[0] * 255) / tot) & ~1u;
+ *p = x ? x : 1;
+ }
+}
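+
+/* A minimal sanity-check sketch (illustrative, not part of libvpx):
+ * calc_prob() maps branch counts to a probability clamped to at least 1 and
+ * otherwise kept even, so that update() below can transmit new_p >> 1 as a
+ * 7-bit literal without losing information.
+ */
+#if 0
+static void calc_prob_example(void) {
+ const unsigned int ct[2] = { 60, 40 }; /* 60 zero-branches out of 100 */
+ vp8_prob p = 128;
+ calc_prob(&p, ct); /* p becomes ((60 * 255) / 100) & ~1u == 152 */
+}
+#endif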
+
+static void update(vp8_writer *const w, const unsigned int ct[2],
+ vp8_prob *const cur_p, const vp8_prob new_p,
+ const vp8_prob update_p, int *updated) {
+ const int cur_b = vp8_cost_branch(ct, *cur_p);
+ const int new_b = vp8_cost_branch(ct, new_p);
+ const int cost =
+ 7 + MV_PROB_UPDATE_CORRECTION +
+ ((vp8_cost_one(update_p) - vp8_cost_zero(update_p) + 128) >> 8);
+
+ if (cur_b - new_b > cost) {
+ *cur_p = new_p;
+ vp8_write(w, 1, update_p);
+ vp8_write_literal(w, new_p >> 1, 7);
+ *updated = 1;
+
+ } else
+ vp8_write(w, 0, update_p);
+}
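+
+/* Illustrative note (not part of the original change): vp8_cost_branch()
+ * returns whole bits, so the test above compares the bits saved on this
+ * frame's counts against the bits spent signalling the update: 7 for the
+ * new probability literal, plus the flag's differential cost rounded to
+ * whole bits, less the 1-bit credit (MV_PROB_UPDATE_CORRECTION) for
+ * benefit carried into subsequent frames.
+ */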
+
+static void write_component_probs(vp8_writer *const w,
+ struct mv_context *cur_mvc,
+ const struct mv_context *default_mvc_,
+ const struct mv_context *update_mvc,
+ const unsigned int events[MVvals],
+ unsigned int rc, int *updated) {
+ vp8_prob *Pcur = cur_mvc->prob;
+ const vp8_prob *default_mvc = default_mvc_->prob;
+ const vp8_prob *Pupdate = update_mvc->prob;
+ unsigned int is_short_ct[2], sign_ct[2];
+
+ unsigned int bit_ct[mvlong_width][2];
+
+ unsigned int short_ct[mvnum_short];
+ unsigned int short_bct[mvnum_short - 1][2];
+
+ vp8_prob Pnew[MVPcount];
+
+ (void)rc;
+ vp8_copy_array(Pnew, default_mvc, MVPcount);
+
+ vp8_zero(is_short_ct);
+ vp8_zero(sign_ct);
+ vp8_zero(bit_ct);
+ vp8_zero(short_ct);
+ vp8_zero(short_bct);
+
+ /* j=0 */
+ {
+ const int c = events[mv_max];
+
+ is_short_ct[0] += c; /* Short vector */
+ short_ct[0] += c; /* Magnitude distribution */
+ }
+
+ /* j: 1 ~ mv_max (1023) */
+ {
+ int j = 1;
+
+ do {
+ const int c1 = events[mv_max + j]; /* positive */
+ const int c2 = events[mv_max - j]; /* negative */
+ const int c = c1 + c2;
+ int a = j;
+
+ sign_ct[0] += c1;
+ sign_ct[1] += c2;
+
+ if (a < mvnum_short) {
+ is_short_ct[0] += c; /* Short vector */
+ short_ct[a] += c; /* Magnitude distribution */
+ } else {
+ int k = mvlong_width - 1;
+ is_short_ct[1] += c; /* Long vector */
+
+ /* bit 3 not always encoded. */
+ do {
+ bit_ct[k][(a >> k) & 1] += c;
+
+ } while (--k >= 0);
+ }
+ } while (++j <= mv_max);
+ }
+
+ calc_prob(Pnew + mvpis_short, is_short_ct);
+
+ calc_prob(Pnew + MVPsign, sign_ct);
+
+ {
+ vp8_prob p[mvnum_short - 1]; /* actually only need branch ct */
+ int j = 0;
+
+ vp8_tree_probs_from_distribution(8, vp8_small_mvencodings, vp8_small_mvtree,
+ p, short_bct, short_ct, 256, 1);
+
+ do {
+ calc_prob(Pnew + MVPshort + j, short_bct[j]);
+
+ } while (++j < mvnum_short - 1);
+ }
+
+ {
+ int j = 0;
+
+ do {
+ calc_prob(Pnew + MVPbits + j, bit_ct[j]);
+
+ } while (++j < mvlong_width);
+ }
+
+ update(w, is_short_ct, Pcur + mvpis_short, Pnew[mvpis_short], *Pupdate++,
+ updated);
+
+ update(w, sign_ct, Pcur + MVPsign, Pnew[MVPsign], *Pupdate++, updated);
+
+ {
+ const vp8_prob *const new_p = Pnew + MVPshort;
+ vp8_prob *const cur_p = Pcur + MVPshort;
+
+ int j = 0;
+
+ do {
+ update(w, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
+
+ } while (++j < mvnum_short - 1);
+ }
+
+ {
+ const vp8_prob *const new_p = Pnew + MVPbits;
+ vp8_prob *const cur_p = Pcur + MVPbits;
+
+ int j = 0;
+
+ do {
+ update(w, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
+
+ } while (++j < mvlong_width);
+ }
+}
+
+void vp8_write_mvprobs(VP8_COMP *cpi) {
+ vp8_writer *const w = cpi->bc;
+ MV_CONTEXT *mvc = cpi->common.fc.mvc;
+ int flags[2] = { 0, 0 };
+ write_component_probs(w, &mvc[0], &vp8_default_mv_context[0],
+ &vp8_mv_update_probs[0], cpi->mb.MVcount[0], 0,
+ &flags[0]);
+ write_component_probs(w, &mvc[1], &vp8_default_mv_context[1],
+ &vp8_mv_update_probs[1], cpi->mb.MVcount[1], 1,
+ &flags[1]);
+
+ if (flags[0] || flags[1]) {
+ vp8_build_component_cost_table(
+ cpi->mb.mvcost, (const MV_CONTEXT *)cpi->common.fc.mvc, flags);
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/encodemv.h b/media/libvpx/libvpx/vp8/encoder/encodemv.h
new file mode 100644
index 0000000000..347b9feffe
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/encodemv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_ENCODEMV_H_
+#define VPX_VP8_ENCODER_ENCODEMV_H_
+
+#include "onyx_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp8_write_mvprobs(VP8_COMP *);
+void vp8_encode_motion_vector(vp8_writer *, const MV *, const MV_CONTEXT *);
+void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc,
+ int mvc_flag[2]);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_ENCODEMV_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/ethreading.c b/media/libvpx/libvpx/vp8/encoder/ethreading.c
new file mode 100644
index 0000000000..2583cb0ac3
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/ethreading.c
@@ -0,0 +1,639 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "onyx_int.h"
+#include "vp8/common/threading.h"
+#include "vp8/common/common.h"
+#include "vp8/common/extend.h"
+#include "bitstream.h"
+#include "encodeframe.h"
+#include "ethreading.h"
+
+#if CONFIG_MULTITHREAD
+
+extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x,
+ int ok_to_skip);
+
+static THREAD_FUNCTION thread_loopfilter(void *p_data) {
+ VP8_COMP *cpi = (VP8_COMP *)(((LPFTHREAD_DATA *)p_data)->ptr1);
+ VP8_COMMON *cm = &cpi->common;
+
+ while (1) {
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) == 0) break;
+
+ if (sem_wait(&cpi->h_event_start_lpf) == 0) {
+ /* we're shutting down */
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) == 0) break;
+
+ vp8_loopfilter_frame(cpi, cm);
+
+ sem_post(&cpi->h_event_end_lpf);
+ }
+ }
+
+ return 0;
+}
+
+static THREAD_FUNCTION thread_encoding_proc(void *p_data) {
+ int ithread = ((ENCODETHREAD_DATA *)p_data)->ithread;
+ VP8_COMP *cpi = (VP8_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr1);
+ MB_ROW_COMP *mbri = (MB_ROW_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr2);
+ ENTROPY_CONTEXT_PLANES mb_row_left_context;
+
+ while (1) {
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) == 0) break;
+
+ if (sem_wait(&cpi->h_event_start_encoding[ithread]) == 0) {
+ const int nsync = cpi->mt_sync_range;
+ VP8_COMMON *cm = &cpi->common;
+ int mb_row;
+ MACROBLOCK *x = &mbri->mb;
+ MACROBLOCKD *xd = &x->e_mbd;
+ TOKENEXTRA *tp;
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ TOKENEXTRA *tp_start = cpi->tok + (1 + ithread) * (16 * 24);
+ const int num_part = (1 << cm->multi_token_partition);
+#endif
+
+ int *segment_counts = mbri->segment_counts;
+ int *totalrate = &mbri->totalrate;
+
+ /* we're shutting down */
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) == 0) break;
+
+ xd->mode_info_context = cm->mi + cm->mode_info_stride * (ithread + 1);
+ xd->mode_info_stride = cm->mode_info_stride;
+
+ for (mb_row = ithread + 1; mb_row < cm->mb_rows;
+ mb_row += (cpi->encoding_thread_count + 1)) {
+ int recon_yoffset, recon_uvoffset;
+ int mb_col;
+ int ref_fb_idx = cm->lst_fb_idx;
+ int dst_fb_idx = cm->new_fb_idx;
+ int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
+ int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
+ int map_index = (mb_row * cm->mb_cols);
+ const vpx_atomic_int *last_row_current_mb_col;
+ vpx_atomic_int *current_mb_col = &cpi->mt_current_mb_col[mb_row];
+
+#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ vp8_writer *w = &cpi->bc[1 + (mb_row % num_part)];
+#else
+ tp = cpi->tok + (mb_row * (cm->mb_cols * 16 * 24));
+ cpi->tplist[mb_row].start = tp;
+#endif
+
+ last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
+
+ /* reset above block coeffs */
+ xd->above_context = cm->above_context;
+ xd->left_context = &mb_row_left_context;
+
+ vp8_zero(mb_row_left_context);
+
+ xd->up_available = (mb_row != 0);
+ recon_yoffset = (mb_row * recon_y_stride * 16);
+ recon_uvoffset = (mb_row * recon_uv_stride * 8);
+
+ /* Set the mb activity pointer to the start of the row. */
+ x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
+
+ /* for each macroblock col in image */
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+ if (((mb_col - 1) % nsync) == 0) {
+ vpx_atomic_store_release(current_mb_col, mb_col - 1);
+ }
+
+ if (mb_row && !(mb_col & (nsync - 1))) {
+ vp8_atomic_spin_wait(mb_col, last_row_current_mb_col, nsync);
+ }
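+
+ /* Illustrative note (not part of the original change): rows are
+ * encoded in a staggered fashion. Each thread publishes its progress
+ * through mt_current_mb_col[mb_row] every nsync columns, and the
+ * spin-wait above blocks until the row above is at least nsync
+ * columns ahead, so the above and above-right context is complete.
+ */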
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ tp = tp_start;
+#endif
+
+ /* Distance of MB to the various image edges.
+ * These are specified in 1/8th pel units as they are always
+ * compared to values that are in 1/8th pel units.
+ */
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
+ xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
+
+ /* Set up limit values for motion vectors used to prevent
+ * them extending outside the UMV borders
+ */
+ x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_col_max =
+ ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
+ x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_row_max =
+ ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
+
+ xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
+ xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
+ xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
+ xd->left_available = (mb_col != 0);
+
+ x->rddiv = cpi->RDDIV;
+ x->rdmult = cpi->RDMULT;
+
+ /* Copy current mb to a buffer */
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x);
+
+ /* Is segmentation enabled */
+ /* MB level adjustment to quantizer */
+ if (xd->segmentation_enabled) {
+ /* Code to set segment id in xd->mbmi.segment_id for
+ * current MB (with range checking)
+ */
+ if (cpi->segmentation_map[map_index + mb_col] <= 3) {
+ xd->mode_info_context->mbmi.segment_id =
+ cpi->segmentation_map[map_index + mb_col];
+ } else {
+ xd->mode_info_context->mbmi.segment_id = 0;
+ }
+
+ vp8cx_mb_init_quantizer(cpi, x, 1);
+ } else {
+ /* Set to Segment 0 by default */
+ xd->mode_info_context->mbmi.segment_id = 0;
+ }
+
+ x->active_ptr = cpi->active_map + map_index + mb_col;
+
+ if (cm->frame_type == KEY_FRAME) {
+ *totalrate += vp8cx_encode_intra_macroblock(cpi, x, &tp);
+#ifdef MODE_STATS
+ y_modes[xd->mbmi.mode]++;
+#endif
+ } else {
+ *totalrate += vp8cx_encode_inter_macroblock(
+ cpi, x, &tp, recon_yoffset, recon_uvoffset, mb_row, mb_col);
+
+#ifdef MODE_STATS
+ inter_y_modes[xd->mbmi.mode]++;
+
+ if (xd->mbmi.mode == SPLITMV) {
+ int b;
+
+ for (b = 0; b < xd->mbmi.partition_count; ++b) {
+ inter_b_modes[x->partition->bmi[b].mode]++;
+ }
+ }
+
+#endif
+ // Keep track of how many (consecutive) times a block
+ // is coded as ZEROMV_LASTREF, for base layer frames.
+ // Reset to 0 if it's coded as anything else.
+ if (cpi->current_layer == 0) {
+ if (xd->mode_info_context->mbmi.mode == ZEROMV &&
+ xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
+ // Increment, check for wrap-around.
+ if (cpi->consec_zero_last[map_index + mb_col] < 255) {
+ cpi->consec_zero_last[map_index + mb_col] += 1;
+ }
+ if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
+ cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
+ }
+ } else {
+ cpi->consec_zero_last[map_index + mb_col] = 0;
+ cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
+ }
+ if (x->zero_last_dot_suppress) {
+ cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
+ }
+ }
+
+ /* Special case code for cyclic refresh.
+ * If cyclic update is enabled then copy
+ * xd->mbmi.segment_id (which may have been updated
+ * based on mode during
+ * vp8cx_encode_inter_macroblock()) back into the
+ * global segmentation map
+ */
+ if ((cpi->current_layer == 0) &&
+ (cpi->cyclic_refresh_mode_enabled &&
+ xd->segmentation_enabled)) {
+ const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id;
+
+ /* If the block has been refreshed, mark it as clean (the
+ * magnitude of the negative value influences how long it
+ * will be before we consider another refresh).
+ * Else, if it was coded (last frame 0,0) and has not
+ * already been refreshed, mark it as a candidate for
+ * cleanup next time (marked 0); else mark it as dirty (1).
+ */
+ if (mbmi->segment_id) {
+ cpi->cyclic_refresh_map[map_index + mb_col] = -1;
+ } else if ((mbmi->mode == ZEROMV) &&
+ (mbmi->ref_frame == LAST_FRAME)) {
+ if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
+ cpi->cyclic_refresh_map[map_index + mb_col] = 0;
+ }
+ } else {
+ cpi->cyclic_refresh_map[map_index + mb_col] = 1;
+ }
+ }
+ }
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ /* pack tokens for this MB */
+ {
+ int tok_count = tp - tp_start;
+ vp8_pack_tokens(w, tp_start, tok_count);
+ }
+#else
+ cpi->tplist[mb_row].stop = tp;
+#endif
+ /* Increment pointer into gf usage flags structure. */
+ x->gf_active_ptr++;
+
+ /* Increment the activity mask pointers. */
+ x->mb_activity_ptr++;
+
+ /* adjust to the next column of macroblocks */
+ x->src.y_buffer += 16;
+ x->src.u_buffer += 8;
+ x->src.v_buffer += 8;
+
+ recon_yoffset += 16;
+ recon_uvoffset += 8;
+
+ /* Keep track of segment usage */
+ segment_counts[xd->mode_info_context->mbmi.segment_id]++;
+
+ /* skip to next mb */
+ xd->mode_info_context++;
+ x->partition_info++;
+ xd->above_context++;
+ }
+
+ vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16,
+ xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
+
+ vpx_atomic_store_release(current_mb_col, mb_col + nsync);
+
+ /* this is to account for the border */
+ xd->mode_info_context++;
+ x->partition_info++;
+
+ x->src.y_buffer +=
+ 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) -
+ 16 * cm->mb_cols;
+ x->src.u_buffer +=
+ 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
+ 8 * cm->mb_cols;
+ x->src.v_buffer +=
+ 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
+ 8 * cm->mb_cols;
+
+ xd->mode_info_context +=
+ xd->mode_info_stride * cpi->encoding_thread_count;
+ x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
+ x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;
+ }
+ /* Signal that this thread has completed processing its rows. */
+ sem_post(&cpi->h_event_end_encoding[ithread]);
+ }
+ }
+
+ /* printf("exit thread %d\n", ithread); */
+ return 0;
+}
+
+static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc) {
+ MACROBLOCK *x = mbsrc;
+ MACROBLOCK *z = mbdst;
+ int i;
+
+ z->ss = x->ss;
+ z->ss_count = x->ss_count;
+ z->searches_per_step = x->searches_per_step;
+ z->errorperbit = x->errorperbit;
+
+ z->sadperbit16 = x->sadperbit16;
+ z->sadperbit4 = x->sadperbit4;
+
+ /*
+ z->mv_col_min = x->mv_col_min;
+ z->mv_col_max = x->mv_col_max;
+ z->mv_row_min = x->mv_row_min;
+ z->mv_row_max = x->mv_row_max;
+ */
+
+ z->short_fdct4x4 = x->short_fdct4x4;
+ z->short_fdct8x4 = x->short_fdct8x4;
+ z->short_walsh4x4 = x->short_walsh4x4;
+ z->quantize_b = x->quantize_b;
+ z->optimize = x->optimize;
+
+ /*
+ z->mvc = x->mvc;
+ z->src.y_buffer = x->src.y_buffer;
+ z->src.u_buffer = x->src.u_buffer;
+ z->src.v_buffer = x->src.v_buffer;
+ */
+
+ z->mvcost[0] = x->mvcost[0];
+ z->mvcost[1] = x->mvcost[1];
+ z->mvsadcost[0] = x->mvsadcost[0];
+ z->mvsadcost[1] = x->mvsadcost[1];
+
+ z->token_costs = x->token_costs;
+ z->inter_bmode_costs = x->inter_bmode_costs;
+ z->mbmode_cost = x->mbmode_cost;
+ z->intra_uv_mode_cost = x->intra_uv_mode_cost;
+ z->bmode_costs = x->bmode_costs;
+
+ for (i = 0; i < 25; ++i) {
+ z->block[i].quant = x->block[i].quant;
+ z->block[i].quant_fast = x->block[i].quant_fast;
+ z->block[i].quant_shift = x->block[i].quant_shift;
+ z->block[i].zbin = x->block[i].zbin;
+ z->block[i].zrun_zbin_boost = x->block[i].zrun_zbin_boost;
+ z->block[i].round = x->block[i].round;
+ z->block[i].src_stride = x->block[i].src_stride;
+ }
+
+ z->q_index = x->q_index;
+ z->act_zbin_adj = x->act_zbin_adj;
+ z->last_act_zbin_adj = x->last_act_zbin_adj;
+
+ {
+ MACROBLOCKD *xd = &x->e_mbd;
+ MACROBLOCKD *zd = &z->e_mbd;
+
+ /*
+ zd->mode_info_context = xd->mode_info_context;
+ zd->mode_info = xd->mode_info;
+
+ zd->mode_info_stride = xd->mode_info_stride;
+ zd->frame_type = xd->frame_type;
+ zd->up_available = xd->up_available ;
+ zd->left_available = xd->left_available;
+ zd->left_context = xd->left_context;
+ zd->last_frame_dc = xd->last_frame_dc;
+ zd->last_frame_dccons = xd->last_frame_dccons;
+ zd->gold_frame_dc = xd->gold_frame_dc;
+ zd->gold_frame_dccons = xd->gold_frame_dccons;
+ zd->mb_to_left_edge = xd->mb_to_left_edge;
+ zd->mb_to_right_edge = xd->mb_to_right_edge;
+ zd->mb_to_top_edge = xd->mb_to_top_edge ;
+ zd->mb_to_bottom_edge = xd->mb_to_bottom_edge;
+ zd->gf_active_ptr = xd->gf_active_ptr;
+ zd->frames_since_golden = xd->frames_since_golden;
+ zd->frames_till_alt_ref_frame = xd->frames_till_alt_ref_frame;
+ */
+ zd->subpixel_predict = xd->subpixel_predict;
+ zd->subpixel_predict8x4 = xd->subpixel_predict8x4;
+ zd->subpixel_predict8x8 = xd->subpixel_predict8x8;
+ zd->subpixel_predict16x16 = xd->subpixel_predict16x16;
+ zd->segmentation_enabled = xd->segmentation_enabled;
+ zd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
+ memcpy(zd->segment_feature_data, xd->segment_feature_data,
+ sizeof(xd->segment_feature_data));
+
+ memcpy(zd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc));
+ memcpy(zd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
+ memcpy(zd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
+ memcpy(zd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));
+
+#if 1
+ /* TODO: Remove dequant from BLOCKD. This is a temporary solution until
+ * the quantizer code uses a passed-in pointer to the dequant constants.
+ * This will also require modifications to the x86 and neon assembly.
+ */
+ for (i = 0; i < 16; ++i) zd->block[i].dequant = zd->dequant_y1;
+ for (i = 16; i < 24; ++i) zd->block[i].dequant = zd->dequant_uv;
+ zd->block[24].dequant = zd->dequant_y2;
+#endif
+
+ memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes));
+ memcpy(z->rd_thresh_mult, x->rd_thresh_mult, sizeof(x->rd_thresh_mult));
+
+ z->zbin_over_quant = x->zbin_over_quant;
+ z->zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
+ z->zbin_mode_boost = x->zbin_mode_boost;
+
+ memset(z->error_bins, 0, sizeof(z->error_bins));
+ }
+}
+
+void vp8cx_init_mbrthread_data(VP8_COMP *cpi, MACROBLOCK *x,
+ MB_ROW_COMP *mbr_ei, int count) {
+ VP8_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ MACROBLOCK *mb = &mbr_ei[i].mb;
+ MACROBLOCKD *mbd = &mb->e_mbd;
+
+ mbd->subpixel_predict = xd->subpixel_predict;
+ mbd->subpixel_predict8x4 = xd->subpixel_predict8x4;
+ mbd->subpixel_predict8x8 = xd->subpixel_predict8x8;
+ mbd->subpixel_predict16x16 = xd->subpixel_predict16x16;
+ mb->gf_active_ptr = x->gf_active_ptr;
+
+ memset(mbr_ei[i].segment_counts, 0, sizeof(mbr_ei[i].segment_counts));
+ mbr_ei[i].totalrate = 0;
+
+ mb->partition_info = x->pi + x->e_mbd.mode_info_stride * (i + 1);
+
+ mbd->frame_type = cm->frame_type;
+
+ mb->src = *cpi->Source;
+ mbd->pre = cm->yv12_fb[cm->lst_fb_idx];
+ mbd->dst = cm->yv12_fb[cm->new_fb_idx];
+
+ mb->src.y_buffer += 16 * x->src.y_stride * (i + 1);
+ mb->src.u_buffer += 8 * x->src.uv_stride * (i + 1);
+ mb->src.v_buffer += 8 * x->src.uv_stride * (i + 1);
+
+ vp8_build_block_offsets(mb);
+
+ mbd->left_context = &cm->left_context;
+ mb->mvc = cm->fc.mvc;
+
+ setup_mbby_copy(&mbr_ei[i].mb, x);
+
+ mbd->fullpixel_mask = ~0;
+ if (cm->full_pixel) mbd->fullpixel_mask = ~7;
+
+ vp8_zero(mb->coef_counts);
+ vp8_zero(x->ymode_count);
+ mb->skip_true_count = 0;
+ vp8_zero(mb->MVcount);
+ mb->prediction_error = 0;
+ mb->intra_error = 0;
+ vp8_zero(mb->count_mb_ref_frame_usage);
+ mb->mbs_tested_so_far = 0;
+ mb->mbs_zero_last_dot_suppress = 0;
+ }
+}
+
+int vp8cx_create_encoder_threads(VP8_COMP *cpi) {
+ const VP8_COMMON *cm = &cpi->common;
+
+ vpx_atomic_init(&cpi->b_multi_threaded, 0);
+ cpi->encoding_thread_count = 0;
+ cpi->b_lpf_running = 0;
+
+ if (cm->processor_core_count > 1 && cpi->oxcf.multi_threaded > 1) {
+ int ithread;
+ int th_count = cpi->oxcf.multi_threaded - 1;
+ int rc = 0;
+
+ /* don't allocate more threads than cores available */
+ if (cpi->oxcf.multi_threaded > cm->processor_core_count) {
+ th_count = cm->processor_core_count - 1;
+ }
+
+ /* we have th_count + 1 (main) threads processing one row each */
+ /* no point in having more threads than the sync range allows */
+ if (th_count > ((cm->mb_cols / cpi->mt_sync_range) - 1)) {
+ th_count = (cm->mb_cols / cpi->mt_sync_range) - 1;
+ }
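+
+ /* Illustrative example (not part of the original change): with
+ * mb_cols == 40 and mt_sync_range == 4, at most 40 / 4 - 1 == 9
+ * worker threads can be kept busy, regardless of the core count.
+ */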
+
+ if (th_count == 0) return 0;
+
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->h_encoding_thread,
+ vpx_malloc(sizeof(pthread_t) * th_count));
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->h_event_start_encoding,
+ vpx_malloc(sizeof(sem_t) * th_count));
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->h_event_end_encoding,
+ vpx_malloc(sizeof(sem_t) * th_count));
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->mb_row_ei,
+ vpx_memalign(32, sizeof(MB_ROW_COMP) * th_count));
+ memset(cpi->mb_row_ei, 0, sizeof(MB_ROW_COMP) * th_count);
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->en_thread_data,
+ vpx_malloc(sizeof(ENCODETHREAD_DATA) * th_count));
+
+ vpx_atomic_store_release(&cpi->b_multi_threaded, 1);
+ cpi->encoding_thread_count = th_count;
+
+ /*
+ printf("[VP8:] multi_threaded encoding is enabled with %d threads\n\n",
+ (cpi->encoding_thread_count +1));
+ */
+
+ for (ithread = 0; ithread < th_count; ++ithread) {
+ ENCODETHREAD_DATA *ethd = &cpi->en_thread_data[ithread];
+
+ /* Setup block ptrs and offsets */
+ vp8_setup_block_ptrs(&cpi->mb_row_ei[ithread].mb);
+ vp8_setup_block_dptrs(&cpi->mb_row_ei[ithread].mb.e_mbd);
+
+ sem_init(&cpi->h_event_start_encoding[ithread], 0, 0);
+ sem_init(&cpi->h_event_end_encoding[ithread], 0, 0);
+
+ ethd->ithread = ithread;
+ ethd->ptr1 = (void *)cpi;
+ ethd->ptr2 = (void *)&cpi->mb_row_ei[ithread];
+
+ rc = pthread_create(&cpi->h_encoding_thread[ithread], 0,
+ thread_encoding_proc, ethd);
+ if (rc) break;
+ }
+
+ if (rc) {
+ /* shutdown other threads */
+ vpx_atomic_store_release(&cpi->b_multi_threaded, 0);
+ for (--ithread; ithread >= 0; ithread--) {
+ pthread_join(cpi->h_encoding_thread[ithread], 0);
+ sem_destroy(&cpi->h_event_start_encoding[ithread]);
+ sem_destroy(&cpi->h_event_end_encoding[ithread]);
+ }
+
+ /* free thread related resources */
+ vpx_free(cpi->h_event_start_encoding);
+ vpx_free(cpi->h_event_end_encoding);
+ vpx_free(cpi->h_encoding_thread);
+ vpx_free(cpi->mb_row_ei);
+ vpx_free(cpi->en_thread_data);
+
+ return -1;
+ }
+
+ {
+ LPFTHREAD_DATA *lpfthd = &cpi->lpf_thread_data;
+
+ sem_init(&cpi->h_event_start_lpf, 0, 0);
+ sem_init(&cpi->h_event_end_lpf, 0, 0);
+
+ lpfthd->ptr1 = (void *)cpi;
+ rc = pthread_create(&cpi->h_filter_thread, 0, thread_loopfilter, lpfthd);
+
+ if (rc) {
+ /* shutdown other threads */
+ vpx_atomic_store_release(&cpi->b_multi_threaded, 0);
+ for (--ithread; ithread >= 0; ithread--) {
+ sem_post(&cpi->h_event_start_encoding[ithread]);
+ sem_post(&cpi->h_event_end_encoding[ithread]);
+ pthread_join(cpi->h_encoding_thread[ithread], 0);
+ sem_destroy(&cpi->h_event_start_encoding[ithread]);
+ sem_destroy(&cpi->h_event_end_encoding[ithread]);
+ }
+ sem_destroy(&cpi->h_event_end_lpf);
+ sem_destroy(&cpi->h_event_start_lpf);
+
+ /* free thread related resources */
+ vpx_free(cpi->h_event_start_encoding);
+ vpx_free(cpi->h_event_end_encoding);
+ vpx_free(cpi->h_encoding_thread);
+ vpx_free(cpi->mb_row_ei);
+ vpx_free(cpi->en_thread_data);
+
+ return -2;
+ }
+ }
+ }
+ return 0;
+}
+
+void vp8cx_remove_encoder_threads(VP8_COMP *cpi) {
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
+ /* shutdown other threads */
+ vpx_atomic_store_release(&cpi->b_multi_threaded, 0);
+ {
+ int i;
+
+ for (i = 0; i < cpi->encoding_thread_count; ++i) {
+ sem_post(&cpi->h_event_start_encoding[i]);
+ sem_post(&cpi->h_event_end_encoding[i]);
+
+ pthread_join(cpi->h_encoding_thread[i], 0);
+
+ sem_destroy(&cpi->h_event_start_encoding[i]);
+ sem_destroy(&cpi->h_event_end_encoding[i]);
+ }
+
+ sem_post(&cpi->h_event_start_lpf);
+ pthread_join(cpi->h_filter_thread, 0);
+ }
+
+ sem_destroy(&cpi->h_event_end_lpf);
+ sem_destroy(&cpi->h_event_start_lpf);
+
+ /* free thread related resources */
+ vpx_free(cpi->h_event_start_encoding);
+ vpx_free(cpi->h_event_end_encoding);
+ vpx_free(cpi->h_encoding_thread);
+ vpx_free(cpi->mb_row_ei);
+ vpx_free(cpi->en_thread_data);
+ }
+}
+#endif
diff --git a/media/libvpx/libvpx/vp8/encoder/ethreading.h b/media/libvpx/libvpx/vp8/encoder/ethreading.h
new file mode 100644
index 0000000000..598fe60559
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/ethreading.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_ETHREADING_H_
+#define VPX_VP8_ENCODER_ETHREADING_H_
+
+#include "vp8/encoder/onyx_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP8_COMP;
+struct macroblock;
+
+void vp8cx_init_mbrthread_data(struct VP8_COMP *cpi, struct macroblock *x,
+ MB_ROW_COMP *mbr_ei, int count);
+int vp8cx_create_encoder_threads(struct VP8_COMP *cpi);
+void vp8cx_remove_encoder_threads(struct VP8_COMP *cpi);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VPX_VP8_ENCODER_ETHREADING_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/firstpass.c b/media/libvpx/libvpx/vp8/encoder/firstpass.c
new file mode 100644
index 0000000000..4149fb4bf8
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/firstpass.c
@@ -0,0 +1,3144 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <limits.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+#include "block.h"
+#include "onyx_int.h"
+#include "vpx_dsp/variance.h"
+#include "encodeintra.h"
+#include "vp8/common/common.h"
+#include "vp8/common/setupintrarecon.h"
+#include "vp8/common/systemdependent.h"
+#include "mcomp.h"
+#include "firstpass.h"
+#include "vpx_scale/vpx_scale.h"
+#include "encodemb.h"
+#include "vp8/common/extend.h"
+#include "vpx_ports/system_state.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/swapyv12buffer.h"
+#include "rdopt.h"
+#include "vp8/common/quant_common.h"
+#include "encodemv.h"
+#include "encodeframe.h"
+
+#define OUTPUT_FPF 0
+
+extern void vp8cx_frame_init_quantizer(VP8_COMP *cpi);
+
+#define GFQ_ADJUSTMENT vp8_gf_boost_qadjustment[Q]
+extern int vp8_kf_boost_qadjustment[QINDEX_RANGE];
+
+extern const int vp8_gf_boost_qadjustment[QINDEX_RANGE];
+
+#define IIFACTOR 1.5
+#define IIKFACTOR1 1.40
+#define IIKFACTOR2 1.5
+#define RMAX 14.0
+#define GF_RMAX 48.0
+
+#define KF_MB_INTRA_MIN 300
+#define GF_MB_INTRA_MIN 200
+
+#define DOUBLE_DIVIDE_CHECK(X) ((X) < 0 ? (X)-.000001 : (X) + .000001)
+
+#define POW1 (double)cpi->oxcf.two_pass_vbrbias / 100.0
+#define POW2 (double)cpi->oxcf.two_pass_vbrbias / 100.0
+
+#define NEW_BOOST 1
+
+static int vscale_lookup[7] = { 0, 1, 1, 2, 2, 3, 3 };
+static int hscale_lookup[7] = { 0, 0, 1, 1, 2, 2, 3 };
+
+static const int cq_level[QINDEX_RANGE] = {
+ 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
+ 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
+ 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
+ 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
+};
+
+static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame);
+
+/* Resets the first pass stats read position to the given point in the
+ * in-memory stats buffer
+ */
+static void reset_fpf_position(VP8_COMP *cpi, FIRSTPASS_STATS *Position) {
+ cpi->twopass.stats_in = Position;
+}
+
+static int lookup_next_frame_stats(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame) {
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) return EOF;
+
+ *next_frame = *cpi->twopass.stats_in;
+ return 1;
+}
+
+/* Read frame stats at an offset from the current position */
+static int read_frame_stats(VP8_COMP *cpi, FIRSTPASS_STATS *frame_stats,
+ int offset) {
+ FIRSTPASS_STATS *fps_ptr = cpi->twopass.stats_in;
+
+ /* Check legality of offset */
+ if (offset >= 0) {
+ if (&fps_ptr[offset] >= cpi->twopass.stats_in_end) return EOF;
+ } else if (offset < 0) {
+ if (&fps_ptr[offset] < cpi->twopass.stats_in_start) return EOF;
+ }
+
+ *frame_stats = fps_ptr[offset];
+ return 1;
+}
+
+static int input_stats(VP8_COMP *cpi, FIRSTPASS_STATS *fps) {
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) return EOF;
+
+ *fps = *cpi->twopass.stats_in;
+ cpi->twopass.stats_in =
+ (void *)((char *)cpi->twopass.stats_in + sizeof(FIRSTPASS_STATS));
+ return 1;
+}
+
+static void output_stats(struct vpx_codec_pkt_list *pktlist,
+ FIRSTPASS_STATS *stats) {
+ struct vpx_codec_cx_pkt pkt;
+ pkt.kind = VPX_CODEC_STATS_PKT;
+ pkt.data.twopass_stats.buf = stats;
+ pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS);
+ vpx_codec_pkt_list_add(pktlist, &pkt);
+
+/* TEMP debug code */
+#if OUTPUT_FPF
+
+ {
+ FILE *fpfile;
+ fpfile = fopen("firstpass.stt", "a");
+
+ fprintf(fpfile,
+ "%12.0f %12.0f %12.0f %12.4f %12.4f %12.4f %12.4f"
+ " %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f"
+ " %12.0f %12.0f %12.4f\n",
+ stats->frame, stats->intra_error, stats->coded_error,
+ stats->ssim_weighted_pred_err, stats->pcnt_inter,
+ stats->pcnt_motion, stats->pcnt_second_ref, stats->pcnt_neutral,
+ stats->MVr, stats->mvr_abs, stats->MVc, stats->mvc_abs, stats->MVrv,
+ stats->MVcv, stats->mv_in_out_count, stats->new_mv_count,
+ stats->count, stats->duration);
+ fclose(fpfile);
+ }
+#endif
+}
+
+static void zero_stats(FIRSTPASS_STATS *section) {
+ section->frame = 0.0;
+ section->intra_error = 0.0;
+ section->coded_error = 0.0;
+ section->ssim_weighted_pred_err = 0.0;
+ section->pcnt_inter = 0.0;
+ section->pcnt_motion = 0.0;
+ section->pcnt_second_ref = 0.0;
+ section->pcnt_neutral = 0.0;
+ section->MVr = 0.0;
+ section->mvr_abs = 0.0;
+ section->MVc = 0.0;
+ section->mvc_abs = 0.0;
+ section->MVrv = 0.0;
+ section->MVcv = 0.0;
+ section->mv_in_out_count = 0.0;
+ section->new_mv_count = 0.0;
+ section->count = 0.0;
+ section->duration = 1.0;
+}
+
+static void accumulate_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) {
+ section->frame += frame->frame;
+ section->intra_error += frame->intra_error;
+ section->coded_error += frame->coded_error;
+ section->ssim_weighted_pred_err += frame->ssim_weighted_pred_err;
+ section->pcnt_inter += frame->pcnt_inter;
+ section->pcnt_motion += frame->pcnt_motion;
+ section->pcnt_second_ref += frame->pcnt_second_ref;
+ section->pcnt_neutral += frame->pcnt_neutral;
+ section->MVr += frame->MVr;
+ section->mvr_abs += frame->mvr_abs;
+ section->MVc += frame->MVc;
+ section->mvc_abs += frame->mvc_abs;
+ section->MVrv += frame->MVrv;
+ section->MVcv += frame->MVcv;
+ section->mv_in_out_count += frame->mv_in_out_count;
+ section->new_mv_count += frame->new_mv_count;
+ section->count += frame->count;
+ section->duration += frame->duration;
+}
+
+static void subtract_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) {
+ section->frame -= frame->frame;
+ section->intra_error -= frame->intra_error;
+ section->coded_error -= frame->coded_error;
+ section->ssim_weighted_pred_err -= frame->ssim_weighted_pred_err;
+ section->pcnt_inter -= frame->pcnt_inter;
+ section->pcnt_motion -= frame->pcnt_motion;
+ section->pcnt_second_ref -= frame->pcnt_second_ref;
+ section->pcnt_neutral -= frame->pcnt_neutral;
+ section->MVr -= frame->MVr;
+ section->mvr_abs -= frame->mvr_abs;
+ section->MVc -= frame->MVc;
+ section->mvc_abs -= frame->mvc_abs;
+ section->MVrv -= frame->MVrv;
+ section->MVcv -= frame->MVcv;
+ section->mv_in_out_count -= frame->mv_in_out_count;
+ section->new_mv_count -= frame->new_mv_count;
+ section->count -= frame->count;
+ section->duration -= frame->duration;
+}
+
+static void avg_stats(FIRSTPASS_STATS *section) {
+ if (section->count < 1.0) return;
+
+ section->intra_error /= section->count;
+ section->coded_error /= section->count;
+ section->ssim_weighted_pred_err /= section->count;
+ section->pcnt_inter /= section->count;
+ section->pcnt_second_ref /= section->count;
+ section->pcnt_neutral /= section->count;
+ section->pcnt_motion /= section->count;
+ section->MVr /= section->count;
+ section->mvr_abs /= section->count;
+ section->MVc /= section->count;
+ section->mvc_abs /= section->count;
+ section->MVrv /= section->count;
+ section->MVcv /= section->count;
+ section->mv_in_out_count /= section->count;
+ section->duration /= section->count;
+}
+
+/* Calculate a modified Error used in distributing bits between easier
+ * and harder frames
+ */
+static double calculate_modified_err(VP8_COMP *cpi,
+ FIRSTPASS_STATS *this_frame) {
+ double av_err = (cpi->twopass.total_stats.ssim_weighted_pred_err /
+ cpi->twopass.total_stats.count);
+ double this_err = this_frame->ssim_weighted_pred_err;
+ double modified_err;
+
+ if (this_err > av_err) {
+ modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW1);
+ } else {
+ modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW2);
+ }
+
+ return modified_err;
+}
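+
+/* Illustrative note (not part of the original change): with POW1 == POW2 ==
+ * two_pass_vbrbias / 100, the above computes
+ * modified_err = av_err * (this_err / av_err) ^ pow. For example, av_err =
+ * 100, this_err = 400 and pow = 0.5 give 100 * sqrt(4) = 200: errors are
+ * pulled toward the average whenever the bias exponent is below 1.
+ */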
+
+static const double weight_table[256] = {
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.031250, 0.062500,
+ 0.093750, 0.125000, 0.156250, 0.187500, 0.218750, 0.250000, 0.281250,
+ 0.312500, 0.343750, 0.375000, 0.406250, 0.437500, 0.468750, 0.500000,
+ 0.531250, 0.562500, 0.593750, 0.625000, 0.656250, 0.687500, 0.718750,
+ 0.750000, 0.781250, 0.812500, 0.843750, 0.875000, 0.906250, 0.937500,
+ 0.968750, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000
+};
+
+static double simple_weight(YV12_BUFFER_CONFIG *source) {
+ int i, j;
+
+ unsigned char *src = source->y_buffer;
+ double sum_weights = 0.0;
+
+ /* Loop through the raw Y plane, examining levels and creating a weight
+ * for the image
+ */
+ i = source->y_height;
+ do {
+ j = source->y_width;
+ do {
+ sum_weights += weight_table[*src];
+ src++;
+ } while (--j);
+ src -= source->y_width;
+ src += source->y_stride;
+ } while (--i);
+
+ sum_weights /= (source->y_height * source->y_width);
+
+ return sum_weights;
+}
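+
+/* Illustrative note (not part of the original change): weight_table maps
+ * luma values 0..32 to 0.02, then ramps linearly in steps of 1/32 up to 1.0
+ * at 64 and above, so predominantly dark frames produce a low weight. The
+ * result scales coded_error into ssim_weighted_pred_err later on.
+ */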
+
+/* This function returns the current per frame maximum bitrate target */
+static int frame_max_bits(VP8_COMP *cpi) {
+ /* Max allocation for a single frame based on the max section guidelines
+ * passed in and how many bits are left
+ */
+ int max_bits;
+
+ /* For CBR we need to also consider buffer fullness.
+ * If we are running below the optimal level then we need to gradually
+ * tighten up on max_bits.
+ */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ double buffer_fullness_ratio =
+ (double)cpi->buffer_level /
+ DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.optimal_buffer_level);
+
+ /* For CBR base this on the target average bits per frame plus the
+ * maximum section rate passed in by the user
+ */
+ max_bits = (int)(cpi->av_per_frame_bandwidth *
+ ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0));
+
+ /* If our buffer is below the optimum level */
+ if (buffer_fullness_ratio < 1.0) {
+ /* The lower of max_bits / 4 or cpi->av_per_frame_bandwidth / 4. */
+ int min_max_bits = ((cpi->av_per_frame_bandwidth >> 2) < (max_bits >> 2))
+ ? cpi->av_per_frame_bandwidth >> 2
+ : max_bits >> 2;
+
+ max_bits = (int)(max_bits * buffer_fullness_ratio);
+
+ /* Lowest value we will set ... which should allow the buffer to
+ * refill.
+ */
+ if (max_bits < min_max_bits) max_bits = min_max_bits;
+ }
+ }
+ /* VBR */
+ else {
+ /* For VBR base this on the bits and frames left plus the
+ * two_pass_vbrmax_section rate passed in by the user
+ */
+ max_bits = (int)(((double)cpi->twopass.bits_left /
+ (cpi->twopass.total_stats.count -
+ (double)cpi->common.current_video_frame)) *
+ ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0));
+ }
+
+ /* Trap case where we are out of bits */
+ if (max_bits < 0) max_bits = 0;
+
+ return max_bits;
+}
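+
+/* Illustrative CBR example (not part of the original change): with
+ * av_per_frame_bandwidth = 40000 bits and two_pass_vbrmax_section = 150,
+ * max_bits starts at 60000. A buffer at 50% of the optimal level scales
+ * this to 30000; the floor is min(40000 >> 2, 60000 >> 2) == 10000 bits,
+ * low enough to let the buffer refill.
+ */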
+
+void vp8_init_first_pass(VP8_COMP *cpi) {
+ zero_stats(&cpi->twopass.total_stats);
+}
+
+void vp8_end_first_pass(VP8_COMP *cpi) {
+ output_stats(cpi->output_pkt_list, &cpi->twopass.total_stats);
+}
+
+static void zz_motion_search(MACROBLOCK *x, YV12_BUFFER_CONFIG *raw_buffer,
+ int *raw_motion_err,
+ YV12_BUFFER_CONFIG *recon_buffer,
+ int *best_motion_err, int recon_yoffset) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ BLOCK *b = &x->block[0];
+ BLOCKD *d = &x->e_mbd.block[0];
+
+ unsigned char *src_ptr = (*(b->base_src) + b->src);
+ int src_stride = b->src_stride;
+ unsigned char *raw_ptr;
+ int raw_stride = raw_buffer->y_stride;
+ unsigned char *ref_ptr;
+ int ref_stride = x->e_mbd.pre.y_stride;
+
+ /* Set up pointers for this macro block raw buffer */
+ raw_ptr = (unsigned char *)(raw_buffer->y_buffer + recon_yoffset + d->offset);
+ vpx_mse16x16(src_ptr, src_stride, raw_ptr, raw_stride,
+ (unsigned int *)(raw_motion_err));
+
+ /* Set up pointers for this macro block recon buffer */
+ xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
+ ref_ptr = (unsigned char *)(xd->pre.y_buffer + d->offset);
+ vpx_mse16x16(src_ptr, src_stride, ref_ptr, ref_stride,
+ (unsigned int *)(best_motion_err));
+}
+
+static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
+ int_mv *ref_mv, MV *best_mv,
+ YV12_BUFFER_CONFIG *recon_buffer,
+ int *best_motion_err, int recon_yoffset) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ BLOCK *b = &x->block[0];
+ BLOCKD *d = &x->e_mbd.block[0];
+ int num00;
+
+ int_mv tmp_mv;
+ int_mv ref_mv_full;
+
+ int tmp_err;
+ int step_param = 3; /* Don't search over the full range in the first pass */
+ int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
+ int n;
+ vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
+ int new_mv_mode_penalty = 256;
+
+ /* override the default variance function to use MSE */
+ v_fn_ptr.vf = vpx_mse16x16;
+
+ /* Set up pointers for this macro block recon buffer */
+ xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
+
+ /* Initial step/diamond search centred on best mv */
+ tmp_mv.as_int = 0;
+ ref_mv_full.as_mv.col = ref_mv->as_mv.col >> 3;
+ ref_mv_full.as_mv.row = ref_mv->as_mv.row >> 3;
+ tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, step_param,
+ x->sadperbit16, &num00, &v_fn_ptr,
+ x->mvcost, ref_mv);
+ if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty;
+
+ if (tmp_err < *best_motion_err) {
+ *best_motion_err = tmp_err;
+ best_mv->row = tmp_mv.as_mv.row;
+ best_mv->col = tmp_mv.as_mv.col;
+ }
+
+ /* Further step/diamond searches as necessary */
+ n = num00;
+ num00 = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00) {
+ num00--;
+ } else {
+ tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv,
+ step_param + n, x->sadperbit16, &num00,
+ &v_fn_ptr, x->mvcost, ref_mv);
+ if (tmp_err < INT_MAX - new_mv_mode_penalty) {
+ tmp_err += new_mv_mode_penalty;
+ }
+
+ if (tmp_err < *best_motion_err) {
+ *best_motion_err = tmp_err;
+ best_mv->row = tmp_mv.as_mv.row;
+ best_mv->col = tmp_mv.as_mv.col;
+ }
+ }
+ }
+}
+
+void vp8_first_pass(VP8_COMP *cpi) {
+ int mb_row, mb_col;
+ MACROBLOCK *const x = &cpi->mb;
+ VP8_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ int recon_yoffset, recon_uvoffset;
+ YV12_BUFFER_CONFIG *lst_yv12 = &cm->yv12_fb[cm->lst_fb_idx];
+ YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
+ YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx];
+ int recon_y_stride = lst_yv12->y_stride;
+ int recon_uv_stride = lst_yv12->uv_stride;
+ int64_t intra_error = 0;
+ int64_t coded_error = 0;
+
+ int sum_mvr = 0, sum_mvc = 0;
+ int sum_mvr_abs = 0, sum_mvc_abs = 0;
+ int sum_mvrs = 0, sum_mvcs = 0;
+ int mvcount = 0;
+ int intercount = 0;
+ int second_ref_count = 0;
+ int intrapenalty = 256;
+ int neutral_count = 0;
+ int new_mv_count = 0;
+ int sum_in_vectors = 0;
+ uint32_t lastmv_as_int = 0;
+
+ int_mv zero_ref_mv;
+
+ zero_ref_mv.as_int = 0;
+
+ vpx_clear_system_state();
+
+ x->src = *cpi->Source;
+ xd->pre = *lst_yv12;
+ xd->dst = *new_yv12;
+
+ x->partition_info = x->pi;
+
+ xd->mode_info_context = cm->mi;
+
+ if (!cm->use_bilinear_mc_filter) {
+ xd->subpixel_predict = vp8_sixtap_predict4x4;
+ xd->subpixel_predict8x4 = vp8_sixtap_predict8x4;
+ xd->subpixel_predict8x8 = vp8_sixtap_predict8x8;
+ xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
+ } else {
+ xd->subpixel_predict = vp8_bilinear_predict4x4;
+ xd->subpixel_predict8x4 = vp8_bilinear_predict8x4;
+ xd->subpixel_predict8x8 = vp8_bilinear_predict8x8;
+ xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
+ }
+
+ vp8_build_block_offsets(x);
+
+ /* set up the new frame for intra coded blocks */
+ vp8_setup_intra_recon(new_yv12);
+ vp8cx_frame_init_quantizer(cpi);
+
+ /* Initialise the MV cost table to the defaults */
+ {
+ int flag[2] = { 1, 1 };
+ vp8_initialize_rd_consts(cpi, x,
+ vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
+ memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+ vp8_build_component_cost_table(cpi->mb.mvcost,
+ (const MV_CONTEXT *)cm->fc.mvc, flag);
+ }
+
+ /* for each macroblock row in image */
+ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+ int_mv best_ref_mv;
+
+ best_ref_mv.as_int = 0;
+
+ /* reset above block coeffs */
+ xd->up_available = (mb_row != 0);
+ recon_yoffset = (mb_row * recon_y_stride * 16);
+ recon_uvoffset = (mb_row * recon_uv_stride * 8);
+
+ /* Set up limit values for motion vectors to prevent them extending
+ * outside the UMV borders
+ */
+ x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_row_max =
+ ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
+
+ /* for each macroblock col in image */
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+ int this_error;
+ int gf_motion_error = INT_MAX;
+ int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
+
+ xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
+ xd->dst.u_buffer = new_yv12->u_buffer + recon_uvoffset;
+ xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset;
+ xd->left_available = (mb_col != 0);
+
+ /* Copy current mb to a buffer */
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
+
+ /* do intra 16x16 prediction */
+ this_error = vp8_encode_intra(x, use_dc_pred);
+
+ /* "intrapenalty" below deals with situations where the intra
+ * and inter error scores are very low (eg a plain black frame)
+ * We do not have special cases in first pass for 0,0 and
+ * nearest etc so all inter modes carry an overhead cost
+ * estimate fot the mv. When the error score is very low this
+ * causes us to pick all or lots of INTRA modes and throw lots
+ * of key frames. This penalty adds a cost matching that of a
+ * 0,0 mv to the intra case.
+ */
+ this_error += intrapenalty;
+
+ /* Cumulative intra error total */
+ intra_error += (int64_t)this_error;
+
+ /* Set up limit values for motion vectors to prevent them
+ * extending outside the UMV borders
+ */
+ x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_col_max =
+ ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
+
+ /* Other than for the first frame do a motion search */
+ if (cm->current_video_frame > 0) {
+ BLOCKD *d = &x->e_mbd.block[0];
+ MV tmp_mv = { 0, 0 };
+ int tmp_err;
+ int motion_error = INT_MAX;
+ int raw_motion_error = INT_MAX;
+
+ /* Simple 0,0 motion with no mv overhead */
+ zz_motion_search(x, cpi->last_frame_unscaled_source, &raw_motion_error,
+ lst_yv12, &motion_error, recon_yoffset);
+ d->bmi.mv.as_mv.row = 0;
+ d->bmi.mv.as_mv.col = 0;
+
+ if (raw_motion_error < cpi->oxcf.encode_breakout) {
+ goto skip_motion_search;
+ }
+
+ /* Test last reference frame using the previous best mv as the
+ * starting point (best reference) for the search
+ */
+ first_pass_motion_search(cpi, x, &best_ref_mv, &d->bmi.mv.as_mv,
+ lst_yv12, &motion_error, recon_yoffset);
+
+ /* If the current best reference mv is not centred on 0,0
+ * then do a 0,0 based search as well
+ */
+ if (best_ref_mv.as_int) {
+ tmp_err = INT_MAX;
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, lst_yv12,
+ &tmp_err, recon_yoffset);
+
+ if (tmp_err < motion_error) {
+ motion_error = tmp_err;
+ d->bmi.mv.as_mv.row = tmp_mv.row;
+ d->bmi.mv.as_mv.col = tmp_mv.col;
+ }
+ }
+
+ /* Experimental search in a second reference frame ((0,0)
+ * based only)
+ */
+ if (cm->current_video_frame > 1) {
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, gld_yv12,
+ &gf_motion_error, recon_yoffset);
+
+ if ((gf_motion_error < motion_error) &&
+ (gf_motion_error < this_error)) {
+ second_ref_count++;
+ }
+
+ /* Reset to last frame as reference buffer */
+ xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset;
+ xd->pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset;
+ xd->pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset;
+ }
+
+ skip_motion_search:
+ /* Intra assumed best */
+ best_ref_mv.as_int = 0;
+
+ if (motion_error <= this_error) {
+ /* Keep a count of cases where the inter and intra were
+ * very close and very low. This helps with scene cut
+ * detection for example in cropped clips with black bars
+ * at the sides or top and bottom.
+ */
+ if ((((this_error - intrapenalty) * 9) <= (motion_error * 10)) &&
+ (this_error < (2 * intrapenalty))) {
+ neutral_count++;
+ }
+
+ d->bmi.mv.as_mv.row *= 8;
+ d->bmi.mv.as_mv.col *= 8;
+ this_error = motion_error;
+ vp8_set_mbmode_and_mvs(x, NEWMV, &d->bmi.mv);
+ vp8_encode_inter16x16y(x);
+ sum_mvr += d->bmi.mv.as_mv.row;
+ sum_mvr_abs += abs(d->bmi.mv.as_mv.row);
+ sum_mvc += d->bmi.mv.as_mv.col;
+ sum_mvc_abs += abs(d->bmi.mv.as_mv.col);
+ sum_mvrs += d->bmi.mv.as_mv.row * d->bmi.mv.as_mv.row;
+ sum_mvcs += d->bmi.mv.as_mv.col * d->bmi.mv.as_mv.col;
+ intercount++;
+
+ best_ref_mv.as_int = d->bmi.mv.as_int;
+
+ /* Was the vector non-zero */
+ if (d->bmi.mv.as_int) {
+ mvcount++;
+
+ /* Was it different from the last non-zero vector */
+ if (d->bmi.mv.as_int != lastmv_as_int) new_mv_count++;
+ lastmv_as_int = d->bmi.mv.as_int;
+
+ /* Does the Row vector point inwards or outwards */
+ if (mb_row < cm->mb_rows / 2) {
+ if (d->bmi.mv.as_mv.row > 0) {
+ sum_in_vectors--;
+ } else if (d->bmi.mv.as_mv.row < 0) {
+ sum_in_vectors++;
+ }
+ } else if (mb_row > cm->mb_rows / 2) {
+ if (d->bmi.mv.as_mv.row > 0) {
+ sum_in_vectors++;
+ } else if (d->bmi.mv.as_mv.row < 0) {
+ sum_in_vectors--;
+ }
+ }
+
+ /* Does the Col vector point inwards or outwards */
+ if (mb_col < cm->mb_cols / 2) {
+ if (d->bmi.mv.as_mv.col > 0) {
+ sum_in_vectors--;
+ } else if (d->bmi.mv.as_mv.col < 0) {
+ sum_in_vectors++;
+ }
+ } else if (mb_col > cm->mb_cols / 2) {
+ if (d->bmi.mv.as_mv.col > 0) {
+ sum_in_vectors++;
+ } else if (d->bmi.mv.as_mv.col < 0) {
+ sum_in_vectors--;
+ }
+ }
+ }
+ }
+ }
+
+ coded_error += (int64_t)this_error;
+
+ /* adjust to the next column of macroblocks */
+ x->src.y_buffer += 16;
+ x->src.u_buffer += 8;
+ x->src.v_buffer += 8;
+
+ recon_yoffset += 16;
+ recon_uvoffset += 8;
+ }
+
+ /* adjust to the next row of mbs */
+ x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
+ x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
+ x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
+
+ /* extend the recon for intra prediction */
+ vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8,
+ xd->dst.v_buffer + 8);
+ vpx_clear_system_state();
+ }
+
+ vpx_clear_system_state();
+ {
+ double weight = 0.0;
+
+ FIRSTPASS_STATS fps;
+
+ fps.frame = cm->current_video_frame;
+ fps.intra_error = (double)(intra_error >> 8);
+ fps.coded_error = (double)(coded_error >> 8);
+ weight = simple_weight(cpi->Source);
+
+ if (weight < 0.1) weight = 0.1;
+
+ fps.ssim_weighted_pred_err = fps.coded_error * weight;
+
+ fps.pcnt_inter = 0.0;
+ fps.pcnt_motion = 0.0;
+ fps.MVr = 0.0;
+ fps.mvr_abs = 0.0;
+ fps.MVc = 0.0;
+ fps.mvc_abs = 0.0;
+ fps.MVrv = 0.0;
+ fps.MVcv = 0.0;
+ fps.mv_in_out_count = 0.0;
+ fps.new_mv_count = 0.0;
+ fps.count = 1.0;
+
+ fps.pcnt_inter = 1.0 * (double)intercount / cm->MBs;
+ fps.pcnt_second_ref = 1.0 * (double)second_ref_count / cm->MBs;
+ fps.pcnt_neutral = 1.0 * (double)neutral_count / cm->MBs;
+
+ if (mvcount > 0) {
+ fps.MVr = (double)sum_mvr / (double)mvcount;
+ fps.mvr_abs = (double)sum_mvr_abs / (double)mvcount;
+ fps.MVc = (double)sum_mvc / (double)mvcount;
+ fps.mvc_abs = (double)sum_mvc_abs / (double)mvcount;
+ fps.MVrv = ((double)sum_mvrs - (fps.MVr * fps.MVr / (double)mvcount)) /
+ (double)mvcount;
+ fps.MVcv = ((double)sum_mvcs - (fps.MVc * fps.MVc / (double)mvcount)) /
+ (double)mvcount;
+ fps.mv_in_out_count = (double)sum_in_vectors / (double)(mvcount * 2);
+ fps.new_mv_count = new_mv_count;
+
+ fps.pcnt_motion = 1.0 * (double)mvcount / cpi->common.MBs;
+ }
+
+ /* TODO: handle the case when duration is set to 0, or something less
+ * than the full time between subsequent cpi->source_time_stamps
+ */
+ fps.duration = (double)(cpi->source->ts_end - cpi->source->ts_start);
+
+ /* don't want to do output stats with a stack variable! */
+ memcpy(&cpi->twopass.this_frame_stats, &fps, sizeof(FIRSTPASS_STATS));
+ output_stats(cpi->output_pkt_list, &cpi->twopass.this_frame_stats);
+ accumulate_stats(&cpi->twopass.total_stats, &fps);
+ }
+
+ /* Copy the previous Last Frame into the GF buffer if specific
+ * conditions for doing so are met
+ */
+ if ((cm->current_video_frame > 0) &&
+ (cpi->twopass.this_frame_stats.pcnt_inter > 0.20) &&
+ ((cpi->twopass.this_frame_stats.intra_error /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.this_frame_stats.coded_error)) >
+ 2.0)) {
+ vp8_yv12_copy_frame(lst_yv12, gld_yv12);
+ }
+
+ /* swap frame pointers so last frame refers to the frame we just
+ * compressed
+ */
+ vp8_swap_yv12_buffer(lst_yv12, new_yv12);
+ vp8_yv12_extend_frame_borders(lst_yv12);
+
+ /* Special case for the first frame. Copy into the GF buffer as a
+ * second reference.
+ */
+ if (cm->current_video_frame == 0) {
+ vp8_yv12_copy_frame(lst_yv12, gld_yv12);
+ }
+
+ /* use this to see what the first pass reconstruction looks like */
+ if (0) {
+ char filename[512];
+ FILE *recon_file;
+ sprintf(filename, "enc%04d.yuv", (int)cm->current_video_frame);
+
+ if (cm->current_video_frame == 0) {
+ recon_file = fopen(filename, "wb");
+ } else {
+ recon_file = fopen(filename, "ab");
+ }
+
+ (void)fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, recon_file);
+ fclose(recon_file);
+ }
+
+ cm->current_video_frame++;
+}
+extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
+
+/* Estimate a cost per mb attributable to overheads such as the coding of
+ * modes and motion vectors.
+ * Currently simplistic in its assumptions for testing.
+ */
+
+static double bitcost(double prob) {
+ if (prob > 0.000122) {
+ return -log(prob) / log(2.0);
+ } else {
+ return 13.0;
+ }
+}
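+
+/* A minimal worked sketch of bitcost() (illustrative only, compiled out in
+ * the same spirit as the if (0) debug blocks elsewhere in this file): the
+ * cost of an event is its self-information -log2(prob), floored at 13 bits
+ * once prob drops below roughly 2^-13 (0.000122) so that very rare events
+ * cannot blow up the estimate.
+ */
+#if 0
+static void bitcost_sketch(void) {
+ double b;
+ b = bitcost(0.5); /* -log2(0.5) = 1.0 bit */
+ b = bitcost(0.25); /* -log2(0.25) = 2.0 bits */
+ b = bitcost(0.00001); /* below the threshold -> clamped to 13.0 */
+ (void)b;
+}
+#endif
+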
+static int64_t estimate_modemvcost(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats) {
+ int mv_cost;
+ int64_t mode_cost;
+
+ double av_pct_inter = fpstats->pcnt_inter / fpstats->count;
+ double av_pct_motion = fpstats->pcnt_motion / fpstats->count;
+ double av_intra = (1.0 - av_pct_inter);
+
+ double zz_cost;
+ double motion_cost;
+ double intra_cost;
+
+ zz_cost = bitcost(av_pct_inter - av_pct_motion);
+ motion_cost = bitcost(av_pct_motion);
+ intra_cost = bitcost(av_intra);
+
+ /* Estimate of extra bits per mv overhead for mbs
+ * << 9 is the normalization to the (bits * 512) used in vp8_bits_per_mb
+ */
+ mv_cost = ((int)(fpstats->new_mv_count / fpstats->count) * 8) << 9;
+
+ /* Crude estimate of overhead cost from modes
+ * << 9 is the normalization to (bits * 512) used in vp8_bits_per_mb
+ */
+ mode_cost =
+ (int64_t)((((av_pct_inter - av_pct_motion) * zz_cost) +
+ (av_pct_motion * motion_cost) + (av_intra * intra_cost)) *
+ cpi->common.MBs) *
+ 512;
+
+ return mv_cost + mode_cost;
+}
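+
+/* A numeric sketch of the bits * 512 convention used above (illustrative
+ * values): with an average of 100 new motion vectors per frame at the
+ * assumed ~8 bits each, mv_cost = (100 * 8) << 9 = 409600, i.e. 800 bits
+ * expressed in the same fixed-point scale as vp8_bits_per_mb.
+ */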
+
+static double calc_correction_factor(double err_per_mb, double err_devisor,
+ double pt_low, double pt_high, int Q) {
+ double power_term;
+ double error_term = err_per_mb / err_devisor;
+ double correction_factor;
+
+ /* Adjustment based on Q to power term. */
+ power_term = pt_low + (Q * 0.01);
+ power_term = (power_term > pt_high) ? pt_high : power_term;
+
+ /* Adjustments to error term */
+ /* TBD */
+
+ /* Calculate correction factor */
+ correction_factor = pow(error_term, power_term);
+
+ /* Clip range */
+ correction_factor = (correction_factor < 0.05) ? 0.05
+ : (correction_factor > 5.0) ? 5.0
+ : correction_factor;
+
+ return correction_factor;
+}
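+
+/* Worked example of the correction factor (illustrative, compiled out):
+ * with err_per_mb = 300 and err_devisor = 150 the error term is 2.0. At
+ * Q = 10 the power term is 0.40 + 0.10 = 0.50; at Q = 60 it saturates at
+ * pt_high. The result is later clipped to the range [0.05, 5.0].
+ */
+#if 0
+static void correction_factor_sketch(void) {
+ double f_low = calc_correction_factor(300.0, 150.0, 0.40, 0.90, 10);
+ double f_high = calc_correction_factor(300.0, 150.0, 0.40, 0.90, 60);
+ /* f_low = pow(2.0, 0.50) ~= 1.41, f_high = pow(2.0, 0.90) ~= 1.87 */
+ (void)f_low;
+ (void)f_high;
+}
+#endif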
+
+static int estimate_max_q(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats,
+ int section_target_bandwitdh, int overhead_bits) {
+ int Q;
+ int num_mbs = cpi->common.MBs;
+ int target_norm_bits_per_mb;
+
+ double section_err = (fpstats->coded_error / fpstats->count);
+ double err_per_mb = section_err / num_mbs;
+ double err_correction_factor;
+ double speed_correction = 1.0;
+ int overhead_bits_per_mb;
+
+ if (section_target_bandwitdh <= 0) {
+ return cpi->twopass.maxq_max_limit; /* Highest value allowed */
+ }
+
+ target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20))
+ ? (512 * section_target_bandwitdh) / num_mbs
+ : 512 * (section_target_bandwitdh / num_mbs);
+
+ /* Calculate a corrective factor based on a rolling ratio of bits spent
+ * vs target bits
+ */
+ if ((cpi->rolling_target_bits > 0) &&
+ (cpi->active_worst_quality < cpi->worst_quality)) {
+ double rolling_ratio;
+
+ rolling_ratio =
+ (double)cpi->rolling_actual_bits / (double)cpi->rolling_target_bits;
+
+ if (rolling_ratio < 0.95) {
+ cpi->twopass.est_max_qcorrection_factor -= 0.005;
+ } else if (rolling_ratio > 1.05) {
+ cpi->twopass.est_max_qcorrection_factor += 0.005;
+ }
+
+ cpi->twopass.est_max_qcorrection_factor =
+ (cpi->twopass.est_max_qcorrection_factor < 0.1) ? 0.1
+ : (cpi->twopass.est_max_qcorrection_factor > 10.0)
+ ? 10.0
+ : cpi->twopass.est_max_qcorrection_factor;
+ }
+
+ /* Corrections for higher compression speed settings
+ * (reduced compression expected)
+ */
+ if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) {
+ if (cpi->oxcf.cpu_used <= 5) {
+ speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
+ } else {
+ speed_correction = 1.25;
+ }
+ }
+
+ /* Estimate of overhead bits per mb */
+ /* Correction to overhead bits for min allowed Q. */
+ overhead_bits_per_mb = overhead_bits / num_mbs;
+ overhead_bits_per_mb = (int)(overhead_bits_per_mb *
+ pow(0.98, (double)cpi->twopass.maxq_min_limit));
+
+ /* Try and pick a max Q that will be high enough to encode the
+ * content at the given rate.
+ */
+ for (Q = cpi->twopass.maxq_min_limit; Q < cpi->twopass.maxq_max_limit; ++Q) {
+ int bits_per_mb_at_this_q;
+
+ /* Error per MB based correction factor */
+ err_correction_factor =
+ calc_correction_factor(err_per_mb, 150.0, 0.40, 0.90, Q);
+
+ bits_per_mb_at_this_q =
+ vp8_bits_per_mb[INTER_FRAME][Q] + overhead_bits_per_mb;
+
+ bits_per_mb_at_this_q =
+ (int)(.5 + err_correction_factor * speed_correction *
+ cpi->twopass.est_max_qcorrection_factor *
+ cpi->twopass.section_max_qfactor *
+ (double)bits_per_mb_at_this_q);
+
+ /* Mode and motion overhead */
+ /* As Q rises in the real encode loop the rd code will force overhead
+ * down. We make a crude adjustment for this here as *0.98 per Q step.
+ */
+ overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98);
+
+ if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) break;
+ }
+
+ /* Restriction on active max q for constrained quality mode. */
+ if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (Q < cpi->cq_target_quality)) {
+ Q = cpi->cq_target_quality;
+ }
+
+ /* Adjust maxq_min_limit and maxq_max_limit based on the average q
+ * observed in the clip for non kf/gf/arf frames.
+ * Give the average a chance to settle though.
+ */
+ if ((cpi->ni_frames > ((int)cpi->twopass.total_stats.count >> 8)) &&
+ (cpi->ni_frames > 150)) {
+ cpi->twopass.maxq_max_limit = ((cpi->ni_av_qi + 32) < cpi->worst_quality)
+ ? (cpi->ni_av_qi + 32)
+ : cpi->worst_quality;
+ cpi->twopass.maxq_min_limit = ((cpi->ni_av_qi - 32) > cpi->best_quality)
+ ? (cpi->ni_av_qi - 32)
+ : cpi->best_quality;
+ }
+
+ return Q;
+}
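+
+/* Minimal sketch of the search pattern used above (illustrative only; the
+ * table name and size are hypothetical): walk Q upwards and stop at the
+ * first value whose projected bits per mb fits under the per-mb target,
+ * as the estimate_* functions in this file do against vp8_bits_per_mb.
+ */
+#if 0
+static int pick_q_sketch(const int *bits_per_mb_table, int table_size,
+ int target_norm_bits_per_mb) {
+ int q;
+ for (q = 0; q < table_size; ++q) {
+ if (bits_per_mb_table[q] <= target_norm_bits_per_mb) break;
+ }
+ return q; /* == table_size if even max Q cannot hit the target */
+}
+#endif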
+
+/* For cq mode estimate a cq level that matches the observed
+ * complexity and data rate.
+ */
+static int estimate_cq(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats,
+ int section_target_bandwitdh, int overhead_bits) {
+ int Q;
+ int num_mbs = cpi->common.MBs;
+ int target_norm_bits_per_mb;
+
+ double section_err = (fpstats->coded_error / fpstats->count);
+ double err_per_mb = section_err / num_mbs;
+ double err_correction_factor;
+ double speed_correction = 1.0;
+ double clip_iiratio;
+ double clip_iifactor;
+ int overhead_bits_per_mb;
+
+ if (0) {
+ FILE *f = fopen("epmp.stt", "a");
+ fprintf(f, "%10.2f\n", err_per_mb);
+ fclose(f);
+ }
+
+ target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20))
+ ? (512 * section_target_bandwitdh) / num_mbs
+ : 512 * (section_target_bandwitdh / num_mbs);
+
+ /* Estimate of overhead bits per mb */
+ overhead_bits_per_mb = overhead_bits / num_mbs;
+
+ /* Corrections for higher compression speed settings
+ * (reduced compression expected)
+ */
+ if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) {
+ if (cpi->oxcf.cpu_used <= 5) {
+ speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
+ } else {
+ speed_correction = 1.25;
+ }
+ }
+
+ /* II ratio correction factor for clip as a whole */
+ clip_iiratio = cpi->twopass.total_stats.intra_error /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.total_stats.coded_error);
+ clip_iifactor = 1.0 - ((clip_iiratio - 10.0) * 0.025);
+ if (clip_iifactor < 0.80) clip_iifactor = 0.80;
+
+ /* Try and pick a Q that can encode the content at the given rate. */
+ for (Q = 0; Q < MAXQ; ++Q) {
+ int bits_per_mb_at_this_q;
+
+ /* Error per MB based correction factor */
+ err_correction_factor =
+ calc_correction_factor(err_per_mb, 100.0, 0.40, 0.90, Q);
+
+ bits_per_mb_at_this_q =
+ vp8_bits_per_mb[INTER_FRAME][Q] + overhead_bits_per_mb;
+
+ bits_per_mb_at_this_q =
+ (int)(.5 + err_correction_factor * speed_correction * clip_iifactor *
+ (double)bits_per_mb_at_this_q);
+
+ /* Mode and motion overhead */
+ /* As Q rises in the real encode loop the rd code will force overhead
+ * down. We make a crude adjustment for this here as *0.98 per Q step.
+ */
+ overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98);
+
+ if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) break;
+ }
+
+ /* Clip value to range "best allowed to (worst allowed - 1)" */
+ Q = cq_level[Q];
+ if (Q >= cpi->worst_quality) Q = cpi->worst_quality - 1;
+ if (Q < cpi->best_quality) Q = cpi->best_quality;
+
+ return Q;
+}
+
+static int estimate_q(VP8_COMP *cpi, double section_err,
+ int section_target_bandwitdh) {
+ int Q;
+ int num_mbs = cpi->common.MBs;
+ int target_norm_bits_per_mb;
+
+ double err_per_mb = section_err / num_mbs;
+ double err_correction_factor;
+ double speed_correction = 1.0;
+
+ target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20))
+ ? (512 * section_target_bandwitdh) / num_mbs
+ : 512 * (section_target_bandwitdh / num_mbs);
+
+ /* Corrections for higher compression speed settings
+ * (reduced compression expected)
+ */
+ if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) {
+ if (cpi->oxcf.cpu_used <= 5) {
+ speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
+ } else {
+ speed_correction = 1.25;
+ }
+ }
+
+ /* Try and pick a Q that can encode the content at the given rate. */
+ for (Q = 0; Q < MAXQ; ++Q) {
+ int bits_per_mb_at_this_q;
+
+ /* Error per MB based correction factor */
+ err_correction_factor =
+ calc_correction_factor(err_per_mb, 150.0, 0.40, 0.90, Q);
+
+ bits_per_mb_at_this_q =
+ (int)(.5 + (err_correction_factor * speed_correction *
+ cpi->twopass.est_max_qcorrection_factor *
+ (double)vp8_bits_per_mb[INTER_FRAME][Q] / 1.0));
+
+ if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) break;
+ }
+
+ return Q;
+}
+
+/* Estimate a worst case Q for a KF group */
+static int estimate_kf_group_q(VP8_COMP *cpi, double section_err,
+ int section_target_bandwitdh,
+ double group_iiratio) {
+ int Q;
+ int num_mbs = cpi->common.MBs;
+ int target_norm_bits_per_mb = (512 * section_target_bandwitdh) / num_mbs;
+ int bits_per_mb_at_this_q;
+
+ double err_per_mb = section_err / num_mbs;
+ double err_correction_factor;
+ double speed_correction = 1.0;
+ double current_spend_ratio = 1.0;
+
+ double pow_highq = (POW1 < 0.6) ? POW1 + 0.3 : 0.90;
+ double pow_lowq = (POW1 < 0.7) ? POW1 + 0.1 : 0.80;
+
+ double iiratio_correction_factor = 1.0;
+
+ double combined_correction_factor;
+
+ /* Trap special case where the target is <= 0 */
+ if (target_norm_bits_per_mb <= 0) return MAXQ * 2;
+
+ /* Calculate a corrective factor based on a rolling ratio of bits spent
+ * vs target bits
+ * This is clamped to the range 0.1 to 10.0
+ */
+ if (cpi->long_rolling_target_bits <= 0) {
+ current_spend_ratio = 10.0;
+ } else {
+ current_spend_ratio = (double)cpi->long_rolling_actual_bits /
+ (double)cpi->long_rolling_target_bits;
+ current_spend_ratio = (current_spend_ratio > 10.0) ? 10.0
+ : (current_spend_ratio < 0.1) ? 0.1
+ : current_spend_ratio;
+ }
+
+ /* Calculate a correction factor based on the quality of prediction in
+ * the sequence as indicated by the intra / inter error score ratio
+ * (IIRatio). The idea here is to favour subsampling in the hardest
+ * sections vs the easiest.
+ */
+ iiratio_correction_factor = 1.0 - ((group_iiratio - 6.0) * 0.1);
+
+ if (iiratio_correction_factor < 0.5) iiratio_correction_factor = 0.5;
+
+ /* Corrections for higher compression speed settings
+ * (reduced compression expected)
+ */
+ if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) {
+ if (cpi->oxcf.cpu_used <= 5) {
+ speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
+ } else {
+ speed_correction = 1.25;
+ }
+ }
+
+ /* Combine the various factors calculated above */
+ combined_correction_factor =
+ speed_correction * iiratio_correction_factor * current_spend_ratio;
+
+ /* Try and pick a Q that should be high enough to encode the content at
+ * the given rate.
+ */
+ for (Q = 0; Q < MAXQ; ++Q) {
+ /* Error per MB based correction factor */
+ err_correction_factor =
+ calc_correction_factor(err_per_mb, 150.0, pow_lowq, pow_highq, Q);
+
+ bits_per_mb_at_this_q =
+ (int)(.5 + (err_correction_factor * combined_correction_factor *
+ (double)vp8_bits_per_mb[INTER_FRAME][Q]));
+
+ if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) break;
+ }
+
+ /* If we could not hit the target even at Max Q then estimate what Q
+ * would have been required
+ */
+ while ((bits_per_mb_at_this_q > target_norm_bits_per_mb) &&
+ (Q < (MAXQ * 2))) {
+ bits_per_mb_at_this_q = (int)(0.96 * bits_per_mb_at_this_q);
+ Q++;
+ }
+
+ if (0) {
+ FILE *f = fopen("estkf_q.stt", "a");
+ fprintf(f, "%8d %8d %8d %8.2f %8.3f %8.2f %8.3f %8.3f %8.3f %8d\n",
+ cpi->common.current_video_frame, bits_per_mb_at_this_q,
+ target_norm_bits_per_mb, err_per_mb, err_correction_factor,
+ current_spend_ratio, group_iiratio, iiratio_correction_factor,
+ (double)cpi->buffer_level / (double)cpi->oxcf.optimal_buffer_level,
+ Q);
+ fclose(f);
+ }
+
+ return Q;
+}
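+
+/* Numeric note on the beyond-MAXQ extrapolation above (illustrative): each
+ * extra Q step scales the projected bits per mb by 0.96, so roughly 17
+ * steps (ln 0.5 / ln 0.96) are needed for each further halving of the
+ * projected rate.
+ */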
+
+void vp8_init_second_pass(VP8_COMP *cpi) {
+ FIRSTPASS_STATS this_frame;
+ FIRSTPASS_STATS *start_pos;
+
+ double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth *
+ cpi->oxcf.two_pass_vbrmin_section / 100);
+
+ zero_stats(&cpi->twopass.total_stats);
+ zero_stats(&cpi->twopass.total_left_stats);
+
+ if (!cpi->twopass.stats_in_end) return;
+
+ cpi->twopass.total_stats = *cpi->twopass.stats_in_end;
+ cpi->twopass.total_left_stats = cpi->twopass.total_stats;
+
+ /* Each frame can have a different duration, as the frame rate in the
+ * source isn't guaranteed to be constant. The frame rate prior to
+ * the first frame encoded in the second pass is a guess. However the
+ * sum duration is not; it's calculated from the actual durations of
+ * all frames in the first pass.
+ */
+ vp8_new_framerate(cpi, 10000000.0 * cpi->twopass.total_stats.count /
+ cpi->twopass.total_stats.duration);
+
+ cpi->output_framerate = cpi->framerate;
+ cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats.duration *
+ cpi->oxcf.target_bandwidth / 10000000.0);
+ cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats.duration *
+ two_pass_min_rate / 10000000.0);
+
+ /* Calculate a minimum intra value to be used in determining the IIratio
+ * scores used in the second pass. We have this minimum to make sure
+ * that clips that are static but "low complexity" in the intra domain
+ * are still boosted appropriately for KF/GF/ARF
+ */
+ cpi->twopass.kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs;
+ cpi->twopass.gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs;
+
+ /* Scan the first pass file and calculate an average Intra / Inter error
+ * score ratio for the sequence
+ */
+ {
+ double sum_iiratio = 0.0;
+ double IIRatio;
+
+ start_pos = cpi->twopass.stats_in; /* Note starting "file" position */
+
+ while (input_stats(cpi, &this_frame) != EOF) {
+ IIRatio =
+ this_frame.intra_error / DOUBLE_DIVIDE_CHECK(this_frame.coded_error);
+ IIRatio = (IIRatio < 1.0) ? 1.0 : (IIRatio > 20.0) ? 20.0 : IIRatio;
+ sum_iiratio += IIRatio;
+ }
+
+ cpi->twopass.avg_iiratio =
+ sum_iiratio /
+ DOUBLE_DIVIDE_CHECK((double)cpi->twopass.total_stats.count);
+
+ /* Reset file position */
+ reset_fpf_position(cpi, start_pos);
+ }
+
+ /* Scan the first pass file and calculate a modified total error based
+ * upon the bias/power function used to allocate bits
+ */
+ {
+ start_pos = cpi->twopass.stats_in; /* Note starting "file" position */
+
+ cpi->twopass.modified_error_total = 0.0;
+ cpi->twopass.modified_error_used = 0.0;
+
+ while (input_stats(cpi, &this_frame) != EOF) {
+ cpi->twopass.modified_error_total +=
+ calculate_modified_err(cpi, &this_frame);
+ }
+ cpi->twopass.modified_error_left = cpi->twopass.modified_error_total;
+
+ reset_fpf_position(cpi, start_pos); /* Reset file position */
+ }
+}
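+
+/* Numeric sketch of the bits_left arithmetic above (illustrative values):
+ * durations are accumulated in the 10,000,000 units-per-second timebase
+ * used in this function, so 10 s of source at target_bandwidth = 500000
+ * bps gives (1e8 * 500000) / 1e7 = 5,000,000 bits, before the two-pass
+ * VBR minimum section rate is deducted.
+ */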
+
+void vp8_end_second_pass(VP8_COMP *cpi) { (void)cpi; }
+
+/* This function gives an estimate of how badly we believe the prediction
+ * quality is decaying from frame to frame.
+ */
+static double get_prediction_decay_rate(FIRSTPASS_STATS *next_frame) {
+ double prediction_decay_rate;
+ double motion_decay;
+ double motion_pct = next_frame->pcnt_motion;
+
+ /* Initial basis is the % mbs inter coded */
+ prediction_decay_rate = next_frame->pcnt_inter;
+
+ /* High % motion -> somewhat higher decay rate */
+ motion_decay = (1.0 - (motion_pct / 20.0));
+ if (motion_decay < prediction_decay_rate) {
+ prediction_decay_rate = motion_decay;
+ }
+
+ /* Adjustment to decay rate based on speed of motion */
+ {
+ double this_mv_rabs;
+ double this_mv_cabs;
+ double distance_factor;
+
+ this_mv_rabs = fabs(next_frame->mvr_abs * motion_pct);
+ this_mv_cabs = fabs(next_frame->mvc_abs * motion_pct);
+
+ distance_factor =
+ sqrt((this_mv_rabs * this_mv_rabs) + (this_mv_cabs * this_mv_cabs)) /
+ 250.0;
+ distance_factor = ((distance_factor > 1.0) ? 0.0 : (1.0 - distance_factor));
+ if (distance_factor < prediction_decay_rate) {
+ prediction_decay_rate = distance_factor;
+ }
+ }
+
+ return prediction_decay_rate;
+}
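+
+/* Worked example of the decay computation above (illustrative, compiled
+ * out): pcnt_inter = 0.90 and pcnt_motion = 0.40 give motion_decay =
+ * 1.0 - 0.40 / 20.0 = 0.98, so the inter percentage still dominates; with
+ * mvr_abs = 8 and mvc_abs = 6 the motion-weighted magnitudes are 3.2 and
+ * 2.4, giving distance_factor = 1.0 - sqrt(3.2^2 + 2.4^2) / 250.0 = 0.984,
+ * so the returned rate is min(0.90, 0.98, 0.984) = 0.90.
+ */
+#if 0
+static void decay_rate_sketch(void) {
+ FIRSTPASS_STATS s;
+ memset(&s, 0, sizeof(s));
+ s.pcnt_inter = 0.90;
+ s.pcnt_motion = 0.40;
+ s.mvr_abs = 8.0;
+ s.mvc_abs = 6.0;
+ /* get_prediction_decay_rate(&s) evaluates to 0.90 for these values */
+}
+#endif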
+
+/* Function to test for a condition where a complex transition is followed
+ * by a static section. For example in slide shows where there is a fade
+ * between slides. This is to help with more optimal kf and gf positioning.
+ */
+static int detect_transition_to_still(VP8_COMP *cpi, int frame_interval,
+ int still_interval,
+ double loop_decay_rate,
+ double decay_accumulator) {
+ int trans_to_still = 0;
+
+ /* Break clause to detect very still sections after motion
+ * For example a static image after a fade or other transition
+ * instead of a clean scene cut.
+ */
+ if ((frame_interval > MIN_GF_INTERVAL) && (loop_decay_rate >= 0.999) &&
+ (decay_accumulator < 0.9)) {
+ int j;
+ FIRSTPASS_STATS *position = cpi->twopass.stats_in;
+ FIRSTPASS_STATS tmp_next_frame;
+ double decay_rate;
+
+ /* Look ahead a few frames to see if static condition persists... */
+ for (j = 0; j < still_interval; ++j) {
+ if (EOF == input_stats(cpi, &tmp_next_frame)) break;
+
+ decay_rate = get_prediction_decay_rate(&tmp_next_frame);
+ if (decay_rate < 0.999) break;
+ }
+ /* Reset file position */
+ reset_fpf_position(cpi, position);
+
+ /* Only if it does do we signal a transition to still */
+ if (j == still_interval) trans_to_still = 1;
+ }
+
+ return trans_to_still;
+}
+
+/* This function detects a flash through the high relative pcnt_second_ref
+ * score in the frame following a flash frame. The offset passed in should
+ * reflect this.
+ */
+static int detect_flash(VP8_COMP *cpi, int offset) {
+ FIRSTPASS_STATS next_frame;
+
+ int flash_detected = 0;
+
+ /* Read the frame data. */
+ /* The return is 0 (no flash detected) if not a valid frame */
+ if (read_frame_stats(cpi, &next_frame, offset) != EOF) {
+ /* What we are looking for here is a situation where there is a
+ * brief break in prediction (such as a flash) but subsequent frames
+ * are reasonably well predicted by an earlier (pre-flash) frame.
+ * The recovery after a flash is indicated by a high pcnt_second_ref
+ * compared to pcnt_inter.
+ */
+ if ((next_frame.pcnt_second_ref > next_frame.pcnt_inter) &&
+ (next_frame.pcnt_second_ref >= 0.5)) {
+ flash_detected = 1;
+
+ /*if (1)
+ {
+ FILE *f = fopen("flash.stt", "a");
+ fprintf(f, "%8.0f %6.2f %6.2f\n",
+ next_frame.frame,
+ next_frame.pcnt_inter,
+ next_frame.pcnt_second_ref);
+ fclose(f);
+ }*/
+ }
+ }
+
+ return flash_detected;
+}
+
+/* Update the motion related elements to the GF arf boost calculation */
+static void accumulate_frame_motion_stats(FIRSTPASS_STATS *this_frame,
+ double *this_frame_mv_in_out,
+ double *mv_in_out_accumulator,
+ double *abs_mv_in_out_accumulator,
+ double *mv_ratio_accumulator) {
+ double this_frame_mvr_ratio;
+ double this_frame_mvc_ratio;
+ double motion_pct;
+
+ /* Accumulate motion stats. */
+ motion_pct = this_frame->pcnt_motion;
+
+ /* Accumulate Motion In/Out of frame stats */
+ *this_frame_mv_in_out = this_frame->mv_in_out_count * motion_pct;
+ *mv_in_out_accumulator += this_frame->mv_in_out_count * motion_pct;
+ *abs_mv_in_out_accumulator += fabs(this_frame->mv_in_out_count * motion_pct);
+
+ /* Accumulate a measure of how uniform (or conversely how random)
+ * the motion field is. (A ratio of absmv / mv)
+ */
+ if (motion_pct > 0.05) {
+ this_frame_mvr_ratio =
+ fabs(this_frame->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVr));
+
+ this_frame_mvc_ratio =
+ fabs(this_frame->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVc));
+
+ *mv_ratio_accumulator += (this_frame_mvr_ratio < this_frame->mvr_abs)
+ ? (this_frame_mvr_ratio * motion_pct)
+ : this_frame->mvr_abs * motion_pct;
+
+ *mv_ratio_accumulator += (this_frame_mvc_ratio < this_frame->mvc_abs)
+ ? (this_frame_mvc_ratio * motion_pct)
+ : this_frame->mvc_abs * motion_pct;
+ }
+}
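+
+/* Intuition for the absmv / mv ratio accumulated above (illustrative): if
+ * every block moves identically then mvr_abs equals |MVr| and the ratio is
+ * 1.0 (a uniform field); if vectors largely cancel, MVr approaches zero
+ * while mvr_abs stays large, so the ratio grows and flags a random field.
+ */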
+
+/* Calculate a baseline boost number for the current frame. */
+static double calc_frame_boost(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame,
+ double this_frame_mv_in_out) {
+ double frame_boost;
+
+ /* Underlying boost factor is based on inter intra error ratio */
+ if (this_frame->intra_error > cpi->twopass.gf_intra_err_min) {
+ frame_boost = (IIFACTOR * this_frame->intra_error /
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
+ } else {
+ frame_boost = (IIFACTOR * cpi->twopass.gf_intra_err_min /
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
+ }
+
+ /* Increase boost for frames where new data is coming into the frame
+ * (eg zoom out). Slightly reduce boost if there is a net balance
+ * of motion out of the frame (zoom in).
+ * The range for this_frame_mv_in_out is -1.0 to +1.0
+ */
+ if (this_frame_mv_in_out > 0.0) {
+ frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
+ } else {
+ /* In the extreme case (all motion outward) the boost is halved */
+ frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
+ }
+
+ /* Clip to maximum */
+ if (frame_boost > GF_RMAX) frame_boost = GF_RMAX;
+
+ return frame_boost;
+}
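+
+/* Worked example of the in / out adjustment above (illustrative): a net
+ * inflow of this_frame_mv_in_out = +0.25 scales the baseline boost by
+ * 1.0 + 0.25 * 2.0 = 1.5, while a net outflow of -0.5 scales it by
+ * 1.0 - 0.5 / 2.0 = 0.75; at the -1.0 extreme the boost is halved, as
+ * noted in the function.
+ */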
+
+#if NEW_BOOST
+static int calc_arf_boost(VP8_COMP *cpi, int offset, int f_frames, int b_frames,
+ int *f_boost, int *b_boost) {
+ FIRSTPASS_STATS this_frame;
+
+ int i;
+ double boost_score = 0.0;
+ double mv_ratio_accumulator = 0.0;
+ double decay_accumulator = 1.0;
+ double this_frame_mv_in_out = 0.0;
+ double mv_in_out_accumulator = 0.0;
+ double abs_mv_in_out_accumulator = 0.0;
+ double r;
+ int flash_detected = 0;
+
+ /* Search forward from the proposed arf/next gf position */
+ for (i = 0; i < f_frames; ++i) {
+ if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF) break;
+
+ /* Update the motion related elements to the boost calculation */
+ accumulate_frame_motion_stats(
+ &this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+ /* Calculate the baseline boost number for this frame */
+ r = calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out);
+
+ /* We want to discount the flash frame itself and the recovery
+ * frame that follows, as both will have poor scores.
+ */
+ flash_detected =
+ detect_flash(cpi, (i + offset)) || detect_flash(cpi, (i + offset + 1));
+
+ /* Cumulative effect of prediction quality decay */
+ if (!flash_detected) {
+ decay_accumulator =
+ decay_accumulator * get_prediction_decay_rate(&this_frame);
+ decay_accumulator = decay_accumulator < 0.1 ? 0.1 : decay_accumulator;
+ }
+ boost_score += (decay_accumulator * r);
+
+ /* Break out conditions. */
+ if ((!flash_detected) &&
+ ((mv_ratio_accumulator > 100.0) || (abs_mv_in_out_accumulator > 3.0) ||
+ (mv_in_out_accumulator < -2.0))) {
+ break;
+ }
+ }
+
+ *f_boost = (int)(boost_score * 100.0) >> 4;
+
+ /* Reset for backward looking loop */
+ boost_score = 0.0;
+ mv_ratio_accumulator = 0.0;
+ decay_accumulator = 1.0;
+ this_frame_mv_in_out = 0.0;
+ mv_in_out_accumulator = 0.0;
+ abs_mv_in_out_accumulator = 0.0;
+
+ /* Search backward from the proposed arf/next gf position */
+ for (i = -1; i >= -b_frames; i--) {
+ if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF) break;
+
+ /* Update the motion related elements to the boost calculation */
+ accumulate_frame_motion_stats(
+ &this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+ /* Calculate the baseline boost number for this frame */
+ r = calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out);
+
+ /* We want to discount the flash frame itself and the recovery
+ * frame that follows, as both will have poor scores.
+ */
+ flash_detected =
+ detect_flash(cpi, (i + offset)) || detect_flash(cpi, (i + offset + 1));
+
+ /* Cumulative effect of prediction quality decay */
+ if (!flash_detected) {
+ decay_accumulator =
+ decay_accumulator * get_prediction_decay_rate(&this_frame);
+ decay_accumulator = decay_accumulator < 0.1 ? 0.1 : decay_accumulator;
+ }
+
+ boost_score += (decay_accumulator * r);
+
+ /* Break out conditions. */
+ if ((!flash_detected) &&
+ ((mv_ratio_accumulator > 100.0) || (abs_mv_in_out_accumulator > 3.0) ||
+ (mv_in_out_accumulator < -2.0))) {
+ break;
+ }
+ }
+ *b_boost = (int)(boost_score * 100.0) >> 4;
+
+ return (*f_boost + *b_boost);
+}
+#endif
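+
+/* Numeric note on the boost scaling above (illustrative): f_boost and
+ * b_boost are computed as (boost_score * 100) >> 4, i.e. roughly
+ * boost_score * 6.25, so an accumulated score of 16.0 maps to a boost of
+ * exactly 100.
+ */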
+
+/* Analyse and define a gf/arf group. */
+static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ FIRSTPASS_STATS next_frame;
+ FIRSTPASS_STATS *start_pos;
+ int i;
+ double r;
+ double boost_score = 0.0;
+ double old_boost_score = 0.0;
+ double gf_group_err = 0.0;
+ double gf_first_frame_err = 0.0;
+ double mod_frame_err = 0.0;
+
+ double mv_ratio_accumulator = 0.0;
+ double decay_accumulator = 1.0;
+
+ double loop_decay_rate = 1.00; /* Starting decay rate */
+
+ double this_frame_mv_in_out = 0.0;
+ double mv_in_out_accumulator = 0.0;
+ double abs_mv_in_out_accumulator = 0.0;
+
+ int max_bits = frame_max_bits(cpi); /* Max for a single frame */
+
+ unsigned int allow_alt_ref =
+ cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames;
+
+ int alt_boost = 0;
+ int f_boost = 0;
+ int b_boost = 0;
+ int flash_detected;
+
+ cpi->twopass.gf_group_bits = 0;
+ cpi->twopass.gf_decay_rate = 0;
+
+ vpx_clear_system_state();
+
+ start_pos = cpi->twopass.stats_in;
+
+ memset(&next_frame, 0, sizeof(next_frame)); /* assure clean */
+
+ /* Load stats for the current frame. */
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+
+ /* Note the error of the frame at the start of the group (this will be
+ * the GF frame error if we code a normal gf).
+ */
+ gf_first_frame_err = mod_frame_err;
+
+ /* Special treatment if the current frame is a key frame (which is also
+ * a gf). If it is then its error score (and hence bit allocation) needs
+ * to be subtracted out from the calculation for the GF group.
+ */
+ if (cpi->common.frame_type == KEY_FRAME) gf_group_err -= gf_first_frame_err;
+
+ /* Scan forward to try and work out how many frames the next gf group
+ * should contain and what level of boost is appropriate for the GF
+ * or ARF that will be coded with the group
+ */
+ i = 0;
+
+ while (((i < cpi->twopass.static_scene_max_gf_interval) ||
+ ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) &&
+ (i < cpi->twopass.frames_to_key)) {
+ i++;
+
+ /* Accumulate error score of frames in this gf group */
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+
+ gf_group_err += mod_frame_err;
+
+ if (EOF == input_stats(cpi, &next_frame)) break;
+
+ /* Test for the case where there is a brief flash but the prediction
+ * quality relative to an earlier frame is then restored.
+ */
+ flash_detected = detect_flash(cpi, 0);
+
+ /* Update the motion related elements to the boost calculation */
+ accumulate_frame_motion_stats(
+ &next_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+ /* Calculate a baseline boost number for this frame */
+ r = calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out);
+
+ /* Cumulative effect of prediction quality decay */
+ if (!flash_detected) {
+ loop_decay_rate = get_prediction_decay_rate(&next_frame);
+ decay_accumulator = decay_accumulator * loop_decay_rate;
+ decay_accumulator = decay_accumulator < 0.1 ? 0.1 : decay_accumulator;
+ }
+ boost_score += (decay_accumulator * r);
+
+ /* Break clause to detect very still sections after motion.
+ * For example a static image after a fade or other transition.
+ */
+ if (detect_transition_to_still(cpi, i, 5, loop_decay_rate,
+ decay_accumulator)) {
+ allow_alt_ref = 0;
+ boost_score = old_boost_score;
+ break;
+ }
+
+ /* Break out conditions. */
+ if (
+ /* Break at cpi->max_gf_interval unless almost totally static */
+ (i >= cpi->max_gf_interval && (decay_accumulator < 0.995)) ||
+ (
+ /* Don't break out with a very short interval */
+ (i > MIN_GF_INTERVAL) &&
+ /* Don't break out very close to a key frame */
+ ((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
+ ((boost_score > 20.0) || (next_frame.pcnt_inter < 0.75)) &&
+ (!flash_detected) &&
+ ((mv_ratio_accumulator > 100.0) ||
+ (abs_mv_in_out_accumulator > 3.0) ||
+ (mv_in_out_accumulator < -2.0) ||
+ ((boost_score - old_boost_score) < 2.0)))) {
+ boost_score = old_boost_score;
+ break;
+ }
+
+ memcpy(this_frame, &next_frame, sizeof(*this_frame));
+
+ old_boost_score = boost_score;
+ }
+
+ cpi->twopass.gf_decay_rate =
+ (i > 0) ? (int)(100.0 * (1.0 - decay_accumulator)) / i : 0;
+
+ /* When using CBR apply additional buffer related upper limits */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ double max_boost;
+
+ /* For cbr apply buffer related limits */
+ if (cpi->drop_frames_allowed) {
+ int64_t df_buffer_level = cpi->oxcf.drop_frames_water_mark *
+ (cpi->oxcf.optimal_buffer_level / 100);
+
+ if (cpi->buffer_level > df_buffer_level) {
+ max_boost =
+ ((double)((cpi->buffer_level - df_buffer_level) * 2 / 3) * 16.0) /
+ DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth);
+ } else {
+ max_boost = 0.0;
+ }
+ } else if (cpi->buffer_level > 0) {
+ max_boost = ((double)(cpi->buffer_level * 2 / 3) * 16.0) /
+ DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth);
+ } else {
+ max_boost = 0.0;
+ }
+
+ if (boost_score > max_boost) boost_score = max_boost;
+ }
+
+ /* Don't allow a conventional gf too near the next kf */
+ if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL) {
+ while (i < cpi->twopass.frames_to_key) {
+ i++;
+
+ if (EOF == input_stats(cpi, this_frame)) break;
+
+ if (i < cpi->twopass.frames_to_key) {
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+ gf_group_err += mod_frame_err;
+ }
+ }
+ }
+
+ cpi->gfu_boost = (int)(boost_score * 100.0) >> 4;
+
+#if NEW_BOOST
+ /* Alternative boost calculation for alt ref */
+ alt_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost);
+#endif
+
+ /* Should we use the alternate reference frame */
+ if (allow_alt_ref && (i >= MIN_GF_INTERVAL) &&
+ /* don't use an ARF very near the next kf */
+ (i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) &&
+#if NEW_BOOST
+ ((next_frame.pcnt_inter > 0.75) || (next_frame.pcnt_second_ref > 0.5)) &&
+ ((mv_in_out_accumulator / (double)i > -0.2) ||
+ (mv_in_out_accumulator > -2.0)) &&
+ (b_boost > 100) && (f_boost > 100))
+#else
+ (next_frame.pcnt_inter > 0.75) &&
+ ((mv_in_out_accumulator / (double)i > -0.2) ||
+ (mv_in_out_accumulator > -2.0)) &&
+ (cpi->gfu_boost > 100) &&
+ (cpi->twopass.gf_decay_rate <=
+ (ARF_DECAY_THRESH + (cpi->gfu_boost / 200))))
+#endif
+ {
+ int Boost;
+ int allocation_chunks;
+ int Q =
+ (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
+ int tmp_q;
+ int arf_frame_bits = 0;
+ int group_bits;
+
+#if NEW_BOOST
+ cpi->gfu_boost = alt_boost;
+#endif
+
+ /* Estimate the bits to be allocated to the group as a whole */
+ if ((cpi->twopass.kf_group_bits > 0) &&
+ (cpi->twopass.kf_group_error_left > 0)) {
+ group_bits =
+ (int)((double)cpi->twopass.kf_group_bits *
+ (gf_group_err / (double)cpi->twopass.kf_group_error_left));
+ } else {
+ group_bits = 0;
+ }
+
+/* Boost for arf frame */
+#if NEW_BOOST
+ Boost = (alt_boost * GFQ_ADJUSTMENT) / 100;
+#else
+ Boost = (cpi->gfu_boost * 3 * GFQ_ADJUSTMENT) / (2 * 100);
+#endif
+ Boost += (i * 50);
+
+ /* Set max and minimum boost and hence minimum allocation */
+ if (Boost > ((cpi->baseline_gf_interval + 1) * 200)) {
+ Boost = ((cpi->baseline_gf_interval + 1) * 200);
+ } else if (Boost < 125) {
+ Boost = 125;
+ }
+
+ allocation_chunks = (i * 100) + Boost;
+
+ /* Normalize Boost and allocation chunks down to prevent overflow */
+ while (Boost > 1000) {
+ Boost /= 2;
+ allocation_chunks /= 2;
+ }
+
+ /* Calculate the number of bits to be spent on the arf based on the
+ * boost number
+ */
+ arf_frame_bits =
+ (int)((double)Boost * (group_bits / (double)allocation_chunks));
+
+ /* Estimate if there are enough bits available to make worthwhile use
+ * of an arf.
+ */
+ tmp_q = estimate_q(cpi, mod_frame_err, (int)arf_frame_bits);
+
+ /* Only use an arf if it is likely we will be able to code
+ * it at a lower Q than the surrounding frames.
+ */
+ if (tmp_q < cpi->worst_quality) {
+ int half_gf_int;
+ int frames_after_arf;
+ int frames_bwd = cpi->oxcf.arnr_max_frames - 1;
+ int frames_fwd = cpi->oxcf.arnr_max_frames - 1;
+
+ cpi->source_alt_ref_pending = 1;
+
+ /*
+ * For alt ref frames the error score for the end frame of the
+ * group (the alt ref frame) should not contribute to the group
+ * total and hence the number of bit allocated to the group.
+ * Rather it forms part of the next group (it is the GF at the
+ * start of the next group)
+ * gf_group_err -= mod_frame_err;
+ *
+ * For alt ref frames the alt ref frame is technically part of the
+ * GF frame for the next group but we always base the error
+ * calculation and bit allocation on the current group of frames.
+ *
+ * Set the interval till the next gf or arf.
+ * For ARFs this is the number of frames to be coded before the
+ * future frame that is coded as an ARF.
+ * The future frame itself is part of the next group
+ */
+ cpi->baseline_gf_interval = i;
+
+ /*
+ * Define the arnr filter width for this group of frames:
+ * We only filter frames that lie within a distance of half
+ * the GF interval from the ARF frame. We also have to trap
+ * cases where the filter extends beyond the end of clip.
+ * Note: this_frame->frame has been updated in the loop
+ * so it now points at the ARF frame.
+ */
+ half_gf_int = cpi->baseline_gf_interval >> 1;
+ frames_after_arf =
+ (int)(cpi->twopass.total_stats.count - this_frame->frame - 1);
+
+ switch (cpi->oxcf.arnr_type) {
+ case 1: /* Backward filter */
+ frames_fwd = 0;
+ if (frames_bwd > half_gf_int) frames_bwd = half_gf_int;
+ break;
+
+ case 2: /* Forward filter */
+ if (frames_fwd > half_gf_int) frames_fwd = half_gf_int;
+ if (frames_fwd > frames_after_arf) frames_fwd = frames_after_arf;
+ frames_bwd = 0;
+ break;
+
+ case 3: /* Centered filter */
+ default:
+ frames_fwd >>= 1;
+ if (frames_fwd > frames_after_arf) frames_fwd = frames_after_arf;
+ if (frames_fwd > half_gf_int) frames_fwd = half_gf_int;
+
+ frames_bwd = frames_fwd;
+
+ /* For even length filter there is one more frame backward
+ * than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
+ */
+ if (frames_bwd < half_gf_int) {
+ frames_bwd += (cpi->oxcf.arnr_max_frames + 1) & 0x1;
+ }
+ break;
+ }
+
+ cpi->active_arnr_frames = frames_bwd + 1 + frames_fwd;
+ } else {
+ cpi->source_alt_ref_pending = 0;
+ cpi->baseline_gf_interval = i;
+ }
+ } else {
+ cpi->source_alt_ref_pending = 0;
+ cpi->baseline_gf_interval = i;
+ }
+
+ /*
+ * Now decide how many bits should be allocated to the GF group as a
+ * proportion of those remaining in the kf group.
+ * The final key frame group in the clip is treated as a special case
+ * where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left.
+ * This is also important for short clips where there may only be one
+ * key frame.
+ */
+ if (cpi->twopass.frames_to_key >=
+ (int)(cpi->twopass.total_stats.count - cpi->common.current_video_frame)) {
+ cpi->twopass.kf_group_bits =
+ (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0;
+ }
+
+ /* Calculate the bits to be allocated to the group as a whole */
+ if ((cpi->twopass.kf_group_bits > 0) &&
+ (cpi->twopass.kf_group_error_left > 0)) {
+ cpi->twopass.gf_group_bits =
+ (int64_t)(cpi->twopass.kf_group_bits *
+ (gf_group_err / cpi->twopass.kf_group_error_left));
+ } else {
+ cpi->twopass.gf_group_bits = 0;
+ }
+
+ cpi->twopass.gf_group_bits =
+ (cpi->twopass.gf_group_bits < 0) ? 0
+ : (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits)
+ ? cpi->twopass.kf_group_bits
+ : cpi->twopass.gf_group_bits;
+
+ /* Clip cpi->twopass.gf_group_bits based on user supplied data rate
+ * variability limit (cpi->oxcf.two_pass_vbrmax_section)
+ */
+ if (cpi->twopass.gf_group_bits >
+ (int64_t)max_bits * cpi->baseline_gf_interval) {
+ cpi->twopass.gf_group_bits = (int64_t)max_bits * cpi->baseline_gf_interval;
+ }
+
+ /* Reset the file position */
+ reset_fpf_position(cpi, start_pos);
+
+ /* Update the record of error used so far (only done once per gf group) */
+ cpi->twopass.modified_error_used += gf_group_err;
+
+ /* Assign bits to the arf or gf. */
+ for (i = 0; i <= (cpi->source_alt_ref_pending &&
+ cpi->common.frame_type != KEY_FRAME);
+ i++) {
+ int Boost;
+ int allocation_chunks;
+ int Q =
+ (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
+ int gf_bits;
+
+ /* For ARF frames */
+ if (cpi->source_alt_ref_pending && i == 0) {
+#if NEW_BOOST
+ Boost = (alt_boost * GFQ_ADJUSTMENT) / 100;
+#else
+ Boost = (cpi->gfu_boost * 3 * GFQ_ADJUSTMENT) / (2 * 100);
+#endif
+ Boost += (cpi->baseline_gf_interval * 50);
+
+ /* Set max and minimum boost and hence minimum allocation */
+ if (Boost > ((cpi->baseline_gf_interval + 1) * 200)) {
+ Boost = ((cpi->baseline_gf_interval + 1) * 200);
+ } else if (Boost < 125) {
+ Boost = 125;
+ }
+
+ allocation_chunks = ((cpi->baseline_gf_interval + 1) * 100) + Boost;
+ }
+ /* Else for standard golden frames */
+ else {
+ /* boost based on inter / intra ratio of subsequent frames */
+ Boost = (cpi->gfu_boost * GFQ_ADJUSTMENT) / 100;
+
+ /* Set max and minimum boost and hence minimum allocation */
+ if (Boost > (cpi->baseline_gf_interval * 150)) {
+ Boost = (cpi->baseline_gf_interval * 150);
+ } else if (Boost < 125) {
+ Boost = 125;
+ }
+
+ allocation_chunks = (cpi->baseline_gf_interval * 100) + (Boost - 100);
+ }
+
+ /* Normalize Boost and allocation chunks down to prevent overflow */
+ while (Boost > 1000) {
+ Boost /= 2;
+ allocation_chunks /= 2;
+ }
+
+ /* Calculate the number of bits to be spent on the gf or arf based on
+ * the boost number
+ */
+ gf_bits = (int)((double)Boost *
+ (cpi->twopass.gf_group_bits / (double)allocation_chunks));
+
+ /* If the frame that is to be boosted is simpler than the average for
+ * the gf/arf group then use an alternative calculation
+ * based on the error score of the frame itself
+ */
+ if (mod_frame_err < gf_group_err / (double)cpi->baseline_gf_interval) {
+ double alt_gf_grp_bits;
+ int alt_gf_bits;
+
+ alt_gf_grp_bits =
+ (double)cpi->twopass.kf_group_bits *
+ (mod_frame_err * (double)cpi->baseline_gf_interval) /
+ DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left);
+
+ alt_gf_bits =
+ (int)((double)Boost * (alt_gf_grp_bits / (double)allocation_chunks));
+
+ if (gf_bits > alt_gf_bits) {
+ gf_bits = alt_gf_bits;
+ }
+ }
+ /* Else if it is harder than other frames in the group make sure it at
+ * least receives an allocation in keeping with its relative error
+ * score, otherwise it may be worse off than an "un-boosted" frame
+ */
+ else {
+ // Avoid division by 0 by clamping cpi->twopass.kf_group_error_left to 1
+ int alt_gf_bits =
+ (int)((double)cpi->twopass.kf_group_bits * mod_frame_err /
+ (double)VPXMAX(cpi->twopass.kf_group_error_left, 1));
+
+ if (alt_gf_bits > gf_bits) {
+ gf_bits = alt_gf_bits;
+ }
+ }
+
+ /* Apply an additional limit for CBR */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ if (cpi->twopass.gf_bits > (int)(cpi->buffer_level >> 1)) {
+ cpi->twopass.gf_bits = (int)(cpi->buffer_level >> 1);
+ }
+ }
+
+ /* Dont allow a negative value for gf_bits */
+ if (gf_bits < 0) gf_bits = 0;
+
+ /* Add in minimum for a frame */
+ gf_bits += cpi->min_frame_bandwidth;
+
+ if (i == 0) {
+ cpi->twopass.gf_bits = gf_bits;
+ }
+ if (i == 1 || (!cpi->source_alt_ref_pending &&
+ (cpi->common.frame_type != KEY_FRAME))) {
+ /* Per frame bit target for this frame */
+ cpi->per_frame_bandwidth = gf_bits;
+ }
+ }
+
+ {
+ /* Adjust KF group bits and error remaining */
+ cpi->twopass.kf_group_error_left -= (int64_t)gf_group_err;
+ cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits;
+
+ if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
+
+ /* Note the error score left in the remaining frames of the group.
+ * For normal GFs we want to remove the error score for the first
+ * frame of the group (except in Key frame case where this has
+ * already happened)
+ */
+ if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME) {
+ cpi->twopass.gf_group_error_left =
+ (int)(gf_group_err - gf_first_frame_err);
+ } else {
+ cpi->twopass.gf_group_error_left = (int)gf_group_err;
+ }
+
+ cpi->twopass.gf_group_bits -=
+ cpi->twopass.gf_bits - cpi->min_frame_bandwidth;
+
+ if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
+
+ /* This condition could fail if there are two kfs very close together
+ * despite (MIN_GF_INTERVAL) and would cause a divide by 0 in the
+ * calculation of cpi->twopass.alt_extra_bits.
+ */
+ if (cpi->baseline_gf_interval >= 3) {
+#if NEW_BOOST
+ int boost = (cpi->source_alt_ref_pending) ? b_boost : cpi->gfu_boost;
+#else
+ int boost = cpi->gfu_boost;
+#endif
+ if (boost >= 150) {
+ int pct_extra;
+
+ pct_extra = (boost - 100) / 50;
+ pct_extra = (pct_extra > 20) ? 20 : pct_extra;
+
+ cpi->twopass.alt_extra_bits =
+ (int)(cpi->twopass.gf_group_bits * pct_extra) / 100;
+ cpi->twopass.gf_group_bits -= cpi->twopass.alt_extra_bits;
+ cpi->twopass.alt_extra_bits /= ((cpi->baseline_gf_interval - 1) >> 1);
+ } else {
+ cpi->twopass.alt_extra_bits = 0;
+ }
+ } else {
+ cpi->twopass.alt_extra_bits = 0;
+ }
+ }
+
+ /* Adjustments based on a measure of complexity of the section */
+ if (cpi->common.frame_type != KEY_FRAME) {
+ FIRSTPASS_STATS sectionstats;
+ double Ratio;
+
+ zero_stats(&sectionstats);
+ reset_fpf_position(cpi, start_pos);
+
+ for (i = 0; i < cpi->baseline_gf_interval; ++i) {
+ input_stats(cpi, &next_frame);
+ accumulate_stats(&sectionstats, &next_frame);
+ }
+
+ avg_stats(&sectionstats);
+
+ cpi->twopass.section_intra_rating =
+ (unsigned int)(sectionstats.intra_error /
+ DOUBLE_DIVIDE_CHECK(sectionstats.coded_error));
+
+ Ratio = sectionstats.intra_error /
+ DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
+ cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025);
+
+ if (cpi->twopass.section_max_qfactor < 0.80) {
+ cpi->twopass.section_max_qfactor = 0.80;
+ }
+
+ reset_fpf_position(cpi, start_pos);
+ }
+}
+
+/* Allocate bits to a normal frame that is neither a gf, an arf, nor a key
+ * frame.
+ */
+static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ int target_frame_size;
+
+ double modified_err;
+ double err_fraction;
+
+ int max_bits = frame_max_bits(cpi); /* Max for a single frame */
+
+ /* Calculate modified prediction error used in bit allocation */
+ modified_err = calculate_modified_err(cpi, this_frame);
+
+ /* What portion of the remaining GF group error is used by this frame */
+ if (cpi->twopass.gf_group_error_left > 0) {
+ err_fraction = modified_err / cpi->twopass.gf_group_error_left;
+ } else {
+ err_fraction = 0.0;
+ }
+
+ /* How many of those bits available for allocation should we give it? */
+ target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction);
+
+ /* Clip the target size to the range 0 to max_bits (or
+ * cpi->twopass.gf_group_bits) at the top end.
+ */
+ if (target_frame_size < 0) {
+ target_frame_size = 0;
+ } else {
+ if (target_frame_size > max_bits) target_frame_size = max_bits;
+
+ if (target_frame_size > cpi->twopass.gf_group_bits) {
+ target_frame_size = (int)cpi->twopass.gf_group_bits;
+ }
+ }
+
+ /* Adjust error and bits remaining */
+ cpi->twopass.gf_group_error_left -= (int)modified_err;
+ cpi->twopass.gf_group_bits -= target_frame_size;
+
+ if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
+
+ /* Add in the minimum number of bits that is set aside for every frame. */
+ target_frame_size += cpi->min_frame_bandwidth;
+
+ /* Every other frame gets a few extra bits */
+ if ((cpi->frames_since_golden & 0x01) &&
+ (cpi->frames_till_gf_update_due > 0)) {
+ target_frame_size += cpi->twopass.alt_extra_bits;
+ }
+
+ /* Per frame bit target for this frame */
+ cpi->per_frame_bandwidth = target_frame_size;
+}
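+
+/* Worked example of the allocation above (illustrative values): with
+ * gf_group_error_left = 100.0 and modified_err = 10.0 this frame claims
+ * err_fraction = 0.1 of the group; if gf_group_bits = 80000 that is a
+ * target of 8000 bits, clipped against max_bits, with the per-frame
+ * minimum (cpi->min_frame_bandwidth) added on afterwards.
+ */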
+
+void vp8_second_pass(VP8_COMP *cpi) {
+ int tmp_q;
+ int frames_left =
+ (int)(cpi->twopass.total_stats.count - cpi->common.current_video_frame);
+
+ FIRSTPASS_STATS this_frame;
+ FIRSTPASS_STATS this_frame_copy;
+
+ double this_frame_intra_error;
+ double this_frame_coded_error;
+
+ int overhead_bits;
+
+ vp8_zero(this_frame);
+
+ if (!cpi->twopass.stats_in) {
+ return;
+ }
+
+ vpx_clear_system_state();
+
+ if (EOF == input_stats(cpi, &this_frame)) return;
+
+ this_frame_intra_error = this_frame.intra_error;
+ this_frame_coded_error = this_frame.coded_error;
+
+ /* keyframe and section processing! */
+ if (cpi->twopass.frames_to_key == 0) {
+ /* Define next KF group and assign bits to it */
+ memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+ find_next_key_frame(cpi, &this_frame_copy);
+
+ /* Special case: error_resilient_mode does not make much sense for
+ * two pass, but with its current meaning this code is designed to
+ * stop outlandish behaviour if someone does set it when using two
+ * pass. It effectively disables GF groups. This is temporary code
+ * until we decide what should really happen in this case.
+ */
+ if (cpi->oxcf.error_resilient_mode) {
+ cpi->twopass.gf_group_bits = cpi->twopass.kf_group_bits;
+ cpi->twopass.gf_group_error_left = (int)cpi->twopass.kf_group_error_left;
+ cpi->baseline_gf_interval = cpi->twopass.frames_to_key;
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ cpi->source_alt_ref_pending = 0;
+ }
+ }
+
+ /* Is this a GF / ARF (Note that a KF is always also a GF) */
+ if (cpi->frames_till_gf_update_due == 0) {
+ /* Define next gf group and assign bits to it */
+ memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+ define_gf_group(cpi, &this_frame_copy);
+
+ /* If we are going to code an altref frame at the end of the group
+ * and the current frame is not a key frame: if the previous group
+ * used an arf, this frame has already benefited from that arf boost
+ * and should not be given extra bits; if the previous group was NOT
+ * coded using an arf we may want to apply some boost to this GF as
+ * well.
+ */
+ if (cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)) {
+ /* Assign a standard frames worth of bits from those allocated
+ * to the GF group
+ */
+ int bak = cpi->per_frame_bandwidth;
+ memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+ assign_std_frame_bits(cpi, &this_frame_copy);
+ cpi->per_frame_bandwidth = bak;
+ }
+ }
+
+ /* Otherwise this is an ordinary frame */
+ else {
+ /* Special case: error_resilient_mode does not make much sense for
+ * two pass, but with its current meaning this code is designed to
+ * stop outlandish behaviour if someone does set it when using two
+ * pass. It effectively disables GF groups. This is temporary code
+ * until we decide what should really happen in this case.
+ */
+ if (cpi->oxcf.error_resilient_mode) {
+ cpi->frames_till_gf_update_due = cpi->twopass.frames_to_key;
+
+ if (cpi->common.frame_type != KEY_FRAME) {
+ /* Assign bits from those allocated to the GF group */
+ memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+ assign_std_frame_bits(cpi, &this_frame_copy);
+ }
+ } else {
+ /* Assign bits from those allocated to the GF group */
+ memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+ assign_std_frame_bits(cpi, &this_frame_copy);
+ }
+ }
+
+ /* Keep a globally available copy of this and the next frame's iiratio. */
+ cpi->twopass.this_iiratio =
+ (unsigned int)(this_frame_intra_error /
+ DOUBLE_DIVIDE_CHECK(this_frame_coded_error));
+ {
+ FIRSTPASS_STATS next_frame;
+ if (lookup_next_frame_stats(cpi, &next_frame) != EOF) {
+ cpi->twopass.next_iiratio =
+ (unsigned int)(next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+ }
+ }
+
+ /* Set nominal per second bandwidth for this frame */
+ cpi->target_bandwidth =
+ (int)(cpi->per_frame_bandwidth * cpi->output_framerate);
+ if (cpi->target_bandwidth < 0) cpi->target_bandwidth = 0;
+
+ /* Account for mv, mode and other overheads. */
+ overhead_bits = (int)estimate_modemvcost(cpi, &cpi->twopass.total_left_stats);
+
+ /* Special case code for first frame. */
+ if (cpi->common.current_video_frame == 0) {
+ cpi->twopass.est_max_qcorrection_factor = 1.0;
+
+ /* Set a cq_level in constrained quality mode. */
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ int est_cq;
+
+ est_cq = estimate_cq(cpi, &cpi->twopass.total_left_stats,
+ (int)(cpi->twopass.bits_left / frames_left),
+ overhead_bits);
+
+ cpi->cq_target_quality = cpi->oxcf.cq_level;
+ if (est_cq > cpi->cq_target_quality) cpi->cq_target_quality = est_cq;
+ }
+
+ /* guess at maxq needed in 2nd pass */
+ cpi->twopass.maxq_max_limit = cpi->worst_quality;
+ cpi->twopass.maxq_min_limit = cpi->best_quality;
+
+ tmp_q = estimate_max_q(cpi, &cpi->twopass.total_left_stats,
+ (int)(cpi->twopass.bits_left / frames_left),
+ overhead_bits);
+
+ /* Limit the maxq value returned subsequently.
+ * This increases the risk of overspend or underspend if the initial
+ * estimate for the clip is bad, but helps prevent excessive
+ * variation in Q, especially near the end of a clip
+ * where for example a small overspend may cause Q to crash
+ */
+ cpi->twopass.maxq_max_limit =
+ ((tmp_q + 32) < cpi->worst_quality) ? (tmp_q + 32) : cpi->worst_quality;
+ cpi->twopass.maxq_min_limit =
+ ((tmp_q - 32) > cpi->best_quality) ? (tmp_q - 32) : cpi->best_quality;
+
+ cpi->active_worst_quality = tmp_q;
+ cpi->ni_av_qi = tmp_q;
+ }
+
+ /* The last few frames of a clip almost always have too few or too many
+ * bits and for the sake of overly exact rate control we don't want to
+ * make radical adjustments to the allowed quantizer range just to use
+ * up a few surplus bits or get beneath the target rate.
+ */
+ else if ((cpi->common.current_video_frame <
+ (((unsigned int)cpi->twopass.total_stats.count * 255) >> 8)) &&
+ ((cpi->common.current_video_frame + cpi->baseline_gf_interval) <
+ (unsigned int)cpi->twopass.total_stats.count)) {
+ if (frames_left < 1) frames_left = 1;
+
+ tmp_q = estimate_max_q(cpi, &cpi->twopass.total_left_stats,
+ (int)(cpi->twopass.bits_left / frames_left),
+ overhead_bits);
+
+ /* Move active_worst_quality but in a damped way */
+ if (tmp_q > cpi->active_worst_quality) {
+ cpi->active_worst_quality++;
+ } else if (tmp_q < cpi->active_worst_quality) {
+ cpi->active_worst_quality--;
+ }
+
+ cpi->active_worst_quality =
+ ((cpi->active_worst_quality * 3) + tmp_q + 2) / 4;
+ }
+
+ cpi->twopass.frames_to_key--;
+
+ /* Update the total stats remaining structure */
+ subtract_stats(&cpi->twopass.total_left_stats, &this_frame);
+}
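+
+/* Numeric sketch of the damped active_worst_quality update above
+ * (illustrative values): with active_worst_quality = 40 and a new estimate
+ * tmp_q = 60 the value is first nudged to 41 and then blended as
+ * (41 * 3 + 60 + 2) / 4 = 46, moving only part of the way toward the new
+ * estimate on each frame.
+ */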
+
+static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame,
+ FIRSTPASS_STATS *this_frame,
+ FIRSTPASS_STATS *next_frame) {
+ int is_viable_kf = 0;
+
+ /* Does the frame satisfy the primary criteria of a key frame
+ * If so, then examine how well it predicts subsequent frames
+ */
+ if ((this_frame->pcnt_second_ref < 0.10) &&
+ (next_frame->pcnt_second_ref < 0.10) &&
+ ((this_frame->pcnt_inter < 0.05) ||
+ (((this_frame->pcnt_inter - this_frame->pcnt_neutral) < .25) &&
+ ((this_frame->intra_error /
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) < 2.5) &&
+ ((fabs(last_frame->coded_error - this_frame->coded_error) /
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error) >
+ .40) ||
+ (fabs(last_frame->intra_error - this_frame->intra_error) /
+ DOUBLE_DIVIDE_CHECK(this_frame->intra_error) >
+ .40) ||
+ ((next_frame->intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > 3.5))))) {
+ int i;
+ FIRSTPASS_STATS *start_pos;
+
+ FIRSTPASS_STATS local_next_frame;
+
+ double boost_score = 0.0;
+ double old_boost_score = 0.0;
+ double decay_accumulator = 1.0;
+ double next_iiratio;
+
+ memcpy(&local_next_frame, next_frame, sizeof(*next_frame));
+
+ /* Note the starting file position so we can reset to it */
+ start_pos = cpi->twopass.stats_in;
+
+ /* Examine how well the key frame predicts subsequent frames */
+ for (i = 0; i < 16; ++i) {
+ next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error));
+
+ if (next_iiratio > RMAX) next_iiratio = RMAX;
+
+ /* Cumulative effect of decay in prediction quality */
+ if (local_next_frame.pcnt_inter > 0.85) {
+ decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
+ } else {
+ decay_accumulator =
+ decay_accumulator * ((0.85 + local_next_frame.pcnt_inter) / 2.0);
+ }
+
+ /* Keep a running total */
+ boost_score += (decay_accumulator * next_iiratio);
+
+ /* Test various breakout clauses */
+ if ((local_next_frame.pcnt_inter < 0.05) || (next_iiratio < 1.5) ||
+ (((local_next_frame.pcnt_inter - local_next_frame.pcnt_neutral) <
+ 0.20) &&
+ (next_iiratio < 3.0)) ||
+ ((boost_score - old_boost_score) < 0.5) ||
+ (local_next_frame.intra_error < 200)) {
+ break;
+ }
+
+ old_boost_score = boost_score;
+
+ /* Get the next frame details */
+ if (EOF == input_stats(cpi, &local_next_frame)) break;
+ }
+
+ /* If there is tolerable prediction for at least the next 3 frames
+ * then accept this as a key frame, else discard this potential key
+ * frame and move on.
+ */
+ if (boost_score > 5.0 && (i > 3)) {
+ is_viable_kf = 1;
+ } else {
+ /* Reset the file position */
+ reset_fpf_position(cpi, start_pos);
+
+ is_viable_kf = 0;
+ }
+ }
+
+ return is_viable_kf;
+}
+static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ int i, j;
+ FIRSTPASS_STATS last_frame;
+ FIRSTPASS_STATS first_frame;
+ FIRSTPASS_STATS next_frame;
+ FIRSTPASS_STATS *start_position;
+
+ double decay_accumulator = 1.0;
+ double boost_score = 0;
+ double old_boost_score = 0.0;
+ double loop_decay_rate;
+
+ double kf_mod_err = 0.0;
+ double kf_group_err = 0.0;
+ double kf_group_intra_err = 0.0;
+ double kf_group_coded_err = 0.0;
+ double recent_loop_decay[8] = { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
+
+ memset(&next_frame, 0, sizeof(next_frame));
+
+ vpx_clear_system_state();
+ start_position = cpi->twopass.stats_in;
+
+ cpi->common.frame_type = KEY_FRAME;
+
+ /* is this a forced key frame by interval */
+ cpi->this_key_frame_forced = cpi->next_key_frame_forced;
+
+ /* Clear the alt ref active flag as this can never be active on a key
+ * frame
+ */
+ cpi->source_alt_ref_active = 0;
+
+ /* Kf is always a gf so clear frames till next gf counter */
+ cpi->frames_till_gf_update_due = 0;
+
+ cpi->twopass.frames_to_key = 1;
+
+ /* Take a copy of the initial frame details */
+ memcpy(&first_frame, this_frame, sizeof(*this_frame));
+
+ cpi->twopass.kf_group_bits = 0;
+ cpi->twopass.kf_group_error_left = 0;
+
+ kf_mod_err = calculate_modified_err(cpi, this_frame);
+
+ /* find the next keyframe */
+ i = 0;
+ while (cpi->twopass.stats_in < cpi->twopass.stats_in_end) {
+ /* Accumulate kf group error */
+ kf_group_err += calculate_modified_err(cpi, this_frame);
+
+ /* These figures keep intra and coded error counts for all frames
+ * including key frames in the group. The effect of the key frame
+ * itself can be subtracted out using the first_frame data
+ * collected above
+ */
+ kf_group_intra_err += this_frame->intra_error;
+ kf_group_coded_err += this_frame->coded_error;
+
+ /* Load the next frame's stats. */
+ memcpy(&last_frame, this_frame, sizeof(*this_frame));
+ input_stats(cpi, this_frame);
+
+ /* Provided that we are not at the end of the file... */
+ if (cpi->oxcf.auto_key &&
+ lookup_next_frame_stats(cpi, &next_frame) != EOF) {
+ /* Normal scene cut check */
+ if ((i >= MIN_GF_INTERVAL) &&
+ test_candidate_kf(cpi, &last_frame, this_frame, &next_frame)) {
+ break;
+ }
+
+ /* How fast is prediction quality decaying */
+ loop_decay_rate = get_prediction_decay_rate(&next_frame);
+
+ /* We want to know something about the recent past... rather than,
+ * as used elsewhere, the decay in prediction quality since the
+ * last GF or KF.
+ */
+ recent_loop_decay[i % 8] = loop_decay_rate;
+ decay_accumulator = 1.0;
+ for (j = 0; j < 8; ++j) {
+ decay_accumulator = decay_accumulator * recent_loop_decay[j];
+ }
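+ /* decay_accumulator now holds the product of the most recent 8 loop decay rates. */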
+
+ /* Special check for transition or high motion followed by a
+ * static scene.
+ */
+ if (detect_transition_to_still(cpi, i,
+ ((int)(cpi->key_frame_frequency) - (int)i),
+ loop_decay_rate, decay_accumulator)) {
+ break;
+ }
+
+ /* Step on to the next frame */
+ cpi->twopass.frames_to_key++;
+
+ /* If we don't have a real key frame within the next two
+ * key-frame-frequency intervals then break out of the loop.
+ */
+ if (cpi->twopass.frames_to_key >= 2 * (int)cpi->key_frame_frequency) {
+ break;
+ }
+ } else {
+ cpi->twopass.frames_to_key++;
+ }
+
+ i++;
+ }
+
+ /* If there is a max kf interval set by the user we must obey it.
+ * We already break out of the loop above at 2x max.
+ * This code centers the extra kf if the actual natural
+ * interval is between 1x and 2x.
+ */
+ if (cpi->oxcf.auto_key &&
+ cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency) {
+ FIRSTPASS_STATS *current_pos = cpi->twopass.stats_in;
+ FIRSTPASS_STATS tmp_frame;
+
+ cpi->twopass.frames_to_key /= 2;
+
+ /* Copy first frame details */
+ memcpy(&tmp_frame, &first_frame, sizeof(first_frame));
+
+ /* Reset to the start of the group */
+ reset_fpf_position(cpi, start_position);
+
+ kf_group_err = 0;
+ kf_group_intra_err = 0;
+ kf_group_coded_err = 0;
+
+ /* Rescan to get the correct error data for the forced kf group */
+ for (i = 0; i < cpi->twopass.frames_to_key; ++i) {
+ /* Accumulate kf group errors */
+ kf_group_err += calculate_modified_err(cpi, &tmp_frame);
+ kf_group_intra_err += tmp_frame.intra_error;
+ kf_group_coded_err += tmp_frame.coded_error;
+
+ /* Load the next frame's stats */
+ input_stats(cpi, &tmp_frame);
+ }
+
+ /* Reset to the start of the group */
+ reset_fpf_position(cpi, current_pos);
+
+ cpi->next_key_frame_forced = 1;
+ } else {
+ cpi->next_key_frame_forced = 0;
+ }
+
+ /* Special case for the last frame of the file */
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) {
+ /* Accumulate kf group error */
+ kf_group_err += calculate_modified_err(cpi, this_frame);
+
+ /* These figures keep intra and coded error counts for all frames
+ * including key frames in the group. The effect of the key frame
+ * itself can be subtracted out using the first_frame data
+ * collected above
+ */
+ kf_group_intra_err += this_frame->intra_error;
+ kf_group_coded_err += this_frame->coded_error;
+ }
+
+ /* Calculate the number of bits that should be assigned to the kf group. */
+ if ((cpi->twopass.bits_left > 0) &&
+ (cpi->twopass.modified_error_left > 0.0)) {
+ /* Max for a single normal frame (not key frame) */
+ int max_bits = frame_max_bits(cpi);
+
+ /* Maximum bits for the kf group */
+ int64_t max_grp_bits;
+
+ /* Default allocation based on bits left and relative
+ * complexity of the section
+ */
+ cpi->twopass.kf_group_bits =
+ (int64_t)(cpi->twopass.bits_left *
+ (kf_group_err / cpi->twopass.modified_error_left));
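+ /* e.g. a group holding 10% of the remaining modified error is given
+ * 10% of the remaining bits.
+ */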
+
+ /* Clip based on maximum per frame rate defined by the user. */
+ max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key;
+ if (cpi->twopass.kf_group_bits > max_grp_bits) {
+ cpi->twopass.kf_group_bits = max_grp_bits;
+ }
+
+ /* Additional special case for CBR if buffer is getting full. */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ int64_t opt_buffer_lvl = cpi->oxcf.optimal_buffer_level;
+ int64_t buffer_lvl = cpi->buffer_level;
+
+ /* If the buffer is near or above the optimal and this kf group is
+ * not being allocated much then increase the allocation a bit.
+ */
+ if (buffer_lvl >= opt_buffer_lvl) {
+ int64_t high_water_mark =
+ (opt_buffer_lvl + cpi->oxcf.maximum_buffer_size) >> 1;
+
+ int64_t av_group_bits;
+
+ /* Av bits per frame * number of frames */
+ av_group_bits = (int64_t)cpi->av_per_frame_bandwidth *
+ (int64_t)cpi->twopass.frames_to_key;
+
+ /* We are at or above the maximum. */
+ if (cpi->buffer_level >= high_water_mark) {
+ int64_t min_group_bits;
+
+ min_group_bits =
+ av_group_bits + (int64_t)(buffer_lvl - high_water_mark);
+
+ if (cpi->twopass.kf_group_bits < min_group_bits) {
+ cpi->twopass.kf_group_bits = min_group_bits;
+ }
+ }
+ /* We are above optimal but below the maximum */
+ else if (cpi->twopass.kf_group_bits < av_group_bits) {
+ int64_t bits_below_av = av_group_bits - cpi->twopass.kf_group_bits;
+
+ cpi->twopass.kf_group_bits += (int64_t)(
+ (double)bits_below_av * (double)(buffer_lvl - opt_buffer_lvl) /
+ (double)(high_water_mark - opt_buffer_lvl));
+ }
+ }
+ }
+ } else {
+ cpi->twopass.kf_group_bits = 0;
+ }
+
+ /* Reset the first pass file position */
+ reset_fpf_position(cpi, start_position);
+
+ /* determine how big to make this keyframe based on how well the
+ * subsequent frames use inter blocks
+ */
+ decay_accumulator = 1.0;
+ boost_score = 0.0;
+
+ for (i = 0; i < cpi->twopass.frames_to_key; ++i) {
+ double r;
+
+ if (EOF == input_stats(cpi, &next_frame)) break;
+
+ if (next_frame.intra_error > cpi->twopass.kf_intra_err_min) {
+ r = (IIKFACTOR2 * next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+ } else {
+ r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+ }
+
+ if (r > RMAX) r = RMAX;
+
+ /* How fast is prediction quality decaying */
+ loop_decay_rate = get_prediction_decay_rate(&next_frame);
+
+ decay_accumulator = decay_accumulator * loop_decay_rate;
+ decay_accumulator = decay_accumulator < 0.1 ? 0.1 : decay_accumulator;
+
+ boost_score += (decay_accumulator * r);
+
+ if ((i > MIN_GF_INTERVAL) && ((boost_score - old_boost_score) < 1.0)) {
+ break;
+ }
+
+ old_boost_score = boost_score;
+ }
+
+ if (1) {
+ FIRSTPASS_STATS sectionstats;
+ double Ratio;
+
+ zero_stats(&sectionstats);
+ reset_fpf_position(cpi, start_position);
+
+ for (i = 0; i < cpi->twopass.frames_to_key; ++i) {
+ input_stats(cpi, &next_frame);
+ accumulate_stats(&sectionstats, &next_frame);
+ }
+
+ avg_stats(&sectionstats);
+
+ cpi->twopass.section_intra_rating =
+ (unsigned int)(sectionstats.intra_error /
+ DOUBLE_DIVIDE_CHECK(sectionstats.coded_error));
+
+ Ratio = sectionstats.intra_error /
+ DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
+ cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025);
+
+ if (cpi->twopass.section_max_qfactor < 0.80) {
+ cpi->twopass.section_max_qfactor = 0.80;
+ }
+ }
+
+ /* When using CBR apply additional buffer fullness related upper limits */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ double max_boost;
+
+ if (cpi->drop_frames_allowed) {
+ int df_buffer_level = (int)(cpi->oxcf.drop_frames_water_mark *
+ (cpi->oxcf.optimal_buffer_level / 100));
+
+ if (cpi->buffer_level > df_buffer_level) {
+ max_boost =
+ ((double)((cpi->buffer_level - df_buffer_level) * 2 / 3) * 16.0) /
+ DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth);
+ } else {
+ max_boost = 0.0;
+ }
+ } else if (cpi->buffer_level > 0) {
+ max_boost = ((double)(cpi->buffer_level * 2 / 3) * 16.0) /
+ DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth);
+ } else {
+ max_boost = 0.0;
+ }
+
+ if (boost_score > max_boost) boost_score = max_boost;
+ }
+
+ /* Reset the first pass file position */
+ reset_fpf_position(cpi, start_position);
+
+ /* Work out how many bits to allocate for the key frame itself */
+ if (1) {
+ int kf_boost = (int)boost_score;
+ int allocation_chunks;
+ int Counter = cpi->twopass.frames_to_key;
+ int alt_kf_bits;
+ YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
+/* Min boost based on kf interval */
+#if 0
+
+ while ((kf_boost < 48) && (Counter > 0))
+ {
+ Counter -= 2;
+ kf_boost ++;
+ }
+
+#endif
+
+ if (kf_boost < 48) {
+ kf_boost += ((Counter + 1) >> 1);
+
+ if (kf_boost > 48) kf_boost = 48;
+ }
+
+ /* bigger frame sizes need larger kf boosts, smaller frames smaller
+ * boosts...
+ */
+ if ((lst_yv12->y_width * lst_yv12->y_height) > (320 * 240)) {
+ kf_boost += 2 * (lst_yv12->y_width * lst_yv12->y_height) / (320 * 240);
+ } else if ((lst_yv12->y_width * lst_yv12->y_height) < (320 * 240)) {
+ kf_boost -= 4 * (320 * 240) / (lst_yv12->y_width * lst_yv12->y_height);
+ }
+
+ /* Min KF boost */
+ kf_boost = (int)((double)kf_boost * 100.0) >> 4; /* Scale 16 to 100 */
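+ /* i.e. kf_boost *= 6.25, so e.g. a raw boost of 160 (in 16ths) becomes 1000 */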
+ if (kf_boost < 250) kf_boost = 250;
+
+ /*
+ * We do three calculations for kf size.
+ * The first is based on the error score for the whole kf group.
+ * The second (optionally) on the key frame's own error if this is
+ * smaller than the average for the group.
+ * The final one ensures that the frame receives at least the
+ * allocation it would have received based on its own error score vs
+ * the error score remaining.
+ * Special case if the sequence appears almost totally static
+ * as measured by the decay accumulator. In this case we want to
+ * spend almost all of the bits on the key frame.
+ * cpi->twopass.frames_to_key - 1 because the key frame itself is
+ * taken care of by kf_boost.
+ */
+ if (decay_accumulator >= 0.99) {
+ allocation_chunks = ((cpi->twopass.frames_to_key - 1) * 10) + kf_boost;
+ } else {
+ allocation_chunks = ((cpi->twopass.frames_to_key - 1) * 100) + kf_boost;
+ }
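+ /* e.g. with frames_to_key = 50 and kf_boost = 500 this gives
+ * 49 * 100 + 500 = 5400 chunks, so the key frame is later assigned
+ * 500/5400 (about 9%) of the group's bits.
+ */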
+
+ /* Normalize kf_boost and allocation chunks down to prevent overflow */
+ while (kf_boost > 1000) {
+ kf_boost /= 2;
+ allocation_chunks /= 2;
+ }
+
+ cpi->twopass.kf_group_bits =
+ (cpi->twopass.kf_group_bits < 0) ? 0 : cpi->twopass.kf_group_bits;
+
+ /* Calculate the number of bits to be spent on the key frame */
+ cpi->twopass.kf_bits =
+ (int)((double)kf_boost *
+ ((double)cpi->twopass.kf_group_bits / (double)allocation_chunks));
+
+ /* Apply an additional limit for CBR */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ if (cpi->twopass.kf_bits > (int)((3 * cpi->buffer_level) >> 2)) {
+ cpi->twopass.kf_bits = (int)((3 * cpi->buffer_level) >> 2);
+ }
+ }
+
+ /* If the key frame is actually easier than the average for the
+ * kf group (which does sometimes happen, e.g. a blank intro frame)
+ * then use an alternate calculation based on the kf error score,
+ * which should give a smaller key frame.
+ */
+ if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key) {
+ double alt_kf_grp_bits =
+ ((double)cpi->twopass.bits_left *
+ (kf_mod_err * (double)cpi->twopass.frames_to_key) /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left));
+
+ alt_kf_bits = (int)((double)kf_boost *
+ (alt_kf_grp_bits / (double)allocation_chunks));
+
+ if (cpi->twopass.kf_bits > alt_kf_bits) {
+ cpi->twopass.kf_bits = alt_kf_bits;
+ }
+ }
+ /* Else if it is much harder than other frames in the group make sure
+ * it at least receives an allocation in keeping with its relative
+ * error score
+ */
+ else {
+ alt_kf_bits = (int)((double)cpi->twopass.bits_left *
+ (kf_mod_err / DOUBLE_DIVIDE_CHECK(
+ cpi->twopass.modified_error_left)));
+
+ if (alt_kf_bits > cpi->twopass.kf_bits) {
+ cpi->twopass.kf_bits = alt_kf_bits;
+ }
+ }
+
+ cpi->twopass.kf_group_bits -= cpi->twopass.kf_bits;
+ /* Add in the minimum frame allowance */
+ cpi->twopass.kf_bits += cpi->min_frame_bandwidth;
+
+ /* Per frame bit target for this frame */
+ cpi->per_frame_bandwidth = cpi->twopass.kf_bits;
+
+ /* Convert to a per second bitrate */
+ cpi->target_bandwidth = (int)(cpi->twopass.kf_bits * cpi->output_framerate);
+ }
+
+ /* Note the total error score of the kf group minus the key frame itself */
+ cpi->twopass.kf_group_error_left = (int)(kf_group_err - kf_mod_err);
+
+ /* Adjust the count of total modified error left. The count of bits left
+ * is adjusted elsewhere based on real coded frame sizes
+ */
+ cpi->twopass.modified_error_left -= kf_group_err;
+
+ if (cpi->oxcf.allow_spatial_resampling) {
+ int resample_trigger = 0;
+ int last_kf_resampled = 0;
+ int kf_q;
+ int scale_val = 0;
+ int hr, hs, vr, vs;
+ int new_width = cpi->oxcf.Width;
+ int new_height = cpi->oxcf.Height;
+
+ int projected_buffer_level;
+ int tmp_q;
+
+ double projected_bits_perframe;
+ double group_iiratio = (kf_group_intra_err - first_frame.intra_error) /
+ (kf_group_coded_err - first_frame.coded_error);
+ double err_per_frame = kf_group_err / cpi->twopass.frames_to_key;
+ double bits_per_frame;
+ double av_bits_per_frame;
+ double effective_size_ratio;
+
+ if ((cpi->common.Width != cpi->oxcf.Width) ||
+ (cpi->common.Height != cpi->oxcf.Height)) {
+ last_kf_resampled = 1;
+ }
+
+ /* Set back to unscaled by default */
+ cpi->common.horiz_scale = VP8E_NORMAL;
+ cpi->common.vert_scale = VP8E_NORMAL;
+
+ /* Calculate Average bits per frame. */
+ av_bits_per_frame = cpi->oxcf.target_bandwidth /
+ DOUBLE_DIVIDE_CHECK((double)cpi->framerate);
+
+ /* CBR... Use the clip average as the target for deciding whether to resample */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ bits_per_frame = av_bits_per_frame;
+ }
+
+ /* In VBR we want to avoid downsampling in easy sections unless we
+ * are under extreme pressure, so use the larger of the target
+ * bitrate for this section or the average bitrate for the sequence.
+ */
+ else {
+ /* This accounts for how hard the section is... */
+ bits_per_frame =
+ (double)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key);
+
+ /* Don't resort to resampling in easy sections just because they
+ * have been assigned a small number of bits.
+ */
+ if (bits_per_frame < av_bits_per_frame) {
+ bits_per_frame = av_bits_per_frame;
+ }
+ }
+
+ /* bits_per_frame should comply with our minimum */
+ if (bits_per_frame < (cpi->oxcf.target_bandwidth *
+ cpi->oxcf.two_pass_vbrmin_section / 100)) {
+ bits_per_frame = (cpi->oxcf.target_bandwidth *
+ cpi->oxcf.two_pass_vbrmin_section / 100);
+ }
+
+ /* Work out if spatial resampling is necessary */
+ kf_q = estimate_kf_group_q(cpi, err_per_frame, (int)bits_per_frame,
+ group_iiratio);
+
+ /* If we project a required Q higher than the maximum allowed Q then
+ * make a guess at the actual size of frames in this section
+ */
+ projected_bits_perframe = bits_per_frame;
+ tmp_q = kf_q;
+
+ while (tmp_q > cpi->worst_quality) {
+ projected_bits_perframe *= 1.04;
+ tmp_q--;
+ }
+
+ /* Guess at buffer level at the end of the section */
+ projected_buffer_level =
+ (int)(cpi->buffer_level -
+ (int)((projected_bits_perframe - av_bits_per_frame) *
+ cpi->twopass.frames_to_key));
+
+ if (0) {
+ FILE *f = fopen("Subsamle.stt", "a");
+ fprintf(f, " %8d %8d %8d %8d %12.0f %8d %8d %8d\n",
+ cpi->common.current_video_frame, kf_q, cpi->common.horiz_scale,
+ cpi->common.vert_scale, kf_group_err / cpi->twopass.frames_to_key,
+ (int)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key),
+ new_height, new_width);
+ fclose(f);
+ }
+
+ /* The trigger for spatial resampling depends on various parameters,
+ * such as whether we are streaming (CBR) or VBR.
+ */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ /* Trigger a resample if we are projected to fall below the
+ * down-sample level, or we resampled last time and are projected
+ * to remain below the up-sample level.
+ */
+ if ((projected_buffer_level < (cpi->oxcf.resample_down_water_mark *
+ cpi->oxcf.optimal_buffer_level / 100)) ||
+ (last_kf_resampled &&
+ (projected_buffer_level < (cpi->oxcf.resample_up_water_mark *
+ cpi->oxcf.optimal_buffer_level / 100)))) {
+ resample_trigger = 1;
+ } else {
+ resample_trigger = 0;
+ }
+ } else {
+ int64_t clip_bits = (int64_t)(
+ cpi->twopass.total_stats.count * cpi->oxcf.target_bandwidth /
+ DOUBLE_DIVIDE_CHECK((double)cpi->framerate));
+ int64_t over_spend = cpi->oxcf.starting_buffer_level - cpi->buffer_level;
+
+ /* If triggered last time, the threshold for triggering again is
+ * reduced:
+ *
+ * Projected Q higher than allowed, and overspend > 5% of total
+ * bits.
+ */
+ if ((last_kf_resampled && (kf_q > cpi->worst_quality)) ||
+ ((kf_q > cpi->worst_quality) && (over_spend > clip_bits / 20))) {
+ resample_trigger = 1;
+ } else {
+ resample_trigger = 0;
+ }
+ }
+
+ if (resample_trigger) {
+ while ((kf_q >= cpi->worst_quality) && (scale_val < 6)) {
+ scale_val++;
+
+ cpi->common.vert_scale = vscale_lookup[scale_val];
+ cpi->common.horiz_scale = hscale_lookup[scale_val];
+
+ Scale2Ratio(cpi->common.horiz_scale, &hr, &hs);
+ Scale2Ratio(cpi->common.vert_scale, &vr, &vs);
+
+ new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
+ new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
+
+ /* Reducing the area to 1/4 does not reduce the complexity
+ * (err_per_frame) to 1/4... effective_size_ratio attempts
+ * to provide a crude correction for this.
+ */
+ effective_size_ratio = (double)(new_width * new_height) /
+ (double)(cpi->oxcf.Width * cpi->oxcf.Height);
+ effective_size_ratio = (1.0 + (3.0 * effective_size_ratio)) / 4.0;
+
+ /* Now try again and see what Q we get with the smaller
+ * image size
+ */
+ kf_q = estimate_kf_group_q(cpi, err_per_frame * effective_size_ratio,
+ (int)bits_per_frame, group_iiratio);
+
+ if (0) {
+ FILE *f = fopen("Subsamle.stt", "a");
+ fprintf(
+ f, "******** %8d %8d %8d %12.0f %8d %8d %8d\n", kf_q,
+ cpi->common.horiz_scale, cpi->common.vert_scale,
+ kf_group_err / cpi->twopass.frames_to_key,
+ (int)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key),
+ new_height, new_width);
+ fclose(f);
+ }
+ }
+ }
+
+ if ((cpi->common.Width != new_width) ||
+ (cpi->common.Height != new_height)) {
+ cpi->common.Width = new_width;
+ cpi->common.Height = new_height;
+ vp8_alloc_compressor_data(cpi);
+ }
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/firstpass.h b/media/libvpx/libvpx/vp8/encoder/firstpass.h
new file mode 100644
index 0000000000..f5490f1eff
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/firstpass.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_FIRSTPASS_H_
+#define VPX_VP8_ENCODER_FIRSTPASS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void vp8_init_first_pass(VP8_COMP *cpi);
+extern void vp8_first_pass(VP8_COMP *cpi);
+extern void vp8_end_first_pass(VP8_COMP *cpi);
+
+extern void vp8_init_second_pass(VP8_COMP *cpi);
+extern void vp8_second_pass(VP8_COMP *cpi);
+extern void vp8_end_second_pass(VP8_COMP *cpi);
+
+extern size_t vp8_firstpass_stats_sz(unsigned int mb_count);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_FIRSTPASS_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/lookahead.c b/media/libvpx/libvpx/vp8/encoder/lookahead.c
new file mode 100644
index 0000000000..49f851d019
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/lookahead.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <assert.h>
+#include <stdlib.h>
+#include "vpx_config.h"
+#include "lookahead.h"
+#include "vp8/common/extend.h"
+
+#define MAX_LAG_BUFFERS (CONFIG_REALTIME_ONLY ? 1 : 25)
+
+struct lookahead_ctx {
+ unsigned int max_sz; /* Absolute size of the queue */
+ unsigned int sz; /* Number of buffers currently in the queue */
+ unsigned int read_idx; /* Read index */
+ unsigned int write_idx; /* Write index */
+ struct lookahead_entry *buf; /* Buffer list */
+};
+
+/* Return the buffer at the given absolute index and increment the index */
+static struct lookahead_entry *pop(struct lookahead_ctx *ctx,
+ unsigned int *idx) {
+ unsigned int index = *idx;
+ struct lookahead_entry *buf = ctx->buf + index;
+
+ assert(index < ctx->max_sz);
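+ /* Advance and wrap: the queue is a ring of max_sz entries. */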
+ if (++index >= ctx->max_sz) index -= ctx->max_sz;
+ *idx = index;
+ return buf;
+}
+
+void vp8_lookahead_destroy(struct lookahead_ctx *ctx) {
+ if (ctx) {
+ if (ctx->buf) {
+ unsigned int i;
+
+ for (i = 0; i < ctx->max_sz; ++i) {
+ vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img);
+ }
+ free(ctx->buf);
+ }
+ free(ctx);
+ }
+}
+
+struct lookahead_ctx *vp8_lookahead_init(unsigned int width,
+ unsigned int height,
+ unsigned int depth) {
+ struct lookahead_ctx *ctx = NULL;
+ unsigned int i;
+
+ /* Clamp the lookahead queue depth */
+ if (depth < 1) {
+ depth = 1;
+ } else if (depth > MAX_LAG_BUFFERS) {
+ depth = MAX_LAG_BUFFERS;
+ }
+
+ /* Keep last frame in lookahead buffer by increasing depth by 1. */
+ depth += 1;
+
+ /* Align the buffer dimensions */
+ width = (width + 15) & ~15u;
+ height = (height + 15) & ~15u;
+
+ /* Allocate the lookahead structures */
+ ctx = calloc(1, sizeof(*ctx));
+ if (ctx) {
+ ctx->max_sz = depth;
+ ctx->buf = calloc(depth, sizeof(*ctx->buf));
+ if (!ctx->buf) goto bail;
+ for (i = 0; i < depth; ++i) {
+ if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img, width, height,
+ VP8BORDERINPIXELS)) {
+ goto bail;
+ }
+ }
+ }
+ return ctx;
+bail:
+ vp8_lookahead_destroy(ctx);
+ return NULL;
+}
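+
+#if 0 /* Usage sketch (illustrative): a typical producer/consumer flow.
+ * `src`, `ts` and `dur` are placeholders for caller-owned data.
+ */
+  struct lookahead_ctx *la = vp8_lookahead_init(640, 480, 10);
+  if (la) {
+    vp8_lookahead_push(la, src, ts, ts + dur, 0, NULL);
+    while (vp8_lookahead_depth(la)) {
+      struct lookahead_entry *e = vp8_lookahead_pop(la, 1 /* drain */);
+      /* ... encode e->img ... */
+    }
+    vp8_lookahead_destroy(la);
+  }
+#endif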
+
+int vp8_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end, unsigned int flags,
+ unsigned char *active_map) {
+ struct lookahead_entry *buf;
+ int row, col, active_end;
+ int mb_rows = (src->y_height + 15) >> 4;
+ int mb_cols = (src->y_width + 15) >> 4;
+
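+ /* At most max_sz - 1 entries are ever queued; the spare buffer
+ * (the depth + 1 in init) keeps the most recently popped frame valid.
+ */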
+ if (ctx->sz + 2 > ctx->max_sz) return 1;
+ ctx->sz++;
+ buf = pop(ctx, &ctx->write_idx);
+
+ /* Only do this partial copy if the following conditions are all met:
+ * 1. The lookahead queue has a size of 1.
+ * 2. An active map is provided.
+ * 3. This is not a key frame, golden frame, or altref frame.
+ */
+ if (ctx->max_sz == 1 && active_map && !flags) {
+ for (row = 0; row < mb_rows; ++row) {
+ col = 0;
+
+ while (1) {
+ /* Find the first active macroblock in this row. */
+ for (; col < mb_cols; ++col) {
+ if (active_map[col]) break;
+ }
+
+ /* No more active macroblocks in this row. */
+ if (col == mb_cols) break;
+
+ /* Find the end of active region in this row. */
+ active_end = col;
+
+ for (; active_end < mb_cols; ++active_end) {
+ if (!active_map[active_end]) break;
+ }
+
+ /* Only copy this active region. */
+ vp8_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
+ 16, (active_end - col) << 4);
+
+ /* Start again from the end of this active region. */
+ col = active_end;
+ }
+
+ active_map += mb_cols;
+ }
+ } else {
+ vp8_copy_and_extend_frame(src, &buf->img);
+ }
+ buf->ts_start = ts_start;
+ buf->ts_end = ts_end;
+ buf->flags = flags;
+ return 0;
+}
+
+struct lookahead_entry *vp8_lookahead_pop(struct lookahead_ctx *ctx,
+ int drain) {
+ struct lookahead_entry *buf = NULL;
+
+ assert(ctx != NULL);
+ if (ctx->sz && (drain || ctx->sz == ctx->max_sz - 1)) {
+ buf = pop(ctx, &ctx->read_idx);
+ ctx->sz--;
+ }
+ return buf;
+}
+
+struct lookahead_entry *vp8_lookahead_peek(struct lookahead_ctx *ctx,
+ unsigned int index, int direction) {
+ struct lookahead_entry *buf = NULL;
+
+ if (direction == PEEK_FORWARD) {
+ assert(index < ctx->max_sz - 1);
+ if (index < ctx->sz) {
+ index += ctx->read_idx;
+ if (index >= ctx->max_sz) index -= ctx->max_sz;
+ buf = ctx->buf + index;
+ }
+ } else if (direction == PEEK_BACKWARD) {
+ assert(index == 1);
+
+ if (ctx->read_idx == 0) {
+ index = ctx->max_sz - 1;
+ } else {
+ index = ctx->read_idx - index;
+ }
+ buf = ctx->buf + index;
+ }
+
+ return buf;
+}
+
+unsigned int vp8_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
diff --git a/media/libvpx/libvpx/vp8/encoder/lookahead.h b/media/libvpx/libvpx/vp8/encoder/lookahead.h
new file mode 100644
index 0000000000..bf0401190b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/lookahead.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VPX_VP8_ENCODER_LOOKAHEAD_H_
+#define VPX_VP8_ENCODER_LOOKAHEAD_H_
+#include "vpx_scale/yv12config.h"
+#include "vpx/vpx_integer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct lookahead_entry {
+ YV12_BUFFER_CONFIG img;
+ int64_t ts_start;
+ int64_t ts_end;
+ unsigned int flags;
+};
+
+struct lookahead_ctx;
+
+/**\brief Initializes the lookahead stage
+ *
+ * The lookahead stage is a queue of frame buffers on which some analysis
+ * may be done when buffers are enqueued.
+ *
+ *
+ */
+struct lookahead_ctx *vp8_lookahead_init(unsigned int width,
+ unsigned int height,
+ unsigned int depth);
+
+/**\brief Destroys the lookahead stage
+ *
+ */
+void vp8_lookahead_destroy(struct lookahead_ctx *ctx);
+
+/**\brief Enqueue a source buffer
+ *
+ * This function will copy the source image into a new framebuffer with
+ * the expected stride/border.
+ *
+ * If active_map is non-NULL and there is only one frame in the queue, then copy
+ * only active macroblocks.
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] src Pointer to the image to enqueue
+ * \param[in] ts_start Timestamp for the start of this frame
+ * \param[in] ts_end Timestamp for the end of this frame
+ * \param[in] flags Flags set on this frame
+ * \param[in] active_map Map that specifies which macroblocks are active
+ */
+int vp8_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end, unsigned int flags,
+ unsigned char *active_map);
+
+/**\brief Get the next source buffer to encode
+ *
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] drain Flag indicating the buffer should be drained
+ * (return a buffer regardless of the current queue depth)
+ *
+ * \retval NULL, if drain set and queue is empty
+ * \retval NULL, if drain not set and queue not of the configured depth
+ *
+ */
+struct lookahead_entry *vp8_lookahead_pop(struct lookahead_ctx *ctx, int drain);
+
+#define PEEK_FORWARD 1
+#define PEEK_BACKWARD (-1)
+/**\brief Get a future source buffer to encode
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] index Index of the frame to be returned, 0 == next frame
+ *
+ * \retval NULL, if no buffer exists at the specified index
+ *
+ */
+struct lookahead_entry *vp8_lookahead_peek(struct lookahead_ctx *ctx,
+ unsigned int index, int direction);
+
+/**\brief Get the number of frames currently in the lookahead queue
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ */
+unsigned int vp8_lookahead_depth(struct lookahead_ctx *ctx);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_LOOKAHEAD_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/loongarch/dct_lsx.c b/media/libvpx/libvpx/vp8/encoder/loongarch/dct_lsx.c
new file mode 100644
index 0000000000..a08d4d3f63
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/loongarch/dct_lsx.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2022 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdint.h>
+#include "./vp8_rtcd.h"
+#include "vpx_util/loongson_intrinsics.h"
+
+#define LSX_TRANSPOSE4x4_H(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
+ { \
+ __m128i _s0, _s1, _s2, _s3, _t0, _t1, _t2, _t3; \
+ \
+ DUP2_ARG2(__lsx_vilvl_h, _in2, _in0, _in3, _in1, _s0, _s1); \
+ DUP2_ARG2(__lsx_vilvh_h, _in2, _in0, _in3, _in1, _s2, _s3); \
+ _t0 = __lsx_vilvl_h(_s1, _s0); \
+ _t1 = __lsx_vilvh_h(_s1, _s0); \
+ _t2 = __lsx_vilvl_h(_s3, _s2); \
+ _t3 = __lsx_vilvh_h(_s3, _s2); \
+ DUP2_ARG2(__lsx_vpickev_d, _t2, _t0, _t3, _t1, _out0, _out2); \
+ DUP2_ARG2(__lsx_vpickod_d, _t2, _t0, _t3, _t1, _out1, _out3); \
+ }
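+
+#if 0 /* Scalar sketch (illustrative): the effect of LSX_TRANSPOSE4x4_H on
+ * one 4x4 tile of int16_t values.
+ */
+static void transpose4x4_sketch(int16_t out[4][4], const int16_t in[4][4]) {
+  int r, c;
+  for (r = 0; r < 4; ++r)
+    for (c = 0; c < 4; ++c) out[c][r] = in[r][c];
+}
+#endif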
+
+#define SET_DOTP_VALUES(coeff, val0, val1, val2, const1, const2) \
+ { \
+ __m128i tmp0_m, tmp1_m, tmp2_m; \
+ \
+ tmp0_m = __lsx_vreplvei_h(coeff, val0); \
+ DUP2_ARG2(__lsx_vreplvei_h, coeff, val1, coeff, val2, tmp1_m, tmp2_m); \
+ DUP2_ARG2(__lsx_vpackev_h, tmp1_m, tmp0_m, tmp0_m, tmp2_m, const1, \
+ const2); \
+ }
+
+#define RET_1_IF_NZERO_H(_in) \
+ ({ \
+ __m128i tmp_m; \
+ __m128i one_m = __lsx_vldi(0x401); \
+ __m128i max_m = __lsx_vldi(0xFF); \
+ \
+ tmp_m = __lsx_vseqi_h(_in, 0); \
+ tmp_m = __lsx_vxor_v(tmp_m, max_m); \
+ tmp_m = __lsx_vand_v(tmp_m, one_m); \
+ \
+ tmp_m; \
+ })
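+/* i.e. each halfword lane of the result is 1 where the input lane is
+ * non-zero and 0 otherwise (the vector form of a scalar (x != 0) term).
+ */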
+
+void vp8_short_fdct4x4_lsx(int16_t *input, int16_t *output, int32_t pitch) {
+ __m128i in0, in1, in2, in3;
+ __m128i tmp0, tmp1, tmp2, tmp3, const0, const1;
+ __m128i coeff = { 0x38a4eb1814e808a9, 0x659061a82ee01d4c };
+ __m128i out0, out1, out2, out3;
+ __m128i zero = __lsx_vldi(0);
+ int32_t pitch2 = pitch << 1;
+ int32_t pitch3 = pitch2 + pitch;
+
+ in0 = __lsx_vld(input, 0);
+ DUP2_ARG2(__lsx_vldx, input, pitch, input, pitch2, in1, in2);
+ in3 = __lsx_vldx(input, pitch3);
+
+ LSX_TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3);
+ LSX_BUTTERFLY_4_H(in0, in1, in2, in3, tmp0, tmp1, in1, in3);
+ DUP4_ARG2(__lsx_vslli_h, tmp0, 3, tmp1, 3, in1, 3, in3, 3, tmp0, tmp1, in1,
+ in3);
+ in0 = __lsx_vadd_h(tmp0, tmp1);
+ in2 = __lsx_vsub_h(tmp0, tmp1);
+ SET_DOTP_VALUES(coeff, 0, 1, 2, const0, const1);
+ tmp0 = __lsx_vilvl_h(in3, in1);
+ in1 = __lsx_vreplvei_h(coeff, 3);
+ out0 = __lsx_vpackev_h(zero, in1);
+ coeff = __lsx_vilvl_h(zero, coeff);
+ out1 = __lsx_vreplvei_w(coeff, 0);
+ DUP2_ARG3(__lsx_vdp2add_w_h, out0, tmp0, const0, out1, tmp0, const1, out0,
+ out1);
+ DUP2_ARG3(__lsx_vsrani_h_w, out0, out0, 12, out1, out1, 12, in1, in3);
+ LSX_TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3);
+ LSX_BUTTERFLY_4_H(in0, in1, in2, in3, tmp0, tmp1, in1, in3);
+ tmp2 = __lsx_vadd_h(tmp0, tmp1);
+ tmp3 = __lsx_vsub_h(tmp0, tmp1);
+ DUP2_ARG2(__lsx_vaddi_hu, tmp2, 7, tmp3, 7, in0, in2);
+ DUP2_ARG2(__lsx_vsrai_h, in0, 4, in2, 4, in0, in2);
+ DUP2_ARG2(__lsx_vilvl_h, zero, in0, zero, in2, out0, out2);
+ tmp1 = RET_1_IF_NZERO_H(in3);
+ DUP2_ARG2(__lsx_vilvl_h, zero, tmp1, in3, in1, tmp1, tmp0);
+ DUP2_ARG2(__lsx_vreplvei_w, coeff, 2, coeff, 3, out3, out1);
+ out3 = __lsx_vadd_w(out3, out1);
+ out1 = __lsx_vreplvei_w(coeff, 1);
+ DUP2_ARG3(__lsx_vdp2add_w_h, out1, tmp0, const0, out3, tmp0, const1, out1,
+ out3);
+ DUP2_ARG2(__lsx_vsrai_w, out1, 16, out3, 16, out1, out3);
+ out1 = __lsx_vadd_w(out1, tmp1);
+ DUP2_ARG2(__lsx_vpickev_h, out1, out0, out3, out2, in0, in2);
+ __lsx_vst(in0, output, 0);
+ __lsx_vst(in2, output, 16);
+}
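+
+#if 0 /* Scalar reference (illustrative) that the kernel above vectorizes:
+ * a row pass (inputs scaled by 8, >>12 rounding), then a column pass
+ * (>>4 and >>16 rounding, plus the (d1 != 0) DC adjustment). The rounding
+ * constants match the halfwords packed into `coeff` above.
+ */
+static void short_fdct4x4_sketch(int16_t *input, int16_t *output, int pitch) {
+  int i, a1, b1, c1, d1;
+  int16_t *ip = input, *op = output;
+  for (i = 0; i < 4; ++i) {
+    a1 = (ip[0] + ip[3]) * 8;
+    b1 = (ip[1] + ip[2]) * 8;
+    c1 = (ip[1] - ip[2]) * 8;
+    d1 = (ip[0] - ip[3]) * 8;
+    op[0] = a1 + b1;
+    op[2] = a1 - b1;
+    op[1] = (c1 * 2217 + d1 * 5352 + 14500) >> 12;
+    op[3] = (d1 * 2217 - c1 * 5352 + 7500) >> 12;
+    ip += pitch / 2;
+    op += 4;
+  }
+  ip = output;
+  op = output;
+  for (i = 0; i < 4; ++i) {
+    a1 = ip[0] + ip[12];
+    b1 = ip[4] + ip[8];
+    c1 = ip[4] - ip[8];
+    d1 = ip[0] - ip[12];
+    op[0] = (a1 + b1 + 7) >> 4;
+    op[8] = (a1 - b1 + 7) >> 4;
+    op[4] = ((c1 * 2217 + d1 * 5352 + 12000) >> 16) + (d1 != 0);
+    op[12] = (d1 * 2217 - c1 * 5352 + 51000) >> 16;
+    ip++;
+    op++;
+  }
+}
+#endif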
+
+void vp8_short_fdct8x4_lsx(int16_t *input, int16_t *output, int32_t pitch) {
+ __m128i in0, in1, in2, in3, temp0, temp1, tmp0, tmp1;
+ __m128i const0, const1, const2, vec0_w, vec1_w, vec2_w, vec3_w;
+ __m128i coeff = { 0x38a4eb1814e808a9, 0x659061a82ee01d4c };
+ __m128i zero = __lsx_vldi(0);
+ int32_t pitch2 = pitch << 1;
+ int32_t pitch3 = pitch2 + pitch;
+
+ in0 = __lsx_vld(input, 0);
+ DUP2_ARG2(__lsx_vldx, input, pitch, input, pitch2, in1, in2);
+ in3 = __lsx_vldx(input, pitch3);
+ LSX_TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3);
+
+ LSX_BUTTERFLY_4_H(in0, in1, in2, in3, temp0, temp1, in1, in3);
+ DUP4_ARG2(__lsx_vslli_h, temp0, 3, temp1, 3, in1, 3, in3, 3, temp0, temp1,
+ in1, in3);
+ in0 = __lsx_vadd_h(temp0, temp1);
+ in2 = __lsx_vsub_h(temp0, temp1);
+ SET_DOTP_VALUES(coeff, 0, 1, 2, const1, const2);
+ temp0 = __lsx_vreplvei_h(coeff, 3);
+ vec1_w = __lsx_vpackev_h(zero, temp0);
+ coeff = __lsx_vilvh_h(zero, coeff);
+ vec3_w = __lsx_vreplvei_w(coeff, 0);
+ tmp1 = __lsx_vilvl_h(in3, in1);
+ tmp0 = __lsx_vilvh_h(in3, in1);
+ vec0_w = vec1_w;
+ vec2_w = vec3_w;
+ DUP4_ARG3(__lsx_vdp2add_w_h, vec0_w, tmp1, const1, vec1_w, tmp0, const1,
+ vec2_w, tmp1, const2, vec3_w, tmp0, const2, vec0_w, vec1_w, vec2_w,
+ vec3_w);
+ DUP2_ARG3(__lsx_vsrani_h_w, vec1_w, vec0_w, 12, vec3_w, vec2_w, 12, in1, in3);
+ LSX_TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3);
+
+ LSX_BUTTERFLY_4_H(in0, in1, in2, in3, temp0, temp1, in1, in3);
+ in0 = __lsx_vadd_h(temp0, temp1);
+ in0 = __lsx_vaddi_hu(in0, 7);
+ in2 = __lsx_vsub_h(temp0, temp1);
+ in2 = __lsx_vaddi_hu(in2, 7);
+ in0 = __lsx_vsrai_h(in0, 4);
+ in2 = __lsx_vsrai_h(in2, 4);
+ DUP2_ARG2(__lsx_vreplvei_w, coeff, 2, coeff, 3, vec3_w, vec1_w);
+ vec3_w = __lsx_vadd_w(vec3_w, vec1_w);
+ vec1_w = __lsx_vreplvei_w(coeff, 1);
+ const0 = RET_1_IF_NZERO_H(in3);
+ tmp1 = __lsx_vilvl_h(in3, in1);
+ tmp0 = __lsx_vilvh_h(in3, in1);
+ vec0_w = vec1_w;
+ vec2_w = vec3_w;
+ DUP4_ARG3(__lsx_vdp2add_w_h, vec0_w, tmp1, const1, vec1_w, tmp0, const1,
+ vec2_w, tmp1, const2, vec3_w, tmp0, const2, vec0_w, vec1_w, vec2_w,
+ vec3_w);
+ DUP2_ARG3(__lsx_vsrani_h_w, vec1_w, vec0_w, 16, vec3_w, vec2_w, 16, in1, in3);
+ in1 = __lsx_vadd_h(in1, const0);
+ DUP2_ARG2(__lsx_vpickev_d, in1, in0, in3, in2, temp0, temp1);
+ __lsx_vst(temp0, output, 0);
+ __lsx_vst(temp1, output, 16);
+
+ DUP2_ARG2(__lsx_vpickod_d, in1, in0, in3, in2, in0, in2);
+ __lsx_vst(in0, output, 32);
+ __lsx_vst(in2, output, 48);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/loongarch/encodeopt_lsx.c b/media/libvpx/libvpx/vp8/encoder/loongarch/encodeopt_lsx.c
new file mode 100644
index 0000000000..4ad4caba60
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/loongarch/encodeopt_lsx.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2022 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vpx_util/loongson_intrinsics.h"
+#include "vp8/encoder/block.h"
+
+int32_t vp8_block_error_lsx(int16_t *coeff_ptr, int16_t *dq_coeff_ptr) {
+ int32_t err = 0;
+ __m128i dq_coeff0, dq_coeff1, coeff0, coeff1;
+ __m128i reg0, reg1, reg2, reg3, error;
+
+ DUP4_ARG2(__lsx_vld, coeff_ptr, 0, coeff_ptr, 16, dq_coeff_ptr, 0,
+ dq_coeff_ptr, 16, coeff0, coeff1, dq_coeff0, dq_coeff1);
+ DUP2_ARG2(__lsx_vsubwev_w_h, coeff0, dq_coeff0, coeff1, dq_coeff1, reg0,
+ reg2);
+ DUP2_ARG2(__lsx_vsubwod_w_h, coeff0, dq_coeff0, coeff1, dq_coeff1, reg1,
+ reg3);
+ error = __lsx_vmul_w(reg0, reg0);
+ DUP2_ARG3(__lsx_vmadd_w, error, reg1, reg1, error, reg2, reg2, error, error);
+ error = __lsx_vmadd_w(error, reg3, reg3);
+ error = __lsx_vhaddw_d_w(error, error);
+ err = __lsx_vpickve2gr_w(error, 0);
+ err += __lsx_vpickve2gr_w(error, 2);
+ return err;
+}
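+
+#if 0 /* Scalar equivalent (illustrative): sum of squared differences over
+ * the 16 coefficients of one block.
+ */
+static int32_t block_error_sketch(const int16_t *coeff,
+                                  const int16_t *dqcoeff) {
+  int32_t i, err = 0;
+  for (i = 0; i < 16; ++i) {
+    const int32_t diff = coeff[i] - dqcoeff[i];
+    err += diff * diff;
+  }
+  return err;
+}
+#endif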
+
+int32_t vp8_mbblock_error_lsx(MACROBLOCK *mb, int32_t dc) {
+ BLOCK *be;
+ BLOCKD *bd;
+ int16_t *coeff, *dq_coeff;
+ int32_t err = 0;
+ uint32_t loop_cnt;
+ __m128i src0, src1, src2, src3;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, error;
+ __m128i mask0 = __lsx_vldi(0xFF);
+ __m128i zero = __lsx_vldi(0);
+
+ if (dc == 1) {
+ mask0 = __lsx_vinsgr2vr_w(mask0, 0, 0);
+ }
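+ /* With dc == 1 the mask zeroes lane 0 below, excluding each block's DC
+ * difference from the error total.
+ */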
+
+ for (loop_cnt = 0; loop_cnt < 8; loop_cnt++) {
+ int32_t loop_tmp = loop_cnt << 1;
+ be = &mb->block[loop_tmp];
+ bd = &mb->e_mbd.block[loop_tmp];
+ coeff = be->coeff;
+ dq_coeff = bd->dqcoeff;
+ DUP4_ARG2(__lsx_vld, coeff, 0, coeff, 16, dq_coeff, 0, dq_coeff, 16, src0,
+ src1, tmp0, tmp1);
+ be = &mb->block[loop_tmp + 1];
+ bd = &mb->e_mbd.block[loop_tmp + 1];
+ coeff = be->coeff;
+ dq_coeff = bd->dqcoeff;
+ DUP4_ARG2(__lsx_vld, coeff, 0, coeff, 16, dq_coeff, 0, dq_coeff, 16, src2,
+ src3, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vsubwev_w_h, src0, tmp0, src1, tmp1, src2, tmp2, src3, tmp3,
+ reg0, reg2, reg4, reg6);
+ DUP4_ARG2(__lsx_vsubwod_w_h, src0, tmp0, src1, tmp1, src2, tmp2, src3, tmp3,
+ reg1, reg3, reg5, reg7);
+ DUP2_ARG3(__lsx_vbitsel_v, zero, reg0, mask0, zero, reg4, mask0, reg0,
+ reg4);
+ error = __lsx_vmul_w(reg0, reg0);
+ DUP4_ARG3(__lsx_vmadd_w, error, reg1, reg1, error, reg2, reg2, error, reg3,
+ reg3, error, reg4, reg4, error, error, error, error);
+ DUP2_ARG3(__lsx_vmadd_w, error, reg5, reg5, error, reg6, reg6, error,
+ error);
+ error = __lsx_vmadd_w(error, reg7, reg7);
+ error = __lsx_vhaddw_d_w(error, error);
+ error = __lsx_vhaddw_q_d(error, error);
+ err += __lsx_vpickve2gr_w(error, 0);
+ }
+ return err;
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/loongarch/vp8_quantize_lsx.c b/media/libvpx/libvpx/vp8/encoder/loongarch/vp8_quantize_lsx.c
new file mode 100644
index 0000000000..75889192a7
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/loongarch/vp8_quantize_lsx.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2022 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdint.h>
+#include "./vp8_rtcd.h"
+#include "vpx_util/loongson_intrinsics.h"
+#include "vp8/encoder/block.h"
+
+#define BOOST_QUANT1(_in0, _in1, _in2, _ui) \
+ { \
+ if (boost_temp[0] <= __lsx_vpickve2gr_h(_in0, _ui)) { \
+ if (__lsx_vpickve2gr_h(_in1, _ui)) { \
+ eob = _ui; \
+ boost_temp = zbin_boost; \
+ } else { \
+ boost_temp++; \
+ } \
+ } else { \
+ _in2 = __lsx_vinsgr2vr_h(_in2, 0, _ui); \
+ boost_temp++; \
+ } \
+ }
+
+#define BOOST_QUANT2(_in0, _in1, _in2, _ui) \
+ { \
+ if (boost_temp[0] <= __lsx_vpickve2gr_h(_in0, _ui)) { \
+ if (__lsx_vpickve2gr_h(_in1, _ui)) { \
+ eob = _ui + 8; \
+ boost_temp = zbin_boost; \
+ } else { \
+ boost_temp++; \
+ } \
+ } else { \
+ _in2 = __lsx_vinsgr2vr_h(_in2, 0, _ui); \
+ boost_temp++; \
+ } \
+ }
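+
+/* boost_temp walks b->zrun_zbin_boost: the zero bin grows along a run of
+ * zeroes and snaps back to the start of the table as soon as a non-zero
+ * coefficient is coded.
+ */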
+
+static int8_t exact_regular_quantize_b_lsx(
+ int16_t *zbin_boost, int16_t *coeff_ptr, int16_t *zbin, int16_t *round,
+ int16_t *quant, int16_t *quant_shift, int16_t *de_quant, int16_t zbin_oq_in,
+ int16_t *q_coeff, int16_t *dq_coeff) {
+ int32_t eob;
+ int16_t *boost_temp = zbin_boost;
+ __m128i inv_zig_zag = { 0x0C07040206050100, 0x0F0E0A090D0B0803 };
+ __m128i sign_z0, sign_z1, q_coeff0, q_coeff1;
+ __m128i z_bin0, z_bin1, zbin_o_q, x0, x1, sign_x0, sign_x1, de_quant0,
+ de_quant1;
+ __m128i z0, z1, round0, round1, quant0, quant2;
+ __m128i inv_zig_zag0, inv_zig_zag1;
+ __m128i zigzag_mask0 = { 0x0008000400010000, 0x0006000300020005 };
+ __m128i zigzag_mask1 = { 0x000A000D000C0009, 0X000F000E000B0007 };
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i zero = __lsx_vldi(0);
+
+ zbin_o_q = __lsx_vreplgr2vr_h(zbin_oq_in);
+ inv_zig_zag0 = __lsx_vilvl_b(zero, inv_zig_zag);
+ inv_zig_zag1 = __lsx_vilvh_b(zero, inv_zig_zag);
+ eob = -1;
+ DUP4_ARG2(__lsx_vld, coeff_ptr, 0, coeff_ptr, 16, round, 0, round, 16, tmp0,
+ tmp1, tmp2, tmp3);
+ DUP4_ARG3(__lsx_vshuf_h, zigzag_mask0, tmp1, tmp0, zigzag_mask1, tmp1, tmp0,
+ zigzag_mask0, tmp3, tmp2, zigzag_mask1, tmp3, tmp2, z0, z1, round0,
+ round1);
+ DUP4_ARG2(__lsx_vld, quant, 0, quant, 16, zbin, 0, zbin, 16, tmp0, tmp1, tmp2,
+ tmp3);
+ DUP4_ARG3(__lsx_vshuf_h, zigzag_mask0, tmp1, tmp0, zigzag_mask1, tmp1, tmp0,
+ zigzag_mask0, tmp3, tmp2, zigzag_mask1, tmp3, tmp2, quant0, quant2,
+ z_bin0, z_bin1);
+ DUP2_ARG2(__lsx_vsrai_h, z0, 15, z1, 15, sign_z0, sign_z1);
+ DUP2_ARG2(__lsx_vadda_h, z0, zero, z1, zero, x0, x1);
+ DUP2_ARG2(__lsx_vsub_h, x0, z_bin0, x1, z_bin1, z_bin0, z_bin1);
+ DUP2_ARG2(__lsx_vsub_h, z_bin0, zbin_o_q, z_bin1, zbin_o_q, z_bin0, z_bin1);
+ DUP2_ARG2(__lsx_vmulwev_w_h, quant0, round0, quant2, round1, tmp0, tmp2);
+ DUP2_ARG2(__lsx_vmulwod_w_h, quant0, round0, quant2, round1, tmp1, tmp3);
+ DUP2_ARG3(__lsx_vmaddwev_w_h, tmp0, quant0, x0, tmp2, quant2, x1, tmp0, tmp2);
+ DUP2_ARG3(__lsx_vmaddwod_w_h, tmp1, quant0, x0, tmp3, quant2, x1, tmp1, tmp3);
+ DUP2_ARG2(__lsx_vpackod_h, tmp1, tmp0, tmp3, tmp2, q_coeff0, q_coeff1);
+
+ DUP2_ARG2(__lsx_vld, quant_shift, 0, quant_shift, 16, tmp1, tmp3);
+ DUP2_ARG3(__lsx_vshuf_h, zigzag_mask0, tmp3, tmp1, zigzag_mask1, tmp3, tmp1,
+ quant0, quant2);
+ DUP2_ARG2(__lsx_vadd_h, x0, round0, x1, round1, x0, x1);
+ DUP2_ARG2(__lsx_vmulwev_w_h, quant0, q_coeff0, quant2, q_coeff1, tmp0, tmp2);
+ DUP2_ARG2(__lsx_vmulwod_w_h, quant0, q_coeff0, quant2, q_coeff1, tmp1, tmp3);
+ DUP2_ARG3(__lsx_vmaddwev_w_h, tmp0, quant0, x0, tmp2, quant2, x1, tmp0, tmp2);
+ DUP2_ARG3(__lsx_vmaddwod_w_h, tmp1, quant0, x0, tmp3, quant2, x1, tmp1, tmp3);
+ DUP2_ARG2(__lsx_vpackod_h, tmp1, tmp0, tmp3, tmp2, x0, x1);
+ DUP2_ARG2(__lsx_vxor_v, x0, sign_z0, x1, sign_z1, sign_x0, sign_x1);
+ DUP2_ARG2(__lsx_vsub_h, sign_x0, sign_z0, sign_x1, sign_z1, sign_x0, sign_x1);
+
+ BOOST_QUANT1(z_bin0, x0, sign_x0, 0);
+ BOOST_QUANT1(z_bin0, x0, sign_x0, 1);
+ BOOST_QUANT1(z_bin0, x0, sign_x0, 2);
+ BOOST_QUANT1(z_bin0, x0, sign_x0, 3);
+ BOOST_QUANT1(z_bin0, x0, sign_x0, 4);
+ BOOST_QUANT1(z_bin0, x0, sign_x0, 5);
+ BOOST_QUANT1(z_bin0, x0, sign_x0, 6);
+ BOOST_QUANT1(z_bin0, x0, sign_x0, 7);
+
+ BOOST_QUANT2(z_bin1, x1, sign_x1, 0);
+ BOOST_QUANT2(z_bin1, x1, sign_x1, 1);
+ BOOST_QUANT2(z_bin1, x1, sign_x1, 2);
+ BOOST_QUANT2(z_bin1, x1, sign_x1, 3);
+ BOOST_QUANT2(z_bin1, x1, sign_x1, 4);
+ BOOST_QUANT2(z_bin1, x1, sign_x1, 5);
+ BOOST_QUANT2(z_bin1, x1, sign_x1, 6);
+ BOOST_QUANT2(z_bin1, x1, sign_x1, 7);
+
+ DUP2_ARG2(__lsx_vld, de_quant, 0, de_quant, 16, de_quant0, de_quant1);
+ DUP2_ARG3(__lsx_vshuf_h, inv_zig_zag0, sign_x1, sign_x0, inv_zig_zag1,
+ sign_x1, sign_x0, q_coeff0, q_coeff1);
+ DUP2_ARG2(__lsx_vmul_h, de_quant0, q_coeff0, de_quant1, q_coeff1, de_quant0,
+ de_quant1);
+ __lsx_vst(q_coeff0, q_coeff, 0);
+ __lsx_vst(q_coeff1, q_coeff, 16);
+ __lsx_vst(de_quant0, dq_coeff, 0);
+ __lsx_vst(de_quant1, dq_coeff, 16);
+
+ return (int8_t)(eob + 1);
+}
+
+void vp8_regular_quantize_b_lsx(BLOCK *b, BLOCKD *d) {
+ int16_t *zbin_boost_ptr = b->zrun_zbin_boost;
+ int16_t *coeff_ptr = b->coeff;
+ int16_t *zbin_ptr = b->zbin;
+ int16_t *round_ptr = b->round;
+ int16_t *quant_ptr = b->quant;
+ int16_t *quant_shift_ptr = b->quant_shift;
+ int16_t *qcoeff_ptr = d->qcoeff;
+ int16_t *dqcoeff_ptr = d->dqcoeff;
+ int16_t *dequant_ptr = d->dequant;
+ int16_t zbin_oq_value = b->zbin_extra;
+
+ *d->eob = exact_regular_quantize_b_lsx(
+ zbin_boost_ptr, coeff_ptr, zbin_ptr, round_ptr, quant_ptr,
+ quant_shift_ptr, dequant_ptr, zbin_oq_value, qcoeff_ptr, dqcoeff_ptr);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/mcomp.c b/media/libvpx/libvpx/vp8/encoder/mcomp.c
new file mode 100644
index 0000000000..b92e2135e9
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mcomp.c
@@ -0,0 +1,1561 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "onyx_int.h"
+#include "mcomp.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_config.h"
+#include <stdio.h>
+#include <limits.h>
+#include <math.h>
+#include "vp8/common/findnearmv.h"
+#include "vp8/common/common.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+
+int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight) {
+ /* MV costing is based on the distribution of vectors in the previous
+ * frame and as such will tend to overstate the cost of vectors. In
+ * addition, coding a new vector can have a knock-on effect on the cost
+ * of subsequent vectors and the quality of prediction from NEAR and
+ * NEAREST for subsequent blocks. The "Weight" parameter allows, to a
+ * limited extent, for some account to be taken of these factors.
+ */
+ const int mv_idx_row =
+ clamp((mv->as_mv.row - ref->as_mv.row) >> 1, 0, MVvals);
+ const int mv_idx_col =
+ clamp((mv->as_mv.col - ref->as_mv.col) >> 1, 0, MVvals);
+ return ((mvcost[0][mv_idx_row] + mvcost[1][mv_idx_col]) * Weight) >> 7;
+}
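+
+/* Note: Weight acts as a Q7 scale factor here: 128 applies the full table
+ * cost, 64 half of it.
+ */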
+
+static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
+ int error_per_bit) {
+ /* Ignore mv costing if mvcost is NULL */
+ if (mvcost) {
+ const int mv_idx_row =
+ clamp((mv->as_mv.row - ref->as_mv.row) >> 1, 0, MVvals);
+ const int mv_idx_col =
+ clamp((mv->as_mv.col - ref->as_mv.col) >> 1, 0, MVvals);
+ return ((mvcost[0][mv_idx_row] + mvcost[1][mv_idx_col]) * error_per_bit +
+ 128) >>
+ 8;
+ }
+ return 0;
+}
+
+static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2],
+ int error_per_bit) {
+ /* Calculate sad error cost on full pixel basis. */
+ /* Ignore mv costing if mvsadcost is NULL */
+ if (mvsadcost) {
+ return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
+ mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)]) *
+ error_per_bit +
+ 128) >>
+ 8;
+ }
+ return 0;
+}
+
+void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride) {
+ int Len;
+ int search_site_count = 0;
+
+ /* Generate offsets for 4 search sites per step. */
+ Len = MAX_FIRST_STEP;
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = 0;
+ search_site_count++;
+
+ while (Len > 0) {
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = -Len;
+ x->ss[search_site_count].offset = -Len * stride;
+ search_site_count++;
+
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = Len;
+ x->ss[search_site_count].offset = Len * stride;
+ search_site_count++;
+
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = -Len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = -Len;
+ search_site_count++;
+
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = Len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = Len;
+ search_site_count++;
+
+ /* Contract. */
+ Len /= 2;
+ }
+
+ x->ss_count = search_site_count;
+ x->searches_per_step = 4;
+}
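+
+/* The sites form a diamond: rings of four (up, down, left, right) whose
+ * radius halves each step: Len, Len/2, ..., 1.
+ */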
+
+void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
+ int Len;
+ int search_site_count = 0;
+
+ /* Generate offsets for 8 search sites per step. */
+ Len = MAX_FIRST_STEP;
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = 0;
+ search_site_count++;
+
+ while (Len > 0) {
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = -Len;
+ x->ss[search_site_count].offset = -Len * stride;
+ search_site_count++;
+
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = Len;
+ x->ss[search_site_count].offset = Len * stride;
+ search_site_count++;
+
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = -Len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = -Len;
+ search_site_count++;
+
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = Len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = Len;
+ search_site_count++;
+
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = -Len;
+ x->ss[search_site_count].mv.row = -Len;
+ x->ss[search_site_count].offset = -Len * stride - Len;
+ search_site_count++;
+
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = Len;
+ x->ss[search_site_count].mv.row = -Len;
+ x->ss[search_site_count].offset = -Len * stride + Len;
+ search_site_count++;
+
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = -Len;
+ x->ss[search_site_count].mv.row = Len;
+ x->ss[search_site_count].offset = Len * stride - Len;
+ search_site_count++;
+
+ /* Compute offsets for search sites. */
+ x->ss[search_site_count].mv.col = Len;
+ x->ss[search_site_count].mv.row = Len;
+ x->ss[search_site_count].offset = Len * stride + Len;
+ search_site_count++;
+
+ /* Contract. */
+ Len /= 2;
+ }
+
+ x->ss_count = search_site_count;
+ x->searches_per_step = 8;
+}
+
+/*
+ * To avoid the penalty for a crossing cache-line read, preload the reference
+ * area into a small buffer that is aligned to guarantee no cache-line
+ * crossing while reading from it. This reduces the CPU cycles spent on
+ * reading ref data in the sub-pixel filter functions.
+ * TODO: Currently, since the sub-pixel search range here is -3 ~ 3, copy a
+ * 22 row x 32 col area, which is enough for a 16x16 macroblock. Later, for
+ * SPLITMV, we could reduce the area.
+ */
+
+/* estimated cost of a motion vector (r,c) */
+#define MVC(r, c) \
+ (mvcost \
+ ? ((mvcost[0][(r)-rr] + mvcost[1][(c)-rc]) * error_per_bit + 128) >> 8 \
+ : 0)
+/* pointer to the predictor base of a motion vector */
+#define PRE(r, c) (y + (((r) >> 2) * y_stride + ((c) >> 2) - (offset)))
+/* convert motion vector component to offset for svf calc */
+#define SP(x) (((x)&3) << 1)
+/* returns the subpixel variance error. */
+#define DIST(r, c) \
+ vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, b->src_stride, &sse)
+#define IFMVCV(r, c, s, e) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
+/* returns distortion + motion vector cost */
+#define ERR(r, c) (MVC(r, c) + DIST(r, c))
+/* checks if (r,c) has better score than previous best */
+#define CHECK_BETTER(v, r, c) \
+ do { \
+ IFMVCV( \
+ r, c, \
+ { \
+ thismse = DIST(r, c); \
+ if ((v = (MVC(r, c) + thismse)) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ }, \
+ v = UINT_MAX;) \
+ } while (0)
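+
+/* Note: tr/tc and br/bc below are in quarter-pel units; bestmv arrives in
+ * full-pel units and is returned in eighth-pel units (hence the final * 2).
+ */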
+
+int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp8_variance_fn_ptr_t *vfp,
+ int *mvcost[2], int *distortion,
+ unsigned int *sse1) {
+ unsigned char *z = (*(b->base_src) + b->src);
+
+ int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
+ int br = bestmv->as_mv.row * 4, bc = bestmv->as_mv.col * 4;
+ int tr = br, tc = bc;
+ unsigned int besterr;
+ unsigned int left, right, up, down, diag;
+ unsigned int sse;
+ unsigned int whichdir;
+ unsigned int halfiters = 4;
+ unsigned int quarteriters = 4;
+ int thismse;
+
+ int minc = VPXMAX(x->mv_col_min * 4,
+ (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
+ int maxc = VPXMIN(x->mv_col_max * 4,
+ (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
+ int minr = VPXMAX(x->mv_row_min * 4,
+ (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
+ int maxr = VPXMIN(x->mv_row_max * 4,
+ (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
+
+ int y_stride;
+ int offset;
+ int pre_stride = x->e_mbd.pre.y_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+
+#if VPX_ARCH_X86 || VPX_ARCH_X86_64
+ MACROBLOCKD *xd = &x->e_mbd;
+ unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
+ bestmv->as_mv.col;
+ unsigned char *y;
+ int buf_r1, buf_r2, buf_c1;
+
+ /* Clamping to avoid out-of-range data access */
+ buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)
+ ? (bestmv->as_mv.row - x->mv_row_min)
+ : 3;
+ buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)
+ ? (x->mv_row_max - bestmv->as_mv.row)
+ : 3;
+ buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)
+ ? (bestmv->as_mv.col - x->mv_col_min)
+ : 3;
+ y_stride = 32;
+
+ /* Copy to intermediate buffer before searching. */
+ vfp->copymem(y_0 - buf_c1 - pre_stride * buf_r1, pre_stride, xd->y_buf,
+ y_stride, 16 + buf_r1 + buf_r2);
+ y = xd->y_buf + y_stride * buf_r1 + buf_c1;
+#else
+ unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
+ bestmv->as_mv.col;
+ y_stride = pre_stride;
+#endif
+
+ offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+
+ /* central mv */
+ bestmv->as_mv.row *= 8;
+ bestmv->as_mv.col *= 8;
+
+ /* calculate central point error */
+ besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
+ *distortion = besterr;
+ besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
+
+ /* TODO: Each subsequent iteration checks at least one point in common
+ * with the last iteration; this could be 2 (if diag selected).
+ */
+ while (--halfiters) {
+ /* 1/2 pel */
+ CHECK_BETTER(left, tr, tc - 2);
+ CHECK_BETTER(right, tr, tc + 2);
+ CHECK_BETTER(up, tr - 2, tc);
+ CHECK_BETTER(down, tr + 2, tc);
+
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ switch (whichdir) {
+ case 0: CHECK_BETTER(diag, tr - 2, tc - 2); break;
+ case 1: CHECK_BETTER(diag, tr - 2, tc + 2); break;
+ case 2: CHECK_BETTER(diag, tr + 2, tc - 2); break;
+ case 3: CHECK_BETTER(diag, tr + 2, tc + 2); break;
+ }
+
+ /* no reason to check the same one again. */
+ if (tr == br && tc == bc) break;
+
+ tr = br;
+ tc = bc;
+ }
+
+ /* TODO: Each subsequent iteration checks at least one point in common
+ * with the last iteration; this could be 2 (if diag selected).
+ */
+
+ /* 1/4 pel */
+ while (--quarteriters) {
+ CHECK_BETTER(left, tr, tc - 1);
+ CHECK_BETTER(right, tr, tc + 1);
+ CHECK_BETTER(up, tr - 1, tc);
+ CHECK_BETTER(down, tr + 1, tc);
+
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ switch (whichdir) {
+ case 0: CHECK_BETTER(diag, tr - 1, tc - 1); break;
+ case 1: CHECK_BETTER(diag, tr - 1, tc + 1); break;
+ case 2: CHECK_BETTER(diag, tr + 1, tc - 1); break;
+ case 3: CHECK_BETTER(diag, tr + 1, tc + 1); break;
+ }
+
+ /* no reason to check the same one again. */
+ if (tr == br && tc == bc) break;
+
+ tr = br;
+ tc = bc;
+ }
+
+ bestmv->as_mv.row = br * 2;
+ bestmv->as_mv.col = bc * 2;
+
+ if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
+ (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3))) {
+ return INT_MAX;
+ }
+
+ return besterr;
+}
+#undef MVC
+#undef PRE
+#undef SP
+#undef DIST
+#undef IFMVCV
+#undef ERR
+#undef CHECK_BETTER
+
+int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp8_variance_fn_ptr_t *vfp,
+ int *mvcost[2], int *distortion,
+ unsigned int *sse1) {
+ int bestmse = INT_MAX;
+ int_mv startmv;
+ int_mv this_mv;
+ unsigned char *z = (*(b->base_src) + b->src);
+ int left, right, up, down, diag;
+ unsigned int sse;
+ int whichdir;
+ int thismse;
+ int y_stride;
+ int pre_stride = x->e_mbd.pre.y_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+
+#if VPX_ARCH_X86 || VPX_ARCH_X86_64
+ MACROBLOCKD *xd = &x->e_mbd;
+ unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
+ bestmv->as_mv.col;
+ unsigned char *y;
+
+ y_stride = 32;
+ /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
+ vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
+ y = xd->y_buf + y_stride + 1;
+#else
+ unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
+ bestmv->as_mv.col;
+ y_stride = pre_stride;
+#endif
+
+ /* central mv */
+ bestmv->as_mv.row *= 8;
+ bestmv->as_mv.col *= 8;
+ startmv = *bestmv;
+
+ /* calculate central point error */
+ bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
+ *distortion = bestmse;
+ bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
+
+ /* go left then right and check error */
+ this_mv.as_mv.row = startmv.as_mv.row;
+ this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
+ /* "halfpix" horizontal variance */
+ thismse = vfp->svf(y - 1, y_stride, 4, 0, z, b->src_stride, &sse);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.col += 8;
+ /* "halfpix" horizontal variance */
+ thismse = vfp->svf(y, y_stride, 4, 0, z, b->src_stride, &sse);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ /* go up then down and check error */
+ this_mv.as_mv.col = startmv.as_mv.col;
+ this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
+ /* "halfpix" vertical variance */
+ thismse = vfp->svf(y - y_stride, y_stride, 0, 4, z, b->src_stride, &sse);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.row += 8;
+ /* "halfpix" vertical variance */
+ thismse = vfp->svf(y, y_stride, 0, 4, z, b->src_stride, &sse);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ /* now check 1 more diagonal */
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+ this_mv = startmv;
+
+ switch (whichdir) {
+ case 0:
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ /* "halfpix" horizontal/vertical variance */
+ thismse =
+ vfp->svf(y - 1 - y_stride, y_stride, 4, 4, z, b->src_stride, &sse);
+ break;
+ case 1:
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ /* "halfpix" horizontal/vertical variance */
+ thismse = vfp->svf(y - y_stride, y_stride, 4, 4, z, b->src_stride, &sse);
+ break;
+ case 2:
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row += 4;
+ /* "halfpix" horizontal/vertical variance */
+ thismse = vfp->svf(y - 1, y_stride, 4, 4, z, b->src_stride, &sse);
+ break;
+ case 3:
+ default:
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row += 4;
+ /* "halfpix" horizontal/vertical variance */
+ thismse = vfp->svf(y, y_stride, 4, 4, z, b->src_stride, &sse);
+ break;
+ }
+
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ /* time to check quarter pels. */
+ if (bestmv->as_mv.row < startmv.as_mv.row) y -= y_stride;
+
+ if (bestmv->as_mv.col < startmv.as_mv.col) y--;
+
+ startmv = *bestmv;
+
+ /* go left then right and check error */
+ this_mv.as_mv.row = startmv.as_mv.row;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col = startmv.as_mv.col - 2;
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
+ this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z,
+ b->src_stride, &sse);
+ }
+
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.col += 4;
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7,
+ z, b->src_stride, &sse);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ /* go up then down and check error */
+ this_mv.as_mv.col = startmv.as_mv.col;
+
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row = startmv.as_mv.row - 2;
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
+ this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+ thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z,
+ b->src_stride, &sse);
+ }
+
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7,
+ z, b->src_stride, &sse);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ /* now check 1 more diagonal */
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ this_mv = startmv;
+
+ switch (whichdir) {
+ case 0:
+
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row -= 2;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
+ this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z,
+ b->src_stride, &sse);
+ }
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6,
+ z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - y_stride - 1, y_stride, 6, 6, z, b->src_stride,
+ &sse);
+ }
+ }
+
+ break;
+ case 1:
+ this_mv.as_mv.col += 2;
+
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row -= 2;
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
+ this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+ thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z,
+ b->src_stride, &sse);
+ }
+
+ break;
+ case 2:
+ this_mv.as_mv.row += 2;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
+ this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z,
+ b->src_stride, &sse);
+ }
+
+ break;
+ case 3:
+ this_mv.as_mv.col += 2;
+ this_mv.as_mv.row += 2;
+ thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
+ this_mv.as_mv.row & 7, z, b->src_stride, &sse);
+ break;
+ }
+
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ return bestmse;
+}
+
+int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp8_variance_fn_ptr_t *vfp,
+ int *mvcost[2], int *distortion,
+ unsigned int *sse1) {
+ int bestmse = INT_MAX;
+ int_mv startmv;
+ int_mv this_mv;
+ unsigned char *z = (*(b->base_src) + b->src);
+ int left, right, up, down, diag;
+ unsigned int sse;
+ int whichdir;
+ int thismse;
+ int y_stride;
+ int pre_stride = x->e_mbd.pre.y_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+
+#if VPX_ARCH_X86 || VPX_ARCH_X86_64
+ MACROBLOCKD *xd = &x->e_mbd;
+ unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
+ bestmv->as_mv.col;
+ unsigned char *y;
+
+ y_stride = 32;
+ /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
+ vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
+ y = xd->y_buf + y_stride + 1;
+#else
+ unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
+ bestmv->as_mv.col;
+ y_stride = pre_stride;
+#endif
+
+ /* central mv */
+ bestmv->as_mv.row *= 8;
+ bestmv->as_mv.col *= 8;
+ startmv = *bestmv;
+
+ /* calculate central point error */
+ bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
+ *distortion = bestmse;
+ bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
+
+ /* go left then right and check error */
+ this_mv.as_mv.row = startmv.as_mv.row;
+ this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
+ /* "halfpix" horizontal variance */
+ thismse = vfp->svf(y - 1, y_stride, 4, 0, z, b->src_stride, &sse);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.col += 8;
+ /* "halfpix" horizontal variance */
+ thismse = vfp->svf(y, y_stride, 4, 0, z, b->src_stride, &sse);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ /* go up then down and check error */
+ this_mv.as_mv.col = startmv.as_mv.col;
+ this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
+ /* "halfpix" vertical variance */
+ thismse = vfp->svf(y - y_stride, y_stride, 0, 4, z, b->src_stride, &sse);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.row += 8;
+ /* "halfpix" vertical variance */
+ thismse = vfp->svf(y, y_stride, 0, 4, z, b->src_stride, &sse);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+  /* now check 1 more diagonal */
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+ this_mv = startmv;
+
+ switch (whichdir) {
+ case 0:
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ /* "halfpix" horizontal/vertical variance */
+ thismse =
+ vfp->svf(y - 1 - y_stride, y_stride, 4, 4, z, b->src_stride, &sse);
+ break;
+ case 1:
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ /* "halfpix" horizontal/vertical variance */
+ thismse = vfp->svf(y - y_stride, y_stride, 4, 4, z, b->src_stride, &sse);
+ break;
+ case 2:
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row += 4;
+ /* "halfpix" horizontal/vertical variance */
+ thismse = vfp->svf(y - 1, y_stride, 4, 4, z, b->src_stride, &sse);
+ break;
+ case 3:
+ default:
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row += 4;
+ /* "halfpix" horizontal/vertical variance */
+ thismse = vfp->svf(y, y_stride, 4, 4, z, b->src_stride, &sse);
+ break;
+ }
+
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ return bestmse;
+}
+
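+/* CHECK_BOUNDS sets all_in when the whole (2 * range + 1)-wide square around
+ * (br, bc) lies inside the motion-vector limits, letting the search loops
+ * below skip the per-point clamping done by CHECK_POINT.
+ */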
+#define CHECK_BOUNDS(range) \
+ do { \
+ all_in = 1; \
+ all_in &= ((br - range) >= x->mv_row_min); \
+ all_in &= ((br + range) <= x->mv_row_max); \
+ all_in &= ((bc - range) >= x->mv_col_min); \
+ all_in &= ((bc + range) <= x->mv_col_max); \
+ } while (0)
+
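+/* CHECK_POINT is deliberately a bare block rather than a do/while(0): its
+ * continue statements must act on the enclosing for loop, which a do/while
+ * wrapper would intercept.
+ */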
+#define CHECK_POINT \
+ { \
+ if (this_mv.as_mv.col < x->mv_col_min) continue; \
+ if (this_mv.as_mv.col > x->mv_col_max) continue; \
+ if (this_mv.as_mv.row < x->mv_row_min) continue; \
+ if (this_mv.as_mv.row > x->mv_row_max) continue; \
+ }
+
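+/* CHECK_BETTER compares raw SAD first and adds the motion-vector rate term
+ * only for candidates that already beat the current best, saving
+ * mvsad_err_cost calls on losing points.
+ */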
+#define CHECK_BETTER \
+ do { \
+ if (thissad < bestsad) { \
+ thissad += \
+ mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); \
+ if (thissad < bestsad) { \
+ bestsad = thissad; \
+ best_site = i; \
+ } \
+ } \
+ } while (0)
+
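+/* next_chkpts[k] lists, after a move to hexagon vertex k, the three points
+ * of the new hexagon that were not part of the one just searched; the other
+ * three overlap the previous iteration and need no re-evaluation.
+ */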
+static const MV next_chkpts[6][3] = {
+ { { -2, 0 }, { -1, -2 }, { 1, -2 } }, { { -1, -2 }, { 1, -2 }, { 2, 0 } },
+ { { 1, -2 }, { 2, 0 }, { 1, 2 } }, { { 2, 0 }, { 1, 2 }, { -1, 2 } },
+ { { 1, 2 }, { -1, 2 }, { -2, 0 } }, { { -1, 2 }, { -2, 0 }, { -1, -2 } }
+};
+
+int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int_mv *best_mv, int search_param, int sad_per_bit,
+ const vp8_variance_fn_ptr_t *vfp, int *mvsadcost[2],
+ int_mv *center_mv) {
+ MV hex[6] = {
+ { -1, -2 }, { 1, -2 }, { 2, 0 }, { 1, 2 }, { -1, 2 }, { -2, 0 }
+ };
+ MV neighbors[4] = { { 0, -1 }, { -1, 0 }, { 1, 0 }, { 0, 1 } };
+ int i, j;
+
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ int pre_stride = x->e_mbd.pre.y_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+
+ int in_what_stride = pre_stride;
+ int br, bc;
+ int_mv this_mv;
+ unsigned int bestsad;
+ unsigned int thissad;
+ unsigned char *base_offset;
+ unsigned char *this_offset;
+ int k = -1;
+ int all_in;
+ int best_site = -1;
+ int hex_range = 127;
+ int dia_range = 8;
+
+ int_mv fcenter_mv;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ /* adjust ref_mv to make sure it is within MV range */
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
+ x->mv_row_max);
+ br = ref_mv->as_mv.row;
+ bc = ref_mv->as_mv.col;
+
+ /* Work out the start point for the search */
+ base_offset = (unsigned char *)(base_pre + d->offset);
+ this_offset = base_offset + (br * (pre_stride)) + bc;
+ this_mv.as_mv.row = br;
+ this_mv.as_mv.col = bc;
+ bestsad = vfp->sdf(what, what_stride, this_offset, in_what_stride) +
+ mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+#if CONFIG_MULTI_RES_ENCODING
+ /* Lower search range based on prediction info */
+ if (search_param >= 6)
+ goto cal_neighbors;
+ else if (search_param >= 5)
+ hex_range = 4;
+ else if (search_param >= 4)
+ hex_range = 6;
+ else if (search_param >= 3)
+ hex_range = 15;
+ else if (search_param >= 2)
+ hex_range = 31;
+ else if (search_param >= 1)
+ hex_range = 63;
+
+ dia_range = 8;
+#else
+ (void)search_param;
+#endif
+
+ /* hex search */
+ CHECK_BOUNDS(2);
+
+ if (all_in) {
+ for (i = 0; i < 6; ++i) {
+ this_mv.as_mv.row = br + hex[i].row;
+ this_mv.as_mv.col = bc + hex[i].col;
+ this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
+ CHECK_BETTER;
+ }
+ } else {
+ for (i = 0; i < 6; ++i) {
+ this_mv.as_mv.row = br + hex[i].row;
+ this_mv.as_mv.col = bc + hex[i].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
+ CHECK_BETTER;
+ }
+ }
+
+ if (best_site == -1) {
+ goto cal_neighbors;
+ } else {
+ br += hex[best_site].row;
+ bc += hex[best_site].col;
+ k = best_site;
+ }
+
+ for (j = 1; j < hex_range; ++j) {
+ best_site = -1;
+ CHECK_BOUNDS(2);
+
+ if (all_in) {
+ for (i = 0; i < 3; ++i) {
+ this_mv.as_mv.row = br + next_chkpts[k][i].row;
+ this_mv.as_mv.col = bc + next_chkpts[k][i].col;
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
+ CHECK_BETTER;
+ }
+ } else {
+ for (i = 0; i < 3; ++i) {
+ this_mv.as_mv.row = br + next_chkpts[k][i].row;
+ this_mv.as_mv.col = bc + next_chkpts[k][i].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
+ CHECK_BETTER;
+ }
+ }
+
+ if (best_site == -1) {
+ break;
+ } else {
+ br += next_chkpts[k][best_site].row;
+ bc += next_chkpts[k][best_site].col;
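+      /* Effectively k = (k + best_site - 1) mod 6, rotating the check-point
+       * table so the next pass again visits only new points.
+       */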
+ k += 5 + best_site;
+ if (k >= 12) {
+ k -= 12;
+ } else if (k >= 6) {
+ k -= 6;
+ }
+ }
+ }
+
+/* check 4 1-away neighbors */
+cal_neighbors:
+ for (j = 0; j < dia_range; ++j) {
+ best_site = -1;
+ CHECK_BOUNDS(1);
+
+ if (all_in) {
+ for (i = 0; i < 4; ++i) {
+ this_mv.as_mv.row = br + neighbors[i].row;
+ this_mv.as_mv.col = bc + neighbors[i].col;
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
+ CHECK_BETTER;
+ }
+ } else {
+ for (i = 0; i < 4; ++i) {
+ this_mv.as_mv.row = br + neighbors[i].row;
+ this_mv.as_mv.col = bc + neighbors[i].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
+ CHECK_BETTER;
+ }
+ }
+
+ if (best_site == -1) {
+ break;
+ } else {
+ br += neighbors[best_site].row;
+ bc += neighbors[best_site].col;
+ }
+ }
+
+ best_mv->as_mv.row = br;
+ best_mv->as_mv.col = bc;
+
+ return bestsad;
+}
+#undef CHECK_BOUNDS
+#undef CHECK_POINT
+#undef CHECK_BETTER
+
+int vp8_diamond_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int_mv *best_mv, int search_param, int sad_per_bit,
+ int *num00, vp8_variance_fn_ptr_t *fn_ptr,
+ int *mvcost[2], int_mv *center_mv) {
+ int i, j, step;
+
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ unsigned char *in_what;
+ int pre_stride = x->e_mbd.pre.y_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+ int in_what_stride = pre_stride;
+ unsigned char *best_address;
+
+ int tot_steps;
+ int_mv this_mv;
+
+ unsigned int bestsad;
+ unsigned int thissad;
+ int best_site = 0;
+ int last_site = 0;
+
+ int ref_row;
+ int ref_col;
+ int this_row_offset;
+ int this_col_offset;
+ search_site *ss;
+
+ unsigned char *check_here;
+
+ int *mvsadcost[2];
+ int_mv fcenter_mv;
+
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
+ x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
+ *num00 = 0;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ /* Work out the start point for the search */
+ in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) +
+ ref_col);
+ best_address = in_what;
+
+ /* Check the starting position */
+ bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) +
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+  /* search_param determines the length of the initial step and hence the
+   * number of iterations: 0 = initial step of MAX_FIRST_STEP pel,
+   * 1 = MAX_FIRST_STEP/2 pel, 2 = MAX_FIRST_STEP/4 pel, etc.
+   */
+ ss = &x->ss[search_param * x->searches_per_step];
+ tot_steps = (x->ss_count / x->searches_per_step) - search_param;
+
+ i = 1;
+
+ for (step = 0; step < tot_steps; ++step) {
+ for (j = 0; j < x->searches_per_step; ++j) {
+ /* Trap illegal vectors */
+ this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
+
+      if ((this_col_offset > x->mv_col_min) &&
+          (this_col_offset < x->mv_col_max) &&
+          (this_row_offset > x->mv_row_min) &&
+          (this_row_offset < x->mv_row_max)) {
+ check_here = ss[i].offset + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad +=
+ mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = i;
+ }
+ }
+ }
+
+ i++;
+ }
+
+ if (best_site != last_site) {
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
+ best_address += ss[best_site].offset;
+ last_site = best_site;
+ } else if (best_address == in_what) {
+ (*num00)++;
+ }
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+}
+
+#if HAVE_SSE2 || HAVE_MSA || HAVE_LSX
+int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int_mv *best_mv, int search_param, int sad_per_bit,
+ int *num00, vp8_variance_fn_ptr_t *fn_ptr,
+ int *mvcost[2], int_mv *center_mv) {
+ int i, j, step;
+
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ unsigned char *in_what;
+ int pre_stride = x->e_mbd.pre.y_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+ int in_what_stride = pre_stride;
+ unsigned char *best_address;
+
+ int tot_steps;
+ int_mv this_mv;
+
+ unsigned int bestsad;
+ unsigned int thissad;
+ int best_site = 0;
+ int last_site = 0;
+
+ int ref_row;
+ int ref_col;
+ int this_row_offset;
+ int this_col_offset;
+ search_site *ss;
+
+ unsigned char *check_here;
+
+ int *mvsadcost[2];
+ int_mv fcenter_mv;
+
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
+ x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
+ *num00 = 0;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ /* Work out the start point for the search */
+ in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) +
+ ref_col);
+ best_address = in_what;
+
+ /* Check the starting position */
+ bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) +
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+  /* search_param determines the length of the initial step and hence the
+   * number of iterations: 0 = initial step of MAX_FIRST_STEP pel,
+   * 1 = MAX_FIRST_STEP/2 pel, 2 = MAX_FIRST_STEP/4 pel, etc.
+   */
+ ss = &x->ss[search_param * x->searches_per_step];
+ tot_steps = (x->ss_count / x->searches_per_step) - search_param;
+
+ i = 1;
+
+ for (step = 0; step < tot_steps; ++step) {
+ int all_in = 1, t;
+
+    /* If the four extreme neighbor points are within the bounds, all
+     * candidate points of this step are too, so four bounds checks are
+     * enough instead of four checks per point.
+     */
+ all_in &= ((best_mv->as_mv.row + ss[i].mv.row) > x->mv_row_min);
+ all_in &= ((best_mv->as_mv.row + ss[i + 1].mv.row) < x->mv_row_max);
+ all_in &= ((best_mv->as_mv.col + ss[i + 2].mv.col) > x->mv_col_min);
+ all_in &= ((best_mv->as_mv.col + ss[i + 3].mv.col) < x->mv_col_max);
+
+ if (all_in) {
+ unsigned int sad_array[4];
+
+ for (j = 0; j < x->searches_per_step; j += 4) {
+ const unsigned char *block_offset[4];
+
+ for (t = 0; t < 4; ++t) {
+ block_offset[t] = ss[i + t].offset + best_address;
+ }
+
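+        /* sdx4df computes the SAD of the source block against all four
+         * candidate offsets in a single call; this batching is where the
+         * SIMD variant gains over calling sdf per point.
+         */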
+ fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
+ sad_array);
+
+ for (t = 0; t < 4; t++, i++) {
+ if (sad_array[t] < bestsad) {
+ this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
+ this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
+ sad_array[t] +=
+ mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+ if (sad_array[t] < bestsad) {
+ bestsad = sad_array[t];
+ best_site = i;
+ }
+ }
+ }
+ }
+ } else {
+ for (j = 0; j < x->searches_per_step; ++j) {
+ /* Trap illegal vectors */
+ this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
+
+ if ((this_col_offset > x->mv_col_min) &&
+ (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) &&
+ (this_row_offset < x->mv_row_max)) {
+ check_here = ss[i].offset + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad +=
+ mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = i;
+ }
+ }
+ }
+ i++;
+ }
+ }
+
+ if (best_site != last_site) {
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
+ best_address += ss[best_site].offset;
+ last_site = best_site;
+ } else if (best_address == in_what) {
+ (*num00)++;
+ }
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row * 8;
+ this_mv.as_mv.col = best_mv->as_mv.col * 8;
+
+ return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+}
+#endif // HAVE_SSE2 || HAVE_MSA || HAVE_LSX
+
+int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int sad_per_bit, int distance,
+ vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ int_mv *center_mv) {
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ unsigned char *in_what;
+ int pre_stride = x->e_mbd.pre.y_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+ int in_what_stride = pre_stride;
+ int mv_stride = pre_stride;
+ unsigned char *bestaddress;
+ int_mv *best_mv = &d->bmi.mv;
+ int_mv this_mv;
+ unsigned int bestsad;
+ unsigned int thissad;
+ int r, c;
+
+ unsigned char *check_here;
+
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
+
+ int row_min = ref_row - distance;
+ int row_max = ref_row + distance;
+ int col_min = ref_col - distance;
+ int col_max = ref_col + distance;
+
+ int *mvsadcost[2];
+ int_mv fcenter_mv;
+
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ /* Work out the mid point for the search */
+ in_what = base_pre + d->offset;
+ bestaddress = in_what + (ref_row * pre_stride) + ref_col;
+
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ /* Baseline value at the centre */
+ bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride) +
+ mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+  /* Apply further limits to prevent us from using vectors that stretch
+   * beyond the UMV border.
+   */
+ if (col_min < x->mv_col_min) col_min = x->mv_col_min;
+
+ if (col_max > x->mv_col_max) col_max = x->mv_col_max;
+
+ if (row_min < x->mv_row_min) row_min = x->mv_row_min;
+
+ if (row_max > x->mv_row_max) row_max = x->mv_row_max;
+
+ for (r = row_min; r < row_max; ++r) {
+ this_mv.as_mv.row = r;
+ check_here = r * mv_stride + in_what + col_min;
+
+ for (c = col_min; c < col_max; ++c) {
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad +=
+ mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ }
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row * 8;
+ this_mv.as_mv.col = best_mv->as_mv.col * 8;
+
+ return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+}
+
+int vp8_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp8_variance_fn_ptr_t *fn_ptr,
+ int *mvcost[2], int_mv *center_mv) {
+ MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
+ int i, j;
+ short this_row_offset, this_col_offset;
+
+ int what_stride = b->src_stride;
+ int pre_stride = x->e_mbd.pre.y_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+ int in_what_stride = pre_stride;
+ unsigned char *what = (*(b->base_src) + b->src);
+ unsigned char *best_address =
+ (unsigned char *)(base_pre + d->offset +
+ (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col);
+ unsigned char *check_here;
+ int_mv this_mv;
+ unsigned int bestsad;
+ unsigned int thissad;
+
+ int *mvsadcost[2];
+ int_mv fcenter_mv;
+
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride) +
+ mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
+
+ for (i = 0; i < search_range; ++i) {
+ int best_site = -1;
+
+ for (j = 0; j < 4; ++j) {
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
+
+ if ((this_col_offset > x->mv_col_min) &&
+ (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) &&
+ (this_row_offset < x->mv_row_max)) {
+ check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col +
+ best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad +=
+ mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = j;
+ }
+ }
+ }
+ }
+
+ if (best_site == -1) {
+ break;
+ } else {
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
+ best_address += (neighbors[best_site].row) * in_what_stride +
+ neighbors[best_site].col;
+ }
+ }
+
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+
+ return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+}
+
+#if HAVE_SSE2 || HAVE_MSA
+int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp8_variance_fn_ptr_t *fn_ptr,
+ int *mvcost[2], int_mv *center_mv) {
+ MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
+ int i, j;
+ short this_row_offset, this_col_offset;
+
+ int what_stride = b->src_stride;
+ int pre_stride = x->e_mbd.pre.y_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+ int in_what_stride = pre_stride;
+ unsigned char *what = (*(b->base_src) + b->src);
+ unsigned char *best_address =
+ (unsigned char *)(base_pre + d->offset +
+ (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col);
+ unsigned char *check_here;
+ int_mv this_mv;
+ unsigned int bestsad;
+ unsigned int thissad;
+
+ int *mvsadcost[2];
+ int_mv fcenter_mv;
+
+ mvsadcost[0] = x->mvsadcost[0];
+ mvsadcost[1] = x->mvsadcost[1];
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride) +
+ mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
+
+ for (i = 0; i < search_range; ++i) {
+ int best_site = -1;
+ int all_in = 1;
+
+ all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min);
+ all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max);
+ all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min);
+ all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max);
+
+ if (all_in) {
+ unsigned int sad_array[4];
+ const unsigned char *block_offset[4];
+ block_offset[0] = best_address - in_what_stride;
+ block_offset[1] = best_address - 1;
+ block_offset[2] = best_address + 1;
+ block_offset[3] = best_address + in_what_stride;
+
+ fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
+ sad_array);
+
+ for (j = 0; j < 4; ++j) {
+ if (sad_array[j] < bestsad) {
+ this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
+ this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
+ sad_array[j] +=
+ mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
+
+ if (sad_array[j] < bestsad) {
+ bestsad = sad_array[j];
+ best_site = j;
+ }
+ }
+ }
+ } else {
+ for (j = 0; j < 4; ++j) {
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
+
+ if ((this_col_offset > x->mv_col_min) &&
+ (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) &&
+ (this_row_offset < x->mv_row_max)) {
+ check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col +
+ best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad +=
+ mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = j;
+ }
+ }
+ }
+ }
+ }
+
+ if (best_site == -1) {
+ break;
+ } else {
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
+ best_address += (neighbors[best_site].row) * in_what_stride +
+ neighbors[best_site].col;
+ }
+ }
+
+ this_mv.as_mv.row = ref_mv->as_mv.row * 8;
+ this_mv.as_mv.col = ref_mv->as_mv.col * 8;
+
+ return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+}
+#endif // HAVE_SSE2 || HAVE_MSA
diff --git a/media/libvpx/libvpx/vp8/encoder/mcomp.h b/media/libvpx/libvpx/vp8/encoder/mcomp.h
new file mode 100644
index 0000000000..1ee6fe5dd6
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mcomp.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_MCOMP_H_
+#define VPX_VP8_ENCODER_MCOMP_H_
+
+#include "block.h"
+#include "vpx_dsp/variance.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The maximum number of steps in a step search given the largest allowed
+ * initial step
+ */
+#define MAX_MVSEARCH_STEPS 8
+
+/* Max full pel mv specified in 1 pel units */
+#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1)
+
+/* Maximum size of the first step in full pel units */
+#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS - 1))
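+
+/* With MAX_MVSEARCH_STEPS == 8, these evaluate to MAX_FULL_PEL_VAL == 255
+ * and MAX_FIRST_STEP == 128.
+ */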
+
+int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight);
+void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride);
+void vp8_init3smotion_compensation(MACROBLOCK *x, int stride);
+
+int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int_mv *best_mv, int search_param, int sad_per_bit,
+ const vp8_variance_fn_ptr_t *vfp, int *mvsadcost[2],
+ int_mv *center_mv);
+
+typedef int(fractional_mv_step_fp)(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp8_variance_fn_ptr_t *vfp,
+ int *mvcost[2], int *distortion,
+ unsigned int *sse);
+
+fractional_mv_step_fp vp8_find_best_sub_pixel_step_iteratively;
+fractional_mv_step_fp vp8_find_best_sub_pixel_step;
+fractional_mv_step_fp vp8_find_best_half_pixel_step;
+fractional_mv_step_fp vp8_skip_fractional_mv_step;
+
+int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int sad_per_bit, int distance,
+ vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ int_mv *center_mv);
+
+typedef int (*vp8_refining_search_fn_t)(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *ref_mv, int sad_per_bit,
+ int distance,
+ vp8_variance_fn_ptr_t *fn_ptr,
+ int *mvcost[2], int_mv *center_mv);
+
+typedef int (*vp8_diamond_search_fn_t)(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *ref_mv, int_mv *best_mv,
+ int search_param, int sad_per_bit,
+ int *num00,
+ vp8_variance_fn_ptr_t *fn_ptr,
+ int *mvcost[2], int_mv *center_mv);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_MCOMP_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/mips/mmi/dct_mmi.c b/media/libvpx/libvpx/vp8/encoder/mips/mmi/dct_mmi.c
new file mode 100644
index 0000000000..0fd25fcda5
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mips/mmi/dct_mmi.c
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vpx_ports/mem.h"
+#include "vpx_ports/asmdefs_mmi.h"
+
+/* clang-format off */
+/* TRANSPOSE_4H: transpose a 4x4 matrix of 16-bit elements.
+   Input: ftmp1,ftmp2,ftmp3,ftmp4
+   Output: ftmp1,ftmp2,ftmp3,ftmp4
+   Note: ftmp0 must always be 0; ftmp5~ftmp10 are used for temporary values.
+ */
+#define TRANSPOSE_4H \
+ MMI_LI(%[tmp0], 0x93) \
+ "mtc1 %[tmp0], %[ftmp10] \n\t" \
+ "punpcklhw %[ftmp5], %[ftmp1], %[ftmp0] \n\t" \
+ "punpcklhw %[ftmp9], %[ftmp2], %[ftmp0] \n\t" \
+ "pshufh %[ftmp9], %[ftmp9], %[ftmp10] \n\t" \
+ "por %[ftmp5], %[ftmp5], %[ftmp9] \n\t" \
+ "punpckhhw %[ftmp6], %[ftmp1], %[ftmp0] \n\t" \
+ "punpckhhw %[ftmp9], %[ftmp2], %[ftmp0] \n\t" \
+ "pshufh %[ftmp9], %[ftmp9], %[ftmp10] \n\t" \
+ "por %[ftmp6], %[ftmp6], %[ftmp9] \n\t" \
+ "punpcklhw %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \
+ "punpcklhw %[ftmp9], %[ftmp4], %[ftmp0] \n\t" \
+ "pshufh %[ftmp9], %[ftmp9], %[ftmp10] \n\t" \
+ "por %[ftmp7], %[ftmp7], %[ftmp9] \n\t" \
+ "punpckhhw %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \
+ "punpckhhw %[ftmp9], %[ftmp4], %[ftmp0] \n\t" \
+ "pshufh %[ftmp9], %[ftmp9], %[ftmp10] \n\t" \
+ "por %[ftmp8], %[ftmp8], %[ftmp9] \n\t" \
+ "punpcklwd %[ftmp1], %[ftmp5], %[ftmp7] \n\t" \
+ "punpckhwd %[ftmp2], %[ftmp5], %[ftmp7] \n\t" \
+ "punpcklwd %[ftmp3], %[ftmp6], %[ftmp8] \n\t" \
+ "punpckhwd %[ftmp4], %[ftmp6], %[ftmp8] \n\t"
+/* clang-format on */
+
+void vp8_short_fdct4x4_mmi(int16_t *input, int16_t *output, int pitch) {
+ uint64_t tmp[1];
+ int16_t *ip = input;
+ double ff_ph_op1, ff_ph_op3;
+
+#if _MIPS_SIM == _ABIO32
+ register double ftmp0 asm("$f0");
+ register double ftmp1 asm("$f2");
+ register double ftmp2 asm("$f4");
+ register double ftmp3 asm("$f6");
+ register double ftmp4 asm("$f8");
+ register double ftmp5 asm("$f10");
+ register double ftmp6 asm("$f12");
+ register double ftmp7 asm("$f14");
+ register double ftmp8 asm("$f16");
+ register double ftmp9 asm("$f18");
+ register double ftmp10 asm("$f20");
+ register double ftmp11 asm("$f22");
+ register double ftmp12 asm("$f24");
+#else
+ register double ftmp0 asm("$f0");
+ register double ftmp1 asm("$f1");
+ register double ftmp2 asm("$f2");
+ register double ftmp3 asm("$f3");
+ register double ftmp4 asm("$f4");
+ register double ftmp5 asm("$f5");
+ register double ftmp6 asm("$f6");
+ register double ftmp7 asm("$f7");
+ register double ftmp8 asm("$f8");
+ register double ftmp9 asm("$f9");
+ register double ftmp10 asm("$f10");
+ register double ftmp11 asm("$f11");
+ register double ftmp12 asm("$f12");
+#endif // _MIPS_SIM == _ABIO32
+
+ DECLARE_ALIGNED(8, const uint64_t, ff_ph_01) = { 0x0001000100010001ULL };
+ DECLARE_ALIGNED(8, const uint64_t, ff_ph_07) = { 0x0007000700070007ULL };
+ DECLARE_ALIGNED(8, const uint64_t, ff_pw_12000) = { 0x00002ee000002ee0ULL };
+ DECLARE_ALIGNED(8, const uint64_t, ff_pw_51000) = { 0x0000c7380000c738ULL };
+ DECLARE_ALIGNED(8, const uint64_t, ff_pw_14500) = { 0x000038a4000038a4ULL };
+ DECLARE_ALIGNED(8, const uint64_t, ff_pw_7500) = { 0x00001d4c00001d4cULL };
+ DECLARE_ALIGNED(8, const uint64_t, ff_pw_5352) = { 0x000014e8000014e8ULL };
+ DECLARE_ALIGNED(8, const uint64_t, ff_pw_2217) = { 0x000008a9000008a9ULL };
+ DECLARE_ALIGNED(8, const uint64_t, ff_ph_8) = { 0x0008000800080008ULL };
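+
+  /* Standard VP8 forward-DCT constants in hex: 5352 (0x14e8) and 2217
+   * (0x08a9) are the multipliers; 14500/7500 round the first pass and
+   * 12000/51000 the second. ff_ph_op1 packs the halfword pair {2217, 5352}
+   * and ff_ph_op3 packs {2217, -5352} (0xeb18) for the pmaddhw steps.
+   */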
+
+ /* clang-format off */
+ __asm__ volatile (
+ "dli %[tmp0], 0x14e808a914e808a9 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_op1] \n\t"
+ "dli %[tmp0], 0xeb1808a9eb1808a9 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_op3] \n\t"
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "gsldlc1 %[ftmp1], 0x07(%[ip]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[ip]) \n\t"
+ MMI_ADDU(%[ip], %[ip], %[pitch])
+ "gsldlc1 %[ftmp2], 0x07(%[ip]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[ip]) \n\t"
+ MMI_ADDU(%[ip], %[ip], %[pitch])
+ "gsldlc1 %[ftmp3], 0x07(%[ip]) \n\t"
+ "gsldrc1 %[ftmp3], 0x00(%[ip]) \n\t"
+ MMI_ADDU(%[ip], %[ip], %[pitch])
+ "gsldlc1 %[ftmp4], 0x07(%[ip]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[ip]) \n\t"
+ MMI_ADDU(%[ip], %[ip], %[pitch])
+ TRANSPOSE_4H
+
+ "ldc1 %[ftmp11], %[ff_ph_8] \n\t"
+ // f1 + f4
+ "paddh %[ftmp5], %[ftmp1], %[ftmp4] \n\t"
+ // a1
+ "pmullh %[ftmp5], %[ftmp5], %[ftmp11] \n\t"
+ // f2 + f3
+ "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t"
+ // b1
+ "pmullh %[ftmp6], %[ftmp6], %[ftmp11] \n\t"
+ // f2 - f3
+ "psubh %[ftmp7], %[ftmp2], %[ftmp3] \n\t"
+ // c1
+ "pmullh %[ftmp7], %[ftmp7], %[ftmp11] \n\t"
+ // f1 - f4
+ "psubh %[ftmp8], %[ftmp1], %[ftmp4] \n\t"
+ // d1
+ "pmullh %[ftmp8], %[ftmp8], %[ftmp11] \n\t"
+ // op[0] = a1 + b1
+ "paddh %[ftmp1], %[ftmp5], %[ftmp6] \n\t"
+ // op[2] = a1 - b1
+ "psubh %[ftmp3], %[ftmp5], %[ftmp6] \n\t"
+
+ // op[1] = (c1 * 2217 + d1 * 5352 + 14500) >> 12
+ MMI_LI(%[tmp0], 0x0c)
+ "dmtc1 %[tmp0], %[ftmp11] \n\t"
+ "ldc1 %[ftmp12], %[ff_pw_14500] \n\t"
+ "punpcklhw %[ftmp9], %[ftmp7], %[ftmp8] \n\t"
+ "pmaddhw %[ftmp5], %[ftmp9], %[ff_ph_op1] \n\t"
+ "punpckhhw %[ftmp9], %[ftmp7], %[ftmp8] \n\t"
+ "pmaddhw %[ftmp6], %[ftmp9], %[ff_ph_op1] \n\t"
+ "paddw %[ftmp5], %[ftmp5], %[ftmp12] \n\t"
+ "paddw %[ftmp6], %[ftmp6], %[ftmp12] \n\t"
+ "psraw %[ftmp5], %[ftmp5], %[ftmp11] \n\t"
+ "psraw %[ftmp6], %[ftmp6], %[ftmp11] \n\t"
+ "packsswh %[ftmp2], %[ftmp5], %[ftmp6] \n\t"
+
+ // op[3] = (d1 * 2217 - c1 * 5352 + 7500) >> 12
+ "ldc1 %[ftmp12], %[ff_pw_7500] \n\t"
+ "punpcklhw %[ftmp9], %[ftmp8], %[ftmp7] \n\t"
+ "pmaddhw %[ftmp5], %[ftmp9], %[ff_ph_op3] \n\t"
+ "punpckhhw %[ftmp9], %[ftmp8], %[ftmp7] \n\t"
+ "pmaddhw %[ftmp6], %[ftmp9], %[ff_ph_op3] \n\t"
+ "paddw %[ftmp5], %[ftmp5], %[ftmp12] \n\t"
+ "paddw %[ftmp6], %[ftmp6], %[ftmp12] \n\t"
+ "psraw %[ftmp5], %[ftmp5], %[ftmp11] \n\t"
+ "psraw %[ftmp6], %[ftmp6], %[ftmp11] \n\t"
+ "packsswh %[ftmp4], %[ftmp5], %[ftmp6] \n\t"
+ TRANSPOSE_4H
+
+ "paddh %[ftmp5], %[ftmp1], %[ftmp4] \n\t"
+ "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t"
+ "psubh %[ftmp7], %[ftmp2], %[ftmp3] \n\t"
+ "psubh %[ftmp8], %[ftmp1], %[ftmp4] \n\t"
+
+ "pcmpeqh %[ftmp0], %[ftmp8], %[ftmp0] \n\t"
+ "ldc1 %[ftmp9], %[ff_ph_01] \n\t"
+ "paddh %[ftmp0], %[ftmp0], %[ftmp9] \n\t"
+
+ "paddh %[ftmp1], %[ftmp5], %[ftmp6] \n\t"
+ "psubh %[ftmp2], %[ftmp5], %[ftmp6] \n\t"
+ "ldc1 %[ftmp9], %[ff_ph_07] \n\t"
+ "paddh %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
+ "paddh %[ftmp2], %[ftmp2], %[ftmp9] \n\t"
+ MMI_LI(%[tmp0], 0x04)
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "psrah %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
+ "psrah %[ftmp2], %[ftmp2], %[ftmp9] \n\t"
+
+ MMI_LI(%[tmp0], 0x10)
+ "mtc1 %[tmp0], %[ftmp9] \n\t"
+ "ldc1 %[ftmp12], %[ff_pw_12000] \n\t"
+ "punpcklhw %[ftmp5], %[ftmp7], %[ftmp8] \n\t"
+ "pmaddhw %[ftmp10], %[ftmp5], %[ff_ph_op1] \n\t"
+ "punpckhhw %[ftmp5], %[ftmp7], %[ftmp8] \n\t"
+ "pmaddhw %[ftmp11], %[ftmp5], %[ff_ph_op1] \n\t"
+ "paddw %[ftmp10], %[ftmp10], %[ftmp12] \n\t"
+ "paddw %[ftmp11], %[ftmp11], %[ftmp12] \n\t"
+ "psraw %[ftmp10], %[ftmp10], %[ftmp9] \n\t"
+ "psraw %[ftmp11], %[ftmp11], %[ftmp9] \n\t"
+ "packsswh %[ftmp3], %[ftmp10], %[ftmp11] \n\t"
+ "paddh %[ftmp3], %[ftmp3], %[ftmp0] \n\t"
+
+ "ldc1 %[ftmp12], %[ff_pw_51000] \n\t"
+ "punpcklhw %[ftmp5], %[ftmp8], %[ftmp7] \n\t"
+ "pmaddhw %[ftmp10], %[ftmp5], %[ff_ph_op3] \n\t"
+ "punpckhhw %[ftmp5], %[ftmp8], %[ftmp7] \n\t"
+ "pmaddhw %[ftmp11], %[ftmp5], %[ff_ph_op3] \n\t"
+ "paddw %[ftmp10], %[ftmp10], %[ftmp12] \n\t"
+ "paddw %[ftmp11], %[ftmp11], %[ftmp12] \n\t"
+ "psraw %[ftmp10], %[ftmp10], %[ftmp9] \n\t"
+ "psraw %[ftmp11], %[ftmp11], %[ftmp9] \n\t"
+ "packsswh %[ftmp4], %[ftmp10], %[ftmp11] \n\t"
+
+ "gssdlc1 %[ftmp1], 0x07(%[output]) \n\t"
+ "gssdrc1 %[ftmp1], 0x00(%[output]) \n\t"
+ "gssdlc1 %[ftmp3], 0x0f(%[output]) \n\t"
+ "gssdrc1 %[ftmp3], 0x08(%[output]) \n\t"
+ "gssdlc1 %[ftmp2], 0x17(%[output]) \n\t"
+ "gssdrc1 %[ftmp2], 0x10(%[output]) \n\t"
+ "gssdlc1 %[ftmp4], 0x1f(%[output]) \n\t"
+ "gssdrc1 %[ftmp4], 0x18(%[output]) \n\t"
+
+ : [ftmp0] "=&f"(ftmp0), [ftmp1] "=&f"(ftmp1), [ftmp2] "=&f"(ftmp2),
+ [ftmp3] "=&f"(ftmp3), [ftmp4] "=&f"(ftmp4), [ftmp5] "=&f"(ftmp5),
+ [ftmp6] "=&f"(ftmp6), [ftmp7] "=&f"(ftmp7), [ftmp8] "=&f"(ftmp8),
+ [ftmp9] "=&f"(ftmp9), [ftmp10] "=&f"(ftmp10), [ftmp11] "=&f"(ftmp11),
+ [ftmp12] "=&f"(ftmp12), [tmp0] "=&r"(tmp[0]), [ip]"+&r"(ip),
+ [ff_ph_op1] "=&f"(ff_ph_op1), [ff_ph_op3] "=&f"(ff_ph_op3)
+ : [ff_ph_01] "m"(ff_ph_01), [ff_ph_07] "m"(ff_ph_07),
+ [ff_pw_14500] "m"(ff_pw_14500), [ff_pw_7500] "m"(ff_pw_7500),
+ [ff_pw_12000] "m"(ff_pw_12000), [ff_pw_51000] "m"(ff_pw_51000),
+ [ff_pw_5352]"m"(ff_pw_5352), [ff_pw_2217]"m"(ff_pw_2217),
+ [ff_ph_8]"m"(ff_ph_8), [pitch]"r"(pitch), [output] "r"(output)
+ : "memory"
+ );
+ /* clang-format on */
+}
+
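+/* An 8x4 forward DCT is two independent 4x4 transforms on the left and
+ * right halves of the block: input + 4 addresses the second group of four
+ * columns and output + 16 its 16 coefficients.
+ */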
+void vp8_short_fdct8x4_mmi(int16_t *input, int16_t *output, int pitch) {
+ vp8_short_fdct4x4_mmi(input, output, pitch);
+ vp8_short_fdct4x4_mmi(input + 4, output + 16, pitch);
+}
+
+void vp8_short_walsh4x4_mmi(int16_t *input, int16_t *output, int pitch) {
+ double ftmp[13], ff_ph_01, ff_pw_01, ff_pw_03, ff_pw_mask;
+ uint64_t tmp[1];
+
+ /* clang-format off */
+ __asm__ volatile (
+ "dli %[tmp0], 0x0001000100010001 \n\t"
+ "dmtc1 %[tmp0], %[ff_ph_01] \n\t"
+ "dli %[tmp0], 0x0000000100000001 \n\t"
+ "dmtc1 %[tmp0], %[ff_pw_01] \n\t"
+ "dli %[tmp0], 0x0000000300000003 \n\t"
+ "dmtc1 %[tmp0], %[ff_pw_03] \n\t"
+ "dli %[tmp0], 0x0001000000010000 \n\t"
+ "dmtc1 %[tmp0], %[ff_pw_mask] \n\t"
+ MMI_LI(%[tmp0], 0x02)
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "dmtc1 %[tmp0], %[ftmp11] \n\t"
+
+ "gsldlc1 %[ftmp1], 0x07(%[ip]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[ip]) \n\t"
+ MMI_ADDU(%[ip], %[ip], %[pitch])
+ "gsldlc1 %[ftmp2], 0x07(%[ip]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[ip]) \n\t"
+ MMI_ADDU(%[ip], %[ip], %[pitch])
+ "gsldlc1 %[ftmp3], 0x07(%[ip]) \n\t"
+ "gsldrc1 %[ftmp3], 0x00(%[ip]) \n\t"
+ MMI_ADDU(%[ip], %[ip], %[pitch])
+ "gsldlc1 %[ftmp4], 0x07(%[ip]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[ip]) \n\t"
+ TRANSPOSE_4H
+
+ "psllh %[ftmp1], %[ftmp1], %[ftmp11] \n\t"
+ "psllh %[ftmp2], %[ftmp2], %[ftmp11] \n\t"
+ "psllh %[ftmp3], %[ftmp3], %[ftmp11] \n\t"
+ "psllh %[ftmp4], %[ftmp4], %[ftmp11] \n\t"
+ // a
+ "paddh %[ftmp5], %[ftmp1], %[ftmp3] \n\t"
+ // d
+ "paddh %[ftmp6], %[ftmp2], %[ftmp4] \n\t"
+ // c
+ "psubh %[ftmp7], %[ftmp2], %[ftmp4] \n\t"
+ // b
+ "psubh %[ftmp8], %[ftmp1], %[ftmp3] \n\t"
+
+ // a + d
+ "paddh %[ftmp1], %[ftmp5], %[ftmp6] \n\t"
+ // b + c
+ "paddh %[ftmp2], %[ftmp8], %[ftmp7] \n\t"
+ // b - c
+ "psubh %[ftmp3], %[ftmp8], %[ftmp7] \n\t"
+ // a - d
+ "psubh %[ftmp4], %[ftmp5], %[ftmp6] \n\t"
+
+ "pcmpeqh %[ftmp6], %[ftmp5], %[ftmp0] \n\t"
+ "paddh %[ftmp6], %[ftmp6], %[ff_ph_01] \n\t"
+ "paddh %[ftmp1], %[ftmp1], %[ftmp6] \n\t"
+ TRANSPOSE_4H
+
+ // op[2], op[0]
+ "pmaddhw %[ftmp5], %[ftmp1], %[ff_pw_01] \n\t"
+ // op[3], op[1]
+ "pmaddhw %[ftmp1], %[ftmp1], %[ff_pw_mask] \n\t"
+
+ // op[6], op[4]
+ "pmaddhw %[ftmp6], %[ftmp2], %[ff_pw_01] \n\t"
+ // op[7], op[5]
+ "pmaddhw %[ftmp2], %[ftmp2], %[ff_pw_mask] \n\t"
+
+ // op[10], op[8]
+ "pmaddhw %[ftmp7], %[ftmp3], %[ff_pw_01] \n\t"
+ // op[11], op[9]
+ "pmaddhw %[ftmp3], %[ftmp3], %[ff_pw_mask] \n\t"
+
+ // op[14], op[12]
+ "pmaddhw %[ftmp8], %[ftmp4], %[ff_pw_01] \n\t"
+ // op[15], op[13]
+ "pmaddhw %[ftmp4], %[ftmp4], %[ff_pw_mask] \n\t"
+
+ // a1, a3
+ "paddw %[ftmp9], %[ftmp5], %[ftmp7] \n\t"
+ // d1, d3
+ "paddw %[ftmp10], %[ftmp6], %[ftmp8] \n\t"
+ // c1, c3
+ "psubw %[ftmp11], %[ftmp6], %[ftmp8] \n\t"
+ // b1, b3
+ "psubw %[ftmp12], %[ftmp5], %[ftmp7] \n\t"
+
+ // a1 + d1, a3 + d3
+ "paddw %[ftmp5], %[ftmp9], %[ftmp10] \n\t"
+ // b1 + c1, b3 + c3
+ "paddw %[ftmp6], %[ftmp12], %[ftmp11] \n\t"
+ // b1 - c1, b3 - c3
+ "psubw %[ftmp7], %[ftmp12], %[ftmp11] \n\t"
+ // a1 - d1, a3 - d3
+ "psubw %[ftmp8], %[ftmp9], %[ftmp10] \n\t"
+
+ // a2, a4
+ "paddw %[ftmp9], %[ftmp1], %[ftmp3] \n\t"
+ // d2, d4
+ "paddw %[ftmp10], %[ftmp2], %[ftmp4] \n\t"
+ // c2, c4
+ "psubw %[ftmp11], %[ftmp2], %[ftmp4] \n\t"
+ // b2, b4
+ "psubw %[ftmp12], %[ftmp1], %[ftmp3] \n\t"
+
+ // a2 + d2, a4 + d4
+ "paddw %[ftmp1], %[ftmp9], %[ftmp10] \n\t"
+ // b2 + c2, b4 + c4
+ "paddw %[ftmp2], %[ftmp12], %[ftmp11] \n\t"
+ // b2 - c2, b4 - c4
+ "psubw %[ftmp3], %[ftmp12], %[ftmp11] \n\t"
+ // a2 - d2, a4 - d4
+ "psubw %[ftmp4], %[ftmp9], %[ftmp10] \n\t"
+
+ MMI_LI(%[tmp0], 0x03)
+ "dmtc1 %[tmp0], %[ftmp11] \n\t"
+
+ "pcmpgtw %[ftmp9], %[ftmp0], %[ftmp1] \n\t"
+ "pand %[ftmp9], %[ftmp9], %[ff_pw_01] \n\t"
+ "paddw %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
+ "paddw %[ftmp1], %[ftmp1], %[ff_pw_03] \n\t"
+ "psraw %[ftmp1], %[ftmp1], %[ftmp11] \n\t"
+
+ "pcmpgtw %[ftmp9], %[ftmp0], %[ftmp2] \n\t"
+ "pand %[ftmp9], %[ftmp9], %[ff_pw_01] \n\t"
+ "paddw %[ftmp2], %[ftmp2], %[ftmp9] \n\t"
+ "paddw %[ftmp2], %[ftmp2], %[ff_pw_03] \n\t"
+ "psraw %[ftmp2], %[ftmp2], %[ftmp11] \n\t"
+
+ "pcmpgtw %[ftmp9], %[ftmp0], %[ftmp3] \n\t"
+ "pand %[ftmp9], %[ftmp9], %[ff_pw_01] \n\t"
+ "paddw %[ftmp3], %[ftmp3], %[ftmp9] \n\t"
+ "paddw %[ftmp3], %[ftmp3], %[ff_pw_03] \n\t"
+ "psraw %[ftmp3], %[ftmp3], %[ftmp11] \n\t"
+
+ "pcmpgtw %[ftmp9], %[ftmp0], %[ftmp4] \n\t"
+ "pand %[ftmp9], %[ftmp9], %[ff_pw_01] \n\t"
+ "paddw %[ftmp4], %[ftmp4], %[ftmp9] \n\t"
+ "paddw %[ftmp4], %[ftmp4], %[ff_pw_03] \n\t"
+ "psraw %[ftmp4], %[ftmp4], %[ftmp11] \n\t"
+
+ "pcmpgtw %[ftmp9], %[ftmp0], %[ftmp5] \n\t"
+ "pand %[ftmp9], %[ftmp9], %[ff_pw_01] \n\t"
+ "paddw %[ftmp5], %[ftmp5], %[ftmp9] \n\t"
+ "paddw %[ftmp5], %[ftmp5], %[ff_pw_03] \n\t"
+ "psraw %[ftmp5], %[ftmp5], %[ftmp11] \n\t"
+
+ "pcmpgtw %[ftmp9], %[ftmp0], %[ftmp6] \n\t"
+ "pand %[ftmp9], %[ftmp9], %[ff_pw_01] \n\t"
+ "paddw %[ftmp6], %[ftmp6], %[ftmp9] \n\t"
+ "paddw %[ftmp6], %[ftmp6], %[ff_pw_03] \n\t"
+ "psraw %[ftmp6], %[ftmp6], %[ftmp11] \n\t"
+
+ "pcmpgtw %[ftmp9], %[ftmp0], %[ftmp7] \n\t"
+ "pand %[ftmp9], %[ftmp9], %[ff_pw_01] \n\t"
+ "paddw %[ftmp7], %[ftmp7], %[ftmp9] \n\t"
+ "paddw %[ftmp7], %[ftmp7], %[ff_pw_03] \n\t"
+ "psraw %[ftmp7], %[ftmp7], %[ftmp11] \n\t"
+
+ "pcmpgtw %[ftmp9], %[ftmp0], %[ftmp8] \n\t"
+ "pand %[ftmp9], %[ftmp9], %[ff_pw_01] \n\t"
+ "paddw %[ftmp8], %[ftmp8], %[ftmp9] \n\t"
+ "paddw %[ftmp8], %[ftmp8], %[ff_pw_03] \n\t"
+ "psraw %[ftmp8], %[ftmp8], %[ftmp11] \n\t"
+
+ "packsswh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
+ "packsswh %[ftmp2], %[ftmp2], %[ftmp6] \n\t"
+ "packsswh %[ftmp3], %[ftmp3], %[ftmp7] \n\t"
+ "packsswh %[ftmp4], %[ftmp4], %[ftmp8] \n\t"
+
+ MMI_LI(%[tmp0], 0x72)
+ "dmtc1 %[tmp0], %[ftmp11] \n\t"
+ "pshufh %[ftmp1], %[ftmp1], %[ftmp11] \n\t"
+ "pshufh %[ftmp2], %[ftmp2], %[ftmp11] \n\t"
+ "pshufh %[ftmp3], %[ftmp3], %[ftmp11] \n\t"
+ "pshufh %[ftmp4], %[ftmp4], %[ftmp11] \n\t"
+
+ "gssdlc1 %[ftmp1], 0x07(%[op]) \n\t"
+ "gssdrc1 %[ftmp1], 0x00(%[op]) \n\t"
+ "gssdlc1 %[ftmp2], 0x0f(%[op]) \n\t"
+ "gssdrc1 %[ftmp2], 0x08(%[op]) \n\t"
+ "gssdlc1 %[ftmp3], 0x17(%[op]) \n\t"
+ "gssdrc1 %[ftmp3], 0x10(%[op]) \n\t"
+ "gssdlc1 %[ftmp4], 0x1f(%[op]) \n\t"
+ "gssdrc1 %[ftmp4], 0x18(%[op]) \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
+ [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
+ [ftmp12]"=&f"(ftmp[12]), [ff_pw_mask]"=&f"(ff_pw_mask),
+ [tmp0]"=&r"(tmp[0]), [ff_pw_01]"=&f"(ff_pw_01),
+ [ip]"+&r"(input), [ff_pw_03]"=&f"(ff_pw_03),
+ [ff_ph_01]"=&f"(ff_ph_01)
+ : [op]"r"(output), [pitch]"r"((mips_reg)pitch)
+ : "memory"
+ );
+ /* clang-format on */
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/mips/mmi/vp8_quantize_mmi.c b/media/libvpx/libvpx/vp8/encoder/mips/mmi/vp8_quantize_mmi.c
new file mode 100644
index 0000000000..1986444aa3
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mips/mmi/vp8_quantize_mmi.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/asmdefs_mmi.h"
+#include "vp8/encoder/onyx_int.h"
+#include "vp8/encoder/quantize.h"
+#include "vp8/common/quant_common.h"
+
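+/* Scalar VP8 regular quantize for the coefficient at zig-zag position i
+ * (raster position rc): apply the running zig-zag zbin boost, quantize with
+ * the two-stage quant / quant_shift multiply, restore the sign, dequantize,
+ * and reset the boost whenever a nonzero level is produced.
+ */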
+#define REGULAR_SELECT_EOB(i, rc) \
+ z = coeff_ptr[rc]; \
+ sz = (z >> 31); \
+ x = (z ^ sz) - sz; \
+ zbin = zbin_ptr[rc] + *(zbin_boost_ptr++) + zbin_oq_value; \
+ if (x >= zbin) { \
+ x += round_ptr[rc]; \
+ y = ((((x * quant_ptr[rc]) >> 16) + x) * quant_shift_ptr[rc]) >> 16; \
+ if (y) { \
+ x = (y ^ sz) - sz; \
+ qcoeff_ptr[rc] = x; \
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc]; \
+ eob = i; \
+ zbin_boost_ptr = b->zrun_zbin_boost; \
+ } \
+ }
+
+void vp8_fast_quantize_b_mmi(BLOCK *b, BLOCKD *d) {
+ const int16_t *coeff_ptr = b->coeff;
+ const int16_t *round_ptr = b->round;
+ const int16_t *quant_ptr = b->quant_fast;
+ int16_t *qcoeff_ptr = d->qcoeff;
+ int16_t *dqcoeff_ptr = d->dqcoeff;
+ const int16_t *dequant_ptr = d->dequant;
+ const int16_t *inv_zig_zag = vp8_default_inv_zig_zag;
+
+ double ftmp[13];
+ uint64_t tmp[1];
+ int64_t eob = 0;
+ double ones;
+
+ __asm__ volatile(
+ // loop 0 ~ 7
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "pcmpeqh %[ones], %[ones], %[ones] \n\t"
+ "gsldlc1 %[ftmp1], 0x07(%[coeff_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[coeff_ptr]) \n\t"
+ "dli %[tmp0], 0x0f \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "gsldlc1 %[ftmp2], 0x0f(%[coeff_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x08(%[coeff_ptr]) \n\t"
+
+ "psrah %[ftmp3], %[ftmp1], %[ftmp9] \n\t"
+ "pxor %[ftmp1], %[ftmp3], %[ftmp1] \n\t"
+ "psubh %[ftmp1], %[ftmp1], %[ftmp3] \n\t"
+ "psrah %[ftmp4], %[ftmp2], %[ftmp9] \n\t"
+ "pxor %[ftmp2], %[ftmp4], %[ftmp2] \n\t"
+ "psubh %[ftmp2], %[ftmp2], %[ftmp4] \n\t"
+
+ "gsldlc1 %[ftmp5], 0x07(%[round_ptr]) \n\t"
+ "gsldrc1 %[ftmp5], 0x00(%[round_ptr]) \n\t"
+ "gsldlc1 %[ftmp6], 0x0f(%[round_ptr]) \n\t"
+ "gsldrc1 %[ftmp6], 0x08(%[round_ptr]) \n\t"
+ "paddh %[ftmp5], %[ftmp5], %[ftmp1] \n\t"
+ "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t"
+ "gsldlc1 %[ftmp7], 0x07(%[quant_ptr]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[quant_ptr]) \n\t"
+ "gsldlc1 %[ftmp8], 0x0f(%[quant_ptr]) \n\t"
+ "gsldrc1 %[ftmp8], 0x08(%[quant_ptr]) \n\t"
+ "pmulhuh %[ftmp5], %[ftmp5], %[ftmp7] \n\t"
+ "pmulhuh %[ftmp6], %[ftmp6], %[ftmp8] \n\t"
+
+ "pxor %[ftmp7], %[ftmp5], %[ftmp3] \n\t"
+ "pxor %[ftmp8], %[ftmp6], %[ftmp4] \n\t"
+ "psubh %[ftmp7], %[ftmp7], %[ftmp3] \n\t"
+ "psubh %[ftmp8], %[ftmp8], %[ftmp4] \n\t"
+ "gssdlc1 %[ftmp7], 0x07(%[qcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp7], 0x00(%[qcoeff_ptr]) \n\t"
+ "gssdlc1 %[ftmp8], 0x0f(%[qcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp8], 0x08(%[qcoeff_ptr]) \n\t"
+
+ "gsldlc1 %[ftmp1], 0x07(%[inv_zig_zag]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[inv_zig_zag]) \n\t"
+ "gsldlc1 %[ftmp2], 0x0f(%[inv_zig_zag]) \n\t"
+ "gsldrc1 %[ftmp2], 0x08(%[inv_zig_zag]) \n\t"
+ "pcmpeqh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
+ "pcmpeqh %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
+ "pxor %[ftmp5], %[ftmp5], %[ones] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ones] \n\t"
+ "pand %[ftmp5], %[ftmp5], %[ftmp1] \n\t"
+ "pand %[ftmp6], %[ftmp6], %[ftmp2] \n\t"
+ "pmaxsh %[ftmp10], %[ftmp5], %[ftmp6] \n\t"
+
+ "gsldlc1 %[ftmp5], 0x07(%[dequant_ptr]) \n\t"
+ "gsldrc1 %[ftmp5], 0x00(%[dequant_ptr]) \n\t"
+ "gsldlc1 %[ftmp6], 0x0f(%[dequant_ptr]) \n\t"
+ "gsldrc1 %[ftmp6], 0x08(%[dequant_ptr]) \n\t"
+ "pmullh %[ftmp5], %[ftmp5], %[ftmp7] \n\t"
+ "pmullh %[ftmp6], %[ftmp6], %[ftmp8] \n\t"
+ "gssdlc1 %[ftmp5], 0x07(%[dqcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp5], 0x00(%[dqcoeff_ptr]) \n\t"
+ "gssdlc1 %[ftmp6], 0x0f(%[dqcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp6], 0x08(%[dqcoeff_ptr]) \n\t"
+
+ // loop 8 ~ 15
+ "gsldlc1 %[ftmp1], 0x17(%[coeff_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x10(%[coeff_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x1f(%[coeff_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x18(%[coeff_ptr]) \n\t"
+
+ "psrah %[ftmp3], %[ftmp1], %[ftmp9] \n\t"
+ "pxor %[ftmp1], %[ftmp3], %[ftmp1] \n\t"
+ "psubh %[ftmp1], %[ftmp1], %[ftmp3] \n\t"
+ "psrah %[ftmp4], %[ftmp2], %[ftmp9] \n\t"
+ "pxor %[ftmp2], %[ftmp4], %[ftmp2] \n\t"
+ "psubh %[ftmp2], %[ftmp2], %[ftmp4] \n\t"
+
+ "gsldlc1 %[ftmp5], 0x17(%[round_ptr]) \n\t"
+ "gsldrc1 %[ftmp5], 0x10(%[round_ptr]) \n\t"
+ "gsldlc1 %[ftmp6], 0x1f(%[round_ptr]) \n\t"
+ "gsldrc1 %[ftmp6], 0x18(%[round_ptr]) \n\t"
+ "paddh %[ftmp5], %[ftmp5], %[ftmp1] \n\t"
+ "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t"
+ "gsldlc1 %[ftmp7], 0x17(%[quant_ptr]) \n\t"
+ "gsldrc1 %[ftmp7], 0x10(%[quant_ptr]) \n\t"
+ "gsldlc1 %[ftmp8], 0x1f(%[quant_ptr]) \n\t"
+ "gsldrc1 %[ftmp8], 0x18(%[quant_ptr]) \n\t"
+ "pmulhuh %[ftmp5], %[ftmp5], %[ftmp7] \n\t"
+ "pmulhuh %[ftmp6], %[ftmp6], %[ftmp8] \n\t"
+
+ "pxor %[ftmp7], %[ftmp5], %[ftmp3] \n\t"
+ "pxor %[ftmp8], %[ftmp6], %[ftmp4] \n\t"
+ "psubh %[ftmp7], %[ftmp7], %[ftmp3] \n\t"
+ "psubh %[ftmp8], %[ftmp8], %[ftmp4] \n\t"
+ "gssdlc1 %[ftmp7], 0x17(%[qcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp7], 0x10(%[qcoeff_ptr]) \n\t"
+ "gssdlc1 %[ftmp8], 0x1f(%[qcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp8], 0x18(%[qcoeff_ptr]) \n\t"
+
+ "gsldlc1 %[ftmp1], 0x17(%[inv_zig_zag]) \n\t"
+ "gsldrc1 %[ftmp1], 0x10(%[inv_zig_zag]) \n\t"
+ "gsldlc1 %[ftmp2], 0x1f(%[inv_zig_zag]) \n\t"
+ "gsldrc1 %[ftmp2], 0x18(%[inv_zig_zag]) \n\t"
+ "pcmpeqh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
+ "pcmpeqh %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
+ "pxor %[ftmp5], %[ftmp5], %[ones] \n\t"
+ "pxor %[ftmp6], %[ftmp6], %[ones] \n\t"
+ "pand %[ftmp5], %[ftmp5], %[ftmp1] \n\t"
+ "pand %[ftmp6], %[ftmp6], %[ftmp2] \n\t"
+ "pmaxsh %[ftmp11], %[ftmp5], %[ftmp6] \n\t"
+
+ "gsldlc1 %[ftmp5], 0x17(%[dequant_ptr]) \n\t"
+ "gsldrc1 %[ftmp5], 0x10(%[dequant_ptr]) \n\t"
+ "gsldlc1 %[ftmp6], 0x1f(%[dequant_ptr]) \n\t"
+ "gsldrc1 %[ftmp6], 0x18(%[dequant_ptr]) \n\t"
+ "pmullh %[ftmp5], %[ftmp5], %[ftmp7] \n\t"
+ "pmullh %[ftmp6], %[ftmp6], %[ftmp8] \n\t"
+ "gssdlc1 %[ftmp5], 0x17(%[dqcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp5], 0x10(%[dqcoeff_ptr]) \n\t"
+ "gssdlc1 %[ftmp6], 0x1f(%[dqcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp6], 0x18(%[dqcoeff_ptr]) \n\t"
+
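+      /* horizontal max of the four halfword lanes: fold word-high halves
+         down, broadcast lane 2 (0xaa) and fold again, then keep the low
+         16 bits as the eob */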
+ "dli %[tmp0], 0x10 \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+
+ "pmaxsh %[ftmp10], %[ftmp10], %[ftmp11] \n\t"
+ "psrlw %[ftmp11], %[ftmp10], %[ftmp9] \n\t"
+ "pmaxsh %[ftmp10], %[ftmp10], %[ftmp11] \n\t"
+ "dli %[tmp0], 0xaa \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "pshufh %[ftmp11], %[ftmp10], %[ftmp9] \n\t"
+ "pmaxsh %[ftmp10], %[ftmp10], %[ftmp11] \n\t"
+ "dli %[tmp0], 0xffff \n\t"
+ "dmtc1 %[tmp0], %[ftmp9] \n\t"
+ "pand %[ftmp10], %[ftmp10], %[ftmp9] \n\t"
+ "gssdlc1 %[ftmp10], 0x07(%[eob]) \n\t"
+ "gssdrc1 %[ftmp10], 0x00(%[eob]) \n\t"
+ : [ftmp0] "=&f"(ftmp[0]), [ftmp1] "=&f"(ftmp[1]), [ftmp2] "=&f"(ftmp[2]),
+ [ftmp3] "=&f"(ftmp[3]), [ftmp4] "=&f"(ftmp[4]), [ftmp5] "=&f"(ftmp[5]),
+ [ftmp6] "=&f"(ftmp[6]), [ftmp7] "=&f"(ftmp[7]), [ftmp8] "=&f"(ftmp[8]),
+ [ftmp9] "=&f"(ftmp[9]), [ftmp10] "=&f"(ftmp[10]),
+ [ftmp11] "=&f"(ftmp[11]), [ftmp12] "=&f"(ftmp[12]),
+ [tmp0] "=&r"(tmp[0]), [ones] "=&f"(ones)
+ : [coeff_ptr] "r"((mips_reg)coeff_ptr),
+ [qcoeff_ptr] "r"((mips_reg)qcoeff_ptr),
+ [dequant_ptr] "r"((mips_reg)dequant_ptr),
+ [round_ptr] "r"((mips_reg)round_ptr),
+ [quant_ptr] "r"((mips_reg)quant_ptr),
+ [dqcoeff_ptr] "r"((mips_reg)dqcoeff_ptr),
+ [inv_zig_zag] "r"((mips_reg)inv_zig_zag), [eob] "r"((mips_reg)&eob)
+ : "memory");
+
+ *d->eob = eob;
+}
+
+void vp8_regular_quantize_b_mmi(BLOCK *b, BLOCKD *d) {
+ int eob = 0;
+ int x, y, z, sz, zbin;
+ const int16_t *zbin_boost_ptr = b->zrun_zbin_boost;
+ const int16_t *coeff_ptr = b->coeff;
+ const int16_t *zbin_ptr = b->zbin;
+ const int16_t *round_ptr = b->round;
+ const int16_t *quant_ptr = b->quant;
+ const int16_t *quant_shift_ptr = b->quant_shift;
+ int16_t *qcoeff_ptr = d->qcoeff;
+ int16_t *dqcoeff_ptr = d->dqcoeff;
+ const int16_t *dequant_ptr = d->dequant;
+ const int16_t zbin_oq_value = b->zbin_extra;
+ register double ftmp0 asm("$f0");
+
+ // memset(qcoeff_ptr, 0, 32);
+ // memset(dqcoeff_ptr, 0, 32);
+ /* clang-format off */
+ __asm__ volatile (
+ "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "gssdlc1 %[ftmp0], 0x07(%[qcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x00(%[qcoeff_ptr]) \n\t"
+ "gssdlc1 %[ftmp0], 0x0f(%[qcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x08(%[qcoeff_ptr]) \n\t"
+ "gssdlc1 %[ftmp0], 0x17(%[qcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x10(%[qcoeff_ptr]) \n\t"
+ "gssdlc1 %[ftmp0], 0x1f(%[qcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x18(%[qcoeff_ptr]) \n\t"
+
+ "gssdlc1 %[ftmp0], 0x07(%[dqcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x00(%[dqcoeff_ptr]) \n\t"
+ "gssdlc1 %[ftmp0], 0x0f(%[dqcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x08(%[dqcoeff_ptr]) \n\t"
+ "gssdlc1 %[ftmp0], 0x17(%[dqcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x10(%[dqcoeff_ptr]) \n\t"
+ "gssdlc1 %[ftmp0], 0x1f(%[dqcoeff_ptr]) \n\t"
+ "gssdrc1 %[ftmp0], 0x18(%[dqcoeff_ptr]) \n\t"
+ : [ftmp0]"=&f"(ftmp0)
+ : [qcoeff_ptr]"r"(qcoeff_ptr), [dqcoeff_ptr]"r"(dqcoeff_ptr)
+ : "memory"
+ );
+ /* clang-format on */
+
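+  /* REGULAR_SELECT_EOB(scan position, raster index): the raster indices below
+   * walk the VP8 zig-zag scan order 0, 1, 4, 8, 5, 2, 3, 6, ... */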
+ REGULAR_SELECT_EOB(1, 0);
+ REGULAR_SELECT_EOB(2, 1);
+ REGULAR_SELECT_EOB(3, 4);
+ REGULAR_SELECT_EOB(4, 8);
+ REGULAR_SELECT_EOB(5, 5);
+ REGULAR_SELECT_EOB(6, 2);
+ REGULAR_SELECT_EOB(7, 3);
+ REGULAR_SELECT_EOB(8, 6);
+ REGULAR_SELECT_EOB(9, 9);
+ REGULAR_SELECT_EOB(10, 12);
+ REGULAR_SELECT_EOB(11, 13);
+ REGULAR_SELECT_EOB(12, 10);
+ REGULAR_SELECT_EOB(13, 7);
+ REGULAR_SELECT_EOB(14, 11);
+ REGULAR_SELECT_EOB(15, 14);
+ REGULAR_SELECT_EOB(16, 15);
+
+ *d->eob = (char)eob;
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/mips/msa/dct_msa.c b/media/libvpx/libvpx/vp8/encoder/mips/msa/dct_msa.c
new file mode 100644
index 0000000000..3084667552
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mips/msa/dct_msa.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+
+#define TRANSPOSE4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ v8i16 s0_m, s1_m, tp0_m, tp1_m, tp2_m, tp3_m; \
+ \
+ ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
+ ILVRL_H2_SH(s1_m, s0_m, tp0_m, tp1_m); \
+ ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
+ ILVRL_H2_SH(s1_m, s0_m, tp2_m, tp3_m); \
+ PCKEV_D2_SH(tp2_m, tp0_m, tp3_m, tp1_m, out0, out2); \
+ PCKOD_D2_SH(tp2_m, tp0_m, tp3_m, tp1_m, out1, out3); \
+ }
+
+#define SET_DOTP_VALUES(coeff, val0, val1, val2, const1, const2) \
+ { \
+ v8i16 tmp0_m; \
+ \
+ SPLATI_H3_SH(coeff, val0, val1, val2, tmp0_m, const1, const2); \
+ ILVEV_H2_SH(tmp0_m, const1, const2, tmp0_m, const1, const2); \
+ }
+
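+/* Per-lane flag: 1 in every halfword lane where in0 is nonzero, 0 elsewhere
+ * (ceqi yields all-ones only for zero lanes; the xor/and pair inverts that
+ * and keeps bit 0). */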
+#define RET_1_IF_NZERO_H(in0) \
+ ({ \
+ v8i16 tmp0_m; \
+ v8i16 one_m = __msa_ldi_h(1); \
+ \
+ tmp0_m = __msa_ceqi_h(in0, 0); \
+ tmp0_m = tmp0_m ^ 255; \
+ tmp0_m = one_m & tmp0_m; \
+ \
+ tmp0_m; \
+ })
+
+#define RET_1_IF_NZERO_W(in0) \
+ ({ \
+ v4i32 tmp0_m; \
+ v4i32 one_m = __msa_ldi_w(1); \
+ \
+ tmp0_m = __msa_ceqi_w(in0, 0); \
+ tmp0_m = tmp0_m ^ 255; \
+ tmp0_m = one_m & tmp0_m; \
+ \
+ tmp0_m; \
+ })
+
+#define RET_1_IF_NEG_W(in0) \
+ ({ \
+ v4i32 tmp0_m; \
+ \
+ v4i32 one_m = __msa_ldi_w(1); \
+ tmp0_m = __msa_clti_s_w(in0, 0); \
+ tmp0_m = one_m & tmp0_m; \
+ \
+ tmp0_m; \
+ })
+
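+/* 4x4 forward DCT: two 1-D passes separated by a transpose.  'coeff' packs
+ * the VP8 DCT constants 2217/5352 together with the pass-1 (>> 12) and
+ * pass-2 (>> 16) rounding terms. */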
+void vp8_short_fdct4x4_msa(int16_t *input, int16_t *output, int32_t pitch) {
+ v8i16 in0, in1, in2, in3;
+ v8i16 temp0, temp1;
+ v8i16 const0, const1;
+ v8i16 coeff = { 2217, 5352, -5352, 14500, 7500, 12000, 25000, 26000 };
+ v4i32 out0, out1, out2, out3;
+ v8i16 zero = { 0 };
+
+ LD_SH4(input, pitch / 2, in0, in1, in2, in3);
+ TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+
+ BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);
+ SLLI_4V(temp0, temp1, in1, in3, 3);
+ in0 = temp0 + temp1;
+ in2 = temp0 - temp1;
+ SET_DOTP_VALUES(coeff, 0, 1, 2, const0, const1);
+ temp0 = __msa_ilvr_h(in3, in1);
+ in1 = __msa_splati_h(coeff, 3);
+ out0 = (v4i32)__msa_ilvev_h(zero, in1);
+ coeff = __msa_ilvl_h(zero, coeff);
+ out1 = __msa_splati_w((v4i32)coeff, 0);
+ DPADD_SH2_SW(temp0, temp0, const0, const1, out0, out1);
+ out0 >>= 12;
+ out1 >>= 12;
+ PCKEV_H2_SH(out0, out0, out1, out1, in1, in3);
+ TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+
+ BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);
+ in0 = temp0 + temp1 + 7;
+ in2 = temp0 - temp1 + 7;
+ in0 >>= 4;
+ in2 >>= 4;
+ ILVR_H2_SW(zero, in0, zero, in2, out0, out2);
+ temp1 = RET_1_IF_NZERO_H(in3);
+ ILVR_H2_SH(zero, temp1, in3, in1, temp1, temp0);
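+  /* pass-2 rounding terms: 25000 + 26000 = 51000 for one output pair and
+   * 12000 for the other, matching the scalar fdct */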
+ SPLATI_W2_SW(coeff, 2, out3, out1);
+ out3 += out1;
+ out1 = __msa_splati_w((v4i32)coeff, 1);
+ DPADD_SH2_SW(temp0, temp0, const0, const1, out1, out3);
+ out1 >>= 16;
+ out3 >>= 16;
+ out1 += (v4i32)temp1;
+ PCKEV_H2_SH(out1, out0, out3, out2, in0, in2);
+ ST_SH2(in0, in2, output, 8);
+}
+
+void vp8_short_fdct8x4_msa(int16_t *input, int16_t *output, int32_t pitch) {
+ v8i16 in0, in1, in2, in3;
+ v8i16 temp0, temp1, tmp0, tmp1;
+ v8i16 const0, const1, const2;
+ v8i16 coeff = { 2217, 5352, -5352, 14500, 7500, 12000, 25000, 26000 };
+ v8i16 zero = { 0 };
+ v4i32 vec0_w, vec1_w, vec2_w, vec3_w;
+
+ LD_SH4(input, pitch / 2, in0, in1, in2, in3);
+ TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3);
+
+ BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);
+ SLLI_4V(temp0, temp1, in1, in3, 3);
+ in0 = temp0 + temp1;
+ in2 = temp0 - temp1;
+ SET_DOTP_VALUES(coeff, 0, 1, 2, const1, const2);
+ temp0 = __msa_splati_h(coeff, 3);
+ vec1_w = (v4i32)__msa_ilvev_h(zero, temp0);
+ coeff = __msa_ilvl_h(zero, coeff);
+ vec3_w = __msa_splati_w((v4i32)coeff, 0);
+ ILVRL_H2_SH(in3, in1, tmp1, tmp0);
+ vec0_w = vec1_w;
+ vec2_w = vec3_w;
+ DPADD_SH4_SW(tmp1, tmp0, tmp1, tmp0, const1, const1, const2, const2, vec0_w,
+ vec1_w, vec2_w, vec3_w);
+ SRA_4V(vec1_w, vec0_w, vec3_w, vec2_w, 12);
+ PCKEV_H2_SH(vec1_w, vec0_w, vec3_w, vec2_w, in1, in3);
+ TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3);
+
+ BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);
+ in0 = temp0 + temp1 + 7;
+ in2 = temp0 - temp1 + 7;
+ in0 >>= 4;
+ in2 >>= 4;
+ SPLATI_W2_SW(coeff, 2, vec3_w, vec1_w);
+ vec3_w += vec1_w;
+ vec1_w = __msa_splati_w((v4i32)coeff, 1);
+ const0 = RET_1_IF_NZERO_H(in3);
+ ILVRL_H2_SH(in3, in1, tmp1, tmp0);
+ vec0_w = vec1_w;
+ vec2_w = vec3_w;
+ DPADD_SH4_SW(tmp1, tmp0, tmp1, tmp0, const1, const1, const2, const2, vec0_w,
+ vec1_w, vec2_w, vec3_w);
+ SRA_4V(vec1_w, vec0_w, vec3_w, vec2_w, 16);
+ PCKEV_H2_SH(vec1_w, vec0_w, vec3_w, vec2_w, in1, in3);
+ in1 += const0;
+ PCKEV_D2_SH(in1, in0, in3, in2, temp0, temp1);
+ ST_SH2(temp0, temp1, output, 8);
+
+ PCKOD_D2_SH(in1, in0, in3, in2, in0, in2);
+ ST_SH2(in0, in2, output + 16, 8);
+}
+
+void vp8_short_walsh4x4_msa(int16_t *input, int16_t *output, int32_t pitch) {
+ v8i16 in0_h, in1_h, in2_h, in3_h;
+ v4i32 in0_w, in1_w, in2_w, in3_w, temp0, temp1, temp2, temp3;
+
+ LD_SH4(input, pitch / 2, in0_h, in1_h, in2_h, in3_h);
+ TRANSPOSE4x4_SH_SH(in0_h, in1_h, in2_h, in3_h, in0_h, in1_h, in2_h, in3_h);
+
+ UNPCK_R_SH_SW(in0_h, in0_w);
+ UNPCK_R_SH_SW(in1_h, in1_w);
+ UNPCK_R_SH_SW(in2_h, in2_w);
+ UNPCK_R_SH_SW(in3_h, in3_w);
+ BUTTERFLY_4(in0_w, in1_w, in3_w, in2_w, temp0, temp3, temp2, temp1);
+ SLLI_4V(temp0, temp1, temp2, temp3, 2);
+ BUTTERFLY_4(temp0, temp1, temp2, temp3, in0_w, in1_w, in2_w, in3_w);
+ temp0 = RET_1_IF_NZERO_W(temp0);
+ in0_w += temp0;
+ TRANSPOSE4x4_SW_SW(in0_w, in1_w, in2_w, in3_w, in0_w, in1_w, in2_w, in3_w);
+
+ BUTTERFLY_4(in0_w, in1_w, in3_w, in2_w, temp0, temp3, temp2, temp1);
+ BUTTERFLY_4(temp0, temp1, temp2, temp3, in0_w, in1_w, in2_w, in3_w);
+ in0_w += RET_1_IF_NEG_W(in0_w);
+ in1_w += RET_1_IF_NEG_W(in1_w);
+ in2_w += RET_1_IF_NEG_W(in2_w);
+ in3_w += RET_1_IF_NEG_W(in3_w);
+ ADD4(in0_w, 3, in1_w, 3, in2_w, 3, in3_w, 3, in0_w, in1_w, in2_w, in3_w);
+ SRA_4V(in0_w, in1_w, in2_w, in3_w, 3);
+ PCKEV_H2_SH(in1_w, in0_w, in3_w, in2_w, in0_h, in1_h);
+ ST_SH2(in0_h, in1_h, output, 8);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/mips/msa/denoising_msa.c b/media/libvpx/libvpx/vp8/encoder/mips/msa/denoising_msa.c
new file mode 100644
index 0000000000..f8b653a9a7
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mips/msa/denoising_msa.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include "./vp8_rtcd.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+#include "vp8/encoder/denoising.h"
+
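+/* Motion-compensated temporal denoiser for a 16x16 luma block.  Each pixel of
+ * the running average moves toward the motion-compensated average by an
+ * adjustment picked from adj_val according to the per-pixel difference
+ * magnitude (tiny differences are copied outright).  Returns FILTER_BLOCK if
+ * the filtered block is kept, COPY_BLOCK if the caller should copy the source
+ * block instead. */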
+int32_t vp8_denoiser_filter_msa(uint8_t *mc_running_avg_y_ptr,
+ int32_t mc_avg_y_stride,
+ uint8_t *running_avg_y_ptr,
+ int32_t avg_y_stride, uint8_t *sig_ptr,
+ int32_t sig_stride, uint32_t motion_magnitude,
+ int32_t increase_denoising) {
+ uint8_t *running_avg_y_start = running_avg_y_ptr;
+ uint8_t *sig_start = sig_ptr;
+ int32_t cnt = 0;
+ int32_t sum_diff = 0;
+ int32_t shift_inc1 = 3;
+ int32_t delta = 0;
+ int32_t sum_diff_thresh;
+ v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v16u8 src8, src9, src10, src11, src12, src13, src14, src15;
+ v16u8 mc_running_avg_y0, running_avg_y, sig0;
+ v16u8 mc_running_avg_y1, running_avg_y1, sig1;
+ v16u8 coeff0, coeff1;
+ v8i16 diff0, diff1, abs_diff0, abs_diff1, abs_diff_neg0, abs_diff_neg1;
+ v8i16 adjust0, adjust1, adjust2, adjust3;
+ v8i16 shift_inc1_vec = { 0 };
+ v8i16 col_sum0 = { 0 };
+ v8i16 col_sum1 = { 0 };
+ v8i16 col_sum2 = { 0 };
+ v8i16 col_sum3 = { 0 };
+ v8i16 temp0_h, temp1_h, temp2_h, temp3_h, cmp, delta_vec;
+ v4i32 temp0_w;
+ v2i64 temp0_d, temp1_d;
+ v8i16 zero = { 0 };
+ v8i16 one = __msa_ldi_h(1);
+ v8i16 four = __msa_ldi_h(4);
+ v8i16 val_127 = __msa_ldi_h(127);
+ v8i16 adj_val = { 6, 4, 3, 0, -6, -4, -3, 0 };
+
+ if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) {
+ adj_val = __msa_add_a_h(adj_val, one);
+ if (increase_denoising) {
+ adj_val = __msa_add_a_h(adj_val, one);
+ shift_inc1 = 4;
+ }
+
+ temp0_h = zero - adj_val;
+ adj_val = (v8i16)__msa_ilvev_d((v2i64)temp0_h, (v2i64)adj_val);
+ }
+
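+  /* cnt is still 0 here: reset lanes 3 and 7, which the magnitude bump above
+   * may have made nonzero, so a mask value of 3 yields no adjustment. */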
+ adj_val = __msa_insert_h(adj_val, 3, cnt);
+ adj_val = __msa_insert_h(adj_val, 7, cnt);
+ shift_inc1_vec = __msa_fill_h(shift_inc1);
+
+ for (cnt = 8; cnt--;) {
+ v8i16 mask0 = { 0 };
+ v8i16 mask1 = { 0 };
+
+ mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr);
+ sig0 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ mc_running_avg_y_ptr += mc_avg_y_stride;
+
+ mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr);
+ sig1 = LD_UB(sig_ptr);
+
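+    /* Build per-lane shuffle indices into adj_val: each threshold met
+     * (|diff| <= 15, |diff| <= 7, |diff| < shift_inc1) adds 1, and a
+     * non-positive diff adds 4 to select the negated second half. */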
+ ILVRL_B2_UB(mc_running_avg_y0, sig0, coeff0, coeff1);
+ HSUB_UB2_SH(coeff0, coeff1, diff0, diff1);
+ abs_diff0 = __msa_add_a_h(diff0, zero);
+ abs_diff1 = __msa_add_a_h(diff1, zero);
+ cmp = __msa_clei_s_h(abs_diff0, 15);
+ cmp = cmp & one;
+ mask0 += cmp;
+ cmp = __msa_clei_s_h(abs_diff0, 7);
+ cmp = cmp & one;
+ mask0 += cmp;
+ cmp = abs_diff0 < shift_inc1_vec;
+ cmp = cmp & one;
+ mask0 += cmp;
+ cmp = __msa_clei_s_h(abs_diff1, 15);
+ cmp = cmp & one;
+ mask1 += cmp;
+ cmp = __msa_clei_s_h(abs_diff1, 7);
+ cmp = cmp & one;
+ mask1 += cmp;
+ cmp = abs_diff1 < shift_inc1_vec;
+ cmp = cmp & one;
+ mask1 += cmp;
+ temp0_h = __msa_clei_s_h(diff0, 0);
+ temp0_h = temp0_h & four;
+ mask0 += temp0_h;
+ temp1_h = __msa_clei_s_h(diff1, 0);
+ temp1_h = temp1_h & four;
+ mask1 += temp1_h;
+ VSHF_H2_SH(adj_val, adj_val, adj_val, adj_val, mask0, mask1, adjust0,
+ adjust1);
+ temp2_h = __msa_ceqi_h(adjust0, 0);
+ temp3_h = __msa_ceqi_h(adjust1, 0);
+ adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h);
+ adjust1 = (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)diff1, (v16u8)temp3_h);
+ ADD2(col_sum0, adjust0, col_sum1, adjust1, col_sum0, col_sum1);
+ UNPCK_UB_SH(sig0, temp0_h, temp1_h);
+ ADD2(temp0_h, adjust0, temp1_h, adjust1, temp0_h, temp1_h);
+ MAXI_SH2_SH(temp0_h, temp1_h, 0);
+ SAT_UH2_SH(temp0_h, temp1_h, 7);
+ temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h);
+ running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h);
+ running_avg_y =
+ __msa_bmnz_v(running_avg_y, mc_running_avg_y0, (v16u8)temp2_h);
+ ST_UB(running_avg_y, running_avg_y_ptr);
+ running_avg_y_ptr += avg_y_stride;
+
+ mask0 = zero;
+ mask1 = zero;
+ ILVRL_B2_UB(mc_running_avg_y1, sig1, coeff0, coeff1);
+ HSUB_UB2_SH(coeff0, coeff1, diff0, diff1);
+ abs_diff0 = __msa_add_a_h(diff0, zero);
+ abs_diff1 = __msa_add_a_h(diff1, zero);
+ cmp = __msa_clei_s_h(abs_diff0, 15);
+ cmp = cmp & one;
+ mask0 += cmp;
+ cmp = __msa_clei_s_h(abs_diff0, 7);
+ cmp = cmp & one;
+ mask0 += cmp;
+ cmp = abs_diff0 < shift_inc1_vec;
+ cmp = cmp & one;
+ mask0 += cmp;
+ cmp = __msa_clei_s_h(abs_diff1, 15);
+ cmp = cmp & one;
+ mask1 += cmp;
+ cmp = __msa_clei_s_h(abs_diff1, 7);
+ cmp = cmp & one;
+ mask1 += cmp;
+ cmp = abs_diff1 < shift_inc1_vec;
+ cmp = cmp & one;
+ mask1 += cmp;
+ temp0_h = __msa_clei_s_h(diff0, 0);
+ temp0_h = temp0_h & four;
+ mask0 += temp0_h;
+ temp1_h = __msa_clei_s_h(diff1, 0);
+ temp1_h = temp1_h & four;
+ mask1 += temp1_h;
+ VSHF_H2_SH(adj_val, adj_val, adj_val, adj_val, mask0, mask1, adjust0,
+ adjust1);
+ temp2_h = __msa_ceqi_h(adjust0, 0);
+ temp3_h = __msa_ceqi_h(adjust1, 0);
+ adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h);
+ adjust1 = (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)diff1, (v16u8)temp3_h);
+ ADD2(col_sum0, adjust0, col_sum1, adjust1, col_sum0, col_sum1);
+ UNPCK_UB_SH(sig1, temp0_h, temp1_h);
+ ADD2(temp0_h, adjust0, temp1_h, adjust1, temp0_h, temp1_h);
+ MAXI_SH2_SH(temp0_h, temp1_h, 0);
+ SAT_UH2_SH(temp0_h, temp1_h, 7);
+ temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h);
+ running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h);
+ running_avg_y =
+ __msa_bmnz_v(running_avg_y, mc_running_avg_y1, (v16u8)temp2_h);
+ ST_UB(running_avg_y, running_avg_y_ptr);
+ sig_ptr += sig_stride;
+ mc_running_avg_y_ptr += mc_avg_y_stride;
+ running_avg_y_ptr += avg_y_stride;
+ }
+
+ col_sum0 = __msa_min_s_h(col_sum0, val_127);
+ col_sum1 = __msa_min_s_h(col_sum1, val_127);
+ temp0_h = col_sum0 + col_sum1;
+ temp0_w = __msa_hadd_s_w(temp0_h, temp0_h);
+ temp0_d = __msa_hadd_s_d(temp0_w, temp0_w);
+ temp1_d = __msa_splati_d(temp0_d, 1);
+ temp0_d += temp1_d;
+ sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0);
+ sig_ptr -= sig_stride * 16;
+ mc_running_avg_y_ptr -= mc_avg_y_stride * 16;
+ running_avg_y_ptr -= avg_y_stride * 16;
+
+  sum_diff_thresh = SUM_DIFF_THRESHOLD;
+  if (increase_denoising) {
+    sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
+  }
+
+ if (abs(sum_diff) > sum_diff_thresh) {
+ delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1;
+ delta_vec = __msa_fill_h(delta);
+ if (delta < 4) {
+ for (cnt = 8; cnt--;) {
+ running_avg_y = LD_UB(running_avg_y_ptr);
+ mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr);
+ sig0 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ mc_running_avg_y_ptr += mc_avg_y_stride;
+ running_avg_y_ptr += avg_y_stride;
+ mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr);
+ sig1 = LD_UB(sig_ptr);
+ running_avg_y1 = LD_UB(running_avg_y_ptr);
+ ILVRL_B2_UB(mc_running_avg_y0, sig0, coeff0, coeff1);
+ HSUB_UB2_SH(coeff0, coeff1, diff0, diff1);
+ abs_diff0 = __msa_add_a_h(diff0, zero);
+ abs_diff1 = __msa_add_a_h(diff1, zero);
+ temp0_h = abs_diff0 < delta_vec;
+ temp1_h = abs_diff1 < delta_vec;
+ abs_diff0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)delta_vec,
+ (v16u8)temp0_h);
+ abs_diff1 = (v8i16)__msa_bmz_v((v16u8)abs_diff1, (v16u8)delta_vec,
+ (v16u8)temp1_h);
+ SUB2(zero, abs_diff0, zero, abs_diff1, abs_diff_neg0, abs_diff_neg1);
+ temp0_h = __msa_clei_s_h(diff0, 0);
+ temp1_h = __msa_clei_s_h(diff1, 0);
+ adjust0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0,
+ (v16u8)temp0_h);
+ adjust1 = (v8i16)__msa_bmnz_v((v16u8)abs_diff1, (v16u8)abs_diff_neg1,
+ (v16u8)temp1_h);
+ ILVRL_B2_SH(zero, running_avg_y, temp2_h, temp3_h);
+ ADD2(temp2_h, adjust0, temp3_h, adjust1, adjust2, adjust3);
+ MAXI_SH2_SH(adjust2, adjust3, 0);
+ SAT_UH2_SH(adjust2, adjust3, 7);
+ temp0_h = __msa_ceqi_h(diff0, 0);
+ temp1_h = __msa_ceqi_h(diff1, 0);
+ adjust2 =
+ (v8i16)__msa_bmz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h);
+ adjust3 =
+ (v8i16)__msa_bmz_v((v16u8)adjust3, (v16u8)temp3_h, (v16u8)temp1_h);
+ adjust0 =
+ (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h);
+ adjust1 =
+ (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)zero, (v16u8)temp1_h);
+ ADD2(col_sum2, adjust0, col_sum3, adjust1, col_sum2, col_sum3);
+ running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, (v16i8)adjust2);
+ ST_UB(running_avg_y, running_avg_y_ptr - avg_y_stride);
+ ILVRL_B2_UB(mc_running_avg_y1, sig1, coeff0, coeff1);
+ HSUB_UB2_SH(coeff0, coeff1, diff0, diff1);
+ abs_diff0 = __msa_add_a_h(diff0, zero);
+ abs_diff1 = __msa_add_a_h(diff1, zero);
+ temp0_h = abs_diff0 < delta_vec;
+ temp1_h = abs_diff1 < delta_vec;
+ abs_diff0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)delta_vec,
+ (v16u8)temp0_h);
+ abs_diff1 = (v8i16)__msa_bmz_v((v16u8)abs_diff1, (v16u8)delta_vec,
+ (v16u8)temp1_h);
+ SUB2(zero, abs_diff0, zero, abs_diff1, abs_diff_neg0, abs_diff_neg1);
+ temp0_h = __msa_clei_s_h(diff0, 0);
+ temp1_h = __msa_clei_s_h(diff1, 0);
+ adjust0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0,
+ (v16u8)temp0_h);
+ adjust1 = (v8i16)__msa_bmnz_v((v16u8)abs_diff1, (v16u8)abs_diff_neg1,
+ (v16u8)temp1_h);
+        ILVRL_B2_SH(zero, running_avg_y1, temp2_h, temp3_h);
+ ADD2(temp2_h, adjust0, temp3_h, adjust1, adjust2, adjust3);
+ MAXI_SH2_SH(adjust2, adjust3, 0);
+ SAT_UH2_SH(adjust2, adjust3, 7);
+ temp0_h = __msa_ceqi_h(diff0, 0);
+ temp1_h = __msa_ceqi_h(diff1, 0);
+ adjust2 =
+ (v8i16)__msa_bmz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h);
+ adjust3 =
+ (v8i16)__msa_bmz_v((v16u8)adjust3, (v16u8)temp3_h, (v16u8)temp1_h);
+ adjust0 =
+ (v8i16)__msa_bmz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h);
+ adjust1 =
+ (v8i16)__msa_bmz_v((v16u8)adjust1, (v16u8)zero, (v16u8)temp1_h);
+ ADD2(col_sum2, adjust0, col_sum3, adjust1, col_sum2, col_sum3);
+ running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, (v16i8)adjust2);
+ ST_UB(running_avg_y, running_avg_y_ptr);
+ running_avg_y_ptr += avg_y_stride;
+ }
+
+ col_sum2 = __msa_min_s_h(col_sum2, val_127);
+ col_sum3 = __msa_min_s_h(col_sum3, val_127);
+ temp0_h = col_sum2 + col_sum3;
+ temp0_w = __msa_hadd_s_w(temp0_h, temp0_h);
+ temp0_d = __msa_hadd_s_d(temp0_w, temp0_w);
+ temp1_d = __msa_splati_d(temp0_d, 1);
+ temp0_d += (v2i64)temp1_d;
+ sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0);
+ if (abs(sum_diff) > SUM_DIFF_THRESHOLD) {
+ return COPY_BLOCK;
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+
+ LD_UB8(sig_start, sig_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ sig_start += (8 * sig_stride);
+ LD_UB8(sig_start, sig_stride, src8, src9, src10, src11, src12, src13, src14,
+ src15);
+
+ ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, running_avg_y_start,
+ avg_y_stride);
+ running_avg_y_start += (8 * avg_y_stride);
+ ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15,
+ running_avg_y_start, avg_y_stride);
+
+ return FILTER_BLOCK;
+}
+
+int32_t vp8_denoiser_filter_uv_msa(
+ uint8_t *mc_running_avg_y_ptr, int32_t mc_avg_y_stride,
+ uint8_t *running_avg_y_ptr, int32_t avg_y_stride, uint8_t *sig_ptr,
+ int32_t sig_stride, uint32_t motion_magnitude, int32_t increase_denoising) {
+ uint8_t *running_avg_y_start = running_avg_y_ptr;
+ uint8_t *sig_start = sig_ptr;
+ int32_t cnt = 0;
+ int32_t sum_diff = 0;
+ int32_t shift_inc1 = 3;
+ int32_t delta = 0;
+ int32_t sum_block = 0;
+ int32_t sum_diff_thresh;
+ int64_t dst0, dst1, src0, src1, src2, src3;
+ v16u8 mc_running_avg_y0, running_avg_y, sig0;
+ v16u8 mc_running_avg_y1, running_avg_y1, sig1;
+ v16u8 sig2, sig3, sig4, sig5, sig6, sig7;
+ v16u8 coeff0;
+ v8i16 diff0, abs_diff0, abs_diff_neg0;
+ v8i16 adjust0, adjust2;
+ v8i16 shift_inc1_vec = { 0 };
+ v8i16 col_sum0 = { 0 };
+ v8i16 temp0_h, temp2_h, cmp, delta_vec;
+ v4i32 temp0_w;
+ v2i64 temp0_d, temp1_d;
+ v16i8 zero = { 0 };
+ v8i16 one = __msa_ldi_h(1);
+ v8i16 four = __msa_ldi_h(4);
+ v8i16 adj_val = { 6, 4, 3, 0, -6, -4, -3, 0 };
+
+ sig0 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig0);
+ sig1 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig1);
+ sig2 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig2);
+ sig3 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig3);
+ sig4 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig4);
+ sig5 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig5);
+ sig6 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig6);
+ sig7 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig7);
+ temp0_w = __msa_hadd_s_w(temp0_h, temp0_h);
+ temp0_d = __msa_hadd_s_d(temp0_w, temp0_w);
+ temp1_d = __msa_splati_d(temp0_d, 1);
+ temp0_d += temp1_d;
+ sum_block = __msa_copy_s_w((v4i32)temp0_d, 0);
+ sig_ptr -= sig_stride * 8;
+
+ if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) {
+ return COPY_BLOCK;
+ }
+
+ if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) {
+ adj_val = __msa_add_a_h(adj_val, one);
+
+ if (increase_denoising) {
+ adj_val = __msa_add_a_h(adj_val, one);
+ shift_inc1 = 4;
+ }
+
+ temp0_h = (v8i16)zero - adj_val;
+ adj_val = (v8i16)__msa_ilvev_d((v2i64)temp0_h, (v2i64)adj_val);
+ }
+
+ adj_val = __msa_insert_h(adj_val, 3, cnt);
+ adj_val = __msa_insert_h(adj_val, 7, cnt);
+ shift_inc1_vec = __msa_fill_h(shift_inc1);
+ for (cnt = 4; cnt--;) {
+ v8i16 mask0 = { 0 };
+ mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr);
+ sig0 = LD_UB(sig_ptr);
+ sig_ptr += sig_stride;
+ mc_running_avg_y_ptr += mc_avg_y_stride;
+ mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr);
+ sig1 = LD_UB(sig_ptr);
+ coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y0, (v16i8)sig0);
+ diff0 = __msa_hsub_u_h(coeff0, coeff0);
+ abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero);
+ cmp = __msa_clei_s_h(abs_diff0, 15);
+ cmp = cmp & one;
+ mask0 += cmp;
+ cmp = __msa_clei_s_h(abs_diff0, 7);
+ cmp = cmp & one;
+ mask0 += cmp;
+ cmp = abs_diff0 < shift_inc1_vec;
+ cmp = cmp & one;
+ mask0 += cmp;
+ temp0_h = __msa_clei_s_h(diff0, 0);
+ temp0_h = temp0_h & four;
+ mask0 += temp0_h;
+ adjust0 = __msa_vshf_h(mask0, adj_val, adj_val);
+ temp2_h = __msa_ceqi_h(adjust0, 0);
+ adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h);
+ col_sum0 += adjust0;
+ temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig0);
+ temp0_h += adjust0;
+ temp0_h = __msa_maxi_s_h(temp0_h, 0);
+ temp0_h = (v8i16)__msa_sat_u_h((v8u16)temp0_h, 7);
+ temp2_h = (v8i16)__msa_pckev_b((v16i8)temp2_h, (v16i8)temp2_h);
+ running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp0_h, (v16i8)temp0_h);
+ running_avg_y =
+ __msa_bmnz_v(running_avg_y, mc_running_avg_y0, (v16u8)temp2_h);
+ dst0 = __msa_copy_s_d((v2i64)running_avg_y, 0);
+ SD(dst0, running_avg_y_ptr);
+ running_avg_y_ptr += avg_y_stride;
+
+ mask0 = __msa_ldi_h(0);
+ coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y1, (v16i8)sig1);
+ diff0 = __msa_hsub_u_h(coeff0, coeff0);
+ abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero);
+ cmp = __msa_clei_s_h(abs_diff0, 15);
+ cmp = cmp & one;
+ mask0 += cmp;
+ cmp = __msa_clei_s_h(abs_diff0, 7);
+ cmp = cmp & one;
+ mask0 += cmp;
+ cmp = abs_diff0 < shift_inc1_vec;
+ cmp = cmp & one;
+ mask0 += cmp;
+ temp0_h = __msa_clei_s_h(diff0, 0);
+ temp0_h = temp0_h & four;
+ mask0 += temp0_h;
+ adjust0 = __msa_vshf_h(mask0, adj_val, adj_val);
+ temp2_h = __msa_ceqi_h(adjust0, 0);
+ adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h);
+ col_sum0 += adjust0;
+ temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig1);
+ temp0_h += adjust0;
+ temp0_h = __msa_maxi_s_h(temp0_h, 0);
+ temp0_h = (v8i16)__msa_sat_u_h((v8u16)temp0_h, 7);
+
+ temp2_h = (v8i16)__msa_pckev_b((v16i8)temp2_h, (v16i8)temp2_h);
+ running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp0_h, (v16i8)temp0_h);
+ running_avg_y =
+ __msa_bmnz_v(running_avg_y, mc_running_avg_y1, (v16u8)temp2_h);
+ dst1 = __msa_copy_s_d((v2i64)running_avg_y, 0);
+ SD(dst1, running_avg_y_ptr);
+
+ sig_ptr += sig_stride;
+ mc_running_avg_y_ptr += mc_avg_y_stride;
+ running_avg_y_ptr += avg_y_stride;
+ }
+
+ temp0_h = col_sum0;
+ temp0_w = __msa_hadd_s_w(temp0_h, temp0_h);
+ temp0_d = __msa_hadd_s_d(temp0_w, temp0_w);
+ temp1_d = __msa_splati_d(temp0_d, 1);
+ temp0_d += temp1_d;
+ sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0);
+ sig_ptr -= sig_stride * 8;
+ mc_running_avg_y_ptr -= mc_avg_y_stride * 8;
+ running_avg_y_ptr -= avg_y_stride * 8;
+ sum_diff_thresh = SUM_DIFF_THRESHOLD_UV;
+
+ if (increase_denoising) {
+ sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV;
+ }
+
+ if (abs(sum_diff) > sum_diff_thresh) {
+ delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1;
+ delta_vec = __msa_fill_h(delta);
+ if (delta < 4) {
+ for (cnt = 4; cnt--;) {
+ running_avg_y = LD_UB(running_avg_y_ptr);
+ mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr);
+ sig0 = LD_UB(sig_ptr);
+ /* Update pointers for next iteration. */
+ sig_ptr += sig_stride;
+ mc_running_avg_y_ptr += mc_avg_y_stride;
+ running_avg_y_ptr += avg_y_stride;
+
+ mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr);
+ sig1 = LD_UB(sig_ptr);
+ running_avg_y1 = LD_UB(running_avg_y_ptr);
+
+ coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y0, (v16i8)sig0);
+ diff0 = __msa_hsub_u_h(coeff0, coeff0);
+ abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero);
+ temp0_h = delta_vec < abs_diff0;
+ abs_diff0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)delta_vec,
+ (v16u8)temp0_h);
+ abs_diff_neg0 = (v8i16)zero - abs_diff0;
+ temp0_h = __msa_clei_s_h(diff0, 0);
+ adjust0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0,
+ (v16u8)temp0_h);
+ temp2_h = (v8i16)__msa_ilvr_b(zero, (v16i8)running_avg_y);
+ adjust2 = temp2_h + adjust0;
+ adjust2 = __msa_maxi_s_h(adjust2, 0);
+ adjust2 = (v8i16)__msa_sat_u_h((v8u16)adjust2, 7);
+ temp0_h = __msa_ceqi_h(diff0, 0);
+ adjust2 =
+ (v8i16)__msa_bmnz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h);
+ adjust0 =
+ (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h);
+ col_sum0 += adjust0;
+ running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust2, (v16i8)adjust2);
+ dst0 = __msa_copy_s_d((v2i64)running_avg_y, 0);
+ SD(dst0, running_avg_y_ptr - avg_y_stride);
+
+ coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y1, (v16i8)sig1);
+ diff0 = __msa_hsub_u_h(coeff0, coeff0);
+ abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero);
+ temp0_h = delta_vec < abs_diff0;
+ abs_diff0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)delta_vec,
+ (v16u8)temp0_h);
+ abs_diff_neg0 = (v8i16)zero - abs_diff0;
+ temp0_h = __msa_clei_s_h(diff0, 0);
+ adjust0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0,
+ (v16u8)temp0_h);
+ temp2_h = (v8i16)__msa_ilvr_b(zero, (v16i8)running_avg_y1);
+ adjust2 = temp2_h + adjust0;
+ adjust2 = __msa_maxi_s_h(adjust2, 0);
+ adjust2 = (v8i16)__msa_sat_u_h((v8u16)adjust2, 7);
+ temp0_h = __msa_ceqi_h(diff0, 0);
+ adjust2 =
+ (v8i16)__msa_bmnz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h);
+ adjust0 =
+ (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h);
+ col_sum0 += adjust0;
+ running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust2, (v16i8)adjust2);
+ dst1 = __msa_copy_s_d((v2i64)running_avg_y, 0);
+ SD(dst1, running_avg_y_ptr);
+ running_avg_y_ptr += avg_y_stride;
+ }
+
+ temp0_h = col_sum0;
+ temp0_w = __msa_hadd_s_w(temp0_h, temp0_h);
+ temp0_d = __msa_hadd_s_d(temp0_w, temp0_w);
+ temp1_d = __msa_splati_d(temp0_d, 1);
+ temp0_d += temp1_d;
+ sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0);
+
+ if (abs(sum_diff) > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+
+ LD4(sig_start, sig_stride, src0, src1, src2, src3);
+ sig_start += (4 * sig_stride);
+ SD4(src0, src1, src2, src3, running_avg_y_start, avg_y_stride);
+ running_avg_y_start += (4 * avg_y_stride);
+
+ LD4(sig_start, sig_stride, src0, src1, src2, src3);
+ SD4(src0, src1, src2, src3, running_avg_y_start, avg_y_stride);
+
+ return FILTER_BLOCK;
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/mips/msa/encodeopt_msa.c b/media/libvpx/libvpx/vp8/encoder/mips/msa/encodeopt_msa.c
new file mode 100644
index 0000000000..2bcddb6235
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mips/msa/encodeopt_msa.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+#include "vp8/encoder/block.h"
+
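+/* Sum of squared differences between the original and dequantized
+ * coefficients of one 4x4 block (16 values). */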
+int32_t vp8_block_error_msa(int16_t *coeff_ptr, int16_t *dq_coeff_ptr) {
+ int32_t err = 0;
+ uint32_t loop_cnt;
+ v8i16 coeff, dq_coeff, coeff0, coeff1;
+ v4i32 diff0, diff1;
+ v2i64 err0 = { 0 };
+ v2i64 err1 = { 0 };
+
+ for (loop_cnt = 2; loop_cnt--;) {
+ coeff = LD_SH(coeff_ptr);
+ dq_coeff = LD_SH(dq_coeff_ptr);
+ ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1);
+ HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
+ DPADD_SD2_SD(diff0, diff1, err0, err1);
+ coeff_ptr += 8;
+ dq_coeff_ptr += 8;
+ }
+
+ err0 += __msa_splati_d(err0, 1);
+ err1 += __msa_splati_d(err1, 1);
+ err = __msa_copy_s_d(err0, 0);
+ err += __msa_copy_s_d(err1, 0);
+
+ return err;
+}
+
+int32_t vp8_mbblock_error_msa(MACROBLOCK *mb, int32_t dc) {
+ BLOCK *be;
+ BLOCKD *bd;
+ int16_t *coeff_ptr, *dq_coeff_ptr;
+ int32_t err = 0;
+ uint32_t loop_cnt;
+ v8i16 coeff, coeff0, coeff1, coeff2, coeff3, coeff4;
+ v8i16 dq_coeff, dq_coeff2, dq_coeff3, dq_coeff4;
+ v4i32 diff0, diff1;
+ v2i64 err0, err1;
+ v16u8 zero = { 0 };
+ v16u8 mask0 = (v16u8)__msa_ldi_b(255);
+
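+  /* dc == 1: clear the first 32-bit lane of the mask so the DC difference is
+   * excluded from the error below. */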
+ if (1 == dc) {
+ mask0 = (v16u8)__msa_insve_w((v4i32)mask0, 0, (v4i32)zero);
+ }
+
+ for (loop_cnt = 0; loop_cnt < 8; ++loop_cnt) {
+ be = &mb->block[2 * loop_cnt];
+ bd = &mb->e_mbd.block[2 * loop_cnt];
+ coeff_ptr = be->coeff;
+ dq_coeff_ptr = bd->dqcoeff;
+ coeff = LD_SH(coeff_ptr);
+ dq_coeff = LD_SH(dq_coeff_ptr);
+ coeff_ptr += 8;
+ dq_coeff_ptr += 8;
+ coeff2 = LD_SH(coeff_ptr);
+ dq_coeff2 = LD_SH(dq_coeff_ptr);
+ be = &mb->block[2 * loop_cnt + 1];
+ bd = &mb->e_mbd.block[2 * loop_cnt + 1];
+ coeff_ptr = be->coeff;
+ dq_coeff_ptr = bd->dqcoeff;
+ coeff3 = LD_SH(coeff_ptr);
+ dq_coeff3 = LD_SH(dq_coeff_ptr);
+ coeff_ptr += 8;
+ dq_coeff_ptr += 8;
+ coeff4 = LD_SH(coeff_ptr);
+ dq_coeff4 = LD_SH(dq_coeff_ptr);
+ ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1);
+ HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
+ diff0 = (v4i32)__msa_bmnz_v(zero, (v16u8)diff0, mask0);
+ DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1);
+ ILVRL_H2_SH(coeff2, dq_coeff2, coeff0, coeff1);
+ HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
+ DPADD_SD2_SD(diff0, diff1, err0, err1);
+ err0 += __msa_splati_d(err0, 1);
+ err1 += __msa_splati_d(err1, 1);
+ err += __msa_copy_s_d(err0, 0);
+ err += __msa_copy_s_d(err1, 0);
+
+ ILVRL_H2_SH(coeff3, dq_coeff3, coeff0, coeff1);
+ HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
+ diff0 = (v4i32)__msa_bmnz_v(zero, (v16u8)diff0, mask0);
+ DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1);
+ ILVRL_H2_SH(coeff4, dq_coeff4, coeff0, coeff1);
+ HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
+ DPADD_SD2_SD(diff0, diff1, err0, err1);
+ err0 += __msa_splati_d(err0, 1);
+ err1 += __msa_splati_d(err1, 1);
+ err += __msa_copy_s_d(err0, 0);
+ err += __msa_copy_s_d(err1, 0);
+ }
+
+ return err;
+}
+
+int32_t vp8_mbuverror_msa(MACROBLOCK *mb) {
+ BLOCK *be;
+ BLOCKD *bd;
+ int16_t *coeff_ptr, *dq_coeff_ptr;
+ int32_t err = 0;
+ uint32_t loop_cnt;
+ v8i16 coeff, coeff0, coeff1, coeff2, coeff3, coeff4;
+ v8i16 dq_coeff, dq_coeff2, dq_coeff3, dq_coeff4;
+ v4i32 diff0, diff1;
+ v2i64 err0, err1, err_dup0, err_dup1;
+
+ for (loop_cnt = 16; loop_cnt < 24; loop_cnt += 2) {
+ be = &mb->block[loop_cnt];
+ bd = &mb->e_mbd.block[loop_cnt];
+ coeff_ptr = be->coeff;
+ dq_coeff_ptr = bd->dqcoeff;
+ coeff = LD_SH(coeff_ptr);
+ dq_coeff = LD_SH(dq_coeff_ptr);
+ coeff_ptr += 8;
+ dq_coeff_ptr += 8;
+ coeff2 = LD_SH(coeff_ptr);
+ dq_coeff2 = LD_SH(dq_coeff_ptr);
+ be = &mb->block[loop_cnt + 1];
+ bd = &mb->e_mbd.block[loop_cnt + 1];
+ coeff_ptr = be->coeff;
+ dq_coeff_ptr = bd->dqcoeff;
+ coeff3 = LD_SH(coeff_ptr);
+ dq_coeff3 = LD_SH(dq_coeff_ptr);
+ coeff_ptr += 8;
+ dq_coeff_ptr += 8;
+ coeff4 = LD_SH(coeff_ptr);
+ dq_coeff4 = LD_SH(dq_coeff_ptr);
+
+ ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1);
+ HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
+ DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1);
+
+ ILVRL_H2_SH(coeff2, dq_coeff2, coeff0, coeff1);
+ HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
+ DPADD_SD2_SD(diff0, diff1, err0, err1);
+ err_dup0 = __msa_splati_d(err0, 1);
+ err_dup1 = __msa_splati_d(err1, 1);
+ ADD2(err0, err_dup0, err1, err_dup1, err0, err1);
+ err += __msa_copy_s_d(err0, 0);
+ err += __msa_copy_s_d(err1, 0);
+
+ ILVRL_H2_SH(coeff3, dq_coeff3, coeff0, coeff1);
+ HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
+ DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1);
+ ILVRL_H2_SH(coeff4, dq_coeff4, coeff0, coeff1);
+ HSUB_UH2_SW(coeff0, coeff1, diff0, diff1);
+ DPADD_SD2_SD(diff0, diff1, err0, err1);
+ err_dup0 = __msa_splati_d(err0, 1);
+ err_dup1 = __msa_splati_d(err1, 1);
+ ADD2(err0, err_dup0, err1, err_dup1, err0, err1);
+ err += __msa_copy_s_d(err0, 0);
+ err += __msa_copy_s_d(err1, 0);
+ }
+
+ return err;
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/mips/msa/quantize_msa.c b/media/libvpx/libvpx/vp8/encoder/mips/msa/quantize_msa.c
new file mode 100644
index 0000000000..9f5fbd39c8
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mips/msa/quantize_msa.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+#include "vp8/encoder/block.h"
+
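+/* Vectorized fast quantizer: shuffle the 16 coefficients into zig-zag order,
+ * quantize all lanes at once, then shuffle back to raster order through
+ * inv_zig_zag before storing qcoeff/dqcoeff. */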
+static int8_t fast_quantize_b_msa(int16_t *coeff_ptr, int16_t *round,
+ int16_t *quant, int16_t *de_quant,
+ int16_t *q_coeff, int16_t *dq_coeff) {
+ int32_t cnt, eob;
+ v16i8 inv_zig_zag = { 0, 1, 5, 6, 2, 4, 7, 12, 3, 8, 11, 13, 9, 10, 14, 15 };
+ v8i16 round0, round1;
+ v8i16 sign_z0, sign_z1;
+ v8i16 q_coeff0, q_coeff1;
+ v8i16 x0, x1, de_quant0, de_quant1;
+ v8i16 coeff0, coeff1, z0, z1;
+ v8i16 quant0, quant1, quant2, quant3;
+ v8i16 zero = { 0 };
+ v8i16 inv_zig_zag0, inv_zig_zag1;
+ v8i16 zigzag_mask0 = { 0, 1, 4, 8, 5, 2, 3, 6 };
+ v8i16 zigzag_mask1 = { 9, 12, 13, 10, 7, 11, 14, 15 };
+ v8i16 temp0_h, temp1_h, temp2_h, temp3_h;
+ v4i32 temp0_w, temp1_w, temp2_w, temp3_w;
+
+ ILVRL_B2_SH(zero, inv_zig_zag, inv_zig_zag0, inv_zig_zag1);
+ eob = -1;
+ LD_SH2(coeff_ptr, 8, coeff0, coeff1);
+ VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, z0,
+ z1);
+ LD_SH2(round, 8, coeff0, coeff1);
+ VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, round0,
+ round1);
+ LD_SH2(quant, 8, coeff0, coeff1);
+ VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, quant0,
+ quant2);
+ sign_z0 = z0 >> 15;
+ sign_z1 = z1 >> 15;
+ x0 = __msa_add_a_h(z0, zero);
+ x1 = __msa_add_a_h(z1, zero);
+ ILVL_H2_SH(quant0, quant0, quant2, quant2, quant1, quant3);
+ ILVR_H2_SH(quant0, quant0, quant2, quant2, quant0, quant2);
+ ILVL_H2_SH(round0, x0, round1, x1, temp1_h, temp3_h);
+ ILVR_H2_SH(round0, x0, round1, x1, temp0_h, temp2_h);
+ DOTP_SH4_SW(temp0_h, temp1_h, temp2_h, temp3_h, quant0, quant1, quant2,
+ quant3, temp0_w, temp1_w, temp2_w, temp3_w);
+ SRA_4V(temp0_w, temp1_w, temp2_w, temp3_w, 16);
+ PCKEV_H2_SH(temp1_w, temp0_w, temp3_w, temp2_w, x0, x1);
+ x0 = x0 ^ sign_z0;
+ x1 = x1 ^ sign_z1;
+ SUB2(x0, sign_z0, x1, sign_z1, x0, x1);
+ VSHF_H2_SH(x0, x1, x0, x1, inv_zig_zag0, inv_zig_zag1, q_coeff0, q_coeff1);
+ ST_SH2(q_coeff0, q_coeff1, q_coeff, 8);
+ LD_SH2(de_quant, 8, de_quant0, de_quant1);
+ q_coeff0 *= de_quant0;
+ q_coeff1 *= de_quant1;
+ ST_SH2(q_coeff0, q_coeff1, dq_coeff, 8);
+
+ for (cnt = 0; cnt < 16; ++cnt) {
+ if ((cnt <= 7) && (x1[7 - cnt] != 0)) {
+ eob = (15 - cnt);
+ break;
+ }
+
+ if ((cnt > 7) && (x0[7 - (cnt - 8)] != 0)) {
+ eob = (7 - (cnt - 8));
+ break;
+ }
+ }
+
+ return (int8_t)(eob + 1);
+}
+
+static int8_t exact_regular_quantize_b_msa(
+ int16_t *zbin_boost, int16_t *coeff_ptr, int16_t *zbin, int16_t *round,
+ int16_t *quant, int16_t *quant_shift, int16_t *de_quant, int16_t zbin_oq_in,
+ int16_t *q_coeff, int16_t *dq_coeff) {
+ int32_t cnt, eob;
+ int16_t *boost_temp = zbin_boost;
+ v16i8 inv_zig_zag = { 0, 1, 5, 6, 2, 4, 7, 12, 3, 8, 11, 13, 9, 10, 14, 15 };
+ v8i16 round0, round1;
+ v8i16 sign_z0, sign_z1;
+ v8i16 q_coeff0, q_coeff1;
+ v8i16 z_bin0, z_bin1, zbin_o_q;
+ v8i16 x0, x1, sign_x0, sign_x1, de_quant0, de_quant1;
+ v8i16 coeff0, coeff1, z0, z1;
+ v8i16 quant0, quant1, quant2, quant3;
+ v8i16 zero = { 0 };
+ v8i16 inv_zig_zag0, inv_zig_zag1;
+ v8i16 zigzag_mask0 = { 0, 1, 4, 8, 5, 2, 3, 6 };
+ v8i16 zigzag_mask1 = { 9, 12, 13, 10, 7, 11, 14, 15 };
+ v8i16 temp0_h, temp1_h, temp2_h, temp3_h;
+ v4i32 temp0_w, temp1_w, temp2_w, temp3_w;
+
+ ILVRL_B2_SH(zero, inv_zig_zag, inv_zig_zag0, inv_zig_zag1);
+ zbin_o_q = __msa_fill_h(zbin_oq_in);
+ eob = -1;
+ LD_SH2(coeff_ptr, 8, coeff0, coeff1);
+ VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, z0,
+ z1);
+ LD_SH2(round, 8, coeff0, coeff1);
+ VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, round0,
+ round1);
+ LD_SH2(quant, 8, coeff0, coeff1);
+ VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, quant0,
+ quant2);
+ LD_SH2(zbin, 8, coeff0, coeff1);
+ VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, z_bin0,
+ z_bin1);
+ sign_z0 = z0 >> 15;
+ sign_z1 = z1 >> 15;
+ x0 = __msa_add_a_h(z0, zero);
+ x1 = __msa_add_a_h(z1, zero);
+ SUB2(x0, z_bin0, x1, z_bin1, z_bin0, z_bin1);
+ SUB2(z_bin0, zbin_o_q, z_bin1, zbin_o_q, z_bin0, z_bin1);
+ ILVL_H2_SH(quant0, quant0, quant2, quant2, quant1, quant3);
+ ILVR_H2_SH(quant0, quant0, quant2, quant2, quant0, quant2);
+ ILVL_H2_SH(round0, x0, round1, x1, temp1_h, temp3_h);
+ ILVR_H2_SH(round0, x0, round1, x1, temp0_h, temp2_h);
+ DOTP_SH4_SW(temp0_h, temp1_h, temp2_h, temp3_h, quant0, quant1, quant2,
+ quant3, temp0_w, temp1_w, temp2_w, temp3_w);
+ SRA_4V(temp0_w, temp1_w, temp2_w, temp3_w, 16);
+ PCKEV_H2_SH(temp1_w, temp0_w, temp3_w, temp2_w, temp0_h, temp2_h);
+ LD_SH2(quant_shift, 8, coeff0, coeff1);
+ VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, quant0,
+ quant2);
+ ILVL_H2_SH(quant0, quant0, quant2, quant2, quant1, quant3);
+ ILVR_H2_SH(quant0, quant0, quant2, quant2, quant0, quant2);
+ ADD2(x0, round0, x1, round1, x0, x1);
+ ILVL_H2_SH(temp0_h, x0, temp2_h, x1, temp1_h, temp3_h);
+ ILVR_H2_SH(temp0_h, x0, temp2_h, x1, temp0_h, temp2_h);
+ DOTP_SH4_SW(temp0_h, temp1_h, temp2_h, temp3_h, quant0, quant1, quant2,
+ quant3, temp0_w, temp1_w, temp2_w, temp3_w);
+ SRA_4V(temp0_w, temp1_w, temp2_w, temp3_w, 16);
+ PCKEV_H2_SH(temp1_w, temp0_w, temp3_w, temp2_w, x0, x1);
+ sign_x0 = x0 ^ sign_z0;
+ sign_x1 = x1 ^ sign_z1;
+ SUB2(sign_x0, sign_z0, sign_x1, sign_z1, sign_x0, sign_x1);
+ for (cnt = 0; cnt < 16; ++cnt) {
+ if (cnt <= 7) {
+ if (boost_temp[0] <= z_bin0[cnt]) {
+ if (x0[cnt]) {
+ eob = cnt;
+ boost_temp = zbin_boost;
+ } else {
+ boost_temp++;
+ }
+ } else {
+ sign_x0[cnt] = 0;
+ boost_temp++;
+ }
+ } else {
+ if (boost_temp[0] <= z_bin1[cnt - 8]) {
+ if (x1[cnt - 8]) {
+ eob = cnt;
+ boost_temp = zbin_boost;
+ } else {
+ boost_temp++;
+ }
+ } else {
+ sign_x1[cnt - 8] = 0;
+ boost_temp++;
+ }
+ }
+ }
+
+ VSHF_H2_SH(sign_x0, sign_x1, sign_x0, sign_x1, inv_zig_zag0, inv_zig_zag1,
+ q_coeff0, q_coeff1);
+ ST_SH2(q_coeff0, q_coeff1, q_coeff, 8);
+ LD_SH2(de_quant, 8, de_quant0, de_quant1);
+ MUL2(de_quant0, q_coeff0, de_quant1, q_coeff1, de_quant0, de_quant1);
+ ST_SH2(de_quant0, de_quant1, dq_coeff, 8);
+
+ return (int8_t)(eob + 1);
+}
+
+void vp8_fast_quantize_b_msa(BLOCK *b, BLOCKD *d) {
+ int16_t *coeff_ptr = b->coeff;
+ int16_t *round_ptr = b->round;
+ int16_t *quant_ptr = b->quant_fast;
+ int16_t *qcoeff_ptr = d->qcoeff;
+ int16_t *dqcoeff_ptr = d->dqcoeff;
+ int16_t *dequant_ptr = d->dequant;
+
+ *d->eob = fast_quantize_b_msa(coeff_ptr, round_ptr, quant_ptr, dequant_ptr,
+ qcoeff_ptr, dqcoeff_ptr);
+}
+
+void vp8_regular_quantize_b_msa(BLOCK *b, BLOCKD *d) {
+ int16_t *zbin_boost_ptr = b->zrun_zbin_boost;
+ int16_t *coeff_ptr = b->coeff;
+ int16_t *zbin_ptr = b->zbin;
+ int16_t *round_ptr = b->round;
+ int16_t *quant_ptr = b->quant;
+ int16_t *quant_shift_ptr = b->quant_shift;
+ int16_t *qcoeff_ptr = d->qcoeff;
+ int16_t *dqcoeff_ptr = d->dqcoeff;
+ int16_t *dequant_ptr = d->dequant;
+ int16_t zbin_oq_value = b->zbin_extra;
+
+ *d->eob = exact_regular_quantize_b_msa(
+ zbin_boost_ptr, coeff_ptr, zbin_ptr, round_ptr, quant_ptr,
+ quant_shift_ptr, dequant_ptr, zbin_oq_value, qcoeff_ptr, dqcoeff_ptr);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/mips/msa/temporal_filter_msa.c b/media/libvpx/libvpx/vp8/encoder/mips/msa/temporal_filter_msa.c
new file mode 100644
index 0000000000..fb83f07bd2
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mips/msa/temporal_filter_msa.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp8_rtcd.h"
+#include "vp8/common/mips/msa/vp8_macros_msa.h"
+
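+/* Both paths below apply the same per-pixel weight as the scalar fallback in
+ * vp8_temporal_filter_apply_msa:
+ *   modifier = filter_weight * (16 - min(16, round((3 * diff^2) >> strength)))
+ * 'count' accumulates the weights and 'accumulator' the weighted pixels. */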
+static void temporal_filter_apply_16size_msa(
+ uint8_t *frame1_ptr, uint32_t stride, uint8_t *frame2_ptr,
+ int32_t strength_in, int32_t filter_wt_in, uint32_t *acc, uint16_t *cnt) {
+ uint32_t row;
+ v16i8 frame1_0_b, frame1_1_b, frame2_0_b, frame2_1_b;
+ v16u8 frame_l, frame_h;
+ v16i8 zero = { 0 };
+ v8i16 frame2_0_h, frame2_1_h, mod0_h, mod1_h;
+ v8i16 diff0, diff1, cnt0, cnt1;
+ v4i32 const3, const16, filter_wt, strength;
+ v4i32 mod0_w, mod1_w, mod2_w, mod3_w;
+ v4i32 diff0_r, diff0_l, diff1_r, diff1_l;
+ v4i32 frame2_0, frame2_1, frame2_2, frame2_3;
+ v4i32 acc0, acc1, acc2, acc3;
+
+ filter_wt = __msa_fill_w(filter_wt_in);
+ strength = __msa_fill_w(strength_in);
+ const3 = __msa_ldi_w(3);
+ const16 = __msa_ldi_w(16);
+
+ for (row = 8; row--;) {
+ frame1_0_b = LD_SB(frame1_ptr);
+ frame2_0_b = LD_SB(frame2_ptr);
+ frame1_ptr += stride;
+ frame2_ptr += 16;
+ frame1_1_b = LD_SB(frame1_ptr);
+ frame2_1_b = LD_SB(frame2_ptr);
+ LD_SW2(acc, 4, acc0, acc1);
+ LD_SW2(acc + 8, 4, acc2, acc3);
+ LD_SH2(cnt, 8, cnt0, cnt1);
+ ILVRL_B2_UB(frame1_0_b, frame2_0_b, frame_l, frame_h);
+ HSUB_UB2_SH(frame_l, frame_h, diff0, diff1);
+ UNPCK_SH_SW(diff0, diff0_r, diff0_l);
+ UNPCK_SH_SW(diff1, diff1_r, diff1_l);
+ MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
+ SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
+ diff0_r = (mod0_w < const16);
+ diff0_l = (mod1_w < const16);
+ diff1_r = (mod2_w < const16);
+ diff1_l = (mod3_w < const16);
+ SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ mod0_w = diff0_r & mod0_w;
+ mod1_w = diff0_l & mod1_w;
+ mod2_w = diff1_r & mod2_w;
+ mod3_w = diff1_l & mod3_w;
+ MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w,
+ filter_wt, mod0_w, mod1_w, mod2_w, mod3_w);
+    PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h);
+ ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);
+ ST_SH2(mod0_h, mod1_h, cnt, 8);
+ cnt += 16;
+ ILVRL_B2_SH(zero, frame2_0_b, frame2_0_h, frame2_1_h);
+ UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1);
+ UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3);
+ MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, frame2_3,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
+ ST_SW2(mod0_w, mod1_w, acc, 4);
+ ST_SW2(mod2_w, mod3_w, acc + 8, 4);
+ acc += 16;
+ LD_SW2(acc, 4, acc0, acc1);
+ LD_SW2(acc + 8, 4, acc2, acc3);
+ LD_SH2(cnt, 8, cnt0, cnt1);
+ ILVRL_B2_UB(frame1_1_b, frame2_1_b, frame_l, frame_h);
+ HSUB_UB2_SH(frame_l, frame_h, diff0, diff1);
+ UNPCK_SH_SW(diff0, diff0_r, diff0_l);
+ UNPCK_SH_SW(diff1, diff1_r, diff1_l);
+ MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
+ SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
+ diff0_r = (mod0_w < const16);
+ diff0_l = (mod1_w < const16);
+ diff1_r = (mod2_w < const16);
+ diff1_l = (mod3_w < const16);
+ SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ mod0_w = diff0_r & mod0_w;
+ mod1_w = diff0_l & mod1_w;
+ mod2_w = diff1_r & mod2_w;
+ mod3_w = diff1_l & mod3_w;
+ MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w,
+ filter_wt, mod0_w, mod1_w, mod2_w, mod3_w);
+ PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h);
+ ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);
+ ST_SH2(mod0_h, mod1_h, cnt, 8);
+ cnt += 16;
+
+ UNPCK_UB_SH(frame2_1_b, frame2_0_h, frame2_1_h);
+ UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1);
+ UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3);
+ MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, frame2_3,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
+ ST_SW2(mod0_w, mod1_w, acc, 4);
+ ST_SW2(mod2_w, mod3_w, acc + 8, 4);
+ acc += 16;
+ frame1_ptr += stride;
+ frame2_ptr += 16;
+ }
+}
+
+static void temporal_filter_apply_8size_msa(
+ uint8_t *frame1_ptr, uint32_t stride, uint8_t *frame2_ptr,
+ int32_t strength_in, int32_t filter_wt_in, uint32_t *acc, uint16_t *cnt) {
+ uint32_t row;
+ uint64_t f0, f1, f2, f3, f4, f5, f6, f7;
+ v16i8 frame1 = { 0 };
+ v16i8 frame2 = { 0 };
+ v16i8 frame3 = { 0 };
+ v16i8 frame4 = { 0 };
+ v16u8 frame_l, frame_h;
+ v8i16 frame2_0_h, frame2_1_h, mod0_h, mod1_h;
+ v8i16 diff0, diff1, cnt0, cnt1;
+ v4i32 const3, const16;
+ v4i32 filter_wt, strength;
+ v4i32 mod0_w, mod1_w, mod2_w, mod3_w;
+ v4i32 diff0_r, diff0_l, diff1_r, diff1_l;
+ v4i32 frame2_0, frame2_1, frame2_2, frame2_3;
+ v4i32 acc0, acc1, acc2, acc3;
+
+ filter_wt = __msa_fill_w(filter_wt_in);
+ strength = __msa_fill_w(strength_in);
+ const3 = __msa_ldi_w(3);
+ const16 = __msa_ldi_w(16);
+
+ for (row = 2; row--;) {
+ LD2(frame1_ptr, stride, f0, f1);
+ frame1_ptr += (2 * stride);
+ LD2(frame2_ptr, 8, f2, f3);
+ frame2_ptr += 16;
+ LD2(frame1_ptr, stride, f4, f5);
+ frame1_ptr += (2 * stride);
+ LD2(frame2_ptr, 8, f6, f7);
+ frame2_ptr += 16;
+
+ LD_SW2(acc, 4, acc0, acc1);
+ LD_SW2(acc + 8, 4, acc2, acc3);
+ LD_SH2(cnt, 8, cnt0, cnt1);
+ INSERT_D2_SB(f0, f1, frame1);
+ INSERT_D2_SB(f2, f3, frame2);
+ INSERT_D2_SB(f4, f5, frame3);
+ INSERT_D2_SB(f6, f7, frame4);
+ ILVRL_B2_UB(frame1, frame2, frame_l, frame_h);
+ HSUB_UB2_SH(frame_l, frame_h, diff0, diff1);
+ UNPCK_SH_SW(diff0, diff0_r, diff0_l);
+ UNPCK_SH_SW(diff1, diff1_r, diff1_l);
+ MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
+ SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
+ diff0_r = (mod0_w < const16);
+ diff0_l = (mod1_w < const16);
+ diff1_r = (mod2_w < const16);
+ diff1_l = (mod3_w < const16);
+ SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ mod0_w = diff0_r & mod0_w;
+ mod1_w = diff0_l & mod1_w;
+ mod2_w = diff1_r & mod2_w;
+ mod3_w = diff1_l & mod3_w;
+ MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w,
+ filter_wt, mod0_w, mod1_w, mod2_w, mod3_w);
+ PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h);
+ ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);
+ ST_SH2(mod0_h, mod1_h, cnt, 8);
+ cnt += 16;
+
+ UNPCK_UB_SH(frame2, frame2_0_h, frame2_1_h);
+ UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1);
+ UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3);
+ MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, frame2_3,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
+ ST_SW2(mod0_w, mod1_w, acc, 4);
+ ST_SW2(mod2_w, mod3_w, acc + 8, 4);
+ acc += 16;
+
+ LD_SW2(acc, 4, acc0, acc1);
+ LD_SW2(acc + 8, 4, acc2, acc3);
+ LD_SH2(cnt, 8, cnt0, cnt1);
+ ILVRL_B2_UB(frame3, frame4, frame_l, frame_h);
+ HSUB_UB2_SH(frame_l, frame_h, diff0, diff1);
+ UNPCK_SH_SW(diff0, diff0_r, diff0_l);
+ UNPCK_SH_SW(diff1, diff1_r, diff1_l);
+ MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
+ SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
+ diff0_r = (mod0_w < const16);
+ diff0_l = (mod1_w < const16);
+ diff1_r = (mod2_w < const16);
+ diff1_l = (mod3_w < const16);
+ SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ mod0_w = diff0_r & mod0_w;
+ mod1_w = diff0_l & mod1_w;
+ mod2_w = diff1_r & mod2_w;
+ mod3_w = diff1_l & mod3_w;
+ MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w,
+ filter_wt, mod0_w, mod1_w, mod2_w, mod3_w);
+ PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h);
+ ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);
+ ST_SH2(mod0_h, mod1_h, cnt, 8);
+ cnt += 16;
+
+ UNPCK_UB_SH(frame4, frame2_0_h, frame2_1_h);
+ UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1);
+ UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3);
+ MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, frame2_3,
+ mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
+ ST_SW2(mod0_w, mod1_w, acc, 4);
+ ST_SW2(mod2_w, mod3_w, acc + 8, 4);
+ acc += 16;
+ }
+}
+
+void vp8_temporal_filter_apply_msa(uint8_t *frame1, uint32_t stride,
+ uint8_t *frame2, uint32_t block_size,
+ int32_t strength, int32_t filter_weight,
+ uint32_t *accumulator, uint16_t *count) {
+ if (8 == block_size) {
+ temporal_filter_apply_8size_msa(frame1, stride, frame2, strength,
+ filter_weight, accumulator, count);
+ } else if (16 == block_size) {
+ temporal_filter_apply_16size_msa(frame1, stride, frame2, strength,
+ filter_weight, accumulator, count);
+ } else {
+ uint32_t i, j, k;
+ int32_t modifier;
+ int32_t byte = 0;
+ const int32_t rounding = strength > 0 ? 1 << (strength - 1) : 0;
+
+ for (i = 0, k = 0; i < block_size; ++i) {
+ for (j = 0; j < block_size; ++j, ++k) {
+ int src_byte = frame1[byte];
+ int pixel_value = *frame2++;
+
+ modifier = src_byte - pixel_value;
+ modifier *= modifier;
+ modifier *= 3;
+ modifier += rounding;
+ modifier >>= strength;
+
+ if (modifier > 16) modifier = 16;
+
+ modifier = 16 - modifier;
+ modifier *= filter_weight;
+
+ count[k] += modifier;
+ accumulator[k] += modifier * pixel_value;
+
+ byte++;
+ }
+
+ byte += stride - block_size;
+ }
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/modecosts.c b/media/libvpx/libvpx/vp8/encoder/modecosts.c
new file mode 100644
index 0000000000..b1c3120a92
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/modecosts.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8/common/blockd.h"
+#include "modecosts.h"
+#include "onyx_int.h"
+#include "treewriter.h"
+#include "vp8/common/entropymode.h"
+
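+/* Precompute bit costs for every intra/inter mode token from the current tree
+ * probabilities so mode selection can look costs up instead of re-walking the
+ * trees. */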
+void vp8_init_mode_costs(VP8_COMP *c) {
+ VP8_COMMON *x = &c->common;
+ struct rd_costs_struct *rd_costs = &c->rd_costs;
+
+ {
+ const vp8_tree_p T = vp8_bmode_tree;
+
+ int i = 0;
+
+ do {
+ int j = 0;
+
+ do {
+ vp8_cost_tokens(rd_costs->bmode_costs[i][j], vp8_kf_bmode_prob[i][j],
+ T);
+ } while (++j < VP8_BINTRAMODES);
+ } while (++i < VP8_BINTRAMODES);
+
+ vp8_cost_tokens(rd_costs->inter_bmode_costs, x->fc.bmode_prob, T);
+ }
+ vp8_cost_tokens(rd_costs->inter_bmode_costs, x->fc.sub_mv_ref_prob,
+ vp8_sub_mv_ref_tree);
+
+ vp8_cost_tokens(rd_costs->mbmode_cost[1], x->fc.ymode_prob, vp8_ymode_tree);
+ vp8_cost_tokens(rd_costs->mbmode_cost[0], vp8_kf_ymode_prob,
+ vp8_kf_ymode_tree);
+
+ vp8_cost_tokens(rd_costs->intra_uv_mode_cost[1], x->fc.uv_mode_prob,
+ vp8_uv_mode_tree);
+ vp8_cost_tokens(rd_costs->intra_uv_mode_cost[0], vp8_kf_uv_mode_prob,
+ vp8_uv_mode_tree);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/modecosts.h b/media/libvpx/libvpx/vp8/encoder/modecosts.h
new file mode 100644
index 0000000000..09ee2b5520
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/modecosts.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_MODECOSTS_H_
+#define VPX_VP8_ENCODER_MODECOSTS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP8_COMP;
+
+void vp8_init_mode_costs(struct VP8_COMP *c);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_MODECOSTS_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/mr_dissim.c b/media/libvpx/libvpx/vp8/encoder/mr_dissim.c
new file mode 100644
index 0000000000..b1bfb4b54a
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mr_dissim.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include "vpx_config.h"
+#include "onyx_int.h"
+#include "mr_dissim.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_mem/vpx_mem.h"
+#include "rdopt.h"
+#include "vp8/common/common.h"
+
+void vp8_cal_low_res_mb_cols(VP8_COMP *cpi) {
+ int low_res_w;
+
+ /* Support arbitrary down-sampling factor */
+ unsigned int iw = cpi->oxcf.Width * cpi->oxcf.mr_down_sampling_factor.den +
+ cpi->oxcf.mr_down_sampling_factor.num - 1;
+
+ low_res_w = iw / cpi->oxcf.mr_down_sampling_factor.num;
+ cpi->mr_low_res_mb_cols = ((low_res_w + 15) >> 4);
+}
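+/* Illustration (not in the original source): for a 640-wide stream with
+ * mr_down_sampling_factor num/den = 2/1, iw = 640 * 1 + 2 - 1 = 641,
+ * low_res_w = 641 / 2 = 320 (a ceiling divide), and
+ * mr_low_res_mb_cols = (320 + 15) >> 4 = 20 macroblock columns.
+ */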
+
+#define GET_MV(x) \
+ if (x->mbmi.ref_frame != INTRA_FRAME) { \
+ mvx[cnt] = x->mbmi.mv.as_mv.row; \
+ mvy[cnt] = x->mbmi.mv.as_mv.col; \
+ cnt++; \
+ }
+
+#define GET_MV_SIGN(x) \
+ if (x->mbmi.ref_frame != INTRA_FRAME) { \
+ mvx[cnt] = x->mbmi.mv.as_mv.row; \
+ mvy[cnt] = x->mbmi.mv.as_mv.col; \
+ if (cm->ref_frame_sign_bias[x->mbmi.ref_frame] != \
+ cm->ref_frame_sign_bias[tmp->mbmi.ref_frame]) { \
+ mvx[cnt] *= -1; \
+ mvy[cnt] *= -1; \
+ } \
+ cnt++; \
+ }
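+/* GET_MV_SIGN additionally negates the vector when the neighbor's reference
+ * and the current reference carry opposite sign biases, so that motion
+ * vectors measured against differently-biased references are comparable in
+ * the min/max scan below.
+ */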
+
+void vp8_cal_dissimilarity(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+  /* Note: the first row & first column in mip are outside the frame and
+   * were initialized to all 0 (ref_frame, mode, mv...).
+   * Their ref_frame == 0 means they won't be counted in the following
+   * calculation.
+   */
+ if (cpi->oxcf.mr_total_resolutions > 1 &&
+ cpi->oxcf.mr_encoder_id < (cpi->oxcf.mr_total_resolutions - 1)) {
+    /* Store info for show/no-show frames for supporting alt_ref.
+     * If the parent frame is an alt_ref, the child has one too.
+     */
+ LOWER_RES_FRAME_INFO *store_info =
+ (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
+
+ store_info->frame_type = cm->frame_type;
+
+ if (cm->frame_type != KEY_FRAME) {
+ int i;
+ store_info->is_frame_dropped = 0;
+ for (i = 1; i < MAX_REF_FRAMES; ++i)
+ store_info->low_res_ref_frames[i] = cpi->current_ref_frames[i];
+ }
+
+ if (cm->frame_type != KEY_FRAME) {
+ int mb_row;
+ int mb_col;
+ /* Point to beginning of allocated MODE_INFO arrays. */
+ MODE_INFO *tmp = cm->mip + cm->mode_info_stride;
+ LOWER_RES_MB_INFO *store_mode_info = store_info->mb_info;
+
+ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+ tmp++;
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+ int dissim = INT_MAX;
+
+ if (tmp->mbmi.ref_frame != INTRA_FRAME) {
+ int mvx[8];
+ int mvy[8];
+ int mmvx;
+ int mmvy;
+ int cnt = 0;
+ const MODE_INFO *here = tmp;
+ const MODE_INFO *above = here - cm->mode_info_stride;
+ const MODE_INFO *left = here - 1;
+ const MODE_INFO *aboveleft = above - 1;
+ const MODE_INFO *aboveright = NULL;
+ const MODE_INFO *right = NULL;
+ const MODE_INFO *belowleft = NULL;
+ const MODE_INFO *below = NULL;
+ const MODE_INFO *belowright = NULL;
+
+ /* If alternate reference frame is used, we have to
+ * check sign of MV. */
+ if (cpi->oxcf.play_alternate) {
+ /* Gather mv of neighboring MBs */
+ GET_MV_SIGN(above)
+ GET_MV_SIGN(left)
+ GET_MV_SIGN(aboveleft)
+
+ if (mb_col < (cm->mb_cols - 1)) {
+ right = here + 1;
+ aboveright = above + 1;
+ GET_MV_SIGN(right)
+ GET_MV_SIGN(aboveright)
+ }
+
+ if (mb_row < (cm->mb_rows - 1)) {
+ below = here + cm->mode_info_stride;
+ belowleft = below - 1;
+ GET_MV_SIGN(below)
+ GET_MV_SIGN(belowleft)
+ }
+
+ if (mb_col < (cm->mb_cols - 1) && mb_row < (cm->mb_rows - 1)) {
+ belowright = below + 1;
+ GET_MV_SIGN(belowright)
+ }
+ } else {
+ /* No alt_ref and gather mv of neighboring MBs */
+ GET_MV(above)
+ GET_MV(left)
+ GET_MV(aboveleft)
+
+ if (mb_col < (cm->mb_cols - 1)) {
+ right = here + 1;
+ aboveright = above + 1;
+ GET_MV(right)
+ GET_MV(aboveright)
+ }
+
+ if (mb_row < (cm->mb_rows - 1)) {
+ below = here + cm->mode_info_stride;
+ belowleft = below - 1;
+ GET_MV(below)
+ GET_MV(belowleft)
+ }
+
+ if (mb_col < (cm->mb_cols - 1) && mb_row < (cm->mb_rows - 1)) {
+ belowright = below + 1;
+ GET_MV(belowright)
+ }
+ }
+
+ if (cnt > 0) {
+ int max_mvx = mvx[0];
+ int min_mvx = mvx[0];
+ int max_mvy = mvy[0];
+ int min_mvy = mvy[0];
+ int i;
+
+ if (cnt > 1) {
+ for (i = 1; i < cnt; ++i) {
+ if (mvx[i] > max_mvx)
+ max_mvx = mvx[i];
+ else if (mvx[i] < min_mvx)
+ min_mvx = mvx[i];
+ if (mvy[i] > max_mvy)
+ max_mvy = mvy[i];
+ else if (mvy[i] < min_mvy)
+ min_mvy = mvy[i];
+ }
+ }
+
+ mmvx = VPXMAX(abs(min_mvx - here->mbmi.mv.as_mv.row),
+ abs(max_mvx - here->mbmi.mv.as_mv.row));
+ mmvy = VPXMAX(abs(min_mvy - here->mbmi.mv.as_mv.col),
+ abs(max_mvy - here->mbmi.mv.as_mv.col));
+ dissim = VPXMAX(mmvx, mmvy);
+ }
+ }
+
+ /* Store mode info for next resolution encoding */
+ store_mode_info->mode = tmp->mbmi.mode;
+ store_mode_info->ref_frame = tmp->mbmi.ref_frame;
+ store_mode_info->mv.as_int = tmp->mbmi.mv.as_int;
+ store_mode_info->dissim = dissim;
+ tmp++;
+ store_mode_info++;
+ }
+ }
+ }
+ }
+}
+
+/* This function is called only when this frame is dropped at the current
+   resolution level. */
+void vp8_store_drop_frame_info(VP8_COMP *cpi) {
+  /* If the frame is dropped in lower-resolution encoding, this information
+     is passed to the higher-resolution level so that the encoder knows
+     there is no mode & motion info available.
+  */
+ if (cpi->oxcf.mr_total_resolutions > 1 &&
+ cpi->oxcf.mr_encoder_id < (cpi->oxcf.mr_total_resolutions - 1)) {
+    /* Store info for show/no-show frames for supporting alt_ref.
+     * If the parent frame is an alt_ref, the child has one too.
+     */
+ LOWER_RES_FRAME_INFO *store_info =
+ (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
+
+ /* Set frame_type to be INTER_FRAME since we won't drop key frame. */
+ store_info->frame_type = INTER_FRAME;
+ store_info->is_frame_dropped = 1;
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/mr_dissim.h b/media/libvpx/libvpx/vp8/encoder/mr_dissim.h
new file mode 100644
index 0000000000..58f5a97623
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/mr_dissim.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_MR_DISSIM_H_
+#define VPX_VP8_ENCODER_MR_DISSIM_H_
+#include "vpx_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void vp8_cal_low_res_mb_cols(VP8_COMP *cpi);
+extern void vp8_cal_dissimilarity(VP8_COMP *cpi);
+extern void vp8_store_drop_frame_info(VP8_COMP *cpi);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_MR_DISSIM_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/onyx_if.c b/media/libvpx/libvpx/vp8/encoder/onyx_if.c
new file mode 100644
index 0000000000..9646379a2e
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/onyx_if.c
@@ -0,0 +1,5433 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "./vpx_scale_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "./vp8_rtcd.h"
+#include "bitstream.h"
+#include "vp8/common/onyxc_int.h"
+#include "vp8/common/blockd.h"
+#include "onyx_int.h"
+#include "vp8/common/systemdependent.h"
+#include "vp8/common/vp8_skin_detection.h"
+#include "vp8/encoder/quantize.h"
+#include "vp8/common/alloccommon.h"
+#include "mcomp.h"
+#include "firstpass.h"
+#include "vpx_dsp/psnr.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp8/common/extend.h"
+#include "ratectrl.h"
+#include "vp8/common/quant_common.h"
+#include "segmentation.h"
+#if CONFIG_POSTPROC
+#include "vp8/common/postproc.h"
+#endif
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/reconintra.h"
+#include "vp8/common/swapyv12buffer.h"
+#include "vp8/common/threading.h"
+#include "vpx_ports/system_state.h"
+#include "vpx_ports/vpx_once.h"
+#include "vpx_ports/vpx_timer.h"
+#include "vpx_util/vpx_write_yuv_frame.h"
+#if VPX_ARCH_ARM
+#include "vpx_ports/arm.h"
+#endif
+#if CONFIG_MULTI_RES_ENCODING
+#include "mr_dissim.h"
+#endif
+#include "encodeframe.h"
+#if CONFIG_MULTITHREAD
+#include "ethreading.h"
+#endif
+#include "picklpf.h"
+#if !CONFIG_REALTIME_ONLY
+#include "temporal_filter.h"
+#endif
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include <limits.h>
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+extern int vp8_update_coef_context(VP8_COMP *cpi);
+#endif
+
+extern unsigned int vp8_get_processor_freq();
+
+int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
+
+static void set_default_lf_deltas(VP8_COMP *cpi);
+
+extern const int vp8_gf_interval_table[101];
+
+#if CONFIG_INTERNAL_STATS
+#include "math.h"
+#include "vpx_dsp/ssim.h"
+#endif
+
+#ifdef OUTPUT_YUV_SRC
+FILE *yuv_file;
+#endif
+#ifdef OUTPUT_YUV_DENOISED
+FILE *yuv_denoised_file;
+#endif
+#ifdef OUTPUT_YUV_SKINMAP
+static FILE *yuv_skinmap_file = NULL;
+#endif
+
+#if 0
+FILE *framepsnr;
+FILE *kf_list;
+FILE *keyfile;
+#endif
+
+#if 0
+extern int skip_true_count;
+extern int skip_false_count;
+#endif
+
+#ifdef SPEEDSTATS
+unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+unsigned int tot_pm = 0;
+unsigned int cnt_pm = 0;
+unsigned int tot_ef = 0;
+unsigned int cnt_ef = 0;
+#endif
+
+#ifdef MODE_STATS
+extern unsigned __int64 Sectionbits[50];
+extern int y_modes[5];
+extern int uv_modes[4];
+extern int b_modes[10];
+
+extern int inter_y_modes[10];
+extern int inter_uv_modes[4];
+extern unsigned int inter_b_modes[15];
+#endif
+
+extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
+
+extern const int qrounding_factors[129];
+extern const int qzbin_factors[129];
+extern void vp8cx_init_quantizer(VP8_COMP *cpi);
+extern const int vp8cx_base_skip_false_prob[128];
+
+/* Tables relating active max Q to active min Q */
+static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
+ 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
+ 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
+};
+static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
+ 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
+ 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
+};
+static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
+ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
+ 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
+ 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
+ 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
+ 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
+ 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
+};
+static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
+ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
+ 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
+ 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
+ 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
+ 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
+static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
+ 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
+ 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
+ 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
+ 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
+ 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
+ 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
+};
+static const unsigned char inter_minq[QINDEX_RANGE] = {
+ 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
+ 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
+ 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
+ 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
+};
+
+#ifdef PACKET_TESTING
+extern FILE *vpxlogc;
+#endif
+
+void vp8_save_layer_context(VP8_COMP *cpi) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
+
+ /* Save layer dependent coding state */
+ lc->target_bandwidth = cpi->target_bandwidth;
+ lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
+ lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
+ lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
+ lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
+ lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
+ lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
+ lc->buffer_level = cpi->buffer_level;
+ lc->bits_off_target = cpi->bits_off_target;
+ lc->total_actual_bits = cpi->total_actual_bits;
+ lc->worst_quality = cpi->worst_quality;
+ lc->active_worst_quality = cpi->active_worst_quality;
+ lc->best_quality = cpi->best_quality;
+ lc->active_best_quality = cpi->active_best_quality;
+ lc->ni_av_qi = cpi->ni_av_qi;
+ lc->ni_tot_qi = cpi->ni_tot_qi;
+ lc->ni_frames = cpi->ni_frames;
+ lc->avg_frame_qindex = cpi->avg_frame_qindex;
+ lc->rate_correction_factor = cpi->rate_correction_factor;
+ lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
+ lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
+ lc->zbin_over_quant = cpi->mb.zbin_over_quant;
+ lc->inter_frame_target = cpi->inter_frame_target;
+ lc->total_byte_count = cpi->total_byte_count;
+ lc->filter_level = cpi->common.filter_level;
+ lc->frames_since_last_drop_overshoot = cpi->frames_since_last_drop_overshoot;
+ lc->force_maxqp = cpi->force_maxqp;
+ lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
+ lc->last_q[0] = cpi->last_q[0];
+ lc->last_q[1] = cpi->last_q[1];
+
+ memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
+ sizeof(cpi->mb.count_mb_ref_frame_usage));
+}
+
+void vp8_restore_layer_context(VP8_COMP *cpi, const int layer) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[layer];
+
+ /* Restore layer dependent coding state */
+ cpi->current_layer = layer;
+ cpi->target_bandwidth = lc->target_bandwidth;
+ cpi->oxcf.target_bandwidth = lc->target_bandwidth;
+ cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
+ cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
+ cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
+ cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
+ cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
+ cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
+ cpi->buffer_level = lc->buffer_level;
+ cpi->bits_off_target = lc->bits_off_target;
+ cpi->total_actual_bits = lc->total_actual_bits;
+ cpi->active_worst_quality = lc->active_worst_quality;
+ cpi->active_best_quality = lc->active_best_quality;
+ cpi->ni_av_qi = lc->ni_av_qi;
+ cpi->ni_tot_qi = lc->ni_tot_qi;
+ cpi->ni_frames = lc->ni_frames;
+ cpi->avg_frame_qindex = lc->avg_frame_qindex;
+ cpi->rate_correction_factor = lc->rate_correction_factor;
+ cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
+ cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
+ cpi->mb.zbin_over_quant = lc->zbin_over_quant;
+ cpi->inter_frame_target = lc->inter_frame_target;
+ cpi->total_byte_count = lc->total_byte_count;
+ cpi->common.filter_level = lc->filter_level;
+ cpi->frames_since_last_drop_overshoot = lc->frames_since_last_drop_overshoot;
+ cpi->force_maxqp = lc->force_maxqp;
+ cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
+ cpi->last_q[0] = lc->last_q[0];
+ cpi->last_q[1] = lc->last_q[1];
+
+ memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
+ sizeof(cpi->mb.count_mb_ref_frame_usage));
+}
+
+static int rescale(int val, int num, int denom) {
+ int64_t llnum = num;
+ int64_t llden = denom;
+ int64_t llval = val;
+
+ return (int)(llval * llnum / llden);
+}
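+/* Illustration (not in the original source): buffer levels arrive in
+ * milliseconds and are converted to bits, e.g.
+ * rescale(5000, 256000, 1000) = 5000 * 256000 / 1000 = 1280000 bits for a
+ * 5 s buffer at 256 kbit/s; the product is widened to 64 bits to avoid
+ * overflow.
+ */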
+
+void vp8_init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
+ const int layer,
+ double prev_layer_framerate) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[layer];
+
+ lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
+ lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
+
+ lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
+ lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
+ lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
+
+ lc->starting_buffer_level =
+ rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
+
+ if (oxcf->optimal_buffer_level == 0) {
+ lc->optimal_buffer_level = lc->target_bandwidth / 8;
+ } else {
+ lc->optimal_buffer_level =
+ rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
+ }
+
+ if (oxcf->maximum_buffer_size == 0) {
+ lc->maximum_buffer_size = lc->target_bandwidth / 8;
+ } else {
+ lc->maximum_buffer_size =
+ rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
+ }
+
+ /* Work out the average size of a frame within this layer */
+ if (layer > 0) {
+ lc->avg_frame_size_for_layer =
+ (int)round((cpi->oxcf.target_bitrate[layer] -
+ cpi->oxcf.target_bitrate[layer - 1]) *
+ 1000 / (lc->framerate - prev_layer_framerate));
+ }
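+  /* e.g. (illustrative): target_bitrate[] = { 300, 600 } kbit/s with layer
+   * framerates of 15 and 30 fps gives layer 1 an average frame size of
+   * (600 - 300) * 1000 / (30 - 15) = 20000 bits.
+   */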
+
+ lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
+ lc->active_best_quality = cpi->oxcf.best_allowed_q;
+ lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
+
+ lc->buffer_level = lc->starting_buffer_level;
+ lc->bits_off_target = lc->starting_buffer_level;
+
+ lc->total_actual_bits = 0;
+ lc->ni_av_qi = 0;
+ lc->ni_tot_qi = 0;
+ lc->ni_frames = 0;
+ lc->rate_correction_factor = 1.0;
+ lc->key_frame_rate_correction_factor = 1.0;
+ lc->gf_rate_correction_factor = 1.0;
+ lc->inter_frame_target = 0;
+}
+
+// Upon a run-time change in temporal layers, reset the layer context parameters
+// for any "new" layers. For "existing" layers, let them inherit the parameters
+// from the previous layer state (at the same layer #). In the future we may
+// want to map the previous layer state(s) to the "new" ones more precisely.
+void vp8_reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
+ const int prev_num_layers) {
+ int i;
+ double prev_layer_framerate = 0;
+ const int curr_num_layers = cpi->oxcf.number_of_layers;
+ // If the previous state was 1 layer, get current layer context from cpi.
+ // We need this to set the layer context for the new layers below.
+ if (prev_num_layers == 1) {
+ cpi->current_layer = 0;
+ vp8_save_layer_context(cpi);
+ }
+ for (i = 0; i < curr_num_layers; ++i) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ if (i >= prev_num_layers) {
+ vp8_init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
+ }
+ // The initial buffer levels are set based on their starting levels.
+ // We could set the buffer levels based on the previous state (normalized
+ // properly by the layer bandwidths) but we would need to keep track of
+ // the previous set of layer bandwidths (i.e., target_bitrate[i])
+ // before the layer change. For now, reset to the starting levels.
+ lc->buffer_level =
+ cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
+ lc->bits_off_target = lc->buffer_level;
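+    // Units note: starting_buffer_level_in_ms (ms) times target_bitrate[i]
+    // (kbit/s) yields bits directly, since the factors of 1000 cancel.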
+    // TODO(marpan): Should we set the rate_correction_factor and
+    // active_worst/best_quality to values derived from the previous layer
+    // state (to smooth out quality dips/rate fluctuations at the transition)?
+
+ // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
+ // is not set for 1 layer, and the vp8_restore_layer_context/save_context()
+ // are not called in the encoding loop, so we need to call it here to
+ // pass the layer context state to |cpi|.
+ if (curr_num_layers == 1) {
+ lc->target_bandwidth = cpi->oxcf.target_bandwidth;
+ lc->buffer_level =
+ cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
+ lc->bits_off_target = lc->buffer_level;
+ vp8_restore_layer_context(cpi, 0);
+ }
+ prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
+ }
+}
+
+static void setup_features(VP8_COMP *cpi) {
+  // If segmentation is enabled, set the update flags.
+ if (cpi->mb.e_mbd.segmentation_enabled) {
+ cpi->mb.e_mbd.update_mb_segmentation_map = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+ } else {
+ cpi->mb.e_mbd.update_mb_segmentation_map = 0;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 0;
+ }
+
+ cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
+ cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
+ memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
+ memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
+ memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
+ sizeof(cpi->mb.e_mbd.ref_lf_deltas));
+ memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
+ sizeof(cpi->mb.e_mbd.mode_lf_deltas));
+
+ set_default_lf_deltas(cpi);
+}
+
+static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
+
+static void initialize_enc(void) {
+ vpx_dsp_rtcd();
+ vp8_init_intra_predictors();
+}
+
+void vp8_initialize_enc(void) { once(initialize_enc); }
+
+static void dealloc_compressor_data(VP8_COMP *cpi) {
+ vpx_free(cpi->tplist);
+ cpi->tplist = NULL;
+
+ /* Delete last frame MV storage buffers */
+ vpx_free(cpi->lfmv);
+ cpi->lfmv = 0;
+
+ vpx_free(cpi->lf_ref_frame_sign_bias);
+ cpi->lf_ref_frame_sign_bias = 0;
+
+ vpx_free(cpi->lf_ref_frame);
+ cpi->lf_ref_frame = 0;
+
+  /* Delete segmentation map */
+ vpx_free(cpi->segmentation_map);
+ cpi->segmentation_map = 0;
+
+ vpx_free(cpi->active_map);
+ cpi->active_map = 0;
+
+ vp8_de_alloc_frame_buffers(&cpi->common);
+
+ vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
+ vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
+ dealloc_raw_frame_buffers(cpi);
+
+ vpx_free(cpi->tok);
+ cpi->tok = 0;
+
+ /* Structure used to monitor GF usage */
+ vpx_free(cpi->gf_active_flags);
+ cpi->gf_active_flags = 0;
+
+ /* Activity mask based per mb zbin adjustments */
+ vpx_free(cpi->mb_activity_map);
+ cpi->mb_activity_map = 0;
+
+ vpx_free(cpi->mb.pip);
+ cpi->mb.pip = 0;
+
+#if CONFIG_MULTITHREAD
+ vpx_free(cpi->mt_current_mb_col);
+ cpi->mt_current_mb_col = NULL;
+#endif
+}
+
+static void enable_segmentation(VP8_COMP *cpi) {
+ /* Set the appropriate feature bit */
+ cpi->mb.e_mbd.segmentation_enabled = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_map = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+}
+static void disable_segmentation(VP8_COMP *cpi) {
+ /* Clear the appropriate feature bit */
+ cpi->mb.e_mbd.segmentation_enabled = 0;
+}
+
+/* Valid values for a segment are 0 to 3.
+ * The segmentation map is arranged as [Rows][Columns].
+ */
+static void set_segmentation_map(VP8_COMP *cpi,
+ unsigned char *segmentation_map) {
+ /* Copy in the new segmentation map */
+ memcpy(cpi->segmentation_map, segmentation_map,
+ (cpi->common.mb_rows * cpi->common.mb_cols));
+
+ /* Signal that the map should be updated. */
+ cpi->mb.e_mbd.update_mb_segmentation_map = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+}
+
+/* The values given for each segment can be either deltas (from the default
+ * value chosen for the frame) or absolute values.
+ *
+ * Valid range for abs values is:
+ * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
+ * Valid range for delta values is:
+ * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
+ *
+ * abs_delta = SEGMENT_DELTADATA (deltas)
+ * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
+ *
+ */
+static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
+ unsigned char abs_delta) {
+ cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
+ memcpy(cpi->segment_feature_data, feature_data,
+ sizeof(cpi->segment_feature_data));
+}
+
+/* A simple function to cyclically refresh the background at a lower Q */
+static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
+ unsigned char *seg_map = cpi->segmentation_map;
+ signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
+ int i;
+ int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
+ int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
+
+ cpi->cyclic_refresh_q = Q / 2;
+
+ if (cpi->oxcf.screen_content_mode) {
+    // Modify quality ramp-up based on Q. Above some Q level, increase the
+    // number of blocks to be refreshed; below the threshold, reduce it.
+    // Turn it off under certain conditions, i.e., away from a key frame,
+    // and if we are at good quality (low Q) and most of the blocks were
+    // skip-encoded in the previous frame.
+ int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
+ if (Q >= qp_thresh) {
+ cpi->cyclic_refresh_mode_max_mbs_perframe =
+ (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
+ } else if (cpi->frames_since_key > 250 && Q < 20 &&
+ cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
+ cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
+ } else {
+ cpi->cyclic_refresh_mode_max_mbs_perframe =
+ (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
+ }
+ block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
+ }
+
+ // Set every macroblock to be eligible for update.
+ // For key frame this will reset seg map to 0.
+ memset(cpi->segmentation_map, 0, mbs_in_frame);
+
+ if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
+ /* Cycle through the macro_block rows */
+ /* MB loop to set local segmentation map */
+ i = cpi->cyclic_refresh_mode_index;
+ assert(i < mbs_in_frame);
+ do {
+      /* If the MB is a candidate for clean-up then mark it for possible
+       * boost/refresh (segment 1). The segment id may get reset to 0 later
+       * if the MB gets coded as anything other than last-frame (0,0), as
+       * only last-frame (0,0) MBs are eligible for refresh: that is to say,
+       * MBs likely to be background blocks.
+       */
+ if (cpi->cyclic_refresh_map[i] == 0) {
+ seg_map[i] = 1;
+ block_count--;
+ } else if (cpi->cyclic_refresh_map[i] < 0) {
+ cpi->cyclic_refresh_map[i]++;
+ }
+
+ i++;
+ if (i == mbs_in_frame) i = 0;
+
+ } while (block_count && i != cpi->cyclic_refresh_mode_index);
+
+ cpi->cyclic_refresh_mode_index = i;
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity > 0) {
+ if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
+ Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
+ (cpi->frames_since_key >
+ 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
+ // Under aggressive denoising, use segmentation to turn off loop
+ // filter below some qp thresh. The filter is reduced for all
+ // blocks that have been encoded as ZEROMV LAST x frames in a row,
+ // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
+ // This is to avoid "dot" artifacts that can occur from repeated
+ // loop filtering on noisy input source.
+ cpi->cyclic_refresh_q = Q;
+ // lf_adjustment = -MAX_LOOP_FILTER;
+ lf_adjustment = -40;
+ for (i = 0; i < mbs_in_frame; ++i) {
+ seg_map[i] = (cpi->consec_zero_last[i] >
+ cpi->denoiser.denoise_pars.consec_zerolast)
+ ? 1
+ : 0;
+ }
+ }
+ }
+#endif
+ }
+
+ /* Activate segmentation. */
+ cpi->mb.e_mbd.update_mb_segmentation_map = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+ enable_segmentation(cpi);
+
+ /* Set up the quant segment data */
+ feature_data[MB_LVL_ALT_Q][0] = 0;
+ feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
+ feature_data[MB_LVL_ALT_Q][2] = 0;
+ feature_data[MB_LVL_ALT_Q][3] = 0;
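+  /* e.g. for Q = 40, cyclic_refresh_q = 20, so segment 1 carries a q delta
+   * of -20 and the marked macroblocks are coded at a lower (better) Q.
+   */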
+
+ /* Set up the loop segment data */
+ feature_data[MB_LVL_ALT_LF][0] = 0;
+ feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
+ feature_data[MB_LVL_ALT_LF][2] = 0;
+ feature_data[MB_LVL_ALT_LF][3] = 0;
+
+ /* Initialise the feature data structure */
+ set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
+}
+
+static void compute_skin_map(VP8_COMP *cpi) {
+ int mb_row, mb_col, num_bl;
+ VP8_COMMON *cm = &cpi->common;
+ const uint8_t *src_y = cpi->Source->y_buffer;
+ const uint8_t *src_u = cpi->Source->u_buffer;
+ const uint8_t *src_v = cpi->Source->v_buffer;
+ const int src_ystride = cpi->Source->y_stride;
+ const int src_uvstride = cpi->Source->uv_stride;
+
+ const SKIN_DETECTION_BLOCK_SIZE bsize =
+ (cm->Width * cm->Height <= 352 * 288) ? SKIN_8X8 : SKIN_16X16;
+
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ num_bl = 0;
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ const int bl_index = mb_row * cm->mb_cols + mb_col;
+ cpi->skin_map[bl_index] =
+ vp8_compute_skin_block(src_y, src_u, src_v, src_ystride, src_uvstride,
+ bsize, cpi->consec_zero_last[bl_index], 0);
+ num_bl++;
+ src_y += 16;
+ src_u += 8;
+ src_v += 8;
+ }
+ src_y += (src_ystride << 4) - (num_bl << 4);
+ src_u += (src_uvstride << 3) - (num_bl << 3);
+ src_v += (src_uvstride << 3) - (num_bl << 3);
+ }
+
+  // Remove isolated skin blocks (none of their neighbors are skin) and
+  // isolated non-skin blocks (all of their neighbors are skin). Skip the
+  // boundary.
+ for (mb_row = 1; mb_row < cm->mb_rows - 1; mb_row++) {
+ for (mb_col = 1; mb_col < cm->mb_cols - 1; mb_col++) {
+ const int bl_index = mb_row * cm->mb_cols + mb_col;
+ int num_neighbor = 0;
+ int mi, mj;
+ int non_skin_threshold = 8;
+
+ for (mi = -1; mi <= 1; mi += 1) {
+ for (mj = -1; mj <= 1; mj += 1) {
+ int bl_neighbor_index = (mb_row + mi) * cm->mb_cols + mb_col + mj;
+ if (cpi->skin_map[bl_neighbor_index]) num_neighbor++;
+ }
+ }
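+      /* num_neighbor counts the whole 3x3 window, including the block
+       * itself: an isolated skin block sees num_neighbor < 2 (only itself),
+       * while a non-skin block surrounded by skin sees exactly 8.
+       */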
+
+ if (cpi->skin_map[bl_index] && num_neighbor < 2)
+ cpi->skin_map[bl_index] = 0;
+ if (!cpi->skin_map[bl_index] && num_neighbor == non_skin_threshold)
+ cpi->skin_map[bl_index] = 1;
+ }
+ }
+}
+
+static void set_default_lf_deltas(VP8_COMP *cpi) {
+ cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
+ cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
+
+ memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
+ memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
+
+ /* Test of ref frame deltas */
+ cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
+ cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
+ cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
+ cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
+
+ cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
+
+ if (cpi->oxcf.Mode == MODE_REALTIME) {
+ cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
+ } else {
+ cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
+ }
+
+ cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
+ cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
+}
+
+/* Convenience macros for mapping speed and mode into a continuous
+ * range
+ */
+#define GOOD(x) ((x) + 1)
+#define RT(x) ((x) + 7)
+
+static int speed_map(int speed, const int *map) {
+ int res;
+
+ do {
+ res = *map++;
+ } while (speed >= *map++);
+ return res;
+}
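+/* Worked example (illustrative): the maps below alternate (value, speed
+ * bound) pairs. With thresh_mult_map_znn and Speed = GOOD(3) = 4, the scan
+ * reads res = 0 (4 >= GOOD(2)), res = 1500 (4 >= GOOD(3)), then res = 2000
+ * and stops because 4 < RT(0) = 7, returning 2000.
+ */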
+
+static const int thresh_mult_map_znn[] = {
+ /* map common to zero, nearest, and near */
+ 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
+};
+
+static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
+ 2000, RT(0), 1000, RT(1),
+ 2000, RT(7), INT_MAX, INT_MAX };
+
+static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
+ 5000, GOOD(3), 7500, RT(0),
+ 2500, RT(1), 5000, RT(6),
+ INT_MAX, INT_MAX };
+
+static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
+ 2000, RT(0), 0, RT(1),
+ 1000, RT(2), 2000, RT(7),
+ INT_MAX, INT_MAX };
+
+static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
+ RT(0), 2000, INT_MAX };
+
+static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
+ 2500, GOOD(5), 4000, RT(0),
+ 2000, RT(2), 2500, RT(5),
+ 4000, INT_MAX };
+
+static const int thresh_mult_map_split1[] = {
+ 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
+ RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
+};
+
+static const int thresh_mult_map_split2[] = {
+ 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
+ RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
+};
+
+static const int mode_check_freq_map_zn2[] = {
+ /* {zero,nearest}{2,3} */
+ 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
+};
+
+static const int mode_check_freq_map_vhbpred[] = { 0, GOOD(5), 2, RT(0),
+ 0, RT(3), 2, RT(5),
+ 4, INT_MAX };
+
+static const int mode_check_freq_map_near2[] = {
+ 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
+ RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
+};
+
+static const int mode_check_freq_map_new1[] = {
+ 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
+};
+
+static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
+ 0, RT(3), 4, RT(10),
+ 1 << 3, RT(11), 1 << 4, RT(12),
+ 1 << 5, INT_MAX };
+
+static const int mode_check_freq_map_split1[] = { 0, GOOD(2), 2, GOOD(3),
+ 7, RT(1), 2, RT(2),
+ 7, INT_MAX };
+
+static const int mode_check_freq_map_split2[] = { 0, GOOD(1), 2, GOOD(2),
+ 4, GOOD(3), 15, RT(1),
+ 4, RT(2), 15, INT_MAX };
+
+void vp8_set_speed_features(VP8_COMP *cpi) {
+ SPEED_FEATURES *sf = &cpi->sf;
+ int Mode = cpi->compressor_speed;
+ int Speed = cpi->Speed;
+ int Speed2;
+ int i;
+ VP8_COMMON *cm = &cpi->common;
+ int last_improved_quant = sf->improved_quant;
+ int ref_frames;
+
+ /* Initialise default mode frequency sampling variables */
+ for (i = 0; i < MAX_MODES; ++i) {
+ cpi->mode_check_freq[i] = 0;
+ }
+
+ cpi->mb.mbs_tested_so_far = 0;
+ cpi->mb.mbs_zero_last_dot_suppress = 0;
+
+ /* best quality defaults */
+ sf->RD = 1;
+ sf->search_method = NSTEP;
+ sf->improved_quant = 1;
+ sf->improved_dct = 1;
+ sf->auto_filter = 1;
+ sf->recode_loop = 1;
+ sf->quarter_pixel_search = 1;
+ sf->half_pixel_search = 1;
+ sf->iterative_sub_pixel = 1;
+ sf->optimize_coefficients = 1;
+ sf->use_fastquant_for_pick = 0;
+ sf->no_skip_block4x4_search = 1;
+
+ sf->first_step = 0;
+ sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
+ sf->improved_mv_pred = 1;
+
+ /* default thresholds to 0 */
+ for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
+
+ /* Count enabled references */
+ ref_frames = 1;
+ if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
+ if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
+ if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
+
+ /* Convert speed to continuous range, with clamping */
+ if (Mode == 0) {
+ Speed = 0;
+ } else if (Mode == 2) {
+ Speed = RT(Speed);
+ } else {
+ if (Speed > 5) Speed = 5;
+ Speed = GOOD(Speed);
+ }
+
+ sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
+ sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
+
+ sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
+ sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
+ sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
+ speed_map(Speed, thresh_mult_map_znn);
+
+ sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
+ speed_map(Speed, thresh_mult_map_vhpred);
+ sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
+ sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
+ sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
+ sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
+ speed_map(Speed, thresh_mult_map_new2);
+ sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
+ sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
+ speed_map(Speed, thresh_mult_map_split2);
+
+ // Special case for temporal layers.
+ // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
+ // used as second reference. We don't modify thresholds for ALTREF case
+ // since ALTREF is usually used as long-term reference in temporal layers.
+ if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
+ (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
+ (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
+ if (cpi->closest_reference_frame == GOLDEN_FRAME) {
+ sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
+ sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
+ sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
+ } else {
+ sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
+ sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
+ sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
+ }
+ }
+
+ cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
+ cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
+ cpi->mode_check_freq[THR_DC] = 0; /* always */
+
+ cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
+ cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
+ speed_map(Speed, mode_check_freq_map_zn2);
+
+ cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
+ speed_map(Speed, mode_check_freq_map_near2);
+
+ cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
+ cpi->mode_check_freq[THR_B_PRED] =
+ speed_map(Speed, mode_check_freq_map_vhbpred);
+
+ // For real-time mode at speed 10 keep the mode_check_freq threshold
+ // for NEW1 similar to that of speed 9.
+ Speed2 = Speed;
+ if (cpi->Speed == 10 && Mode == 2) Speed2 = RT(9);
+ cpi->mode_check_freq[THR_NEW1] = speed_map(Speed2, mode_check_freq_map_new1);
+
+ cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
+ speed_map(Speed, mode_check_freq_map_new2);
+
+ cpi->mode_check_freq[THR_SPLIT1] =
+ speed_map(Speed, mode_check_freq_map_split1);
+ cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
+ speed_map(Speed, mode_check_freq_map_split2);
+ Speed = cpi->Speed;
+ switch (Mode) {
+#if !CONFIG_REALTIME_ONLY
+ case 0: /* best quality mode */
+ sf->first_step = 0;
+ sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
+ break;
+ case 1:
+ case 3:
+ if (Speed > 0) {
+ /* Disable coefficient optimization above speed 0 */
+ sf->optimize_coefficients = 0;
+ sf->use_fastquant_for_pick = 1;
+ sf->no_skip_block4x4_search = 0;
+
+ sf->first_step = 1;
+ }
+
+ if (Speed > 2) {
+ sf->improved_quant = 0;
+ sf->improved_dct = 0;
+
+ /* Only do recode loop on key frames, golden frames and
+ * alt ref frames
+ */
+ sf->recode_loop = 2;
+ }
+
+ if (Speed > 3) {
+ sf->auto_filter = 1;
+ sf->recode_loop = 0; /* recode loop off */
+ sf->RD = 0; /* Turn rd off */
+ }
+
+ if (Speed > 4) {
+ sf->auto_filter = 0; /* Faster selection of loop filter */
+ }
+
+ break;
+#endif
+ case 2:
+ sf->optimize_coefficients = 0;
+ sf->recode_loop = 0;
+ sf->auto_filter = 1;
+ sf->iterative_sub_pixel = 1;
+ sf->search_method = NSTEP;
+
+ if (Speed > 0) {
+ sf->improved_quant = 0;
+ sf->improved_dct = 0;
+
+ sf->use_fastquant_for_pick = 1;
+ sf->no_skip_block4x4_search = 0;
+ sf->first_step = 1;
+ }
+
+ if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
+
+ if (Speed > 3) {
+ sf->RD = 0;
+ sf->auto_filter = 1;
+ }
+
+ if (Speed > 4) {
+ sf->auto_filter = 0; /* Faster selection of loop filter */
+ sf->search_method = HEX;
+ sf->iterative_sub_pixel = 0;
+ }
+
+ if (Speed > 6) {
+ unsigned int sum = 0;
+ unsigned int total_mbs = cm->MBs;
+ int thresh;
+ unsigned int total_skip;
+
+ int min = 2000;
+
+ if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
+
+ min >>= 7;
+
+ for (i = 0; i < min; ++i) {
+ sum += cpi->mb.error_bins[i];
+ }
+
+ total_skip = sum;
+ sum = 0;
+
+      /* i continues from where the loop above stopped, so thresh (i << 7)
+       * starts beyond the encode-breakout range. */
+ for (; i < 1024; ++i) {
+ sum += cpi->mb.error_bins[i];
+
+ if (10 * sum >=
+ (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
+ break;
+ }
+ }
+
+ i--;
+ thresh = (i << 7);
+
+ if (thresh < 2000) thresh = 2000;
+
+ if (ref_frames > 1) {
+ sf->thresh_mult[THR_NEW1] = thresh;
+ sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
+ sf->thresh_mult[THR_NEAR1] = thresh >> 1;
+ }
+
+ if (ref_frames > 2) {
+ sf->thresh_mult[THR_NEW2] = thresh << 1;
+ sf->thresh_mult[THR_NEAREST2] = thresh;
+ sf->thresh_mult[THR_NEAR2] = thresh;
+ }
+
+ if (ref_frames > 3) {
+ sf->thresh_mult[THR_NEW3] = thresh << 1;
+ sf->thresh_mult[THR_NEAREST3] = thresh;
+ sf->thresh_mult[THR_NEAR3] = thresh;
+ }
+
+ sf->improved_mv_pred = 0;
+ }
+
+ if (Speed > 8) sf->quarter_pixel_search = 0;
+
+ if (cm->version == 0) {
+ cm->filter_type = NORMAL_LOOPFILTER;
+
+ if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
+ } else {
+ cm->filter_type = SIMPLE_LOOPFILTER;
+ }
+
+ /* This has a big hit on quality. Last resort */
+ if (Speed >= 15) sf->half_pixel_search = 0;
+
+ memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
+
+ } /* switch */
+
+ /* Slow quant, dct and trellis not worthwhile for first pass
+ * so make sure they are always turned off.
+ */
+ if (cpi->pass == 1) {
+ sf->improved_quant = 0;
+ sf->optimize_coefficients = 0;
+ sf->improved_dct = 0;
+ }
+
+ if (cpi->sf.search_method == NSTEP) {
+ vp8_init3smotion_compensation(&cpi->mb,
+ cm->yv12_fb[cm->lst_fb_idx].y_stride);
+ } else if (cpi->sf.search_method == DIAMOND) {
+ vp8_init_dsmotion_compensation(&cpi->mb,
+ cm->yv12_fb[cm->lst_fb_idx].y_stride);
+ }
+
+ if (cpi->sf.improved_dct) {
+ cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
+ cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
+ } else {
+ /* No fast FDCT defined for any platform at this time. */
+ cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
+ cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
+ }
+
+ cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
+
+ if (cpi->sf.improved_quant) {
+ cpi->mb.quantize_b = vp8_regular_quantize_b;
+ } else {
+ cpi->mb.quantize_b = vp8_fast_quantize_b;
+ }
+ if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
+
+ if (cpi->sf.iterative_sub_pixel == 1) {
+ cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
+ } else if (cpi->sf.quarter_pixel_search) {
+ cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
+ } else if (cpi->sf.half_pixel_search) {
+ cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
+ } else {
+ cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
+ }
+
+ if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
+ cpi->mb.optimize = 1;
+ } else {
+ cpi->mb.optimize = 0;
+ }
+
+ if (cpi->common.full_pixel) {
+ cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
+ }
+
+#ifdef SPEEDSTATS
+ frames_at_speed[cpi->Speed]++;
+#endif
+}
+#undef GOOD
+#undef RT
+
+static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
+#if VP8_TEMPORAL_ALT_REF
+ int width = (cpi->oxcf.Width + 15) & ~15;
+ int height = (cpi->oxcf.Height + 15) & ~15;
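+  /* (x + 15) & ~15 rounds up to the next multiple of 16, e.g. 100 -> 112,
+   * so the alt-ref buffer spans a whole number of 16x16 macroblocks. */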
+#endif
+
+ cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
+ cpi->oxcf.lag_in_frames);
+ if (!cpi->lookahead) {
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate lag buffers");
+ }
+
+#if VP8_TEMPORAL_ALT_REF
+
+ if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
+ VP8BORDERINPIXELS)) {
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate altref buffer");
+ }
+
+#endif
+}
+
+static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
+#if VP8_TEMPORAL_ALT_REF
+ vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
+#endif
+ vp8_lookahead_destroy(cpi->lookahead);
+}
+
+static int vp8_alloc_partition_data(VP8_COMP *cpi) {
+ vpx_free(cpi->mb.pip);
+
+ cpi->mb.pip =
+ vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
+ sizeof(PARTITION_INFO));
+ if (!cpi->mb.pip) return 1;
+
+ cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
+
+ return 0;
+}
+
+void vp8_alloc_compressor_data(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ int width = cm->Width;
+ int height = cm->Height;
+
+ if (vp8_alloc_frame_buffers(cm, width, height)) {
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffers");
+ }
+
+ if (vp8_alloc_partition_data(cpi)) {
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate partition data");
+ }
+
+ if ((width & 0xf) != 0) width += 16 - (width & 0xf);
+
+ if ((height & 0xf) != 0) height += 16 - (height & 0xf);
+
+ if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
+ VP8BORDERINPIXELS)) {
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate last frame buffer");
+ }
+
+ if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
+ VP8BORDERINPIXELS)) {
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate scaled source buffer");
+ }
+
+ vpx_free(cpi->tok);
+
+ {
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
+#else
+ unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
+#endif
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->tok,
+ vpx_calloc(tokens, sizeof(*cpi->tok)));
+ }
+
+  /* Data used for real-time VC mode to see if gf needs refreshing */
+ cpi->zeromv_count = 0;
+
+ /* Structures used to monitor GF usage */
+ vpx_free(cpi->gf_active_flags);
+ CHECK_MEM_ERROR(
+ &cpi->common.error, cpi->gf_active_flags,
+ vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
+ cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
+
+ vpx_free(cpi->mb_activity_map);
+ CHECK_MEM_ERROR(
+ &cpi->common.error, cpi->mb_activity_map,
+ vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
+
+ /* allocate memory for storing last frame's MVs for MV prediction. */
+ vpx_free(cpi->lfmv);
+ CHECK_MEM_ERROR(
+ &cpi->common.error, cpi->lfmv,
+ vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2), sizeof(*cpi->lfmv)));
+ vpx_free(cpi->lf_ref_frame_sign_bias);
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->lf_ref_frame_sign_bias,
+ vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
+ sizeof(*cpi->lf_ref_frame_sign_bias)));
+ vpx_free(cpi->lf_ref_frame);
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->lf_ref_frame,
+ vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
+ sizeof(*cpi->lf_ref_frame)));
+
+ /* Create the encoder segmentation map and set all entries to 0 */
+ vpx_free(cpi->segmentation_map);
+ CHECK_MEM_ERROR(
+ &cpi->common.error, cpi->segmentation_map,
+ vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
+ cpi->cyclic_refresh_mode_index = 0;
+ vpx_free(cpi->active_map);
+ CHECK_MEM_ERROR(
+ &cpi->common.error, cpi->active_map,
+ vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->active_map)));
+ memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
+
+#if CONFIG_MULTITHREAD
+ if (width < 640) {
+ cpi->mt_sync_range = 1;
+ } else if (width <= 1280) {
+ cpi->mt_sync_range = 4;
+ } else if (width <= 2560) {
+ cpi->mt_sync_range = 8;
+ } else {
+ cpi->mt_sync_range = 16;
+ }
+
+ if (cpi->oxcf.multi_threaded > 1) {
+ int i;
+
+ vpx_free(cpi->mt_current_mb_col);
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->mt_current_mb_col,
+ vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
+ for (i = 0; i < cm->mb_rows; ++i)
+ vpx_atomic_init(&cpi->mt_current_mb_col[i], 0);
+ }
+
+#endif
+
+ vpx_free(cpi->tplist);
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->tplist,
+ vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity > 0) {
+ vp8_denoiser_free(&cpi->denoiser);
+ if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
+ cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate denoiser");
+ }
+ }
+#endif
+}
+
+/* Quant MOD: maps the user quantizer range (0-63) to internal q indices
+ * (0-127). */
+static const int q_trans[] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
+ 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
+ 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
+ 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
+};
+
+int vp8_reverse_trans(int x) {
+ int i;
+
+ for (i = 0; i < 64; ++i) {
+ if (q_trans[i] >= x) return i;
+ }
+
+ return 63;
+}
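+/* e.g. vp8_reverse_trans(7) == 6, since q_trans[6] == 7 is the first entry
+ * reaching 7: the two functions map between the user-visible 0-63 quantizer
+ * range and the internal 0-127 q index.
+ */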
+void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
+ if (framerate < .1) framerate = 30;
+
+ cpi->framerate = framerate;
+ cpi->output_framerate = framerate;
+ cpi->per_frame_bandwidth =
+ (int)round(cpi->oxcf.target_bandwidth / cpi->output_framerate);
+ cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
+ cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
+ cpi->oxcf.two_pass_vbrmin_section / 100);
+
+ /* Set Maximum gf/arf interval */
+ cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
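+  /* e.g. (illustrative) at 30 fps with target_bandwidth = 600000 bit/s this
+   * gives per_frame_bandwidth = 20000 bits and max_gf_interval = 17 (before
+   * the clamping below). */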
+
+ if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
+
+ /* Extended interval for genuinely static scenes */
+ cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
+
+  /* Special conditions when the alt-ref frame is enabled in lagged
+   * compress mode */
+ if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
+ if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
+ cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+ }
+
+ if (cpi->twopass.static_scene_max_gf_interval >
+ cpi->oxcf.lag_in_frames - 1) {
+ cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+ }
+ }
+
+ if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
+ cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
+ }
+}
+
+static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
+ VP8_COMMON *cm = &cpi->common;
+
+ cpi->oxcf = *oxcf;
+
+ cpi->auto_gold = 1;
+ cpi->auto_adjust_gold_quantizer = 1;
+
+ cm->version = oxcf->Version;
+ vp8_setup_version(cm);
+
+ /* Frame rate is not available on the first frame, as it's derived from
+ * the observed timestamps. The actual value used here doesn't matter
+ * too much, as it will adapt quickly.
+ */
+ if (oxcf->timebase.num > 0) {
+ cpi->framerate =
+ (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
+ } else {
+ cpi->framerate = 30;
+ }
+
+ /* If the reciprocal of the timebase seems like a reasonable framerate,
+ * then use that as a guess, otherwise use 30.
+ */
+ if (cpi->framerate > 180) cpi->framerate = 30;
+
+ cpi->ref_framerate = cpi->framerate;
+
+ cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
+
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 1;
+ cm->refresh_entropy_probs = 1;
+
+ /* change includes all joint functionality */
+ vp8_change_config(cpi, oxcf);
+
+ /* Initialize active best and worst q and average q values. */
+ cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
+ cpi->active_best_quality = cpi->oxcf.best_allowed_q;
+ cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
+
+ /* Initialise the starting buffer levels */
+ cpi->buffer_level = cpi->oxcf.starting_buffer_level;
+ cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
+
+ cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
+ cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
+ cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
+ cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
+
+ cpi->total_actual_bits = 0;
+ cpi->total_target_vs_actual = 0;
+
+  /* Temporal scalability */
+ if (cpi->oxcf.number_of_layers > 1) {
+ unsigned int i;
+ double prev_layer_framerate = 0;
+
+ for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
+ vp8_init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
+ prev_layer_framerate =
+ cpi->output_framerate / cpi->oxcf.rate_decimator[i];
+ }
+ }
+
+#if VP8_TEMPORAL_ALT_REF
+ {
+ int i;
+
+ cpi->fixed_divide[0] = 0;
+
+ for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
+ }
+#endif
+}
+
+void vp8_update_layer_contexts(VP8_COMP *cpi) {
+ VP8_CONFIG *oxcf = &cpi->oxcf;
+
+ /* Update snapshots of the layer contexts to reflect new parameters */
+ if (oxcf->number_of_layers > 1) {
+ unsigned int i;
+ double prev_layer_framerate = 0;
+
+ assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
+ for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+
+ lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
+ lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
+
+ lc->starting_buffer_level = rescale(
+ (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
+
+ if (oxcf->optimal_buffer_level == 0) {
+ lc->optimal_buffer_level = lc->target_bandwidth / 8;
+ } else {
+ lc->optimal_buffer_level = rescale(
+ (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
+ }
+
+ if (oxcf->maximum_buffer_size == 0) {
+ lc->maximum_buffer_size = lc->target_bandwidth / 8;
+ } else {
+ lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
+ lc->target_bandwidth, 1000);
+ }
+
+ /* Work out the average size of a frame within this layer */
+ if (i > 0) {
+ lc->avg_frame_size_for_layer =
+ (int)round((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
+ 1000 / (lc->framerate - prev_layer_framerate));
+ }
+
+ prev_layer_framerate = lc->framerate;
+ }
+ }
+}
+
+void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
+ VP8_COMMON *cm = &cpi->common;
+ int last_w, last_h;
+ unsigned int prev_number_of_layers;
+ unsigned int raw_target_rate;
+
+ if (!cpi) return;
+
+ if (!oxcf) return;
+
+ if (cm->version != oxcf->Version) {
+ cm->version = oxcf->Version;
+ vp8_setup_version(cm);
+ }
+
+ last_w = cpi->oxcf.Width;
+ last_h = cpi->oxcf.Height;
+ prev_number_of_layers = cpi->oxcf.number_of_layers;
+
+ if (cpi->initial_width) {
+ // TODO(https://crbug.com/1486441): Allow changing thread counts; the
+ // allocation is done once in vp8_create_compressor().
+ oxcf->multi_threaded = cpi->oxcf.multi_threaded;
+ }
+
+ cpi->oxcf = *oxcf;
+
+ switch (cpi->oxcf.Mode) {
+ case MODE_REALTIME:
+ cpi->pass = 0;
+ cpi->compressor_speed = 2;
+
+ if (cpi->oxcf.cpu_used < -16) {
+ cpi->oxcf.cpu_used = -16;
+ }
+
+ if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
+
+ break;
+
+ case MODE_GOODQUALITY:
+ cpi->pass = 0;
+ cpi->compressor_speed = 1;
+
+ if (cpi->oxcf.cpu_used < -5) {
+ cpi->oxcf.cpu_used = -5;
+ }
+
+ if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
+
+ break;
+
+ case MODE_BESTQUALITY:
+ cpi->pass = 0;
+ cpi->compressor_speed = 0;
+ break;
+
+ case MODE_FIRSTPASS:
+ cpi->pass = 1;
+ cpi->compressor_speed = 1;
+ break;
+ case MODE_SECONDPASS:
+ cpi->pass = 2;
+ cpi->compressor_speed = 1;
+
+ if (cpi->oxcf.cpu_used < -5) {
+ cpi->oxcf.cpu_used = -5;
+ }
+
+ if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
+
+ break;
+ case MODE_SECONDPASS_BEST:
+ cpi->pass = 2;
+ cpi->compressor_speed = 0;
+ break;
+ }
+
+ if (cpi->pass == 0) cpi->auto_worst_q = 1;
+
+ cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
+ cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
+ cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
+
+ if (oxcf->fixed_q >= 0) {
+ if (oxcf->worst_allowed_q < 0) {
+ cpi->oxcf.fixed_q = q_trans[0];
+ } else {
+ cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
+ }
+
+ if (oxcf->alt_q < 0) {
+ cpi->oxcf.alt_q = q_trans[0];
+ } else {
+ cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
+ }
+
+ if (oxcf->key_q < 0) {
+ cpi->oxcf.key_q = q_trans[0];
+ } else {
+ cpi->oxcf.key_q = q_trans[oxcf->key_q];
+ }
+
+ if (oxcf->gold_q < 0) {
+ cpi->oxcf.gold_q = q_trans[0];
+ } else {
+ cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
+ }
+ }
+
+ cpi->ext_refresh_frame_flags_pending = 0;
+
+ cpi->baseline_gf_interval =
+ cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
+
+ // GF behavior for 1 pass CBR, used when error_resilience is off.
+ if (!cpi->oxcf.error_resilient_mode &&
+ cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
+ cpi->oxcf.Mode == MODE_REALTIME)
+ cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
+
+#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ cpi->oxcf.token_partitions = 3;
+#endif
+
+ if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
+ cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
+ }
+
+ setup_features(cpi);
+
+ if (!cpi->use_roi_static_threshold) {
+ int i;
+ for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
+ cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
+ }
+ }
+
+ /* At the moment the first order values may not be > MAXQ */
+ if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
+
+ /* local file playback mode == really big buffer */
+ if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
+ cpi->oxcf.starting_buffer_level = 60000;
+ cpi->oxcf.optimal_buffer_level = 60000;
+ cpi->oxcf.maximum_buffer_size = 240000;
+ cpi->oxcf.starting_buffer_level_in_ms = 60000;
+ cpi->oxcf.optimal_buffer_level_in_ms = 60000;
+ cpi->oxcf.maximum_buffer_size_in_ms = 240000;
+ }
+
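+  /* Cap the target at the raw data rate: Width * Height pixels at
+   * 8 * 3 = 24 bits per pixel, times the framerate, with the division by
+   * 1000 expressing the result in kbit/s to match target_bandwidth's
+   * units at this point.
+   */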
+ raw_target_rate = (unsigned int)((int64_t)cpi->oxcf.Width * cpi->oxcf.Height *
+ 8 * 3 * cpi->framerate / 1000);
+ if (cpi->oxcf.target_bandwidth > raw_target_rate)
+ cpi->oxcf.target_bandwidth = raw_target_rate;
+ /* Convert target bandwidth from Kbit/s to Bit/s */
+ cpi->oxcf.target_bandwidth *= 1000;
+
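+  /* The buffer levels are specified in milliseconds; rescale() converts
+   * them to bits: level_ms * target_bandwidth (bit/s) / 1000.
+   */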
+ cpi->oxcf.starting_buffer_level = rescale(
+ (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
+
+ /* Set or reset optimal and maximum buffer levels. */
+ if (cpi->oxcf.optimal_buffer_level == 0) {
+ cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
+ } else {
+ cpi->oxcf.optimal_buffer_level = rescale(
+ (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
+ }
+
+ if (cpi->oxcf.maximum_buffer_size == 0) {
+ cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
+ } else {
+ cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
+ cpi->oxcf.target_bandwidth, 1000);
+ }
+ // Under a configuration change, where maximum_buffer_size may change,
+ // keep buffer level clipped to the maximum allowed buffer size.
+ if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
+ cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
+ cpi->buffer_level = cpi->bits_off_target;
+ }
+
+ /* Set up frame rate and related parameters rate control values. */
+ vp8_new_framerate(cpi, cpi->framerate);
+
+ /* Set absolute upper and lower quality limits */
+ cpi->worst_quality = cpi->oxcf.worst_allowed_q;
+ cpi->best_quality = cpi->oxcf.best_allowed_q;
+
+ /* active values should only be modified if out of new range */
+ if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
+ cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
+ }
+ /* less likely */
+ else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
+ cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
+ }
+ if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
+ cpi->active_best_quality = cpi->oxcf.best_allowed_q;
+ }
+ /* less likely */
+ else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
+ cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
+ }
+
+ cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
+
+ cpi->cq_target_quality = cpi->oxcf.cq_level;
+
+ /* Only allow dropped frames in buffered mode */
+ cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
+
+ cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
+
+ // Check if the number of temporal layers has changed, and if so reset the
+ // pattern counter and set/initialize the temporal layer context for the
+ // new layer configuration.
+ if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
+ // If the number of temporal layers are changed we must start at the
+ // base of the pattern cycle, so set the layer id to 0 and reset
+ // the temporal pattern counter.
+ if (cpi->temporal_layer_id > 0) {
+ cpi->temporal_layer_id = 0;
+ }
+ cpi->temporal_pattern_counter = 0;
+ vp8_reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
+ }
+
+ if (!cpi->initial_width) {
+ cpi->initial_width = cpi->oxcf.Width;
+ cpi->initial_height = cpi->oxcf.Height;
+ }
+
+ cm->Width = cpi->oxcf.Width;
+ cm->Height = cpi->oxcf.Height;
+ assert(cm->Width <= cpi->initial_width);
+ assert(cm->Height <= cpi->initial_height);
+
+ /* TODO(jkoleszar): if an internal spatial resampling is active,
+ * and we downsize the input image, maybe we should clear the
+ * internal scale immediately rather than waiting for it to
+ * correct.
+ */
+
+ /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
+ if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
+
+ cm->sharpness_level = cpi->oxcf.Sharpness;
+
+ if (cm->horiz_scale != VP8E_NORMAL || cm->vert_scale != VP8E_NORMAL) {
+ int hr, hs, vr, vs;
+
+ Scale2Ratio(cm->horiz_scale, &hr, &hs);
+ Scale2Ratio(cm->vert_scale, &vr, &vs);
+
+ /* always go to the next whole number */
+ cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
+ cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
+ }
+
+ if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
+ cpi->force_next_frame_intra = 1;
+ }
+
+ if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
+ ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
+ cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
+ dealloc_raw_frame_buffers(cpi);
+ alloc_raw_frame_buffers(cpi);
+ vp8_alloc_compressor_data(cpi);
+ }
+
+ if (cpi->oxcf.fixed_q >= 0) {
+ cpi->last_q[0] = cpi->oxcf.fixed_q;
+ cpi->last_q[1] = cpi->oxcf.fixed_q;
+ }
+
+ cpi->Speed = cpi->oxcf.cpu_used;
+
+  /* force allow_lag to 0 if lag_in_frames is 0 */
+ if (cpi->oxcf.lag_in_frames == 0) {
+ cpi->oxcf.allow_lag = 0;
+ }
+ /* Limit on lag buffers as these are not currently dynamically allocated */
+ else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
+ cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
+ }
+
+ /* YX Temp */
+ cpi->alt_ref_source = NULL;
+ cpi->is_src_frame_alt_ref = 0;
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+ if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
+ int width = (cpi->oxcf.Width + 15) & ~15;
+ int height = (cpi->oxcf.Height + 15) & ~15;
+ if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
+ cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate denoiser");
+ }
+ }
+ }
+#endif
+
+#if 0
+ /* Experimental RD Code */
+ cpi->frame_distortion = 0;
+ cpi->last_frame_distortion = 0;
+#endif
+}
+
+#ifndef M_LOG2_E
+#define M_LOG2_E 0.693147180559945309417
+#endif
+#define log2f(x) (log(x) / (float)M_LOG2_E)
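+/* Note: the value defined above is ln(2) (~0.693), not log2(e), so
+ * log(x) / M_LOG2_E evaluates to log2(x) as intended; only the constant's
+ * name is misleading.
+ */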
+
+static void cal_mvsadcosts(int *mvsadcost[2]) {
+ int i = 1;
+
+ mvsadcost[0][0] = 300;
+ mvsadcost[1][0] = 300;
+
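+  /* The cost grows roughly logarithmically with MV magnitude,
+   * 256 * 2 * (log2(8 * i) + 0.6), mirroring the bit cost of coding
+   * larger vectors; negative offsets share the same cost.
+   */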
+ do {
+ double z = 256 * (2 * (log2f(8 * i) + .6));
+ mvsadcost[0][i] = (int)z;
+ mvsadcost[1][i] = (int)z;
+ mvsadcost[0][-i] = (int)z;
+ mvsadcost[1][-i] = (int)z;
+ } while (++i <= mvfp_max);
+}
+
+struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
+ int i;
+
+ VP8_COMP *cpi;
+ VP8_COMMON *cm;
+
+ cpi = vpx_memalign(32, sizeof(VP8_COMP));
+  /* Check that allocation succeeded */
+ if (!cpi) return 0;
+
+ cm = &cpi->common;
+
+ memset(cpi, 0, sizeof(VP8_COMP));
+
+ if (setjmp(cm->error.jmp)) {
+ cpi->common.error.setjmp = 0;
+ vp8_remove_compressor(&cpi);
+ return 0;
+ }
+
+ cpi->common.error.setjmp = 1;
+
+ CHECK_MEM_ERROR(
+ &cpi->common.error, cpi->mb.ss,
+ vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
+
+ vp8_create_common(&cpi->common);
+
+ init_config(cpi, oxcf);
+
+ memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
+ sizeof(vp8cx_base_skip_false_prob));
+ cpi->common.current_video_frame = 0;
+ cpi->temporal_pattern_counter = 0;
+ cpi->temporal_layer_id = -1;
+ cpi->kf_overspend_bits = 0;
+ cpi->kf_bitrate_adjustment = 0;
+ cpi->frames_till_gf_update_due = 0;
+ cpi->gf_overspend_bits = 0;
+ cpi->non_gf_bitrate_adjustment = 0;
+ cpi->prob_last_coded = 128;
+ cpi->prob_gf_coded = 128;
+ cpi->prob_intra_coded = 63;
+
+ /* Prime the recent reference frame usage counters.
+ * Hereafter they will be maintained as a sort of moving average
+ */
+ cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
+ cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
+ cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
+ cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
+
+ /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
+ cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
+
+ cpi->twopass.gf_decay_rate = 0;
+ cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
+
+ cpi->gold_is_last = 0;
+ cpi->alt_is_last = 0;
+ cpi->gold_is_alt = 0;
+
+ cpi->active_map_enabled = 0;
+
+ cpi->use_roi_static_threshold = 0;
+
+#if 0
+ /* Experimental code for lagged and one pass */
+ /* Initialise one_pass GF frames stats */
+ /* Update stats used for GF selection */
+ if (cpi->pass == 0)
+ {
+ cpi->one_pass_frame_index = 0;
+
+ for (i = 0; i < MAX_LAG_BUFFERS; ++i)
+ {
+ cpi->one_pass_frame_stats[i].frames_so_far = 0;
+ cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
+ cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
+ cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
+ cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
+ cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
+ cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
+ cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
+ cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
+ }
+ }
+#endif
+
+ cpi->mse_source_denoised = 0;
+
+ /* Should we use the cyclic refresh method.
+ * Currently there is no external control for this.
+ * Enable it for error_resilient_mode, or for 1 pass CBR mode.
+ */
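+  /* A note on the check below: oxcf.Mode <= 2 corresponds to the one-pass
+   * modes (MODE_REALTIME, MODE_GOODQUALITY, MODE_BESTQUALITY); the
+   * two-pass modes carry higher enum values.
+   */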
+ cpi->cyclic_refresh_mode_enabled =
+ (cpi->oxcf.error_resilient_mode ||
+ (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
+ cpi->oxcf.Mode <= 2));
+ cpi->cyclic_refresh_mode_max_mbs_perframe =
+ (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
+ if (cpi->oxcf.number_of_layers == 1) {
+ cpi->cyclic_refresh_mode_max_mbs_perframe =
+ (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
+ } else if (cpi->oxcf.number_of_layers == 2) {
+ cpi->cyclic_refresh_mode_max_mbs_perframe =
+ (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
+ }
+ cpi->cyclic_refresh_mode_index = 0;
+ cpi->cyclic_refresh_q = 32;
+
+ // GF behavior for 1 pass CBR, used when error_resilience is off.
+ cpi->gf_update_onepass_cbr = 0;
+ cpi->gf_noboost_onepass_cbr = 0;
+ if (!cpi->oxcf.error_resilient_mode &&
+ cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
+ cpi->gf_update_onepass_cbr = 1;
+ cpi->gf_noboost_onepass_cbr = 1;
+ cpi->gf_interval_onepass_cbr =
+ cpi->cyclic_refresh_mode_max_mbs_perframe > 0
+ ? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
+ cpi->cyclic_refresh_mode_max_mbs_perframe)
+ : 10;
+ cpi->gf_interval_onepass_cbr =
+ VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
+ cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
+ }
+
+ if (cpi->cyclic_refresh_mode_enabled) {
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->cyclic_refresh_map,
+ vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
+ } else {
+ cpi->cyclic_refresh_map = (signed char *)NULL;
+ }
+
+ CHECK_MEM_ERROR(
+ &cpi->common.error, cpi->skin_map,
+ vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(cpi->skin_map[0])));
+
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->consec_zero_last,
+ vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
+ CHECK_MEM_ERROR(&cpi->common.error, cpi->consec_zero_last_mvbias,
+ vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
+
+  /* Initialize the feed-forward activity masking. */
+ cpi->activity_avg = 90 << 12;
+
+ /* Give a sensible default for the first frame. */
+ cpi->frames_since_key = 8;
+ cpi->key_frame_frequency = cpi->oxcf.key_freq;
+ cpi->this_key_frame_forced = 0;
+ cpi->next_key_frame_forced = 0;
+
+ cpi->source_alt_ref_pending = 0;
+ cpi->source_alt_ref_active = 0;
+ cpi->common.refresh_alt_ref_frame = 0;
+
+ cpi->force_maxqp = 0;
+ cpi->frames_since_last_drop_overshoot = 0;
+ cpi->rt_always_update_correction_factor = 0;
+
+ cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
+#if CONFIG_INTERNAL_STATS
+ cpi->b_calculate_ssimg = 0;
+
+ cpi->count = 0;
+ cpi->bytes = 0;
+
+ if (cpi->b_calculate_psnr) {
+ cpi->total_sq_error = 0.0;
+ cpi->total_sq_error2 = 0.0;
+ cpi->total_y = 0.0;
+ cpi->total_u = 0.0;
+ cpi->total_v = 0.0;
+ cpi->total = 0.0;
+ cpi->totalp_y = 0.0;
+ cpi->totalp_u = 0.0;
+ cpi->totalp_v = 0.0;
+ cpi->totalp = 0.0;
+ cpi->tot_recode_hits = 0;
+ cpi->summed_quality = 0;
+ cpi->summed_weights = 0;
+ }
+
+#endif
+
+ cpi->first_time_stamp_ever = 0x7FFFFFFF;
+
+ cpi->frames_till_gf_update_due = 0;
+ cpi->key_frame_count = 1;
+
+ cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
+ cpi->ni_tot_qi = 0;
+ cpi->ni_frames = 0;
+ cpi->total_byte_count = 0;
+
+ cpi->drop_frame = 0;
+
+ cpi->rate_correction_factor = 1.0;
+ cpi->key_frame_rate_correction_factor = 1.0;
+ cpi->gf_rate_correction_factor = 1.0;
+ cpi->twopass.est_max_qcorrection_factor = 1.0;
+
+ for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
+ cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
+ }
+
+#ifdef OUTPUT_YUV_SRC
+ yuv_file = fopen("bd.yuv", "ab");
+#endif
+#ifdef OUTPUT_YUV_DENOISED
+ yuv_denoised_file = fopen("denoised.yuv", "ab");
+#endif
+#ifdef OUTPUT_YUV_SKINMAP
+ yuv_skinmap_file = fopen("skinmap.yuv", "wb");
+#endif
+
+#if 0
+ framepsnr = fopen("framepsnr.stt", "a");
+ kf_list = fopen("kf_list.stt", "w");
+#endif
+
+ cpi->output_pkt_list = oxcf->output_pkt_list;
+
+#if !CONFIG_REALTIME_ONLY
+
+ if (cpi->pass == 1) {
+ vp8_init_first_pass(cpi);
+ } else if (cpi->pass == 2) {
+ size_t packet_sz = sizeof(FIRSTPASS_STATS);
+ int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
+
+ cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
+ cpi->twopass.stats_in = cpi->twopass.stats_in_start;
+ cpi->twopass.stats_in_end =
+ (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
+ vp8_init_second_pass(cpi);
+ }
+
+#endif
+
+ if (cpi->compressor_speed == 2) {
+ cpi->avg_encode_time = 0;
+ cpi->avg_pick_mode_time = 0;
+ }
+
+ vp8_set_speed_features(cpi);
+
+ /* Set starting values of RD threshold multipliers (128 = *1) */
+ for (i = 0; i < MAX_MODES; ++i) {
+ cpi->mb.rd_thresh_mult[i] = 128;
+ }
+
+#if CONFIG_MULTITHREAD
+ if (vp8cx_create_encoder_threads(cpi)) {
+ vp8_remove_compressor(&cpi);
+ return 0;
+ }
+#endif
+
+ cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
+ cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
+ cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
+ cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
+
+ cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
+ cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
+ cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
+ cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
+
+ cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
+ cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
+ cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
+ cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
+
+ cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
+ cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
+ cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
+ cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
+
+ cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
+ cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
+ cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
+ cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
+
+#if VPX_ARCH_X86 || VPX_ARCH_X86_64
+ cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
+#endif
+
+ cpi->diamond_search_sad = vp8_diamond_search_sad;
+ cpi->refining_search_sad = vp8_refining_search_sad;
+
+ /* make sure frame 1 is okay */
+ cpi->mb.error_bins[0] = cpi->common.MBs;
+
+ /* vp8cx_init_quantizer() is first called here. Add check in
+ * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
+ * called later when needed. This will avoid unnecessary calls of
+ * vp8cx_init_quantizer() for every frame.
+ */
+ vp8cx_init_quantizer(cpi);
+
+ vp8_loop_filter_init(cm);
+
+ cpi->common.error.setjmp = 0;
+
+#if CONFIG_MULTI_RES_ENCODING
+
+ /* Calculate # of MBs in a row in lower-resolution level image. */
+ if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
+
+#endif
+
+ /* setup RD costs to MACROBLOCK struct */
+
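+  /* Point each pointer at the midpoint of its table so it can be indexed
+   * with negative MV components, e.g. mvsadcost[0][-mvfp_max..mvfp_max].
+   */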
+ cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
+ cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
+ cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
+ cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
+
+ cal_mvsadcosts(cpi->mb.mvsadcost);
+
+ cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
+ cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
+ cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
+ cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
+ cpi->mb.token_costs = cpi->rd_costs.token_costs;
+
+ /* setup block ptrs & offsets */
+ vp8_setup_block_ptrs(&cpi->mb);
+ vp8_setup_block_dptrs(&cpi->mb.e_mbd);
+
+ return cpi;
+}
+
+void vp8_remove_compressor(VP8_COMP **comp) {
+ VP8_COMP *cpi = *comp;
+
+ if (!cpi) return;
+
+ if (cpi && (cpi->common.current_video_frame > 0)) {
+#if !CONFIG_REALTIME_ONLY
+
+ if (cpi->pass == 2) {
+ vp8_end_second_pass(cpi);
+ }
+
+#endif
+
+#if CONFIG_INTERNAL_STATS
+
+ if (cpi->pass != 1) {
+ FILE *f = fopen("opsnr.stt", "a");
+ double time_encoded =
+ (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
+ 10000000.000;
+
+ if (cpi->b_calculate_psnr) {
+ if (cpi->oxcf.number_of_layers > 1) {
+ int i;
+
+ fprintf(f,
+ "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
+ "GLPsnrP\tVPXSSIM\n");
+ for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
+ double dr =
+ (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
+ double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
+ cpi->common.Width * cpi->common.Height;
+ double total_psnr =
+ vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
+ double total_psnr2 =
+ vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
+ double total_ssim =
+ 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
+
+ fprintf(f,
+ "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
+ "%7.3f\t%7.3f\n",
+ i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
+ total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
+ total_psnr2, total_ssim);
+ }
+ } else {
+ double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
+ double samples =
+ 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
+ double total_psnr =
+ vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
+ double total_psnr2 =
+ vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
+ double total_ssim =
+ 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
+
+ fprintf(f,
+ "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
+ "GLPsnrP\tVPXSSIM\n");
+ fprintf(f,
+ "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
+ "%7.3f\n",
+ dr, cpi->total / cpi->count, total_psnr,
+ cpi->totalp / cpi->count, total_psnr2, total_ssim);
+ }
+ }
+ fclose(f);
+#if 0
+ f = fopen("qskip.stt", "a");
+ fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
+ fclose(f);
+#endif
+ }
+
+#endif
+
+#ifdef SPEEDSTATS
+
+ if (cpi->compressor_speed == 2) {
+ int i;
+ FILE *f = fopen("cxspeed.stt", "a");
+ cnt_pm /= cpi->common.MBs;
+
+ for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
+
+ fprintf(f, "\n");
+ fclose(f);
+ }
+
+#endif
+
+#ifdef MODE_STATS
+ {
+ extern int count_mb_seg[4];
+ FILE *f = fopen("modes.stt", "a");
+ double dr = (double)cpi->framerate * (double)bytes * (double)8 /
+ (double)count / (double)1000;
+ fprintf(f, "intra_mode in Intra Frames:\n");
+ fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
+ y_modes[2], y_modes[3], y_modes[4]);
+ fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
+ uv_modes[2], uv_modes[3]);
+ fprintf(f, "B: ");
+ {
+ int i;
+
+ for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
+
+ fprintf(f, "\n");
+ }
+
+ fprintf(f, "Modes in Inter Frames:\n");
+ fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
+ inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
+ inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
+ inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
+ inter_y_modes[9]);
+ fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
+ inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
+ fprintf(f, "B: ");
+ {
+ int i;
+
+ for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
+
+ fprintf(f, "\n");
+ }
+ fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
+ count_mb_seg[2], count_mb_seg[3]);
+ fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
+ inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
+ inter_b_modes[NEW4X4]);
+
+ fclose(f);
+ }
+#endif
+
+#if defined(SECTIONBITS_OUTPUT)
+
+ if (0) {
+ int i;
+ FILE *f = fopen("tokenbits.stt", "a");
+
+ for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
+
+ fprintf(f, "\n");
+ fclose(f);
+ }
+
+#endif
+
+#if 0
+ {
+ printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
+ printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
+ printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
+ }
+#endif
+ }
+
+#if CONFIG_MULTITHREAD
+ vp8cx_remove_encoder_threads(cpi);
+#endif
+
+#if CONFIG_TEMPORAL_DENOISING
+ vp8_denoiser_free(&cpi->denoiser);
+#endif
+ dealloc_compressor_data(cpi);
+ vpx_free(cpi->mb.ss);
+ vpx_free(cpi->tok);
+ vpx_free(cpi->skin_map);
+ vpx_free(cpi->cyclic_refresh_map);
+ vpx_free(cpi->consec_zero_last);
+ vpx_free(cpi->consec_zero_last_mvbias);
+
+ vp8_remove_common(&cpi->common);
+ vpx_free(cpi);
+ *comp = 0;
+
+#ifdef OUTPUT_YUV_SRC
+ fclose(yuv_file);
+#endif
+#ifdef OUTPUT_YUV_DENOISED
+ fclose(yuv_denoised_file);
+#endif
+#ifdef OUTPUT_YUV_SKINMAP
+ fclose(yuv_skinmap_file);
+#endif
+
+#if 0
+
+ if (keyfile)
+ fclose(keyfile);
+
+ if (framepsnr)
+ fclose(framepsnr);
+
+ if (kf_list)
+ fclose(kf_list);
+
+#endif
+}
+
+static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
+ unsigned char *recon, int recon_stride,
+ unsigned int cols, unsigned int rows) {
+ unsigned int row, col;
+ uint64_t total_sse = 0;
+ int diff;
+
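+  /* Sum squared error over the plane: full 16x16 blocks go through the
+   * optimized vpx_mse16x16(); partial blocks at the right and bottom
+   * borders are handled by the scalar fallback loops.
+   */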
+ for (row = 0; row + 16 <= rows; row += 16) {
+ for (col = 0; col + 16 <= cols; col += 16) {
+ unsigned int sse;
+
+ vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
+ total_sse += sse;
+ }
+
+ /* Handle odd-sized width */
+ if (col < cols) {
+ unsigned int border_row, border_col;
+ unsigned char *border_orig = orig;
+ unsigned char *border_recon = recon;
+
+ for (border_row = 0; border_row < 16; ++border_row) {
+ for (border_col = col; border_col < cols; ++border_col) {
+ diff = border_orig[border_col] - border_recon[border_col];
+ total_sse += diff * diff;
+ }
+
+ border_orig += orig_stride;
+ border_recon += recon_stride;
+ }
+ }
+
+ orig += orig_stride * 16;
+ recon += recon_stride * 16;
+ }
+
+ /* Handle odd-sized height */
+ for (; row < rows; ++row) {
+ for (col = 0; col < cols; ++col) {
+ diff = orig[col] - recon[col];
+ total_sse += diff * diff;
+ }
+
+ orig += orig_stride;
+ recon += recon_stride;
+ }
+
+ vpx_clear_system_state();
+ return total_sse;
+}
+
+static void generate_psnr_packet(VP8_COMP *cpi) {
+ YV12_BUFFER_CONFIG *orig = cpi->Source;
+ YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
+ struct vpx_codec_cx_pkt pkt;
+ uint64_t sse;
+ int i;
+ unsigned int width = cpi->common.Width;
+ unsigned int height = cpi->common.Height;
+
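+  /* Index 0 accumulates the whole frame (Y + U + V); indices 1..3 hold
+   * the per-plane Y, U and V values respectively.
+   */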
+ pkt.kind = VPX_CODEC_PSNR_PKT;
+ sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
+ recon->y_stride, width, height);
+ pkt.data.psnr.sse[0] = sse;
+ pkt.data.psnr.sse[1] = sse;
+ pkt.data.psnr.samples[0] = width * height;
+ pkt.data.psnr.samples[1] = width * height;
+
+ width = (width + 1) / 2;
+ height = (height + 1) / 2;
+
+ sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
+ recon->uv_stride, width, height);
+ pkt.data.psnr.sse[0] += sse;
+ pkt.data.psnr.sse[2] = sse;
+ pkt.data.psnr.samples[0] += width * height;
+ pkt.data.psnr.samples[2] = width * height;
+
+ sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
+ recon->uv_stride, width, height);
+ pkt.data.psnr.sse[0] += sse;
+ pkt.data.psnr.sse[3] = sse;
+ pkt.data.psnr.samples[0] += width * height;
+ pkt.data.psnr.samples[3] = width * height;
+
+ for (i = 0; i < 4; ++i) {
+ pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
+ (double)(pkt.data.psnr.sse[i]));
+ }
+
+ vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
+}
+
+int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
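+  /* Only the three reference-frame bits (VP8_LAST_FRAME | VP8_GOLD_FRAME |
+   * VP8_ALTR_FRAME) are meaningful, so any value above 7 is rejected.
+   */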
+ if (ref_frame_flags > 7) return -1;
+
+ cpi->ref_frame_flags = ref_frame_flags;
+ return 0;
+}
+int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
+ if (ref_frame_flags > 7) return -1;
+
+ cpi->common.refresh_golden_frame = 0;
+ cpi->common.refresh_alt_ref_frame = 0;
+ cpi->common.refresh_last_frame = 0;
+
+ if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
+
+ if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
+
+ if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
+
+ cpi->ext_refresh_frame_flags_pending = 1;
+ return 0;
+}
+
+int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP8_COMMON *cm = &cpi->common;
+ int ref_fb_idx;
+
+ if (ref_frame_flag == VP8_LAST_FRAME) {
+ ref_fb_idx = cm->lst_fb_idx;
+ } else if (ref_frame_flag == VP8_GOLD_FRAME) {
+ ref_fb_idx = cm->gld_fb_idx;
+ } else if (ref_frame_flag == VP8_ALTR_FRAME) {
+ ref_fb_idx = cm->alt_fb_idx;
+ } else {
+ return -1;
+ }
+
+ vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
+
+ return 0;
+}
+int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP8_COMMON *cm = &cpi->common;
+
+ int ref_fb_idx;
+
+ if (ref_frame_flag == VP8_LAST_FRAME) {
+ ref_fb_idx = cm->lst_fb_idx;
+ } else if (ref_frame_flag == VP8_GOLD_FRAME) {
+ ref_fb_idx = cm->gld_fb_idx;
+ } else if (ref_frame_flag == VP8_ALTR_FRAME) {
+ ref_fb_idx = cm->alt_fb_idx;
+ } else {
+ return -1;
+ }
+
+ vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
+
+ return 0;
+}
+int vp8_update_entropy(VP8_COMP *cpi, int update) {
+ VP8_COMMON *cm = &cpi->common;
+ cm->refresh_entropy_probs = update;
+
+ return 0;
+}
+
+static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ /* are we resizing the image */
+ if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
+#if CONFIG_SPATIAL_RESAMPLING
+ int hr, hs, vr, vs;
+ int tmp_height;
+
+ if (cm->vert_scale == 3) {
+ tmp_height = 9;
+ } else {
+ tmp_height = 11;
+ }
+
+ Scale2Ratio(cm->horiz_scale, &hr, &hs);
+ Scale2Ratio(cm->vert_scale, &vr, &vs);
+
+ vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
+ tmp_height, hs, hr, vs, vr, 0);
+
+ vp8_yv12_extend_frame_borders(&cpi->scaled_source);
+ cpi->Source = &cpi->scaled_source;
+#endif
+ } else {
+ cpi->Source = sd;
+ }
+}
+
+static int resize_key_frame(VP8_COMP *cpi) {
+#if CONFIG_SPATIAL_RESAMPLING
+ VP8_COMMON *cm = &cpi->common;
+
+  /* Do we need to apply resampling for one pass CBR?
+   * In one pass this is more limited than in two pass CBR.
+   * The test and any change is only made once per key frame sequence.
+   */
+ if (cpi->oxcf.allow_spatial_resampling &&
+ (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
+ int hr, hs, vr, vs;
+ int new_width, new_height;
+
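+    /* The watermarks are percentages of the optimal buffer level: e.g.
+     * with a hypothetical resample_down_water_mark of 20, a downscale is
+     * triggered when the buffer falls below 20% of optimal_buffer_level.
+     */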
+ /* If we are below the resample DOWN watermark then scale down a
+ * notch.
+ */
+ if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
+ cpi->oxcf.optimal_buffer_level / 100)) {
+ cm->horiz_scale =
+ (cm->horiz_scale < VP8E_ONETWO) ? cm->horiz_scale + 1 : VP8E_ONETWO;
+ cm->vert_scale =
+ (cm->vert_scale < VP8E_ONETWO) ? cm->vert_scale + 1 : VP8E_ONETWO;
+ }
+ /* Should we now start scaling back up */
+ else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
+ cpi->oxcf.optimal_buffer_level / 100)) {
+ cm->horiz_scale =
+ (cm->horiz_scale > VP8E_NORMAL) ? cm->horiz_scale - 1 : VP8E_NORMAL;
+ cm->vert_scale =
+ (cm->vert_scale > VP8E_NORMAL) ? cm->vert_scale - 1 : VP8E_NORMAL;
+ }
+
+ /* Get the new height and width */
+ Scale2Ratio(cm->horiz_scale, &hr, &hs);
+ Scale2Ratio(cm->vert_scale, &vr, &vs);
+ new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
+ new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
+
+ /* If the image size has changed we need to reallocate the buffers
+ * and resample the source image
+ */
+ if ((cm->Width != new_width) || (cm->Height != new_height)) {
+ cm->Width = new_width;
+ cm->Height = new_height;
+ vp8_alloc_compressor_data(cpi);
+ scale_and_extend_source(cpi->un_scaled_source, cpi);
+ return 1;
+ }
+ }
+
+#endif
+ return 0;
+}
+
+static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ /* Select an interval before next GF or altref */
+ if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
+
+ if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
+ cpi->current_gf_interval = cpi->frames_till_gf_update_due;
+
+ /* Set the bits per frame that we should try and recover in
+ * subsequent inter frames to account for the extra GF spend...
+     * note that this does not apply for GF updates that occur
+ * coincident with a key frame as the extra cost of key frames is
+ * dealt with elsewhere.
+ */
+ cpi->gf_overspend_bits += cpi->projected_frame_size;
+ cpi->non_gf_bitrate_adjustment =
+ cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
+ }
+
+ /* Update data structure that monitors level of reference to last GF */
+ memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
+ cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
+
+  /* This frame's refresh means subsequent frames don't refresh unless
+   * requested by the user.
+   */
+ cpi->frames_since_golden = 0;
+
+ /* Clear the alternate reference update pending flag. */
+ cpi->source_alt_ref_pending = 0;
+
+ /* Set the alternate reference frame active flag */
+ cpi->source_alt_ref_active = 1;
+}
+static void update_golden_frame_stats(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ /* Update the Golden frame usage counts. */
+ if (cm->refresh_golden_frame) {
+ /* Select an interval before next GF */
+ if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
+
+ if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
+ cpi->current_gf_interval = cpi->frames_till_gf_update_due;
+
+ /* Set the bits per frame that we should try and recover in
+ * subsequent inter frames to account for the extra GF spend...
+       * note that this does not apply for GF updates that occur
+ * coincident with a key frame as the extra cost of key frames
+ * is dealt with elsewhere.
+ */
+ if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
+        /* Calculate the GF bits to be recovered:
+         * projected size minus the average per-frame bits available
+         * for inter frames for the clip as a whole.
+         */
+ cpi->gf_overspend_bits +=
+ (cpi->projected_frame_size - cpi->inter_frame_target);
+ }
+
+ cpi->non_gf_bitrate_adjustment =
+ cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
+ }
+
+ /* Update data structure that monitors level of reference to last GF */
+ memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
+ cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
+
+    /* This frame's refresh means subsequent frames don't refresh unless
+     * requested by the user.
+     */
+ cm->refresh_golden_frame = 0;
+ cpi->frames_since_golden = 0;
+
+ cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
+ cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
+ cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
+ cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
+
+ /* ******** Fixed Q test code only ************ */
+ /* If we are going to use the ALT reference for the next group of
+ * frames set a flag to say so.
+ */
+ if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
+ !cpi->common.refresh_alt_ref_frame) {
+ cpi->source_alt_ref_pending = 1;
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ }
+
+ if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
+
+ /* Decrement count down till next gf */
+ if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
+
+ } else if (!cpi->common.refresh_alt_ref_frame) {
+ /* Decrement count down till next gf */
+ if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
+
+ if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
+
+ cpi->frames_since_golden++;
+
+ if (cpi->frames_since_golden > 1) {
+ cpi->recent_ref_frame_usage[INTRA_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
+ cpi->recent_ref_frame_usage[LAST_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
+ cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
+ cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
+ }
+ }
+}
+
+/* This function updates the reference frame probability estimates that
+ * will be used during mode selection
+ */
+static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
+ const int rf_intra = rfct[INTRA_FRAME];
+ const int rf_inter =
+ rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
+
+ if (cm->frame_type == KEY_FRAME) {
+ cpi->prob_intra_coded = 255;
+ cpi->prob_last_coded = 128;
+ cpi->prob_gf_coded = 128;
+ } else if (!(rf_intra + rf_inter)) {
+ cpi->prob_intra_coded = 63;
+ cpi->prob_last_coded = 128;
+ cpi->prob_gf_coded = 128;
+ }
+
+ /* update reference frame costs since we can do better than what we got
+ * last frame.
+ */
+ if (cpi->oxcf.number_of_layers == 1) {
+ if (cpi->common.refresh_alt_ref_frame) {
+ cpi->prob_intra_coded += 40;
+ if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
+ cpi->prob_last_coded = 200;
+ cpi->prob_gf_coded = 1;
+ } else if (cpi->frames_since_golden == 0) {
+ cpi->prob_last_coded = 214;
+ } else if (cpi->frames_since_golden == 1) {
+ cpi->prob_last_coded = 192;
+ cpi->prob_gf_coded = 220;
+ } else if (cpi->source_alt_ref_active) {
+ cpi->prob_gf_coded -= 20;
+
+ if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
+ }
+ if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
+ }
+}
+
+#if !CONFIG_REALTIME_ONLY
+/* 1 = key, 0 = inter */
+static int decide_key_frame(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ int code_key_frame = 0;
+
+ cpi->kf_boost = 0;
+
+ if (cpi->Speed > 11) return 0;
+
+ /* Clear down mmx registers */
+ vpx_clear_system_state();
+
+ if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
+ double change = 1.0 *
+ abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
+ (1 + cpi->last_intra_error);
+ double change2 =
+ 1.0 *
+ abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
+ (1 + cpi->last_prediction_error);
+ double minerror = cm->MBs * 256;
+
+ cpi->last_intra_error = cpi->mb.intra_error;
+ cpi->last_prediction_error = cpi->mb.prediction_error;
+
+ if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
+ cpi->mb.prediction_error > minerror &&
+ (change > .25 || change2 > .25)) {
+ /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
+ * cpi->last_frame_percent_intra + 3*/
+ return 1;
+ }
+
+ return 0;
+ }
+
+ /* If the following are true we might as well code a key frame */
+ if (((cpi->this_frame_percent_intra == 100) &&
+ (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
+ ((cpi->this_frame_percent_intra > 95) &&
+ (cpi->this_frame_percent_intra >=
+ (cpi->last_frame_percent_intra + 5)))) {
+ code_key_frame = 1;
+ }
+  /* In addition, if the following are true and this is not a golden frame,
+   * then code a key frame. Note that on golden frames there often seems
+   * to be a pop in intra usage anyway, hence this restriction is
+   * designed to prevent spurious key frames. The intra pop needs to be
+   * investigated.
+   */
+ else if (((cpi->this_frame_percent_intra > 60) &&
+ (cpi->this_frame_percent_intra >
+ (cpi->last_frame_percent_intra * 2))) ||
+ ((cpi->this_frame_percent_intra > 75) &&
+ (cpi->this_frame_percent_intra >
+ (cpi->last_frame_percent_intra * 3 / 2))) ||
+ ((cpi->this_frame_percent_intra > 90) &&
+ (cpi->this_frame_percent_intra >
+ (cpi->last_frame_percent_intra + 10)))) {
+ if (!cm->refresh_golden_frame) code_key_frame = 1;
+ }
+
+ return code_key_frame;
+}
+
+static void Pass1Encode(VP8_COMP *cpi) {
+ vp8_set_quantizer(cpi, 26);
+ vp8_first_pass(cpi);
+}
+#endif
+
+#if 0
+void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
+{
+
+ /* write the frame */
+ FILE *yframe;
+ int i;
+ char filename[255];
+
+ sprintf(filename, "cx\\y%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->y_height; ++i)
+ fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
+
+ fclose(yframe);
+ sprintf(filename, "cx\\u%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->uv_height; ++i)
+ fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
+
+ fclose(yframe);
+ sprintf(filename, "cx\\v%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->uv_height; ++i)
+ fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
+
+ fclose(yframe);
+}
+#endif
+
+#if !CONFIG_REALTIME_ONLY
+/* Function to test for conditions that indicate we should loop
+ * back and recode a frame.
+ */
+static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
+ int maxq, int minq) {
+ int force_recode = 0;
+ VP8_COMMON *cm = &cpi->common;
+
+  /* Is frame recode allowed at all?
+   * Yes if either recode mode 1 is selected, or mode 2 is selected
+   * and the frame is a key frame, golden frame or alt_ref_frame
+   */
+ if ((cpi->sf.recode_loop == 1) ||
+ ((cpi->sf.recode_loop == 2) &&
+ ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
+ cm->refresh_alt_ref_frame))) {
+ /* General over and under shoot tests */
+ if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
+ ((cpi->projected_frame_size < low_limit) && (q > minq))) {
+ force_recode = 1;
+ }
+ /* Special Constrained quality tests */
+ else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ /* Undershoot and below auto cq level */
+ if ((q > cpi->cq_target_quality) &&
+ (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
+ force_recode = 1;
+ }
+ /* Severe undershoot and between auto and user cq level */
+ else if ((q > cpi->oxcf.cq_level) &&
+ (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
+ (cpi->active_best_quality > cpi->oxcf.cq_level)) {
+ force_recode = 1;
+ cpi->active_best_quality = cpi->oxcf.cq_level;
+ }
+ }
+ }
+
+ return force_recode;
+}
+#endif // !CONFIG_REALTIME_ONLY
+
+static void update_reference_frames(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+ YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
+
+ /* At this point the new frame has been encoded.
+ * If any buffer copy / swapping is signaled it should be done here.
+ */
+
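+  /* On a key frame every reference is refreshed: golden and altref are
+   * pointed at the new buffer here, and last is updated by the
+   * refresh_last_frame block further down.
+   */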
+ if (cm->frame_type == KEY_FRAME) {
+ yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
+
+ yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
+ yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
+
+ cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
+
+ cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
+ cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
+ } else {
+ if (cm->refresh_alt_ref_frame) {
+ assert(!cm->copy_buffer_to_arf);
+
+ cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
+ cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
+ cm->alt_fb_idx = cm->new_fb_idx;
+
+ cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
+ } else if (cm->copy_buffer_to_arf) {
+ assert(!(cm->copy_buffer_to_arf & ~0x3));
+
+ if (cm->copy_buffer_to_arf == 1) {
+ if (cm->alt_fb_idx != cm->lst_fb_idx) {
+ yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
+ yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
+ cm->alt_fb_idx = cm->lst_fb_idx;
+
+ cpi->current_ref_frames[ALTREF_FRAME] =
+ cpi->current_ref_frames[LAST_FRAME];
+ }
+ } else {
+ if (cm->alt_fb_idx != cm->gld_fb_idx) {
+ yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
+ yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
+ cm->alt_fb_idx = cm->gld_fb_idx;
+
+ cpi->current_ref_frames[ALTREF_FRAME] =
+ cpi->current_ref_frames[GOLDEN_FRAME];
+ }
+ }
+ }
+
+ if (cm->refresh_golden_frame) {
+ assert(!cm->copy_buffer_to_gf);
+
+ cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
+ cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
+ cm->gld_fb_idx = cm->new_fb_idx;
+
+ cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
+ } else if (cm->copy_buffer_to_gf) {
+      assert(!(cm->copy_buffer_to_gf & ~0x3));
+
+ if (cm->copy_buffer_to_gf == 1) {
+ if (cm->gld_fb_idx != cm->lst_fb_idx) {
+ yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
+ yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
+ cm->gld_fb_idx = cm->lst_fb_idx;
+
+ cpi->current_ref_frames[GOLDEN_FRAME] =
+ cpi->current_ref_frames[LAST_FRAME];
+ }
+ } else {
+ if (cm->alt_fb_idx != cm->gld_fb_idx) {
+ yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
+ yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
+ cm->gld_fb_idx = cm->alt_fb_idx;
+
+ cpi->current_ref_frames[GOLDEN_FRAME] =
+ cpi->current_ref_frames[ALTREF_FRAME];
+ }
+ }
+ }
+ }
+
+ if (cm->refresh_last_frame) {
+ cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
+ cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
+ cm->lst_fb_idx = cm->new_fb_idx;
+
+ cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
+ }
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+    /* we shouldn't have to keep multiple copies as we know in advance which
+     * buffer we should start from - for now, to get something up and
+     * running, I've chosen to copy the buffers
+     */
+ if (cm->frame_type == KEY_FRAME) {
+ int i;
+ for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
+ vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
+ } else {
+ vp8_yv12_extend_frame_borders(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
+
+ if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
+ vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
+ &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
+ }
+ if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
+ vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
+ &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
+ }
+ if (cm->refresh_last_frame) {
+ vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
+ &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
+ }
+ }
+ if (cpi->oxcf.noise_sensitivity == 4)
+ vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
+ }
+#endif
+}
+
+static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest,
+ VP8_COMP *cpi) {
+ int i, j;
+ int Total = 0;
+ int num_blocks = 0;
+ int skip = 2;
+ int min_consec_zero_last = 10;
+ int tot_num_blocks = (source->y_height * source->y_width) >> 8;
+ unsigned char *src = source->y_buffer;
+ unsigned char *dst = dest->y_buffer;
+
+  /* Loop through the Y plane, every |skip| blocks along rows and columns,
+   * summing the square differences, and only for blocks that have been
+   * coded in zero_last mode for at least |min_consec_zero_last| frames
+   * in a row.
+   */
+ for (i = 0; i < source->y_height; i += 16 * skip) {
+ int block_index_row = (i >> 4) * cpi->common.mb_cols;
+ for (j = 0; j < source->y_width; j += 16 * skip) {
+ int index = block_index_row + (j >> 4);
+ if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
+ unsigned int sse;
+ Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
+ dest->y_stride, &sse);
+ num_blocks++;
+ }
+ }
+ src += 16 * skip * source->y_stride;
+ dst += 16 * skip * dest->y_stride;
+ }
+ // Only return non-zero if we have at least ~1/16 samples for estimate.
+ if (num_blocks > (tot_num_blocks >> 4)) {
+ assert(num_blocks != 0);
+ return (Total / num_blocks);
+ } else {
+ return 0;
+ }
+}
+
+#if CONFIG_TEMPORAL_DENOISING
+static void process_denoiser_mode_change(VP8_COMP *cpi) {
+ const VP8_COMMON *const cm = &cpi->common;
+ int i, j;
+ int total = 0;
+ int num_blocks = 0;
+ // Number of blocks skipped along row/column in computing the
+ // nmse (normalized mean square error) of source.
+ int skip = 2;
+ // Only select blocks for computing nmse that have been encoded
+ // as ZERO LAST min_consec_zero_last frames in a row.
+ // Scale with number of temporal layers.
+ int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
+ // Decision is tested for changing the denoising mode every
+ // num_mode_change times this function is called. Note that this
+ // function called every 8 frames, so (8 * num_mode_change) is number
+ // of frames where denoising mode change is tested for switch.
+ int num_mode_change = 20;
+ // Framerate factor, to compensate for larger mse at lower framerates.
+ // Use ref_framerate, which is full source framerate for temporal layers.
+ // TODO(marpan): Adjust this factor.
+ int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
+ int tot_num_blocks = cm->mb_rows * cm->mb_cols;
+ int ystride = cpi->Source->y_stride;
+ unsigned char *src = cpi->Source->y_buffer;
+ unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
+ static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128 };
+ int bandwidth = (int)(cpi->target_bandwidth);
+ // For temporal layers, use full bandwidth (top layer).
+ if (cpi->oxcf.number_of_layers > 1) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
+ bandwidth = (int)(lc->target_bandwidth);
+ }
+ // Loop through the Y plane, every skip blocks along rows and columns,
+ // summing the normalized mean square error, only for blocks that have
+  // been encoded as ZEROMV LAST for at least min_consec_zero_last frames in
+ // a row and have small sum difference between current and previous frame.
+ // Normalization here is by the contrast of the current frame block.
+ for (i = 0; i < cm->Height; i += 16 * skip) {
+ int block_index_row = (i >> 4) * cm->mb_cols;
+ for (j = 0; j < cm->Width; j += 16 * skip) {
+ int index = block_index_row + (j >> 4);
+ if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
+ unsigned int sse;
+ const unsigned int var =
+ vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
+ // Only consider this block as valid for noise measurement
+ // if the sum_diff average of the current and previous frame
+ // is small (to avoid effects from lighting change).
+ if ((sse - var) < 128) {
+ unsigned int sse2;
+ const unsigned int act =
+ vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
+ if (act > 0) total += sse / act;
+ num_blocks++;
+ }
+ }
+ }
+ src += 16 * skip * ystride;
+ dst += 16 * skip * ystride;
+ }
+ total = total * fac_framerate / 100;
+
+ // Only consider this frame as valid sample if we have computed nmse over
+ // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
+ // application inputs duplicate frames, or contrast is all zero).
+ if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
+ // Update the recursive mean square source_diff.
+ total = (total << 8) / num_blocks;
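+    // The << 8 above keeps total in Q8 so that the 3/4-weighted
+    // recursive average below retains fractional precision.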
+ if (cpi->denoiser.nmse_source_diff_count == 0) {
+ // First sample in new interval.
+ cpi->denoiser.nmse_source_diff = total;
+ cpi->denoiser.qp_avg = cm->base_qindex;
+ } else {
+ // For subsequent samples, use average with weight ~1/4 for new sample.
+ cpi->denoiser.nmse_source_diff =
+ (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
+ cpi->denoiser.qp_avg =
+ (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
+ }
+ cpi->denoiser.nmse_source_diff_count++;
+ }
+ // Check for changing the denoiser mode, when we have obtained #samples =
+ // num_mode_change. Condition the change also on the bitrate and QP.
+ if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
+ // Check for going up: from normal to aggressive mode.
+ if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
+ (cpi->denoiser.nmse_source_diff >
+ cpi->denoiser.threshold_aggressive_mode) &&
+ (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
+ bandwidth > cpi->denoiser.bitrate_threshold)) {
+ vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
+ } else {
+ // Check for going down: from aggressive to normal mode.
+ if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
+ (cpi->denoiser.nmse_source_diff <
+ cpi->denoiser.threshold_aggressive_mode)) ||
+ ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
+ (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
+ bandwidth < cpi->denoiser.bitrate_threshold))) {
+ vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
+ }
+ }
+ // Reset metric and counter for next interval.
+ cpi->denoiser.nmse_source_diff = 0;
+ cpi->denoiser.qp_avg = 0;
+ cpi->denoiser.nmse_source_diff_count = 0;
+ }
+}
+#endif
+
+void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
+ const FRAME_TYPE frame_type = cm->frame_type;
+
+ int update_any_ref_buffers = 1;
+ if (cpi->common.refresh_last_frame == 0 &&
+ cpi->common.refresh_golden_frame == 0 &&
+ cpi->common.refresh_alt_ref_frame == 0) {
+ update_any_ref_buffers = 0;
+ }
+
+ if (cm->no_lpf) {
+ cm->filter_level = 0;
+ } else {
+ struct vpx_usec_timer timer;
+
+ vpx_clear_system_state();
+
+ vpx_usec_timer_start(&timer);
+ if (cpi->sf.auto_filter == 0) {
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
+ // Use the denoised buffer for selecting base loop filter level.
+ // Denoised signal for current frame is stored in INTRA_FRAME.
+ // No denoising on key frames.
+ vp8cx_pick_filter_level_fast(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
+ } else {
+ vp8cx_pick_filter_level_fast(cpi->Source, cpi);
+ }
+#else
+ vp8cx_pick_filter_level_fast(cpi->Source, cpi);
+#endif
+ } else {
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
+ // Use the denoised buffer for selecting base loop filter level.
+ // Denoised signal for current frame is stored in INTRA_FRAME.
+ // No denoising on key frames.
+ vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
+ cpi);
+ } else {
+ vp8cx_pick_filter_level(cpi->Source, cpi);
+ }
+#else
+ vp8cx_pick_filter_level(cpi->Source, cpi);
+#endif
+ }
+
+ if (cm->filter_level > 0) {
+ vp8cx_set_alt_lf_level(cpi, cm->filter_level);
+ }
+
+ vpx_usec_timer_mark(&timer);
+ cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
+ }
+
+#if CONFIG_MULTITHREAD
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
+ sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
+ }
+#endif
+
+ // No need to apply loop-filter if the encoded frame does not update
+ // any reference buffers.
+ if (cm->filter_level > 0 && update_any_ref_buffers) {
+ vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
+ }
+
+ vp8_yv12_extend_frame_borders(cm->frame_to_show);
+}
+
+static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
+ unsigned char *dest,
+ unsigned char *dest_end,
+ unsigned int *frame_flags) {
+ int Q;
+ int frame_over_shoot_limit;
+ int frame_under_shoot_limit;
+
+ int Loop = 0;
+
+ VP8_COMMON *cm = &cpi->common;
+ int active_worst_qchanged = 0;
+
+#if !CONFIG_REALTIME_ONLY
+ int q_low;
+ int q_high;
+ int zbin_oq_high;
+ int zbin_oq_low = 0;
+ int top_index;
+ int bottom_index;
+ int overshoot_seen = 0;
+ int undershoot_seen = 0;
+#endif
+
+ int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
+ cpi->oxcf.optimal_buffer_level / 100);
+ int drop_mark75 = drop_mark * 2 / 3;
+ int drop_mark50 = drop_mark / 4;
+ int drop_mark25 = drop_mark / 8;
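+  /* Despite their names, these are 2/3, 1/4 and 1/8 of drop_mark; they
+   * serve as successively lower buffer-fullness thresholds in the
+   * frame-drop logic.
+   */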
+
+ /* Clear down mmx registers to allow floating point in what follows */
+ vpx_clear_system_state();
+
+ if (cpi->force_next_frame_intra) {
+ cm->frame_type = KEY_FRAME; /* delayed intra frame */
+ cpi->force_next_frame_intra = 0;
+ }
+
+ /* For an alt ref frame in 2 pass we skip the call to the second pass
+ * function that sets the target bandwidth
+ */
+ switch (cpi->pass) {
+#if !CONFIG_REALTIME_ONLY
+ case 2:
+ if (cpi->common.refresh_alt_ref_frame) {
+ /* Per frame bit target for the alt ref frame */
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
+ /* per second target bitrate */
+ cpi->target_bandwidth =
+ (int)(cpi->twopass.gf_bits * cpi->output_framerate);
+ }
+ break;
+#endif // !CONFIG_REALTIME_ONLY
+ default:
+ cpi->per_frame_bandwidth =
+ (int)round(cpi->target_bandwidth / cpi->output_framerate);
+ break;
+ }
+
+ /* Default turn off buffer to buffer copying */
+ cm->copy_buffer_to_gf = 0;
+ cm->copy_buffer_to_arf = 0;
+
+ /* Clear zbin over-quant value and mode boost values. */
+ cpi->mb.zbin_over_quant = 0;
+ cpi->mb.zbin_mode_boost = 0;
+
+ /* Enable or disable mode based tweaking of the zbin
+ * For 2 Pass Only used where GF/ARF prediction quality
+ * is above a threshold
+ */
+ cpi->mb.zbin_mode_boost_enabled = 1;
+ if (cpi->pass == 2) {
+ if (cpi->gfu_boost <= 400) {
+ cpi->mb.zbin_mode_boost_enabled = 0;
+ }
+ }
+
+ /* Current default encoder behaviour for the altref sign bias */
+ if (cpi->source_alt_ref_active) {
+ cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
+ } else {
+ cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
+ }
+
+ /* Check to see if a key frame is signaled
+ * For two pass with auto key frame enabled cm->frame_type may already
+ * be set, but not for one pass.
+ */
+ if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
+ (cpi->oxcf.auto_key &&
+ (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
+ /* Key frame from VFW/auto-keyframe/first frame */
+ cm->frame_type = KEY_FRAME;
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity == 4) {
+ // For adaptive mode, reset denoiser to normal mode on key frame.
+ vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
+ }
+#endif
+ }
+
+#if CONFIG_MULTI_RES_ENCODING
+ if (cpi->oxcf.mr_total_resolutions > 1) {
+ LOWER_RES_FRAME_INFO *low_res_frame_info =
+ (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
+
+ if (cpi->oxcf.mr_encoder_id) {
+ // Check if lower resolution is available for motion vector reuse.
+ if (cm->frame_type != KEY_FRAME) {
+ cpi->mr_low_res_mv_avail = 1;
+ cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
+
+ if (cpi->ref_frame_flags & VP8_LAST_FRAME)
+ cpi->mr_low_res_mv_avail &=
+ (cpi->current_ref_frames[LAST_FRAME] ==
+ low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
+
+ if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
+ cpi->mr_low_res_mv_avail &=
+ (cpi->current_ref_frames[GOLDEN_FRAME] ==
+ low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
+
+ // Don't use altref to determine whether low res is available.
+ // TODO (marpan): Should we make this type of condition on a
+ // per-reference frame basis?
+ /*
+ if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
+ cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
+ == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
+ */
+ }
+ // Disable motion vector reuse (i.e., disable any usage of the low_res)
+ // if the previous lower stream is skipped/disabled.
+ if (low_res_frame_info->skip_encoding_prev_stream) {
+ cpi->mr_low_res_mv_avail = 0;
+ }
+ }
+ // This stream is not skipped (i.e., it's being encoded), so set this skip
+    // flag to 0. This is needed for the next stream (i.e., the next frame to
+    // be encoded).
+ low_res_frame_info->skip_encoding_prev_stream = 0;
+
+ // On a key frame: For the lowest resolution, keep track of the key frame
+ // counter value. For the higher resolutions, reset the current video
+ // frame counter to that of the lowest resolution.
+    // This is done to handle the case where we may stop/start encoding
+ // higher layer(s). The restart-encoding of higher layer is only signaled
+ // by a key frame for now.
+ // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
+ if (cm->frame_type == KEY_FRAME) {
+ if (cpi->oxcf.mr_encoder_id) {
+ // If the initial starting value of the buffer level is zero (this can
+        // happen because we may not have started encoding this higher stream),
+        // then reset it to a non-zero value based on |starting_buffer_level|.
+ if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
+ unsigned int i;
+ cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
+ cpi->buffer_level = cpi->oxcf.starting_buffer_level;
+ for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ lc->bits_off_target = lc->starting_buffer_level;
+ lc->buffer_level = lc->starting_buffer_level;
+ }
+ }
+ cpi->common.current_video_frame =
+ low_res_frame_info->key_frame_counter_value;
+ } else {
+ low_res_frame_info->key_frame_counter_value =
+ cpi->common.current_video_frame;
+ }
+ }
+ }
+#endif
+
+ // Find the reference frame closest to the current frame.
+ cpi->closest_reference_frame = LAST_FRAME;
+ if (cm->frame_type != KEY_FRAME) {
+ int i;
+ MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
+ if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
+ closest_ref = LAST_FRAME;
+ } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
+ closest_ref = GOLDEN_FRAME;
+ } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
+ closest_ref = ALTREF_FRAME;
+ }
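+    /* The reference flags are bit masks (VP8_LAST_FRAME = 1,
+     * VP8_GOLD_FRAME = 2, VP8_ALTR_FRAME = 4), hence the (i == 3) ? 4 : i
+     * mapping from reference index to flag in the loop below.
+     */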
+ for (i = 1; i <= 3; ++i) {
+ vpx_ref_frame_type_t ref_frame_type =
+ (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
+ if (cpi->ref_frame_flags & ref_frame_type) {
+ if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
+ (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
+ closest_ref = i;
+ }
+ }
+ }
+ cpi->closest_reference_frame = closest_ref;
+ }
+
+ /* Set various flags etc to special state if it is a key frame */
+ if (cm->frame_type == KEY_FRAME) {
+ int i;
+
+ // Set the loop filter deltas and segmentation map update
+ setup_features(cpi);
+
+ /* The alternate reference frame cannot be active for a key frame */
+ cpi->source_alt_ref_active = 0;
+
+ /* Reset the RD threshold multipliers to default of * 1 (128) */
+ for (i = 0; i < MAX_MODES; ++i) {
+ cpi->mb.rd_thresh_mult[i] = 128;
+ }
+
+ // Reset the zero_last counter to 0 on key frame.
+ memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
+ memset(cpi->consec_zero_last_mvbias, 0,
+ (cpi->common.mb_rows * cpi->common.mb_cols));
+ }
+
+#if 0
+ /* Experimental code for lagged compress and one pass
+ * Initialise one_pass GF frames stats
+ * Update stats used for GF selection
+ */
+ {
+ cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
+
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
+ }
+#endif
+
+ update_rd_ref_frame_probs(cpi);
+
+ if (cpi->drop_frames_allowed) {
+ /* The reset to decimation 0 is only done here for one pass.
+     * Once it is set, two pass leaves decimation on until the next kf.
+ */
+ if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) {
+ cpi->decimation_factor--;
+ }
+
+ if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
+ cpi->decimation_factor = 1;
+
+ } else if (cpi->buffer_level < drop_mark25 &&
+ (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
+ cpi->decimation_factor = 3;
+ } else if (cpi->buffer_level < drop_mark50 &&
+ (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
+ cpi->decimation_factor = 2;
+ } else if (cpi->buffer_level < drop_mark75 &&
+ (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
+ cpi->decimation_factor = 1;
+ }
+ }
+
+ /* The following decimates the frame rate according to a regular
+   * pattern (i.e. to 1/2 or 2/3 frame rate). This can be used to help
+ * prevent buffer under-run in CBR mode. Alternatively it might be
+ * desirable in some situations to drop frame rate but throw more bits
+ * at each frame.
+ *
+ * Note that dropping a key frame can be problematic if spatial
+ * resampling is also active
+ */
+ if (cpi->decimation_factor > 0 && cpi->drop_frames_allowed) {
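+    /* Frames that survive decimation get a larger per-frame budget. The
+     * multipliers stay below the full reciprocal of the keep ratio (e.g. 3/2
+     * rather than 2x when every other frame is dropped), presumably to leave
+     * headroom for the buffer to refill.
+     */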
+ switch (cpi->decimation_factor) {
+ case 1:
+ cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
+ break;
+ case 2:
+ cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
+ break;
+ case 3:
+ cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
+ break;
+ }
+
+ /* Note that we should not throw out a key frame (especially when
+ * spatial resampling is enabled).
+ */
+ if (cm->frame_type == KEY_FRAME) {
+ cpi->decimation_count = cpi->decimation_factor;
+ } else if (cpi->decimation_count > 0) {
+ cpi->decimation_count--;
+
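+      /* Return the dropped frame's bit allocation to the buffer, capped at
+       * the maximum buffer size below.
+       */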
+ cpi->bits_off_target += cpi->av_per_frame_bandwidth;
+ if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
+ cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
+ }
+
+#if CONFIG_MULTI_RES_ENCODING
+ vp8_store_drop_frame_info(cpi);
+#endif
+
+ cm->current_video_frame++;
+ cpi->frames_since_key++;
+ cpi->ext_refresh_frame_flags_pending = 0;
+ // We advance the temporal pattern for dropped frames.
+ cpi->temporal_pattern_counter++;
+
+#if CONFIG_INTERNAL_STATS
+ cpi->count++;
+#endif
+
+ cpi->buffer_level = cpi->bits_off_target;
+
+ if (cpi->oxcf.number_of_layers > 1) {
+ unsigned int i;
+
+ /* Propagate bits saved by dropping the frame to higher
+ * layers
+ */
+ for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
+ if (lc->bits_off_target > lc->maximum_buffer_size) {
+ lc->bits_off_target = lc->maximum_buffer_size;
+ }
+ lc->buffer_level = lc->bits_off_target;
+ }
+ }
+
+ return;
+ } else {
+ cpi->decimation_count = cpi->decimation_factor;
+ }
+ } else {
+ cpi->decimation_count = 0;
+ }
+
+ /* Decide how big to make the frame */
+ if (!vp8_pick_frame_size(cpi)) {
+/* TODO: the two drop_frame-and-return code paths could be combined. */
+#if CONFIG_MULTI_RES_ENCODING
+ vp8_store_drop_frame_info(cpi);
+#endif
+ cm->current_video_frame++;
+ cpi->frames_since_key++;
+ cpi->ext_refresh_frame_flags_pending = 0;
+ // We advance the temporal pattern for dropped frames.
+ cpi->temporal_pattern_counter++;
+ return;
+ }
+
+ /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
+ * This has a knock on effect on active best quality as well.
+ * For CBR if the buffer reaches its maximum level then we can no longer
+ * save up bits for later frames so we might as well use them up
+ * on the current frame.
+ */
+ if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
+ (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
+ cpi->buffered_mode) {
+ /* Max adjustment is 1/4 */
+ int Adjustment = cpi->active_worst_quality / 4;
+
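+    /* Scale the reduction linearly with how far the buffer level sits above
+     * the optimal level: no reduction at the optimal level, the full 1/4 of
+     * active_worst_quality when the buffer is at its maximum size.
+     */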
+ if (Adjustment) {
+ int buff_lvl_step;
+
+ if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
+ buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
+ cpi->oxcf.optimal_buffer_level) /
+ Adjustment);
+
+ if (buff_lvl_step) {
+ Adjustment =
+ (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
+ buff_lvl_step);
+ } else {
+ Adjustment = 0;
+ }
+ }
+
+ cpi->active_worst_quality -= Adjustment;
+
+ if (cpi->active_worst_quality < cpi->active_best_quality) {
+ cpi->active_worst_quality = cpi->active_best_quality;
+ }
+ }
+ }
+
+  /* Set an active best quality and, if necessary, an active worst quality.
+ * There is some odd behavior for one pass here that needs attention.
+ */
+ if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
+ vpx_clear_system_state();
+
+ Q = cpi->active_worst_quality;
+
+ if (cm->frame_type == KEY_FRAME) {
+ if (cpi->pass == 2) {
+ if (cpi->gfu_boost > 600) {
+ cpi->active_best_quality = kf_low_motion_minq[Q];
+ } else {
+ cpi->active_best_quality = kf_high_motion_minq[Q];
+ }
+
+ /* Special case for key frames forced because we have reached
+ * the maximum key frame interval. Here force the Q to a range
+ * based on the ambient Q to reduce the risk of popping
+ */
+ if (cpi->this_key_frame_forced) {
+ if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
+ cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
+ } else if (cpi->active_best_quality < (cpi->avg_frame_qindex >> 2)) {
+ cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
+ }
+ }
+ }
+ /* One pass more conservative */
+ else {
+ cpi->active_best_quality = kf_high_motion_minq[Q];
+ }
+ }
+
+ else if (cpi->oxcf.number_of_layers == 1 &&
+ (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
+ /* Use the lower of cpi->active_worst_quality and recent
+ * average Q as basis for GF/ARF Q limit unless last frame was
+ * a key frame.
+ */
+ if ((cpi->frames_since_key > 1) &&
+ (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
+ Q = cpi->avg_frame_qindex;
+ }
+
+      /* For constrained quality don't allow Q less than the cq level */
+ if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (Q < cpi->cq_target_quality)) {
+ Q = cpi->cq_target_quality;
+ }
+
+ if (cpi->pass == 2) {
+ if (cpi->gfu_boost > 1000) {
+ cpi->active_best_quality = gf_low_motion_minq[Q];
+ } else if (cpi->gfu_boost < 400) {
+ cpi->active_best_quality = gf_high_motion_minq[Q];
+ } else {
+ cpi->active_best_quality = gf_mid_motion_minq[Q];
+ }
+
+        /* Constrained quality uses a slightly lower active best. */
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
+ }
+ }
+ /* One pass more conservative */
+ else {
+ cpi->active_best_quality = gf_high_motion_minq[Q];
+ }
+ } else {
+ cpi->active_best_quality = inter_minq[Q];
+
+      /* For the constant/constrained quality mode we don't want
+ * q to fall below the cq level.
+ */
+ if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (cpi->active_best_quality < cpi->cq_target_quality)) {
+ /* If we are strongly undershooting the target rate in the last
+ * frames then use the user passed in cq value not the auto
+ * cq value.
+ */
+ if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
+ cpi->active_best_quality = cpi->oxcf.cq_level;
+ } else {
+ cpi->active_best_quality = cpi->cq_target_quality;
+ }
+ }
+ }
+
+    /* If CBR and the buffer is full then it is reasonable to allow
+ * higher quality on the frames to prevent bits just going to waste.
+ */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+      /* Note that the use of >= here eliminates the risk of a divide
+       * by 0 error in the else if clause.
+ */
+ if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
+ cpi->active_best_quality = cpi->best_quality;
+
+ } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
+ int Fraction =
+ (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
+ (cpi->oxcf.maximum_buffer_size -
+ cpi->oxcf.optimal_buffer_level));
+ int min_qadjustment =
+ ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
+
+ cpi->active_best_quality -= min_qadjustment;
+ }
+ }
+ }
+ /* Make sure constrained quality mode limits are adhered to for the first
+ * few frames of one pass encodes
+ */
+ else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
+ cpi->common.refresh_alt_ref_frame) {
+ cpi->active_best_quality = cpi->best_quality;
+ } else if (cpi->active_best_quality < cpi->cq_target_quality) {
+ cpi->active_best_quality = cpi->cq_target_quality;
+ }
+ }
+
+ /* Clip the active best and worst quality values to limits */
+ if (cpi->active_worst_quality > cpi->worst_quality) {
+ cpi->active_worst_quality = cpi->worst_quality;
+ }
+
+ if (cpi->active_best_quality < cpi->best_quality) {
+ cpi->active_best_quality = cpi->best_quality;
+ }
+
+ if (cpi->active_worst_quality < cpi->active_best_quality) {
+ cpi->active_worst_quality = cpi->active_best_quality;
+ }
+
+ /* Determine initial Q to try */
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+
+#if !CONFIG_REALTIME_ONLY
+
+ /* Set highest allowed value for Zbin over quant */
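+  /* zbin_over_quant effectively widens the quantizer zero bin once Q has
+   * reached MAXQ, extending the usable quantizer range. Key frames never
+   * use it and GF/ARF updates allow only a small amount.
+   */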
+ if (cm->frame_type == KEY_FRAME) {
+ zbin_oq_high = 0;
+ } else if ((cpi->oxcf.number_of_layers == 1) &&
+ ((cm->refresh_alt_ref_frame ||
+ (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
+ zbin_oq_high = 16;
+ } else {
+ zbin_oq_high = ZBIN_OQ_MAX;
+ }
+#endif
+
+ compute_skin_map(cpi);
+
+ /* Setup background Q adjustment for error resilient mode.
+ * For multi-layer encodes only enable this for the base layer.
+ */
+ if (cpi->cyclic_refresh_mode_enabled) {
+ // Special case for screen_content_mode with golden frame updates.
+ int disable_cr_gf =
+ (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
+ if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
+ cyclic_background_refresh(cpi, Q, 0);
+ } else {
+ disable_segmentation(cpi);
+ }
+ }
+
+ vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
+ &frame_over_shoot_limit);
+
+#if !CONFIG_REALTIME_ONLY
+ /* Limit Q range for the adaptive loop. */
+ bottom_index = cpi->active_best_quality;
+ top_index = cpi->active_worst_quality;
+ q_low = cpi->active_best_quality;
+ q_high = cpi->active_worst_quality;
+#endif
+
+ vp8_save_coding_context(cpi);
+
+ scale_and_extend_source(cpi->un_scaled_source, cpi);
+
+#if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
+ // Option to apply spatial blur under the aggressive or adaptive
+ // (temporal denoising) mode.
+ if (cpi->oxcf.noise_sensitivity >= 3) {
+ if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
+ vp8_de_noise(cm, cpi->Source, cpi->denoiser.denoise_pars.spatial_blur, 1);
+ }
+ }
+#endif
+
+#if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
+
+ if (cpi->oxcf.noise_sensitivity > 0) {
+ unsigned char *src;
+ int l = 0;
+
+ switch (cpi->oxcf.noise_sensitivity) {
+ case 1: l = 20; break;
+ case 2: l = 40; break;
+ case 3: l = 60; break;
+ case 4: l = 80; break;
+ case 5: l = 100; break;
+ case 6: l = 150; break;
+ }
+
+ if (cm->frame_type == KEY_FRAME) {
+ vp8_de_noise(cm, cpi->Source, l, 1);
+ } else {
+ vp8_de_noise(cm, cpi->Source, l, 1);
+
+ src = cpi->Source->y_buffer;
+
+ if (cpi->Source->y_stride < 0) {
+ src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
+ }
+ }
+ }
+
+#endif
+
+#ifdef OUTPUT_YUV_SRC
+ vpx_write_yuv_frame(yuv_file, cpi->Source);
+#endif
+
+ do {
+ vpx_clear_system_state();
+
+ vp8_set_quantizer(cpi, Q);
+
+ /* setup skip prob for costing in mode/mv decision */
+ if (cpi->common.mb_no_coeff_skip) {
+ cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
+
+ if (cm->frame_type != KEY_FRAME) {
+ if (cpi->common.refresh_alt_ref_frame) {
+ if (cpi->last_skip_false_probs[2] != 0) {
+ cpi->prob_skip_false = cpi->last_skip_false_probs[2];
+ }
+
+ /*
+ if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
+ cpi->last_skip_probs_q[2])<=16 )
+ cpi->prob_skip_false = cpi->last_skip_false_probs[2];
+ else if (cpi->last_skip_false_probs[2]!=0)
+ cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
+ cpi->prob_skip_false ) / 2;
+ */
+ } else if (cpi->common.refresh_golden_frame) {
+ if (cpi->last_skip_false_probs[1] != 0) {
+ cpi->prob_skip_false = cpi->last_skip_false_probs[1];
+ }
+
+ /*
+ if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
+ cpi->last_skip_probs_q[1])<=16 )
+ cpi->prob_skip_false = cpi->last_skip_false_probs[1];
+ else if (cpi->last_skip_false_probs[1]!=0)
+ cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
+ cpi->prob_skip_false ) / 2;
+ */
+ } else {
+ if (cpi->last_skip_false_probs[0] != 0) {
+ cpi->prob_skip_false = cpi->last_skip_false_probs[0];
+ }
+
+ /*
+ if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
+ cpi->last_skip_probs_q[0])<=16 )
+ cpi->prob_skip_false = cpi->last_skip_false_probs[0];
+ else if(cpi->last_skip_false_probs[0]!=0)
+ cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
+ cpi->prob_skip_false ) / 2;
+ */
+ }
+
+        /* As this is for a cost estimate, make sure it does not
+         * go to an extreme either way.
+ */
+ if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
+
+ if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
+
+ if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
+ cpi->prob_skip_false = 1;
+ }
+ }
+
+#if 0
+
+ if (cpi->pass != 1)
+ {
+ FILE *f = fopen("skip.stt", "a");
+ fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
+ fclose(f);
+ }
+
+#endif
+ }
+
+ if (cm->frame_type == KEY_FRAME) {
+ if (resize_key_frame(cpi)) {
+ /* If the frame size has changed, need to reset Q, quantizer,
+ * and background refresh.
+ */
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+ if (cpi->cyclic_refresh_mode_enabled) {
+ if (cpi->current_layer == 0) {
+ cyclic_background_refresh(cpi, Q, 0);
+ } else {
+ disable_segmentation(cpi);
+ }
+ }
+ // Reset the zero_last counter to 0 on key frame.
+ memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
+ memset(cpi->consec_zero_last_mvbias, 0,
+ (cpi->common.mb_rows * cpi->common.mb_cols));
+ vp8_set_quantizer(cpi, Q);
+ }
+
+ vp8_setup_key_frame(cpi);
+ }
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ {
+ if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
+
+ if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
+ if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
+ }
+
+ if (cm->refresh_entropy_probs == 0) {
+ /* save a copy for later refresh */
+ memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
+ }
+
+ vp8_update_coef_context(cpi);
+
+ vp8_update_coef_probs(cpi);
+
+ /* transform / motion compensation build reconstruction frame
+ * +pack coef partitions
+ */
+ vp8_encode_frame(cpi);
+
+ /* cpi->projected_frame_size is not needed for RT mode */
+ }
+#else
+ /* transform / motion compensation build reconstruction frame */
+ vp8_encode_frame(cpi);
+
+ if (cpi->pass == 0 && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ if (vp8_drop_encodedframe_overshoot(cpi, Q)) {
+ vpx_clear_system_state();
+ return;
+ }
+ if (cm->frame_type != KEY_FRAME)
+ cpi->last_pred_err_mb =
+ (int)(cpi->mb.prediction_error / cpi->common.MBs);
+ }
+
+ cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
+ cpi->projected_frame_size =
+ (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
+#endif
+ vpx_clear_system_state();
+
+ /* Test to see if the stats generated for this frame indicate that
+ * we should have coded a key frame (assuming that we didn't)!
+ */
+
+ if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
+ cpi->compressor_speed != 2) {
+#if !CONFIG_REALTIME_ONLY
+ if (decide_key_frame(cpi)) {
+ /* Reset all our sizing numbers and recode */
+ cm->frame_type = KEY_FRAME;
+
+ vp8_pick_frame_size(cpi);
+
+ /* Clear the Alt reference frame active flag when we have
+ * a key frame
+ */
+ cpi->source_alt_ref_active = 0;
+
+ // Set the loop filter deltas and segmentation map update
+ setup_features(cpi);
+
+ vp8_restore_coding_context(cpi);
+
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+
+ vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
+ &frame_over_shoot_limit);
+
+ /* Limit Q range for the adaptive loop. */
+ bottom_index = cpi->active_best_quality;
+ top_index = cpi->active_worst_quality;
+ q_low = cpi->active_best_quality;
+ q_high = cpi->active_worst_quality;
+
+ Loop = 1;
+
+ continue;
+ }
+#endif
+ }
+
+ vpx_clear_system_state();
+
+ if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
+
+    /* Are we overshooting and up against the limit of active max Q? */
+ if (!cpi->rt_always_update_correction_factor &&
+ ((cpi->pass != 2) ||
+ (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
+ (Q == cpi->active_worst_quality) &&
+ (cpi->active_worst_quality < cpi->worst_quality) &&
+ (cpi->projected_frame_size > frame_over_shoot_limit)) {
+ int over_size_percent =
+ ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
+ frame_over_shoot_limit;
+
+      /* If so, is there any scope for relaxing it? */
+ while ((cpi->active_worst_quality < cpi->worst_quality) &&
+ (over_size_percent > 0)) {
+ cpi->active_worst_quality++;
+ /* Assume 1 qstep = about 4% on frame size. */
+ over_size_percent = (int)(over_size_percent * 0.96);
+ }
+#if !CONFIG_REALTIME_ONLY
+ top_index = cpi->active_worst_quality;
+#endif // !CONFIG_REALTIME_ONLY
+ /* If we have updated the active max Q do not call
+       * vp8_update_rate_correction_factors() on this loop iteration.
+ */
+ active_worst_qchanged = 1;
+ } else {
+ active_worst_qchanged = 0;
+ }
+
+#if CONFIG_REALTIME_ONLY
+ Loop = 0;
+#else
+ /* Special case handling for forced key frames */
+ if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
+ int last_q = Q;
+ int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
+
+ /* The key frame is not good enough */
+ if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
+ /* Lower q_high */
+ q_high = (Q > q_low) ? (Q - 1) : q_low;
+
+ /* Adjust Q */
+ Q = (q_high + q_low) >> 1;
+ }
+ /* The key frame is much better than the previous frame */
+ else if (kf_err < (cpi->ambient_err >> 1)) {
+ /* Raise q_low */
+ q_low = (Q < q_high) ? (Q + 1) : q_high;
+
+ /* Adjust Q */
+ Q = (q_high + q_low + 1) >> 1;
+ }
+
+ /* Clamp Q to upper and lower limits: */
+ if (Q > q_high) {
+ Q = q_high;
+ } else if (Q < q_low) {
+ Q = q_low;
+ }
+
+ Loop = Q != last_q;
+ }
+
+ /* Is the projected frame size out of range and are we allowed
+     * to attempt to recode?
+ */
+ else if (recode_loop_test(cpi, frame_over_shoot_limit,
+ frame_under_shoot_limit, Q, top_index,
+ bottom_index)) {
+ int last_q = Q;
+ int Retries = 0;
+
+ /* Frame size out of permitted range. Update correction factor
+ * & compute new Q to try...
+ */
+
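+      /* While only one side of the target has been seen, re-run the rate
+       * control estimate; once both overshoot and undershoot have been
+       * observed, bisect between q_low and q_high (and, once Q has hit MAXQ,
+       * over the zbin_over_quant range).
+       */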
+ /* Frame is too large */
+ if (cpi->projected_frame_size > cpi->this_frame_target) {
+      /* Raise q_low to at least the current value */
+ q_low = (Q < q_high) ? (Q + 1) : q_high;
+
+ /* If we are using over quant do the same for zbin_oq_low */
+ if (cpi->mb.zbin_over_quant > 0) {
+ zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
+ ? (cpi->mb.zbin_over_quant + 1)
+ : zbin_oq_high;
+ }
+
+ if (undershoot_seen) {
+ /* Update rate_correction_factor unless
+ * cpi->active_worst_quality has changed.
+ */
+ if (!active_worst_qchanged) {
+ vp8_update_rate_correction_factors(cpi, 1);
+ }
+
+ Q = (q_high + q_low + 1) / 2;
+
+ /* Adjust cpi->zbin_over_quant (only allowed when Q
+ * is max)
+ */
+ if (Q < MAXQ) {
+ cpi->mb.zbin_over_quant = 0;
+ } else {
+ zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
+ ? (cpi->mb.zbin_over_quant + 1)
+ : zbin_oq_high;
+ cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
+ }
+ } else {
+ /* Update rate_correction_factor unless
+ * cpi->active_worst_quality has changed.
+ */
+ if (!active_worst_qchanged) {
+ vp8_update_rate_correction_factors(cpi, 0);
+ }
+
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+
+ while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
+ (Retries < 10)) {
+ vp8_update_rate_correction_factors(cpi, 0);
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+ Retries++;
+ }
+ }
+
+ overshoot_seen = 1;
+ }
+ /* Frame is too small */
+ else {
+ if (cpi->mb.zbin_over_quant == 0) {
+ /* Lower q_high if not using over quant */
+ q_high = (Q > q_low) ? (Q - 1) : q_low;
+ } else {
+ /* else lower zbin_oq_high */
+ zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
+ ? (cpi->mb.zbin_over_quant - 1)
+ : zbin_oq_low;
+ }
+
+ if (overshoot_seen) {
+ /* Update rate_correction_factor unless
+ * cpi->active_worst_quality has changed.
+ */
+ if (!active_worst_qchanged) {
+ vp8_update_rate_correction_factors(cpi, 1);
+ }
+
+ Q = (q_high + q_low) / 2;
+
+ /* Adjust cpi->zbin_over_quant (only allowed when Q
+ * is max)
+ */
+ if (Q < MAXQ) {
+ cpi->mb.zbin_over_quant = 0;
+ } else {
+ cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
+ }
+ } else {
+ /* Update rate_correction_factor unless
+ * cpi->active_worst_quality has changed.
+ */
+ if (!active_worst_qchanged) {
+ vp8_update_rate_correction_factors(cpi, 0);
+ }
+
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+
+ /* Special case reset for qlow for constrained quality.
+ * This should only trigger where there is very substantial
+ * undershoot on a frame and the auto cq level is above
+           * the user passed in value.
+ */
+ if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (Q < q_low)) {
+ q_low = Q;
+ }
+
+ while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
+ (Retries < 10)) {
+ vp8_update_rate_correction_factors(cpi, 0);
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+ Retries++;
+ }
+ }
+
+ undershoot_seen = 1;
+ }
+
+ /* Clamp Q to upper and lower limits: */
+ if (Q > q_high) {
+ Q = q_high;
+ } else if (Q < q_low) {
+ Q = q_low;
+ }
+
+ /* Clamp cpi->zbin_over_quant */
+ cpi->mb.zbin_over_quant =
+ (cpi->mb.zbin_over_quant < zbin_oq_low) ? zbin_oq_low
+ : (cpi->mb.zbin_over_quant > zbin_oq_high) ? zbin_oq_high
+ : cpi->mb.zbin_over_quant;
+
+ Loop = Q != last_q;
+ } else {
+ Loop = 0;
+ }
+#endif // CONFIG_REALTIME_ONLY
+
+ if (cpi->is_src_frame_alt_ref) Loop = 0;
+
+ if (Loop == 1) {
+ vp8_restore_coding_context(cpi);
+#if CONFIG_INTERNAL_STATS
+ cpi->tot_recode_hits++;
+#endif
+ }
+ } while (Loop == 1);
+
+#if defined(DROP_UNCODED_FRAMES)
+ /* if there are no coded macroblocks at all drop this frame */
+ if (cpi->common.MBs == cpi->mb.skip_true_count &&
+ (cpi->drop_frame_count & 7) != 7 && cm->frame_type != KEY_FRAME) {
+ cpi->common.current_video_frame++;
+ cpi->frames_since_key++;
+ cpi->drop_frame_count++;
+ cpi->ext_refresh_frame_flags_pending = 0;
+ // We advance the temporal pattern for dropped frames.
+ cpi->temporal_pattern_counter++;
+ return;
+ }
+ cpi->drop_frame_count = 0;
+#endif
+
+#if 0
+ /* Experimental code for lagged and one pass
+ * Update stats used for one pass GF selection
+ */
+ {
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
+ cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
+ }
+#endif
+
+ /* Special case code to reduce pulsing when key frames are forced at a
+   * fixed interval. Note the reconstruction error if it is the frame before
+   * the forced key frame.
+ */
+ if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
+ cpi->ambient_err =
+ vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
+ }
+
+/* This frame's MVs are saved and will be used in the next frame's MV
+ * predictor. The last frame has one more line (added at the bottom) and one
+ * more column (added at the right) than cm->mip. The edge elements are
+ * initialized to 0.
+ */
+#if CONFIG_MULTI_RES_ENCODING
+ if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
+#else
+ if (cm->show_frame) /* do not save for altref frame */
+#endif
+ {
+ int mb_row;
+ int mb_col;
+ /* Point to beginning of allocated MODE_INFO arrays. */
+ MODE_INFO *tmp = cm->mip;
+
+ if (cm->frame_type != KEY_FRAME) {
+ for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
+ for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
+ if (tmp->mbmi.ref_frame != INTRA_FRAME) {
+ cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
+ tmp->mbmi.mv.as_int;
+ }
+
+ cpi->lf_ref_frame_sign_bias[mb_col +
+ mb_row * (cm->mode_info_stride + 1)] =
+ cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
+ cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
+ tmp->mbmi.ref_frame;
+ tmp++;
+ }
+ }
+ }
+ }
+
+ /* Count last ref frame 0,0 usage on current encoded frame. */
+ {
+ int mb_row;
+ int mb_col;
+ /* Point to beginning of MODE_INFO arrays. */
+ MODE_INFO *tmp = cm->mi;
+
+ cpi->zeromv_count = 0;
+
+ if (cm->frame_type != KEY_FRAME) {
+ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+ if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
+ cpi->zeromv_count++;
+ }
+ tmp++;
+ }
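+        /* Skip the extra right-hand border element of each mi row. */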
+ tmp++;
+ }
+ }
+ }
+
+#if CONFIG_MULTI_RES_ENCODING
+ vp8_cal_dissimilarity(cpi);
+#endif
+
+  /* Update the GF usage maps.
+   * This is done after completing the compression of a frame when all
+   * modes etc. are finalized but before the loop filter.
+ */
+ if (cpi->oxcf.number_of_layers == 1) {
+ vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
+ }
+
+ if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
+
+#if 0
+ {
+ FILE *f = fopen("gfactive.stt", "a");
+ fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
+ fclose(f);
+ }
+#endif
+
+ /* For inter frames the current default behavior is that when
+   * cm->refresh_golden_frame is set we copy the old GF over to the ARF
+   * buffer. This is purely an encoder decision at present.
+ * Avoid this behavior when refresh flags are set by the user.
+ */
+ if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame &&
+ !cpi->ext_refresh_frame_flags_pending) {
+ cm->copy_buffer_to_arf = 2;
+ } else {
+ cm->copy_buffer_to_arf = 0;
+ }
+
+ cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
+
+#if CONFIG_TEMPORAL_DENOISING
+  // Get some measure of the amount of noise, by measuring the (partial) mse
+  // between source and denoised buffer, for the y channel. Partial refers to
+  // computing the sse for a sub-sample of the frame (i.e., skip x blocks
+  // along row/column), and only for blocks in that set that are consecutive
+  // ZEROMV_LAST mode. Do this every ~8 frames, to further reduce complexity.
+  // TODO(marpan): Keep this for now for the case
+  // cpi->oxcf.noise_sensitivity < 4; it should be removed in favor of the
+  // process_denoiser_mode_change() function below.
+ if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
+ !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
+ cm->frame_type != KEY_FRAME) {
+ cpi->mse_source_denoised = measure_square_diff_partial(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
+ }
+
+ // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
+ // of source diff (between current and previous frame), and determine if we
+ // should switch the denoiser mode. Sampling refers to computing the mse for
+ // a sub-sample of the frame (i.e., skip x blocks along row/column), and
+ // only for blocks in that set that have used ZEROMV LAST, along with some
+ // constraint on the sum diff between blocks. This process is called every
+ // ~8 frames, to further reduce complexity.
+ if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
+ cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
+ process_denoiser_mode_change(cpi);
+ }
+#endif
+
+#ifdef OUTPUT_YUV_SKINMAP
+ if (cpi->common.current_video_frame > 1) {
+ vp8_compute_skin_map(cpi, yuv_skinmap_file);
+ }
+#endif
+
+#if CONFIG_MULTITHREAD
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
+ /* start loopfilter in separate thread */
+ sem_post(&cpi->h_event_start_lpf);
+ cpi->b_lpf_running = 1;
+ /* wait for the filter_level to be picked so that we can continue with
+ * stream packing */
+ sem_wait(&cpi->h_event_end_lpf);
+ } else
+#endif
+ {
+ vp8_loopfilter_frame(cpi, cm);
+ }
+
+ update_reference_frames(cpi);
+
+#ifdef OUTPUT_YUV_DENOISED
+ vpx_write_yuv_frame(yuv_denoised_file,
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
+#endif
+
+#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ if (cpi->oxcf.error_resilient_mode) {
+ cm->refresh_entropy_probs = 0;
+ }
+#endif
+
+ /* build the bitstream */
+ vp8_pack_bitstream(cpi, dest, dest_end, size);
+
+  /* Storing frame_type is kept out of the above loop since it is also
+   * needed in motion search besides the loop filter. */
+ cm->last_frame_type = cm->frame_type;
+
+ /* Update rate control heuristics */
+ cpi->total_byte_count += (*size);
+ cpi->projected_frame_size = (int)(*size) << 3;
+
+ if (cpi->oxcf.number_of_layers > 1) {
+ unsigned int i;
+ for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
+ cpi->layer_context[i].total_byte_count += (*size);
+ }
+ }
+
+ if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
+
+ cpi->last_q[cm->frame_type] = cm->base_qindex;
+
+ if (cm->frame_type == KEY_FRAME) {
+ vp8_adjust_key_frame_context(cpi);
+ }
+
+ /* Keep a record of ambient average Q. */
+ if (cm->frame_type != KEY_FRAME) {
+ cpi->avg_frame_qindex =
+ (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
+ }
+
+ /* Keep a record from which we can calculate the average Q excluding
+ * GF updates and key frames
+ */
+ if ((cm->frame_type != KEY_FRAME) &&
+ ((cpi->oxcf.number_of_layers > 1) ||
+ (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
+ cpi->ni_frames++;
+
+ /* Calculate the average Q for normal inter frames (not key or GFU
+ * frames).
+ */
+ if (cpi->pass == 2) {
+ cpi->ni_tot_qi += Q;
+ cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
+ } else {
+ /* Damp value for first few frames */
+ if (cpi->ni_frames > 150) {
+ cpi->ni_tot_qi += Q;
+ cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
+ }
+ /* For one pass, early in the clip ... average the current frame Q
+ * value with the worstq entered by the user as a dampening measure
+ */
+ else {
+ cpi->ni_tot_qi += Q;
+ cpi->ni_av_qi =
+ ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
+ }
+
+ /* If the average Q is higher than what was used in the last
+ * frame (after going through the recode loop to keep the frame
+ * size within range) then use the last frame value - 1. The -1
+ * is designed to stop Q and hence the data rate, from
+ * progressively falling away during difficult sections, but at
+ * the same time reduce the number of itterations around the
+ * recode loop.
+ */
+ if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
+ }
+ }
+
+ /* Update the buffer level variable. */
+ /* Non-viewable frames are a special case and are treated as pure overhead. */
+ if (!cm->show_frame) {
+ cpi->bits_off_target -= cpi->projected_frame_size;
+ } else {
+ cpi->bits_off_target +=
+ cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
+ }
+
+ /* Clip the buffer level to the maximum specified buffer size */
+ if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
+ cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
+ }
+
+ // Don't let the buffer level go below some threshold, given here
+ // by -|maximum_buffer_size|. For now we only do this for
+ // screen content input.
+ if (cpi->oxcf.screen_content_mode &&
+ cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
+ cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
+ }
+
+  /* Rolling monitors of whether we are over- or under-spending, used to
+   * help regulate min and max Q in two pass.
+   */
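+  /* Each is an exponentially weighted moving average: 3/4 old + 1/4 new for
+   * the short-term monitors, 31/32 old + 1/32 new for the long-term ones.
+   */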
+ cpi->rolling_target_bits = (int)ROUND64_POWER_OF_TWO(
+ (int64_t)cpi->rolling_target_bits * 3 + cpi->this_frame_target, 2);
+ cpi->rolling_actual_bits = (int)ROUND64_POWER_OF_TWO(
+ (int64_t)cpi->rolling_actual_bits * 3 + cpi->projected_frame_size, 2);
+ cpi->long_rolling_target_bits = (int)ROUND64_POWER_OF_TWO(
+ (int64_t)cpi->long_rolling_target_bits * 31 + cpi->this_frame_target, 5);
+ cpi->long_rolling_actual_bits = (int)ROUND64_POWER_OF_TWO(
+ (int64_t)cpi->long_rolling_actual_bits * 31 + cpi->projected_frame_size,
+ 5);
+
+ /* Actual bits spent */
+ cpi->total_actual_bits += cpi->projected_frame_size;
+
+#if 0 && CONFIG_INTERNAL_STATS
+ /* Debug stats */
+ cpi->total_target_vs_actual +=
+ (cpi->this_frame_target - cpi->projected_frame_size);
+#endif
+
+ cpi->buffer_level = cpi->bits_off_target;
+
+ /* Propagate values to higher temporal layers */
+ if (cpi->oxcf.number_of_layers > 1) {
+ unsigned int i;
+
+ for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ int bits_off_for_this_layer = (int)round(
+ lc->target_bandwidth / lc->framerate - cpi->projected_frame_size);
+
+ lc->bits_off_target += bits_off_for_this_layer;
+
+ /* Clip buffer level to maximum buffer size for the layer */
+ if (lc->bits_off_target > lc->maximum_buffer_size) {
+ lc->bits_off_target = lc->maximum_buffer_size;
+ }
+
+ lc->total_actual_bits += cpi->projected_frame_size;
+ lc->total_target_vs_actual += bits_off_for_this_layer;
+ lc->buffer_level = lc->bits_off_target;
+ }
+ }
+
+ /* Update bits left to the kf and gf groups to account for overshoot
+ * or undershoot on these frames
+ */
+ if (cm->frame_type == KEY_FRAME) {
+ cpi->twopass.kf_group_bits +=
+ cpi->this_frame_target - cpi->projected_frame_size;
+
+ if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
+ } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
+ cpi->twopass.gf_group_bits +=
+ cpi->this_frame_target - cpi->projected_frame_size;
+
+ if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
+ }
+
+ if (cm->frame_type != KEY_FRAME) {
+ if (cpi->common.refresh_alt_ref_frame) {
+ cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
+ cpi->last_skip_probs_q[2] = cm->base_qindex;
+ } else if (cpi->common.refresh_golden_frame) {
+ cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
+ cpi->last_skip_probs_q[1] = cm->base_qindex;
+ } else {
+ cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
+ cpi->last_skip_probs_q[0] = cm->base_qindex;
+
+ /* update the baseline */
+ cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
+ }
+ }
+
+#if 0 && CONFIG_INTERNAL_STATS
+ {
+ FILE *f = fopen("tmp.stt", "a");
+
+ vpx_clear_system_state();
+
+ if (cpi->twopass.total_left_stats.coded_error != 0.0)
+ fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
+ "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
+ "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
+ cpi->common.current_video_frame, cpi->this_frame_target,
+ cpi->projected_frame_size,
+ (cpi->projected_frame_size - cpi->this_frame_target),
+ cpi->total_target_vs_actual,
+ cpi->buffer_level,
+ (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
+ cpi->total_actual_bits, cm->base_qindex,
+ cpi->active_best_quality, cpi->active_worst_quality,
+ cpi->ni_av_qi, cpi->cq_target_quality,
+ cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
+ cm->frame_type, cpi->gfu_boost,
+ cpi->twopass.est_max_qcorrection_factor,
+ cpi->twopass.bits_left,
+ cpi->twopass.total_left_stats.coded_error,
+ (double)cpi->twopass.bits_left /
+ cpi->twopass.total_left_stats.coded_error,
+ cpi->tot_recode_hits);
+ else
+ fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
+ "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
+ "%8.2lf %"PRId64" %10.3lf %8d\n",
+ cpi->common.current_video_frame, cpi->this_frame_target,
+ cpi->projected_frame_size,
+ (cpi->projected_frame_size - cpi->this_frame_target),
+ cpi->total_target_vs_actual,
+ cpi->buffer_level,
+ (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
+ cpi->total_actual_bits, cm->base_qindex,
+ cpi->active_best_quality, cpi->active_worst_quality,
+ cpi->ni_av_qi, cpi->cq_target_quality,
+ cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
+ cm->frame_type, cpi->gfu_boost,
+ cpi->twopass.est_max_qcorrection_factor,
+ cpi->twopass.bits_left,
+ cpi->twopass.total_left_stats.coded_error,
+ cpi->tot_recode_hits);
+
+ fclose(f);
+
+ {
+ FILE *fmodes = fopen("Modes.stt", "a");
+
+ fprintf(fmodes, "%6d:%1d:%1d:%1d ",
+ cpi->common.current_video_frame,
+ cm->frame_type, cm->refresh_golden_frame,
+ cm->refresh_alt_ref_frame);
+
+ fprintf(fmodes, "\n");
+
+ fclose(fmodes);
+ }
+ }
+
+#endif
+
+ cpi->ext_refresh_frame_flags_pending = 0;
+
+ if (cm->refresh_golden_frame == 1) {
+ cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
+ } else {
+ cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
+ }
+
+ if (cm->refresh_alt_ref_frame == 1) {
+ cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
+ } else {
+ cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
+ }
+
+ if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
+ cpi->gold_is_last = 1;
+ } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
+ /* 1 refreshed but not the other */
+ cpi->gold_is_last = 0;
+ }
+
+ if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
+ cpi->alt_is_last = 1;
+ } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
+ /* 1 refreshed but not the other */
+ cpi->alt_is_last = 0;
+ }
+
+ if (cm->refresh_alt_ref_frame &
+ cm->refresh_golden_frame) { /* both refreshed */
+ cpi->gold_is_alt = 1;
+ } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
+ /* 1 refreshed but not the other */
+ cpi->gold_is_alt = 0;
+ }
+
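+  /* Start from all references enabled, then mask out duplicates below,
+   * presumably so the motion search does not test the same buffer twice.
+   */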
+ cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
+
+ if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
+
+ if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
+
+ if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
+
+ if (!cpi->oxcf.error_resilient_mode) {
+ if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
+ (cm->frame_type != KEY_FRAME)) {
+ /* Update the alternate reference frame stats as appropriate. */
+ update_alt_ref_frame_stats(cpi);
+ } else {
+ /* Update the Golden frame stats as appropriate. */
+ update_golden_frame_stats(cpi);
+ }
+ }
+
+ if (cm->frame_type == KEY_FRAME) {
+ /* Tell the caller that the frame was coded as a key frame */
+ *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
+
+ /* As this frame is a key frame the next defaults to an inter frame. */
+ cm->frame_type = INTER_FRAME;
+
+ cpi->last_frame_percent_intra = 100;
+ } else {
+ *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
+
+ cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
+ }
+
+ /* Clear the one shot update flags for segmentation map and mode/ref
+ * loop filter deltas.
+ */
+ cpi->mb.e_mbd.update_mb_segmentation_map = 0;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 0;
+ cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
+
+  /* Don't increment frame counters if this was an altref buffer update,
+   * not a real frame.
+   */
+ if (cm->show_frame) {
+ cm->current_video_frame++;
+ cpi->frames_since_key++;
+ cpi->temporal_pattern_counter++;
+ }
+
+#if 0
+ {
+ char filename[512];
+ FILE *recon_file;
+ sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
+ recon_file = fopen(filename, "wb");
+ fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
+ cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
+ fclose(recon_file);
+ }
+#endif
+
+ /* DEBUG */
+ /* vpx_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
+}
+#if !CONFIG_REALTIME_ONLY
+static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
+ unsigned char *dest_end, unsigned int *frame_flags) {
+ if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
+
+ encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
+ cpi->twopass.bits_left -= 8 * (int)(*size);
+
+ if (!cpi->common.refresh_alt_ref_frame) {
+ double two_pass_min_rate =
+ (double)(cpi->oxcf.target_bandwidth *
+ cpi->oxcf.two_pass_vbrmin_section / 100);
+ cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
+ }
+}
+#endif
+
+int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
+ YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ int64_t end_time) {
+ struct vpx_usec_timer timer;
+ int res = 0;
+
+ vpx_usec_timer_start(&timer);
+
+ /* Reinit the lookahead buffer if the frame size changes */
+ if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
+ assert(cpi->oxcf.lag_in_frames < 2);
+ dealloc_raw_frame_buffers(cpi);
+ alloc_raw_frame_buffers(cpi);
+ }
+
+ if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
+ cpi->active_map_enabled ? cpi->active_map : NULL)) {
+ res = -1;
+ }
+ vpx_usec_timer_mark(&timer);
+ cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
+
+ return res;
+}
+
+static int frame_is_reference(const VP8_COMP *cpi) {
+ const VP8_COMMON *cm = &cpi->common;
+ const MACROBLOCKD *xd = &cpi->mb.e_mbd;
+
+ return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
+ cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
+ cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
+ cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
+ xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
+}
+
+int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
+ size_t *size, unsigned char *dest,
+ unsigned char *dest_end, int64_t *time_stamp,
+ int64_t *time_end, int flush) {
+ VP8_COMMON *cm;
+ struct vpx_usec_timer tsctimer;
+ struct vpx_usec_timer ticktimer;
+ struct vpx_usec_timer cmptimer;
+ YV12_BUFFER_CONFIG *force_src_buffer = NULL;
+
+ if (!cpi) return -1;
+
+ cm = &cpi->common;
+
+ vpx_usec_timer_start(&cmptimer);
+
+ cpi->source = NULL;
+
+#if !CONFIG_REALTIME_ONLY
+ /* Should we code an alternate reference frame */
+ if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
+ cpi->source_alt_ref_pending) {
+ if ((cpi->source = vp8_lookahead_peek(
+ cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
+ cpi->alt_ref_source = cpi->source;
+ if (cpi->oxcf.arnr_max_frames > 0) {
+ vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
+ force_src_buffer = &cpi->alt_ref_buffer;
+ }
+ cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
+ cm->refresh_alt_ref_frame = 1;
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 0;
+ cm->show_frame = 0;
+ /* Clear Pending alt Ref flag. */
+ cpi->source_alt_ref_pending = 0;
+ cpi->is_src_frame_alt_ref = 0;
+ }
+ }
+#endif
+
+ if (!cpi->source) {
+ /* Read last frame source if we are encoding first pass. */
+ if (cpi->pass == 1 && cm->current_video_frame > 0) {
+ if ((cpi->last_source =
+ vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
+ return -1;
+ }
+ }
+
+ if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
+ cm->show_frame = 1;
+
+ cpi->is_src_frame_alt_ref =
+ cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);
+
+ if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
+ }
+ }
+
+ if (cpi->source) {
+ cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
+ cpi->un_scaled_source = cpi->Source;
+ *time_stamp = cpi->source->ts_start;
+ *time_end = cpi->source->ts_end;
+ *frame_flags = cpi->source->flags;
+
+ if (cpi->pass == 1 && cm->current_video_frame > 0) {
+ cpi->last_frame_unscaled_source = &cpi->last_source->img;
+ }
+ } else {
+ *size = 0;
+#if !CONFIG_REALTIME_ONLY
+
+ if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
+ vp8_end_first_pass(cpi); /* get last stats packet */
+ cpi->twopass.first_pass_done = 1;
+ }
+
+#endif
+
+ return -1;
+ }
+
+ if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
+ cpi->first_time_stamp_ever = cpi->source->ts_start;
+ cpi->last_end_time_stamp_seen = cpi->source->ts_start;
+ }
+
+ /* adjust frame rates based on timestamps given */
+ if (cm->show_frame) {
+ int64_t this_duration;
+ int step = 0;
+
+ if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
+ this_duration = cpi->source->ts_end - cpi->source->ts_start;
+ step = 1;
+ } else {
+ int64_t last_duration;
+
+ this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
+ last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
+ // Cap this to avoid overflow of (this_duration - last_duration) * 10
+ this_duration = VPXMIN(this_duration, INT64_MAX / 10);
+ /* do a step update if the duration changes by 10% */
+ if (last_duration) {
+ step = (int)(((this_duration - last_duration) * 10 / last_duration));
+ }
+ }
+
+ if (this_duration) {
+ if (step) {
+ cpi->ref_framerate = 10000000.0 / this_duration;
+ } else {
+ double avg_duration, interval;
+
+ /* Average this frame's rate into the last second's average
+ * frame rate. If we haven't seen 1 second yet, then average
+ * over the whole interval seen.
+ */
+ interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
+ if (interval > 10000000.0) interval = 10000000;
+
+ avg_duration = 10000000.0 / cpi->ref_framerate;
+ avg_duration *= (interval - avg_duration + this_duration);
+ avg_duration /= interval;
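+        /* The update above is equivalent to
+         * avg += avg * (this_duration - avg) / interval, i.e. the new sample
+         * is blended in with a weight of roughly one frame duration out of
+         * the averaging window.
+         */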
+
+ cpi->ref_framerate = 10000000.0 / avg_duration;
+ }
+#if CONFIG_MULTI_RES_ENCODING
+ if (cpi->oxcf.mr_total_resolutions > 1) {
+ LOWER_RES_FRAME_INFO *low_res_frame_info =
+ (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
+ // Frame rate should be the same for all spatial layers in
+        // multi-res-encoding (simulcast), so we constrain the frame rate of
+        // higher layers to be that of the lowest resolution. This is needed
+        // as the application may decide to skip encoding a high layer and
+ // then start again, in which case a big jump in time-stamps will
+ // be received for that high layer, which will yield an incorrect
+ // frame rate (from time-stamp adjustment in above calculation).
+ if (cpi->oxcf.mr_encoder_id) {
+ if (!low_res_frame_info->skip_encoding_base_stream)
+ cpi->ref_framerate = low_res_frame_info->low_res_framerate;
+ } else {
+ // Keep track of frame rate for lowest resolution.
+ low_res_frame_info->low_res_framerate = cpi->ref_framerate;
+ // The base stream is being encoded so set skip flag to 0.
+ low_res_frame_info->skip_encoding_base_stream = 0;
+ }
+ }
+#endif
+ if (cpi->oxcf.number_of_layers > 1) {
+ unsigned int i;
+
+ /* Update frame rates for each layer */
+ assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
+ for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
+ ++i) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
+ }
+ } else {
+ vp8_new_framerate(cpi, cpi->ref_framerate);
+ }
+ }
+
+ cpi->last_time_stamp_seen = cpi->source->ts_start;
+ cpi->last_end_time_stamp_seen = cpi->source->ts_end;
+ }
+
+ if (cpi->oxcf.number_of_layers > 1) {
+ int layer;
+
+ vp8_update_layer_contexts(cpi);
+
+ /* Restore layer specific context & set frame rate */
+ if (cpi->temporal_layer_id >= 0) {
+ layer = cpi->temporal_layer_id;
+ } else {
+ layer =
+ cpi->oxcf
+ .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
+ }
+ vp8_restore_layer_context(cpi, layer);
+ vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
+ }
+
+ if (cpi->compressor_speed == 2) {
+ vpx_usec_timer_start(&tsctimer);
+ vpx_usec_timer_start(&ticktimer);
+ }
+
+ cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;
+
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ {
+ int i;
+ const int num_part = (1 << cm->multi_token_partition);
+ /* the available bytes in dest */
+ const unsigned long dest_size = dest_end - dest;
+ const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
+
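+    /* For example, with 4 token partitions each partition gets 9/40 of the
+     * output buffer; the first 1/10 is reserved for the control partition
+     * below.
+     */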
+ unsigned char *dp = dest;
+
+ cpi->partition_d[0] = dp;
+ dp += dest_size / 10; /* reserve 1/10 for control partition */
+ cpi->partition_d_end[0] = dp;
+
+ for (i = 0; i < num_part; ++i) {
+ cpi->partition_d[i + 1] = dp;
+ dp += tok_part_buff_size;
+ cpi->partition_d_end[i + 1] = dp;
+ }
+ }
+#endif
+
+ /* start with a 0 size frame */
+ *size = 0;
+
+ /* Clear down mmx registers */
+ vpx_clear_system_state();
+
+ cm->frame_type = INTER_FRAME;
+ cm->frame_flags = *frame_flags;
+
+#if 0
+
+ if (cm->refresh_alt_ref_frame)
+ {
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 0;
+ }
+ else
+ {
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 1;
+ }
+
+#endif
+ /* find a free buffer for the new frame */
+ {
+ int i = 0;
+ for (; i < NUM_YV12_BUFFERS; ++i) {
+ if (!cm->yv12_fb[i].flags) {
+ cm->new_fb_idx = i;
+ break;
+ }
+ }
+
+ assert(i < NUM_YV12_BUFFERS);
+ }
+ switch (cpi->pass) {
+#if !CONFIG_REALTIME_ONLY
+ case 1: Pass1Encode(cpi); break;
+ case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
+#endif // !CONFIG_REALTIME_ONLY
+ default:
+ encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
+ break;
+ }
+
+ if (cpi->compressor_speed == 2) {
+ unsigned int duration, duration2;
+ vpx_usec_timer_mark(&tsctimer);
+ vpx_usec_timer_mark(&ticktimer);
+
+ duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
+ duration2 = (unsigned int)((double)duration / 2);
+
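+    /* duration2 (half the tick time) feeds avg_pick_mode_time, i.e. mode
+     * picking is assumed to take about half of the encode tick. Both running
+     * averages below are EMAs with a 1/8 update weight.
+     */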
+ if (cm->frame_type != KEY_FRAME) {
+ if (cpi->avg_encode_time == 0) {
+ cpi->avg_encode_time = duration;
+ } else {
+ cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
+ }
+ }
+
+ if (duration2) {
+ {
+ if (cpi->avg_pick_mode_time == 0) {
+ cpi->avg_pick_mode_time = duration2;
+ } else {
+ cpi->avg_pick_mode_time =
+ (7 * cpi->avg_pick_mode_time + duration2) >> 3;
+ }
+ }
+ }
+ }
+
+ if (cm->refresh_entropy_probs == 0) {
+ memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
+ }
+
+ /* Save the contexts separately for alt ref, gold and last. */
+  /* TODO(jbb): Optimize this with pointers to avoid extra copies. */
+ if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
+
+ if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
+
+ if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
+
+  /* If it's a dropped frame, honor the requests on subsequent frames. */
+ if (*size > 0) {
+ cpi->droppable = !frame_is_reference(cpi);
+
+ /* return to normal state */
+ cm->refresh_entropy_probs = 1;
+ cm->refresh_alt_ref_frame = 0;
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 1;
+ cm->frame_type = INTER_FRAME;
+ }
+
+ /* Save layer specific state */
+ if (cpi->oxcf.number_of_layers > 1) vp8_save_layer_context(cpi);
+
+ vpx_usec_timer_mark(&cmptimer);
+ cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
+
+ if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
+ generate_psnr_packet(cpi);
+ }
+
+#if CONFIG_INTERNAL_STATS
+
+ if (cpi->pass != 1) {
+ cpi->bytes += *size;
+
+ if (cm->show_frame) {
+ cpi->common.show_frame_mi = cpi->common.mi;
+ cpi->count++;
+
+ if (cpi->b_calculate_psnr) {
+ uint64_t ye, ue, ve;
+ double frame_psnr;
+ YV12_BUFFER_CONFIG *orig = cpi->Source;
+ YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
+ unsigned int y_width = cpi->common.Width;
+ unsigned int y_height = cpi->common.Height;
+ unsigned int uv_width = (y_width + 1) / 2;
+ unsigned int uv_height = (y_height + 1) / 2;
+ int y_samples = y_height * y_width;
+ int uv_samples = uv_height * uv_width;
+ int t_samples = y_samples + 2 * uv_samples;
+ double sq_error;
+
+ ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
+ recon->y_stride, y_width, y_height);
+
+ ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
+ recon->uv_stride, uv_width, uv_height);
+
+ ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
+ recon->uv_stride, uv_width, uv_height);
+
+ sq_error = (double)(ye + ue + ve);
+
+ frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
+
+ cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
+ cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
+ cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
+ cpi->total_sq_error += sq_error;
+ cpi->total += frame_psnr;
+#if CONFIG_POSTPROC
+ {
+ YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
+ double sq_error2;
+ double frame_psnr2, frame_ssim2 = 0;
+ double weight = 0;
+
+ vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
+ cm->filter_level * 10 / 6);
+ vpx_clear_system_state();
+
+ ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
+ pp->y_stride, y_width, y_height);
+
+ ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
+ pp->uv_stride, uv_width, uv_height);
+
+ ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
+ pp->uv_stride, uv_width, uv_height);
+
+ sq_error2 = (double)(ye + ue + ve);
+
+ frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);
+
+ cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
+ cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
+ cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
+ cpi->total_sq_error2 += sq_error2;
+ cpi->totalp += frame_psnr2;
+
+ frame_ssim2 =
+ vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);
+
+ cpi->summed_quality += frame_ssim2 * weight;
+ cpi->summed_weights += weight;
+
+ if (cpi->oxcf.number_of_layers > 1) {
+ unsigned int i;
+
+ for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
+ cpi->frames_in_layer[i]++;
+
+ cpi->bytes_in_layer[i] += *size;
+ cpi->sum_psnr[i] += frame_psnr;
+ cpi->sum_psnr_p[i] += frame_psnr2;
+ cpi->total_error2[i] += sq_error;
+ cpi->total_error2_p[i] += sq_error2;
+ cpi->sum_ssim[i] += frame_ssim2 * weight;
+ cpi->sum_weights[i] += weight;
+ }
+ }
+ }
+#endif
+ }
+ }
+ }
+
+#if 0
+
+ if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
+ {
+ skiptruecount += cpi->skip_true_count;
+ skipfalsecount += cpi->skip_false_count;
+ }
+
+#endif
+#if 0
+
+ if (cpi->pass != 1)
+ {
+ FILE *f = fopen("skip.stt", "a");
+ fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);
+
+ if (cpi->is_src_frame_alt_ref == 1)
+ fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);
+
+ fclose(f);
+ }
+
+#endif
+#endif
+
+ cpi->common.error.setjmp = 0;
+
+#if CONFIG_MULTITHREAD
+  /* Wait for the loopfilter (lpf) thread to finish. */
+ if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) && cpi->b_lpf_running) {
+ sem_wait(&cpi->h_event_end_lpf);
+ cpi->b_lpf_running = 0;
+ }
+#endif
+
+ return 0;
+}
+
+int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
+ vp8_ppflags_t *flags) {
+ if (cpi->common.refresh_alt_ref_frame) {
+ return -1;
+ } else {
+ int ret;
+
+#if CONFIG_POSTPROC
+ cpi->common.show_frame_mi = cpi->common.mi;
+ ret = vp8_post_proc_frame(&cpi->common, dest, flags);
+#else
+ (void)flags;
+
+ if (cpi->common.frame_to_show) {
+ *dest = *cpi->common.frame_to_show;
+ dest->y_width = cpi->common.Width;
+ dest->y_height = cpi->common.Height;
+ dest->uv_height = cpi->common.Height / 2;
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+#endif
+ vpx_clear_system_state();
+ return ret;
+ }
+}
+
+int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
+ unsigned int cols, int delta_q[4], int delta_lf[4],
+ unsigned int threshold[4]) {
+ signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
+ int internal_delta_q[MAX_MB_SEGMENTS];
+ const int range = 63;
+ int i;
+
+  // Check that the number of rows and columns match.
+ if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
+ return -1;
+ }
+
+ for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
+ // Note abs() alone can't be used as the behavior of abs(INT_MIN) is
+ // undefined.
+ if (delta_q[i] > range || delta_q[i] < -range || delta_lf[i] > range ||
+ delta_lf[i] < -range) {
+ return -1;
+ }
+ }
+
+  // Disable segmentation if the map is NULL or no deltas/thresholds are set.
+ if (!map || (delta_q[0] == 0 && delta_q[1] == 0 && delta_q[2] == 0 &&
+ delta_q[3] == 0 && delta_lf[0] == 0 && delta_lf[1] == 0 &&
+ delta_lf[2] == 0 && delta_lf[3] == 0 && threshold[0] == 0 &&
+ threshold[1] == 0 && threshold[2] == 0 && threshold[3] == 0)) {
+ disable_segmentation(cpi);
+ return 0;
+ }
+
+ // Translate the external delta q values to internal values.
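+  // q_trans[] maps the external 0-63 quantizer range onto the internal
+  // 0-127 qindex range; the sign of each delta is preserved.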
+ for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
+ internal_delta_q[i] =
+ (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
+ }
+
+ /* Set the segmentation Map */
+ set_segmentation_map(cpi, map);
+
+ /* Activate segmentation. */
+ enable_segmentation(cpi);
+
+ /* Set up the quant segment data */
+ feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
+ feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
+ feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
+ feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
+
+  /* Set up the loop filter segment data */
+ feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
+ feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
+ feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
+ feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];
+
+ cpi->segment_encode_breakout[0] = threshold[0];
+ cpi->segment_encode_breakout[1] = threshold[1];
+ cpi->segment_encode_breakout[2] = threshold[2];
+ cpi->segment_encode_breakout[3] = threshold[3];
+
+ /* Initialise the feature data structure */
+ set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
+
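+  /* ROI deltas take over macroblock segmentation, so cyclic background
+   * refresh (which also uses segmentation) is disabled below; any nonzero
+   * threshold enables the ROI static encode breakout. */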
+ if (threshold[0] != 0 || threshold[1] != 0 || threshold[2] != 0 ||
+ threshold[3] != 0)
+ cpi->use_roi_static_threshold = 1;
+ cpi->cyclic_refresh_mode_enabled = 0;
+
+ return 0;
+}
+
+int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
+ unsigned int cols) {
+ if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
+ if (map) {
+ memcpy(cpi->active_map, map, rows * cols);
+ cpi->active_map_enabled = 1;
+ } else {
+ cpi->active_map_enabled = 0;
+ }
+
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING_MODE horiz_mode,
+ VPX_SCALING_MODE vert_mode) {
+ if (horiz_mode <= VP8E_ONETWO) {
+ cpi->common.horiz_scale = horiz_mode;
+ } else {
+ return -1;
+ }
+
+ if (vert_mode <= VP8E_ONETWO) {
+ cpi->common.vert_scale = vert_mode;
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
+ int i, j;
+ int Total = 0;
+
+ unsigned char *src = source->y_buffer;
+ unsigned char *dst = dest->y_buffer;
+
+ /* Loop through the Y plane raw and reconstruction data summing
+ * (square differences)
+ */
+ for (i = 0; i < source->y_height; i += 16) {
+ for (j = 0; j < source->y_width; j += 16) {
+ unsigned int sse;
+ Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
+ &sse);
+ }
+
+ src += 16 * source->y_stride;
+ dst += 16 * dest->y_stride;
+ }
+
+ return Total;
+}
+
+int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }
diff --git a/media/libvpx/libvpx/vp8/encoder/onyx_int.h b/media/libvpx/libvpx/vp8/encoder/onyx_int.h
new file mode 100644
index 0000000000..bde5c2f69b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/onyx_int.h
@@ -0,0 +1,738 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_ONYX_INT_H_
+#define VPX_VP8_ENCODER_ONYX_INT_H_
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "vpx_config.h"
+#include "vp8/common/onyx.h"
+#include "treewriter.h"
+#include "tokenize.h"
+#include "vp8/common/onyxc_int.h"
+#include "vpx_dsp/variance.h"
+#include "encodemb.h"
+#include "vp8/encoder/quantize.h"
+#include "vp8/common/entropy.h"
+#include "vp8/common/threading.h"
+#include "vpx_ports/mem.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx/vp8.h"
+#include "mcomp.h"
+#include "vp8/common/findnearmv.h"
+#include "lookahead.h"
+#if CONFIG_TEMPORAL_DENOISING
+#include "vp8/encoder/denoising.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MIN_GF_INTERVAL 4
+#define DEFAULT_GF_INTERVAL 7
+
+#define KEY_FRAME_CONTEXT 5
+
+#define MAX_LAG_BUFFERS (CONFIG_REALTIME_ONLY ? 1 : 25)
+
+#define AF_THRESH 25
+#define AF_THRESH2 100
+#define ARF_DECAY_THRESH 12
+
+#define MIN_THRESHMULT 32
+#define MAX_THRESHMULT 512
+
+#define GF_ZEROMV_ZBIN_BOOST 12
+#define LF_ZEROMV_ZBIN_BOOST 6
+#define MV_ZBIN_BOOST 4
+#define ZBIN_OQ_MAX 192
+
+#define VP8_TEMPORAL_ALT_REF !CONFIG_REALTIME_ONLY
+
+/* vp8 uses 10,000,000 ticks/second as time stamp */
+#define TICKS_PER_SEC 10000000
+
+typedef struct {
+ int kf_indicated;
+ unsigned int frames_since_key;
+ unsigned int frames_since_golden;
+ int filter_level;
+ int frames_till_gf_update_due;
+ int recent_ref_frame_usage[MAX_REF_FRAMES];
+
+ MV_CONTEXT mvc[2];
+ int mvcosts[2][MVvals + 1];
+
+#ifdef MODE_STATS
+ int y_modes[5];
+ int uv_modes[4];
+ int b_modes[10];
+ int inter_y_modes[10];
+ int inter_uv_modes[4];
+ int inter_b_modes[10];
+#endif
+
+ vp8_prob ymode_prob[4], uv_mode_prob[3]; /* interframe intra mode probs */
+  vp8_prob kf_ymode_prob[4], kf_uv_mode_prob[3]; /* keyframe intra mode probs */
+
+ int ymode_count[5], uv_mode_count[4]; /* intra MB type cts this frame */
+
+ int count_mb_ref_frame_usage[MAX_REF_FRAMES];
+
+ int this_frame_percent_intra;
+ int last_frame_percent_intra;
+
+} CODING_CONTEXT;
+
+typedef struct {
+ double frame;
+ double intra_error;
+ double coded_error;
+ double ssim_weighted_pred_err;
+ double pcnt_inter;
+ double pcnt_motion;
+ double pcnt_second_ref;
+ double pcnt_neutral;
+ double MVr;
+ double mvr_abs;
+ double MVc;
+ double mvc_abs;
+ double MVrv;
+ double MVcv;
+ double mv_in_out_count;
+ double new_mv_count;
+ double duration;
+ double count;
+} FIRSTPASS_STATS;
+
+typedef struct {
+ int frames_so_far;
+ double frame_intra_error;
+ double frame_coded_error;
+ double frame_pcnt_inter;
+ double frame_pcnt_motion;
+ double frame_mvr;
+ double frame_mvr_abs;
+ double frame_mvc;
+ double frame_mvc_abs;
+
+} ONEPASS_FRAMESTATS;
+
+typedef enum {
+ THR_ZERO1 = 0,
+ THR_DC = 1,
+
+ THR_NEAREST1 = 2,
+ THR_NEAR1 = 3,
+
+ THR_ZERO2 = 4,
+ THR_NEAREST2 = 5,
+
+ THR_ZERO3 = 6,
+ THR_NEAREST3 = 7,
+
+ THR_NEAR2 = 8,
+ THR_NEAR3 = 9,
+
+ THR_V_PRED = 10,
+ THR_H_PRED = 11,
+ THR_TM = 12,
+
+ THR_NEW1 = 13,
+ THR_NEW2 = 14,
+ THR_NEW3 = 15,
+
+ THR_SPLIT1 = 16,
+ THR_SPLIT2 = 17,
+ THR_SPLIT3 = 18,
+
+ THR_B_PRED = 19
+} THR_MODES;
+
+typedef enum { DIAMOND = 0, NSTEP = 1, HEX = 2 } SEARCH_METHODS;
+
+typedef struct {
+ int RD;
+ SEARCH_METHODS search_method;
+ int improved_quant;
+ int improved_dct;
+ int auto_filter;
+ int recode_loop;
+ int iterative_sub_pixel;
+ int half_pixel_search;
+ int quarter_pixel_search;
+ int thresh_mult[MAX_MODES];
+ int max_step_search_steps;
+ int first_step;
+ int optimize_coefficients;
+
+ int use_fastquant_for_pick;
+ int no_skip_block4x4_search;
+ int improved_mv_pred;
+
+} SPEED_FEATURES;
+
+typedef struct {
+ MACROBLOCK mb;
+ int segment_counts[MAX_MB_SEGMENTS];
+ int totalrate;
+} MB_ROW_COMP;
+
+typedef struct {
+ TOKENEXTRA *start;
+ TOKENEXTRA *stop;
+} TOKENLIST;
+
+typedef struct {
+ int ithread;
+ void *ptr1;
+ void *ptr2;
+} ENCODETHREAD_DATA;
+typedef struct {
+ int ithread;
+ void *ptr1;
+} LPFTHREAD_DATA;
+
+enum {
+ BLOCK_16X8,
+ BLOCK_8X16,
+ BLOCK_8X8,
+ BLOCK_4X4,
+ BLOCK_16X16,
+ BLOCK_MAX_SEGMENTS
+};
+
+typedef struct {
+ /* Layer configuration */
+ double framerate;
+ int target_bandwidth;
+
+ /* Layer specific coding parameters */
+ int64_t starting_buffer_level;
+ int64_t optimal_buffer_level;
+ int64_t maximum_buffer_size;
+ int64_t starting_buffer_level_in_ms;
+ int64_t optimal_buffer_level_in_ms;
+ int64_t maximum_buffer_size_in_ms;
+
+ int avg_frame_size_for_layer;
+
+ int64_t buffer_level;
+ int64_t bits_off_target;
+
+ int64_t total_actual_bits;
+ int total_target_vs_actual;
+
+ int worst_quality;
+ int active_worst_quality;
+ int best_quality;
+ int active_best_quality;
+
+ int ni_av_qi;
+ int ni_tot_qi;
+ int ni_frames;
+ int avg_frame_qindex;
+
+ double rate_correction_factor;
+ double key_frame_rate_correction_factor;
+ double gf_rate_correction_factor;
+
+ int zbin_over_quant;
+
+ int inter_frame_target;
+ int64_t total_byte_count;
+
+ int filter_level;
+
+ int frames_since_last_drop_overshoot;
+
+ int force_maxqp;
+
+ int last_frame_percent_intra;
+
+ int count_mb_ref_frame_usage[MAX_REF_FRAMES];
+
+ int last_q[2];
+} LAYER_CONTEXT;
+
+typedef struct VP8_COMP {
+ DECLARE_ALIGNED(16, short, Y1quant[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y1quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y1zbin[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y1round[QINDEX_RANGE][16]);
+
+ DECLARE_ALIGNED(16, short, Y2quant[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y2quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y2zbin[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y2round[QINDEX_RANGE][16]);
+
+ DECLARE_ALIGNED(16, short, UVquant[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, UVquant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, UVzbin[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, UVround[QINDEX_RANGE][16]);
+
+ DECLARE_ALIGNED(16, short, zrun_zbin_boost_y1[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y1quant_fast[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y2quant_fast[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, UVquant_fast[QINDEX_RANGE][16]);
+
+ MACROBLOCK mb;
+ VP8_COMMON common;
+ vp8_writer bc[9]; /* one boolcoder for each partition */
+
+ VP8_CONFIG oxcf;
+
+ struct lookahead_ctx *lookahead;
+ struct lookahead_entry *source;
+ struct lookahead_entry *alt_ref_source;
+ struct lookahead_entry *last_source;
+
+ YV12_BUFFER_CONFIG *Source;
+ YV12_BUFFER_CONFIG *un_scaled_source;
+ YV12_BUFFER_CONFIG scaled_source;
+ YV12_BUFFER_CONFIG *last_frame_unscaled_source;
+
+ unsigned int frames_till_alt_ref_frame;
+ /* frame in src_buffers has been identified to be encoded as an alt ref */
+ int source_alt_ref_pending;
+ /* an alt ref frame has been encoded and is usable */
+ int source_alt_ref_active;
+ /* source of frame to encode is an exact copy of an alt ref frame */
+ int is_src_frame_alt_ref;
+
+  /* golden frame same as last frame (short-circuit gold searches) */
+  int gold_is_last;
+  /* Alt reference frame same as last (short-circuit altref search) */
+  int alt_is_last;
+  /* don't do both alt and gold search (just do gold). */
+  int gold_is_alt;
+
+ YV12_BUFFER_CONFIG pick_lf_lvl_frame;
+
+ TOKENEXTRA *tok;
+ unsigned int tok_count;
+
+ unsigned int frames_since_key;
+ unsigned int key_frame_frequency;
+ unsigned int this_key_frame_forced;
+ unsigned int next_key_frame_forced;
+
+  /* Ambient reconstruction error target for forced key frames */
+ int ambient_err;
+
+ unsigned int mode_check_freq[MAX_MODES];
+
+ int rd_baseline_thresh[MAX_MODES];
+
+ int RDMULT;
+ int RDDIV;
+
+ CODING_CONTEXT coding_context;
+
+ /* Rate targeting variables */
+ int64_t last_prediction_error;
+ int64_t last_intra_error;
+
+ int this_frame_target;
+ int projected_frame_size;
+ int last_q[2]; /* Separate values for Intra/Inter */
+
+ double rate_correction_factor;
+ double key_frame_rate_correction_factor;
+ double gf_rate_correction_factor;
+
+ int frames_since_golden;
+ /* Count down till next GF */
+ int frames_till_gf_update_due;
+
+ /* GF interval chosen when we coded the last GF */
+ int current_gf_interval;
+
+  /* Total bits overspent because of GF boost (cumulative) */
+ int gf_overspend_bits;
+
+ /* Used in the few frames following a GF to recover the extra bits
+ * spent in that GF
+ */
+ int non_gf_bitrate_adjustment;
+
+ /* Extra bits spent on key frames that need to be recovered */
+ int kf_overspend_bits;
+
+  /* Current number of bits to try to recover on each inter frame. */
+ int kf_bitrate_adjustment;
+ int max_gf_interval;
+ int baseline_gf_interval;
+ int active_arnr_frames;
+
+ int64_t key_frame_count;
+ int prior_key_frame_distance[KEY_FRAME_CONTEXT];
+ /* Current section per frame bandwidth target */
+ int per_frame_bandwidth;
+ /* Average frame size target for clip */
+ int av_per_frame_bandwidth;
+ /* Minimum allocation that should be used for any frame */
+ int min_frame_bandwidth;
+ int inter_frame_target;
+ double output_framerate;
+ int64_t last_time_stamp_seen;
+ int64_t last_end_time_stamp_seen;
+ int64_t first_time_stamp_ever;
+
+ int ni_av_qi;
+ int ni_tot_qi;
+ int ni_frames;
+ int avg_frame_qindex;
+
+ int64_t total_byte_count;
+
+ int buffered_mode;
+
+ double framerate;
+ double ref_framerate;
+ int64_t buffer_level;
+ int64_t bits_off_target;
+
+ int rolling_target_bits;
+ int rolling_actual_bits;
+
+ int long_rolling_target_bits;
+ int long_rolling_actual_bits;
+
+ int64_t total_actual_bits;
+ int total_target_vs_actual; /* debug stats */
+
+ int worst_quality;
+ int active_worst_quality;
+ int best_quality;
+ int active_best_quality;
+
+ int cq_target_quality;
+
+ int drop_frames_allowed; /* Are we permitted to drop frames? */
+ int drop_frame; /* Drop this frame? */
+#if defined(DROP_UNCODED_FRAMES)
+ int drop_frame_count;
+#endif
+
+ vp8_prob frame_coef_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES];
+ char update_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+
+ unsigned int frame_branch_ct[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES][2];
+
+ int gfu_boost;
+ int kf_boost;
+ int last_boost;
+
+ int target_bandwidth;
+ struct vpx_codec_pkt_list *output_pkt_list;
+
+#if 0
+ /* Experimental code for lagged and one pass */
+ ONEPASS_FRAMESTATS one_pass_frame_stats[MAX_LAG_BUFFERS];
+ int one_pass_frame_index;
+#endif
+
+ int decimation_factor;
+ int decimation_count;
+
+ /* for real time encoding */
+ int avg_encode_time; /* microsecond */
+ int avg_pick_mode_time; /* microsecond */
+ int Speed;
+ int compressor_speed;
+
+ int auto_gold;
+ int auto_adjust_gold_quantizer;
+ int auto_worst_q;
+ int cpu_used;
+ int pass;
+
+ int prob_intra_coded;
+ int prob_last_coded;
+ int prob_gf_coded;
+ int prob_skip_false;
+ int last_skip_false_probs[3];
+ int last_skip_probs_q[3];
+ int recent_ref_frame_usage[MAX_REF_FRAMES];
+
+ int this_frame_percent_intra;
+ int last_frame_percent_intra;
+
+ int ref_frame_flags;
+
+ SPEED_FEATURES sf;
+
+ /* Count ZEROMV on all reference frames. */
+ int zeromv_count;
+ int lf_zeromv_pct;
+
+ unsigned char *skin_map;
+
+ unsigned char *segmentation_map;
+ signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
+ unsigned int segment_encode_breakout[MAX_MB_SEGMENTS];
+
+ unsigned char *active_map;
+ unsigned int active_map_enabled;
+
+ /* Video conferencing cyclic refresh mode flags. This is a mode
+ * designed to clean up the background over time in live encoding
+   * scenarios. It uses segmentation.
+ */
+ int cyclic_refresh_mode_enabled;
+ int cyclic_refresh_mode_max_mbs_perframe;
+ int cyclic_refresh_mode_index;
+ int cyclic_refresh_q;
+ signed char *cyclic_refresh_map;
+  // Count of how many (consecutive) times a macroblock uses ZEROMV_LAST.
+ unsigned char *consec_zero_last;
+ // Counter that is reset when a block is checked for a mode-bias against
+ // ZEROMV_LASTREF.
+ unsigned char *consec_zero_last_mvbias;
+
+  // Frame counter for the temporal pattern. Counter is reset when the temporal
+ // layers are changed dynamically (run-time change).
+ unsigned int temporal_pattern_counter;
+ // Temporal layer id.
+ int temporal_layer_id;
+
+ // Measure of average squared difference between source and denoised signal.
+ int mse_source_denoised;
+
+ int force_maxqp;
+ int frames_since_last_drop_overshoot;
+ int last_pred_err_mb;
+
+ // GF update for 1 pass cbr.
+ int gf_update_onepass_cbr;
+ int gf_interval_onepass_cbr;
+ int gf_noboost_onepass_cbr;
+
+#if CONFIG_MULTITHREAD
+ /* multithread data */
+ vpx_atomic_int *mt_current_mb_col;
+ int mt_sync_range;
+ vpx_atomic_int b_multi_threaded;
+ int encoding_thread_count;
+ int b_lpf_running;
+
+ pthread_t *h_encoding_thread;
+ pthread_t h_filter_thread;
+
+ MB_ROW_COMP *mb_row_ei;
+ ENCODETHREAD_DATA *en_thread_data;
+ LPFTHREAD_DATA lpf_thread_data;
+
+ /* events */
+ sem_t *h_event_start_encoding;
+ sem_t *h_event_end_encoding;
+ sem_t h_event_start_lpf;
+ sem_t h_event_end_lpf;
+#endif
+
+ TOKENLIST *tplist;
+ unsigned int partition_sz[MAX_PARTITIONS];
+ unsigned char *partition_d[MAX_PARTITIONS];
+ unsigned char *partition_d_end[MAX_PARTITIONS];
+
+ fractional_mv_step_fp *find_fractional_mv_step;
+ vp8_refining_search_fn_t refining_search_sad;
+ vp8_diamond_search_fn_t diamond_search_sad;
+ vp8_variance_fn_ptr_t fn_ptr[BLOCK_MAX_SEGMENTS];
+ uint64_t time_receive_data;
+ uint64_t time_compress_data;
+ uint64_t time_pick_lpf;
+ uint64_t time_encode_mb_row;
+
+ int base_skip_false_prob[128];
+
+ FRAME_CONTEXT lfc_n; /* last frame entropy */
+ FRAME_CONTEXT lfc_a; /* last alt ref entropy */
+ FRAME_CONTEXT lfc_g; /* last gold ref entropy */
+
+ struct twopass_rc {
+ unsigned int section_intra_rating;
+ double section_max_qfactor;
+ unsigned int next_iiratio;
+ unsigned int this_iiratio;
+ FIRSTPASS_STATS total_stats;
+ FIRSTPASS_STATS this_frame_stats;
+ FIRSTPASS_STATS *stats_in, *stats_in_end, *stats_in_start;
+ FIRSTPASS_STATS total_left_stats;
+ int first_pass_done;
+ int64_t bits_left;
+ int64_t clip_bits_total;
+ double avg_iiratio;
+ double modified_error_total;
+ double modified_error_used;
+ double modified_error_left;
+ double kf_intra_err_min;
+ double gf_intra_err_min;
+ int frames_to_key;
+ int maxq_max_limit;
+ int maxq_min_limit;
+ int gf_decay_rate;
+ int static_scene_max_gf_interval;
+ int kf_bits;
+ /* Remaining error from uncoded frames in a gf group. */
+ int gf_group_error_left;
+ /* Projected total bits available for a key frame group of frames */
+ int64_t kf_group_bits;
+ /* Error score of frames still to be coded in kf group */
+ int64_t kf_group_error_left;
+ /* Projected Bits available for a group including 1 GF or ARF */
+ int64_t gf_group_bits;
+ /* Bits for the golden frame or ARF */
+ int gf_bits;
+ int alt_extra_bits;
+ double est_max_qcorrection_factor;
+ } twopass;
+
+#if VP8_TEMPORAL_ALT_REF
+ YV12_BUFFER_CONFIG alt_ref_buffer;
+ YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS];
+ int fixed_divide[512];
+#endif
+
+#if CONFIG_INTERNAL_STATS
+ int count;
+ double total_y;
+ double total_u;
+ double total_v;
+ double total;
+ double total_sq_error;
+ double totalp_y;
+ double totalp_u;
+ double totalp_v;
+ double totalp;
+ double total_sq_error2;
+ int bytes;
+ double summed_quality;
+ double summed_weights;
+ unsigned int tot_recode_hits;
+
+ int b_calculate_ssimg;
+#endif
+ int b_calculate_psnr;
+
+ /* Per MB activity measurement */
+ unsigned int activity_avg;
+ unsigned int *mb_activity_map;
+
+ /* Record of which MBs still refer to last golden frame either
+ * directly or through 0,0
+ */
+ unsigned char *gf_active_flags;
+ int gf_active_count;
+
+ int output_partition;
+
+ /* Store last frame's MV info for next frame MV prediction */
+ int_mv *lfmv;
+ int *lf_ref_frame_sign_bias;
+ int *lf_ref_frame;
+
+ /* force next frame to intra when kf_auto says so */
+ int force_next_frame_intra;
+
+ int droppable;
+
+ int initial_width;
+ int initial_height;
+
+#if CONFIG_TEMPORAL_DENOISING
+ VP8_DENOISER denoiser;
+#endif
+
+ /* Coding layer state variables */
+ unsigned int current_layer;
+ LAYER_CONTEXT layer_context[VPX_TS_MAX_LAYERS];
+
+ int64_t frames_in_layer[VPX_TS_MAX_LAYERS];
+ int64_t bytes_in_layer[VPX_TS_MAX_LAYERS];
+ double sum_psnr[VPX_TS_MAX_LAYERS];
+ double sum_psnr_p[VPX_TS_MAX_LAYERS];
+ double total_error2[VPX_TS_MAX_LAYERS];
+ double total_error2_p[VPX_TS_MAX_LAYERS];
+ double sum_ssim[VPX_TS_MAX_LAYERS];
+ double sum_weights[VPX_TS_MAX_LAYERS];
+
+ double total_ssimg_y_in_layer[VPX_TS_MAX_LAYERS];
+ double total_ssimg_u_in_layer[VPX_TS_MAX_LAYERS];
+ double total_ssimg_v_in_layer[VPX_TS_MAX_LAYERS];
+ double total_ssimg_all_in_layer[VPX_TS_MAX_LAYERS];
+
+#if CONFIG_MULTI_RES_ENCODING
+ /* Number of MBs per row at lower-resolution level */
+ int mr_low_res_mb_cols;
+ /* Indicate if lower-res mv info is available */
+ unsigned char mr_low_res_mv_avail;
+#endif
+  /* The frame number of each reference frame */
+ unsigned int current_ref_frames[MAX_REF_FRAMES];
+ // Closest reference frame to current frame.
+ MV_REFERENCE_FRAME closest_reference_frame;
+
+ struct rd_costs_struct {
+ int mvcosts[2][MVvals + 1];
+ int mvsadcosts[2][MVfpvals + 1];
+ int mbmode_cost[2][MB_MODE_COUNT];
+ int intra_uv_mode_cost[2][MB_MODE_COUNT];
+ int bmode_costs[10][10][10];
+ int inter_bmode_costs[B_MODE_COUNT];
+ int token_costs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [MAX_ENTROPY_TOKENS];
+ } rd_costs;
+
+ // Use the static threshold from ROI settings.
+ int use_roi_static_threshold;
+
+ int ext_refresh_frame_flags_pending;
+
+ // Always update correction factor used for rate control after each frame for
+ // realtime encoding.
+ int rt_always_update_correction_factor;
+} VP8_COMP;
+
+void vp8_initialize_enc(void);
+
+void vp8_alloc_compressor_data(VP8_COMP *cpi);
+int vp8_reverse_trans(int x);
+void vp8_reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
+ const int prev_num_layers);
+void vp8_init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
+ const int layer,
+ double prev_layer_framerate);
+void vp8_update_layer_contexts(VP8_COMP *cpi);
+void vp8_save_layer_context(VP8_COMP *cpi);
+void vp8_restore_layer_context(VP8_COMP *cpi, const int layer);
+void vp8_new_framerate(VP8_COMP *cpi, double framerate);
+void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm);
+
+void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
+ unsigned char *dest_end, size_t *size);
+
+void vp8_tokenize_mb(VP8_COMP *, MACROBLOCK *, TOKENEXTRA **);
+
+void vp8_set_speed_features(VP8_COMP *cpi);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_ONYX_INT_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/pickinter.c b/media/libvpx/libvpx/vp8/encoder/pickinter.c
new file mode 100644
index 0000000000..04f68c3245
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/pickinter.c
@@ -0,0 +1,1347 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include "vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "onyx_int.h"
+#include "modecosts.h"
+#include "encodeintra.h"
+#include "vp8/common/common.h"
+#include "vp8/common/entropymode.h"
+#include "pickinter.h"
+#include "vp8/common/findnearmv.h"
+#include "encodemb.h"
+#include "vp8/common/reconinter.h"
+#include "vp8/common/reconintra.h"
+#include "vp8/common/reconintra4x4.h"
+#include "vpx_dsp/variance.h"
+#include "mcomp.h"
+#include "vp8/common/vp8_skin_detection.h"
+#include "rdopt.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_mem/vpx_mem.h"
+#if CONFIG_TEMPORAL_DENOISING
+#include "denoising.h"
+#endif
+
+#ifdef SPEEDSTATS
+extern unsigned int cnt_pm;
+#endif
+
+extern const int vp8_ref_frame_order[MAX_MODES];
+extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
+
+static int macroblock_corner_grad(unsigned char *signal, int stride,
+ int offsetx, int offsety, int sgnx,
+ int sgny) {
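+  /* Return the maximum absolute difference between the corner sample and
+   * its horizontal, vertical and diagonal neighbours: a cheap local
+   * gradient measure at a macroblock corner. */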
+ int y1 = signal[offsetx * stride + offsety];
+ int y2 = signal[offsetx * stride + offsety + sgny];
+ int y3 = signal[(offsetx + sgnx) * stride + offsety];
+ int y4 = signal[(offsetx + sgnx) * stride + offsety + sgny];
+ return VPXMAX(VPXMAX(abs(y1 - y2), abs(y1 - y3)), abs(y1 - y4));
+}
+
+static int check_dot_artifact_candidate(VP8_COMP *cpi, MACROBLOCK *x,
+ unsigned char *target_last, int stride,
+ unsigned char *last_ref, int mb_row,
+ int mb_col, int channel) {
+ int threshold1 = 6;
+ int threshold2 = 3;
+ unsigned int max_num = (cpi->common.MBs) / 10;
+ int grad_last = 0;
+ int grad_source = 0;
+ int index = mb_row * cpi->common.mb_cols + mb_col;
+ // Threshold for #consecutive (base layer) frames using zero_last mode.
+ int num_frames = 30;
+ int shift = 15;
+ if (channel > 0) {
+ shift = 7;
+ }
+ if (cpi->oxcf.number_of_layers > 1) {
+ num_frames = 20;
+ }
+ x->zero_last_dot_suppress = 0;
+  // Blocks on base layer frames that have been using ZEROMV_LAST repeatedly
+  // (i.e., for at least |num_frames| consecutive frames) are candidates for
+  // increasing the rd adjustment for zero_last mode.
+ // Only allow this for at most |max_num| blocks per frame.
+ // Don't allow this for screen content input.
+ if (cpi->current_layer == 0 &&
+ cpi->consec_zero_last_mvbias[index] > num_frames &&
+ x->mbs_zero_last_dot_suppress < max_num &&
+ !cpi->oxcf.screen_content_mode) {
+ // If this block is checked here, label it so we don't check it again until
+    // ~|num_frames| frames later.
+ x->zero_last_dot_suppress = 1;
+ // Dot artifact is noticeable as strong gradient at corners of macroblock,
+ // for flat areas. As a simple detector for now, we look for a high
+ // corner gradient on last ref, and a smaller gradient on source.
+ // Check 4 corners, return if any satisfy condition.
+ // Top-left:
+ grad_last = macroblock_corner_grad(last_ref, stride, 0, 0, 1, 1);
+ grad_source = macroblock_corner_grad(target_last, stride, 0, 0, 1, 1);
+ if (grad_last >= threshold1 && grad_source <= threshold2) {
+ x->mbs_zero_last_dot_suppress++;
+ return 1;
+ }
+ // Top-right:
+ grad_last = macroblock_corner_grad(last_ref, stride, 0, shift, 1, -1);
+ grad_source = macroblock_corner_grad(target_last, stride, 0, shift, 1, -1);
+ if (grad_last >= threshold1 && grad_source <= threshold2) {
+ x->mbs_zero_last_dot_suppress++;
+ return 1;
+ }
+ // Bottom-left:
+ grad_last = macroblock_corner_grad(last_ref, stride, shift, 0, -1, 1);
+ grad_source = macroblock_corner_grad(target_last, stride, shift, 0, -1, 1);
+ if (grad_last >= threshold1 && grad_source <= threshold2) {
+ x->mbs_zero_last_dot_suppress++;
+ return 1;
+ }
+ // Bottom-right:
+ grad_last = macroblock_corner_grad(last_ref, stride, shift, shift, -1, -1);
+ grad_source =
+ macroblock_corner_grad(target_last, stride, shift, shift, -1, -1);
+ if (grad_last >= threshold1 && grad_source <= threshold2) {
+ x->mbs_zero_last_dot_suppress++;
+ return 1;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp8_variance_fn_ptr_t *vfp,
+ int *mvcost[2], int *distortion,
+ unsigned int *sse) {
+ (void)b;
+ (void)d;
+ (void)ref_mv;
+ (void)error_per_bit;
+ (void)vfp;
+ (void)mb;
+ (void)mvcost;
+ (void)distortion;
+ (void)sse;
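+  /* No sub-pel refinement is done: just scale the full-pel result to the
+   * 1/8-pel units used for motion vectors internally. */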
+ bestmv->as_mv.row *= 8;
+ bestmv->as_mv.col *= 8;
+ return 0;
+}
+
+int vp8_get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp,
+ unsigned int *sse, int_mv this_mv) {
+ BLOCK *b = &mb->block[0];
+ BLOCKD *d = &mb->e_mbd.block[0];
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ int pre_stride = mb->e_mbd.pre.y_stride;
+ unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset;
+ int in_what_stride = pre_stride;
+ int xoffset = this_mv.as_mv.col & 7;
+ int yoffset = this_mv.as_mv.row & 7;
+
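+  /* The low three bits of the MV are the 1/8-pel offset; the remaining
+   * bits (>> 3) give the full-pel displacement applied here. */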
+ in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3);
+
+ if (xoffset | yoffset) {
+ return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what,
+ what_stride, sse);
+ } else {
+ return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
+ }
+}
+
+static int get_prediction_error(BLOCK *be, BLOCKD *b) {
+ unsigned char *sptr;
+ unsigned char *dptr;
+ sptr = (*(be->base_src) + be->src);
+ dptr = b->predictor;
+
+ return vpx_get4x4sse_cs(sptr, be->src_stride, dptr, 16);
+}
+
+static int pick_intra4x4block(MACROBLOCK *x, int ib,
+ B_PREDICTION_MODE *best_mode,
+ const int *mode_costs, int *bestrate,
+ int *bestdistortion) {
+ BLOCKD *b = &x->e_mbd.block[ib];
+ BLOCK *be = &x->block[ib];
+ int dst_stride = x->e_mbd.dst.y_stride;
+ unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;
+ B_PREDICTION_MODE mode;
+ int best_rd = INT_MAX;
+ int rate;
+ int distortion;
+
+ unsigned char *Above = dst - dst_stride;
+ unsigned char *yleft = dst - 1;
+ unsigned char top_left = Above[-1];
+
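+  /* For speed, only the first four 4x4 modes (B_DC_PRED .. B_HE_PRED) are
+   * evaluated here, rather than the full set used in the RD path. */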
+ for (mode = B_DC_PRED; mode <= B_HE_PRED; ++mode) {
+ int this_rd;
+
+ rate = mode_costs[mode];
+
+ vp8_intra4x4_predict(Above, yleft, dst_stride, mode, b->predictor, 16,
+ top_left);
+ distortion = get_prediction_error(be, b);
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+
+ if (this_rd < best_rd) {
+ *bestrate = rate;
+ *bestdistortion = distortion;
+ best_rd = this_rd;
+ *best_mode = mode;
+ }
+ }
+
+ b->bmi.as_mode = *best_mode;
+ vp8_encode_intra4x4block(x, ib);
+ return best_rd;
+}
+
+static int pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *best_dist) {
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ int i;
+ int cost = mb->mbmode_cost[xd->frame_type][B_PRED];
+ int error;
+ int distortion = 0;
+ const int *bmode_costs;
+
+ intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);
+
+ bmode_costs = mb->inter_bmode_costs;
+
+ for (i = 0; i < 16; ++i) {
+ MODE_INFO *const mic = xd->mode_info_context;
+ const int mis = xd->mode_info_stride;
+
+ B_PREDICTION_MODE best_mode = B_MODE_COUNT;
+ int r = 0, d = 0;
+
+ if (mb->e_mbd.frame_type == KEY_FRAME) {
+ const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
+ const B_PREDICTION_MODE L = left_block_mode(mic, i);
+
+ bmode_costs = mb->bmode_costs[A][L];
+ }
+
+ pick_intra4x4block(mb, i, &best_mode, bmode_costs, &r, &d);
+
+ cost += r;
+ distortion += d;
+ assert(best_mode != B_MODE_COUNT);
+ mic->bmi[i].as_mode = best_mode;
+
+ /* Break out case where we have already exceeded best so far value
+ * that was passed in
+ */
+ if (distortion > *best_dist) break;
+ }
+
+ *Rate = cost;
+
+ if (i == 16) {
+ *best_dist = distortion;
+ error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
+ } else {
+ *best_dist = INT_MAX;
+ error = INT_MAX;
+ }
+
+ return error;
+}
+
+static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
+ MACROBLOCKD *x = &mb->e_mbd;
+ unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
+ unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
+ unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
+ unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
+ int uvsrc_stride = mb->block[16].src_stride;
+ unsigned char uleft_col[8];
+ unsigned char vleft_col[8];
+ unsigned char utop_left = uabove_row[-1];
+ unsigned char vtop_left = vabove_row[-1];
+ int i, j;
+ int expected_udc;
+ int expected_vdc;
+ int shift;
+ int Uaverage = 0;
+ int Vaverage = 0;
+ int diff;
+ int pred_error[4] = { 0, 0, 0, 0 }, best_error = INT_MAX;
+ MB_PREDICTION_MODE best_mode = MB_MODE_COUNT;
+
+ for (i = 0; i < 8; ++i) {
+ uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
+ vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
+ }
+
+ if (!x->up_available && !x->left_available) {
+ expected_udc = 128;
+ expected_vdc = 128;
+ } else {
+ shift = 2;
+
+ if (x->up_available) {
+ for (i = 0; i < 8; ++i) {
+ Uaverage += uabove_row[i];
+ Vaverage += vabove_row[i];
+ }
+
+ shift++;
+ }
+
+ if (x->left_available) {
+ for (i = 0; i < 8; ++i) {
+ Uaverage += uleft_col[i];
+ Vaverage += vleft_col[i];
+ }
+
+ shift++;
+ }
+
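+    /* Rounded average of the available neighbouring samples; |shift| is 3
+     * or 4 depending on whether 8 or 16 samples were summed. */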
+ expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
+ expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
+ }
+
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j) {
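+      /* TrueMotion (TM_PRED) predictor: left + above - top_left, clamped
+       * to [0, 255] below. */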
+ int predu = uleft_col[i] + uabove_row[j] - utop_left;
+ int predv = vleft_col[i] + vabove_row[j] - vtop_left;
+ int u_p, v_p;
+
+ u_p = usrc_ptr[j];
+ v_p = vsrc_ptr[j];
+
+ if (predu < 0) predu = 0;
+
+ if (predu > 255) predu = 255;
+
+ if (predv < 0) predv = 0;
+
+ if (predv > 255) predv = 255;
+
+ diff = u_p - expected_udc;
+ pred_error[DC_PRED] += diff * diff;
+ diff = v_p - expected_vdc;
+ pred_error[DC_PRED] += diff * diff;
+
+ diff = u_p - uabove_row[j];
+ pred_error[V_PRED] += diff * diff;
+ diff = v_p - vabove_row[j];
+ pred_error[V_PRED] += diff * diff;
+
+ diff = u_p - uleft_col[i];
+ pred_error[H_PRED] += diff * diff;
+ diff = v_p - vleft_col[i];
+ pred_error[H_PRED] += diff * diff;
+
+ diff = u_p - predu;
+ pred_error[TM_PRED] += diff * diff;
+ diff = v_p - predv;
+ pred_error[TM_PRED] += diff * diff;
+ }
+
+ usrc_ptr += uvsrc_stride;
+ vsrc_ptr += uvsrc_stride;
+
+ if (i == 3) {
+ usrc_ptr = (mb->block[18].src + *mb->block[18].base_src);
+ vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src);
+ }
+ }
+
+ for (i = DC_PRED; i <= TM_PRED; ++i) {
+ if (best_error > pred_error[i]) {
+ best_error = pred_error[i];
+ best_mode = (MB_PREDICTION_MODE)i;
+ }
+ }
+
+ assert(best_mode != MB_MODE_COUNT);
+ mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
+}
+
+static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) {
+ MACROBLOCKD *xd = &x->e_mbd;
+  /* Split MV modes are currently not supported when RD is not enabled;
+   * therefore we only need to update MVcount in NEWMV mode. */
+ if (xd->mode_info_context->mbmi.mode == NEWMV) {
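+    /* MV residuals are counted at the coded 1/4-pel precision, hence the
+     * >> 1 from the internal 1/8-pel units. */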
+ x->MVcount[0][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.row -
+ best_ref_mv->as_mv.row) >>
+ 1)]++;
+ x->MVcount[1][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.col -
+ best_ref_mv->as_mv.col) >>
+ 1)]++;
+ }
+}
+
+#if CONFIG_MULTI_RES_ENCODING
+static void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd,
+ int *dissim, int *parent_ref_frame,
+ MB_PREDICTION_MODE *parent_mode,
+ int_mv *parent_ref_mv, int mb_row,
+ int mb_col) {
+ LOWER_RES_MB_INFO *store_mode_info =
+ ((LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info)->mb_info;
+ unsigned int parent_mb_index;
+
+ /* Consider different down_sampling_factor. */
+ {
+ /* TODO: Removed the loop that supports special down_sampling_factor
+ * such as 2, 4, 8. Will revisit it if needed.
+ * Should also try using a look-up table to see if it helps
+ * performance. */
+ int parent_mb_row, parent_mb_col;
+
+ parent_mb_row = mb_row * cpi->oxcf.mr_down_sampling_factor.den /
+ cpi->oxcf.mr_down_sampling_factor.num;
+ parent_mb_col = mb_col * cpi->oxcf.mr_down_sampling_factor.den /
+ cpi->oxcf.mr_down_sampling_factor.num;
+ parent_mb_index = parent_mb_row * cpi->mr_low_res_mb_cols + parent_mb_col;
+ }
+
+ /* Read lower-resolution mode & motion result from memory.*/
+ *parent_ref_frame = store_mode_info[parent_mb_index].ref_frame;
+ *parent_mode = store_mode_info[parent_mb_index].mode;
+ *dissim = store_mode_info[parent_mb_index].dissim;
+
+ /* For highest-resolution encoder, adjust dissim value. Lower its quality
+ * for good performance. */
+ if (cpi->oxcf.mr_encoder_id == (cpi->oxcf.mr_total_resolutions - 1))
+ *dissim >>= 1;
+
+ if (*parent_ref_frame != INTRA_FRAME) {
+ /* Consider different down_sampling_factor.
+ * The result can be rounded to be more precise, but it takes more time.
+ */
+ (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row *
+ cpi->oxcf.mr_down_sampling_factor.num /
+ cpi->oxcf.mr_down_sampling_factor.den;
+ (*parent_ref_mv).as_mv.col = store_mode_info[parent_mb_index].mv.as_mv.col *
+ cpi->oxcf.mr_down_sampling_factor.num /
+ cpi->oxcf.mr_down_sampling_factor.den;
+
+ vp8_clamp_mv2(parent_ref_mv, xd);
+ }
+}
+#endif
+
+static void check_for_encode_breakout(unsigned int sse, MACROBLOCK *x) {
+ MACROBLOCKD *xd = &x->e_mbd;
+
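+  /* Scale the breakout threshold with the square of the Y AC dequant step
+   * so that it tracks the active quantizer. */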
+ unsigned int threshold =
+ (xd->block[0].dequant[1] * xd->block[0].dequant[1] >> 4);
+
+ if (threshold < x->encode_breakout) threshold = x->encode_breakout;
+
+ if (sse < threshold) {
+ /* Check u and v to make sure skip is ok */
+ unsigned int sse2 = 0;
+
+ sse2 = VP8_UVSSE(x);
+
+ if (sse2 * 2 < x->encode_breakout) {
+ x->skip = 1;
+ } else {
+ x->skip = 0;
+ }
+ }
+}
+
+static int evaluate_inter_mode(unsigned int *sse, int rate2, int *distortion2,
+ VP8_COMP *cpi, MACROBLOCK *x, int rd_adj) {
+ MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
+ int_mv mv = x->e_mbd.mode_info_context->mbmi.mv;
+ int this_rd;
+ int denoise_aggressive = 0;
+ /* Exit early and don't compute the distortion if this macroblock
+ * is marked inactive. */
+ if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
+ *sse = 0;
+ *distortion2 = 0;
+ x->skip = 1;
+ return INT_MAX;
+ }
+
+ if ((this_mode != NEWMV) || !(cpi->sf.half_pixel_search) ||
+ cpi->common.full_pixel == 1) {
+ *distortion2 =
+ vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], sse, mv);
+ }
+
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate2, *distortion2);
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity > 0) {
+ denoise_aggressive =
+ (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) ? 1 : 0;
+ }
+#endif
+
+ // Adjust rd for ZEROMV and LAST, if LAST is the closest reference frame.
+ // TODO: We should also add condition on distance of closest to current.
+ if (!cpi->oxcf.screen_content_mode && this_mode == ZEROMV &&
+ x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME &&
+ (denoise_aggressive || (cpi->closest_reference_frame == LAST_FRAME))) {
+ // No adjustment if block is considered to be skin area.
+ if (x->is_skin) rd_adj = 100;
+
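+    /* |rd_adj| is a percentage: values below 100 favour ZEROMV_LAST, and
+     * values above 100 bias against it. */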
+ this_rd = (int)(((int64_t)this_rd) * rd_adj / 100);
+ }
+
+ check_for_encode_breakout(*sse, x);
+ return this_rd;
+}
+
+static void calculate_zeromv_rd_adjustment(VP8_COMP *cpi, MACROBLOCK *x,
+ int *rd_adjustment) {
+ MODE_INFO *mic = x->e_mbd.mode_info_context;
+ int_mv mv_l, mv_a, mv_al;
+ int local_motion_check = 0;
+
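+  /* Count the left, above-left and above neighbours that are inter coded
+   * with near-zero motion (|mv| < 1 full pel in each component). */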
+ if (cpi->lf_zeromv_pct > 40) {
+ /* left mb */
+ mic -= 1;
+ mv_l = mic->mbmi.mv;
+
+ if (mic->mbmi.ref_frame != INTRA_FRAME) {
+ if (abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8) {
+ local_motion_check++;
+ }
+ }
+
+ /* above-left mb */
+ mic -= x->e_mbd.mode_info_stride;
+ mv_al = mic->mbmi.mv;
+
+ if (mic->mbmi.ref_frame != INTRA_FRAME) {
+ if (abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8) {
+ local_motion_check++;
+ }
+ }
+
+ /* above mb */
+ mic += 1;
+ mv_a = mic->mbmi.mv;
+
+ if (mic->mbmi.ref_frame != INTRA_FRAME) {
+ if (abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8) {
+ local_motion_check++;
+ }
+ }
+
+ if (((!x->e_mbd.mb_to_top_edge || !x->e_mbd.mb_to_left_edge) &&
+ local_motion_check > 0) ||
+ local_motion_check > 2) {
+ *rd_adjustment = 80;
+ } else if (local_motion_check > 0) {
+ *rd_adjustment = 90;
+ }
+ }
+}
+
+void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
+ int recon_uvoffset, int *returnrate,
+ int *returndistortion, int *returnintra, int mb_row,
+ int mb_col) {
+ BLOCK *b = &x->block[0];
+ BLOCKD *d = &x->e_mbd.block[0];
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO best_mbmode;
+
+ int_mv best_ref_mv_sb[2] = { { 0 }, { 0 } };
+ int_mv mode_mv_sb[2][MB_MODE_COUNT];
+ int_mv best_ref_mv;
+ int_mv *mode_mv;
+ MB_PREDICTION_MODE this_mode;
+ int num00;
+ int mdcounts[4];
+ int best_rd = INT_MAX;
+ int rd_adjustment = 100;
+ int best_intra_rd = INT_MAX;
+ int mode_index;
+ int rate;
+ int rate2;
+ int distortion2;
+ int bestsme = INT_MAX;
+ int best_mode_index = 0;
+ unsigned int sse = UINT_MAX, best_rd_sse = UINT_MAX;
+#if CONFIG_TEMPORAL_DENOISING
+ unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX;
+#endif
+
+ int sf_improved_mv_pred = cpi->sf.improved_mv_pred;
+
+#if CONFIG_MULTI_RES_ENCODING
+ int dissim = INT_MAX;
+ int parent_ref_frame = 0;
+ int_mv parent_ref_mv;
+ MB_PREDICTION_MODE parent_mode = 0;
+ int parent_ref_valid = 0;
+#endif
+
+ int_mv mvp;
+
+ int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ int saddone = 0;
+  /* Search range obtained from mv_pred(), expressed in step_param levels (0-7). */
+ int sr = 0;
+
+ unsigned char *plane[4][3] = { { 0, 0 } };
+ int ref_frame_map[4];
+ int sign_bias = 0;
+ int dot_artifact_candidate = 0;
+ get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
+
+ // If the current frame is using LAST as a reference, check for
+ // biasing the mode selection for dot artifacts.
+ if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
+ unsigned char *target_y = x->src.y_buffer;
+ unsigned char *target_u = x->block[16].src + *x->block[16].base_src;
+ unsigned char *target_v = x->block[20].src + *x->block[20].base_src;
+ int stride = x->src.y_stride;
+ int stride_uv = x->block[16].src_stride;
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+ const int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0;
+ target_y =
+ cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset;
+ stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride;
+ if (uv_denoise) {
+ target_u = cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer +
+ recon_uvoffset;
+ target_v = cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer +
+ recon_uvoffset;
+ stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride;
+ }
+ }
+#endif
+ assert(plane[LAST_FRAME][0] != NULL);
+ dot_artifact_candidate = check_dot_artifact_candidate(
+ cpi, x, target_y, stride, plane[LAST_FRAME][0], mb_row, mb_col, 0);
+ // If not found in Y channel, check UV channel.
+ if (!dot_artifact_candidate) {
+ assert(plane[LAST_FRAME][1] != NULL);
+ dot_artifact_candidate = check_dot_artifact_candidate(
+ cpi, x, target_u, stride_uv, plane[LAST_FRAME][1], mb_row, mb_col, 1);
+ if (!dot_artifact_candidate) {
+ assert(plane[LAST_FRAME][2] != NULL);
+ dot_artifact_candidate = check_dot_artifact_candidate(
+ cpi, x, target_v, stride_uv, plane[LAST_FRAME][2], mb_row, mb_col,
+ 2);
+ }
+ }
+ }
+
+#if CONFIG_MULTI_RES_ENCODING
+  // |parent_ref_valid| will be set here if we can potentially reuse the mv
+  // for this higher-resolution (|cpi->oxcf.mr_encoder_id| > 0) frame.
+ // |parent_ref_valid| may be reset depending on |parent_ref_frame| for
+ // the current macroblock below.
+ parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail;
+ if (parent_ref_valid) {
+ int parent_ref_flag;
+
+ get_lower_res_motion_info(cpi, xd, &dissim, &parent_ref_frame, &parent_mode,
+ &parent_ref_mv, mb_row, mb_col);
+
+ /* TODO(jkoleszar): The references available (ref_frame_flags) to the
+ * lower res encoder should match those available to this encoder, but
+ * there seems to be a situation where this mismatch can happen in the
+ * case of frame dropping and temporal layers. For example,
+ * GOLD being disallowed in ref_frame_flags, but being returned as
+ * parent_ref_frame.
+ *
+ * In this event, take the conservative approach of disabling the
+ * lower res info for this MB.
+ */
+
+ parent_ref_flag = 0;
+ // Note availability for mv reuse is only based on last and golden.
+ if (parent_ref_frame == LAST_FRAME)
+ parent_ref_flag = (cpi->ref_frame_flags & VP8_LAST_FRAME);
+ else if (parent_ref_frame == GOLDEN_FRAME)
+ parent_ref_flag = (cpi->ref_frame_flags & VP8_GOLD_FRAME);
+
+ // assert(!parent_ref_frame || parent_ref_flag);
+
+ // If |parent_ref_frame| did not match either last or golden then
+ // shut off mv reuse.
+ if (parent_ref_frame && !parent_ref_flag) parent_ref_valid = 0;
+
+ // Don't do mv reuse since we want to allow for another mode besides
+ // ZEROMV_LAST to remove dot artifact.
+ if (dot_artifact_candidate) parent_ref_valid = 0;
+ }
+#endif
+
+ // Check if current macroblock is in skin area.
+ x->is_skin = 0;
+ if (!cpi->oxcf.screen_content_mode) {
+ int block_index = mb_row * cpi->common.mb_cols + mb_col;
+ x->is_skin = cpi->skin_map[block_index];
+ }
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+    // Under aggressive denoising mode, should we use the skin map to reduce
+    // the denoiser and ZEROMV bias? Will need to revisit the accuracy of this
+    // detection for very noisy input. For now keep this as is (i.e., don't
+    // turn it off).
+ // if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive)
+ // x->is_skin = 0;
+ }
+#endif
+
+ mode_mv = mode_mv_sb[sign_bias];
+ best_ref_mv.as_int = 0;
+ memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
+ memset(&best_mbmode, 0, sizeof(best_mbmode));
+
+/* Setup search priorities */
+#if CONFIG_MULTI_RES_ENCODING
+ if (parent_ref_valid && parent_ref_frame && dissim < 8) {
+ ref_frame_map[0] = -1;
+ ref_frame_map[1] = parent_ref_frame;
+ ref_frame_map[2] = -1;
+ ref_frame_map[3] = -1;
+ } else
+#endif
+ get_reference_search_order(cpi, ref_frame_map);
+
+ /* Check to see if there is at least 1 valid reference frame that we need
+ * to calculate near_mvs.
+ */
+ if (ref_frame_map[1] > 0) {
+ sign_bias = vp8_find_near_mvs_bias(
+ &x->e_mbd, x->e_mbd.mode_info_context, mode_mv_sb, best_ref_mv_sb,
+ mdcounts, ref_frame_map[1], cpi->common.ref_frame_sign_bias);
+
+ mode_mv = mode_mv_sb[sign_bias];
+ best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
+ }
+
+ /* Count of the number of MBs tested so far this frame */
+ x->mbs_tested_so_far++;
+
+ *returnintra = INT_MAX;
+ x->skip = 0;
+
+ x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+
+ /* If the frame has big static background and current MB is in low
+ * motion area, its mode decision is biased to ZEROMV mode.
+ * No adjustment if cpu_used is <= -12 (i.e., cpi->Speed >= 12).
+ * At such speed settings, ZEROMV is already heavily favored.
+ */
+ if (cpi->Speed < 12) {
+ calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment);
+ }
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+ rd_adjustment = (int)(rd_adjustment *
+ cpi->denoiser.denoise_pars.pickmode_mv_bias / 100);
+ }
+#endif
+
+ if (dot_artifact_candidate) {
+ // Bias against ZEROMV_LAST mode.
+ rd_adjustment = 150;
+ }
+
+  /* If we encode a new mv, this is important:
+   * find the best new motion vector.
+   */
+ for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
+ int frame_cost;
+ int this_rd = INT_MAX;
+ int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
+
+ if (best_rd <= x->rd_threshes[mode_index]) continue;
+
+ if (this_ref_frame < 0) continue;
+
+ x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
+
+ /* everything but intra */
+ if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
+ x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
+ x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
+ x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
+
+ if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) {
+ sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame];
+ mode_mv = mode_mv_sb[sign_bias];
+ best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
+ }
+
+#if CONFIG_MULTI_RES_ENCODING
+ if (parent_ref_valid) {
+ if (vp8_mode_order[mode_index] == NEARESTMV &&
+ mode_mv[NEARESTMV].as_int == 0)
+ continue;
+ if (vp8_mode_order[mode_index] == NEARMV && mode_mv[NEARMV].as_int == 0)
+ continue;
+
+ if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV &&
+ best_ref_mv.as_int == 0)
+ continue;
+ else if (vp8_mode_order[mode_index] == NEWMV && dissim == 0 &&
+ best_ref_mv.as_int == parent_ref_mv.as_int)
+ continue;
+ }
+#endif
+ }
+
+    /* Check to see if the testing frequency for this mode is at its max.
+     * If so, prevent it from being tested and increase the threshold for
+     * its testing. */
+ if (x->mode_test_hit_counts[mode_index] &&
+ (cpi->mode_check_freq[mode_index] > 1)) {
+ if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
+ x->mode_test_hit_counts[mode_index])) {
+ /* Increase the threshold for coding this mode to make it less
+ * likely to be chosen */
+ x->rd_thresh_mult[mode_index] += 4;
+
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ }
+
+ x->rd_threshes[mode_index] =
+ (cpi->rd_baseline_thresh[mode_index] >> 7) *
+ x->rd_thresh_mult[mode_index];
+ continue;
+ }
+ }
+
+ /* We have now reached the point where we are going to test the current
+ * mode so increment the counter for the number of times it has been
+ * tested */
+ x->mode_test_hit_counts[mode_index]++;
+
+ rate2 = 0;
+ distortion2 = 0;
+
+ this_mode = vp8_mode_order[mode_index];
+
+ x->e_mbd.mode_info_context->mbmi.mode = this_mode;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
+
+    /* Work out the cost associated with selecting the reference frame */
+ frame_cost = x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
+ rate2 += frame_cost;
+
+ /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
+ * unless ARNR filtering is enabled in which case we want
+ * an unfiltered alternative */
+ if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
+ if (this_mode != ZEROMV ||
+ x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
+ continue;
+ }
+ }
+
+ switch (this_mode) {
+ case B_PRED:
+ /* Pass best so far to pick_intra4x4mby_modes to use as breakout */
+ distortion2 = best_rd_sse;
+ pick_intra4x4mby_modes(x, &rate, &distortion2);
+
+ if (distortion2 == INT_MAX) {
+ this_rd = INT_MAX;
+ } else {
+ rate2 += rate;
+ distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
+ x->e_mbd.predictor, 16, &sse);
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+
+ if (this_rd < best_intra_rd) {
+ best_intra_rd = this_rd;
+ *returnintra = distortion2;
+ }
+ }
+
+ break;
+
+ case SPLITMV:
+
+ /* Split MV modes currently not supported when RD is not enabled. */
+ break;
+
+ case DC_PRED:
+ case V_PRED:
+ case H_PRED:
+ case TM_PRED:
+ vp8_build_intra_predictors_mby_s(
+ xd, xd->dst.y_buffer - xd->dst.y_stride, xd->dst.y_buffer - 1,
+ xd->dst.y_stride, xd->predictor, 16);
+ distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
+ x->e_mbd.predictor, 16, &sse);
+ rate2 += x->mbmode_cost[x->e_mbd.frame_type]
+ [x->e_mbd.mode_info_context->mbmi.mode];
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+
+ if (this_rd < best_intra_rd) {
+ best_intra_rd = this_rd;
+ *returnintra = distortion2;
+ }
+ break;
+
+ case NEWMV: {
+ int thissme;
+ int step_param;
+ int further_steps;
+ int n = 0;
+ int sadpb = x->sadperbit16;
+ int_mv mvp_full;
+
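+        /* Full-pel search window, centred on the 1/8-pel best_ref_mv
+         * (rounded to full pel) and limited to +/- MAX_FULL_PEL_VAL. */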
+ int col_min = ((best_ref_mv.as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL;
+ int row_min = ((best_ref_mv.as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL;
+ int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL;
+ int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL;
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+
+ int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8) ? 3 : 2) : 1;
+
+ /* Further step/diamond searches as necessary */
+ step_param = cpi->sf.first_step + speed_adjust;
+
+#if CONFIG_MULTI_RES_ENCODING
+          /* If the lower-res frame is not available for mv reuse (because of
+             frame dropping or a different temporal layer pattern), then the
+             higher-resolution encoder does motion search without any previous
+             knowledge. Also, since last frame motion info is not stored, we
+             cannot use improved_mv_pred. */
+ if (cpi->oxcf.mr_encoder_id) sf_improved_mv_pred = 0;
+
+ // Only use parent MV as predictor if this candidate reference frame
+ // (|this_ref_frame|) is equal to |parent_ref_frame|.
+ if (parent_ref_valid && (parent_ref_frame == this_ref_frame)) {
+ /* Use parent MV as predictor. Adjust search range
+ * accordingly.
+ */
+ mvp.as_int = parent_ref_mv.as_int;
+ mvp_full.as_mv.col = parent_ref_mv.as_mv.col >> 3;
+ mvp_full.as_mv.row = parent_ref_mv.as_mv.row >> 3;
+
+ if (dissim <= 32)
+ step_param += 3;
+ else if (dissim <= 128)
+ step_param += 2;
+ else
+ step_param += 1;
+ } else
+#endif
+ {
+ if (sf_improved_mv_pred) {
+ if (!saddone) {
+ vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
+ saddone = 1;
+ }
+
+ vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
+ x->e_mbd.mode_info_context->mbmi.ref_frame,
+ cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
+
+ sr += speed_adjust;
+ /* adjust search range according to sr from mv prediction */
+ if (sr > step_param) step_param = sr;
+
+ mvp_full.as_mv.col = mvp.as_mv.col >> 3;
+ mvp_full.as_mv.row = mvp.as_mv.row >> 3;
+ } else {
+ mvp.as_int = best_ref_mv.as_int;
+ mvp_full.as_mv.col = best_ref_mv.as_mv.col >> 3;
+ mvp_full.as_mv.row = best_ref_mv.as_mv.row >> 3;
+ }
+ }
+
+#if CONFIG_MULTI_RES_ENCODING
+ if (parent_ref_valid && (parent_ref_frame == this_ref_frame) &&
+ dissim <= 2 &&
+ VPXMAX(abs(best_ref_mv.as_mv.row - parent_ref_mv.as_mv.row),
+ abs(best_ref_mv.as_mv.col - parent_ref_mv.as_mv.col)) <= 4) {
+ d->bmi.mv.as_int = mvp_full.as_int;
+ mode_mv[NEWMV].as_int = mvp_full.as_int;
+
+ cpi->find_fractional_mv_step(
+ x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
+ &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
+ } else
+#endif
+ {
+ /* Get intersection of UMV window and valid MV window to
+ * reduce # of checks in diamond search. */
+ if (x->mv_col_min < col_min) x->mv_col_min = col_min;
+ if (x->mv_col_max > col_max) x->mv_col_max = col_max;
+ if (x->mv_row_min < row_min) x->mv_row_min = row_min;
+ if (x->mv_row_max > row_max) x->mv_row_max = row_max;
+
+ further_steps =
+ (cpi->Speed >= 8)
+ ? 0
+ : (cpi->sf.max_step_search_steps - 1 - step_param);
+
+ if (cpi->sf.search_method == HEX) {
+#if CONFIG_MULTI_RES_ENCODING
+ /* TODO: In higher-res pick_inter_mode, step_param is used to
+ * modify the hex search range. Here, set step_param to 0 so as
+ * not to change the behavior in the lowest-resolution encoder.
+ * Will improve it later.
+ */
+ /* Set step_param to 0 to ensure a large-range motion search
+ * when mv reuse is not valid (i.e. |parent_ref_valid| = 0),
+ * or if this candidate reference frame (|this_ref_frame|) is
+ * not equal to |parent_ref_frame|.
+ */
+ if (!parent_ref_valid || (parent_ref_frame != this_ref_frame))
+ step_param = 0;
+#endif
+ bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, step_param,
+ sadpb, &cpi->fn_ptr[BLOCK_16X16],
+ x->mvsadcost, &best_ref_mv);
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ } else {
+ bestsme = cpi->diamond_search_sad(
+ x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb, &num00,
+ &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+
+ /* Further step/diamond searches as necessary */
+ n = num00;
+ num00 = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00) {
+ num00--;
+ } else {
+ thissme = cpi->diamond_search_sad(
+ x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb,
+ &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ } else {
+ d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
+ }
+ }
+ }
+ }
+
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+ if (bestsme < INT_MAX) {
+ cpi->find_fractional_mv_step(
+ x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
+ &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
+ }
+ }
+
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ // The clamp below is not necessary from the perspective of the
+ // VP8 bitstream, but is added to improve ChromeCast
+ // mirroring's robustness. Please do not remove.
+ vp8_clamp_mv2(&mode_mv[this_mode], xd);
+ /* mv cost; */
+ rate2 +=
+ vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
+ }
+ // fall through
+
+ case NEARESTMV:
+ case NEARMV:
+ if (mode_mv[this_mode].as_int == 0) continue;
+ // fall through
+
+ case ZEROMV:
+
+ /* Trap vectors that reach beyond the UMV borders.
+ * Note that ALL NEWMV, NEARESTMV, NEARMV and ZEROMV code drops
+ * through to this point because of the lack of break statements
+ * in the previous two cases.
+ */
+ if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
+ ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
+ continue;
+ }
+
+ rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = mode_mv[this_mode].as_int;
+ this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x,
+ rd_adjustment);
+
+ break;
+ default: break;
+ }
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+ /* Store for later use by the denoiser. */
+ // Don't denoise with GOLDEN or ALTREF if they are old reference
+ // frames (greater than MAX_GF_ARF_DENOISE_RANGE frames in the past).
+ int skip_old_reference = ((this_ref_frame != LAST_FRAME) &&
+ (cpi->common.current_video_frame -
+ cpi->current_ref_frames[this_ref_frame] >
+ MAX_GF_ARF_DENOISE_RANGE))
+ ? 1
+ : 0;
+ if (this_mode == ZEROMV && sse < zero_mv_sse && !skip_old_reference) {
+ zero_mv_sse = sse;
+ x->best_zeromv_reference_frame =
+ x->e_mbd.mode_info_context->mbmi.ref_frame;
+ }
+
+ // Store the best NEWMV in x for later use in the denoiser.
+ if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && sse < best_sse &&
+ !skip_old_reference) {
+ best_sse = sse;
+ x->best_sse_inter_mode = NEWMV;
+ x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv;
+ x->need_to_clamp_best_mvs =
+ x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs;
+ x->best_reference_frame = x->e_mbd.mode_info_context->mbmi.ref_frame;
+ }
+ }
+#endif
+
+ if (this_rd < best_rd || x->skip) {
+ /* Note index of best mode */
+ best_mode_index = mode_index;
+
+ *returnrate = rate2;
+ *returndistortion = distortion2;
+ best_rd_sse = sse;
+ best_rd = this_rd;
+ memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
+ sizeof(MB_MODE_INFO));
+
+ /* Testing this mode gave rise to an improvement in best error
+ * score. Lower threshold a bit for next time
+ */
+ x->rd_thresh_mult[mode_index] =
+ (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2))
+ ? x->rd_thresh_mult[mode_index] - 2
+ : MIN_THRESHMULT;
+ x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
+ x->rd_thresh_mult[mode_index];
+ }
+
+ /* If the mode did not help improve the best error case then raise the
+ * threshold for testing that mode next time around.
+ */
+ else {
+ x->rd_thresh_mult[mode_index] += 4;
+
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ }
+
+ x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
+ x->rd_thresh_mult[mode_index];
+ }
+
+ if (x->skip) break;
+ }
+
+ /* Reduce the activation RD thresholds for the best choice mode */
+ if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
+ (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
+ int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3);
+
+ x->rd_thresh_mult[best_mode_index] =
+ (x->rd_thresh_mult[best_mode_index] >=
+ (MIN_THRESHMULT + best_adjustment))
+ ? x->rd_thresh_mult[best_mode_index] - best_adjustment
+ : MIN_THRESHMULT;
+ x->rd_threshes[best_mode_index] =
+ (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
+ x->rd_thresh_mult[best_mode_index];
+ }
+
+ {
+ int this_rdbin = (*returndistortion >> 7);
+
+ if (this_rdbin >= 1024) {
+ this_rdbin = 1023;
+ }
+
+ x->error_bins[this_rdbin]++;
+ }
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+ int block_index = mb_row * cpi->common.mb_cols + mb_col;
+ int reevaluate = 0;
+ int is_noisy = 0;
+ if (x->best_sse_inter_mode == DC_PRED) {
+ /* No best MV found. */
+ x->best_sse_inter_mode = best_mbmode.mode;
+ x->best_sse_mv = best_mbmode.mv;
+ x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs;
+ x->best_reference_frame = best_mbmode.ref_frame;
+ best_sse = best_rd_sse;
+ }
+ // For non-skin blocks that have selected ZEROMV for this current frame,
+ // and have been selecting ZEROMV_LAST (on the base layer frame) for at
+ // least ~20 consecutive past frames in a row, label the block for a
+ // possible increase in denoising strength. We also condition this
+ // labeling on there being significant denoising in the scene.
+ if (cpi->oxcf.noise_sensitivity == 4) {
+ if (cpi->denoiser.nmse_source_diff >
+ 70 * cpi->denoiser.threshold_aggressive_mode / 100) {
+ is_noisy = 1;
+ }
+ } else {
+ if (cpi->mse_source_denoised > 1000) is_noisy = 1;
+ }
+ x->increase_denoising = 0;
+ if (!x->is_skin && x->best_sse_inter_mode == ZEROMV &&
+ (x->best_reference_frame == LAST_FRAME ||
+ x->best_reference_frame == cpi->closest_reference_frame) &&
+ cpi->consec_zero_last[block_index] >= 20 && is_noisy) {
+ x->increase_denoising = 1;
+ }
+ x->denoise_zeromv = 0;
+ vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
+ recon_yoffset, recon_uvoffset, &cpi->common.lf_info,
+ mb_row, mb_col, block_index,
+ cpi->consec_zero_last_mvbias[block_index]);
+
+ // Reevaluate ZEROMV after denoising: for large noise content
+ // (i.e., cpi->mse_source_denoised is above threshold), do this for all
+ // blocks that did not pick ZEROMV as best mode but are using ZEROMV
+ // for denoising. Otherwise, always re-evaluate for blocks that picked
+ // INTRA mode as best mode.
+ // Avoid blocks that have been biased against ZERO_LAST
+ // (i.e., dot artifact candidate blocks).
+ reevaluate = (best_mbmode.ref_frame == INTRA_FRAME) ||
+ (best_mbmode.mode != ZEROMV && x->denoise_zeromv &&
+ cpi->mse_source_denoised > 2000);
+ if (!dot_artifact_candidate && reevaluate &&
+ x->best_zeromv_reference_frame != INTRA_FRAME) {
+ int this_rd = 0;
+ int this_ref_frame = x->best_zeromv_reference_frame;
+ rd_adjustment = 100;
+ rate2 =
+ x->ref_frame_cost[this_ref_frame] + vp8_cost_mv_ref(ZEROMV, mdcounts);
+ distortion2 = 0;
+
+ /* set up the proper prediction buffers for the frame */
+ x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
+ x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
+ x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
+ x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
+
+ x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
+ this_rd =
+ evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x, rd_adjustment);
+
+ if (this_rd < best_rd) {
+ memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
+ sizeof(MB_MODE_INFO));
+ }
+ }
+ }
+#endif
+
+ if (cpi->is_src_frame_alt_ref &&
+ (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
+ x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
+ x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
+ x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
+ (cpi->common.mb_no_coeff_skip);
+ x->e_mbd.mode_info_context->mbmi.partitioning = 0;
+
+ return;
+ }
+
+ /* set to the best mb mode; this copy can be skipped if x->skip is set,
+ * since it already has the right content */
+ if (!x->skip) {
+ memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
+ sizeof(MB_MODE_INFO));
+ }
+
+ if (best_mbmode.mode <= B_PRED) {
+ /* set mode_info_context->mbmi.uv_mode */
+ pick_intra_mbuv_mode(x);
+ }
+
+ if (sign_bias !=
+ cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) {
+ best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
+ }
+
+ update_mvcount(x, &best_ref_mv);
+}
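The mode loop above adapts its pruning thresholds with a simple feedback rule: the winning mode's multiplier is nudged down by 2 (floored at MIN_THRESHMULT) while a losing mode is penalized by 4 (capped at MAX_THRESHMULT), so frequently useful modes become cheaper to test over time. Below is a minimal standalone sketch of just that rule; the constant values and the adapt_thresh_mult helper are illustrative assumptions, not encoder code.

#include <stdio.h>

#define MIN_THRESHMULT 32 /* assumed placeholder values for this sketch */
#define MAX_THRESHMULT 512

static int adapt_thresh_mult(int mult, int mode_won) {
  if (mode_won) {
    /* reward: lower the multiplier, but never below the floor */
    return (mult >= MIN_THRESHMULT + 2) ? mult - 2 : MIN_THRESHMULT;
  }
  /* penalty: raise the multiplier, but never above the ceiling */
  mult += 4;
  return (mult > MAX_THRESHMULT) ? MAX_THRESHMULT : mult;
}

int main(void) {
  int mult = 128;
  mult = adapt_thresh_mult(mult, 1); /* mode won:  128 -> 126 */
  mult = adapt_thresh_mult(mult, 0); /* mode lost: 126 -> 130 */
  printf("%d\n", mult); /* prints 130 */
  return 0;
}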
+
+void vp8_pick_intra_mode(MACROBLOCK *x, int *rate) {
+ int error4x4, error16x16 = INT_MAX;
+ int rate_, best_rate = 0, distortion, best_sse;
+ MB_PREDICTION_MODE mode, best_mode = DC_PRED;
+ int this_rd;
+ unsigned int sse;
+ BLOCK *b = &x->block[0];
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+
+ pick_intra_mbuv_mode(x);
+
+ for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
+ xd->mode_info_context->mbmi.mode = mode;
+ vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
+ xd->dst.y_buffer - 1, xd->dst.y_stride,
+ xd->predictor, 16);
+ distortion = vpx_variance16x16(*(b->base_src), b->src_stride, xd->predictor,
+ 16, &sse);
+ rate_ = x->mbmode_cost[xd->frame_type][mode];
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate_, distortion);
+
+ if (error16x16 > this_rd) {
+ error16x16 = this_rd;
+ best_mode = mode;
+ best_sse = sse;
+ best_rate = rate_;
+ }
+ }
+ xd->mode_info_context->mbmi.mode = best_mode;
+
+ error4x4 = pick_intra4x4mby_modes(x, &rate_, &best_sse);
+ if (error4x4 < error16x16) {
+ xd->mode_info_context->mbmi.mode = B_PRED;
+ best_rate = rate_;
+ }
+
+ *rate = best_rate;
+}
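Both mode loops above fold rate and distortion into one score via RDCOST(x->rdmult, x->rddiv, rate, distortion). The following self-contained sketch shows how such a cost combines the two terms; the macro body here is an assumed shape (rate weighted by the Lagrange multiplier with rounding, distortion weighted separately), not a quotation of vp8's rdopt.h.

#include <stdio.h>

/* Assumed RD cost shape: rate scaled by rdmult (>>8 with rounding),
 * distortion scaled by rddiv; the mode with the smallest sum wins. */
#define RDCOST(RM, DM, R, D) (((128 + (R) * (RM)) >> 8) + (DM) * (D))

int main(void) {
  int rdmult = 300, rddiv = 100;     /* hypothetical multipliers */
  int rate = 500, distortion = 2000; /* hypothetical per-mode stats */
  printf("rd cost = %d\n", RDCOST(rdmult, rddiv, rate, distortion));
  /* (128 + 500*300) >> 8 = 586, plus 100*2000 -> prints 200586 */
  return 0;
}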
diff --git a/media/libvpx/libvpx/vp8/encoder/pickinter.h b/media/libvpx/libvpx/vp8/encoder/pickinter.h
new file mode 100644
index 0000000000..392fb41593
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/pickinter.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_PICKINTER_H_
+#define VPX_VP8_ENCODER_PICKINTER_H_
+#include "vpx_config.h"
+#include "vp8/common/onyxc_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
+ int recon_uvoffset, int *returnrate,
+ int *returndistortion, int *returnintra,
+ int mb_row, int mb_col);
+extern void vp8_pick_intra_mode(MACROBLOCK *x, int *rate);
+
+extern int vp8_get_inter_mbpred_error(MACROBLOCK *mb,
+ const vp8_variance_fn_ptr_t *vfp,
+ unsigned int *sse, int_mv this_mv);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_PICKINTER_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/picklpf.c b/media/libvpx/libvpx/vp8/encoder/picklpf.c
new file mode 100644
index 0000000000..387ac9788b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/picklpf.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+#include "vp8/common/onyxc_int.h"
+#include "onyx_int.h"
+#include "vp8/encoder/picklpf.h"
+#include "vp8/encoder/quantize.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp8/common/alloccommon.h"
+#include "vp8/common/loopfilter.h"
+#if VPX_ARCH_ARM
+#include "vpx_ports/arm.h"
+#endif
+
+extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest);
+
+static void yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc,
+ YV12_BUFFER_CONFIG *dst_ybc) {
+ unsigned char *src_y, *dst_y;
+ int yheight;
+ int ystride;
+ int yoffset;
+ int linestocopy;
+
+ yheight = src_ybc->y_height;
+ ystride = src_ybc->y_stride;
+
+ /* number of MB rows to use in partial filtering */
+ linestocopy = (yheight >> 4) / PARTIAL_FRAME_FRACTION;
+ linestocopy = linestocopy ? linestocopy << 4 : 16; /* 16 lines per MB */
+
+ /* Copy an extra 4 lines so that full filter context is available if
+ * filtering is done on the copied partial frame and not the original.
+ * The partial filter does mb filtering for the top row also, which can
+ * modify 3 pixels above.
+ */
+ linestocopy += 4;
+ /* partial image starts at ~middle of frame (macroblock border)*/
+ yoffset = ystride * (((yheight >> 5) * 16) - 4);
+ src_y = src_ybc->y_buffer + yoffset;
+ dst_y = dst_ybc->y_buffer + yoffset;
+
+ memcpy(dst_y, src_y, ystride * linestocopy);
+}
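Worked numbers for the copy-size arithmetic above, assuming a 720-line frame and a PARTIAL_FRAME_FRACTION of 8 (the constant's value here is an assumption for illustration; the real definition lives elsewhere in the encoder):

#include <stdio.h>

#define PARTIAL_FRAME_FRACTION 8 /* assumed value for this sketch */

int main(void) {
  int yheight = 720;
  int linestocopy = (yheight >> 4) / PARTIAL_FRAME_FRACTION; /* 45/8 = 5 MB rows */
  linestocopy = linestocopy ? linestocopy << 4 : 16;         /* 5*16 = 80 lines */
  linestocopy += 4;                                          /* filter context rows */
  int yoffset_rows = ((yheight >> 5) * 16) - 4; /* (22*16)-4 = 348: near mid-frame */
  printf("copy %d lines starting at row %d\n", linestocopy, yoffset_rows);
  return 0;
}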
+
+static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest) {
+ int i, j;
+ int Total = 0;
+ int srcoffset, dstoffset;
+ unsigned char *src = source->y_buffer;
+ unsigned char *dst = dest->y_buffer;
+
+ int linestocopy;
+
+ /* number of MB rows to use in partial filtering */
+ linestocopy = (source->y_height >> 4) / PARTIAL_FRAME_FRACTION;
+ linestocopy = linestocopy ? linestocopy << 4 : 16; /* 16 lines per MB */
+
+ /* partial image starts at ~middle of frame (macroblock border)*/
+ srcoffset = source->y_stride * ((dest->y_height >> 5) * 16);
+ dstoffset = dest->y_stride * ((dest->y_height >> 5) * 16);
+
+ src += srcoffset;
+ dst += dstoffset;
+
+ /* Loop through the Y plane's raw and reconstructed data, summing the
+ * squared differences.
+ */
+ for (i = 0; i < linestocopy; i += 16) {
+ for (j = 0; j < source->y_width; j += 16) {
+ unsigned int sse;
+ Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
+ &sse);
+ }
+
+ src += 16 * source->y_stride;
+ dst += 16 * dest->y_stride;
+ }
+
+ return Total;
+}
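vpx_mse16x16 above accumulates the sum of squared differences over one 16x16 block (the code reads the return value; an |sse| out-parameter is also filled in, typically by a SIMD kernel). A scalar sketch of the same accumulation, for reference only:

#include <stdio.h>

static unsigned int sse_16x16(const unsigned char *src, int src_stride,
                              const unsigned char *ref, int ref_stride) {
  unsigned int sse = 0;
  for (int r = 0; r < 16; ++r) {
    for (int c = 0; c < 16; ++c) {
      int d = src[r * src_stride + c] - ref[r * ref_stride + c];
      sse += (unsigned int)(d * d);
    }
  }
  return sse;
}

int main(void) {
  unsigned char a[16 * 16] = { 0 }, b[16 * 16] = { 0 };
  b[0] = 4; /* one pixel differs by 4 -> sse = 16 */
  printf("%u\n", sse_16x16(a, 16, b, 16));
  return 0;
}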
+
+/* Enforce a minimum filter level based upon baseline Q */
+static int get_min_filter_level(VP8_COMP *cpi, int base_qindex) {
+ int min_filter_level;
+
+ if (cpi->source_alt_ref_active && cpi->common.refresh_golden_frame &&
+ !cpi->common.refresh_alt_ref_frame) {
+ min_filter_level = 0;
+ } else {
+ if (base_qindex <= 6) {
+ min_filter_level = 0;
+ } else if (base_qindex <= 16) {
+ min_filter_level = 1;
+ } else {
+ min_filter_level = (base_qindex / 8);
+ }
+ }
+
+ return min_filter_level;
+}
+
+/* Enforce a maximum filter level based upon baseline Q */
+static int get_max_filter_level(VP8_COMP *cpi, int base_qindex) {
+ /* PGW August 2006: Highest filter values are almost always a bad idea */
+
+ /* jbb chg: 20100118 - not so any more with this overquant stuff; allow
+ * high values when lots of intra is coming in.
+ */
+ int max_filter_level = MAX_LOOP_FILTER;
+ (void)base_qindex;
+
+ if (cpi->twopass.section_intra_rating > 8) {
+ max_filter_level = MAX_LOOP_FILTER * 3 / 4;
+ }
+
+ return max_filter_level;
+}
+
+void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ int best_err = 0;
+ int filt_err = 0;
+ int min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
+ int max_filter_level = get_max_filter_level(cpi, cm->base_qindex);
+ int filt_val;
+ int best_filt_val;
+ YV12_BUFFER_CONFIG *saved_frame = cm->frame_to_show;
+
+ /* Replace unfiltered frame buffer with a new one */
+ cm->frame_to_show = &cpi->pick_lf_lvl_frame;
+
+ if (cm->frame_type == KEY_FRAME) {
+ cm->sharpness_level = 0;
+ } else {
+ cm->sharpness_level = cpi->oxcf.Sharpness;
+ }
+
+ if (cm->sharpness_level != cm->last_sharpness_level) {
+ vp8_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level);
+ cm->last_sharpness_level = cm->sharpness_level;
+ }
+
+ /* Start the search at the previous frame filter level unless it is
+ * now out of range.
+ */
+ if (cm->filter_level < min_filter_level) {
+ cm->filter_level = min_filter_level;
+ } else if (cm->filter_level > max_filter_level) {
+ cm->filter_level = max_filter_level;
+ }
+
+ filt_val = cm->filter_level;
+ best_filt_val = filt_val;
+
+ /* Get the err using the previous frame's filter value. */
+
+ /* Copy the unfiltered / processed recon buffer to the new buffer */
+ yv12_copy_partial_frame(saved_frame, cm->frame_to_show);
+ vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
+
+ best_err = calc_partial_ssl_err(sd, cm->frame_to_show);
+
+ filt_val -= 1 + (filt_val > 10);
+
+ /* Search lower filter levels */
+ while (filt_val >= min_filter_level) {
+ /* Apply the loop filter */
+ yv12_copy_partial_frame(saved_frame, cm->frame_to_show);
+ vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
+
+ /* Get the err for filtered frame */
+ filt_err = calc_partial_ssl_err(sd, cm->frame_to_show);
+
+ /* Update the best case record or exit loop. */
+ if (filt_err < best_err) {
+ best_err = filt_err;
+ best_filt_val = filt_val;
+ } else {
+ break;
+ }
+
+ /* Adjust filter level */
+ filt_val -= 1 + (filt_val > 10);
+ }
+
+ /* Search up (note that we have already done filt_val = cm->filter_level) */
+ filt_val = cm->filter_level + 1 + (filt_val > 10);
+
+ if (best_filt_val == cm->filter_level) {
+ /* Resist raising filter level for very small gains */
+ best_err -= (best_err >> 10);
+
+ while (filt_val < max_filter_level) {
+ /* Apply the loop filter */
+ yv12_copy_partial_frame(saved_frame, cm->frame_to_show);
+
+ vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
+
+ /* Get the err for filtered frame */
+ filt_err = calc_partial_ssl_err(sd, cm->frame_to_show);
+
+ /* Update the best case record or exit loop. */
+ if (filt_err < best_err) {
+ /* Do not raise filter level if improvement is < 1 part
+ * in 1024
+ */
+ best_err = filt_err - (filt_err >> 10);
+
+ best_filt_val = filt_val;
+ } else {
+ break;
+ }
+
+ /* Adjust filter level */
+ filt_val += 1 + (filt_val > 10);
+ }
+ }
+
+ cm->filter_level = best_filt_val;
+
+ if (cm->filter_level < min_filter_level) cm->filter_level = min_filter_level;
+
+ if (cm->filter_level > max_filter_level) cm->filter_level = max_filter_level;
+
+ /* restore unfiltered frame pointer */
+ cm->frame_to_show = saved_frame;
+}
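The adjustment filt_val -= 1 + (filt_val > 10) gives the fast search a non-uniform probe schedule: steps of 2 above level 10 and steps of 1 at or below it. A tiny standalone demonstration of the descent (starting level chosen arbitrarily):

#include <stdio.h>

int main(void) {
  int filt_val = 24, min_filter_level = 0;
  while (filt_val >= min_filter_level) {
    printf("%d ", filt_val); /* 24 22 20 ... 12 10 9 8 ... 1 0 */
    filt_val -= 1 + (filt_val > 10);
  }
  printf("\n");
  return 0;
}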
+
+/* Stub function for now; Alt LF not used */
+void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val) {
+ MACROBLOCKD *mbd = &cpi->mb.e_mbd;
+ (void)filt_val;
+
+ mbd->segment_feature_data[MB_LVL_ALT_LF][0] =
+ cpi->segment_feature_data[MB_LVL_ALT_LF][0];
+ mbd->segment_feature_data[MB_LVL_ALT_LF][1] =
+ cpi->segment_feature_data[MB_LVL_ALT_LF][1];
+ mbd->segment_feature_data[MB_LVL_ALT_LF][2] =
+ cpi->segment_feature_data[MB_LVL_ALT_LF][2];
+ mbd->segment_feature_data[MB_LVL_ALT_LF][3] =
+ cpi->segment_feature_data[MB_LVL_ALT_LF][3];
+}
+
+void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ int best_err = 0;
+ int filt_err = 0;
+ int min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
+ int max_filter_level = get_max_filter_level(cpi, cm->base_qindex);
+
+ int filter_step;
+ int filt_high = 0;
+ int filt_mid;
+ int filt_low = 0;
+ int filt_best;
+ int filt_direction = 0;
+
+ /* Bias against raising loop filter and in favor of lowering it */
+ int Bias = 0;
+
+ int ss_err[MAX_LOOP_FILTER + 1];
+
+ YV12_BUFFER_CONFIG *saved_frame = cm->frame_to_show;
+
+ memset(ss_err, 0, sizeof(ss_err));
+
+ /* Replace unfiltered frame buffer with a new one */
+ cm->frame_to_show = &cpi->pick_lf_lvl_frame;
+
+ if (cm->frame_type == KEY_FRAME) {
+ cm->sharpness_level = 0;
+ } else {
+ cm->sharpness_level = cpi->oxcf.Sharpness;
+ }
+
+ /* Start the search at the previous frame filter level unless it is
+ * now out of range.
+ */
+ filt_mid = cm->filter_level;
+
+ if (filt_mid < min_filter_level) {
+ filt_mid = min_filter_level;
+ } else if (filt_mid > max_filter_level) {
+ filt_mid = max_filter_level;
+ }
+
+ /* Define the initial step size */
+ filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;
+
+ /* Get baseline error score */
+
+ /* Copy the unfiltered / processed recon buffer to the new buffer */
+ vpx_yv12_copy_y(saved_frame, cm->frame_to_show);
+
+ vp8cx_set_alt_lf_level(cpi, filt_mid);
+ vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
+
+ best_err = vp8_calc_ss_err(sd, cm->frame_to_show);
+
+ ss_err[filt_mid] = best_err;
+
+ filt_best = filt_mid;
+
+ while (filter_step > 0) {
+ Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
+
+ if (cpi->twopass.section_intra_rating < 20) {
+ Bias = Bias * cpi->twopass.section_intra_rating / 20;
+ }
+
+ filt_high = ((filt_mid + filter_step) > max_filter_level)
+ ? max_filter_level
+ : (filt_mid + filter_step);
+ filt_low = ((filt_mid - filter_step) < min_filter_level)
+ ? min_filter_level
+ : (filt_mid - filter_step);
+
+ if ((filt_direction <= 0) && (filt_low != filt_mid)) {
+ if (ss_err[filt_low] == 0) {
+ /* Get Low filter error score */
+ vpx_yv12_copy_y(saved_frame, cm->frame_to_show);
+ vp8cx_set_alt_lf_level(cpi, filt_low);
+ vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
+
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
+ ss_err[filt_low] = filt_err;
+ } else {
+ filt_err = ss_err[filt_low];
+ }
+
+ /* If value is close to the best so far then bias towards a
+ * lower loop filter value.
+ */
+ if ((filt_err - Bias) < best_err) {
+ /* Was it actually better than the previous best? */
+ if (filt_err < best_err) best_err = filt_err;
+
+ filt_best = filt_low;
+ }
+ }
+
+ /* Now look at filt_high */
+ if ((filt_direction >= 0) && (filt_high != filt_mid)) {
+ if (ss_err[filt_high] == 0) {
+ vpx_yv12_copy_y(saved_frame, cm->frame_to_show);
+ vp8cx_set_alt_lf_level(cpi, filt_high);
+ vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
+
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
+ ss_err[filt_high] = filt_err;
+ } else {
+ filt_err = ss_err[filt_high];
+ }
+
+ /* Was it better than the previous best? */
+ if (filt_err < (best_err - Bias)) {
+ best_err = filt_err;
+ filt_best = filt_high;
+ }
+ }
+
+ /* Half the step distance if the best filter value was the same
+ * as last time
+ */
+ if (filt_best == filt_mid) {
+ filter_step = filter_step / 2;
+ filt_direction = 0;
+ } else {
+ filt_direction = (filt_best < filt_mid) ? -1 : 1;
+ filt_mid = filt_best;
+ }
+ }
+
+ cm->filter_level = filt_best;
+
+ /* restore unfiltered frame pointer */
+ cm->frame_to_show = saved_frame;
+}
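The loop above is in effect a damped, biased bisection: probe filt_mid +/- filter_step, let the low probe win if it beats best_err minus a bias (favoring lower filter levels), require the high probe to beat it by the bias, and halve the step whenever the middle wins. A toy standalone version with a hypothetical convex error curve (err(), the bias constant, and all values are assumptions for illustration):

#include <stdio.h>

/* hypothetical error curve with its true minimum at level 17 */
static int err(int level) { return (level - 17) * (level - 17) + 40; }

int main(void) {
  int lo = 0, hi = 63, mid = 24, best = 24, dir = 0;
  int step = mid / 4, best_err = err(mid);
  while (step > 0) {
    int bias = 2; /* stand-in for the rate-dependent Bias term */
    int lo_c = (mid - step < lo) ? lo : mid - step;
    int hi_c = (mid + step > hi) ? hi : mid + step;
    if (dir <= 0 && lo_c != mid && err(lo_c) - bias < best_err) {
      if (err(lo_c) < best_err) best_err = err(lo_c);
      best = lo_c; /* biased toward lower levels */
    }
    if (dir >= 0 && hi_c != mid && err(hi_c) < best_err - bias) {
      best_err = err(hi_c);
      best = hi_c;
    }
    if (best == mid) { step /= 2; dir = 0; } /* middle won: halve step */
    else { dir = (best < mid) ? -1 : 1; mid = best; }
  }
  printf("best level = %d\n", best); /* 16: the bias lands one below 17 */
  return 0;
}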
diff --git a/media/libvpx/libvpx/vp8/encoder/picklpf.h b/media/libvpx/libvpx/vp8/encoder/picklpf.h
new file mode 100644
index 0000000000..03597e5427
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/picklpf.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_PICKLPF_H_
+#define VPX_VP8_ENCODER_PICKLPF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP8_COMP;
+struct yv12_buffer_config;
+
+void vp8cx_pick_filter_level_fast(struct yv12_buffer_config *sd,
+ struct VP8_COMP *cpi);
+void vp8cx_set_alt_lf_level(struct VP8_COMP *cpi, int filt_val);
+void vp8cx_pick_filter_level(struct yv12_buffer_config *sd, VP8_COMP *cpi);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VPX_VP8_ENCODER_PICKLPF_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/quantize.h b/media/libvpx/libvpx/vp8/encoder/quantize.h
new file mode 100644
index 0000000000..78746c0c20
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/quantize.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_QUANTIZE_H_
+#define VPX_VP8_ENCODER_QUANTIZE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP8_COMP;
+struct macroblock;
+extern void vp8_quantize_mb(struct macroblock *x);
+extern void vp8_quantize_mby(struct macroblock *x);
+extern void vp8_quantize_mbuv(struct macroblock *x);
+extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);
+extern void vp8cx_frame_init_quantizer(struct VP8_COMP *cpi);
+extern void vp8_update_zbin_extra(struct VP8_COMP *cpi, struct macroblock *x);
+extern void vp8cx_mb_init_quantizer(struct VP8_COMP *cpi, struct macroblock *x,
+ int ok_to_skip);
+extern void vp8cx_init_quantizer(struct VP8_COMP *cpi);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_QUANTIZE_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/ratectrl.c b/media/libvpx/libvpx/vp8/encoder/ratectrl.c
new file mode 100644
index 0000000000..9cd3963e22
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/ratectrl.c
@@ -0,0 +1,1589 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <limits.h>
+#include <assert.h>
+
+#include "math.h"
+#include "vp8/common/common.h"
+#include "ratectrl.h"
+#include "vp8/common/entropymode.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/systemdependent.h"
+#include "encodemv.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_ports/system_state.h"
+
+#define MIN_BPB_FACTOR 0.01
+#define MAX_BPB_FACTOR 50
+
+extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
+
+#ifdef MODE_STATS
+extern int y_modes[5];
+extern int uv_modes[4];
+extern int b_modes[10];
+
+extern int inter_y_modes[10];
+extern int inter_uv_modes[4];
+extern int inter_b_modes[10];
+#endif
+
+/* Bits Per MB at different Q (Multiplied by 512) */
+#define BPER_MB_NORMBITS 9
+
+/* Work in progress recalibration of baseline rate tables based on
+ * the assumption that bits per mb is inversely proportional to the
+ * quantizer value.
+ */
+const int vp8_bits_per_mb[2][QINDEX_RANGE] = {
+ /* Intra case 450000/Qintra */
+ {
+ 1125000, 900000, 750000, 642857, 562500, 500000, 450000, 450000, 409090,
+ 375000, 346153, 321428, 300000, 281250, 264705, 264705, 250000, 236842,
+ 225000, 225000, 214285, 214285, 204545, 204545, 195652, 195652, 187500,
+ 180000, 180000, 173076, 166666, 160714, 155172, 150000, 145161, 140625,
+ 136363, 132352, 128571, 125000, 121621, 121621, 118421, 115384, 112500,
+ 109756, 107142, 104651, 102272, 100000, 97826, 97826, 95744, 93750,
+ 91836, 90000, 88235, 86538, 84905, 83333, 81818, 80357, 78947,
+ 77586, 76271, 75000, 73770, 72580, 71428, 70312, 69230, 68181,
+ 67164, 66176, 65217, 64285, 63380, 62500, 61643, 60810, 60000,
+ 59210, 59210, 58441, 57692, 56962, 56250, 55555, 54878, 54216,
+ 53571, 52941, 52325, 51724, 51136, 50561, 49450, 48387, 47368,
+ 46875, 45918, 45000, 44554, 44117, 43269, 42452, 41666, 40909,
+ 40178, 39473, 38793, 38135, 36885, 36290, 35714, 35156, 34615,
+ 34090, 33582, 33088, 32608, 32142, 31468, 31034, 30405, 29801,
+ 29220, 28662,
+ },
+ /* Inter case 285000/Qinter */
+ {
+ 712500, 570000, 475000, 407142, 356250, 316666, 285000, 259090, 237500,
+ 219230, 203571, 190000, 178125, 167647, 158333, 150000, 142500, 135714,
+ 129545, 123913, 118750, 114000, 109615, 105555, 101785, 98275, 95000,
+ 91935, 89062, 86363, 83823, 81428, 79166, 77027, 75000, 73076,
+ 71250, 69512, 67857, 66279, 64772, 63333, 61956, 60638, 59375,
+ 58163, 57000, 55882, 54807, 53773, 52777, 51818, 50892, 50000,
+ 49137, 47500, 45967, 44531, 43181, 41911, 40714, 39583, 38513,
+ 37500, 36538, 35625, 34756, 33928, 33139, 32386, 31666, 30978,
+ 30319, 29687, 29081, 28500, 27941, 27403, 26886, 26388, 25909,
+ 25446, 25000, 24568, 23949, 23360, 22800, 22265, 21755, 21268,
+ 20802, 20357, 19930, 19520, 19127, 18750, 18387, 18037, 17701,
+ 17378, 17065, 16764, 16473, 16101, 15745, 15405, 15079, 14766,
+ 14467, 14179, 13902, 13636, 13380, 13133, 12895, 12666, 12445,
+ 12179, 11924, 11632, 11445, 11220, 11003, 10795, 10594, 10401,
+ 10215, 10035,
+ }
+};
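The leading table entries are consistent with the stated rules: dividing 450000 (intra) or 285000 (inter) by quantizer values that appear to start at 0.4 and rise in 0.1 steps reproduces them, modulo rounding direction. A quick spot-check (the 0.4 / 0.1-step quantizer progression is an observation about the first few entries, not a documented rule):

#include <stdio.h>

int main(void) {
  double q[] = { 0.4, 0.5, 0.6, 0.7 };
  for (int i = 0; i < 4; ++i) {
    printf("intra %.0f  inter %.0f\n", 450000 / q[i], 285000 / q[i]);
  }
  /* prints 1125000/712500, 900000/570000, 750000/475000, 642857/407143;
   * the table's 407142 differs only because it truncates instead of
   * rounding. */
  return 0;
}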
+
+static const int kf_boost_qadjustment[QINDEX_RANGE] = {
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 200, 201,
+ 201, 202, 203, 203, 203, 204, 204, 205, 205, 206, 206, 207, 207, 208, 208,
+ 209, 209, 210, 210, 211, 211, 212, 212, 213, 213, 214, 214, 215, 215, 216,
+ 216, 217, 217, 218, 218, 219, 219, 220, 220, 220, 220, 220, 220, 220, 220,
+ 220, 220, 220, 220, 220, 220, 220, 220,
+};
+
+/* #define GFQ_ADJUSTMENT (Q+100) */
+#define GFQ_ADJUSTMENT vp8_gf_boost_qadjustment[Q]
+const int vp8_gf_boost_qadjustment[QINDEX_RANGE] = {
+ 80, 82, 84, 86, 88, 90, 92, 94, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 184, 185, 185, 186, 186, 187, 187, 188,
+ 188, 189, 189, 190, 190, 191, 191, 192, 192, 193, 193, 194, 194, 194, 194,
+ 195, 195, 196, 196, 197, 197, 198, 198
+};
+
+/*
+const int vp8_gf_boost_qadjustment[QINDEX_RANGE] =
+{
+ 100,101,102,103,104,105,105,106,
+ 106,107,107,108,109,109,110,111,
+ 112,113,114,115,116,117,118,119,
+ 120,121,122,123,124,125,126,127,
+ 128,129,130,131,132,133,134,135,
+ 136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,
+ 152,153,154,155,156,157,158,159,
+ 160,161,162,163,164,165,166,167,
+ 168,169,170,170,171,171,172,172,
+ 173,173,173,174,174,174,175,175,
+ 175,176,176,176,177,177,177,177,
+ 178,178,179,179,180,180,181,181,
+ 182,182,183,183,184,184,185,185,
+ 186,186,187,187,188,188,189,189,
+ 190,190,191,191,192,192,193,193,
+};
+*/
+
+static const int kf_gf_boost_qlimits[QINDEX_RANGE] = {
+ 150, 155, 160, 165, 170, 175, 180, 185, 190, 195, 200, 205, 210, 215, 220,
+ 225, 230, 235, 240, 245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 295,
+ 300, 305, 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, 410, 420, 430,
+ 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570, 580,
+ 590, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
+ 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
+ 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
+ 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
+ 600, 600, 600, 600, 600, 600, 600, 600,
+};
+
+static const int gf_adjust_table[101] = {
+ 100, 115, 130, 145, 160, 175, 190, 200, 210, 220, 230, 240, 260, 270, 280,
+ 290, 300, 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+};
+
+static const int gf_intra_usage_adjustment[20] = {
+ 125, 120, 115, 110, 105, 100, 95, 85, 80, 75,
+ 70, 65, 60, 55, 50, 50, 50, 50, 50, 50,
+};
+
+static const int gf_interval_table[101] = {
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+};
+
+static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] = { 1, 2, 3,
+ 4, 5 };
+
+void vp8_save_coding_context(VP8_COMP *cpi) {
+ CODING_CONTEXT *const cc = &cpi->coding_context;
+
+ /* Stores a snapshot of key state variables which can subsequently be
+ * restored with a call to vp8_restore_coding_context. These functions are
+ * intended for use in a re-code loop in vp8_compress_frame where the
+ * quantizer value is adjusted between loop iterations.
+ */
+
+ cc->frames_since_key = cpi->frames_since_key;
+ cc->filter_level = cpi->common.filter_level;
+ cc->frames_till_gf_update_due = cpi->frames_till_gf_update_due;
+ cc->frames_since_golden = cpi->frames_since_golden;
+
+ vp8_copy(cc->mvc, cpi->common.fc.mvc);
+ vp8_copy(cc->mvcosts, cpi->rd_costs.mvcosts);
+
+ vp8_copy(cc->ymode_prob, cpi->common.fc.ymode_prob);
+ vp8_copy(cc->uv_mode_prob, cpi->common.fc.uv_mode_prob);
+
+ vp8_copy(cc->ymode_count, cpi->mb.ymode_count);
+ vp8_copy(cc->uv_mode_count, cpi->mb.uv_mode_count);
+
+/* Stats */
+#ifdef MODE_STATS
+ vp8_copy(cc->y_modes, y_modes);
+ vp8_copy(cc->uv_modes, uv_modes);
+ vp8_copy(cc->b_modes, b_modes);
+ vp8_copy(cc->inter_y_modes, inter_y_modes);
+ vp8_copy(cc->inter_uv_modes, inter_uv_modes);
+ vp8_copy(cc->inter_b_modes, inter_b_modes);
+#endif
+
+ cc->this_frame_percent_intra = cpi->this_frame_percent_intra;
+}
+
+void vp8_restore_coding_context(VP8_COMP *cpi) {
+ CODING_CONTEXT *const cc = &cpi->coding_context;
+
+ /* Restore key state variables to the snapshot state stored in the
+ * previous call to vp8_save_coding_context.
+ */
+
+ cpi->frames_since_key = cc->frames_since_key;
+ cpi->common.filter_level = cc->filter_level;
+ cpi->frames_till_gf_update_due = cc->frames_till_gf_update_due;
+ cpi->frames_since_golden = cc->frames_since_golden;
+
+ vp8_copy(cpi->common.fc.mvc, cc->mvc);
+
+ vp8_copy(cpi->rd_costs.mvcosts, cc->mvcosts);
+
+ vp8_copy(cpi->common.fc.ymode_prob, cc->ymode_prob);
+ vp8_copy(cpi->common.fc.uv_mode_prob, cc->uv_mode_prob);
+
+ vp8_copy(cpi->mb.ymode_count, cc->ymode_count);
+ vp8_copy(cpi->mb.uv_mode_count, cc->uv_mode_count);
+
+/* Stats */
+#ifdef MODE_STATS
+ vp8_copy(y_modes, cc->y_modes);
+ vp8_copy(uv_modes, cc->uv_modes);
+ vp8_copy(b_modes, cc->b_modes);
+ vp8_copy(inter_y_modes, cc->inter_y_modes);
+ vp8_copy(inter_uv_modes, cc->inter_uv_modes);
+ vp8_copy(inter_b_modes, cc->inter_b_modes);
+#endif
+
+ cpi->this_frame_percent_intra = cc->this_frame_percent_intra;
+}
+
+void vp8_setup_key_frame(VP8_COMP *cpi) {
+ /* Setup for Key frame: */
+
+ vp8_default_coef_probs(&cpi->common);
+
+ memcpy(cpi->common.fc.mvc, vp8_default_mv_context,
+ sizeof(vp8_default_mv_context));
+ {
+ int flag[2] = { 1, 1 };
+ vp8_build_component_cost_table(
+ cpi->mb.mvcost, (const MV_CONTEXT *)cpi->common.fc.mvc, flag);
+ }
+
+ /* Make sure we initialize separate contexts for altref, gold, and normal.
+ * TODO: we shouldn't need 3 different copies of the structure to do this!
+ */
+ memcpy(&cpi->lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));
+ memcpy(&cpi->lfc_g, &cpi->common.fc, sizeof(cpi->common.fc));
+ memcpy(&cpi->lfc_n, &cpi->common.fc, sizeof(cpi->common.fc));
+
+ cpi->common.filter_level = cpi->common.base_qindex * 3 / 8;
+
+ /* Provisional interval before next GF */
+ if (cpi->auto_gold) {
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ } else {
+ cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
+ }
+
+ cpi->common.refresh_golden_frame = 1;
+ cpi->common.refresh_alt_ref_frame = 1;
+}
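One concrete consequence of the key-frame setup above: the loop-filter level defaults to three-eighths of the frame quantizer index, with integer division. A few worked values:

#include <stdio.h>

int main(void) {
  int qs[] = { 8, 40, 80, 127 };
  for (int i = 0; i < 4; ++i) {
    printf("qindex %3d -> filter level %d\n", qs[i], qs[i] * 3 / 8);
  }
  /* 8 -> 3, 40 -> 15, 80 -> 30, 127 -> 47 */
  return 0;
}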
+
+static int estimate_bits_at_q(int frame_kind, int Q, int MBs,
+ double correction_factor) {
+ int Bpm = (int)(.5 + correction_factor * vp8_bits_per_mb[frame_kind][Q]);
+
+ /* Attempt to retain reasonable accuracy without overflow. The cutoff is
+ * chosen such that the maximum product of Bpm and MBs fits 31 bits. The
+ * largest Bpm takes 20 bits.
+ */
+ if (MBs > (1 << 11)) {
+ return (Bpm >> BPER_MB_NORMBITS) * MBs;
+ } else {
+ return (Bpm * MBs) >> BPER_MB_NORMBITS;
+ }
+}
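Worked numbers for the overflow guard above (frame sizes hypothetical): at CIF the exact product fits a 32-bit int comfortably, while at 1080p the product Bpm * MBs would exceed 31 bits, so the pre-shift path trades a little precision for safety.

#include <stdio.h>

#define BPER_MB_NORMBITS 9 /* table values are scaled by 512 */

int main(void) {
  int Bpm = 712500;    /* inter table, lowest Q, correction factor 1.0 */
  int mbs_cif = 396;   /* 352x288: 396 <= 2^11, exact path */
  int mbs_1080 = 8160; /* 1920x1088: 8160 > 2^11, pre-shift path */
  printf("%d\n", (Bpm * mbs_cif) >> BPER_MB_NORMBITS);  /* 551074 bits */
  printf("%d\n", (Bpm >> BPER_MB_NORMBITS) * mbs_1080); /* 11350560 bits */
  /* the exact 1080p product (712500 * 8160 = 5.81e9) would overflow a
   * 32-bit int; the pre-shifted result is within ~0.05% of exact. */
  return 0;
}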
+
+static void calc_iframe_target_size(VP8_COMP *cpi) {
+ /* boost defaults to half second */
+ int kf_boost;
+ uint64_t target;
+
+ /* Clear down mmx registers to allow floating point in what follows */
+ vpx_clear_system_state();
+
+ if (cpi->oxcf.fixed_q >= 0) {
+ int Q = cpi->oxcf.key_q;
+
+ target = estimate_bits_at_q(INTRA_FRAME, Q, cpi->common.MBs,
+ cpi->key_frame_rate_correction_factor);
+ } else if (cpi->pass == 2) {
+ /* New Two pass RC */
+ target = cpi->per_frame_bandwidth;
+ }
+ /* First Frame is a special case */
+ else if (cpi->common.current_video_frame == 0) {
+ /* In 1 pass there is no information on which to base the size, so
+ * use the bandwidth per second * a fraction of the initial buffer
+ * level
+ */
+ target = (uint64_t)cpi->oxcf.starting_buffer_level / 2;
+
+ if (target > cpi->oxcf.target_bandwidth * 3 / 2) {
+ target = cpi->oxcf.target_bandwidth * 3 / 2;
+ }
+ } else {
+ /* if this keyframe was forced, use a more recent Q estimate */
+ int Q = (cpi->common.frame_flags & FRAMEFLAGS_KEY) ? cpi->avg_frame_qindex
+ : cpi->ni_av_qi;
+
+ int initial_boost = 32; /* |3.0 * per_frame_bandwidth| */
+ /* Boost depends somewhat on frame rate: only used for 1 layer case. */
+ if (cpi->oxcf.number_of_layers == 1) {
+ kf_boost =
+ VPXMAX(initial_boost, (int)round(2 * cpi->output_framerate - 16));
+ } else {
+ /* Initial factor: set target size to: |3.0 * per_frame_bandwidth|. */
+ kf_boost = initial_boost;
+ }
+
+ /* adjustment up based on q: this factor ranges from ~1.2 to 2.2. */
+ kf_boost = kf_boost * kf_boost_qadjustment[Q] / 100;
+
+ /* frame separation adjustment (down) */
+ if (cpi->frames_since_key < cpi->output_framerate / 2) {
+ kf_boost =
+ (int)(kf_boost * cpi->frames_since_key / (cpi->output_framerate / 2));
+ }
+
+ /* Minimal target size is |2 * per_frame_bandwidth|. */
+ if (kf_boost < 16) kf_boost = 16;
+
+ target = ((16 + kf_boost) * cpi->per_frame_bandwidth) >> 4;
+ }
+
+ if (cpi->oxcf.rc_max_intra_bitrate_pct) {
+ unsigned int max_rate;
+ // This product may overflow unsigned int
+ uint64_t product = cpi->per_frame_bandwidth;
+ product *= cpi->oxcf.rc_max_intra_bitrate_pct;
+ product /= 100;
+ max_rate = (unsigned int)VPXMIN(INT_MAX, product);
+
+ if (target > max_rate) target = max_rate;
+ }
+
+ cpi->this_frame_target = (int)target;
+
+ /* TODO: if we separate rate targeting from Q targeting, move this.
+ * Reset the active worst quality to the baseline value for key frames.
+ */
+ if (cpi->pass != 2) cpi->active_worst_quality = cpi->worst_quality;
+
+#if 0
+ {
+ FILE *f;
+
+ f = fopen("kf_boost.stt", "a");
+ fprintf(f, " %8u %10d %10d %10d\n",
+ cpi->common.current_video_frame, cpi->gfu_boost, cpi->baseline_gf_interval, cpi->source_alt_ref_pending);
+
+ fclose(f);
+ }
+#endif
+}
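The boosted key-frame target reduces to target = ((16 + kf_boost) * per_frame_bandwidth) >> 4, so the floor kf_boost = 16 gives exactly twice a normal frame's budget and kf_boost = 240 gives sixteen times. A worked sketch with hypothetical inputs:

#include <stdio.h>

int main(void) {
  int per_frame_bandwidth = 20000; /* bits, e.g. 600 kbps at 30 fps */
  int boosts[] = { 16, 32, 240 };
  for (int i = 0; i < 3; ++i) {
    int target = ((16 + boosts[i]) * per_frame_bandwidth) >> 4;
    printf("kf_boost %3d -> %6d bits (%.1fx)\n", boosts[i], target,
           (double)target / per_frame_bandwidth);
  }
  /* 16 -> 40000 (2.0x), 32 -> 60000 (3.0x), 240 -> 320000 (16.0x) */
  return 0;
}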
+
+/* Do the best we can to define the parameters for the next GF based on what
+ * information we have available.
+ */
+static void calc_gf_params(VP8_COMP *cpi) {
+ int Q =
+ (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
+ int Boost = 0;
+
+ int gf_frame_useage = 0; /* Golden frame useage since last GF */
+ int tot_mbs = cpi->recent_ref_frame_usage[INTRA_FRAME] +
+ cpi->recent_ref_frame_usage[LAST_FRAME] +
+ cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
+ cpi->recent_ref_frame_usage[ALTREF_FRAME];
+
+ int pct_gf_active = (100 * cpi->gf_active_count) /
+ (cpi->common.mb_rows * cpi->common.mb_cols);
+
+ if (tot_mbs) {
+ gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
+ cpi->recent_ref_frame_usage[ALTREF_FRAME]) *
+ 100 / tot_mbs;
+ }
+
+ if (pct_gf_active > gf_frame_useage) gf_frame_useage = pct_gf_active;
+
+ /* Not two pass */
+ if (cpi->pass != 2) {
+ /* Single Pass lagged mode: TBD */
+ if (0) {
+ }
+
+ /* Single Pass compression: Has to use current and historical data */
+ else {
+#if 0
+ /* Experimental code */
+ int index = cpi->one_pass_frame_index;
+ int frames_to_scan = (cpi->max_gf_interval <= MAX_LAG_BUFFERS) ? cpi->max_gf_interval : MAX_LAG_BUFFERS;
+
+ /* ************** Experimental code - incomplete */
+ /*
+ double decay_val = 1.0;
+ double IIAccumulator = 0.0;
+ double last_iiaccumulator = 0.0;
+ double IIRatio;
+
+ cpi->one_pass_frame_index = cpi->common.current_video_frame%MAX_LAG_BUFFERS;
+
+ for ( i = 0; i < (frames_to_scan - 1); i++ )
+ {
+ if ( index < 0 )
+ index = MAX_LAG_BUFFERS;
+ index --;
+
+ if ( cpi->one_pass_frame_stats[index].frame_coded_error > 0.0 )
+ {
+ IIRatio = cpi->one_pass_frame_stats[index].frame_intra_error / cpi->one_pass_frame_stats[index].frame_coded_error;
+
+ if ( IIRatio > 30.0 )
+ IIRatio = 30.0;
+ }
+ else
+ IIRatio = 30.0;
+
+ IIAccumulator += IIRatio * decay_val;
+
+ decay_val = decay_val * cpi->one_pass_frame_stats[index].frame_pcnt_inter;
+
+ if ( (i > MIN_GF_INTERVAL) &&
+ ((IIAccumulator - last_iiaccumulator) < 2.0) )
+ {
+ break;
+ }
+ last_iiaccumulator = IIAccumulator;
+ }
+
+ Boost = IIAccumulator*100.0/16.0;
+ cpi->baseline_gf_interval = i;
+
+ */
+#else
+
+ /*************************************************************/
+ /* OLD code */
+
+ /* Adjust boost based upon ambient Q */
+ Boost = GFQ_ADJUSTMENT;
+
+ /* Adjust based upon the most recently measured intra usage */
+ Boost = Boost *
+ gf_intra_usage_adjustment[(cpi->this_frame_percent_intra < 15)
+ ? cpi->this_frame_percent_intra
+ : 14] /
+ 100;
+
+ /* Adjust gf boost based upon GF usage since last GF */
+ Boost = Boost * gf_adjust_table[gf_frame_useage] / 100;
+#endif
+ }
+
+ /* Golden frame boost without the recode loop often goes awry. Be
+ * safe by keeping the numbers down.
+ */
+ if (!cpi->sf.recode_loop) {
+ if (cpi->compressor_speed == 2) Boost = Boost / 2;
+ }
+
+ /* Apply an upper limit based on Q for 1 pass encodes */
+ if (Boost > kf_gf_boost_qlimits[Q] && (cpi->pass == 0)) {
+ Boost = kf_gf_boost_qlimits[Q];
+
+ /* Apply lower limits to boost. */
+ } else if (Boost < 110) {
+ Boost = 110;
+ }
+
+ /* Note the boost used */
+ cpi->last_boost = Boost;
+ }
+
+ /* Estimate next interval
+ * This is updated once the real frame size/boost is known.
+ */
+ if (cpi->oxcf.fixed_q == -1) {
+ if (cpi->pass == 2) { /* 2 Pass */
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ } else { /* 1 Pass */
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+
+ if (cpi->last_boost > 750) cpi->frames_till_gf_update_due++;
+
+ if (cpi->last_boost > 1000) cpi->frames_till_gf_update_due++;
+
+ if (cpi->last_boost > 1250) cpi->frames_till_gf_update_due++;
+
+ if (cpi->last_boost >= 1500) cpi->frames_till_gf_update_due++;
+
+ if (gf_interval_table[gf_frame_useage] > cpi->frames_till_gf_update_due) {
+ cpi->frames_till_gf_update_due = gf_interval_table[gf_frame_useage];
+ }
+
+ if (cpi->frames_till_gf_update_due > cpi->max_gf_interval) {
+ cpi->frames_till_gf_update_due = cpi->max_gf_interval;
+ }
+ }
+ } else {
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ }
+
+ /* ARF on or off */
+ if (cpi->pass != 2) {
+ /* For now Alt ref is not allowed except in 2 pass modes. */
+ cpi->source_alt_ref_pending = 0;
+
+ /*if ( cpi->oxcf.fixed_q == -1)
+ {
+ if ( cpi->oxcf.play_alternate && (cpi->last_boost > (100 +
+ (AF_THRESH*cpi->frames_till_gf_update_due)) ) )
+ cpi->source_alt_ref_pending = 1;
+ else
+ cpi->source_alt_ref_pending = 0;
+ }*/
+ }
+}
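Tracing the one-pass boost chain above with hypothetical inputs (table entries abbreviated to the two values the trace needs): start from the Q-based adjustment, scale by recent intra usage, then by golden-frame usage, and apply the lower limit.

#include <stdio.h>

int main(void) {
  int boost = 145;      /* vp8_gf_boost_qadjustment[Q] for some mid Q */
  int intra_adj = 100;  /* gf_intra_usage_adjustment[5]: ~5% intra */
  int gf_use_adj = 160; /* gf_adjust_table[4]: modest GF usage */
  boost = boost * intra_adj / 100;  /* 145 */
  boost = boost * gf_use_adj / 100; /* 232 */
  if (boost < 110) boost = 110;     /* lower limit from the code above */
  printf("boost = %d\n", boost);    /* prints 232 */
  return 0;
}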
+
+static void calc_pframe_target_size(VP8_COMP *cpi) {
+ int min_frame_target;
+ int old_per_frame_bandwidth = cpi->per_frame_bandwidth;
+
+ if (cpi->current_layer > 0) {
+ cpi->per_frame_bandwidth =
+ cpi->layer_context[cpi->current_layer].avg_frame_size_for_layer;
+ }
+
+ min_frame_target = 0;
+
+ if (cpi->pass == 2) {
+ min_frame_target = cpi->min_frame_bandwidth;
+
+ if (min_frame_target < (cpi->av_per_frame_bandwidth >> 5)) {
+ min_frame_target = cpi->av_per_frame_bandwidth >> 5;
+ }
+ } else if (min_frame_target < cpi->per_frame_bandwidth / 4) {
+ min_frame_target = cpi->per_frame_bandwidth / 4;
+ }
+
+ /* Special alt reference frame case */
+ if ((cpi->common.refresh_alt_ref_frame) &&
+ (cpi->oxcf.number_of_layers == 1)) {
+ if (cpi->pass == 2) {
+ /* Per frame bit target for the alt ref frame */
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ }
+
+ /* One Pass ??? TBD */
+ }
+
+ /* Normal frames (gf,and inter) */
+ else {
+ /* 2 pass */
+ if (cpi->pass == 2) {
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ }
+ /* 1 pass */
+ else {
+ int Adjustment;
+ /* Make a rate adjustment to recover bits spent in the key frame.
+ * Test to see if the key frame inter data rate correction
+ * should still be in force.
+ */
+ if (cpi->kf_overspend_bits > 0) {
+ Adjustment = (cpi->kf_bitrate_adjustment <= cpi->kf_overspend_bits)
+ ? cpi->kf_bitrate_adjustment
+ : cpi->kf_overspend_bits;
+
+ if (Adjustment > (cpi->per_frame_bandwidth - min_frame_target)) {
+ Adjustment = (cpi->per_frame_bandwidth - min_frame_target);
+ }
+
+ cpi->kf_overspend_bits -= Adjustment;
+
+ /* Calculate an inter frame bandwidth target for the next
+ * few frames designed to recover any extra bits spent on
+ * the key frame.
+ */
+ cpi->this_frame_target = cpi->per_frame_bandwidth - Adjustment;
+
+ if (cpi->this_frame_target < min_frame_target) {
+ cpi->this_frame_target = min_frame_target;
+ }
+ } else {
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ }
+
+ /* If appropriate make an adjustment to recover bits spent on a
+ * recent GF
+ */
+ if ((cpi->gf_overspend_bits > 0) &&
+ (cpi->this_frame_target > min_frame_target)) {
+ Adjustment = (cpi->non_gf_bitrate_adjustment <= cpi->gf_overspend_bits)
+ ? cpi->non_gf_bitrate_adjustment
+ : cpi->gf_overspend_bits;
+
+ if (Adjustment > (cpi->this_frame_target - min_frame_target)) {
+ Adjustment = (cpi->this_frame_target - min_frame_target);
+ }
+
+ cpi->gf_overspend_bits -= Adjustment;
+ cpi->this_frame_target -= Adjustment;
+ }
+
+ /* Apply small + and - boosts for non gf frames */
+ if ((cpi->last_boost > 150) && (cpi->frames_till_gf_update_due > 0) &&
+ (cpi->current_gf_interval >= (MIN_GF_INTERVAL << 1))) {
+ /* % Adjustment limited to the range 1% to 10% */
+ Adjustment = (cpi->last_boost - 100) >> 5;
+
+ if (Adjustment < 1) {
+ Adjustment = 1;
+ } else if (Adjustment > 10) {
+ Adjustment = 10;
+ }
+
+ /* Convert to bits */
+ Adjustment = (cpi->this_frame_target * Adjustment) / 100;
+
+ if (Adjustment > (cpi->this_frame_target - min_frame_target)) {
+ Adjustment = (cpi->this_frame_target - min_frame_target);
+ }
+
+ if (cpi->frames_since_golden == (cpi->current_gf_interval >> 1)) {
+ Adjustment = (cpi->current_gf_interval - 1) * Adjustment;
+ // Limit adjustment to 10% of current target.
+ if (Adjustment > (10 * cpi->this_frame_target) / 100) {
+ Adjustment = (10 * cpi->this_frame_target) / 100;
+ }
+ cpi->this_frame_target += Adjustment;
+ } else {
+ cpi->this_frame_target -= Adjustment;
+ }
+ }
+ }
+ }
+
+ /* Sanity check that the total sum of adjustments is not above the
+ * maximum allowed. That is, having allowed for the KF and GF penalties,
+ * we have not pushed the current interframe target too low. If the
+ * adjustment we apply here is not capable of recovering all the extra
+ * bits we have spent in the KF or GF, then the remainder will have to
+ * be recovered over a longer time span via other buffer / rate control
+ * mechanisms.
+ */
+ if (cpi->this_frame_target < min_frame_target) {
+ cpi->this_frame_target = min_frame_target;
+ }
+
+ if (!cpi->common.refresh_alt_ref_frame) {
+ /* Note the baseline target data rate for this inter frame. */
+ cpi->inter_frame_target = cpi->this_frame_target;
+ }
+
+ /* One Pass specific code */
+ if (cpi->pass == 0) {
+ /* Adapt target frame size with respect to any buffering constraints: */
+ if (cpi->buffered_mode) {
+ int one_percent_bits = (int)(1 + cpi->oxcf.optimal_buffer_level / 100);
+
+ if ((cpi->buffer_level < cpi->oxcf.optimal_buffer_level) ||
+ (cpi->bits_off_target < cpi->oxcf.optimal_buffer_level)) {
+ int percent_low = 0;
+
+ /* Decide whether or not we need to adjust the frame data
+ * rate target.
+ *
+ * If we are below the optimal buffer fullness level
+ * and adherence to buffering constraints is important to
+ * the end usage, then adjust the per frame target.
+ */
+ if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
+ (cpi->buffer_level < cpi->oxcf.optimal_buffer_level)) {
+ percent_low =
+ (int)((cpi->oxcf.optimal_buffer_level - cpi->buffer_level) /
+ one_percent_bits);
+ }
+ /* Are we overshooting the long term clip data rate... */
+ else if (cpi->bits_off_target < 0) {
+ /* Adjust per frame data target downwards to compensate. */
+ percent_low =
+ (int)(100 * -cpi->bits_off_target / (cpi->total_byte_count * 8));
+ }
+
+ if (percent_low > cpi->oxcf.under_shoot_pct) {
+ percent_low = cpi->oxcf.under_shoot_pct;
+ } else if (percent_low < 0) {
+ percent_low = 0;
+ }
+
+ /* lower the target bandwidth for this frame. */
+ cpi->this_frame_target -= (cpi->this_frame_target * percent_low) / 200;
+
+ /* Are we allowing control of active_worst_allowed_q
+ * according to buffer level?
+ */
+ if (cpi->auto_worst_q && cpi->ni_frames > 150) {
+ int64_t critical_buffer_level;
+
+ /* For streaming applications the most important factor is
+ * cpi->buffer_level as this takes into account the
+ * specified short term buffering constraints. However,
+ * hitting the long term clip data rate target is also
+ * important.
+ */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ /* Take the smaller of cpi->buffer_level and
+ * cpi->bits_off_target
+ */
+ critical_buffer_level = (cpi->buffer_level < cpi->bits_off_target)
+ ? cpi->buffer_level
+ : cpi->bits_off_target;
+ }
+ /* For local file playback short term buffering constraints
+ * are less of an issue
+ */
+ else {
+ /* Consider only how we are doing for the clip as a
+ * whole
+ */
+ critical_buffer_level = cpi->bits_off_target;
+ }
+
+ /* Set the active worst quality based upon the selected
+ * buffer fullness number.
+ */
+ if (critical_buffer_level < cpi->oxcf.optimal_buffer_level) {
+ if (critical_buffer_level > (cpi->oxcf.optimal_buffer_level >> 2)) {
+ int64_t qadjustment_range = cpi->worst_quality - cpi->ni_av_qi;
+ int64_t above_base = (critical_buffer_level -
+ (cpi->oxcf.optimal_buffer_level >> 2));
+
+ /* Step active worst quality down from
+ * cpi->ni_av_qi when (critical_buffer_level ==
+ * cpi->optimal_buffer_level) to
+ * cpi->worst_quality when
+ * (critical_buffer_level ==
+ * cpi->optimal_buffer_level >> 2)
+ */
+ cpi->active_worst_quality =
+ cpi->worst_quality -
+ (int)((qadjustment_range * above_base) /
+ (cpi->oxcf.optimal_buffer_level * 3 >> 2));
+ } else {
+ cpi->active_worst_quality = cpi->worst_quality;
+ }
+ } else {
+ cpi->active_worst_quality = cpi->ni_av_qi;
+ }
+ } else {
+ cpi->active_worst_quality = cpi->worst_quality;
+ }
+ } else {
+ int percent_high = 0;
+ int64_t target = cpi->this_frame_target;
+
+ if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
+ (cpi->buffer_level > cpi->oxcf.optimal_buffer_level)) {
+ percent_high =
+ (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
+ one_percent_bits);
+ } else if (cpi->bits_off_target > cpi->oxcf.optimal_buffer_level) {
+ percent_high =
+ (int)((100 * cpi->bits_off_target) / (cpi->total_byte_count * 8));
+ }
+
+ if (percent_high > cpi->oxcf.over_shoot_pct) {
+ percent_high = cpi->oxcf.over_shoot_pct;
+ } else if (percent_high < 0) {
+ percent_high = 0;
+ }
+
+ target += (target * percent_high) / 200;
+ target = VPXMIN(target, INT_MAX);
+ cpi->this_frame_target = (int)target;
+
+ /* Are we allowing control of active_worst_allowed_q according
+ * to buffer level.
+ */
+ if (cpi->auto_worst_q && cpi->ni_frames > 150) {
+ /* When using the relaxed buffer model stick to the
+ * user specified value
+ */
+ cpi->active_worst_quality = cpi->ni_av_qi;
+ } else {
+ cpi->active_worst_quality = cpi->worst_quality;
+ }
+ }
+
+ /* Set active_best_quality to prevent quality rising too high */
+ cpi->active_best_quality = cpi->best_quality;
+
+ /* Worst quality obviously must not be better than best quality */
+ if (cpi->active_worst_quality <= cpi->active_best_quality) {
+ cpi->active_worst_quality = cpi->active_best_quality + 1;
+ }
+
+ if (cpi->active_worst_quality > 127) cpi->active_worst_quality = 127;
+ }
+ /* Unbuffered mode (eg. video conferencing) */
+ else {
+ /* Set the active worst quality */
+ cpi->active_worst_quality = cpi->worst_quality;
+ }
+
+ /* Special trap for constrained quality mode
+ * "active_worst_quality" may never drop below cq level
+ * for any frame type.
+ */
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY &&
+ cpi->active_worst_quality < cpi->cq_target_quality) {
+ cpi->active_worst_quality = cpi->cq_target_quality;
+ }
+ }
+
+ /* Test to see if we have to drop a frame.
+ * The auto-drop frame code is only used in buffered mode.
+ * In unbuffered mode (eg. video conferencing) the decision to
+ * code or drop a frame is made outside the codec in response to
+ * real world comms or buffer considerations.
+ */
+ if (cpi->drop_frames_allowed &&
+ (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
+ ((cpi->common.frame_type != KEY_FRAME))) {
+ /* Check for a buffer underrun crisis, in which case we have to
+ * drop a frame.
+ */
+ if (cpi->buffer_level < 0) {
+#if 0
+ FILE *f = fopen("dec.stt", "a");
+ fprintf(f, "%10d %10d %10d %10d ***** BUFFER EMPTY\n",
+ (int) cpi->common.current_video_frame,
+ cpi->decimation_factor, cpi->common.horiz_scale,
+ (cpi->buffer_level * 100) / cpi->oxcf.optimal_buffer_level);
+ fclose(f);
+#endif
+ cpi->drop_frame = 1;
+
+ /* Update the buffer level variable. */
+ cpi->bits_off_target += cpi->av_per_frame_bandwidth;
+ if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
+ cpi->bits_off_target = (int)cpi->oxcf.maximum_buffer_size;
+ }
+ cpi->buffer_level = cpi->bits_off_target;
+
+ if (cpi->oxcf.number_of_layers > 1) {
+ unsigned int i;
+
+ // Propagate bits saved by dropping the frame to higher layers.
+ for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
+ if (lc->bits_off_target > lc->maximum_buffer_size) {
+ lc->bits_off_target = lc->maximum_buffer_size;
+ }
+ lc->buffer_level = lc->bits_off_target;
+ }
+ }
+ }
+ }
+
+ /* Adjust target frame size for Golden Frames: */
+ if (cpi->oxcf.error_resilient_mode == 0 &&
+ (cpi->frames_till_gf_update_due == 0) && !cpi->drop_frame) {
+ if (!cpi->gf_update_onepass_cbr) {
+ int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME]
+ : cpi->oxcf.fixed_q;
+
+ int gf_frame_useage = 0; /* Golden frame usage since last GF */
+ int tot_mbs = cpi->recent_ref_frame_usage[INTRA_FRAME] +
+ cpi->recent_ref_frame_usage[LAST_FRAME] +
+ cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
+ cpi->recent_ref_frame_usage[ALTREF_FRAME];
+
+ int pct_gf_active = (100 * cpi->gf_active_count) /
+ (cpi->common.mb_rows * cpi->common.mb_cols);
+
+ if (tot_mbs) {
+ gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
+ cpi->recent_ref_frame_usage[ALTREF_FRAME]) *
+ 100 / tot_mbs;
+ }
+
+ if (pct_gf_active > gf_frame_useage) gf_frame_useage = pct_gf_active;
+
+ /* Is a fixed manual GF frequency being used? */
+ if (cpi->auto_gold) {
+ /* For one pass, throw a GF if recent frame intra usage is
+ * low or the GF usage is high
+ */
+ if ((cpi->pass == 0) &&
+ (cpi->this_frame_percent_intra < 15 || gf_frame_useage >= 5)) {
+ cpi->common.refresh_golden_frame = 1;
+
+ /* Two pass GF decision */
+ } else if (cpi->pass == 2) {
+ cpi->common.refresh_golden_frame = 1;
+ }
+ }
+
+#if 0
+
+ /* Debug stats */
+ if (0) {
+ FILE *f;
+
+ f = fopen("gf_useaget.stt", "a");
+ fprintf(f, " %8ld %10ld %10ld %10ld %10ld\n",
+ cpi->common.current_video_frame, cpi->gfu_boost,
+ GFQ_ADJUSTMENT, cpi->gfu_boost, gf_frame_useage);
+ fclose(f);
+ }
+
+#endif
+
+ if (cpi->common.refresh_golden_frame == 1) {
+#if 0
+
+ if (0) {
+ FILE *f;
+
+ f = fopen("GFexit.stt", "a");
+ fprintf(f, "%8ld GF coded\n", cpi->common.current_video_frame);
+ fclose(f);
+ }
+
+#endif
+
+ if (cpi->auto_adjust_gold_quantizer) {
+ calc_gf_params(cpi);
+ }
+
+ /* If we are using an alternate ref instead of a gf then do not
+ * apply the boost. It will instead be applied to the altref update
+ * (Jim's modified boost).
+ */
+ if (!cpi->source_alt_ref_active) {
+ if (cpi->oxcf.fixed_q < 0) {
+ if (cpi->pass == 2) {
+ /* The spend on the GF is defined in the two pass
+ * code for two pass encodes
+ */
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ } else {
+ int Boost = cpi->last_boost;
+ int frames_in_section = cpi->frames_till_gf_update_due + 1;
+ int allocation_chunks = (frames_in_section * 100) + (Boost - 100);
+ int bits_in_section = cpi->inter_frame_target * frames_in_section;
+
+ /* Normalize the boost and allocation chunks down to
+ * prevent overflow
+ */
+ while (Boost > 1000) {
+ Boost /= 2;
+ allocation_chunks /= 2;
+ }
+
+ /* Avoid loss of precision but avoid overflow */
+ if ((bits_in_section >> 7) > allocation_chunks) {
+ cpi->this_frame_target =
+ Boost * (bits_in_section / allocation_chunks);
+ } else {
+ cpi->this_frame_target =
+ (Boost * bits_in_section) / allocation_chunks;
+ }
+ }
+ } else {
+ cpi->this_frame_target =
+ (estimate_bits_at_q(1, Q, cpi->common.MBs, 1.0) *
+ cpi->last_boost) /
+ 100;
+ }
+ } else {
+ /* If there is an active ARF at this location use the minimum
+ * bits on this frame even if it is a constructed arf.
+ * The active maximum quantizer ensures that an appropriate
+ * number of bits will be spent if needed for constructed ARFs.
+ */
+ cpi->this_frame_target = 0;
+ }
+
+ cpi->current_gf_interval = cpi->frames_till_gf_update_due;
+ }
+ } else {
+ // Special case for 1 pass CBR: fixed gf period.
+ // TODO(marpan): Adjust this boost/interval logic.
+ // If gf_cbr_boost_pct is small (below threshold) set the flag
+ // gf_noboost_onepass_cbr = 1, which forces the gf to use the same
+ // rate correction factor as last.
+ cpi->gf_noboost_onepass_cbr = (cpi->oxcf.gf_cbr_boost_pct <= 100);
+ cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
+ // Skip this update if the zeromv_count is low.
+ if (cpi->zeromv_count > (cpi->common.MBs >> 1)) {
+ cpi->common.refresh_golden_frame = 1;
+ cpi->this_frame_target =
+ (cpi->this_frame_target * (100 + cpi->oxcf.gf_cbr_boost_pct)) / 100;
+ }
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ cpi->current_gf_interval = cpi->frames_till_gf_update_due;
+ }
+ }
+
+ cpi->per_frame_bandwidth = old_per_frame_bandwidth;
+}
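+
+/* Worked example of the golden frame allocation above, with hypothetical
+ * numbers: Boost = 200, frames_till_gf_update_due = 9, so
+ * frames_in_section = 10 and allocation_chunks = 10 * 100 + (200 - 100)
+ * = 1100. With inter_frame_target = 12000 bits, bits_in_section = 120000
+ * and the GF gets 200 * 120000 / 1100 ~= 21818 bits, while the implied
+ * share for each remaining frame is 100 * 120000 / 1100 ~= 10909 bits;
+ * the section still sums to ~120000 bits, so the boost redistributes
+ * bits within the section rather than adding any.
+ */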
+
+void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) {
+ int Q = cpi->common.base_qindex;
+ int correction_factor = 100;
+ double rate_correction_factor;
+ double adjustment_limit;
+
+ int projected_size_based_on_q = 0;
+
+ /* Clear down mmx registers to allow floating point in what follows */
+ vpx_clear_system_state();
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ rate_correction_factor = cpi->key_frame_rate_correction_factor;
+ } else {
+ if (cpi->oxcf.number_of_layers == 1 && !cpi->gf_noboost_onepass_cbr &&
+ (cpi->common.refresh_alt_ref_frame ||
+ cpi->common.refresh_golden_frame)) {
+ rate_correction_factor = cpi->gf_rate_correction_factor;
+ } else {
+ rate_correction_factor = cpi->rate_correction_factor;
+ }
+ }
+
+ /* Work out how big we would have expected the frame to be at this Q
+ * given the current correction factor. Stay in double to avoid int
+ * overflow when values are large
+ */
+ projected_size_based_on_q =
+ (int)(((.5 + rate_correction_factor *
+ vp8_bits_per_mb[cpi->common.frame_type][Q]) *
+ cpi->common.MBs) /
+ (1 << BPER_MB_NORMBITS));
+
+ /* Make some allowance for cpi->zbin_over_quant */
+ if (cpi->mb.zbin_over_quant > 0) {
+ int Z = cpi->mb.zbin_over_quant;
+ double Factor = 0.99;
+ double factor_adjustment = 0.01 / 256.0;
+
+ while (Z > 0) {
+ Z--;
+ projected_size_based_on_q = (int)(Factor * projected_size_based_on_q);
+ Factor += factor_adjustment;
+
+ if (Factor >= 0.999) Factor = 0.999;
+ }
+ }
+
+ /* Work out a size correction factor. */
+ if (projected_size_based_on_q > 0) {
+ correction_factor = (int)((100 * (int64_t)cpi->projected_frame_size) /
+ projected_size_based_on_q);
+ }
+
+ /* More heavily damped adjustment used if we have been oscillating
+ * either side of target
+ */
+ switch (damp_var) {
+ case 0: adjustment_limit = 0.75; break;
+ case 1: adjustment_limit = 0.375; break;
+ case 2:
+ default: adjustment_limit = 0.25; break;
+ }
+
+ if (correction_factor > 102) {
+ /* We are not already at the worst allowable quality */
+ correction_factor =
+ (int)(100.5 + ((correction_factor - 100) * adjustment_limit));
+ rate_correction_factor =
+ ((rate_correction_factor * correction_factor) / 100);
+
+ /* Keep rate_correction_factor within limits */
+ if (rate_correction_factor > MAX_BPB_FACTOR) {
+ rate_correction_factor = MAX_BPB_FACTOR;
+ }
+ } else if (correction_factor < 99) {
+ /* We are not already at the best allowable quality */
+ correction_factor =
+ (int)(100.5 - ((100 - correction_factor) * adjustment_limit));
+ rate_correction_factor =
+ ((rate_correction_factor * correction_factor) / 100);
+
+ /* Keep rate_correction_factor within limits */
+ if (rate_correction_factor < MIN_BPB_FACTOR) {
+ rate_correction_factor = MIN_BPB_FACTOR;
+ }
+ }
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ cpi->key_frame_rate_correction_factor = rate_correction_factor;
+ } else {
+ if (cpi->oxcf.number_of_layers == 1 && !cpi->gf_noboost_onepass_cbr &&
+ (cpi->common.refresh_alt_ref_frame ||
+ cpi->common.refresh_golden_frame)) {
+ cpi->gf_rate_correction_factor = rate_correction_factor;
+ } else {
+ cpi->rate_correction_factor = rate_correction_factor;
+ }
+ }
+}
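+
+/* Worked example of the update above, with hypothetical sizes: if
+ * projected_size_based_on_q = 12000 bits but the frame actually took
+ * projected_frame_size = 15000 bits, correction_factor = 125. With
+ * damp_var = 0 (adjustment_limit = 0.75) this is damped to
+ * (int)(100.5 + 25 * 0.75) = 119, so the stored rate correction factor
+ * grows by 19%, subject to the MIN/MAX_BPB_FACTOR clamps.
+ */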
+
+static int limit_q_cbr_inter(int last_q, int current_q) {
+ int limit_down = 12;
+ if (last_q - current_q > limit_down)
+ return (last_q - limit_down);
+ else
+ return current_q;
+}
+
+int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) {
+ int Q = cpi->active_worst_quality;
+
+ if (cpi->force_maxqp == 1) {
+ cpi->active_worst_quality = cpi->worst_quality;
+ return cpi->worst_quality;
+ }
+ /* Reset Zbin OQ value */
+ cpi->mb.zbin_over_quant = 0;
+
+ if (cpi->oxcf.fixed_q >= 0) {
+ Q = cpi->oxcf.fixed_q;
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ Q = cpi->oxcf.key_q;
+ } else if (cpi->oxcf.number_of_layers == 1 &&
+ cpi->common.refresh_alt_ref_frame &&
+ !cpi->gf_noboost_onepass_cbr) {
+ Q = cpi->oxcf.alt_q;
+ } else if (cpi->oxcf.number_of_layers == 1 &&
+ cpi->common.refresh_golden_frame &&
+ !cpi->gf_noboost_onepass_cbr) {
+ Q = cpi->oxcf.gold_q;
+ }
+ } else {
+ int i;
+ int last_error = INT_MAX;
+ int target_bits_per_mb;
+ int bits_per_mb_at_this_q;
+ double correction_factor;
+
+ /* Select the appropriate correction factor based upon type of frame. */
+ if (cpi->common.frame_type == KEY_FRAME) {
+ correction_factor = cpi->key_frame_rate_correction_factor;
+ } else {
+ if (cpi->oxcf.number_of_layers == 1 && !cpi->gf_noboost_onepass_cbr &&
+ (cpi->common.refresh_alt_ref_frame ||
+ cpi->common.refresh_golden_frame)) {
+ correction_factor = cpi->gf_rate_correction_factor;
+ } else {
+ correction_factor = cpi->rate_correction_factor;
+ }
+ }
+
+ /* Calculate required scaling factor based on target frame size and
+ * size of frame produced using previous Q
+ */
+ if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS)) {
+ /* Case where we would overflow int */
+ target_bits_per_mb = (target_bits_per_frame / cpi->common.MBs)
+ << BPER_MB_NORMBITS;
+ } else {
+ target_bits_per_mb =
+ (target_bits_per_frame << BPER_MB_NORMBITS) / cpi->common.MBs;
+ }
+
+ i = cpi->active_best_quality;
+
+ do {
+ bits_per_mb_at_this_q =
+ (int)(.5 +
+ correction_factor * vp8_bits_per_mb[cpi->common.frame_type][i]);
+
+ if (bits_per_mb_at_this_q <= target_bits_per_mb) {
+ if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error) {
+ Q = i;
+ } else {
+ Q = i - 1;
+ }
+
+ break;
+ } else {
+ last_error = bits_per_mb_at_this_q - target_bits_per_mb;
+ }
+ } while (++i <= cpi->active_worst_quality);
+
+ /* If we are at MAXQ then enable Q over-run which seeks to claw
+ * back additional bits through things like the RD multiplier
+ * and zero bin size.
+ */
+ if (Q >= MAXQ) {
+ int zbin_oqmax;
+
+ double Factor = 0.99;
+ double factor_adjustment = 0.01 / 256.0;
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ zbin_oqmax = 0;
+ } else if (cpi->oxcf.number_of_layers == 1 &&
+ !cpi->gf_noboost_onepass_cbr &&
+ (cpi->common.refresh_alt_ref_frame ||
+ (cpi->common.refresh_golden_frame &&
+ !cpi->source_alt_ref_active))) {
+ zbin_oqmax = 16;
+ } else {
+ zbin_oqmax = ZBIN_OQ_MAX;
+ }
+
+ /*{
+ double Factor =
+ (double)target_bits_per_mb/(double)bits_per_mb_at_this_q;
+ double Oq;
+
+ Factor = Factor/1.2683;
+
+ Oq = pow( Factor, (1.0/-0.165) );
+
+ if ( Oq > zbin_oqmax )
+ Oq = zbin_oqmax;
+
+ cpi->zbin_over_quant = (int)Oq;
+ }*/
+
+ /* Each increment in the zbin is assumed to have a fixed effect
+ * on bitrate. This is of course not true. The effect will be
+ * highly clip dependent and may well have sudden steps. The
+ * idea here is to achieve higher effective quantizers than the
+ * normal maximum by expanding the zero bin and hence
+ * decreasing the number of low magnitude non-zero coefficients.
+ */
+ while (cpi->mb.zbin_over_quant < zbin_oqmax) {
+ cpi->mb.zbin_over_quant++;
+
+ if (cpi->mb.zbin_over_quant > zbin_oqmax) {
+ cpi->mb.zbin_over_quant = zbin_oqmax;
+ }
+
+ /* Adjust bits_per_mb_at_this_q estimate */
+ bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q);
+ Factor += factor_adjustment;
+
+ if (Factor >= 0.999) Factor = 0.999;
+
+ /* Break out if we get down to the target rate */
+ if (bits_per_mb_at_this_q <= target_bits_per_mb) break;
+ }
+ }
+ }
+
+ // Limit decrease in Q for 1 pass CBR screen content mode.
+ if (cpi->common.frame_type != KEY_FRAME && cpi->pass == 0 &&
+ cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
+ cpi->oxcf.screen_content_mode)
+ Q = limit_q_cbr_inter(cpi->last_q[1], Q);
+
+ return Q;
+}
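+
+/* Worked example of the Q search above, assuming BPER_MB_NORMBITS = 9:
+ * for target_bits_per_frame = 12000 and 300 MBs, target_bits_per_mb =
+ * (12000 << 9) / 300 = 20480. The loop then walks i up from
+ * active_best_quality until the correction-factor-scaled table value
+ * vp8_bits_per_mb[frame_type][i] first drops to or below that target,
+ * returning whichever of i and i - 1 lands closer to the target rate.
+ */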
+
+static int estimate_keyframe_frequency(VP8_COMP *cpi) {
+ int i;
+
+ /* Average key frame frequency */
+ int av_key_frame_frequency = 0;
+
+ /* First key frame at start of sequence is a special case. We have no
+ * frequency data.
+ */
+ if (cpi->key_frame_count == 1) {
+ /* Assume a default of 1 kf every 2 seconds, or the max kf interval,
+ * whichever is smaller.
+ */
+ int key_freq = cpi->oxcf.key_freq > 0 ? cpi->oxcf.key_freq : 1;
+ av_key_frame_frequency = 1 + (int)cpi->output_framerate * 2;
+
+ if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq) {
+ av_key_frame_frequency = key_freq;
+ }
+
+ cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1] =
+ av_key_frame_frequency;
+ } else {
+ unsigned int total_weight = 0;
+ int last_kf_interval =
+ (cpi->frames_since_key > 0) ? cpi->frames_since_key : 1;
+
+ /* reset keyframe context and calculate weighted average of last
+ * KEY_FRAME_CONTEXT keyframes
+ */
+ for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
+ if (i < KEY_FRAME_CONTEXT - 1) {
+ cpi->prior_key_frame_distance[i] = cpi->prior_key_frame_distance[i + 1];
+ } else {
+ cpi->prior_key_frame_distance[i] = last_kf_interval;
+ }
+
+ av_key_frame_frequency +=
+ prior_key_frame_weight[i] * cpi->prior_key_frame_distance[i];
+ total_weight += prior_key_frame_weight[i];
+ }
+
+ av_key_frame_frequency /= total_weight;
+ }
+ // TODO(marpan): Given the checks above, |av_key_frame_frequency|
+ // should always be above 0. But for now we keep the sanity check in.
+ if (av_key_frame_frequency == 0) av_key_frame_frequency = 1;
+ return av_key_frame_frequency;
+}
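+
+/* Worked example of the weighted average above, assuming the usual
+ * weights prior_key_frame_weight[] = { 1, 2, 3, 4, 5 } declared earlier
+ * in this file: for recent distances { 30, 30, 60, 30, 30 } the sum is
+ * 1 * 30 + 2 * 30 + 3 * 60 + 4 * 30 + 5 * 30 = 540 with total_weight =
+ * 15, giving av_key_frame_frequency = 36; the most recent intervals
+ * dominate the estimate.
+ */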
+
+void vp8_adjust_key_frame_context(VP8_COMP *cpi) {
+ /* Clear down mmx registers to allow floating point in what follows */
+ vpx_clear_system_state();
+
+ /* Do we have any key frame overspend to recover? */
+ /* Two-pass overspend handled elsewhere. */
+ if ((cpi->pass != 2) &&
+ (cpi->projected_frame_size > cpi->per_frame_bandwidth)) {
+ int overspend;
+
+ /* Update the count of key frame overspend to be recovered in
+ * subsequent frames. A portion of the KF overspend is treated as gf
+ * overspend (and hence recovered more quickly) as the kf is also a
+ * gf. Otherwise the few frames following each kf tend to get more
+ * bits allocated than those following other gfs.
+ */
+ overspend = (cpi->projected_frame_size - cpi->per_frame_bandwidth);
+
+ if (cpi->oxcf.number_of_layers > 1) {
+ cpi->kf_overspend_bits += overspend;
+ } else {
+ cpi->kf_overspend_bits += overspend * 7 / 8;
+ cpi->gf_overspend_bits += overspend * 1 / 8;
+ }
+
+ /* Work out how much to try and recover per frame. */
+ cpi->kf_bitrate_adjustment =
+ cpi->kf_overspend_bits / estimate_keyframe_frequency(cpi);
+ }
+
+ cpi->frames_since_key = 0;
+ cpi->key_frame_count++;
+}
+
+void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit,
+ int *frame_over_shoot_limit) {
+ /* Set up bounds on acceptable frame size: */
+ if (cpi->oxcf.fixed_q >= 0) {
+ /* Fixed Q scenario: the frame size is never out of range of the
+ * target (there is no target!)
+ */
+ *frame_under_shoot_limit = 0;
+ *frame_over_shoot_limit = INT_MAX;
+ } else {
+ const int64_t this_frame_target = cpi->this_frame_target;
+ int64_t over_shoot_limit, under_shoot_limit;
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ over_shoot_limit = this_frame_target * 9 / 8;
+ under_shoot_limit = this_frame_target * 7 / 8;
+ } else {
+ if (cpi->oxcf.number_of_layers > 1 || cpi->common.refresh_alt_ref_frame ||
+ cpi->common.refresh_golden_frame) {
+ over_shoot_limit = this_frame_target * 9 / 8;
+ under_shoot_limit = this_frame_target * 7 / 8;
+ } else {
+ /* For CBR take buffer fullness into account */
+ if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+ if (cpi->buffer_level >= ((cpi->oxcf.optimal_buffer_level +
+ cpi->oxcf.maximum_buffer_size) >>
+ 1)) {
+ /* Buffer is too full so relax overshoot and tighten
+ * undershoot
+ */
+ over_shoot_limit = this_frame_target * 12 / 8;
+ under_shoot_limit = this_frame_target * 6 / 8;
+ } else if (cpi->buffer_level <=
+ (cpi->oxcf.optimal_buffer_level >> 1)) {
+ /* Buffer is too low so relax undershoot and tighten
+ * overshoot
+ */
+ over_shoot_limit = this_frame_target * 10 / 8;
+ under_shoot_limit = this_frame_target * 4 / 8;
+ } else {
+ over_shoot_limit = this_frame_target * 11 / 8;
+ under_shoot_limit = this_frame_target * 5 / 8;
+ }
+ }
+ /* VBR and CQ mode */
+ /* Note that tighter restrictions here can help quality
+ * but hurt encode speed
+ */
+ else {
+ /* Strong overshoot limit for constrained quality */
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ over_shoot_limit = this_frame_target * 11 / 8;
+ under_shoot_limit = this_frame_target * 2 / 8;
+ } else {
+ over_shoot_limit = this_frame_target * 11 / 8;
+ under_shoot_limit = this_frame_target * 5 / 8;
+ }
+ }
+ }
+ }
+
+ /* For very small rate targets where the fractional adjustment
+ * (e.g. * 7/8) may be tiny, make sure there is at least a minimum
+ * range.
+ */
+ over_shoot_limit += 200;
+ under_shoot_limit -= 200;
+ if (under_shoot_limit < 0) under_shoot_limit = 0;
+ if (under_shoot_limit > INT_MAX) under_shoot_limit = INT_MAX;
+ if (over_shoot_limit > INT_MAX) over_shoot_limit = INT_MAX;
+ *frame_under_shoot_limit = (int)under_shoot_limit;
+ *frame_over_shoot_limit = (int)over_shoot_limit;
+ }
+}
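+
+/* Worked example of the bounds above, for a hypothetical key frame
+ * target of 20000 bits: over_shoot_limit = 20000 * 9 / 8 + 200 = 22700
+ * and under_shoot_limit = 20000 * 7 / 8 - 200 = 17300. The fixed
+ * +/- 200 bit margin keeps the window non-degenerate at very small
+ * rate targets.
+ */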
+
+/* return of 0 means drop frame */
+int vp8_pick_frame_size(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ if (cm->frame_type == KEY_FRAME) {
+ calc_iframe_target_size(cpi);
+ } else {
+ calc_pframe_target_size(cpi);
+
+ /* Check if we're dropping the frame: */
+ if (cpi->drop_frame) {
+ cpi->drop_frame = 0;
+ return 0;
+ }
+ }
+ return 1;
+}
+// If this just-encoded frame (mcomp/transform/quant, but before loopfilter
+// and pack_bitstream) has large overshoot, and was not encoded close to the
+// max QP, then drop this frame and force the next frame to be encoded at max
+// QP. Allow this for screen_content_mode = 2, or if dropping frames is
+// allowed.
+// TODO(marpan): Should do this exit condition during the encode_frame
+// (i.e., halfway during the encoding of the frame) to save cycles.
+int vp8_drop_encodedframe_overshoot(VP8_COMP *cpi, int Q) {
+ int force_drop_overshoot = 0;
+#if CONFIG_MULTI_RES_ENCODING
+ // Only check for dropping due to overshoot on the lowest stream.
+ // If the lowest stream of the multi-res encoding was dropped due to
+ // overshoot, then force dropping on all upper layer streams
+ // (mr_encoder_id > 0).
+ LOWER_RES_FRAME_INFO *low_res_frame_info =
+ (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
+ if (cpi->oxcf.mr_total_resolutions > 1 && cpi->oxcf.mr_encoder_id > 0) {
+ force_drop_overshoot = low_res_frame_info->is_frame_dropped_overshoot_maxqp;
+ if (!force_drop_overshoot) {
+ cpi->force_maxqp = 0;
+ cpi->frames_since_last_drop_overshoot++;
+ return 0;
+ }
+ }
+#endif
+ if (cpi->common.frame_type != KEY_FRAME &&
+ (cpi->oxcf.screen_content_mode == 2 ||
+ (cpi->drop_frames_allowed &&
+ (force_drop_overshoot ||
+ (cpi->rate_correction_factor < (8.0f * MIN_BPB_FACTOR) &&
+ cpi->frames_since_last_drop_overshoot > (int)cpi->framerate))))) {
+ // Note: the "projected_frame_size" from encode_frame() only gives estimate
+ // of mode/motion vector rate (in non-rd mode): so below we only require
+ // that projected_frame_size is somewhat greater than per-frame-bandwidth,
+ // but add additional condition with high threshold on prediction residual.
+
+ // QP threshold: only allow dropping if we are not close to qp_max.
+ int thresh_qp = 3 * cpi->worst_quality >> 2;
+ // Rate threshold, in bytes.
+ int thresh_rate = 2 * (cpi->av_per_frame_bandwidth >> 3);
+ // Threshold for the average (over all macroblocks) of the pixel-sum
+ // residual error over 16x16 block.
+ int thresh_pred_err_mb = (200 << 4);
+ int pred_err_mb = (int)(cpi->mb.prediction_error / cpi->common.MBs);
+ // Reduce/ignore thresh_rate if pred_err_mb is much larger than its
+ // threshold, to give more weight to the pred_err metric for overshoot
+ // detection.
+ if (cpi->drop_frames_allowed && pred_err_mb > (thresh_pred_err_mb << 4))
+ thresh_rate = thresh_rate >> 3;
+ if ((Q < thresh_qp && cpi->projected_frame_size > thresh_rate &&
+ pred_err_mb > thresh_pred_err_mb &&
+ pred_err_mb > 2 * cpi->last_pred_err_mb) ||
+ force_drop_overshoot) {
+ unsigned int i;
+ double new_correction_factor;
+ int target_bits_per_mb;
+ const int target_size = cpi->av_per_frame_bandwidth;
+ // Flag to indicate we will force next frame to be encoded at max QP.
+ cpi->force_maxqp = 1;
+ // Reset the buffer levels.
+ cpi->buffer_level = cpi->oxcf.optimal_buffer_level;
+ cpi->bits_off_target = cpi->oxcf.optimal_buffer_level;
+ // Compute a new rate correction factor, corresponding to the current
+ // target frame size and max_QP, and adjust the rate correction factor
+ // upwards, if needed.
+ // This is to prevent a bad state where the re-encoded frame at max_QP
+ // undershoots significantly, and then we end up dropping every other
+ // frame because the QP/rate_correction_factor may have been too low
+ // before the drop and then takes too long to come up.
+ if (target_size >= (INT_MAX >> BPER_MB_NORMBITS)) {
+ target_bits_per_mb = (target_size / cpi->common.MBs)
+ << BPER_MB_NORMBITS;
+ } else {
+ target_bits_per_mb =
+ (target_size << BPER_MB_NORMBITS) / cpi->common.MBs;
+ }
+ // Rate correction factor based on target_size_per_mb and max_QP.
+ new_correction_factor =
+ (double)target_bits_per_mb /
+ (double)vp8_bits_per_mb[INTER_FRAME][cpi->worst_quality];
+ if (new_correction_factor > cpi->rate_correction_factor) {
+ cpi->rate_correction_factor =
+ VPXMIN(2.0 * cpi->rate_correction_factor, new_correction_factor);
+ }
+ if (cpi->rate_correction_factor > MAX_BPB_FACTOR) {
+ cpi->rate_correction_factor = MAX_BPB_FACTOR;
+ }
+ // Drop this frame: update frame counters.
+ cpi->common.current_video_frame++;
+ cpi->frames_since_key++;
+ cpi->temporal_pattern_counter++;
+ cpi->frames_since_last_drop_overshoot = 0;
+ if (cpi->oxcf.number_of_layers > 1) {
+ // Set max_qp and rate correction for all temporal layers if overshoot
+ // is detected.
+ for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ lc->force_maxqp = 1;
+ lc->frames_since_last_drop_overshoot = 0;
+ lc->rate_correction_factor = cpi->rate_correction_factor;
+ }
+ }
+#if CONFIG_MULTI_RES_ENCODING
+ if (cpi->oxcf.mr_total_resolutions > 1)
+ low_res_frame_info->is_frame_dropped_overshoot_maxqp = 1;
+#endif
+ return 1;
+ }
+ cpi->force_maxqp = 0;
+ cpi->frames_since_last_drop_overshoot++;
+#if CONFIG_MULTI_RES_ENCODING
+ if (cpi->oxcf.mr_total_resolutions > 1)
+ low_res_frame_info->is_frame_dropped_overshoot_maxqp = 0;
+#endif
+ return 0;
+ }
+ cpi->force_maxqp = 0;
+ cpi->frames_since_last_drop_overshoot++;
+#if CONFIG_MULTI_RES_ENCODING
+ if (cpi->oxcf.mr_total_resolutions > 1)
+ low_res_frame_info->is_frame_dropped_overshoot_maxqp = 0;
+#endif
+ return 0;
+}
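+
+/* Worked sketch of the correction reset above, with hypothetical
+ * values: if target_bits_per_mb implies new_correction_factor = 0.8
+ * while the current rate_correction_factor is 0.3, the factor is
+ * raised to VPXMIN(2.0 * 0.3, 0.8) = 0.6 (and capped at
+ * MAX_BPB_FACTOR), so the forced max-QP encode of the next frame
+ * starts from a less pessimistic rate estimate.
+ */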
diff --git a/media/libvpx/libvpx/vp8/encoder/ratectrl.h b/media/libvpx/libvpx/vp8/encoder/ratectrl.h
new file mode 100644
index 0000000000..844c72cb86
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/ratectrl.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_RATECTRL_H_
+#define VPX_VP8_ENCODER_RATECTRL_H_
+
+#include "onyx_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void vp8_save_coding_context(VP8_COMP *cpi);
+extern void vp8_restore_coding_context(VP8_COMP *cpi);
+
+extern void vp8_setup_key_frame(VP8_COMP *cpi);
+extern void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var);
+extern int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame);
+extern void vp8_adjust_key_frame_context(VP8_COMP *cpi);
+extern void vp8_compute_frame_size_bounds(VP8_COMP *cpi,
+ int *frame_under_shoot_limit,
+ int *frame_over_shoot_limit);
+
+/* return of 0 means drop frame */
+extern int vp8_pick_frame_size(VP8_COMP *cpi);
+
+extern int vp8_drop_encodedframe_overshoot(VP8_COMP *cpi, int Q);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_RATECTRL_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/rdopt.c b/media/libvpx/libvpx/vp8/encoder/rdopt.c
new file mode 100644
index 0000000000..bbddacf8f0
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/rdopt.c
@@ -0,0 +1,2394 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <math.h>
+#include <limits.h>
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "encodeframe.h"
+#include "tokenize.h"
+#include "treewriter.h"
+#include "onyx_int.h"
+#include "modecosts.h"
+#include "encodeintra.h"
+#include "pickinter.h"
+#include "vp8/common/common.h"
+#include "vp8/common/entropymode.h"
+#include "vp8/common/reconinter.h"
+#include "vp8/common/reconintra.h"
+#include "vp8/common/reconintra4x4.h"
+#include "vp8/common/findnearmv.h"
+#include "vp8/common/quant_common.h"
+#include "encodemb.h"
+#include "vp8/encoder/quantize.h"
+#include "vpx_dsp/variance.h"
+#include "vpx_ports/system_state.h"
+#include "mcomp.h"
+#include "rdopt.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/systemdependent.h"
+#if CONFIG_TEMPORAL_DENOISING
+#include "denoising.h"
+#endif
+extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
+
+#define MAXF(a, b) (((a) > (b)) ? (a) : (b))
+
+typedef struct rate_distortion_struct {
+ int rate2;
+ int rate_y;
+ int rate_uv;
+ int distortion2;
+ int distortion_uv;
+} RATE_DISTORTION;
+
+typedef struct best_mode_struct {
+ int yrd;
+ int rd;
+ int intra_rd;
+ MB_MODE_INFO mbmode;
+ union b_mode_info bmodes[16];
+ PARTITION_INFO partition;
+} BEST_MODE;
+
+static const int auto_speed_thresh[17] = { 1000, 200, 150, 130, 150, 125,
+ 120, 115, 115, 115, 115, 115,
+ 115, 115, 115, 115, 105 };
+
+const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES] = {
+ ZEROMV, DC_PRED,
+
+ NEARESTMV, NEARMV,
+
+ ZEROMV, NEARESTMV,
+
+ ZEROMV, NEARESTMV,
+
+ NEARMV, NEARMV,
+
+ V_PRED, H_PRED, TM_PRED,
+
+ NEWMV, NEWMV, NEWMV,
+
+ SPLITMV, SPLITMV, SPLITMV,
+
+ B_PRED,
+};
+
+/* This table determines the search order in reference frame priority order,
+ * which may not necessarily match INTRA, LAST, GOLDEN, ARF
+ */
+const int vp8_ref_frame_order[MAX_MODES] = {
+ 1, 0,
+
+ 1, 1,
+
+ 2, 2,
+
+ 3, 3,
+
+ 2, 3,
+
+ 0, 0, 0,
+
+ 1, 2, 3,
+
+ 1, 2, 3,
+
+ 0,
+};
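+
+/* The two tables are indexed in lockstep: for example, mode 0 searches
+ * ZEROMV against vp8_ref_frame_order[0] = 1 (LAST_FRAME), and the
+ * final B_PRED entry pairs with 0 (INTRA_FRAME), assuming the usual
+ * reference frame numbering INTRA = 0, LAST = 1, GOLDEN = 2, ARF = 3.
+ */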
+
+static void fill_token_costs(
+ int c[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS],
+ const vp8_prob p[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES]) {
+ int i, j, k;
+
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ /* pt = 0 means the previous token was a zero, and an EOB can not
+ * follow a zero once past the first coefficient position (band > 1
+ * for block type 0, band > 0 otherwise), so cost the tokens
+ * without the EOB branch.
+ */
+ if (k == 0 && j > (i == 0)) {
+ vp8_cost_tokens2(c[i][j][k], p[i][j][k], vp8_coef_tree, 2);
+ } else {
+ vp8_cost_tokens(c[i][j][k], p[i][j][k], vp8_coef_tree);
+ }
+ }
+ }
+ }
+}
+
+static const int rd_iifactor[32] = { 4, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+/* values are now correlated to quantizer */
+static const int sad_per_bit16lut[QINDEX_RANGE] = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11,
+ 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14
+};
+static const int sad_per_bit4lut[QINDEX_RANGE] = {
+ 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+ 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
+ 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20, 20, 20,
+};
+
+void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex) {
+ cpi->mb.sadperbit16 = sad_per_bit16lut[QIndex];
+ cpi->mb.sadperbit4 = sad_per_bit4lut[QIndex];
+}
+
+void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue) {
+ int q;
+ int i;
+ double capped_q = (Qvalue < 160) ? (double)Qvalue : 160.0;
+ double rdconst = 2.80;
+
+ vpx_clear_system_state();
+
+ /* Further tests required to see if optimum is different
+ * for key frames, golden frames and arf frames.
+ */
+ cpi->RDMULT = (int)(rdconst * (capped_q * capped_q));
+
+ /* Extend the rate multiplier alongside quantizer zbin increases */
+ if (cpi->mb.zbin_over_quant > 0) {
+ double oq_factor;
+ double modq;
+
+ /* Experimental code using the same basic equation as used for Q above
+ * The units of cpi->mb.zbin_over_quant are 1/128 of Q bin size
+ */
+ oq_factor = 1.0 + ((double)0.0015625 * cpi->mb.zbin_over_quant);
+ modq = (int)((double)capped_q * oq_factor);
+ cpi->RDMULT = (int)(rdconst * (modq * modq));
+ }
+
+ if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
+ if (cpi->twopass.next_iiratio > 31) {
+ cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
+ } else {
+ cpi->RDMULT +=
+ (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
+ }
+ }
+
+ cpi->mb.errorperbit = (cpi->RDMULT / 110);
+ cpi->mb.errorperbit += (cpi->mb.errorperbit == 0);
+
+ vp8_set_speed_features(cpi);
+
+ for (i = 0; i < MAX_MODES; ++i) {
+ x->mode_test_hit_counts[i] = 0;
+ }
+
+ q = (int)pow(Qvalue, 1.25);
+
+ if (q < 8) q = 8;
+
+ if (cpi->RDMULT > 1000) {
+ cpi->RDDIV = 1;
+ cpi->RDMULT /= 100;
+
+ for (i = 0; i < MAX_MODES; ++i) {
+ if (cpi->sf.thresh_mult[i] < INT_MAX) {
+ x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
+ } else {
+ x->rd_threshes[i] = INT_MAX;
+ }
+
+ cpi->rd_baseline_thresh[i] = x->rd_threshes[i];
+ }
+ } else {
+ cpi->RDDIV = 100;
+
+ for (i = 0; i < MAX_MODES; ++i) {
+ if (cpi->sf.thresh_mult[i] < (INT_MAX / q)) {
+ x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
+ } else {
+ x->rd_threshes[i] = INT_MAX;
+ }
+
+ cpi->rd_baseline_thresh[i] = x->rd_threshes[i];
+ }
+ }
+
+ {
+ /* build token cost array for the type of frame we have now */
+ FRAME_CONTEXT *l = &cpi->lfc_n;
+
+ if (cpi->common.refresh_alt_ref_frame) {
+ l = &cpi->lfc_a;
+ } else if (cpi->common.refresh_golden_frame) {
+ l = &cpi->lfc_g;
+ }
+
+ fill_token_costs(cpi->mb.token_costs,
+ (const vp8_prob(*)[8][3][11])l->coef_probs);
+ /*
+ fill_token_costs(
+ cpi->mb.token_costs,
+ (const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs);
+ */
+
+ /* TODO(jbb): make these mode costs depend on last, alt or gold too. */
+ vp8_init_mode_costs(cpi);
+ }
+}
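+
+/* Worked example of the RD multiplier above, for a hypothetical
+ * Qvalue = 100: RDMULT = (int)(2.80 * 100 * 100) = 28000. Since this
+ * exceeds 1000, RDDIV is set to 1 and RDMULT is rescaled to 280,
+ * keeping the rate/distortion trade-off in a fixed-point friendly
+ * range.
+ */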
+
+void vp8_auto_select_speed(VP8_COMP *cpi) {
+ int milliseconds_for_compress = (int)(1000000 / cpi->framerate);
+
+ milliseconds_for_compress =
+ milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;
+
+#if 0
+
+ if (0)
+ {
+ FILE *f;
+
+ f = fopen("speed.stt", "a");
+ fprintf(f, " %8ld %10ld %10ld %10ld\n",
+ cpi->common.current_video_frame, cpi->Speed, milliseconds_for_compress, cpi->avg_pick_mode_time);
+ fclose(f);
+ }
+
+#endif
+
+ if (cpi->avg_pick_mode_time < milliseconds_for_compress &&
+ (cpi->avg_encode_time - cpi->avg_pick_mode_time) <
+ milliseconds_for_compress) {
+ if (cpi->avg_pick_mode_time == 0) {
+ cpi->Speed = 4;
+ } else {
+ if (milliseconds_for_compress * 100 < cpi->avg_encode_time * 95) {
+ cpi->Speed += 2;
+ cpi->avg_pick_mode_time = 0;
+ cpi->avg_encode_time = 0;
+
+ if (cpi->Speed > 16) {
+ cpi->Speed = 16;
+ }
+ }
+
+ if (milliseconds_for_compress * 100 >
+ cpi->avg_encode_time * auto_speed_thresh[cpi->Speed]) {
+ cpi->Speed -= 1;
+ cpi->avg_pick_mode_time = 0;
+ cpi->avg_encode_time = 0;
+
+ /* In real-time mode, cpi->Speed is in [4, 16]. */
+ if (cpi->Speed < 4) {
+ cpi->Speed = 4;
+ }
+ }
+ }
+ } else {
+ cpi->Speed += 4;
+
+ if (cpi->Speed > 16) cpi->Speed = 16;
+
+ cpi->avg_pick_mode_time = 0;
+ cpi->avg_encode_time = 0;
+ }
+}
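+
+/* Note that despite its name, milliseconds_for_compress above appears
+ * to be in microseconds: at 30 fps the per-frame budget is
+ * 1000000 / 30 ~= 33333, further scaled by (16 - cpu_used) / 16 before
+ * being compared against the running encode-time averages.
+ */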
+
+int vp8_block_error_c(short *coeff, short *dqcoeff) {
+ int i;
+ int error = 0;
+
+ for (i = 0; i < 16; ++i) {
+ int this_diff = coeff[i] - dqcoeff[i];
+ error += this_diff * this_diff;
+ }
+
+ return error;
+}
+
+int vp8_mbblock_error_c(MACROBLOCK *mb, int dc) {
+ BLOCK *be;
+ BLOCKD *bd;
+ int i, j;
+ int berror, error = 0;
+
+ for (i = 0; i < 16; ++i) {
+ be = &mb->block[i];
+ bd = &mb->e_mbd.block[i];
+
+ berror = 0;
+
+ for (j = dc; j < 16; ++j) {
+ int this_diff = be->coeff[j] - bd->dqcoeff[j];
+ berror += this_diff * this_diff;
+ }
+
+ error += berror;
+ }
+
+ return error;
+}
+
+int vp8_mbuverror_c(MACROBLOCK *mb) {
+ BLOCK *be;
+ BLOCKD *bd;
+
+ int i;
+ int error = 0;
+
+ for (i = 16; i < 24; ++i) {
+ be = &mb->block[i];
+ bd = &mb->e_mbd.block[i];
+
+ error += vp8_block_error_c(be->coeff, bd->dqcoeff);
+ }
+
+ return error;
+}
+
+int VP8_UVSSE(MACROBLOCK *x) {
+ unsigned char *uptr, *vptr;
+ unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
+ unsigned char *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src);
+ int uv_stride = x->block[16].src_stride;
+
+ unsigned int sse1 = 0;
+ unsigned int sse2 = 0;
+ int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row;
+ int mv_col = x->e_mbd.mode_info_context->mbmi.mv.as_mv.col;
+ int offset;
+ int pre_stride = x->e_mbd.pre.uv_stride;
+
+ if (mv_row < 0) {
+ mv_row -= 1;
+ } else {
+ mv_row += 1;
+ }
+
+ if (mv_col < 0) {
+ mv_col -= 1;
+ } else {
+ mv_col += 1;
+ }
+
+ mv_row /= 2;
+ mv_col /= 2;
+
+ offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
+ uptr = x->e_mbd.pre.u_buffer + offset;
+ vptr = x->e_mbd.pre.v_buffer + offset;
+
+ if ((mv_row | mv_col) & 7) {
+ vpx_sub_pixel_variance8x8(uptr, pre_stride, mv_col & 7, mv_row & 7,
+ upred_ptr, uv_stride, &sse2);
+ vpx_sub_pixel_variance8x8(vptr, pre_stride, mv_col & 7, mv_row & 7,
+ vpred_ptr, uv_stride, &sse1);
+ sse2 += sse1;
+ } else {
+ vpx_variance8x8(uptr, pre_stride, upred_ptr, uv_stride, &sse2);
+ vpx_variance8x8(vptr, pre_stride, vpred_ptr, uv_stride, &sse1);
+ sse2 += sse1;
+ }
+ return sse2;
+}
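+
+/* Note on the chroma MV derivation above: rounding the luma component
+ * away from zero before halving keeps the result symmetric around
+ * zero, e.g. a row component of +5 becomes (5 + 1) / 2 = 3 while -5
+ * becomes (-5 - 1) / 2 = -3; the >> 3 and & 7 operations then split
+ * the full-pel offset from the eighth-pel interpolation phase.
+ */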
+
+static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l) {
+ int c = !type; /* start at coef 0, unless Y with Y2 */
+ int eob = (int)(*b->eob);
+ int pt; /* surrounding block/prev coef predictor */
+ int cost = 0;
+ short *qcoeff_ptr = b->qcoeff;
+
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ assert(eob <= 16);
+ for (; c < eob; ++c) {
+ const int v = qcoeff_ptr[vp8_default_zig_zag1d[c]];
+ const int t = vp8_dct_value_tokens_ptr[v].Token;
+ cost += mb->token_costs[type][vp8_coef_bands[c]][pt][t];
+ cost += vp8_dct_value_cost_ptr[v];
+ pt = vp8_prev_token_class[t];
+ }
+
+ if (c < 16) {
+ cost += mb->token_costs[type][vp8_coef_bands[c]][pt][DCT_EOB_TOKEN];
+ }
+
+ pt = (c != !type); /* is eob past the first coefficient? */
+ *a = *l = pt;
+
+ return cost;
+}
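+
+/* The cost accumulated above feeds the RDCOST() macro (defined in
+ * rdopt.h as ((128 + R * RM) >> 8) + DM * D). Hypothetical worked
+ * example: rate = 512, RDMULT = 280, RDDIV = 1, distortion = 100 gives
+ * ((128 + 512 * 280) >> 8) + 1 * 100 = 560 + 100 = 660, so larger
+ * RDMULT values penalize rate more heavily relative to distortion.
+ */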
+
+static int vp8_rdcost_mby(MACROBLOCK *mb) {
+ int cost = 0;
+ int b;
+ MACROBLOCKD *x = &mb->e_mbd;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+
+ memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+
+ for (b = 0; b < 16; ++b) {
+ cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
+ ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ }
+
+ cost += cost_coeffs(mb, x->block + 24, PLANE_TYPE_Y2,
+ ta + vp8_block2above[24], tl + vp8_block2left[24]);
+
+ return cost;
+}
+
+static void macro_block_yrd(MACROBLOCK *mb, int *Rate, int *Distortion) {
+ int b;
+ MACROBLOCKD *const x = &mb->e_mbd;
+ BLOCK *const mb_y2 = mb->block + 24;
+ BLOCKD *const x_y2 = x->block + 24;
+ short *Y2DCPtr = mb_y2->src_diff;
+ BLOCK *beptr;
+ int d;
+
+ vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src),
+ mb->block[0].src_stride, mb->e_mbd.predictor, 16);
+
+ /* Fdct and building the 2nd order block */
+ for (beptr = mb->block; beptr < mb->block + 16; beptr += 2) {
+ mb->short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
+ *Y2DCPtr++ = beptr->coeff[0];
+ *Y2DCPtr++ = beptr->coeff[16];
+ }
+
+ /* 2nd order fdct */
+ mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);
+
+ /* Quantization */
+ for (b = 0; b < 16; ++b) {
+ mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
+ }
+
+ /* DC predication and Quantization of 2nd Order block */
+ mb->quantize_b(mb_y2, x_y2);
+
+ /* Distortion */
+ d = vp8_mbblock_error(mb, 1) << 2;
+ d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff);
+
+ *Distortion = (d >> 4);
+
+ /* rate */
+ *Rate = vp8_rdcost_mby(mb);
+}
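+
+/* Sketch of the 2nd order construction above: each short_fdct8x4 call
+ * produces two 4x4 transforms whose DC values (coeff[0] and coeff[16])
+ * are gathered into the Y2 block, which is then transformed with the
+ * 4x4 Walsh-Hadamard (short_walsh4x4) and quantized on its own; this
+ * is why vp8_mbblock_error() is called with dc = 1, so the DC
+ * distortion is not counted twice.
+ */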
+
+static void copy_predictor(unsigned char *dst, const unsigned char *predictor) {
+ const unsigned int *p = (const unsigned int *)predictor;
+ unsigned int *d = (unsigned int *)dst;
+ d[0] = p[0];
+ d[4] = p[4];
+ d[8] = p[8];
+ d[12] = p[12];
+}
+static int rd_pick_intra4x4block(MACROBLOCK *x, BLOCK *be, BLOCKD *b,
+ B_PREDICTION_MODE *best_mode,
+ const int *bmode_costs, ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+
+ int *bestrate, int *bestratey,
+ int *bestdistortion) {
+ B_PREDICTION_MODE mode;
+ int best_rd = INT_MAX;
+ int rate = 0;
+ int distortion;
+
+ ENTROPY_CONTEXT ta = *a, tempa = *a;
+ ENTROPY_CONTEXT tl = *l, templ = *l;
+ /*
+ * The predictor buffer is a 2d buffer with a stride of 16. Create
+ * a temp buffer that meets the stride requirements, but we are only
+ * interested in the left 4x4 block.
+ */
+ DECLARE_ALIGNED(16, unsigned char, best_predictor[16 * 4]);
+ DECLARE_ALIGNED(16, short, best_dqcoeff[16]);
+ int dst_stride = x->e_mbd.dst.y_stride;
+ unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;
+
+ unsigned char *Above = dst - dst_stride;
+ unsigned char *yleft = dst - 1;
+ unsigned char top_left = Above[-1];
+
+ for (mode = B_DC_PRED; mode <= B_HU_PRED; ++mode) {
+ int this_rd;
+ int ratey;
+
+ rate = bmode_costs[mode];
+
+ vp8_intra4x4_predict(Above, yleft, dst_stride, mode, b->predictor, 16,
+ top_left);
+ vp8_subtract_b(be, b, 16);
+ x->short_fdct4x4(be->src_diff, be->coeff, 32);
+ x->quantize_b(be, b);
+
+ tempa = ta;
+ templ = tl;
+
+ ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ);
+ rate += ratey;
+ distortion = vp8_block_error(be->coeff, b->dqcoeff) >> 2;
+
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+
+ if (this_rd < best_rd) {
+ *bestrate = rate;
+ *bestratey = ratey;
+ *bestdistortion = distortion;
+ best_rd = this_rd;
+ *best_mode = mode;
+ *a = tempa;
+ *l = templ;
+ copy_predictor(best_predictor, b->predictor);
+ memcpy(best_dqcoeff, b->dqcoeff, 32);
+ }
+ }
+ b->bmi.as_mode = *best_mode;
+
+ vp8_short_idct4x4llm(best_dqcoeff, best_predictor, 16, dst, dst_stride);
+
+ return best_rd;
+}
+
+static int rd_pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *rate_y,
+ int *Distortion, int best_rd) {
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ int i;
+ int cost = mb->mbmode_cost[xd->frame_type][B_PRED];
+ int distortion = 0;
+ int tot_rate_y = 0;
+ int64_t total_rd = 0;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+ const int *bmode_costs;
+
+ memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+
+ intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);
+
+ bmode_costs = mb->inter_bmode_costs;
+
+ for (i = 0; i < 16; ++i) {
+ MODE_INFO *const mic = xd->mode_info_context;
+ const int mis = xd->mode_info_stride;
+ B_PREDICTION_MODE best_mode = B_MODE_COUNT;
+ int r = 0, ry = 0, d = 0;
+
+ if (mb->e_mbd.frame_type == KEY_FRAME) {
+ const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
+ const B_PREDICTION_MODE L = left_block_mode(mic, i);
+
+ bmode_costs = mb->bmode_costs[A][L];
+ }
+
+ total_rd += rd_pick_intra4x4block(
+ mb, mb->block + i, xd->block + i, &best_mode, bmode_costs,
+ ta + vp8_block2above[i], tl + vp8_block2left[i], &r, &ry, &d);
+
+ cost += r;
+ distortion += d;
+ tot_rate_y += ry;
+
+ assert(best_mode != B_MODE_COUNT);
+ mic->bmi[i].as_mode = best_mode;
+
+ if (total_rd >= (int64_t)best_rd) break;
+ }
+
+ if (total_rd >= (int64_t)best_rd) return INT_MAX;
+
+ *Rate = cost;
+ *rate_y = tot_rate_y;
+ *Distortion = distortion;
+
+ return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
+}
+
+static int rd_pick_intra16x16mby_mode(MACROBLOCK *x, int *Rate, int *rate_y,
+ int *Distortion) {
+ MB_PREDICTION_MODE mode;
+ MB_PREDICTION_MODE mode_selected = MB_MODE_COUNT;
+ int rate, ratey;
+ int distortion;
+ int best_rd = INT_MAX;
+ int this_rd;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ /* Y Search for 16x16 intra prediction mode */
+ for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
+ xd->mode_info_context->mbmi.mode = mode;
+
+ vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
+ xd->dst.y_buffer - 1, xd->dst.y_stride,
+ xd->predictor, 16);
+
+ macro_block_yrd(x, &ratey, &distortion);
+ rate = ratey +
+ x->mbmode_cost[xd->frame_type][xd->mode_info_context->mbmi.mode];
+
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+
+ if (this_rd < best_rd) {
+ mode_selected = mode;
+ best_rd = this_rd;
+ *Rate = rate;
+ *rate_y = ratey;
+ *Distortion = distortion;
+ }
+ }
+
+ assert(mode_selected != MB_MODE_COUNT);
+ xd->mode_info_context->mbmi.mode = mode_selected;
+ return best_rd;
+}
+
+static int rd_cost_mbuv(MACROBLOCK *mb) {
+ int b;
+ int cost = 0;
+ MACROBLOCKD *x = &mb->e_mbd;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+
+ memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+
+ for (b = 16; b < 24; ++b) {
+ cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV,
+ ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ }
+
+ return cost;
+}
+
+static int rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
+ int *distortion, int fullpixel) {
+ (void)cpi;
+ (void)fullpixel;
+
+ vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->src.uv_stride, &x->e_mbd.predictor[256],
+ &x->e_mbd.predictor[320], 8);
+
+ vp8_transform_mbuv(x);
+ vp8_quantize_mbuv(x);
+
+ *rate = rd_cost_mbuv(x);
+ *distortion = vp8_mbuverror(x) / 4;
+
+ return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
+}
+
+static int rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
+ int *distortion, int fullpixel) {
+ (void)cpi;
+ (void)fullpixel;
+
+ vp8_build_inter4x4_predictors_mbuv(&x->e_mbd);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->src.uv_stride, &x->e_mbd.predictor[256],
+ &x->e_mbd.predictor[320], 8);
+
+ vp8_transform_mbuv(x);
+ vp8_quantize_mbuv(x);
+
+ *rate = rd_cost_mbuv(x);
+ *distortion = vp8_mbuverror(x) / 4;
+
+ return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
+}
+
+static void rd_pick_intra_mbuv_mode(MACROBLOCK *x, int *rate,
+ int *rate_tokenonly, int *distortion) {
+ MB_PREDICTION_MODE mode;
+ MB_PREDICTION_MODE mode_selected = MB_MODE_COUNT;
+ int best_rd = INT_MAX;
+ int d = 0, r = 0;
+ int rate_to;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
+ int this_rate;
+ int this_distortion;
+ int this_rd;
+
+ xd->mode_info_context->mbmi.uv_mode = mode;
+
+ vp8_build_intra_predictors_mbuv_s(
+ xd, xd->dst.u_buffer - xd->dst.uv_stride,
+ xd->dst.v_buffer - xd->dst.uv_stride, xd->dst.u_buffer - 1,
+ xd->dst.v_buffer - 1, xd->dst.uv_stride, &xd->predictor[256],
+ &xd->predictor[320], 8);
+
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->src.uv_stride, &xd->predictor[256],
+ &xd->predictor[320], 8);
+ vp8_transform_mbuv(x);
+ vp8_quantize_mbuv(x);
+
+ rate_to = rd_cost_mbuv(x);
+ this_rate =
+ rate_to + x->intra_uv_mode_cost[xd->frame_type]
+ [xd->mode_info_context->mbmi.uv_mode];
+
+ this_distortion = vp8_mbuverror(x) / 4;
+
+ this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+
+ if (this_rd < best_rd) {
+ best_rd = this_rd;
+ d = this_distortion;
+ r = this_rate;
+ *rate_tokenonly = rate_to;
+ mode_selected = mode;
+ }
+ }
+
+ *rate = r;
+ *distortion = d;
+
+ assert(mode_selected != MB_MODE_COUNT);
+ xd->mode_info_context->mbmi.uv_mode = mode_selected;
+}
+
+int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]) {
+ vp8_prob p[VP8_MVREFS - 1];
+ assert(NEARESTMV <= m && m <= SPLITMV);
+ vp8_mv_ref_probs(p, near_mv_ref_ct);
+ return vp8_cost_token(vp8_mv_ref_tree, p,
+ vp8_mv_ref_encoding_array + (m - NEARESTMV));
+}
+
+void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
+ x->e_mbd.mode_info_context->mbmi.mode = mb;
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = mv->as_int;
+}
+
+static int labels2mode(MACROBLOCK *x, int const *labelings, int which_label,
+ B_PREDICTION_MODE this_mode, int_mv *this_mv,
+ int_mv *best_ref_mv, int *mvcost[2]) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *const mic = xd->mode_info_context;
+ const int mis = xd->mode_info_stride;
+
+ int cost = 0;
+ int thismvcost = 0;
+
+ /* We have to be careful retrieving previously-encoded motion vectors.
+ Ones from this macroblock have to be pulled from the BLOCKD array
+ as they have not yet made it to the bmi array in our MB_MODE_INFO. */
+
+ int i = 0;
+
+ do {
+ BLOCKD *const d = xd->block + i;
+ const int row = i >> 2, col = i & 3;
+
+ B_PREDICTION_MODE m;
+
+ if (labelings[i] != which_label) continue;
+
+ if (col && labelings[i] == labelings[i - 1]) {
+ m = LEFT4X4;
+ } else if (row && labelings[i] == labelings[i - 4]) {
+ m = ABOVE4X4;
+ } else {
+ /* the only time we should do costing for a new motion vector
+ * or mode is when we are on a new label (jbb May 08, 2007)
+ */
+ switch (m = this_mode) {
+ case NEW4X4:
+ thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
+ break;
+ case LEFT4X4:
+ this_mv->as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i);
+ break;
+ case ABOVE4X4:
+ this_mv->as_int =
+ row ? d[-4].bmi.mv.as_int : above_block_mv(mic, i, mis);
+ break;
+ case ZERO4X4: this_mv->as_int = 0; break;
+ default: break;
+ }
+
+ if (m == ABOVE4X4) { /* replace above with left if same */
+ int_mv left_mv;
+
+ left_mv.as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i);
+
+ if (left_mv.as_int == this_mv->as_int) m = LEFT4X4;
+ }
+
+ cost = x->inter_bmode_costs[m];
+ }
+
+ d->bmi.mv.as_int = this_mv->as_int;
+
+ x->partition_info->bmi[i].mode = m;
+ x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
+
+ } while (++i < 16);
+
+ cost += thismvcost;
+ return cost;
+}
+
+static int rdcost_mbsegment_y(MACROBLOCK *mb, const int *labels,
+ int which_label, ENTROPY_CONTEXT *ta,
+ ENTROPY_CONTEXT *tl) {
+ int cost = 0;
+ int b;
+ MACROBLOCKD *x = &mb->e_mbd;
+
+ for (b = 0; b < 16; ++b) {
+ if (labels[b] == which_label) {
+ cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_WITH_DC,
+ ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ }
+ }
+
+ return cost;
+}
+static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x,
+ int const *labels,
+ int which_label) {
+ int i;
+ unsigned int distortion = 0;
+ int pre_stride = x->e_mbd.pre.y_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+
+ for (i = 0; i < 16; ++i) {
+ if (labels[i] == which_label) {
+ BLOCKD *bd = &x->e_mbd.block[i];
+ BLOCK *be = &x->block[i];
+
+ vp8_build_inter_predictors_b(bd, 16, base_pre, pre_stride,
+ x->e_mbd.subpixel_predict);
+ vp8_subtract_b(be, bd, 16);
+ x->short_fdct4x4(be->src_diff, be->coeff, 32);
+ x->quantize_b(be, bd);
+
+ distortion += vp8_block_error(be->coeff, bd->dqcoeff);
+ }
+ }
+
+ return distortion;
+}
+
+static const unsigned int segmentation_to_sseshift[4] = { 3, 3, 2, 0 };
+
+typedef struct {
+ int_mv *ref_mv;
+ int_mv mvp;
+
+ int segment_rd;
+ int segment_num;
+ int r;
+ int d;
+ int segment_yrate;
+ B_PREDICTION_MODE modes[16];
+ int_mv mvs[16];
+ unsigned char eobs[16];
+
+ int mvthresh;
+ int *mdcounts;
+
+ int_mv sv_mvp[4]; /* save 4 mvp from 8x8 */
+ int sv_istep[2]; /* save 2 initial step_param for 16x8/8x16 */
+
+} BEST_SEG_INFO;
+
+static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
+ unsigned int segmentation) {
+ int i;
+ int const *labels;
+ int br = 0;
+ int bd = 0;
+ B_PREDICTION_MODE this_mode;
+
+ int label_count;
+ int this_segment_rd = 0;
+ int label_mv_thresh;
+ int rate = 0;
+ int sbr = 0;
+ int sbd = 0;
+ int segmentyrate = 0;
+
+ vp8_variance_fn_ptr_t *v_fn_ptr;
+
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT_PLANES t_above_b, t_left_b;
+
+ memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ vp8_zero(t_above_b);
+ vp8_zero(t_left_b);
+
+ br = 0;
+ bd = 0;
+
+ v_fn_ptr = &cpi->fn_ptr[segmentation];
+ labels = vp8_mbsplits[segmentation];
+ label_count = vp8_mbsplit_count[segmentation];
+
+ /* A multiplier of 64 makes this threshold really big, effectively
+ * making it so that we very rarely check mvs on segments. Setting it
+ * to 1, as here, makes the mv threshold roughly equal to what it is
+ * for macroblocks.
+ */
+ label_mv_thresh = 1 * bsi->mvthresh / label_count;
+
+ /* Segmentation method overheads */
+ rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs,
+ vp8_mbsplit_encodings + segmentation);
+ rate += vp8_cost_mv_ref(SPLITMV, bsi->mdcounts);
+ this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
+ br += rate;
+
+ for (i = 0; i < label_count; ++i) {
+ int_mv mode_mv[B_MODE_COUNT] = { { 0 }, { 0 } };
+ int best_label_rd = INT_MAX;
+ B_PREDICTION_MODE mode_selected = ZERO4X4;
+ int bestlabelyrate = 0;
+
+ /* search for the best motion vector on this segment */
+ for (this_mode = LEFT4X4; this_mode <= NEW4X4; ++this_mode) {
+ int this_rd;
+ int distortion;
+ int labelyrate;
+ ENTROPY_CONTEXT_PLANES t_above_s, t_left_s;
+ ENTROPY_CONTEXT *ta_s;
+ ENTROPY_CONTEXT *tl_s;
+
+ memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta_s = (ENTROPY_CONTEXT *)&t_above_s;
+ tl_s = (ENTROPY_CONTEXT *)&t_left_s;
+
+ if (this_mode == NEW4X4) {
+ int sseshift;
+ int num00;
+ int step_param = 0;
+ int further_steps;
+ int n;
+ int thissme;
+ int bestsme = INT_MAX;
+ int_mv temp_mv;
+ BLOCK *c;
+ BLOCKD *e;
+
+ /* Is the best so far sufficiently good that we can't justify
+ * doing a new motion search?
+ */
+ if (best_label_rd < label_mv_thresh) break;
+
+ if (cpi->compressor_speed) {
+ if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8) {
+ bsi->mvp.as_int = bsi->sv_mvp[i].as_int;
+ if (i == 1 && segmentation == BLOCK_16X8) {
+ bsi->mvp.as_int = bsi->sv_mvp[2].as_int;
+ }
+
+ step_param = bsi->sv_istep[i];
+ }
+
+ /* use previous block's result as next block's MV
+ * predictor.
+ */
+ if (segmentation == BLOCK_4X4 && i > 0) {
+ bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.mv.as_int;
+ if (i == 4 || i == 8 || i == 12) {
+ bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.mv.as_int;
+ }
+ step_param = 2;
+ }
+ }
+
+ further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
+
+ {
+ int sadpb = x->sadperbit4;
+ int_mv mvp_full;
+
+ mvp_full.as_mv.row = bsi->mvp.as_mv.row >> 3;
+ mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3;
+
+ /* find first label */
+ n = vp8_mbsplit_offset[segmentation][i];
+
+ c = &x->block[n];
+ e = &x->e_mbd.block[n];
+
+ {
+ bestsme = cpi->diamond_search_sad(
+ x, c, e, &mvp_full, &mode_mv[NEW4X4], step_param, sadpb, &num00,
+ v_fn_ptr, x->mvcost, bsi->ref_mv);
+
+ n = num00;
+ num00 = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00) {
+ num00--;
+ } else {
+ thissme = cpi->diamond_search_sad(
+ x, c, e, &mvp_full, &temp_mv, step_param + n, sadpb, &num00,
+ v_fn_ptr, x->mvcost, bsi->ref_mv);
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEW4X4].as_int = temp_mv.as_int;
+ }
+ }
+ }
+ }
+
+ sseshift = segmentation_to_sseshift[segmentation];
+
+ /* Should we do a full search? (best quality only) */
+ if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000) {
+ /* Check if mvp_full is within the range. */
+ vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min,
+ x->mv_row_max);
+
+ thissme = vp8_full_search_sad(x, c, e, &mvp_full, sadpb, 16,
+ v_fn_ptr, x->mvcost, bsi->ref_mv);
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEW4X4].as_int = e->bmi.mv.as_int;
+ } else {
+ /* The full search result is actually worse so
+ * reinstate the previous best vector
+ */
+ e->bmi.mv.as_int = mode_mv[NEW4X4].as_int;
+ }
+ }
+ }
+
+ if (bestsme < INT_MAX) {
+ int disto;
+ unsigned int sse;
+ cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4], bsi->ref_mv,
+ x->errorperbit, v_fn_ptr, x->mvcost,
+ &disto, &sse);
+ }
+ } /* NEW4X4 */
+
+ rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
+ bsi->ref_mv, x->mvcost);
+
+ /* Trap vectors that reach beyond the UMV borders */
+ if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
+ ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
+ continue;
+ }
+
+ distortion = vp8_encode_inter_mb_segment(x, labels, i) / 4;
+
+ labelyrate = rdcost_mbsegment_y(x, labels, i, ta_s, tl_s);
+ rate += labelyrate;
+
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+
+ if (this_rd < best_label_rd) {
+ sbr = rate;
+ sbd = distortion;
+ bestlabelyrate = labelyrate;
+ mode_selected = this_mode;
+ best_label_rd = this_rd;
+
+ memcpy(&t_above_b, &t_above_s, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&t_left_b, &t_left_s, sizeof(ENTROPY_CONTEXT_PLANES));
+ }
+ } /*for each 4x4 mode*/
+
+ memcpy(&t_above, &t_above_b, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&t_left, &t_left_b, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
+ bsi->ref_mv, x->mvcost);
+
+ br += sbr;
+ bd += sbd;
+ segmentyrate += bestlabelyrate;
+ this_segment_rd += best_label_rd;
+
+ if (this_segment_rd >= bsi->segment_rd) break;
+
+ } /* for each label */
+
+ if (this_segment_rd < bsi->segment_rd) {
+ bsi->r = br;
+ bsi->d = bd;
+ bsi->segment_yrate = segmentyrate;
+ bsi->segment_rd = this_segment_rd;
+ bsi->segment_num = segmentation;
+
+ /* store everything needed to come back to this!! */
+ for (i = 0; i < 16; ++i) {
+ bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
+ bsi->modes[i] = x->partition_info->bmi[i].mode;
+ bsi->eobs[i] = x->e_mbd.eobs[i];
+ }
+ }
+}
+
+static void vp8_cal_step_param(int sr, int *sp) {
+ int step = 0;
+
+ if (sr > MAX_FIRST_STEP) {
+ sr = MAX_FIRST_STEP;
+ } else if (sr < 1) {
+ sr = 1;
+ }
+
+ while (sr >>= 1) step++;
+
+ *sp = MAX_MVSEARCH_STEPS - 1 - step;
+}
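+
+/* Worked example, assuming MAX_MVSEARCH_STEPS = 8 as in mcomp.h: a
+ * search range sr = 5 gives step = 2 (floor(log2(5))), so *sp =
+ * 8 - 1 - 2 = 5; a larger MV spread therefore yields a smaller
+ * step_param and hence a wider initial diamond search.
+ */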
+
+static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv, int best_rd,
+ int *mdcounts, int *returntotrate,
+ int *returnyrate,
+ int *returndistortion,
+ int mvthresh) {
+ int i;
+ BEST_SEG_INFO bsi;
+
+ memset(&bsi, 0, sizeof(bsi));
+
+ bsi.segment_rd = best_rd;
+ bsi.ref_mv = best_ref_mv;
+ bsi.mvp.as_int = best_ref_mv->as_int;
+ bsi.mvthresh = mvthresh;
+ bsi.mdcounts = mdcounts;
+
+ for (i = 0; i < 16; ++i) {
+ bsi.modes[i] = ZERO4X4;
+ }
+
+ if (cpi->compressor_speed == 0) {
+ /* for now, we will keep the original segmentation order
+ when in best quality mode */
+ rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
+ rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
+ } else {
+ int sr;
+
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
+
+ if (bsi.segment_rd < best_rd) {
+ int col_min = ((best_ref_mv->as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL;
+ int row_min = ((best_ref_mv->as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL;
+ int col_max = (best_ref_mv->as_mv.col >> 3) + MAX_FULL_PEL_VAL;
+ int row_max = (best_ref_mv->as_mv.row >> 3) + MAX_FULL_PEL_VAL;
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+
+ /* Get intersection of UMV window and valid MV window to reduce # of
+ * checks in diamond search. */
+ if (x->mv_col_min < col_min) x->mv_col_min = col_min;
+ if (x->mv_col_max > col_max) x->mv_col_max = col_max;
+ if (x->mv_row_min < row_min) x->mv_row_min = row_min;
+ if (x->mv_row_max > row_max) x->mv_row_max = row_max;
+
+ /* Get 8x8 result */
+ bsi.sv_mvp[0].as_int = bsi.mvs[0].as_int;
+ bsi.sv_mvp[1].as_int = bsi.mvs[2].as_int;
+ bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int;
+ bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int;
+
+ /* Use the 8x8 result as the 16x8/8x16 predictor MV. Adjust the search
+ * range according to how close the two MVs are. */
+ /* block 8X16 */
+ {
+ sr =
+ MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[0]);
+
+ sr =
+ MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[1]);
+
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
+ }
+
+ /* block 16X8 */
+ {
+ sr =
+ MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[0]);
+
+ sr =
+ MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[1]);
+
+ rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
+ }
+
+ /* If 8x8 is better than 16x8/8x16, then do 4x4 search */
+ /* Don't skip 4x4 if speed=0 (good quality) */
+ if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8)
+ /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
+ {
+ bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
+ rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
+ }
+
+ /* restore UMV window */
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+ }
+ }
+
+ /* set it to the best */
+ for (i = 0; i < 16; ++i) {
+ BLOCKD *bd = &x->e_mbd.block[i];
+
+ bd->bmi.mv.as_int = bsi.mvs[i].as_int;
+ *bd->eob = bsi.eobs[i];
+ }
+
+ *returntotrate = bsi.r;
+ *returndistortion = bsi.d;
+ *returnyrate = bsi.segment_yrate;
+
+ /* save partitions */
+ x->e_mbd.mode_info_context->mbmi.partitioning = bsi.segment_num;
+ x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];
+
+ for (i = 0; i < x->partition_info->count; ++i) {
+ int j;
+
+ j = vp8_mbsplit_offset[bsi.segment_num][i];
+
+ x->partition_info->bmi[i].mode = bsi.modes[j];
+ x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
+ }
+ /*
+ * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
+ */
+ x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
+
+ return bsi.segment_rd;
+}
+
+/* The improved MV prediction */
+void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
+ int_mv *mvp, int refframe, int *ref_frame_sign_bias, int *sr,
+ int near_sadidx[]) {
+ const MODE_INFO *above = here - xd->mode_info_stride;
+ const MODE_INFO *left = here - 1;
+ const MODE_INFO *aboveleft = above - 1;
+ int_mv near_mvs[8];
+ int near_ref[8];
+ int_mv mv;
+ int vcnt = 0;
+ int find = 0;
+ int mb_offset;
+
+ int mvx[8];
+ int mvy[8];
+ int i;
+
+ mv.as_int = 0;
+
+ if (here->mbmi.ref_frame != INTRA_FRAME) {
+ near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int =
+ near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int =
+ near_mvs[6].as_int = near_mvs[7].as_int = 0;
+ near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] =
+ near_ref[5] = near_ref[6] = near_ref[7] = 0;
+
+ /* Read in the 3 nearby blocks' MVs from the current frame as
+ * prediction candidates.
+ */
+ if (above->mbmi.ref_frame != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = above->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe,
+ &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = above->mbmi.ref_frame;
+ }
+ vcnt++;
+ if (left->mbmi.ref_frame != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = left->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe,
+ &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = left->mbmi.ref_frame;
+ }
+ vcnt++;
+ if (aboveleft->mbmi.ref_frame != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = aboveleft->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe,
+ &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = aboveleft->mbmi.ref_frame;
+ }
+ vcnt++;
+
+ /* Read in the 5 nearby blocks' MVs from the last frame. */
+ if (cpi->common.last_frame_type != KEY_FRAME) {
+ mb_offset = (-xd->mb_to_top_edge / 128 + 1) * (xd->mode_info_stride + 1) +
+ (-xd->mb_to_left_edge / 128 + 1);
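+ /* mb_to_top_edge is -((mb_row * 16) << 3), so -mb_to_top_edge / 128
+ * recovers mb_row (likewise mb_col from the left edge); the +1 terms
+ * and the (stride + 1) pitch step past the one-cell border around the
+ * saved last-frame MV/ref grids.
+ */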
+
+ /* current in last frame */
+ if (cpi->lf_ref_frame[mb_offset] != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = cpi->lfmv[mb_offset].as_int;
+ mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset], refframe,
+ &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = cpi->lf_ref_frame[mb_offset];
+ }
+ vcnt++;
+
+ /* above in last frame */
+ if (cpi->lf_ref_frame[mb_offset - xd->mode_info_stride - 1] !=
+ INTRA_FRAME) {
+ near_mvs[vcnt].as_int =
+ cpi->lfmv[mb_offset - xd->mode_info_stride - 1].as_int;
+ mv_bias(
+ cpi->lf_ref_frame_sign_bias[mb_offset - xd->mode_info_stride - 1],
+ refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] =
+ cpi->lf_ref_frame[mb_offset - xd->mode_info_stride - 1];
+ }
+ vcnt++;
+
+ /* left in last frame */
+ if (cpi->lf_ref_frame[mb_offset - 1] != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - 1].as_int;
+ mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset - 1], refframe,
+ &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - 1];
+ }
+ vcnt++;
+
+ /* right in last frame */
+ if (cpi->lf_ref_frame[mb_offset + 1] != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + 1].as_int;
+ mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset + 1], refframe,
+ &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = cpi->lf_ref_frame[mb_offset + 1];
+ }
+ vcnt++;
+
+ /* below in last frame */
+ if (cpi->lf_ref_frame[mb_offset + xd->mode_info_stride + 1] !=
+ INTRA_FRAME) {
+ near_mvs[vcnt].as_int =
+ cpi->lfmv[mb_offset + xd->mode_info_stride + 1].as_int;
+ mv_bias(
+ cpi->lf_ref_frame_sign_bias[mb_offset + xd->mode_info_stride + 1],
+ refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] =
+ cpi->lf_ref_frame[mb_offset + xd->mode_info_stride + 1];
+ }
+ vcnt++;
+ }
+
+ for (i = 0; i < vcnt; ++i) {
+ if (near_ref[near_sadidx[i]] != INTRA_FRAME) {
+ if (here->mbmi.ref_frame == near_ref[near_sadidx[i]]) {
+ mv.as_int = near_mvs[near_sadidx[i]].as_int;
+ find = 1;
+ if (i < 3) {
+ *sr = 3;
+ } else {
+ *sr = 2;
+ }
+ break;
+ }
+ }
+ }
+
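+ /* No candidate shares the current block's reference frame: fall back
+ * to a component-wise median. Note mvx[] collects rows and mvy[]
+ * collects cols; each is sorted independently, so e.g. rows {0, 4, 8}
+ * and cols {2, 6, 2} predict (4, 2), which need not equal any single
+ * candidate MV.
+ */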
+ if (!find) {
+ for (i = 0; i < vcnt; ++i) {
+ mvx[i] = near_mvs[i].as_mv.row;
+ mvy[i] = near_mvs[i].as_mv.col;
+ }
+
+ insertsortmv(mvx, vcnt);
+ insertsortmv(mvy, vcnt);
+ mv.as_mv.row = mvx[vcnt / 2];
+ mv.as_mv.col = mvy[vcnt / 2];
+
+ /* sr is set to 0 to allow calling function to decide the search
+ * range.
+ */
+ *sr = 0;
+ }
+ }
+
+ /* Set up return values */
+ mvp->as_int = mv.as_int;
+ vp8_clamp_mv2(mvp, xd);
+}
+
+void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x,
+ int recon_yoffset, int near_sadidx[]) {
+ /* near_sad indexes:
+ * 0-cf above, 1-cf left, 2-cf aboveleft,
+ * 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below
+ */
+ int near_sad[8] = { 0 };
+ BLOCK *b = &x->block[0];
+ unsigned char *src_y_ptr = *(b->base_src);
+
+ /* calculate sad for current frame 3 nearby MBs. */
+ if (xd->mb_to_top_edge == 0 && xd->mb_to_left_edge == 0) {
+ near_sad[0] = near_sad[1] = near_sad[2] = INT_MAX;
+ } else if (xd->mb_to_top_edge ==
+ 0) { /* only has left MB for sad calculation. */
+ near_sad[0] = near_sad[2] = INT_MAX;
+ near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(
+ src_y_ptr, b->src_stride, xd->dst.y_buffer - 16, xd->dst.y_stride);
+ } else if (xd->mb_to_left_edge ==
+ 0) { /* only has above MB for sad calculation. */
+ near_sad[1] = near_sad[2] = INT_MAX;
+ near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(
+ src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16,
+ xd->dst.y_stride);
+ } else {
+ near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(
+ src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16,
+ xd->dst.y_stride);
+ near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(
+ src_y_ptr, b->src_stride, xd->dst.y_buffer - 16, xd->dst.y_stride);
+ near_sad[2] = cpi->fn_ptr[BLOCK_16X16].sdf(
+ src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16 - 16,
+ xd->dst.y_stride);
+ }
+
+ if (cpi->common.last_frame_type != KEY_FRAME) {
+ /* calculate sad for last frame 5 nearby MBs. */
+ unsigned char *pre_y_buffer =
+ cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_buffer + recon_yoffset;
+ int pre_y_stride = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_stride;
+
+ if (xd->mb_to_top_edge == 0) near_sad[4] = INT_MAX;
+ if (xd->mb_to_left_edge == 0) near_sad[5] = INT_MAX;
+ if (xd->mb_to_right_edge == 0) near_sad[6] = INT_MAX;
+ if (xd->mb_to_bottom_edge == 0) near_sad[7] = INT_MAX;
+
+ if (near_sad[4] != INT_MAX) {
+ near_sad[4] = cpi->fn_ptr[BLOCK_16X16].sdf(
+ src_y_ptr, b->src_stride, pre_y_buffer - pre_y_stride * 16,
+ pre_y_stride);
+ }
+ if (near_sad[5] != INT_MAX) {
+ near_sad[5] = cpi->fn_ptr[BLOCK_16X16].sdf(
+ src_y_ptr, b->src_stride, pre_y_buffer - 16, pre_y_stride);
+ }
+ near_sad[3] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride,
+ pre_y_buffer, pre_y_stride);
+ if (near_sad[6] != INT_MAX) {
+ near_sad[6] = cpi->fn_ptr[BLOCK_16X16].sdf(
+ src_y_ptr, b->src_stride, pre_y_buffer + 16, pre_y_stride);
+ }
+ if (near_sad[7] != INT_MAX) {
+ near_sad[7] = cpi->fn_ptr[BLOCK_16X16].sdf(
+ src_y_ptr, b->src_stride, pre_y_buffer + pre_y_stride * 16,
+ pre_y_stride);
+ }
+ }
+
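+ /* Sort the SADs ascending, carrying near_sadidx along, so that
+ * near_sadidx[0] names the best-matching neighbour; vp8_mv_pred() then
+ * scans candidates in that order. After a key frame only the 3
+ * current-frame SADs are meaningful, hence the shorter sort.
+ */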
+ if (cpi->common.last_frame_type != KEY_FRAME) {
+ insertsortsad(near_sad, near_sadidx, 8);
+ } else {
+ insertsortsad(near_sad, near_sadidx, 3);
+ }
+}
+
+static void rd_update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) {
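+ /* MV components are stored internally in 1/8-pel units but coded at
+ * 1/4-pel precision, so the residual is halved (>> 1) before indexing
+ * the MVcount histograms used to update the MV entropy model.
+ */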
+ if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV) {
+ int i;
+
+ for (i = 0; i < x->partition_info->count; ++i) {
+ if (x->partition_info->bmi[i].mode == NEW4X4) {
+ x->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row -
+ best_ref_mv->as_mv.row) >>
+ 1)]++;
+ x->MVcount[1][mv_max + ((x->partition_info->bmi[i].mv.as_mv.col -
+ best_ref_mv->as_mv.col) >>
+ 1)]++;
+ }
+ }
+ } else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV) {
+ x->MVcount[0][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row -
+ best_ref_mv->as_mv.row) >>
+ 1)]++;
+ x->MVcount[1][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col -
+ best_ref_mv->as_mv.col) >>
+ 1)]++;
+ }
+}
+
+static int evaluate_inter_mode_rd(int mdcounts[4], RATE_DISTORTION *rd,
+ int *disable_skip, VP8_COMP *cpi,
+ MACROBLOCK *x) {
+ MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
+ BLOCK *b = &x->block[0];
+ MACROBLOCKD *xd = &x->e_mbd;
+ int distortion;
+ vp8_build_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.predictor, 16);
+
+ if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
+ x->skip = 1;
+ } else if (x->encode_breakout) {
+ unsigned int sse;
+ unsigned int var;
+ unsigned int threshold =
+ (xd->block[0].dequant[1] * xd->block[0].dequant[1] >> 4);
+
+ if (threshold < x->encode_breakout) threshold = x->encode_breakout;
+
+ var = vpx_variance16x16(*(b->base_src), b->src_stride, x->e_mbd.predictor,
+ 16, &sse);
+
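+ /* Encode-breakout: when the 16x16 prediction SSE falls below a
+ * threshold derived from the quantizer and the encode_breakout
+ * setting, and the residual is essentially a flat DC offset, chroma is
+ * checked too; if it also fits, the block is skipped with only a
+ * nominal rate charged.
+ */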
+ if (sse < threshold) {
+ unsigned int q2dc = xd->block[24].dequant[0];
+ /* If there is no codeable 2nd order DC,
+ or only a very small uniform pixel change */
+ if ((sse - var < q2dc * q2dc >> 4) || (sse / 2 > var && sse - var < 64)) {
+ /* Check u and v to make sure skip is ok */
+ unsigned int sse2 = VP8_UVSSE(x);
+ if (sse2 * 2 < threshold) {
+ x->skip = 1;
+ rd->distortion2 = sse + sse2;
+ rd->rate2 = 500;
+
+ /* for best_yrd calculation */
+ rd->rate_uv = 0;
+ rd->distortion_uv = sse2;
+
+ *disable_skip = 1;
+ return RDCOST(x->rdmult, x->rddiv, rd->rate2, rd->distortion2);
+ }
+ }
+ }
+ }
+
+ /* Add in the Mv/mode cost */
+ rd->rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
+
+ /* Y cost and distortion */
+ macro_block_yrd(x, &rd->rate_y, &distortion);
+ rd->rate2 += rd->rate_y;
+ rd->distortion2 += distortion;
+
+ /* UV cost and distortion */
+ rd_inter16x16_uv(cpi, x, &rd->rate_uv, &rd->distortion_uv,
+ cpi->common.full_pixel);
+ rd->rate2 += rd->rate_uv;
+ rd->distortion2 += rd->distortion_uv;
+ return INT_MAX;
+}
+
+static int calculate_final_rd_costs(int this_rd, RATE_DISTORTION *rd,
+ int *other_cost, int disable_skip,
+ int uv_intra_tteob, int intra_rd_penalty,
+ VP8_COMP *cpi, MACROBLOCK *x) {
+ MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
+
+ /* Where skip is allowable, add in the default per-mb cost for the
+ * no-skip case. Where we then decide to skip, we have to delete this
+ * and replace it with the cost of signalling a skip.
+ */
+ if (cpi->common.mb_no_coeff_skip) {
+ *other_cost += vp8_cost_bit(cpi->prob_skip_false, 0);
+ rd->rate2 += *other_cost;
+ }
+
+ /* Estimate the reference frame signaling cost and add it
+ * to the rolling cost variable.
+ */
+ rd->rate2 += x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
+
+ if (!disable_skip) {
+ /* Test for the condition where skip block will be activated
+ * because there are no non zero coefficients and make any
+ * necessary adjustment for rate
+ */
+ if (cpi->common.mb_no_coeff_skip) {
+ int i;
+ int tteob;
+ int has_y2_block = (this_mode != SPLITMV && this_mode != B_PRED);
+
+ tteob = 0;
+ if (has_y2_block) tteob += x->e_mbd.eobs[24];
+
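+ /* When a Y2 block is present the Y blocks' DCs are coded in block 24,
+ * so a Y block eob of 1 carries no coded coefficients of its own;
+ * (eob > has_y2_block) counts only blocks with real coefficients in
+ * either case.
+ */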
+ for (i = 0; i < 16; ++i) tteob += (x->e_mbd.eobs[i] > has_y2_block);
+
+ if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
+ for (i = 16; i < 24; ++i) tteob += x->e_mbd.eobs[i];
+ } else {
+ tteob += uv_intra_tteob;
+ }
+
+ if (tteob == 0) {
+ rd->rate2 -= (rd->rate_y + rd->rate_uv);
+ /* for best_yrd calculation */
+ rd->rate_uv = 0;
+
+ /* Back out no skip flag costing and add in skip flag costing */
+ if (cpi->prob_skip_false) {
+ int prob_skip_cost;
+
+ prob_skip_cost = vp8_cost_bit(cpi->prob_skip_false, 1);
+ prob_skip_cost -= (int)vp8_cost_bit(cpi->prob_skip_false, 0);
+ rd->rate2 += prob_skip_cost;
+ *other_cost += prob_skip_cost;
+ }
+ }
+ }
+ /* Calculate the final RD estimate for this mode */
+ this_rd = RDCOST(x->rdmult, x->rddiv, rd->rate2, rd->distortion2);
+ if (this_rd < INT_MAX &&
+ x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
+ this_rd += intra_rd_penalty;
+ }
+ }
+ return this_rd;
+}
+
+static void update_best_mode(BEST_MODE *best_mode, int this_rd,
+ RATE_DISTORTION *rd, int other_cost,
+ MACROBLOCK *x) {
+ MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
+
+ other_cost += x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
+
+ /* Calculate the final y RD estimate for this mode */
+ best_mode->yrd =
+ RDCOST(x->rdmult, x->rddiv, (rd->rate2 - rd->rate_uv - other_cost),
+ (rd->distortion2 - rd->distortion_uv));
+
+ best_mode->rd = this_rd;
+ memcpy(&best_mode->mbmode, &x->e_mbd.mode_info_context->mbmi,
+ sizeof(MB_MODE_INFO));
+ memcpy(&best_mode->partition, x->partition_info, sizeof(PARTITION_INFO));
+
+ if ((this_mode == B_PRED) || (this_mode == SPLITMV)) {
+ int i;
+ for (i = 0; i < 16; ++i) {
+ best_mode->bmodes[i] = x->e_mbd.block[i].bmi;
+ }
+ }
+}
+
+void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
+ int recon_uvoffset, int *returnrate,
+ int *returndistortion, int *returnintra, int mb_row,
+ int mb_col) {
+ BLOCK *b = &x->block[0];
+ BLOCKD *d = &x->e_mbd.block[0];
+ MACROBLOCKD *xd = &x->e_mbd;
+ int_mv best_ref_mv_sb[2];
+ int_mv mode_mv_sb[2][MB_MODE_COUNT];
+ int_mv best_ref_mv;
+ int_mv *mode_mv;
+ MB_PREDICTION_MODE this_mode;
+ int num00;
+ int best_mode_index = 0;
+ BEST_MODE best_mode;
+
+ int i;
+ int mode_index;
+ int mdcounts[4];
+ int rate;
+ RATE_DISTORTION rd;
+ int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly;
+ int uv_intra_tteob = 0;
+ int uv_intra_done = 0;
+
+ MB_PREDICTION_MODE uv_intra_mode = 0;
+ int_mv mvp;
+ int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ int saddone = 0;
+ /* Search range obtained from vp8_mv_pred(); it uses step_param levels (0-7). */
+ int sr = 0;
+
+ unsigned char *plane[4][3] = { { 0, 0 } };
+ int ref_frame_map[4];
+ int sign_bias = 0;
+
+ int intra_rd_penalty =
+ 10 * vp8_dc_quant(cpi->common.base_qindex, cpi->common.y1dc_delta_q);
+
+#if CONFIG_TEMPORAL_DENOISING
+ unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX,
+ best_rd_sse = UINT_MAX;
+#endif
+
+ // _uv variables are not set consistently before calling update_best_mode.
+ rd.rate_uv = 0;
+ rd.distortion_uv = 0;
+
+ mode_mv = mode_mv_sb[sign_bias];
+ best_ref_mv.as_int = 0;
+ best_mode.rd = INT_MAX;
+ best_mode.yrd = INT_MAX;
+ best_mode.intra_rd = INT_MAX;
+ memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
+ memset(&best_mode.mbmode, 0, sizeof(best_mode.mbmode));
+ memset(&best_mode.bmodes, 0, sizeof(best_mode.bmodes));
+
+ /* Setup search priorities */
+ get_reference_search_order(cpi, ref_frame_map);
+
+ /* Check to see if there is at least 1 valid reference frame; if so, we
+ * need to calculate near_mvs.
+ */
+ if (ref_frame_map[1] > 0) {
+ sign_bias = vp8_find_near_mvs_bias(
+ &x->e_mbd, x->e_mbd.mode_info_context, mode_mv_sb, best_ref_mv_sb,
+ mdcounts, ref_frame_map[1], cpi->common.ref_frame_sign_bias);
+
+ mode_mv = mode_mv_sb[sign_bias];
+ best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
+ }
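+ /* mode_mv_sb/best_ref_mv_sb hold one candidate set per reference-frame
+ * sign bias; mode_mv and best_ref_mv are re-pointed at the set matching
+ * the sign bias of whichever reference frame is being tested below.
+ */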
+
+ get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
+
+ *returnintra = INT_MAX;
+ /* Count of the number of MBs tested so far this frame */
+ x->mbs_tested_so_far++;
+
+ x->skip = 0;
+
+ for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
+ int this_rd = INT_MAX;
+ int disable_skip = 0;
+ int other_cost = 0;
+ int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
+
+ /* Test best rd so far against threshold for trying this mode. */
+ if (best_mode.rd <= x->rd_threshes[mode_index]) continue;
+
+ if (this_ref_frame < 0) continue;
+
+ /* These variables hold our rolling total cost and distortion for
+ * this mode.
+ */
+ rd.rate2 = 0;
+ rd.distortion2 = 0;
+
+ this_mode = vp8_mode_order[mode_index];
+
+ x->e_mbd.mode_info_context->mbmi.mode = this_mode;
+ x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
+
+ /* Only consider ZEROMV/ALTREF_FRAME for the alt ref frame,
+ * unless ARNR filtering is enabled, in which case we want
+ * an unfiltered alternative.
+ */
+ if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
+ if (this_mode != ZEROMV ||
+ x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
+ continue;
+ }
+ }
+
+ /* everything but intra */
+ if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
+ assert(plane[this_ref_frame][0] != NULL &&
+ plane[this_ref_frame][1] != NULL &&
+ plane[this_ref_frame][2] != NULL);
+ x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
+ x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
+ x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
+
+ if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) {
+ sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame];
+ mode_mv = mode_mv_sb[sign_bias];
+ best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
+ }
+ }
+
+ /* Check to see if the testing frequency for this mode is at its
+ * max. If so, prevent it from being tested and increase the
+ * threshold for its testing.
+ */
+ if (x->mode_test_hit_counts[mode_index] &&
+ (cpi->mode_check_freq[mode_index] > 1)) {
+ if (x->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] *
+ x->mode_test_hit_counts[mode_index]) {
+ /* Increase the threshold for coding this mode to make it
+ * less likely to be chosen
+ */
+ x->rd_thresh_mult[mode_index] += 4;
+
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ }
+
+ x->rd_threshes[mode_index] =
+ (cpi->rd_baseline_thresh[mode_index] >> 7) *
+ x->rd_thresh_mult[mode_index];
+
+ continue;
+ }
+ }
+
+ /* We have now reached the point where we are going to test the
+ * current mode so increment the counter for the number of times
+ * it has been tested
+ */
+ x->mode_test_hit_counts[mode_index]++;
+
+ /* Experimental code. Special case for gf and arf zeromv modes.
+ * Increase zbin size to suppress noise.
+ */
+ if (x->zbin_mode_boost_enabled) {
+ if (this_ref_frame == INTRA_FRAME) {
+ x->zbin_mode_boost = 0;
+ } else {
+ if (vp8_mode_order[mode_index] == ZEROMV) {
+ if (this_ref_frame != LAST_FRAME) {
+ x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ } else {
+ x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ }
+ } else if (vp8_mode_order[mode_index] == SPLITMV) {
+ x->zbin_mode_boost = 0;
+ } else {
+ x->zbin_mode_boost = MV_ZBIN_BOOST;
+ }
+ }
+
+ vp8_update_zbin_extra(cpi, x);
+ }
+
+ if (!uv_intra_done && this_ref_frame == INTRA_FRAME) {
+ rd_pick_intra_mbuv_mode(x, &uv_intra_rate, &uv_intra_rate_tokenonly,
+ &uv_intra_distortion);
+ uv_intra_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;
+
+ /*
+ * The total of the eobs is used later to further adjust rate2. Since
+ * the UV blocks' intra eobs will be overwritten when we check inter
+ * modes, we need to save uv_intra_tteob here.
+ */
+ for (i = 16; i < 24; ++i) uv_intra_tteob += x->e_mbd.eobs[i];
+
+ uv_intra_done = 1;
+ }
+
+ switch (this_mode) {
+ case B_PRED: {
+ int tmp_rd;
+
+ /* Note the rate value returned here includes the cost of
+ * coding the BPRED mode: x->mbmode_cost[x->e_mbd.frame_type][BPRED]
+ */
+ int distortion;
+ tmp_rd = rd_pick_intra4x4mby_modes(x, &rate, &rd.rate_y, &distortion,
+ best_mode.yrd);
+ rd.rate2 += rate;
+ rd.distortion2 += distortion;
+
+ if (tmp_rd < best_mode.yrd) {
+ assert(uv_intra_done);
+ rd.rate2 += uv_intra_rate;
+ rd.rate_uv = uv_intra_rate_tokenonly;
+ rd.distortion2 += uv_intra_distortion;
+ rd.distortion_uv = uv_intra_distortion;
+ } else {
+ this_rd = INT_MAX;
+ disable_skip = 1;
+ }
+ break;
+ }
+
+ case SPLITMV: {
+ int tmp_rd;
+ int this_rd_thresh;
+ int distortion;
+
+ this_rd_thresh = (vp8_ref_frame_order[mode_index] == 1)
+ ? x->rd_threshes[THR_NEW1]
+ : x->rd_threshes[THR_NEW3];
+ this_rd_thresh = (vp8_ref_frame_order[mode_index] == 2)
+ ? x->rd_threshes[THR_NEW2]
+ : this_rd_thresh;
+
+ tmp_rd = vp8_rd_pick_best_mbsegmentation(
+ cpi, x, &best_ref_mv, best_mode.yrd, mdcounts, &rate, &rd.rate_y,
+ &distortion, this_rd_thresh);
+
+ rd.rate2 += rate;
+ rd.distortion2 += distortion;
+
+ /* If even the 'Y' rd value of split is higher than the best so far,
+ * then don't bother looking at UV.
+ */
+ if (tmp_rd < best_mode.yrd) {
+ /* Now work out UV cost and add it in */
+ rd_inter4x4_uv(cpi, x, &rd.rate_uv, &rd.distortion_uv,
+ cpi->common.full_pixel);
+ rd.rate2 += rd.rate_uv;
+ rd.distortion2 += rd.distortion_uv;
+ } else {
+ this_rd = INT_MAX;
+ disable_skip = 1;
+ }
+ break;
+ }
+ case DC_PRED:
+ case V_PRED:
+ case H_PRED:
+ case TM_PRED: {
+ int distortion;
+ x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+
+ vp8_build_intra_predictors_mby_s(
+ xd, xd->dst.y_buffer - xd->dst.y_stride, xd->dst.y_buffer - 1,
+ xd->dst.y_stride, xd->predictor, 16);
+ macro_block_yrd(x, &rd.rate_y, &distortion);
+ rd.rate2 += rd.rate_y;
+ rd.distortion2 += distortion;
+ rd.rate2 += x->mbmode_cost[x->e_mbd.frame_type]
+ [x->e_mbd.mode_info_context->mbmi.mode];
+ assert(uv_intra_done);
+ rd.rate2 += uv_intra_rate;
+ rd.rate_uv = uv_intra_rate_tokenonly;
+ rd.distortion2 += uv_intra_distortion;
+ rd.distortion_uv = uv_intra_distortion;
+ break;
+ }
+
+ case NEWMV: {
+ int thissme;
+ int bestsme = INT_MAX;
+ int step_param = cpi->sf.first_step;
+ int further_steps;
+ int n;
+ /* If last step (1-away) of n-step search doesn't pick the center point
+ as the best match, we will do a final 1-away diamond refining search
+ */
+ int do_refine = 1;
+
+ int sadpb = x->sadperbit16;
+ int_mv mvp_full;
+
+ int col_min = ((best_ref_mv.as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL;
+ int row_min = ((best_ref_mv.as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL;
+ int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL;
+ int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL;
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+
+ if (!saddone) {
+ vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
+ saddone = 1;
+ }
+
+ vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
+ x->e_mbd.mode_info_context->mbmi.ref_frame,
+ cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
+
+ mvp_full.as_mv.col = mvp.as_mv.col >> 3;
+ mvp_full.as_mv.row = mvp.as_mv.row >> 3;
+
+ /* Get intersection of UMV window and valid MV window to
+ * reduce # of checks in diamond search.
+ */
+ if (x->mv_col_min < col_min) x->mv_col_min = col_min;
+ if (x->mv_col_max > col_max) x->mv_col_max = col_max;
+ if (x->mv_row_min < row_min) x->mv_row_min = row_min;
+ if (x->mv_row_max > row_max) x->mv_row_max = row_max;
+
+ /* adjust search range according to sr from mv prediction */
+ if (sr > step_param) step_param = sr;
+
+ /* Initial step/diamond search */
+ {
+ bestsme = cpi->diamond_search_sad(
+ x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb, &num00,
+ &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+
+ /* Further step/diamond searches as necessary */
+ further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
+
+ n = num00;
+ num00 = 0;
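+ /* num00, as returned by the diamond search, is the number of further
+ * step sizes whose best point was already the centre; the loop below
+ * decrements it to skip those passes instead of searching them again.
+ */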
+
+ /* If there will be no more n-step searches, check whether a refining
+ * search is needed. */
+ if (n > further_steps) do_refine = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00) {
+ num00--;
+ } else {
+ thissme = cpi->diamond_search_sad(
+ x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb, &num00,
+ &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
+
+ /* check to see if refining search is needed. */
+ if (num00 > (further_steps - n)) do_refine = 0;
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ } else {
+ d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
+ }
+ }
+ }
+ }
+
+ /* final 1-away diamond refining search */
+ if (do_refine == 1) {
+ int search_range;
+
+ search_range = 8;
+
+ thissme = cpi->refining_search_sad(
+ x, b, d, &d->bmi.mv, sadpb, search_range,
+ &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ } else {
+ d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
+ }
+ }
+
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+ if (bestsme < INT_MAX) {
+ int dis; /* TODO: use dis in distortion calculation later. */
+ unsigned int sse;
+ cpi->find_fractional_mv_step(
+ x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
+ &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &dis, &sse);
+ }
+
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+
+ /* Add the new motion vector cost to our rolling cost variable */
+ rd.rate2 +=
+ vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, x->mvcost, 96);
+ }
+ // fall through
+
+ case NEARESTMV:
+ case NEARMV:
+ /* Clip "next_nearest" so that it does not extend to far out
+ * of image
+ */
+ vp8_clamp_mv2(&mode_mv[this_mode], xd);
+
+ /* Do not bother proceeding if the vector (from newmv, nearest
+ * or near) is 0,0 as this should then be coded using the zeromv
+ * mode.
+ */
+ if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) &&
+ (mode_mv[this_mode].as_int == 0)) {
+ continue;
+ }
+ // fall through
+
+ case ZEROMV:
+
+ /* Trap vectors that reach beyond the UMV borders.
+ * Note that ALL NEWMV, NEARESTMV, NEARMV and ZEROMV code
+ * drops through to this point because of the lack of break
+ * statements in the previous two cases.
+ */
+ if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
+ ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
+ continue;
+ }
+
+ vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]);
+ this_rd = evaluate_inter_mode_rd(mdcounts, &rd, &disable_skip, cpi, x);
+ break;
+
+ default: break;
+ }
+
+ this_rd =
+ calculate_final_rd_costs(this_rd, &rd, &other_cost, disable_skip,
+ uv_intra_tteob, intra_rd_penalty, cpi, x);
+
+ /* Keep record of best intra distortion */
+ if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
+ (this_rd < best_mode.intra_rd)) {
+ best_mode.intra_rd = this_rd;
+ *returnintra = rd.distortion2;
+ }
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+ unsigned int sse;
+ vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], &sse,
+ mode_mv[this_mode]);
+
+ if (sse < best_rd_sse) best_rd_sse = sse;
+
+ /* Store for later use by denoiser. */
+ if (this_mode == ZEROMV && sse < zero_mv_sse) {
+ zero_mv_sse = sse;
+ x->best_zeromv_reference_frame =
+ x->e_mbd.mode_info_context->mbmi.ref_frame;
+ }
+
+ /* Store the best NEWMV in x for later use in the denoiser. */
+ if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && sse < best_sse) {
+ best_sse = sse;
+ vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], &best_sse,
+ mode_mv[this_mode]);
+ x->best_sse_inter_mode = NEWMV;
+ x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv;
+ x->need_to_clamp_best_mvs =
+ x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs;
+ x->best_reference_frame = x->e_mbd.mode_info_context->mbmi.ref_frame;
+ }
+ }
+#endif
+
+ /* Did this mode help, i.e. is it the new best mode? */
+ if (this_rd < best_mode.rd || x->skip) {
+ /* Note index of best mode so far */
+ best_mode_index = mode_index;
+ *returnrate = rd.rate2;
+ *returndistortion = rd.distortion2;
+ if (this_mode <= B_PRED) {
+ x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode;
+ /* required for left and above block mv */
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
+ }
+ update_best_mode(&best_mode, this_rd, &rd, other_cost, x);
+
+ /* Testing this mode gave rise to an improvement in best error
+ * score. Lower threshold a bit for next time
+ */
+ x->rd_thresh_mult[mode_index] =
+ (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2))
+ ? x->rd_thresh_mult[mode_index] - 2
+ : MIN_THRESHMULT;
+ }
+
+ /* If the mode did not help improve the best error case then raise
+ * the threshold for testing that mode next time around.
+ */
+ else {
+ x->rd_thresh_mult[mode_index] += 4;
+
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ }
+ }
+ x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
+ x->rd_thresh_mult[mode_index];
+
+ if (x->skip) break;
+ }
+
+ /* Reduce the activation RD thresholds for the best choice mode */
+ if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
+ (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
+ int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 2);
+
+ x->rd_thresh_mult[best_mode_index] =
+ (x->rd_thresh_mult[best_mode_index] >=
+ (MIN_THRESHMULT + best_adjustment))
+ ? x->rd_thresh_mult[best_mode_index] - best_adjustment
+ : MIN_THRESHMULT;
+ x->rd_threshes[best_mode_index] =
+ (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
+ x->rd_thresh_mult[best_mode_index];
+ }
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+ int block_index = mb_row * cpi->common.mb_cols + mb_col;
+ if (x->best_sse_inter_mode == DC_PRED) {
+ /* No best MV found. */
+ x->best_sse_inter_mode = best_mode.mbmode.mode;
+ x->best_sse_mv = best_mode.mbmode.mv;
+ x->need_to_clamp_best_mvs = best_mode.mbmode.need_to_clamp_mvs;
+ x->best_reference_frame = best_mode.mbmode.ref_frame;
+ best_sse = best_rd_sse;
+ }
+ vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
+ recon_yoffset, recon_uvoffset, &cpi->common.lf_info,
+ mb_row, mb_col, block_index, 0);
+
+ /* Reevaluate ZEROMV after denoising. */
+ if (best_mode.mbmode.ref_frame == INTRA_FRAME &&
+ x->best_zeromv_reference_frame != INTRA_FRAME) {
+ int this_rd = INT_MAX;
+ int disable_skip = 0;
+ int other_cost = 0;
+ int this_ref_frame = x->best_zeromv_reference_frame;
+ rd.rate2 =
+ x->ref_frame_cost[this_ref_frame] + vp8_cost_mv_ref(ZEROMV, mdcounts);
+ rd.distortion2 = 0;
+
+ /* set up the proper prediction buffers for the frame */
+ x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
+ x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
+ x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
+ x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
+
+ x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
+
+ this_rd = evaluate_inter_mode_rd(mdcounts, &rd, &disable_skip, cpi, x);
+ this_rd =
+ calculate_final_rd_costs(this_rd, &rd, &other_cost, disable_skip,
+ uv_intra_tteob, intra_rd_penalty, cpi, x);
+ if (this_rd < best_mode.rd || x->skip) {
+ *returnrate = rd.rate2;
+ *returndistortion = rd.distortion2;
+ update_best_mode(&best_mode, this_rd, &rd, other_cost, x);
+ }
+ }
+ }
+#endif
+
+ if (cpi->is_src_frame_alt_ref &&
+ (best_mode.mbmode.mode != ZEROMV ||
+ best_mode.mbmode.ref_frame != ALTREF_FRAME)) {
+ x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
+ x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
+ x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
+ (cpi->common.mb_no_coeff_skip);
+ x->e_mbd.mode_info_context->mbmi.partitioning = 0;
+ return;
+ }
+
+ /* macroblock modes */
+ memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mode.mbmode,
+ sizeof(MB_MODE_INFO));
+
+ if (best_mode.mbmode.mode == B_PRED) {
+ for (i = 0; i < 16; ++i) {
+ xd->mode_info_context->bmi[i].as_mode = best_mode.bmodes[i].as_mode;
+ }
+ }
+
+ if (best_mode.mbmode.mode == SPLITMV) {
+ for (i = 0; i < 16; ++i) {
+ xd->mode_info_context->bmi[i].mv.as_int = best_mode.bmodes[i].mv.as_int;
+ }
+
+ memcpy(x->partition_info, &best_mode.partition, sizeof(PARTITION_INFO));
+
+ x->e_mbd.mode_info_context->mbmi.mv.as_int =
+ x->partition_info->bmi[15].mv.as_int;
+ }
+
+ if (sign_bias !=
+ cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) {
+ best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
+ }
+
+ rd_update_mvcount(x, &best_ref_mv);
+}
+
+void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate) {
+ int error4x4, error16x16;
+ int rate4x4, rate16x16 = 0, rateuv;
+ int dist4x4, dist16x16, distuv;
+ int rate_;
+ int rate4x4_tokenonly = 0;
+ int rate16x16_tokenonly = 0;
+ int rateuv_tokenonly = 0;
+
+ x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+
+ rd_pick_intra_mbuv_mode(x, &rateuv, &rateuv_tokenonly, &distuv);
+ rate_ = rateuv;
+
+ error16x16 = rd_pick_intra16x16mby_mode(x, &rate16x16, &rate16x16_tokenonly,
+ &dist16x16);
+
+ error4x4 = rd_pick_intra4x4mby_modes(x, &rate4x4, &rate4x4_tokenonly,
+ &dist4x4, error16x16);
+
+ if (error4x4 < error16x16) {
+ x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
+ rate_ += rate4x4;
+ } else {
+ rate_ += rate16x16;
+ }
+
+ *rate = rate_;
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/rdopt.h b/media/libvpx/libvpx/vp8/encoder/rdopt.h
new file mode 100644
index 0000000000..cc3db8197c
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/rdopt.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_RDOPT_H_
+#define VPX_VP8_ENCODER_RDOPT_H_
+
+#include "./vpx_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
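+/* Fixed-point RD cost: the rate R is scaled by the multiplier RM and
+ * brought back to integer units with a rounded shift (the 128 is the
+ * rounding term for >> 8), then the distortion D weighted by DM is added.
+ * E.g. RM == 300, R == 40 yields a rate term of (128 + 12000) >> 8 == 47.
+ */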
+#define RDCOST(RM, DM, R, D) (((128 + (R) * (RM)) >> 8) + (DM) * (D))
+
+void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
+void vp8_auto_select_speed(VP8_COMP *cpi);
+
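+/* Simple insertion sorts: fine here because len is at most the 8 candidate
+ * MVs/SADs. insertsortsad() keeps idx[] in step with arr[] so callers get
+ * the sorted order as a permutation of the original indices, e.g.
+ * arr {5, 2, 4} with idx {0, 1, 2} sorts to arr {2, 4, 5}, idx {1, 2, 0}.
+ */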
+static INLINE void insertsortmv(int arr[], int len) {
+ int i, j, k;
+
+ for (i = 1; i <= len - 1; ++i) {
+ for (j = 0; j < i; ++j) {
+ if (arr[j] > arr[i]) {
+ int temp;
+
+ temp = arr[i];
+
+ for (k = i; k > j; k--) arr[k] = arr[k - 1];
+
+ arr[j] = temp;
+ }
+ }
+ }
+}
+
+static INLINE void insertsortsad(int arr[], int idx[], int len) {
+ int i, j, k;
+
+ for (i = 1; i <= len - 1; ++i) {
+ for (j = 0; j < i; ++j) {
+ if (arr[j] > arr[i]) {
+ int temp, tempi;
+
+ temp = arr[i];
+ tempi = idx[i];
+
+ for (k = i; k > j; k--) {
+ arr[k] = arr[k - 1];
+ idx[k] = idx[k - 1];
+ }
+
+ arr[j] = temp;
+ idx[j] = tempi;
+ }
+ }
+ }
+}
+
+void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue);
+void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
+ int recon_uvoffset, int *returnrate,
+ int *returndistortion, int *returnintra, int mb_row,
+ int mb_col);
+void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate);
+
+static INLINE void get_plane_pointers(const YV12_BUFFER_CONFIG *fb,
+ unsigned char *plane[3],
+ unsigned int recon_yoffset,
+ unsigned int recon_uvoffset) {
+ plane[0] = fb->y_buffer + recon_yoffset;
+ plane[1] = fb->u_buffer + recon_uvoffset;
+ plane[2] = fb->v_buffer + recon_uvoffset;
+}
+
+static INLINE void get_predictor_pointers(const VP8_COMP *cpi,
+ unsigned char *plane[4][3],
+ unsigned int recon_yoffset,
+ unsigned int recon_uvoffset) {
+ if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
+ get_plane_pointers(&cpi->common.yv12_fb[cpi->common.lst_fb_idx],
+ plane[LAST_FRAME], recon_yoffset, recon_uvoffset);
+ }
+
+ if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
+ get_plane_pointers(&cpi->common.yv12_fb[cpi->common.gld_fb_idx],
+ plane[GOLDEN_FRAME], recon_yoffset, recon_uvoffset);
+ }
+
+ if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
+ get_plane_pointers(&cpi->common.yv12_fb[cpi->common.alt_fb_idx],
+ plane[ALTREF_FRAME], recon_yoffset, recon_uvoffset);
+ }
+}
+
+static INLINE void get_reference_search_order(const VP8_COMP *cpi,
+ int ref_frame_map[4]) {
+ int i = 0;
+
+ ref_frame_map[i++] = INTRA_FRAME;
+ if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frame_map[i++] = LAST_FRAME;
+ if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frame_map[i++] = GOLDEN_FRAME;
+ if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frame_map[i++] = ALTREF_FRAME;
+ for (; i < 4; ++i) ref_frame_map[i] = -1;
+}
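+/* E.g. with all three reference flags set the order is {INTRA_FRAME,
+ * LAST_FRAME, GOLDEN_FRAME, ALTREF_FRAME}; with only the last frame
+ * available it is {INTRA_FRAME, LAST_FRAME, -1, -1}, and the -1 entries
+ * make vp8_rd_pick_inter_mode() skip the corresponding modes.
+ */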
+
+void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
+ int_mv *mvp, int refframe, int *ref_frame_sign_bias, int *sr,
+ int near_sadidx[]);
+void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x,
+ int recon_yoffset, int near_sadidx[]);
+int VP8_UVSSE(MACROBLOCK *x);
+int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
+void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_RDOPT_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/segmentation.c b/media/libvpx/libvpx/vp8/encoder/segmentation.c
new file mode 100644
index 0000000000..dcb68119e1
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/segmentation.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "segmentation.h"
+#include "vpx_mem/vpx_mem.h"
+
+void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) {
+ int mb_row, mb_col;
+
+ MODE_INFO *this_mb_mode_info = cm->mi;
+
+ x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
+
+ if ((cm->frame_type == KEY_FRAME) || (cm->refresh_golden_frame)) {
+ /* Reset GF usage monitors */
+ memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
+ cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
+ } else {
+ /* for each macroblock row in image */
+ for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+ /* for each macroblock col in image */
+ for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+ /* If using golden, set the GF active flag if not already set.
+ * If using last frame 0,0 mode, leave the flag as it is;
+ * else, if using non-0,0 motion or intra modes, clear the
+ * flag if it is currently set.
+ */
+ if ((this_mb_mode_info->mbmi.ref_frame == GOLDEN_FRAME) ||
+ (this_mb_mode_info->mbmi.ref_frame == ALTREF_FRAME)) {
+ if (*(x->gf_active_ptr) == 0) {
+ *(x->gf_active_ptr) = 1;
+ cpi->gf_active_count++;
+ }
+ } else if ((this_mb_mode_info->mbmi.mode != ZEROMV) &&
+ *(x->gf_active_ptr)) {
+ *(x->gf_active_ptr) = 0;
+ cpi->gf_active_count--;
+ }
+
+ x->gf_active_ptr++; /* Step onto next entry */
+ this_mb_mode_info++; /* skip to next mb */
+ }
+
+ /* this is to account for the border */
+ this_mb_mode_info++;
+ }
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/segmentation.h b/media/libvpx/libvpx/vp8/encoder/segmentation.h
new file mode 100644
index 0000000000..4ddbdbbd26
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/segmentation.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_SEGMENTATION_H_
+#define VPX_VP8_ENCODER_SEGMENTATION_H_
+
+#include "string.h"
+#include "vp8/common/blockd.h"
+#include "onyx_int.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm,
+ MACROBLOCK *x);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_SEGMENTATION_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/temporal_filter.c b/media/libvpx/libvpx/vp8/encoder/temporal_filter.c
new file mode 100644
index 0000000000..1c1a55fde6
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/temporal_filter.c
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8/common/onyxc_int.h"
+#include "onyx_int.h"
+#include "vp8/common/systemdependent.h"
+#include "vp8/encoder/quantize.h"
+#include "vp8/common/alloccommon.h"
+#include "mcomp.h"
+#include "firstpass.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp8/common/extend.h"
+#include "ratectrl.h"
+#include "vp8/common/quant_common.h"
+#include "segmentation.h"
+#include "temporal_filter.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/swapyv12buffer.h"
+#include "vp8/common/threading.h"
+#include "vpx_ports/vpx_timer.h"
+
+#include <math.h>
+#include <limits.h>
+
+#define ALT_REF_MC_ENABLED 1 /* toggle MC in AltRef filtering */
+#define ALT_REF_SUBPEL_ENABLED 1 /* toggle subpel in MC AltRef filtering */
+
+#if VP8_TEMPORAL_ALT_REF
+
+static void vp8_temporal_filter_predictors_mb_c(
+ MACROBLOCKD *x, unsigned char *y_mb_ptr, unsigned char *u_mb_ptr,
+ unsigned char *v_mb_ptr, int stride, int mv_row, int mv_col,
+ unsigned char *pred) {
+ int offset;
+ unsigned char *yptr, *uptr, *vptr;
+
+ /* Y */
+ yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);
+
+ if ((mv_row | mv_col) & 7) {
+ x->subpixel_predict16x16(yptr, stride, mv_col & 7, mv_row & 7, &pred[0],
+ 16);
+ } else {
+ vp8_copy_mem16x16(yptr, stride, &pred[0], 16);
+ }
+
+ /* U & V */
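+ /* Chroma is half resolution: halve the MV and round the luma stride up
+ * to get the chroma stride. The same 1/8-pel convention applies, so
+ * any nonzero low 3 bits again select the sub-pel filter path.
+ */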
+ mv_row >>= 1;
+ mv_col >>= 1;
+ stride = (stride + 1) >> 1;
+ offset = (mv_row >> 3) * stride + (mv_col >> 3);
+ uptr = u_mb_ptr + offset;
+ vptr = v_mb_ptr + offset;
+
+ if ((mv_row | mv_col) & 7) {
+ x->subpixel_predict8x8(uptr, stride, mv_col & 7, mv_row & 7, &pred[256], 8);
+ x->subpixel_predict8x8(vptr, stride, mv_col & 7, mv_row & 7, &pred[320], 8);
+ } else {
+ vp8_copy_mem8x8(uptr, stride, &pred[256], 8);
+ vp8_copy_mem8x8(vptr, stride, &pred[320], 8);
+ }
+}
+void vp8_temporal_filter_apply_c(unsigned char *frame1, unsigned int stride,
+ unsigned char *frame2, unsigned int block_size,
+ int strength, int filter_weight,
+ unsigned int *accumulator,
+ unsigned short *count) {
+ unsigned int i, j, k;
+ int modifier;
+ int byte = 0;
+ const int rounding = strength > 0 ? 1 << (strength - 1) : 0;
+
+ for (i = 0, k = 0; i < block_size; ++i) {
+ for (j = 0; j < block_size; j++, k++) {
+ int src_byte = frame1[byte];
+ int pixel_value = *frame2++;
+
+ modifier = src_byte - pixel_value;
+ /* This is an integer approximation of:
+ * float coeff = (3.0 * modifier * modifier) / pow(2, strength);
+ * modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
+ */
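+ /* E.g. strength == 6 and a source/prediction difference of 10:
+ * 10*10*3 + 32 == 332, 332 >> 6 == 5, so this pixel contributes with
+ * weight (16 - 5) * filter_weight.
+ */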
+ modifier *= modifier;
+ modifier *= 3;
+ modifier += rounding;
+ modifier >>= strength;
+
+ if (modifier > 16) modifier = 16;
+
+ modifier = 16 - modifier;
+ modifier *= filter_weight;
+
+ count[k] += modifier;
+ accumulator[k] += modifier * pixel_value;
+
+ byte++;
+ }
+
+ byte += stride - block_size;
+ }
+}
+
+#if ALT_REF_MC_ENABLED
+
+static int vp8_temporal_filter_find_matching_mb_c(VP8_COMP *cpi,
+ YV12_BUFFER_CONFIG *arf_frame,
+ YV12_BUFFER_CONFIG *frame_ptr,
+ int mb_offset,
+ int error_thresh) {
+ MACROBLOCK *x = &cpi->mb;
+ int step_param;
+ int sadpb = x->sadperbit16;
+ int bestsme = INT_MAX;
+
+ BLOCK *b = &x->block[0];
+ BLOCKD *d = &x->e_mbd.block[0];
+ int_mv best_ref_mv1;
+ int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
+
+ /* Save input state */
+ unsigned char **base_src = b->base_src;
+ int src = b->src;
+ int src_stride = b->src_stride;
+ unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+ int pre = d->offset;
+ int pre_stride = x->e_mbd.pre.y_stride;
+
+ (void)error_thresh;
+
+ best_ref_mv1.as_int = 0;
+ best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >> 3;
+ best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >> 3;
+
+ /* Setup frame pointers */
+ b->base_src = &arf_frame->y_buffer;
+ b->src_stride = arf_frame->y_stride;
+ b->src = mb_offset;
+
+ x->e_mbd.pre.y_buffer = frame_ptr->y_buffer;
+ x->e_mbd.pre.y_stride = frame_ptr->y_stride;
+ d->offset = mb_offset;
+
+ /* Pick the initial diamond-search step size based on encoder speed */
+ if (cpi->Speed < 8) {
+ step_param = cpi->sf.first_step + (cpi->Speed > 5);
+ } else {
+ step_param = cpi->sf.first_step + 2;
+ }
+
+ /* TODO Check that the 16x16 vf & sdf are selected here */
+ /* Ignore mv costing by sending NULL cost arrays */
+ bestsme =
+ vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.mv, step_param, sadpb,
+ &cpi->fn_ptr[BLOCK_16X16], NULL, &best_ref_mv1);
+ (void)bestsme; // Ignore unused return value.
+
+#if ALT_REF_SUBPEL_ENABLED
+ /* Try sub-pixel MC? */
+ {
+ int distortion;
+ unsigned int sse;
+ /* Ignore mv costing by sending NULL cost array */
+ bestsme = cpi->find_fractional_mv_step(
+ x, b, d, &d->bmi.mv, &best_ref_mv1, x->errorperbit,
+ &cpi->fn_ptr[BLOCK_16X16], NULL, &distortion, &sse);
+ }
+#endif
+
+ /* Restore input state */
+ b->base_src = base_src;
+ b->src = src;
+ b->src_stride = src_stride;
+ x->e_mbd.pre.y_buffer = base_pre;
+ d->offset = pre;
+ x->e_mbd.pre.y_stride = pre_stride;
+
+ return bestsme;
+}
+#endif
+
+static void vp8_temporal_filter_iterate_c(VP8_COMP *cpi, int frame_count,
+ int alt_ref_index, int strength) {
+ int byte;
+ int frame;
+ int mb_col, mb_row;
+ unsigned int filter_weight;
+ int mb_cols = cpi->common.mb_cols;
+ int mb_rows = cpi->common.mb_rows;
+ int mb_y_offset = 0;
+ int mb_uv_offset = 0;
+ DECLARE_ALIGNED(16, unsigned int, accumulator[16 * 16 + 8 * 8 + 8 * 8]);
+ DECLARE_ALIGNED(16, unsigned short, count[16 * 16 + 8 * 8 + 8 * 8]);
+ MACROBLOCKD *mbd = &cpi->mb.e_mbd;
+ YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index];
+ unsigned char *dst1, *dst2;
+ DECLARE_ALIGNED(16, unsigned char, predictor[16 * 16 + 8 * 8 + 8 * 8]);
+
+ /* Save input state */
+ unsigned char *y_buffer = mbd->pre.y_buffer;
+ unsigned char *u_buffer = mbd->pre.u_buffer;
+ unsigned char *v_buffer = mbd->pre.v_buffer;
+
+ for (mb_row = 0; mb_row < mb_rows; ++mb_row) {
+#if ALT_REF_MC_ENABLED
+ /* Source frames are extended to 16 pixels. This is different than
+ * L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS)
+ * A 6 tap filter is used for motion search. This requires 2 pixels
+ * before and 3 pixels after. So the largest Y mv on a border would
+ * then be 16 - 3. The UV blocks are half the size of the Y and
+ * therefore only extended by 8. The largest mv that a UV block
+ * can support is 8 - 3. A UV mv is half of a Y mv.
+ * (16 - 3) >> 1 == 6 which is greater than 8 - 3.
+ * To keep the mv in play for both Y and UV planes the max that it
+ * can be on a border is therefore 16 - 5.
+ */
+ cpi->mb.mv_row_min = -((mb_row * 16) + (16 - 5));
+ cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16) + (16 - 5);
+#endif
+
+ for (mb_col = 0; mb_col < mb_cols; ++mb_col) {
+ int i, j, k;
+ int stride;
+
+ memset(accumulator, 0, 384 * sizeof(unsigned int));
+ memset(count, 0, 384 * sizeof(unsigned short));
+
+#if ALT_REF_MC_ENABLED
+ cpi->mb.mv_col_min = -((mb_col * 16) + (16 - 5));
+ cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16) + (16 - 5);
+#endif
+
+ for (frame = 0; frame < frame_count; ++frame) {
+ if (cpi->frames[frame] == NULL) continue;
+
+ mbd->block[0].bmi.mv.as_mv.row = 0;
+ mbd->block[0].bmi.mv.as_mv.col = 0;
+
+ if (frame == alt_ref_index) {
+ filter_weight = 2;
+ } else {
+ int err = 0;
+#if ALT_REF_MC_ENABLED
+#define THRESH_LOW 10000
+#define THRESH_HIGH 20000
+ /* Find best match in this frame by MC */
+ err = vp8_temporal_filter_find_matching_mb_c(
+ cpi, cpi->frames[alt_ref_index], cpi->frames[frame], mb_y_offset,
+ THRESH_LOW);
+#endif
+ /* Assign a higher weight to the matching MB if its error
+ * score is lower. If MC is not applied, the default behavior
+ * is to weight all MBs equally.
+ */
+ filter_weight = err < THRESH_LOW ? 2 : err < THRESH_HIGH ? 1 : 0;
+ }
+
+ if (filter_weight != 0) {
+ /* Construct the predictors */
+ vp8_temporal_filter_predictors_mb_c(
+ mbd, cpi->frames[frame]->y_buffer + mb_y_offset,
+ cpi->frames[frame]->u_buffer + mb_uv_offset,
+ cpi->frames[frame]->v_buffer + mb_uv_offset,
+ cpi->frames[frame]->y_stride, mbd->block[0].bmi.mv.as_mv.row,
+ mbd->block[0].bmi.mv.as_mv.col, predictor);
+
+ /* Apply the filter (YUV) */
+ vp8_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
+ predictor, 16, strength, filter_weight,
+ accumulator, count);
+
+ vp8_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
+ predictor + 256, 8, strength, filter_weight,
+ accumulator + 256, count + 256);
+
+ vp8_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
+ predictor + 320, 8, strength, filter_weight,
+ accumulator + 320, count + 320);
+ }
+ }
+
+ /* Normalize filter output to produce AltRef frame */
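+ /* fixed_divide[] acts as a reciprocal table (on the order of
+ * (1 << 19) / count, matching the >> 19 below), so the multiply-and-
+ * shift computes accumulator / count with rounding (the + count/2
+ * term) while avoiding a per-pixel integer divide.
+ */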
+ dst1 = cpi->alt_ref_buffer.y_buffer;
+ stride = cpi->alt_ref_buffer.y_stride;
+ byte = mb_y_offset;
+ for (i = 0, k = 0; i < 16; ++i) {
+ for (j = 0; j < 16; j++, k++) {
+ unsigned int pval = accumulator[k] + (count[k] >> 1);
+ pval *= cpi->fixed_divide[count[k]];
+ pval >>= 19;
+
+ dst1[byte] = (unsigned char)pval;
+
+ /* move to next pixel */
+ byte++;
+ }
+
+ byte += stride - 16;
+ }
+
+ dst1 = cpi->alt_ref_buffer.u_buffer;
+ dst2 = cpi->alt_ref_buffer.v_buffer;
+ stride = cpi->alt_ref_buffer.uv_stride;
+ byte = mb_uv_offset;
+ for (i = 0, k = 256; i < 8; ++i) {
+ for (j = 0; j < 8; j++, k++) {
+ int m = k + 64;
+
+ /* U */
+ unsigned int pval = accumulator[k] + (count[k] >> 1);
+ pval *= cpi->fixed_divide[count[k]];
+ pval >>= 19;
+ dst1[byte] = (unsigned char)pval;
+
+ /* V */
+ pval = accumulator[m] + (count[m] >> 1);
+ pval *= cpi->fixed_divide[count[m]];
+ pval >>= 19;
+ dst2[byte] = (unsigned char)pval;
+
+ /* move to next pixel */
+ byte++;
+ }
+
+ byte += stride - 8;
+ }
+
+ mb_y_offset += 16;
+ mb_uv_offset += 8;
+ }
+
+ mb_y_offset += 16 * (f->y_stride - mb_cols);
+ mb_uv_offset += 8 * (f->uv_stride - mb_cols);
+ }
+
+ /* Restore input state */
+ mbd->pre.y_buffer = y_buffer;
+ mbd->pre.u_buffer = u_buffer;
+ mbd->pre.v_buffer = v_buffer;
+}
+
+void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance) {
+ int frame = 0;
+
+ int num_frames_backward = 0;
+ int num_frames_forward = 0;
+ int frames_to_blur_backward = 0;
+ int frames_to_blur_forward = 0;
+ int frames_to_blur = 0;
+ int start_frame = 0;
+
+ int strength = cpi->oxcf.arnr_strength;
+
+ int blur_type = cpi->oxcf.arnr_type;
+
+ int max_frames = cpi->active_arnr_frames;
+
+ num_frames_backward = distance;
+ num_frames_forward =
+ vp8_lookahead_depth(cpi->lookahead) - (num_frames_backward + 1);
+
+ switch (blur_type) {
+ case 1:
+ /* Backward Blur */
+
+ frames_to_blur_backward = num_frames_backward;
+
+ if (frames_to_blur_backward >= max_frames) {
+ frames_to_blur_backward = max_frames - 1;
+ }
+
+ frames_to_blur = frames_to_blur_backward + 1;
+ break;
+
+ case 2:
+ /* Forward Blur */
+
+ frames_to_blur_forward = num_frames_forward;
+
+ if (frames_to_blur_forward >= max_frames) {
+ frames_to_blur_forward = max_frames - 1;
+ }
+
+ frames_to_blur = frames_to_blur_forward + 1;
+ break;
+
+ case 3:
+ default:
+ /* Center Blur */
+ frames_to_blur_forward = num_frames_forward;
+ frames_to_blur_backward = num_frames_backward;
+
+ if (frames_to_blur_forward > frames_to_blur_backward) {
+ frames_to_blur_forward = frames_to_blur_backward;
+ }
+
+ if (frames_to_blur_backward > frames_to_blur_forward) {
+ frames_to_blur_backward = frames_to_blur_forward;
+ }
+
+ /* When max_frames is even we have 1 more frame backward than forward */
+ if (frames_to_blur_forward > (max_frames - 1) / 2) {
+ frames_to_blur_forward = ((max_frames - 1) / 2);
+ }
+
+ if (frames_to_blur_backward > (max_frames / 2)) {
+ frames_to_blur_backward = (max_frames / 2);
+ }
+
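+ /* E.g. max_frames == 6 allows at most 2 forward and 3 backward frames,
+ * which with the ARF itself gives frames_to_blur == 6 == max_frames.
+ */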
+ frames_to_blur = frames_to_blur_backward + frames_to_blur_forward + 1;
+ break;
+ }
+
+ start_frame = distance + frames_to_blur_forward;
+
+ /* Setup frame pointers, NULL indicates frame not included in filter */
+ memset(cpi->frames, 0, max_frames * sizeof(YV12_BUFFER_CONFIG *));
+ for (frame = 0; frame < frames_to_blur; ++frame) {
+ int which_buffer = start_frame - frame;
+ struct lookahead_entry *buf =
+ vp8_lookahead_peek(cpi->lookahead, which_buffer, PEEK_FORWARD);
+ cpi->frames[frames_to_blur - 1 - frame] = &buf->img;
+ }
+
+ vp8_temporal_filter_iterate_c(cpi, frames_to_blur, frames_to_blur_backward,
+ strength);
+}
+#endif
diff --git a/media/libvpx/libvpx/vp8/encoder/temporal_filter.h b/media/libvpx/libvpx/vp8/encoder/temporal_filter.h
new file mode 100644
index 0000000000..fd39f5cb87
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/temporal_filter.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_TEMPORAL_FILTER_H_
+#define VPX_VP8_ENCODER_TEMPORAL_FILTER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP8_COMP;
+
+void vp8_temporal_filter_prepare_c(struct VP8_COMP *cpi, int distance);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VPX_VP8_ENCODER_TEMPORAL_FILTER_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/tokenize.c b/media/libvpx/libvpx/vp8/encoder/tokenize.c
new file mode 100644
index 0000000000..c3d7026607
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/tokenize.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include "onyx_int.h"
+#include "tokenize.h"
+#include "vpx_mem/vpx_mem.h"
+
+/* Global event counters used for accumulating statistics across several
+ compressions, then generating context.c = initial stats. */
+
+void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
+void vp8_fix_contexts(MACROBLOCKD *x);
+
+#include "dct_value_tokens.h"
+#include "dct_value_cost.h"
+
+const TOKENVALUE *const vp8_dct_value_tokens_ptr =
+ dct_value_tokens + DCT_MAX_VALUE;
+const short *const vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
+
+#if 0
+int skip_true_count = 0;
+int skip_false_count = 0;
+#endif
+
+/* function used to generate dct_value_tokens and dct_value_cost tables */
+/*
+static void fill_value_tokens()
+{
+
+ TOKENVALUE *t = dct_value_tokens + DCT_MAX_VALUE;
+ const vp8_extra_bit_struct *e = vp8_extra_bits;
+
+ int i = -DCT_MAX_VALUE;
+ int sign = 1;
+
+ do
+ {
+ if (!i)
+ sign = 0;
+
+ {
+ const int a = sign ? -i : i;
+ int eb = sign;
+
+ if (a > 4)
+ {
+ int j = 4;
+
+ while (++j < 11 && e[j].base_val <= a) {}
+
+ t[i].Token = --j;
+ eb |= (a - e[j].base_val) << 1;
+ }
+ else
+ t[i].Token = a;
+
+ t[i].Extra = eb;
+ }
+
+ // initialize the cost for extra bits for all possible coefficient
+ // values.
+ {
+ int cost = 0;
+ const vp8_extra_bit_struct *p = vp8_extra_bits + t[i].Token;
+
+ if (p->base_val)
+ {
+ const int extra = t[i].Extra;
+ const int Length = p->Len;
+
+ if (Length)
+ cost += vp8_treed_cost(p->tree, p->prob, extra >> 1, Length);
+
+ cost += vp8_cost_bit(vp8_prob_half, extra & 1); // sign
+ dct_value_cost[i + DCT_MAX_VALUE] = cost;
+ }
+
+ }
+
+ }
+ while (++i < DCT_MAX_VALUE);
+
+ vp8_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
+ vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
+}
+*/
+
+static void tokenize2nd_order_b(MACROBLOCK *x, TOKENEXTRA **tp, VP8_COMP *cpi) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ int pt; /* near block/prev token context index */
+ int c; /* start at DC */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ const BLOCKD *b;
+ const short *qcoeff_ptr;
+ ENTROPY_CONTEXT *a;
+ ENTROPY_CONTEXT *l;
+ int band, rc, v, token;
+ int eob;
+
+ b = xd->block + 24;
+ qcoeff_ptr = b->qcoeff;
+ a = (ENTROPY_CONTEXT *)xd->above_context + 8;
+ l = (ENTROPY_CONTEXT *)xd->left_context + 8;
+ eob = xd->eobs[24];
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ if (!eob) {
+ /* c = band for this case */
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs[1][0][pt];
+ t->skip_eob_node = 0;
+
+ ++x->coef_counts[1][0][pt][DCT_EOB_TOKEN];
+ t++;
+ *tp = t;
+ *a = *l = 0;
+ return;
+ }
+
+ v = qcoeff_ptr[0];
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ t->Token = token;
+
+ t->context_tree = cpi->common.fc.coef_probs[1][0][pt];
+ t->skip_eob_node = 0;
+ ++x->coef_counts[1][0][pt][token];
+ pt = vp8_prev_token_class[token];
+ t++;
+ c = 1;
+
+ for (; c < eob; ++c) {
+ rc = vp8_default_zig_zag1d[c];
+ band = vp8_coef_bands[c];
+ v = qcoeff_ptr[rc];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+
+ t->Token = token;
+ t->context_tree = cpi->common.fc.coef_probs[1][band][pt];
+
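+ /* After a zero token the coefficient tree cannot code an EOB, so the
+    EOB branch is skipped whenever the previous token context pt is 0. */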
+ t->skip_eob_node = (pt == 0);
+
+ ++x->coef_counts[1][band][pt][token];
+
+ pt = vp8_prev_token_class[token];
+ t++;
+ }
+ if (c < 16) {
+ band = vp8_coef_bands[c];
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs[1][band][pt];
+
+ t->skip_eob_node = 0;
+
+ ++x->coef_counts[1][band][pt][DCT_EOB_TOKEN];
+
+ t++;
+ }
+
+ *tp = t;
+ *a = *l = 1;
+}
+
+static void tokenize1st_order_b(
+ MACROBLOCK *x, TOKENEXTRA **tp,
+ int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
+ VP8_COMP *cpi) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ unsigned int block;
+ const BLOCKD *b;
+ int pt; /* near block/prev token context index */
+ int c;
+ int token;
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ const short *qcoeff_ptr;
+ ENTROPY_CONTEXT *a;
+ ENTROPY_CONTEXT *l;
+ int band, rc, v;
+ int tmp1, tmp2;
+
+ b = xd->block;
+ /* Luma */
+ for (block = 0; block < 16; block++, b++) {
+ const int eob = *b->eob;
+ tmp1 = vp8_block2above[block];
+ tmp2 = vp8_block2left[block];
+ qcoeff_ptr = b->qcoeff;
+ a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
+ l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
+
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ c = type ? 0 : 1;
+
+ if (c >= eob) {
+ /* c = band for this case */
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs[type][c][pt];
+ t->skip_eob_node = 0;
+
+ ++x->coef_counts[type][c][pt][DCT_EOB_TOKEN];
+ t++;
+ *tp = t;
+ *a = *l = 0;
+ continue;
+ }
+
+ v = qcoeff_ptr[c];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ t->Token = token;
+
+ t->context_tree = cpi->common.fc.coef_probs[type][c][pt];
+ t->skip_eob_node = 0;
+ ++x->coef_counts[type][c][pt][token];
+ pt = vp8_prev_token_class[token];
+ t++;
+ c++;
+
+ assert(eob <= 16);
+ for (; c < eob; ++c) {
+ rc = vp8_default_zig_zag1d[c];
+ band = vp8_coef_bands[c];
+ v = qcoeff_ptr[rc];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+
+ t->Token = token;
+ t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
+
+ t->skip_eob_node = (pt == 0);
+ ++x->coef_counts[type][band][pt][token];
+
+ pt = vp8_prev_token_class[token];
+ t++;
+ }
+ if (c < 16) {
+ band = vp8_coef_bands[c];
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
+
+ t->skip_eob_node = 0;
+ ++x->coef_counts[type][band][pt][DCT_EOB_TOKEN];
+
+ t++;
+ }
+ *tp = t;
+ *a = *l = 1;
+ }
+
+ /* Chroma */
+ for (block = 16; block < 24; block++, b++) {
+ const int eob = *b->eob;
+ tmp1 = vp8_block2above[block];
+ tmp2 = vp8_block2left[block];
+ qcoeff_ptr = b->qcoeff;
+ a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
+ l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
+
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ if (!eob) {
+ /* c = band for this case */
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs[2][0][pt];
+ t->skip_eob_node = 0;
+
+ ++x->coef_counts[2][0][pt][DCT_EOB_TOKEN];
+ t++;
+ *tp = t;
+ *a = *l = 0;
+ continue;
+ }
+
+ v = qcoeff_ptr[0];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ t->Token = token;
+
+ t->context_tree = cpi->common.fc.coef_probs[2][0][pt];
+ t->skip_eob_node = 0;
+ ++x->coef_counts[2][0][pt][token];
+ pt = vp8_prev_token_class[token];
+ t++;
+ c = 1;
+
+ assert(eob <= 16);
+ for (; c < eob; ++c) {
+ rc = vp8_default_zig_zag1d[c];
+ band = vp8_coef_bands[c];
+ v = qcoeff_ptr[rc];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+
+ t->Token = token;
+ t->context_tree = cpi->common.fc.coef_probs[2][band][pt];
+
+ t->skip_eob_node = (pt == 0);
+
+ ++x->coef_counts[2][band][pt][token];
+
+ pt = vp8_prev_token_class[token];
+ t++;
+ }
+ if (c < 16) {
+ band = vp8_coef_bands[c];
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs[2][band][pt];
+
+ t->skip_eob_node = 0;
+
+ ++x->coef_counts[2][band][pt][DCT_EOB_TOKEN];
+
+ t++;
+ }
+ *tp = t;
+ *a = *l = 1;
+ }
+}
+
+static int mb_is_skippable(MACROBLOCKD *x, int has_y2_block) {
+ int skip = 1;
+ int i = 0;
+
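+ /* With a Y2 block present the luma DC is coded in block 24, so a luma
+    block is skippable when at most its DC coefficient is set (eob < 2). */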
+ if (has_y2_block) {
+ for (i = 0; i < 16; ++i) skip &= (x->eobs[i] < 2);
+ }
+
+ for (; i < 24 + has_y2_block; ++i) skip &= (!x->eobs[i]);
+
+ return skip;
+}
+
+void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ int plane_type;
+ int has_y2_block;
+
+ has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED &&
+ xd->mode_info_context->mbmi.mode != SPLITMV);
+
+ xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(xd, has_y2_block);
+ if (xd->mode_info_context->mbmi.mb_skip_coeff) {
+ if (!cpi->common.mb_no_coeff_skip) {
+ vp8_stuff_mb(cpi, x, t);
+ } else {
+ vp8_fix_contexts(xd);
+ x->skip_true_count++;
+ }
+
+ return;
+ }
+
+ plane_type = 3;
+ if (has_y2_block) {
+ tokenize2nd_order_b(x, t, cpi);
+ plane_type = 0;
+ }
+
+ tokenize1st_order_b(x, t, plane_type, cpi);
+}
+
+static void stuff2nd_order_b(TOKENEXTRA **tp, ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l, VP8_COMP *cpi, MACROBLOCK *x) {
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs[1][0][pt];
+ t->skip_eob_node = 0;
+ ++x->coef_counts[1][0][pt][DCT_EOB_TOKEN];
+ ++t;
+
+ *tp = t;
+ pt = 0;
+ *a = *l = pt;
+}
+
+static void stuff1st_order_b(TOKENEXTRA **tp, ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l, int type, VP8_COMP *cpi,
+ MACROBLOCK *x) {
+ int pt; /* near block/prev token context index */
+ int band;
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ band = type ? 0 : 1;
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
+ t->skip_eob_node = 0;
+ ++x->coef_counts[type][band][pt][DCT_EOB_TOKEN];
+ ++t;
+ *tp = t;
+ pt = 0; /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
+}
+
+static void stuff1st_order_buv(TOKENEXTRA **tp, ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l, VP8_COMP *cpi,
+ MACROBLOCK *x) {
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs[2][0][pt];
+ t->skip_eob_node = 0;
+ ++x->coef_counts[2][0][pt][DCT_EOB_TOKEN];
+ ++t;
+ *tp = t;
+ pt = 0; /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
+}
+
+void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
+ ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
+ int plane_type;
+ int b;
+ plane_type = 3;
+ if ((xd->mode_info_context->mbmi.mode != B_PRED &&
+ xd->mode_info_context->mbmi.mode != SPLITMV)) {
+ stuff2nd_order_b(t, A + vp8_block2above[24], L + vp8_block2left[24], cpi,
+ x);
+ plane_type = 0;
+ }
+
+ for (b = 0; b < 16; ++b) {
+ stuff1st_order_b(t, A + vp8_block2above[b], L + vp8_block2left[b],
+ plane_type, cpi, x);
+ }
+
+ for (b = 16; b < 24; ++b) {
+ stuff1st_order_buv(t, A + vp8_block2above[b], L + vp8_block2left[b], cpi,
+ x);
+ }
+}
+void vp8_fix_contexts(MACROBLOCKD *x) {
+ /* Clear entropy contexts for Y2 blocks */
+ if (x->mode_info_context->mbmi.mode != B_PRED &&
+ x->mode_info_context->mbmi.mode != SPLITMV) {
+ memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+ memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+ } else {
+ memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1);
+ memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1);
+ }
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/tokenize.h b/media/libvpx/libvpx/vp8/encoder/tokenize.h
new file mode 100644
index 0000000000..47b5be17f1
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/tokenize.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_TOKENIZE_H_
+#define VPX_VP8_ENCODER_TOKENIZE_H_
+
+#include "vp8/common/entropy.h"
+#include "block.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp8_tokenize_initialize(void);
+
+typedef struct {
+ short Token;
+ short Extra;
+} TOKENVALUE;
+
+typedef struct {
+ const vp8_prob *context_tree;
+ short Extra;
+ unsigned char Token;
+ unsigned char skip_eob_node;
+} TOKENEXTRA;
+
+int rd_cost_mby(MACROBLOCKD *);
+
+extern const short *const vp8_dct_value_cost_ptr;
+/* TODO: The Token field should be broken out into a separate char array to
+ * improve cache locality, since it's needed for costing when the rest of the
+ * fields are not.
+ */
+extern const TOKENVALUE *const vp8_dct_value_tokens_ptr;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_TOKENIZE_H_
diff --git a/media/libvpx/libvpx/vp8/encoder/treewriter.c b/media/libvpx/libvpx/vp8/encoder/treewriter.c
new file mode 100644
index 0000000000..f055f05229
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/treewriter.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "treewriter.h"
+
+static void cost(int *const C, vp8_tree T, const vp8_prob *const P, int i,
+ int c) {
+ const vp8_prob p = P[i >> 1];
+
+ do {
+ const vp8_tree_index j = T[i];
+ const int d = c + vp8_cost_bit(p, i & 1);
+
+ if (j <= 0) {
+ C[-j] = d;
+ } else {
+ cost(C, T, P, j, d);
+ }
+ } while (++i & 1);
+}
+void vp8_cost_tokens(int *c, const vp8_prob *p, vp8_tree t) {
+ cost(c, t, p, 0, 0);
+}
+void vp8_cost_tokens2(int *c, const vp8_prob *p, vp8_tree t, int start) {
+ cost(c, t, p, start, 0);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/treewriter.h b/media/libvpx/libvpx/vp8/encoder/treewriter.h
new file mode 100644
index 0000000000..4e9ed6af17
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/treewriter.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_ENCODER_TREEWRITER_H_
+#define VPX_VP8_ENCODER_TREEWRITER_H_
+
+/* Trees map alphabets into Huffman-like codes suitable for an arithmetic
+ bit coder. Timothy S Murphy 11 October 2004 */
+
+#include <stdint.h>
+
+#include "./vpx_config.h"
+#include "vp8/common/treecoder.h"
+
+#include "boolhuff.h" /* for now */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef BOOL_CODER vp8_writer;
+
+#define vp8_write vp8_encode_bool
+#define vp8_write_literal vp8_encode_value
+#define vp8_write_bit(W, V) vp8_write(W, V, vp8_prob_half)
+
+#define vp8bc_write vp8bc_write_bool
+#define vp8bc_write_literal vp8bc_write_bits
+#define vp8bc_write_bit(W, V) vp8bc_write_bits(W, V, 1)
+
+/* Approximate length of an encoded bool in 256ths of a bit at given prob */
+
+#define vp8_cost_zero(x) (vp8_prob_cost[x])
+#define vp8_cost_one(x) vp8_cost_zero(vp8_complement(x))
+
+#define vp8_cost_bit(x, b) vp8_cost_zero((b) ? vp8_complement(x) : (x))
+
+/* VP8BC version is scaled by 2^20 rather than 2^8; see bool_coder.h */
+
+/* Both of these return bits, not scaled bits. */
+
+static INLINE unsigned int vp8_cost_branch(const unsigned int ct[2],
+ vp8_prob p) {
+ /* Imitate existing calculation */
+
+ return (unsigned int)(((((uint64_t)ct[0]) * vp8_cost_zero(p)) +
+ (((uint64_t)ct[1]) * vp8_cost_one(p))) >>
+ 8);
+}
+
+/* Small functions to write explicit values and tokens, as well as
+ estimate their lengths. */
+
+static void vp8_treed_write(vp8_writer *const w, vp8_tree t,
+ const vp8_prob *const p, int v,
+ int n) { /* number of bits in v, assumed nonzero */
+ vp8_tree_index i = 0;
+
+ do {
+ const int b = (v >> --n) & 1;
+ vp8_write(w, b, p[i >> 1]);
+ i = t[i + b];
+ } while (n);
+}
+static INLINE void vp8_write_token(vp8_writer *const w, vp8_tree t,
+ const vp8_prob *const p,
+ vp8_token *const x) {
+ vp8_treed_write(w, t, p, x->value, x->Len);
+}
+
+static int vp8_treed_cost(vp8_tree t, const vp8_prob *const p, int v,
+ int n) { /* number of bits in v, assumed nonzero */
+ int c = 0;
+ vp8_tree_index i = 0;
+
+ do {
+ const int b = (v >> --n) & 1;
+ c += vp8_cost_bit(p[i >> 1], b);
+ i = t[i + b];
+ } while (n);
+
+ return c;
+}
+static INLINE int vp8_cost_token(vp8_tree t, const vp8_prob *const p,
+ vp8_token *const x) {
+ return vp8_treed_cost(t, p, x->value, x->Len);
+}
+
+/* Fill array of costs for all possible token values. */
+
+void vp8_cost_tokens(int *c, const vp8_prob *, vp8_tree);
+
+void vp8_cost_tokens2(int *c, const vp8_prob *, vp8_tree, int);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // VPX_VP8_ENCODER_TREEWRITER_H_
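As a usage sketch of the costing helpers above (hypothetical tree and probabilities; leaves are stored as non-positive negated symbol indices, and node i reads probability p[i >> 1]):

    #include "vp8/encoder/treewriter.h"

    /* A 3-symbol tree: node 0 codes symbol 0 or descends to node 2,
       which codes symbol 1 or symbol 2. */
    static const vp8_tree_index kTinyTree[4] = { 0, 2, -1, -2 };

    static int cost_of_symbol2(void) {
      const vp8_prob probs[2] = { 128, 128 }; /* even odds at both nodes */
      int costs[3];
      vp8_cost_tokens(costs, probs, kTinyTree);
      /* Two half-probability branches: roughly 2 bits, i.e. about 512
         in 256ths of a bit. */
      return costs[2];
    }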
diff --git a/media/libvpx/libvpx/vp8/encoder/vp8_quantize.c b/media/libvpx/libvpx/vp8/encoder/vp8_quantize.c
new file mode 100644
index 0000000000..5b89555108
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/vp8_quantize.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include "vpx_mem/vpx_mem.h"
+
+#include "onyx_int.h"
+#include "vp8/encoder/quantize.h"
+#include "vp8/common/quant_common.h"
+
+void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) {
+ int i, rc, eob;
+ int x, y, z, sz;
+ short *coeff_ptr = b->coeff;
+ short *round_ptr = b->round;
+ short *quant_ptr = b->quant_fast;
+ short *qcoeff_ptr = d->qcoeff;
+ short *dqcoeff_ptr = d->dqcoeff;
+ short *dequant_ptr = d->dequant;
+
+ eob = -1;
+ for (i = 0; i < 16; ++i) {
+ rc = vp8_default_zig_zag1d[i];
+ z = coeff_ptr[rc];
+
+ sz = (z >> 31); /* sign of z */
+ x = (z ^ sz) - sz; /* x = abs(z) */
+
+ y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; /* quantize (x) */
+ x = (y ^ sz) - sz; /* get the sign back */
+ qcoeff_ptr[rc] = x; /* write to destination */
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
+
+ if (y) {
+ eob = i; /* last nonzero coeffs */
+ }
+ }
+ *d->eob = (char)(eob + 1);
+}
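A hedged aside on the branchless idiom above: z >> 31 relies on arithmetic right shift of a negative int (implementation-defined in C, but universal on the targets here), and the xor/subtract pair yields the absolute value:

    /* For z = -7: sz = -1, z ^ sz = 6, and 6 - (-1) = 7 = |z|. The same
       transform, (y ^ sz) - sz, restores the sign after quantizing. */
    static int branchless_abs(int z) {
      int sz = z >> 31; /* 0 if z >= 0, -1 otherwise */
      return (z ^ sz) - sz;
    }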
+
+void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d) {
+ int i, rc, eob;
+ int zbin;
+ int x, y, z, sz;
+ short *zbin_boost_ptr = b->zrun_zbin_boost;
+ short *coeff_ptr = b->coeff;
+ short *zbin_ptr = b->zbin;
+ short *round_ptr = b->round;
+ short *quant_ptr = b->quant;
+ short *quant_shift_ptr = b->quant_shift;
+ short *qcoeff_ptr = d->qcoeff;
+ short *dqcoeff_ptr = d->dqcoeff;
+ short *dequant_ptr = d->dequant;
+ short zbin_oq_value = b->zbin_extra;
+
+ memset(qcoeff_ptr, 0, 32);
+ memset(dqcoeff_ptr, 0, 32);
+
+ eob = -1;
+
+ for (i = 0; i < 16; ++i) {
+ rc = vp8_default_zig_zag1d[i];
+ z = coeff_ptr[rc];
+
+ zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
+
+ zbin_boost_ptr++;
+ sz = (z >> 31); /* sign of z */
+ x = (z ^ sz) - sz; /* x = abs(z) */
+
+ if (x >= zbin) {
+ x += round_ptr[rc];
+ y = ((((x * quant_ptr[rc]) >> 16) + x) * quant_shift_ptr[rc]) >>
+ 16; /* quantize (x) */
+ x = (y ^ sz) - sz; /* get the sign back */
+ qcoeff_ptr[rc] = x; /* write to destination */
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
+
+ if (y) {
+ eob = i; /* last nonzero coeffs */
+ zbin_boost_ptr = b->zrun_zbin_boost; /* reset zero runlength */
+ }
+ }
+ }
+
+ *d->eob = (char)(eob + 1);
+}
+
+void vp8_quantize_mby(MACROBLOCK *x) {
+ int i;
+ int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
+ x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+
+ for (i = 0; i < 16; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+
+ if (has_2nd_order) x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
+}
+
+void vp8_quantize_mb(MACROBLOCK *x) {
+ int i;
+ int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
+ x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+
+ for (i = 0; i < 24 + has_2nd_order; ++i) {
+ x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ }
+}
+
+void vp8_quantize_mbuv(MACROBLOCK *x) {
+ int i;
+
+ for (i = 16; i < 24; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+}
+
+static const int qrounding_factors[129] = {
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48
+};
+
+static const int qzbin_factors[129] = {
+ 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80
+};
+
+static const int qrounding_factors_y2[129] = {
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48
+};
+
+static const int qzbin_factors_y2[129] = {
+ 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80
+};
+
+static void invert_quant(int improved_quant, short *quant, short *shift,
+ short d) {
+ if (improved_quant) {
+ unsigned t;
+ int l, m;
+ t = d;
+ for (l = 0; t > 1; ++l) t >>= 1;
+ m = 1 + (1 << (16 + l)) / d;
+ *quant = (short)(m - (1 << 16));
+ *shift = l;
+ /* Store the shift as a multiplier so the quantizer can use a
+ multiplication and a constant shift by 16 instead of a variable shift. */
+ *shift = 1 << (16 - *shift);
+ } else {
+ *quant = (1 << 16) / d;
+ *shift = 0;
+ }
+}
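To make the fixed-point reciprocal concrete: for a step d with 2^l <= d < 2^(l+1), invert_quant() stores quant = m - 2^16 with m = 1 + 2^(16+l)/d, plus a multiplier 1 << (16 - l), so the quantizer divides by d using one multiply and two constant shifts. An illustrative check (sketch only, mirroring the sequence in vp8_regular_quantize_b_c above):

    #include <stdio.h>

    static void demo_invert_quant(void) {
      short quant, shift;
      const short d = 124; /* an arbitrary dequant step */
      invert_quant(1, &quant, &shift, d);
      for (int x = 0; x < 2048; x += 97) {
        /* Adding x back completes the multiply by m = quant + 2^16;
           the final "* shift >> 16" performs the remaining shift by l. */
        const int y = ((((x * quant) >> 16) + x) * shift) >> 16;
        printf("x=%4d  x/d=%2d  approx=%2d\n", x, x / d, y);
      }
    }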
+
+void vp8cx_init_quantizer(VP8_COMP *cpi) {
+ int i;
+ int quant_val;
+ int Q;
+
+ int zbin_boost[16] = { 0, 0, 8, 10, 12, 14, 16, 20,
+ 24, 28, 32, 36, 40, 44, 44, 44 };
+
+ for (Q = 0; Q < QINDEX_RANGE; ++Q) {
+ /* dc values */
+ quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+ cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
+ cpi->Y1quant_shift[Q] + 0, quant_val);
+ cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+ cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
+ cpi->Y2quant_shift[Q] + 0, quant_val);
+ cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+ cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
+ cpi->UVquant_shift[Q] + 0, quant_val);
+ cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ /* all the ac values */
+ quant_val = vp8_ac_yquant(Q);
+ cpi->Y1quant_fast[Q][1] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 1,
+ cpi->Y1quant_shift[Q] + 1, quant_val);
+ cpi->Y1zbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][1] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][1] = (quant_val * zbin_boost[1]) >> 7;
+
+ quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+ cpi->Y2quant_fast[Q][1] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 1,
+ cpi->Y2quant_shift[Q] + 1, quant_val);
+ cpi->Y2zbin[Q][1] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][1] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][1] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][1] = (quant_val * zbin_boost[1]) >> 7;
+
+ quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+ cpi->UVquant_fast[Q][1] = (1 << 16) / quant_val;
+ invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 1,
+ cpi->UVquant_shift[Q] + 1, quant_val);
+ cpi->UVzbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][1] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][1] = (quant_val * zbin_boost[1]) >> 7;
+
+ for (i = 2; i < 16; ++i) {
+ cpi->Y1quant_fast[Q][i] = cpi->Y1quant_fast[Q][1];
+ cpi->Y1quant[Q][i] = cpi->Y1quant[Q][1];
+ cpi->Y1quant_shift[Q][i] = cpi->Y1quant_shift[Q][1];
+ cpi->Y1zbin[Q][i] = cpi->Y1zbin[Q][1];
+ cpi->Y1round[Q][i] = cpi->Y1round[Q][1];
+ cpi->zrun_zbin_boost_y1[Q][i] =
+ (cpi->common.Y1dequant[Q][1] * zbin_boost[i]) >> 7;
+
+ cpi->Y2quant_fast[Q][i] = cpi->Y2quant_fast[Q][1];
+ cpi->Y2quant[Q][i] = cpi->Y2quant[Q][1];
+ cpi->Y2quant_shift[Q][i] = cpi->Y2quant_shift[Q][1];
+ cpi->Y2zbin[Q][i] = cpi->Y2zbin[Q][1];
+ cpi->Y2round[Q][i] = cpi->Y2round[Q][1];
+ cpi->zrun_zbin_boost_y2[Q][i] =
+ (cpi->common.Y2dequant[Q][1] * zbin_boost[i]) >> 7;
+
+ cpi->UVquant_fast[Q][i] = cpi->UVquant_fast[Q][1];
+ cpi->UVquant[Q][i] = cpi->UVquant[Q][1];
+ cpi->UVquant_shift[Q][i] = cpi->UVquant_shift[Q][1];
+ cpi->UVzbin[Q][i] = cpi->UVzbin[Q][1];
+ cpi->UVround[Q][i] = cpi->UVround[Q][1];
+ cpi->zrun_zbin_boost_uv[Q][i] =
+ (cpi->common.UVdequant[Q][1] * zbin_boost[i]) >> 7;
+ }
+ }
+}
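For intuition, the zbin and rounding factor tables above scale the quantizer step in 1/128 units; for example, with quant_val = 60:

    int zbin  = (84 * 60 + 64) >> 7; /* = 39, about 0.66 of the step */
    int round = (48 * 60) >> 7;      /* = 22, about 0.375 of the step */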
+
+#define ZBIN_EXTRA_Y \
+ ((cpi->common.Y1dequant[QIndex][1] * \
+ (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \
+ 7)
+
+#define ZBIN_EXTRA_UV \
+ ((cpi->common.UVdequant[QIndex][1] * \
+ (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \
+ 7)
+
+#define ZBIN_EXTRA_Y2 \
+ ((cpi->common.Y2dequant[QIndex][1] * \
+ ((x->zbin_over_quant / 2) + x->zbin_mode_boost + x->act_zbin_adj)) >> \
+ 7)
+
+void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
+ int i;
+ int QIndex;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int zbin_extra;
+
+ /* Select the baseline MB Q index. */
+ if (xd->segmentation_enabled) {
+ /* Abs Value */
+ if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
+ QIndex = xd->segment_feature_data[MB_LVL_ALT_Q]
+ [xd->mode_info_context->mbmi.segment_id];
+ /* Delta Value */
+ } else {
+ QIndex = cpi->common.base_qindex +
+ xd->segment_feature_data[MB_LVL_ALT_Q]
+ [xd->mode_info_context->mbmi.segment_id];
+ /* Clamp to valid range */
+ QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
+ }
+ } else {
+ QIndex = cpi->common.base_qindex;
+ }
+
+ /* This initialization should be called at least once; ok_to_skip decides
+ * whether it may be skipped.
+ * Before encoding a frame, this function is always called with
+ * ok_to_skip = 0, which means no skipping of calculations. The "last"
+ * values are initialized at that time.
+ */
+ if (!ok_to_skip || QIndex != x->q_index) {
+ xd->dequant_y1_dc[0] = 1;
+ xd->dequant_y1[0] = cpi->common.Y1dequant[QIndex][0];
+ xd->dequant_y2[0] = cpi->common.Y2dequant[QIndex][0];
+ xd->dequant_uv[0] = cpi->common.UVdequant[QIndex][0];
+
+ for (i = 1; i < 16; ++i) {
+ xd->dequant_y1_dc[i] = xd->dequant_y1[i] =
+ cpi->common.Y1dequant[QIndex][1];
+ xd->dequant_y2[i] = cpi->common.Y2dequant[QIndex][1];
+ xd->dequant_uv[i] = cpi->common.UVdequant[QIndex][1];
+ }
+#if 1
+ /*TODO: Remove dequant from BLOCKD. This is a temporary solution until
+ * the quantizer code uses a passed in pointer to the dequant constants.
+ * This will also require modifications to the x86 and neon assembly.
+ * */
+ for (i = 0; i < 16; ++i) x->e_mbd.block[i].dequant = xd->dequant_y1;
+ for (i = 16; i < 24; ++i) x->e_mbd.block[i].dequant = xd->dequant_uv;
+ x->e_mbd.block[24].dequant = xd->dequant_y2;
+#endif
+
+ /* Y */
+ zbin_extra = ZBIN_EXTRA_Y;
+
+ for (i = 0; i < 16; ++i) {
+ x->block[i].quant = cpi->Y1quant[QIndex];
+ x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
+ x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
+ x->block[i].zbin = cpi->Y1zbin[QIndex];
+ x->block[i].round = cpi->Y1round[QIndex];
+ x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ /* UV */
+ zbin_extra = ZBIN_EXTRA_UV;
+
+ for (i = 16; i < 24; ++i) {
+ x->block[i].quant = cpi->UVquant[QIndex];
+ x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
+ x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
+ x->block[i].zbin = cpi->UVzbin[QIndex];
+ x->block[i].round = cpi->UVround[QIndex];
+ x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ /* Y2 */
+ zbin_extra = ZBIN_EXTRA_Y2;
+
+ x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
+ x->block[24].quant = cpi->Y2quant[QIndex];
+ x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
+ x->block[24].zbin = cpi->Y2zbin[QIndex];
+ x->block[24].round = cpi->Y2round[QIndex];
+ x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
+ x->block[24].zbin_extra = (short)zbin_extra;
+
+ /* save this macroblock QIndex for vp8_update_zbin_extra() */
+ x->q_index = QIndex;
+
+ x->last_zbin_over_quant = x->zbin_over_quant;
+ x->last_zbin_mode_boost = x->zbin_mode_boost;
+ x->last_act_zbin_adj = x->act_zbin_adj;
+
+ } else if (x->last_zbin_over_quant != x->zbin_over_quant ||
+ x->last_zbin_mode_boost != x->zbin_mode_boost ||
+ x->last_act_zbin_adj != x->act_zbin_adj) {
+ /* Y */
+ zbin_extra = ZBIN_EXTRA_Y;
+
+ for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;
+
+ /* UV */
+ zbin_extra = ZBIN_EXTRA_UV;
+
+ for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;
+
+ /* Y2 */
+ zbin_extra = ZBIN_EXTRA_Y2;
+ x->block[24].zbin_extra = (short)zbin_extra;
+
+ x->last_zbin_over_quant = x->zbin_over_quant;
+ x->last_zbin_mode_boost = x->zbin_mode_boost;
+ x->last_act_zbin_adj = x->act_zbin_adj;
+ }
+}
+
+void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
+ int i;
+ int QIndex = x->q_index;
+ int zbin_extra;
+
+ /* Y */
+ zbin_extra = ZBIN_EXTRA_Y;
+
+ for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;
+
+ /* UV */
+ zbin_extra = ZBIN_EXTRA_UV;
+
+ for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;
+
+ /* Y2 */
+ zbin_extra = ZBIN_EXTRA_Y2;
+ x->block[24].zbin_extra = (short)zbin_extra;
+}
+#undef ZBIN_EXTRA_Y
+#undef ZBIN_EXTRA_UV
+#undef ZBIN_EXTRA_Y2
+
+void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
+ /* Clear Zbin mode boost for default case */
+ cpi->mb.zbin_mode_boost = 0;
+
+ /* MB level quantizer setup */
+ vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0);
+}
+
+void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *mbd = &cpi->mb.e_mbd;
+ int update = 0;
+ int new_delta_q;
+ int new_uv_delta_q;
+ cm->base_qindex = Q;
+
+ /* If any of the delta_q values change, the update flag has to be set;
+ currently only y2dc_delta_q may change. */
+
+ cm->y1dc_delta_q = 0;
+ cm->y2ac_delta_q = 0;
+
+ if (Q < 4) {
+ new_delta_q = 4 - Q;
+ } else {
+ new_delta_q = 0;
+ }
+
+ update |= cm->y2dc_delta_q != new_delta_q;
+ cm->y2dc_delta_q = new_delta_q;
+
+ new_uv_delta_q = 0;
+ // For screen content, lower the q value for the UV channels. For now,
+ // select a conservative delta: the same delta for dc and ac, decreasing
+ // with lower Q and set to 0 below the threshold (e.g. Q = 60 gives a
+ // delta of -9, and Q >= 100 clamps at -15). This may later be conditioned
+ // on the variance/energy in the UV channels.
+ if (cpi->oxcf.screen_content_mode && Q > 40) {
+ new_uv_delta_q = -(int)(0.15 * Q);
+ // Check range: magnitude of delta is 4 bits.
+ if (new_uv_delta_q < -15) {
+ new_uv_delta_q = -15;
+ }
+ }
+ update |= cm->uvdc_delta_q != new_uv_delta_q;
+ cm->uvdc_delta_q = new_uv_delta_q;
+ cm->uvac_delta_q = new_uv_delta_q;
+
+ /* Set segment-specific quantizers */
+ mbd->segment_feature_data[MB_LVL_ALT_Q][0] =
+ cpi->segment_feature_data[MB_LVL_ALT_Q][0];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][1] =
+ cpi->segment_feature_data[MB_LVL_ALT_Q][1];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][2] =
+ cpi->segment_feature_data[MB_LVL_ALT_Q][2];
+ mbd->segment_feature_data[MB_LVL_ALT_Q][3] =
+ cpi->segment_feature_data[MB_LVL_ALT_Q][3];
+
+ /* quantizer has to be reinitialized for any delta_q changes */
+ if (update) vp8cx_init_quantizer(cpi);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/block_error_sse2.asm b/media/libvpx/libvpx/vp8/encoder/x86/block_error_sse2.asm
new file mode 100644
index 0000000000..200b4ccfe6
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/block_error_sse2.asm
@@ -0,0 +1,188 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+SECTION .text
+
+;int vp8_block_error_sse2(short *coeff_ptr, short *dcoef_ptr)
+globalsym(vp8_block_error_sse2)
+sym(vp8_block_error_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 2
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;coeff_ptr
+ mov rdi, arg(1) ;dcoef_ptr
+
+ movdqa xmm0, [rsi]
+ movdqa xmm1, [rdi]
+
+ movdqa xmm2, [rsi+16]
+ movdqa xmm3, [rdi+16]
+
+ psubw xmm0, xmm1
+ psubw xmm2, xmm3
+
+ pmaddwd xmm0, xmm0
+ pmaddwd xmm2, xmm2
+
+ paddd xmm0, xmm2
+
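+ ; Horizontal reduction: fold the four dword partial sums in xmm0
+ ; down to the single scalar error returned in rax.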
+ pxor xmm5, xmm5
+ movdqa xmm1, xmm0
+
+ punpckldq xmm0, xmm5
+ punpckhdq xmm1, xmm5
+
+ paddd xmm0, xmm1
+ movdqa xmm1, xmm0
+
+ psrldq xmm0, 8
+ paddd xmm0, xmm1
+
+ movq rax, xmm0
+
+ pop rdi
+ pop rsi
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;int vp8_mbblock_error_sse2_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
+globalsym(vp8_mbblock_error_sse2_impl)
+sym(vp8_mbblock_error_sse2_impl):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 3
+ SAVE_XMM 6
+ push rsi
+ push rdi
+ ; end prolog
+
+
+ mov rsi, arg(0) ;coeff_ptr
+ pxor xmm6, xmm6
+
+ mov rdi, arg(1) ;dcoef_ptr
+ pxor xmm4, xmm4
+
+ movd xmm5, dword ptr arg(2) ;dc
+ por xmm5, xmm4
+
+ pcmpeqw xmm5, xmm6
+ mov rcx, 16
+
+.mberror_loop:
+ movdqa xmm0, [rsi]
+ movdqa xmm1, [rdi]
+
+ movdqa xmm2, [rsi+16]
+ movdqa xmm3, [rdi+16]
+
+
+ psubw xmm2, xmm3
+ pmaddwd xmm2, xmm2
+
+ psubw xmm0, xmm1
+ pand xmm0, xmm5
+
+ pmaddwd xmm0, xmm0
+ add rsi, 32
+
+ add rdi, 32
+
+ sub rcx, 1
+ paddd xmm4, xmm2
+
+ paddd xmm4, xmm0
+ jnz .mberror_loop
+
+ movdqa xmm0, xmm4
+ punpckldq xmm0, xmm6
+
+ punpckhdq xmm4, xmm6
+ paddd xmm0, xmm4
+
+ movdqa xmm1, xmm0
+ psrldq xmm0, 8
+
+ paddd xmm0, xmm1
+ movq rax, xmm0
+
+ pop rdi
+ pop rsi
+ ; begin epilog
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;int vp8_mbuverror_sse2_impl(short *s_ptr, short *d_ptr);
+globalsym(vp8_mbuverror_sse2_impl)
+sym(vp8_mbuverror_sse2_impl):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 2
+ push rsi
+ push rdi
+ ; end prolog
+
+
+ mov rsi, arg(0) ;s_ptr
+ mov rdi, arg(1) ;d_ptr
+
+ mov rcx, 16
+ pxor xmm3, xmm3
+
+.mbuverror_loop:
+
+ movdqa xmm1, [rsi]
+ movdqa xmm2, [rdi]
+
+ psubw xmm1, xmm2
+ pmaddwd xmm1, xmm1
+
+ paddd xmm3, xmm1
+
+ add rsi, 16
+ add rdi, 16
+
+ dec rcx
+ jnz .mbuverror_loop
+
+ pxor xmm0, xmm0
+ movdqa xmm1, xmm3
+
+ movdqa xmm2, xmm1
+ punpckldq xmm1, xmm0
+
+ punpckhdq xmm2, xmm0
+ paddd xmm1, xmm2
+
+ movdqa xmm2, xmm1
+
+ psrldq xmm1, 8
+ paddd xmm1, xmm2
+
+ movq rax, xmm1
+
+ pop rdi
+ pop rsi
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/copy_sse2.asm b/media/libvpx/libvpx/vp8/encoder/x86/copy_sse2.asm
new file mode 100644
index 0000000000..fe78da398e
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/copy_sse2.asm
@@ -0,0 +1,94 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+SECTION .text
+
+;void vp8_copy32xn_sse2(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *dst_ptr,
+; int dst_stride,
+; int height);
+globalsym(vp8_copy32xn_sse2)
+sym(vp8_copy32xn_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;dst_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;dst_stride
+ movsxd rcx, dword ptr arg(4) ;height
+
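+ ; Copy four rows of 32 bytes per iteration; the single-row loop below
+ ; handles the remaining height % 4 rows.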
+.block_copy_sse2_loopx4:
+ movdqu xmm0, XMMWORD PTR [rsi]
+ movdqu xmm1, XMMWORD PTR [rsi + 16]
+ movdqu xmm2, XMMWORD PTR [rsi + rax]
+ movdqu xmm3, XMMWORD PTR [rsi + rax + 16]
+
+ lea rsi, [rsi+rax*2]
+
+ movdqu xmm4, XMMWORD PTR [rsi]
+ movdqu xmm5, XMMWORD PTR [rsi + 16]
+ movdqu xmm6, XMMWORD PTR [rsi + rax]
+ movdqu xmm7, XMMWORD PTR [rsi + rax + 16]
+
+ lea rsi, [rsi+rax*2]
+
+ movdqa XMMWORD PTR [rdi], xmm0
+ movdqa XMMWORD PTR [rdi + 16], xmm1
+ movdqa XMMWORD PTR [rdi + rdx], xmm2
+ movdqa XMMWORD PTR [rdi + rdx + 16], xmm3
+
+ lea rdi, [rdi+rdx*2]
+
+ movdqa XMMWORD PTR [rdi], xmm4
+ movdqa XMMWORD PTR [rdi + 16], xmm5
+ movdqa XMMWORD PTR [rdi + rdx], xmm6
+ movdqa XMMWORD PTR [rdi + rdx + 16], xmm7
+
+ lea rdi, [rdi+rdx*2]
+
+ sub rcx, 4
+ cmp rcx, 4
+ jge .block_copy_sse2_loopx4
+
+ cmp rcx, 0
+ je .copy_is_done
+
+.block_copy_sse2_loop:
+ movdqu xmm0, XMMWORD PTR [rsi]
+ movdqu xmm1, XMMWORD PTR [rsi + 16]
+ lea rsi, [rsi+rax]
+
+ movdqa XMMWORD PTR [rdi], xmm0
+ movdqa XMMWORD PTR [rdi + 16], xmm1
+ lea rdi, [rdi+rdx]
+
+ sub rcx, 1
+ jne .block_copy_sse2_loop
+
+.copy_is_done:
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/copy_sse3.asm b/media/libvpx/libvpx/vp8/encoder/x86/copy_sse3.asm
new file mode 100644
index 0000000000..c40b2d8bf6
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/copy_sse3.asm
@@ -0,0 +1,147 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%macro STACK_FRAME_CREATE_X3 0
+%if ABI_IS_32BIT
+ %define src_ptr rsi
+ %define src_stride rax
+ %define ref_ptr rdi
+ %define ref_stride rdx
+ %define end_ptr rcx
+ %define ret_var rbx
+ %define result_ptr arg(4)
+ %define max_sad arg(4)
+ %define height dword ptr arg(4)
+ push rbp
+ mov rbp, rsp
+ push rsi
+ push rdi
+ push rbx
+
+ mov rsi, arg(0) ; src_ptr
+ mov rdi, arg(2) ; ref_ptr
+
+ movsxd rax, dword ptr arg(1) ; src_stride
+ movsxd rdx, dword ptr arg(3) ; ref_stride
+%else
+ %if LIBVPX_YASM_WIN64
+ SAVE_XMM 7, u
+ %define src_ptr rcx
+ %define src_stride rdx
+ %define ref_ptr r8
+ %define ref_stride r9
+ %define end_ptr r10
+ %define ret_var r11
+ %define result_ptr [rsp+xmm_stack_space+8+4*8]
+ %define max_sad [rsp+xmm_stack_space+8+4*8]
+ %define height dword ptr [rsp+xmm_stack_space+8+4*8]
+ %else
+ %define src_ptr rdi
+ %define src_stride rsi
+ %define ref_ptr rdx
+ %define ref_stride rcx
+ %define end_ptr r9
+ %define ret_var r10
+ %define result_ptr r8
+ %define max_sad r8
+ %define height r8
+ %endif
+%endif
+
+%endmacro
+
+%macro STACK_FRAME_DESTROY_X3 0
+ %define src_ptr
+ %define src_stride
+ %define ref_ptr
+ %define ref_stride
+ %define end_ptr
+ %define ret_var
+ %define result_ptr
+ %define max_sad
+ %define height
+
+%if ABI_IS_32BIT
+ pop rbx
+ pop rdi
+ pop rsi
+ pop rbp
+%else
+ %if LIBVPX_YASM_WIN64
+ RESTORE_XMM
+ %endif
+%endif
+ ret
+%endmacro
+
+SECTION .text
+
+;void vp8_copy32xn_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *dst_ptr,
+; int dst_stride,
+; int height);
+globalsym(vp8_copy32xn_sse3)
+sym(vp8_copy32xn_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+.block_copy_sse3_loopx4:
+ lea end_ptr, [src_ptr+src_stride*2]
+
+ movdqu xmm0, XMMWORD PTR [src_ptr]
+ movdqu xmm1, XMMWORD PTR [src_ptr + 16]
+ movdqu xmm2, XMMWORD PTR [src_ptr + src_stride]
+ movdqu xmm3, XMMWORD PTR [src_ptr + src_stride + 16]
+ movdqu xmm4, XMMWORD PTR [end_ptr]
+ movdqu xmm5, XMMWORD PTR [end_ptr + 16]
+ movdqu xmm6, XMMWORD PTR [end_ptr + src_stride]
+ movdqu xmm7, XMMWORD PTR [end_ptr + src_stride + 16]
+
+ lea src_ptr, [src_ptr+src_stride*4]
+
+ lea end_ptr, [ref_ptr+ref_stride*2]
+
+ movdqa XMMWORD PTR [ref_ptr], xmm0
+ movdqa XMMWORD PTR [ref_ptr + 16], xmm1
+ movdqa XMMWORD PTR [ref_ptr + ref_stride], xmm2
+ movdqa XMMWORD PTR [ref_ptr + ref_stride + 16], xmm3
+ movdqa XMMWORD PTR [end_ptr], xmm4
+ movdqa XMMWORD PTR [end_ptr + 16], xmm5
+ movdqa XMMWORD PTR [end_ptr + ref_stride], xmm6
+ movdqa XMMWORD PTR [end_ptr + ref_stride + 16], xmm7
+
+ lea ref_ptr, [ref_ptr+ref_stride*4]
+
+ sub height, 4
+ cmp height, 4
+ jge .block_copy_sse3_loopx4
+
+ ;Check to see if there are more rows to be copied.
+ cmp height, 0
+ je .copy_is_done
+
+.block_copy_sse3_loop:
+ movdqu xmm0, XMMWORD PTR [src_ptr]
+ movdqu xmm1, XMMWORD PTR [src_ptr + 16]
+ lea src_ptr, [src_ptr+src_stride]
+
+ movdqa XMMWORD PTR [ref_ptr], xmm0
+ movdqa XMMWORD PTR [ref_ptr + 16], xmm1
+ lea ref_ptr, [ref_ptr+ref_stride]
+
+ sub height, 1
+ jne .block_copy_sse3_loop
+
+.copy_is_done:
+ STACK_FRAME_DESTROY_X3
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/dct_sse2.asm b/media/libvpx/libvpx/vp8/encoder/x86/dct_sse2.asm
new file mode 100644
index 0000000000..3c28cb902e
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/dct_sse2.asm
@@ -0,0 +1,434 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%macro STACK_FRAME_CREATE 0
+%if ABI_IS_32BIT
+ %define input rsi
+ %define output rdi
+ %define pitch rax
+ push rbp
+ mov rbp, rsp
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0)
+ mov rdi, arg(1)
+
+ movsxd rax, dword ptr arg(2)
+ lea rcx, [rsi + rax*2]
+%else
+ %if LIBVPX_YASM_WIN64
+ %define input rcx
+ %define output rdx
+ %define pitch r8
+ SAVE_XMM 7, u
+ %else
+ %define input rdi
+ %define output rsi
+ %define pitch rdx
+ %endif
+%endif
+%endmacro
+
+%macro STACK_FRAME_DESTROY 0
+ %define input
+ %define output
+ %define pitch
+
+%if ABI_IS_32BIT
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ pop rbp
+%else
+ %if LIBVPX_YASM_WIN64
+ RESTORE_XMM
+ %endif
+%endif
+ ret
+%endmacro
+
+SECTION .text
+
+;void vp8_short_fdct4x4_sse2(short *input, short *output, int pitch)
+globalsym(vp8_short_fdct4x4_sse2)
+sym(vp8_short_fdct4x4_sse2):
+
+ STACK_FRAME_CREATE
+
+ movq xmm0, MMWORD PTR[input ] ;03 02 01 00
+ movq xmm2, MMWORD PTR[input+ pitch] ;13 12 11 10
+ lea input, [input+2*pitch]
+ movq xmm1, MMWORD PTR[input ] ;23 22 21 20
+ movq xmm3, MMWORD PTR[input+ pitch] ;33 32 31 30
+
+ punpcklqdq xmm0, xmm2 ;13 12 11 10 03 02 01 00
+ punpcklqdq xmm1, xmm3 ;33 32 31 30 23 22 21 20
+
+ movdqa xmm2, xmm0
+ punpckldq xmm0, xmm1 ;23 22 03 02 21 20 01 00
+ punpckhdq xmm2, xmm1 ;33 32 13 12 31 30 11 10
+ movdqa xmm1, xmm0
+ punpckldq xmm0, xmm2 ;31 21 30 20 11 10 01 00
+ pshufhw xmm1, xmm1, 0b1h ;22 23 02 03 xx xx xx xx
+ pshufhw xmm2, xmm2, 0b1h ;32 33 12 13 xx xx xx xx
+
+ punpckhdq xmm1, xmm2 ;32 33 22 23 12 13 02 03
+ movdqa xmm3, xmm0
+ paddw xmm0, xmm1 ;b1 a1 b1 a1 b1 a1 b1 a1
+ psubw xmm3, xmm1 ;c1 d1 c1 d1 c1 d1 c1 d1
+ psllw xmm0, 3 ;b1 <<= 3 a1 <<= 3
+ psllw xmm3, 3 ;c1 <<= 3 d1 <<= 3
+
+ movdqa xmm1, xmm0
+ pmaddwd xmm0, XMMWORD PTR[GLOBAL(_mult_add)] ;a1 + b1
+ pmaddwd xmm1, XMMWORD PTR[GLOBAL(_mult_sub)] ;a1 - b1
+ movdqa xmm4, xmm3
+ pmaddwd xmm3, XMMWORD PTR[GLOBAL(_5352_2217)] ;c1*2217 + d1*5352
+ pmaddwd xmm4, XMMWORD PTR[GLOBAL(_2217_neg5352)];d1*2217 - c1*5352
+
+ paddd xmm3, XMMWORD PTR[GLOBAL(_14500)]
+ paddd xmm4, XMMWORD PTR[GLOBAL(_7500)]
+ psrad xmm3, 12 ;(c1 * 2217 + d1 * 5352 + 14500)>>12
+ psrad xmm4, 12 ;(d1 * 2217 - c1 * 5352 + 7500)>>12
+
+ packssdw xmm0, xmm1 ;op[2] op[0]
+ packssdw xmm3, xmm4 ;op[3] op[1]
+ ; 23 22 21 20 03 02 01 00
+ ;
+ ; 33 32 31 30 13 12 11 10
+ ;
+ movdqa xmm2, xmm0
+ punpcklqdq xmm0, xmm3 ;13 12 11 10 03 02 01 00
+ punpckhqdq xmm2, xmm3 ;23 22 21 20 33 32 31 30
+
+ movdqa xmm3, xmm0
+ punpcklwd xmm0, xmm2 ;32 30 22 20 12 10 02 00
+ punpckhwd xmm3, xmm2 ;33 31 23 21 13 11 03 01
+ movdqa xmm2, xmm0
+ punpcklwd xmm0, xmm3 ;13 12 11 10 03 02 01 00
+ punpckhwd xmm2, xmm3 ;33 32 31 30 23 22 21 20
+
+ movdqa xmm5, XMMWORD PTR[GLOBAL(_7)]
+ pshufd xmm2, xmm2, 04eh
+ movdqa xmm3, xmm0
+ paddw xmm0, xmm2 ;b1 b1 b1 b1 a1 a1 a1 a1
+ psubw xmm3, xmm2 ;c1 c1 c1 c1 d1 d1 d1 d1
+
+ pshufd xmm0, xmm0, 0d8h ;b1 b1 a1 a1 b1 b1 a1 a1
+ movdqa xmm2, xmm3 ;save d1 for compare
+ pshufd xmm3, xmm3, 0d8h ;c1 c1 d1 d1 c1 c1 d1 d1
+ pshuflw xmm0, xmm0, 0d8h ;b1 b1 a1 a1 b1 a1 b1 a1
+ pshuflw xmm3, xmm3, 0d8h ;c1 c1 d1 d1 c1 d1 c1 d1
+ pshufhw xmm0, xmm0, 0d8h ;b1 a1 b1 a1 b1 a1 b1 a1
+ pshufhw xmm3, xmm3, 0d8h ;c1 d1 c1 d1 c1 d1 c1 d1
+ movdqa xmm1, xmm0
+ pmaddwd xmm0, XMMWORD PTR[GLOBAL(_mult_add)] ;a1 + b1
+ pmaddwd xmm1, XMMWORD PTR[GLOBAL(_mult_sub)] ;a1 - b1
+
+ pxor xmm4, xmm4 ;zero out for compare
+ paddd xmm0, xmm5
+ paddd xmm1, xmm5
+ pcmpeqw xmm2, xmm4
+ psrad xmm0, 4 ;(a1 + b1 + 7)>>4
+ psrad xmm1, 4 ;(a1 - b1 + 7)>>4
+ pandn xmm2, XMMWORD PTR[GLOBAL(_cmp_mask)] ;clear upper,
+ ;and keep bit 0 of lower
+
+ movdqa xmm4, xmm3
+ pmaddwd xmm3, XMMWORD PTR[GLOBAL(_5352_2217)] ;c1*2217 + d1*5352
+ pmaddwd xmm4, XMMWORD PTR[GLOBAL(_2217_neg5352)] ;d1*2217 - c1*5352
+ paddd xmm3, XMMWORD PTR[GLOBAL(_12000)]
+ paddd xmm4, XMMWORD PTR[GLOBAL(_51000)]
+ packssdw xmm0, xmm1 ;op[8] op[0]
+ psrad xmm3, 16 ;(c1 * 2217 + d1 * 5352 + 12000)>>16
+ psrad xmm4, 16 ;(d1 * 2217 - c1 * 5352 + 51000)>>16
+
+ packssdw xmm3, xmm4 ;op[12] op[4]
+ movdqa xmm1, xmm0
+ paddw xmm3, xmm2 ;op[4] += (d1!=0)
+ punpcklqdq xmm0, xmm3 ;op[4] op[0]
+ punpckhqdq xmm1, xmm3 ;op[12] op[8]
+
+ movdqa XMMWORD PTR[output + 0], xmm0
+ movdqa XMMWORD PTR[output + 16], xmm1
+
+ STACK_FRAME_DESTROY
+
+;void vp8_short_fdct8x4_sse2(short *input, short *output, int pitch)
+globalsym(vp8_short_fdct8x4_sse2)
+sym(vp8_short_fdct8x4_sse2):
+
+ STACK_FRAME_CREATE
+
+ ; read the input data
+ movdqa xmm0, [input ]
+ movdqa xmm2, [input+ pitch]
+ lea input, [input+2*pitch]
+ movdqa xmm4, [input ]
+ movdqa xmm3, [input+ pitch]
+
+ ; transpose for the first stage
+ movdqa xmm1, xmm0 ; 00 01 02 03 04 05 06 07
+ movdqa xmm5, xmm4 ; 20 21 22 23 24 25 26 27
+
+ punpcklwd xmm0, xmm2 ; 00 10 01 11 02 12 03 13
+ punpckhwd xmm1, xmm2 ; 04 14 05 15 06 16 07 17
+
+ punpcklwd xmm4, xmm3 ; 20 30 21 31 22 32 23 33
+ punpckhwd xmm5, xmm3 ; 24 34 25 35 26 36 27 37
+
+ movdqa xmm2, xmm0 ; 00 10 01 11 02 12 03 13
+ punpckldq xmm0, xmm4 ; 00 10 20 30 01 11 21 31
+
+ punpckhdq xmm2, xmm4 ; 02 12 22 32 03 13 23 33
+
+ movdqa xmm4, xmm1 ; 04 14 05 15 06 16 07 17
+ punpckldq xmm4, xmm5 ; 04 14 24 34 05 15 25 35
+
+ punpckhdq xmm1, xmm5 ; 06 16 26 36 07 17 27 37
+ movdqa xmm3, xmm2 ; 02 12 22 32 03 13 23 33
+
+ punpckhqdq xmm3, xmm1 ; 03 13 23 33 07 17 27 37
+ punpcklqdq xmm2, xmm1 ; 02 12 22 32 06 16 26 36
+
+ movdqa xmm1, xmm0 ; 00 10 20 30 01 11 21 31
+ punpcklqdq xmm0, xmm4 ; 00 10 20 30 04 14 24 34
+
+ punpckhqdq xmm1, xmm4 ; 01 11 21 31 05 15 25 35
+
+ ; xmm0 0
+ ; xmm1 1
+ ; xmm2 2
+ ; xmm3 3
+
+ ; first stage
+ movdqa xmm5, xmm0
+ movdqa xmm4, xmm1
+
+ paddw xmm0, xmm3 ; a1 = 0 + 3
+ paddw xmm1, xmm2 ; b1 = 1 + 2
+
+ psubw xmm4, xmm2 ; c1 = 1 - 2
+ psubw xmm5, xmm3 ; d1 = 0 - 3
+
+ psllw xmm5, 3
+ psllw xmm4, 3
+
+ psllw xmm0, 3
+ psllw xmm1, 3
+
+ ; output 0 and 2
+ movdqa xmm2, xmm0 ; a1
+
+ paddw xmm0, xmm1 ; op[0] = a1 + b1
+ psubw xmm2, xmm1 ; op[2] = a1 - b1
+
+ ; output 1 and 3
+ ; interleave c1, d1
+ movdqa xmm1, xmm5 ; d1
+ punpcklwd xmm1, xmm4 ; c1 d1
+ punpckhwd xmm5, xmm4 ; c1 d1
+
+ movdqa xmm3, xmm1
+ movdqa xmm4, xmm5
+
+ pmaddwd xmm1, XMMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
+ pmaddwd xmm4, XMMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
+
+ pmaddwd xmm3, XMMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
+ pmaddwd xmm5, XMMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
+
+ paddd xmm1, XMMWORD PTR[GLOBAL(_14500)]
+ paddd xmm4, XMMWORD PTR[GLOBAL(_14500)]
+ paddd xmm3, XMMWORD PTR[GLOBAL(_7500)]
+ paddd xmm5, XMMWORD PTR[GLOBAL(_7500)]
+
+ psrad xmm1, 12 ; (c1 * 2217 + d1 * 5352 + 14500)>>12
+ psrad xmm4, 12 ; (c1 * 2217 + d1 * 5352 + 14500)>>12
+ psrad xmm3, 12 ; (d1 * 2217 - c1 * 5352 + 7500)>>12
+ psrad xmm5, 12 ; (d1 * 2217 - c1 * 5352 + 7500)>>12
+
+ packssdw xmm1, xmm4 ; op[1]
+ packssdw xmm3, xmm5 ; op[3]
+
+ ; done with vertical
+ ; transpose for the second stage
+ movdqa xmm4, xmm0 ; 00 10 20 30 04 14 24 34
+ movdqa xmm5, xmm2 ; 02 12 22 32 06 16 26 36
+
+ punpcklwd xmm0, xmm1 ; 00 01 10 11 20 21 30 31
+ punpckhwd xmm4, xmm1 ; 04 05 14 15 24 25 34 35
+
+ punpcklwd xmm2, xmm3 ; 02 03 12 13 22 23 32 33
+ punpckhwd xmm5, xmm3 ; 06 07 16 17 26 27 36 37
+
+ movdqa xmm1, xmm0 ; 00 01 10 11 20 21 30 31
+ punpckldq xmm0, xmm2 ; 00 01 02 03 10 11 12 13
+
+ punpckhdq xmm1, xmm2 ; 20 21 22 23 30 31 32 33
+
+ movdqa xmm2, xmm4 ; 04 05 14 15 24 25 34 35
+ punpckldq xmm2, xmm5 ; 04 05 06 07 14 15 16 17
+
+ punpckhdq xmm4, xmm5 ; 24 25 26 27 34 35 36 37
+ movdqa xmm3, xmm1 ; 20 21 22 23 30 31 32 33
+
+ punpckhqdq xmm3, xmm4 ; 30 31 32 33 34 35 36 37
+ punpcklqdq xmm1, xmm4 ; 20 21 22 23 24 25 26 27
+
+ movdqa xmm4, xmm0 ; 00 01 02 03 10 11 12 13
+ punpcklqdq xmm0, xmm2 ; 00 01 02 03 04 05 06 07
+
+ punpckhqdq xmm4, xmm2 ; 10 11 12 13 14 15 16 17
+
+ ; xmm0 0
+ ; xmm1 2
+ ; xmm3 3
+ ; xmm4 1
+
+ movdqa xmm5, xmm0
+ movdqa xmm2, xmm1
+
+ paddw xmm0, xmm3 ; a1 = 0 + 3
+ paddw xmm1, xmm4 ; b1 = 1 + 2
+
+ psubw xmm4, xmm2 ; c1 = 1 - 2
+ psubw xmm5, xmm3 ; d1 = 0 - 3
+
+ pxor xmm6, xmm6 ; zero out for compare
+
+ pcmpeqw xmm6, xmm5 ; d1 != 0
+
+ pandn xmm6, XMMWORD PTR[GLOBAL(_cmp_mask8x4)] ; clear upper,
+ ; and keep bit 0 of lower
+
+ ; output 0 and 2
+ movdqa xmm2, xmm0 ; a1
+
+ paddw xmm0, xmm1 ; a1 + b1
+ psubw xmm2, xmm1 ; a1 - b1
+
+ paddw xmm0, XMMWORD PTR[GLOBAL(_7w)]
+ paddw xmm2, XMMWORD PTR[GLOBAL(_7w)]
+
+ psraw xmm0, 4 ; op[0] = (a1 + b1 + 7)>>4
+ psraw xmm2, 4 ; op[8] = (a1 - b1 + 7)>>4
+
+ ; output 1 and 3
+ ; interleave c1, d1
+ movdqa xmm1, xmm5 ; d1
+ punpcklwd xmm1, xmm4 ; c1 d1
+ punpckhwd xmm5, xmm4 ; c1 d1
+
+ movdqa xmm3, xmm1
+ movdqa xmm4, xmm5
+
+ pmaddwd xmm1, XMMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
+ pmaddwd xmm4, XMMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
+
+ pmaddwd xmm3, XMMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
+ pmaddwd xmm5, XMMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
+
+ paddd xmm1, XMMWORD PTR[GLOBAL(_12000)]
+ paddd xmm4, XMMWORD PTR[GLOBAL(_12000)]
+ paddd xmm3, XMMWORD PTR[GLOBAL(_51000)]
+ paddd xmm5, XMMWORD PTR[GLOBAL(_51000)]
+
+ psrad xmm1, 16 ; (c1 * 2217 + d1 * 5352 + 12000)>>16
+ psrad xmm4, 16 ; (c1 * 2217 + d1 * 5352 + 12000)>>16
+ psrad xmm3, 16 ; (d1 * 2217 - c1 * 5352 + 51000)>>16
+ psrad xmm5, 16 ; (d1 * 2217 - c1 * 5352 + 51000)>>16
+
+ packssdw xmm1, xmm4 ; op[4]
+ packssdw xmm3, xmm5 ; op[12]
+
+ paddw xmm1, xmm6 ; op[4] += (d1!=0)
+
+ movdqa xmm4, xmm0
+ movdqa xmm5, xmm2
+
+ punpcklqdq xmm0, xmm1
+ punpckhqdq xmm4, xmm1
+
+ punpcklqdq xmm2, xmm3
+ punpckhqdq xmm5, xmm3
+
+ movdqa XMMWORD PTR[output + 0 ], xmm0
+ movdqa XMMWORD PTR[output + 16], xmm2
+ movdqa XMMWORD PTR[output + 32], xmm4
+ movdqa XMMWORD PTR[output + 48], xmm5
+
+ STACK_FRAME_DESTROY
+
+SECTION_RODATA
+align 16
+_5352_2217:
+ dw 5352
+ dw 2217
+ dw 5352
+ dw 2217
+ dw 5352
+ dw 2217
+ dw 5352
+ dw 2217
+align 16
+_2217_neg5352:
+ dw 2217
+ dw -5352
+ dw 2217
+ dw -5352
+ dw 2217
+ dw -5352
+ dw 2217
+ dw -5352
+align 16
+_mult_add:
+ times 8 dw 1
+align 16
+_cmp_mask:
+ times 4 dw 1
+ times 4 dw 0
+align 16
+_cmp_mask8x4:
+ times 8 dw 1
+align 16
+_mult_sub:
+ dw 1
+ dw -1
+ dw 1
+ dw -1
+ dw 1
+ dw -1
+ dw 1
+ dw -1
+align 16
+_7:
+ times 4 dd 7
+align 16
+_7w:
+ times 8 dw 7
+align 16
+_14500:
+ times 4 dd 14500
+align 16
+_7500:
+ times 4 dd 7500
+align 16
+_12000:
+ times 4 dd 12000
+align 16
+_51000:
+ times 4 dd 51000
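For reference, 5352 and 2217 above are Q12 fixed-point rotation factors, approximately 4096*sqrt(2)*cos(pi/8) and 4096*sqrt(2)*sin(pi/8); the 14500/7500 and 12000/51000 constants are the rounding biases applied before the first-pass >>12 and second-pass >>16 shifts, matching the C reference vp8_short_fdct4x4_c.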
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/denoising_sse2.c b/media/libvpx/libvpx/vp8/encoder/x86/denoising_sse2.c
new file mode 100644
index 0000000000..f35b930169
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/denoising_sse2.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp8/encoder/denoising.h"
+#include "vp8/common/reconinter.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8_rtcd.h"
+
+#include <emmintrin.h>
+#include "vpx_ports/emmintrin_compat.h"
+
+/* Compute the sum of all pixel differences of this MB. */
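+/* The unpack-with-self plus arithmetic >>8 pair below sign-extends each
+ * signed byte of acc_diff to 16 bits; madd against 1 then pairs words into
+ * dword sums, and the two srli/add steps finish the horizontal reduction. */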
+static INLINE unsigned int abs_sum_diff_16x1(__m128i acc_diff) {
+ const __m128i k_1 = _mm_set1_epi16(1);
+ const __m128i acc_diff_lo =
+ _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8);
+ const __m128i acc_diff_hi =
+ _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8);
+ const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi);
+ const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1);
+ const __m128i hgfe_dcba =
+ _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8));
+ const __m128i hgfedcba =
+ _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4));
+ unsigned int sum_diff = (unsigned int)abs(_mm_cvtsi128_si32(hgfedcba));
+
+ return sum_diff;
+}
+
+int vp8_denoiser_filter_sse2(unsigned char *mc_running_avg_y,
+ int mc_avg_y_stride, unsigned char *running_avg_y,
+ int avg_y_stride, unsigned char *sig,
+ int sig_stride, unsigned int motion_magnitude,
+ int increase_denoising) {
+ unsigned char *running_avg_y_start = running_avg_y;
+ unsigned char *sig_start = sig;
+ unsigned int sum_diff_thresh;
+ int r;
+ int shift_inc =
+ (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
+ ? 1
+ : 0;
+ __m128i acc_diff = _mm_setzero_si128();
+ const __m128i k_0 = _mm_setzero_si128();
+ const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
+ const __m128i k_8 = _mm_set1_epi8(8);
+ const __m128i k_16 = _mm_set1_epi8(16);
+ /* Modify each level's adjustment according to motion_magnitude. */
+ const __m128i l3 = _mm_set1_epi8(
+ (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6);
+ /* Difference between level 3 and level 2 is 2. */
+ const __m128i l32 = _mm_set1_epi8(2);
+ /* Difference between level 2 and level 1 is 1. */
+ const __m128i l21 = _mm_set1_epi8(1);
+
+ for (r = 0; r < 16; ++r) {
+ /* Calculate differences */
+ const __m128i v_sig = _mm_loadu_si128((__m128i *)(&sig[0]));
+ const __m128i v_mc_running_avg_y =
+ _mm_loadu_si128((__m128i *)(&mc_running_avg_y[0]));
+ __m128i v_running_avg_y;
+ const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
+ const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
+ /* Obtain the sign. FF if diff is negative. */
+ const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
+ /* Clamp absolute difference to 16 to be used to get mask. Doing this
+ * allows us to use _mm_cmpgt_epi8, which operates on signed byte. */
+ const __m128i clamped_absdiff =
+ _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16);
+ /* Get masks for l2 l1 and l0 adjustments */
+ const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff);
+ const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff);
+ const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff);
+ /* Get adjustments for l2, l1, and l0 */
+ __m128i adj2 = _mm_and_si128(mask2, l32);
+ const __m128i adj1 = _mm_and_si128(mask1, l21);
+ const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
+ __m128i adj, padj, nadj;
+
+ /* Combine the adjustments and get absolute adjustments. */
+ adj2 = _mm_add_epi8(adj2, adj1);
+ adj = _mm_sub_epi8(l3, adj2);
+ adj = _mm_andnot_si128(mask0, adj);
+ adj = _mm_or_si128(adj, adj0);
+
+ /* Restore the sign and get positive and negative adjustments. */
+ padj = _mm_andnot_si128(diff_sign, adj);
+ nadj = _mm_and_si128(diff_sign, adj);
+
+ /* Calculate filtered value. */
+ v_running_avg_y = _mm_adds_epu8(v_sig, padj);
+ v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
+ _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);
+
+ /* Adjustments are <= 7 + shift_inc, and the saturating adds keep each
+ * element of acc_diff within signed char range.
+ */
+ acc_diff = _mm_adds_epi8(acc_diff, padj);
+ acc_diff = _mm_subs_epi8(acc_diff, nadj);
+
+ /* Update pointers for next iteration. */
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ {
+ /* Compute the sum of all pixel differences of this MB. */
+ unsigned int abs_sum_diff = abs_sum_diff_16x1(acc_diff);
+ sum_diff_thresh = SUM_DIFF_THRESHOLD;
+ if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
+ if (abs_sum_diff > sum_diff_thresh) {
+ // Before returning to copy the block (i.e., apply no denoising),
+ // check if we can still apply some (weaker) temporal filtering to
+ // this block, which would otherwise not be denoised at all. The
+ // simplest option is to apply an additional adjustment to
+ // running_avg_y to bring it closer to sig. The adjustment is capped
+ // by a maximum delta, and chosen such that in most cases the
+ // resulting sum_diff will be within the acceptable range given by
+ // sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over the
+ // threshold.
+ int delta = ((abs_sum_diff - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ const __m128i k_delta = _mm_set1_epi8(delta);
+ sig -= sig_stride * 16;
+ mc_running_avg_y -= mc_avg_y_stride * 16;
+ running_avg_y -= avg_y_stride * 16;
+ for (r = 0; r < 16; ++r) {
+ __m128i v_running_avg_y =
+ _mm_loadu_si128((__m128i *)(&running_avg_y[0]));
+ // Calculate differences.
+ const __m128i v_sig = _mm_loadu_si128((__m128i *)(&sig[0]));
+ const __m128i v_mc_running_avg_y =
+ _mm_loadu_si128((__m128i *)(&mc_running_avg_y[0]));
+ const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
+ const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
+ // Obtain the sign. FF if diff is negative.
+ const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
+ // Clamp absolute difference to delta to get the adjustment.
+ const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
+ // Restore the sign and get positive and negative adjustments.
+ __m128i padj, nadj;
+ padj = _mm_andnot_si128(diff_sign, adj);
+ nadj = _mm_and_si128(diff_sign, adj);
+ // Calculate filtered value.
+ v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj);
+ v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj);
+ _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);
+
+ // Accumulate the adjustments.
+ acc_diff = _mm_subs_epi8(acc_diff, padj);
+ acc_diff = _mm_adds_epi8(acc_diff, nadj);
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+ abs_sum_diff = abs_sum_diff_16x1(acc_diff);
+ if (abs_sum_diff > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+ }
+
+ vp8_copy_mem16x16(running_avg_y_start, avg_y_stride, sig_start, sig_stride);
+ return FILTER_BLOCK;
+}
+
+int vp8_denoiser_filter_uv_sse2(unsigned char *mc_running_avg,
+ int mc_avg_stride, unsigned char *running_avg,
+ int avg_stride, unsigned char *sig,
+ int sig_stride, unsigned int motion_magnitude,
+ int increase_denoising) {
+ unsigned char *running_avg_start = running_avg;
+ unsigned char *sig_start = sig;
+ unsigned int sum_diff_thresh;
+ int r;
+ int shift_inc =
+ (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV)
+ ? 1
+ : 0;
+ __m128i acc_diff = _mm_setzero_si128();
+ const __m128i k_0 = _mm_setzero_si128();
+ const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
+ const __m128i k_8 = _mm_set1_epi8(8);
+ const __m128i k_16 = _mm_set1_epi8(16);
+ /* Modify each level's adjustment according to motion_magnitude. */
+ const __m128i l3 = _mm_set1_epi8(
+ (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 7 + shift_inc : 6);
+ /* Difference between level 3 and level 2 is 2. */
+ const __m128i l32 = _mm_set1_epi8(2);
+ /* Difference between level 2 and level 1 is 1. */
+ const __m128i l21 = _mm_set1_epi8(1);
+
+ {
+ const __m128i k_1 = _mm_set1_epi16(1);
+ __m128i vec_sum_block = _mm_setzero_si128();
+
+ // Avoid denoising the color signal if the block mean is close to the
+ // mid level (128), i.e. the sum over the 8x8 block is within
+ // SUM_DIFF_FROM_AVG_THRESH_UV of 128 * 8 * 8.
+ for (r = 0; r < 8; ++r) {
+ const __m128i v_sig = _mm_loadl_epi64((__m128i *)(&sig[0]));
+ const __m128i v_sig_unpack = _mm_unpacklo_epi8(v_sig, k_0);
+ vec_sum_block = _mm_add_epi16(vec_sum_block, v_sig_unpack);
+ sig += sig_stride;
+ }
+ sig -= sig_stride * 8;
+ {
+ const __m128i hg_fe_dc_ba = _mm_madd_epi16(vec_sum_block, k_1);
+ const __m128i hgfe_dcba =
+ _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8));
+ const __m128i hgfedcba =
+ _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4));
+ const int sum_block = _mm_cvtsi128_si32(hgfedcba);
+ if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) {
+ return COPY_BLOCK;
+ }
+ }
+ }
+
+ for (r = 0; r < 4; ++r) {
+ /* Calculate differences */
+ const __m128i v_sig_low =
+ _mm_castpd_si128(_mm_load_sd((double *)(&sig[0])));
+ const __m128i v_sig = _mm_castpd_si128(_mm_loadh_pd(
+ _mm_castsi128_pd(v_sig_low), (double *)(&sig[sig_stride])));
+ const __m128i v_mc_running_avg_low =
+ _mm_castpd_si128(_mm_load_sd((double *)(&mc_running_avg[0])));
+ const __m128i v_mc_running_avg = _mm_castpd_si128(
+ _mm_loadh_pd(_mm_castsi128_pd(v_mc_running_avg_low),
+ (double *)(&mc_running_avg[mc_avg_stride])));
+ const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg, v_sig);
+ const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg);
+ /* Obtain the sign. FF if diff is negative. */
+ const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
+ /* Clamp absolute difference to 16 to be used to get mask. Doing this
+ * allows us to use _mm_cmpgt_epi8, which operates on signed byte. */
+ const __m128i clamped_absdiff =
+ _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16);
+ /* Get masks for l2 l1 and l0 adjustments */
+ const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff);
+ const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff);
+ const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff);
+ /* Get adjustments for l2, l1, and l0 */
+ __m128i adj2 = _mm_and_si128(mask2, l32);
+ const __m128i adj1 = _mm_and_si128(mask1, l21);
+ const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
+ __m128i adj, padj, nadj;
+ __m128i v_running_avg;
+
+ /* Combine the adjustments and get absolute adjustments. */
+ adj2 = _mm_add_epi8(adj2, adj1);
+ adj = _mm_sub_epi8(l3, adj2);
+ adj = _mm_andnot_si128(mask0, adj);
+ adj = _mm_or_si128(adj, adj0);
+
+ /* Restore the sign and get positive and negative adjustments. */
+ padj = _mm_andnot_si128(diff_sign, adj);
+ nadj = _mm_and_si128(diff_sign, adj);
+
+ /* Calculate filtered value. */
+ v_running_avg = _mm_adds_epu8(v_sig, padj);
+ v_running_avg = _mm_subs_epu8(v_running_avg, nadj);
+
+ _mm_storel_pd((double *)&running_avg[0], _mm_castsi128_pd(v_running_avg));
+ _mm_storeh_pd((double *)&running_avg[avg_stride],
+ _mm_castsi128_pd(v_running_avg));
+
+ /* Adjustments are <= 7 + shift_inc, and the saturating adds keep each
+ * element of acc_diff within signed char range.
+ */
+ acc_diff = _mm_adds_epi8(acc_diff, padj);
+ acc_diff = _mm_subs_epi8(acc_diff, nadj);
+
+ /* Update pointers for next iteration. */
+ sig += sig_stride * 2;
+ mc_running_avg += mc_avg_stride * 2;
+ running_avg += avg_stride * 2;
+ }
+
+ {
+ unsigned int abs_sum_diff = abs_sum_diff_16x1(acc_diff);
+ sum_diff_thresh = SUM_DIFF_THRESHOLD_UV;
+ if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV;
+ if (abs_sum_diff > sum_diff_thresh) {
+ // Before returning to copy the block (i.e., apply no denoising),
+ // check if we can still apply some (weaker) temporal filtering to
+ // this block, which would otherwise not be denoised at all. The
+ // simplest option is to apply an additional adjustment to
+ // running_avg to bring it closer to sig. The adjustment is capped
+ // by a maximum delta, and chosen such that in most cases the
+ // resulting sum_diff will be within the acceptable range given by
+ // sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over the
+ // threshold.
+ int delta = ((abs_sum_diff - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ const __m128i k_delta = _mm_set1_epi8(delta);
+ sig -= sig_stride * 8;
+ mc_running_avg -= mc_avg_stride * 8;
+ running_avg -= avg_stride * 8;
+ for (r = 0; r < 4; ++r) {
+ // Calculate differences.
+ const __m128i v_sig_low =
+ _mm_castpd_si128(_mm_load_sd((double *)(&sig[0])));
+ const __m128i v_sig = _mm_castpd_si128(_mm_loadh_pd(
+ _mm_castsi128_pd(v_sig_low), (double *)(&sig[sig_stride])));
+ const __m128i v_mc_running_avg_low =
+ _mm_castpd_si128(_mm_load_sd((double *)(&mc_running_avg[0])));
+ const __m128i v_mc_running_avg = _mm_castpd_si128(
+ _mm_loadh_pd(_mm_castsi128_pd(v_mc_running_avg_low),
+ (double *)(&mc_running_avg[mc_avg_stride])));
+ const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg, v_sig);
+ const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg);
+ // Obtain the sign. FF if diff is negative.
+ const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
+ // Clamp absolute difference to delta to get the adjustment.
+ const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
+ // Restore the sign and get positive and negative adjustments.
+ __m128i padj, nadj;
+ const __m128i v_running_avg_low =
+ _mm_castpd_si128(_mm_load_sd((double *)(&running_avg[0])));
+ __m128i v_running_avg = _mm_castpd_si128(
+ _mm_loadh_pd(_mm_castsi128_pd(v_running_avg_low),
+ (double *)(&running_avg[avg_stride])));
+ padj = _mm_andnot_si128(diff_sign, adj);
+ nadj = _mm_and_si128(diff_sign, adj);
+ // Calculate filtered value.
+ v_running_avg = _mm_subs_epu8(v_running_avg, padj);
+ v_running_avg = _mm_adds_epu8(v_running_avg, nadj);
+
+ _mm_storel_pd((double *)&running_avg[0],
+ _mm_castsi128_pd(v_running_avg));
+ _mm_storeh_pd((double *)&running_avg[avg_stride],
+ _mm_castsi128_pd(v_running_avg));
+
+ // Accumulate the adjustments.
+ acc_diff = _mm_subs_epi8(acc_diff, padj);
+ acc_diff = _mm_adds_epi8(acc_diff, nadj);
+
+ // Update pointers for next iteration.
+ sig += sig_stride * 2;
+ mc_running_avg += mc_avg_stride * 2;
+ running_avg += avg_stride * 2;
+ }
+ abs_sum_diff = abs_sum_diff_16x1(acc_diff);
+ if (abs_sum_diff > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+ }
+
+ vp8_copy_mem8x8(running_avg_start, avg_stride, sig_start, sig_stride);
+ return FILTER_BLOCK;
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/fwalsh_sse2.asm b/media/libvpx/libvpx/vp8/encoder/x86/fwalsh_sse2.asm
new file mode 100644
index 0000000000..938fc173ff
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/fwalsh_sse2.asm
@@ -0,0 +1,166 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+SECTION .text
+
+;void vp8_short_walsh4x4_sse2(short *input, short *output, int pitch)
+globalsym(vp8_short_walsh4x4_sse2)
+sym(vp8_short_walsh4x4_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 3
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ; input
+ mov rdi, arg(1) ; output
+ movsxd rdx, dword ptr arg(2) ; pitch
+
+ ; first for loop
+ movq xmm0, MMWORD PTR [rsi] ; load input
+ movq xmm1, MMWORD PTR [rsi + rdx]
+ lea rsi, [rsi + rdx*2]
+ movq xmm2, MMWORD PTR [rsi]
+ movq xmm3, MMWORD PTR [rsi + rdx]
+
+ punpcklwd xmm0, xmm1
+ punpcklwd xmm2, xmm3
+
+ movdqa xmm1, xmm0
+ punpckldq xmm0, xmm2 ; ip[1] ip[0]
+ punpckhdq xmm1, xmm2 ; ip[3] ip[2]
+
+ movdqa xmm2, xmm0
+ paddw xmm0, xmm1
+ psubw xmm2, xmm1
+
+ psllw xmm0, 2 ; d1 a1
+ psllw xmm2, 2 ; c1 b1
+
+ movdqa xmm1, xmm0
+ punpcklqdq xmm0, xmm2 ; b1 a1
+ punpckhqdq xmm1, xmm2 ; c1 d1
+
+ pxor xmm6, xmm6
+ movq xmm6, xmm0
+ pxor xmm7, xmm7
+ pcmpeqw xmm7, xmm6
+ paddw xmm7, [GLOBAL(c1)]
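+ ; xmm7 = 1 where a1 != 0 and 0 where a1 == 0
+ ; (pcmpeqw set 0xffff where a1 == 0; adding 1 wraps it to 0)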
+
+ movdqa xmm2, xmm0
+ paddw xmm0, xmm1 ; b1+c1 a1+d1
+ psubw xmm2, xmm1 ; b1-c1 a1-d1
+ paddw xmm0, xmm7 ; b1+c1 a1+d1+(a1!=0)
+
+ ; second for loop
+ ; input: 13 9 5 1 12 8 4 0 (xmm0)
+ ; 14 10 6 2 15 11 7 3 (xmm2)
+ ; after shuffle:
+ ; 13 5 9 1 12 4 8 0 (xmm0)
+ ; 14 6 10 2 15 7 11 3 (xmm1)
+ pshuflw xmm3, xmm0, 0xd8
+ pshufhw xmm0, xmm3, 0xd8
+ pshuflw xmm3, xmm2, 0xd8
+ pshufhw xmm1, xmm3, 0xd8
+
+ movdqa xmm2, xmm0
+ pmaddwd xmm0, [GLOBAL(c1)] ; d11 a11 d10 a10
+ pmaddwd xmm2, [GLOBAL(cn1)] ; c11 b11 c10 b10
+ movdqa xmm3, xmm1
+ pmaddwd xmm1, [GLOBAL(c1)] ; d12 a12 d13 a13
+ pmaddwd xmm3, [GLOBAL(cn1)] ; c12 b12 c13 b13
+
+ pshufd xmm4, xmm0, 0xd8 ; d11 d10 a11 a10
+ pshufd xmm5, xmm2, 0xd8 ; c11 c10 b11 b10
+ pshufd xmm6, xmm1, 0x72 ; d13 d12 a13 a12
+ pshufd xmm7, xmm3, 0x72 ; c13 c12 b13 b12
+
+ movdqa xmm0, xmm4
+ punpcklqdq xmm0, xmm5 ; b11 b10 a11 a10
+ punpckhqdq xmm4, xmm5 ; c11 c10 d11 d10
+ movdqa xmm1, xmm6
+ punpcklqdq xmm1, xmm7 ; b13 b12 a13 a12
+ punpckhqdq xmm6, xmm7 ; c13 c12 d13 d12
+
+ movdqa xmm2, xmm0
+ paddd xmm0, xmm4 ; b21 b20 a21 a20
+ psubd xmm2, xmm4 ; c21 c20 d21 d20
+ movdqa xmm3, xmm1
+ paddd xmm1, xmm6 ; b23 b22 a23 a22
+ psubd xmm3, xmm6 ; c23 c22 d23 d22
+
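+ ; as in the C version: a2 += (a2 < 0), then op = (a2 + 3) >> 3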
+ pxor xmm4, xmm4
+ movdqa xmm5, xmm4
+ pcmpgtd xmm4, xmm0
+ pcmpgtd xmm5, xmm2
+ pand xmm4, [GLOBAL(cd1)]
+ pand xmm5, [GLOBAL(cd1)]
+
+ pxor xmm6, xmm6
+ movdqa xmm7, xmm6
+ pcmpgtd xmm6, xmm1
+ pcmpgtd xmm7, xmm3
+ pand xmm6, [GLOBAL(cd1)]
+ pand xmm7, [GLOBAL(cd1)]
+
+ paddd xmm0, xmm4
+ paddd xmm2, xmm5
+ paddd xmm0, [GLOBAL(cd3)]
+ paddd xmm2, [GLOBAL(cd3)]
+ paddd xmm1, xmm6
+ paddd xmm3, xmm7
+ paddd xmm1, [GLOBAL(cd3)]
+ paddd xmm3, [GLOBAL(cd3)]
+
+ psrad xmm0, 3
+ psrad xmm1, 3
+ psrad xmm2, 3
+ psrad xmm3, 3
+ movdqa xmm4, xmm0
+ punpcklqdq xmm0, xmm1 ; a23 a22 a21 a20
+ punpckhqdq xmm4, xmm1 ; b23 b22 b21 b20
+ movdqa xmm5, xmm2
+ punpckhqdq xmm2, xmm3 ; c23 c22 c21 c20
+ punpcklqdq xmm5, xmm3 ; d23 d22 d21 d20
+
+ packssdw xmm0, xmm4 ; b23 b22 b21 b20 a23 a22 a21 a20
+ packssdw xmm2, xmm5 ; d23 d22 d21 d20 c23 c22 c21 c20
+
+ movdqa XMMWORD PTR [rdi], xmm0
+ movdqa XMMWORD PTR [rdi + 16], xmm2
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+c1:
+ dw 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001
+align 16
+cn1:
+ dw 0x0001, 0xffff, 0x0001, 0xffff, 0x0001, 0xffff, 0x0001, 0xffff
+align 16
+cd1:
+ dd 0x00000001, 0x00000001, 0x00000001, 0x00000001
+align 16
+cd3:
+ dd 0x00000003, 0x00000003, 0x00000003, 0x00000003
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/quantize_sse4.c b/media/libvpx/libvpx/vp8/encoder/x86/quantize_sse4.c
new file mode 100644
index 0000000000..4c2d24cc27
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/quantize_sse4.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <smmintrin.h> /* SSE4.1 */
+
+#include "./vp8_rtcd.h"
+#include "vp8/encoder/block.h"
+#include "vpx_ports/bitops.h" /* get_lsb */
+#include "vpx_ports/compiler_attributes.h"
+
+// Unsigned shift overflow is disabled for the use of ~1U << eob with ymask.
+VPX_NO_UNSIGNED_SHIFT_CHECK void vp8_regular_quantize_b_sse4_1(BLOCK *b,
+ BLOCKD *d) {
+ int eob = -1;
+ short *zbin_boost_ptr = b->zrun_zbin_boost;
+ __m128i zbin_boost0 = _mm_load_si128((__m128i *)(zbin_boost_ptr));
+ __m128i zbin_boost1 = _mm_load_si128((__m128i *)(zbin_boost_ptr + 8));
+ __m128i x0, x1, y0, y1, x_minus_zbin0, x_minus_zbin1, dqcoeff0, dqcoeff1;
+ __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift));
+ __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8));
+ __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));
+ __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8));
+ __m128i zbin_extra = _mm_cvtsi32_si128(b->zbin_extra);
+ __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin));
+ __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8));
+ __m128i round0 = _mm_load_si128((__m128i *)(b->round));
+ __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));
+ __m128i quant0 = _mm_load_si128((__m128i *)(b->quant));
+ __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8));
+ __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
+ __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
+ __m128i qcoeff0, qcoeff1, t0, t1, x_shuf0, x_shuf1;
+ uint32_t mask, ymask;
+ DECLARE_ALIGNED(16, static const uint8_t,
+ zig_zag_mask[16]) = { 0, 1, 4, 8, 5, 2, 3, 6,
+ 9, 12, 13, 10, 7, 11, 14, 15 };
+ DECLARE_ALIGNED(16, uint16_t, qcoeff[16]) = { 0 };
+
+ /* Duplicate to all lanes. */
+ zbin_extra = _mm_shufflelo_epi16(zbin_extra, 0);
+ zbin_extra = _mm_unpacklo_epi16(zbin_extra, zbin_extra);
+
+ /* x = abs(z) */
+ x0 = _mm_abs_epi16(z0);
+ x1 = _mm_abs_epi16(z1);
+
+ /* zbin[] + zbin_extra */
+ zbin0 = _mm_add_epi16(zbin0, zbin_extra);
+ zbin1 = _mm_add_epi16(zbin1, zbin_extra);
+
+ /* In C x is compared to zbin where zbin = zbin[] + boost + extra. Rebalance
+ * the equation because boost is the only value which can change:
+ * x - (zbin[] + extra) >= boost */
+ x_minus_zbin0 = _mm_sub_epi16(x0, zbin0);
+ x_minus_zbin1 = _mm_sub_epi16(x1, zbin1);
+
+ /* All the remaining calculations are valid whether they are done now with
+ * simd or later inside the loop one at a time. */
+ x0 = _mm_add_epi16(x0, round0);
+ x1 = _mm_add_epi16(x1, round1);
+
+ y0 = _mm_mulhi_epi16(x0, quant0);
+ y1 = _mm_mulhi_epi16(x1, quant1);
+
+ y0 = _mm_add_epi16(y0, x0);
+ y1 = _mm_add_epi16(y1, x1);
+
+ /* Instead of shifting each value independently we convert the scaling
+ * factor with 1 << (16 - shift) so we can use multiply/return high half. */
+ y0 = _mm_mulhi_epi16(y0, quant_shift0);
+ y1 = _mm_mulhi_epi16(y1, quant_shift1);
+
+ /* Restore the sign. */
+ y0 = _mm_sign_epi16(y0, z0);
+ y1 = _mm_sign_epi16(y1, z1);
+
+ {
+ const __m128i zig_zag_i16_0 =
+ _mm_setr_epi8(0, 1, 2, 3, 8, 9, 14, 15, 10, 11, 4, 5, 6, 7, 12, 13);
+ const __m128i zig_zag_i16_1 =
+ _mm_setr_epi8(0, 1, 6, 7, 8, 9, 2, 3, 14, 15, 4, 5, 10, 11, 12, 13);
+
+ /* The first part of the zig zag needs a value
+ * from x_minus_zbin1 and vice versa. */
+ t1 = _mm_alignr_epi8(x_minus_zbin1, x_minus_zbin1, 2);
+ t0 = _mm_blend_epi16(x_minus_zbin0, t1, 0x80);
+ t1 = _mm_blend_epi16(t1, x_minus_zbin0, 0x80);
+ x_shuf0 = _mm_shuffle_epi8(t0, zig_zag_i16_0);
+ x_shuf1 = _mm_shuffle_epi8(t1, zig_zag_i16_1);
+ }
+
+ /* Check if y is nonzero and put it in zig zag order. */
+ t0 = _mm_packs_epi16(y0, y1);
+ t0 = _mm_cmpeq_epi8(t0, _mm_setzero_si128());
+ t0 = _mm_shuffle_epi8(t0, _mm_load_si128((const __m128i *)zig_zag_mask));
+ ymask = _mm_movemask_epi8(t0) ^ 0xffff;
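+ /* ymask has a bit set for every zig-zag position whose quantized value
+ * is nonzero. */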
+
+ for (;;) {
+ t0 = _mm_cmpgt_epi16(zbin_boost0, x_shuf0);
+ t1 = _mm_cmpgt_epi16(zbin_boost1, x_shuf1);
+ t0 = _mm_packs_epi16(t0, t1);
+ mask = _mm_movemask_epi8(t0);
+ mask = ~mask & ymask;
+ if (!mask) break;
+ /* |eob| will contain the index of the next found element where:
+ * boost[i - old_eob - 1] <= x[zigzag[i]] && y[zigzag[i]] != 0 */
+ eob = get_lsb(mask);
+ /* Need to clear the mask from processed elements so that
+ * they are no longer counted in the next iteration. */
+ ymask &= ~1U << eob;
+ /* It's safe to read before the start of this buffer if struct VP8_COMP
+ * has at least 32 bytes in front of the zrun_zbin_boost_* fields (it has
+ * 384). Any data read outside of the buffer is masked by the updated
+ * |ymask|. */
+ zbin_boost0 = _mm_loadu_si128((__m128i *)(zbin_boost_ptr - eob - 1));
+ zbin_boost1 = _mm_loadu_si128((__m128i *)(zbin_boost_ptr - eob + 7));
+ qcoeff[zig_zag_mask[eob]] = 0xffff;
+ }
+
+ qcoeff0 = _mm_load_si128((__m128i *)(qcoeff));
+ qcoeff1 = _mm_load_si128((__m128i *)(qcoeff + 8));
+ qcoeff0 = _mm_and_si128(qcoeff0, y0);
+ qcoeff1 = _mm_and_si128(qcoeff1, y1);
+
+ _mm_store_si128((__m128i *)(d->qcoeff), qcoeff0);
+ _mm_store_si128((__m128i *)(d->qcoeff + 8), qcoeff1);
+
+ dqcoeff0 = _mm_mullo_epi16(qcoeff0, dequant0);
+ dqcoeff1 = _mm_mullo_epi16(qcoeff1, dequant1);
+
+ _mm_store_si128((__m128i *)(d->dqcoeff), dqcoeff0);
+ _mm_store_si128((__m128i *)(d->dqcoeff + 8), dqcoeff1);
+
+ *d->eob = eob + 1;
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm b/media/libvpx/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm
new file mode 100644
index 0000000000..67102064a1
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm
@@ -0,0 +1,209 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+SECTION .text
+
+; void vp8_temporal_filter_apply_sse2 | arg
+; (unsigned char *frame1, | 0
+; unsigned int stride, | 1
+; unsigned char *frame2, | 2
+; unsigned int block_size, | 3
+; int strength, | 4
+; int filter_weight, | 5
+; unsigned int *accumulator, | 6
+; unsigned short *count) | 7
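+;
+; For each pixel: diff = frame1 - frame2 and
+; modifier = 16 - ((3 * diff^2 + rounding_bit) >> strength), saturating at 0;
+; then count += modifier * filter_weight and
+; accumulator += modifier * filter_weight * frame2.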
+globalsym(vp8_temporal_filter_apply_sse2)
+sym(vp8_temporal_filter_apply_sse2):
+
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ALIGN_STACK 16, rax
+ %define block_size 0
+ %define strength 16
+ %define filter_weight 32
+ %define rounding_bit 48
+ %define rbp_backup 64
+ %define stack_size 80
+ sub rsp, stack_size
+ mov [rsp + rbp_backup], rbp
+ ; end prolog
+
+ mov rdx, arg(3)
+ mov [rsp + block_size], rdx
+ movd xmm6, arg(4)
+ movdqa [rsp + strength], xmm6 ; where strength is used, all 16 bytes are read
+
+ ; calculate the rounding bit outside the loop
+ ; 0x8000 >> (16 - strength)
+ mov rdx, 16
+ sub rdx, arg(4) ; 16 - strength
+ movq xmm4, rdx ; can't use rdx w/ shift
+ movdqa xmm5, [GLOBAL(_const_top_bit)]
+ psrlw xmm5, xmm4
+ movdqa [rsp + rounding_bit], xmm5
+
+ mov rsi, arg(0) ; src/frame1
+ mov rdx, arg(2) ; predictor frame
+ mov rdi, arg(6) ; accumulator
+ mov rax, arg(7) ; count
+
+ ; dup the filter weight and store for later
+ movd xmm0, arg(5) ; filter_weight
+ pshuflw xmm0, xmm0, 0
+ punpcklwd xmm0, xmm0
+ movdqa [rsp + filter_weight], xmm0
+
+ mov rbp, arg(1) ; stride
+ pxor xmm7, xmm7 ; zero for extraction
+
+ lea rcx, [rdx + 16*16*1]
+ cmp dword ptr [rsp + block_size], 8
+ jne .temporal_filter_apply_load_16
+ lea rcx, [rdx + 8*8*1]
+
+.temporal_filter_apply_load_8:
+ movq xmm0, [rsi] ; first row
+ lea rsi, [rsi + rbp] ; += stride
+ punpcklbw xmm0, xmm7 ; src[ 0- 7]
+ movq xmm1, [rsi] ; second row
+ lea rsi, [rsi + rbp] ; += stride
+ punpcklbw xmm1, xmm7 ; src[ 8-15]
+ jmp .temporal_filter_apply_load_finished
+
+.temporal_filter_apply_load_16:
+ movdqa xmm0, [rsi] ; src (frame1)
+ lea rsi, [rsi + rbp] ; += stride
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm7 ; src[ 0- 7]
+ punpckhbw xmm1, xmm7 ; src[ 8-15]
+
+.temporal_filter_apply_load_finished:
+ movdqa xmm2, [rdx] ; predictor (frame2)
+ movdqa xmm3, xmm2
+ punpcklbw xmm2, xmm7 ; pred[ 0- 7]
+ punpckhbw xmm3, xmm7 ; pred[ 8-15]
+
+ ; modifier = src_byte - pixel_value
+ psubw xmm0, xmm2 ; src - pred[ 0- 7]
+ psubw xmm1, xmm3 ; src - pred[ 8-15]
+
+ ; modifier *= modifier
+ pmullw xmm0, xmm0 ; modifier[ 0- 7]^2
+ pmullw xmm1, xmm1 ; modifier[ 8-15]^2
+
+ ; modifier *= 3
+ pmullw xmm0, [GLOBAL(_const_3w)]
+ pmullw xmm1, [GLOBAL(_const_3w)]
+
+ ; modifier += 0x8000 >> (16 - strength)
+ paddw xmm0, [rsp + rounding_bit]
+ paddw xmm1, [rsp + rounding_bit]
+
+ ; modifier >>= strength
+ psrlw xmm0, [rsp + strength]
+ psrlw xmm1, [rsp + strength]
+
+ ; modifier = 16 - modifier
+ ; saturation takes care of modifier > 16
+ movdqa xmm3, [GLOBAL(_const_16w)]
+ movdqa xmm2, [GLOBAL(_const_16w)]
+ psubusw xmm3, xmm1
+ psubusw xmm2, xmm0
+
+ ; modifier *= filter_weight
+ pmullw xmm2, [rsp + filter_weight]
+ pmullw xmm3, [rsp + filter_weight]
+
+ ; count
+ movdqa xmm4, [rax]
+ movdqa xmm5, [rax+16]
+ ; += modifier
+ paddw xmm4, xmm2
+ paddw xmm5, xmm3
+ ; write back
+ movdqa [rax], xmm4
+ movdqa [rax+16], xmm5
+ lea rax, [rax + 16*2] ; count += 16*(sizeof(short))
+
+ ; load and extract the predictor up to shorts
+ pxor xmm7, xmm7
+ movdqa xmm0, [rdx]
+ lea rdx, [rdx + 16*1] ; pred += 16*(sizeof(char))
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm7 ; pred[ 0- 7]
+ punpckhbw xmm1, xmm7 ; pred[ 8-15]
+
+ ; modifier *= pixel_value
+ pmullw xmm0, xmm2
+ pmullw xmm1, xmm3
+
+ ; expand to double words
+ movdqa xmm2, xmm0
+ punpcklwd xmm0, xmm7 ; [ 0- 3]
+ punpckhwd xmm2, xmm7 ; [ 4- 7]
+ movdqa xmm3, xmm1
+ punpcklwd xmm1, xmm7 ; [ 8-11]
+ punpckhwd xmm3, xmm7 ; [12-15]
+
+ ; accumulator
+ movdqa xmm4, [rdi]
+ movdqa xmm5, [rdi+16]
+ movdqa xmm6, [rdi+32]
+ movdqa xmm7, [rdi+48]
+ ; += modifier
+ paddd xmm4, xmm0
+ paddd xmm5, xmm2
+ paddd xmm6, xmm1
+ paddd xmm7, xmm3
+ ; write back
+ movdqa [rdi], xmm4
+ movdqa [rdi+16], xmm5
+ movdqa [rdi+32], xmm6
+ movdqa [rdi+48], xmm7
+ lea rdi, [rdi + 16*4] ; accumulator += 16*(sizeof(int))
+
+ cmp rdx, rcx
+ je .temporal_filter_apply_epilog
+ pxor xmm7, xmm7 ; zero for extraction
+ cmp dword ptr [rsp + block_size], 16
+ je .temporal_filter_apply_load_16
+ jmp .temporal_filter_apply_load_8
+
+.temporal_filter_apply_epilog:
+ ; begin epilog
+ mov rbp, [rsp + rbp_backup]
+ add rsp, stack_size
+ pop rsp
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+_const_3w:
+ times 8 dw 3
+align 16
+_const_top_bit:
+ times 8 dw 1<<15
+align 16
+_const_16w:
+ times 8 dw 16
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c b/media/libvpx/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c
new file mode 100644
index 0000000000..d0752453ee
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vpx_ports/x86.h"
+#include "vp8/encoder/block.h"
+
+int vp8_mbblock_error_sse2_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
+int vp8_mbblock_error_sse2(MACROBLOCK *mb, int dc) {
+ short *coeff_ptr = mb->block[0].coeff;
+ short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
+ return vp8_mbblock_error_sse2_impl(coeff_ptr, dcoef_ptr, dc);
+}
+
+int vp8_mbuverror_sse2_impl(short *s_ptr, short *d_ptr);
+int vp8_mbuverror_sse2(MACROBLOCK *mb) {
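+ /* Coefficients 0-255 hold the 16 luma 4x4 blocks; chroma starts at 256. */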
+ short *s_ptr = &mb->coeff[256];
+ short *d_ptr = &mb->e_mbd.dqcoeff[256];
+ return vp8_mbuverror_sse2_impl(s_ptr, d_ptr);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/vp8_quantize_sse2.c b/media/libvpx/libvpx/vp8/encoder/x86/vp8_quantize_sse2.c
new file mode 100644
index 0000000000..581d2565ee
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/vp8_quantize_sse2.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vpx_ports/x86.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/encoder/block.h"
+#include "vp8/common/entropy.h" /* vp8_default_inv_zig_zag */
+
+#include <mmintrin.h> /* MMX */
+#include <xmmintrin.h> /* SSE */
+#include <emmintrin.h> /* SSE2 */
+
+#define SELECT_EOB(i, z) \
+ do { \
+ short boost = *zbin_boost_ptr; \
+ int cmp = (x[z] < boost) | (y[z] == 0); \
+ zbin_boost_ptr++; \
+ if (cmp) break; \
+ qcoeff_ptr[z] = y[z]; \
+ eob = i; \
+ zbin_boost_ptr = b->zrun_zbin_boost; \
+ } while (0)
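+
+/* Note: the |break| above only exits the macro's local do/while. A
+ * coefficient that fails the zbin check is skipped while the boost pointer
+ * keeps advancing; a coefficient that passes is stored and resets the
+ * zero-run boost, mirroring the C quantizer's behavior. */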
+
+void vp8_regular_quantize_b_sse2(BLOCK *b, BLOCKD *d) {
+ char eob = 0;
+ short *zbin_boost_ptr;
+ short *qcoeff_ptr = d->qcoeff;
+ DECLARE_ALIGNED(16, short, x[16]);
+ DECLARE_ALIGNED(16, short, y[16]);
+
+ __m128i sz0, x0, sz1, x1, y0, y1, x_minus_zbin0, x_minus_zbin1;
+ __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift));
+ __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8));
+ __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));
+ __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8));
+ __m128i zbin_extra = _mm_cvtsi32_si128(b->zbin_extra);
+ __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin));
+ __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8));
+ __m128i round0 = _mm_load_si128((__m128i *)(b->round));
+ __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));
+ __m128i quant0 = _mm_load_si128((__m128i *)(b->quant));
+ __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8));
+ __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
+ __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
+
+ memset(qcoeff_ptr, 0, 32);
+
+ /* Duplicate to all lanes. */
+ zbin_extra = _mm_shufflelo_epi16(zbin_extra, 0);
+ zbin_extra = _mm_unpacklo_epi16(zbin_extra, zbin_extra);
+
+ /* Sign of z: z >> 15 */
+ sz0 = _mm_srai_epi16(z0, 15);
+ sz1 = _mm_srai_epi16(z1, 15);
+
+ /* x = abs(z): (z ^ sz) - sz */
+ x0 = _mm_xor_si128(z0, sz0);
+ x1 = _mm_xor_si128(z1, sz1);
+ x0 = _mm_sub_epi16(x0, sz0);
+ x1 = _mm_sub_epi16(x1, sz1);
+
+ /* zbin[] + zbin_extra */
+ zbin0 = _mm_add_epi16(zbin0, zbin_extra);
+ zbin1 = _mm_add_epi16(zbin1, zbin_extra);
+
+ /* In C x is compared to zbin where zbin = zbin[] + boost + extra. Rebalance
+ * the equation because boost is the only value which can change:
+ * x - (zbin[] + extra) >= boost */
+ x_minus_zbin0 = _mm_sub_epi16(x0, zbin0);
+ x_minus_zbin1 = _mm_sub_epi16(x1, zbin1);
+
+ _mm_store_si128((__m128i *)(x), x_minus_zbin0);
+ _mm_store_si128((__m128i *)(x + 8), x_minus_zbin1);
+
+ /* All the remaining calculations are valid whether they are done now with
+ * simd or later inside the loop one at a time. */
+ x0 = _mm_add_epi16(x0, round0);
+ x1 = _mm_add_epi16(x1, round1);
+
+ y0 = _mm_mulhi_epi16(x0, quant0);
+ y1 = _mm_mulhi_epi16(x1, quant1);
+
+ y0 = _mm_add_epi16(y0, x0);
+ y1 = _mm_add_epi16(y1, x1);
+
+ /* Instead of shifting each value independently we convert the scaling
+ * factor with 1 << (16 - shift) so we can use multiply/return high half. */
+ y0 = _mm_mulhi_epi16(y0, quant_shift0);
+ y1 = _mm_mulhi_epi16(y1, quant_shift1);
+
+ /* Return the sign: (y ^ sz) - sz */
+ y0 = _mm_xor_si128(y0, sz0);
+ y1 = _mm_xor_si128(y1, sz1);
+ y0 = _mm_sub_epi16(y0, sz0);
+ y1 = _mm_sub_epi16(y1, sz1);
+
+ _mm_store_si128((__m128i *)(y), y0);
+ _mm_store_si128((__m128i *)(y + 8), y1);
+
+ zbin_boost_ptr = b->zrun_zbin_boost;
+
+ /* The loop gets unrolled anyway. Avoid the vp8_default_zig_zag1d lookup. */
+ SELECT_EOB(1, 0);
+ SELECT_EOB(2, 1);
+ SELECT_EOB(3, 4);
+ SELECT_EOB(4, 8);
+ SELECT_EOB(5, 5);
+ SELECT_EOB(6, 2);
+ SELECT_EOB(7, 3);
+ SELECT_EOB(8, 6);
+ SELECT_EOB(9, 9);
+ SELECT_EOB(10, 12);
+ SELECT_EOB(11, 13);
+ SELECT_EOB(12, 10);
+ SELECT_EOB(13, 7);
+ SELECT_EOB(14, 11);
+ SELECT_EOB(15, 14);
+ SELECT_EOB(16, 15);
+
+ y0 = _mm_load_si128((__m128i *)(d->qcoeff));
+ y1 = _mm_load_si128((__m128i *)(d->qcoeff + 8));
+
+ /* dqcoeff = qcoeff * dequant */
+ y0 = _mm_mullo_epi16(y0, dequant0);
+ y1 = _mm_mullo_epi16(y1, dequant1);
+
+ _mm_store_si128((__m128i *)(d->dqcoeff), y0);
+ _mm_store_si128((__m128i *)(d->dqcoeff + 8), y1);
+
+ *d->eob = eob;
+}
+
+void vp8_fast_quantize_b_sse2(BLOCK *b, BLOCKD *d) {
+ __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));
+ __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8));
+ __m128i round0 = _mm_load_si128((__m128i *)(b->round));
+ __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));
+ __m128i quant_fast0 = _mm_load_si128((__m128i *)(b->quant_fast));
+ __m128i quant_fast1 = _mm_load_si128((__m128i *)(b->quant_fast + 8));
+ __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
+ __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
+ __m128i inv_zig_zag0 =
+ _mm_load_si128((const __m128i *)(vp8_default_inv_zig_zag));
+ __m128i inv_zig_zag1 =
+ _mm_load_si128((const __m128i *)(vp8_default_inv_zig_zag + 8));
+
+ __m128i sz0, sz1, x0, x1, y0, y1, xdq0, xdq1, zeros, ones;
+
+ /* sign of z: z >> 15 */
+ sz0 = _mm_srai_epi16(z0, 15);
+ sz1 = _mm_srai_epi16(z1, 15);
+
+ /* x = abs(z): (z ^ sz) - sz */
+ x0 = _mm_xor_si128(z0, sz0);
+ x1 = _mm_xor_si128(z1, sz1);
+ x0 = _mm_sub_epi16(x0, sz0);
+ x1 = _mm_sub_epi16(x1, sz1);
+
+ /* x += round */
+ x0 = _mm_add_epi16(x0, round0);
+ x1 = _mm_add_epi16(x1, round1);
+
+ /* y = (x * quant) >> 16 */
+ y0 = _mm_mulhi_epi16(x0, quant_fast0);
+ y1 = _mm_mulhi_epi16(x1, quant_fast1);
+
+ /* x = abs(y) = (y ^ sz) - sz */
+ y0 = _mm_xor_si128(y0, sz0);
+ y1 = _mm_xor_si128(y1, sz1);
+ x0 = _mm_sub_epi16(y0, sz0);
+ x1 = _mm_sub_epi16(y1, sz1);
+
+ /* qcoeff = x */
+ _mm_store_si128((__m128i *)(d->qcoeff), x0);
+ _mm_store_si128((__m128i *)(d->qcoeff + 8), x1);
+
+ /* x * dequant */
+ xdq0 = _mm_mullo_epi16(x0, dequant0);
+ xdq1 = _mm_mullo_epi16(x1, dequant1);
+
+ /* dqcoeff = x * dequant */
+ _mm_store_si128((__m128i *)(d->dqcoeff), xdq0);
+ _mm_store_si128((__m128i *)(d->dqcoeff + 8), xdq1);
+
+ /* build a mask for the zig zag */
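+ /* Each nonzero quantized coefficient selects its 1-based zig-zag position
+ * from vp8_default_inv_zig_zag; the horizontal max below then yields the
+ * scan position of the last nonzero coefficient, i.e. the eob. */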
+ zeros = _mm_setzero_si128();
+
+ x0 = _mm_cmpeq_epi16(x0, zeros);
+ x1 = _mm_cmpeq_epi16(x1, zeros);
+
+ ones = _mm_cmpeq_epi16(zeros, zeros);
+
+ x0 = _mm_xor_si128(x0, ones);
+ x1 = _mm_xor_si128(x1, ones);
+
+ x0 = _mm_and_si128(x0, inv_zig_zag0);
+ x1 = _mm_and_si128(x1, inv_zig_zag1);
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ /* now down to 8 */
+ x1 = _mm_shuffle_epi32(x0, 0xE); // 0b00001110
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ /* only 4 left */
+ x1 = _mm_shufflelo_epi16(x0, 0xE); // 0b00001110
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ /* okay, just 2! */
+ x1 = _mm_shufflelo_epi16(x0, 0x1); // 0b00000001
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ *d->eob = 0xFF & _mm_cvtsi128_si32(x0);
+}
diff --git a/media/libvpx/libvpx/vp8/encoder/x86/vp8_quantize_ssse3.c b/media/libvpx/libvpx/vp8/encoder/x86/vp8_quantize_ssse3.c
new file mode 100644
index 0000000000..f6df146f08
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/encoder/x86/vp8_quantize_ssse3.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <tmmintrin.h> /* SSSE3 */
+
+#include "./vp8_rtcd.h"
+#include "vp8/encoder/block.h"
+#include "vpx_ports/bitops.h" /* get_msb */
+
+void vp8_fast_quantize_b_ssse3(BLOCK *b, BLOCKD *d) {
+ int eob, mask;
+
+ __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));
+ __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8));
+ __m128i round0 = _mm_load_si128((__m128i *)(b->round));
+ __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));
+ __m128i quant_fast0 = _mm_load_si128((__m128i *)(b->quant_fast));
+ __m128i quant_fast1 = _mm_load_si128((__m128i *)(b->quant_fast + 8));
+ __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
+ __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
+
+ __m128i sz0, sz1, x, x0, x1, y0, y1, zeros, abs0, abs1;
+
+ DECLARE_ALIGNED(16, const uint8_t,
+ pshufb_zig_zag_mask[16]) = { 0, 1, 4, 8, 5, 2, 3, 6,
+ 9, 12, 13, 10, 7, 11, 14, 15 };
+ __m128i zig_zag = _mm_load_si128((const __m128i *)pshufb_zig_zag_mask);
+
+ /* sign of z: z >> 15 */
+ sz0 = _mm_srai_epi16(z0, 15);
+ sz1 = _mm_srai_epi16(z1, 15);
+
+ /* x = abs(z) */
+ x0 = _mm_abs_epi16(z0);
+ x1 = _mm_abs_epi16(z1);
+
+ /* x += round */
+ x0 = _mm_add_epi16(x0, round0);
+ x1 = _mm_add_epi16(x1, round1);
+
+ /* y = (x * quant) >> 16 */
+ y0 = _mm_mulhi_epi16(x0, quant_fast0);
+ y1 = _mm_mulhi_epi16(x1, quant_fast1);
+
+ /* ASM saves Y for EOB */
+ /* We can skip that here: restoring the sign never turns a nonzero value
+ * into zero, and multiplying a zero coefficient by dequant still yields
+ * zero */
+ abs0 = y0;
+ abs1 = y1;
+
+ /* Restore the sign bit. */
+ y0 = _mm_xor_si128(y0, sz0);
+ y1 = _mm_xor_si128(y1, sz1);
+ x0 = _mm_sub_epi16(y0, sz0);
+ x1 = _mm_sub_epi16(y1, sz1);
+
+ /* qcoeff = x */
+ _mm_store_si128((__m128i *)(d->qcoeff), x0);
+ _mm_store_si128((__m128i *)(d->qcoeff + 8), x1);
+
+ /* x * dequant */
+ x0 = _mm_mullo_epi16(x0, dequant0);
+ x1 = _mm_mullo_epi16(x1, dequant1);
+
+ /* dqcoeff = x * dequant */
+ _mm_store_si128((__m128i *)(d->dqcoeff), x0);
+ _mm_store_si128((__m128i *)(d->dqcoeff + 8), x1);
+
+ zeros = _mm_setzero_si128();
+
+ x0 = _mm_cmpgt_epi16(abs0, zeros);
+ x1 = _mm_cmpgt_epi16(abs1, zeros);
+
+ x = _mm_packs_epi16(x0, x1);
+
+ x = _mm_shuffle_epi8(x, zig_zag);
+
+ mask = _mm_movemask_epi8(x);
+
+ /* The *2 shifts the mask so get_msb returns the count of coefficients
+ * through the last nonzero one; the +1 avoids undefined behavior when
+ * mask is 0, since get_msb(1) is 0 */
+ eob = get_msb(mask * 2 + 1);
+
+ *d->eob = eob;
+}
diff --git a/media/libvpx/libvpx/vp8/exports_dec b/media/libvpx/libvpx/vp8/exports_dec
new file mode 100644
index 0000000000..100ac5c27d
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/exports_dec
@@ -0,0 +1,2 @@
+data vpx_codec_vp8_dx_algo
+text vpx_codec_vp8_dx
diff --git a/media/libvpx/libvpx/vp8/exports_enc b/media/libvpx/libvpx/vp8/exports_enc
new file mode 100644
index 0000000000..29ff35ef7b
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/exports_enc
@@ -0,0 +1,2 @@
+data vpx_codec_vp8_cx_algo
+text vpx_codec_vp8_cx
diff --git a/media/libvpx/libvpx/vp8/vp8_common.mk b/media/libvpx/libvpx/vp8/vp8_common.mk
new file mode 100644
index 0000000000..d485965d3d
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/vp8_common.mk
@@ -0,0 +1,149 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+VP8_COMMON_SRCS-yes += vp8_common.mk
+VP8_COMMON_SRCS-yes += common/ppflags.h
+VP8_COMMON_SRCS-yes += common/onyx.h
+VP8_COMMON_SRCS-yes += common/onyxd.h
+VP8_COMMON_SRCS-yes += common/alloccommon.c
+VP8_COMMON_SRCS-yes += common/blockd.c
+VP8_COMMON_SRCS-yes += common/coefupdateprobs.h
+# VP8_COMMON_SRCS-yes += common/debugmodes.c
+VP8_COMMON_SRCS-yes += common/default_coef_probs.h
+VP8_COMMON_SRCS-yes += common/dequantize.c
+VP8_COMMON_SRCS-yes += common/entropy.c
+VP8_COMMON_SRCS-yes += common/entropymode.c
+VP8_COMMON_SRCS-yes += common/entropymv.c
+VP8_COMMON_SRCS-yes += common/extend.c
+VP8_COMMON_SRCS-yes += common/filter.c
+VP8_COMMON_SRCS-yes += common/filter.h
+VP8_COMMON_SRCS-yes += common/findnearmv.c
+VP8_COMMON_SRCS-yes += common/generic/systemdependent.c
+VP8_COMMON_SRCS-yes += common/idct_blk.c
+VP8_COMMON_SRCS-yes += common/idctllm.c
+VP8_COMMON_SRCS-yes += common/alloccommon.h
+VP8_COMMON_SRCS-yes += common/blockd.h
+VP8_COMMON_SRCS-yes += common/common.h
+VP8_COMMON_SRCS-yes += common/entropy.h
+VP8_COMMON_SRCS-yes += common/entropymode.h
+VP8_COMMON_SRCS-yes += common/entropymv.h
+VP8_COMMON_SRCS-yes += common/extend.h
+VP8_COMMON_SRCS-yes += common/findnearmv.h
+VP8_COMMON_SRCS-yes += common/header.h
+VP8_COMMON_SRCS-yes += common/invtrans.h
+VP8_COMMON_SRCS-yes += common/loopfilter.h
+VP8_COMMON_SRCS-yes += common/modecont.h
+VP8_COMMON_SRCS-yes += common/mv.h
+VP8_COMMON_SRCS-yes += common/onyxc_int.h
+VP8_COMMON_SRCS-yes += common/quant_common.h
+VP8_COMMON_SRCS-yes += common/reconinter.h
+VP8_COMMON_SRCS-yes += common/reconintra.h
+VP8_COMMON_SRCS-yes += common/reconintra4x4.h
+VP8_COMMON_SRCS-yes += common/rtcd.c
+VP8_COMMON_SRCS-yes += common/rtcd_defs.pl
+VP8_COMMON_SRCS-yes += common/setupintrarecon.h
+VP8_COMMON_SRCS-yes += common/swapyv12buffer.h
+VP8_COMMON_SRCS-yes += common/systemdependent.h
+VP8_COMMON_SRCS-yes += common/threading.h
+VP8_COMMON_SRCS-yes += common/treecoder.h
+VP8_COMMON_SRCS-yes += common/vp8_loopfilter.c
+VP8_COMMON_SRCS-yes += common/loopfilter_filters.c
+VP8_COMMON_SRCS-yes += common/mbpitch.c
+VP8_COMMON_SRCS-yes += common/modecont.c
+VP8_COMMON_SRCS-yes += common/quant_common.c
+VP8_COMMON_SRCS-yes += common/reconinter.c
+VP8_COMMON_SRCS-yes += common/reconintra.c
+VP8_COMMON_SRCS-yes += common/reconintra4x4.c
+VP8_COMMON_SRCS-yes += common/setupintrarecon.c
+VP8_COMMON_SRCS-yes += common/swapyv12buffer.c
+VP8_COMMON_SRCS-yes += common/vp8_entropymodedata.h
+
+VP8_COMMON_SRCS-yes += common/treecoder.c
+
+VP8_COMMON_SRCS-$(VPX_ARCH_X86)$(VPX_ARCH_X86_64) += common/x86/vp8_asm_stubs.c
+VP8_COMMON_SRCS-$(VPX_ARCH_X86)$(VPX_ARCH_X86_64) += common/x86/loopfilter_x86.c
+VP8_COMMON_SRCS-$(CONFIG_POSTPROC) += common/mfqe.c
+VP8_COMMON_SRCS-$(CONFIG_POSTPROC) += common/postproc.h
+VP8_COMMON_SRCS-$(CONFIG_POSTPROC) += common/postproc.c
+VP8_COMMON_SRCS-$(HAVE_MMX) += common/x86/dequantize_mmx.asm
+VP8_COMMON_SRCS-$(HAVE_MMX) += common/x86/idct_blk_mmx.c
+VP8_COMMON_SRCS-$(HAVE_MMX) += common/x86/idctllm_mmx.asm
+VP8_COMMON_SRCS-$(HAVE_MMX) += common/x86/recon_mmx.asm
+VP8_COMMON_SRCS-$(HAVE_MMX) += common/x86/subpixel_mmx.asm
+VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_blk_sse2.c
+VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idctllm_sse2.asm
+VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/recon_sse2.asm
+VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/bilinear_filter_sse2.c
+VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/subpixel_sse2.asm
+VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/loopfilter_sse2.asm
+VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/iwalsh_sse2.asm
+VP8_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/subpixel_ssse3.asm
+
+ifeq ($(CONFIG_POSTPROC),yes)
+VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/mfqe_sse2.asm
+endif
+
+ifeq ($(VPX_ARCH_X86_64),yes)
+VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/loopfilter_block_sse2_x86_64.asm
+endif
+
+# common (mips dspr2)
+VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/idctllm_dspr2.c
+VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/filter_dspr2.c
+VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/vp8_loopfilter_filters_dspr2.c
+VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/reconinter_dspr2.c
+VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/idct_blk_dspr2.c
+VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/dequantize_dspr2.c
+
+# common (mips msa intrinsics)
+VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/bilinear_filter_msa.c
+VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/copymem_msa.c
+VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct_msa.c
+VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/loopfilter_filters_msa.c
+VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/sixtap_filter_msa.c
+VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp8_macros_msa.h
+
+# common (mips mmi intrinsics)
+VP8_COMMON_SRCS-$(HAVE_MMI) += common/mips/mmi/sixtap_filter_mmi.c
+VP8_COMMON_SRCS-$(HAVE_MMI) += common/mips/mmi/loopfilter_filters_mmi.c
+VP8_COMMON_SRCS-$(HAVE_MMI) += common/mips/mmi/idctllm_mmi.c
+VP8_COMMON_SRCS-$(HAVE_MMI) += common/mips/mmi/dequantize_mmi.c
+VP8_COMMON_SRCS-$(HAVE_MMI) += common/mips/mmi/copymem_mmi.c
+VP8_COMMON_SRCS-$(HAVE_MMI) += common/mips/mmi/idct_blk_mmi.c
+
+ifeq ($(CONFIG_POSTPROC),yes)
+VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/mfqe_msa.c
+endif
+
+# common (loongarch LSX intrinsics)
+VP8_COMMON_SRCS-$(HAVE_LSX) += common/loongarch/loopfilter_filters_lsx.c
+VP8_COMMON_SRCS-$(HAVE_LSX) += common/loongarch/sixtap_filter_lsx.c
+VP8_COMMON_SRCS-$(HAVE_LSX) += common/loongarch/idct_lsx.c
+
+# common (neon intrinsics)
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/loopfilter_arm.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/loopfilter_arm.h
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/bilinearpredict_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/copymem_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/dc_only_idct_add_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/dequant_idct_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/dequantizeb_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/idct_blk_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iwalsh_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp8_loopfilter_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/loopfiltersimplehorizontaledge_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/loopfiltersimpleverticaledge_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/mbloopfilter_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/shortidct4x4llm_neon.c
+VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/sixtappredict_neon.c
+
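+# Generate the runtime CPU detection header, vp8_rtcd.h, from rtcd_defs.pl.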
+$(eval $(call rtcd_h_template,vp8_rtcd,vp8/common/rtcd_defs.pl))
diff --git a/media/libvpx/libvpx/vp8/vp8_cx_iface.c b/media/libvpx/libvpx/vp8/vp8_cx_iface.c
new file mode 100644
index 0000000000..e2b75a3b21
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/vp8_cx_iface.c
@@ -0,0 +1,1387 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx_version.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/static_assert.h"
+#include "vpx_ports/system_state.h"
+#include "vpx_util/vpx_timestamp.h"
+#include "vp8/encoder/onyx_int.h"
+#include "vpx/vp8cx.h"
+#include "vp8/encoder/firstpass.h"
+#include "vp8/common/onyx.h"
+#include "vp8/common/common.h"
+#include <stdlib.h>
+#include <string.h>
+
+struct vp8_extracfg {
+ struct vpx_codec_pkt_list *pkt_list;
+ int cpu_used; /**< available cpu percentage in 1/16 units */
+ /** whether the encoder is allowed to use an alternate reference frame */
+ unsigned int enable_auto_alt_ref;
+ unsigned int noise_sensitivity;
+ unsigned int Sharpness;
+ unsigned int static_thresh;
+ unsigned int token_partitions;
+ unsigned int arnr_max_frames; /* alt_ref Noise Reduction Max Frame Count */
+ unsigned int arnr_strength; /* alt_ref Noise Reduction Strength */
+ unsigned int arnr_type; /* alt_ref filter type */
+ vp8e_tuning tuning;
+ unsigned int cq_level; /* constrained quality level */
+ unsigned int rc_max_intra_bitrate_pct;
+ unsigned int gf_cbr_boost_pct;
+ unsigned int screen_content_mode;
+};
+
+static struct vp8_extracfg default_extracfg = {
+ NULL,
+#if !(CONFIG_REALTIME_ONLY)
+ 0, /* cpu_used */
+#else
+ 4, /* cpu_used */
+#endif
+ 0, /* enable_auto_alt_ref */
+ 0, /* noise_sensitivity */
+ 0, /* Sharpness */
+ 0, /* static_thresh */
+#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ VP8_EIGHT_TOKENPARTITION,
+#else
+ VP8_ONE_TOKENPARTITION, /* token_partitions */
+#endif
+ 0, /* arnr_max_frames */
+ 3, /* arnr_strength */
+ 3, /* arnr_type*/
+ 0, /* tuning*/
+ 10, /* cq_level */
+ 0, /* rc_max_intra_bitrate_pct */
+ 0, /* gf_cbr_boost_pct */
+ 0, /* screen_content_mode */
+};
+
+struct vpx_codec_alg_priv {
+ vpx_codec_priv_t base;
+ vpx_codec_enc_cfg_t cfg;
+ struct vp8_extracfg vp8_cfg;
+ vpx_rational64_t timestamp_ratio;
+ vpx_codec_pts_t pts_offset;
+ unsigned char pts_offset_initialized;
+ VP8_CONFIG oxcf;
+ struct VP8_COMP *cpi;
+ unsigned char *cx_data;
+ unsigned int cx_data_sz;
+ vpx_image_t preview_img;
+ unsigned int next_frame_flag;
+ vp8_postproc_cfg_t preview_ppcfg;
+ /* pkt_list size depends on the maximum number of lagged frames allowed. */
+ vpx_codec_pkt_list_decl(64) pkt_list;
+ unsigned int fixed_kf_cntr;
+ vpx_enc_frame_flags_t control_frame_flags;
+};
+
+static vpx_codec_err_t update_error_state(
+ vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
+ vpx_codec_err_t res;
+
+ if ((res = error->error_code)) {
+ ctx->base.err_detail = error->has_detail ? error->detail : NULL;
+ }
+
+ return res;
+}
+
+#undef ERROR
+#define ERROR(str) \
+ do { \
+ ctx->base.err_detail = str; \
+ return VPX_CODEC_INVALID_PARAM; \
+ } while (0)
+
+#define RANGE_CHECK(p, memb, lo, hi) \
+ do { \
+ if (!(((p)->memb == (lo) || (p)->memb > (lo)) && (p)->memb <= (hi))) \
+ ERROR(#memb " out of range [" #lo ".." #hi "]"); \
+ } while (0)
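+
+/* The (== lo || > lo) form above is equivalent to >= lo; presumably it is
+ * written this way to avoid tautological-comparison warnings when lo is 0
+ * and memb is unsigned. */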
+
+#define RANGE_CHECK_HI(p, memb, hi) \
+ do { \
+ if (!((p)->memb <= (hi))) ERROR(#memb " out of range [.." #hi "]"); \
+ } while (0)
+
+#define RANGE_CHECK_LO(p, memb, lo) \
+ do { \
+ if (!((p)->memb >= (lo))) ERROR(#memb " out of range [" #lo "..]"); \
+ } while (0)
+
+#define RANGE_CHECK_BOOL(p, memb) \
+ do { \
+ if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean"); \
+ } while (0)
+
+static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
+ const vpx_codec_enc_cfg_t *cfg,
+ const struct vp8_extracfg *vp8_cfg,
+ int finalize) {
+ RANGE_CHECK(cfg, g_w, 1, 16383); /* 14 bits available */
+ RANGE_CHECK(cfg, g_h, 1, 16383); /* 14 bits available */
+ RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
+ RANGE_CHECK(cfg, g_timebase.num, 1, 1000000000);
+ RANGE_CHECK_HI(cfg, g_profile, 3);
+ RANGE_CHECK_HI(cfg, rc_max_quantizer, 63);
+ RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer);
+ RANGE_CHECK_HI(cfg, g_threads, 64);
+#if CONFIG_REALTIME_ONLY
+ RANGE_CHECK_HI(cfg, g_lag_in_frames, 0);
+#elif CONFIG_MULTI_RES_ENCODING
+ if (ctx->base.enc.total_encoders > 1) RANGE_CHECK_HI(cfg, g_lag_in_frames, 0);
+#else
+ RANGE_CHECK_HI(cfg, g_lag_in_frames, 25);
+#endif
+ RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q);
+ RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100);
+ RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100);
+ RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100);
+ RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
+
+/* TODO: add spatial re-sampling support and frame dropping in
+ * multi-res-encoder.*/
+#if CONFIG_MULTI_RES_ENCODING
+ if (ctx->base.enc.total_encoders > 1)
+ RANGE_CHECK_HI(cfg, rc_resize_allowed, 0);
+#else
+ RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
+#endif
+ RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
+ RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
+ RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100);
+
+#if CONFIG_REALTIME_ONLY
+ RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_ONE_PASS);
+#elif CONFIG_MULTI_RES_ENCODING
+ if (ctx->base.enc.total_encoders > 1)
+ RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_ONE_PASS);
+#else
+ RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
+#endif
+
+ /* VP8 does not support a lower bound on the keyframe interval in
+ * automatic keyframe placement mode.
+ */
+ if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist &&
+ cfg->kf_min_dist > 0)
+ ERROR(
+ "kf_min_dist not supported in auto mode, use 0 "
+ "or kf_max_dist instead.");
+
+ RANGE_CHECK_BOOL(vp8_cfg, enable_auto_alt_ref);
+ RANGE_CHECK(vp8_cfg, cpu_used, -16, 16);
+
+#if CONFIG_REALTIME_ONLY && !CONFIG_TEMPORAL_DENOISING
+ RANGE_CHECK(vp8_cfg, noise_sensitivity, 0, 0);
+#else
+ RANGE_CHECK_HI(vp8_cfg, noise_sensitivity, 6);
+#endif
+
+ RANGE_CHECK(vp8_cfg, token_partitions, VP8_ONE_TOKENPARTITION,
+ VP8_EIGHT_TOKENPARTITION);
+ RANGE_CHECK_HI(vp8_cfg, Sharpness, 7);
+ RANGE_CHECK(vp8_cfg, arnr_max_frames, 0, 15);
+ RANGE_CHECK_HI(vp8_cfg, arnr_strength, 6);
+ RANGE_CHECK(vp8_cfg, arnr_type, 1, 3);
+ RANGE_CHECK(vp8_cfg, cq_level, 0, 63);
+ RANGE_CHECK_HI(vp8_cfg, screen_content_mode, 2);
+ if (finalize && (cfg->rc_end_usage == VPX_CQ || cfg->rc_end_usage == VPX_Q))
+ RANGE_CHECK(vp8_cfg, cq_level, cfg->rc_min_quantizer,
+ cfg->rc_max_quantizer);
+
+#if !(CONFIG_REALTIME_ONLY)
+ if (cfg->g_pass == VPX_RC_LAST_PASS) {
+ size_t packet_sz = sizeof(FIRSTPASS_STATS);
+ int n_packets = (int)(cfg->rc_twopass_stats_in.sz / packet_sz);
+ FIRSTPASS_STATS *stats;
+
+ if (!cfg->rc_twopass_stats_in.buf)
+ ERROR("rc_twopass_stats_in.buf not set.");
+
+ if (cfg->rc_twopass_stats_in.sz % packet_sz)
+ ERROR("rc_twopass_stats_in.sz indicates truncated packet.");
+
+ if (cfg->rc_twopass_stats_in.sz < 2 * packet_sz)
+ ERROR("rc_twopass_stats_in requires at least two packets.");
+
+ stats = (void *)((char *)cfg->rc_twopass_stats_in.buf +
+ (n_packets - 1) * packet_sz);
+
+ if ((int)(stats->count + 0.5) != n_packets - 1)
+ ERROR("rc_twopass_stats_in missing EOS stats packet");
+ }
+#endif
+
+ RANGE_CHECK(cfg, ts_number_layers, 1, 5);
+
+ if (cfg->ts_number_layers > 1) {
+ unsigned int i;
+ RANGE_CHECK_HI(cfg, ts_periodicity, 16);
+
+ for (i = 1; i < cfg->ts_number_layers; ++i) {
+ if (cfg->ts_target_bitrate[i] <= cfg->ts_target_bitrate[i - 1] &&
+ cfg->rc_target_bitrate > 0)
+ ERROR("ts_target_bitrate entries are not strictly increasing");
+ }
+
+ RANGE_CHECK(cfg, ts_rate_decimator[cfg->ts_number_layers - 1], 1, 1);
+ for (i = cfg->ts_number_layers - 2; i > 0; i--) {
+ if (cfg->ts_rate_decimator[i - 1] != 2 * cfg->ts_rate_decimator[i])
+ ERROR("ts_rate_decimator factors are not powers of 2");
+ }
+
+ RANGE_CHECK_HI(cfg, ts_layer_id[i], cfg->ts_number_layers - 1);
+ }
+
+#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
+ if (cfg->g_threads > (1 << vp8_cfg->token_partitions))
+ ERROR("g_threads cannot be bigger than number of token partitions");
+#endif
+
+ // The ranges below may need further tuning.
+ RANGE_CHECK(cfg, use_vizier_rc_params, 0, 1);
+ RANGE_CHECK(cfg, active_wq_factor.den, 1, 1000);
+ RANGE_CHECK(cfg, err_per_mb_factor.den, 1, 1000);
+ RANGE_CHECK(cfg, sr_default_decay_limit.den, 1, 1000);
+ RANGE_CHECK(cfg, sr_diff_factor.den, 1, 1000);
+ RANGE_CHECK(cfg, kf_err_per_mb_factor.den, 1, 1000);
+ RANGE_CHECK(cfg, kf_frame_min_boost_factor.den, 1, 1000);
+ RANGE_CHECK(cfg, kf_frame_max_boost_subs_factor.den, 1, 1000);
+ RANGE_CHECK(cfg, kf_max_total_boost_factor.den, 1, 1000);
+ RANGE_CHECK(cfg, gf_max_total_boost_factor.den, 1, 1000);
+ RANGE_CHECK(cfg, gf_frame_max_boost_factor.den, 1, 1000);
+ RANGE_CHECK(cfg, zm_factor.den, 1, 1000);
+ RANGE_CHECK(cfg, rd_mult_inter_qp_fac.den, 1, 1000);
+ RANGE_CHECK(cfg, rd_mult_arf_qp_fac.den, 1, 1000);
+ RANGE_CHECK(cfg, rd_mult_key_qp_fac.den, 1, 1000);
+
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx,
+ const vpx_image_t *img) {
+ switch (img->fmt) {
+ case VPX_IMG_FMT_YV12:
+ case VPX_IMG_FMT_I420:
+ case VPX_IMG_FMT_NV12: break;
+ default:
+ ERROR(
+ "Invalid image format. Only YV12, I420 and NV12 images are "
+ "supported");
+ }
+
+ if ((img->d_w != ctx->cfg.g_w) || (img->d_h != ctx->cfg.g_h))
+ ERROR("Image size must match encoder init configuration size");
+
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t set_vp8e_config(VP8_CONFIG *oxcf,
+ vpx_codec_enc_cfg_t cfg,
+ struct vp8_extracfg vp8_cfg,
+ vpx_codec_priv_enc_mr_cfg_t *mr_cfg) {
+ oxcf->multi_threaded = cfg.g_threads;
+ oxcf->Version = cfg.g_profile;
+
+ oxcf->Width = cfg.g_w;
+ oxcf->Height = cfg.g_h;
+ oxcf->timebase = cfg.g_timebase;
+
+ oxcf->error_resilient_mode = cfg.g_error_resilient;
+
+ switch (cfg.g_pass) {
+ case VPX_RC_ONE_PASS: oxcf->Mode = MODE_BESTQUALITY; break;
+ case VPX_RC_FIRST_PASS: oxcf->Mode = MODE_FIRSTPASS; break;
+ case VPX_RC_LAST_PASS: oxcf->Mode = MODE_SECONDPASS_BEST; break;
+ }
+
+ if (cfg.g_pass == VPX_RC_FIRST_PASS || cfg.g_pass == VPX_RC_ONE_PASS) {
+ oxcf->allow_lag = 0;
+ oxcf->lag_in_frames = 0;
+ } else {
+ oxcf->allow_lag = (cfg.g_lag_in_frames) > 0;
+ oxcf->lag_in_frames = cfg.g_lag_in_frames;
+ }
+
+ oxcf->allow_df = (cfg.rc_dropframe_thresh > 0);
+ oxcf->drop_frames_water_mark = cfg.rc_dropframe_thresh;
+
+ oxcf->allow_spatial_resampling = cfg.rc_resize_allowed;
+ oxcf->resample_up_water_mark = cfg.rc_resize_up_thresh;
+ oxcf->resample_down_water_mark = cfg.rc_resize_down_thresh;
+
+ if (cfg.rc_end_usage == VPX_VBR) {
+ oxcf->end_usage = USAGE_LOCAL_FILE_PLAYBACK;
+ } else if (cfg.rc_end_usage == VPX_CBR) {
+ oxcf->end_usage = USAGE_STREAM_FROM_SERVER;
+ } else if (cfg.rc_end_usage == VPX_CQ) {
+ oxcf->end_usage = USAGE_CONSTRAINED_QUALITY;
+ } else if (cfg.rc_end_usage == VPX_Q) {
+ oxcf->end_usage = USAGE_CONSTANT_QUALITY;
+ }
+
+ // Cap the target rate to 1000 Mbps to avoid some integer overflows in
+ // target bandwidth calculations.
+ oxcf->target_bandwidth = VPXMIN(cfg.rc_target_bitrate, 1000000);
+ oxcf->rc_max_intra_bitrate_pct = vp8_cfg.rc_max_intra_bitrate_pct;
+ oxcf->gf_cbr_boost_pct = vp8_cfg.gf_cbr_boost_pct;
+
+ oxcf->best_allowed_q = cfg.rc_min_quantizer;
+ oxcf->worst_allowed_q = cfg.rc_max_quantizer;
+ oxcf->cq_level = vp8_cfg.cq_level;
+ oxcf->fixed_q = -1;
+
+ oxcf->under_shoot_pct = cfg.rc_undershoot_pct;
+ oxcf->over_shoot_pct = cfg.rc_overshoot_pct;
+
+ oxcf->maximum_buffer_size_in_ms = cfg.rc_buf_sz;
+ oxcf->starting_buffer_level_in_ms = cfg.rc_buf_initial_sz;
+ oxcf->optimal_buffer_level_in_ms = cfg.rc_buf_optimal_sz;
+
+ oxcf->maximum_buffer_size = cfg.rc_buf_sz;
+ oxcf->starting_buffer_level = cfg.rc_buf_initial_sz;
+ oxcf->optimal_buffer_level = cfg.rc_buf_optimal_sz;
+
+ oxcf->two_pass_vbrbias = cfg.rc_2pass_vbr_bias_pct;
+ oxcf->two_pass_vbrmin_section = cfg.rc_2pass_vbr_minsection_pct;
+ oxcf->two_pass_vbrmax_section = cfg.rc_2pass_vbr_maxsection_pct;
+
+ oxcf->auto_key =
+ cfg.kf_mode == VPX_KF_AUTO && cfg.kf_min_dist != cfg.kf_max_dist;
+ oxcf->key_freq = cfg.kf_max_dist;
+
+ oxcf->number_of_layers = cfg.ts_number_layers;
+ oxcf->periodicity = cfg.ts_periodicity;
+
+ if (oxcf->number_of_layers > 1) {
+ memcpy(oxcf->target_bitrate, cfg.ts_target_bitrate,
+ sizeof(cfg.ts_target_bitrate));
+ memcpy(oxcf->rate_decimator, cfg.ts_rate_decimator,
+ sizeof(cfg.ts_rate_decimator));
+ memcpy(oxcf->layer_id, cfg.ts_layer_id, sizeof(cfg.ts_layer_id));
+ }
+
+#if CONFIG_MULTI_RES_ENCODING
+ /* When mr_cfg is NULL, oxcf->mr_total_resolutions and oxcf->mr_encoder_id
+ * have both been memset to 0, which yields the correct behaviour in that
+ * case.
+ */
+ if (mr_cfg) {
+ oxcf->mr_total_resolutions = mr_cfg->mr_total_resolutions;
+ oxcf->mr_encoder_id = mr_cfg->mr_encoder_id;
+ oxcf->mr_down_sampling_factor.num = mr_cfg->mr_down_sampling_factor.num;
+ oxcf->mr_down_sampling_factor.den = mr_cfg->mr_down_sampling_factor.den;
+ oxcf->mr_low_res_mode_info = mr_cfg->mr_low_res_mode_info;
+ }
+#else
+ (void)mr_cfg;
+#endif
+
+ oxcf->cpu_used = vp8_cfg.cpu_used;
+ if (cfg.g_pass == VPX_RC_FIRST_PASS) {
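+ /* First-pass stats gathering does not need the slower settings, so
+ * raise cpu_used to at least 4 (higher is faster). */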
+ oxcf->cpu_used = VPXMAX(4, oxcf->cpu_used);
+ }
+ oxcf->encode_breakout = vp8_cfg.static_thresh;
+ oxcf->play_alternate = vp8_cfg.enable_auto_alt_ref;
+ oxcf->noise_sensitivity = vp8_cfg.noise_sensitivity;
+ oxcf->Sharpness = vp8_cfg.Sharpness;
+ oxcf->token_partitions = vp8_cfg.token_partitions;
+
+ oxcf->two_pass_stats_in = cfg.rc_twopass_stats_in;
+ oxcf->output_pkt_list = vp8_cfg.pkt_list;
+
+ oxcf->arnr_max_frames = vp8_cfg.arnr_max_frames;
+ oxcf->arnr_strength = vp8_cfg.arnr_strength;
+ oxcf->arnr_type = vp8_cfg.arnr_type;
+
+ oxcf->tuning = vp8_cfg.tuning;
+
+ oxcf->screen_content_mode = vp8_cfg.screen_content_mode;
+
+ /*
+ printf("Current VP8 Settings: \n");
+ printf("target_bandwidth: %d\n", oxcf->target_bandwidth);
+ printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity);
+ printf("Sharpness: %d\n", oxcf->Sharpness);
+ printf("cpu_used: %d\n", oxcf->cpu_used);
+ printf("Mode: %d\n", oxcf->Mode);
+ printf("auto_key: %d\n", oxcf->auto_key);
+ printf("key_freq: %d\n", oxcf->key_freq);
+ printf("end_usage: %d\n", oxcf->end_usage);
+ printf("under_shoot_pct: %d\n", oxcf->under_shoot_pct);
+ printf("over_shoot_pct: %d\n", oxcf->over_shoot_pct);
+ printf("starting_buffer_level: %d\n", oxcf->starting_buffer_level);
+ printf("optimal_buffer_level: %d\n", oxcf->optimal_buffer_level);
+ printf("maximum_buffer_size: %d\n", oxcf->maximum_buffer_size);
+ printf("fixed_q: %d\n", oxcf->fixed_q);
+ printf("worst_allowed_q: %d\n", oxcf->worst_allowed_q);
+ printf("best_allowed_q: %d\n", oxcf->best_allowed_q);
+ printf("allow_spatial_resampling: %d\n", oxcf->allow_spatial_resampling);
+ printf("resample_down_water_mark: %d\n", oxcf->resample_down_water_mark);
+ printf("resample_up_water_mark: %d\n", oxcf->resample_up_water_mark);
+ printf("allow_df: %d\n", oxcf->allow_df);
+ printf("drop_frames_water_mark: %d\n", oxcf->drop_frames_water_mark);
+ printf("two_pass_vbrbias: %d\n", oxcf->two_pass_vbrbias);
+ printf("two_pass_vbrmin_section: %d\n", oxcf->two_pass_vbrmin_section);
+ printf("two_pass_vbrmax_section: %d\n", oxcf->two_pass_vbrmax_section);
+ printf("allow_lag: %d\n", oxcf->allow_lag);
+ printf("lag_in_frames: %d\n", oxcf->lag_in_frames);
+ printf("play_alternate: %d\n", oxcf->play_alternate);
+ printf("Version: %d\n", oxcf->Version);
+ printf("multi_threaded: %d\n", oxcf->multi_threaded);
+ printf("encode_breakout: %d\n", oxcf->encode_breakout);
+ */
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp8e_set_config(vpx_codec_alg_priv_t *ctx,
+ const vpx_codec_enc_cfg_t *cfg) {
+ vpx_codec_err_t res;
+
+ if (cfg->g_w != ctx->cfg.g_w || cfg->g_h != ctx->cfg.g_h) {
+ if (cfg->g_lag_in_frames > 1 || cfg->g_pass != VPX_RC_ONE_PASS)
+ ERROR("Cannot change width or height after initialization");
+ if ((ctx->cpi->initial_width && (int)cfg->g_w > ctx->cpi->initial_width) ||
+ (ctx->cpi->initial_height && (int)cfg->g_h > ctx->cpi->initial_height))
+ ERROR("Cannot increase width or height larger than their initial values");
+ }
+
+ /* Prevent increasing lag_in_frames. This check is stricter than it
+ * needs to be -- the real limit is not increasing past the first
+ * lag_in_frames value -- but we only track the last successful config,
+ * not the initial one.
+ */
+ if ((cfg->g_lag_in_frames > ctx->cfg.g_lag_in_frames))
+ ERROR("Cannot increase lag_in_frames");
+
+ res = validate_config(ctx, cfg, &ctx->vp8_cfg, 0);
+ if (res != VPX_CODEC_OK) return res;
+
+ if (setjmp(ctx->cpi->common.error.jmp)) {
+ const vpx_codec_err_t codec_err =
+ update_error_state(ctx, &ctx->cpi->common.error);
+ ctx->cpi->common.error.setjmp = 0;
+ vpx_clear_system_state();
+ assert(codec_err != VPX_CODEC_OK);
+ return codec_err;
+ }
+
+ ctx->cpi->common.error.setjmp = 1;
+ ctx->cfg = *cfg;
+ set_vp8e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg, NULL);
+ vp8_change_config(ctx->cpi, &ctx->oxcf);
+ ctx->cpi->common.error.setjmp = 0;
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t get_quantizer(vpx_codec_alg_priv_t *ctx, va_list args) {
+ int *const arg = va_arg(args, int *);
+ if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
+ *arg = vp8_get_quantizer(ctx->cpi);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t get_quantizer64(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ int *const arg = va_arg(args, int *);
+ if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
+ *arg = vp8_reverse_trans(vp8_get_quantizer(ctx->cpi));
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t update_extracfg(vpx_codec_alg_priv_t *ctx,
+ const struct vp8_extracfg *extra_cfg) {
+ const vpx_codec_err_t res = validate_config(ctx, &ctx->cfg, extra_cfg, 0);
+ if (res == VPX_CODEC_OK) {
+ ctx->vp8_cfg = *extra_cfg;
+ set_vp8e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg, NULL);
+ vp8_change_config(ctx->cpi, &ctx->oxcf);
+ }
+ return res;
+}
+
+static vpx_codec_err_t set_cpu_used(vpx_codec_alg_priv_t *ctx, va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.cpu_used = CAST(VP8E_SET_CPUUSED, args);
+ // Use fastest speed setting (speed 16 or -16) if it's set beyond the range.
+ extra_cfg.cpu_used = VPXMIN(16, extra_cfg.cpu_used);
+ extra_cfg.cpu_used = VPXMAX(-16, extra_cfg.cpu_used);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_enable_auto_alt_ref(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.enable_auto_alt_ref = CAST(VP8E_SET_ENABLEAUTOALTREF, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_noise_sensitivity(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.noise_sensitivity = CAST(VP8E_SET_NOISE_SENSITIVITY, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_sharpness(vpx_codec_alg_priv_t *ctx, va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.Sharpness = CAST(VP8E_SET_SHARPNESS, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_static_thresh(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.static_thresh = CAST(VP8E_SET_STATIC_THRESHOLD, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_token_partitions(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.token_partitions = CAST(VP8E_SET_TOKEN_PARTITIONS, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_arnr_max_frames(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.arnr_max_frames = CAST(VP8E_SET_ARNR_MAXFRAMES, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_arnr_strength(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.arnr_strength = CAST(VP8E_SET_ARNR_STRENGTH, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_arnr_type(vpx_codec_alg_priv_t *ctx, va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.arnr_type = CAST(VP8E_SET_ARNR_TYPE, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_tuning(vpx_codec_alg_priv_t *ctx, va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.tuning = CAST(VP8E_SET_TUNING, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_cq_level(vpx_codec_alg_priv_t *ctx, va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.cq_level = CAST(VP8E_SET_CQ_LEVEL, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_rc_max_intra_bitrate_pct(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.rc_max_intra_bitrate_pct =
+ CAST(VP8E_SET_MAX_INTRA_BITRATE_PCT, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.gf_cbr_boost_pct = CAST(VP8E_SET_GF_CBR_BOOST_PCT, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t set_screen_content_mode(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.screen_content_mode = CAST(VP8E_SET_SCREEN_CONTENT_MODE, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
+static vpx_codec_err_t ctrl_set_rtc_external_ratectrl(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ VP8_COMP *cpi = ctx->cpi;
+ const unsigned int data = CAST(VP8E_SET_GF_CBR_BOOST_PCT, args);
+ if (data) {
+ cpi->cyclic_refresh_mode_enabled = 0;
+ cpi->rt_always_update_correction_factor = 1;
+ }
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp8e_mr_alloc_mem(const vpx_codec_enc_cfg_t *cfg,
+ void **mem_loc) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+#if CONFIG_MULTI_RES_ENCODING
+ LOWER_RES_FRAME_INFO *shared_mem_loc;
+ int mb_rows = ((cfg->g_h + 15) >> 4);
+ int mb_cols = ((cfg->g_w + 15) >> 4);
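+ /* Only the product mb_rows * mb_cols matters below; it sizes the
+ * per-macroblock info array shared across the multi-resolution
+ * encoders. */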
+
+ shared_mem_loc = calloc(1, sizeof(LOWER_RES_FRAME_INFO));
+ if (!shared_mem_loc) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ shared_mem_loc->mb_info =
+ calloc(mb_rows * mb_cols, sizeof(LOWER_RES_MB_INFO));
+ if (!(shared_mem_loc->mb_info)) {
+ free(shared_mem_loc);
+ res = VPX_CODEC_MEM_ERROR;
+ } else {
+ *mem_loc = (void *)shared_mem_loc;
+ res = VPX_CODEC_OK;
+ }
+#else
+ (void)cfg;
+ (void)mem_loc;
+#endif
+ return res;
+}
+
+static vpx_codec_err_t vp8e_init(vpx_codec_ctx_t *ctx,
+ vpx_codec_priv_enc_mr_cfg_t *mr_cfg) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ vp8_rtcd();
+ vpx_dsp_rtcd();
+ vpx_scale_rtcd();
+
+ if (!ctx->priv) {
+ struct vpx_codec_alg_priv *priv =
+ (struct vpx_codec_alg_priv *)vpx_calloc(1, sizeof(*priv));
+
+ if (!priv) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ ctx->priv = (vpx_codec_priv_t *)priv;
+ ctx->priv->init_flags = ctx->init_flags;
+
+ if (ctx->config.enc) {
+ /* Update the reference to the config structure to an
+ * internal copy.
+ */
+ priv->cfg = *ctx->config.enc;
+ ctx->config.enc = &priv->cfg;
+ }
+
+ priv->vp8_cfg = default_extracfg;
+ priv->vp8_cfg.pkt_list = &priv->pkt_list.head;
+
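+ /* Size the compressed-data buffer at twice the raw I420 frame size
+ * (w * h * 3/2 bytes per frame), with a 32 KiB floor applied below. */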
+ priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 2;
+
+ if (priv->cx_data_sz < 32768) priv->cx_data_sz = 32768;
+
+ priv->cx_data = malloc(priv->cx_data_sz);
+
+ if (!priv->cx_data) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ if (mr_cfg) {
+ ctx->priv->enc.total_encoders = mr_cfg->mr_total_resolutions;
+ } else {
+ ctx->priv->enc.total_encoders = 1;
+ }
+
+ vp8_initialize_enc();
+
+ res = validate_config(priv, &priv->cfg, &priv->vp8_cfg, 0);
+
+ if (!res) {
+ priv->pts_offset_initialized = 0;
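+ /* timestamp_ratio rescales user timebase units to the codec's internal
+ * ticks, i.e. g_timebase.num * TICKS_PER_SEC / g_timebase.den. For a
+ * 1/30 timebase this is 10000000/30, reduced to 1000000/3. */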
+ priv->timestamp_ratio.den = priv->cfg.g_timebase.den;
+ priv->timestamp_ratio.num = (int64_t)priv->cfg.g_timebase.num;
+ priv->timestamp_ratio.num *= TICKS_PER_SEC;
+ reduce_ratio(&priv->timestamp_ratio);
+
+ set_vp8e_config(&priv->oxcf, priv->cfg, priv->vp8_cfg, mr_cfg);
+ priv->cpi = vp8_create_compressor(&priv->oxcf);
+ if (!priv->cpi) res = VPX_CODEC_MEM_ERROR;
+ }
+ }
+
+ return res;
+}
+
+static vpx_codec_err_t vp8e_destroy(vpx_codec_alg_priv_t *ctx) {
+#if CONFIG_MULTI_RES_ENCODING
+ /* Free multi-encoder shared memory */
+ if (ctx->oxcf.mr_total_resolutions > 0 &&
+ (ctx->oxcf.mr_encoder_id == ctx->oxcf.mr_total_resolutions - 1)) {
+ LOWER_RES_FRAME_INFO *shared_mem_loc =
+ (LOWER_RES_FRAME_INFO *)ctx->oxcf.mr_low_res_mode_info;
+ free(shared_mem_loc->mb_info);
+ free(ctx->oxcf.mr_low_res_mode_info);
+ }
+#endif
+
+ free(ctx->cx_data);
+ vp8_remove_compressor(&ctx->cpi);
+ vpx_free(ctx);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
+ YV12_BUFFER_CONFIG *yv12) {
+ const int y_w = img->d_w;
+ const int y_h = img->d_h;
+ const int uv_w = (img->d_w + 1) / 2;
+ const int uv_h = (img->d_h + 1) / 2;
+ vpx_codec_err_t res = VPX_CODEC_OK;
+ yv12->y_buffer = img->planes[VPX_PLANE_Y];
+ yv12->u_buffer = img->planes[VPX_PLANE_U];
+ yv12->v_buffer = img->planes[VPX_PLANE_V];
+
+ yv12->y_crop_width = y_w;
+ yv12->y_crop_height = y_h;
+ yv12->y_width = y_w;
+ yv12->y_height = y_h;
+ yv12->uv_crop_width = uv_w;
+ yv12->uv_crop_height = uv_h;
+ yv12->uv_width = uv_w;
+ yv12->uv_height = uv_h;
+
+ yv12->y_stride = img->stride[VPX_PLANE_Y];
+ yv12->uv_stride = img->stride[VPX_PLANE_U];
+
+ yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
+ return res;
+}
+
+static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
+ unsigned long duration,
+ unsigned long deadline) {
+ int new_qc;
+
+#if !(CONFIG_REALTIME_ONLY)
+ /* Use best quality mode if no deadline is given. */
+ new_qc = MODE_BESTQUALITY;
+
+ if (deadline) {
+ /* Convert duration parameter from stream timebase to microseconds */
+ uint64_t duration_us;
+
+ VPX_STATIC_ASSERT(TICKS_PER_SEC > 1000000 &&
+ (TICKS_PER_SEC % 1000000) == 0);
+
+ duration_us = duration * (uint64_t)ctx->timestamp_ratio.num /
+ (ctx->timestamp_ratio.den * (TICKS_PER_SEC / 1000000));
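+ /* e.g. with a 1/30 s timebase (timestamp_ratio 1000000/3) a duration
+ * of 1 yields 1 * 1000000 / (3 * 10) = 33333 us. */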
+
+ /* If the deadline is longer than the duration this frame is to be
+ * shown for, use good quality mode. Otherwise use realtime mode.
+ */
+ new_qc = (deadline > duration_us) ? MODE_GOODQUALITY : MODE_REALTIME;
+ }
+
+#else
+ (void)duration;
+ new_qc = MODE_REALTIME;
+#endif
+
+ if (deadline == VPX_DL_REALTIME) {
+ new_qc = MODE_REALTIME;
+ } else if (ctx->cfg.g_pass == VPX_RC_FIRST_PASS) {
+ new_qc = MODE_FIRSTPASS;
+ } else if (ctx->cfg.g_pass == VPX_RC_LAST_PASS) {
+ new_qc =
+ (new_qc == MODE_BESTQUALITY) ? MODE_SECONDPASS_BEST : MODE_SECONDPASS;
+ }
+
+ if (ctx->oxcf.Mode != new_qc) {
+ ctx->oxcf.Mode = new_qc;
+ vp8_change_config(ctx->cpi, &ctx->oxcf);
+ }
+}
+
+static vpx_codec_err_t set_reference_and_update(vpx_codec_alg_priv_t *ctx,
+ vpx_enc_frame_flags_t flags) {
+ /* Handle Flags */
+ if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF)) ||
+ ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
+ ctx->base.err_detail = "Conflicting flags.";
+ return VPX_CODEC_INVALID_PARAM;
+ }
+
+ if (flags &
+ (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) {
+ int ref = 7;
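+ /* Start with all three references enabled (VP8_LAST_FRAME |
+ * VP8_GOLD_FRAME | VP8_ALTR_FRAME == 7), then clear the flagged ones. */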
+
+ if (flags & VP8_EFLAG_NO_REF_LAST) ref ^= VP8_LAST_FRAME;
+
+ if (flags & VP8_EFLAG_NO_REF_GF) ref ^= VP8_GOLD_FRAME;
+
+ if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VP8_ALTR_FRAME;
+
+ vp8_use_as_reference(ctx->cpi, ref);
+ }
+
+ if (flags &
+ (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_FORCE_GF | VP8_EFLAG_FORCE_ARF)) {
+ int upd = 7;
+
+ if (flags & VP8_EFLAG_NO_UPD_LAST) upd ^= VP8_LAST_FRAME;
+
+ if (flags & VP8_EFLAG_NO_UPD_GF) upd ^= VP8_GOLD_FRAME;
+
+ if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VP8_ALTR_FRAME;
+
+ vp8_update_reference(ctx->cpi, upd);
+ }
+
+ if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
+ vp8_update_entropy(ctx->cpi, 0);
+ }
+
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
+ const vpx_image_t *img, vpx_codec_pts_t pts,
+ unsigned long duration,
+ vpx_enc_frame_flags_t enc_flags,
+ unsigned long deadline) {
+ volatile vpx_codec_err_t res = VPX_CODEC_OK;
+ // Make a copy as volatile to avoid -Wclobbered with longjmp.
+ volatile vpx_enc_frame_flags_t flags = enc_flags;
+ volatile vpx_codec_pts_t pts_val = pts;
+
+ if (!ctx->cfg.rc_target_bitrate) {
+#if CONFIG_MULTI_RES_ENCODING
+ if (!ctx->cpi) return VPX_CODEC_ERROR;
+ if (ctx->cpi->oxcf.mr_total_resolutions > 1) {
+ LOWER_RES_FRAME_INFO *low_res_frame_info =
+ (LOWER_RES_FRAME_INFO *)ctx->cpi->oxcf.mr_low_res_mode_info;
+ if (!low_res_frame_info) return VPX_CODEC_ERROR;
+ low_res_frame_info->skip_encoding_prev_stream = 1;
+ if (ctx->cpi->oxcf.mr_encoder_id == 0)
+ low_res_frame_info->skip_encoding_base_stream = 1;
+ }
+#endif
+ return res;
+ }
+
+ if (img) res = validate_img(ctx, img);
+
+ if (!res) res = validate_config(ctx, &ctx->cfg, &ctx->vp8_cfg, 1);
+
+ if (!ctx->pts_offset_initialized) {
+ ctx->pts_offset = pts_val;
+ ctx->pts_offset_initialized = 1;
+ }
+ pts_val -= ctx->pts_offset;
+
+ pick_quickcompress_mode(ctx, duration, deadline);
+ vpx_codec_pkt_list_init(&ctx->pkt_list);
+
+ // If no flags are set in the encode call, then use the frame flags as
+ // defined via the control function: vp8e_set_frame_flags.
+ if (!flags) {
+ flags = ctx->control_frame_flags;
+ }
+ ctx->control_frame_flags = 0;
+
+ if (!res) res = set_reference_and_update(ctx, flags);
+
+ /* Handle fixed keyframe intervals */
+ if (ctx->cfg.kf_mode == VPX_KF_AUTO &&
+ ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist) {
+ if (++ctx->fixed_kf_cntr > ctx->cfg.kf_min_dist) {
+ flags |= VPX_EFLAG_FORCE_KF;
+ ctx->fixed_kf_cntr = 1;
+ }
+ }
+
+ /* Initialize the encoder instance on the first frame */
+ if (!res && ctx->cpi) {
+ unsigned int lib_flags;
+ YV12_BUFFER_CONFIG sd;
+ int64_t dst_time_stamp, dst_end_time_stamp;
+ size_t size, cx_data_sz;
+ unsigned char *cx_data;
+ unsigned char *cx_data_end;
+ int comp_data_state = 0;
+
+ if (setjmp(ctx->cpi->common.error.jmp)) {
+ ctx->cpi->common.error.setjmp = 0;
+ vpx_clear_system_state();
+ return VPX_CODEC_CORRUPT_FRAME;
+ }
+ ctx->cpi->common.error.setjmp = 1;
+
+ /* Set up internal flags */
+ if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) {
+ ((VP8_COMP *)ctx->cpi)->b_calculate_psnr = 1;
+ }
+
+ if (ctx->base.init_flags & VPX_CODEC_USE_OUTPUT_PARTITION) {
+ ((VP8_COMP *)ctx->cpi)->output_partition = 1;
+ }
+
+ /* Convert API flags to internal codec lib flags */
+ lib_flags = (flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
+
+ dst_time_stamp =
+ pts_val * ctx->timestamp_ratio.num / ctx->timestamp_ratio.den;
+ dst_end_time_stamp = (pts_val + (int64_t)duration) *
+ ctx->timestamp_ratio.num / ctx->timestamp_ratio.den;
+
+ if (img != NULL) {
+ res = image2yuvconfig(img, &sd);
+
+ if (sd.y_width != ctx->cfg.g_w || sd.y_height != ctx->cfg.g_h) {
+ /* from vpx_encoder.h for g_w/g_h:
+ "Note that the frames passed as input to the encoder must have this
+ resolution"
+ */
+ ctx->base.err_detail = "Invalid input frame resolution";
+ res = VPX_CODEC_INVALID_PARAM;
+ } else {
+ if (vp8_receive_raw_frame(ctx->cpi, ctx->next_frame_flag | lib_flags,
+ &sd, dst_time_stamp, dst_end_time_stamp)) {
+ VP8_COMP *cpi = (VP8_COMP *)ctx->cpi;
+ res = update_error_state(ctx, &cpi->common.error);
+ }
+ }
+
+ /* reset for next frame */
+ ctx->next_frame_flag = 0;
+ }
+
+ cx_data = ctx->cx_data;
+ cx_data_sz = ctx->cx_data_sz;
+ cx_data_end = ctx->cx_data + cx_data_sz;
+ lib_flags = 0;
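+ /* In lagged encoding vp8_get_compressed_data() may return several
+ * buffered frames per call, so keep draining until less than half of
+ * the output buffer remains. */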
+
+ while (cx_data_sz >= ctx->cx_data_sz / 2) {
+ comp_data_state = vp8_get_compressed_data(
+ ctx->cpi, &lib_flags, &size, cx_data, cx_data_end, &dst_time_stamp,
+ &dst_end_time_stamp, !img);
+
+ if (comp_data_state == VPX_CODEC_CORRUPT_FRAME) {
+ return VPX_CODEC_CORRUPT_FRAME;
+ } else if (comp_data_state == -1) {
+ break;
+ }
+
+ if (size) {
+ vpx_codec_pts_t round, delta;
+ vpx_codec_cx_pkt_t pkt;
+ VP8_COMP *cpi = (VP8_COMP *)ctx->cpi;
+
+ /* Add the frame packet to the list of returned packets. */
+ round = (vpx_codec_pts_t)ctx->timestamp_ratio.num / 2;
+ if (round > 0) --round;
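+ /* round implements round-to-nearest for the conversions back to the
+ * user timebase below; decrementing it by one biases exact ties
+ * downward. */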
+ delta = (dst_end_time_stamp - dst_time_stamp);
+ pkt.kind = VPX_CODEC_CX_FRAME_PKT;
+ pkt.data.frame.pts =
+ (dst_time_stamp * ctx->timestamp_ratio.den + round) /
+ ctx->timestamp_ratio.num +
+ ctx->pts_offset;
+ pkt.data.frame.duration =
+ (unsigned long)((delta * ctx->timestamp_ratio.den + round) /
+ ctx->timestamp_ratio.num);
+ pkt.data.frame.flags = lib_flags << 16;
+ pkt.data.frame.width[0] = cpi->common.Width;
+ pkt.data.frame.height[0] = cpi->common.Height;
+ pkt.data.frame.spatial_layer_encoded[0] = 1;
+
+ if (lib_flags & FRAMEFLAGS_KEY) {
+ pkt.data.frame.flags |= VPX_FRAME_IS_KEY;
+ }
+
+ if (!cpi->common.show_frame) {
+ pkt.data.frame.flags |= VPX_FRAME_IS_INVISIBLE;
+
+ /* This timestamp should be as close as possible to the prior
+ * PTS so that if a decoder uses the pts to schedule when to
+ * decode this frame, decoding starts right after the last frame
+ * was decoded. Invisible frames have no duration.
+ */
+ pkt.data.frame.pts =
+ ((cpi->last_time_stamp_seen * ctx->timestamp_ratio.den + round) /
+ ctx->timestamp_ratio.num) +
+ ctx->pts_offset + 1;
+ pkt.data.frame.duration = 0;
+ }
+
+ if (cpi->droppable) pkt.data.frame.flags |= VPX_FRAME_IS_DROPPABLE;
+
+ if (cpi->output_partition) {
+ int i;
+ const int num_partitions =
+ (1 << cpi->common.multi_token_partition) + 1;
+
+ pkt.data.frame.flags |= VPX_FRAME_IS_FRAGMENT;
+
+ for (i = 0; i < num_partitions; ++i) {
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ pkt.data.frame.buf = cpi->partition_d[i];
+#else
+ pkt.data.frame.buf = cx_data;
+ cx_data += cpi->partition_sz[i];
+ cx_data_sz -= cpi->partition_sz[i];
+#endif
+ pkt.data.frame.sz = cpi->partition_sz[i];
+ pkt.data.frame.partition_id = i;
+ /* don't set the fragment bit for the last partition */
+ if (i == (num_partitions - 1)) {
+ pkt.data.frame.flags &= ~VPX_FRAME_IS_FRAGMENT;
+ }
+ vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
+ }
+#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
+ /* In lagged mode the encoder can buffer multiple frames.
+ * We don't want this in partitioned output because
+ * partitions are spread all over the output buffer.
+ * So, force an exit!
+ */
+ cx_data_sz -= ctx->cx_data_sz / 2;
+#endif
+ } else {
+ pkt.data.frame.buf = cx_data;
+ pkt.data.frame.sz = size;
+ pkt.data.frame.partition_id = -1;
+ vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
+ cx_data += size;
+ cx_data_sz -= size;
+ }
+ }
+ }
+ ctx->cpi->common.error.setjmp = 0;
+ }
+
+ return res;
+}
+
+static const vpx_codec_cx_pkt_t *vp8e_get_cxdata(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_iter_t *iter) {
+ return vpx_codec_pkt_list_get(&ctx->pkt_list.head, iter);
+}
+
+static vpx_codec_err_t vp8e_set_reference(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+ vp8_set_reference(ctx->cpi, frame->frame_type, &sd);
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp8e_get_reference(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+ vp8_get_reference(ctx->cpi, frame->frame_type, &sd);
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp8e_set_previewpp(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+#if CONFIG_POSTPROC
+ vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *);
+
+ if (data) {
+ ctx->preview_ppcfg = *((vp8_postproc_cfg_t *)data);
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+#else
+ (void)ctx;
+ (void)args;
+ return VPX_CODEC_INCAPABLE;
+#endif
+}
+
+static vpx_image_t *vp8e_get_preview(vpx_codec_alg_priv_t *ctx) {
+ YV12_BUFFER_CONFIG sd;
+ vp8_ppflags_t flags;
+ vp8_zero(flags);
+
+ if (ctx->preview_ppcfg.post_proc_flag) {
+ flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag;
+ flags.deblocking_level = ctx->preview_ppcfg.deblocking_level;
+ flags.noise_level = ctx->preview_ppcfg.noise_level;
+ }
+
+ if (0 == vp8_get_preview_raw_frame(ctx->cpi, &sd, &flags)) {
+ /*
+ vpx_img_wrap(&ctx->preview_img, VPX_IMG_FMT_YV12,
+ sd.y_width + 2*VP8BORDERINPIXELS,
+ sd.y_height + 2*VP8BORDERINPIXELS,
+ 1,
+ sd.buffer_alloc);
+ vpx_img_set_rect(&ctx->preview_img,
+ VP8BORDERINPIXELS, VP8BORDERINPIXELS,
+ sd.y_width, sd.y_height);
+ */
+
+ ctx->preview_img.bps = 12;
+ ctx->preview_img.planes[VPX_PLANE_Y] = sd.y_buffer;
+ ctx->preview_img.planes[VPX_PLANE_U] = sd.u_buffer;
+ ctx->preview_img.planes[VPX_PLANE_V] = sd.v_buffer;
+
+ ctx->preview_img.fmt = VPX_IMG_FMT_I420;
+ ctx->preview_img.x_chroma_shift = 1;
+ ctx->preview_img.y_chroma_shift = 1;
+
+ ctx->preview_img.d_w = sd.y_width;
+ ctx->preview_img.d_h = sd.y_height;
+ ctx->preview_img.stride[VPX_PLANE_Y] = sd.y_stride;
+ ctx->preview_img.stride[VPX_PLANE_U] = sd.uv_stride;
+ ctx->preview_img.stride[VPX_PLANE_V] = sd.uv_stride;
+ ctx->preview_img.w = sd.y_width;
+ ctx->preview_img.h = sd.y_height;
+
+ return &ctx->preview_img;
+ } else {
+ return NULL;
+ }
+}
+
+static vpx_codec_err_t vp8e_set_frame_flags(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ int frame_flags = va_arg(args, int);
+ ctx->control_frame_flags = frame_flags;
+ return set_reference_and_update(ctx, frame_flags);
+}
+
+static vpx_codec_err_t vp8e_set_temporal_layer_id(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ int layer_id = va_arg(args, int);
+ if (layer_id < 0 || layer_id >= (int)ctx->cfg.ts_number_layers) {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+ ctx->cpi->temporal_layer_id = layer_id;
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp8e_set_roi_map(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ vpx_roi_map_t *data = va_arg(args, vpx_roi_map_t *);
+
+ if (data) {
+ vpx_roi_map_t *roi = (vpx_roi_map_t *)data;
+
+ if (!vp8_set_roimap(ctx->cpi, roi->roi_map, roi->rows, roi->cols,
+ roi->delta_q, roi->delta_lf, roi->static_threshold)) {
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp8e_set_activemap(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ vpx_active_map_t *data = va_arg(args, vpx_active_map_t *);
+
+ if (data) {
+ vpx_active_map_t *map = (vpx_active_map_t *)data;
+
+ if (!vp8_set_active_map(ctx->cpi, map->active_map, map->rows, map->cols)) {
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp8e_set_scalemode(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ vpx_scaling_mode_t *data = va_arg(args, vpx_scaling_mode_t *);
+
+ if (data) {
+ int res;
+ vpx_scaling_mode_t scalemode = *(vpx_scaling_mode_t *)data;
+ res = vp8_set_internal_size(ctx->cpi, scalemode.h_scaling_mode,
+ scalemode.v_scaling_mode);
+
+ if (!res) {
+ /* force the next frame to be a key frame so the scaling mode takes effect */
+ ctx->next_frame_flag |= FRAMEFLAGS_KEY;
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_ctrl_fn_map_t vp8e_ctf_maps[] = {
+ { VP8_SET_REFERENCE, vp8e_set_reference },
+ { VP8_COPY_REFERENCE, vp8e_get_reference },
+ { VP8_SET_POSTPROC, vp8e_set_previewpp },
+ { VP8E_SET_FRAME_FLAGS, vp8e_set_frame_flags },
+ { VP8E_SET_TEMPORAL_LAYER_ID, vp8e_set_temporal_layer_id },
+ { VP8E_SET_ROI_MAP, vp8e_set_roi_map },
+ { VP8E_SET_ACTIVEMAP, vp8e_set_activemap },
+ { VP8E_SET_SCALEMODE, vp8e_set_scalemode },
+ { VP8E_SET_CPUUSED, set_cpu_used },
+ { VP8E_SET_NOISE_SENSITIVITY, set_noise_sensitivity },
+ { VP8E_SET_ENABLEAUTOALTREF, set_enable_auto_alt_ref },
+ { VP8E_SET_SHARPNESS, set_sharpness },
+ { VP8E_SET_STATIC_THRESHOLD, set_static_thresh },
+ { VP8E_SET_TOKEN_PARTITIONS, set_token_partitions },
+ { VP8E_GET_LAST_QUANTIZER, get_quantizer },
+ { VP8E_GET_LAST_QUANTIZER_64, get_quantizer64 },
+ { VP8E_SET_ARNR_MAXFRAMES, set_arnr_max_frames },
+ { VP8E_SET_ARNR_STRENGTH, set_arnr_strength },
+ { VP8E_SET_ARNR_TYPE, set_arnr_type },
+ { VP8E_SET_TUNING, set_tuning },
+ { VP8E_SET_CQ_LEVEL, set_cq_level },
+ { VP8E_SET_MAX_INTRA_BITRATE_PCT, set_rc_max_intra_bitrate_pct },
+ { VP8E_SET_SCREEN_CONTENT_MODE, set_screen_content_mode },
+ { VP8E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct },
+ { VP8E_SET_RTC_EXTERNAL_RATECTRL, ctrl_set_rtc_external_ratectrl },
+ { -1, NULL },
+};
+
+static vpx_codec_enc_cfg_map_t vp8e_usage_cfg_map[] = {
+ { 0,
+ {
+ 0, /* g_usage (unused) */
+ 0, /* g_threads */
+ 0, /* g_profile */
+
+ 320, /* g_width */
+ 240, /* g_height */
+ VPX_BITS_8, /* g_bit_depth */
+ 8, /* g_input_bit_depth */
+
+ { 1, 30 }, /* g_timebase */
+
+ 0, /* g_error_resilient */
+
+ VPX_RC_ONE_PASS, /* g_pass */
+
+ 0, /* g_lag_in_frames */
+
+ 0, /* rc_dropframe_thresh */
+ 0, /* rc_resize_allowed */
+ 1, /* rc_scaled_width */
+ 1, /* rc_scaled_height */
+ 60, /* rc_resize_down_thresh */
+ 30, /* rc_resize_up_thresh */
+
+ VPX_VBR, /* rc_end_usage */
+ { NULL, 0 }, /* rc_twopass_stats_in */
+ { NULL, 0 }, /* rc_firstpass_mb_stats_in */
+ 256, /* rc_target_bitrate */
+ 4, /* rc_min_quantizer */
+ 63, /* rc_max_quantizer */
+ 100, /* rc_undershoot_pct */
+ 100, /* rc_overshoot_pct */
+
+ 6000, /* rc_buf_sz */
+ 4000, /* rc_buf_initial_sz */
+ 5000, /* rc_buf_optimal_sz */
+
+ 50, /* rc_2pass_vbr_bias_pct */
+ 0, /* rc_2pass_vbr_minsection_pct */
+ 400, /* rc_2pass_vbr_maxsection_pct */
+ 0, /* rc_2pass_vbr_corpus_complexity (only meaningful for VP9) */
+
+ /* keyframing settings (kf) */
+ VPX_KF_AUTO, /* kf_mode */
+ 0, /* kf_min_dist */
+ 128, /* kf_max_dist */
+
+ VPX_SS_DEFAULT_LAYERS, /* ss_number_layers */
+ { 0 },
+ { 0 }, /* ss_target_bitrate */
+ 1, /* ts_number_layers */
+ { 0 }, /* ts_target_bitrate */
+ { 0 }, /* ts_rate_decimator */
+ 0, /* ts_periodicity */
+ { 0 }, /* ts_layer_id */
+ { 0 }, /* layer_target_bitrate */
+ 0, /* temporal_layering_mode */
+ 0, /* use_vizier_rc_params */
+ { 1, 1 }, /* active_wq_factor */
+ { 1, 1 }, /* err_per_mb_factor */
+ { 1, 1 }, /* sr_default_decay_limit */
+ { 1, 1 }, /* sr_diff_factor */
+ { 1, 1 }, /* kf_err_per_mb_factor */
+ { 1, 1 }, /* kf_frame_min_boost_factor */
+ { 1, 1 }, /* kf_frame_max_boost_first_factor */
+ { 1, 1 }, /* kf_frame_max_boost_subs_factor */
+ { 1, 1 }, /* kf_max_total_boost_factor */
+ { 1, 1 }, /* gf_max_total_boost_factor */
+ { 1, 1 }, /* gf_frame_max_boost_factor */
+ { 1, 1 }, /* zm_factor */
+ { 1, 1 }, /* rd_mult_inter_qp_fac */
+ { 1, 1 }, /* rd_mult_arf_qp_fac */
+ { 1, 1 }, /* rd_mult_key_qp_fac */
+ } },
+};
+
+#ifndef VERSION_STRING
+#define VERSION_STRING
+#endif
+CODEC_INTERFACE(vpx_codec_vp8_cx) = {
+ "WebM Project VP8 Encoder" VERSION_STRING,
+ VPX_CODEC_INTERNAL_ABI_VERSION,
+ VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR | VPX_CODEC_CAP_OUTPUT_PARTITION,
+ /* vpx_codec_caps_t caps; */
+ vp8e_init, /* vpx_codec_init_fn_t init; */
+ vp8e_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ vp8e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ {
+ NULL, /* vpx_codec_peek_si_fn_t peek_si; */
+ NULL, /* vpx_codec_get_si_fn_t get_si; */
+ NULL, /* vpx_codec_decode_fn_t decode; */
+ NULL, /* vpx_codec_frame_get_fn_t frame_get; */
+ NULL, /* vpx_codec_set_fb_fn_t set_fb_fn; */
+ },
+ {
+ 1, /* 1 cfg map */
+ vp8e_usage_cfg_map, /* vpx_codec_enc_cfg_map_t cfg_maps; */
+ vp8e_encode, /* vpx_codec_encode_fn_t encode; */
+ vp8e_get_cxdata, /* vpx_codec_get_cx_data_fn_t get_cx_data; */
+ vp8e_set_config,
+ NULL,
+ vp8e_get_preview,
+ vp8e_mr_alloc_mem,
+ } /* encoder functions */
+};
diff --git a/media/libvpx/libvpx/vp8/vp8_dx_iface.c b/media/libvpx/libvpx/vp8/vp8_dx_iface.c
new file mode 100644
index 0000000000..fdc0b35dd4
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/vp8_dx_iface.c
@@ -0,0 +1,735 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "./vp8_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx/vp8dx.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx_version.h"
+#include "common/alloccommon.h"
+#include "common/common.h"
+#include "common/onyxd.h"
+#include "decoder/onyxd_int.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/system_state.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "decoder/error_concealment.h"
+#endif
+#include "decoder/decoderthreading.h"
+
+#define VP8_CAP_POSTPROC (CONFIG_POSTPROC ? VPX_CODEC_CAP_POSTPROC : 0)
+#define VP8_CAP_ERROR_CONCEALMENT \
+ (CONFIG_ERROR_CONCEALMENT ? VPX_CODEC_CAP_ERROR_CONCEALMENT : 0)
+
+typedef vpx_codec_stream_info_t vp8_stream_info_t;
+
+/* Structures for handling memory allocations */
+typedef enum { VP8_SEG_ALG_PRIV = 256, VP8_SEG_MAX } mem_seg_id_t;
+#define NELEMENTS(x) ((int)(sizeof(x) / sizeof((x)[0])))
+
+struct vpx_codec_alg_priv {
+ vpx_codec_priv_t base;
+ vpx_codec_dec_cfg_t cfg;
+ vp8_stream_info_t si;
+ int decoder_init;
+#if CONFIG_MULTITHREAD
+ // Restart threads on next frame if set to 1.
+ // This is set when error happens in multithreaded decoding and all threads
+ // are shut down.
+ int restart_threads;
+#endif
+ int postproc_cfg_set;
+ vp8_postproc_cfg_t postproc_cfg;
+ vpx_decrypt_cb decrypt_cb;
+ void *decrypt_state;
+ vpx_image_t img;
+ int img_setup;
+ struct frame_buffers yv12_frame_buffers;
+ void *user_priv;
+ FRAGMENT_DATA fragments;
+};
+
+static int vp8_init_ctx(vpx_codec_ctx_t *ctx) {
+ vpx_codec_alg_priv_t *priv =
+ (vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv));
+ if (!priv) return 1;
+
+ ctx->priv = (vpx_codec_priv_t *)priv;
+ ctx->priv->init_flags = ctx->init_flags;
+
+ priv->si.sz = sizeof(priv->si);
+ priv->decrypt_cb = NULL;
+ priv->decrypt_state = NULL;
+
+ if (ctx->config.dec) {
+ /* Update the reference to the config structure to an internal copy. */
+ priv->cfg = *ctx->config.dec;
+ ctx->config.dec = &priv->cfg;
+ }
+
+ return 0;
+}
+
+static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
+ vpx_codec_priv_enc_mr_cfg_t *data) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+ (void)data;
+
+ vp8_rtcd();
+ vpx_dsp_rtcd();
+ vpx_scale_rtcd();
+
+ /* This function only allocates space for the vpx_codec_alg_priv_t
+ * structure. More memory may be required at the time the stream
+ * information becomes known.
+ */
+ if (!ctx->priv) {
+ vpx_codec_alg_priv_t *priv;
+
+ if (vp8_init_ctx(ctx)) return VPX_CODEC_MEM_ERROR;
+
+ priv = (vpx_codec_alg_priv_t *)ctx->priv;
+
+ /* initialize number of fragments to zero */
+ priv->fragments.count = 0;
+ /* is input fragments enabled? */
+ priv->fragments.enabled =
+ (priv->base.init_flags & VPX_CODEC_USE_INPUT_FRAGMENTS);
+
+ /* post-processing level initialized to do nothing */
+ }
+
+ return res;
+}
+
+static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t *ctx) {
+ vp8_remove_decoder_instances(&ctx->yv12_frame_buffers);
+
+ vpx_free(ctx);
+
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp8_peek_si_internal(const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si,
+ vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ assert(data != NULL);
+
+ if (data + data_sz <= data) {
+ res = VPX_CODEC_INVALID_PARAM;
+ } else {
+ /* Parse the uncompressed part of the key frame header.
+ * 3 bytes: version, frame type and the first partition size
+ * 3 bytes: sync code (0x9d, 0x01, 0x2a)
+ * 4 bytes: image width and height in the lowest 14 bits
+ * of each 2-byte value.
+ */
+ uint8_t clear_buffer[10];
+ const uint8_t *clear = data;
+ if (decrypt_cb) {
+ int n = VPXMIN(sizeof(clear_buffer), data_sz);
+ decrypt_cb(decrypt_state, data, clear_buffer, n);
+ clear = clear_buffer;
+ }
+ si->is_kf = 0;
+
+ if (data_sz >= 10 && !(clear[0] & 0x01)) { /* I-Frame */
+ si->is_kf = 1;
+
+ /* vet via sync code */
+ if (clear[3] != 0x9d || clear[4] != 0x01 || clear[5] != 0x2a) {
+ return VPX_CODEC_UNSUP_BITSTREAM;
+ }
+
+ si->w = (clear[6] | (clear[7] << 8)) & 0x3fff;
+ si->h = (clear[8] | (clear[9] << 8)) & 0x3fff;
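+ /* e.g. a 640x480 key frame carries clear[6..9] = 0x80 0x02 0xe0 0x01
+ * (640 == 0x280, 480 == 0x1e0); the top two bits of each 16-bit value
+ * hold the upscaling factor. */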
+
+ /*printf("w=%d, h=%d\n", si->w, si->h);*/
+ if (!(si->h && si->w)) res = VPX_CODEC_CORRUPT_FRAME;
+ } else {
+ res = VPX_CODEC_UNSUP_BITSTREAM;
+ }
+ }
+
+ return res;
+}
+
+static vpx_codec_err_t vp8_peek_si(const uint8_t *data, unsigned int data_sz,
+ vpx_codec_stream_info_t *si) {
+ return vp8_peek_si_internal(data, data_sz, si, NULL, NULL);
+}
+
+static vpx_codec_err_t vp8_get_si(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_stream_info_t *si) {
+ unsigned int sz;
+
+ if (si->sz >= sizeof(vp8_stream_info_t)) {
+ sz = sizeof(vp8_stream_info_t);
+ } else {
+ sz = sizeof(vpx_codec_stream_info_t);
+ }
+
+ memcpy(si, &ctx->si, sz);
+ si->sz = sz;
+
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t update_error_state(
+ vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
+ vpx_codec_err_t res;
+
+ if ((res = error->error_code)) {
+ ctx->base.err_detail = error->has_detail ? error->detail : NULL;
+ }
+
+ return res;
+}
+
+static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
+ void *user_priv) {
+ /* vpx_img_wrap() doesn't allow specifying independent strides for
+ * the Y, U, and V planes, nor other alignment adjustments that
+ * might be representable by a YV12_BUFFER_CONFIG, so we just
+ * initialize all the fields. */
+ img->fmt = VPX_IMG_FMT_I420;
+ img->w = yv12->y_stride;
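+ /* Pad the height with the top and bottom borders and round up to a
+ * multiple of 16 to mirror the decoder's frame buffer allocation. */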
+ img->h = (yv12->y_height + 2 * VP8BORDERINPIXELS + 15) & ~15;
+ img->d_w = img->r_w = yv12->y_width;
+ img->d_h = img->r_h = yv12->y_height;
+ img->x_chroma_shift = 1;
+ img->y_chroma_shift = 1;
+ img->planes[VPX_PLANE_Y] = yv12->y_buffer;
+ img->planes[VPX_PLANE_U] = yv12->u_buffer;
+ img->planes[VPX_PLANE_V] = yv12->v_buffer;
+ img->planes[VPX_PLANE_ALPHA] = NULL;
+ img->stride[VPX_PLANE_Y] = yv12->y_stride;
+ img->stride[VPX_PLANE_U] = yv12->uv_stride;
+ img->stride[VPX_PLANE_V] = yv12->uv_stride;
+ img->stride[VPX_PLANE_ALPHA] = yv12->y_stride;
+ img->bit_depth = 8;
+ img->bps = 12;
+ img->user_priv = user_priv;
+ img->img_data = yv12->buffer_alloc;
+ img->img_data_owner = 0;
+ img->self_allocd = 0;
+}
+
+static int update_fragments(vpx_codec_alg_priv_t *ctx, const uint8_t *data,
+ unsigned int data_sz,
+ volatile vpx_codec_err_t *res) {
+ *res = VPX_CODEC_OK;
+
+ if (ctx->fragments.count == 0) {
+ /* New frame, reset fragment pointers and sizes */
+ memset((void *)ctx->fragments.ptrs, 0, sizeof(ctx->fragments.ptrs));
+ memset(ctx->fragments.sizes, 0, sizeof(ctx->fragments.sizes));
+ }
+ if (ctx->fragments.enabled && !(data == NULL && data_sz == 0)) {
+ /* Store a pointer to this fragment and return. We haven't
+ * received the complete frame yet, so defer decoding until it is.
+ */
+ ctx->fragments.ptrs[ctx->fragments.count] = data;
+ ctx->fragments.sizes[ctx->fragments.count] = data_sz;
+ ctx->fragments.count++;
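+ /* A frame has at most (1 << EIGHT_PARTITION) == 8 token partitions
+ * plus the first (mode/mv) partition, so anything beyond that many
+ * fragments is invalid. */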
+ if (ctx->fragments.count > (1 << EIGHT_PARTITION) + 1) {
+ ctx->fragments.count = 0;
+ *res = VPX_CODEC_INVALID_PARAM;
+ return -1;
+ }
+ return 0;
+ }
+
+ if (!ctx->fragments.enabled && (data == NULL && data_sz == 0)) {
+ return 0;
+ }
+
+ if (!ctx->fragments.enabled) {
+ ctx->fragments.ptrs[0] = data;
+ ctx->fragments.sizes[0] = data_sz;
+ ctx->fragments.count = 1;
+ }
+
+ return 1;
+}
+
+static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
+ const uint8_t *data, unsigned int data_sz,
+ void *user_priv, long deadline) {
+ volatile vpx_codec_err_t res;
+ volatile unsigned int resolution_change = 0;
+ volatile unsigned int w, h;
+
+ if (!ctx->fragments.enabled && (data == NULL && data_sz == 0)) {
+ return 0;
+ }
+
+ /* Update the input fragment data */
+ if (update_fragments(ctx, data, data_sz, &res) <= 0) return res;
+
+ /* Determine the stream parameters. Note that we rely on peek_si to
+ * validate that we have a buffer that does not wrap around the top
+ * of the heap.
+ */
+ w = ctx->si.w;
+ h = ctx->si.h;
+
+ res = vp8_peek_si_internal(ctx->fragments.ptrs[0], ctx->fragments.sizes[0],
+ &ctx->si, ctx->decrypt_cb, ctx->decrypt_state);
+
+ if ((res == VPX_CODEC_UNSUP_BITSTREAM) && !ctx->si.is_kf) {
+ /* the peek function returns an error for non-keyframes; in this
+ * case it is not an error */
+ res = VPX_CODEC_OK;
+ }
+
+ if (!ctx->decoder_init && !ctx->si.is_kf) res = VPX_CODEC_UNSUP_BITSTREAM;
+
+ if ((ctx->si.h != h) || (ctx->si.w != w)) resolution_change = 1;
+
+#if CONFIG_MULTITHREAD
+ if (!res && ctx->restart_threads) {
+ struct frame_buffers *fb = &ctx->yv12_frame_buffers;
+ VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0];
+ VP8_COMMON *const pc = &pbi->common;
+ if (setjmp(pbi->common.error.jmp)) {
+ pbi->common.error.setjmp = 0;
+ vp8_remove_decoder_instances(fb);
+ vp8_zero(fb->pbi);
+ vpx_clear_system_state();
+ return VPX_CODEC_ERROR;
+ }
+ pbi->common.error.setjmp = 1;
+ pbi->max_threads = ctx->cfg.threads;
+ vp8_decoder_create_threads(pbi);
+ if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd)) {
+ vp8mt_alloc_temp_buffers(pbi, pc->Width, pc->mb_rows);
+ }
+ ctx->restart_threads = 0;
+ pbi->common.error.setjmp = 0;
+ }
+#endif
+ /* Initialize the decoder instance on the first frame */
+ if (!res && !ctx->decoder_init) {
+ VP8D_CONFIG oxcf;
+
+ oxcf.Width = ctx->si.w;
+ oxcf.Height = ctx->si.h;
+ oxcf.Version = 9;
+ oxcf.postprocess = 0;
+ oxcf.max_threads = ctx->cfg.threads;
+ oxcf.error_concealment =
+ (ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT);
+
+ /* If postprocessing was enabled by the application and a
+ * configuration has not been provided, default it.
+ */
+ if (!ctx->postproc_cfg_set &&
+ (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) {
+ ctx->postproc_cfg.post_proc_flag =
+ VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE;
+ ctx->postproc_cfg.deblocking_level = 4;
+ ctx->postproc_cfg.noise_level = 0;
+ }
+
+ res = vp8_create_decoder_instances(&ctx->yv12_frame_buffers, &oxcf);
+ if (res == VPX_CODEC_OK) ctx->decoder_init = 1;
+ }
+
+ /* Set these even if already initialized. The caller may have changed the
+ * decrypt config between frames.
+ */
+ if (ctx->decoder_init) {
+ ctx->yv12_frame_buffers.pbi[0]->decrypt_cb = ctx->decrypt_cb;
+ ctx->yv12_frame_buffers.pbi[0]->decrypt_state = ctx->decrypt_state;
+ }
+
+ if (!res) {
+ VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0];
+ VP8_COMMON *const pc = &pbi->common;
+ if (resolution_change) {
+ MACROBLOCKD *const xd = &pbi->mb;
+#if CONFIG_MULTITHREAD
+ int i;
+#endif
+ pc->Width = ctx->si.w;
+ pc->Height = ctx->si.h;
+ {
+ if (setjmp(pbi->common.error.jmp)) {
+ pbi->common.error.setjmp = 0;
+ /* on failure clear the cached resolution to ensure a full
+ * reallocation is attempted on resync. */
+ ctx->si.w = 0;
+ ctx->si.h = 0;
+ vpx_clear_system_state();
+ /* same return value as used in vp8dx_receive_compressed_data */
+ return -1;
+ }
+
+ pbi->common.error.setjmp = 1;
+
+ if (pc->Width <= 0) {
+ pc->Width = w;
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame width");
+ }
+
+ if (pc->Height <= 0) {
+ pc->Height = h;
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame height");
+ }
+
+#if CONFIG_MULTITHREAD
+ if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd)) {
+ vp8mt_de_alloc_temp_buffers(pbi, pc->mb_rows);
+ }
+#endif
+
+ if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height)) {
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffers");
+ }
+
+ xd->pre = pc->yv12_fb[pc->lst_fb_idx];
+ xd->dst = pc->yv12_fb[pc->new_fb_idx];
+
+#if CONFIG_MULTITHREAD
+ for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) {
+ pbi->mb_row_di[i].mbd.dst = pc->yv12_fb[pc->new_fb_idx];
+ vp8_build_block_doffsets(&pbi->mb_row_di[i].mbd);
+ }
+#endif
+ vp8_build_block_doffsets(&pbi->mb);
+
+/* allocate memory for last frame MODE_INFO array */
+#if CONFIG_ERROR_CONCEALMENT
+
+ if (pbi->ec_enabled) {
+ /* old prev_mip was released by vp8_de_alloc_frame_buffers()
+ * called in vp8_alloc_frame_buffers() */
+ pc->prev_mip = vpx_calloc((pc->mb_cols + 1) * (pc->mb_rows + 1),
+ sizeof(MODE_INFO));
+
+ if (!pc->prev_mip) {
+ vp8_de_alloc_frame_buffers(pc);
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate"
+ "last frame MODE_INFO array");
+ }
+
+ pc->prev_mi = pc->prev_mip + pc->mode_info_stride + 1;
+
+ if (vp8_alloc_overlap_lists(pbi))
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate overlap lists "
+ "for error concealment");
+ }
+
+#endif
+
+#if CONFIG_MULTITHREAD
+ if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd)) {
+ vp8mt_alloc_temp_buffers(pbi, pc->Width, 0);
+ }
+#endif
+ }
+
+ pbi->common.error.setjmp = 0;
+
+ /* required to get past the first get_free_fb() call */
+ pbi->common.fb_idx_ref_cnt[0] = 0;
+ }
+
+ if (setjmp(pbi->common.error.jmp)) {
+ vpx_clear_system_state();
+ /* We do not know if the missing frame(s) was supposed to update
+ * any of the reference buffers, but we act conservatively and
+ * mark only the last buffer as corrupted.
+ */
+ pc->yv12_fb[pc->lst_fb_idx].corrupted = 1;
+
+ if (pc->fb_idx_ref_cnt[pc->new_fb_idx] > 0) {
+ pc->fb_idx_ref_cnt[pc->new_fb_idx]--;
+ }
+ pc->error.setjmp = 0;
+#if CONFIG_MULTITHREAD
+ if (pbi->restart_threads) {
+ ctx->si.w = 0;
+ ctx->si.h = 0;
+ ctx->restart_threads = 1;
+ }
+#endif
+ res = update_error_state(ctx, &pbi->common.error);
+ return res;
+ }
+
+ pbi->common.error.setjmp = 1;
+
+ /* update the pbi fragment data */
+ pbi->fragments = ctx->fragments;
+#if CONFIG_MULTITHREAD
+ pbi->restart_threads = 0;
+#endif
+ ctx->user_priv = user_priv;
+ if (vp8dx_receive_compressed_data(pbi, deadline)) {
+ res = update_error_state(ctx, &pbi->common.error);
+ }
+
+ /* get ready for the next series of fragments */
+ ctx->fragments.count = 0;
+ pbi->common.error.setjmp = 0;
+ }
+
+ return res;
+}
+
+static vpx_image_t *vp8_get_frame(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_iter_t *iter) {
+ vpx_image_t *img = NULL;
+
+ /* iter acts as a flip-flop, so an image is only returned on the
+ * first call to get_frame.
+ */
+ if (!(*iter) && ctx->yv12_frame_buffers.pbi[0]) {
+ YV12_BUFFER_CONFIG sd;
+ int64_t time_stamp = 0, time_end_stamp = 0;
+ vp8_ppflags_t flags;
+ vp8_zero(flags);
+
+ if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC) {
+ flags.post_proc_flag = ctx->postproc_cfg.post_proc_flag;
+ flags.deblocking_level = ctx->postproc_cfg.deblocking_level;
+ flags.noise_level = ctx->postproc_cfg.noise_level;
+ }
+
+ if (0 == vp8dx_get_raw_frame(ctx->yv12_frame_buffers.pbi[0], &sd,
+ &time_stamp, &time_end_stamp, &flags)) {
+ yuvconfig2image(&ctx->img, &sd, ctx->user_priv);
+
+ img = &ctx->img;
+ *iter = img;
+ }
+ }
+
+ return img;
+}
+
+static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
+ YV12_BUFFER_CONFIG *yv12) {
+ const int y_w = img->d_w;
+ const int y_h = img->d_h;
+ const int uv_w = (img->d_w + 1) / 2;
+ const int uv_h = (img->d_h + 1) / 2;
+ vpx_codec_err_t res = VPX_CODEC_OK;
+ yv12->y_buffer = img->planes[VPX_PLANE_Y];
+ yv12->u_buffer = img->planes[VPX_PLANE_U];
+ yv12->v_buffer = img->planes[VPX_PLANE_V];
+
+ yv12->y_crop_width = y_w;
+ yv12->y_crop_height = y_h;
+ yv12->y_width = y_w;
+ yv12->y_height = y_h;
+ yv12->uv_crop_width = uv_w;
+ yv12->uv_crop_height = uv_h;
+ yv12->uv_width = uv_w;
+ yv12->uv_height = uv_h;
+
+ yv12->y_stride = img->stride[VPX_PLANE_Y];
+ yv12->uv_stride = img->stride[VPX_PLANE_U];
+
+ yv12->border = (img->stride[VPX_PLANE_Y] - img->d_w) / 2;
+ return res;
+}
+
+static vpx_codec_err_t vp8_set_reference(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+
+ return vp8dx_set_reference(ctx->yv12_frame_buffers.pbi[0],
+ frame->frame_type, &sd);
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp8_get_reference(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+
+ return vp8dx_get_reference(ctx->yv12_frame_buffers.pbi[0],
+ frame->frame_type, &sd);
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp8_get_quantizer(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ int *const arg = va_arg(args, int *);
+ VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0];
+ if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
+ if (pbi == NULL) return VPX_CODEC_CORRUPT_FRAME;
+ *arg = vp8dx_get_quantizer(pbi);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp8_set_postproc(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+#if CONFIG_POSTPROC
+ vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *);
+
+ if (data) {
+ ctx->postproc_cfg_set = 1;
+ ctx->postproc_cfg = *((vp8_postproc_cfg_t *)data);
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+
+#else
+ (void)ctx;
+ (void)args;
+ return VPX_CODEC_INCAPABLE;
+#endif
+}
+
+static vpx_codec_err_t vp8_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ int *update_info = va_arg(args, int *);
+
+ if (update_info) {
+ VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0];
+ if (pbi == NULL) return VPX_CODEC_CORRUPT_FRAME;
+
+ *update_info = pbi->common.refresh_alt_ref_frame * (int)VP8_ALTR_FRAME +
+ pbi->common.refresh_golden_frame * (int)VP8_GOLD_FRAME +
+ pbi->common.refresh_last_frame * (int)VP8_LAST_FRAME;
+
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp8_get_last_ref_frame(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ int *ref_info = va_arg(args, int *);
+
+ if (ref_info) {
+ VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0];
+ if (pbi) {
+ VP8_COMMON *oci = &pbi->common;
+ *ref_info =
+ (vp8dx_references_buffer(oci, ALTREF_FRAME) ? VP8_ALTR_FRAME : 0) |
+ (vp8dx_references_buffer(oci, GOLDEN_FRAME) ? VP8_GOLD_FRAME : 0) |
+ (vp8dx_references_buffer(oci, LAST_FRAME) ? VP8_LAST_FRAME : 0);
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_CORRUPT_FRAME;
+ }
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ int *corrupted = va_arg(args, int *);
+ VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0];
+
+ if (corrupted && pbi) {
+ const YV12_BUFFER_CONFIG *const frame = pbi->common.frame_to_show;
+ if (frame == NULL) return VPX_CODEC_ERROR;
+ *corrupted = frame->corrupted;
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp8_set_decryptor(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ vpx_decrypt_init *init = va_arg(args, vpx_decrypt_init *);
+
+ if (init) {
+ ctx->decrypt_cb = init->decrypt_cb;
+ ctx->decrypt_state = init->decrypt_state;
+ } else {
+ ctx->decrypt_cb = NULL;
+ ctx->decrypt_state = NULL;
+ }
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_ctrl_fn_map_t vp8_ctf_maps[] = {
+ { VP8_SET_REFERENCE, vp8_set_reference },
+ { VP8_COPY_REFERENCE, vp8_get_reference },
+ { VP8_SET_POSTPROC, vp8_set_postproc },
+ { VP8D_GET_LAST_REF_UPDATES, vp8_get_last_ref_updates },
+ { VP8D_GET_FRAME_CORRUPTED, vp8_get_frame_corrupted },
+ { VP8D_GET_LAST_REF_USED, vp8_get_last_ref_frame },
+ { VPXD_GET_LAST_QUANTIZER, vp8_get_quantizer },
+ { VPXD_SET_DECRYPTOR, vp8_set_decryptor },
+ { -1, NULL },
+};
+
+#ifndef VERSION_STRING
+#define VERSION_STRING
+#endif
+CODEC_INTERFACE(vpx_codec_vp8_dx) = {
+ "WebM Project VP8 Decoder" VERSION_STRING,
+ VPX_CODEC_INTERNAL_ABI_VERSION,
+ VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC | VP8_CAP_ERROR_CONCEALMENT |
+ VPX_CODEC_CAP_INPUT_FRAGMENTS,
+ /* vpx_codec_caps_t caps; */
+ vp8_init, /* vpx_codec_init_fn_t init; */
+ vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ vp8_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ {
+ vp8_peek_si, /* vpx_codec_peek_si_fn_t peek_si; */
+ vp8_get_si, /* vpx_codec_get_si_fn_t get_si; */
+ vp8_decode, /* vpx_codec_decode_fn_t decode; */
+ vp8_get_frame, /* vpx_codec_frame_get_fn_t frame_get; */
+ NULL,
+ },
+ {
+ /* encoder functions */
+ 0, NULL, /* vpx_codec_enc_cfg_map_t */
+ NULL, /* vpx_codec_encode_fn_t */
+ NULL, /* vpx_codec_get_cx_data_fn_t */
+ NULL, /* vpx_codec_enc_config_set_fn_t */
+ NULL, /* vpx_codec_get_global_headers_fn_t */
+ NULL, /* vpx_codec_get_preview_frame_fn_t */
+ NULL /* vpx_codec_enc_mr_get_mem_loc_fn_t */
+ }
+};
diff --git a/media/libvpx/libvpx/vp8/vp8_ratectrl_rtc.cc b/media/libvpx/libvpx/vp8/vp8_ratectrl_rtc.cc
new file mode 100644
index 0000000000..60bc258a6f
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/vp8_ratectrl_rtc.cc
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2021 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <new>
+#include "vp8/common/common.h"
+#include "vp8/vp8_ratectrl_rtc.h"
+#include "vp8/encoder/onyx_int.h"
+#include "vp8/encoder/ratectrl.h"
+#include "vpx_ports/system_state.h"
+
+namespace libvpx {
+/* Quant MOD */
+static const int kQTrans[] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
+ 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
+ 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
+ 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
+};
+
+static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
+ 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
+ 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
+};
+
+static const unsigned char inter_minq[QINDEX_RANGE] = {
+ 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
+ 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
+ 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
+ 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
+};
+
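+// Returns val * num / denom, computed in 64-bit arithmetic to avoid overflow.
+// Used below to convert buffer levels given in milliseconds into bits
+// (ms * target bandwidth in bits/s / 1000).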
+static int rescale(int val, int num, int denom) {
+ int64_t llnum = num;
+ int64_t llden = denom;
+ int64_t llval = val;
+
+ return (int)(llval * llnum / llden);
+}
+
+std::unique_ptr<VP8RateControlRTC> VP8RateControlRTC::Create(
+ const VP8RateControlRtcConfig &cfg) {
+ std::unique_ptr<VP8RateControlRTC> rc_api(new (std::nothrow)
+ VP8RateControlRTC());
+ if (!rc_api) return nullptr;
+ rc_api->cpi_ = static_cast<VP8_COMP *>(vpx_memalign(32, sizeof(*cpi_)));
+ if (!rc_api->cpi_) return nullptr;
+ vp8_zero(*rc_api->cpi_);
+
+ if (!rc_api->InitRateControl(cfg)) return nullptr;
+
+ return rc_api;
+}
+
+VP8RateControlRTC::~VP8RateControlRTC() {
+ if (cpi_) {
+ vpx_free(cpi_->gf_active_flags);
+ vpx_free(cpi_);
+ }
+}
+
+bool VP8RateControlRTC::InitRateControl(const VP8RateControlRtcConfig &rc_cfg) {
+ VP8_COMMON *cm = &cpi_->common;
+ VP8_CONFIG *oxcf = &cpi_->oxcf;
+ oxcf->end_usage = USAGE_STREAM_FROM_SERVER;
+ cpi_->pass = 0;
+ cm->show_frame = 1;
+ oxcf->drop_frames_water_mark = 0;
+ cm->current_video_frame = 0;
+ cpi_->auto_gold = 1;
+ cpi_->key_frame_count = 1;
+ cpi_->rate_correction_factor = 1.0;
+ cpi_->key_frame_rate_correction_factor = 1.0;
+ cpi_->cyclic_refresh_mode_enabled = 0;
+ cpi_->auto_worst_q = 1;
+ cpi_->kf_overspend_bits = 0;
+ cpi_->kf_bitrate_adjustment = 0;
+ cpi_->gf_overspend_bits = 0;
+ cpi_->non_gf_bitrate_adjustment = 0;
+ if (!UpdateRateControl(rc_cfg)) return false;
+ cpi_->buffer_level = oxcf->starting_buffer_level;
+ cpi_->bits_off_target = oxcf->starting_buffer_level;
+ return true;
+}
+
+bool VP8RateControlRTC::UpdateRateControl(
+ const VP8RateControlRtcConfig &rc_cfg) {
+ if (rc_cfg.ts_number_layers < 1 ||
+ rc_cfg.ts_number_layers > VPX_TS_MAX_LAYERS) {
+ return false;
+ }
+
+ VP8_COMMON *cm = &cpi_->common;
+ VP8_CONFIG *oxcf = &cpi_->oxcf;
+ const unsigned int prev_number_of_layers = oxcf->number_of_layers;
+ vpx_clear_system_state();
+ cm->Width = rc_cfg.width;
+ cm->Height = rc_cfg.height;
+ oxcf->Width = rc_cfg.width;
+ oxcf->Height = rc_cfg.height;
+ oxcf->worst_allowed_q = kQTrans[rc_cfg.max_quantizer];
+ oxcf->best_allowed_q = kQTrans[rc_cfg.min_quantizer];
+ cpi_->worst_quality = oxcf->worst_allowed_q;
+ cpi_->best_quality = oxcf->best_allowed_q;
+ cpi_->output_framerate = rc_cfg.framerate;
+ oxcf->target_bandwidth =
+ static_cast<unsigned int>(1000 * rc_cfg.target_bandwidth);
+ cpi_->ref_framerate = cpi_->output_framerate;
+ oxcf->fixed_q = -1;
+ oxcf->error_resilient_mode = 1;
+ oxcf->starting_buffer_level_in_ms = rc_cfg.buf_initial_sz;
+ oxcf->optimal_buffer_level_in_ms = rc_cfg.buf_optimal_sz;
+ oxcf->maximum_buffer_size_in_ms = rc_cfg.buf_sz;
+ oxcf->starting_buffer_level = rc_cfg.buf_initial_sz;
+ oxcf->optimal_buffer_level = rc_cfg.buf_optimal_sz;
+ oxcf->maximum_buffer_size = rc_cfg.buf_sz;
+ oxcf->number_of_layers = rc_cfg.ts_number_layers;
+ cpi_->buffered_mode = oxcf->optimal_buffer_level > 0;
+ oxcf->under_shoot_pct = rc_cfg.undershoot_pct;
+ oxcf->over_shoot_pct = rc_cfg.overshoot_pct;
+ cpi_->oxcf.rc_max_intra_bitrate_pct = rc_cfg.max_intra_bitrate_pct;
+ cpi_->framerate = rc_cfg.framerate;
+ for (int i = 0; i < KEY_FRAME_CONTEXT; ++i) {
+ cpi_->prior_key_frame_distance[i] =
+ static_cast<int>(cpi_->output_framerate);
+ }
+
+ if (oxcf->number_of_layers > 1 || prev_number_of_layers > 1) {
+ memcpy(oxcf->target_bitrate, rc_cfg.layer_target_bitrate,
+ sizeof(rc_cfg.layer_target_bitrate));
+ memcpy(oxcf->rate_decimator, rc_cfg.ts_rate_decimator,
+ sizeof(rc_cfg.ts_rate_decimator));
+ if (cm->current_video_frame == 0) {
+ double prev_layer_framerate = 0;
+ for (unsigned int i = 0; i < oxcf->number_of_layers; ++i) {
+ vp8_init_temporal_layer_context(cpi_, oxcf, i, prev_layer_framerate);
+ prev_layer_framerate = cpi_->output_framerate / oxcf->rate_decimator[i];
+ }
+ } else if (oxcf->number_of_layers != prev_number_of_layers) {
+      // The number of temporal layers has changed, so reset/initialize the
+      // temporal layer context for the new layer configuration by calling
+      // vp8_reset_temporal_layer_change() below.
+
+      // Start at the base of the pattern cycle: set the layer id to 0 and
+      // reset the temporal pattern counter.
+      // TODO(marpan/jianj): lines 148-151 may not be needed (the user
+      // controls the layer_id), so consider removing them.
+ if (cpi_->temporal_layer_id > 0) {
+ cpi_->temporal_layer_id = 0;
+ }
+ cpi_->temporal_pattern_counter = 0;
+
+ vp8_reset_temporal_layer_change(cpi_, oxcf,
+ static_cast<int>(prev_number_of_layers));
+ }
+ }
+
+ cpi_->total_actual_bits = 0;
+ cpi_->total_target_vs_actual = 0;
+
+ cm->mb_rows = cm->Height >> 4;
+ cm->mb_cols = cm->Width >> 4;
+ cm->MBs = cm->mb_rows * cm->mb_cols;
+ cm->mode_info_stride = cm->mb_cols + 1;
+
+ // For temporal layers: starting/maximum/optimal_buffer_level is already set
+ // via vp8_init_temporal_layer_context() or vp8_reset_temporal_layer_change().
+ if (oxcf->number_of_layers <= 1 && prev_number_of_layers <= 1) {
+ oxcf->starting_buffer_level =
+ rescale((int)oxcf->starting_buffer_level, oxcf->target_bandwidth, 1000);
+ /* Set or reset optimal and maximum buffer levels. */
+ if (oxcf->optimal_buffer_level == 0) {
+ oxcf->optimal_buffer_level = oxcf->target_bandwidth / 8;
+ } else {
+ oxcf->optimal_buffer_level = rescale((int)oxcf->optimal_buffer_level,
+ oxcf->target_bandwidth, 1000);
+ }
+ if (oxcf->maximum_buffer_size == 0) {
+ oxcf->maximum_buffer_size = oxcf->target_bandwidth / 8;
+ } else {
+ oxcf->maximum_buffer_size =
+ rescale((int)oxcf->maximum_buffer_size, oxcf->target_bandwidth, 1000);
+ }
+ }
+
+ if (cpi_->bits_off_target > oxcf->maximum_buffer_size) {
+ cpi_->bits_off_target = oxcf->maximum_buffer_size;
+ cpi_->buffer_level = cpi_->bits_off_target;
+ }
+
+ vp8_new_framerate(cpi_, cpi_->framerate);
+ vpx_clear_system_state();
+ return true;
+}
+
+void VP8RateControlRTC::ComputeQP(const VP8FrameParamsQpRTC &frame_params) {
+ VP8_COMMON *const cm = &cpi_->common;
+ vpx_clear_system_state();
+ if (cpi_->oxcf.number_of_layers > 1) {
+ cpi_->temporal_layer_id = frame_params.temporal_layer_id;
+ const int layer = frame_params.temporal_layer_id;
+ vp8_update_layer_contexts(cpi_);
+ /* Restore layer specific context & set frame rate */
+ vp8_restore_layer_context(cpi_, layer);
+ vp8_new_framerate(cpi_, cpi_->layer_context[layer].framerate);
+ }
+ cm->frame_type = static_cast<FRAME_TYPE>(frame_params.frame_type);
+ cm->refresh_golden_frame = (cm->frame_type == KEY_FRAME) ? 1 : 0;
+ cm->refresh_alt_ref_frame = (cm->frame_type == KEY_FRAME) ? 1 : 0;
+ if (cm->frame_type == KEY_FRAME && cpi_->common.current_video_frame > 0) {
+ cpi_->common.frame_flags |= FRAMEFLAGS_KEY;
+ }
+
+ vp8_pick_frame_size(cpi_);
+
+ if (cpi_->buffer_level >= cpi_->oxcf.optimal_buffer_level &&
+ cpi_->buffered_mode) {
+ /* Max adjustment is 1/4 */
+ int Adjustment = cpi_->active_worst_quality / 4;
+ if (Adjustment) {
+ int buff_lvl_step;
+ if (cpi_->buffer_level < cpi_->oxcf.maximum_buffer_size) {
+ buff_lvl_step = (int)((cpi_->oxcf.maximum_buffer_size -
+ cpi_->oxcf.optimal_buffer_level) /
+ Adjustment);
+ if (buff_lvl_step) {
+ Adjustment =
+ (int)((cpi_->buffer_level - cpi_->oxcf.optimal_buffer_level) /
+ buff_lvl_step);
+ } else {
+ Adjustment = 0;
+ }
+ }
+ cpi_->active_worst_quality -= Adjustment;
+ if (cpi_->active_worst_quality < cpi_->active_best_quality) {
+ cpi_->active_worst_quality = cpi_->active_best_quality;
+ }
+ }
+ }
+
+ if (cpi_->ni_frames > 150) {
+ int q = cpi_->active_worst_quality;
+ if (cm->frame_type == KEY_FRAME) {
+ cpi_->active_best_quality = kf_high_motion_minq[q];
+ } else {
+ cpi_->active_best_quality = inter_minq[q];
+ }
+
+ if (cpi_->buffer_level >= cpi_->oxcf.maximum_buffer_size) {
+ cpi_->active_best_quality = cpi_->best_quality;
+
+ } else if (cpi_->buffer_level > cpi_->oxcf.optimal_buffer_level) {
+ int Fraction =
+ (int)(((cpi_->buffer_level - cpi_->oxcf.optimal_buffer_level) * 128) /
+ (cpi_->oxcf.maximum_buffer_size -
+ cpi_->oxcf.optimal_buffer_level));
+ int min_qadjustment =
+ ((cpi_->active_best_quality - cpi_->best_quality) * Fraction) / 128;
+
+ cpi_->active_best_quality -= min_qadjustment;
+ }
+ }
+
+ /* Clip the active best and worst quality values to limits */
+ if (cpi_->active_worst_quality > cpi_->worst_quality) {
+ cpi_->active_worst_quality = cpi_->worst_quality;
+ }
+ if (cpi_->active_best_quality < cpi_->best_quality) {
+ cpi_->active_best_quality = cpi_->best_quality;
+ }
+ if (cpi_->active_worst_quality < cpi_->active_best_quality) {
+ cpi_->active_worst_quality = cpi_->active_best_quality;
+ }
+
+ q_ = vp8_regulate_q(cpi_, cpi_->this_frame_target);
+ vp8_set_quantizer(cpi_, q_);
+ vpx_clear_system_state();
+}
+
+int VP8RateControlRTC::GetQP() const { return q_; }
+
+int VP8RateControlRTC::GetLoopfilterLevel() const {
+ VP8_COMMON *cm = &cpi_->common;
+ const double qp = q_;
+
+  // This model was fit by linear regression of filter level against qp.
+ if (cm->Width * cm->Height <= 320 * 240) {
+ cm->filter_level = static_cast<int>(0.352685 * qp + 2.957774);
+ } else if (cm->Width * cm->Height <= 640 * 480) {
+ cm->filter_level = static_cast<int>(0.485069 * qp - 0.534462);
+ } else {
+ cm->filter_level = static_cast<int>(0.314875 * qp + 7.959003);
+ }
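+  // Worked example: a 640x360 frame (230400 pixels) falls in the middle
+  // bucket, so qp 40 gives 0.485069 * 40 - 0.534462 = 18.87 -> level 18.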
+
+ int min_filter_level = 0;
+ // This logic is from get_min_filter_level() in picklpf.c
+ if (q_ > 6 && q_ <= 16) {
+ min_filter_level = 1;
+ } else {
+ min_filter_level = (q_ / 8);
+ }
+
+ const int max_filter_level = 63;
+ if (cm->filter_level < min_filter_level) cm->filter_level = min_filter_level;
+ if (cm->filter_level > max_filter_level) cm->filter_level = max_filter_level;
+
+ return cm->filter_level;
+}
+
+void VP8RateControlRTC::PostEncodeUpdate(uint64_t encoded_frame_size) {
+ VP8_COMMON *const cm = &cpi_->common;
+ vpx_clear_system_state();
+ cpi_->total_byte_count += encoded_frame_size;
+ cpi_->projected_frame_size = static_cast<int>(encoded_frame_size << 3);
+ if (cpi_->oxcf.number_of_layers > 1) {
+ for (unsigned int i = cpi_->current_layer + 1;
+ i < cpi_->oxcf.number_of_layers; ++i) {
+ cpi_->layer_context[i].total_byte_count += encoded_frame_size;
+ }
+ }
+
+ vp8_update_rate_correction_factors(cpi_, 2);
+
+ cpi_->last_q[cm->frame_type] = cm->base_qindex;
+
+ if (cm->frame_type == KEY_FRAME) {
+ vp8_adjust_key_frame_context(cpi_);
+ }
+
+ /* Keep a record of ambient average Q. */
+ if (cm->frame_type != KEY_FRAME) {
+ cpi_->avg_frame_qindex =
+ (2 + 3 * cpi_->avg_frame_qindex + cm->base_qindex) >> 2;
+ }
+ /* Keep a record from which we can calculate the average Q excluding
+ * key frames.
+ */
+ if (cm->frame_type != KEY_FRAME) {
+ cpi_->ni_frames++;
+ /* Damp value for first few frames */
+ if (cpi_->ni_frames > 150) {
+ cpi_->ni_tot_qi += q_;
+ cpi_->ni_av_qi = (cpi_->ni_tot_qi / cpi_->ni_frames);
+ } else {
+ cpi_->ni_tot_qi += q_;
+ cpi_->ni_av_qi =
+ ((cpi_->ni_tot_qi / cpi_->ni_frames) + cpi_->worst_quality + 1) / 2;
+ }
+
+    /* If the average Q is higher than what was used in the last
+     * frame (after going through the recode loop to keep the frame
+     * size within range), then use the last frame value - 1. The -1
+     * is designed to stop Q, and hence the data rate, from
+     * progressively falling away during difficult sections, while at
+     * the same time reducing the number of iterations around the
+     * recode loop.
+     */
+ if (q_ > cpi_->ni_av_qi) cpi_->ni_av_qi = q_ - 1;
+ }
+
+ cpi_->bits_off_target +=
+ cpi_->av_per_frame_bandwidth - cpi_->projected_frame_size;
+ if (cpi_->bits_off_target > cpi_->oxcf.maximum_buffer_size) {
+ cpi_->bits_off_target = cpi_->oxcf.maximum_buffer_size;
+ }
+
+ cpi_->total_actual_bits += cpi_->projected_frame_size;
+ cpi_->buffer_level = cpi_->bits_off_target;
+
+ /* Propagate values to higher temporal layers */
+ if (cpi_->oxcf.number_of_layers > 1) {
+ for (unsigned int i = cpi_->current_layer + 1;
+ i < cpi_->oxcf.number_of_layers; ++i) {
+ LAYER_CONTEXT *lc = &cpi_->layer_context[i];
+ int bits_off_for_this_layer = (int)round(
+ lc->target_bandwidth / lc->framerate - cpi_->projected_frame_size);
+
+ lc->bits_off_target += bits_off_for_this_layer;
+
+ /* Clip buffer level to maximum buffer size for the layer */
+ if (lc->bits_off_target > lc->maximum_buffer_size) {
+ lc->bits_off_target = lc->maximum_buffer_size;
+ }
+
+ lc->total_actual_bits += cpi_->projected_frame_size;
+ lc->total_target_vs_actual += bits_off_for_this_layer;
+ lc->buffer_level = lc->bits_off_target;
+ }
+ }
+
+ cpi_->common.current_video_frame++;
+ cpi_->frames_since_key++;
+
+ if (cpi_->oxcf.number_of_layers > 1) vp8_save_layer_context(cpi_);
+ vpx_clear_system_state();
+}
+} // namespace libvpx
diff --git a/media/libvpx/libvpx/vp8/vp8_ratectrl_rtc.h b/media/libvpx/libvpx/vp8/vp8_ratectrl_rtc.h
new file mode 100644
index 0000000000..496ef9eaad
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/vp8_ratectrl_rtc.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2021 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VP8_RATECTRL_RTC_H_
+#define VPX_VP8_RATECTRL_RTC_H_
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+
+#include "vpx/internal/vpx_ratectrl_rtc.h"
+
+struct VP8_COMP;
+
+namespace libvpx {
+struct VP8RateControlRtcConfig : public VpxRateControlRtcConfig {
+ public:
+ VP8RateControlRtcConfig() {
+ memset(&layer_target_bitrate, 0, sizeof(layer_target_bitrate));
+ memset(&ts_rate_decimator, 0, sizeof(ts_rate_decimator));
+ }
+};
+
+struct VP8FrameParamsQpRTC {
+ RcFrameType frame_type;
+ int temporal_layer_id;
+};
+
+class VP8RateControlRTC {
+ public:
+ static std::unique_ptr<VP8RateControlRTC> Create(
+ const VP8RateControlRtcConfig &cfg);
+ ~VP8RateControlRTC();
+
+ bool UpdateRateControl(const VP8RateControlRtcConfig &rc_cfg);
+ // GetQP() needs to be called after ComputeQP() to get the latest QP
+ int GetQP() const;
+ // GetLoopfilterLevel() needs to be called after ComputeQP() since loopfilter
+ // level is calculated from frame qp.
+ int GetLoopfilterLevel() const;
+ void ComputeQP(const VP8FrameParamsQpRTC &frame_params);
+  // Feedback to rate control with the size of the current encoded frame.
+ void PostEncodeUpdate(uint64_t encoded_frame_size);
+
+ private:
+ VP8RateControlRTC() {}
+ bool InitRateControl(const VP8RateControlRtcConfig &cfg);
+ struct VP8_COMP *cpi_;
+ int q_;
+};
+
+} // namespace libvpx
+
+#endif // VPX_VP8_RATECTRL_RTC_H_
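Context for the rate-control API above: the header comments spell out the required ordering — ComputeQP() first, then GetQP()/GetLoopfilterLevel(), then PostEncodeUpdate() with the coded size. A per-frame sketch of that loop follows (not part of the patch): EncodeWithQP() is a hypothetical encoder hook, and the kKeyFrame/kInterFrame values of RcFrameType are assumed from vpx/internal/vpx_ratectrl_rtc.h:

    #include <cstdint>
    #include "vp8/vp8_ratectrl_rtc.h"

    // Hypothetical encoder hook: encodes one frame at the given qp and
    // loopfilter level and returns the coded size in bytes.
    uint64_t EncodeWithQP(int qp, int loopfilter_level);

    void RunRateControlLoop(int num_frames) {
      libvpx::VP8RateControlRtcConfig cfg;
      cfg.width = 640;
      cfg.height = 360;
      cfg.min_quantizer = 2;       // indexes into kQTrans, see UpdateRateControl()
      cfg.max_quantizer = 52;
      cfg.target_bandwidth = 800;  // kbps; UpdateRateControl() scales by 1000
      cfg.framerate = 30;
      cfg.ts_number_layers = 1;    // single temporal layer
      std::unique_ptr<libvpx::VP8RateControlRTC> rc =
          libvpx::VP8RateControlRTC::Create(cfg);
      if (!rc) return;

      libvpx::VP8FrameParamsQpRTC frame_params;
      frame_params.temporal_layer_id = 0;
      for (int i = 0; i < num_frames; ++i) {
        frame_params.frame_type = (i == 0) ? libvpx::RcFrameType::kKeyFrame
                                           : libvpx::RcFrameType::kInterFrame;
        rc->ComputeQP(frame_params);
        const int qp = rc->GetQP();               // valid only after ComputeQP()
        const int lf = rc->GetLoopfilterLevel();  // derived from the frame qp
        rc->PostEncodeUpdate(EncodeWithQP(qp, lf));  // feed back the coded size
      }
    }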
diff --git a/media/libvpx/libvpx/vp8/vp8cx.mk b/media/libvpx/libvpx/vp8/vp8cx.mk
new file mode 100644
index 0000000000..b4b3fda9ea
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/vp8cx.mk
@@ -0,0 +1,132 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+
+VP8_CX_EXPORTS += exports_enc
+
+VP8_CX_SRCS-yes += $(VP8_COMMON_SRCS-yes)
+VP8_CX_SRCS-no += $(VP8_COMMON_SRCS-no)
+VP8_CX_SRCS_REMOVE-yes += $(VP8_COMMON_SRCS_REMOVE-yes)
+VP8_CX_SRCS_REMOVE-no += $(VP8_COMMON_SRCS_REMOVE-no)
+
+VP8_CX_SRCS-yes += vp8cx.mk
+
+VP8_CX_SRCS-yes += vp8_cx_iface.c
+
+VP8_CX_SRCS-yes += encoder/defaultcoefcounts.h
+VP8_CX_SRCS-yes += encoder/bitstream.c
+VP8_CX_SRCS-yes += encoder/boolhuff.c
+VP8_CX_SRCS-yes += encoder/copy_c.c
+VP8_CX_SRCS-yes += encoder/dct.c
+VP8_CX_SRCS-yes += encoder/encodeframe.c
+VP8_CX_SRCS-yes += encoder/encodeframe.h
+VP8_CX_SRCS-yes += encoder/encodeintra.c
+VP8_CX_SRCS-yes += encoder/encodemb.c
+VP8_CX_SRCS-yes += encoder/encodemv.c
+VP8_CX_SRCS-$(CONFIG_MULTITHREAD) += encoder/ethreading.c
+VP8_CX_SRCS-$(CONFIG_MULTITHREAD) += encoder/ethreading.h
+VP8_CX_SRCS-yes += encoder/firstpass.c
+VP8_CX_SRCS-yes += encoder/block.h
+VP8_CX_SRCS-yes += encoder/boolhuff.h
+VP8_CX_SRCS-yes += encoder/bitstream.h
+VP8_CX_SRCS-$(CONFIG_TEMPORAL_DENOISING) += encoder/denoising.h
+VP8_CX_SRCS-$(CONFIG_TEMPORAL_DENOISING) += encoder/denoising.c
+VP8_CX_SRCS-yes += encoder/encodeintra.h
+VP8_CX_SRCS-yes += encoder/encodemb.h
+VP8_CX_SRCS-yes += encoder/encodemv.h
+VP8_CX_SRCS-yes += encoder/firstpass.h
+VP8_CX_SRCS-yes += encoder/lookahead.c
+VP8_CX_SRCS-yes += encoder/lookahead.h
+VP8_CX_SRCS-yes += encoder/mcomp.h
+VP8_CX_SRCS-yes += encoder/modecosts.h
+VP8_CX_SRCS-yes += encoder/onyx_int.h
+VP8_CX_SRCS-yes += encoder/pickinter.h
+VP8_CX_SRCS-yes += encoder/quantize.h
+VP8_CX_SRCS-yes += encoder/ratectrl.h
+VP8_CX_SRCS-yes += encoder/rdopt.h
+VP8_CX_SRCS-yes += encoder/tokenize.h
+VP8_CX_SRCS-yes += encoder/treewriter.h
+VP8_CX_SRCS-yes += encoder/mcomp.c
+VP8_CX_SRCS-yes += encoder/modecosts.c
+VP8_CX_SRCS-yes += encoder/onyx_if.c
+VP8_CX_SRCS-yes += encoder/pickinter.c
+VP8_CX_SRCS-yes += encoder/picklpf.c
+VP8_CX_SRCS-yes += encoder/picklpf.h
+VP8_CX_SRCS-yes += encoder/vp8_quantize.c
+VP8_CX_SRCS-yes += encoder/ratectrl.c
+VP8_CX_SRCS-yes += encoder/rdopt.c
+VP8_CX_SRCS-yes += encoder/segmentation.c
+VP8_CX_SRCS-yes += encoder/segmentation.h
+VP8_CX_SRCS-yes += common/vp8_skin_detection.c
+VP8_CX_SRCS-yes += common/vp8_skin_detection.h
+VP8_CX_SRCS-yes += encoder/tokenize.c
+VP8_CX_SRCS-yes += encoder/dct_value_cost.h
+VP8_CX_SRCS-yes += encoder/dct_value_tokens.h
+VP8_CX_SRCS-yes += encoder/treewriter.c
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.h
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c
+VP8_CX_SRCS-yes += encoder/temporal_filter.c
+VP8_CX_SRCS-yes += encoder/temporal_filter.h
+VP8_CX_SRCS-$(CONFIG_MULTI_RES_ENCODING) += encoder/mr_dissim.c
+VP8_CX_SRCS-$(CONFIG_MULTI_RES_ENCODING) += encoder/mr_dissim.h
+
+ifeq ($(CONFIG_REALTIME_ONLY),yes)
+VP8_CX_SRCS_REMOVE-yes += encoder/firstpass.c
+VP8_CX_SRCS_REMOVE-yes += encoder/temporal_filter.c
+VP8_CX_SRCS_REMOVE-yes += encoder/temporal_filter.h
+endif
+
+VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/copy_sse2.asm
+VP8_CX_SRCS-$(HAVE_SSE3) += encoder/x86/copy_sse3.asm
+VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.asm
+VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/fwalsh_sse2.asm
+VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp8_quantize_sse2.c
+VP8_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp8_quantize_ssse3.c
+VP8_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/quantize_sse4.c
+
+ifeq ($(CONFIG_TEMPORAL_DENOISING),yes)
+VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/denoising_sse2.c
+endif
+
+VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/block_error_sse2.asm
+VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
+VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp8_enc_stubs_sse2.c
+
+ifeq ($(CONFIG_REALTIME_ONLY),yes)
+VP8_CX_SRCS_REMOVE-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
+endif
+
+VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/denoising_neon.c
+VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/fastquantizeb_neon.c
+VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/shortfdct_neon.c
+VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp8_shortwalsh4x4_neon.c
+
+VP8_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/dct_msa.c
+VP8_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/encodeopt_msa.c
+VP8_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/quantize_msa.c
+VP8_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
+
+VP8_CX_SRCS-$(HAVE_MMI) += encoder/mips/mmi/vp8_quantize_mmi.c
+VP8_CX_SRCS-$(HAVE_MMI) += encoder/mips/mmi/dct_mmi.c
+
+ifeq ($(CONFIG_TEMPORAL_DENOISING),yes)
+VP8_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/denoising_msa.c
+endif
+
+ifeq ($(CONFIG_REALTIME_ONLY),yes)
+VP8_CX_SRCS_REMOVE-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
+endif
+
+# encoder (loongarch LSX intrinsics)
+VP8_CX_SRCS-$(HAVE_LSX) += encoder/loongarch/dct_lsx.c
+VP8_CX_SRCS-$(HAVE_LSX) += encoder/loongarch/encodeopt_lsx.c
+VP8_CX_SRCS-$(HAVE_LSX) += encoder/loongarch/vp8_quantize_lsx.c
+
+VP8_CX_SRCS-yes := $(filter-out $(VP8_CX_SRCS_REMOVE-yes),$(VP8_CX_SRCS-yes))
diff --git a/media/libvpx/libvpx/vp8/vp8dx.mk b/media/libvpx/libvpx/vp8/vp8dx.mk
new file mode 100644
index 0000000000..892ed70f52
--- /dev/null
+++ b/media/libvpx/libvpx/vp8/vp8dx.mk
@@ -0,0 +1,39 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+
+VP8_DX_EXPORTS += exports_dec
+
+VP8_DX_SRCS-yes += $(VP8_COMMON_SRCS-yes)
+VP8_DX_SRCS-no += $(VP8_COMMON_SRCS-no)
+VP8_DX_SRCS_REMOVE-yes += $(VP8_COMMON_SRCS_REMOVE-yes)
+VP8_DX_SRCS_REMOVE-no += $(VP8_COMMON_SRCS_REMOVE-no)
+
+VP8_DX_SRCS-yes += vp8dx.mk
+
+VP8_DX_SRCS-yes += vp8_dx_iface.c
+
+VP8_DX_SRCS-yes += decoder/dboolhuff.c
+VP8_DX_SRCS-yes += decoder/decodemv.c
+VP8_DX_SRCS-yes += decoder/decodeframe.c
+VP8_DX_SRCS-yes += decoder/detokenize.c
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/ec_types.h
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/error_concealment.h
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/error_concealment.c
+VP8_DX_SRCS-yes += decoder/dboolhuff.h
+VP8_DX_SRCS-yes += decoder/decodemv.h
+VP8_DX_SRCS-yes += decoder/decoderthreading.h
+VP8_DX_SRCS-yes += decoder/detokenize.h
+VP8_DX_SRCS-yes += decoder/onyxd_int.h
+VP8_DX_SRCS-yes += decoder/treereader.h
+VP8_DX_SRCS-yes += decoder/onyxd_if.c
+VP8_DX_SRCS-$(CONFIG_MULTITHREAD) += decoder/threading.c
+
+VP8_DX_SRCS-yes := $(filter-out $(VP8_DX_SRCS_REMOVE-yes),$(VP8_DX_SRCS-yes))