author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 17:32:43 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 17:32:43 +0000
commit     6bf0a5cb5034a7e684dcc3500e841785237ce2dd (patch)
tree       a68f146d7fa01f0134297619fbe7e33db084e0aa /media/ffvpx
parent     Initial commit. (diff)
download   thunderbird-6bf0a5cb5034a7e684dcc3500e841785237ce2dd.tar.xz
           thunderbird-6bf0a5cb5034a7e684dcc3500e841785237ce2dd.zip
Adding upstream version 1:115.7.0. (upstream/1%115.7.0, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'media/ffvpx')
-rw-r--r--  media/ffvpx/1846703.patch | 76
-rw-r--r--  media/ffvpx/COPYING.LGPLv2.1 | 502
-rw-r--r--  media/ffvpx/COPYING.LGPLv3 | 165
-rw-r--r--  media/ffvpx/README_MOZILLA | 161
-rw-r--r--  media/ffvpx/changes.patch | 82
-rw-r--r--  media/ffvpx/compat/atomics/win32/stdatomic.h | 181
-rw-r--r--  media/ffvpx/compat/va_copy.h | 34
-rw-r--r--  media/ffvpx/compat/w32pthreads.h | 191
-rw-r--r--  media/ffvpx/config.h | 65
-rw-r--r--  media/ffvpx/config_android32.h | 740
-rw-r--r--  media/ffvpx/config_android64.h | 740
-rw-r--r--  media/ffvpx/config_android_x86_64.h | 740
-rw-r--r--  media/ffvpx/config_audio.h | 680
-rw-r--r--  media/ffvpx/config_components.h | 27
-rw-r--r--  media/ffvpx/config_components_audio_only.h | 2091
-rw-r--r--  media/ffvpx/config_components_audio_video.h | 2091
-rw-r--r--  media/ffvpx/config_darwin64.asm | 664
-rw-r--r--  media/ffvpx/config_darwin64.h | 680
-rw-r--r--  media/ffvpx/config_darwin_aarch64.h | 740
-rw-r--r--  media/ffvpx/config_generic.h | 740
-rw-r--r--  media/ffvpx/config_override.h | 35
-rw-r--r--  media/ffvpx/config_unix32.h | 735
-rw-r--r--  media/ffvpx/config_unix64.asm | 723
-rw-r--r--  media/ffvpx/config_unix64.h | 733
-rw-r--r--  media/ffvpx/config_unix_aarch64.h | 735
-rw-r--r--  media/ffvpx/config_win32.asm | 718
-rw-r--r--  media/ffvpx/config_win32.h | 735
-rw-r--r--  media/ffvpx/config_win64.asm | 718
-rw-r--r--  media/ffvpx/config_win64.h | 735
-rw-r--r--  media/ffvpx/config_win64_aarch64.h | 680
-rw-r--r--  media/ffvpx/ffvpxcommon.mozbuild | 71
-rw-r--r--  media/ffvpx/libavcodec/aarch64/fft_init_aarch64.c | 52
-rw-r--r--  media/ffvpx/libavcodec/aarch64/fft_neon.S | 447
-rw-r--r--  media/ffvpx/libavcodec/aarch64/h264chroma_init_aarch64.c | 59
-rw-r--r--  media/ffvpx/libavcodec/aarch64/h264cmc_neon.S | 452
-rw-r--r--  media/ffvpx/libavcodec/aarch64/h264dsp_init_aarch64.c | 166
-rw-r--r--  media/ffvpx/libavcodec/aarch64/h264dsp_neon.S | 1076
-rw-r--r--  media/ffvpx/libavcodec/aarch64/h264idct_neon.S | 415
-rw-r--r--  media/ffvpx/libavcodec/aarch64/h264pred_init.c | 135
-rw-r--r--  media/ffvpx/libavcodec/aarch64/h264pred_neon.S | 765
-rw-r--r--  media/ffvpx/libavcodec/aarch64/hpeldsp_init_aarch64.c | 123
-rw-r--r--  media/ffvpx/libavcodec/aarch64/hpeldsp_neon.S | 397
-rw-r--r--  media/ffvpx/libavcodec/aarch64/idct.h | 29
-rw-r--r--  media/ffvpx/libavcodec/aarch64/idctdsp_init_aarch64.c | 55
-rw-r--r--  media/ffvpx/libavcodec/aarch64/idctdsp_neon.S | 130
-rw-r--r--  media/ffvpx/libavcodec/aarch64/mdct_neon.S | 326
-rw-r--r--  media/ffvpx/libavcodec/aarch64/moz.build | 59
-rw-r--r--  media/ffvpx/libavcodec/aarch64/mpegaudiodsp_init.c | 40
-rw-r--r--  media/ffvpx/libavcodec/aarch64/mpegaudiodsp_neon.S | 225
-rw-r--r--  media/ffvpx/libavcodec/aarch64/neon.S | 162
-rw-r--r--  media/ffvpx/libavcodec/aarch64/simple_idct_neon.S | 362
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vc1dsp_init_aarch64.c | 141
-rw-r--r--  media/ffvpx/libavcodec/aarch64/videodsp.S | 29
-rw-r--r--  media/ffvpx/libavcodec/aarch64/videodsp_init.c | 32
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp8dsp.h | 75
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp8dsp_init_aarch64.c | 124
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp8dsp_neon.S | 1790
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9dsp_init.h | 29
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9dsp_init_10bpp_aarch64.c | 23
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c | 23
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9dsp_init_16bpp_aarch64_template.c | 274
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9dsp_init_aarch64.c | 259
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9itxfm_16bpp_neon.S | 2017
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9itxfm_neon.S | 1580
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9lpf_16bpp_neon.S | 861
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9lpf_neon.S | 1334
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9mc_16bpp_neon.S | 606
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9mc_aarch64.S | 81
-rw-r--r--  media/ffvpx/libavcodec/aarch64/vp9mc_neon.S | 657
-rw-r--r--  media/ffvpx/libavcodec/allcodecs.c | 995
-rw-r--r--  media/ffvpx/libavcodec/arm/fft_init_arm.c | 63
-rw-r--r--  media/ffvpx/libavcodec/arm/fft_neon.S | 375
-rw-r--r--  media/ffvpx/libavcodec/arm/fft_vfp.S | 530
-rw-r--r--  media/ffvpx/libavcodec/arm/flacdsp_arm.S | 146
-rw-r--r--  media/ffvpx/libavcodec/arm/flacdsp_init_arm.c | 30
-rw-r--r--  media/ffvpx/libavcodec/arm/idct.h | 41
-rw-r--r--  media/ffvpx/libavcodec/arm/idctdsp_arm.S | 120
-rw-r--r--  media/ffvpx/libavcodec/arm/idctdsp_arm.h | 34
-rw-r--r--  media/ffvpx/libavcodec/arm/idctdsp_armv6.S | 48
-rw-r--r--  media/ffvpx/libavcodec/arm/idctdsp_init_arm.c | 94
-rw-r--r--  media/ffvpx/libavcodec/arm/idctdsp_init_armv5te.c | 41
-rw-r--r--  media/ffvpx/libavcodec/arm/idctdsp_init_armv6.c | 45
-rw-r--r--  media/ffvpx/libavcodec/arm/idctdsp_init_neon.c | 51
-rw-r--r--  media/ffvpx/libavcodec/arm/idctdsp_neon.S | 128
-rw-r--r--  media/ffvpx/libavcodec/arm/jrevdct_arm.S | 383
-rw-r--r--  media/ffvpx/libavcodec/arm/mathops.h | 108
-rw-r--r--  media/ffvpx/libavcodec/arm/moz.build | 33
-rw-r--r--  media/ffvpx/libavcodec/arm/mpegaudiodsp_fixed_armv6.S | 143
-rw-r--r--  media/ffvpx/libavcodec/arm/mpegaudiodsp_init_arm.c | 38
-rw-r--r--  media/ffvpx/libavcodec/arm/rdft_init_arm.c | 33
-rw-r--r--  media/ffvpx/libavcodec/arm/rdft_neon.S | 155
-rw-r--r--  media/ffvpx/libavcodec/arm/simple_idct_arm.S | 480
-rw-r--r--  media/ffvpx/libavcodec/arm/simple_idct_armv5te.S | 613
-rw-r--r--  media/ffvpx/libavcodec/arm/simple_idct_armv6.S | 425
-rw-r--r--  media/ffvpx/libavcodec/arm/simple_idct_neon.S | 375
-rw-r--r--  media/ffvpx/libavcodec/atsc_a53.c | 119
-rw-r--r--  media/ffvpx/libavcodec/atsc_a53.h | 56
-rw-r--r--  media/ffvpx/libavcodec/av1.h | 184
-rw-r--r--  media/ffvpx/libavcodec/av1_frame_split_bsf.c | 261
-rw-r--r--  media/ffvpx/libavcodec/av1_parse.h | 184
-rw-r--r--  media/ffvpx/libavcodec/av1_parser.c | 216
-rw-r--r--  media/ffvpx/libavcodec/av1dec.c | 1286
-rw-r--r--  media/ffvpx/libavcodec/av1dec.h | 89
-rw-r--r--  media/ffvpx/libavcodec/avcodec.c | 716
-rw-r--r--  media/ffvpx/libavcodec/avcodec.h | 3193
-rw-r--r--  media/ffvpx/libavcodec/avcodec.symbols | 81
-rw-r--r--  media/ffvpx/libavcodec/avdct.c | 130
-rw-r--r--  media/ffvpx/libavcodec/avdct.h | 88
-rw-r--r--  media/ffvpx/libavcodec/avfft.c | 145
-rw-r--r--  media/ffvpx/libavcodec/avfft.h | 118
-rw-r--r--  media/ffvpx/libavcodec/avpacket.c | 647
-rw-r--r--  media/ffvpx/libavcodec/avpicture.c | 82
-rw-r--r--  media/ffvpx/libavcodec/bit_depth_template.c | 108
-rw-r--r--  media/ffvpx/libavcodec/bitstream.c | 72
-rw-r--r--  media/ffvpx/libavcodec/bitstream_filters.c | 109
-rw-r--r--  media/ffvpx/libavcodec/blockdsp.h | 47
-rw-r--r--  media/ffvpx/libavcodec/bsf.c | 562
-rw-r--r--  media/ffvpx/libavcodec/bsf.h | 332
-rw-r--r--  media/ffvpx/libavcodec/bsf_internal.h | 60
-rw-r--r--  media/ffvpx/libavcodec/bsf_list.c | 11
-rw-r--r--  media/ffvpx/libavcodec/bytestream.h | 380
-rw-r--r--  media/ffvpx/libavcodec/cbs.c | 1028
-rw-r--r--  media/ffvpx/libavcodec/cbs.h | 436
-rw-r--r--  media/ffvpx/libavcodec/cbs_av1.c | 1366
-rw-r--r--  media/ffvpx/libavcodec/cbs_av1.h | 464
-rw-r--r--  media/ffvpx/libavcodec/cbs_av1_syntax_template.c | 2050
-rw-r--r--  media/ffvpx/libavcodec/cbs_internal.h | 253
-rw-r--r--  media/ffvpx/libavcodec/codec.h | 375
-rw-r--r--  media/ffvpx/libavcodec/codec_desc.c | 3693
-rw-r--r--  media/ffvpx/libavcodec/codec_desc.h | 128
-rw-r--r--  media/ffvpx/libavcodec/codec_id.h | 661
-rw-r--r--  media/ffvpx/libavcodec/codec_internal.h | 330
-rw-r--r--  media/ffvpx/libavcodec/codec_list.c | 20
-rw-r--r--  media/ffvpx/libavcodec/codec_par.c | 263
-rw-r--r--  media/ffvpx/libavcodec/codec_par.h | 247
-rw-r--r--  media/ffvpx/libavcodec/dct.c | 228
-rw-r--r--  media/ffvpx/libavcodec/dct.h | 69
-rw-r--r--  media/ffvpx/libavcodec/dct32.h | 25
-rw-r--r--  media/ffvpx/libavcodec/dct32_fixed.c | 20
-rw-r--r--  media/ffvpx/libavcodec/dct32_float.c | 20
-rw-r--r--  media/ffvpx/libavcodec/dct32_template.c | 288
-rw-r--r--  media/ffvpx/libavcodec/decode.c | 1687
-rw-r--r--  media/ffvpx/libavcodec/decode.h | 153
-rw-r--r--  media/ffvpx/libavcodec/defs.h | 192
-rw-r--r--  media/ffvpx/libavcodec/encode.c | 774
-rw-r--r--  media/ffvpx/libavcodec/encode.h | 99
-rw-r--r--  media/ffvpx/libavcodec/error_resilience.h | 97
-rw-r--r--  media/ffvpx/libavcodec/faandct.c | 215
-rw-r--r--  media/ffvpx/libavcodec/faandct.h | 37
-rw-r--r--  media/ffvpx/libavcodec/faanidct.c | 166
-rw-r--r--  media/ffvpx/libavcodec/faanidct.h | 32
-rw-r--r--  media/ffvpx/libavcodec/fdctdsp.c | 51
-rw-r--r--  media/ffvpx/libavcodec/fdctdsp.h | 37
-rw-r--r--  media/ffvpx/libavcodec/fdctdsp_init.c | 44
-rw-r--r--  media/ffvpx/libavcodec/fft-internal.h | 62
-rw-r--r--  media/ffvpx/libavcodec/fft.h | 160
-rw-r--r--  media/ffvpx/libavcodec/fft_fixed_32.c | 51
-rw-r--r--  media/ffvpx/libavcodec/fft_float.c | 20
-rw-r--r--  media/ffvpx/libavcodec/fft_init_table.c | 344
-rw-r--r--  media/ffvpx/libavcodec/fft_table.h | 66
-rw-r--r--  media/ffvpx/libavcodec/fft_template.c | 628
-rw-r--r--  media/ffvpx/libavcodec/flac.c | 225
-rw-r--r--  media/ffvpx/libavcodec/flac.h | 75
-rw-r--r--  media/ffvpx/libavcodec/flac_parse.h | 89
-rw-r--r--  media/ffvpx/libavcodec/flacdata.c | 33
-rw-r--r--  media/ffvpx/libavcodec/flacdata.h | 31
-rw-r--r--  media/ffvpx/libavcodec/flacdec.c | 846
-rw-r--r--  media/ffvpx/libavcodec/flacdsp.c | 127
-rw-r--r--  media/ffvpx/libavcodec/flacdsp.h | 43
-rw-r--r--  media/ffvpx/libavcodec/flacdsp_lpc_template.c | 159
-rw-r--r--  media/ffvpx/libavcodec/flacdsp_template.c | 103
-rw-r--r--  media/ffvpx/libavcodec/frame_thread_encoder.h | 35
-rw-r--r--  media/ffvpx/libavcodec/get_bits.h | 685
-rw-r--r--  media/ffvpx/libavcodec/get_buffer.c | 304
-rw-r--r--  media/ffvpx/libavcodec/golomb.c | 173
-rw-r--r--  media/ffvpx/libavcodec/golomb.h | 616
-rw-r--r--  media/ffvpx/libavcodec/h263dsp.h | 35
-rw-r--r--  media/ffvpx/libavcodec/h264chroma.h | 41
-rw-r--r--  media/ffvpx/libavcodec/h264dsp.h | 135
-rw-r--r--  media/ffvpx/libavcodec/h264pred.c | 602
-rw-r--r--  media/ffvpx/libavcodec/h264pred.h | 130
-rw-r--r--  media/ffvpx/libavcodec/h264pred_template.c | 1333
-rw-r--r--  media/ffvpx/libavcodec/hpeldsp.h | 107
-rw-r--r--  media/ffvpx/libavcodec/hwaccel.h | 84
-rw-r--r--  media/ffvpx/libavcodec/hwaccels.h | 85
-rw-r--r--  media/ffvpx/libavcodec/hwconfig.h | 100
-rw-r--r--  media/ffvpx/libavcodec/idctdsp.c | 315
-rw-r--r--  media/ffvpx/libavcodec/idctdsp.h | 117
-rw-r--r--  media/ffvpx/libavcodec/imgconvert.c | 48
-rw-r--r--  media/ffvpx/libavcodec/internal.h | 247
-rw-r--r--  media/ffvpx/libavcodec/jfdctfst.c | 331
-rw-r--r--  media/ffvpx/libavcodec/jfdctint.c | 25
-rw-r--r--  media/ffvpx/libavcodec/jfdctint_template.c | 398
-rw-r--r--  media/ffvpx/libavcodec/jrevdct.c | 1172
-rw-r--r--  media/ffvpx/libavcodec/libdav1d.c | 642
-rw-r--r--  media/ffvpx/libavcodec/log2_tab.c | 1
-rw-r--r--  media/ffvpx/libavcodec/mathops.h | 255
-rw-r--r--  media/ffvpx/libavcodec/mathtables.c | 123
-rw-r--r--  media/ffvpx/libavcodec/me_cmp.h | 96
-rw-r--r--  media/ffvpx/libavcodec/motion_est.h | 132
-rw-r--r--  media/ffvpx/libavcodec/moz.build | 143
-rw-r--r--  media/ffvpx/libavcodec/mpeg12data.h | 53
-rw-r--r--  media/ffvpx/libavcodec/mpegaudio.c | 50
-rw-r--r--  media/ffvpx/libavcodec/mpegaudio.h | 81
-rw-r--r--  media/ffvpx/libavcodec/mpegaudio_tablegen.h | 89
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodata.c | 133
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodata.h | 82
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodec_common.c | 483
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodec_common_tablegen.h | 72
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodec_fixed.c | 148
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodec_template.c | 1899
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodecheader.c | 152
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodecheader.h | 81
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodectab.h | 615
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodsp.c | 113
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodsp.h | 92
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodsp_data.c | 56
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodsp_fixed.c | 20
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodsp_float.c | 20
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiodsp_template.c | 372
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiotab.h | 102
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiotabs.c | 22
-rw-r--r--  media/ffvpx/libavcodec/mpegaudiotabs.h | 39
-rw-r--r--  media/ffvpx/libavcodec/mpegpicture.h | 105
-rw-r--r--  media/ffvpx/libavcodec/mpegutils.h | 142
-rw-r--r--  media/ffvpx/libavcodec/mpegvideo.h | 612
-rw-r--r--  media/ffvpx/libavcodec/mpegvideodata.h | 39
-rw-r--r--  media/ffvpx/libavcodec/mpegvideodsp.h | 47
-rw-r--r--  media/ffvpx/libavcodec/mpegvideoencdsp.h | 58
-rw-r--r--  media/ffvpx/libavcodec/null_bsf.c | 29
-rw-r--r--  media/ffvpx/libavcodec/options.c | 215
-rw-r--r--  media/ffvpx/libavcodec/options_table.h | 412
-rw-r--r--  media/ffvpx/libavcodec/packet.h | 731
-rw-r--r--  media/ffvpx/libavcodec/packet_internal.h | 73
-rw-r--r--  media/ffvpx/libavcodec/parser.c | 289
-rw-r--r--  media/ffvpx/libavcodec/parser.h | 58
-rw-r--r--  media/ffvpx/libavcodec/parser_list.c | 10
-rw-r--r--  media/ffvpx/libavcodec/parsers.c | 93
-rw-r--r--  media/ffvpx/libavcodec/pixblockdsp.h | 62
-rw-r--r--  media/ffvpx/libavcodec/profiles.c | 185
-rw-r--r--  media/ffvpx/libavcodec/profiles.h | 76
-rw-r--r--  media/ffvpx/libavcodec/pthread.c | 125
-rw-r--r--  media/ffvpx/libavcodec/pthread_frame.c | 977
-rw-r--r--  media/ffvpx/libavcodec/pthread_internal.h | 66
-rw-r--r--  media/ffvpx/libavcodec/pthread_slice.c | 260
-rw-r--r--  media/ffvpx/libavcodec/put_bits.h | 428
-rw-r--r--  media/ffvpx/libavcodec/qpeldsp.h | 83
-rw-r--r--  media/ffvpx/libavcodec/qsv_api.c | 42
-rw-r--r--  media/ffvpx/libavcodec/ratecontrol.h | 96
-rw-r--r--  media/ffvpx/libavcodec/raw.c | 370
-rw-r--r--  media/ffvpx/libavcodec/raw.h | 48
-rw-r--r--  media/ffvpx/libavcodec/rdft.c | 120
-rw-r--r--  media/ffvpx/libavcodec/rdft.h | 52
-rw-r--r--  media/ffvpx/libavcodec/rectangle.h | 124
-rw-r--r--  media/ffvpx/libavcodec/reverse.c | 1
-rw-r--r--  media/ffvpx/libavcodec/rl.h | 112
-rw-r--r--  media/ffvpx/libavcodec/rnd_avg.h | 51
-rw-r--r--  media/ffvpx/libavcodec/simple_idct.c | 269
-rw-r--r--  media/ffvpx/libavcodec/simple_idct.h | 64
-rw-r--r--  media/ffvpx/libavcodec/simple_idct_template.c | 371
-rw-r--r--  media/ffvpx/libavcodec/startcode.h | 36
-rw-r--r--  media/ffvpx/libavcodec/thread.h | 100
-rw-r--r--  media/ffvpx/libavcodec/threadframe.h | 89
-rw-r--r--  media/ffvpx/libavcodec/unary.h | 69
-rw-r--r--  media/ffvpx/libavcodec/utils.c | 1170
-rw-r--r--  media/ffvpx/libavcodec/vaapi.h | 86
-rw-r--r--  media/ffvpx/libavcodec/vaapi_av1.c | 451
-rw-r--r--  media/ffvpx/libavcodec/vaapi_decode.c | 726
-rw-r--r--  media/ffvpx/libavcodec/vaapi_decode.h | 91
-rw-r--r--  media/ffvpx/libavcodec/vaapi_hevc.h | 27
-rw-r--r--  media/ffvpx/libavcodec/vaapi_vp8.c | 237
-rw-r--r--  media/ffvpx/libavcodec/vaapi_vp9.c | 185
-rw-r--r--  media/ffvpx/libavcodec/version.c | 50
-rw-r--r--  media/ffvpx/libavcodec/version.h | 45
-rw-r--r--  media/ffvpx/libavcodec/version_major.h | 52
-rw-r--r--  media/ffvpx/libavcodec/videodsp.c | 61
-rw-r--r--  media/ffvpx/libavcodec/videodsp.h | 88
-rw-r--r--  media/ffvpx/libavcodec/videodsp_template.c | 105
-rw-r--r--  media/ffvpx/libavcodec/vlc.c | 378
-rw-r--r--  media/ffvpx/libavcodec/vlc.h | 144
-rw-r--r--  media/ffvpx/libavcodec/vorbis_parser.c | 343
-rw-r--r--  media/ffvpx/libavcodec/vorbis_parser.h | 74
-rw-r--r--  media/ffvpx/libavcodec/vorbis_parser_internal.h | 46
-rw-r--r--  media/ffvpx/libavcodec/vp3dsp.h | 64
-rw-r--r--  media/ffvpx/libavcodec/vp56.h | 256
-rw-r--r--  media/ffvpx/libavcodec/vp56dsp.h | 44
-rw-r--r--  media/ffvpx/libavcodec/vp8.c | 3014
-rw-r--r--  media/ffvpx/libavcodec/vp8.h | 361
-rw-r--r--  media/ffvpx/libavcodec/vp89_rac.h | 66
-rw-r--r--  media/ffvpx/libavcodec/vp8_parser.c | 79
-rw-r--r--  media/ffvpx/libavcodec/vp8data.h | 824
-rw-r--r--  media/ffvpx/libavcodec/vp8dsp.c | 753
-rw-r--r--  media/ffvpx/libavcodec/vp8dsp.h | 102
-rw-r--r--  media/ffvpx/libavcodec/vp9.c | 1914
-rw-r--r--  media/ffvpx/libavcodec/vp9.h | 73
-rw-r--r--  media/ffvpx/libavcodec/vp9_mc_template.c | 439
-rw-r--r--  media/ffvpx/libavcodec/vp9_parser.c | 70
-rw-r--r--  media/ffvpx/libavcodec/vp9_superframe_split_bsf.c | 170
-rw-r--r--  media/ffvpx/libavcodec/vp9block.c | 1457
-rw-r--r--  media/ffvpx/libavcodec/vp9data.c | 2247
-rw-r--r--  media/ffvpx/libavcodec/vp9data.h | 71
-rw-r--r--  media/ffvpx/libavcodec/vp9dec.h | 259
-rw-r--r--  media/ffvpx/libavcodec/vp9dsp.c | 110
-rw-r--r--  media/ffvpx/libavcodec/vp9dsp.h | 138
-rw-r--r--  media/ffvpx/libavcodec/vp9dsp_10bpp.c | 26
-rw-r--r--  media/ffvpx/libavcodec/vp9dsp_12bpp.c | 26
-rw-r--r--  media/ffvpx/libavcodec/vp9dsp_8bpp.c | 26
-rw-r--r--  media/ffvpx/libavcodec/vp9dsp_template.c | 2546
-rw-r--r--  media/ffvpx/libavcodec/vp9lpf.c | 202
-rw-r--r--  media/ffvpx/libavcodec/vp9mvs.c | 364
-rw-r--r--  media/ffvpx/libavcodec/vp9prob.c | 272
-rw-r--r--  media/ffvpx/libavcodec/vp9recon.c | 654
-rw-r--r--  media/ffvpx/libavcodec/vp9shared.h | 175
-rw-r--r--  media/ffvpx/libavcodec/vpx_rac.c | 53
-rw-r--r--  media/ffvpx/libavcodec/vpx_rac.h | 135
-rw-r--r--  media/ffvpx/libavcodec/x86/constants.c | 93
-rw-r--r--  media/ffvpx/libavcodec/x86/constants.h | 72
-rw-r--r--  media/ffvpx/libavcodec/x86/dct32.asm | 481
-rw-r--r--  media/ffvpx/libavcodec/x86/dct_init.c | 36
-rw-r--r--  media/ffvpx/libavcodec/x86/fdct.c | 378
-rw-r--r--  media/ffvpx/libavcodec/x86/fdct.h | 26
-rw-r--r--  media/ffvpx/libavcodec/x86/fdctdsp_init.c | 38
-rw-r--r--  media/ffvpx/libavcodec/x86/fft.asm | 838
-rw-r--r--  media/ffvpx/libavcodec/x86/fft.h | 32
-rw-r--r--  media/ffvpx/libavcodec/x86/fft_init.c | 47
-rw-r--r--  media/ffvpx/libavcodec/x86/flacdsp.asm | 326
-rw-r--r--  media/ffvpx/libavcodec/x86/flacdsp_init.c | 115
-rw-r--r--  media/ffvpx/libavcodec/x86/h264_intrapred.asm | 2044
-rw-r--r--  media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm | 1119
-rw-r--r--  media/ffvpx/libavcodec/x86/h264_intrapred_init.c | 336
-rw-r--r--  media/ffvpx/libavcodec/x86/idctdsp.asm | 112
-rw-r--r--  media/ffvpx/libavcodec/x86/idctdsp.h | 33
-rw-r--r--  media/ffvpx/libavcodec/x86/idctdsp_init.c | 159
-rw-r--r--  media/ffvpx/libavcodec/x86/imdct36.asm | 736
-rw-r--r--  media/ffvpx/libavcodec/x86/mathops.h | 153
-rw-r--r--  media/ffvpx/libavcodec/x86/moz.build | 55
-rw-r--r--  media/ffvpx/libavcodec/x86/mpegaudiodsp.c | 284
-rw-r--r--  media/ffvpx/libavcodec/x86/simple_idct.asm | 871
-rw-r--r--  media/ffvpx/libavcodec/x86/simple_idct.h | 53
-rw-r--r--  media/ffvpx/libavcodec/x86/simple_idct10.asm | 205
-rw-r--r--  media/ffvpx/libavcodec/x86/simple_idct10_template.asm | 369
-rw-r--r--  media/ffvpx/libavcodec/x86/videodsp.asm | 436
-rw-r--r--  media/ffvpx/libavcodec/x86/videodsp_init.c | 237
-rw-r--r--  media/ffvpx/libavcodec/x86/vp56_arith.h | 53
-rw-r--r--  media/ffvpx/libavcodec/x86/vp8dsp.asm | 1116
-rw-r--r--  media/ffvpx/libavcodec/x86/vp8dsp_init.c | 383
-rw-r--r--  media/ffvpx/libavcodec/x86/vp8dsp_loopfilter.asm | 1234
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init.c | 415
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init.h | 192
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init_10bpp.c | 25
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init_12bpp.c | 25
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp.c | 152
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp_template.c | 239
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9intrapred.asm | 2044
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9intrapred_16bpp.asm | 2497
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9itxfm.asm | 3197
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9itxfm_16bpp.asm | 2044
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9itxfm_template.asm | 142
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9lpf.asm | 1211
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9lpf_16bpp.asm | 823
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9mc.asm | 680
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9mc_16bpp.asm | 431
-rw-r--r--  media/ffvpx/libavcodec/x86/vpx_arith.h | 55
-rw-r--r--  media/ffvpx/libavcodec/xiph.c | 65
-rw-r--r--  media/ffvpx/libavcodec/xiph.h | 43
-rw-r--r--  media/ffvpx/libavcodec/xvididct.h | 36
-rw-r--r--  media/ffvpx/libavutil/aarch64/asm.S | 237
-rw-r--r--  media/ffvpx/libavutil/aarch64/bswap.h | 56
-rw-r--r--  media/ffvpx/libavutil/aarch64/cpu.c | 38
-rw-r--r--  media/ffvpx/libavutil/aarch64/cpu.h | 29
-rw-r--r--  media/ffvpx/libavutil/aarch64/float_dsp_init.c | 69
-rw-r--r--  media/ffvpx/libavutil/aarch64/float_dsp_neon.S | 202
-rw-r--r--  media/ffvpx/libavutil/aarch64/moz.build | 19
-rw-r--r--  media/ffvpx/libavutil/aarch64/timer.h | 50
-rw-r--r--  media/ffvpx/libavutil/adler32.c | 96
-rw-r--r--  media/ffvpx/libavutil/adler32.h | 63
-rw-r--r--  media/ffvpx/libavutil/arm/asm.S | 367
-rw-r--r--  media/ffvpx/libavutil/arm/bswap.h | 70
-rw-r--r--  media/ffvpx/libavutil/arm/cpu.c | 188
-rw-r--r--  media/ffvpx/libavutil/arm/cpu.h | 38
-rw-r--r--  media/ffvpx/libavutil/arm/float_dsp_arm.h | 29
-rw-r--r--  media/ffvpx/libavutil/arm/float_dsp_init_arm.c | 32
-rw-r--r--  media/ffvpx/libavutil/arm/float_dsp_init_neon.c | 59
-rw-r--r--  media/ffvpx/libavutil/arm/float_dsp_init_vfp.c | 46
-rw-r--r--  media/ffvpx/libavutil/arm/float_dsp_neon.S | 271
-rw-r--r--  media/ffvpx/libavutil/arm/float_dsp_vfp.S | 457
-rw-r--r--  media/ffvpx/libavutil/arm/intmath.h | 146
-rw-r--r--  media/ffvpx/libavutil/arm/intreadwrite.h | 91
-rw-r--r--  media/ffvpx/libavutil/arm/moz.build | 18
-rw-r--r--  media/ffvpx/libavutil/arm/timer.h | 46
-rw-r--r--  media/ffvpx/libavutil/attributes.h | 173
-rw-r--r--  media/ffvpx/libavutil/attributes_internal.h | 34
-rw-r--r--  media/ffvpx/libavutil/avassert.h | 75
-rw-r--r--  media/ffvpx/libavutil/avconfig.h | 6
-rw-r--r--  media/ffvpx/libavutil/avsscanf.c | 970
-rw-r--r--  media/ffvpx/libavutil/avstring.c | 463
-rw-r--r--  media/ffvpx/libavutil/avstring.h | 429
-rw-r--r--  media/ffvpx/libavutil/avutil.h | 371
-rw-r--r--  media/ffvpx/libavutil/avutil.symbols | 326
-rw-r--r--  media/ffvpx/libavutil/avutilres.rc | 55
-rw-r--r--  media/ffvpx/libavutil/base64.c | 181
-rw-r--r--  media/ffvpx/libavutil/base64.h | 72
-rw-r--r--  media/ffvpx/libavutil/bprint.c | 332
-rw-r--r--  media/ffvpx/libavutil/bprint.h | 251
-rw-r--r--  media/ffvpx/libavutil/bswap.h | 111
-rw-r--r--  media/ffvpx/libavutil/buffer.c | 416
-rw-r--r--  media/ffvpx/libavutil/buffer.h | 322
-rw-r--r--  media/ffvpx/libavutil/buffer_internal.h | 110
-rw-r--r--  media/ffvpx/libavutil/channel_layout.c | 1006
-rw-r--r--  media/ffvpx/libavutil/channel_layout.h | 783
-rw-r--r--  media/ffvpx/libavutil/color_utils.c | 234
-rw-r--r--  media/ffvpx/libavutil/color_utils.h | 56
-rw-r--r--  media/ffvpx/libavutil/colorspace.h | 150
-rw-r--r--  media/ffvpx/libavutil/common.h | 578
-rw-r--r--  media/ffvpx/libavutil/cpu.c | 282
-rw-r--r--  media/ffvpx/libavutil/cpu.h | 134
-rw-r--r--  media/ffvpx/libavutil/cpu_internal.h | 62
-rw-r--r--  media/ffvpx/libavutil/crc.c | 415
-rw-r--r--  media/ffvpx/libavutil/crc.h | 102
-rw-r--r--  media/ffvpx/libavutil/dict.c | 292
-rw-r--r--  media/ffvpx/libavutil/dict.h | 241
-rw-r--r--  media/ffvpx/libavutil/dict_internal.h | 37
-rw-r--r--  media/ffvpx/libavutil/dynarray.h | 70
-rw-r--r--  media/ffvpx/libavutil/error.c | 132
-rw-r--r--  media/ffvpx/libavutil/error.h | 128
-rw-r--r--  media/ffvpx/libavutil/eval.c | 792
-rw-r--r--  media/ffvpx/libavutil/eval.h | 140
-rw-r--r--  media/ffvpx/libavutil/ffmath.h | 67
-rw-r--r--  media/ffvpx/libavutil/fftime.h | 57
-rw-r--r--  media/ffvpx/libavutil/ffversion.h | 5
-rw-r--r--  media/ffvpx/libavutil/fifo.c | 510
-rw-r--r--  media/ffvpx/libavutil/fifo.h | 448
-rw-r--r--  media/ffvpx/libavutil/film_grain_params.c | 42
-rw-r--r--  media/ffvpx/libavutil/film_grain_params.h | 260
-rw-r--r--  media/ffvpx/libavutil/fixed_dsp.c | 172
-rw-r--r--  media/ffvpx/libavutil/fixed_dsp.h | 206
-rw-r--r--  media/ffvpx/libavutil/float_dsp.c | 167
-rw-r--r--  media/ffvpx/libavutil/float_dsp.h | 219
-rw-r--r--  media/ffvpx/libavutil/frame.c | 928
-rw-r--r--  media/ffvpx/libavutil/frame.h | 964
-rw-r--r--  media/ffvpx/libavutil/hwcontext.c | 955
-rw-r--r--  media/ffvpx/libavutil/hwcontext.h | 610
-rw-r--r--  media/ffvpx/libavutil/hwcontext_drm.h | 169
-rw-r--r--  media/ffvpx/libavutil/hwcontext_internal.h | 178
-rw-r--r--  media/ffvpx/libavutil/hwcontext_vaapi.c | 1948
-rw-r--r--  media/ffvpx/libavutil/hwcontext_vaapi.h | 117
-rw-r--r--  media/ffvpx/libavutil/imgutils.c | 692
-rw-r--r--  media/ffvpx/libavutil/imgutils.h | 331
-rw-r--r--  media/ffvpx/libavutil/imgutils_internal.h | 30
-rw-r--r--  media/ffvpx/libavutil/integer.c | 166
-rw-r--r--  media/ffvpx/libavutil/integer.h | 86
-rw-r--r--  media/ffvpx/libavutil/internal.h | 190
-rw-r--r--  media/ffvpx/libavutil/intfloat.h | 77
-rw-r--r--  media/ffvpx/libavutil/intmath.c | 34
-rw-r--r--  media/ffvpx/libavutil/intmath.h | 166
-rw-r--r--  media/ffvpx/libavutil/intreadwrite.h | 644
-rw-r--r--  media/ffvpx/libavutil/libm.h | 471
-rw-r--r--  media/ffvpx/libavutil/lls.c | 123
-rw-r--r--  media/ffvpx/libavutil/lls.h | 63
-rw-r--r--  media/ffvpx/libavutil/log.c | 492
-rw-r--r--  media/ffvpx/libavutil/log.h | 387
-rw-r--r--  media/ffvpx/libavutil/log2_tab.c | 32
-rw-r--r--  media/ffvpx/libavutil/macros.h | 80
-rw-r--r--  media/ffvpx/libavutil/mastering_display_metadata.c | 66
-rw-r--r--  media/ffvpx/libavutil/mastering_display_metadata.h | 128
-rw-r--r--  media/ffvpx/libavutil/mathematics.c | 215
-rw-r--r--  media/ffvpx/libavutil/mathematics.h | 245
-rw-r--r--  media/ffvpx/libavutil/mem.c | 568
-rw-r--r--  media/ffvpx/libavutil/mem.h | 609
-rw-r--r--  media/ffvpx/libavutil/mem_internal.h | 138
-rw-r--r--  media/ffvpx/libavutil/moz.build | 83
-rw-r--r--  media/ffvpx/libavutil/opt.c | 2241
-rw-r--r--  media/ffvpx/libavutil/opt.h | 891
-rw-r--r--  media/ffvpx/libavutil/parseutils.c | 790
-rw-r--r--  media/ffvpx/libavutil/parseutils.h | 197
-rw-r--r--  media/ffvpx/libavutil/pixdesc.c | 3327
-rw-r--r--  media/ffvpx/libavutil/pixdesc.h | 435
-rw-r--r--  media/ffvpx/libavutil/pixelutils.c | 95
-rw-r--r--  media/ffvpx/libavutil/pixelutils.h | 51
-rw-r--r--  media/ffvpx/libavutil/pixfmt.h | 691
-rw-r--r--  media/ffvpx/libavutil/qsort.h | 122
-rw-r--r--  media/ffvpx/libavutil/rational.c | 193
-rw-r--r--  media/ffvpx/libavutil/rational.h | 221
-rw-r--r--  media/ffvpx/libavutil/replaygain.h | 50
-rw-r--r--  media/ffvpx/libavutil/reverse.c | 40
-rw-r--r--  media/ffvpx/libavutil/reverse.h | 28
-rw-r--r--  media/ffvpx/libavutil/samplefmt.c | 263
-rw-r--r--  media/ffvpx/libavutil/samplefmt.h | 269
-rw-r--r--  media/ffvpx/libavutil/slicethread.c | 259
-rw-r--r--  media/ffvpx/libavutil/slicethread.h | 52
-rw-r--r--  media/ffvpx/libavutil/thread.h | 204
-rw-r--r--  media/ffvpx/libavutil/threadmessage.c | 237
-rw-r--r--  media/ffvpx/libavutil/threadmessage.h | 115
-rw-r--r--  media/ffvpx/libavutil/time.c | 98
-rw-r--r--  media/ffvpx/libavutil/time_internal.h | 49
-rw-r--r--  media/ffvpx/libavutil/timecode.c | 265
-rw-r--r--  media/ffvpx/libavutil/timecode.h | 199
-rw-r--r--  media/ffvpx/libavutil/timer.h | 156
-rw-r--r--  media/ffvpx/libavutil/timestamp.h | 78
-rw-r--r--  media/ffvpx/libavutil/utils.c | 112
-rw-r--r--  media/ffvpx/libavutil/version.h | 122
-rw-r--r--  media/ffvpx/libavutil/video_enc_params.c | 80
-rw-r--r--  media/ffvpx/libavutil/video_enc_params.h | 171
-rw-r--r--  media/ffvpx/libavutil/x86/asm.h | 154
-rw-r--r--  media/ffvpx/libavutil/x86/bswap.h | 88
-rw-r--r--  media/ffvpx/libavutil/x86/cpu.c | 284
-rw-r--r--  media/ffvpx/libavutil/x86/cpu.h | 114
-rw-r--r--  media/ffvpx/libavutil/x86/cpuid.asm | 91
-rw-r--r--  media/ffvpx/libavutil/x86/emms.asm | 30
-rw-r--r--  media/ffvpx/libavutil/x86/emms.h | 58
-rw-r--r--  media/ffvpx/libavutil/x86/fixed_dsp.asm | 48
-rw-r--r--  media/ffvpx/libavutil/x86/fixed_dsp_init.c | 35
-rw-r--r--  media/ffvpx/libavutil/x86/float_dsp.asm | 588
-rw-r--r--  media/ffvpx/libavutil/x86/float_dsp_init.c | 118
-rw-r--r--  media/ffvpx/libavutil/x86/imgutils.asm | 53
-rw-r--r--  media/ffvpx/libavutil/x86/imgutils_init.c | 48
-rw-r--r--  media/ffvpx/libavutil/x86/intmath.h | 169
-rw-r--r--  media/ffvpx/libavutil/x86/intreadwrite.h | 99
-rw-r--r--  media/ffvpx/libavutil/x86/lls.asm | 290
-rw-r--r--  media/ffvpx/libavutil/x86/lls_init.c | 46
-rw-r--r--  media/ffvpx/libavutil/x86/moz.build | 23
-rw-r--r--  media/ffvpx/libavutil/x86/pixelutils.asm | 328
-rw-r--r--  media/ffvpx/libavutil/x86/pixelutils.h | 26
-rw-r--r--  media/ffvpx/libavutil/x86/pixelutils_init.c | 85
-rw-r--r--  media/ffvpx/libavutil/x86/timer.h | 50
-rw-r--r--  media/ffvpx/libavutil/x86/x86inc.asm | 1716
-rw-r--r--  media/ffvpx/libavutil/x86/x86util.asm | 1028
-rw-r--r--  media/ffvpx/libavutil_visibility.h | 20
-rw-r--r--  media/ffvpx/moz.build | 13
529 files changed, 180328 insertions, 0 deletions
diff --git a/media/ffvpx/1846703.patch b/media/ffvpx/1846703.patch
new file mode 100644
index 0000000000..33fd3d484f
--- /dev/null
+++ b/media/ffvpx/1846703.patch
@@ -0,0 +1,76 @@
+From effadce6c756247ea8bae32dc13bb3e6f464f0eb Mon Sep 17 00:00:00 2001
+From: =?utf8?q?R=C3=A9mi=20Denis-Courmont?= <remi@remlab.net>
+Date: Sun, 16 Jul 2023 18:18:02 +0300
+Subject: [PATCH] avcodec/x86/mathops: clip constants used with shift
+ instructions within inline assembly
+
+Fixes assembling with binutil as >= 2.41
+
+Signed-off-by: James Almer <jamrial@gmail.com>
+---
+ libavcodec/x86/mathops.h | 26 +++++++++++++++++++++++---
+ 1 file changed, 23 insertions(+), 3 deletions(-)
+
+diff --git a/libavcodec/x86/mathops.h b/libavcodec/x86/mathops.h
+index 6298f5ed19..ca7e2dffc1 100644
+--- a/libavcodec/x86/mathops.h
++++ b/libavcodec/x86/mathops.h
+@@ -35,12 +35,20 @@
+ static av_always_inline av_const int MULL(int a, int b, unsigned shift)
+ {
+ int rt, dummy;
++ if (__builtin_constant_p(shift))
+ __asm__ (
+ "imull %3 \n\t"
+ "shrdl %4, %%edx, %%eax \n\t"
+ :"=a"(rt), "=d"(dummy)
+- :"a"(a), "rm"(b), "ci"((uint8_t)shift)
++ :"a"(a), "rm"(b), "i"(shift & 0x1F)
+ );
++ else
++ __asm__ (
++ "imull %3 \n\t"
++ "shrdl %4, %%edx, %%eax \n\t"
++ :"=a"(rt), "=d"(dummy)
++ :"a"(a), "rm"(b), "c"((uint8_t)shift)
++ );
+ return rt;
+ }
+
+@@ -113,19 +121,31 @@ __asm__ volatile(\
+ // avoid +32 for shift optimization (gcc should do that ...)
+ #define NEG_SSR32 NEG_SSR32
+ static inline int32_t NEG_SSR32( int32_t a, int8_t s){
++ if (__builtin_constant_p(s))
+ __asm__ ("sarl %1, %0\n\t"
+ : "+r" (a)
+- : "ic" ((uint8_t)(-s))
++ : "i" (-s & 0x1F)
+ );
++ else
++ __asm__ ("sarl %1, %0\n\t"
++ : "+r" (a)
++ : "c" ((uint8_t)(-s))
++ );
+ return a;
+ }
+
+ #define NEG_USR32 NEG_USR32
+ static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
++ if (__builtin_constant_p(s))
+ __asm__ ("shrl %1, %0\n\t"
+ : "+r" (a)
+- : "ic" ((uint8_t)(-s))
++ : "i" (-s & 0x1F)
+ );
++ else
++ __asm__ ("shrl %1, %0\n\t"
++ : "+r" (a)
++ : "c" ((uint8_t)(-s))
++ );
+ return a;
+ }
+
+--
+2.30.2
+
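The 1846703.patch above works around a change in binutils 2.41, whose assembler rejects shift immediates outside the 0-31 range that older versions silently truncated. The fix checks __builtin_constant_p() on the shift amount: a compile-time constant is masked to five bits and passed through the "i" (immediate) constraint, while a runtime value is routed through the "c" constraint so it lands in CL. The following is a minimal standalone sketch of that constraint-selection pattern, not the FFmpeg code itself; it assumes x86/x86-64 with GCC or Clang, and the function name and test values are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Logical right shift via inline asm, choosing the operand constraint
     * the same way the patch above does. Build with optimization (e.g. -O2)
     * so the constant branch is resolved before constraint checking. */
    static inline uint32_t shr_u32(uint32_t a, int8_t s)
    {
        if (__builtin_constant_p(s))
            __asm__ ("shrl %1, %0"
                     : "+r"(a)
                     : "i"(s & 0x1F));   /* immediate, clipped to 0..31 */
        else
            __asm__ ("shrl %1, %0"
                     : "+r"(a)
                     : "c"((uint8_t)s)); /* variable shift must sit in CL */
        return a;
    }

    int main(void)
    {
        volatile int8_t n = 3;             /* runtime value: takes the "c" branch */
        printf("%u\n", shr_u32(0x80u, 4)); /* constant value: takes the "i" branch */
        printf("%u\n", shr_u32(0x80u, n));
        return 0;
    }

The masking is the essential point: in the patched NEG_SSR32/NEG_USR32 the constant operand is a negated shift, which as an 8-bit immediate falls outside 0-31, so clipping it with & 0x1F keeps the generated instruction acceptable to the stricter assembler.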
diff --git a/media/ffvpx/COPYING.LGPLv2.1 b/media/ffvpx/COPYING.LGPLv2.1
new file mode 100644
index 0000000000..58af0d3787
--- /dev/null
+++ b/media/ffvpx/COPYING.LGPLv2.1
@@ -0,0 +1,502 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/media/ffvpx/COPYING.LGPLv3 b/media/ffvpx/COPYING.LGPLv3
new file mode 100644
index 0000000000..65c5ca88a6
--- /dev/null
+++ b/media/ffvpx/COPYING.LGPLv3
@@ -0,0 +1,165 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/media/ffvpx/README_MOZILLA b/media/ffvpx/README_MOZILLA
new file mode 100644
index 0000000000..85616cc902
--- /dev/null
+++ b/media/ffvpx/README_MOZILLA
@@ -0,0 +1,161 @@
+# FFVPX
+
+This directory contains files used in gecko builds from FFmpeg
+(http://ffmpeg.org). The current files are from FFmpeg as of revision
+fed07efcde72824ac1ada80d4af4e91ac4fcfc14, obtained with:
+
+> git clone https://git.ffmpeg.org/ffmpeg.git ffmpeg
+> git checkout fed07efcde72824ac1ada80d4af4e91ac4fcfc14
+
+This copy has a single patch backported from a later revision to fix the build
+with recent binutils:
+
+http://git.videolan.org/?p=ffmpeg.git;a=patch;h=effadce6c756247ea8bae32dc13bb3e6f464f0eb
+
+It is available as `1846703.patch` in this directory. As usual, this copy also
+carries changes that don't come from upstream; they are collected in
+`changes.patch`.
+
+All source files match their path from the library's source archive.
+
+Currently, we only use the vp8, vp9, av1 (libdav1d), mp3, and flac portions of
+the library. If this changes, the configuration files will most likely need to
+be updated. Decoding AV1 via libdav1d is supported, although the decoder
+(libdav1d) is vendored separately; `ffvpx` only contains the code to use
+`libdav1d` through the `ffmpeg` API.
+
+The ffmpeg project recommends using ffmpeg's tip, not a particular release.
+
+## Source files
+
+This library only uses a small part of `ffmpeg`. To update the source tree, run:
+
+> rsync -av --existing ffmpeg-upstream ffmpeg-mozilla-dir
+
+Compilation will reveal if any files are missing.
+
+Then, make sure the files:
+
+- `libavcodec/codec_list.c`
+- `libavcodec/bsf_list.c`
+- `libavcodec/parser_list.c`
+
+still include the conditional compilation directives; in practice this means
+reverting them after the rsync (or reverting and adjusting them if new codecs
+are added). A sketch of the expected structure is shown below.
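+
+As a rough illustration only (not the exact upstream contents), these list
+files are expected to keep per-entry preprocessor guards rather than a flat,
+configure-generated list. The exact table type (`FFCodec` vs. `AVCodec`) and
+the decoder entries depend on the FFmpeg revision; the names below are
+examples drawn from the decoders we enable:
+
+    /* codec_list.c -- sketch of the guarded structure we want to preserve.
+     * In the real file the decoder externs come from libavcodec headers. */
+    static const FFCodec * const codec_list[] = {
+    #if CONFIG_VP8_DECODER
+        &ff_vp8_decoder,
+    #endif
+    #if CONFIG_FLAC_DECODER
+        &ff_flac_decoder,
+    #endif
+        NULL
+    };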
+
+## Add headers for a new major ffmpeg version
+
+If a new major version of ffmpeg is being imported into the tree, it's
+necessary to vendor the new ffmpeg headers in
+`dom/media/platforms/ffmpeg/ffmpegxx`, where xx is the new ffmpeg major
+version number, and to update the dynamic linker wrapper in
+`dom/media/platforms/ffmpeg/{FFmpegLibWrapper.cpp,FFmpegRuntimeLinker.cpp}`
+so it knows about the new version and its API.
+
+## `config.h` generation
+
+Configuration files are generated as follows using the configure script (here
+for Linux desktop):
+
+> ./configure --disable-all --enable-avcodec --enable-decoder=vp8,vp9,mp3,flac,av1 \
+ --enable-parser=vp8,vp9 --disable-static --enable-shared \
+ --disable-autodetect --disable-iconv --enable-libdav1d --disable-linux-perf
+
+For Linux Desktop, there are a number of overrides in `config_common.h` for
+VAAPI. We can't put them in `config_components.h` because this breaks the build.
+
+`config_components.h` is common to all platforms, and then a specific config
+file is included, based on the platform and architecture. This needs to be
+regenerated on all OSes. macOS is similar to Linux Desktop. For other OSes,
+see the instructions below:
+
+# Linux 32-bit from a Linux 64-bit host
+
+To produce `config_unix32.h` on a 64-bit machine, add to the configure command
+above: `--disable-asm --disable-x86asm --cc='clang -m32'`
+
+Those files shouldn't be Linux-specific; they are also used on e.g. various
+BSDs, hence the `--disable-linux-perf` above.
+
+# macOS, x86 and aarch64
+
+Similar to Linux Desktop in all respects.
+
+# Android
+
+config_android32.h: only mp3 and flac are enabled on this platform. The
+command below assumes the standard toolchain location for a Firefox
+developer; substitute the NDK version in use when updating:
+
+> ./configure --disable-all --enable-avcodec --enable-decoder=mp3,flac \
+ --disable-static --enable-shared --disable-autodetect \
+ --enable-small --target-os=linux --arch=arm --enable-cross-compile \
+ --cc=$HOME/.mozbuild/android-ndk-r20/toolchains/llvm/prebuilt/darwin-x86_64/bin/armv7a-linux-androideabi16-clang
+
+config_android64.h, config_android_x86_64.h: substitute aarch64 (respectively
+x86_64) as the architecture and use the matching compiler.
+
+For config_android_x86_64.h, also enable vp8 and vp9.
+
+# Windows
+
+The general idea is to get an msys prompt with an msvc toolchain
+configured, so that the configure script for ffmpeg can run successfully
+with the thread flavor Gecko prefers on Windows (win32, not pthreads).
+
+- Open Visual Studio (2019 when writing this)
+- Tools → Command Line → Developer command prompt
+- Check that compiling a small C program with cl.exe works
+- Install msys2 from https://www.msys2.org/
+- From the Visual Studio Developer Command Prompt, run the following:
+ > C:\msys64\msys2_shell.cmd -msys -use-full-path
+- This pops up a new shell window, with a 32-bit build environment
+- Check that compiling a small C program with `cl.exe` works
+- Run ffmpeg's ./configure with the options needed:
+
+> ./configure --disable-all --enable-avcodec \
+  --enable-decoder=vp8,vp9,mp3,flac --enable-parser=vp8,vp9 \
+  --disable-static --enable-shared --disable-autodetect \
+  --enable-w32threads --toolchain=msvc
+
+- Copy `config.h` and `config.asm` as `config_win32.h` and `config_win32.asm`
+ in this directory
+- Close the `msys2` window, go back to the `cmd.exe` window
+- Run this command, maybe substituting the Visual Studio version:
+
+> "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" amd64
+
+- This pops up a new shell window, now with a 64-bit build
+ environment
+- Check that compiling a small C program with `cl.exe` works
+- Run ffmpeg's `./configure` with the options needed:
+
+> ./configure --disable-all --enable-avcodec --enable-decoder=vp8,vp9,mp3,flac \
+  --enable-parser=vp8,vp9 --disable-static --enable-shared \
+  --disable-autodetect --enable-w32threads --toolchain=msvc
+
+- Copy `config.h` and `config.asm` as `config_win64.h` and `config_win64.asm`.
+
+config_win64_aarch64.h: you will need to install cpp (either using msys or
+wsl), then:
+
+> ./configure --disable-all --enable-avcodec --enable-decoder=vp8,vp9,mp3,flac \
+ --enable-parser=vp8,vp9 --disable-static --enable-shared \
+ --disable-autodetect --toolchain=msvc --enable-cross-compile \
+ --target-os=win32 --arch=arm64
+
+
+# Finally
+
+Run the following command on all config* files:
+
+> sed -i -E '/HAVE_(MALLOC_H|ARC4RANDOM|LOCALTIME_R|MEMALIGN|POSIX_MEMALIGN)/d' config.*
+
+to avoid redefined macros.
+
+# Build, fix errors
+
+Missing files need to be added, and the patch to rename `time.h` to
+`fftime.h` might need to be reapplied or extended to other files that include
+`time.h`. The issue is that ffmpeg's `time.h` gets picked up instead of the
+system header, causing all sorts of issues.
+
+`moz.build` files might need to be modified as well, in light of compilation
+and link errors.
+
+The set of exported symbols is also likely to change. Adjust
+`libavutil/avutil.symbols` and `libavcodec/avcodec.symbols` by removing and
+adding symbols until the build passes, as illustrated below.
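+
+For reference (assuming the current format of these files, one exported
+symbol name per line, is kept), the entries are plain symbol names; the lines
+below are illustrative examples of existing libavcodec symbols, not entries
+that necessarily need to be added:
+
+    av_packet_alloc
+    av_packet_free
+    avcodec_find_decoder
+
+Adding or removing a line should add or remove the corresponding symbol from
+the set exported by the built library.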
diff --git a/media/ffvpx/changes.patch b/media/ffvpx/changes.patch
new file mode 100644
index 0000000000..af2aa20849
--- /dev/null
+++ b/media/ffvpx/changes.patch
@@ -0,0 +1,82 @@
+diff --git a/media/ffvpx/libavutil/eval.c b/media/ffvpx/libavutil/eval.c
+index 7642b91..e938bd5 100644
+--- a/media/ffvpx/libavutil/eval.c
++++ b/media/ffvpx/libavutil/eval.c
+@@ -34,7 +34,7 @@
+ #include "internal.h"
+ #include "log.h"
+ #include "mathematics.h"
+-#include "time.h"
++#include "fftime.h"
+ #include "avstring.h"
+ #include "timer.h"
+
+diff --git a/media/ffvpx/libavutil/time.c b/media/ffvpx/libavutil/time.c
+index dbaee02..69419e6 100644
+--- a/media/ffvpx/libavutil/time.c
++++ b/media/ffvpx/libavutil/time.c
+@@ -33,7 +33,7 @@
+ #include <windows.h>
+ #endif
+
+-#include "time.h"
++#include "fftime.h"
+ #include "error.h"
+
+ int64_t av_gettime(void)
+diff --git a/media/ffvpx/libavutil/parseutils.c b/media/ffvpx/libavutil/parseutils.c
+index 9fb8d0a..97ad3b9 100644
+--- a/media/ffvpx/libavutil/parseutils.c
++++ b/media/ffvpx/libavutil/parseutils.c
+@@ -28,7 +28,7 @@
+ #include "common.h"
+ #include "eval.h"
+ #include "log.h"
+-#include "random_seed.h"
++/* #include "random_seed.h" */
+ #include "time_internal.h"
+ #include "parseutils.h"
+
+@@ -367,7 +367,7 @@ int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
+ rgba_color[3] = 255;
+
+ if (!av_strcasecmp(color_string2, "random") || !av_strcasecmp(color_string2, "bikeshed")) {
+- int rgba = av_get_random_seed();
++ int rgba = 0xffffffff; /* av_get_random_seed(); */
+ rgba_color[0] = rgba >> 24;
+ rgba_color[1] = rgba >> 16;
+ rgba_color[2] = rgba >> 8;
+diff -up media/ffvpx/libavutil/fftime.h media/ffvpx/libavutil/fftime.h
+--- media/ffvpx/libavutil/fftime.h 2021-12-06 14:51:40.378642713 +0100
++++ media/ffvpx/libavutil/fftime.h 2021-12-06 14:51:54.618098212 +0100
+@@ -22,6 +22,7 @@
+ #define AVUTIL_TIME_H
+
+ #include <stdint.h>
++#include <time.h>
+
+ /**
+ * Get the current time in microseconds.
+ *
+diff --git a/media/ffvpx/compat/w32pthreads.h b/media/ffvpx/compat/w32pthreads.h
+--- a/media/ffvpx/compat/w32pthreads.h
++++ b/media/ffvpx/compat/w32pthreads.h
+@@ -39,17 +39,17 @@
+ #include <windows.h>
+ #include <process.h>
+ #include <time.h>
+
+ #include "libavutil/attributes.h"
+ #include "libavutil/common.h"
+ #include "libavutil/internal.h"
+ #include "libavutil/mem.h"
+-#include "libavutil/time.h"
++#include "libavutil/fftime.h"
+
+ typedef struct pthread_t {
+ void *handle;
+ void *(*func)(void* arg);
+ void *arg;
+ void *ret;
+ } pthread_t;
+
diff --git a/media/ffvpx/compat/atomics/win32/stdatomic.h b/media/ffvpx/compat/atomics/win32/stdatomic.h
new file mode 100644
index 0000000000..28a627bfd3
--- /dev/null
+++ b/media/ffvpx/compat/atomics/win32/stdatomic.h
@@ -0,0 +1,181 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef COMPAT_ATOMICS_WIN32_STDATOMIC_H
+#define COMPAT_ATOMICS_WIN32_STDATOMIC_H
+
+#define WIN32_LEAN_AND_MEAN
+#include <stddef.h>
+#include <stdint.h>
+#include <windows.h>
+
+#define ATOMIC_FLAG_INIT 0
+
+#define ATOMIC_VAR_INIT(value) (value)
+
+#define atomic_init(obj, value) \
+do { \
+ *(obj) = (value); \
+} while(0)
+
+#define kill_dependency(y) ((void)0)
+
+#define atomic_thread_fence(order) \
+ MemoryBarrier();
+
+#define atomic_signal_fence(order) \
+ ((void)0)
+
+#define atomic_is_lock_free(obj) 0
+
+typedef intptr_t atomic_flag;
+typedef intptr_t atomic_bool;
+typedef intptr_t atomic_char;
+typedef intptr_t atomic_schar;
+typedef intptr_t atomic_uchar;
+typedef intptr_t atomic_short;
+typedef intptr_t atomic_ushort;
+typedef intptr_t atomic_int;
+typedef intptr_t atomic_uint;
+typedef intptr_t atomic_long;
+typedef intptr_t atomic_ulong;
+typedef intptr_t atomic_llong;
+typedef intptr_t atomic_ullong;
+typedef intptr_t atomic_wchar_t;
+typedef intptr_t atomic_int_least8_t;
+typedef intptr_t atomic_uint_least8_t;
+typedef intptr_t atomic_int_least16_t;
+typedef intptr_t atomic_uint_least16_t;
+typedef intptr_t atomic_int_least32_t;
+typedef intptr_t atomic_uint_least32_t;
+typedef intptr_t atomic_int_least64_t;
+typedef intptr_t atomic_uint_least64_t;
+typedef intptr_t atomic_int_fast8_t;
+typedef intptr_t atomic_uint_fast8_t;
+typedef intptr_t atomic_int_fast16_t;
+typedef intptr_t atomic_uint_fast16_t;
+typedef intptr_t atomic_int_fast32_t;
+typedef intptr_t atomic_uint_fast32_t;
+typedef intptr_t atomic_int_fast64_t;
+typedef intptr_t atomic_uint_fast64_t;
+typedef intptr_t atomic_intptr_t;
+typedef intptr_t atomic_uintptr_t;
+typedef intptr_t atomic_size_t;
+typedef intptr_t atomic_ptrdiff_t;
+typedef intptr_t atomic_intmax_t;
+typedef intptr_t atomic_uintmax_t;
+
+#define atomic_store(object, desired) \
+do { \
+ *(object) = (desired); \
+ MemoryBarrier(); \
+} while (0)
+
+#define atomic_store_explicit(object, desired, order) \
+ atomic_store(object, desired)
+
+#define atomic_load(object) \
+ (MemoryBarrier(), *(object))
+
+#define atomic_load_explicit(object, order) \
+ atomic_load(object)
+
+#define atomic_exchange(object, desired) \
+ InterlockedExchangePointer((PVOID volatile *)object, (PVOID)desired)
+
+#define atomic_exchange_explicit(object, desired, order) \
+ atomic_exchange(object, desired)
+
+static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *expected,
+ intptr_t desired)
+{
+ intptr_t old = *expected;
+ *expected = (intptr_t)InterlockedCompareExchangePointer(
+ (PVOID *)object, (PVOID)desired, (PVOID)old);
+ return *expected == old;
+}
+
+#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) \
+ atomic_compare_exchange_strong(object, expected, desired)
+
+#define atomic_compare_exchange_weak(object, expected, desired) \
+ atomic_compare_exchange_strong(object, expected, desired)
+
+#define atomic_compare_exchange_weak_explicit(object, expected, desired, success, failure) \
+ atomic_compare_exchange_weak(object, expected, desired)
+
+#ifdef _WIN64
+#define atomic_fetch_add(object, operand) \
+ InterlockedExchangeAdd64(object, operand)
+
+#define atomic_fetch_sub(object, operand) \
+ InterlockedExchangeAdd64(object, -(operand))
+
+#define atomic_fetch_or(object, operand) \
+ InterlockedOr64(object, operand)
+
+#define atomic_fetch_xor(object, operand) \
+ InterlockedXor64(object, operand)
+
+#define atomic_fetch_and(object, operand) \
+ InterlockedAnd64(object, operand)
+#else
+#define atomic_fetch_add(object, operand) \
+ InterlockedExchangeAdd(object, operand)
+
+#define atomic_fetch_sub(object, operand) \
+ InterlockedExchangeAdd(object, -(operand))
+
+#define atomic_fetch_or(object, operand) \
+ InterlockedOr(object, operand)
+
+#define atomic_fetch_xor(object, operand) \
+ InterlockedXor(object, operand)
+
+#define atomic_fetch_and(object, operand) \
+ InterlockedAnd(object, operand)
+#endif /* _WIN64 */
+
+#define atomic_fetch_add_explicit(object, operand, order) \
+ atomic_fetch_add(object, operand)
+
+#define atomic_fetch_sub_explicit(object, operand, order) \
+ atomic_fetch_sub(object, operand)
+
+#define atomic_fetch_or_explicit(object, operand, order) \
+ atomic_fetch_or(object, operand)
+
+#define atomic_fetch_xor_explicit(object, operand, order) \
+ atomic_fetch_xor(object, operand)
+
+#define atomic_fetch_and_explicit(object, operand, order) \
+ atomic_fetch_and(object, operand)
+
+#define atomic_flag_test_and_set(object) \
+ atomic_exchange(object, 1)
+
+#define atomic_flag_test_and_set_explicit(object, order) \
+ atomic_flag_test_and_set(object)
+
+#define atomic_flag_clear(object) \
+ atomic_store(object, 0)
+
+#define atomic_flag_clear_explicit(object, order) \
+ atomic_flag_clear(object)
+
+#endif /* COMPAT_ATOMICS_WIN32_STDATOMIC_H */
diff --git a/media/ffvpx/compat/va_copy.h b/media/ffvpx/compat/va_copy.h
new file mode 100644
index 0000000000..a40bbe6637
--- /dev/null
+++ b/media/ffvpx/compat/va_copy.h
@@ -0,0 +1,34 @@
+/*
+ * MSVC Compatible va_copy macro
+ * Copyright (c) 2012 Derek Buitenhuis
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef COMPAT_VA_COPY_H
+#define COMPAT_VA_COPY_H
+
+#include <stdarg.h>
+
+#if !defined(va_copy) && defined(_MSC_VER)
+#define va_copy(dst, src) ((dst) = (src))
+#endif
+#if !defined(va_copy) && defined(__GNUC__) && __GNUC__ < 3
+#define va_copy(dst, src) __va_copy(dst, src)
+#endif
+
+#endif /* COMPAT_VA_COPY_H */
diff --git a/media/ffvpx/compat/w32pthreads.h b/media/ffvpx/compat/w32pthreads.h
new file mode 100644
index 0000000000..b7a65b233b
--- /dev/null
+++ b/media/ffvpx/compat/w32pthreads.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2010-2011 x264 project
+ *
+ * Authors: Steven Walters <kemuri9@gmail.com>
+ * Pegasys Inc. <http://www.pegasys-inc.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * w32threads to pthreads wrapper
+ */
+
+#ifndef COMPAT_W32PTHREADS_H
+#define COMPAT_W32PTHREADS_H
+
+/* Build up a pthread-like API using underlying Windows API. Have only static
+ * methods so as to not conflict with a potentially linked in pthread-win32
+ * library.
+ * As most functions here are used without checking return values,
+ * only implement return values as necessary. */
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <process.h>
+#include <time.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/common.h"
+#include "libavutil/internal.h"
+#include "libavutil/mem.h"
+#include "libavutil/fftime.h"
+
+typedef struct pthread_t {
+ void *handle;
+ void *(*func)(void* arg);
+ void *arg;
+ void *ret;
+} pthread_t;
+
+/* use light weight mutex/condition variable API for Windows Vista and later */
+typedef SRWLOCK pthread_mutex_t;
+typedef CONDITION_VARIABLE pthread_cond_t;
+
+#define PTHREAD_MUTEX_INITIALIZER SRWLOCK_INIT
+#define PTHREAD_COND_INITIALIZER CONDITION_VARIABLE_INIT
+
+#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
+#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
+
+#define PTHREAD_CANCEL_ENABLE 1
+#define PTHREAD_CANCEL_DISABLE 0
+
+static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
+{
+ pthread_t *h = (pthread_t*)arg;
+ h->ret = h->func(h->arg);
+ return 0;
+}
+
+static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
+ void *(*start_routine)(void*), void *arg)
+{
+ thread->func = start_routine;
+ thread->arg = arg;
+#if HAVE_WINRT
+ thread->handle = (void*)CreateThread(NULL, 0, win32thread_worker, thread,
+ 0, NULL);
+#else
+ thread->handle = (void*)_beginthreadex(NULL, 0, win32thread_worker, thread,
+ 0, NULL);
+#endif
+ return !thread->handle;
+}
+
+static av_unused int pthread_join(pthread_t thread, void **value_ptr)
+{
+ DWORD ret = WaitForSingleObject(thread.handle, INFINITE);
+ if (ret != WAIT_OBJECT_0) {
+ if (ret == WAIT_ABANDONED)
+ return EINVAL;
+ else
+ return EDEADLK;
+ }
+ if (value_ptr)
+ *value_ptr = thread.ret;
+ CloseHandle(thread.handle);
+ return 0;
+}
+
+static inline int pthread_mutex_init(pthread_mutex_t *m, void* attr)
+{
+ InitializeSRWLock(m);
+ return 0;
+}
+static inline int pthread_mutex_destroy(pthread_mutex_t *m)
+{
+ /* Unlocked SWR locks use no resources */
+ return 0;
+}
+static inline int pthread_mutex_lock(pthread_mutex_t *m)
+{
+ AcquireSRWLockExclusive(m);
+ return 0;
+}
+static inline int pthread_mutex_unlock(pthread_mutex_t *m)
+{
+ ReleaseSRWLockExclusive(m);
+ return 0;
+}
+
+typedef INIT_ONCE pthread_once_t;
+#define PTHREAD_ONCE_INIT INIT_ONCE_STATIC_INIT
+
+static av_unused int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
+{
+ BOOL pending = FALSE;
+ InitOnceBeginInitialize(once_control, 0, &pending, NULL);
+ if (pending)
+ init_routine();
+ InitOnceComplete(once_control, 0, NULL);
+ return 0;
+}
+
+static inline int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
+{
+ InitializeConditionVariable(cond);
+ return 0;
+}
+
+/* native condition variables do not destroy */
+static inline int pthread_cond_destroy(pthread_cond_t *cond)
+{
+ return 0;
+}
+
+static inline int pthread_cond_broadcast(pthread_cond_t *cond)
+{
+ WakeAllConditionVariable(cond);
+ return 0;
+}
+
+static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+{
+ SleepConditionVariableSRW(cond, mutex, INFINITE, 0);
+ return 0;
+}
+
+static inline int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime)
+{
+ int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
+ DWORD t = av_clip64(abs_milli - av_gettime() / 1000, 0, UINT32_MAX);
+
+ if (!SleepConditionVariableSRW(cond, mutex, t, 0)) {
+ DWORD err = GetLastError();
+ if (err == ERROR_TIMEOUT)
+ return ETIMEDOUT;
+ else
+ return EINVAL;
+ }
+ return 0;
+}
+
+static inline int pthread_cond_signal(pthread_cond_t *cond)
+{
+ WakeConditionVariable(cond);
+ return 0;
+}
+
+static inline int pthread_setcancelstate(int state, int *oldstate)
+{
+ return 0;
+}
+
+#endif /* COMPAT_W32PTHREADS_H */
diff --git a/media/ffvpx/config.h b/media/ffvpx/config.h
new file mode 100644
index 0000000000..528215efd6
--- /dev/null
+++ b/media/ffvpx/config.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MOZ_FFVPX_CONFIG_H
+#define MOZ_FFVPX_CONFIG_H
+
+#if defined(MOZ_FFVPX_AUDIOONLY)
+# if defined(MOZ_WIDGET_ANDROID) && defined(__arm__)
+# include "config_android32.h"
+# elif defined(MOZ_WIDGET_ANDROID) && defined(__aarch64__)
+# include "config_android64.h"
+# elif defined(__aarch64__)
+# include "config_unix_aarch64.h"
+# else
+ // no assembly
+# include "config_generic.h"
+# endif
+#else // MOZ_FFVPX_AUDIOONLY
+# if defined(XP_WIN)
+// Avoid conflicts with mozilla-config.h
+# if !defined(_MSC_VER)
+# undef HAVE_DIRENT_H
+# undef HAVE_UNISTD_H
+# endif
+# if defined(_ARM64_)
+# include "config_win64_aarch64.h"
+# else
+# if defined(HAVE_64BIT_BUILD)
+# include "config_win64.h"
+# else
+# include "config_win32.h"
+# endif
+# endif
+// Adjust configure defines for GCC
+# if !defined(_MSC_VER)
+# if !defined(HAVE_64BIT_BUILD)
+# undef HAVE_MM_EMPTY
+# define HAVE_MM_EMPTY 0
+# endif
+# undef HAVE_LIBC_MSVCRT
+# define HAVE_LIBC_MSVCRT 0
+# endif
+# elif defined(XP_DARWIN)
+# if defined(__aarch64__)
+# include "config_darwin_aarch64.h"
+# else
+# include "config_darwin64.h"
+# endif
+# elif defined(MOZ_WIDGET_ANDROID) && defined(HAVE_64BIT_BUILD)
+# include "config_android_x86_64.h"
+# elif defined(XP_UNIX)
+# if defined(HAVE_64BIT_BUILD)
+# include "config_unix64.h"
+# else
+# include "config_unix32.h"
+# endif
+# endif
+#endif // else MOZ_FFVPX_AUDIOONLY
+ //
+#include "config_override.h"
+
+#endif // MOZ_FFVPX_CONFIG_H
diff --git a/media/ffvpx/config_android32.h b/media/ffvpx/config_android32.h
new file mode 100644
index 0000000000..d5a5a1c546
--- /dev/null
+++ b/media/ffvpx/config_android32.h
@@ -0,0 +1,740 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-decoder='mp3,flac' --disable-static --enable-shared --disable-autodetect --enable-small --target-os=linux --arch=arm --enable-cross-compile --cc=/Users/padenot/.mozbuild/android-ndk-r21d/toolchains/llvm/prebuilt/darwin-x86_64/bin/armv7a-linux-androideabi16-clang"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2022
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Android (6454773 based on r365631c2) clang version 9.0.8 (https://android.googlesource.com/toolchain/llvm-project 98c855489587874b2a325e7a516b99d838599c6f) (based on LLVM 9.0.8svn)"
+#define OS_NAME linux
+#define av_restrict restrict
+#define EXTERN_PREFIX ""
+#define EXTERN_ASM
+#define BUILDSUF ""
+#define SLIBSUF ".so"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 0
+#define ARCH_ALPHA 0
+#define ARCH_ARM 1
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_LOONGARCH 0
+#define ARCH_LOONGARCH32 0
+#define ARCH_LOONGARCH64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_RISCV 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 0
+#define ARCH_X86_32 0
+#define ARCH_X86_64 0
+#define HAVE_ARMV5TE 1
+#define HAVE_ARMV6 1
+#define HAVE_ARMV6T2 1
+#define HAVE_ARMV8 0
+#define HAVE_NEON 1
+#define HAVE_VFP 1
+#define HAVE_VFPV3 1
+#define HAVE_SETEND 1
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 0
+#define HAVE_AMD3DNOW 0
+#define HAVE_AMD3DNOWEXT 0
+#define HAVE_AVX 0
+#define HAVE_AVX2 0
+#define HAVE_AVX512 0
+#define HAVE_AVX512ICL 0
+#define HAVE_FMA3 0
+#define HAVE_FMA4 0
+#define HAVE_MMX 0
+#define HAVE_MMXEXT 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
+#define HAVE_SSE3 0
+#define HAVE_SSE4 0
+#define HAVE_SSE42 0
+#define HAVE_SSSE3 0
+#define HAVE_XOP 0
+#define HAVE_CPUNOP 0
+#define HAVE_I686 0
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_LSX 0
+#define HAVE_LASX 0
+#define HAVE_ARMV5TE_EXTERNAL 1
+#define HAVE_ARMV6_EXTERNAL 1
+#define HAVE_ARMV6T2_EXTERNAL 1
+#define HAVE_ARMV8_EXTERNAL 0
+#define HAVE_NEON_EXTERNAL 1
+#define HAVE_VFP_EXTERNAL 1
+#define HAVE_VFPV3_EXTERNAL 1
+#define HAVE_SETEND_EXTERNAL 1
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 0
+#define HAVE_AMD3DNOW_EXTERNAL 0
+#define HAVE_AMD3DNOWEXT_EXTERNAL 0
+#define HAVE_AVX_EXTERNAL 0
+#define HAVE_AVX2_EXTERNAL 0
+#define HAVE_AVX512_EXTERNAL 0
+#define HAVE_AVX512ICL_EXTERNAL 0
+#define HAVE_FMA3_EXTERNAL 0
+#define HAVE_FMA4_EXTERNAL 0
+#define HAVE_MMX_EXTERNAL 0
+#define HAVE_MMXEXT_EXTERNAL 0
+#define HAVE_SSE_EXTERNAL 0
+#define HAVE_SSE2_EXTERNAL 0
+#define HAVE_SSE3_EXTERNAL 0
+#define HAVE_SSE4_EXTERNAL 0
+#define HAVE_SSE42_EXTERNAL 0
+#define HAVE_SSSE3_EXTERNAL 0
+#define HAVE_XOP_EXTERNAL 0
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_LSX_EXTERNAL 0
+#define HAVE_LASX_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 1
+#define HAVE_ARMV6_INLINE 1
+#define HAVE_ARMV6T2_INLINE 1
+#define HAVE_ARMV8_INLINE 0
+#define HAVE_NEON_INLINE 1
+#define HAVE_VFP_INLINE 1
+#define HAVE_VFPV3_INLINE 1
+#define HAVE_SETEND_INLINE 1
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_AVX512_INLINE 0
+#define HAVE_AVX512ICL_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_LSX_INLINE 0
+#define HAVE_LASX_INLINE 0
+#define HAVE_ALIGNED_STACK 0
+#define HAVE_FAST_64BIT 0
+#define HAVE_FAST_CLZ 1
+#define HAVE_FAST_CMOV 0
+#define HAVE_LOCAL_ALIGNED 0
+#define HAVE_SIMD_ALIGN_16 1
+#define HAVE_SIMD_ALIGN_32 0
+#define HAVE_SIMD_ALIGN_64 0
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 0
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 1
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_CABS 0
+#define HAVE_CEXP 0
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_X86ASM 0
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 1
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 1
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DIRENT_H 1
+#define HAVE_DXGIDEBUG_H 0
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_LINUX_DMA_BUF_H 0
+#define HAVE_LINUX_PERF_EVENT_H 1
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_MALLOC_H 1
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 0
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 1
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 0
+#define HAVE_LOG2F 0
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 1
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_ARC4RANDOM 1
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_FCNTL 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETAUXVAL 0
+#define HAVE_GETENV 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETMODULEHANDLE 0
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSTDHANDLE 0
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 0
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 0
+#define HAVE_LOCALTIME_R 1
+#define HAVE_LSTAT 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MEMALIGN 1
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_POSIX_MEMALIGN 0
+#define HAVE_PTHREAD_CANCEL 0
+#define HAVE_SCHED_GETAFFINITY 1
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETCONSOLECTRLHANDLER 0
+#define HAVE_SETDLLDIRECTORY 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 1
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 0
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 1
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_ARCH_DIRECTIVE 1
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 1
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 1
+#define HAVE_ASM_MOD_Q 1
+#define HAVE_BLOCKS_EXTENSION 0
+#define HAVE_EBP_AVAILABLE 0
+#define HAVE_EBX_AVAILABLE 0
+#define HAVE_GNU_AS 1
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 0
+#define HAVE_SYMVER_ASM_LABEL 1
+#define HAVE_SYMVER_GNU_ASM 1
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+#define HAVE_KCMVIDEOCODECTYPE_VP9 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_GZIP 1
+#define HAVE_LIBDRM_GETFB2 0
+#define HAVE_MAKEINFO 1
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_XMLLINT 1
+#define HAVE_ZLIB_GZIP 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 1
+#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LCMS2 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGLSLANG 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBJXL 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPENVINO 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPLACEBO 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRABBITMQ 0
+#define CONFIG_LIBRAV1E 0
+#define CONFIG_LIBRIST 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHADERC 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSVTAV1 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUAVS3D 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_MEDIAFOUNDATION 0
+#define CONFIG_METAL 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_VULKAN 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 1
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 1
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LARGE_TESTS 1
+#define CONFIG_LINUX_PERF 1
+#define CONFIG_MACOS_KPERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_PTX_COMPRESSION 0
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_HWACCELS 0
+#define CONFIG_PARSERS 0
+#define CONFIG_INDEVS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_DEMUXERS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_PROTOCOLS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_ATSC_A53 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DEFLATE_WRAPPER 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DOVI_RPU 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 0
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INFLATE_WRAPPER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEG4AUDIO 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEODEC 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 0
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 0
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_android64.h b/media/ffvpx/config_android64.h
new file mode 100644
index 0000000000..f71d2877fa
--- /dev/null
+++ b/media/ffvpx/config_android64.h
@@ -0,0 +1,740 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-decoder='mp3,flac' --disable-static --enable-shared --disable-autodetect --enable-small --target-os=linux --arch=aarch64 --enable-cross-compile --cc=/Users/padenot/.mozbuild/android-ndk-r21d/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android21-clang"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2022
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Android (6454773 based on r365631c2) clang version 9.0.8 (https://android.googlesource.com/toolchain/llvm-project 98c855489587874b2a325e7a516b99d838599c6f) (based on LLVM 9.0.8svn)"
+#define OS_NAME linux
+#define av_restrict restrict
+#define EXTERN_PREFIX ""
+#define EXTERN_ASM
+#define BUILDSUF ""
+#define SLIBSUF ".so"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 1
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_LOONGARCH 0
+#define ARCH_LOONGARCH32 0
+#define ARCH_LOONGARCH64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_RISCV 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 0
+#define ARCH_X86_32 0
+#define ARCH_X86_64 0
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 1
+#define HAVE_NEON 1
+#define HAVE_VFP 1
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 0
+#define HAVE_AMD3DNOW 0
+#define HAVE_AMD3DNOWEXT 0
+#define HAVE_AVX 0
+#define HAVE_AVX2 0
+#define HAVE_AVX512 0
+#define HAVE_AVX512ICL 0
+#define HAVE_FMA3 0
+#define HAVE_FMA4 0
+#define HAVE_MMX 0
+#define HAVE_MMXEXT 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
+#define HAVE_SSE3 0
+#define HAVE_SSE4 0
+#define HAVE_SSE42 0
+#define HAVE_SSSE3 0
+#define HAVE_XOP 0
+#define HAVE_CPUNOP 0
+#define HAVE_I686 0
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_LSX 0
+#define HAVE_LASX 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 1
+#define HAVE_NEON_EXTERNAL 1
+#define HAVE_VFP_EXTERNAL 1
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 0
+#define HAVE_AMD3DNOW_EXTERNAL 0
+#define HAVE_AMD3DNOWEXT_EXTERNAL 0
+#define HAVE_AVX_EXTERNAL 0
+#define HAVE_AVX2_EXTERNAL 0
+#define HAVE_AVX512_EXTERNAL 0
+#define HAVE_AVX512ICL_EXTERNAL 0
+#define HAVE_FMA3_EXTERNAL 0
+#define HAVE_FMA4_EXTERNAL 0
+#define HAVE_MMX_EXTERNAL 0
+#define HAVE_MMXEXT_EXTERNAL 0
+#define HAVE_SSE_EXTERNAL 0
+#define HAVE_SSE2_EXTERNAL 0
+#define HAVE_SSE3_EXTERNAL 0
+#define HAVE_SSE4_EXTERNAL 0
+#define HAVE_SSE42_EXTERNAL 0
+#define HAVE_SSSE3_EXTERNAL 0
+#define HAVE_XOP_EXTERNAL 0
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_LSX_EXTERNAL 0
+#define HAVE_LASX_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 1
+#define HAVE_NEON_INLINE 1
+#define HAVE_VFP_INLINE 1
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_AVX512_INLINE 0
+#define HAVE_AVX512ICL_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_LSX_INLINE 0
+#define HAVE_LASX_INLINE 0
+#define HAVE_ALIGNED_STACK 1
+#define HAVE_FAST_64BIT 1
+#define HAVE_FAST_CLZ 1
+#define HAVE_FAST_CMOV 0
+#define HAVE_LOCAL_ALIGNED 0
+#define HAVE_SIMD_ALIGN_16 1
+#define HAVE_SIMD_ALIGN_32 0
+#define HAVE_SIMD_ALIGN_64 0
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 0
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 1
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_CABS 0
+#define HAVE_CEXP 0
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_X86ASM 0
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 1
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 1
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DIRENT_H 1
+#define HAVE_DXGIDEBUG_H 0
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_LINUX_DMA_BUF_H 0
+#define HAVE_LINUX_PERF_EVENT_H 1
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_MALLOC_H 1
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 0
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 1
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 1
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_ARC4RANDOM 1
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_FCNTL 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETAUXVAL 1
+#define HAVE_GETENV 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETMODULEHANDLE 0
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSTDHANDLE 0
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 0
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 0
+#define HAVE_LOCALTIME_R 1
+#define HAVE_LSTAT 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MEMALIGN 1
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_POSIX_MEMALIGN 1
+#define HAVE_PTHREAD_CANCEL 0
+#define HAVE_SCHED_GETAFFINITY 1
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETCONSOLECTRLHANDLER 0
+#define HAVE_SETDLLDIRECTORY 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 1
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 0
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 1
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 0
+#define HAVE_EBP_AVAILABLE 0
+#define HAVE_EBX_AVAILABLE 0
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 0
+#define HAVE_SYMVER_ASM_LABEL 1
+#define HAVE_SYMVER_GNU_ASM 1
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+#define HAVE_KCMVIDEOCODECTYPE_VP9 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_GZIP 1
+#define HAVE_LIBDRM_GETFB2 0
+#define HAVE_MAKEINFO 1
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_XMLLINT 1
+#define HAVE_ZLIB_GZIP 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 1
+#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LCMS2 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGLSLANG 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBJXL 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPENVINO 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPLACEBO 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRABBITMQ 0
+#define CONFIG_LIBRAV1E 0
+#define CONFIG_LIBRIST 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHADERC 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSVTAV1 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUAVS3D 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_MEDIAFOUNDATION 0
+#define CONFIG_METAL 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_VULKAN 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 1
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 1
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LARGE_TESTS 1
+#define CONFIG_LINUX_PERF 1
+#define CONFIG_MACOS_KPERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_PTX_COMPRESSION 0
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_HWACCELS 0
+#define CONFIG_PARSERS 0
+#define CONFIG_INDEVS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_DEMUXERS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_PROTOCOLS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_ATSC_A53 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DEFLATE_WRAPPER 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DOVI_RPU 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 0
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INFLATE_WRAPPER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEG4AUDIO 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEODEC 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 0
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 0
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_android_x86_64.h b/media/ffvpx/config_android_x86_64.h
new file mode 100644
index 0000000000..06c913cf99
--- /dev/null
+++ b/media/ffvpx/config_android_x86_64.h
@@ -0,0 +1,740 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-parser='vp8,vp9' --enable-decoder='mp3,flac,vp8,vp9' --disable-static --enable-shared --disable-autodetect --enable-small --target-os=linux --arch=x86_64 --enable-cross-compile --cc=/Users/padenot/.mozbuild/android-ndk-r21d/toolchains/llvm/prebuilt/darwin-x86_64/bin/x86_64-linux-android21-clang"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2022
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Android (6454773 based on r365631c2) clang version 9.0.8 (https://android.googlesource.com/toolchain/llvm-project 98c855489587874b2a325e7a516b99d838599c6f) (based on LLVM 9.0.8svn)"
+#define OS_NAME linux
+#define av_restrict restrict
+#define EXTERN_PREFIX ""
+#define EXTERN_ASM
+#define BUILDSUF ""
+#define SLIBSUF ".so"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 0
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_LOONGARCH 0
+#define ARCH_LOONGARCH32 0
+#define ARCH_LOONGARCH64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_RISCV 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 1
+#define ARCH_X86_32 0
+#define ARCH_X86_64 1
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 0
+#define HAVE_NEON 0
+#define HAVE_VFP 0
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 1
+#define HAVE_AMD3DNOW 1
+#define HAVE_AMD3DNOWEXT 1
+#define HAVE_AVX 1
+#define HAVE_AVX2 1
+#define HAVE_AVX512 1
+#define HAVE_AVX512ICL 1
+#define HAVE_FMA3 1
+#define HAVE_FMA4 1
+#define HAVE_MMX 1
+#define HAVE_MMXEXT 1
+#define HAVE_SSE 1
+#define HAVE_SSE2 1
+#define HAVE_SSE3 1
+#define HAVE_SSE4 1
+#define HAVE_SSE42 1
+#define HAVE_SSSE3 1
+#define HAVE_XOP 1
+#define HAVE_CPUNOP 0
+#define HAVE_I686 1
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_LSX 0
+#define HAVE_LASX 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 0
+#define HAVE_NEON_EXTERNAL 0
+#define HAVE_VFP_EXTERNAL 0
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 1
+#define HAVE_AMD3DNOW_EXTERNAL 1
+#define HAVE_AMD3DNOWEXT_EXTERNAL 1
+#define HAVE_AVX_EXTERNAL 1
+#define HAVE_AVX2_EXTERNAL 1
+#define HAVE_AVX512_EXTERNAL 1
+#define HAVE_AVX512ICL_EXTERNAL 1
+#define HAVE_FMA3_EXTERNAL 1
+#define HAVE_FMA4_EXTERNAL 1
+#define HAVE_MMX_EXTERNAL 1
+#define HAVE_MMXEXT_EXTERNAL 1
+#define HAVE_SSE_EXTERNAL 1
+#define HAVE_SSE2_EXTERNAL 1
+#define HAVE_SSE3_EXTERNAL 1
+#define HAVE_SSE4_EXTERNAL 1
+#define HAVE_SSE42_EXTERNAL 1
+#define HAVE_SSSE3_EXTERNAL 1
+#define HAVE_XOP_EXTERNAL 1
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_LSX_EXTERNAL 0
+#define HAVE_LASX_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 0
+#define HAVE_NEON_INLINE 0
+#define HAVE_VFP_INLINE 0
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 1
+#define HAVE_AMD3DNOW_INLINE 1
+#define HAVE_AMD3DNOWEXT_INLINE 1
+#define HAVE_AVX_INLINE 1
+#define HAVE_AVX2_INLINE 1
+#define HAVE_AVX512_INLINE 1
+#define HAVE_AVX512ICL_INLINE 1
+#define HAVE_FMA3_INLINE 1
+#define HAVE_FMA4_INLINE 1
+#define HAVE_MMX_INLINE 1
+#define HAVE_MMXEXT_INLINE 1
+#define HAVE_SSE_INLINE 1
+#define HAVE_SSE2_INLINE 1
+#define HAVE_SSE3_INLINE 1
+#define HAVE_SSE4_INLINE 1
+#define HAVE_SSE42_INLINE 1
+#define HAVE_SSSE3_INLINE 1
+#define HAVE_XOP_INLINE 1
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_LSX_INLINE 0
+#define HAVE_LASX_INLINE 0
+#define HAVE_ALIGNED_STACK 1
+#define HAVE_FAST_64BIT 1
+#define HAVE_FAST_CLZ 1
+#define HAVE_FAST_CMOV 1
+#define HAVE_LOCAL_ALIGNED 1
+#define HAVE_SIMD_ALIGN_16 1
+#define HAVE_SIMD_ALIGN_32 1
+#define HAVE_SIMD_ALIGN_64 1
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 1
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 1
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_CABS 0
+#define HAVE_CEXP 0
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_X86ASM 1
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 1
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 1
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DIRENT_H 1
+#define HAVE_DXGIDEBUG_H 0
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_LINUX_DMA_BUF_H 0
+#define HAVE_LINUX_PERF_EVENT_H 1
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_MALLOC_H 1
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 0
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 0
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 1
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_ARC4RANDOM 1
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_FCNTL 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETAUXVAL 1
+#define HAVE_GETENV 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETMODULEHANDLE 0
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSTDHANDLE 0
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 0
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 0
+#define HAVE_LOCALTIME_R 1
+#define HAVE_LSTAT 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MEMALIGN 1
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_POSIX_MEMALIGN 1
+#define HAVE_PTHREAD_CANCEL 0
+#define HAVE_SCHED_GETAFFINITY 1
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETCONSOLECTRLHANDLER 0
+#define HAVE_SETDLLDIRECTORY 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 1
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 0
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 1
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 0
+#define HAVE_EBP_AVAILABLE 1
+#define HAVE_EBX_AVAILABLE 1
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 1
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 0
+#define HAVE_SYMVER_ASM_LABEL 1
+#define HAVE_SYMVER_GNU_ASM 1
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 1
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+#define HAVE_KCMVIDEOCODECTYPE_VP9 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_GZIP 1
+#define HAVE_LIBDRM_GETFB2 0
+#define HAVE_MAKEINFO 1
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_XMLLINT 1
+#define HAVE_ZLIB_GZIP 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 1
+#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LCMS2 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGLSLANG 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBJXL 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPENVINO 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPLACEBO 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRABBITMQ 0
+#define CONFIG_LIBRAV1E 0
+#define CONFIG_LIBRIST 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHADERC 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSVTAV1 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUAVS3D 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_MEDIAFOUNDATION 0
+#define CONFIG_METAL 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_VULKAN 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 1
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 1
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LARGE_TESTS 1
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MACOS_KPERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_PTX_COMPRESSION 0
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_HWACCELS 0
+#define CONFIG_PARSERS 1
+#define CONFIG_INDEVS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_DEMUXERS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_PROTOCOLS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_ATSC_A53 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DEFLATE_WRAPPER 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DOVI_RPU 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 1
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INFLATE_WRAPPER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEG4AUDIO 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEODEC 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 1
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 1
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_audio.h b/media/ffvpx/config_audio.h
new file mode 100644
index 0000000000..91650e4aef
--- /dev/null
+++ b/media/ffvpx/config_audio.h
@@ -0,0 +1,680 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-everything --disable-protocols --disable-demuxers --disable-muxers --disable-filters --disable-programs --disable-doc --disable-parsers --disable-static --enable-shared --disable-debug --disable-sdl2 --disable-libxcb --disable-securetransport --disable-iconv --disable-swresample --disable-swscale --disable-avdevice --disable-avfilter --disable-avformat --disable-d3d11va --disable-dxva2 --disable-vaapi --disable-vdpau --disable-videotoolbox --enable-decoder=flac --disable-crystalhd --disable-asm --disable-cuda --disable-cuvid --enable-decoder=mp3"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2018
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "gcc 7 (Ubuntu 7.2.0-8ubuntu3)"
+#define av_restrict restrict
+#define EXTERN_PREFIX ""
+#define EXTERN_ASM
+#define BUILDSUF ""
+#define SLIBSUF ".so"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 0
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 0
+#define ARCH_X86_32 0
+#define ARCH_X86_64 0
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 0
+#define HAVE_NEON 0
+#define HAVE_VFP 0
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 0
+#define HAVE_AMD3DNOW 0
+#define HAVE_AMD3DNOWEXT 0
+#define HAVE_AVX 0
+#define HAVE_AVX2 0
+#define HAVE_AVX512 0
+#define HAVE_FMA3 0
+#define HAVE_FMA4 0
+#define HAVE_MMX 0
+#define HAVE_MMXEXT 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
+#define HAVE_SSE3 0
+#define HAVE_SSE4 0
+#define HAVE_SSE42 0
+#define HAVE_SSSE3 0
+#define HAVE_XOP 0
+#define HAVE_CPUNOP 0
+#define HAVE_I686 0
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_MSA2 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 0
+#define HAVE_NEON_EXTERNAL 0
+#define HAVE_VFP_EXTERNAL 0
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 0
+#define HAVE_AMD3DNOW_EXTERNAL 0
+#define HAVE_AMD3DNOWEXT_EXTERNAL 0
+#define HAVE_AVX_EXTERNAL 0
+#define HAVE_AVX2_EXTERNAL 0
+#define HAVE_AVX512_EXTERNAL 0
+#define HAVE_FMA3_EXTERNAL 0
+#define HAVE_FMA4_EXTERNAL 0
+#define HAVE_MMX_EXTERNAL 0
+#define HAVE_MMXEXT_EXTERNAL 0
+#define HAVE_SSE_EXTERNAL 0
+#define HAVE_SSE2_EXTERNAL 0
+#define HAVE_SSE3_EXTERNAL 0
+#define HAVE_SSE4_EXTERNAL 0
+#define HAVE_SSE42_EXTERNAL 0
+#define HAVE_SSSE3_EXTERNAL 0
+#define HAVE_XOP_EXTERNAL 0
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_MSA2_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 0
+#define HAVE_NEON_INLINE 0
+#define HAVE_VFP_INLINE 0
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_AVX512_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_MSA2_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_ALIGNED_STACK 0
+#define HAVE_FAST_64BIT 0
+#define HAVE_FAST_CLZ 0
+#define HAVE_FAST_CMOV 0
+#define HAVE_LOCAL_ALIGNED 1
+#define HAVE_SIMD_ALIGN_16 0
+#define HAVE_SIMD_ALIGN_32 0
+#define HAVE_SIMD_ALIGN_64 0
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 1
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 1
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_CABS 1
+#define HAVE_CEXP 1
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_X86ASM 0
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 0
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 1
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DIRENT_H 1
+#define HAVE_DXGIDEBUG_H 0
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_LINUX_PERF_EVENT_H 0
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 1
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 0
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 0
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_FCNTL 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 1
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 0
+#define HAVE_LSTAT 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_PTHREAD_CANCEL 1
+#define HAVE_SCHED_GETAFFINITY 1
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETCONSOLECTRLHANDLER 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 1
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 0
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 1
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 1
+#define HAVE_EBP_AVAILABLE 1
+#define HAVE_EBX_AVAILABLE 1
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 1
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 1
+#define HAVE_SYMVER_ASM_LABEL 0
+#define HAVE_SYMVER_GNU_ASM 1
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 1
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 1
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 1
+#define HAVE_MAKEINFO 1
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 1
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 1
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 1
+#define CONFIG_AVIO_DIR_CMD_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWAVPACK 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 1
+#define CONFIG_AVFOUNDATION 1
+#define CONFIG_BZLIB 1
+#define CONFIG_COREIMAGE 1
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 1
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 1
+#define CONFIG_ZLIB 1
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 1
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_V4L2_M2M 1
+#define CONFIG_XVMC 1
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 1
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 0
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_LZO 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 1
+#define CONFIG_DECODERS 1
+#define CONFIG_PARSERS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 0
+#define CONFIG_FAANIDCT 0
+#define CONFIG_FDCTDSP 0
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 0
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 0
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 0
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 0
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#define CONFIG_NULL_BSF 1
+#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 0
+#define CONFIG_VP8_DECODER 0
+#define CONFIG_VP9_DECODER 0
+#define CONFIG_MP3_DECODER 1
+#define CONFIG_FLAC_DECODER 1
+#define CONFIG_VP8_PARSER 0
+#define CONFIG_VP9_PARSER 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_components.h b/media/ffvpx/config_components.h
new file mode 100644
index 0000000000..7528cf0314
--- /dev/null
+++ b/media/ffvpx/config_components.h
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MOZ_FFVPX_CONFIG__COMPONENTS_H
+#define MOZ_FFVPX_CONFIG__COMPONENTS_H
+
+
+#ifdef CONFIG_VAAPI
+#undef CONFIG_VAAPI
+#undef CONFIG_VAAPI_1
+#undef CONFIG_VP8_VAAPI_HWACCEL
+#undef CONFIG_VP9_VAAPI_HWACCEL
+#undef CONFIG_AV1_VAAPI_HWACCEL
+#endif
+
+#if defined(MOZ_FFVPX_AUDIOONLY)
+# include "config_components_audio_only.h"
+#else
+# include "config_components_audio_video.h"
+#endif
+
+#include "config_override.h"
+
+#endif // MOZ_FFVPX_CONFIG__COMPONENTS_H
diff --git a/media/ffvpx/config_components_audio_only.h b/media/ffvpx/config_components_audio_only.h
new file mode 100644
index 0000000000..6ca57dbaeb
--- /dev/null
+++ b/media/ffvpx/config_components_audio_only.h
@@ -0,0 +1,2091 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_COMPONENTS_H
+#define FFMPEG_CONFIG_COMPONENTS_H
+#define CONFIG_AAC_ADTSTOASC_BSF 0
+#define CONFIG_AV1_FRAME_MERGE_BSF 0
+#define CONFIG_AV1_FRAME_SPLIT_BSF 0
+#define CONFIG_AV1_METADATA_BSF 0
+#define CONFIG_CHOMP_BSF 0
+#define CONFIG_DUMP_EXTRADATA_BSF 0
+#define CONFIG_DCA_CORE_BSF 0
+#define CONFIG_DV_ERROR_MARKER_BSF 0
+#define CONFIG_EAC3_CORE_BSF 0
+#define CONFIG_EXTRACT_EXTRADATA_BSF 0
+#define CONFIG_FILTER_UNITS_BSF 0
+#define CONFIG_H264_METADATA_BSF 0
+#define CONFIG_H264_MP4TOANNEXB_BSF 0
+#define CONFIG_H264_REDUNDANT_PPS_BSF 0
+#define CONFIG_HAPQA_EXTRACT_BSF 0
+#define CONFIG_HEVC_METADATA_BSF 0
+#define CONFIG_HEVC_MP4TOANNEXB_BSF 0
+#define CONFIG_IMX_DUMP_HEADER_BSF 0
+#define CONFIG_MJPEG2JPEG_BSF 0
+#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0
+#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0
+#define CONFIG_MPEG2_METADATA_BSF 0
+#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0
+#define CONFIG_MOV2TEXTSUB_BSF 0
+#define CONFIG_NOISE_BSF 0
+#define CONFIG_NULL_BSF 0
+#define CONFIG_OPUS_METADATA_BSF 0
+#define CONFIG_PCM_RECHUNK_BSF 0
+#define CONFIG_PGS_FRAME_MERGE_BSF 0
+#define CONFIG_PRORES_METADATA_BSF 0
+#define CONFIG_REMOVE_EXTRADATA_BSF 0
+#define CONFIG_SETTS_BSF 0
+#define CONFIG_TEXT2MOVSUB_BSF 0
+#define CONFIG_TRACE_HEADERS_BSF 0
+#define CONFIG_TRUEHD_CORE_BSF 0
+#define CONFIG_VP9_METADATA_BSF 0
+#define CONFIG_VP9_RAW_REORDER_BSF 0
+#define CONFIG_VP9_SUPERFRAME_BSF 0
+#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 0
+#define CONFIG_AASC_DECODER 0
+#define CONFIG_AIC_DECODER 0
+#define CONFIG_ALIAS_PIX_DECODER 0
+#define CONFIG_AGM_DECODER 0
+#define CONFIG_AMV_DECODER 0
+#define CONFIG_ANM_DECODER 0
+#define CONFIG_ANSI_DECODER 0
+#define CONFIG_APNG_DECODER 0
+#define CONFIG_ARBC_DECODER 0
+#define CONFIG_ARGO_DECODER 0
+#define CONFIG_ASV1_DECODER 0
+#define CONFIG_ASV2_DECODER 0
+#define CONFIG_AURA_DECODER 0
+#define CONFIG_AURA2_DECODER 0
+#define CONFIG_AVRP_DECODER 0
+#define CONFIG_AVRN_DECODER 0
+#define CONFIG_AVS_DECODER 0
+#define CONFIG_AVUI_DECODER 0
+#define CONFIG_AYUV_DECODER 0
+#define CONFIG_BETHSOFTVID_DECODER 0
+#define CONFIG_BFI_DECODER 0
+#define CONFIG_BINK_DECODER 0
+#define CONFIG_BITPACKED_DECODER 0
+#define CONFIG_BMP_DECODER 0
+#define CONFIG_BMV_VIDEO_DECODER 0
+#define CONFIG_BRENDER_PIX_DECODER 0
+#define CONFIG_C93_DECODER 0
+#define CONFIG_CAVS_DECODER 0
+#define CONFIG_CDGRAPHICS_DECODER 0
+#define CONFIG_CDTOONS_DECODER 0
+#define CONFIG_CDXL_DECODER 0
+#define CONFIG_CFHD_DECODER 0
+#define CONFIG_CINEPAK_DECODER 0
+#define CONFIG_CLEARVIDEO_DECODER 0
+#define CONFIG_CLJR_DECODER 0
+#define CONFIG_CLLC_DECODER 0
+#define CONFIG_COMFORTNOISE_DECODER 0
+#define CONFIG_CPIA_DECODER 0
+#define CONFIG_CRI_DECODER 0
+#define CONFIG_CSCD_DECODER 0
+#define CONFIG_CYUV_DECODER 0
+#define CONFIG_DDS_DECODER 0
+#define CONFIG_DFA_DECODER 0
+#define CONFIG_DIRAC_DECODER 0
+#define CONFIG_DNXHD_DECODER 0
+#define CONFIG_DPX_DECODER 0
+#define CONFIG_DSICINVIDEO_DECODER 0
+#define CONFIG_DVAUDIO_DECODER 0
+#define CONFIG_DVVIDEO_DECODER 0
+#define CONFIG_DXA_DECODER 0
+#define CONFIG_DXTORY_DECODER 0
+#define CONFIG_DXV_DECODER 0
+#define CONFIG_EACMV_DECODER 0
+#define CONFIG_EAMAD_DECODER 0
+#define CONFIG_EATGQ_DECODER 0
+#define CONFIG_EATGV_DECODER 0
+#define CONFIG_EATQI_DECODER 0
+#define CONFIG_EIGHTBPS_DECODER 0
+#define CONFIG_EIGHTSVX_EXP_DECODER 0
+#define CONFIG_EIGHTSVX_FIB_DECODER 0
+#define CONFIG_ESCAPE124_DECODER 0
+#define CONFIG_ESCAPE130_DECODER 0
+#define CONFIG_EXR_DECODER 0
+#define CONFIG_FFV1_DECODER 0
+#define CONFIG_FFVHUFF_DECODER 0
+#define CONFIG_FIC_DECODER 0
+#define CONFIG_FITS_DECODER 0
+#define CONFIG_FLASHSV_DECODER 0
+#define CONFIG_FLASHSV2_DECODER 0
+#define CONFIG_FLIC_DECODER 0
+#define CONFIG_FLV_DECODER 0
+#define CONFIG_FMVC_DECODER 0
+#define CONFIG_FOURXM_DECODER 0
+#define CONFIG_FRAPS_DECODER 0
+#define CONFIG_FRWU_DECODER 0
+#define CONFIG_G2M_DECODER 0
+#define CONFIG_GDV_DECODER 0
+#define CONFIG_GEM_DECODER 0
+#define CONFIG_GIF_DECODER 0
+#define CONFIG_H261_DECODER 0
+#define CONFIG_H263_DECODER 0
+#define CONFIG_H263I_DECODER 0
+#define CONFIG_H263P_DECODER 0
+#define CONFIG_H263_V4L2M2M_DECODER 0
+#define CONFIG_H264_DECODER 0
+#define CONFIG_H264_CRYSTALHD_DECODER 0
+#define CONFIG_H264_V4L2M2M_DECODER 0
+#define CONFIG_H264_MEDIACODEC_DECODER 0
+#define CONFIG_H264_MMAL_DECODER 0
+#define CONFIG_H264_QSV_DECODER 0
+#define CONFIG_H264_RKMPP_DECODER 0
+#define CONFIG_HAP_DECODER 0
+#define CONFIG_HEVC_DECODER 0
+#define CONFIG_HEVC_QSV_DECODER 0
+#define CONFIG_HEVC_RKMPP_DECODER 0
+#define CONFIG_HEVC_V4L2M2M_DECODER 0
+#define CONFIG_HNM4_VIDEO_DECODER 0
+#define CONFIG_HQ_HQA_DECODER 0
+#define CONFIG_HQX_DECODER 0
+#define CONFIG_HUFFYUV_DECODER 0
+#define CONFIG_HYMT_DECODER 0
+#define CONFIG_IDCIN_DECODER 0
+#define CONFIG_IFF_ILBM_DECODER 0
+#define CONFIG_IMM4_DECODER 0
+#define CONFIG_IMM5_DECODER 0
+#define CONFIG_INDEO2_DECODER 0
+#define CONFIG_INDEO3_DECODER 0
+#define CONFIG_INDEO4_DECODER 0
+#define CONFIG_INDEO5_DECODER 0
+#define CONFIG_INTERPLAY_VIDEO_DECODER 0
+#define CONFIG_IPU_DECODER 0
+#define CONFIG_JPEG2000_DECODER 0
+#define CONFIG_JPEGLS_DECODER 0
+#define CONFIG_JV_DECODER 0
+#define CONFIG_KGV1_DECODER 0
+#define CONFIG_KMVC_DECODER 0
+#define CONFIG_LAGARITH_DECODER 0
+#define CONFIG_LOCO_DECODER 0
+#define CONFIG_LSCR_DECODER 0
+#define CONFIG_M101_DECODER 0
+#define CONFIG_MAGICYUV_DECODER 0
+#define CONFIG_MDEC_DECODER 0
+#define CONFIG_MIMIC_DECODER 0
+#define CONFIG_MJPEG_DECODER 0
+#define CONFIG_MJPEGB_DECODER 0
+#define CONFIG_MMVIDEO_DECODER 0
+#define CONFIG_MOBICLIP_DECODER 0
+#define CONFIG_MOTIONPIXELS_DECODER 0
+#define CONFIG_MPEG1VIDEO_DECODER 0
+#define CONFIG_MPEG2VIDEO_DECODER 0
+#define CONFIG_MPEG4_DECODER 0
+#define CONFIG_MPEG4_CRYSTALHD_DECODER 0
+#define CONFIG_MPEG4_V4L2M2M_DECODER 0
+#define CONFIG_MPEG4_MMAL_DECODER 0
+#define CONFIG_MPEGVIDEO_DECODER 0
+#define CONFIG_MPEG1_V4L2M2M_DECODER 0
+#define CONFIG_MPEG2_MMAL_DECODER 0
+#define CONFIG_MPEG2_CRYSTALHD_DECODER 0
+#define CONFIG_MPEG2_V4L2M2M_DECODER 0
+#define CONFIG_MPEG2_QSV_DECODER 0
+#define CONFIG_MPEG2_MEDIACODEC_DECODER 0
+#define CONFIG_MSA1_DECODER 0
+#define CONFIG_MSCC_DECODER 0
+#define CONFIG_MSMPEG4V1_DECODER 0
+#define CONFIG_MSMPEG4V2_DECODER 0
+#define CONFIG_MSMPEG4V3_DECODER 0
+#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0
+#define CONFIG_MSP2_DECODER 0
+#define CONFIG_MSRLE_DECODER 0
+#define CONFIG_MSS1_DECODER 0
+#define CONFIG_MSS2_DECODER 0
+#define CONFIG_MSVIDEO1_DECODER 0
+#define CONFIG_MSZH_DECODER 0
+#define CONFIG_MTS2_DECODER 0
+#define CONFIG_MV30_DECODER 0
+#define CONFIG_MVC1_DECODER 0
+#define CONFIG_MVC2_DECODER 0
+#define CONFIG_MVDV_DECODER 0
+#define CONFIG_MVHA_DECODER 0
+#define CONFIG_MWSC_DECODER 0
+#define CONFIG_MXPEG_DECODER 0
+#define CONFIG_NOTCHLC_DECODER 0
+#define CONFIG_NUV_DECODER 0
+#define CONFIG_PAF_VIDEO_DECODER 0
+#define CONFIG_PAM_DECODER 0
+#define CONFIG_PBM_DECODER 0
+#define CONFIG_PCX_DECODER 0
+#define CONFIG_PFM_DECODER 0
+#define CONFIG_PGM_DECODER 0
+#define CONFIG_PGMYUV_DECODER 0
+#define CONFIG_PGX_DECODER 0
+#define CONFIG_PHOTOCD_DECODER 0
+#define CONFIG_PICTOR_DECODER 0
+#define CONFIG_PIXLET_DECODER 0
+#define CONFIG_PNG_DECODER 0
+#define CONFIG_PPM_DECODER 0
+#define CONFIG_PRORES_DECODER 0
+#define CONFIG_PROSUMER_DECODER 0
+#define CONFIG_PSD_DECODER 0
+#define CONFIG_PTX_DECODER 0
+#define CONFIG_QDRAW_DECODER 0
+#define CONFIG_QOI_DECODER 0
+#define CONFIG_QPEG_DECODER 0
+#define CONFIG_QTRLE_DECODER 0
+#define CONFIG_R10K_DECODER 0
+#define CONFIG_R210_DECODER 0
+#define CONFIG_RASC_DECODER 0
+#define CONFIG_RAWVIDEO_DECODER 0
+#define CONFIG_RL2_DECODER 0
+#define CONFIG_ROQ_DECODER 0
+#define CONFIG_RPZA_DECODER 0
+#define CONFIG_RSCC_DECODER 0
+#define CONFIG_RV10_DECODER 0
+#define CONFIG_RV20_DECODER 0
+#define CONFIG_RV30_DECODER 0
+#define CONFIG_RV40_DECODER 0
+#define CONFIG_S302M_DECODER 0
+#define CONFIG_SANM_DECODER 0
+#define CONFIG_SCPR_DECODER 0
+#define CONFIG_SCREENPRESSO_DECODER 0
+#define CONFIG_SGA_DECODER 0
+#define CONFIG_SGI_DECODER 0
+#define CONFIG_SGIRLE_DECODER 0
+#define CONFIG_SHEERVIDEO_DECODER 0
+#define CONFIG_SIMBIOSIS_IMX_DECODER 0
+#define CONFIG_SMACKER_DECODER 0
+#define CONFIG_SMC_DECODER 0
+#define CONFIG_SMVJPEG_DECODER 0
+#define CONFIG_SNOW_DECODER 0
+#define CONFIG_SP5X_DECODER 0
+#define CONFIG_SPEEDHQ_DECODER 0
+#define CONFIG_SPEEX_DECODER 0
+#define CONFIG_SRGC_DECODER 0
+#define CONFIG_SUNRAST_DECODER 0
+#define CONFIG_SVQ1_DECODER 0
+#define CONFIG_SVQ3_DECODER 0
+#define CONFIG_TARGA_DECODER 0
+#define CONFIG_TARGA_Y216_DECODER 0
+#define CONFIG_TDSC_DECODER 0
+#define CONFIG_THEORA_DECODER 0
+#define CONFIG_THP_DECODER 0
+#define CONFIG_TIERTEXSEQVIDEO_DECODER 0
+#define CONFIG_TIFF_DECODER 0
+#define CONFIG_TMV_DECODER 0
+#define CONFIG_TRUEMOTION1_DECODER 0
+#define CONFIG_TRUEMOTION2_DECODER 0
+#define CONFIG_TRUEMOTION2RT_DECODER 0
+#define CONFIG_TSCC_DECODER 0
+#define CONFIG_TSCC2_DECODER 0
+#define CONFIG_TXD_DECODER 0
+#define CONFIG_ULTI_DECODER 0
+#define CONFIG_UTVIDEO_DECODER 0
+#define CONFIG_V210_DECODER 0
+#define CONFIG_V210X_DECODER 0
+#define CONFIG_V308_DECODER 0
+#define CONFIG_V408_DECODER 0
+#define CONFIG_V410_DECODER 0
+#define CONFIG_VB_DECODER 0
+#define CONFIG_VBN_DECODER 0
+#define CONFIG_VBLE_DECODER 0
+#define CONFIG_VC1_DECODER 0
+#define CONFIG_VC1_CRYSTALHD_DECODER 0
+#define CONFIG_VC1IMAGE_DECODER 0
+#define CONFIG_VC1_MMAL_DECODER 0
+#define CONFIG_VC1_QSV_DECODER 0
+#define CONFIG_VC1_V4L2M2M_DECODER 0
+#define CONFIG_VCR1_DECODER 0
+#define CONFIG_VMDVIDEO_DECODER 0
+#define CONFIG_VMNC_DECODER 0
+#define CONFIG_VP3_DECODER 0
+#define CONFIG_VP4_DECODER 0
+#define CONFIG_VP5_DECODER 0
+#define CONFIG_VP6_DECODER 0
+#define CONFIG_VP6A_DECODER 0
+#define CONFIG_VP6F_DECODER 0
+#define CONFIG_VP7_DECODER 0
+#define CONFIG_VP8_DECODER 0
+#define CONFIG_VP8_RKMPP_DECODER 0
+#define CONFIG_VP8_V4L2M2M_DECODER 0
+#define CONFIG_VP9_DECODER 0
+#define CONFIG_VP9_RKMPP_DECODER 0
+#define CONFIG_VP9_V4L2M2M_DECODER 0
+#define CONFIG_VQA_DECODER 0
+#define CONFIG_WEBP_DECODER 0
+#define CONFIG_WCMV_DECODER 0
+#define CONFIG_WRAPPED_AVFRAME_DECODER 0
+#define CONFIG_WMV1_DECODER 0
+#define CONFIG_WMV2_DECODER 0
+#define CONFIG_WMV3_DECODER 0
+#define CONFIG_WMV3_CRYSTALHD_DECODER 0
+#define CONFIG_WMV3IMAGE_DECODER 0
+#define CONFIG_WNV1_DECODER 0
+#define CONFIG_XAN_WC3_DECODER 0
+#define CONFIG_XAN_WC4_DECODER 0
+#define CONFIG_XBM_DECODER 0
+#define CONFIG_XFACE_DECODER 0
+#define CONFIG_XL_DECODER 0
+#define CONFIG_XPM_DECODER 0
+#define CONFIG_XWD_DECODER 0
+#define CONFIG_Y41P_DECODER 0
+#define CONFIG_YLC_DECODER 0
+#define CONFIG_YOP_DECODER 0
+#define CONFIG_YUV4_DECODER 0
+#define CONFIG_ZERO12V_DECODER 0
+#define CONFIG_ZEROCODEC_DECODER 0
+#define CONFIG_ZLIB_DECODER 0
+#define CONFIG_ZMBV_DECODER 0
+#define CONFIG_AAC_DECODER 0
+#define CONFIG_AAC_FIXED_DECODER 0
+#define CONFIG_AAC_LATM_DECODER 0
+#define CONFIG_AC3_DECODER 0
+#define CONFIG_AC3_FIXED_DECODER 0
+#define CONFIG_ACELP_KELVIN_DECODER 0
+#define CONFIG_ALAC_DECODER 0
+#define CONFIG_ALS_DECODER 0
+#define CONFIG_AMRNB_DECODER 0
+#define CONFIG_AMRWB_DECODER 0
+#define CONFIG_APE_DECODER 0
+#define CONFIG_APTX_DECODER 0
+#define CONFIG_APTX_HD_DECODER 0
+#define CONFIG_ATRAC1_DECODER 0
+#define CONFIG_ATRAC3_DECODER 0
+#define CONFIG_ATRAC3AL_DECODER 0
+#define CONFIG_ATRAC3P_DECODER 0
+#define CONFIG_ATRAC3PAL_DECODER 0
+#define CONFIG_ATRAC9_DECODER 0
+#define CONFIG_BINKAUDIO_DCT_DECODER 0
+#define CONFIG_BINKAUDIO_RDFT_DECODER 0
+#define CONFIG_BMV_AUDIO_DECODER 0
+#define CONFIG_COOK_DECODER 0
+#define CONFIG_DCA_DECODER 0
+#define CONFIG_DFPWM_DECODER 0
+#define CONFIG_DOLBY_E_DECODER 0
+#define CONFIG_DSD_LSBF_DECODER 0
+#define CONFIG_DSD_MSBF_DECODER 0
+#define CONFIG_DSD_LSBF_PLANAR_DECODER 0
+#define CONFIG_DSD_MSBF_PLANAR_DECODER 0
+#define CONFIG_DSICINAUDIO_DECODER 0
+#define CONFIG_DSS_SP_DECODER 0
+#define CONFIG_DST_DECODER 0
+#define CONFIG_EAC3_DECODER 0
+#define CONFIG_EVRC_DECODER 0
+#define CONFIG_FASTAUDIO_DECODER 0
+#define CONFIG_FFWAVESYNTH_DECODER 0
+#define CONFIG_FLAC_DECODER 1
+#define CONFIG_G723_1_DECODER 0
+#define CONFIG_G729_DECODER 0
+#define CONFIG_GSM_DECODER 0
+#define CONFIG_GSM_MS_DECODER 0
+#define CONFIG_HCA_DECODER 0
+#define CONFIG_HCOM_DECODER 0
+#define CONFIG_IAC_DECODER 0
+#define CONFIG_ILBC_DECODER 0
+#define CONFIG_IMC_DECODER 0
+#define CONFIG_INTERPLAY_ACM_DECODER 0
+#define CONFIG_MACE3_DECODER 0
+#define CONFIG_MACE6_DECODER 0
+#define CONFIG_METASOUND_DECODER 0
+#define CONFIG_MLP_DECODER 0
+#define CONFIG_MP1_DECODER 0
+#define CONFIG_MP1FLOAT_DECODER 0
+#define CONFIG_MP2_DECODER 0
+#define CONFIG_MP2FLOAT_DECODER 0
+#define CONFIG_MP3FLOAT_DECODER 0
+#define CONFIG_MP3_DECODER 1
+#define CONFIG_MP3ADUFLOAT_DECODER 0
+#define CONFIG_MP3ADU_DECODER 0
+#define CONFIG_MP3ON4FLOAT_DECODER 0
+#define CONFIG_MP3ON4_DECODER 0
+#define CONFIG_MPC7_DECODER 0
+#define CONFIG_MPC8_DECODER 0
+#define CONFIG_MSNSIREN_DECODER 0
+#define CONFIG_NELLYMOSER_DECODER 0
+#define CONFIG_ON2AVC_DECODER 0
+#define CONFIG_OPUS_DECODER 0
+#define CONFIG_PAF_AUDIO_DECODER 0
+#define CONFIG_QCELP_DECODER 0
+#define CONFIG_QDM2_DECODER 0
+#define CONFIG_QDMC_DECODER 0
+#define CONFIG_RA_144_DECODER 0
+#define CONFIG_RA_288_DECODER 0
+#define CONFIG_RALF_DECODER 0
+#define CONFIG_SBC_DECODER 0
+#define CONFIG_SHORTEN_DECODER 0
+#define CONFIG_SIPR_DECODER 0
+#define CONFIG_SIREN_DECODER 0
+#define CONFIG_SMACKAUD_DECODER 0
+#define CONFIG_SONIC_DECODER 0
+#define CONFIG_TAK_DECODER 0
+#define CONFIG_TRUEHD_DECODER 0
+#define CONFIG_TRUESPEECH_DECODER 0
+#define CONFIG_TTA_DECODER 0
+#define CONFIG_TWINVQ_DECODER 0
+#define CONFIG_VMDAUDIO_DECODER 0
+#define CONFIG_VORBIS_DECODER 0
+#define CONFIG_WAVPACK_DECODER 0
+#define CONFIG_WMALOSSLESS_DECODER 0
+#define CONFIG_WMAPRO_DECODER 0
+#define CONFIG_WMAV1_DECODER 0
+#define CONFIG_WMAV2_DECODER 0
+#define CONFIG_WMAVOICE_DECODER 0
+#define CONFIG_WS_SND1_DECODER 0
+#define CONFIG_XMA1_DECODER 0
+#define CONFIG_XMA2_DECODER 0
+#define CONFIG_PCM_ALAW_DECODER 0
+#define CONFIG_PCM_BLURAY_DECODER 0
+#define CONFIG_PCM_DVD_DECODER 0
+#define CONFIG_PCM_F16LE_DECODER 0
+#define CONFIG_PCM_F24LE_DECODER 0
+#define CONFIG_PCM_F32BE_DECODER 0
+#define CONFIG_PCM_F32LE_DECODER 0
+#define CONFIG_PCM_F64BE_DECODER 0
+#define CONFIG_PCM_F64LE_DECODER 0
+#define CONFIG_PCM_LXF_DECODER 0
+#define CONFIG_PCM_MULAW_DECODER 0
+#define CONFIG_PCM_S8_DECODER 0
+#define CONFIG_PCM_S8_PLANAR_DECODER 0
+#define CONFIG_PCM_S16BE_DECODER 0
+#define CONFIG_PCM_S16BE_PLANAR_DECODER 0
+#define CONFIG_PCM_S16LE_DECODER 0
+#define CONFIG_PCM_S16LE_PLANAR_DECODER 0
+#define CONFIG_PCM_S24BE_DECODER 0
+#define CONFIG_PCM_S24DAUD_DECODER 0
+#define CONFIG_PCM_S24LE_DECODER 0
+#define CONFIG_PCM_S24LE_PLANAR_DECODER 0
+#define CONFIG_PCM_S32BE_DECODER 0
+#define CONFIG_PCM_S32LE_DECODER 0
+#define CONFIG_PCM_S32LE_PLANAR_DECODER 0
+#define CONFIG_PCM_S64BE_DECODER 0
+#define CONFIG_PCM_S64LE_DECODER 0
+#define CONFIG_PCM_SGA_DECODER 0
+#define CONFIG_PCM_U8_DECODER 0
+#define CONFIG_PCM_U16BE_DECODER 0
+#define CONFIG_PCM_U16LE_DECODER 0
+#define CONFIG_PCM_U24BE_DECODER 0
+#define CONFIG_PCM_U24LE_DECODER 0
+#define CONFIG_PCM_U32BE_DECODER 0
+#define CONFIG_PCM_U32LE_DECODER 0
+#define CONFIG_PCM_VIDC_DECODER 0
+#define CONFIG_DERF_DPCM_DECODER 0
+#define CONFIG_GREMLIN_DPCM_DECODER 0
+#define CONFIG_INTERPLAY_DPCM_DECODER 0
+#define CONFIG_ROQ_DPCM_DECODER 0
+#define CONFIG_SDX2_DPCM_DECODER 0
+#define CONFIG_SOL_DPCM_DECODER 0
+#define CONFIG_XAN_DPCM_DECODER 0
+#define CONFIG_ADPCM_4XM_DECODER 0
+#define CONFIG_ADPCM_ADX_DECODER 0
+#define CONFIG_ADPCM_AFC_DECODER 0
+#define CONFIG_ADPCM_AGM_DECODER 0
+#define CONFIG_ADPCM_AICA_DECODER 0
+#define CONFIG_ADPCM_ARGO_DECODER 0
+#define CONFIG_ADPCM_CT_DECODER 0
+#define CONFIG_ADPCM_DTK_DECODER 0
+#define CONFIG_ADPCM_EA_DECODER 0
+#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0
+#define CONFIG_ADPCM_EA_R1_DECODER 0
+#define CONFIG_ADPCM_EA_R2_DECODER 0
+#define CONFIG_ADPCM_EA_R3_DECODER 0
+#define CONFIG_ADPCM_EA_XAS_DECODER 0
+#define CONFIG_ADPCM_G722_DECODER 0
+#define CONFIG_ADPCM_G726_DECODER 0
+#define CONFIG_ADPCM_G726LE_DECODER 0
+#define CONFIG_ADPCM_IMA_ACORN_DECODER 0
+#define CONFIG_ADPCM_IMA_AMV_DECODER 0
+#define CONFIG_ADPCM_IMA_ALP_DECODER 0
+#define CONFIG_ADPCM_IMA_APC_DECODER 0
+#define CONFIG_ADPCM_IMA_APM_DECODER 0
+#define CONFIG_ADPCM_IMA_CUNNING_DECODER 0
+#define CONFIG_ADPCM_IMA_DAT4_DECODER 0
+#define CONFIG_ADPCM_IMA_DK3_DECODER 0
+#define CONFIG_ADPCM_IMA_DK4_DECODER 0
+#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0
+#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0
+#define CONFIG_ADPCM_IMA_ISS_DECODER 0
+#define CONFIG_ADPCM_IMA_MOFLEX_DECODER 0
+#define CONFIG_ADPCM_IMA_MTF_DECODER 0
+#define CONFIG_ADPCM_IMA_OKI_DECODER 0
+#define CONFIG_ADPCM_IMA_QT_DECODER 0
+#define CONFIG_ADPCM_IMA_RAD_DECODER 0
+#define CONFIG_ADPCM_IMA_SSI_DECODER 0
+#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0
+#define CONFIG_ADPCM_IMA_WAV_DECODER 0
+#define CONFIG_ADPCM_IMA_WS_DECODER 0
+#define CONFIG_ADPCM_MS_DECODER 0
+#define CONFIG_ADPCM_MTAF_DECODER 0
+#define CONFIG_ADPCM_PSX_DECODER 0
+#define CONFIG_ADPCM_SBPRO_2_DECODER 0
+#define CONFIG_ADPCM_SBPRO_3_DECODER 0
+#define CONFIG_ADPCM_SBPRO_4_DECODER 0
+#define CONFIG_ADPCM_SWF_DECODER 0
+#define CONFIG_ADPCM_THP_DECODER 0
+#define CONFIG_ADPCM_THP_LE_DECODER 0
+#define CONFIG_ADPCM_VIMA_DECODER 0
+#define CONFIG_ADPCM_XA_DECODER 0
+#define CONFIG_ADPCM_YAMAHA_DECODER 0
+#define CONFIG_ADPCM_ZORK_DECODER 0
+#define CONFIG_SSA_DECODER 0
+#define CONFIG_ASS_DECODER 0
+#define CONFIG_CCAPTION_DECODER 0
+#define CONFIG_DVBSUB_DECODER 0
+#define CONFIG_DVDSUB_DECODER 0
+#define CONFIG_JACOSUB_DECODER 0
+#define CONFIG_MICRODVD_DECODER 0
+#define CONFIG_MOVTEXT_DECODER 0
+#define CONFIG_MPL2_DECODER 0
+#define CONFIG_PGSSUB_DECODER 0
+#define CONFIG_PJS_DECODER 0
+#define CONFIG_REALTEXT_DECODER 0
+#define CONFIG_SAMI_DECODER 0
+#define CONFIG_SRT_DECODER 0
+#define CONFIG_STL_DECODER 0
+#define CONFIG_SUBRIP_DECODER 0
+#define CONFIG_SUBVIEWER_DECODER 0
+#define CONFIG_SUBVIEWER1_DECODER 0
+#define CONFIG_TEXT_DECODER 0
+#define CONFIG_VPLAYER_DECODER 0
+#define CONFIG_WEBVTT_DECODER 0
+#define CONFIG_XSUB_DECODER 0
+#define CONFIG_AAC_AT_DECODER 0
+#define CONFIG_AC3_AT_DECODER 0
+#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0
+#define CONFIG_ALAC_AT_DECODER 0
+#define CONFIG_AMR_NB_AT_DECODER 0
+#define CONFIG_EAC3_AT_DECODER 0
+#define CONFIG_GSM_MS_AT_DECODER 0
+#define CONFIG_ILBC_AT_DECODER 0
+#define CONFIG_MP1_AT_DECODER 0
+#define CONFIG_MP2_AT_DECODER 0
+#define CONFIG_MP3_AT_DECODER 0
+#define CONFIG_PCM_ALAW_AT_DECODER 0
+#define CONFIG_PCM_MULAW_AT_DECODER 0
+#define CONFIG_QDMC_AT_DECODER 0
+#define CONFIG_QDM2_AT_DECODER 0
+#define CONFIG_LIBARIBB24_DECODER 0
+#define CONFIG_LIBCELT_DECODER 0
+#define CONFIG_LIBCODEC2_DECODER 0
+#define CONFIG_LIBDAV1D_DECODER 0
+#define CONFIG_LIBDAVS2_DECODER 0
+#define CONFIG_LIBFDK_AAC_DECODER 0
+#define CONFIG_LIBGSM_DECODER 0
+#define CONFIG_LIBGSM_MS_DECODER 0
+#define CONFIG_LIBILBC_DECODER 0
+#define CONFIG_LIBJXL_DECODER 0
+#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0
+#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0
+#define CONFIG_LIBOPENJPEG_DECODER 0
+#define CONFIG_LIBOPUS_DECODER 0
+#define CONFIG_LIBRSVG_DECODER 0
+#define CONFIG_LIBSPEEX_DECODER 0
+#define CONFIG_LIBUAVS3D_DECODER 0
+#define CONFIG_LIBVORBIS_DECODER 0
+#define CONFIG_LIBVPX_VP8_DECODER 0
+#define CONFIG_LIBVPX_VP9_DECODER 0
+#define CONFIG_LIBZVBI_TELETEXT_DECODER 0
+#define CONFIG_BINTEXT_DECODER 0
+#define CONFIG_XBIN_DECODER 0
+#define CONFIG_IDF_DECODER 0
+#define CONFIG_LIBAOM_AV1_DECODER 0
+#define CONFIG_AV1_DECODER 0
+#define CONFIG_AV1_CUVID_DECODER 0
+#define CONFIG_AV1_QSV_DECODER 0
+#define CONFIG_LIBOPENH264_DECODER 0
+#define CONFIG_H264_CUVID_DECODER 0
+#define CONFIG_HEVC_CUVID_DECODER 0
+#define CONFIG_HEVC_MEDIACODEC_DECODER 0
+#define CONFIG_MJPEG_CUVID_DECODER 0
+#define CONFIG_MJPEG_QSV_DECODER 0
+#define CONFIG_MPEG1_CUVID_DECODER 0
+#define CONFIG_MPEG2_CUVID_DECODER 0
+#define CONFIG_MPEG4_CUVID_DECODER 0
+#define CONFIG_MPEG4_MEDIACODEC_DECODER 0
+#define CONFIG_VC1_CUVID_DECODER 0
+#define CONFIG_VP8_CUVID_DECODER 0
+#define CONFIG_VP8_MEDIACODEC_DECODER 0
+#define CONFIG_VP8_QSV_DECODER 0
+#define CONFIG_VP9_CUVID_DECODER 0
+#define CONFIG_VP9_MEDIACODEC_DECODER 0
+#define CONFIG_VP9_QSV_DECODER 0
+#define CONFIG_A64MULTI_ENCODER 0
+#define CONFIG_A64MULTI5_ENCODER 0
+#define CONFIG_ALIAS_PIX_ENCODER 0
+#define CONFIG_AMV_ENCODER 0
+#define CONFIG_APNG_ENCODER 0
+#define CONFIG_ASV1_ENCODER 0
+#define CONFIG_ASV2_ENCODER 0
+#define CONFIG_AVRP_ENCODER 0
+#define CONFIG_AVUI_ENCODER 0
+#define CONFIG_AYUV_ENCODER 0
+#define CONFIG_BITPACKED_ENCODER 0
+#define CONFIG_BMP_ENCODER 0
+#define CONFIG_CFHD_ENCODER 0
+#define CONFIG_CINEPAK_ENCODER 0
+#define CONFIG_CLJR_ENCODER 0
+#define CONFIG_COMFORTNOISE_ENCODER 0
+#define CONFIG_DNXHD_ENCODER 0
+#define CONFIG_DPX_ENCODER 0
+#define CONFIG_DVVIDEO_ENCODER 0
+#define CONFIG_EXR_ENCODER 0
+#define CONFIG_FFV1_ENCODER 0
+#define CONFIG_FFVHUFF_ENCODER 0
+#define CONFIG_FITS_ENCODER 0
+#define CONFIG_FLASHSV_ENCODER 0
+#define CONFIG_FLASHSV2_ENCODER 0
+#define CONFIG_FLV_ENCODER 0
+#define CONFIG_GIF_ENCODER 0
+#define CONFIG_H261_ENCODER 0
+#define CONFIG_H263_ENCODER 0
+#define CONFIG_H263P_ENCODER 0
+#define CONFIG_HAP_ENCODER 0
+#define CONFIG_HUFFYUV_ENCODER 0
+#define CONFIG_JPEG2000_ENCODER 0
+#define CONFIG_JPEGLS_ENCODER 0
+#define CONFIG_LJPEG_ENCODER 0
+#define CONFIG_MAGICYUV_ENCODER 0
+#define CONFIG_MJPEG_ENCODER 0
+#define CONFIG_MPEG1VIDEO_ENCODER 0
+#define CONFIG_MPEG2VIDEO_ENCODER 0
+#define CONFIG_MPEG4_ENCODER 0
+#define CONFIG_MSMPEG4V2_ENCODER 0
+#define CONFIG_MSMPEG4V3_ENCODER 0
+#define CONFIG_MSVIDEO1_ENCODER 0
+#define CONFIG_PAM_ENCODER 0
+#define CONFIG_PBM_ENCODER 0
+#define CONFIG_PCX_ENCODER 0
+#define CONFIG_PFM_ENCODER 0
+#define CONFIG_PGM_ENCODER 0
+#define CONFIG_PGMYUV_ENCODER 0
+#define CONFIG_PNG_ENCODER 0
+#define CONFIG_PPM_ENCODER 0
+#define CONFIG_PRORES_ENCODER 0
+#define CONFIG_PRORES_AW_ENCODER 0
+#define CONFIG_PRORES_KS_ENCODER 0
+#define CONFIG_QOI_ENCODER 0
+#define CONFIG_QTRLE_ENCODER 0
+#define CONFIG_R10K_ENCODER 0
+#define CONFIG_R210_ENCODER 0
+#define CONFIG_RAWVIDEO_ENCODER 0
+#define CONFIG_ROQ_ENCODER 0
+#define CONFIG_RPZA_ENCODER 0
+#define CONFIG_RV10_ENCODER 0
+#define CONFIG_RV20_ENCODER 0
+#define CONFIG_S302M_ENCODER 0
+#define CONFIG_SGI_ENCODER 0
+#define CONFIG_SMC_ENCODER 0
+#define CONFIG_SNOW_ENCODER 0
+#define CONFIG_SPEEDHQ_ENCODER 0
+#define CONFIG_SUNRAST_ENCODER 0
+#define CONFIG_SVQ1_ENCODER 0
+#define CONFIG_TARGA_ENCODER 0
+#define CONFIG_TIFF_ENCODER 0
+#define CONFIG_UTVIDEO_ENCODER 0
+#define CONFIG_V210_ENCODER 0
+#define CONFIG_V308_ENCODER 0
+#define CONFIG_V408_ENCODER 0
+#define CONFIG_V410_ENCODER 0
+#define CONFIG_VBN_ENCODER 0
+#define CONFIG_VC2_ENCODER 0
+#define CONFIG_WRAPPED_AVFRAME_ENCODER 0
+#define CONFIG_WMV1_ENCODER 0
+#define CONFIG_WMV2_ENCODER 0
+#define CONFIG_XBM_ENCODER 0
+#define CONFIG_XFACE_ENCODER 0
+#define CONFIG_XWD_ENCODER 0
+#define CONFIG_Y41P_ENCODER 0
+#define CONFIG_YUV4_ENCODER 0
+#define CONFIG_ZLIB_ENCODER 0
+#define CONFIG_ZMBV_ENCODER 0
+#define CONFIG_AAC_ENCODER 0
+#define CONFIG_AC3_ENCODER 0
+#define CONFIG_AC3_FIXED_ENCODER 0
+#define CONFIG_ALAC_ENCODER 0
+#define CONFIG_APTX_ENCODER 0
+#define CONFIG_APTX_HD_ENCODER 0
+#define CONFIG_DCA_ENCODER 0
+#define CONFIG_DFPWM_ENCODER 0
+#define CONFIG_EAC3_ENCODER 0
+#define CONFIG_FLAC_ENCODER 0
+#define CONFIG_G723_1_ENCODER 0
+#define CONFIG_MLP_ENCODER 0
+#define CONFIG_MP2_ENCODER 0
+#define CONFIG_MP2FIXED_ENCODER 0
+#define CONFIG_NELLYMOSER_ENCODER 0
+#define CONFIG_OPUS_ENCODER 0
+#define CONFIG_RA_144_ENCODER 0
+#define CONFIG_SBC_ENCODER 0
+#define CONFIG_SONIC_ENCODER 0
+#define CONFIG_SONIC_LS_ENCODER 0
+#define CONFIG_TRUEHD_ENCODER 0
+#define CONFIG_TTA_ENCODER 0
+#define CONFIG_VORBIS_ENCODER 0
+#define CONFIG_WAVPACK_ENCODER 0
+#define CONFIG_WMAV1_ENCODER 0
+#define CONFIG_WMAV2_ENCODER 0
+#define CONFIG_PCM_ALAW_ENCODER 0
+#define CONFIG_PCM_BLURAY_ENCODER 0
+#define CONFIG_PCM_DVD_ENCODER 0
+#define CONFIG_PCM_F32BE_ENCODER 0
+#define CONFIG_PCM_F32LE_ENCODER 0
+#define CONFIG_PCM_F64BE_ENCODER 0
+#define CONFIG_PCM_F64LE_ENCODER 0
+#define CONFIG_PCM_MULAW_ENCODER 0
+#define CONFIG_PCM_S8_ENCODER 0
+#define CONFIG_PCM_S8_PLANAR_ENCODER 0
+#define CONFIG_PCM_S16BE_ENCODER 0
+#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S16LE_ENCODER 0
+#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S24BE_ENCODER 0
+#define CONFIG_PCM_S24DAUD_ENCODER 0
+#define CONFIG_PCM_S24LE_ENCODER 0
+#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S32BE_ENCODER 0
+#define CONFIG_PCM_S32LE_ENCODER 0
+#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S64BE_ENCODER 0
+#define CONFIG_PCM_S64LE_ENCODER 0
+#define CONFIG_PCM_U8_ENCODER 0
+#define CONFIG_PCM_U16BE_ENCODER 0
+#define CONFIG_PCM_U16LE_ENCODER 0
+#define CONFIG_PCM_U24BE_ENCODER 0
+#define CONFIG_PCM_U24LE_ENCODER 0
+#define CONFIG_PCM_U32BE_ENCODER 0
+#define CONFIG_PCM_U32LE_ENCODER 0
+#define CONFIG_PCM_VIDC_ENCODER 0
+#define CONFIG_ROQ_DPCM_ENCODER 0
+#define CONFIG_ADPCM_ADX_ENCODER 0
+#define CONFIG_ADPCM_ARGO_ENCODER 0
+#define CONFIG_ADPCM_G722_ENCODER 0
+#define CONFIG_ADPCM_G726_ENCODER 0
+#define CONFIG_ADPCM_G726LE_ENCODER 0
+#define CONFIG_ADPCM_IMA_AMV_ENCODER 0
+#define CONFIG_ADPCM_IMA_ALP_ENCODER 0
+#define CONFIG_ADPCM_IMA_APM_ENCODER 0
+#define CONFIG_ADPCM_IMA_QT_ENCODER 0
+#define CONFIG_ADPCM_IMA_SSI_ENCODER 0
+#define CONFIG_ADPCM_IMA_WAV_ENCODER 0
+#define CONFIG_ADPCM_IMA_WS_ENCODER 0
+#define CONFIG_ADPCM_MS_ENCODER 0
+#define CONFIG_ADPCM_SWF_ENCODER 0
+#define CONFIG_ADPCM_YAMAHA_ENCODER 0
+#define CONFIG_SSA_ENCODER 0
+#define CONFIG_ASS_ENCODER 0
+#define CONFIG_DVBSUB_ENCODER 0
+#define CONFIG_DVDSUB_ENCODER 0
+#define CONFIG_MOVTEXT_ENCODER 0
+#define CONFIG_SRT_ENCODER 0
+#define CONFIG_SUBRIP_ENCODER 0
+#define CONFIG_TEXT_ENCODER 0
+#define CONFIG_TTML_ENCODER 0
+#define CONFIG_WEBVTT_ENCODER 0
+#define CONFIG_XSUB_ENCODER 0
+#define CONFIG_AAC_AT_ENCODER 0
+#define CONFIG_ALAC_AT_ENCODER 0
+#define CONFIG_ILBC_AT_ENCODER 0
+#define CONFIG_PCM_ALAW_AT_ENCODER 0
+#define CONFIG_PCM_MULAW_AT_ENCODER 0
+#define CONFIG_LIBAOM_AV1_ENCODER 0
+#define CONFIG_LIBCODEC2_ENCODER 0
+#define CONFIG_LIBFDK_AAC_ENCODER 0
+#define CONFIG_LIBGSM_ENCODER 0
+#define CONFIG_LIBGSM_MS_ENCODER 0
+#define CONFIG_LIBILBC_ENCODER 0
+#define CONFIG_LIBJXL_ENCODER 0
+#define CONFIG_LIBMP3LAME_ENCODER 0
+#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0
+#define CONFIG_LIBOPENJPEG_ENCODER 0
+#define CONFIG_LIBOPUS_ENCODER 0
+#define CONFIG_LIBRAV1E_ENCODER 0
+#define CONFIG_LIBSHINE_ENCODER 0
+#define CONFIG_LIBSPEEX_ENCODER 0
+#define CONFIG_LIBSVTAV1_ENCODER 0
+#define CONFIG_LIBTHEORA_ENCODER 0
+#define CONFIG_LIBTWOLAME_ENCODER 0
+#define CONFIG_LIBVO_AMRWBENC_ENCODER 0
+#define CONFIG_LIBVORBIS_ENCODER 0
+#define CONFIG_LIBVPX_VP8_ENCODER 0
+#define CONFIG_LIBVPX_VP9_ENCODER 0
+#define CONFIG_LIBWEBP_ANIM_ENCODER 0
+#define CONFIG_LIBWEBP_ENCODER 0
+#define CONFIG_LIBX262_ENCODER 0
+#define CONFIG_LIBX264_ENCODER 0
+#define CONFIG_LIBX264RGB_ENCODER 0
+#define CONFIG_LIBX265_ENCODER 0
+#define CONFIG_LIBXAVS_ENCODER 0
+#define CONFIG_LIBXAVS2_ENCODER 0
+#define CONFIG_LIBXVID_ENCODER 0
+#define CONFIG_AAC_MF_ENCODER 0
+#define CONFIG_AC3_MF_ENCODER 0
+#define CONFIG_H263_V4L2M2M_ENCODER 0
+#define CONFIG_LIBOPENH264_ENCODER 0
+#define CONFIG_H264_AMF_ENCODER 0
+#define CONFIG_H264_MF_ENCODER 0
+#define CONFIG_H264_NVENC_ENCODER 0
+#define CONFIG_H264_OMX_ENCODER 0
+#define CONFIG_H264_QSV_ENCODER 0
+#define CONFIG_H264_V4L2M2M_ENCODER 0
+#define CONFIG_H264_VAAPI_ENCODER 0
+#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0
+#define CONFIG_HEVC_AMF_ENCODER 0
+#define CONFIG_HEVC_MF_ENCODER 0
+#define CONFIG_HEVC_NVENC_ENCODER 0
+#define CONFIG_HEVC_QSV_ENCODER 0
+#define CONFIG_HEVC_V4L2M2M_ENCODER 0
+#define CONFIG_HEVC_VAAPI_ENCODER 0
+#define CONFIG_HEVC_VIDEOTOOLBOX_ENCODER 0
+#define CONFIG_LIBKVAZAAR_ENCODER 0
+#define CONFIG_MJPEG_QSV_ENCODER 0
+#define CONFIG_MJPEG_VAAPI_ENCODER 0
+#define CONFIG_MP3_MF_ENCODER 0
+#define CONFIG_MPEG2_QSV_ENCODER 0
+#define CONFIG_MPEG2_VAAPI_ENCODER 0
+#define CONFIG_MPEG4_OMX_ENCODER 0
+#define CONFIG_MPEG4_V4L2M2M_ENCODER 0
+#define CONFIG_PRORES_VIDEOTOOLBOX_ENCODER 0
+#define CONFIG_VP8_V4L2M2M_ENCODER 0
+#define CONFIG_VP8_VAAPI_ENCODER 0
+#define CONFIG_VP9_VAAPI_ENCODER 0
+#define CONFIG_VP9_QSV_ENCODER 0
+#define CONFIG_AV1_D3D11VA_HWACCEL 0
+#define CONFIG_AV1_D3D11VA2_HWACCEL 0
+#define CONFIG_AV1_DXVA2_HWACCEL 0
+#define CONFIG_AV1_NVDEC_HWACCEL 0
+#define CONFIG_AV1_VAAPI_HWACCEL 0
+#define CONFIG_AV1_VDPAU_HWACCEL 0
+#define CONFIG_H263_VAAPI_HWACCEL 0
+#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_H264_D3D11VA_HWACCEL 0
+#define CONFIG_H264_D3D11VA2_HWACCEL 0
+#define CONFIG_H264_DXVA2_HWACCEL 0
+#define CONFIG_H264_NVDEC_HWACCEL 0
+#define CONFIG_H264_VAAPI_HWACCEL 0
+#define CONFIG_H264_VDPAU_HWACCEL 0
+#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_HEVC_D3D11VA_HWACCEL 0
+#define CONFIG_HEVC_D3D11VA2_HWACCEL 0
+#define CONFIG_HEVC_DXVA2_HWACCEL 0
+#define CONFIG_HEVC_NVDEC_HWACCEL 0
+#define CONFIG_HEVC_VAAPI_HWACCEL 0
+#define CONFIG_HEVC_VDPAU_HWACCEL 0
+#define CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_MJPEG_NVDEC_HWACCEL 0
+#define CONFIG_MJPEG_VAAPI_HWACCEL 0
+#define CONFIG_MPEG1_NVDEC_HWACCEL 0
+#define CONFIG_MPEG1_VDPAU_HWACCEL 0
+#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_MPEG2_D3D11VA_HWACCEL 0
+#define CONFIG_MPEG2_D3D11VA2_HWACCEL 0
+#define CONFIG_MPEG2_NVDEC_HWACCEL 0
+#define CONFIG_MPEG2_DXVA2_HWACCEL 0
+#define CONFIG_MPEG2_VAAPI_HWACCEL 0
+#define CONFIG_MPEG2_VDPAU_HWACCEL 0
+#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_MPEG4_NVDEC_HWACCEL 0
+#define CONFIG_MPEG4_VAAPI_HWACCEL 0
+#define CONFIG_MPEG4_VDPAU_HWACCEL 0
+#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_VC1_D3D11VA_HWACCEL 0
+#define CONFIG_VC1_D3D11VA2_HWACCEL 0
+#define CONFIG_VC1_DXVA2_HWACCEL 0
+#define CONFIG_VC1_NVDEC_HWACCEL 0
+#define CONFIG_VC1_VAAPI_HWACCEL 0
+#define CONFIG_VC1_VDPAU_HWACCEL 0
+#define CONFIG_VP8_NVDEC_HWACCEL 0
+#define CONFIG_VP8_VAAPI_HWACCEL 0
+#define CONFIG_VP9_D3D11VA_HWACCEL 0
+#define CONFIG_VP9_D3D11VA2_HWACCEL 0
+#define CONFIG_VP9_DXVA2_HWACCEL 0
+#define CONFIG_VP9_NVDEC_HWACCEL 0
+#define CONFIG_VP9_VAAPI_HWACCEL 0
+#define CONFIG_VP9_VDPAU_HWACCEL 0
+#define CONFIG_VP9_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_WMV3_D3D11VA_HWACCEL 0
+#define CONFIG_WMV3_D3D11VA2_HWACCEL 0
+#define CONFIG_WMV3_DXVA2_HWACCEL 0
+#define CONFIG_WMV3_NVDEC_HWACCEL 0
+#define CONFIG_WMV3_VAAPI_HWACCEL 0
+#define CONFIG_WMV3_VDPAU_HWACCEL 0
+#define CONFIG_AAC_PARSER 0
+#define CONFIG_AAC_LATM_PARSER 0
+#define CONFIG_AC3_PARSER 0
+#define CONFIG_ADX_PARSER 0
+#define CONFIG_AMR_PARSER 0
+#define CONFIG_AV1_PARSER 0
+#define CONFIG_AVS2_PARSER 0
+#define CONFIG_AVS3_PARSER 0
+#define CONFIG_BMP_PARSER 0
+#define CONFIG_CAVSVIDEO_PARSER 0
+#define CONFIG_COOK_PARSER 0
+#define CONFIG_CRI_PARSER 0
+#define CONFIG_DCA_PARSER 0
+#define CONFIG_DIRAC_PARSER 0
+#define CONFIG_DNXHD_PARSER 0
+#define CONFIG_DOLBY_E_PARSER 0
+#define CONFIG_DPX_PARSER 0
+#define CONFIG_DVAUDIO_PARSER 0
+#define CONFIG_DVBSUB_PARSER 0
+#define CONFIG_DVDSUB_PARSER 0
+#define CONFIG_DVD_NAV_PARSER 0
+#define CONFIG_FLAC_PARSER 0
+#define CONFIG_G723_1_PARSER 0
+#define CONFIG_G729_PARSER 0
+#define CONFIG_GIF_PARSER 0
+#define CONFIG_GSM_PARSER 0
+#define CONFIG_H261_PARSER 0
+#define CONFIG_H263_PARSER 0
+#define CONFIG_H264_PARSER 0
+#define CONFIG_HEVC_PARSER 0
+#define CONFIG_IPU_PARSER 0
+#define CONFIG_JPEG2000_PARSER 0
+#define CONFIG_MJPEG_PARSER 0
+#define CONFIG_MLP_PARSER 0
+#define CONFIG_MPEG4VIDEO_PARSER 0
+#define CONFIG_MPEGAUDIO_PARSER 0
+#define CONFIG_MPEGVIDEO_PARSER 0
+#define CONFIG_OPUS_PARSER 0
+#define CONFIG_PNG_PARSER 0
+#define CONFIG_PNM_PARSER 0
+#define CONFIG_QOI_PARSER 0
+#define CONFIG_RV30_PARSER 0
+#define CONFIG_RV40_PARSER 0
+#define CONFIG_SBC_PARSER 0
+#define CONFIG_SIPR_PARSER 0
+#define CONFIG_TAK_PARSER 0
+#define CONFIG_VC1_PARSER 0
+#define CONFIG_VORBIS_PARSER 0
+#define CONFIG_VP3_PARSER 0
+#define CONFIG_VP8_PARSER 0
+#define CONFIG_VP9_PARSER 0
+#define CONFIG_WEBP_PARSER 0
+#define CONFIG_XBM_PARSER 0
+#define CONFIG_XMA_PARSER 0
+#define CONFIG_ALSA_INDEV 0
+#define CONFIG_ANDROID_CAMERA_INDEV 0
+#define CONFIG_AVFOUNDATION_INDEV 0
+#define CONFIG_BKTR_INDEV 0
+#define CONFIG_DECKLINK_INDEV 0
+#define CONFIG_DSHOW_INDEV 0
+#define CONFIG_FBDEV_INDEV 0
+#define CONFIG_GDIGRAB_INDEV 0
+#define CONFIG_IEC61883_INDEV 0
+#define CONFIG_JACK_INDEV 0
+#define CONFIG_KMSGRAB_INDEV 0
+#define CONFIG_LAVFI_INDEV 0
+#define CONFIG_OPENAL_INDEV 0
+#define CONFIG_OSS_INDEV 0
+#define CONFIG_PULSE_INDEV 0
+#define CONFIG_SNDIO_INDEV 0
+#define CONFIG_V4L2_INDEV 0
+#define CONFIG_VFWCAP_INDEV 0
+#define CONFIG_XCBGRAB_INDEV 0
+#define CONFIG_LIBCDIO_INDEV 0
+#define CONFIG_LIBDC1394_INDEV 0
+#define CONFIG_ALSA_OUTDEV 0
+#define CONFIG_AUDIOTOOLBOX_OUTDEV 0
+#define CONFIG_CACA_OUTDEV 0
+#define CONFIG_DECKLINK_OUTDEV 0
+#define CONFIG_FBDEV_OUTDEV 0
+#define CONFIG_OPENGL_OUTDEV 0
+#define CONFIG_OSS_OUTDEV 0
+#define CONFIG_PULSE_OUTDEV 0
+#define CONFIG_SDL2_OUTDEV 0
+#define CONFIG_SNDIO_OUTDEV 0
+#define CONFIG_V4L2_OUTDEV 0
+#define CONFIG_XV_OUTDEV 0
+#define CONFIG_ABENCH_FILTER 0
+#define CONFIG_ACOMPRESSOR_FILTER 0
+#define CONFIG_ACONTRAST_FILTER 0
+#define CONFIG_ACOPY_FILTER 0
+#define CONFIG_ACUE_FILTER 0
+#define CONFIG_ACROSSFADE_FILTER 0
+#define CONFIG_ACROSSOVER_FILTER 0
+#define CONFIG_ACRUSHER_FILTER 0
+#define CONFIG_ADECLICK_FILTER 0
+#define CONFIG_ADECLIP_FILTER 0
+#define CONFIG_ADECORRELATE_FILTER 0
+#define CONFIG_ADELAY_FILTER 0
+#define CONFIG_ADENORM_FILTER 0
+#define CONFIG_ADERIVATIVE_FILTER 0
+#define CONFIG_ADYNAMICEQUALIZER_FILTER 0
+#define CONFIG_ADYNAMICSMOOTH_FILTER 0
+#define CONFIG_AECHO_FILTER 0
+#define CONFIG_AEMPHASIS_FILTER 0
+#define CONFIG_AEVAL_FILTER 0
+#define CONFIG_AEXCITER_FILTER 0
+#define CONFIG_AFADE_FILTER 0
+#define CONFIG_AFFTDN_FILTER 0
+#define CONFIG_AFFTFILT_FILTER 0
+#define CONFIG_AFIR_FILTER 0
+#define CONFIG_AFORMAT_FILTER 0
+#define CONFIG_AFREQSHIFT_FILTER 0
+#define CONFIG_AFWTDN_FILTER 0
+#define CONFIG_AGATE_FILTER 0
+#define CONFIG_AIIR_FILTER 0
+#define CONFIG_AINTEGRAL_FILTER 0
+#define CONFIG_AINTERLEAVE_FILTER 0
+#define CONFIG_ALATENCY_FILTER 0
+#define CONFIG_ALIMITER_FILTER 0
+#define CONFIG_ALLPASS_FILTER 0
+#define CONFIG_ALOOP_FILTER 0
+#define CONFIG_AMERGE_FILTER 0
+#define CONFIG_AMETADATA_FILTER 0
+#define CONFIG_AMIX_FILTER 0
+#define CONFIG_AMULTIPLY_FILTER 0
+#define CONFIG_ANEQUALIZER_FILTER 0
+#define CONFIG_ANLMDN_FILTER 0
+#define CONFIG_ANLMF_FILTER 0
+#define CONFIG_ANLMS_FILTER 0
+#define CONFIG_ANULL_FILTER 0
+#define CONFIG_APAD_FILTER 0
+#define CONFIG_APERMS_FILTER 0
+#define CONFIG_APHASER_FILTER 0
+#define CONFIG_APHASESHIFT_FILTER 0
+#define CONFIG_APSYCLIP_FILTER 0
+#define CONFIG_APULSATOR_FILTER 0
+#define CONFIG_AREALTIME_FILTER 0
+#define CONFIG_ARESAMPLE_FILTER 0
+#define CONFIG_AREVERSE_FILTER 0
+#define CONFIG_ARNNDN_FILTER 0
+#define CONFIG_ASDR_FILTER 0
+#define CONFIG_ASEGMENT_FILTER 0
+#define CONFIG_ASELECT_FILTER 0
+#define CONFIG_ASENDCMD_FILTER 0
+#define CONFIG_ASETNSAMPLES_FILTER 0
+#define CONFIG_ASETPTS_FILTER 0
+#define CONFIG_ASETRATE_FILTER 0
+#define CONFIG_ASETTB_FILTER 0
+#define CONFIG_ASHOWINFO_FILTER 0
+#define CONFIG_ASIDEDATA_FILTER 0
+#define CONFIG_ASOFTCLIP_FILTER 0
+#define CONFIG_ASPECTRALSTATS_FILTER 0
+#define CONFIG_ASPLIT_FILTER 0
+#define CONFIG_ASR_FILTER 0
+#define CONFIG_ASTATS_FILTER 0
+#define CONFIG_ASTREAMSELECT_FILTER 0
+#define CONFIG_ASUBBOOST_FILTER 0
+#define CONFIG_ASUBCUT_FILTER 0
+#define CONFIG_ASUPERCUT_FILTER 0
+#define CONFIG_ASUPERPASS_FILTER 0
+#define CONFIG_ASUPERSTOP_FILTER 0
+#define CONFIG_ATEMPO_FILTER 0
+#define CONFIG_ATILT_FILTER 0
+#define CONFIG_ATRIM_FILTER 0
+#define CONFIG_AXCORRELATE_FILTER 0
+#define CONFIG_AZMQ_FILTER 0
+#define CONFIG_BANDPASS_FILTER 0
+#define CONFIG_BANDREJECT_FILTER 0
+#define CONFIG_BASS_FILTER 0
+#define CONFIG_BIQUAD_FILTER 0
+#define CONFIG_BS2B_FILTER 0
+#define CONFIG_CHANNELMAP_FILTER 0
+#define CONFIG_CHANNELSPLIT_FILTER 0
+#define CONFIG_CHORUS_FILTER 0
+#define CONFIG_COMPAND_FILTER 0
+#define CONFIG_COMPENSATIONDELAY_FILTER 0
+#define CONFIG_CROSSFEED_FILTER 0
+#define CONFIG_CRYSTALIZER_FILTER 0
+#define CONFIG_DCSHIFT_FILTER 0
+#define CONFIG_DEESSER_FILTER 0
+#define CONFIG_DIALOGUENHANCE_FILTER 0
+#define CONFIG_DRMETER_FILTER 0
+#define CONFIG_DYNAUDNORM_FILTER 0
+#define CONFIG_EARWAX_FILTER 0
+#define CONFIG_EBUR128_FILTER 0
+#define CONFIG_EQUALIZER_FILTER 0
+#define CONFIG_EXTRASTEREO_FILTER 0
+#define CONFIG_FIREQUALIZER_FILTER 0
+#define CONFIG_FLANGER_FILTER 0
+#define CONFIG_HAAS_FILTER 0
+#define CONFIG_HDCD_FILTER 0
+#define CONFIG_HEADPHONE_FILTER 0
+#define CONFIG_HIGHPASS_FILTER 0
+#define CONFIG_HIGHSHELF_FILTER 0
+#define CONFIG_JOIN_FILTER 0
+#define CONFIG_LADSPA_FILTER 0
+#define CONFIG_LOUDNORM_FILTER 0
+#define CONFIG_LOWPASS_FILTER 0
+#define CONFIG_LOWSHELF_FILTER 0
+#define CONFIG_LV2_FILTER 0
+#define CONFIG_MCOMPAND_FILTER 0
+#define CONFIG_PAN_FILTER 0
+#define CONFIG_REPLAYGAIN_FILTER 0
+#define CONFIG_RUBBERBAND_FILTER 0
+#define CONFIG_SIDECHAINCOMPRESS_FILTER 0
+#define CONFIG_SIDECHAINGATE_FILTER 0
+#define CONFIG_SILENCEDETECT_FILTER 0
+#define CONFIG_SILENCEREMOVE_FILTER 0
+#define CONFIG_SOFALIZER_FILTER 0
+#define CONFIG_SPEECHNORM_FILTER 0
+#define CONFIG_STEREOTOOLS_FILTER 0
+#define CONFIG_STEREOWIDEN_FILTER 0
+#define CONFIG_SUPEREQUALIZER_FILTER 0
+#define CONFIG_SURROUND_FILTER 0
+#define CONFIG_TILTSHELF_FILTER 0
+#define CONFIG_TREBLE_FILTER 0
+#define CONFIG_TREMOLO_FILTER 0
+#define CONFIG_VIBRATO_FILTER 0
+#define CONFIG_VIRTUALBASS_FILTER 0
+#define CONFIG_VOLUME_FILTER 0
+#define CONFIG_VOLUMEDETECT_FILTER 0
+#define CONFIG_AEVALSRC_FILTER 0
+#define CONFIG_AFIRSRC_FILTER 0
+#define CONFIG_ANOISESRC_FILTER 0
+#define CONFIG_ANULLSRC_FILTER 0
+#define CONFIG_FLITE_FILTER 0
+#define CONFIG_HILBERT_FILTER 0
+#define CONFIG_SINC_FILTER 0
+#define CONFIG_SINE_FILTER 0
+#define CONFIG_ANULLSINK_FILTER 0
+#define CONFIG_ADDROI_FILTER 0
+#define CONFIG_ALPHAEXTRACT_FILTER 0
+#define CONFIG_ALPHAMERGE_FILTER 0
+#define CONFIG_AMPLIFY_FILTER 0
+#define CONFIG_ASS_FILTER 0
+#define CONFIG_ATADENOISE_FILTER 0
+#define CONFIG_AVGBLUR_FILTER 0
+#define CONFIG_AVGBLUR_OPENCL_FILTER 0
+#define CONFIG_AVGBLUR_VULKAN_FILTER 0
+#define CONFIG_BBOX_FILTER 0
+#define CONFIG_BENCH_FILTER 0
+#define CONFIG_BILATERAL_FILTER 0
+#define CONFIG_BITPLANENOISE_FILTER 0
+#define CONFIG_BLACKDETECT_FILTER 0
+#define CONFIG_BLACKFRAME_FILTER 0
+#define CONFIG_BLEND_FILTER 0
+#define CONFIG_BLEND_VULKAN_FILTER 0
+#define CONFIG_BLOCKDETECT_FILTER 0
+#define CONFIG_BLURDETECT_FILTER 0
+#define CONFIG_BM3D_FILTER 0
+#define CONFIG_BOXBLUR_FILTER 0
+#define CONFIG_BOXBLUR_OPENCL_FILTER 0
+#define CONFIG_BWDIF_FILTER 0
+#define CONFIG_CAS_FILTER 0
+#define CONFIG_CHROMABER_VULKAN_FILTER 0
+#define CONFIG_CHROMAHOLD_FILTER 0
+#define CONFIG_CHROMAKEY_FILTER 0
+#define CONFIG_CHROMANR_FILTER 0
+#define CONFIG_CHROMASHIFT_FILTER 0
+#define CONFIG_CIESCOPE_FILTER 0
+#define CONFIG_CODECVIEW_FILTER 0
+#define CONFIG_COLORBALANCE_FILTER 0
+#define CONFIG_COLORCHANNELMIXER_FILTER 0
+#define CONFIG_COLORCONTRAST_FILTER 0
+#define CONFIG_COLORCORRECT_FILTER 0
+#define CONFIG_COLORIZE_FILTER 0
+#define CONFIG_COLORKEY_FILTER 0
+#define CONFIG_COLORKEY_OPENCL_FILTER 0
+#define CONFIG_COLORHOLD_FILTER 0
+#define CONFIG_COLORLEVELS_FILTER 0
+#define CONFIG_COLORMAP_FILTER 0
+#define CONFIG_COLORMATRIX_FILTER 0
+#define CONFIG_COLORSPACE_FILTER 0
+#define CONFIG_COLORTEMPERATURE_FILTER 0
+#define CONFIG_CONVOLUTION_FILTER 0
+#define CONFIG_CONVOLUTION_OPENCL_FILTER 0
+#define CONFIG_CONVOLVE_FILTER 0
+#define CONFIG_COPY_FILTER 0
+#define CONFIG_COREIMAGE_FILTER 0
+#define CONFIG_COVER_RECT_FILTER 0
+#define CONFIG_CROP_FILTER 0
+#define CONFIG_CROPDETECT_FILTER 0
+#define CONFIG_CUE_FILTER 0
+#define CONFIG_CURVES_FILTER 0
+#define CONFIG_DATASCOPE_FILTER 0
+#define CONFIG_DBLUR_FILTER 0
+#define CONFIG_DCTDNOIZ_FILTER 0
+#define CONFIG_DEBAND_FILTER 0
+#define CONFIG_DEBLOCK_FILTER 0
+#define CONFIG_DECIMATE_FILTER 0
+#define CONFIG_DECONVOLVE_FILTER 0
+#define CONFIG_DEDOT_FILTER 0
+#define CONFIG_DEFLATE_FILTER 0
+#define CONFIG_DEFLICKER_FILTER 0
+#define CONFIG_DEINTERLACE_QSV_FILTER 0
+#define CONFIG_DEINTERLACE_VAAPI_FILTER 0
+#define CONFIG_DEJUDDER_FILTER 0
+#define CONFIG_DELOGO_FILTER 0
+#define CONFIG_DENOISE_VAAPI_FILTER 0
+#define CONFIG_DERAIN_FILTER 0
+#define CONFIG_DESHAKE_FILTER 0
+#define CONFIG_DESHAKE_OPENCL_FILTER 0
+#define CONFIG_DESPILL_FILTER 0
+#define CONFIG_DETELECINE_FILTER 0
+#define CONFIG_DILATION_FILTER 0
+#define CONFIG_DILATION_OPENCL_FILTER 0
+#define CONFIG_DISPLACE_FILTER 0
+#define CONFIG_DNN_CLASSIFY_FILTER 0
+#define CONFIG_DNN_DETECT_FILTER 0
+#define CONFIG_DNN_PROCESSING_FILTER 0
+#define CONFIG_DOUBLEWEAVE_FILTER 0
+#define CONFIG_DRAWBOX_FILTER 0
+#define CONFIG_DRAWGRAPH_FILTER 0
+#define CONFIG_DRAWGRID_FILTER 0
+#define CONFIG_DRAWTEXT_FILTER 0
+#define CONFIG_EDGEDETECT_FILTER 0
+#define CONFIG_ELBG_FILTER 0
+#define CONFIG_ENTROPY_FILTER 0
+#define CONFIG_EPX_FILTER 0
+#define CONFIG_EQ_FILTER 0
+#define CONFIG_EROSION_FILTER 0
+#define CONFIG_EROSION_OPENCL_FILTER 0
+#define CONFIG_ESTDIF_FILTER 0
+#define CONFIG_EXPOSURE_FILTER 0
+#define CONFIG_EXTRACTPLANES_FILTER 0
+#define CONFIG_FADE_FILTER 0
+#define CONFIG_FEEDBACK_FILTER 0
+#define CONFIG_FFTDNOIZ_FILTER 0
+#define CONFIG_FFTFILT_FILTER 0
+#define CONFIG_FIELD_FILTER 0
+#define CONFIG_FIELDHINT_FILTER 0
+#define CONFIG_FIELDMATCH_FILTER 0
+#define CONFIG_FIELDORDER_FILTER 0
+#define CONFIG_FILLBORDERS_FILTER 0
+#define CONFIG_FIND_RECT_FILTER 0
+#define CONFIG_FLIP_VULKAN_FILTER 0
+#define CONFIG_FLOODFILL_FILTER 0
+#define CONFIG_FORMAT_FILTER 0
+#define CONFIG_FPS_FILTER 0
+#define CONFIG_FRAMEPACK_FILTER 0
+#define CONFIG_FRAMERATE_FILTER 0
+#define CONFIG_FRAMESTEP_FILTER 0
+#define CONFIG_FREEZEDETECT_FILTER 0
+#define CONFIG_FREEZEFRAMES_FILTER 0
+#define CONFIG_FREI0R_FILTER 0
+#define CONFIG_FSPP_FILTER 0
+#define CONFIG_GBLUR_FILTER 0
+#define CONFIG_GBLUR_VULKAN_FILTER 0
+#define CONFIG_GEQ_FILTER 0
+#define CONFIG_GRADFUN_FILTER 0
+#define CONFIG_GRAPHMONITOR_FILTER 0
+#define CONFIG_GRAYWORLD_FILTER 0
+#define CONFIG_GREYEDGE_FILTER 0
+#define CONFIG_GUIDED_FILTER 0
+#define CONFIG_HALDCLUT_FILTER 0
+#define CONFIG_HFLIP_FILTER 0
+#define CONFIG_HFLIP_VULKAN_FILTER 0
+#define CONFIG_HISTEQ_FILTER 0
+#define CONFIG_HISTOGRAM_FILTER 0
+#define CONFIG_HQDN3D_FILTER 0
+#define CONFIG_HQX_FILTER 0
+#define CONFIG_HSTACK_FILTER 0
+#define CONFIG_HSVHOLD_FILTER 0
+#define CONFIG_HSVKEY_FILTER 0
+#define CONFIG_HUE_FILTER 0
+#define CONFIG_HUESATURATION_FILTER 0
+#define CONFIG_HWDOWNLOAD_FILTER 0
+#define CONFIG_HWMAP_FILTER 0
+#define CONFIG_HWUPLOAD_FILTER 0
+#define CONFIG_HWUPLOAD_CUDA_FILTER 0
+#define CONFIG_HYSTERESIS_FILTER 0
+#define CONFIG_ICCDETECT_FILTER 0
+#define CONFIG_ICCGEN_FILTER 0
+#define CONFIG_IDENTITY_FILTER 0
+#define CONFIG_IDET_FILTER 0
+#define CONFIG_IL_FILTER 0
+#define CONFIG_INFLATE_FILTER 0
+#define CONFIG_INTERLACE_FILTER 0
+#define CONFIG_INTERLEAVE_FILTER 0
+#define CONFIG_KERNDEINT_FILTER 0
+#define CONFIG_KIRSCH_FILTER 0
+#define CONFIG_LAGFUN_FILTER 0
+#define CONFIG_LATENCY_FILTER 0
+#define CONFIG_LENSCORRECTION_FILTER 0
+#define CONFIG_LENSFUN_FILTER 0
+#define CONFIG_LIBPLACEBO_FILTER 0
+#define CONFIG_LIBVMAF_FILTER 0
+#define CONFIG_LIMITDIFF_FILTER 0
+#define CONFIG_LIMITER_FILTER 0
+#define CONFIG_LOOP_FILTER 0
+#define CONFIG_LUMAKEY_FILTER 0
+#define CONFIG_LUT_FILTER 0
+#define CONFIG_LUT1D_FILTER 0
+#define CONFIG_LUT2_FILTER 0
+#define CONFIG_LUT3D_FILTER 0
+#define CONFIG_LUTRGB_FILTER 0
+#define CONFIG_LUTYUV_FILTER 0
+#define CONFIG_MASKEDCLAMP_FILTER 0
+#define CONFIG_MASKEDMAX_FILTER 0
+#define CONFIG_MASKEDMERGE_FILTER 0
+#define CONFIG_MASKEDMIN_FILTER 0
+#define CONFIG_MASKEDTHRESHOLD_FILTER 0
+#define CONFIG_MASKFUN_FILTER 0
+#define CONFIG_MCDEINT_FILTER 0
+#define CONFIG_MEDIAN_FILTER 0
+#define CONFIG_MERGEPLANES_FILTER 0
+#define CONFIG_MESTIMATE_FILTER 0
+#define CONFIG_METADATA_FILTER 0
+#define CONFIG_MIDEQUALIZER_FILTER 0
+#define CONFIG_MINTERPOLATE_FILTER 0
+#define CONFIG_MIX_FILTER 0
+#define CONFIG_MONOCHROME_FILTER 0
+#define CONFIG_MORPHO_FILTER 0
+#define CONFIG_MPDECIMATE_FILTER 0
+#define CONFIG_MSAD_FILTER 0
+#define CONFIG_MULTIPLY_FILTER 0
+#define CONFIG_NEGATE_FILTER 0
+#define CONFIG_NLMEANS_FILTER 0
+#define CONFIG_NLMEANS_OPENCL_FILTER 0
+#define CONFIG_NNEDI_FILTER 0
+#define CONFIG_NOFORMAT_FILTER 0
+#define CONFIG_NOISE_FILTER 0
+#define CONFIG_NORMALIZE_FILTER 0
+#define CONFIG_NULL_FILTER 0
+#define CONFIG_OCR_FILTER 0
+#define CONFIG_OCV_FILTER 0
+#define CONFIG_OSCILLOSCOPE_FILTER 0
+#define CONFIG_OVERLAY_FILTER 0
+#define CONFIG_OVERLAY_OPENCL_FILTER 0
+#define CONFIG_OVERLAY_QSV_FILTER 0
+#define CONFIG_OVERLAY_VAAPI_FILTER 0
+#define CONFIG_OVERLAY_VULKAN_FILTER 0
+#define CONFIG_OVERLAY_CUDA_FILTER 0
+#define CONFIG_OWDENOISE_FILTER 0
+#define CONFIG_PAD_FILTER 0
+#define CONFIG_PAD_OPENCL_FILTER 0
+#define CONFIG_PALETTEGEN_FILTER 0
+#define CONFIG_PALETTEUSE_FILTER 0
+#define CONFIG_PERMS_FILTER 0
+#define CONFIG_PERSPECTIVE_FILTER 0
+#define CONFIG_PHASE_FILTER 0
+#define CONFIG_PHOTOSENSITIVITY_FILTER 0
+#define CONFIG_PIXDESCTEST_FILTER 0
+#define CONFIG_PIXELIZE_FILTER 0
+#define CONFIG_PIXSCOPE_FILTER 0
+#define CONFIG_PP_FILTER 0
+#define CONFIG_PP7_FILTER 0
+#define CONFIG_PREMULTIPLY_FILTER 0
+#define CONFIG_PREWITT_FILTER 0
+#define CONFIG_PREWITT_OPENCL_FILTER 0
+#define CONFIG_PROCAMP_VAAPI_FILTER 0
+#define CONFIG_PROGRAM_OPENCL_FILTER 0
+#define CONFIG_PSEUDOCOLOR_FILTER 0
+#define CONFIG_PSNR_FILTER 0
+#define CONFIG_PULLUP_FILTER 0
+#define CONFIG_QP_FILTER 0
+#define CONFIG_RANDOM_FILTER 0
+#define CONFIG_READEIA608_FILTER 0
+#define CONFIG_READVITC_FILTER 0
+#define CONFIG_REALTIME_FILTER 0
+#define CONFIG_REMAP_FILTER 0
+#define CONFIG_REMOVEGRAIN_FILTER 0
+#define CONFIG_REMOVELOGO_FILTER 0
+#define CONFIG_REPEATFIELDS_FILTER 0
+#define CONFIG_REVERSE_FILTER 0
+#define CONFIG_RGBASHIFT_FILTER 0
+#define CONFIG_ROBERTS_FILTER 0
+#define CONFIG_ROBERTS_OPENCL_FILTER 0
+#define CONFIG_ROTATE_FILTER 0
+#define CONFIG_SAB_FILTER 0
+#define CONFIG_SCALE_FILTER 0
+#define CONFIG_SCALE_CUDA_FILTER 0
+#define CONFIG_SCALE_NPP_FILTER 0
+#define CONFIG_SCALE_QSV_FILTER 0
+#define CONFIG_SCALE_VAAPI_FILTER 0
+#define CONFIG_SCALE_VULKAN_FILTER 0
+#define CONFIG_SCALE2REF_FILTER 0
+#define CONFIG_SCALE2REF_NPP_FILTER 0
+#define CONFIG_SCDET_FILTER 0
+#define CONFIG_SCHARR_FILTER 0
+#define CONFIG_SCROLL_FILTER 0
+#define CONFIG_SEGMENT_FILTER 0
+#define CONFIG_SELECT_FILTER 0
+#define CONFIG_SELECTIVECOLOR_FILTER 0
+#define CONFIG_SENDCMD_FILTER 0
+#define CONFIG_SEPARATEFIELDS_FILTER 0
+#define CONFIG_SETDAR_FILTER 0
+#define CONFIG_SETFIELD_FILTER 0
+#define CONFIG_SETPARAMS_FILTER 0
+#define CONFIG_SETPTS_FILTER 0
+#define CONFIG_SETRANGE_FILTER 0
+#define CONFIG_SETSAR_FILTER 0
+#define CONFIG_SETTB_FILTER 0
+#define CONFIG_SHARPEN_NPP_FILTER 0
+#define CONFIG_SHARPNESS_VAAPI_FILTER 0
+#define CONFIG_SHEAR_FILTER 0
+#define CONFIG_SHOWINFO_FILTER 0
+#define CONFIG_SHOWPALETTE_FILTER 0
+#define CONFIG_SHUFFLEFRAMES_FILTER 0
+#define CONFIG_SHUFFLEPIXELS_FILTER 0
+#define CONFIG_SHUFFLEPLANES_FILTER 0
+#define CONFIG_SIDEDATA_FILTER 0
+#define CONFIG_SIGNALSTATS_FILTER 0
+#define CONFIG_SIGNATURE_FILTER 0
+#define CONFIG_SITI_FILTER 0
+#define CONFIG_SMARTBLUR_FILTER 0
+#define CONFIG_SOBEL_FILTER 0
+#define CONFIG_SOBEL_OPENCL_FILTER 0
+#define CONFIG_SPLIT_FILTER 0
+#define CONFIG_SPP_FILTER 0
+#define CONFIG_SR_FILTER 0
+#define CONFIG_SSIM_FILTER 0
+#define CONFIG_STEREO3D_FILTER 0
+#define CONFIG_STREAMSELECT_FILTER 0
+#define CONFIG_SUBTITLES_FILTER 0
+#define CONFIG_SUPER2XSAI_FILTER 0
+#define CONFIG_SWAPRECT_FILTER 0
+#define CONFIG_SWAPUV_FILTER 0
+#define CONFIG_TBLEND_FILTER 0
+#define CONFIG_TELECINE_FILTER 0
+#define CONFIG_THISTOGRAM_FILTER 0
+#define CONFIG_THRESHOLD_FILTER 0
+#define CONFIG_THUMBNAIL_FILTER 0
+#define CONFIG_THUMBNAIL_CUDA_FILTER 0
+#define CONFIG_TILE_FILTER 0
+#define CONFIG_TINTERLACE_FILTER 0
+#define CONFIG_TLUT2_FILTER 0
+#define CONFIG_TMEDIAN_FILTER 0
+#define CONFIG_TMIDEQUALIZER_FILTER 0
+#define CONFIG_TMIX_FILTER 0
+#define CONFIG_TONEMAP_FILTER 0
+#define CONFIG_TONEMAP_OPENCL_FILTER 0
+#define CONFIG_TONEMAP_VAAPI_FILTER 0
+#define CONFIG_TPAD_FILTER 0
+#define CONFIG_TRANSPOSE_FILTER 0
+#define CONFIG_TRANSPOSE_NPP_FILTER 0
+#define CONFIG_TRANSPOSE_OPENCL_FILTER 0
+#define CONFIG_TRANSPOSE_VAAPI_FILTER 0
+#define CONFIG_TRANSPOSE_VULKAN_FILTER 0
+#define CONFIG_TRIM_FILTER 0
+#define CONFIG_UNPREMULTIPLY_FILTER 0
+#define CONFIG_UNSHARP_FILTER 0
+#define CONFIG_UNSHARP_OPENCL_FILTER 0
+#define CONFIG_UNTILE_FILTER 0
+#define CONFIG_USPP_FILTER 0
+#define CONFIG_V360_FILTER 0
+#define CONFIG_VAGUEDENOISER_FILTER 0
+#define CONFIG_VARBLUR_FILTER 0
+#define CONFIG_VECTORSCOPE_FILTER 0
+#define CONFIG_VFLIP_FILTER 0
+#define CONFIG_VFLIP_VULKAN_FILTER 0
+#define CONFIG_VFRDET_FILTER 0
+#define CONFIG_VIBRANCE_FILTER 0
+#define CONFIG_VIDSTABDETECT_FILTER 0
+#define CONFIG_VIDSTABTRANSFORM_FILTER 0
+#define CONFIG_VIF_FILTER 0
+#define CONFIG_VIGNETTE_FILTER 0
+#define CONFIG_VMAFMOTION_FILTER 0
+#define CONFIG_VPP_QSV_FILTER 0
+#define CONFIG_VSTACK_FILTER 0
+#define CONFIG_W3FDIF_FILTER 0
+#define CONFIG_WAVEFORM_FILTER 0
+#define CONFIG_WEAVE_FILTER 0
+#define CONFIG_XBR_FILTER 0
+#define CONFIG_XCORRELATE_FILTER 0
+#define CONFIG_XFADE_FILTER 0
+#define CONFIG_XFADE_OPENCL_FILTER 0
+#define CONFIG_XMEDIAN_FILTER 0
+#define CONFIG_XSTACK_FILTER 0
+#define CONFIG_YADIF_FILTER 0
+#define CONFIG_YADIF_CUDA_FILTER 0
+#define CONFIG_YADIF_VIDEOTOOLBOX_FILTER 0
+#define CONFIG_YAEPBLUR_FILTER 0
+#define CONFIG_ZMQ_FILTER 0
+#define CONFIG_ZOOMPAN_FILTER 0
+#define CONFIG_ZSCALE_FILTER 0
+#define CONFIG_ALLRGB_FILTER 0
+#define CONFIG_ALLYUV_FILTER 0
+#define CONFIG_CELLAUTO_FILTER 0
+#define CONFIG_COLOR_FILTER 0
+#define CONFIG_COLORCHART_FILTER 0
+#define CONFIG_COLORSPECTRUM_FILTER 0
+#define CONFIG_COREIMAGESRC_FILTER 0
+#define CONFIG_FREI0R_SRC_FILTER 0
+#define CONFIG_GRADIENTS_FILTER 0
+#define CONFIG_HALDCLUTSRC_FILTER 0
+#define CONFIG_LIFE_FILTER 0
+#define CONFIG_MANDELBROT_FILTER 0
+#define CONFIG_MPTESTSRC_FILTER 0
+#define CONFIG_NULLSRC_FILTER 0
+#define CONFIG_OPENCLSRC_FILTER 0
+#define CONFIG_PAL75BARS_FILTER 0
+#define CONFIG_PAL100BARS_FILTER 0
+#define CONFIG_RGBTESTSRC_FILTER 0
+#define CONFIG_SIERPINSKI_FILTER 0
+#define CONFIG_SMPTEBARS_FILTER 0
+#define CONFIG_SMPTEHDBARS_FILTER 0
+#define CONFIG_TESTSRC_FILTER 0
+#define CONFIG_TESTSRC2_FILTER 0
+#define CONFIG_YUVTESTSRC_FILTER 0
+#define CONFIG_NULLSINK_FILTER 0
+#define CONFIG_ABITSCOPE_FILTER 0
+#define CONFIG_ADRAWGRAPH_FILTER 0
+#define CONFIG_AGRAPHMONITOR_FILTER 0
+#define CONFIG_AHISTOGRAM_FILTER 0
+#define CONFIG_APHASEMETER_FILTER 0
+#define CONFIG_AVECTORSCOPE_FILTER 0
+#define CONFIG_CONCAT_FILTER 0
+#define CONFIG_SHOWCQT_FILTER 0
+#define CONFIG_SHOWFREQS_FILTER 0
+#define CONFIG_SHOWSPATIAL_FILTER 0
+#define CONFIG_SHOWSPECTRUM_FILTER 0
+#define CONFIG_SHOWSPECTRUMPIC_FILTER 0
+#define CONFIG_SHOWVOLUME_FILTER 0
+#define CONFIG_SHOWWAVES_FILTER 0
+#define CONFIG_SHOWWAVESPIC_FILTER 0
+#define CONFIG_SPECTRUMSYNTH_FILTER 0
+#define CONFIG_AVSYNCTEST_FILTER 0
+#define CONFIG_AMOVIE_FILTER 0
+#define CONFIG_MOVIE_FILTER 0
+#define CONFIG_AFIFO_FILTER 0
+#define CONFIG_FIFO_FILTER 0
+#define CONFIG_AA_DEMUXER 0
+#define CONFIG_AAC_DEMUXER 0
+#define CONFIG_AAX_DEMUXER 0
+#define CONFIG_AC3_DEMUXER 0
+#define CONFIG_ACE_DEMUXER 0
+#define CONFIG_ACM_DEMUXER 0
+#define CONFIG_ACT_DEMUXER 0
+#define CONFIG_ADF_DEMUXER 0
+#define CONFIG_ADP_DEMUXER 0
+#define CONFIG_ADS_DEMUXER 0
+#define CONFIG_ADX_DEMUXER 0
+#define CONFIG_AEA_DEMUXER 0
+#define CONFIG_AFC_DEMUXER 0
+#define CONFIG_AIFF_DEMUXER 0
+#define CONFIG_AIX_DEMUXER 0
+#define CONFIG_ALP_DEMUXER 0
+#define CONFIG_AMR_DEMUXER 0
+#define CONFIG_AMRNB_DEMUXER 0
+#define CONFIG_AMRWB_DEMUXER 0
+#define CONFIG_ANM_DEMUXER 0
+#define CONFIG_APC_DEMUXER 0
+#define CONFIG_APE_DEMUXER 0
+#define CONFIG_APM_DEMUXER 0
+#define CONFIG_APNG_DEMUXER 0
+#define CONFIG_APTX_DEMUXER 0
+#define CONFIG_APTX_HD_DEMUXER 0
+#define CONFIG_AQTITLE_DEMUXER 0
+#define CONFIG_ARGO_ASF_DEMUXER 0
+#define CONFIG_ARGO_BRP_DEMUXER 0
+#define CONFIG_ARGO_CVG_DEMUXER 0
+#define CONFIG_ASF_DEMUXER 0
+#define CONFIG_ASF_O_DEMUXER 0
+#define CONFIG_ASS_DEMUXER 0
+#define CONFIG_AST_DEMUXER 0
+#define CONFIG_AU_DEMUXER 0
+#define CONFIG_AV1_DEMUXER 0
+#define CONFIG_AVI_DEMUXER 0
+#define CONFIG_AVISYNTH_DEMUXER 0
+#define CONFIG_AVR_DEMUXER 0
+#define CONFIG_AVS_DEMUXER 0
+#define CONFIG_AVS2_DEMUXER 0
+#define CONFIG_AVS3_DEMUXER 0
+#define CONFIG_BETHSOFTVID_DEMUXER 0
+#define CONFIG_BFI_DEMUXER 0
+#define CONFIG_BINTEXT_DEMUXER 0
+#define CONFIG_BINK_DEMUXER 0
+#define CONFIG_BINKA_DEMUXER 0
+#define CONFIG_BIT_DEMUXER 0
+#define CONFIG_BITPACKED_DEMUXER 0
+#define CONFIG_BMV_DEMUXER 0
+#define CONFIG_BFSTM_DEMUXER 0
+#define CONFIG_BRSTM_DEMUXER 0
+#define CONFIG_BOA_DEMUXER 0
+#define CONFIG_C93_DEMUXER 0
+#define CONFIG_CAF_DEMUXER 0
+#define CONFIG_CAVSVIDEO_DEMUXER 0
+#define CONFIG_CDG_DEMUXER 0
+#define CONFIG_CDXL_DEMUXER 0
+#define CONFIG_CINE_DEMUXER 0
+#define CONFIG_CODEC2_DEMUXER 0
+#define CONFIG_CODEC2RAW_DEMUXER 0
+#define CONFIG_CONCAT_DEMUXER 0
+#define CONFIG_DASH_DEMUXER 0
+#define CONFIG_DATA_DEMUXER 0
+#define CONFIG_DAUD_DEMUXER 0
+#define CONFIG_DCSTR_DEMUXER 0
+#define CONFIG_DERF_DEMUXER 0
+#define CONFIG_DFA_DEMUXER 0
+#define CONFIG_DFPWM_DEMUXER 0
+#define CONFIG_DHAV_DEMUXER 0
+#define CONFIG_DIRAC_DEMUXER 0
+#define CONFIG_DNXHD_DEMUXER 0
+#define CONFIG_DSF_DEMUXER 0
+#define CONFIG_DSICIN_DEMUXER 0
+#define CONFIG_DSS_DEMUXER 0
+#define CONFIG_DTS_DEMUXER 0
+#define CONFIG_DTSHD_DEMUXER 0
+#define CONFIG_DV_DEMUXER 0
+#define CONFIG_DVBSUB_DEMUXER 0
+#define CONFIG_DVBTXT_DEMUXER 0
+#define CONFIG_DXA_DEMUXER 0
+#define CONFIG_EA_DEMUXER 0
+#define CONFIG_EA_CDATA_DEMUXER 0
+#define CONFIG_EAC3_DEMUXER 0
+#define CONFIG_EPAF_DEMUXER 0
+#define CONFIG_FFMETADATA_DEMUXER 0
+#define CONFIG_FILMSTRIP_DEMUXER 0
+#define CONFIG_FITS_DEMUXER 0
+#define CONFIG_FLAC_DEMUXER 0
+#define CONFIG_FLIC_DEMUXER 0
+#define CONFIG_FLV_DEMUXER 0
+#define CONFIG_LIVE_FLV_DEMUXER 0
+#define CONFIG_FOURXM_DEMUXER 0
+#define CONFIG_FRM_DEMUXER 0
+#define CONFIG_FSB_DEMUXER 0
+#define CONFIG_FWSE_DEMUXER 0
+#define CONFIG_G722_DEMUXER 0
+#define CONFIG_G723_1_DEMUXER 0
+#define CONFIG_G726_DEMUXER 0
+#define CONFIG_G726LE_DEMUXER 0
+#define CONFIG_G729_DEMUXER 0
+#define CONFIG_GDV_DEMUXER 0
+#define CONFIG_GENH_DEMUXER 0
+#define CONFIG_GIF_DEMUXER 0
+#define CONFIG_GSM_DEMUXER 0
+#define CONFIG_GXF_DEMUXER 0
+#define CONFIG_H261_DEMUXER 0
+#define CONFIG_H263_DEMUXER 0
+#define CONFIG_H264_DEMUXER 0
+#define CONFIG_HCA_DEMUXER 0
+#define CONFIG_HCOM_DEMUXER 0
+#define CONFIG_HEVC_DEMUXER 0
+#define CONFIG_HLS_DEMUXER 0
+#define CONFIG_HNM_DEMUXER 0
+#define CONFIG_ICO_DEMUXER 0
+#define CONFIG_IDCIN_DEMUXER 0
+#define CONFIG_IDF_DEMUXER 0
+#define CONFIG_IFF_DEMUXER 0
+#define CONFIG_IFV_DEMUXER 0
+#define CONFIG_ILBC_DEMUXER 0
+#define CONFIG_IMAGE2_DEMUXER 0
+#define CONFIG_IMAGE2PIPE_DEMUXER 0
+#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0
+#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0
+#define CONFIG_IMF_DEMUXER 0
+#define CONFIG_INGENIENT_DEMUXER 0
+#define CONFIG_IPMOVIE_DEMUXER 0
+#define CONFIG_IPU_DEMUXER 0
+#define CONFIG_IRCAM_DEMUXER 0
+#define CONFIG_ISS_DEMUXER 0
+#define CONFIG_IV8_DEMUXER 0
+#define CONFIG_IVF_DEMUXER 0
+#define CONFIG_IVR_DEMUXER 0
+#define CONFIG_JACOSUB_DEMUXER 0
+#define CONFIG_JV_DEMUXER 0
+#define CONFIG_KUX_DEMUXER 0
+#define CONFIG_KVAG_DEMUXER 0
+#define CONFIG_LMLM4_DEMUXER 0
+#define CONFIG_LOAS_DEMUXER 0
+#define CONFIG_LUODAT_DEMUXER 0
+#define CONFIG_LRC_DEMUXER 0
+#define CONFIG_LVF_DEMUXER 0
+#define CONFIG_LXF_DEMUXER 0
+#define CONFIG_M4V_DEMUXER 0
+#define CONFIG_MCA_DEMUXER 0
+#define CONFIG_MCC_DEMUXER 0
+#define CONFIG_MATROSKA_DEMUXER 0
+#define CONFIG_MGSTS_DEMUXER 0
+#define CONFIG_MICRODVD_DEMUXER 0
+#define CONFIG_MJPEG_DEMUXER 0
+#define CONFIG_MJPEG_2000_DEMUXER 0
+#define CONFIG_MLP_DEMUXER 0
+#define CONFIG_MLV_DEMUXER 0
+#define CONFIG_MM_DEMUXER 0
+#define CONFIG_MMF_DEMUXER 0
+#define CONFIG_MODS_DEMUXER 0
+#define CONFIG_MOFLEX_DEMUXER 0
+#define CONFIG_MOV_DEMUXER 0
+#define CONFIG_MP3_DEMUXER 0
+#define CONFIG_MPC_DEMUXER 0
+#define CONFIG_MPC8_DEMUXER 0
+#define CONFIG_MPEGPS_DEMUXER 0
+#define CONFIG_MPEGTS_DEMUXER 0
+#define CONFIG_MPEGTSRAW_DEMUXER 0
+#define CONFIG_MPEGVIDEO_DEMUXER 0
+#define CONFIG_MPJPEG_DEMUXER 0
+#define CONFIG_MPL2_DEMUXER 0
+#define CONFIG_MPSUB_DEMUXER 0
+#define CONFIG_MSF_DEMUXER 0
+#define CONFIG_MSNWC_TCP_DEMUXER 0
+#define CONFIG_MSP_DEMUXER 0
+#define CONFIG_MTAF_DEMUXER 0
+#define CONFIG_MTV_DEMUXER 0
+#define CONFIG_MUSX_DEMUXER 0
+#define CONFIG_MV_DEMUXER 0
+#define CONFIG_MVI_DEMUXER 0
+#define CONFIG_MXF_DEMUXER 0
+#define CONFIG_MXG_DEMUXER 0
+#define CONFIG_NC_DEMUXER 0
+#define CONFIG_NISTSPHERE_DEMUXER 0
+#define CONFIG_NSP_DEMUXER 0
+#define CONFIG_NSV_DEMUXER 0
+#define CONFIG_NUT_DEMUXER 0
+#define CONFIG_NUV_DEMUXER 0
+#define CONFIG_OBU_DEMUXER 0
+#define CONFIG_OGG_DEMUXER 0
+#define CONFIG_OMA_DEMUXER 0
+#define CONFIG_PAF_DEMUXER 0
+#define CONFIG_PCM_ALAW_DEMUXER 0
+#define CONFIG_PCM_MULAW_DEMUXER 0
+#define CONFIG_PCM_VIDC_DEMUXER 0
+#define CONFIG_PCM_F64BE_DEMUXER 0
+#define CONFIG_PCM_F64LE_DEMUXER 0
+#define CONFIG_PCM_F32BE_DEMUXER 0
+#define CONFIG_PCM_F32LE_DEMUXER 0
+#define CONFIG_PCM_S32BE_DEMUXER 0
+#define CONFIG_PCM_S32LE_DEMUXER 0
+#define CONFIG_PCM_S24BE_DEMUXER 0
+#define CONFIG_PCM_S24LE_DEMUXER 0
+#define CONFIG_PCM_S16BE_DEMUXER 0
+#define CONFIG_PCM_S16LE_DEMUXER 0
+#define CONFIG_PCM_S8_DEMUXER 0
+#define CONFIG_PCM_U32BE_DEMUXER 0
+#define CONFIG_PCM_U32LE_DEMUXER 0
+#define CONFIG_PCM_U24BE_DEMUXER 0
+#define CONFIG_PCM_U24LE_DEMUXER 0
+#define CONFIG_PCM_U16BE_DEMUXER 0
+#define CONFIG_PCM_U16LE_DEMUXER 0
+#define CONFIG_PCM_U8_DEMUXER 0
+#define CONFIG_PJS_DEMUXER 0
+#define CONFIG_PMP_DEMUXER 0
+#define CONFIG_PP_BNK_DEMUXER 0
+#define CONFIG_PVA_DEMUXER 0
+#define CONFIG_PVF_DEMUXER 0
+#define CONFIG_QCP_DEMUXER 0
+#define CONFIG_R3D_DEMUXER 0
+#define CONFIG_RAWVIDEO_DEMUXER 0
+#define CONFIG_REALTEXT_DEMUXER 0
+#define CONFIG_REDSPARK_DEMUXER 0
+#define CONFIG_RL2_DEMUXER 0
+#define CONFIG_RM_DEMUXER 0
+#define CONFIG_ROQ_DEMUXER 0
+#define CONFIG_RPL_DEMUXER 0
+#define CONFIG_RSD_DEMUXER 0
+#define CONFIG_RSO_DEMUXER 0
+#define CONFIG_RTP_DEMUXER 0
+#define CONFIG_RTSP_DEMUXER 0
+#define CONFIG_S337M_DEMUXER 0
+#define CONFIG_SAMI_DEMUXER 0
+#define CONFIG_SAP_DEMUXER 0
+#define CONFIG_SBC_DEMUXER 0
+#define CONFIG_SBG_DEMUXER 0
+#define CONFIG_SCC_DEMUXER 0
+#define CONFIG_SCD_DEMUXER 0
+#define CONFIG_SDP_DEMUXER 0
+#define CONFIG_SDR2_DEMUXER 0
+#define CONFIG_SDS_DEMUXER 0
+#define CONFIG_SDX_DEMUXER 0
+#define CONFIG_SEGAFILM_DEMUXER 0
+#define CONFIG_SER_DEMUXER 0
+#define CONFIG_SGA_DEMUXER 0
+#define CONFIG_SHORTEN_DEMUXER 0
+#define CONFIG_SIFF_DEMUXER 0
+#define CONFIG_SIMBIOSIS_IMX_DEMUXER 0
+#define CONFIG_SLN_DEMUXER 0
+#define CONFIG_SMACKER_DEMUXER 0
+#define CONFIG_SMJPEG_DEMUXER 0
+#define CONFIG_SMUSH_DEMUXER 0
+#define CONFIG_SOL_DEMUXER 0
+#define CONFIG_SOX_DEMUXER 0
+#define CONFIG_SPDIF_DEMUXER 0
+#define CONFIG_SRT_DEMUXER 0
+#define CONFIG_STR_DEMUXER 0
+#define CONFIG_STL_DEMUXER 0
+#define CONFIG_SUBVIEWER1_DEMUXER 0
+#define CONFIG_SUBVIEWER_DEMUXER 0
+#define CONFIG_SUP_DEMUXER 0
+#define CONFIG_SVAG_DEMUXER 0
+#define CONFIG_SVS_DEMUXER 0
+#define CONFIG_SWF_DEMUXER 0
+#define CONFIG_TAK_DEMUXER 0
+#define CONFIG_TEDCAPTIONS_DEMUXER 0
+#define CONFIG_THP_DEMUXER 0
+#define CONFIG_THREEDOSTR_DEMUXER 0
+#define CONFIG_TIERTEXSEQ_DEMUXER 0
+#define CONFIG_TMV_DEMUXER 0
+#define CONFIG_TRUEHD_DEMUXER 0
+#define CONFIG_TTA_DEMUXER 0
+#define CONFIG_TXD_DEMUXER 0
+#define CONFIG_TTY_DEMUXER 0
+#define CONFIG_TY_DEMUXER 0
+#define CONFIG_V210_DEMUXER 0
+#define CONFIG_V210X_DEMUXER 0
+#define CONFIG_VAG_DEMUXER 0
+#define CONFIG_VC1_DEMUXER 0
+#define CONFIG_VC1T_DEMUXER 0
+#define CONFIG_VIVIDAS_DEMUXER 0
+#define CONFIG_VIVO_DEMUXER 0
+#define CONFIG_VMD_DEMUXER 0
+#define CONFIG_VOBSUB_DEMUXER 0
+#define CONFIG_VOC_DEMUXER 0
+#define CONFIG_VPK_DEMUXER 0
+#define CONFIG_VPLAYER_DEMUXER 0
+#define CONFIG_VQF_DEMUXER 0
+#define CONFIG_W64_DEMUXER 0
+#define CONFIG_WAV_DEMUXER 0
+#define CONFIG_WC3_DEMUXER 0
+#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0
+#define CONFIG_WEBVTT_DEMUXER 0
+#define CONFIG_WSAUD_DEMUXER 0
+#define CONFIG_WSD_DEMUXER 0
+#define CONFIG_WSVQA_DEMUXER 0
+#define CONFIG_WTV_DEMUXER 0
+#define CONFIG_WVE_DEMUXER 0
+#define CONFIG_WV_DEMUXER 0
+#define CONFIG_XA_DEMUXER 0
+#define CONFIG_XBIN_DEMUXER 0
+#define CONFIG_XMV_DEMUXER 0
+#define CONFIG_XVAG_DEMUXER 0
+#define CONFIG_XWMA_DEMUXER 0
+#define CONFIG_YOP_DEMUXER 0
+#define CONFIG_YUV4MPEGPIPE_DEMUXER 0
+#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_CRI_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_GEM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_GIF_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_JPEGXL_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PGX_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PHOTOCD_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_QOI_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_SVG_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_VBN_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_XBM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_XWD_PIPE_DEMUXER 0
+#define CONFIG_LIBGME_DEMUXER 0
+#define CONFIG_LIBMODPLUG_DEMUXER 0
+#define CONFIG_LIBOPENMPT_DEMUXER 0
+#define CONFIG_VAPOURSYNTH_DEMUXER 0
+#define CONFIG_A64_MUXER 0
+#define CONFIG_AC3_MUXER 0
+#define CONFIG_ADTS_MUXER 0
+#define CONFIG_ADX_MUXER 0
+#define CONFIG_AIFF_MUXER 0
+#define CONFIG_ALP_MUXER 0
+#define CONFIG_AMR_MUXER 0
+#define CONFIG_AMV_MUXER 0
+#define CONFIG_APM_MUXER 0
+#define CONFIG_APNG_MUXER 0
+#define CONFIG_APTX_MUXER 0
+#define CONFIG_APTX_HD_MUXER 0
+#define CONFIG_ARGO_ASF_MUXER 0
+#define CONFIG_ARGO_CVG_MUXER 0
+#define CONFIG_ASF_MUXER 0
+#define CONFIG_ASS_MUXER 0
+#define CONFIG_AST_MUXER 0
+#define CONFIG_ASF_STREAM_MUXER 0
+#define CONFIG_AU_MUXER 0
+#define CONFIG_AVI_MUXER 0
+#define CONFIG_AVIF_MUXER 0
+#define CONFIG_AVM2_MUXER 0
+#define CONFIG_AVS2_MUXER 0
+#define CONFIG_AVS3_MUXER 0
+#define CONFIG_BIT_MUXER 0
+#define CONFIG_CAF_MUXER 0
+#define CONFIG_CAVSVIDEO_MUXER 0
+#define CONFIG_CODEC2_MUXER 0
+#define CONFIG_CODEC2RAW_MUXER 0
+#define CONFIG_CRC_MUXER 0
+#define CONFIG_DASH_MUXER 0
+#define CONFIG_DATA_MUXER 0
+#define CONFIG_DAUD_MUXER 0
+#define CONFIG_DFPWM_MUXER 0
+#define CONFIG_DIRAC_MUXER 0
+#define CONFIG_DNXHD_MUXER 0
+#define CONFIG_DTS_MUXER 0
+#define CONFIG_DV_MUXER 0
+#define CONFIG_EAC3_MUXER 0
+#define CONFIG_F4V_MUXER 0
+#define CONFIG_FFMETADATA_MUXER 0
+#define CONFIG_FIFO_MUXER 0
+#define CONFIG_FIFO_TEST_MUXER 0
+#define CONFIG_FILMSTRIP_MUXER 0
+#define CONFIG_FITS_MUXER 0
+#define CONFIG_FLAC_MUXER 0
+#define CONFIG_FLV_MUXER 0
+#define CONFIG_FRAMECRC_MUXER 0
+#define CONFIG_FRAMEHASH_MUXER 0
+#define CONFIG_FRAMEMD5_MUXER 0
+#define CONFIG_G722_MUXER 0
+#define CONFIG_G723_1_MUXER 0
+#define CONFIG_G726_MUXER 0
+#define CONFIG_G726LE_MUXER 0
+#define CONFIG_GIF_MUXER 0
+#define CONFIG_GSM_MUXER 0
+#define CONFIG_GXF_MUXER 0
+#define CONFIG_H261_MUXER 0
+#define CONFIG_H263_MUXER 0
+#define CONFIG_H264_MUXER 0
+#define CONFIG_HASH_MUXER 0
+#define CONFIG_HDS_MUXER 0
+#define CONFIG_HEVC_MUXER 0
+#define CONFIG_HLS_MUXER 0
+#define CONFIG_ICO_MUXER 0
+#define CONFIG_ILBC_MUXER 0
+#define CONFIG_IMAGE2_MUXER 0
+#define CONFIG_IMAGE2PIPE_MUXER 0
+#define CONFIG_IPOD_MUXER 0
+#define CONFIG_IRCAM_MUXER 0
+#define CONFIG_ISMV_MUXER 0
+#define CONFIG_IVF_MUXER 0
+#define CONFIG_JACOSUB_MUXER 0
+#define CONFIG_KVAG_MUXER 0
+#define CONFIG_LATM_MUXER 0
+#define CONFIG_LRC_MUXER 0
+#define CONFIG_M4V_MUXER 0
+#define CONFIG_MD5_MUXER 0
+#define CONFIG_MATROSKA_MUXER 0
+#define CONFIG_MATROSKA_AUDIO_MUXER 0
+#define CONFIG_MICRODVD_MUXER 0
+#define CONFIG_MJPEG_MUXER 0
+#define CONFIG_MLP_MUXER 0
+#define CONFIG_MMF_MUXER 0
+#define CONFIG_MOV_MUXER 0
+#define CONFIG_MP2_MUXER 0
+#define CONFIG_MP3_MUXER 0
+#define CONFIG_MP4_MUXER 0
+#define CONFIG_MPEG1SYSTEM_MUXER 0
+#define CONFIG_MPEG1VCD_MUXER 0
+#define CONFIG_MPEG1VIDEO_MUXER 0
+#define CONFIG_MPEG2DVD_MUXER 0
+#define CONFIG_MPEG2SVCD_MUXER 0
+#define CONFIG_MPEG2VIDEO_MUXER 0
+#define CONFIG_MPEG2VOB_MUXER 0
+#define CONFIG_MPEGTS_MUXER 0
+#define CONFIG_MPJPEG_MUXER 0
+#define CONFIG_MXF_MUXER 0
+#define CONFIG_MXF_D10_MUXER 0
+#define CONFIG_MXF_OPATOM_MUXER 0
+#define CONFIG_NULL_MUXER 0
+#define CONFIG_NUT_MUXER 0
+#define CONFIG_OBU_MUXER 0
+#define CONFIG_OGA_MUXER 0
+#define CONFIG_OGG_MUXER 0
+#define CONFIG_OGV_MUXER 0
+#define CONFIG_OMA_MUXER 0
+#define CONFIG_OPUS_MUXER 0
+#define CONFIG_PCM_ALAW_MUXER 0
+#define CONFIG_PCM_MULAW_MUXER 0
+#define CONFIG_PCM_VIDC_MUXER 0
+#define CONFIG_PCM_F64BE_MUXER 0
+#define CONFIG_PCM_F64LE_MUXER 0
+#define CONFIG_PCM_F32BE_MUXER 0
+#define CONFIG_PCM_F32LE_MUXER 0
+#define CONFIG_PCM_S32BE_MUXER 0
+#define CONFIG_PCM_S32LE_MUXER 0
+#define CONFIG_PCM_S24BE_MUXER 0
+#define CONFIG_PCM_S24LE_MUXER 0
+#define CONFIG_PCM_S16BE_MUXER 0
+#define CONFIG_PCM_S16LE_MUXER 0
+#define CONFIG_PCM_S8_MUXER 0
+#define CONFIG_PCM_U32BE_MUXER 0
+#define CONFIG_PCM_U32LE_MUXER 0
+#define CONFIG_PCM_U24BE_MUXER 0
+#define CONFIG_PCM_U24LE_MUXER 0
+#define CONFIG_PCM_U16BE_MUXER 0
+#define CONFIG_PCM_U16LE_MUXER 0
+#define CONFIG_PCM_U8_MUXER 0
+#define CONFIG_PSP_MUXER 0
+#define CONFIG_RAWVIDEO_MUXER 0
+#define CONFIG_RM_MUXER 0
+#define CONFIG_ROQ_MUXER 0
+#define CONFIG_RSO_MUXER 0
+#define CONFIG_RTP_MUXER 0
+#define CONFIG_RTP_MPEGTS_MUXER 0
+#define CONFIG_RTSP_MUXER 0
+#define CONFIG_SAP_MUXER 0
+#define CONFIG_SBC_MUXER 0
+#define CONFIG_SCC_MUXER 0
+#define CONFIG_SEGAFILM_MUXER 0
+#define CONFIG_SEGMENT_MUXER 0
+#define CONFIG_STREAM_SEGMENT_MUXER 0
+#define CONFIG_SMJPEG_MUXER 0
+#define CONFIG_SMOOTHSTREAMING_MUXER 0
+#define CONFIG_SOX_MUXER 0
+#define CONFIG_SPX_MUXER 0
+#define CONFIG_SPDIF_MUXER 0
+#define CONFIG_SRT_MUXER 0
+#define CONFIG_STREAMHASH_MUXER 0
+#define CONFIG_SUP_MUXER 0
+#define CONFIG_SWF_MUXER 0
+#define CONFIG_TEE_MUXER 0
+#define CONFIG_TG2_MUXER 0
+#define CONFIG_TGP_MUXER 0
+#define CONFIG_MKVTIMESTAMP_V2_MUXER 0
+#define CONFIG_TRUEHD_MUXER 0
+#define CONFIG_TTA_MUXER 0
+#define CONFIG_TTML_MUXER 0
+#define CONFIG_UNCODEDFRAMECRC_MUXER 0
+#define CONFIG_VC1_MUXER 0
+#define CONFIG_VC1T_MUXER 0
+#define CONFIG_VOC_MUXER 0
+#define CONFIG_W64_MUXER 0
+#define CONFIG_WAV_MUXER 0
+#define CONFIG_WEBM_MUXER 0
+#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0
+#define CONFIG_WEBM_CHUNK_MUXER 0
+#define CONFIG_WEBP_MUXER 0
+#define CONFIG_WEBVTT_MUXER 0
+#define CONFIG_WSAUD_MUXER 0
+#define CONFIG_WTV_MUXER 0
+#define CONFIG_WV_MUXER 0
+#define CONFIG_YUV4MPEGPIPE_MUXER 0
+#define CONFIG_CHROMAPRINT_MUXER 0
+#define CONFIG_ASYNC_PROTOCOL 0
+#define CONFIG_BLURAY_PROTOCOL 0
+#define CONFIG_CACHE_PROTOCOL 0
+#define CONFIG_CONCAT_PROTOCOL 0
+#define CONFIG_CONCATF_PROTOCOL 0
+#define CONFIG_CRYPTO_PROTOCOL 0
+#define CONFIG_DATA_PROTOCOL 0
+#define CONFIG_FFRTMPCRYPT_PROTOCOL 0
+#define CONFIG_FFRTMPHTTP_PROTOCOL 0
+#define CONFIG_FILE_PROTOCOL 0
+#define CONFIG_FTP_PROTOCOL 0
+#define CONFIG_GOPHER_PROTOCOL 0
+#define CONFIG_GOPHERS_PROTOCOL 0
+#define CONFIG_HLS_PROTOCOL 0
+#define CONFIG_HTTP_PROTOCOL 0
+#define CONFIG_HTTPPROXY_PROTOCOL 0
+#define CONFIG_HTTPS_PROTOCOL 0
+#define CONFIG_ICECAST_PROTOCOL 0
+#define CONFIG_MMSH_PROTOCOL 0
+#define CONFIG_MMST_PROTOCOL 0
+#define CONFIG_MD5_PROTOCOL 0
+#define CONFIG_PIPE_PROTOCOL 0
+#define CONFIG_PROMPEG_PROTOCOL 0
+#define CONFIG_RTMP_PROTOCOL 0
+#define CONFIG_RTMPE_PROTOCOL 0
+#define CONFIG_RTMPS_PROTOCOL 0
+#define CONFIG_RTMPT_PROTOCOL 0
+#define CONFIG_RTMPTE_PROTOCOL 0
+#define CONFIG_RTMPTS_PROTOCOL 0
+#define CONFIG_RTP_PROTOCOL 0
+#define CONFIG_SCTP_PROTOCOL 0
+#define CONFIG_SRTP_PROTOCOL 0
+#define CONFIG_SUBFILE_PROTOCOL 0
+#define CONFIG_TEE_PROTOCOL 0
+#define CONFIG_TCP_PROTOCOL 0
+#define CONFIG_TLS_PROTOCOL 0
+#define CONFIG_UDP_PROTOCOL 0
+#define CONFIG_UDPLITE_PROTOCOL 0
+#define CONFIG_UNIX_PROTOCOL 0
+#define CONFIG_LIBAMQP_PROTOCOL 0
+#define CONFIG_LIBRIST_PROTOCOL 0
+#define CONFIG_LIBRTMP_PROTOCOL 0
+#define CONFIG_LIBRTMPE_PROTOCOL 0
+#define CONFIG_LIBRTMPS_PROTOCOL 0
+#define CONFIG_LIBRTMPT_PROTOCOL 0
+#define CONFIG_LIBRTMPTE_PROTOCOL 0
+#define CONFIG_LIBSRT_PROTOCOL 0
+#define CONFIG_LIBSSH_PROTOCOL 0
+#define CONFIG_LIBSMBCLIENT_PROTOCOL 0
+#define CONFIG_LIBZMQ_PROTOCOL 0
+#define CONFIG_IPFS_PROTOCOL 0
+#define CONFIG_IPNS_PROTOCOL 0
+#endif /* FFMPEG_CONFIG_COMPONENTS_H */
diff --git a/media/ffvpx/config_components_audio_video.h b/media/ffvpx/config_components_audio_video.h
new file mode 100644
index 0000000000..31547214e0
--- /dev/null
+++ b/media/ffvpx/config_components_audio_video.h
@@ -0,0 +1,2091 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_COMPONENTS_H
+#define FFMPEG_CONFIG_COMPONENTS_H
+#define CONFIG_AAC_ADTSTOASC_BSF 0
+#define CONFIG_AV1_FRAME_MERGE_BSF 0
+#define CONFIG_AV1_FRAME_SPLIT_BSF 0
+#define CONFIG_AV1_METADATA_BSF 0
+#define CONFIG_CHOMP_BSF 0
+#define CONFIG_DUMP_EXTRADATA_BSF 0
+#define CONFIG_DCA_CORE_BSF 0
+#define CONFIG_DV_ERROR_MARKER_BSF 0
+#define CONFIG_EAC3_CORE_BSF 0
+#define CONFIG_EXTRACT_EXTRADATA_BSF 0
+#define CONFIG_FILTER_UNITS_BSF 0
+#define CONFIG_H264_METADATA_BSF 0
+#define CONFIG_H264_MP4TOANNEXB_BSF 0
+#define CONFIG_H264_REDUNDANT_PPS_BSF 0
+#define CONFIG_HAPQA_EXTRACT_BSF 0
+#define CONFIG_HEVC_METADATA_BSF 0
+#define CONFIG_HEVC_MP4TOANNEXB_BSF 0
+#define CONFIG_IMX_DUMP_HEADER_BSF 0
+#define CONFIG_MJPEG2JPEG_BSF 0
+#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0
+#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0
+#define CONFIG_MPEG2_METADATA_BSF 0
+#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0
+#define CONFIG_MOV2TEXTSUB_BSF 0
+#define CONFIG_NOISE_BSF 0
+#define CONFIG_NULL_BSF 0
+#define CONFIG_OPUS_METADATA_BSF 0
+#define CONFIG_PCM_RECHUNK_BSF 0
+#define CONFIG_PGS_FRAME_MERGE_BSF 0
+#define CONFIG_PRORES_METADATA_BSF 0
+#define CONFIG_REMOVE_EXTRADATA_BSF 0
+#define CONFIG_SETTS_BSF 0
+#define CONFIG_TEXT2MOVSUB_BSF 0
+#define CONFIG_TRACE_HEADERS_BSF 0
+#define CONFIG_TRUEHD_CORE_BSF 0
+#define CONFIG_VP9_METADATA_BSF 0
+#define CONFIG_VP9_RAW_REORDER_BSF 0
+#define CONFIG_VP9_SUPERFRAME_BSF 0
+#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 1
+#define CONFIG_AASC_DECODER 0
+#define CONFIG_AIC_DECODER 0
+#define CONFIG_ALIAS_PIX_DECODER 0
+#define CONFIG_AGM_DECODER 0
+#define CONFIG_AMV_DECODER 0
+#define CONFIG_ANM_DECODER 0
+#define CONFIG_ANSI_DECODER 0
+#define CONFIG_APNG_DECODER 0
+#define CONFIG_ARBC_DECODER 0
+#define CONFIG_ARGO_DECODER 0
+#define CONFIG_ASV1_DECODER 0
+#define CONFIG_ASV2_DECODER 0
+#define CONFIG_AURA_DECODER 0
+#define CONFIG_AURA2_DECODER 0
+#define CONFIG_AVRP_DECODER 0
+#define CONFIG_AVRN_DECODER 0
+#define CONFIG_AVS_DECODER 0
+#define CONFIG_AVUI_DECODER 0
+#define CONFIG_AYUV_DECODER 0
+#define CONFIG_BETHSOFTVID_DECODER 0
+#define CONFIG_BFI_DECODER 0
+#define CONFIG_BINK_DECODER 0
+#define CONFIG_BITPACKED_DECODER 0
+#define CONFIG_BMP_DECODER 0
+#define CONFIG_BMV_VIDEO_DECODER 0
+#define CONFIG_BRENDER_PIX_DECODER 0
+#define CONFIG_C93_DECODER 0
+#define CONFIG_CAVS_DECODER 0
+#define CONFIG_CDGRAPHICS_DECODER 0
+#define CONFIG_CDTOONS_DECODER 0
+#define CONFIG_CDXL_DECODER 0
+#define CONFIG_CFHD_DECODER 0
+#define CONFIG_CINEPAK_DECODER 0
+#define CONFIG_CLEARVIDEO_DECODER 0
+#define CONFIG_CLJR_DECODER 0
+#define CONFIG_CLLC_DECODER 0
+#define CONFIG_COMFORTNOISE_DECODER 0
+#define CONFIG_CPIA_DECODER 0
+#define CONFIG_CRI_DECODER 0
+#define CONFIG_CSCD_DECODER 0
+#define CONFIG_CYUV_DECODER 0
+#define CONFIG_DDS_DECODER 0
+#define CONFIG_DFA_DECODER 0
+#define CONFIG_DIRAC_DECODER 0
+#define CONFIG_DNXHD_DECODER 0
+#define CONFIG_DPX_DECODER 0
+#define CONFIG_DSICINVIDEO_DECODER 0
+#define CONFIG_DVAUDIO_DECODER 0
+#define CONFIG_DVVIDEO_DECODER 0
+#define CONFIG_DXA_DECODER 0
+#define CONFIG_DXTORY_DECODER 0
+#define CONFIG_DXV_DECODER 0
+#define CONFIG_EACMV_DECODER 0
+#define CONFIG_EAMAD_DECODER 0
+#define CONFIG_EATGQ_DECODER 0
+#define CONFIG_EATGV_DECODER 0
+#define CONFIG_EATQI_DECODER 0
+#define CONFIG_EIGHTBPS_DECODER 0
+#define CONFIG_EIGHTSVX_EXP_DECODER 0
+#define CONFIG_EIGHTSVX_FIB_DECODER 0
+#define CONFIG_ESCAPE124_DECODER 0
+#define CONFIG_ESCAPE130_DECODER 0
+#define CONFIG_EXR_DECODER 0
+#define CONFIG_FFV1_DECODER 0
+#define CONFIG_FFVHUFF_DECODER 0
+#define CONFIG_FIC_DECODER 0
+#define CONFIG_FITS_DECODER 0
+#define CONFIG_FLASHSV_DECODER 0
+#define CONFIG_FLASHSV2_DECODER 0
+#define CONFIG_FLIC_DECODER 0
+#define CONFIG_FLV_DECODER 0
+#define CONFIG_FMVC_DECODER 0
+#define CONFIG_FOURXM_DECODER 0
+#define CONFIG_FRAPS_DECODER 0
+#define CONFIG_FRWU_DECODER 0
+#define CONFIG_G2M_DECODER 0
+#define CONFIG_GDV_DECODER 0
+#define CONFIG_GEM_DECODER 0
+#define CONFIG_GIF_DECODER 0
+#define CONFIG_H261_DECODER 0
+#define CONFIG_H263_DECODER 0
+#define CONFIG_H263I_DECODER 0
+#define CONFIG_H263P_DECODER 0
+#define CONFIG_H263_V4L2M2M_DECODER 0
+#define CONFIG_H264_DECODER 0
+#define CONFIG_H264_CRYSTALHD_DECODER 0
+#define CONFIG_H264_V4L2M2M_DECODER 0
+#define CONFIG_H264_MEDIACODEC_DECODER 0
+#define CONFIG_H264_MMAL_DECODER 0
+#define CONFIG_H264_QSV_DECODER 0
+#define CONFIG_H264_RKMPP_DECODER 0
+#define CONFIG_HAP_DECODER 0
+#define CONFIG_HEVC_DECODER 0
+#define CONFIG_HEVC_QSV_DECODER 0
+#define CONFIG_HEVC_RKMPP_DECODER 0
+#define CONFIG_HEVC_V4L2M2M_DECODER 0
+#define CONFIG_HNM4_VIDEO_DECODER 0
+#define CONFIG_HQ_HQA_DECODER 0
+#define CONFIG_HQX_DECODER 0
+#define CONFIG_HUFFYUV_DECODER 0
+#define CONFIG_HYMT_DECODER 0
+#define CONFIG_IDCIN_DECODER 0
+#define CONFIG_IFF_ILBM_DECODER 0
+#define CONFIG_IMM4_DECODER 0
+#define CONFIG_IMM5_DECODER 0
+#define CONFIG_INDEO2_DECODER 0
+#define CONFIG_INDEO3_DECODER 0
+#define CONFIG_INDEO4_DECODER 0
+#define CONFIG_INDEO5_DECODER 0
+#define CONFIG_INTERPLAY_VIDEO_DECODER 0
+#define CONFIG_IPU_DECODER 0
+#define CONFIG_JPEG2000_DECODER 0
+#define CONFIG_JPEGLS_DECODER 0
+#define CONFIG_JV_DECODER 0
+#define CONFIG_KGV1_DECODER 0
+#define CONFIG_KMVC_DECODER 0
+#define CONFIG_LAGARITH_DECODER 0
+#define CONFIG_LOCO_DECODER 0
+#define CONFIG_LSCR_DECODER 0
+#define CONFIG_M101_DECODER 0
+#define CONFIG_MAGICYUV_DECODER 0
+#define CONFIG_MDEC_DECODER 0
+#define CONFIG_MIMIC_DECODER 0
+#define CONFIG_MJPEG_DECODER 0
+#define CONFIG_MJPEGB_DECODER 0
+#define CONFIG_MMVIDEO_DECODER 0
+#define CONFIG_MOBICLIP_DECODER 0
+#define CONFIG_MOTIONPIXELS_DECODER 0
+#define CONFIG_MPEG1VIDEO_DECODER 0
+#define CONFIG_MPEG2VIDEO_DECODER 0
+#define CONFIG_MPEG4_DECODER 0
+#define CONFIG_MPEG4_CRYSTALHD_DECODER 0
+#define CONFIG_MPEG4_V4L2M2M_DECODER 0
+#define CONFIG_MPEG4_MMAL_DECODER 0
+#define CONFIG_MPEGVIDEO_DECODER 0
+#define CONFIG_MPEG1_V4L2M2M_DECODER 0
+#define CONFIG_MPEG2_MMAL_DECODER 0
+#define CONFIG_MPEG2_CRYSTALHD_DECODER 0
+#define CONFIG_MPEG2_V4L2M2M_DECODER 0
+#define CONFIG_MPEG2_QSV_DECODER 0
+#define CONFIG_MPEG2_MEDIACODEC_DECODER 0
+#define CONFIG_MSA1_DECODER 0
+#define CONFIG_MSCC_DECODER 0
+#define CONFIG_MSMPEG4V1_DECODER 0
+#define CONFIG_MSMPEG4V2_DECODER 0
+#define CONFIG_MSMPEG4V3_DECODER 0
+#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0
+#define CONFIG_MSP2_DECODER 0
+#define CONFIG_MSRLE_DECODER 0
+#define CONFIG_MSS1_DECODER 0
+#define CONFIG_MSS2_DECODER 0
+#define CONFIG_MSVIDEO1_DECODER 0
+#define CONFIG_MSZH_DECODER 0
+#define CONFIG_MTS2_DECODER 0
+#define CONFIG_MV30_DECODER 0
+#define CONFIG_MVC1_DECODER 0
+#define CONFIG_MVC2_DECODER 0
+#define CONFIG_MVDV_DECODER 0
+#define CONFIG_MVHA_DECODER 0
+#define CONFIG_MWSC_DECODER 0
+#define CONFIG_MXPEG_DECODER 0
+#define CONFIG_NOTCHLC_DECODER 0
+#define CONFIG_NUV_DECODER 0
+#define CONFIG_PAF_VIDEO_DECODER 0
+#define CONFIG_PAM_DECODER 0
+#define CONFIG_PBM_DECODER 0
+#define CONFIG_PCX_DECODER 0
+#define CONFIG_PFM_DECODER 0
+#define CONFIG_PGM_DECODER 0
+#define CONFIG_PGMYUV_DECODER 0
+#define CONFIG_PGX_DECODER 0
+#define CONFIG_PHOTOCD_DECODER 0
+#define CONFIG_PICTOR_DECODER 0
+#define CONFIG_PIXLET_DECODER 0
+#define CONFIG_PNG_DECODER 0
+#define CONFIG_PPM_DECODER 0
+#define CONFIG_PRORES_DECODER 0
+#define CONFIG_PROSUMER_DECODER 0
+#define CONFIG_PSD_DECODER 0
+#define CONFIG_PTX_DECODER 0
+#define CONFIG_QDRAW_DECODER 0
+#define CONFIG_QOI_DECODER 0
+#define CONFIG_QPEG_DECODER 0
+#define CONFIG_QTRLE_DECODER 0
+#define CONFIG_R10K_DECODER 0
+#define CONFIG_R210_DECODER 0
+#define CONFIG_RASC_DECODER 0
+#define CONFIG_RAWVIDEO_DECODER 0
+#define CONFIG_RL2_DECODER 0
+#define CONFIG_ROQ_DECODER 0
+#define CONFIG_RPZA_DECODER 0
+#define CONFIG_RSCC_DECODER 0
+#define CONFIG_RV10_DECODER 0
+#define CONFIG_RV20_DECODER 0
+#define CONFIG_RV30_DECODER 0
+#define CONFIG_RV40_DECODER 0
+#define CONFIG_S302M_DECODER 0
+#define CONFIG_SANM_DECODER 0
+#define CONFIG_SCPR_DECODER 0
+#define CONFIG_SCREENPRESSO_DECODER 0
+#define CONFIG_SGA_DECODER 0
+#define CONFIG_SGI_DECODER 0
+#define CONFIG_SGIRLE_DECODER 0
+#define CONFIG_SHEERVIDEO_DECODER 0
+#define CONFIG_SIMBIOSIS_IMX_DECODER 0
+#define CONFIG_SMACKER_DECODER 0
+#define CONFIG_SMC_DECODER 0
+#define CONFIG_SMVJPEG_DECODER 0
+#define CONFIG_SNOW_DECODER 0
+#define CONFIG_SP5X_DECODER 0
+#define CONFIG_SPEEDHQ_DECODER 0
+#define CONFIG_SPEEX_DECODER 0
+#define CONFIG_SRGC_DECODER 0
+#define CONFIG_SUNRAST_DECODER 0
+#define CONFIG_SVQ1_DECODER 0
+#define CONFIG_SVQ3_DECODER 0
+#define CONFIG_TARGA_DECODER 0
+#define CONFIG_TARGA_Y216_DECODER 0
+#define CONFIG_TDSC_DECODER 0
+#define CONFIG_THEORA_DECODER 0
+#define CONFIG_THP_DECODER 0
+#define CONFIG_TIERTEXSEQVIDEO_DECODER 0
+#define CONFIG_TIFF_DECODER 0
+#define CONFIG_TMV_DECODER 0
+#define CONFIG_TRUEMOTION1_DECODER 0
+#define CONFIG_TRUEMOTION2_DECODER 0
+#define CONFIG_TRUEMOTION2RT_DECODER 0
+#define CONFIG_TSCC_DECODER 0
+#define CONFIG_TSCC2_DECODER 0
+#define CONFIG_TXD_DECODER 0
+#define CONFIG_ULTI_DECODER 0
+#define CONFIG_UTVIDEO_DECODER 0
+#define CONFIG_V210_DECODER 0
+#define CONFIG_V210X_DECODER 0
+#define CONFIG_V308_DECODER 0
+#define CONFIG_V408_DECODER 0
+#define CONFIG_V410_DECODER 0
+#define CONFIG_VB_DECODER 0
+#define CONFIG_VBN_DECODER 0
+#define CONFIG_VBLE_DECODER 0
+#define CONFIG_VC1_DECODER 0
+#define CONFIG_VC1_CRYSTALHD_DECODER 0
+#define CONFIG_VC1IMAGE_DECODER 0
+#define CONFIG_VC1_MMAL_DECODER 0
+#define CONFIG_VC1_QSV_DECODER 0
+#define CONFIG_VC1_V4L2M2M_DECODER 0
+#define CONFIG_VCR1_DECODER 0
+#define CONFIG_VMDVIDEO_DECODER 0
+#define CONFIG_VMNC_DECODER 0
+#define CONFIG_VP3_DECODER 0
+#define CONFIG_VP4_DECODER 0
+#define CONFIG_VP5_DECODER 0
+#define CONFIG_VP6_DECODER 0
+#define CONFIG_VP6A_DECODER 0
+#define CONFIG_VP6F_DECODER 0
+#define CONFIG_VP7_DECODER 0
+#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP8_RKMPP_DECODER 0
+#define CONFIG_VP8_V4L2M2M_DECODER 0
+#define CONFIG_VP9_DECODER 1
+#define CONFIG_VP9_RKMPP_DECODER 0
+#define CONFIG_VP9_V4L2M2M_DECODER 0
+#define CONFIG_VQA_DECODER 0
+#define CONFIG_WEBP_DECODER 0
+#define CONFIG_WCMV_DECODER 0
+#define CONFIG_WRAPPED_AVFRAME_DECODER 0
+#define CONFIG_WMV1_DECODER 0
+#define CONFIG_WMV2_DECODER 0
+#define CONFIG_WMV3_DECODER 0
+#define CONFIG_WMV3_CRYSTALHD_DECODER 0
+#define CONFIG_WMV3IMAGE_DECODER 0
+#define CONFIG_WNV1_DECODER 0
+#define CONFIG_XAN_WC3_DECODER 0
+#define CONFIG_XAN_WC4_DECODER 0
+#define CONFIG_XBM_DECODER 0
+#define CONFIG_XFACE_DECODER 0
+#define CONFIG_XL_DECODER 0
+#define CONFIG_XPM_DECODER 0
+#define CONFIG_XWD_DECODER 0
+#define CONFIG_Y41P_DECODER 0
+#define CONFIG_YLC_DECODER 0
+#define CONFIG_YOP_DECODER 0
+#define CONFIG_YUV4_DECODER 0
+#define CONFIG_ZERO12V_DECODER 0
+#define CONFIG_ZEROCODEC_DECODER 0
+#define CONFIG_ZLIB_DECODER 0
+#define CONFIG_ZMBV_DECODER 0
+#define CONFIG_AAC_DECODER 0
+#define CONFIG_AAC_FIXED_DECODER 0
+#define CONFIG_AAC_LATM_DECODER 0
+#define CONFIG_AC3_DECODER 0
+#define CONFIG_AC3_FIXED_DECODER 0
+#define CONFIG_ACELP_KELVIN_DECODER 0
+#define CONFIG_ALAC_DECODER 0
+#define CONFIG_ALS_DECODER 0
+#define CONFIG_AMRNB_DECODER 0
+#define CONFIG_AMRWB_DECODER 0
+#define CONFIG_APE_DECODER 0
+#define CONFIG_APTX_DECODER 0
+#define CONFIG_APTX_HD_DECODER 0
+#define CONFIG_ATRAC1_DECODER 0
+#define CONFIG_ATRAC3_DECODER 0
+#define CONFIG_ATRAC3AL_DECODER 0
+#define CONFIG_ATRAC3P_DECODER 0
+#define CONFIG_ATRAC3PAL_DECODER 0
+#define CONFIG_ATRAC9_DECODER 0
+#define CONFIG_BINKAUDIO_DCT_DECODER 0
+#define CONFIG_BINKAUDIO_RDFT_DECODER 0
+#define CONFIG_BMV_AUDIO_DECODER 0
+#define CONFIG_COOK_DECODER 0
+#define CONFIG_DCA_DECODER 0
+#define CONFIG_DFPWM_DECODER 0
+#define CONFIG_DOLBY_E_DECODER 0
+#define CONFIG_DSD_LSBF_DECODER 0
+#define CONFIG_DSD_MSBF_DECODER 0
+#define CONFIG_DSD_LSBF_PLANAR_DECODER 0
+#define CONFIG_DSD_MSBF_PLANAR_DECODER 0
+#define CONFIG_DSICINAUDIO_DECODER 0
+#define CONFIG_DSS_SP_DECODER 0
+#define CONFIG_DST_DECODER 0
+#define CONFIG_EAC3_DECODER 0
+#define CONFIG_EVRC_DECODER 0
+#define CONFIG_FASTAUDIO_DECODER 0
+#define CONFIG_FFWAVESYNTH_DECODER 0
+#define CONFIG_FLAC_DECODER 1
+#define CONFIG_G723_1_DECODER 0
+#define CONFIG_G729_DECODER 0
+#define CONFIG_GSM_DECODER 0
+#define CONFIG_GSM_MS_DECODER 0
+#define CONFIG_HCA_DECODER 0
+#define CONFIG_HCOM_DECODER 0
+#define CONFIG_IAC_DECODER 0
+#define CONFIG_ILBC_DECODER 0
+#define CONFIG_IMC_DECODER 0
+#define CONFIG_INTERPLAY_ACM_DECODER 0
+#define CONFIG_MACE3_DECODER 0
+#define CONFIG_MACE6_DECODER 0
+#define CONFIG_METASOUND_DECODER 0
+#define CONFIG_MLP_DECODER 0
+#define CONFIG_MP1_DECODER 0
+#define CONFIG_MP1FLOAT_DECODER 0
+#define CONFIG_MP2_DECODER 0
+#define CONFIG_MP2FLOAT_DECODER 0
+#define CONFIG_MP3FLOAT_DECODER 0
+#define CONFIG_MP3_DECODER 1
+#define CONFIG_MP3ADUFLOAT_DECODER 0
+#define CONFIG_MP3ADU_DECODER 0
+#define CONFIG_MP3ON4FLOAT_DECODER 0
+#define CONFIG_MP3ON4_DECODER 0
+#define CONFIG_MPC7_DECODER 0
+#define CONFIG_MPC8_DECODER 0
+#define CONFIG_MSNSIREN_DECODER 0
+#define CONFIG_NELLYMOSER_DECODER 0
+#define CONFIG_ON2AVC_DECODER 0
+#define CONFIG_OPUS_DECODER 0
+#define CONFIG_PAF_AUDIO_DECODER 0
+#define CONFIG_QCELP_DECODER 0
+#define CONFIG_QDM2_DECODER 0
+#define CONFIG_QDMC_DECODER 0
+#define CONFIG_RA_144_DECODER 0
+#define CONFIG_RA_288_DECODER 0
+#define CONFIG_RALF_DECODER 0
+#define CONFIG_SBC_DECODER 0
+#define CONFIG_SHORTEN_DECODER 0
+#define CONFIG_SIPR_DECODER 0
+#define CONFIG_SIREN_DECODER 0
+#define CONFIG_SMACKAUD_DECODER 0
+#define CONFIG_SONIC_DECODER 0
+#define CONFIG_TAK_DECODER 0
+#define CONFIG_TRUEHD_DECODER 0
+#define CONFIG_TRUESPEECH_DECODER 0
+#define CONFIG_TTA_DECODER 0
+#define CONFIG_TWINVQ_DECODER 0
+#define CONFIG_VMDAUDIO_DECODER 0
+#define CONFIG_VORBIS_DECODER 0
+#define CONFIG_WAVPACK_DECODER 0
+#define CONFIG_WMALOSSLESS_DECODER 0
+#define CONFIG_WMAPRO_DECODER 0
+#define CONFIG_WMAV1_DECODER 0
+#define CONFIG_WMAV2_DECODER 0
+#define CONFIG_WMAVOICE_DECODER 0
+#define CONFIG_WS_SND1_DECODER 0
+#define CONFIG_XMA1_DECODER 0
+#define CONFIG_XMA2_DECODER 0
+#define CONFIG_PCM_ALAW_DECODER 0
+#define CONFIG_PCM_BLURAY_DECODER 0
+#define CONFIG_PCM_DVD_DECODER 0
+#define CONFIG_PCM_F16LE_DECODER 0
+#define CONFIG_PCM_F24LE_DECODER 0
+#define CONFIG_PCM_F32BE_DECODER 0
+#define CONFIG_PCM_F32LE_DECODER 0
+#define CONFIG_PCM_F64BE_DECODER 0
+#define CONFIG_PCM_F64LE_DECODER 0
+#define CONFIG_PCM_LXF_DECODER 0
+#define CONFIG_PCM_MULAW_DECODER 0
+#define CONFIG_PCM_S8_DECODER 0
+#define CONFIG_PCM_S8_PLANAR_DECODER 0
+#define CONFIG_PCM_S16BE_DECODER 0
+#define CONFIG_PCM_S16BE_PLANAR_DECODER 0
+#define CONFIG_PCM_S16LE_DECODER 0
+#define CONFIG_PCM_S16LE_PLANAR_DECODER 0
+#define CONFIG_PCM_S24BE_DECODER 0
+#define CONFIG_PCM_S24DAUD_DECODER 0
+#define CONFIG_PCM_S24LE_DECODER 0
+#define CONFIG_PCM_S24LE_PLANAR_DECODER 0
+#define CONFIG_PCM_S32BE_DECODER 0
+#define CONFIG_PCM_S32LE_DECODER 0
+#define CONFIG_PCM_S32LE_PLANAR_DECODER 0
+#define CONFIG_PCM_S64BE_DECODER 0
+#define CONFIG_PCM_S64LE_DECODER 0
+#define CONFIG_PCM_SGA_DECODER 0
+#define CONFIG_PCM_U8_DECODER 0
+#define CONFIG_PCM_U16BE_DECODER 0
+#define CONFIG_PCM_U16LE_DECODER 0
+#define CONFIG_PCM_U24BE_DECODER 0
+#define CONFIG_PCM_U24LE_DECODER 0
+#define CONFIG_PCM_U32BE_DECODER 0
+#define CONFIG_PCM_U32LE_DECODER 0
+#define CONFIG_PCM_VIDC_DECODER 0
+#define CONFIG_DERF_DPCM_DECODER 0
+#define CONFIG_GREMLIN_DPCM_DECODER 0
+#define CONFIG_INTERPLAY_DPCM_DECODER 0
+#define CONFIG_ROQ_DPCM_DECODER 0
+#define CONFIG_SDX2_DPCM_DECODER 0
+#define CONFIG_SOL_DPCM_DECODER 0
+#define CONFIG_XAN_DPCM_DECODER 0
+#define CONFIG_ADPCM_4XM_DECODER 0
+#define CONFIG_ADPCM_ADX_DECODER 0
+#define CONFIG_ADPCM_AFC_DECODER 0
+#define CONFIG_ADPCM_AGM_DECODER 0
+#define CONFIG_ADPCM_AICA_DECODER 0
+#define CONFIG_ADPCM_ARGO_DECODER 0
+#define CONFIG_ADPCM_CT_DECODER 0
+#define CONFIG_ADPCM_DTK_DECODER 0
+#define CONFIG_ADPCM_EA_DECODER 0
+#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0
+#define CONFIG_ADPCM_EA_R1_DECODER 0
+#define CONFIG_ADPCM_EA_R2_DECODER 0
+#define CONFIG_ADPCM_EA_R3_DECODER 0
+#define CONFIG_ADPCM_EA_XAS_DECODER 0
+#define CONFIG_ADPCM_G722_DECODER 0
+#define CONFIG_ADPCM_G726_DECODER 0
+#define CONFIG_ADPCM_G726LE_DECODER 0
+#define CONFIG_ADPCM_IMA_ACORN_DECODER 0
+#define CONFIG_ADPCM_IMA_AMV_DECODER 0
+#define CONFIG_ADPCM_IMA_ALP_DECODER 0
+#define CONFIG_ADPCM_IMA_APC_DECODER 0
+#define CONFIG_ADPCM_IMA_APM_DECODER 0
+#define CONFIG_ADPCM_IMA_CUNNING_DECODER 0
+#define CONFIG_ADPCM_IMA_DAT4_DECODER 0
+#define CONFIG_ADPCM_IMA_DK3_DECODER 0
+#define CONFIG_ADPCM_IMA_DK4_DECODER 0
+#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0
+#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0
+#define CONFIG_ADPCM_IMA_ISS_DECODER 0
+#define CONFIG_ADPCM_IMA_MOFLEX_DECODER 0
+#define CONFIG_ADPCM_IMA_MTF_DECODER 0
+#define CONFIG_ADPCM_IMA_OKI_DECODER 0
+#define CONFIG_ADPCM_IMA_QT_DECODER 0
+#define CONFIG_ADPCM_IMA_RAD_DECODER 0
+#define CONFIG_ADPCM_IMA_SSI_DECODER 0
+#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0
+#define CONFIG_ADPCM_IMA_WAV_DECODER 0
+#define CONFIG_ADPCM_IMA_WS_DECODER 0
+#define CONFIG_ADPCM_MS_DECODER 0
+#define CONFIG_ADPCM_MTAF_DECODER 0
+#define CONFIG_ADPCM_PSX_DECODER 0
+#define CONFIG_ADPCM_SBPRO_2_DECODER 0
+#define CONFIG_ADPCM_SBPRO_3_DECODER 0
+#define CONFIG_ADPCM_SBPRO_4_DECODER 0
+#define CONFIG_ADPCM_SWF_DECODER 0
+#define CONFIG_ADPCM_THP_DECODER 0
+#define CONFIG_ADPCM_THP_LE_DECODER 0
+#define CONFIG_ADPCM_VIMA_DECODER 0
+#define CONFIG_ADPCM_XA_DECODER 0
+#define CONFIG_ADPCM_YAMAHA_DECODER 0
+#define CONFIG_ADPCM_ZORK_DECODER 0
+#define CONFIG_SSA_DECODER 0
+#define CONFIG_ASS_DECODER 0
+#define CONFIG_CCAPTION_DECODER 0
+#define CONFIG_DVBSUB_DECODER 0
+#define CONFIG_DVDSUB_DECODER 0
+#define CONFIG_JACOSUB_DECODER 0
+#define CONFIG_MICRODVD_DECODER 0
+#define CONFIG_MOVTEXT_DECODER 0
+#define CONFIG_MPL2_DECODER 0
+#define CONFIG_PGSSUB_DECODER 0
+#define CONFIG_PJS_DECODER 0
+#define CONFIG_REALTEXT_DECODER 0
+#define CONFIG_SAMI_DECODER 0
+#define CONFIG_SRT_DECODER 0
+#define CONFIG_STL_DECODER 0
+#define CONFIG_SUBRIP_DECODER 0
+#define CONFIG_SUBVIEWER_DECODER 0
+#define CONFIG_SUBVIEWER1_DECODER 0
+#define CONFIG_TEXT_DECODER 0
+#define CONFIG_VPLAYER_DECODER 0
+#define CONFIG_WEBVTT_DECODER 0
+#define CONFIG_XSUB_DECODER 0
+#define CONFIG_AAC_AT_DECODER 0
+#define CONFIG_AC3_AT_DECODER 0
+#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0
+#define CONFIG_ALAC_AT_DECODER 0
+#define CONFIG_AMR_NB_AT_DECODER 0
+#define CONFIG_EAC3_AT_DECODER 0
+#define CONFIG_GSM_MS_AT_DECODER 0
+#define CONFIG_ILBC_AT_DECODER 0
+#define CONFIG_MP1_AT_DECODER 0
+#define CONFIG_MP2_AT_DECODER 0
+#define CONFIG_MP3_AT_DECODER 0
+#define CONFIG_PCM_ALAW_AT_DECODER 0
+#define CONFIG_PCM_MULAW_AT_DECODER 0
+#define CONFIG_QDMC_AT_DECODER 0
+#define CONFIG_QDM2_AT_DECODER 0
+#define CONFIG_LIBARIBB24_DECODER 0
+#define CONFIG_LIBCELT_DECODER 0
+#define CONFIG_LIBCODEC2_DECODER 0
+#define CONFIG_LIBDAV1D_DECODER 1
+#define CONFIG_LIBDAVS2_DECODER 0
+#define CONFIG_LIBFDK_AAC_DECODER 0
+#define CONFIG_LIBGSM_DECODER 0
+#define CONFIG_LIBGSM_MS_DECODER 0
+#define CONFIG_LIBILBC_DECODER 0
+#define CONFIG_LIBJXL_DECODER 0
+#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0
+#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0
+#define CONFIG_LIBOPENJPEG_DECODER 0
+#define CONFIG_LIBOPUS_DECODER 0
+#define CONFIG_LIBRSVG_DECODER 0
+#define CONFIG_LIBSPEEX_DECODER 0
+#define CONFIG_LIBUAVS3D_DECODER 0
+#define CONFIG_LIBVORBIS_DECODER 0
+#define CONFIG_LIBVPX_VP8_DECODER 0
+#define CONFIG_LIBVPX_VP9_DECODER 0
+#define CONFIG_LIBZVBI_TELETEXT_DECODER 0
+#define CONFIG_BINTEXT_DECODER 0
+#define CONFIG_XBIN_DECODER 0
+#define CONFIG_IDF_DECODER 0
+#define CONFIG_LIBAOM_AV1_DECODER 0
+#define CONFIG_AV1_DECODER 1
+#define CONFIG_AV1_CUVID_DECODER 0
+#define CONFIG_AV1_QSV_DECODER 0
+#define CONFIG_LIBOPENH264_DECODER 0
+#define CONFIG_H264_CUVID_DECODER 0
+#define CONFIG_HEVC_CUVID_DECODER 0
+#define CONFIG_HEVC_MEDIACODEC_DECODER 0
+#define CONFIG_MJPEG_CUVID_DECODER 0
+#define CONFIG_MJPEG_QSV_DECODER 0
+#define CONFIG_MPEG1_CUVID_DECODER 0
+#define CONFIG_MPEG2_CUVID_DECODER 0
+#define CONFIG_MPEG4_CUVID_DECODER 0
+#define CONFIG_MPEG4_MEDIACODEC_DECODER 0
+#define CONFIG_VC1_CUVID_DECODER 0
+#define CONFIG_VP8_CUVID_DECODER 0
+#define CONFIG_VP8_MEDIACODEC_DECODER 0
+#define CONFIG_VP8_QSV_DECODER 0
+#define CONFIG_VP9_CUVID_DECODER 0
+#define CONFIG_VP9_MEDIACODEC_DECODER 0
+#define CONFIG_VP9_QSV_DECODER 0
+#define CONFIG_A64MULTI_ENCODER 0
+#define CONFIG_A64MULTI5_ENCODER 0
+#define CONFIG_ALIAS_PIX_ENCODER 0
+#define CONFIG_AMV_ENCODER 0
+#define CONFIG_APNG_ENCODER 0
+#define CONFIG_ASV1_ENCODER 0
+#define CONFIG_ASV2_ENCODER 0
+#define CONFIG_AVRP_ENCODER 0
+#define CONFIG_AVUI_ENCODER 0
+#define CONFIG_AYUV_ENCODER 0
+#define CONFIG_BITPACKED_ENCODER 0
+#define CONFIG_BMP_ENCODER 0
+#define CONFIG_CFHD_ENCODER 0
+#define CONFIG_CINEPAK_ENCODER 0
+#define CONFIG_CLJR_ENCODER 0
+#define CONFIG_COMFORTNOISE_ENCODER 0
+#define CONFIG_DNXHD_ENCODER 0
+#define CONFIG_DPX_ENCODER 0
+#define CONFIG_DVVIDEO_ENCODER 0
+#define CONFIG_EXR_ENCODER 0
+#define CONFIG_FFV1_ENCODER 0
+#define CONFIG_FFVHUFF_ENCODER 0
+#define CONFIG_FITS_ENCODER 0
+#define CONFIG_FLASHSV_ENCODER 0
+#define CONFIG_FLASHSV2_ENCODER 0
+#define CONFIG_FLV_ENCODER 0
+#define CONFIG_GIF_ENCODER 0
+#define CONFIG_H261_ENCODER 0
+#define CONFIG_H263_ENCODER 0
+#define CONFIG_H263P_ENCODER 0
+#define CONFIG_HAP_ENCODER 0
+#define CONFIG_HUFFYUV_ENCODER 0
+#define CONFIG_JPEG2000_ENCODER 0
+#define CONFIG_JPEGLS_ENCODER 0
+#define CONFIG_LJPEG_ENCODER 0
+#define CONFIG_MAGICYUV_ENCODER 0
+#define CONFIG_MJPEG_ENCODER 0
+#define CONFIG_MPEG1VIDEO_ENCODER 0
+#define CONFIG_MPEG2VIDEO_ENCODER 0
+#define CONFIG_MPEG4_ENCODER 0
+#define CONFIG_MSMPEG4V2_ENCODER 0
+#define CONFIG_MSMPEG4V3_ENCODER 0
+#define CONFIG_MSVIDEO1_ENCODER 0
+#define CONFIG_PAM_ENCODER 0
+#define CONFIG_PBM_ENCODER 0
+#define CONFIG_PCX_ENCODER 0
+#define CONFIG_PFM_ENCODER 0
+#define CONFIG_PGM_ENCODER 0
+#define CONFIG_PGMYUV_ENCODER 0
+#define CONFIG_PNG_ENCODER 0
+#define CONFIG_PPM_ENCODER 0
+#define CONFIG_PRORES_ENCODER 0
+#define CONFIG_PRORES_AW_ENCODER 0
+#define CONFIG_PRORES_KS_ENCODER 0
+#define CONFIG_QOI_ENCODER 0
+#define CONFIG_QTRLE_ENCODER 0
+#define CONFIG_R10K_ENCODER 0
+#define CONFIG_R210_ENCODER 0
+#define CONFIG_RAWVIDEO_ENCODER 0
+#define CONFIG_ROQ_ENCODER 0
+#define CONFIG_RPZA_ENCODER 0
+#define CONFIG_RV10_ENCODER 0
+#define CONFIG_RV20_ENCODER 0
+#define CONFIG_S302M_ENCODER 0
+#define CONFIG_SGI_ENCODER 0
+#define CONFIG_SMC_ENCODER 0
+#define CONFIG_SNOW_ENCODER 0
+#define CONFIG_SPEEDHQ_ENCODER 0
+#define CONFIG_SUNRAST_ENCODER 0
+#define CONFIG_SVQ1_ENCODER 0
+#define CONFIG_TARGA_ENCODER 0
+#define CONFIG_TIFF_ENCODER 0
+#define CONFIG_UTVIDEO_ENCODER 0
+#define CONFIG_V210_ENCODER 0
+#define CONFIG_V308_ENCODER 0
+#define CONFIG_V408_ENCODER 0
+#define CONFIG_V410_ENCODER 0
+#define CONFIG_VBN_ENCODER 0
+#define CONFIG_VC2_ENCODER 0
+#define CONFIG_WRAPPED_AVFRAME_ENCODER 0
+#define CONFIG_WMV1_ENCODER 0
+#define CONFIG_WMV2_ENCODER 0
+#define CONFIG_XBM_ENCODER 0
+#define CONFIG_XFACE_ENCODER 0
+#define CONFIG_XWD_ENCODER 0
+#define CONFIG_Y41P_ENCODER 0
+#define CONFIG_YUV4_ENCODER 0
+#define CONFIG_ZLIB_ENCODER 0
+#define CONFIG_ZMBV_ENCODER 0
+#define CONFIG_AAC_ENCODER 0
+#define CONFIG_AC3_ENCODER 0
+#define CONFIG_AC3_FIXED_ENCODER 0
+#define CONFIG_ALAC_ENCODER 0
+#define CONFIG_APTX_ENCODER 0
+#define CONFIG_APTX_HD_ENCODER 0
+#define CONFIG_DCA_ENCODER 0
+#define CONFIG_DFPWM_ENCODER 0
+#define CONFIG_EAC3_ENCODER 0
+#define CONFIG_FLAC_ENCODER 0
+#define CONFIG_G723_1_ENCODER 0
+#define CONFIG_MLP_ENCODER 0
+#define CONFIG_MP2_ENCODER 0
+#define CONFIG_MP2FIXED_ENCODER 0
+#define CONFIG_NELLYMOSER_ENCODER 0
+#define CONFIG_OPUS_ENCODER 0
+#define CONFIG_RA_144_ENCODER 0
+#define CONFIG_SBC_ENCODER 0
+#define CONFIG_SONIC_ENCODER 0
+#define CONFIG_SONIC_LS_ENCODER 0
+#define CONFIG_TRUEHD_ENCODER 0
+#define CONFIG_TTA_ENCODER 0
+#define CONFIG_VORBIS_ENCODER 0
+#define CONFIG_WAVPACK_ENCODER 0
+#define CONFIG_WMAV1_ENCODER 0
+#define CONFIG_WMAV2_ENCODER 0
+#define CONFIG_PCM_ALAW_ENCODER 0
+#define CONFIG_PCM_BLURAY_ENCODER 0
+#define CONFIG_PCM_DVD_ENCODER 0
+#define CONFIG_PCM_F32BE_ENCODER 0
+#define CONFIG_PCM_F32LE_ENCODER 0
+#define CONFIG_PCM_F64BE_ENCODER 0
+#define CONFIG_PCM_F64LE_ENCODER 0
+#define CONFIG_PCM_MULAW_ENCODER 0
+#define CONFIG_PCM_S8_ENCODER 0
+#define CONFIG_PCM_S8_PLANAR_ENCODER 0
+#define CONFIG_PCM_S16BE_ENCODER 0
+#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S16LE_ENCODER 0
+#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S24BE_ENCODER 0
+#define CONFIG_PCM_S24DAUD_ENCODER 0
+#define CONFIG_PCM_S24LE_ENCODER 0
+#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S32BE_ENCODER 0
+#define CONFIG_PCM_S32LE_ENCODER 0
+#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0
+#define CONFIG_PCM_S64BE_ENCODER 0
+#define CONFIG_PCM_S64LE_ENCODER 0
+#define CONFIG_PCM_U8_ENCODER 0
+#define CONFIG_PCM_U16BE_ENCODER 0
+#define CONFIG_PCM_U16LE_ENCODER 0
+#define CONFIG_PCM_U24BE_ENCODER 0
+#define CONFIG_PCM_U24LE_ENCODER 0
+#define CONFIG_PCM_U32BE_ENCODER 0
+#define CONFIG_PCM_U32LE_ENCODER 0
+#define CONFIG_PCM_VIDC_ENCODER 0
+#define CONFIG_ROQ_DPCM_ENCODER 0
+#define CONFIG_ADPCM_ADX_ENCODER 0
+#define CONFIG_ADPCM_ARGO_ENCODER 0
+#define CONFIG_ADPCM_G722_ENCODER 0
+#define CONFIG_ADPCM_G726_ENCODER 0
+#define CONFIG_ADPCM_G726LE_ENCODER 0
+#define CONFIG_ADPCM_IMA_AMV_ENCODER 0
+#define CONFIG_ADPCM_IMA_ALP_ENCODER 0
+#define CONFIG_ADPCM_IMA_APM_ENCODER 0
+#define CONFIG_ADPCM_IMA_QT_ENCODER 0
+#define CONFIG_ADPCM_IMA_SSI_ENCODER 0
+#define CONFIG_ADPCM_IMA_WAV_ENCODER 0
+#define CONFIG_ADPCM_IMA_WS_ENCODER 0
+#define CONFIG_ADPCM_MS_ENCODER 0
+#define CONFIG_ADPCM_SWF_ENCODER 0
+#define CONFIG_ADPCM_YAMAHA_ENCODER 0
+#define CONFIG_SSA_ENCODER 0
+#define CONFIG_ASS_ENCODER 0
+#define CONFIG_DVBSUB_ENCODER 0
+#define CONFIG_DVDSUB_ENCODER 0
+#define CONFIG_MOVTEXT_ENCODER 0
+#define CONFIG_SRT_ENCODER 0
+#define CONFIG_SUBRIP_ENCODER 0
+#define CONFIG_TEXT_ENCODER 0
+#define CONFIG_TTML_ENCODER 0
+#define CONFIG_WEBVTT_ENCODER 0
+#define CONFIG_XSUB_ENCODER 0
+#define CONFIG_AAC_AT_ENCODER 0
+#define CONFIG_ALAC_AT_ENCODER 0
+#define CONFIG_ILBC_AT_ENCODER 0
+#define CONFIG_PCM_ALAW_AT_ENCODER 0
+#define CONFIG_PCM_MULAW_AT_ENCODER 0
+#define CONFIG_LIBAOM_AV1_ENCODER 0
+#define CONFIG_LIBCODEC2_ENCODER 0
+#define CONFIG_LIBFDK_AAC_ENCODER 0
+#define CONFIG_LIBGSM_ENCODER 0
+#define CONFIG_LIBGSM_MS_ENCODER 0
+#define CONFIG_LIBILBC_ENCODER 0
+#define CONFIG_LIBJXL_ENCODER 0
+#define CONFIG_LIBMP3LAME_ENCODER 0
+#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0
+#define CONFIG_LIBOPENJPEG_ENCODER 0
+#define CONFIG_LIBOPUS_ENCODER 0
+#define CONFIG_LIBRAV1E_ENCODER 0
+#define CONFIG_LIBSHINE_ENCODER 0
+#define CONFIG_LIBSPEEX_ENCODER 0
+#define CONFIG_LIBSVTAV1_ENCODER 0
+#define CONFIG_LIBTHEORA_ENCODER 0
+#define CONFIG_LIBTWOLAME_ENCODER 0
+#define CONFIG_LIBVO_AMRWBENC_ENCODER 0
+#define CONFIG_LIBVORBIS_ENCODER 0
+#define CONFIG_LIBVPX_VP8_ENCODER 0
+#define CONFIG_LIBVPX_VP9_ENCODER 0
+#define CONFIG_LIBWEBP_ANIM_ENCODER 0
+#define CONFIG_LIBWEBP_ENCODER 0
+#define CONFIG_LIBX262_ENCODER 0
+#define CONFIG_LIBX264_ENCODER 0
+#define CONFIG_LIBX264RGB_ENCODER 0
+#define CONFIG_LIBX265_ENCODER 0
+#define CONFIG_LIBXAVS_ENCODER 0
+#define CONFIG_LIBXAVS2_ENCODER 0
+#define CONFIG_LIBXVID_ENCODER 0
+#define CONFIG_AAC_MF_ENCODER 0
+#define CONFIG_AC3_MF_ENCODER 0
+#define CONFIG_H263_V4L2M2M_ENCODER 0
+#define CONFIG_LIBOPENH264_ENCODER 0
+#define CONFIG_H264_AMF_ENCODER 0
+#define CONFIG_H264_MF_ENCODER 0
+#define CONFIG_H264_NVENC_ENCODER 0
+#define CONFIG_H264_OMX_ENCODER 0
+#define CONFIG_H264_QSV_ENCODER 0
+#define CONFIG_H264_V4L2M2M_ENCODER 0
+#define CONFIG_H264_VAAPI_ENCODER 0
+#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0
+#define CONFIG_HEVC_AMF_ENCODER 0
+#define CONFIG_HEVC_MF_ENCODER 0
+#define CONFIG_HEVC_NVENC_ENCODER 0
+#define CONFIG_HEVC_QSV_ENCODER 0
+#define CONFIG_HEVC_V4L2M2M_ENCODER 0
+#define CONFIG_HEVC_VAAPI_ENCODER 0
+#define CONFIG_HEVC_VIDEOTOOLBOX_ENCODER 0
+#define CONFIG_LIBKVAZAAR_ENCODER 0
+#define CONFIG_MJPEG_QSV_ENCODER 0
+#define CONFIG_MJPEG_VAAPI_ENCODER 0
+#define CONFIG_MP3_MF_ENCODER 0
+#define CONFIG_MPEG2_QSV_ENCODER 0
+#define CONFIG_MPEG2_VAAPI_ENCODER 0
+#define CONFIG_MPEG4_OMX_ENCODER 0
+#define CONFIG_MPEG4_V4L2M2M_ENCODER 0
+#define CONFIG_PRORES_VIDEOTOOLBOX_ENCODER 0
+#define CONFIG_VP8_V4L2M2M_ENCODER 0
+#define CONFIG_VP8_VAAPI_ENCODER 0
+#define CONFIG_VP9_VAAPI_ENCODER 0
+#define CONFIG_VP9_QSV_ENCODER 0
+#define CONFIG_AV1_D3D11VA_HWACCEL 0
+#define CONFIG_AV1_D3D11VA2_HWACCEL 0
+#define CONFIG_AV1_DXVA2_HWACCEL 0
+#define CONFIG_AV1_NVDEC_HWACCEL 0
+#define CONFIG_AV1_VAAPI_HWACCEL 0
+#define CONFIG_AV1_VDPAU_HWACCEL 0
+#define CONFIG_H263_VAAPI_HWACCEL 0
+#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_H264_D3D11VA_HWACCEL 0
+#define CONFIG_H264_D3D11VA2_HWACCEL 0
+#define CONFIG_H264_DXVA2_HWACCEL 0
+#define CONFIG_H264_NVDEC_HWACCEL 0
+#define CONFIG_H264_VAAPI_HWACCEL 0
+#define CONFIG_H264_VDPAU_HWACCEL 0
+#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_HEVC_D3D11VA_HWACCEL 0
+#define CONFIG_HEVC_D3D11VA2_HWACCEL 0
+#define CONFIG_HEVC_DXVA2_HWACCEL 0
+#define CONFIG_HEVC_NVDEC_HWACCEL 0
+#define CONFIG_HEVC_VAAPI_HWACCEL 0
+#define CONFIG_HEVC_VDPAU_HWACCEL 0
+#define CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_MJPEG_NVDEC_HWACCEL 0
+#define CONFIG_MJPEG_VAAPI_HWACCEL 0
+#define CONFIG_MPEG1_NVDEC_HWACCEL 0
+#define CONFIG_MPEG1_VDPAU_HWACCEL 0
+#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_MPEG2_D3D11VA_HWACCEL 0
+#define CONFIG_MPEG2_D3D11VA2_HWACCEL 0
+#define CONFIG_MPEG2_NVDEC_HWACCEL 0
+#define CONFIG_MPEG2_DXVA2_HWACCEL 0
+#define CONFIG_MPEG2_VAAPI_HWACCEL 0
+#define CONFIG_MPEG2_VDPAU_HWACCEL 0
+#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_MPEG4_NVDEC_HWACCEL 0
+#define CONFIG_MPEG4_VAAPI_HWACCEL 0
+#define CONFIG_MPEG4_VDPAU_HWACCEL 0
+#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_VC1_D3D11VA_HWACCEL 0
+#define CONFIG_VC1_D3D11VA2_HWACCEL 0
+#define CONFIG_VC1_DXVA2_HWACCEL 0
+#define CONFIG_VC1_NVDEC_HWACCEL 0
+#define CONFIG_VC1_VAAPI_HWACCEL 0
+#define CONFIG_VC1_VDPAU_HWACCEL 0
+#define CONFIG_VP8_NVDEC_HWACCEL 0
+#define CONFIG_VP8_VAAPI_HWACCEL 0
+#define CONFIG_VP9_D3D11VA_HWACCEL 0
+#define CONFIG_VP9_D3D11VA2_HWACCEL 0
+#define CONFIG_VP9_DXVA2_HWACCEL 0
+#define CONFIG_VP9_NVDEC_HWACCEL 0
+#define CONFIG_VP9_VAAPI_HWACCEL 0
+#define CONFIG_VP9_VDPAU_HWACCEL 0
+#define CONFIG_VP9_VIDEOTOOLBOX_HWACCEL 0
+#define CONFIG_WMV3_D3D11VA_HWACCEL 0
+#define CONFIG_WMV3_D3D11VA2_HWACCEL 0
+#define CONFIG_WMV3_DXVA2_HWACCEL 0
+#define CONFIG_WMV3_NVDEC_HWACCEL 0
+#define CONFIG_WMV3_VAAPI_HWACCEL 0
+#define CONFIG_WMV3_VDPAU_HWACCEL 0
+#define CONFIG_AAC_PARSER 0
+#define CONFIG_AAC_LATM_PARSER 0
+#define CONFIG_AC3_PARSER 0
+#define CONFIG_ADX_PARSER 0
+#define CONFIG_AMR_PARSER 0
+#define CONFIG_AV1_PARSER 0
+#define CONFIG_AVS2_PARSER 0
+#define CONFIG_AVS3_PARSER 0
+#define CONFIG_BMP_PARSER 0
+#define CONFIG_CAVSVIDEO_PARSER 0
+#define CONFIG_COOK_PARSER 0
+#define CONFIG_CRI_PARSER 0
+#define CONFIG_DCA_PARSER 0
+#define CONFIG_DIRAC_PARSER 0
+#define CONFIG_DNXHD_PARSER 0
+#define CONFIG_DOLBY_E_PARSER 0
+#define CONFIG_DPX_PARSER 0
+#define CONFIG_DVAUDIO_PARSER 0
+#define CONFIG_DVBSUB_PARSER 0
+#define CONFIG_DVDSUB_PARSER 0
+#define CONFIG_DVD_NAV_PARSER 0
+#define CONFIG_FLAC_PARSER 0
+#define CONFIG_G723_1_PARSER 0
+#define CONFIG_G729_PARSER 0
+#define CONFIG_GIF_PARSER 0
+#define CONFIG_GSM_PARSER 0
+#define CONFIG_H261_PARSER 0
+#define CONFIG_H263_PARSER 0
+#define CONFIG_H264_PARSER 0
+#define CONFIG_HEVC_PARSER 0
+#define CONFIG_IPU_PARSER 0
+#define CONFIG_JPEG2000_PARSER 0
+#define CONFIG_MJPEG_PARSER 0
+#define CONFIG_MLP_PARSER 0
+#define CONFIG_MPEG4VIDEO_PARSER 0
+#define CONFIG_MPEGAUDIO_PARSER 0
+#define CONFIG_MPEGVIDEO_PARSER 0
+#define CONFIG_OPUS_PARSER 0
+#define CONFIG_PNG_PARSER 0
+#define CONFIG_PNM_PARSER 0
+#define CONFIG_QOI_PARSER 0
+#define CONFIG_RV30_PARSER 0
+#define CONFIG_RV40_PARSER 0
+#define CONFIG_SBC_PARSER 0
+#define CONFIG_SIPR_PARSER 0
+#define CONFIG_TAK_PARSER 0
+#define CONFIG_VC1_PARSER 0
+#define CONFIG_VORBIS_PARSER 0
+#define CONFIG_VP3_PARSER 0
+#define CONFIG_VP8_PARSER 1
+#define CONFIG_VP9_PARSER 1
+#define CONFIG_WEBP_PARSER 0
+#define CONFIG_XBM_PARSER 0
+#define CONFIG_XMA_PARSER 0
+#define CONFIG_ALSA_INDEV 0
+#define CONFIG_ANDROID_CAMERA_INDEV 0
+#define CONFIG_AVFOUNDATION_INDEV 0
+#define CONFIG_BKTR_INDEV 0
+#define CONFIG_DECKLINK_INDEV 0
+#define CONFIG_DSHOW_INDEV 0
+#define CONFIG_FBDEV_INDEV 0
+#define CONFIG_GDIGRAB_INDEV 0
+#define CONFIG_IEC61883_INDEV 0
+#define CONFIG_JACK_INDEV 0
+#define CONFIG_KMSGRAB_INDEV 0
+#define CONFIG_LAVFI_INDEV 0
+#define CONFIG_OPENAL_INDEV 0
+#define CONFIG_OSS_INDEV 0
+#define CONFIG_PULSE_INDEV 0
+#define CONFIG_SNDIO_INDEV 0
+#define CONFIG_V4L2_INDEV 0
+#define CONFIG_VFWCAP_INDEV 0
+#define CONFIG_XCBGRAB_INDEV 0
+#define CONFIG_LIBCDIO_INDEV 0
+#define CONFIG_LIBDC1394_INDEV 0
+#define CONFIG_ALSA_OUTDEV 0
+#define CONFIG_AUDIOTOOLBOX_OUTDEV 0
+#define CONFIG_CACA_OUTDEV 0
+#define CONFIG_DECKLINK_OUTDEV 0
+#define CONFIG_FBDEV_OUTDEV 0
+#define CONFIG_OPENGL_OUTDEV 0
+#define CONFIG_OSS_OUTDEV 0
+#define CONFIG_PULSE_OUTDEV 0
+#define CONFIG_SDL2_OUTDEV 0
+#define CONFIG_SNDIO_OUTDEV 0
+#define CONFIG_V4L2_OUTDEV 0
+#define CONFIG_XV_OUTDEV 0
+#define CONFIG_ABENCH_FILTER 0
+#define CONFIG_ACOMPRESSOR_FILTER 0
+#define CONFIG_ACONTRAST_FILTER 0
+#define CONFIG_ACOPY_FILTER 0
+#define CONFIG_ACUE_FILTER 0
+#define CONFIG_ACROSSFADE_FILTER 0
+#define CONFIG_ACROSSOVER_FILTER 0
+#define CONFIG_ACRUSHER_FILTER 0
+#define CONFIG_ADECLICK_FILTER 0
+#define CONFIG_ADECLIP_FILTER 0
+#define CONFIG_ADECORRELATE_FILTER 0
+#define CONFIG_ADELAY_FILTER 0
+#define CONFIG_ADENORM_FILTER 0
+#define CONFIG_ADERIVATIVE_FILTER 0
+#define CONFIG_ADYNAMICEQUALIZER_FILTER 0
+#define CONFIG_ADYNAMICSMOOTH_FILTER 0
+#define CONFIG_AECHO_FILTER 0
+#define CONFIG_AEMPHASIS_FILTER 0
+#define CONFIG_AEVAL_FILTER 0
+#define CONFIG_AEXCITER_FILTER 0
+#define CONFIG_AFADE_FILTER 0
+#define CONFIG_AFFTDN_FILTER 0
+#define CONFIG_AFFTFILT_FILTER 0
+#define CONFIG_AFIR_FILTER 0
+#define CONFIG_AFORMAT_FILTER 0
+#define CONFIG_AFREQSHIFT_FILTER 0
+#define CONFIG_AFWTDN_FILTER 0
+#define CONFIG_AGATE_FILTER 0
+#define CONFIG_AIIR_FILTER 0
+#define CONFIG_AINTEGRAL_FILTER 0
+#define CONFIG_AINTERLEAVE_FILTER 0
+#define CONFIG_ALATENCY_FILTER 0
+#define CONFIG_ALIMITER_FILTER 0
+#define CONFIG_ALLPASS_FILTER 0
+#define CONFIG_ALOOP_FILTER 0
+#define CONFIG_AMERGE_FILTER 0
+#define CONFIG_AMETADATA_FILTER 0
+#define CONFIG_AMIX_FILTER 0
+#define CONFIG_AMULTIPLY_FILTER 0
+#define CONFIG_ANEQUALIZER_FILTER 0
+#define CONFIG_ANLMDN_FILTER 0
+#define CONFIG_ANLMF_FILTER 0
+#define CONFIG_ANLMS_FILTER 0
+#define CONFIG_ANULL_FILTER 0
+#define CONFIG_APAD_FILTER 0
+#define CONFIG_APERMS_FILTER 0
+#define CONFIG_APHASER_FILTER 0
+#define CONFIG_APHASESHIFT_FILTER 0
+#define CONFIG_APSYCLIP_FILTER 0
+#define CONFIG_APULSATOR_FILTER 0
+#define CONFIG_AREALTIME_FILTER 0
+#define CONFIG_ARESAMPLE_FILTER 0
+#define CONFIG_AREVERSE_FILTER 0
+#define CONFIG_ARNNDN_FILTER 0
+#define CONFIG_ASDR_FILTER 0
+#define CONFIG_ASEGMENT_FILTER 0
+#define CONFIG_ASELECT_FILTER 0
+#define CONFIG_ASENDCMD_FILTER 0
+#define CONFIG_ASETNSAMPLES_FILTER 0
+#define CONFIG_ASETPTS_FILTER 0
+#define CONFIG_ASETRATE_FILTER 0
+#define CONFIG_ASETTB_FILTER 0
+#define CONFIG_ASHOWINFO_FILTER 0
+#define CONFIG_ASIDEDATA_FILTER 0
+#define CONFIG_ASOFTCLIP_FILTER 0
+#define CONFIG_ASPECTRALSTATS_FILTER 0
+#define CONFIG_ASPLIT_FILTER 0
+#define CONFIG_ASR_FILTER 0
+#define CONFIG_ASTATS_FILTER 0
+#define CONFIG_ASTREAMSELECT_FILTER 0
+#define CONFIG_ASUBBOOST_FILTER 0
+#define CONFIG_ASUBCUT_FILTER 0
+#define CONFIG_ASUPERCUT_FILTER 0
+#define CONFIG_ASUPERPASS_FILTER 0
+#define CONFIG_ASUPERSTOP_FILTER 0
+#define CONFIG_ATEMPO_FILTER 0
+#define CONFIG_ATILT_FILTER 0
+#define CONFIG_ATRIM_FILTER 0
+#define CONFIG_AXCORRELATE_FILTER 0
+#define CONFIG_AZMQ_FILTER 0
+#define CONFIG_BANDPASS_FILTER 0
+#define CONFIG_BANDREJECT_FILTER 0
+#define CONFIG_BASS_FILTER 0
+#define CONFIG_BIQUAD_FILTER 0
+#define CONFIG_BS2B_FILTER 0
+#define CONFIG_CHANNELMAP_FILTER 0
+#define CONFIG_CHANNELSPLIT_FILTER 0
+#define CONFIG_CHORUS_FILTER 0
+#define CONFIG_COMPAND_FILTER 0
+#define CONFIG_COMPENSATIONDELAY_FILTER 0
+#define CONFIG_CROSSFEED_FILTER 0
+#define CONFIG_CRYSTALIZER_FILTER 0
+#define CONFIG_DCSHIFT_FILTER 0
+#define CONFIG_DEESSER_FILTER 0
+#define CONFIG_DIALOGUENHANCE_FILTER 0
+#define CONFIG_DRMETER_FILTER 0
+#define CONFIG_DYNAUDNORM_FILTER 0
+#define CONFIG_EARWAX_FILTER 0
+#define CONFIG_EBUR128_FILTER 0
+#define CONFIG_EQUALIZER_FILTER 0
+#define CONFIG_EXTRASTEREO_FILTER 0
+#define CONFIG_FIREQUALIZER_FILTER 0
+#define CONFIG_FLANGER_FILTER 0
+#define CONFIG_HAAS_FILTER 0
+#define CONFIG_HDCD_FILTER 0
+#define CONFIG_HEADPHONE_FILTER 0
+#define CONFIG_HIGHPASS_FILTER 0
+#define CONFIG_HIGHSHELF_FILTER 0
+#define CONFIG_JOIN_FILTER 0
+#define CONFIG_LADSPA_FILTER 0
+#define CONFIG_LOUDNORM_FILTER 0
+#define CONFIG_LOWPASS_FILTER 0
+#define CONFIG_LOWSHELF_FILTER 0
+#define CONFIG_LV2_FILTER 0
+#define CONFIG_MCOMPAND_FILTER 0
+#define CONFIG_PAN_FILTER 0
+#define CONFIG_REPLAYGAIN_FILTER 0
+#define CONFIG_RUBBERBAND_FILTER 0
+#define CONFIG_SIDECHAINCOMPRESS_FILTER 0
+#define CONFIG_SIDECHAINGATE_FILTER 0
+#define CONFIG_SILENCEDETECT_FILTER 0
+#define CONFIG_SILENCEREMOVE_FILTER 0
+#define CONFIG_SOFALIZER_FILTER 0
+#define CONFIG_SPEECHNORM_FILTER 0
+#define CONFIG_STEREOTOOLS_FILTER 0
+#define CONFIG_STEREOWIDEN_FILTER 0
+#define CONFIG_SUPEREQUALIZER_FILTER 0
+#define CONFIG_SURROUND_FILTER 0
+#define CONFIG_TILTSHELF_FILTER 0
+#define CONFIG_TREBLE_FILTER 0
+#define CONFIG_TREMOLO_FILTER 0
+#define CONFIG_VIBRATO_FILTER 0
+#define CONFIG_VIRTUALBASS_FILTER 0
+#define CONFIG_VOLUME_FILTER 0
+#define CONFIG_VOLUMEDETECT_FILTER 0
+#define CONFIG_AEVALSRC_FILTER 0
+#define CONFIG_AFIRSRC_FILTER 0
+#define CONFIG_ANOISESRC_FILTER 0
+#define CONFIG_ANULLSRC_FILTER 0
+#define CONFIG_FLITE_FILTER 0
+#define CONFIG_HILBERT_FILTER 0
+#define CONFIG_SINC_FILTER 0
+#define CONFIG_SINE_FILTER 0
+#define CONFIG_ANULLSINK_FILTER 0
+#define CONFIG_ADDROI_FILTER 0
+#define CONFIG_ALPHAEXTRACT_FILTER 0
+#define CONFIG_ALPHAMERGE_FILTER 0
+#define CONFIG_AMPLIFY_FILTER 0
+#define CONFIG_ASS_FILTER 0
+#define CONFIG_ATADENOISE_FILTER 0
+#define CONFIG_AVGBLUR_FILTER 0
+#define CONFIG_AVGBLUR_OPENCL_FILTER 0
+#define CONFIG_AVGBLUR_VULKAN_FILTER 0
+#define CONFIG_BBOX_FILTER 0
+#define CONFIG_BENCH_FILTER 0
+#define CONFIG_BILATERAL_FILTER 0
+#define CONFIG_BITPLANENOISE_FILTER 0
+#define CONFIG_BLACKDETECT_FILTER 0
+#define CONFIG_BLACKFRAME_FILTER 0
+#define CONFIG_BLEND_FILTER 0
+#define CONFIG_BLEND_VULKAN_FILTER 0
+#define CONFIG_BLOCKDETECT_FILTER 0
+#define CONFIG_BLURDETECT_FILTER 0
+#define CONFIG_BM3D_FILTER 0
+#define CONFIG_BOXBLUR_FILTER 0
+#define CONFIG_BOXBLUR_OPENCL_FILTER 0
+#define CONFIG_BWDIF_FILTER 0
+#define CONFIG_CAS_FILTER 0
+#define CONFIG_CHROMABER_VULKAN_FILTER 0
+#define CONFIG_CHROMAHOLD_FILTER 0
+#define CONFIG_CHROMAKEY_FILTER 0
+#define CONFIG_CHROMANR_FILTER 0
+#define CONFIG_CHROMASHIFT_FILTER 0
+#define CONFIG_CIESCOPE_FILTER 0
+#define CONFIG_CODECVIEW_FILTER 0
+#define CONFIG_COLORBALANCE_FILTER 0
+#define CONFIG_COLORCHANNELMIXER_FILTER 0
+#define CONFIG_COLORCONTRAST_FILTER 0
+#define CONFIG_COLORCORRECT_FILTER 0
+#define CONFIG_COLORIZE_FILTER 0
+#define CONFIG_COLORKEY_FILTER 0
+#define CONFIG_COLORKEY_OPENCL_FILTER 0
+#define CONFIG_COLORHOLD_FILTER 0
+#define CONFIG_COLORLEVELS_FILTER 0
+#define CONFIG_COLORMAP_FILTER 0
+#define CONFIG_COLORMATRIX_FILTER 0
+#define CONFIG_COLORSPACE_FILTER 0
+#define CONFIG_COLORTEMPERATURE_FILTER 0
+#define CONFIG_CONVOLUTION_FILTER 0
+#define CONFIG_CONVOLUTION_OPENCL_FILTER 0
+#define CONFIG_CONVOLVE_FILTER 0
+#define CONFIG_COPY_FILTER 0
+#define CONFIG_COREIMAGE_FILTER 0
+#define CONFIG_COVER_RECT_FILTER 0
+#define CONFIG_CROP_FILTER 0
+#define CONFIG_CROPDETECT_FILTER 0
+#define CONFIG_CUE_FILTER 0
+#define CONFIG_CURVES_FILTER 0
+#define CONFIG_DATASCOPE_FILTER 0
+#define CONFIG_DBLUR_FILTER 0
+#define CONFIG_DCTDNOIZ_FILTER 0
+#define CONFIG_DEBAND_FILTER 0
+#define CONFIG_DEBLOCK_FILTER 0
+#define CONFIG_DECIMATE_FILTER 0
+#define CONFIG_DECONVOLVE_FILTER 0
+#define CONFIG_DEDOT_FILTER 0
+#define CONFIG_DEFLATE_FILTER 0
+#define CONFIG_DEFLICKER_FILTER 0
+#define CONFIG_DEINTERLACE_QSV_FILTER 0
+#define CONFIG_DEINTERLACE_VAAPI_FILTER 0
+#define CONFIG_DEJUDDER_FILTER 0
+#define CONFIG_DELOGO_FILTER 0
+#define CONFIG_DENOISE_VAAPI_FILTER 0
+#define CONFIG_DERAIN_FILTER 0
+#define CONFIG_DESHAKE_FILTER 0
+#define CONFIG_DESHAKE_OPENCL_FILTER 0
+#define CONFIG_DESPILL_FILTER 0
+#define CONFIG_DETELECINE_FILTER 0
+#define CONFIG_DILATION_FILTER 0
+#define CONFIG_DILATION_OPENCL_FILTER 0
+#define CONFIG_DISPLACE_FILTER 0
+#define CONFIG_DNN_CLASSIFY_FILTER 0
+#define CONFIG_DNN_DETECT_FILTER 0
+#define CONFIG_DNN_PROCESSING_FILTER 0
+#define CONFIG_DOUBLEWEAVE_FILTER 0
+#define CONFIG_DRAWBOX_FILTER 0
+#define CONFIG_DRAWGRAPH_FILTER 0
+#define CONFIG_DRAWGRID_FILTER 0
+#define CONFIG_DRAWTEXT_FILTER 0
+#define CONFIG_EDGEDETECT_FILTER 0
+#define CONFIG_ELBG_FILTER 0
+#define CONFIG_ENTROPY_FILTER 0
+#define CONFIG_EPX_FILTER 0
+#define CONFIG_EQ_FILTER 0
+#define CONFIG_EROSION_FILTER 0
+#define CONFIG_EROSION_OPENCL_FILTER 0
+#define CONFIG_ESTDIF_FILTER 0
+#define CONFIG_EXPOSURE_FILTER 0
+#define CONFIG_EXTRACTPLANES_FILTER 0
+#define CONFIG_FADE_FILTER 0
+#define CONFIG_FEEDBACK_FILTER 0
+#define CONFIG_FFTDNOIZ_FILTER 0
+#define CONFIG_FFTFILT_FILTER 0
+#define CONFIG_FIELD_FILTER 0
+#define CONFIG_FIELDHINT_FILTER 0
+#define CONFIG_FIELDMATCH_FILTER 0
+#define CONFIG_FIELDORDER_FILTER 0
+#define CONFIG_FILLBORDERS_FILTER 0
+#define CONFIG_FIND_RECT_FILTER 0
+#define CONFIG_FLIP_VULKAN_FILTER 0
+#define CONFIG_FLOODFILL_FILTER 0
+#define CONFIG_FORMAT_FILTER 0
+#define CONFIG_FPS_FILTER 0
+#define CONFIG_FRAMEPACK_FILTER 0
+#define CONFIG_FRAMERATE_FILTER 0
+#define CONFIG_FRAMESTEP_FILTER 0
+#define CONFIG_FREEZEDETECT_FILTER 0
+#define CONFIG_FREEZEFRAMES_FILTER 0
+#define CONFIG_FREI0R_FILTER 0
+#define CONFIG_FSPP_FILTER 0
+#define CONFIG_GBLUR_FILTER 0
+#define CONFIG_GBLUR_VULKAN_FILTER 0
+#define CONFIG_GEQ_FILTER 0
+#define CONFIG_GRADFUN_FILTER 0
+#define CONFIG_GRAPHMONITOR_FILTER 0
+#define CONFIG_GRAYWORLD_FILTER 0
+#define CONFIG_GREYEDGE_FILTER 0
+#define CONFIG_GUIDED_FILTER 0
+#define CONFIG_HALDCLUT_FILTER 0
+#define CONFIG_HFLIP_FILTER 0
+#define CONFIG_HFLIP_VULKAN_FILTER 0
+#define CONFIG_HISTEQ_FILTER 0
+#define CONFIG_HISTOGRAM_FILTER 0
+#define CONFIG_HQDN3D_FILTER 0
+#define CONFIG_HQX_FILTER 0
+#define CONFIG_HSTACK_FILTER 0
+#define CONFIG_HSVHOLD_FILTER 0
+#define CONFIG_HSVKEY_FILTER 0
+#define CONFIG_HUE_FILTER 0
+#define CONFIG_HUESATURATION_FILTER 0
+#define CONFIG_HWDOWNLOAD_FILTER 0
+#define CONFIG_HWMAP_FILTER 0
+#define CONFIG_HWUPLOAD_FILTER 0
+#define CONFIG_HWUPLOAD_CUDA_FILTER 0
+#define CONFIG_HYSTERESIS_FILTER 0
+#define CONFIG_ICCDETECT_FILTER 0
+#define CONFIG_ICCGEN_FILTER 0
+#define CONFIG_IDENTITY_FILTER 0
+#define CONFIG_IDET_FILTER 0
+#define CONFIG_IL_FILTER 0
+#define CONFIG_INFLATE_FILTER 0
+#define CONFIG_INTERLACE_FILTER 0
+#define CONFIG_INTERLEAVE_FILTER 0
+#define CONFIG_KERNDEINT_FILTER 0
+#define CONFIG_KIRSCH_FILTER 0
+#define CONFIG_LAGFUN_FILTER 0
+#define CONFIG_LATENCY_FILTER 0
+#define CONFIG_LENSCORRECTION_FILTER 0
+#define CONFIG_LENSFUN_FILTER 0
+#define CONFIG_LIBPLACEBO_FILTER 0
+#define CONFIG_LIBVMAF_FILTER 0
+#define CONFIG_LIMITDIFF_FILTER 0
+#define CONFIG_LIMITER_FILTER 0
+#define CONFIG_LOOP_FILTER 0
+#define CONFIG_LUMAKEY_FILTER 0
+#define CONFIG_LUT_FILTER 0
+#define CONFIG_LUT1D_FILTER 0
+#define CONFIG_LUT2_FILTER 0
+#define CONFIG_LUT3D_FILTER 0
+#define CONFIG_LUTRGB_FILTER 0
+#define CONFIG_LUTYUV_FILTER 0
+#define CONFIG_MASKEDCLAMP_FILTER 0
+#define CONFIG_MASKEDMAX_FILTER 0
+#define CONFIG_MASKEDMERGE_FILTER 0
+#define CONFIG_MASKEDMIN_FILTER 0
+#define CONFIG_MASKEDTHRESHOLD_FILTER 0
+#define CONFIG_MASKFUN_FILTER 0
+#define CONFIG_MCDEINT_FILTER 0
+#define CONFIG_MEDIAN_FILTER 0
+#define CONFIG_MERGEPLANES_FILTER 0
+#define CONFIG_MESTIMATE_FILTER 0
+#define CONFIG_METADATA_FILTER 0
+#define CONFIG_MIDEQUALIZER_FILTER 0
+#define CONFIG_MINTERPOLATE_FILTER 0
+#define CONFIG_MIX_FILTER 0
+#define CONFIG_MONOCHROME_FILTER 0
+#define CONFIG_MORPHO_FILTER 0
+#define CONFIG_MPDECIMATE_FILTER 0
+#define CONFIG_MSAD_FILTER 0
+#define CONFIG_MULTIPLY_FILTER 0
+#define CONFIG_NEGATE_FILTER 0
+#define CONFIG_NLMEANS_FILTER 0
+#define CONFIG_NLMEANS_OPENCL_FILTER 0
+#define CONFIG_NNEDI_FILTER 0
+#define CONFIG_NOFORMAT_FILTER 0
+#define CONFIG_NOISE_FILTER 0
+#define CONFIG_NORMALIZE_FILTER 0
+#define CONFIG_NULL_FILTER 0
+#define CONFIG_OCR_FILTER 0
+#define CONFIG_OCV_FILTER 0
+#define CONFIG_OSCILLOSCOPE_FILTER 0
+#define CONFIG_OVERLAY_FILTER 0
+#define CONFIG_OVERLAY_OPENCL_FILTER 0
+#define CONFIG_OVERLAY_QSV_FILTER 0
+#define CONFIG_OVERLAY_VAAPI_FILTER 0
+#define CONFIG_OVERLAY_VULKAN_FILTER 0
+#define CONFIG_OVERLAY_CUDA_FILTER 0
+#define CONFIG_OWDENOISE_FILTER 0
+#define CONFIG_PAD_FILTER 0
+#define CONFIG_PAD_OPENCL_FILTER 0
+#define CONFIG_PALETTEGEN_FILTER 0
+#define CONFIG_PALETTEUSE_FILTER 0
+#define CONFIG_PERMS_FILTER 0
+#define CONFIG_PERSPECTIVE_FILTER 0
+#define CONFIG_PHASE_FILTER 0
+#define CONFIG_PHOTOSENSITIVITY_FILTER 0
+#define CONFIG_PIXDESCTEST_FILTER 0
+#define CONFIG_PIXELIZE_FILTER 0
+#define CONFIG_PIXSCOPE_FILTER 0
+#define CONFIG_PP_FILTER 0
+#define CONFIG_PP7_FILTER 0
+#define CONFIG_PREMULTIPLY_FILTER 0
+#define CONFIG_PREWITT_FILTER 0
+#define CONFIG_PREWITT_OPENCL_FILTER 0
+#define CONFIG_PROCAMP_VAAPI_FILTER 0
+#define CONFIG_PROGRAM_OPENCL_FILTER 0
+#define CONFIG_PSEUDOCOLOR_FILTER 0
+#define CONFIG_PSNR_FILTER 0
+#define CONFIG_PULLUP_FILTER 0
+#define CONFIG_QP_FILTER 0
+#define CONFIG_RANDOM_FILTER 0
+#define CONFIG_READEIA608_FILTER 0
+#define CONFIG_READVITC_FILTER 0
+#define CONFIG_REALTIME_FILTER 0
+#define CONFIG_REMAP_FILTER 0
+#define CONFIG_REMOVEGRAIN_FILTER 0
+#define CONFIG_REMOVELOGO_FILTER 0
+#define CONFIG_REPEATFIELDS_FILTER 0
+#define CONFIG_REVERSE_FILTER 0
+#define CONFIG_RGBASHIFT_FILTER 0
+#define CONFIG_ROBERTS_FILTER 0
+#define CONFIG_ROBERTS_OPENCL_FILTER 0
+#define CONFIG_ROTATE_FILTER 0
+#define CONFIG_SAB_FILTER 0
+#define CONFIG_SCALE_FILTER 0
+#define CONFIG_SCALE_CUDA_FILTER 0
+#define CONFIG_SCALE_NPP_FILTER 0
+#define CONFIG_SCALE_QSV_FILTER 0
+#define CONFIG_SCALE_VAAPI_FILTER 0
+#define CONFIG_SCALE_VULKAN_FILTER 0
+#define CONFIG_SCALE2REF_FILTER 0
+#define CONFIG_SCALE2REF_NPP_FILTER 0
+#define CONFIG_SCDET_FILTER 0
+#define CONFIG_SCHARR_FILTER 0
+#define CONFIG_SCROLL_FILTER 0
+#define CONFIG_SEGMENT_FILTER 0
+#define CONFIG_SELECT_FILTER 0
+#define CONFIG_SELECTIVECOLOR_FILTER 0
+#define CONFIG_SENDCMD_FILTER 0
+#define CONFIG_SEPARATEFIELDS_FILTER 0
+#define CONFIG_SETDAR_FILTER 0
+#define CONFIG_SETFIELD_FILTER 0
+#define CONFIG_SETPARAMS_FILTER 0
+#define CONFIG_SETPTS_FILTER 0
+#define CONFIG_SETRANGE_FILTER 0
+#define CONFIG_SETSAR_FILTER 0
+#define CONFIG_SETTB_FILTER 0
+#define CONFIG_SHARPEN_NPP_FILTER 0
+#define CONFIG_SHARPNESS_VAAPI_FILTER 0
+#define CONFIG_SHEAR_FILTER 0
+#define CONFIG_SHOWINFO_FILTER 0
+#define CONFIG_SHOWPALETTE_FILTER 0
+#define CONFIG_SHUFFLEFRAMES_FILTER 0
+#define CONFIG_SHUFFLEPIXELS_FILTER 0
+#define CONFIG_SHUFFLEPLANES_FILTER 0
+#define CONFIG_SIDEDATA_FILTER 0
+#define CONFIG_SIGNALSTATS_FILTER 0
+#define CONFIG_SIGNATURE_FILTER 0
+#define CONFIG_SITI_FILTER 0
+#define CONFIG_SMARTBLUR_FILTER 0
+#define CONFIG_SOBEL_FILTER 0
+#define CONFIG_SOBEL_OPENCL_FILTER 0
+#define CONFIG_SPLIT_FILTER 0
+#define CONFIG_SPP_FILTER 0
+#define CONFIG_SR_FILTER 0
+#define CONFIG_SSIM_FILTER 0
+#define CONFIG_STEREO3D_FILTER 0
+#define CONFIG_STREAMSELECT_FILTER 0
+#define CONFIG_SUBTITLES_FILTER 0
+#define CONFIG_SUPER2XSAI_FILTER 0
+#define CONFIG_SWAPRECT_FILTER 0
+#define CONFIG_SWAPUV_FILTER 0
+#define CONFIG_TBLEND_FILTER 0
+#define CONFIG_TELECINE_FILTER 0
+#define CONFIG_THISTOGRAM_FILTER 0
+#define CONFIG_THRESHOLD_FILTER 0
+#define CONFIG_THUMBNAIL_FILTER 0
+#define CONFIG_THUMBNAIL_CUDA_FILTER 0
+#define CONFIG_TILE_FILTER 0
+#define CONFIG_TINTERLACE_FILTER 0
+#define CONFIG_TLUT2_FILTER 0
+#define CONFIG_TMEDIAN_FILTER 0
+#define CONFIG_TMIDEQUALIZER_FILTER 0
+#define CONFIG_TMIX_FILTER 0
+#define CONFIG_TONEMAP_FILTER 0
+#define CONFIG_TONEMAP_OPENCL_FILTER 0
+#define CONFIG_TONEMAP_VAAPI_FILTER 0
+#define CONFIG_TPAD_FILTER 0
+#define CONFIG_TRANSPOSE_FILTER 0
+#define CONFIG_TRANSPOSE_NPP_FILTER 0
+#define CONFIG_TRANSPOSE_OPENCL_FILTER 0
+#define CONFIG_TRANSPOSE_VAAPI_FILTER 0
+#define CONFIG_TRANSPOSE_VULKAN_FILTER 0
+#define CONFIG_TRIM_FILTER 0
+#define CONFIG_UNPREMULTIPLY_FILTER 0
+#define CONFIG_UNSHARP_FILTER 0
+#define CONFIG_UNSHARP_OPENCL_FILTER 0
+#define CONFIG_UNTILE_FILTER 0
+#define CONFIG_USPP_FILTER 0
+#define CONFIG_V360_FILTER 0
+#define CONFIG_VAGUEDENOISER_FILTER 0
+#define CONFIG_VARBLUR_FILTER 0
+#define CONFIG_VECTORSCOPE_FILTER 0
+#define CONFIG_VFLIP_FILTER 0
+#define CONFIG_VFLIP_VULKAN_FILTER 0
+#define CONFIG_VFRDET_FILTER 0
+#define CONFIG_VIBRANCE_FILTER 0
+#define CONFIG_VIDSTABDETECT_FILTER 0
+#define CONFIG_VIDSTABTRANSFORM_FILTER 0
+#define CONFIG_VIF_FILTER 0
+#define CONFIG_VIGNETTE_FILTER 0
+#define CONFIG_VMAFMOTION_FILTER 0
+#define CONFIG_VPP_QSV_FILTER 0
+#define CONFIG_VSTACK_FILTER 0
+#define CONFIG_W3FDIF_FILTER 0
+#define CONFIG_WAVEFORM_FILTER 0
+#define CONFIG_WEAVE_FILTER 0
+#define CONFIG_XBR_FILTER 0
+#define CONFIG_XCORRELATE_FILTER 0
+#define CONFIG_XFADE_FILTER 0
+#define CONFIG_XFADE_OPENCL_FILTER 0
+#define CONFIG_XMEDIAN_FILTER 0
+#define CONFIG_XSTACK_FILTER 0
+#define CONFIG_YADIF_FILTER 0
+#define CONFIG_YADIF_CUDA_FILTER 0
+#define CONFIG_YADIF_VIDEOTOOLBOX_FILTER 0
+#define CONFIG_YAEPBLUR_FILTER 0
+#define CONFIG_ZMQ_FILTER 0
+#define CONFIG_ZOOMPAN_FILTER 0
+#define CONFIG_ZSCALE_FILTER 0
+#define CONFIG_ALLRGB_FILTER 0
+#define CONFIG_ALLYUV_FILTER 0
+#define CONFIG_CELLAUTO_FILTER 0
+#define CONFIG_COLOR_FILTER 0
+#define CONFIG_COLORCHART_FILTER 0
+#define CONFIG_COLORSPECTRUM_FILTER 0
+#define CONFIG_COREIMAGESRC_FILTER 0
+#define CONFIG_FREI0R_SRC_FILTER 0
+#define CONFIG_GRADIENTS_FILTER 0
+#define CONFIG_HALDCLUTSRC_FILTER 0
+#define CONFIG_LIFE_FILTER 0
+#define CONFIG_MANDELBROT_FILTER 0
+#define CONFIG_MPTESTSRC_FILTER 0
+#define CONFIG_NULLSRC_FILTER 0
+#define CONFIG_OPENCLSRC_FILTER 0
+#define CONFIG_PAL75BARS_FILTER 0
+#define CONFIG_PAL100BARS_FILTER 0
+#define CONFIG_RGBTESTSRC_FILTER 0
+#define CONFIG_SIERPINSKI_FILTER 0
+#define CONFIG_SMPTEBARS_FILTER 0
+#define CONFIG_SMPTEHDBARS_FILTER 0
+#define CONFIG_TESTSRC_FILTER 0
+#define CONFIG_TESTSRC2_FILTER 0
+#define CONFIG_YUVTESTSRC_FILTER 0
+#define CONFIG_NULLSINK_FILTER 0
+#define CONFIG_ABITSCOPE_FILTER 0
+#define CONFIG_ADRAWGRAPH_FILTER 0
+#define CONFIG_AGRAPHMONITOR_FILTER 0
+#define CONFIG_AHISTOGRAM_FILTER 0
+#define CONFIG_APHASEMETER_FILTER 0
+#define CONFIG_AVECTORSCOPE_FILTER 0
+#define CONFIG_CONCAT_FILTER 0
+#define CONFIG_SHOWCQT_FILTER 0
+#define CONFIG_SHOWFREQS_FILTER 0
+#define CONFIG_SHOWSPATIAL_FILTER 0
+#define CONFIG_SHOWSPECTRUM_FILTER 0
+#define CONFIG_SHOWSPECTRUMPIC_FILTER 0
+#define CONFIG_SHOWVOLUME_FILTER 0
+#define CONFIG_SHOWWAVES_FILTER 0
+#define CONFIG_SHOWWAVESPIC_FILTER 0
+#define CONFIG_SPECTRUMSYNTH_FILTER 0
+#define CONFIG_AVSYNCTEST_FILTER 0
+#define CONFIG_AMOVIE_FILTER 0
+#define CONFIG_MOVIE_FILTER 0
+#define CONFIG_AFIFO_FILTER 0
+#define CONFIG_FIFO_FILTER 0
+#define CONFIG_AA_DEMUXER 0
+#define CONFIG_AAC_DEMUXER 0
+#define CONFIG_AAX_DEMUXER 0
+#define CONFIG_AC3_DEMUXER 0
+#define CONFIG_ACE_DEMUXER 0
+#define CONFIG_ACM_DEMUXER 0
+#define CONFIG_ACT_DEMUXER 0
+#define CONFIG_ADF_DEMUXER 0
+#define CONFIG_ADP_DEMUXER 0
+#define CONFIG_ADS_DEMUXER 0
+#define CONFIG_ADX_DEMUXER 0
+#define CONFIG_AEA_DEMUXER 0
+#define CONFIG_AFC_DEMUXER 0
+#define CONFIG_AIFF_DEMUXER 0
+#define CONFIG_AIX_DEMUXER 0
+#define CONFIG_ALP_DEMUXER 0
+#define CONFIG_AMR_DEMUXER 0
+#define CONFIG_AMRNB_DEMUXER 0
+#define CONFIG_AMRWB_DEMUXER 0
+#define CONFIG_ANM_DEMUXER 0
+#define CONFIG_APC_DEMUXER 0
+#define CONFIG_APE_DEMUXER 0
+#define CONFIG_APM_DEMUXER 0
+#define CONFIG_APNG_DEMUXER 0
+#define CONFIG_APTX_DEMUXER 0
+#define CONFIG_APTX_HD_DEMUXER 0
+#define CONFIG_AQTITLE_DEMUXER 0
+#define CONFIG_ARGO_ASF_DEMUXER 0
+#define CONFIG_ARGO_BRP_DEMUXER 0
+#define CONFIG_ARGO_CVG_DEMUXER 0
+#define CONFIG_ASF_DEMUXER 0
+#define CONFIG_ASF_O_DEMUXER 0
+#define CONFIG_ASS_DEMUXER 0
+#define CONFIG_AST_DEMUXER 0
+#define CONFIG_AU_DEMUXER 0
+#define CONFIG_AV1_DEMUXER 0
+#define CONFIG_AVI_DEMUXER 0
+#define CONFIG_AVISYNTH_DEMUXER 0
+#define CONFIG_AVR_DEMUXER 0
+#define CONFIG_AVS_DEMUXER 0
+#define CONFIG_AVS2_DEMUXER 0
+#define CONFIG_AVS3_DEMUXER 0
+#define CONFIG_BETHSOFTVID_DEMUXER 0
+#define CONFIG_BFI_DEMUXER 0
+#define CONFIG_BINTEXT_DEMUXER 0
+#define CONFIG_BINK_DEMUXER 0
+#define CONFIG_BINKA_DEMUXER 0
+#define CONFIG_BIT_DEMUXER 0
+#define CONFIG_BITPACKED_DEMUXER 0
+#define CONFIG_BMV_DEMUXER 0
+#define CONFIG_BFSTM_DEMUXER 0
+#define CONFIG_BRSTM_DEMUXER 0
+#define CONFIG_BOA_DEMUXER 0
+#define CONFIG_C93_DEMUXER 0
+#define CONFIG_CAF_DEMUXER 0
+#define CONFIG_CAVSVIDEO_DEMUXER 0
+#define CONFIG_CDG_DEMUXER 0
+#define CONFIG_CDXL_DEMUXER 0
+#define CONFIG_CINE_DEMUXER 0
+#define CONFIG_CODEC2_DEMUXER 0
+#define CONFIG_CODEC2RAW_DEMUXER 0
+#define CONFIG_CONCAT_DEMUXER 0
+#define CONFIG_DASH_DEMUXER 0
+#define CONFIG_DATA_DEMUXER 0
+#define CONFIG_DAUD_DEMUXER 0
+#define CONFIG_DCSTR_DEMUXER 0
+#define CONFIG_DERF_DEMUXER 0
+#define CONFIG_DFA_DEMUXER 0
+#define CONFIG_DFPWM_DEMUXER 0
+#define CONFIG_DHAV_DEMUXER 0
+#define CONFIG_DIRAC_DEMUXER 0
+#define CONFIG_DNXHD_DEMUXER 0
+#define CONFIG_DSF_DEMUXER 0
+#define CONFIG_DSICIN_DEMUXER 0
+#define CONFIG_DSS_DEMUXER 0
+#define CONFIG_DTS_DEMUXER 0
+#define CONFIG_DTSHD_DEMUXER 0
+#define CONFIG_DV_DEMUXER 0
+#define CONFIG_DVBSUB_DEMUXER 0
+#define CONFIG_DVBTXT_DEMUXER 0
+#define CONFIG_DXA_DEMUXER 0
+#define CONFIG_EA_DEMUXER 0
+#define CONFIG_EA_CDATA_DEMUXER 0
+#define CONFIG_EAC3_DEMUXER 0
+#define CONFIG_EPAF_DEMUXER 0
+#define CONFIG_FFMETADATA_DEMUXER 0
+#define CONFIG_FILMSTRIP_DEMUXER 0
+#define CONFIG_FITS_DEMUXER 0
+#define CONFIG_FLAC_DEMUXER 0
+#define CONFIG_FLIC_DEMUXER 0
+#define CONFIG_FLV_DEMUXER 0
+#define CONFIG_LIVE_FLV_DEMUXER 0
+#define CONFIG_FOURXM_DEMUXER 0
+#define CONFIG_FRM_DEMUXER 0
+#define CONFIG_FSB_DEMUXER 0
+#define CONFIG_FWSE_DEMUXER 0
+#define CONFIG_G722_DEMUXER 0
+#define CONFIG_G723_1_DEMUXER 0
+#define CONFIG_G726_DEMUXER 0
+#define CONFIG_G726LE_DEMUXER 0
+#define CONFIG_G729_DEMUXER 0
+#define CONFIG_GDV_DEMUXER 0
+#define CONFIG_GENH_DEMUXER 0
+#define CONFIG_GIF_DEMUXER 0
+#define CONFIG_GSM_DEMUXER 0
+#define CONFIG_GXF_DEMUXER 0
+#define CONFIG_H261_DEMUXER 0
+#define CONFIG_H263_DEMUXER 0
+#define CONFIG_H264_DEMUXER 0
+#define CONFIG_HCA_DEMUXER 0
+#define CONFIG_HCOM_DEMUXER 0
+#define CONFIG_HEVC_DEMUXER 0
+#define CONFIG_HLS_DEMUXER 0
+#define CONFIG_HNM_DEMUXER 0
+#define CONFIG_ICO_DEMUXER 0
+#define CONFIG_IDCIN_DEMUXER 0
+#define CONFIG_IDF_DEMUXER 0
+#define CONFIG_IFF_DEMUXER 0
+#define CONFIG_IFV_DEMUXER 0
+#define CONFIG_ILBC_DEMUXER 0
+#define CONFIG_IMAGE2_DEMUXER 0
+#define CONFIG_IMAGE2PIPE_DEMUXER 0
+#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0
+#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0
+#define CONFIG_IMF_DEMUXER 0
+#define CONFIG_INGENIENT_DEMUXER 0
+#define CONFIG_IPMOVIE_DEMUXER 0
+#define CONFIG_IPU_DEMUXER 0
+#define CONFIG_IRCAM_DEMUXER 0
+#define CONFIG_ISS_DEMUXER 0
+#define CONFIG_IV8_DEMUXER 0
+#define CONFIG_IVF_DEMUXER 0
+#define CONFIG_IVR_DEMUXER 0
+#define CONFIG_JACOSUB_DEMUXER 0
+#define CONFIG_JV_DEMUXER 0
+#define CONFIG_KUX_DEMUXER 0
+#define CONFIG_KVAG_DEMUXER 0
+#define CONFIG_LMLM4_DEMUXER 0
+#define CONFIG_LOAS_DEMUXER 0
+#define CONFIG_LUODAT_DEMUXER 0
+#define CONFIG_LRC_DEMUXER 0
+#define CONFIG_LVF_DEMUXER 0
+#define CONFIG_LXF_DEMUXER 0
+#define CONFIG_M4V_DEMUXER 0
+#define CONFIG_MCA_DEMUXER 0
+#define CONFIG_MCC_DEMUXER 0
+#define CONFIG_MATROSKA_DEMUXER 0
+#define CONFIG_MGSTS_DEMUXER 0
+#define CONFIG_MICRODVD_DEMUXER 0
+#define CONFIG_MJPEG_DEMUXER 0
+#define CONFIG_MJPEG_2000_DEMUXER 0
+#define CONFIG_MLP_DEMUXER 0
+#define CONFIG_MLV_DEMUXER 0
+#define CONFIG_MM_DEMUXER 0
+#define CONFIG_MMF_DEMUXER 0
+#define CONFIG_MODS_DEMUXER 0
+#define CONFIG_MOFLEX_DEMUXER 0
+#define CONFIG_MOV_DEMUXER 0
+#define CONFIG_MP3_DEMUXER 0
+#define CONFIG_MPC_DEMUXER 0
+#define CONFIG_MPC8_DEMUXER 0
+#define CONFIG_MPEGPS_DEMUXER 0
+#define CONFIG_MPEGTS_DEMUXER 0
+#define CONFIG_MPEGTSRAW_DEMUXER 0
+#define CONFIG_MPEGVIDEO_DEMUXER 0
+#define CONFIG_MPJPEG_DEMUXER 0
+#define CONFIG_MPL2_DEMUXER 0
+#define CONFIG_MPSUB_DEMUXER 0
+#define CONFIG_MSF_DEMUXER 0
+#define CONFIG_MSNWC_TCP_DEMUXER 0
+#define CONFIG_MSP_DEMUXER 0
+#define CONFIG_MTAF_DEMUXER 0
+#define CONFIG_MTV_DEMUXER 0
+#define CONFIG_MUSX_DEMUXER 0
+#define CONFIG_MV_DEMUXER 0
+#define CONFIG_MVI_DEMUXER 0
+#define CONFIG_MXF_DEMUXER 0
+#define CONFIG_MXG_DEMUXER 0
+#define CONFIG_NC_DEMUXER 0
+#define CONFIG_NISTSPHERE_DEMUXER 0
+#define CONFIG_NSP_DEMUXER 0
+#define CONFIG_NSV_DEMUXER 0
+#define CONFIG_NUT_DEMUXER 0
+#define CONFIG_NUV_DEMUXER 0
+#define CONFIG_OBU_DEMUXER 0
+#define CONFIG_OGG_DEMUXER 0
+#define CONFIG_OMA_DEMUXER 0
+#define CONFIG_PAF_DEMUXER 0
+#define CONFIG_PCM_ALAW_DEMUXER 0
+#define CONFIG_PCM_MULAW_DEMUXER 0
+#define CONFIG_PCM_VIDC_DEMUXER 0
+#define CONFIG_PCM_F64BE_DEMUXER 0
+#define CONFIG_PCM_F64LE_DEMUXER 0
+#define CONFIG_PCM_F32BE_DEMUXER 0
+#define CONFIG_PCM_F32LE_DEMUXER 0
+#define CONFIG_PCM_S32BE_DEMUXER 0
+#define CONFIG_PCM_S32LE_DEMUXER 0
+#define CONFIG_PCM_S24BE_DEMUXER 0
+#define CONFIG_PCM_S24LE_DEMUXER 0
+#define CONFIG_PCM_S16BE_DEMUXER 0
+#define CONFIG_PCM_S16LE_DEMUXER 0
+#define CONFIG_PCM_S8_DEMUXER 0
+#define CONFIG_PCM_U32BE_DEMUXER 0
+#define CONFIG_PCM_U32LE_DEMUXER 0
+#define CONFIG_PCM_U24BE_DEMUXER 0
+#define CONFIG_PCM_U24LE_DEMUXER 0
+#define CONFIG_PCM_U16BE_DEMUXER 0
+#define CONFIG_PCM_U16LE_DEMUXER 0
+#define CONFIG_PCM_U8_DEMUXER 0
+#define CONFIG_PJS_DEMUXER 0
+#define CONFIG_PMP_DEMUXER 0
+#define CONFIG_PP_BNK_DEMUXER 0
+#define CONFIG_PVA_DEMUXER 0
+#define CONFIG_PVF_DEMUXER 0
+#define CONFIG_QCP_DEMUXER 0
+#define CONFIG_R3D_DEMUXER 0
+#define CONFIG_RAWVIDEO_DEMUXER 0
+#define CONFIG_REALTEXT_DEMUXER 0
+#define CONFIG_REDSPARK_DEMUXER 0
+#define CONFIG_RL2_DEMUXER 0
+#define CONFIG_RM_DEMUXER 0
+#define CONFIG_ROQ_DEMUXER 0
+#define CONFIG_RPL_DEMUXER 0
+#define CONFIG_RSD_DEMUXER 0
+#define CONFIG_RSO_DEMUXER 0
+#define CONFIG_RTP_DEMUXER 0
+#define CONFIG_RTSP_DEMUXER 0
+#define CONFIG_S337M_DEMUXER 0
+#define CONFIG_SAMI_DEMUXER 0
+#define CONFIG_SAP_DEMUXER 0
+#define CONFIG_SBC_DEMUXER 0
+#define CONFIG_SBG_DEMUXER 0
+#define CONFIG_SCC_DEMUXER 0
+#define CONFIG_SCD_DEMUXER 0
+#define CONFIG_SDP_DEMUXER 0
+#define CONFIG_SDR2_DEMUXER 0
+#define CONFIG_SDS_DEMUXER 0
+#define CONFIG_SDX_DEMUXER 0
+#define CONFIG_SEGAFILM_DEMUXER 0
+#define CONFIG_SER_DEMUXER 0
+#define CONFIG_SGA_DEMUXER 0
+#define CONFIG_SHORTEN_DEMUXER 0
+#define CONFIG_SIFF_DEMUXER 0
+#define CONFIG_SIMBIOSIS_IMX_DEMUXER 0
+#define CONFIG_SLN_DEMUXER 0
+#define CONFIG_SMACKER_DEMUXER 0
+#define CONFIG_SMJPEG_DEMUXER 0
+#define CONFIG_SMUSH_DEMUXER 0
+#define CONFIG_SOL_DEMUXER 0
+#define CONFIG_SOX_DEMUXER 0
+#define CONFIG_SPDIF_DEMUXER 0
+#define CONFIG_SRT_DEMUXER 0
+#define CONFIG_STR_DEMUXER 0
+#define CONFIG_STL_DEMUXER 0
+#define CONFIG_SUBVIEWER1_DEMUXER 0
+#define CONFIG_SUBVIEWER_DEMUXER 0
+#define CONFIG_SUP_DEMUXER 0
+#define CONFIG_SVAG_DEMUXER 0
+#define CONFIG_SVS_DEMUXER 0
+#define CONFIG_SWF_DEMUXER 0
+#define CONFIG_TAK_DEMUXER 0
+#define CONFIG_TEDCAPTIONS_DEMUXER 0
+#define CONFIG_THP_DEMUXER 0
+#define CONFIG_THREEDOSTR_DEMUXER 0
+#define CONFIG_TIERTEXSEQ_DEMUXER 0
+#define CONFIG_TMV_DEMUXER 0
+#define CONFIG_TRUEHD_DEMUXER 0
+#define CONFIG_TTA_DEMUXER 0
+#define CONFIG_TXD_DEMUXER 0
+#define CONFIG_TTY_DEMUXER 0
+#define CONFIG_TY_DEMUXER 0
+#define CONFIG_V210_DEMUXER 0
+#define CONFIG_V210X_DEMUXER 0
+#define CONFIG_VAG_DEMUXER 0
+#define CONFIG_VC1_DEMUXER 0
+#define CONFIG_VC1T_DEMUXER 0
+#define CONFIG_VIVIDAS_DEMUXER 0
+#define CONFIG_VIVO_DEMUXER 0
+#define CONFIG_VMD_DEMUXER 0
+#define CONFIG_VOBSUB_DEMUXER 0
+#define CONFIG_VOC_DEMUXER 0
+#define CONFIG_VPK_DEMUXER 0
+#define CONFIG_VPLAYER_DEMUXER 0
+#define CONFIG_VQF_DEMUXER 0
+#define CONFIG_W64_DEMUXER 0
+#define CONFIG_WAV_DEMUXER 0
+#define CONFIG_WC3_DEMUXER 0
+#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0
+#define CONFIG_WEBVTT_DEMUXER 0
+#define CONFIG_WSAUD_DEMUXER 0
+#define CONFIG_WSD_DEMUXER 0
+#define CONFIG_WSVQA_DEMUXER 0
+#define CONFIG_WTV_DEMUXER 0
+#define CONFIG_WVE_DEMUXER 0
+#define CONFIG_WV_DEMUXER 0
+#define CONFIG_XA_DEMUXER 0
+#define CONFIG_XBIN_DEMUXER 0
+#define CONFIG_XMV_DEMUXER 0
+#define CONFIG_XVAG_DEMUXER 0
+#define CONFIG_XWMA_DEMUXER 0
+#define CONFIG_YOP_DEMUXER 0
+#define CONFIG_YUV4MPEGPIPE_DEMUXER 0
+#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_CRI_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_GEM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_GIF_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_JPEGXL_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PGX_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PHOTOCD_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_QOI_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_SVG_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_VBN_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_XBM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0
+#define CONFIG_IMAGE_XWD_PIPE_DEMUXER 0
+#define CONFIG_LIBGME_DEMUXER 0
+#define CONFIG_LIBMODPLUG_DEMUXER 0
+#define CONFIG_LIBOPENMPT_DEMUXER 0
+#define CONFIG_VAPOURSYNTH_DEMUXER 0
+#define CONFIG_A64_MUXER 0
+#define CONFIG_AC3_MUXER 0
+#define CONFIG_ADTS_MUXER 0
+#define CONFIG_ADX_MUXER 0
+#define CONFIG_AIFF_MUXER 0
+#define CONFIG_ALP_MUXER 0
+#define CONFIG_AMR_MUXER 0
+#define CONFIG_AMV_MUXER 0
+#define CONFIG_APM_MUXER 0
+#define CONFIG_APNG_MUXER 0
+#define CONFIG_APTX_MUXER 0
+#define CONFIG_APTX_HD_MUXER 0
+#define CONFIG_ARGO_ASF_MUXER 0
+#define CONFIG_ARGO_CVG_MUXER 0
+#define CONFIG_ASF_MUXER 0
+#define CONFIG_ASS_MUXER 0
+#define CONFIG_AST_MUXER 0
+#define CONFIG_ASF_STREAM_MUXER 0
+#define CONFIG_AU_MUXER 0
+#define CONFIG_AVI_MUXER 0
+#define CONFIG_AVIF_MUXER 0
+#define CONFIG_AVM2_MUXER 0
+#define CONFIG_AVS2_MUXER 0
+#define CONFIG_AVS3_MUXER 0
+#define CONFIG_BIT_MUXER 0
+#define CONFIG_CAF_MUXER 0
+#define CONFIG_CAVSVIDEO_MUXER 0
+#define CONFIG_CODEC2_MUXER 0
+#define CONFIG_CODEC2RAW_MUXER 0
+#define CONFIG_CRC_MUXER 0
+#define CONFIG_DASH_MUXER 0
+#define CONFIG_DATA_MUXER 0
+#define CONFIG_DAUD_MUXER 0
+#define CONFIG_DFPWM_MUXER 0
+#define CONFIG_DIRAC_MUXER 0
+#define CONFIG_DNXHD_MUXER 0
+#define CONFIG_DTS_MUXER 0
+#define CONFIG_DV_MUXER 0
+#define CONFIG_EAC3_MUXER 0
+#define CONFIG_F4V_MUXER 0
+#define CONFIG_FFMETADATA_MUXER 0
+#define CONFIG_FIFO_MUXER 0
+#define CONFIG_FIFO_TEST_MUXER 0
+#define CONFIG_FILMSTRIP_MUXER 0
+#define CONFIG_FITS_MUXER 0
+#define CONFIG_FLAC_MUXER 0
+#define CONFIG_FLV_MUXER 0
+#define CONFIG_FRAMECRC_MUXER 0
+#define CONFIG_FRAMEHASH_MUXER 0
+#define CONFIG_FRAMEMD5_MUXER 0
+#define CONFIG_G722_MUXER 0
+#define CONFIG_G723_1_MUXER 0
+#define CONFIG_G726_MUXER 0
+#define CONFIG_G726LE_MUXER 0
+#define CONFIG_GIF_MUXER 0
+#define CONFIG_GSM_MUXER 0
+#define CONFIG_GXF_MUXER 0
+#define CONFIG_H261_MUXER 0
+#define CONFIG_H263_MUXER 0
+#define CONFIG_H264_MUXER 0
+#define CONFIG_HASH_MUXER 0
+#define CONFIG_HDS_MUXER 0
+#define CONFIG_HEVC_MUXER 0
+#define CONFIG_HLS_MUXER 0
+#define CONFIG_ICO_MUXER 0
+#define CONFIG_ILBC_MUXER 0
+#define CONFIG_IMAGE2_MUXER 0
+#define CONFIG_IMAGE2PIPE_MUXER 0
+#define CONFIG_IPOD_MUXER 0
+#define CONFIG_IRCAM_MUXER 0
+#define CONFIG_ISMV_MUXER 0
+#define CONFIG_IVF_MUXER 0
+#define CONFIG_JACOSUB_MUXER 0
+#define CONFIG_KVAG_MUXER 0
+#define CONFIG_LATM_MUXER 0
+#define CONFIG_LRC_MUXER 0
+#define CONFIG_M4V_MUXER 0
+#define CONFIG_MD5_MUXER 0
+#define CONFIG_MATROSKA_MUXER 0
+#define CONFIG_MATROSKA_AUDIO_MUXER 0
+#define CONFIG_MICRODVD_MUXER 0
+#define CONFIG_MJPEG_MUXER 0
+#define CONFIG_MLP_MUXER 0
+#define CONFIG_MMF_MUXER 0
+#define CONFIG_MOV_MUXER 0
+#define CONFIG_MP2_MUXER 0
+#define CONFIG_MP3_MUXER 0
+#define CONFIG_MP4_MUXER 0
+#define CONFIG_MPEG1SYSTEM_MUXER 0
+#define CONFIG_MPEG1VCD_MUXER 0
+#define CONFIG_MPEG1VIDEO_MUXER 0
+#define CONFIG_MPEG2DVD_MUXER 0
+#define CONFIG_MPEG2SVCD_MUXER 0
+#define CONFIG_MPEG2VIDEO_MUXER 0
+#define CONFIG_MPEG2VOB_MUXER 0
+#define CONFIG_MPEGTS_MUXER 0
+#define CONFIG_MPJPEG_MUXER 0
+#define CONFIG_MXF_MUXER 0
+#define CONFIG_MXF_D10_MUXER 0
+#define CONFIG_MXF_OPATOM_MUXER 0
+#define CONFIG_NULL_MUXER 0
+#define CONFIG_NUT_MUXER 0
+#define CONFIG_OBU_MUXER 0
+#define CONFIG_OGA_MUXER 0
+#define CONFIG_OGG_MUXER 0
+#define CONFIG_OGV_MUXER 0
+#define CONFIG_OMA_MUXER 0
+#define CONFIG_OPUS_MUXER 0
+#define CONFIG_PCM_ALAW_MUXER 0
+#define CONFIG_PCM_MULAW_MUXER 0
+#define CONFIG_PCM_VIDC_MUXER 0
+#define CONFIG_PCM_F64BE_MUXER 0
+#define CONFIG_PCM_F64LE_MUXER 0
+#define CONFIG_PCM_F32BE_MUXER 0
+#define CONFIG_PCM_F32LE_MUXER 0
+#define CONFIG_PCM_S32BE_MUXER 0
+#define CONFIG_PCM_S32LE_MUXER 0
+#define CONFIG_PCM_S24BE_MUXER 0
+#define CONFIG_PCM_S24LE_MUXER 0
+#define CONFIG_PCM_S16BE_MUXER 0
+#define CONFIG_PCM_S16LE_MUXER 0
+#define CONFIG_PCM_S8_MUXER 0
+#define CONFIG_PCM_U32BE_MUXER 0
+#define CONFIG_PCM_U32LE_MUXER 0
+#define CONFIG_PCM_U24BE_MUXER 0
+#define CONFIG_PCM_U24LE_MUXER 0
+#define CONFIG_PCM_U16BE_MUXER 0
+#define CONFIG_PCM_U16LE_MUXER 0
+#define CONFIG_PCM_U8_MUXER 0
+#define CONFIG_PSP_MUXER 0
+#define CONFIG_RAWVIDEO_MUXER 0
+#define CONFIG_RM_MUXER 0
+#define CONFIG_ROQ_MUXER 0
+#define CONFIG_RSO_MUXER 0
+#define CONFIG_RTP_MUXER 0
+#define CONFIG_RTP_MPEGTS_MUXER 0
+#define CONFIG_RTSP_MUXER 0
+#define CONFIG_SAP_MUXER 0
+#define CONFIG_SBC_MUXER 0
+#define CONFIG_SCC_MUXER 0
+#define CONFIG_SEGAFILM_MUXER 0
+#define CONFIG_SEGMENT_MUXER 0
+#define CONFIG_STREAM_SEGMENT_MUXER 0
+#define CONFIG_SMJPEG_MUXER 0
+#define CONFIG_SMOOTHSTREAMING_MUXER 0
+#define CONFIG_SOX_MUXER 0
+#define CONFIG_SPX_MUXER 0
+#define CONFIG_SPDIF_MUXER 0
+#define CONFIG_SRT_MUXER 0
+#define CONFIG_STREAMHASH_MUXER 0
+#define CONFIG_SUP_MUXER 0
+#define CONFIG_SWF_MUXER 0
+#define CONFIG_TEE_MUXER 0
+#define CONFIG_TG2_MUXER 0
+#define CONFIG_TGP_MUXER 0
+#define CONFIG_MKVTIMESTAMP_V2_MUXER 0
+#define CONFIG_TRUEHD_MUXER 0
+#define CONFIG_TTA_MUXER 0
+#define CONFIG_TTML_MUXER 0
+#define CONFIG_UNCODEDFRAMECRC_MUXER 0
+#define CONFIG_VC1_MUXER 0
+#define CONFIG_VC1T_MUXER 0
+#define CONFIG_VOC_MUXER 0
+#define CONFIG_W64_MUXER 0
+#define CONFIG_WAV_MUXER 0
+#define CONFIG_WEBM_MUXER 0
+#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0
+#define CONFIG_WEBM_CHUNK_MUXER 0
+#define CONFIG_WEBP_MUXER 0
+#define CONFIG_WEBVTT_MUXER 0
+#define CONFIG_WSAUD_MUXER 0
+#define CONFIG_WTV_MUXER 0
+#define CONFIG_WV_MUXER 0
+#define CONFIG_YUV4MPEGPIPE_MUXER 0
+#define CONFIG_CHROMAPRINT_MUXER 0
+#define CONFIG_ASYNC_PROTOCOL 0
+#define CONFIG_BLURAY_PROTOCOL 0
+#define CONFIG_CACHE_PROTOCOL 0
+#define CONFIG_CONCAT_PROTOCOL 0
+#define CONFIG_CONCATF_PROTOCOL 0
+#define CONFIG_CRYPTO_PROTOCOL 0
+#define CONFIG_DATA_PROTOCOL 0
+#define CONFIG_FFRTMPCRYPT_PROTOCOL 0
+#define CONFIG_FFRTMPHTTP_PROTOCOL 0
+#define CONFIG_FILE_PROTOCOL 0
+#define CONFIG_FTP_PROTOCOL 0
+#define CONFIG_GOPHER_PROTOCOL 0
+#define CONFIG_GOPHERS_PROTOCOL 0
+#define CONFIG_HLS_PROTOCOL 0
+#define CONFIG_HTTP_PROTOCOL 0
+#define CONFIG_HTTPPROXY_PROTOCOL 0
+#define CONFIG_HTTPS_PROTOCOL 0
+#define CONFIG_ICECAST_PROTOCOL 0
+#define CONFIG_MMSH_PROTOCOL 0
+#define CONFIG_MMST_PROTOCOL 0
+#define CONFIG_MD5_PROTOCOL 0
+#define CONFIG_PIPE_PROTOCOL 0
+#define CONFIG_PROMPEG_PROTOCOL 0
+#define CONFIG_RTMP_PROTOCOL 0
+#define CONFIG_RTMPE_PROTOCOL 0
+#define CONFIG_RTMPS_PROTOCOL 0
+#define CONFIG_RTMPT_PROTOCOL 0
+#define CONFIG_RTMPTE_PROTOCOL 0
+#define CONFIG_RTMPTS_PROTOCOL 0
+#define CONFIG_RTP_PROTOCOL 0
+#define CONFIG_SCTP_PROTOCOL 0
+#define CONFIG_SRTP_PROTOCOL 0
+#define CONFIG_SUBFILE_PROTOCOL 0
+#define CONFIG_TEE_PROTOCOL 0
+#define CONFIG_TCP_PROTOCOL 0
+#define CONFIG_TLS_PROTOCOL 0
+#define CONFIG_UDP_PROTOCOL 0
+#define CONFIG_UDPLITE_PROTOCOL 0
+#define CONFIG_UNIX_PROTOCOL 0
+#define CONFIG_LIBAMQP_PROTOCOL 0
+#define CONFIG_LIBRIST_PROTOCOL 0
+#define CONFIG_LIBRTMP_PROTOCOL 0
+#define CONFIG_LIBRTMPE_PROTOCOL 0
+#define CONFIG_LIBRTMPS_PROTOCOL 0
+#define CONFIG_LIBRTMPT_PROTOCOL 0
+#define CONFIG_LIBRTMPTE_PROTOCOL 0
+#define CONFIG_LIBSRT_PROTOCOL 0
+#define CONFIG_LIBSSH_PROTOCOL 0
+#define CONFIG_LIBSMBCLIENT_PROTOCOL 0
+#define CONFIG_LIBZMQ_PROTOCOL 0
+#define CONFIG_IPFS_PROTOCOL 0
+#define CONFIG_IPNS_PROTOCOL 0
+#endif /* FFMPEG_CONFIG_COMPONENTS_H */
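The CONFIG_*_FILTER, CONFIG_*_DEMUXER, CONFIG_*_MUXER and CONFIG_*_PROTOCOL values above are plain 0/1 preprocessor constants, so a disabled component is removed at compile time by an ordinary #if check rather than a runtime test. Below is a minimal, self-contained C sketch of that pattern; the DEMO_* flag and function names are hypothetical illustrations, not identifiers from the ffvpx sources.

/* Illustrative sketch: how 0/1 CONFIG_* component flags are typically consumed.
 * The DEMO_* names are hypothetical and not taken from ffvpx. */
#include <stdio.h>

#define CONFIG_DEMO_VP9_DECODER 1   /* component enabled  */
#define CONFIG_DEMO_ASS_FILTER  0   /* component disabled */

#if CONFIG_DEMO_VP9_DECODER
static void demo_register_vp9(void) { puts("vp9 decoder registered"); }
#endif

#if CONFIG_DEMO_ASS_FILTER
static void demo_register_ass(void) { puts("ass filter registered"); }
#endif

int main(void)
{
#if CONFIG_DEMO_VP9_DECODER
    demo_register_vp9();   /* kept: flag is 1 */
#endif
#if CONFIG_DEMO_ASS_FILTER
    demo_register_ass();   /* compiled out: flag is 0 */
#endif
    return 0;
}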
diff --git a/media/ffvpx/config_darwin64.asm b/media/ffvpx/config_darwin64.asm
new file mode 100644
index 0000000000..6123b35b20
--- /dev/null
+++ b/media/ffvpx/config_darwin64.asm
@@ -0,0 +1,664 @@
+; Automatically generated by configure - do not modify!
+%define ARCH_AARCH64 0
+%define ARCH_ALPHA 0
+%define ARCH_ARM 0
+%define ARCH_AVR32 0
+%define ARCH_AVR32_AP 0
+%define ARCH_AVR32_UC 0
+%define ARCH_BFIN 0
+%define ARCH_IA64 0
+%define ARCH_M68K 0
+%define ARCH_MIPS 0
+%define ARCH_MIPS64 0
+%define ARCH_PARISC 0
+%define ARCH_PPC 0
+%define ARCH_PPC64 0
+%define ARCH_S390 0
+%define ARCH_SH4 0
+%define ARCH_SPARC 0
+%define ARCH_SPARC64 0
+%define ARCH_TILEGX 0
+%define ARCH_TILEPRO 0
+%define ARCH_TOMI 0
+%define ARCH_X86 1
+%define ARCH_X86_32 0
+%define ARCH_X86_64 1
+%define HAVE_ARMV5TE 0
+%define HAVE_ARMV6 0
+%define HAVE_ARMV6T2 0
+%define HAVE_ARMV8 0
+%define HAVE_NEON 0
+%define HAVE_VFP 0
+%define HAVE_VFPV3 0
+%define HAVE_SETEND 0
+%define HAVE_ALTIVEC 0
+%define HAVE_DCBZL 0
+%define HAVE_LDBRX 0
+%define HAVE_POWER8 0
+%define HAVE_PPC4XX 0
+%define HAVE_VSX 0
+%define HAVE_AESNI 1
+%define HAVE_AMD3DNOW 1
+%define HAVE_AMD3DNOWEXT 1
+%define HAVE_AVX 1
+%define HAVE_AVX2 1
+%define HAVE_AVX512 1
+%define HAVE_FMA3 1
+%define HAVE_FMA4 1
+%define HAVE_MMX 1
+%define HAVE_MMXEXT 1
+%define HAVE_SSE 1
+%define HAVE_SSE2 1
+%define HAVE_SSE3 1
+%define HAVE_SSE4 1
+%define HAVE_SSE42 1
+%define HAVE_SSSE3 1
+%define HAVE_XOP 1
+%define HAVE_CPUNOP 0
+%define HAVE_I686 1
+%define HAVE_MIPSFPU 0
+%define HAVE_MIPS32R2 0
+%define HAVE_MIPS32R5 0
+%define HAVE_MIPS64R2 0
+%define HAVE_MIPS32R6 0
+%define HAVE_MIPS64R6 0
+%define HAVE_MIPSDSP 0
+%define HAVE_MIPSDSPR2 0
+%define HAVE_MSA 0
+%define HAVE_MSA2 0
+%define HAVE_LOONGSON2 0
+%define HAVE_LOONGSON3 0
+%define HAVE_MMI 0
+%define HAVE_ARMV5TE_EXTERNAL 0
+%define HAVE_ARMV6_EXTERNAL 0
+%define HAVE_ARMV6T2_EXTERNAL 0
+%define HAVE_ARMV8_EXTERNAL 0
+%define HAVE_NEON_EXTERNAL 0
+%define HAVE_VFP_EXTERNAL 0
+%define HAVE_VFPV3_EXTERNAL 0
+%define HAVE_SETEND_EXTERNAL 0
+%define HAVE_ALTIVEC_EXTERNAL 0
+%define HAVE_DCBZL_EXTERNAL 0
+%define HAVE_LDBRX_EXTERNAL 0
+%define HAVE_POWER8_EXTERNAL 0
+%define HAVE_PPC4XX_EXTERNAL 0
+%define HAVE_VSX_EXTERNAL 0
+%define HAVE_AESNI_EXTERNAL 1
+%define HAVE_AMD3DNOW_EXTERNAL 1
+%define HAVE_AMD3DNOWEXT_EXTERNAL 1
+%define HAVE_AVX_EXTERNAL 1
+%define HAVE_AVX2_EXTERNAL 1
+%define HAVE_AVX512_EXTERNAL 1
+%define HAVE_FMA3_EXTERNAL 1
+%define HAVE_FMA4_EXTERNAL 1
+%define HAVE_MMX_EXTERNAL 1
+%define HAVE_MMXEXT_EXTERNAL 1
+%define HAVE_SSE_EXTERNAL 1
+%define HAVE_SSE2_EXTERNAL 1
+%define HAVE_SSE3_EXTERNAL 1
+%define HAVE_SSE4_EXTERNAL 1
+%define HAVE_SSE42_EXTERNAL 1
+%define HAVE_SSSE3_EXTERNAL 1
+%define HAVE_XOP_EXTERNAL 1
+%define HAVE_CPUNOP_EXTERNAL 0
+%define HAVE_I686_EXTERNAL 0
+%define HAVE_MIPSFPU_EXTERNAL 0
+%define HAVE_MIPS32R2_EXTERNAL 0
+%define HAVE_MIPS32R5_EXTERNAL 0
+%define HAVE_MIPS64R2_EXTERNAL 0
+%define HAVE_MIPS32R6_EXTERNAL 0
+%define HAVE_MIPS64R6_EXTERNAL 0
+%define HAVE_MIPSDSP_EXTERNAL 0
+%define HAVE_MIPSDSPR2_EXTERNAL 0
+%define HAVE_MSA_EXTERNAL 0
+%define HAVE_MSA2_EXTERNAL 0
+%define HAVE_LOONGSON2_EXTERNAL 0
+%define HAVE_LOONGSON3_EXTERNAL 0
+%define HAVE_MMI_EXTERNAL 0
+%define HAVE_ARMV5TE_INLINE 0
+%define HAVE_ARMV6_INLINE 0
+%define HAVE_ARMV6T2_INLINE 0
+%define HAVE_ARMV8_INLINE 0
+%define HAVE_NEON_INLINE 0
+%define HAVE_VFP_INLINE 0
+%define HAVE_VFPV3_INLINE 0
+%define HAVE_SETEND_INLINE 0
+%define HAVE_ALTIVEC_INLINE 0
+%define HAVE_DCBZL_INLINE 0
+%define HAVE_LDBRX_INLINE 0
+%define HAVE_POWER8_INLINE 0
+%define HAVE_PPC4XX_INLINE 0
+%define HAVE_VSX_INLINE 0
+%define HAVE_AESNI_INLINE 1
+%define HAVE_AMD3DNOW_INLINE 1
+%define HAVE_AMD3DNOWEXT_INLINE 1
+%define HAVE_AVX_INLINE 1
+%define HAVE_AVX2_INLINE 1
+%define HAVE_AVX512_INLINE 1
+%define HAVE_FMA3_INLINE 1
+%define HAVE_FMA4_INLINE 1
+%define HAVE_MMX_INLINE 1
+%define HAVE_MMXEXT_INLINE 1
+%define HAVE_SSE_INLINE 1
+%define HAVE_SSE2_INLINE 1
+%define HAVE_SSE3_INLINE 1
+%define HAVE_SSE4_INLINE 1
+%define HAVE_SSE42_INLINE 1
+%define HAVE_SSSE3_INLINE 1
+%define HAVE_XOP_INLINE 1
+%define HAVE_CPUNOP_INLINE 0
+%define HAVE_I686_INLINE 0
+%define HAVE_MIPSFPU_INLINE 0
+%define HAVE_MIPS32R2_INLINE 0
+%define HAVE_MIPS32R5_INLINE 0
+%define HAVE_MIPS64R2_INLINE 0
+%define HAVE_MIPS32R6_INLINE 0
+%define HAVE_MIPS64R6_INLINE 0
+%define HAVE_MIPSDSP_INLINE 0
+%define HAVE_MIPSDSPR2_INLINE 0
+%define HAVE_MSA_INLINE 0
+%define HAVE_MSA2_INLINE 0
+%define HAVE_LOONGSON2_INLINE 0
+%define HAVE_LOONGSON3_INLINE 0
+%define HAVE_MMI_INLINE 0
+%define HAVE_ALIGNED_STACK 1
+%define HAVE_FAST_64BIT 1
+%define HAVE_FAST_CLZ 1
+%define HAVE_FAST_CMOV 1
+%define HAVE_LOCAL_ALIGNED 1
+%define HAVE_SIMD_ALIGN_16 1
+%define HAVE_SIMD_ALIGN_32 1
+%define HAVE_SIMD_ALIGN_64 1
+%define HAVE_ATOMIC_CAS_PTR 0
+%define HAVE_MACHINE_RW_BARRIER 0
+%define HAVE_MEMORYBARRIER 0
+%define HAVE_MM_EMPTY 1
+%define HAVE_RDTSC 0
+%define HAVE_SEM_TIMEDWAIT 0
+%define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+%define HAVE_CABS 1
+%define HAVE_CEXP 1
+%define HAVE_INLINE_ASM 1
+%define HAVE_SYMVER 1
+%define HAVE_X86ASM 1
+%define HAVE_BIGENDIAN 0
+%define HAVE_FAST_UNALIGNED 1
+%define HAVE_ARPA_INET_H 1
+%define HAVE_ASM_TYPES_H 0
+%define HAVE_CDIO_PARANOIA_H 0
+%define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+%define HAVE_CUDA_H 0
+%define HAVE_DISPATCH_DISPATCH_H 1
+%define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+%define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+%define HAVE_DEV_IC_BT8XX_H 0
+%define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+%define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+%define HAVE_DIRECT_H 0
+%define HAVE_DIRENT_H 1
+%define HAVE_DXGIDEBUG_H 0
+%define HAVE_DXVA_H 0
+%define HAVE_ES2_GL_H 0
+%define HAVE_GSM_H 0
+%define HAVE_IO_H 0
+%define HAVE_LINUX_PERF_EVENT_H 0
+%define HAVE_MACHINE_IOCTL_BT848_H 0
+%define HAVE_MACHINE_IOCTL_METEOR_H 0
+%define HAVE_OPENCV2_CORE_CORE_C_H 0
+%define HAVE_OPENGL_GL3_H 0
+%define HAVE_POLL_H 1
+%define HAVE_SYS_PARAM_H 1
+%define HAVE_SYS_RESOURCE_H 1
+%define HAVE_SYS_SELECT_H 1
+%define HAVE_SYS_SOUNDCARD_H 0
+%define HAVE_SYS_TIME_H 1
+%define HAVE_SYS_UN_H 1
+%define HAVE_SYS_VIDEOIO_H 0
+%define HAVE_TERMIOS_H 1
+%define HAVE_UDPLITE_H 0
+%define HAVE_UNISTD_H 1
+%define HAVE_VALGRIND_VALGRIND_H 0
+%define HAVE_WINDOWS_H 0
+%define HAVE_WINSOCK2_H 0
+%define HAVE_INTRINSICS_NEON 0
+%define HAVE_ATANF 1
+%define HAVE_ATAN2F 1
+%define HAVE_CBRT 1
+%define HAVE_CBRTF 1
+%define HAVE_COPYSIGN 1
+%define HAVE_COSF 1
+%define HAVE_ERF 1
+%define HAVE_EXP2 1
+%define HAVE_EXP2F 1
+%define HAVE_EXPF 1
+%define HAVE_HYPOT 1
+%define HAVE_ISFINITE 1
+%define HAVE_ISINF 1
+%define HAVE_ISNAN 1
+%define HAVE_LDEXPF 1
+%define HAVE_LLRINT 1
+%define HAVE_LLRINTF 1
+%define HAVE_LOG2 1
+%define HAVE_LOG2F 1
+%define HAVE_LOG10F 1
+%define HAVE_LRINT 1
+%define HAVE_LRINTF 1
+%define HAVE_POWF 1
+%define HAVE_RINT 1
+%define HAVE_ROUND 1
+%define HAVE_ROUNDF 1
+%define HAVE_SINF 1
+%define HAVE_TRUNC 1
+%define HAVE_TRUNCF 1
+%define HAVE_DOS_PATHS 0
+%define HAVE_LIBC_MSVCRT 0
+%define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+%define HAVE_SECTION_DATA_REL_RO 0
+%define HAVE_THREADS 1
+%define HAVE_UWP 0
+%define HAVE_WINRT 0
+%define HAVE_ACCESS 1
+%define HAVE_ALIGNED_MALLOC 0
+%define HAVE_CLOCK_GETTIME 1
+%define HAVE_CLOSESOCKET 0
+%define HAVE_COMMANDLINETOARGVW 0
+%define HAVE_FCNTL 1
+%define HAVE_GETADDRINFO 1
+%define HAVE_GETHRTIME 0
+%define HAVE_GETOPT 1
+%define HAVE_GETPROCESSAFFINITYMASK 0
+%define HAVE_GETPROCESSMEMORYINFO 0
+%define HAVE_GETPROCESSTIMES 0
+%define HAVE_GETRUSAGE 1
+%define HAVE_GETSYSTEMTIMEASFILETIME 0
+%define HAVE_GETTIMEOFDAY 1
+%define HAVE_GLOB 1
+%define HAVE_GLXGETPROCADDRESS 0
+%define HAVE_GMTIME_R 1
+%define HAVE_INET_ATON 1
+%define HAVE_ISATTY 1
+%define HAVE_KBHIT 0
+%define HAVE_LSTAT 1
+%define HAVE_LZO1X_999_COMPRESS 0
+%define HAVE_MACH_ABSOLUTE_TIME 1
+%define HAVE_MAPVIEWOFFILE 0
+%define HAVE_MKSTEMP 1
+%define HAVE_MMAP 1
+%define HAVE_MPROTECT 1
+%define HAVE_NANOSLEEP 1
+%define HAVE_PEEKNAMEDPIPE 0
+%define HAVE_PTHREAD_CANCEL 1
+%define HAVE_SCHED_GETAFFINITY 0
+%define HAVE_SECITEMIMPORT 0
+%define HAVE_SETCONSOLETEXTATTRIBUTE 0
+%define HAVE_SETCONSOLECTRLHANDLER 0
+%define HAVE_SETMODE 0
+%define HAVE_SETRLIMIT 1
+%define HAVE_SLEEP 0
+%define HAVE_STRERROR_R 1
+%define HAVE_SYSCONF 1
+%define HAVE_SYSCTL 1
+%define HAVE_USLEEP 1
+%define HAVE_UTGETOSTYPEFROMSTRING 0
+%define HAVE_VIRTUALALLOC 0
+%define HAVE_WGLGETPROCADDRESS 0
+%define HAVE_BCRYPT 0
+%define HAVE_VAAPI_DRM 0
+%define HAVE_VAAPI_X11 0
+%define HAVE_VDPAU_X11 0
+%define HAVE_PTHREADS 1
+%define HAVE_OS2THREADS 0
+%define HAVE_W32THREADS 0
+%define HAVE_AS_ARCH_DIRECTIVE 0
+%define HAVE_AS_DN_DIRECTIVE 0
+%define HAVE_AS_FPU_DIRECTIVE 0
+%define HAVE_AS_FUNC 0
+%define HAVE_AS_OBJECT_ARCH 0
+%define HAVE_ASM_MOD_Q 0
+%define HAVE_BLOCKS_EXTENSION 1
+%define HAVE_EBP_AVAILABLE 1
+%define HAVE_EBX_AVAILABLE 1
+%define HAVE_GNU_AS 0
+%define HAVE_GNU_WINDRES 0
+%define HAVE_IBM_ASM 0
+%define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 1
+%define HAVE_INLINE_ASM_LABELS 1
+%define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+%define HAVE_PRAGMA_DEPRECATED 1
+%define HAVE_RSYNC_CONTIMEOUT 0
+%define HAVE_SYMVER_ASM_LABEL 1
+%define HAVE_SYMVER_GNU_ASM 0
+%define HAVE_VFP_ARGS 0
+%define HAVE_XFORM_ASM 0
+%define HAVE_XMM_CLOBBERS 1
+%define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+%define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+%define HAVE_SOCKLEN_T 1
+%define HAVE_STRUCT_ADDRINFO 1
+%define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+%define HAVE_STRUCT_IP_MREQ_SOURCE 1
+%define HAVE_STRUCT_IPV6_MREQ 1
+%define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+%define HAVE_STRUCT_POLLFD 1
+%define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+%define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+%define HAVE_STRUCT_SOCKADDR_IN6 1
+%define HAVE_STRUCT_SOCKADDR_SA_LEN 1
+%define HAVE_STRUCT_SOCKADDR_STORAGE 1
+%define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0
+%define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+%define HAVE_MAKEINFO 1
+%define HAVE_MAKEINFO_HTML 0
+%define HAVE_OPENCL_D3D11 0
+%define HAVE_OPENCL_DRM_ARM 0
+%define HAVE_OPENCL_DRM_BEIGNET 0
+%define HAVE_OPENCL_DXVA2 0
+%define HAVE_OPENCL_VAAPI_BEIGNET 0
+%define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+%define HAVE_PERL 1
+%define HAVE_POD2MAN 1
+%define HAVE_TEXI2HTML 0
+%define CONFIG_DOC 0
+%define CONFIG_HTMLPAGES 0
+%define CONFIG_MANPAGES 1
+%define CONFIG_PODPAGES 1
+%define CONFIG_TXTPAGES 1
+%define CONFIG_AVIO_DIR_CMD_EXAMPLE 1
+%define CONFIG_AVIO_READING_EXAMPLE 1
+%define CONFIG_DECODE_AUDIO_EXAMPLE 1
+%define CONFIG_DECODE_VIDEO_EXAMPLE 1
+%define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+%define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+%define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+%define CONFIG_EXTRACT_MVS_EXAMPLE 0
+%define CONFIG_FILTER_AUDIO_EXAMPLE 0
+%define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+%define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+%define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+%define CONFIG_HW_DECODE_EXAMPLE 0
+%define CONFIG_METADATA_EXAMPLE 0
+%define CONFIG_MUXING_EXAMPLE 0
+%define CONFIG_QSVDEC_EXAMPLE 0
+%define CONFIG_REMUXING_EXAMPLE 0
+%define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+%define CONFIG_SCALING_VIDEO_EXAMPLE 0
+%define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+%define CONFIG_TRANSCODING_EXAMPLE 0
+%define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+%define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+%define CONFIG_AVISYNTH 0
+%define CONFIG_FREI0R 0
+%define CONFIG_LIBCDIO 0
+%define CONFIG_LIBDAVS2 0
+%define CONFIG_LIBRUBBERBAND 0
+%define CONFIG_LIBVIDSTAB 0
+%define CONFIG_LIBX264 0
+%define CONFIG_LIBX265 0
+%define CONFIG_LIBXAVS 0
+%define CONFIG_LIBXAVS2 0
+%define CONFIG_LIBXVID 0
+%define CONFIG_DECKLINK 0
+%define CONFIG_LIBFDK_AAC 0
+%define CONFIG_OPENSSL 0
+%define CONFIG_LIBTLS 0
+%define CONFIG_GMP 0
+%define CONFIG_LIBARIBB24 0
+%define CONFIG_LIBLENSFUN 0
+%define CONFIG_LIBOPENCORE_AMRNB 0
+%define CONFIG_LIBOPENCORE_AMRWB 0
+%define CONFIG_LIBVMAF 0
+%define CONFIG_LIBVO_AMRWBENC 0
+%define CONFIG_MBEDTLS 0
+%define CONFIG_RKMPP 0
+%define CONFIG_LIBSMBCLIENT 0
+%define CONFIG_CHROMAPRINT 0
+%define CONFIG_GCRYPT 0
+%define CONFIG_GNUTLS 0
+%define CONFIG_JNI 0
+%define CONFIG_LADSPA 0
+%define CONFIG_LIBAOM 0
+%define CONFIG_LIBASS 0
+%define CONFIG_LIBBLURAY 0
+%define CONFIG_LIBBS2B 0
+%define CONFIG_LIBCACA 0
+%define CONFIG_LIBCELT 0
+%define CONFIG_LIBCODEC2 0
+%define CONFIG_LIBDAV1D 0
+%define CONFIG_LIBDC1394 0
+%define CONFIG_LIBDRM 0
+%define CONFIG_LIBFLITE 0
+%define CONFIG_LIBFONTCONFIG 0
+%define CONFIG_LIBFREETYPE 0
+%define CONFIG_LIBFRIBIDI 0
+%define CONFIG_LIBGME 0
+%define CONFIG_LIBGSM 0
+%define CONFIG_LIBIEC61883 0
+%define CONFIG_LIBILBC 0
+%define CONFIG_LIBJACK 0
+%define CONFIG_LIBKLVANC 0
+%define CONFIG_LIBKVAZAAR 0
+%define CONFIG_LIBMODPLUG 0
+%define CONFIG_LIBMP3LAME 0
+%define CONFIG_LIBMYSOFA 0
+%define CONFIG_LIBOPENCV 0
+%define CONFIG_LIBOPENH264 0
+%define CONFIG_LIBOPENJPEG 0
+%define CONFIG_LIBOPENMPT 0
+%define CONFIG_LIBOPUS 0
+%define CONFIG_LIBPULSE 0
+%define CONFIG_LIBRSVG 0
+%define CONFIG_LIBRTMP 0
+%define CONFIG_LIBSHINE 0
+%define CONFIG_LIBSMBCLIENT 0
+%define CONFIG_LIBSNAPPY 0
+%define CONFIG_LIBSOXR 0
+%define CONFIG_LIBSPEEX 0
+%define CONFIG_LIBSRT 0
+%define CONFIG_LIBSSH 0
+%define CONFIG_LIBTENSORFLOW 0
+%define CONFIG_LIBTESSERACT 0
+%define CONFIG_LIBTHEORA 0
+%define CONFIG_LIBTWOLAME 0
+%define CONFIG_LIBV4L2 0
+%define CONFIG_LIBVORBIS 0
+%define CONFIG_LIBVPX 0
+%define CONFIG_LIBWAVPACK 0
+%define CONFIG_LIBWEBP 0
+%define CONFIG_LIBXML2 0
+%define CONFIG_LIBZIMG 0
+%define CONFIG_LIBZMQ 0
+%define CONFIG_LIBZVBI 0
+%define CONFIG_LV2 0
+%define CONFIG_MEDIACODEC 0
+%define CONFIG_OPENAL 0
+%define CONFIG_OPENGL 0
+%define CONFIG_POCKETSPHINX 0
+%define CONFIG_VAPOURSYNTH 0
+%define CONFIG_ALSA 0
+%define CONFIG_APPKIT 1
+%define CONFIG_AVFOUNDATION 1
+%define CONFIG_BZLIB 1
+%define CONFIG_COREIMAGE 1
+%define CONFIG_ICONV 0
+%define CONFIG_LIBXCB 0
+%define CONFIG_LIBXCB_SHM 0
+%define CONFIG_LIBXCB_SHAPE 0
+%define CONFIG_LIBXCB_XFIXES 0
+%define CONFIG_LZMA 1
+%define CONFIG_SCHANNEL 0
+%define CONFIG_SDL2 0
+%define CONFIG_SECURETRANSPORT 0
+%define CONFIG_SNDIO 0
+%define CONFIG_XLIB 0
+%define CONFIG_ZLIB 1
+%define CONFIG_CUDA_NVCC 0
+%define CONFIG_CUDA_SDK 0
+%define CONFIG_LIBNPP 0
+%define CONFIG_LIBMFX 0
+%define CONFIG_MMAL 0
+%define CONFIG_OMX 0
+%define CONFIG_OPENCL 0
+%define CONFIG_AMF 0
+%define CONFIG_AUDIOTOOLBOX 1
+%define CONFIG_CRYSTALHD 0
+%define CONFIG_CUDA 0
+%define CONFIG_CUDA_LLVM 0
+%define CONFIG_CUVID 0
+%define CONFIG_D3D11VA 0
+%define CONFIG_DXVA2 0
+%define CONFIG_FFNVCODEC 0
+%define CONFIG_NVDEC 0
+%define CONFIG_NVENC 0
+%define CONFIG_VAAPI 0
+%define CONFIG_VDPAU 0
+%define CONFIG_VIDEOTOOLBOX 0
+%define CONFIG_V4L2_M2M 0
+%define CONFIG_XVMC 0
+%define CONFIG_FTRAPV 0
+%define CONFIG_GRAY 0
+%define CONFIG_HARDCODED_TABLES 0
+%define CONFIG_OMX_RPI 0
+%define CONFIG_RUNTIME_CPUDETECT 1
+%define CONFIG_SAFE_BITSTREAM_READER 1
+%define CONFIG_SHARED 1
+%define CONFIG_SMALL 0
+%define CONFIG_STATIC 0
+%define CONFIG_SWSCALE_ALPHA 1
+%define CONFIG_GPL 0
+%define CONFIG_NONFREE 0
+%define CONFIG_VERSION3 0
+%define CONFIG_AVDEVICE 0
+%define CONFIG_AVFILTER 0
+%define CONFIG_SWSCALE 0
+%define CONFIG_POSTPROC 0
+%define CONFIG_AVFORMAT 0
+%define CONFIG_AVCODEC 1
+%define CONFIG_SWRESAMPLE 0
+%define CONFIG_AVRESAMPLE 0
+%define CONFIG_AVUTIL 1
+%define CONFIG_FFPLAY 0
+%define CONFIG_FFPROBE 0
+%define CONFIG_FFMPEG 0
+%define CONFIG_DCT 1
+%define CONFIG_DWT 0
+%define CONFIG_ERROR_RESILIENCE 0
+%define CONFIG_FAAN 1
+%define CONFIG_FAST_UNALIGNED 1
+%define CONFIG_FFT 1
+%define CONFIG_LSP 0
+%define CONFIG_LZO 0
+%define CONFIG_MDCT 0
+%define CONFIG_PIXELUTILS 0
+%define CONFIG_NETWORK 0
+%define CONFIG_RDFT 1
+%define CONFIG_AUTODETECT 0
+%define CONFIG_FONTCONFIG 0
+%define CONFIG_LINUX_PERF 0
+%define CONFIG_MEMORY_POISONING 0
+%define CONFIG_NEON_CLOBBER_TEST 0
+%define CONFIG_OSSFUZZ 0
+%define CONFIG_PIC 1
+%define CONFIG_THUMB 0
+%define CONFIG_VALGRIND_BACKTRACE 0
+%define CONFIG_XMM_CLOBBER_TEST 0
+%define CONFIG_BSFS 1
+%define CONFIG_DECODERS 1
+%define CONFIG_PARSERS 1
+%define CONFIG_AANDCTTABLES 0
+%define CONFIG_AC3DSP 0
+%define CONFIG_ADTS_HEADER 0
+%define CONFIG_AUDIO_FRAME_QUEUE 0
+%define CONFIG_AUDIODSP 0
+%define CONFIG_BLOCKDSP 0
+%define CONFIG_BSWAPDSP 0
+%define CONFIG_CABAC 0
+%define CONFIG_CBS 0
+%define CONFIG_CBS_AV1 0
+%define CONFIG_CBS_H264 0
+%define CONFIG_CBS_H265 0
+%define CONFIG_CBS_JPEG 0
+%define CONFIG_CBS_MPEG2 0
+%define CONFIG_CBS_VP9 0
+%define CONFIG_DIRAC_PARSE 0
+%define CONFIG_DNN 0
+%define CONFIG_DVPROFILE 0
+%define CONFIG_EXIF 0
+%define CONFIG_FAANDCT 1
+%define CONFIG_FAANIDCT 1
+%define CONFIG_FDCTDSP 1
+%define CONFIG_FLACDSP 1
+%define CONFIG_FMTCONVERT 0
+%define CONFIG_FRAME_THREAD_ENCODER 0
+%define CONFIG_G722DSP 0
+%define CONFIG_GOLOMB 0
+%define CONFIG_GPLV3 0
+%define CONFIG_H263DSP 0
+%define CONFIG_H264CHROMA 0
+%define CONFIG_H264DSP 0
+%define CONFIG_H264PARSE 0
+%define CONFIG_H264PRED 1
+%define CONFIG_H264QPEL 0
+%define CONFIG_HEVCPARSE 0
+%define CONFIG_HPELDSP 0
+%define CONFIG_HUFFMAN 0
+%define CONFIG_HUFFYUVDSP 0
+%define CONFIG_HUFFYUVENCDSP 0
+%define CONFIG_IDCTDSP 1
+%define CONFIG_IIRFILTER 0
+%define CONFIG_MDCT15 0
+%define CONFIG_INTRAX8 0
+%define CONFIG_ISO_MEDIA 0
+%define CONFIG_IVIDSP 0
+%define CONFIG_JPEGTABLES 0
+%define CONFIG_LGPLV3 0
+%define CONFIG_LIBX262 0
+%define CONFIG_LLAUDDSP 0
+%define CONFIG_LLVIDDSP 0
+%define CONFIG_LLVIDENCDSP 0
+%define CONFIG_LPC 0
+%define CONFIG_LZF 0
+%define CONFIG_ME_CMP 0
+%define CONFIG_MPEG_ER 0
+%define CONFIG_MPEGAUDIO 1
+%define CONFIG_MPEGAUDIODSP 1
+%define CONFIG_MPEGAUDIOHEADER 1
+%define CONFIG_MPEGVIDEO 0
+%define CONFIG_MPEGVIDEOENC 0
+%define CONFIG_MSS34DSP 0
+%define CONFIG_PIXBLOCKDSP 0
+%define CONFIG_QPELDSP 0
+%define CONFIG_QSV 0
+%define CONFIG_QSVDEC 0
+%define CONFIG_QSVENC 0
+%define CONFIG_QSVVPP 0
+%define CONFIG_RANGECODER 0
+%define CONFIG_RIFFDEC 0
+%define CONFIG_RIFFENC 0
+%define CONFIG_RTPDEC 0
+%define CONFIG_RTPENC_CHAIN 0
+%define CONFIG_RV34DSP 0
+%define CONFIG_SCENE_SAD 0
+%define CONFIG_SINEWIN 0
+%define CONFIG_SNAPPY 0
+%define CONFIG_SRTP 0
+%define CONFIG_STARTCODE 0
+%define CONFIG_TEXTUREDSP 0
+%define CONFIG_TEXTUREDSPENC 0
+%define CONFIG_TPELDSP 0
+%define CONFIG_VAAPI_1 0
+%define CONFIG_VAAPI_ENCODE 0
+%define CONFIG_VC1DSP 0
+%define CONFIG_VIDEODSP 1
+%define CONFIG_VP3DSP 0
+%define CONFIG_VP56DSP 0
+%define CONFIG_VP8DSP 1
+%define CONFIG_WMA_FREQS 0
+%define CONFIG_WMV2DSP 0
+%define CONFIG_NULL_BSF 1
+%define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 1
+%define CONFIG_VP8_DECODER 1
+%define CONFIG_VP9_DECODER 1
+%define CONFIG_FLAC_DECODER 1
+%define CONFIG_MP3_DECODER 1
+%define CONFIG_VP8_PARSER 1
+%define CONFIG_VP9_PARSER 1
diff --git a/media/ffvpx/config_darwin64.h b/media/ffvpx/config_darwin64.h
new file mode 100644
index 0000000000..8c548525d5
--- /dev/null
+++ b/media/ffvpx/config_darwin64.h
@@ -0,0 +1,680 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-everything --disable-protocols --disable-demuxers --disable-muxers --disable-filters --disable-programs --disable-doc --disable-parsers --enable-parser=vp8 --enable-parser=vp9 --enable-decoder=vp8 --enable-decoder=vp9 --disable-static --enable-shared --disable-debug --disable-sdl2 --disable-libxcb --disable-securetransport --disable-iconv --disable-swresample --disable-swscale --disable-avdevice --disable-avfilter --disable-avformat --disable-d3d11va --disable-dxva2 --disable-vaapi --disable-vdpau --disable-videotoolbox --enable-decoder=flac --enable-asm --enable-x86asm --disable-cuda --disable-cuvid --enable-decoder=mp3"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2019
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Apple clang version 11.0.0 (clang-1100.0.33.17)"
+#define av_restrict restrict
+#define EXTERN_PREFIX "_"
+#define EXTERN_ASM _
+#define BUILDSUF ""
+#define SLIBSUF ".dylib"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 0
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 1
+#define ARCH_X86_32 0
+#define ARCH_X86_64 1
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 0
+#define HAVE_NEON 0
+#define HAVE_VFP 0
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 1
+#define HAVE_AMD3DNOW 1
+#define HAVE_AMD3DNOWEXT 1
+#define HAVE_AVX 1
+#define HAVE_AVX2 1
+#define HAVE_AVX512 1
+#define HAVE_FMA3 1
+#define HAVE_FMA4 1
+#define HAVE_MMX 1
+#define HAVE_MMXEXT 1
+#define HAVE_SSE 1
+#define HAVE_SSE2 1
+#define HAVE_SSE3 1
+#define HAVE_SSE4 1
+#define HAVE_SSE42 1
+#define HAVE_SSSE3 1
+#define HAVE_XOP 1
+#define HAVE_CPUNOP 0
+#define HAVE_I686 1
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_MSA2 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 0
+#define HAVE_NEON_EXTERNAL 0
+#define HAVE_VFP_EXTERNAL 0
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 1
+#define HAVE_AMD3DNOW_EXTERNAL 1
+#define HAVE_AMD3DNOWEXT_EXTERNAL 1
+#define HAVE_AVX_EXTERNAL 1
+#define HAVE_AVX2_EXTERNAL 1
+#define HAVE_AVX512_EXTERNAL 1
+#define HAVE_FMA3_EXTERNAL 1
+#define HAVE_FMA4_EXTERNAL 1
+#define HAVE_MMX_EXTERNAL 1
+#define HAVE_MMXEXT_EXTERNAL 1
+#define HAVE_SSE_EXTERNAL 1
+#define HAVE_SSE2_EXTERNAL 1
+#define HAVE_SSE3_EXTERNAL 1
+#define HAVE_SSE4_EXTERNAL 1
+#define HAVE_SSE42_EXTERNAL 1
+#define HAVE_SSSE3_EXTERNAL 1
+#define HAVE_XOP_EXTERNAL 1
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_MSA2_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 0
+#define HAVE_NEON_INLINE 0
+#define HAVE_VFP_INLINE 0
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 1
+#define HAVE_AMD3DNOW_INLINE 1
+#define HAVE_AMD3DNOWEXT_INLINE 1
+#define HAVE_AVX_INLINE 1
+#define HAVE_AVX2_INLINE 1
+#define HAVE_AVX512_INLINE 1
+#define HAVE_FMA3_INLINE 1
+#define HAVE_FMA4_INLINE 1
+#define HAVE_MMX_INLINE 1
+#define HAVE_MMXEXT_INLINE 1
+#define HAVE_SSE_INLINE 1
+#define HAVE_SSE2_INLINE 1
+#define HAVE_SSE3_INLINE 1
+#define HAVE_SSE4_INLINE 1
+#define HAVE_SSE42_INLINE 1
+#define HAVE_SSSE3_INLINE 1
+#define HAVE_XOP_INLINE 1
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_MSA2_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_ALIGNED_STACK 1
+#define HAVE_FAST_64BIT 1
+#define HAVE_FAST_CLZ 1
+#define HAVE_FAST_CMOV 1
+#define HAVE_LOCAL_ALIGNED 1
+#define HAVE_SIMD_ALIGN_16 1
+#define HAVE_SIMD_ALIGN_32 1
+#define HAVE_SIMD_ALIGN_64 1
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 1
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 0
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_CABS 1
+#define HAVE_CEXP 1
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_X86ASM 1
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 1
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 0
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 1
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DIRENT_H 1
+#define HAVE_DXGIDEBUG_H 0
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_LINUX_PERF_EVENT_H 0
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 0
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 0
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 0
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_FCNTL 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 1
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 0
+#define HAVE_LSTAT 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 1
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_PTHREAD_CANCEL 1
+#define HAVE_SCHED_GETAFFINITY 0
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETCONSOLECTRLHANDLER 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 1
+#define HAVE_USLEEP 1
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 0
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 1
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 1
+#define HAVE_EBP_AVAILABLE 1
+#define HAVE_EBX_AVAILABLE 1
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 1
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 0
+#define HAVE_SYMVER_ASM_LABEL 1
+#define HAVE_SYMVER_GNU_ASM 0
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 1
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 1
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_MAKEINFO 1
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 1
+#define CONFIG_AVIO_DIR_CMD_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWAVPACK 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 1
+#define CONFIG_AVFOUNDATION 1
+#define CONFIG_BZLIB 1
+#define CONFIG_COREIMAGE 1
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 1
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 1
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 1
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_XVMC 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 0
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 1
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_LZO 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 1
+#define CONFIG_DECODERS 1
+#define CONFIG_PARSERS 1
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 1
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 1
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 1
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#define CONFIG_NULL_BSF 1
+#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 1
+#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_DECODER 1
+#define CONFIG_FLAC_DECODER 1
+#define CONFIG_MP3_DECODER 1
+#define CONFIG_VP8_PARSER 1
+#define CONFIG_VP9_PARSER 1
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_darwin_aarch64.h b/media/ffvpx/config_darwin_aarch64.h
new file mode 100644
index 0000000000..8d9ce0a145
--- /dev/null
+++ b/media/ffvpx/config_darwin_aarch64.h
@@ -0,0 +1,740 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-decoder='vp8,vp9,mp3,flac' --enable-parser='vp8,vp9' --disable-static --enable-shared --disable-autodetect --enable-neon"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2022
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Apple clang version 13.1.6 (clang-1316.0.21.2.5)"
+#define OS_NAME darwin
+#define av_restrict restrict
+#define EXTERN_PREFIX "_"
+#define EXTERN_ASM _
+#define BUILDSUF ""
+#define SLIBSUF ".dylib"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 1
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_LOONGARCH 0
+#define ARCH_LOONGARCH32 0
+#define ARCH_LOONGARCH64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_RISCV 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 0
+#define ARCH_X86_32 0
+#define ARCH_X86_64 0
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 1
+#define HAVE_NEON 1
+#define HAVE_VFP 1
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 0
+#define HAVE_AMD3DNOW 0
+#define HAVE_AMD3DNOWEXT 0
+#define HAVE_AVX 0
+#define HAVE_AVX2 0
+#define HAVE_AVX512 0
+#define HAVE_AVX512ICL 0
+#define HAVE_FMA3 0
+#define HAVE_FMA4 0
+#define HAVE_MMX 0
+#define HAVE_MMXEXT 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
+#define HAVE_SSE3 0
+#define HAVE_SSE4 0
+#define HAVE_SSE42 0
+#define HAVE_SSSE3 0
+#define HAVE_XOP 0
+#define HAVE_CPUNOP 0
+#define HAVE_I686 0
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_LSX 0
+#define HAVE_LASX 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 1
+#define HAVE_NEON_EXTERNAL 1
+#define HAVE_VFP_EXTERNAL 1
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 0
+#define HAVE_AMD3DNOW_EXTERNAL 0
+#define HAVE_AMD3DNOWEXT_EXTERNAL 0
+#define HAVE_AVX_EXTERNAL 0
+#define HAVE_AVX2_EXTERNAL 0
+#define HAVE_AVX512_EXTERNAL 0
+#define HAVE_AVX512ICL_EXTERNAL 0
+#define HAVE_FMA3_EXTERNAL 0
+#define HAVE_FMA4_EXTERNAL 0
+#define HAVE_MMX_EXTERNAL 0
+#define HAVE_MMXEXT_EXTERNAL 0
+#define HAVE_SSE_EXTERNAL 0
+#define HAVE_SSE2_EXTERNAL 0
+#define HAVE_SSE3_EXTERNAL 0
+#define HAVE_SSE4_EXTERNAL 0
+#define HAVE_SSE42_EXTERNAL 0
+#define HAVE_SSSE3_EXTERNAL 0
+#define HAVE_XOP_EXTERNAL 0
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_LSX_EXTERNAL 0
+#define HAVE_LASX_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 1
+#define HAVE_NEON_INLINE 1
+#define HAVE_VFP_INLINE 1
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_AVX512_INLINE 0
+#define HAVE_AVX512ICL_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_LSX_INLINE 0
+#define HAVE_LASX_INLINE 0
+#define HAVE_ALIGNED_STACK 1
+#define HAVE_FAST_64BIT 1
+#define HAVE_FAST_CLZ 1
+#define HAVE_FAST_CMOV 0
+#define HAVE_LOCAL_ALIGNED 0
+#define HAVE_SIMD_ALIGN_16 1
+#define HAVE_SIMD_ALIGN_32 0
+#define HAVE_SIMD_ALIGN_64 0
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 0
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 0
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_CABS 1
+#define HAVE_CEXP 1
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_X86ASM 0
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 1
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 0
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 1
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DIRENT_H 1
+#define HAVE_DXGIDEBUG_H 0
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_LINUX_DMA_BUF_H 0
+#define HAVE_LINUX_PERF_EVENT_H 0
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_MALLOC_H 0
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 0
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 1
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 0
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_ARC4RANDOM 1
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_FCNTL 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETAUXVAL 0
+#define HAVE_GETENV 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETMODULEHANDLE 0
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSTDHANDLE 0
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 1
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 0
+#define HAVE_LOCALTIME_R 1
+#define HAVE_LSTAT 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 1
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MEMALIGN 0
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_POSIX_MEMALIGN 1
+#define HAVE_PTHREAD_CANCEL 1
+#define HAVE_SCHED_GETAFFINITY 0
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETCONSOLECTRLHANDLER 0
+#define HAVE_SETDLLDIRECTORY 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 1
+#define HAVE_USLEEP 1
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 0
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 1
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 1
+#define HAVE_EBP_AVAILABLE 0
+#define HAVE_EBX_AVAILABLE 0
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 0
+#define HAVE_SYMVER_ASM_LABEL 1
+#define HAVE_SYMVER_GNU_ASM 0
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+#define HAVE_KCMVIDEOCODECTYPE_VP9 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 1
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_GZIP 1
+#define HAVE_LIBDRM_GETFB2 0
+#define HAVE_MAKEINFO 1
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_XMLLINT 1
+#define HAVE_ZLIB_GZIP 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 1
+#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LCMS2 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGLSLANG 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBJXL 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPENVINO 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPLACEBO 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRABBITMQ 0
+#define CONFIG_LIBRAV1E 0
+#define CONFIG_LIBRIST 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHADERC 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSVTAV1 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUAVS3D 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_MEDIAFOUNDATION 0
+#define CONFIG_METAL 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_VULKAN 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 0
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 1
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LARGE_TESTS 1
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MACOS_KPERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_PTX_COMPRESSION 0
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_HWACCELS 0
+#define CONFIG_PARSERS 1
+#define CONFIG_INDEVS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_DEMUXERS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_PROTOCOLS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_ATSC_A53 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DEFLATE_WRAPPER 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DOVI_RPU 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 1
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INFLATE_WRAPPER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEG4AUDIO 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEODEC 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 1
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 1
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_generic.h b/media/ffvpx/config_generic.h
new file mode 100644
index 0000000000..da6c35970a
--- /dev/null
+++ b/media/ffvpx/config_generic.h
@@ -0,0 +1,740 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-decoder='mp3,flac' --disable-static --enable-shared --disable-autodetect --enable-small --disable-asm"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2022
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Apple clang version 13.1.6 (clang-1316.0.21.2.5)"
+#define OS_NAME darwin
+#define av_restrict restrict
+#define EXTERN_PREFIX "_"
+#define EXTERN_ASM _
+#define BUILDSUF ""
+#define SLIBSUF ".dylib"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 0
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_LOONGARCH 0
+#define ARCH_LOONGARCH32 0
+#define ARCH_LOONGARCH64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_RISCV 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 0
+#define ARCH_X86_32 0
+#define ARCH_X86_64 0
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 0
+#define HAVE_NEON 0
+#define HAVE_VFP 0
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 0
+#define HAVE_AMD3DNOW 0
+#define HAVE_AMD3DNOWEXT 0
+#define HAVE_AVX 0
+#define HAVE_AVX2 0
+#define HAVE_AVX512 0
+#define HAVE_AVX512ICL 0
+#define HAVE_FMA3 0
+#define HAVE_FMA4 0
+#define HAVE_MMX 0
+#define HAVE_MMXEXT 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
+#define HAVE_SSE3 0
+#define HAVE_SSE4 0
+#define HAVE_SSE42 0
+#define HAVE_SSSE3 0
+#define HAVE_XOP 0
+#define HAVE_CPUNOP 0
+#define HAVE_I686 0
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_LSX 0
+#define HAVE_LASX 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 1
+#define HAVE_NEON_EXTERNAL 1
+#define HAVE_VFP_EXTERNAL 1
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 0
+#define HAVE_AMD3DNOW_EXTERNAL 0
+#define HAVE_AMD3DNOWEXT_EXTERNAL 0
+#define HAVE_AVX_EXTERNAL 0
+#define HAVE_AVX2_EXTERNAL 0
+#define HAVE_AVX512_EXTERNAL 0
+#define HAVE_AVX512ICL_EXTERNAL 0
+#define HAVE_FMA3_EXTERNAL 0
+#define HAVE_FMA4_EXTERNAL 0
+#define HAVE_MMX_EXTERNAL 0
+#define HAVE_MMXEXT_EXTERNAL 0
+#define HAVE_SSE_EXTERNAL 0
+#define HAVE_SSE2_EXTERNAL 0
+#define HAVE_SSE3_EXTERNAL 0
+#define HAVE_SSE4_EXTERNAL 0
+#define HAVE_SSE42_EXTERNAL 0
+#define HAVE_SSSE3_EXTERNAL 0
+#define HAVE_XOP_EXTERNAL 0
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_LSX_EXTERNAL 0
+#define HAVE_LASX_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 1
+#define HAVE_NEON_INLINE 1
+#define HAVE_VFP_INLINE 1
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_AVX512_INLINE 0
+#define HAVE_AVX512ICL_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_LSX_INLINE 0
+#define HAVE_LASX_INLINE 0
+#define HAVE_ALIGNED_STACK 0
+#define HAVE_FAST_64BIT 0
+#define HAVE_FAST_CLZ 0
+#define HAVE_FAST_CMOV 0
+#define HAVE_LOCAL_ALIGNED 0
+#define HAVE_SIMD_ALIGN_16 0
+#define HAVE_SIMD_ALIGN_32 0
+#define HAVE_SIMD_ALIGN_64 0
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 0
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 0
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_CABS 1
+#define HAVE_CEXP 1
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_X86ASM 0
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 0
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 0
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 1
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DIRENT_H 1
+#define HAVE_DXGIDEBUG_H 0
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_LINUX_DMA_BUF_H 0
+#define HAVE_LINUX_PERF_EVENT_H 0
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_MALLOC_H 0
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 0
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 0
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 0
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_ARC4RANDOM 1
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_FCNTL 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETAUXVAL 0
+#define HAVE_GETENV 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETMODULEHANDLE 0
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSTDHANDLE 0
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 1
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 0
+#define HAVE_LOCALTIME_R 1
+#define HAVE_LSTAT 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MEMALIGN 0
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_POSIX_MEMALIGN 0
+#define HAVE_PTHREAD_CANCEL 1
+#define HAVE_SCHED_GETAFFINITY 0
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETCONSOLECTRLHANDLER 0
+#define HAVE_SETDLLDIRECTORY 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 1
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 0
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 1
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 1
+#define HAVE_EBP_AVAILABLE 0
+#define HAVE_EBX_AVAILABLE 0
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 0
+#define HAVE_SYMVER_ASM_LABEL 1
+#define HAVE_SYMVER_GNU_ASM 0
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+#define HAVE_KCMVIDEOCODECTYPE_VP9 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 1
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_GZIP 1
+#define HAVE_LIBDRM_GETFB2 0
+#define HAVE_MAKEINFO 1
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_XMLLINT 1
+#define HAVE_ZLIB_GZIP 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 1
+#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LCMS2 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGLSLANG 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBJXL 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPENVINO 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPLACEBO 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRABBITMQ 0
+#define CONFIG_LIBRAV1E 0
+#define CONFIG_LIBRIST 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHADERC 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSVTAV1 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUAVS3D 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_MEDIAFOUNDATION 0
+#define CONFIG_METAL 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_VULKAN 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 1
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 0
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LARGE_TESTS 1
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MACOS_KPERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_PTX_COMPRESSION 0
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_HWACCELS 0
+#define CONFIG_PARSERS 0
+#define CONFIG_INDEVS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_DEMUXERS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_PROTOCOLS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_ATSC_A53 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DEFLATE_WRAPPER 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DOVI_RPU 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 0
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INFLATE_WRAPPER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEG4AUDIO 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEODEC 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 0
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 0
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_override.h b/media/ffvpx/config_override.h
new file mode 100644
index 0000000000..8902e64ec2
--- /dev/null
+++ b/media/ffvpx/config_override.h
@@ -0,0 +1,35 @@
+// This file contains overrides for config.h that can be platform-specific.
+
+#ifdef MOZ_LIBAV_FFT
+#undef CONFIG_FFT
+#undef CONFIG_RDFT
+#define CONFIG_FFT 1
+#define CONFIG_RDFT 1
+#endif
+
+// override '#define EXTERN_ASM _' in config_generic.h to allow building with
+// gcc on arm
+#if defined(__GNUC__) && defined(__arm__)
+#undef EXTERN_ASM
+#define EXTERN_ASM
+#endif
+
+#undef CONFIG_VAAPI
+#undef CONFIG_VAAPI_1
+#undef CONFIG_VP8_VAAPI_HWACCEL
+#undef CONFIG_VP9_VAAPI_HWACCEL
+#undef CONFIG_AV1_VAAPI_HWACCEL
+
+#if defined(MOZ_WAYLAND) && !defined(MOZ_FFVPX_AUDIOONLY)
+#define CONFIG_VAAPI 1
+#define CONFIG_VAAPI_1 1
+#define CONFIG_VP8_VAAPI_HWACCEL 1
+#define CONFIG_VP9_VAAPI_HWACCEL 1
+#define CONFIG_AV1_VAAPI_HWACCEL 1
+#else
+#define CONFIG_VAAPI 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VP8_VAAPI_HWACCEL 0
+#define CONFIG_VP9_VAAPI_HWACCEL 0
+#define CONFIG_AV1_VAAPI_HWACCEL 0
+#endif
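
The override header above gates the VA-API defines on MOZ_WAYLAND being set and MOZ_FFVPX_AUDIOONLY being unset. As a rough illustration of that pattern (not code from this patch), the standalone C sketch below mimics how a generated base value is defined first and then redefined by an override block; MOZ_WAYLAND and MOZ_FFVPX_AUDIOONLY here are just stand-in -D flags you would pass when compiling the sketch.

/* Minimal, self-contained sketch of the config_override.h pattern.
 * Compile plain, or with -DMOZ_WAYLAND to flip the result:
 *   cc sketch.c && ./a.out
 *   cc -DMOZ_WAYLAND sketch.c && ./a.out
 */
#include <stdio.h>

/* Base setting, as a generated config header would provide it. */
#define CONFIG_VAAPI 0

/* Override block, mirroring the structure used in config_override.h. */
#undef CONFIG_VAAPI
#if defined(MOZ_WAYLAND) && !defined(MOZ_FFVPX_AUDIOONLY)
#define CONFIG_VAAPI 1
#else
#define CONFIG_VAAPI 0
#endif

int main(void) {
    /* Downstream code branches on the final, overridden value. */
#if CONFIG_VAAPI
    printf("VA-API paths would be compiled in.\n");
#else
    printf("VA-API paths are compiled out.\n");
#endif
    return 0;
}

Because the overrides use #undef before redefining, the values baked into the per-platform config_*.h files stay authoritative unless the build explicitly opts into the Wayland, non-audio-only configuration.
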
diff --git a/media/ffvpx/config_unix32.h b/media/ffvpx/config_unix32.h
new file mode 100644
index 0000000000..4f7942c468
--- /dev/null
+++ b/media/ffvpx/config_unix32.h
@@ -0,0 +1,735 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-decoder='vp8,vp9,mp3,flac' --enable-parser='vp8,vp9' --disable-static --enable-shared --disable-autodetect --disable-asm --disable-x86asm --cc='clang -m32' --disable-iconv"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2022
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Ubuntu clang version 14.0.0-1ubuntu1"
+#define OS_NAME linux
+#define av_restrict restrict
+#define EXTERN_PREFIX ""
+#define EXTERN_ASM
+#define BUILDSUF ""
+#define SLIBSUF ".so"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 0
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_LOONGARCH 0
+#define ARCH_LOONGARCH32 0
+#define ARCH_LOONGARCH64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_RISCV 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 0
+#define ARCH_X86_32 0
+#define ARCH_X86_64 0
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 0
+#define HAVE_NEON 0
+#define HAVE_VFP 0
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 0
+#define HAVE_AMD3DNOW 0
+#define HAVE_AMD3DNOWEXT 0
+#define HAVE_AVX 0
+#define HAVE_AVX2 0
+#define HAVE_AVX512 0
+#define HAVE_AVX512ICL 0
+#define HAVE_FMA3 0
+#define HAVE_FMA4 0
+#define HAVE_MMX 0
+#define HAVE_MMXEXT 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
+#define HAVE_SSE3 0
+#define HAVE_SSE4 0
+#define HAVE_SSE42 0
+#define HAVE_SSSE3 0
+#define HAVE_XOP 0
+#define HAVE_CPUNOP 0
+#define HAVE_I686 0
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_LSX 0
+#define HAVE_LASX 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 0
+#define HAVE_NEON_EXTERNAL 0
+#define HAVE_VFP_EXTERNAL 0
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 0
+#define HAVE_AMD3DNOW_EXTERNAL 0
+#define HAVE_AMD3DNOWEXT_EXTERNAL 0
+#define HAVE_AVX_EXTERNAL 0
+#define HAVE_AVX2_EXTERNAL 0
+#define HAVE_AVX512_EXTERNAL 0
+#define HAVE_AVX512ICL_EXTERNAL 0
+#define HAVE_FMA3_EXTERNAL 0
+#define HAVE_FMA4_EXTERNAL 0
+#define HAVE_MMX_EXTERNAL 0
+#define HAVE_MMXEXT_EXTERNAL 0
+#define HAVE_SSE_EXTERNAL 0
+#define HAVE_SSE2_EXTERNAL 0
+#define HAVE_SSE3_EXTERNAL 0
+#define HAVE_SSE4_EXTERNAL 0
+#define HAVE_SSE42_EXTERNAL 0
+#define HAVE_SSSE3_EXTERNAL 0
+#define HAVE_XOP_EXTERNAL 0
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_LSX_EXTERNAL 0
+#define HAVE_LASX_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 0
+#define HAVE_NEON_INLINE 0
+#define HAVE_VFP_INLINE 0
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_AVX512_INLINE 0
+#define HAVE_AVX512ICL_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_LSX_INLINE 0
+#define HAVE_LASX_INLINE 0
+#define HAVE_ALIGNED_STACK 0
+#define HAVE_FAST_64BIT 0
+#define HAVE_FAST_CLZ 0
+#define HAVE_FAST_CMOV 0
+#define HAVE_LOCAL_ALIGNED 1
+#define HAVE_SIMD_ALIGN_16 0
+#define HAVE_SIMD_ALIGN_32 0
+#define HAVE_SIMD_ALIGN_64 0
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 0
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 1
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_CABS 0
+#define HAVE_CEXP 0
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_X86ASM 0
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 0
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 1
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DIRENT_H 1
+#define HAVE_DXGIDEBUG_H 0
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_LINUX_DMA_BUF_H 0
+#define HAVE_LINUX_PERF_EVENT_H 1
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 1
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 0
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 1
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_FCNTL 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETAUXVAL 1
+#define HAVE_GETENV 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETMODULEHANDLE 0
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSTDHANDLE 0
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 1
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 0
+#define HAVE_LSTAT 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_PTHREAD_CANCEL 1
+#define HAVE_SCHED_GETAFFINITY 1
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETCONSOLECTRLHANDLER 0
+#define HAVE_SETDLLDIRECTORY 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 1
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 0
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 1
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 0
+#define HAVE_EBP_AVAILABLE 1
+#define HAVE_EBX_AVAILABLE 1
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 1
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 1
+#define HAVE_SYMVER_ASM_LABEL 1
+#define HAVE_SYMVER_GNU_ASM 1
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 1
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+#define HAVE_KCMVIDEOCODECTYPE_VP9 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_GZIP 1
+#define HAVE_LIBDRM_GETFB2 0
+#define HAVE_MAKEINFO 1
+#define HAVE_MAKEINFO_HTML 1
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_XMLLINT 1
+#define HAVE_ZLIB_GZIP 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 1
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 1
+#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LCMS2 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 1
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGLSLANG 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBJXL 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPENVINO 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPLACEBO 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRABBITMQ 0
+#define CONFIG_LIBRAV1E 0
+#define CONFIG_LIBRIST 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHADERC 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSVTAV1 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUAVS3D 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_MEDIAFOUNDATION 0
+#define CONFIG_METAL 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_VULKAN 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 0
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 0
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LARGE_TESTS 1
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MACOS_KPERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_PTX_COMPRESSION 0
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_HWACCELS 0
+#define CONFIG_PARSERS 1
+#define CONFIG_INDEVS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_DEMUXERS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_PROTOCOLS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_ATSC_A53 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DEFLATE_WRAPPER 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DOVI_RPU 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 1
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INFLATE_WRAPPER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEG4AUDIO 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEODEC 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 1
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 1
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_unix64.asm b/media/ffvpx/config_unix64.asm
new file mode 100644
index 0000000000..8ad475cd06
--- /dev/null
+++ b/media/ffvpx/config_unix64.asm
@@ -0,0 +1,723 @@
+; Automatically generated by configure - do not modify!
+%define ARCH_AARCH64 0
+%define ARCH_ALPHA 0
+%define ARCH_ARM 0
+%define ARCH_AVR32 0
+%define ARCH_AVR32_AP 0
+%define ARCH_AVR32_UC 0
+%define ARCH_BFIN 0
+%define ARCH_IA64 0
+%define ARCH_LOONGARCH 0
+%define ARCH_LOONGARCH32 0
+%define ARCH_LOONGARCH64 0
+%define ARCH_M68K 0
+%define ARCH_MIPS 0
+%define ARCH_MIPS64 0
+%define ARCH_PARISC 0
+%define ARCH_PPC 0
+%define ARCH_PPC64 0
+%define ARCH_RISCV 0
+%define ARCH_S390 0
+%define ARCH_SH4 0
+%define ARCH_SPARC 0
+%define ARCH_SPARC64 0
+%define ARCH_TILEGX 0
+%define ARCH_TILEPRO 0
+%define ARCH_TOMI 0
+%define ARCH_X86 1
+%define ARCH_X86_32 0
+%define ARCH_X86_64 1
+%define HAVE_ARMV5TE 0
+%define HAVE_ARMV6 0
+%define HAVE_ARMV6T2 0
+%define HAVE_ARMV8 0
+%define HAVE_NEON 0
+%define HAVE_VFP 0
+%define HAVE_VFPV3 0
+%define HAVE_SETEND 0
+%define HAVE_ALTIVEC 0
+%define HAVE_DCBZL 0
+%define HAVE_LDBRX 0
+%define HAVE_POWER8 0
+%define HAVE_PPC4XX 0
+%define HAVE_VSX 0
+%define HAVE_AESNI 1
+%define HAVE_AMD3DNOW 1
+%define HAVE_AMD3DNOWEXT 1
+%define HAVE_AVX 1
+%define HAVE_AVX2 1
+%define HAVE_AVX512 1
+%define HAVE_AVX512ICL 1
+%define HAVE_FMA3 1
+%define HAVE_FMA4 1
+%define HAVE_MMX 1
+%define HAVE_MMXEXT 1
+%define HAVE_SSE 1
+%define HAVE_SSE2 1
+%define HAVE_SSE3 1
+%define HAVE_SSE4 1
+%define HAVE_SSE42 1
+%define HAVE_SSSE3 1
+%define HAVE_XOP 1
+%define HAVE_CPUNOP 0
+%define HAVE_I686 1
+%define HAVE_MIPSFPU 0
+%define HAVE_MIPS32R2 0
+%define HAVE_MIPS32R5 0
+%define HAVE_MIPS64R2 0
+%define HAVE_MIPS32R6 0
+%define HAVE_MIPS64R6 0
+%define HAVE_MIPSDSP 0
+%define HAVE_MIPSDSPR2 0
+%define HAVE_MSA 0
+%define HAVE_LOONGSON2 0
+%define HAVE_LOONGSON3 0
+%define HAVE_MMI 0
+%define HAVE_LSX 0
+%define HAVE_LASX 0
+%define HAVE_ARMV5TE_EXTERNAL 0
+%define HAVE_ARMV6_EXTERNAL 0
+%define HAVE_ARMV6T2_EXTERNAL 0
+%define HAVE_ARMV8_EXTERNAL 0
+%define HAVE_NEON_EXTERNAL 0
+%define HAVE_VFP_EXTERNAL 0
+%define HAVE_VFPV3_EXTERNAL 0
+%define HAVE_SETEND_EXTERNAL 0
+%define HAVE_ALTIVEC_EXTERNAL 0
+%define HAVE_DCBZL_EXTERNAL 0
+%define HAVE_LDBRX_EXTERNAL 0
+%define HAVE_POWER8_EXTERNAL 0
+%define HAVE_PPC4XX_EXTERNAL 0
+%define HAVE_VSX_EXTERNAL 0
+%define HAVE_AESNI_EXTERNAL 1
+%define HAVE_AMD3DNOW_EXTERNAL 1
+%define HAVE_AMD3DNOWEXT_EXTERNAL 1
+%define HAVE_AVX_EXTERNAL 1
+%define HAVE_AVX2_EXTERNAL 1
+%define HAVE_AVX512_EXTERNAL 1
+%define HAVE_AVX512ICL_EXTERNAL 1
+%define HAVE_FMA3_EXTERNAL 1
+%define HAVE_FMA4_EXTERNAL 1
+%define HAVE_MMX_EXTERNAL 1
+%define HAVE_MMXEXT_EXTERNAL 1
+%define HAVE_SSE_EXTERNAL 1
+%define HAVE_SSE2_EXTERNAL 1
+%define HAVE_SSE3_EXTERNAL 1
+%define HAVE_SSE4_EXTERNAL 1
+%define HAVE_SSE42_EXTERNAL 1
+%define HAVE_SSSE3_EXTERNAL 1
+%define HAVE_XOP_EXTERNAL 1
+%define HAVE_CPUNOP_EXTERNAL 0
+%define HAVE_I686_EXTERNAL 0
+%define HAVE_MIPSFPU_EXTERNAL 0
+%define HAVE_MIPS32R2_EXTERNAL 0
+%define HAVE_MIPS32R5_EXTERNAL 0
+%define HAVE_MIPS64R2_EXTERNAL 0
+%define HAVE_MIPS32R6_EXTERNAL 0
+%define HAVE_MIPS64R6_EXTERNAL 0
+%define HAVE_MIPSDSP_EXTERNAL 0
+%define HAVE_MIPSDSPR2_EXTERNAL 0
+%define HAVE_MSA_EXTERNAL 0
+%define HAVE_LOONGSON2_EXTERNAL 0
+%define HAVE_LOONGSON3_EXTERNAL 0
+%define HAVE_MMI_EXTERNAL 0
+%define HAVE_LSX_EXTERNAL 0
+%define HAVE_LASX_EXTERNAL 0
+%define HAVE_ARMV5TE_INLINE 0
+%define HAVE_ARMV6_INLINE 0
+%define HAVE_ARMV6T2_INLINE 0
+%define HAVE_ARMV8_INLINE 0
+%define HAVE_NEON_INLINE 0
+%define HAVE_VFP_INLINE 0
+%define HAVE_VFPV3_INLINE 0
+%define HAVE_SETEND_INLINE 0
+%define HAVE_ALTIVEC_INLINE 0
+%define HAVE_DCBZL_INLINE 0
+%define HAVE_LDBRX_INLINE 0
+%define HAVE_POWER8_INLINE 0
+%define HAVE_PPC4XX_INLINE 0
+%define HAVE_VSX_INLINE 0
+%define HAVE_AESNI_INLINE 1
+%define HAVE_AMD3DNOW_INLINE 1
+%define HAVE_AMD3DNOWEXT_INLINE 1
+%define HAVE_AVX_INLINE 1
+%define HAVE_AVX2_INLINE 1
+%define HAVE_AVX512_INLINE 1
+%define HAVE_AVX512ICL_INLINE 1
+%define HAVE_FMA3_INLINE 1
+%define HAVE_FMA4_INLINE 1
+%define HAVE_MMX_INLINE 1
+%define HAVE_MMXEXT_INLINE 1
+%define HAVE_SSE_INLINE 1
+%define HAVE_SSE2_INLINE 1
+%define HAVE_SSE3_INLINE 1
+%define HAVE_SSE4_INLINE 1
+%define HAVE_SSE42_INLINE 1
+%define HAVE_SSSE3_INLINE 1
+%define HAVE_XOP_INLINE 1
+%define HAVE_CPUNOP_INLINE 0
+%define HAVE_I686_INLINE 0
+%define HAVE_MIPSFPU_INLINE 0
+%define HAVE_MIPS32R2_INLINE 0
+%define HAVE_MIPS32R5_INLINE 0
+%define HAVE_MIPS64R2_INLINE 0
+%define HAVE_MIPS32R6_INLINE 0
+%define HAVE_MIPS64R6_INLINE 0
+%define HAVE_MIPSDSP_INLINE 0
+%define HAVE_MIPSDSPR2_INLINE 0
+%define HAVE_MSA_INLINE 0
+%define HAVE_LOONGSON2_INLINE 0
+%define HAVE_LOONGSON3_INLINE 0
+%define HAVE_MMI_INLINE 0
+%define HAVE_LSX_INLINE 0
+%define HAVE_LASX_INLINE 0
+%define HAVE_ALIGNED_STACK 1
+%define HAVE_FAST_64BIT 1
+%define HAVE_FAST_CLZ 1
+%define HAVE_FAST_CMOV 1
+%define HAVE_LOCAL_ALIGNED 1
+%define HAVE_SIMD_ALIGN_16 1
+%define HAVE_SIMD_ALIGN_32 1
+%define HAVE_SIMD_ALIGN_64 1
+%define HAVE_ATOMIC_CAS_PTR 0
+%define HAVE_MACHINE_RW_BARRIER 0
+%define HAVE_MEMORYBARRIER 0
+%define HAVE_MM_EMPTY 1
+%define HAVE_RDTSC 0
+%define HAVE_SEM_TIMEDWAIT 1
+%define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+%define HAVE_CABS 0
+%define HAVE_CEXP 0
+%define HAVE_INLINE_ASM 1
+%define HAVE_SYMVER 1
+%define HAVE_X86ASM 1
+%define HAVE_BIGENDIAN 0
+%define HAVE_FAST_UNALIGNED 1
+%define HAVE_ARPA_INET_H 1
+%define HAVE_ASM_TYPES_H 1
+%define HAVE_CDIO_PARANOIA_H 0
+%define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+%define HAVE_CUDA_H 0
+%define HAVE_DISPATCH_DISPATCH_H 0
+%define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+%define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+%define HAVE_DEV_IC_BT8XX_H 0
+%define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+%define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+%define HAVE_DIRECT_H 0
+%define HAVE_DIRENT_H 1
+%define HAVE_DXGIDEBUG_H 0
+%define HAVE_DXVA_H 0
+%define HAVE_ES2_GL_H 0
+%define HAVE_GSM_H 0
+%define HAVE_IO_H 0
+%define HAVE_LINUX_DMA_BUF_H 0
+%define HAVE_LINUX_PERF_EVENT_H 1
+%define HAVE_MACHINE_IOCTL_BT848_H 0
+%define HAVE_MACHINE_IOCTL_METEOR_H 0
+%define HAVE_MALLOC_H 1
+%define HAVE_OPENCV2_CORE_CORE_C_H 0
+%define HAVE_OPENGL_GL3_H 0
+%define HAVE_POLL_H 1
+%define HAVE_SYS_PARAM_H 1
+%define HAVE_SYS_RESOURCE_H 1
+%define HAVE_SYS_SELECT_H 1
+%define HAVE_SYS_SOUNDCARD_H 1
+%define HAVE_SYS_TIME_H 1
+%define HAVE_SYS_UN_H 1
+%define HAVE_SYS_VIDEOIO_H 0
+%define HAVE_TERMIOS_H 1
+%define HAVE_UDPLITE_H 0
+%define HAVE_UNISTD_H 1
+%define HAVE_VALGRIND_VALGRIND_H 0
+%define HAVE_WINDOWS_H 0
+%define HAVE_WINSOCK2_H 0
+%define HAVE_INTRINSICS_NEON 0
+%define HAVE_ATANF 1
+%define HAVE_ATAN2F 1
+%define HAVE_CBRT 1
+%define HAVE_CBRTF 1
+%define HAVE_COPYSIGN 1
+%define HAVE_COSF 1
+%define HAVE_ERF 1
+%define HAVE_EXP2 1
+%define HAVE_EXP2F 1
+%define HAVE_EXPF 1
+%define HAVE_HYPOT 1
+%define HAVE_ISFINITE 1
+%define HAVE_ISINF 1
+%define HAVE_ISNAN 1
+%define HAVE_LDEXPF 1
+%define HAVE_LLRINT 1
+%define HAVE_LLRINTF 1
+%define HAVE_LOG2 1
+%define HAVE_LOG2F 1
+%define HAVE_LOG10F 1
+%define HAVE_LRINT 1
+%define HAVE_LRINTF 1
+%define HAVE_POWF 1
+%define HAVE_RINT 1
+%define HAVE_ROUND 1
+%define HAVE_ROUNDF 1
+%define HAVE_SINF 1
+%define HAVE_TRUNC 1
+%define HAVE_TRUNCF 1
+%define HAVE_DOS_PATHS 0
+%define HAVE_LIBC_MSVCRT 0
+%define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+%define HAVE_SECTION_DATA_REL_RO 1
+%define HAVE_THREADS 1
+%define HAVE_UWP 0
+%define HAVE_WINRT 0
+%define HAVE_ACCESS 1
+%define HAVE_ALIGNED_MALLOC 0
+%define HAVE_ARC4RANDOM 0
+%define HAVE_CLOCK_GETTIME 1
+%define HAVE_CLOSESOCKET 0
+%define HAVE_COMMANDLINETOARGVW 0
+%define HAVE_FCNTL 1
+%define HAVE_GETADDRINFO 1
+%define HAVE_GETAUXVAL 1
+%define HAVE_GETENV 1
+%define HAVE_GETHRTIME 0
+%define HAVE_GETOPT 1
+%define HAVE_GETMODULEHANDLE 0
+%define HAVE_GETPROCESSAFFINITYMASK 0
+%define HAVE_GETPROCESSMEMORYINFO 0
+%define HAVE_GETPROCESSTIMES 0
+%define HAVE_GETRUSAGE 1
+%define HAVE_GETSTDHANDLE 0
+%define HAVE_GETSYSTEMTIMEASFILETIME 0
+%define HAVE_GETTIMEOFDAY 1
+%define HAVE_GLOB 1
+%define HAVE_GLXGETPROCADDRESS 0
+%define HAVE_GMTIME_R 1
+%define HAVE_INET_ATON 1
+%define HAVE_ISATTY 1
+%define HAVE_KBHIT 0
+%define HAVE_LOCALTIME_R 1
+%define HAVE_LSTAT 1
+%define HAVE_LZO1X_999_COMPRESS 0
+%define HAVE_MACH_ABSOLUTE_TIME 0
+%define HAVE_MAPVIEWOFFILE 0
+%define HAVE_MEMALIGN 1
+%define HAVE_MKSTEMP 1
+%define HAVE_MMAP 1
+%define HAVE_MPROTECT 1
+%define HAVE_NANOSLEEP 1
+%define HAVE_PEEKNAMEDPIPE 0
+%define HAVE_POSIX_MEMALIGN 1
+%define HAVE_PTHREAD_CANCEL 1
+%define HAVE_SCHED_GETAFFINITY 1
+%define HAVE_SECITEMIMPORT 0
+%define HAVE_SETCONSOLETEXTATTRIBUTE 0
+%define HAVE_SETCONSOLECTRLHANDLER 0
+%define HAVE_SETDLLDIRECTORY 0
+%define HAVE_SETMODE 0
+%define HAVE_SETRLIMIT 1
+%define HAVE_SLEEP 0
+%define HAVE_STRERROR_R 1
+%define HAVE_SYSCONF 1
+%define HAVE_SYSCTL 0
+%define HAVE_USLEEP 1
+%define HAVE_UTGETOSTYPEFROMSTRING 0
+%define HAVE_VIRTUALALLOC 0
+%define HAVE_WGLGETPROCADDRESS 0
+%define HAVE_BCRYPT 0
+%define HAVE_VAAPI_DRM 1
+%define HAVE_VAAPI_X11 0
+%define HAVE_VDPAU_X11 0
+%define HAVE_PTHREADS 1
+%define HAVE_OS2THREADS 0
+%define HAVE_W32THREADS 0
+%define HAVE_AS_ARCH_DIRECTIVE 0
+%define HAVE_AS_DN_DIRECTIVE 0
+%define HAVE_AS_FPU_DIRECTIVE 0
+%define HAVE_AS_FUNC 0
+%define HAVE_AS_OBJECT_ARCH 0
+%define HAVE_ASM_MOD_Q 0
+%define HAVE_BLOCKS_EXTENSION 0
+%define HAVE_EBP_AVAILABLE 1
+%define HAVE_EBX_AVAILABLE 1
+%define HAVE_GNU_AS 0
+%define HAVE_GNU_WINDRES 0
+%define HAVE_IBM_ASM 0
+%define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 1
+%define HAVE_INLINE_ASM_LABELS 1
+%define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+%define HAVE_PRAGMA_DEPRECATED 1
+%define HAVE_RSYNC_CONTIMEOUT 1
+%define HAVE_SYMVER_ASM_LABEL 0
+%define HAVE_SYMVER_GNU_ASM 1
+%define HAVE_VFP_ARGS 0
+%define HAVE_XFORM_ASM 0
+%define HAVE_XMM_CLOBBERS 1
+%define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+%define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+%define HAVE_KCMVIDEOCODECTYPE_VP9 0
+%define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+%define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+%define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+%define HAVE_SOCKLEN_T 1
+%define HAVE_STRUCT_ADDRINFO 1
+%define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+%define HAVE_STRUCT_IP_MREQ_SOURCE 1
+%define HAVE_STRUCT_IPV6_MREQ 1
+%define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+%define HAVE_STRUCT_POLLFD 1
+%define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+%define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+%define HAVE_STRUCT_SOCKADDR_IN6 1
+%define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+%define HAVE_STRUCT_SOCKADDR_STORAGE 1
+%define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1
+%define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+%define HAVE_GZIP 1
+%define HAVE_LIBDRM_GETFB2 0
+%define HAVE_MAKEINFO 1
+%define HAVE_MAKEINFO_HTML 1
+%define HAVE_OPENCL_D3D11 0
+%define HAVE_OPENCL_DRM_ARM 0
+%define HAVE_OPENCL_DRM_BEIGNET 0
+%define HAVE_OPENCL_DXVA2 0
+%define HAVE_OPENCL_VAAPI_BEIGNET 0
+%define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+%define HAVE_PERL 1
+%define HAVE_POD2MAN 1
+%define HAVE_TEXI2HTML 0
+%define HAVE_XMLLINT 1
+%define HAVE_ZLIB_GZIP 0
+%define CONFIG_DOC 0
+%define CONFIG_HTMLPAGES 1
+%define CONFIG_MANPAGES 1
+%define CONFIG_PODPAGES 1
+%define CONFIG_TXTPAGES 1
+%define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+%define CONFIG_AVIO_READING_EXAMPLE 1
+%define CONFIG_DECODE_AUDIO_EXAMPLE 1
+%define CONFIG_DECODE_VIDEO_EXAMPLE 1
+%define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+%define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+%define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+%define CONFIG_EXTRACT_MVS_EXAMPLE 0
+%define CONFIG_FILTER_AUDIO_EXAMPLE 0
+%define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+%define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+%define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+%define CONFIG_HW_DECODE_EXAMPLE 0
+%define CONFIG_METADATA_EXAMPLE 0
+%define CONFIG_MUXING_EXAMPLE 0
+%define CONFIG_QSVDEC_EXAMPLE 0
+%define CONFIG_REMUXING_EXAMPLE 0
+%define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+%define CONFIG_SCALING_VIDEO_EXAMPLE 0
+%define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+%define CONFIG_TRANSCODING_EXAMPLE 0
+%define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+%define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+%define CONFIG_AVISYNTH 0
+%define CONFIG_FREI0R 0
+%define CONFIG_LIBCDIO 0
+%define CONFIG_LIBDAVS2 0
+%define CONFIG_LIBRUBBERBAND 0
+%define CONFIG_LIBVIDSTAB 0
+%define CONFIG_LIBX264 0
+%define CONFIG_LIBX265 0
+%define CONFIG_LIBXAVS 0
+%define CONFIG_LIBXAVS2 0
+%define CONFIG_LIBXVID 0
+%define CONFIG_DECKLINK 0
+%define CONFIG_LIBFDK_AAC 0
+%define CONFIG_LIBTLS 0
+%define CONFIG_GMP 0
+%define CONFIG_LIBARIBB24 0
+%define CONFIG_LIBLENSFUN 0
+%define CONFIG_LIBOPENCORE_AMRNB 0
+%define CONFIG_LIBOPENCORE_AMRWB 0
+%define CONFIG_LIBVO_AMRWBENC 0
+%define CONFIG_MBEDTLS 0
+%define CONFIG_RKMPP 0
+%define CONFIG_LIBSMBCLIENT 0
+%define CONFIG_CHROMAPRINT 0
+%define CONFIG_GCRYPT 0
+%define CONFIG_GNUTLS 0
+%define CONFIG_JNI 0
+%define CONFIG_LADSPA 0
+%define CONFIG_LCMS2 0
+%define CONFIG_LIBAOM 0
+%define CONFIG_LIBASS 0
+%define CONFIG_LIBBLURAY 0
+%define CONFIG_LIBBS2B 0
+%define CONFIG_LIBCACA 0
+%define CONFIG_LIBCELT 0
+%define CONFIG_LIBCODEC2 0
+%define CONFIG_LIBDAV1D 0
+%define CONFIG_LIBDC1394 0
+%define CONFIG_LIBDRM 0
+%define CONFIG_LIBFLITE 0
+%define CONFIG_LIBFONTCONFIG 0
+%define CONFIG_LIBFREETYPE 0
+%define CONFIG_LIBFRIBIDI 0
+%define CONFIG_LIBGLSLANG 0
+%define CONFIG_LIBGME 0
+%define CONFIG_LIBGSM 0
+%define CONFIG_LIBIEC61883 0
+%define CONFIG_LIBILBC 0
+%define CONFIG_LIBJACK 0
+%define CONFIG_LIBJXL 0
+%define CONFIG_LIBKLVANC 0
+%define CONFIG_LIBKVAZAAR 0
+%define CONFIG_LIBMODPLUG 0
+%define CONFIG_LIBMP3LAME 0
+%define CONFIG_LIBMYSOFA 0
+%define CONFIG_LIBOPENCV 0
+%define CONFIG_LIBOPENH264 0
+%define CONFIG_LIBOPENJPEG 0
+%define CONFIG_LIBOPENMPT 0
+%define CONFIG_LIBOPENVINO 0
+%define CONFIG_LIBOPUS 0
+%define CONFIG_LIBPLACEBO 0
+%define CONFIG_LIBPULSE 0
+%define CONFIG_LIBRABBITMQ 0
+%define CONFIG_LIBRAV1E 0
+%define CONFIG_LIBRIST 0
+%define CONFIG_LIBRSVG 0
+%define CONFIG_LIBRTMP 0
+%define CONFIG_LIBSHADERC 0
+%define CONFIG_LIBSHINE 0
+%define CONFIG_LIBSMBCLIENT 0
+%define CONFIG_LIBSNAPPY 0
+%define CONFIG_LIBSOXR 0
+%define CONFIG_LIBSPEEX 0
+%define CONFIG_LIBSRT 0
+%define CONFIG_LIBSSH 0
+%define CONFIG_LIBSVTAV1 0
+%define CONFIG_LIBTENSORFLOW 0
+%define CONFIG_LIBTESSERACT 0
+%define CONFIG_LIBTHEORA 0
+%define CONFIG_LIBTWOLAME 0
+%define CONFIG_LIBUAVS3D 0
+%define CONFIG_LIBV4L2 0
+%define CONFIG_LIBVMAF 0
+%define CONFIG_LIBVORBIS 0
+%define CONFIG_LIBVPX 0
+%define CONFIG_LIBWEBP 0
+%define CONFIG_LIBXML2 0
+%define CONFIG_LIBZIMG 0
+%define CONFIG_LIBZMQ 0
+%define CONFIG_LIBZVBI 0
+%define CONFIG_LV2 0
+%define CONFIG_MEDIACODEC 0
+%define CONFIG_OPENAL 0
+%define CONFIG_OPENGL 0
+%define CONFIG_OPENSSL 0
+%define CONFIG_POCKETSPHINX 0
+%define CONFIG_VAPOURSYNTH 0
+%define CONFIG_ALSA 0
+%define CONFIG_APPKIT 0
+%define CONFIG_AVFOUNDATION 0
+%define CONFIG_BZLIB 0
+%define CONFIG_COREIMAGE 0
+%define CONFIG_ICONV 0
+%define CONFIG_LIBXCB 0
+%define CONFIG_LIBXCB_SHM 0
+%define CONFIG_LIBXCB_SHAPE 0
+%define CONFIG_LIBXCB_XFIXES 0
+%define CONFIG_LZMA 0
+%define CONFIG_MEDIAFOUNDATION 0
+%define CONFIG_METAL 0
+%define CONFIG_SCHANNEL 0
+%define CONFIG_SDL2 0
+%define CONFIG_SECURETRANSPORT 0
+%define CONFIG_SNDIO 0
+%define CONFIG_XLIB 0
+%define CONFIG_ZLIB 0
+%define CONFIG_CUDA_NVCC 0
+%define CONFIG_CUDA_SDK 0
+%define CONFIG_LIBNPP 0
+%define CONFIG_LIBMFX 0
+%define CONFIG_MMAL 0
+%define CONFIG_OMX 0
+%define CONFIG_OPENCL 0
+%define CONFIG_AMF 0
+%define CONFIG_AUDIOTOOLBOX 0
+%define CONFIG_CRYSTALHD 0
+%define CONFIG_CUDA 0
+%define CONFIG_CUDA_LLVM 0
+%define CONFIG_CUVID 0
+%define CONFIG_D3D11VA 0
+%define CONFIG_DXVA2 0
+%define CONFIG_FFNVCODEC 0
+%define CONFIG_NVDEC 0
+%define CONFIG_NVENC 0
+%define CONFIG_VAAPI 1
+%define CONFIG_VDPAU 0
+%define CONFIG_VIDEOTOOLBOX 0
+%define CONFIG_VULKAN 0
+%define CONFIG_V4L2_M2M 0
+%define CONFIG_FTRAPV 0
+%define CONFIG_GRAY 0
+%define CONFIG_HARDCODED_TABLES 0
+%define CONFIG_OMX_RPI 0
+%define CONFIG_RUNTIME_CPUDETECT 1
+%define CONFIG_SAFE_BITSTREAM_READER 1
+%define CONFIG_SHARED 1
+%define CONFIG_SMALL 0
+%define CONFIG_STATIC 0
+%define CONFIG_SWSCALE_ALPHA 1
+%define CONFIG_GPL 0
+%define CONFIG_NONFREE 0
+%define CONFIG_VERSION3 0
+%define CONFIG_AVDEVICE 0
+%define CONFIG_AVFILTER 0
+%define CONFIG_SWSCALE 0
+%define CONFIG_POSTPROC 0
+%define CONFIG_AVFORMAT 0
+%define CONFIG_AVCODEC 1
+%define CONFIG_SWRESAMPLE 0
+%define CONFIG_AVUTIL 1
+%define CONFIG_FFPLAY 0
+%define CONFIG_FFPROBE 0
+%define CONFIG_FFMPEG 0
+%define CONFIG_DCT 1
+%define CONFIG_DWT 0
+%define CONFIG_ERROR_RESILIENCE 0
+%define CONFIG_FAAN 1
+%define CONFIG_FAST_UNALIGNED 1
+%define CONFIG_FFT 1
+%define CONFIG_LSP 0
+%define CONFIG_MDCT 0
+%define CONFIG_PIXELUTILS 0
+%define CONFIG_NETWORK 0
+%define CONFIG_RDFT 1
+%define CONFIG_AUTODETECT 0
+%define CONFIG_FONTCONFIG 0
+%define CONFIG_LARGE_TESTS 1
+%define CONFIG_LINUX_PERF 0
+%define CONFIG_MACOS_KPERF 0
+%define CONFIG_MEMORY_POISONING 0
+%define CONFIG_NEON_CLOBBER_TEST 0
+%define CONFIG_OSSFUZZ 0
+%define CONFIG_PIC 1
+%define CONFIG_PTX_COMPRESSION 0
+%define CONFIG_THUMB 0
+%define CONFIG_VALGRIND_BACKTRACE 0
+%define CONFIG_XMM_CLOBBER_TEST 0
+%define CONFIG_BSFS 0
+%define CONFIG_DECODERS 1
+%define CONFIG_ENCODERS 0
+%define CONFIG_HWACCELS 1
+%define CONFIG_PARSERS 1
+%define CONFIG_INDEVS 0
+%define CONFIG_OUTDEVS 0
+%define CONFIG_FILTERS 0
+%define CONFIG_DEMUXERS 0
+%define CONFIG_MUXERS 0
+%define CONFIG_PROTOCOLS 0
+%define CONFIG_AANDCTTABLES 0
+%define CONFIG_AC3DSP 0
+%define CONFIG_ADTS_HEADER 0
+%define CONFIG_ATSC_A53 0
+%define CONFIG_AUDIO_FRAME_QUEUE 0
+%define CONFIG_AUDIODSP 0
+%define CONFIG_BLOCKDSP 0
+%define CONFIG_BSWAPDSP 0
+%define CONFIG_CABAC 0
+%define CONFIG_CBS 1
+%define CONFIG_CBS_AV1 1
+%define CONFIG_CBS_H264 0
+%define CONFIG_CBS_H265 0
+%define CONFIG_CBS_JPEG 0
+%define CONFIG_CBS_MPEG2 0
+%define CONFIG_CBS_VP9 0
+%define CONFIG_DEFLATE_WRAPPER 0
+%define CONFIG_DIRAC_PARSE 0
+%define CONFIG_DNN 0
+%define CONFIG_DOVI_RPU 0
+%define CONFIG_DVPROFILE 0
+%define CONFIG_EXIF 0
+%define CONFIG_FAANDCT 1
+%define CONFIG_FAANIDCT 1
+%define CONFIG_FDCTDSP 1
+%define CONFIG_FLACDSP 1
+%define CONFIG_FMTCONVERT 0
+%define CONFIG_FRAME_THREAD_ENCODER 0
+%define CONFIG_G722DSP 0
+%define CONFIG_GOLOMB 0
+%define CONFIG_GPLV3 0
+%define CONFIG_H263DSP 0
+%define CONFIG_H264CHROMA 0
+%define CONFIG_H264DSP 0
+%define CONFIG_H264PARSE 0
+%define CONFIG_H264PRED 1
+%define CONFIG_H264QPEL 0
+%define CONFIG_HEVCPARSE 0
+%define CONFIG_HPELDSP 0
+%define CONFIG_HUFFMAN 0
+%define CONFIG_HUFFYUVDSP 0
+%define CONFIG_HUFFYUVENCDSP 0
+%define CONFIG_IDCTDSP 1
+%define CONFIG_IIRFILTER 0
+%define CONFIG_MDCT15 0
+%define CONFIG_INFLATE_WRAPPER 0
+%define CONFIG_INTRAX8 0
+%define CONFIG_ISO_MEDIA 0
+%define CONFIG_IVIDSP 0
+%define CONFIG_JPEGTABLES 0
+%define CONFIG_LGPLV3 0
+%define CONFIG_LIBX262 0
+%define CONFIG_LLAUDDSP 0
+%define CONFIG_LLVIDDSP 0
+%define CONFIG_LLVIDENCDSP 0
+%define CONFIG_LPC 0
+%define CONFIG_LZF 0
+%define CONFIG_ME_CMP 0
+%define CONFIG_MPEG_ER 0
+%define CONFIG_MPEGAUDIO 1
+%define CONFIG_MPEGAUDIODSP 1
+%define CONFIG_MPEGAUDIOHEADER 1
+%define CONFIG_MPEG4AUDIO 0
+%define CONFIG_MPEGVIDEO 0
+%define CONFIG_MPEGVIDEODEC 0
+%define CONFIG_MPEGVIDEOENC 0
+%define CONFIG_MSS34DSP 0
+%define CONFIG_PIXBLOCKDSP 0
+%define CONFIG_QPELDSP 0
+%define CONFIG_QSV 0
+%define CONFIG_QSVDEC 0
+%define CONFIG_QSVENC 0
+%define CONFIG_QSVVPP 0
+%define CONFIG_RANGECODER 0
+%define CONFIG_RIFFDEC 0
+%define CONFIG_RIFFENC 0
+%define CONFIG_RTPDEC 0
+%define CONFIG_RTPENC_CHAIN 0
+%define CONFIG_RV34DSP 0
+%define CONFIG_SCENE_SAD 0
+%define CONFIG_SINEWIN 0
+%define CONFIG_SNAPPY 0
+%define CONFIG_SRTP 0
+%define CONFIG_STARTCODE 0
+%define CONFIG_TEXTUREDSP 0
+%define CONFIG_TEXTUREDSPENC 0
+%define CONFIG_TPELDSP 0
+%define CONFIG_VAAPI_1 1
+%define CONFIG_VAAPI_ENCODE 0
+%define CONFIG_VC1DSP 0
+%define CONFIG_VIDEODSP 1
+%define CONFIG_VP3DSP 0
+%define CONFIG_VP56DSP 0
+%define CONFIG_VP8DSP 1
+%define CONFIG_WMA_FREQS 0
+%define CONFIG_WMV2DSP 0
diff --git a/media/ffvpx/config_unix64.h b/media/ffvpx/config_unix64.h
new file mode 100644
index 0000000000..c4311cf86e
--- /dev/null
+++ b/media/ffvpx/config_unix64.h
@@ -0,0 +1,733 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-decoder='vp8,vp9,mp3,flac,av1' --enable-parser='vp8,vp9' --disable-static --enable-shared --disable-autodetect --disable-iconv --enable-vaapi --enable-hwaccel='vp9_vaapi,vp8_vaapi,av1_vaapi' --enable-libdav1d"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2022
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "gcc 11 (Ubuntu 11.2.0-19ubuntu1)"
+#define OS_NAME linux
+#define av_restrict restrict
+#define EXTERN_PREFIX ""
+#define EXTERN_ASM
+#define BUILDSUF ""
+#define SLIBSUF ".so"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 0
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_LOONGARCH 0
+#define ARCH_LOONGARCH32 0
+#define ARCH_LOONGARCH64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_RISCV 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 1
+#define ARCH_X86_32 0
+#define ARCH_X86_64 1
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 0
+#define HAVE_NEON 0
+#define HAVE_VFP 0
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 1
+#define HAVE_AMD3DNOW 1
+#define HAVE_AMD3DNOWEXT 1
+#define HAVE_AVX 1
+#define HAVE_AVX2 1
+#define HAVE_AVX512 1
+#define HAVE_AVX512ICL 1
+#define HAVE_FMA3 1
+#define HAVE_FMA4 1
+#define HAVE_MMX 1
+#define HAVE_MMXEXT 1
+#define HAVE_SSE 1
+#define HAVE_SSE2 1
+#define HAVE_SSE3 1
+#define HAVE_SSE4 1
+#define HAVE_SSE42 1
+#define HAVE_SSSE3 1
+#define HAVE_XOP 1
+#define HAVE_CPUNOP 0
+#define HAVE_I686 1
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_LSX 0
+#define HAVE_LASX 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 0
+#define HAVE_NEON_EXTERNAL 0
+#define HAVE_VFP_EXTERNAL 0
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 1
+#define HAVE_AMD3DNOW_EXTERNAL 1
+#define HAVE_AMD3DNOWEXT_EXTERNAL 1
+#define HAVE_AVX_EXTERNAL 1
+#define HAVE_AVX2_EXTERNAL 1
+#define HAVE_AVX512_EXTERNAL 1
+#define HAVE_AVX512ICL_EXTERNAL 1
+#define HAVE_FMA3_EXTERNAL 1
+#define HAVE_FMA4_EXTERNAL 1
+#define HAVE_MMX_EXTERNAL 1
+#define HAVE_MMXEXT_EXTERNAL 1
+#define HAVE_SSE_EXTERNAL 1
+#define HAVE_SSE2_EXTERNAL 1
+#define HAVE_SSE3_EXTERNAL 1
+#define HAVE_SSE4_EXTERNAL 1
+#define HAVE_SSE42_EXTERNAL 1
+#define HAVE_SSSE3_EXTERNAL 1
+#define HAVE_XOP_EXTERNAL 1
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_LSX_EXTERNAL 0
+#define HAVE_LASX_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 0
+#define HAVE_NEON_INLINE 0
+#define HAVE_VFP_INLINE 0
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 1
+#define HAVE_AMD3DNOW_INLINE 1
+#define HAVE_AMD3DNOWEXT_INLINE 1
+#define HAVE_AVX_INLINE 1
+#define HAVE_AVX2_INLINE 1
+#define HAVE_AVX512_INLINE 1
+#define HAVE_AVX512ICL_INLINE 1
+#define HAVE_FMA3_INLINE 1
+#define HAVE_FMA4_INLINE 1
+#define HAVE_MMX_INLINE 1
+#define HAVE_MMXEXT_INLINE 1
+#define HAVE_SSE_INLINE 1
+#define HAVE_SSE2_INLINE 1
+#define HAVE_SSE3_INLINE 1
+#define HAVE_SSE4_INLINE 1
+#define HAVE_SSE42_INLINE 1
+#define HAVE_SSSE3_INLINE 1
+#define HAVE_XOP_INLINE 1
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_LSX_INLINE 0
+#define HAVE_LASX_INLINE 0
+#define HAVE_ALIGNED_STACK 1
+#define HAVE_FAST_64BIT 1
+#define HAVE_FAST_CLZ 1
+#define HAVE_FAST_CMOV 1
+#define HAVE_LOCAL_ALIGNED 1
+#define HAVE_SIMD_ALIGN_16 1
+#define HAVE_SIMD_ALIGN_32 1
+#define HAVE_SIMD_ALIGN_64 1
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 1
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 1
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_CABS 0
+#define HAVE_CEXP 0
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_X86ASM 1
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 1
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 1
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DIRENT_H 1
+#define HAVE_DXGIDEBUG_H 0
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_LINUX_DMA_BUF_H 0
+#define HAVE_LINUX_PERF_EVENT_H 1
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 1
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 0
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 1
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_FCNTL 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETAUXVAL 1
+#define HAVE_GETENV 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETMODULEHANDLE 0
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSTDHANDLE 0
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 1
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 0
+#define HAVE_LSTAT 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_PTHREAD_CANCEL 1
+#define HAVE_SCHED_GETAFFINITY 1
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETCONSOLECTRLHANDLER 0
+#define HAVE_SETDLLDIRECTORY 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 1
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 0
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 1
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 0
+#define HAVE_EBP_AVAILABLE 1
+#define HAVE_EBX_AVAILABLE 1
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 1
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 1
+#define HAVE_SYMVER_ASM_LABEL 0
+#define HAVE_SYMVER_GNU_ASM 1
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 1
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+#define HAVE_KCMVIDEOCODECTYPE_VP9 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_GZIP 1
+#define HAVE_LIBDRM_GETFB2 0
+#define HAVE_MAKEINFO 1
+#define HAVE_MAKEINFO_HTML 1
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_XMLLINT 1
+#define HAVE_ZLIB_GZIP 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 1
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 1
+#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LCMS2 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 1
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGLSLANG 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBJXL 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPENVINO 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPLACEBO 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRABBITMQ 0
+#define CONFIG_LIBRAV1E 0
+#define CONFIG_LIBRIST 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHADERC 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSVTAV1 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUAVS3D 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_MEDIAFOUNDATION 0
+#define CONFIG_METAL 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_VULKAN 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 0
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 1
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LARGE_TESTS 1
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MACOS_KPERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_PTX_COMPRESSION 0
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_HWACCELS 1
+#define CONFIG_PARSERS 1
+#define CONFIG_INDEVS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_DEMUXERS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_PROTOCOLS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_ATSC_A53 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 1
+#define CONFIG_CBS_AV1 1
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DEFLATE_WRAPPER 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DOVI_RPU 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 1
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INFLATE_WRAPPER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEG4AUDIO 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEODEC 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 1
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 1
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_unix_aarch64.h b/media/ffvpx/config_unix_aarch64.h
new file mode 100644
index 0000000000..2fcc27d431
--- /dev/null
+++ b/media/ffvpx/config_unix_aarch64.h
@@ -0,0 +1,735 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-decoder='vp8,vp9,mp3,flac,av1' --enable-parser='vp8,vp9' --disable-static --enable-shared --disable-autodetect --disable-iconv --as=clang --cc=clang"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2022
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Debian clang version 11.0.1-2"
+#define OS_NAME linux
+#define av_restrict restrict
+#define EXTERN_PREFIX ""
+#define EXTERN_ASM
+#define BUILDSUF ""
+#define SLIBSUF ".so"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 1
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_LOONGARCH 0
+#define ARCH_LOONGARCH32 0
+#define ARCH_LOONGARCH64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_RISCV 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 0
+#define ARCH_X86_32 0
+#define ARCH_X86_64 0
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 1
+#define HAVE_NEON 1
+#define HAVE_VFP 1
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 0
+#define HAVE_AMD3DNOW 0
+#define HAVE_AMD3DNOWEXT 0
+#define HAVE_AVX 0
+#define HAVE_AVX2 0
+#define HAVE_AVX512 0
+#define HAVE_AVX512ICL 0
+#define HAVE_FMA3 0
+#define HAVE_FMA4 0
+#define HAVE_MMX 0
+#define HAVE_MMXEXT 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
+#define HAVE_SSE3 0
+#define HAVE_SSE4 0
+#define HAVE_SSE42 0
+#define HAVE_SSSE3 0
+#define HAVE_XOP 0
+#define HAVE_CPUNOP 0
+#define HAVE_I686 0
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_LSX 0
+#define HAVE_LASX 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 1
+#define HAVE_NEON_EXTERNAL 1
+#define HAVE_VFP_EXTERNAL 1
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 0
+#define HAVE_AMD3DNOW_EXTERNAL 0
+#define HAVE_AMD3DNOWEXT_EXTERNAL 0
+#define HAVE_AVX_EXTERNAL 0
+#define HAVE_AVX2_EXTERNAL 0
+#define HAVE_AVX512_EXTERNAL 0
+#define HAVE_AVX512ICL_EXTERNAL 0
+#define HAVE_FMA3_EXTERNAL 0
+#define HAVE_FMA4_EXTERNAL 0
+#define HAVE_MMX_EXTERNAL 0
+#define HAVE_MMXEXT_EXTERNAL 0
+#define HAVE_SSE_EXTERNAL 0
+#define HAVE_SSE2_EXTERNAL 0
+#define HAVE_SSE3_EXTERNAL 0
+#define HAVE_SSE4_EXTERNAL 0
+#define HAVE_SSE42_EXTERNAL 0
+#define HAVE_SSSE3_EXTERNAL 0
+#define HAVE_XOP_EXTERNAL 0
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_LSX_EXTERNAL 0
+#define HAVE_LASX_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 1
+#define HAVE_NEON_INLINE 1
+#define HAVE_VFP_INLINE 1
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_AVX512_INLINE 0
+#define HAVE_AVX512ICL_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_LSX_INLINE 0
+#define HAVE_LASX_INLINE 0
+#define HAVE_ALIGNED_STACK 1
+#define HAVE_FAST_64BIT 1
+#define HAVE_FAST_CLZ 1
+#define HAVE_FAST_CMOV 0
+#define HAVE_LOCAL_ALIGNED 0
+#define HAVE_SIMD_ALIGN_16 1
+#define HAVE_SIMD_ALIGN_32 0
+#define HAVE_SIMD_ALIGN_64 0
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 0
+#define HAVE_MM_EMPTY 0
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 1
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1
+#define HAVE_CABS 0
+#define HAVE_CEXP 0
+#define HAVE_INLINE_ASM 1
+#define HAVE_SYMVER 1
+#define HAVE_X86ASM 0
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 1
+#define HAVE_ARPA_INET_H 1
+#define HAVE_ASM_TYPES_H 1
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 0
+#define HAVE_DIRENT_H 1
+#define HAVE_DXGIDEBUG_H 0
+#define HAVE_DXVA_H 0
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 0
+#define HAVE_LINUX_DMA_BUF_H 0
+#define HAVE_LINUX_PERF_EVENT_H 1
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_RESOURCE_H 1
+#define HAVE_SYS_SELECT_H 1
+#define HAVE_SYS_SOUNDCARD_H 1
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_UN_H 1
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 1
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 1
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 0
+#define HAVE_WINSOCK2_H 0
+#define HAVE_INTRINSICS_NEON 1
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 0
+#define HAVE_LIBC_MSVCRT 0
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 1
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 0
+#define HAVE_CLOCK_GETTIME 1
+#define HAVE_CLOSESOCKET 0
+#define HAVE_COMMANDLINETOARGVW 0
+#define HAVE_FCNTL 1
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETAUXVAL 1
+#define HAVE_GETENV 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 1
+#define HAVE_GETMODULEHANDLE 0
+#define HAVE_GETPROCESSAFFINITYMASK 0
+#define HAVE_GETPROCESSMEMORYINFO 0
+#define HAVE_GETPROCESSTIMES 0
+#define HAVE_GETRUSAGE 1
+#define HAVE_GETSTDHANDLE 0
+#define HAVE_GETSYSTEMTIMEASFILETIME 0
+#define HAVE_GETTIMEOFDAY 1
+#define HAVE_GLOB 1
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 1
+#define HAVE_INET_ATON 1
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 0
+#define HAVE_LSTAT 1
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 0
+#define HAVE_MKSTEMP 1
+#define HAVE_MMAP 1
+#define HAVE_MPROTECT 1
+#define HAVE_NANOSLEEP 1
+#define HAVE_PEEKNAMEDPIPE 0
+#define HAVE_PTHREAD_CANCEL 1
+#define HAVE_SCHED_GETAFFINITY 1
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 0
+#define HAVE_SETCONSOLECTRLHANDLER 0
+#define HAVE_SETDLLDIRECTORY 0
+#define HAVE_SETMODE 0
+#define HAVE_SETRLIMIT 1
+#define HAVE_SLEEP 0
+#define HAVE_STRERROR_R 1
+#define HAVE_SYSCONF 1
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 1
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 0
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 0
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 1
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 0
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 0
+#define HAVE_EBP_AVAILABLE 0
+#define HAVE_EBX_AVAILABLE 0
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0
+#define HAVE_INLINE_ASM_LABELS 1
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 0
+#define HAVE_SYMVER_ASM_LABEL 1
+#define HAVE_SYMVER_GNU_ASM 1
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+#define HAVE_KCMVIDEOCODECTYPE_VP9 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_GZIP 1
+#define HAVE_LIBDRM_GETFB2 0
+#define HAVE_MAKEINFO 0
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_XMLLINT 0
+#define HAVE_ZLIB_GZIP 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 0
+#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LCMS2 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGLSLANG 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBJXL 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPENVINO 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPLACEBO 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRABBITMQ 0
+#define CONFIG_LIBRAV1E 0
+#define CONFIG_LIBRIST 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHADERC 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSVTAV1 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUAVS3D 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_MEDIAFOUNDATION 0
+#define CONFIG_METAL 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_VULKAN 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 0
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 1
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LARGE_TESTS 1
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MACOS_KPERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_PTX_COMPRESSION 0
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_HWACCELS 0
+#define CONFIG_PARSERS 1
+#define CONFIG_INDEVS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_DEMUXERS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_PROTOCOLS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_ATSC_A53 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 1
+#define CONFIG_CBS_AV1 1
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DEFLATE_WRAPPER 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DOVI_RPU 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 1
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INFLATE_WRAPPER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEG4AUDIO 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEODEC 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 1
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 1
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_win32.asm b/media/ffvpx/config_win32.asm
new file mode 100644
index 0000000000..811303d6e1
--- /dev/null
+++ b/media/ffvpx/config_win32.asm
@@ -0,0 +1,718 @@
+; Automatically generated by configure - do not modify!
+%define ARCH_AARCH64 0
+%define ARCH_ALPHA 0
+%define ARCH_ARM 0
+%define ARCH_AVR32 0
+%define ARCH_AVR32_AP 0
+%define ARCH_AVR32_UC 0
+%define ARCH_BFIN 0
+%define ARCH_IA64 0
+%define ARCH_LOONGARCH 0
+%define ARCH_LOONGARCH32 0
+%define ARCH_LOONGARCH64 0
+%define ARCH_M68K 0
+%define ARCH_MIPS 0
+%define ARCH_MIPS64 0
+%define ARCH_PARISC 0
+%define ARCH_PPC 0
+%define ARCH_PPC64 0
+%define ARCH_RISCV 0
+%define ARCH_S390 0
+%define ARCH_SH4 0
+%define ARCH_SPARC 0
+%define ARCH_SPARC64 0
+%define ARCH_TILEGX 0
+%define ARCH_TILEPRO 0
+%define ARCH_TOMI 0
+%define ARCH_X86 1
+%define ARCH_X86_32 1
+%define ARCH_X86_64 0
+%define HAVE_ARMV5TE 0
+%define HAVE_ARMV6 0
+%define HAVE_ARMV6T2 0
+%define HAVE_ARMV8 0
+%define HAVE_NEON 0
+%define HAVE_VFP 0
+%define HAVE_VFPV3 0
+%define HAVE_SETEND 0
+%define HAVE_ALTIVEC 0
+%define HAVE_DCBZL 0
+%define HAVE_LDBRX 0
+%define HAVE_POWER8 0
+%define HAVE_PPC4XX 0
+%define HAVE_VSX 0
+%define HAVE_AESNI 1
+%define HAVE_AMD3DNOW 1
+%define HAVE_AMD3DNOWEXT 1
+%define HAVE_AVX 1
+%define HAVE_AVX2 1
+%define HAVE_AVX512 1
+%define HAVE_AVX512ICL 1
+%define HAVE_FMA3 1
+%define HAVE_FMA4 1
+%define HAVE_MMX 1
+%define HAVE_MMXEXT 1
+%define HAVE_SSE 1
+%define HAVE_SSE2 1
+%define HAVE_SSE3 1
+%define HAVE_SSE4 1
+%define HAVE_SSE42 1
+%define HAVE_SSSE3 1
+%define HAVE_XOP 1
+%define HAVE_CPUNOP 0
+%define HAVE_I686 1
+%define HAVE_MIPSFPU 0
+%define HAVE_MIPS32R2 0
+%define HAVE_MIPS32R5 0
+%define HAVE_MIPS64R2 0
+%define HAVE_MIPS32R6 0
+%define HAVE_MIPS64R6 0
+%define HAVE_MIPSDSP 0
+%define HAVE_MIPSDSPR2 0
+%define HAVE_MSA 0
+%define HAVE_LOONGSON2 0
+%define HAVE_LOONGSON3 0
+%define HAVE_MMI 0
+%define HAVE_LSX 0
+%define HAVE_LASX 0
+%define HAVE_ARMV5TE_EXTERNAL 0
+%define HAVE_ARMV6_EXTERNAL 0
+%define HAVE_ARMV6T2_EXTERNAL 0
+%define HAVE_ARMV8_EXTERNAL 0
+%define HAVE_NEON_EXTERNAL 0
+%define HAVE_VFP_EXTERNAL 0
+%define HAVE_VFPV3_EXTERNAL 0
+%define HAVE_SETEND_EXTERNAL 0
+%define HAVE_ALTIVEC_EXTERNAL 0
+%define HAVE_DCBZL_EXTERNAL 0
+%define HAVE_LDBRX_EXTERNAL 0
+%define HAVE_POWER8_EXTERNAL 0
+%define HAVE_PPC4XX_EXTERNAL 0
+%define HAVE_VSX_EXTERNAL 0
+%define HAVE_AESNI_EXTERNAL 1
+%define HAVE_AMD3DNOW_EXTERNAL 1
+%define HAVE_AMD3DNOWEXT_EXTERNAL 1
+%define HAVE_AVX_EXTERNAL 1
+%define HAVE_AVX2_EXTERNAL 1
+%define HAVE_AVX512_EXTERNAL 1
+%define HAVE_AVX512ICL_EXTERNAL 0
+%define HAVE_FMA3_EXTERNAL 1
+%define HAVE_FMA4_EXTERNAL 1
+%define HAVE_MMX_EXTERNAL 1
+%define HAVE_MMXEXT_EXTERNAL 1
+%define HAVE_SSE_EXTERNAL 1
+%define HAVE_SSE2_EXTERNAL 1
+%define HAVE_SSE3_EXTERNAL 1
+%define HAVE_SSE4_EXTERNAL 1
+%define HAVE_SSE42_EXTERNAL 1
+%define HAVE_SSSE3_EXTERNAL 1
+%define HAVE_XOP_EXTERNAL 1
+%define HAVE_CPUNOP_EXTERNAL 0
+%define HAVE_I686_EXTERNAL 0
+%define HAVE_MIPSFPU_EXTERNAL 0
+%define HAVE_MIPS32R2_EXTERNAL 0
+%define HAVE_MIPS32R5_EXTERNAL 0
+%define HAVE_MIPS64R2_EXTERNAL 0
+%define HAVE_MIPS32R6_EXTERNAL 0
+%define HAVE_MIPS64R6_EXTERNAL 0
+%define HAVE_MIPSDSP_EXTERNAL 0
+%define HAVE_MIPSDSPR2_EXTERNAL 0
+%define HAVE_MSA_EXTERNAL 0
+%define HAVE_LOONGSON2_EXTERNAL 0
+%define HAVE_LOONGSON3_EXTERNAL 0
+%define HAVE_MMI_EXTERNAL 0
+%define HAVE_LSX_EXTERNAL 0
+%define HAVE_LASX_EXTERNAL 0
+%define HAVE_ARMV5TE_INLINE 0
+%define HAVE_ARMV6_INLINE 0
+%define HAVE_ARMV6T2_INLINE 0
+%define HAVE_ARMV8_INLINE 0
+%define HAVE_NEON_INLINE 0
+%define HAVE_VFP_INLINE 0
+%define HAVE_VFPV3_INLINE 0
+%define HAVE_SETEND_INLINE 0
+%define HAVE_ALTIVEC_INLINE 0
+%define HAVE_DCBZL_INLINE 0
+%define HAVE_LDBRX_INLINE 0
+%define HAVE_POWER8_INLINE 0
+%define HAVE_PPC4XX_INLINE 0
+%define HAVE_VSX_INLINE 0
+%define HAVE_AESNI_INLINE 0
+%define HAVE_AMD3DNOW_INLINE 0
+%define HAVE_AMD3DNOWEXT_INLINE 0
+%define HAVE_AVX_INLINE 0
+%define HAVE_AVX2_INLINE 0
+%define HAVE_AVX512_INLINE 0
+%define HAVE_AVX512ICL_INLINE 0
+%define HAVE_FMA3_INLINE 0
+%define HAVE_FMA4_INLINE 0
+%define HAVE_MMX_INLINE 0
+%define HAVE_MMXEXT_INLINE 0
+%define HAVE_SSE_INLINE 0
+%define HAVE_SSE2_INLINE 0
+%define HAVE_SSE3_INLINE 0
+%define HAVE_SSE4_INLINE 0
+%define HAVE_SSE42_INLINE 0
+%define HAVE_SSSE3_INLINE 0
+%define HAVE_XOP_INLINE 0
+%define HAVE_CPUNOP_INLINE 0
+%define HAVE_I686_INLINE 0
+%define HAVE_MIPSFPU_INLINE 0
+%define HAVE_MIPS32R2_INLINE 0
+%define HAVE_MIPS32R5_INLINE 0
+%define HAVE_MIPS64R2_INLINE 0
+%define HAVE_MIPS32R6_INLINE 0
+%define HAVE_MIPS64R6_INLINE 0
+%define HAVE_MIPSDSP_INLINE 0
+%define HAVE_MIPSDSPR2_INLINE 0
+%define HAVE_MSA_INLINE 0
+%define HAVE_LOONGSON2_INLINE 0
+%define HAVE_LOONGSON3_INLINE 0
+%define HAVE_MMI_INLINE 0
+%define HAVE_LSX_INLINE 0
+%define HAVE_LASX_INLINE 0
+%define HAVE_ALIGNED_STACK 0
+%define HAVE_FAST_64BIT 0
+%define HAVE_FAST_CLZ 1
+%define HAVE_FAST_CMOV 0
+%define HAVE_LOCAL_ALIGNED 1
+%define HAVE_SIMD_ALIGN_16 1
+%define HAVE_SIMD_ALIGN_32 1
+%define HAVE_SIMD_ALIGN_64 1
+%define HAVE_ATOMIC_CAS_PTR 0
+%define HAVE_MACHINE_RW_BARRIER 0
+%define HAVE_MEMORYBARRIER 1
+%define HAVE_MM_EMPTY 1
+%define HAVE_RDTSC 1
+%define HAVE_SEM_TIMEDWAIT 0
+%define HAVE_SYNC_VAL_COMPARE_AND_SWAP 0
+%define HAVE_CABS 0
+%define HAVE_CEXP 0
+%define HAVE_INLINE_ASM 0
+%define HAVE_SYMVER 0
+%define HAVE_X86ASM 1
+%define HAVE_BIGENDIAN 0
+%define HAVE_FAST_UNALIGNED 1
+%define HAVE_ARPA_INET_H 0
+%define HAVE_ASM_TYPES_H 0
+%define HAVE_CDIO_PARANOIA_H 0
+%define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+%define HAVE_CUDA_H 0
+%define HAVE_DISPATCH_DISPATCH_H 0
+%define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+%define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+%define HAVE_DEV_IC_BT8XX_H 0
+%define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+%define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+%define HAVE_DIRECT_H 1
+%define HAVE_DIRENT_H 0
+%define HAVE_DXGIDEBUG_H 1
+%define HAVE_DXVA_H 1
+%define HAVE_ES2_GL_H 0
+%define HAVE_GSM_H 0
+%define HAVE_IO_H 1
+%define HAVE_LINUX_DMA_BUF_H 0
+%define HAVE_LINUX_PERF_EVENT_H 0
+%define HAVE_MACHINE_IOCTL_BT848_H 0
+%define HAVE_MACHINE_IOCTL_METEOR_H 0
+%define HAVE_OPENCV2_CORE_CORE_C_H 0
+%define HAVE_OPENGL_GL3_H 0
+%define HAVE_POLL_H 0
+%define HAVE_SYS_PARAM_H 0
+%define HAVE_SYS_RESOURCE_H 0
+%define HAVE_SYS_SELECT_H 0
+%define HAVE_SYS_SOUNDCARD_H 0
+%define HAVE_SYS_TIME_H 0
+%define HAVE_SYS_UN_H 0
+%define HAVE_SYS_VIDEOIO_H 0
+%define HAVE_TERMIOS_H 0
+%define HAVE_UDPLITE_H 0
+%define HAVE_UNISTD_H 0
+%define HAVE_VALGRIND_VALGRIND_H 0
+%define HAVE_WINDOWS_H 1
+%define HAVE_WINSOCK2_H 1
+%define HAVE_INTRINSICS_NEON 0
+%define HAVE_ATANF 1
+%define HAVE_ATAN2F 1
+%define HAVE_CBRT 1
+%define HAVE_CBRTF 1
+%define HAVE_COPYSIGN 1
+%define HAVE_COSF 1
+%define HAVE_ERF 1
+%define HAVE_EXP2 1
+%define HAVE_EXP2F 1
+%define HAVE_EXPF 1
+%define HAVE_HYPOT 1
+%define HAVE_ISFINITE 1
+%define HAVE_ISINF 1
+%define HAVE_ISNAN 1
+%define HAVE_LDEXPF 1
+%define HAVE_LLRINT 1
+%define HAVE_LLRINTF 1
+%define HAVE_LOG2 1
+%define HAVE_LOG2F 1
+%define HAVE_LOG10F 1
+%define HAVE_LRINT 1
+%define HAVE_LRINTF 1
+%define HAVE_POWF 1
+%define HAVE_RINT 1
+%define HAVE_ROUND 1
+%define HAVE_ROUNDF 1
+%define HAVE_SINF 1
+%define HAVE_TRUNC 1
+%define HAVE_TRUNCF 1
+%define HAVE_DOS_PATHS 1
+%define HAVE_LIBC_MSVCRT 1
+%define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+%define HAVE_SECTION_DATA_REL_RO 0
+%define HAVE_THREADS 1
+%define HAVE_UWP 0
+%define HAVE_WINRT 0
+%define HAVE_ACCESS 1
+%define HAVE_ALIGNED_MALLOC 1
+%define HAVE_CLOCK_GETTIME 0
+%define HAVE_CLOSESOCKET 1
+%define HAVE_COMMANDLINETOARGVW 1
+%define HAVE_FCNTL 0
+%define HAVE_GETADDRINFO 1
+%define HAVE_GETAUXVAL 0
+%define HAVE_GETENV 1
+%define HAVE_GETHRTIME 0
+%define HAVE_GETOPT 0
+%define HAVE_GETMODULEHANDLE 1
+%define HAVE_GETPROCESSAFFINITYMASK 1
+%define HAVE_GETPROCESSMEMORYINFO 1
+%define HAVE_GETPROCESSTIMES 1
+%define HAVE_GETRUSAGE 0
+%define HAVE_GETSTDHANDLE 1
+%define HAVE_GETSYSTEMTIMEASFILETIME 1
+%define HAVE_GETTIMEOFDAY 0
+%define HAVE_GLOB 0
+%define HAVE_GLXGETPROCADDRESS 0
+%define HAVE_GMTIME_R 0
+%define HAVE_INET_ATON 0
+%define HAVE_ISATTY 1
+%define HAVE_KBHIT 1
+%define HAVE_LSTAT 0
+%define HAVE_LZO1X_999_COMPRESS 0
+%define HAVE_MACH_ABSOLUTE_TIME 0
+%define HAVE_MAPVIEWOFFILE 1
+%define HAVE_MKSTEMP 0
+%define HAVE_MMAP 0
+%define HAVE_MPROTECT 0
+%define HAVE_NANOSLEEP 0
+%define HAVE_PEEKNAMEDPIPE 1
+%define HAVE_PTHREAD_CANCEL 0
+%define HAVE_SCHED_GETAFFINITY 0
+%define HAVE_SECITEMIMPORT 0
+%define HAVE_SETCONSOLETEXTATTRIBUTE 1
+%define HAVE_SETCONSOLECTRLHANDLER 1
+%define HAVE_SETDLLDIRECTORY 1
+%define HAVE_SETMODE 1
+%define HAVE_SETRLIMIT 0
+%define HAVE_SLEEP 1
+%define HAVE_STRERROR_R 0
+%define HAVE_SYSCONF 0
+%define HAVE_SYSCTL 0
+%define HAVE_USLEEP 0
+%define HAVE_UTGETOSTYPEFROMSTRING 0
+%define HAVE_VIRTUALALLOC 1
+%define HAVE_WGLGETPROCADDRESS 0
+%define HAVE_BCRYPT 1
+%define HAVE_VAAPI_DRM 0
+%define HAVE_VAAPI_X11 0
+%define HAVE_VDPAU_X11 0
+%define HAVE_PTHREADS 0
+%define HAVE_OS2THREADS 0
+%define HAVE_W32THREADS 1
+%define HAVE_AS_ARCH_DIRECTIVE 0
+%define HAVE_AS_DN_DIRECTIVE 0
+%define HAVE_AS_FPU_DIRECTIVE 0
+%define HAVE_AS_FUNC 0
+%define HAVE_AS_OBJECT_ARCH 0
+%define HAVE_ASM_MOD_Q 0
+%define HAVE_BLOCKS_EXTENSION 0
+%define HAVE_EBP_AVAILABLE 0
+%define HAVE_EBX_AVAILABLE 0
+%define HAVE_GNU_AS 0
+%define HAVE_GNU_WINDRES 0
+%define HAVE_IBM_ASM 0
+%define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0
+%define HAVE_INLINE_ASM_LABELS 0
+%define HAVE_INLINE_ASM_NONLOCAL_LABELS 0
+%define HAVE_PRAGMA_DEPRECATED 1
+%define HAVE_RSYNC_CONTIMEOUT 0
+%define HAVE_SYMVER_ASM_LABEL 0
+%define HAVE_SYMVER_GNU_ASM 0
+%define HAVE_VFP_ARGS 0
+%define HAVE_XFORM_ASM 0
+%define HAVE_XMM_CLOBBERS 0
+%define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+%define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+%define HAVE_KCMVIDEOCODECTYPE_VP9 0
+%define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+%define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+%define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+%define HAVE_SOCKLEN_T 1
+%define HAVE_STRUCT_ADDRINFO 1
+%define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+%define HAVE_STRUCT_IP_MREQ_SOURCE 1
+%define HAVE_STRUCT_IPV6_MREQ 1
+%define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0
+%define HAVE_STRUCT_POLLFD 1
+%define HAVE_STRUCT_RUSAGE_RU_MAXRSS 0
+%define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+%define HAVE_STRUCT_SOCKADDR_IN6 1
+%define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+%define HAVE_STRUCT_SOCKADDR_STORAGE 1
+%define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0
+%define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+%define HAVE_GZIP 1
+%define HAVE_LIBDRM_GETFB2 0
+%define HAVE_MAKEINFO 0
+%define HAVE_MAKEINFO_HTML 0
+%define HAVE_OPENCL_D3D11 0
+%define HAVE_OPENCL_DRM_ARM 0
+%define HAVE_OPENCL_DRM_BEIGNET 0
+%define HAVE_OPENCL_DXVA2 0
+%define HAVE_OPENCL_VAAPI_BEIGNET 0
+%define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+%define HAVE_PERL 1
+%define HAVE_POD2MAN 1
+%define HAVE_TEXI2HTML 0
+%define HAVE_XMLLINT 1
+%define HAVE_ZLIB_GZIP 0
+%define CONFIG_DOC 0
+%define CONFIG_HTMLPAGES 0
+%define CONFIG_MANPAGES 1
+%define CONFIG_PODPAGES 1
+%define CONFIG_TXTPAGES 0
+%define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+%define CONFIG_AVIO_READING_EXAMPLE 1
+%define CONFIG_DECODE_AUDIO_EXAMPLE 1
+%define CONFIG_DECODE_VIDEO_EXAMPLE 1
+%define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+%define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+%define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+%define CONFIG_EXTRACT_MVS_EXAMPLE 0
+%define CONFIG_FILTER_AUDIO_EXAMPLE 0
+%define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+%define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+%define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+%define CONFIG_HW_DECODE_EXAMPLE 0
+%define CONFIG_METADATA_EXAMPLE 0
+%define CONFIG_MUXING_EXAMPLE 0
+%define CONFIG_QSVDEC_EXAMPLE 0
+%define CONFIG_REMUXING_EXAMPLE 0
+%define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+%define CONFIG_SCALING_VIDEO_EXAMPLE 0
+%define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+%define CONFIG_TRANSCODING_EXAMPLE 0
+%define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+%define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+%define CONFIG_AVISYNTH 0
+%define CONFIG_FREI0R 0
+%define CONFIG_LIBCDIO 0
+%define CONFIG_LIBDAVS2 0
+%define CONFIG_LIBRUBBERBAND 0
+%define CONFIG_LIBVIDSTAB 0
+%define CONFIG_LIBX264 0
+%define CONFIG_LIBX265 0
+%define CONFIG_LIBXAVS 0
+%define CONFIG_LIBXAVS2 0
+%define CONFIG_LIBXVID 0
+%define CONFIG_DECKLINK 0
+%define CONFIG_LIBFDK_AAC 0
+%define CONFIG_LIBTLS 0
+%define CONFIG_GMP 0
+%define CONFIG_LIBARIBB24 0
+%define CONFIG_LIBLENSFUN 0
+%define CONFIG_LIBOPENCORE_AMRNB 0
+%define CONFIG_LIBOPENCORE_AMRWB 0
+%define CONFIG_LIBVO_AMRWBENC 0
+%define CONFIG_MBEDTLS 0
+%define CONFIG_RKMPP 0
+%define CONFIG_LIBSMBCLIENT 0
+%define CONFIG_CHROMAPRINT 0
+%define CONFIG_GCRYPT 0
+%define CONFIG_GNUTLS 0
+%define CONFIG_JNI 0
+%define CONFIG_LADSPA 0
+%define CONFIG_LCMS2 0
+%define CONFIG_LIBAOM 0
+%define CONFIG_LIBASS 0
+%define CONFIG_LIBBLURAY 0
+%define CONFIG_LIBBS2B 0
+%define CONFIG_LIBCACA 0
+%define CONFIG_LIBCELT 0
+%define CONFIG_LIBCODEC2 0
+%define CONFIG_LIBDAV1D 0
+%define CONFIG_LIBDC1394 0
+%define CONFIG_LIBDRM 0
+%define CONFIG_LIBFLITE 0
+%define CONFIG_LIBFONTCONFIG 0
+%define CONFIG_LIBFREETYPE 0
+%define CONFIG_LIBFRIBIDI 0
+%define CONFIG_LIBGLSLANG 0
+%define CONFIG_LIBGME 0
+%define CONFIG_LIBGSM 0
+%define CONFIG_LIBIEC61883 0
+%define CONFIG_LIBILBC 0
+%define CONFIG_LIBJACK 0
+%define CONFIG_LIBJXL 0
+%define CONFIG_LIBKLVANC 0
+%define CONFIG_LIBKVAZAAR 0
+%define CONFIG_LIBMODPLUG 0
+%define CONFIG_LIBMP3LAME 0
+%define CONFIG_LIBMYSOFA 0
+%define CONFIG_LIBOPENCV 0
+%define CONFIG_LIBOPENH264 0
+%define CONFIG_LIBOPENJPEG 0
+%define CONFIG_LIBOPENMPT 0
+%define CONFIG_LIBOPENVINO 0
+%define CONFIG_LIBOPUS 0
+%define CONFIG_LIBPLACEBO 0
+%define CONFIG_LIBPULSE 0
+%define CONFIG_LIBRABBITMQ 0
+%define CONFIG_LIBRAV1E 0
+%define CONFIG_LIBRIST 0
+%define CONFIG_LIBRSVG 0
+%define CONFIG_LIBRTMP 0
+%define CONFIG_LIBSHADERC 0
+%define CONFIG_LIBSHINE 0
+%define CONFIG_LIBSMBCLIENT 0
+%define CONFIG_LIBSNAPPY 0
+%define CONFIG_LIBSOXR 0
+%define CONFIG_LIBSPEEX 0
+%define CONFIG_LIBSRT 0
+%define CONFIG_LIBSSH 0
+%define CONFIG_LIBSVTAV1 0
+%define CONFIG_LIBTENSORFLOW 0
+%define CONFIG_LIBTESSERACT 0
+%define CONFIG_LIBTHEORA 0
+%define CONFIG_LIBTWOLAME 0
+%define CONFIG_LIBUAVS3D 0
+%define CONFIG_LIBV4L2 0
+%define CONFIG_LIBVMAF 0
+%define CONFIG_LIBVORBIS 0
+%define CONFIG_LIBVPX 0
+%define CONFIG_LIBWEBP 0
+%define CONFIG_LIBXML2 0
+%define CONFIG_LIBZIMG 0
+%define CONFIG_LIBZMQ 0
+%define CONFIG_LIBZVBI 0
+%define CONFIG_LV2 0
+%define CONFIG_MEDIACODEC 0
+%define CONFIG_OPENAL 0
+%define CONFIG_OPENGL 0
+%define CONFIG_OPENSSL 0
+%define CONFIG_POCKETSPHINX 0
+%define CONFIG_VAPOURSYNTH 0
+%define CONFIG_ALSA 0
+%define CONFIG_APPKIT 0
+%define CONFIG_AVFOUNDATION 0
+%define CONFIG_BZLIB 0
+%define CONFIG_COREIMAGE 0
+%define CONFIG_ICONV 0
+%define CONFIG_LIBXCB 0
+%define CONFIG_LIBXCB_SHM 0
+%define CONFIG_LIBXCB_SHAPE 0
+%define CONFIG_LIBXCB_XFIXES 0
+%define CONFIG_LZMA 0
+%define CONFIG_MEDIAFOUNDATION 0
+%define CONFIG_METAL 0
+%define CONFIG_SCHANNEL 0
+%define CONFIG_SDL2 0
+%define CONFIG_SECURETRANSPORT 0
+%define CONFIG_SNDIO 0
+%define CONFIG_XLIB 0
+%define CONFIG_ZLIB 0
+%define CONFIG_CUDA_NVCC 0
+%define CONFIG_CUDA_SDK 0
+%define CONFIG_LIBNPP 0
+%define CONFIG_LIBMFX 0
+%define CONFIG_MMAL 0
+%define CONFIG_OMX 0
+%define CONFIG_OPENCL 0
+%define CONFIG_AMF 0
+%define CONFIG_AUDIOTOOLBOX 0
+%define CONFIG_CRYSTALHD 0
+%define CONFIG_CUDA 0
+%define CONFIG_CUDA_LLVM 0
+%define CONFIG_CUVID 0
+%define CONFIG_D3D11VA 0
+%define CONFIG_DXVA2 0
+%define CONFIG_FFNVCODEC 0
+%define CONFIG_NVDEC 0
+%define CONFIG_NVENC 0
+%define CONFIG_VAAPI 0
+%define CONFIG_VDPAU 0
+%define CONFIG_VIDEOTOOLBOX 0
+%define CONFIG_VULKAN 0
+%define CONFIG_V4L2_M2M 0
+%define CONFIG_FTRAPV 0
+%define CONFIG_GRAY 0
+%define CONFIG_HARDCODED_TABLES 0
+%define CONFIG_OMX_RPI 0
+%define CONFIG_RUNTIME_CPUDETECT 1
+%define CONFIG_SAFE_BITSTREAM_READER 1
+%define CONFIG_SHARED 1
+%define CONFIG_SMALL 0
+%define CONFIG_STATIC 0
+%define CONFIG_SWSCALE_ALPHA 1
+%define CONFIG_GPL 0
+%define CONFIG_NONFREE 0
+%define CONFIG_VERSION3 0
+%define CONFIG_AVDEVICE 0
+%define CONFIG_AVFILTER 0
+%define CONFIG_SWSCALE 0
+%define CONFIG_POSTPROC 0
+%define CONFIG_AVFORMAT 0
+%define CONFIG_AVCODEC 1
+%define CONFIG_SWRESAMPLE 0
+%define CONFIG_AVUTIL 1
+%define CONFIG_FFPLAY 0
+%define CONFIG_FFPROBE 0
+%define CONFIG_FFMPEG 0
+%define CONFIG_DCT 1
+%define CONFIG_DWT 0
+%define CONFIG_ERROR_RESILIENCE 0
+%define CONFIG_FAAN 1
+%define CONFIG_FAST_UNALIGNED 1
+%define CONFIG_FFT 1
+%define CONFIG_LSP 0
+%define CONFIG_MDCT 0
+%define CONFIG_PIXELUTILS 0
+%define CONFIG_NETWORK 0
+%define CONFIG_RDFT 1
+%define CONFIG_AUTODETECT 0
+%define CONFIG_FONTCONFIG 0
+%define CONFIG_LARGE_TESTS 1
+%define CONFIG_LINUX_PERF 0
+%define CONFIG_MACOS_KPERF 0
+%define CONFIG_MEMORY_POISONING 0
+%define CONFIG_NEON_CLOBBER_TEST 0
+%define CONFIG_OSSFUZZ 0
+%define CONFIG_PIC 0
+%define CONFIG_PTX_COMPRESSION 0
+%define CONFIG_THUMB 0
+%define CONFIG_VALGRIND_BACKTRACE 0
+%define CONFIG_XMM_CLOBBER_TEST 0
+%define CONFIG_BSFS 0
+%define CONFIG_DECODERS 1
+%define CONFIG_ENCODERS 0
+%define CONFIG_HWACCELS 0
+%define CONFIG_PARSERS 1
+%define CONFIG_INDEVS 0
+%define CONFIG_OUTDEVS 0
+%define CONFIG_FILTERS 0
+%define CONFIG_DEMUXERS 0
+%define CONFIG_MUXERS 0
+%define CONFIG_PROTOCOLS 0
+%define CONFIG_AANDCTTABLES 0
+%define CONFIG_AC3DSP 0
+%define CONFIG_ADTS_HEADER 0
+%define CONFIG_ATSC_A53 0
+%define CONFIG_AUDIO_FRAME_QUEUE 0
+%define CONFIG_AUDIODSP 0
+%define CONFIG_BLOCKDSP 0
+%define CONFIG_BSWAPDSP 0
+%define CONFIG_CABAC 0
+%define CONFIG_CBS 0
+%define CONFIG_CBS_AV1 0
+%define CONFIG_CBS_H264 0
+%define CONFIG_CBS_H265 0
+%define CONFIG_CBS_JPEG 0
+%define CONFIG_CBS_MPEG2 0
+%define CONFIG_CBS_VP9 0
+%define CONFIG_DEFLATE_WRAPPER 0
+%define CONFIG_DIRAC_PARSE 0
+%define CONFIG_DNN 0
+%define CONFIG_DOVI_RPU 0
+%define CONFIG_DVPROFILE 0
+%define CONFIG_EXIF 0
+%define CONFIG_FAANDCT 1
+%define CONFIG_FAANIDCT 1
+%define CONFIG_FDCTDSP 1
+%define CONFIG_FLACDSP 1
+%define CONFIG_FMTCONVERT 0
+%define CONFIG_FRAME_THREAD_ENCODER 0
+%define CONFIG_G722DSP 0
+%define CONFIG_GOLOMB 0
+%define CONFIG_GPLV3 0
+%define CONFIG_H263DSP 0
+%define CONFIG_H264CHROMA 0
+%define CONFIG_H264DSP 0
+%define CONFIG_H264PARSE 0
+%define CONFIG_H264PRED 1
+%define CONFIG_H264QPEL 0
+%define CONFIG_HEVCPARSE 0
+%define CONFIG_HPELDSP 0
+%define CONFIG_HUFFMAN 0
+%define CONFIG_HUFFYUVDSP 0
+%define CONFIG_HUFFYUVENCDSP 0
+%define CONFIG_IDCTDSP 1
+%define CONFIG_IIRFILTER 0
+%define CONFIG_MDCT15 0
+%define CONFIG_INFLATE_WRAPPER 0
+%define CONFIG_INTRAX8 0
+%define CONFIG_ISO_MEDIA 0
+%define CONFIG_IVIDSP 0
+%define CONFIG_JPEGTABLES 0
+%define CONFIG_LGPLV3 0
+%define CONFIG_LIBX262 0
+%define CONFIG_LLAUDDSP 0
+%define CONFIG_LLVIDDSP 0
+%define CONFIG_LLVIDENCDSP 0
+%define CONFIG_LPC 0
+%define CONFIG_LZF 0
+%define CONFIG_ME_CMP 0
+%define CONFIG_MPEG_ER 0
+%define CONFIG_MPEGAUDIO 1
+%define CONFIG_MPEGAUDIODSP 1
+%define CONFIG_MPEGAUDIOHEADER 1
+%define CONFIG_MPEG4AUDIO 0
+%define CONFIG_MPEGVIDEO 0
+%define CONFIG_MPEGVIDEODEC 0
+%define CONFIG_MPEGVIDEOENC 0
+%define CONFIG_MSS34DSP 0
+%define CONFIG_PIXBLOCKDSP 0
+%define CONFIG_QPELDSP 0
+%define CONFIG_QSV 0
+%define CONFIG_QSVDEC 0
+%define CONFIG_QSVENC 0
+%define CONFIG_QSVVPP 0
+%define CONFIG_RANGECODER 0
+%define CONFIG_RIFFDEC 0
+%define CONFIG_RIFFENC 0
+%define CONFIG_RTPDEC 0
+%define CONFIG_RTPENC_CHAIN 0
+%define CONFIG_RV34DSP 0
+%define CONFIG_SCENE_SAD 0
+%define CONFIG_SINEWIN 0
+%define CONFIG_SNAPPY 0
+%define CONFIG_SRTP 0
+%define CONFIG_STARTCODE 0
+%define CONFIG_TEXTUREDSP 0
+%define CONFIG_TEXTUREDSPENC 0
+%define CONFIG_TPELDSP 0
+%define CONFIG_VAAPI_1 0
+%define CONFIG_VAAPI_ENCODE 0
+%define CONFIG_VC1DSP 0
+%define CONFIG_VIDEODSP 1
+%define CONFIG_VP3DSP 0
+%define CONFIG_VP56DSP 0
+%define CONFIG_VP8DSP 1
+%define CONFIG_WMA_FREQS 0
+%define CONFIG_WMV2DSP 0
diff --git a/media/ffvpx/config_win32.h b/media/ffvpx/config_win32.h
new file mode 100644
index 0000000000..8d310f0fbc
--- /dev/null
+++ b/media/ffvpx/config_win32.h
@@ -0,0 +1,735 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-decoder='vp8,vp9,mp3,flac' --enable-parser='vp8,vp9' --disable-static --enable-shared --disable-autodetect --toolchain=msvc"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2022
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Microsoft (R) C/C++ Optimizing Compiler Version 19.29.30133 for x86"
+#define OS_NAME win32
+#define av_restrict restrict
+#define EXTERN_PREFIX "_"
+#define EXTERN_ASM _
+#define BUILDSUF ""
+#define SLIBSUF ".dll"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 0
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_LOONGARCH 0
+#define ARCH_LOONGARCH32 0
+#define ARCH_LOONGARCH64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_RISCV 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 1
+#define ARCH_X86_32 1
+#define ARCH_X86_64 0
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 0
+#define HAVE_NEON 0
+#define HAVE_VFP 0
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 1
+#define HAVE_AMD3DNOW 1
+#define HAVE_AMD3DNOWEXT 1
+#define HAVE_AVX 1
+#define HAVE_AVX2 1
+#define HAVE_AVX512 1
+#define HAVE_AVX512ICL 1
+#define HAVE_FMA3 1
+#define HAVE_FMA4 1
+#define HAVE_MMX 1
+#define HAVE_MMXEXT 1
+#define HAVE_SSE 1
+#define HAVE_SSE2 1
+#define HAVE_SSE3 1
+#define HAVE_SSE4 1
+#define HAVE_SSE42 1
+#define HAVE_SSSE3 1
+#define HAVE_XOP 1
+#define HAVE_CPUNOP 0
+#define HAVE_I686 1
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_LSX 0
+#define HAVE_LASX 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 0
+#define HAVE_NEON_EXTERNAL 0
+#define HAVE_VFP_EXTERNAL 0
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 1
+#define HAVE_AMD3DNOW_EXTERNAL 1
+#define HAVE_AMD3DNOWEXT_EXTERNAL 1
+#define HAVE_AVX_EXTERNAL 1
+#define HAVE_AVX2_EXTERNAL 1
+#define HAVE_AVX512_EXTERNAL 1
+#define HAVE_AVX512ICL_EXTERNAL 0
+#define HAVE_FMA3_EXTERNAL 1
+#define HAVE_FMA4_EXTERNAL 1
+#define HAVE_MMX_EXTERNAL 1
+#define HAVE_MMXEXT_EXTERNAL 1
+#define HAVE_SSE_EXTERNAL 1
+#define HAVE_SSE2_EXTERNAL 1
+#define HAVE_SSE3_EXTERNAL 1
+#define HAVE_SSE4_EXTERNAL 1
+#define HAVE_SSE42_EXTERNAL 1
+#define HAVE_SSSE3_EXTERNAL 1
+#define HAVE_XOP_EXTERNAL 1
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_LSX_EXTERNAL 0
+#define HAVE_LASX_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 0
+#define HAVE_NEON_INLINE 0
+#define HAVE_VFP_INLINE 0
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_AVX512_INLINE 0
+#define HAVE_AVX512ICL_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_LSX_INLINE 0
+#define HAVE_LASX_INLINE 0
+#define HAVE_ALIGNED_STACK 0
+#define HAVE_FAST_64BIT 0
+#define HAVE_FAST_CLZ 1
+#define HAVE_FAST_CMOV 0
+#define HAVE_LOCAL_ALIGNED 1
+#define HAVE_SIMD_ALIGN_16 1
+#define HAVE_SIMD_ALIGN_32 1
+#define HAVE_SIMD_ALIGN_64 1
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 1
+#define HAVE_MM_EMPTY 1
+#define HAVE_RDTSC 1
+#define HAVE_SEM_TIMEDWAIT 0
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 0
+#define HAVE_CABS 0
+#define HAVE_CEXP 0
+#define HAVE_INLINE_ASM 0
+#define HAVE_SYMVER 0
+#define HAVE_X86ASM 1
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 1
+#define HAVE_ARPA_INET_H 0
+#define HAVE_ASM_TYPES_H 0
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 1
+#define HAVE_DIRENT_H 0
+#define HAVE_DXGIDEBUG_H 1
+#define HAVE_DXVA_H 1
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 1
+#define HAVE_LINUX_DMA_BUF_H 0
+#define HAVE_LINUX_PERF_EVENT_H 0
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 0
+#define HAVE_SYS_PARAM_H 0
+#define HAVE_SYS_RESOURCE_H 0
+#define HAVE_SYS_SELECT_H 0
+#define HAVE_SYS_SOUNDCARD_H 0
+#define HAVE_SYS_TIME_H 0
+#define HAVE_SYS_UN_H 0
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 0
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 0
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 1
+#define HAVE_WINSOCK2_H 1
+#define HAVE_INTRINSICS_NEON 0
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 1
+#define HAVE_LIBC_MSVCRT 1
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 0
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 1
+#define HAVE_CLOCK_GETTIME 0
+#define HAVE_CLOSESOCKET 1
+#define HAVE_COMMANDLINETOARGVW 1
+#define HAVE_FCNTL 0
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETAUXVAL 0
+#define HAVE_GETENV 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 0
+#define HAVE_GETMODULEHANDLE 1
+#define HAVE_GETPROCESSAFFINITYMASK 1
+#define HAVE_GETPROCESSMEMORYINFO 1
+#define HAVE_GETPROCESSTIMES 1
+#define HAVE_GETRUSAGE 0
+#define HAVE_GETSTDHANDLE 1
+#define HAVE_GETSYSTEMTIMEASFILETIME 1
+#define HAVE_GETTIMEOFDAY 0
+#define HAVE_GLOB 0
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 0
+#define HAVE_INET_ATON 0
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 1
+#define HAVE_LSTAT 0
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 1
+#define HAVE_MKSTEMP 0
+#define HAVE_MMAP 0
+#define HAVE_MPROTECT 0
+#define HAVE_NANOSLEEP 0
+#define HAVE_PEEKNAMEDPIPE 1
+#define HAVE_PTHREAD_CANCEL 0
+#define HAVE_SCHED_GETAFFINITY 0
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 1
+#define HAVE_SETCONSOLECTRLHANDLER 1
+#define HAVE_SETDLLDIRECTORY 1
+#define HAVE_SETMODE 1
+#define HAVE_SETRLIMIT 0
+#define HAVE_SLEEP 1
+#define HAVE_STRERROR_R 0
+#define HAVE_SYSCONF 0
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 0
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 1
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 1
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 0
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 1
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 0
+#define HAVE_EBP_AVAILABLE 0
+#define HAVE_EBX_AVAILABLE 0
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0
+#define HAVE_INLINE_ASM_LABELS 0
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 0
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 0
+#define HAVE_SYMVER_ASM_LABEL 0
+#define HAVE_SYMVER_GNU_ASM 0
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+#define HAVE_KCMVIDEOCODECTYPE_VP9 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 0
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_GZIP 1
+#define HAVE_LIBDRM_GETFB2 0
+#define HAVE_MAKEINFO 0
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_XMLLINT 1
+#define HAVE_ZLIB_GZIP 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 0
+#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LCMS2 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGLSLANG 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBJXL 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPENVINO 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPLACEBO 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRABBITMQ 0
+#define CONFIG_LIBRAV1E 0
+#define CONFIG_LIBRIST 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHADERC 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSVTAV1 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUAVS3D 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_MEDIAFOUNDATION 0
+#define CONFIG_METAL 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_VULKAN 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 0
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 1
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LARGE_TESTS 1
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MACOS_KPERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 0
+#define CONFIG_PTX_COMPRESSION 0
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_HWACCELS 0
+#define CONFIG_PARSERS 1
+#define CONFIG_INDEVS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_DEMUXERS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_PROTOCOLS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_ATSC_A53 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DEFLATE_WRAPPER 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DOVI_RPU 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 1
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INFLATE_WRAPPER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEG4AUDIO 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEODEC 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 1
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 1
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_win64.asm b/media/ffvpx/config_win64.asm
new file mode 100644
index 0000000000..5b80ae3ea4
--- /dev/null
+++ b/media/ffvpx/config_win64.asm
@@ -0,0 +1,718 @@
+; Automatically generated by configure - do not modify!
+%define ARCH_AARCH64 0
+%define ARCH_ALPHA 0
+%define ARCH_ARM 0
+%define ARCH_AVR32 0
+%define ARCH_AVR32_AP 0
+%define ARCH_AVR32_UC 0
+%define ARCH_BFIN 0
+%define ARCH_IA64 0
+%define ARCH_LOONGARCH 0
+%define ARCH_LOONGARCH32 0
+%define ARCH_LOONGARCH64 0
+%define ARCH_M68K 0
+%define ARCH_MIPS 0
+%define ARCH_MIPS64 0
+%define ARCH_PARISC 0
+%define ARCH_PPC 0
+%define ARCH_PPC64 0
+%define ARCH_RISCV 0
+%define ARCH_S390 0
+%define ARCH_SH4 0
+%define ARCH_SPARC 0
+%define ARCH_SPARC64 0
+%define ARCH_TILEGX 0
+%define ARCH_TILEPRO 0
+%define ARCH_TOMI 0
+%define ARCH_X86 1
+%define ARCH_X86_32 0
+%define ARCH_X86_64 1
+%define HAVE_ARMV5TE 0
+%define HAVE_ARMV6 0
+%define HAVE_ARMV6T2 0
+%define HAVE_ARMV8 0
+%define HAVE_NEON 0
+%define HAVE_VFP 0
+%define HAVE_VFPV3 0
+%define HAVE_SETEND 0
+%define HAVE_ALTIVEC 0
+%define HAVE_DCBZL 0
+%define HAVE_LDBRX 0
+%define HAVE_POWER8 0
+%define HAVE_PPC4XX 0
+%define HAVE_VSX 0
+%define HAVE_AESNI 1
+%define HAVE_AMD3DNOW 1
+%define HAVE_AMD3DNOWEXT 1
+%define HAVE_AVX 1
+%define HAVE_AVX2 1
+%define HAVE_AVX512 1
+%define HAVE_AVX512ICL 1
+%define HAVE_FMA3 1
+%define HAVE_FMA4 1
+%define HAVE_MMX 1
+%define HAVE_MMXEXT 1
+%define HAVE_SSE 1
+%define HAVE_SSE2 1
+%define HAVE_SSE3 1
+%define HAVE_SSE4 1
+%define HAVE_SSE42 1
+%define HAVE_SSSE3 1
+%define HAVE_XOP 1
+%define HAVE_CPUNOP 0
+%define HAVE_I686 1
+%define HAVE_MIPSFPU 0
+%define HAVE_MIPS32R2 0
+%define HAVE_MIPS32R5 0
+%define HAVE_MIPS64R2 0
+%define HAVE_MIPS32R6 0
+%define HAVE_MIPS64R6 0
+%define HAVE_MIPSDSP 0
+%define HAVE_MIPSDSPR2 0
+%define HAVE_MSA 0
+%define HAVE_LOONGSON2 0
+%define HAVE_LOONGSON3 0
+%define HAVE_MMI 0
+%define HAVE_LSX 0
+%define HAVE_LASX 0
+%define HAVE_ARMV5TE_EXTERNAL 0
+%define HAVE_ARMV6_EXTERNAL 0
+%define HAVE_ARMV6T2_EXTERNAL 0
+%define HAVE_ARMV8_EXTERNAL 0
+%define HAVE_NEON_EXTERNAL 0
+%define HAVE_VFP_EXTERNAL 0
+%define HAVE_VFPV3_EXTERNAL 0
+%define HAVE_SETEND_EXTERNAL 0
+%define HAVE_ALTIVEC_EXTERNAL 0
+%define HAVE_DCBZL_EXTERNAL 0
+%define HAVE_LDBRX_EXTERNAL 0
+%define HAVE_POWER8_EXTERNAL 0
+%define HAVE_PPC4XX_EXTERNAL 0
+%define HAVE_VSX_EXTERNAL 0
+%define HAVE_AESNI_EXTERNAL 1
+%define HAVE_AMD3DNOW_EXTERNAL 1
+%define HAVE_AMD3DNOWEXT_EXTERNAL 1
+%define HAVE_AVX_EXTERNAL 1
+%define HAVE_AVX2_EXTERNAL 1
+%define HAVE_AVX512_EXTERNAL 1
+%define HAVE_AVX512ICL_EXTERNAL 1
+%define HAVE_FMA3_EXTERNAL 1
+%define HAVE_FMA4_EXTERNAL 1
+%define HAVE_MMX_EXTERNAL 1
+%define HAVE_MMXEXT_EXTERNAL 1
+%define HAVE_SSE_EXTERNAL 1
+%define HAVE_SSE2_EXTERNAL 1
+%define HAVE_SSE3_EXTERNAL 1
+%define HAVE_SSE4_EXTERNAL 1
+%define HAVE_SSE42_EXTERNAL 1
+%define HAVE_SSSE3_EXTERNAL 1
+%define HAVE_XOP_EXTERNAL 1
+%define HAVE_CPUNOP_EXTERNAL 0
+%define HAVE_I686_EXTERNAL 0
+%define HAVE_MIPSFPU_EXTERNAL 0
+%define HAVE_MIPS32R2_EXTERNAL 0
+%define HAVE_MIPS32R5_EXTERNAL 0
+%define HAVE_MIPS64R2_EXTERNAL 0
+%define HAVE_MIPS32R6_EXTERNAL 0
+%define HAVE_MIPS64R6_EXTERNAL 0
+%define HAVE_MIPSDSP_EXTERNAL 0
+%define HAVE_MIPSDSPR2_EXTERNAL 0
+%define HAVE_MSA_EXTERNAL 0
+%define HAVE_LOONGSON2_EXTERNAL 0
+%define HAVE_LOONGSON3_EXTERNAL 0
+%define HAVE_MMI_EXTERNAL 0
+%define HAVE_LSX_EXTERNAL 0
+%define HAVE_LASX_EXTERNAL 0
+%define HAVE_ARMV5TE_INLINE 0
+%define HAVE_ARMV6_INLINE 0
+%define HAVE_ARMV6T2_INLINE 0
+%define HAVE_ARMV8_INLINE 0
+%define HAVE_NEON_INLINE 0
+%define HAVE_VFP_INLINE 0
+%define HAVE_VFPV3_INLINE 0
+%define HAVE_SETEND_INLINE 0
+%define HAVE_ALTIVEC_INLINE 0
+%define HAVE_DCBZL_INLINE 0
+%define HAVE_LDBRX_INLINE 0
+%define HAVE_POWER8_INLINE 0
+%define HAVE_PPC4XX_INLINE 0
+%define HAVE_VSX_INLINE 0
+%define HAVE_AESNI_INLINE 0
+%define HAVE_AMD3DNOW_INLINE 0
+%define HAVE_AMD3DNOWEXT_INLINE 0
+%define HAVE_AVX_INLINE 0
+%define HAVE_AVX2_INLINE 0
+%define HAVE_AVX512_INLINE 0
+%define HAVE_AVX512ICL_INLINE 0
+%define HAVE_FMA3_INLINE 0
+%define HAVE_FMA4_INLINE 0
+%define HAVE_MMX_INLINE 0
+%define HAVE_MMXEXT_INLINE 0
+%define HAVE_SSE_INLINE 0
+%define HAVE_SSE2_INLINE 0
+%define HAVE_SSE3_INLINE 0
+%define HAVE_SSE4_INLINE 0
+%define HAVE_SSE42_INLINE 0
+%define HAVE_SSSE3_INLINE 0
+%define HAVE_XOP_INLINE 0
+%define HAVE_CPUNOP_INLINE 0
+%define HAVE_I686_INLINE 0
+%define HAVE_MIPSFPU_INLINE 0
+%define HAVE_MIPS32R2_INLINE 0
+%define HAVE_MIPS32R5_INLINE 0
+%define HAVE_MIPS64R2_INLINE 0
+%define HAVE_MIPS32R6_INLINE 0
+%define HAVE_MIPS64R6_INLINE 0
+%define HAVE_MIPSDSP_INLINE 0
+%define HAVE_MIPSDSPR2_INLINE 0
+%define HAVE_MSA_INLINE 0
+%define HAVE_LOONGSON2_INLINE 0
+%define HAVE_LOONGSON3_INLINE 0
+%define HAVE_MMI_INLINE 0
+%define HAVE_LSX_INLINE 0
+%define HAVE_LASX_INLINE 0
+%define HAVE_ALIGNED_STACK 1
+%define HAVE_FAST_64BIT 1
+%define HAVE_FAST_CLZ 1
+%define HAVE_FAST_CMOV 1
+%define HAVE_LOCAL_ALIGNED 1
+%define HAVE_SIMD_ALIGN_16 1
+%define HAVE_SIMD_ALIGN_32 1
+%define HAVE_SIMD_ALIGN_64 1
+%define HAVE_ATOMIC_CAS_PTR 0
+%define HAVE_MACHINE_RW_BARRIER 0
+%define HAVE_MEMORYBARRIER 1
+%define HAVE_MM_EMPTY 0
+%define HAVE_RDTSC 1
+%define HAVE_SEM_TIMEDWAIT 0
+%define HAVE_SYNC_VAL_COMPARE_AND_SWAP 0
+%define HAVE_CABS 0
+%define HAVE_CEXP 0
+%define HAVE_INLINE_ASM 0
+%define HAVE_SYMVER 0
+%define HAVE_X86ASM 1
+%define HAVE_BIGENDIAN 0
+%define HAVE_FAST_UNALIGNED 1
+%define HAVE_ARPA_INET_H 0
+%define HAVE_ASM_TYPES_H 0
+%define HAVE_CDIO_PARANOIA_H 0
+%define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+%define HAVE_CUDA_H 0
+%define HAVE_DISPATCH_DISPATCH_H 0
+%define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+%define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+%define HAVE_DEV_IC_BT8XX_H 0
+%define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+%define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+%define HAVE_DIRECT_H 1
+%define HAVE_DIRENT_H 0
+%define HAVE_DXGIDEBUG_H 1
+%define HAVE_DXVA_H 1
+%define HAVE_ES2_GL_H 0
+%define HAVE_GSM_H 0
+%define HAVE_IO_H 1
+%define HAVE_LINUX_DMA_BUF_H 0
+%define HAVE_LINUX_PERF_EVENT_H 0
+%define HAVE_MACHINE_IOCTL_BT848_H 0
+%define HAVE_MACHINE_IOCTL_METEOR_H 0
+%define HAVE_OPENCV2_CORE_CORE_C_H 0
+%define HAVE_OPENGL_GL3_H 0
+%define HAVE_POLL_H 0
+%define HAVE_SYS_PARAM_H 0
+%define HAVE_SYS_RESOURCE_H 0
+%define HAVE_SYS_SELECT_H 0
+%define HAVE_SYS_SOUNDCARD_H 0
+%define HAVE_SYS_TIME_H 0
+%define HAVE_SYS_UN_H 0
+%define HAVE_SYS_VIDEOIO_H 0
+%define HAVE_TERMIOS_H 0
+%define HAVE_UDPLITE_H 0
+%define HAVE_UNISTD_H 0
+%define HAVE_VALGRIND_VALGRIND_H 0
+%define HAVE_WINDOWS_H 1
+%define HAVE_WINSOCK2_H 1
+%define HAVE_INTRINSICS_NEON 0
+%define HAVE_ATANF 1
+%define HAVE_ATAN2F 1
+%define HAVE_CBRT 1
+%define HAVE_CBRTF 1
+%define HAVE_COPYSIGN 1
+%define HAVE_COSF 1
+%define HAVE_ERF 1
+%define HAVE_EXP2 1
+%define HAVE_EXP2F 1
+%define HAVE_EXPF 1
+%define HAVE_HYPOT 1
+%define HAVE_ISFINITE 1
+%define HAVE_ISINF 1
+%define HAVE_ISNAN 1
+%define HAVE_LDEXPF 1
+%define HAVE_LLRINT 1
+%define HAVE_LLRINTF 1
+%define HAVE_LOG2 1
+%define HAVE_LOG2F 1
+%define HAVE_LOG10F 1
+%define HAVE_LRINT 1
+%define HAVE_LRINTF 1
+%define HAVE_POWF 1
+%define HAVE_RINT 1
+%define HAVE_ROUND 1
+%define HAVE_ROUNDF 1
+%define HAVE_SINF 1
+%define HAVE_TRUNC 1
+%define HAVE_TRUNCF 1
+%define HAVE_DOS_PATHS 1
+%define HAVE_LIBC_MSVCRT 1
+%define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+%define HAVE_SECTION_DATA_REL_RO 0
+%define HAVE_THREADS 1
+%define HAVE_UWP 0
+%define HAVE_WINRT 0
+%define HAVE_ACCESS 1
+%define HAVE_ALIGNED_MALLOC 1
+%define HAVE_CLOCK_GETTIME 0
+%define HAVE_CLOSESOCKET 1
+%define HAVE_COMMANDLINETOARGVW 1
+%define HAVE_FCNTL 0
+%define HAVE_GETADDRINFO 1
+%define HAVE_GETAUXVAL 0
+%define HAVE_GETENV 1
+%define HAVE_GETHRTIME 0
+%define HAVE_GETOPT 0
+%define HAVE_GETMODULEHANDLE 1
+%define HAVE_GETPROCESSAFFINITYMASK 1
+%define HAVE_GETPROCESSMEMORYINFO 1
+%define HAVE_GETPROCESSTIMES 1
+%define HAVE_GETRUSAGE 0
+%define HAVE_GETSTDHANDLE 1
+%define HAVE_GETSYSTEMTIMEASFILETIME 1
+%define HAVE_GETTIMEOFDAY 0
+%define HAVE_GLOB 0
+%define HAVE_GLXGETPROCADDRESS 0
+%define HAVE_GMTIME_R 0
+%define HAVE_INET_ATON 0
+%define HAVE_ISATTY 1
+%define HAVE_KBHIT 1
+%define HAVE_LSTAT 0
+%define HAVE_LZO1X_999_COMPRESS 0
+%define HAVE_MACH_ABSOLUTE_TIME 0
+%define HAVE_MAPVIEWOFFILE 1
+%define HAVE_MKSTEMP 0
+%define HAVE_MMAP 0
+%define HAVE_MPROTECT 0
+%define HAVE_NANOSLEEP 0
+%define HAVE_PEEKNAMEDPIPE 1
+%define HAVE_PTHREAD_CANCEL 0
+%define HAVE_SCHED_GETAFFINITY 0
+%define HAVE_SECITEMIMPORT 0
+%define HAVE_SETCONSOLETEXTATTRIBUTE 1
+%define HAVE_SETCONSOLECTRLHANDLER 1
+%define HAVE_SETDLLDIRECTORY 1
+%define HAVE_SETMODE 1
+%define HAVE_SETRLIMIT 0
+%define HAVE_SLEEP 1
+%define HAVE_STRERROR_R 0
+%define HAVE_SYSCONF 0
+%define HAVE_SYSCTL 0
+%define HAVE_USLEEP 0
+%define HAVE_UTGETOSTYPEFROMSTRING 0
+%define HAVE_VIRTUALALLOC 1
+%define HAVE_WGLGETPROCADDRESS 0
+%define HAVE_BCRYPT 1
+%define HAVE_VAAPI_DRM 0
+%define HAVE_VAAPI_X11 0
+%define HAVE_VDPAU_X11 0
+%define HAVE_PTHREADS 0
+%define HAVE_OS2THREADS 0
+%define HAVE_W32THREADS 1
+%define HAVE_AS_ARCH_DIRECTIVE 0
+%define HAVE_AS_DN_DIRECTIVE 0
+%define HAVE_AS_FPU_DIRECTIVE 0
+%define HAVE_AS_FUNC 0
+%define HAVE_AS_OBJECT_ARCH 0
+%define HAVE_ASM_MOD_Q 0
+%define HAVE_BLOCKS_EXTENSION 0
+%define HAVE_EBP_AVAILABLE 0
+%define HAVE_EBX_AVAILABLE 0
+%define HAVE_GNU_AS 0
+%define HAVE_GNU_WINDRES 0
+%define HAVE_IBM_ASM 0
+%define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0
+%define HAVE_INLINE_ASM_LABELS 0
+%define HAVE_INLINE_ASM_NONLOCAL_LABELS 0
+%define HAVE_PRAGMA_DEPRECATED 1
+%define HAVE_RSYNC_CONTIMEOUT 0
+%define HAVE_SYMVER_ASM_LABEL 0
+%define HAVE_SYMVER_GNU_ASM 0
+%define HAVE_VFP_ARGS 0
+%define HAVE_XFORM_ASM 0
+%define HAVE_XMM_CLOBBERS 0
+%define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+%define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+%define HAVE_KCMVIDEOCODECTYPE_VP9 0
+%define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+%define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+%define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+%define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+%define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+%define HAVE_SOCKLEN_T 1
+%define HAVE_STRUCT_ADDRINFO 1
+%define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+%define HAVE_STRUCT_IP_MREQ_SOURCE 1
+%define HAVE_STRUCT_IPV6_MREQ 1
+%define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0
+%define HAVE_STRUCT_POLLFD 1
+%define HAVE_STRUCT_RUSAGE_RU_MAXRSS 0
+%define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+%define HAVE_STRUCT_SOCKADDR_IN6 1
+%define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+%define HAVE_STRUCT_SOCKADDR_STORAGE 1
+%define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0
+%define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+%define HAVE_GZIP 1
+%define HAVE_LIBDRM_GETFB2 0
+%define HAVE_MAKEINFO 0
+%define HAVE_MAKEINFO_HTML 0
+%define HAVE_OPENCL_D3D11 0
+%define HAVE_OPENCL_DRM_ARM 0
+%define HAVE_OPENCL_DRM_BEIGNET 0
+%define HAVE_OPENCL_DXVA2 0
+%define HAVE_OPENCL_VAAPI_BEIGNET 0
+%define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+%define HAVE_PERL 1
+%define HAVE_POD2MAN 1
+%define HAVE_TEXI2HTML 0
+%define HAVE_XMLLINT 1
+%define HAVE_ZLIB_GZIP 0
+%define CONFIG_DOC 0
+%define CONFIG_HTMLPAGES 0
+%define CONFIG_MANPAGES 1
+%define CONFIG_PODPAGES 1
+%define CONFIG_TXTPAGES 0
+%define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+%define CONFIG_AVIO_READING_EXAMPLE 1
+%define CONFIG_DECODE_AUDIO_EXAMPLE 1
+%define CONFIG_DECODE_VIDEO_EXAMPLE 1
+%define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+%define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+%define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+%define CONFIG_EXTRACT_MVS_EXAMPLE 0
+%define CONFIG_FILTER_AUDIO_EXAMPLE 0
+%define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+%define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+%define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+%define CONFIG_HW_DECODE_EXAMPLE 0
+%define CONFIG_METADATA_EXAMPLE 0
+%define CONFIG_MUXING_EXAMPLE 0
+%define CONFIG_QSVDEC_EXAMPLE 0
+%define CONFIG_REMUXING_EXAMPLE 0
+%define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+%define CONFIG_SCALING_VIDEO_EXAMPLE 0
+%define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+%define CONFIG_TRANSCODING_EXAMPLE 0
+%define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+%define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+%define CONFIG_AVISYNTH 0
+%define CONFIG_FREI0R 0
+%define CONFIG_LIBCDIO 0
+%define CONFIG_LIBDAVS2 0
+%define CONFIG_LIBRUBBERBAND 0
+%define CONFIG_LIBVIDSTAB 0
+%define CONFIG_LIBX264 0
+%define CONFIG_LIBX265 0
+%define CONFIG_LIBXAVS 0
+%define CONFIG_LIBXAVS2 0
+%define CONFIG_LIBXVID 0
+%define CONFIG_DECKLINK 0
+%define CONFIG_LIBFDK_AAC 0
+%define CONFIG_LIBTLS 0
+%define CONFIG_GMP 0
+%define CONFIG_LIBARIBB24 0
+%define CONFIG_LIBLENSFUN 0
+%define CONFIG_LIBOPENCORE_AMRNB 0
+%define CONFIG_LIBOPENCORE_AMRWB 0
+%define CONFIG_LIBVO_AMRWBENC 0
+%define CONFIG_MBEDTLS 0
+%define CONFIG_RKMPP 0
+%define CONFIG_LIBSMBCLIENT 0
+%define CONFIG_CHROMAPRINT 0
+%define CONFIG_GCRYPT 0
+%define CONFIG_GNUTLS 0
+%define CONFIG_JNI 0
+%define CONFIG_LADSPA 0
+%define CONFIG_LCMS2 0
+%define CONFIG_LIBAOM 0
+%define CONFIG_LIBASS 0
+%define CONFIG_LIBBLURAY 0
+%define CONFIG_LIBBS2B 0
+%define CONFIG_LIBCACA 0
+%define CONFIG_LIBCELT 0
+%define CONFIG_LIBCODEC2 0
+%define CONFIG_LIBDAV1D 0
+%define CONFIG_LIBDC1394 0
+%define CONFIG_LIBDRM 0
+%define CONFIG_LIBFLITE 0
+%define CONFIG_LIBFONTCONFIG 0
+%define CONFIG_LIBFREETYPE 0
+%define CONFIG_LIBFRIBIDI 0
+%define CONFIG_LIBGLSLANG 0
+%define CONFIG_LIBGME 0
+%define CONFIG_LIBGSM 0
+%define CONFIG_LIBIEC61883 0
+%define CONFIG_LIBILBC 0
+%define CONFIG_LIBJACK 0
+%define CONFIG_LIBJXL 0
+%define CONFIG_LIBKLVANC 0
+%define CONFIG_LIBKVAZAAR 0
+%define CONFIG_LIBMODPLUG 0
+%define CONFIG_LIBMP3LAME 0
+%define CONFIG_LIBMYSOFA 0
+%define CONFIG_LIBOPENCV 0
+%define CONFIG_LIBOPENH264 0
+%define CONFIG_LIBOPENJPEG 0
+%define CONFIG_LIBOPENMPT 0
+%define CONFIG_LIBOPENVINO 0
+%define CONFIG_LIBOPUS 0
+%define CONFIG_LIBPLACEBO 0
+%define CONFIG_LIBPULSE 0
+%define CONFIG_LIBRABBITMQ 0
+%define CONFIG_LIBRAV1E 0
+%define CONFIG_LIBRIST 0
+%define CONFIG_LIBRSVG 0
+%define CONFIG_LIBRTMP 0
+%define CONFIG_LIBSHADERC 0
+%define CONFIG_LIBSHINE 0
+%define CONFIG_LIBSMBCLIENT 0
+%define CONFIG_LIBSNAPPY 0
+%define CONFIG_LIBSOXR 0
+%define CONFIG_LIBSPEEX 0
+%define CONFIG_LIBSRT 0
+%define CONFIG_LIBSSH 0
+%define CONFIG_LIBSVTAV1 0
+%define CONFIG_LIBTENSORFLOW 0
+%define CONFIG_LIBTESSERACT 0
+%define CONFIG_LIBTHEORA 0
+%define CONFIG_LIBTWOLAME 0
+%define CONFIG_LIBUAVS3D 0
+%define CONFIG_LIBV4L2 0
+%define CONFIG_LIBVMAF 0
+%define CONFIG_LIBVORBIS 0
+%define CONFIG_LIBVPX 0
+%define CONFIG_LIBWEBP 0
+%define CONFIG_LIBXML2 0
+%define CONFIG_LIBZIMG 0
+%define CONFIG_LIBZMQ 0
+%define CONFIG_LIBZVBI 0
+%define CONFIG_LV2 0
+%define CONFIG_MEDIACODEC 0
+%define CONFIG_OPENAL 0
+%define CONFIG_OPENGL 0
+%define CONFIG_OPENSSL 0
+%define CONFIG_POCKETSPHINX 0
+%define CONFIG_VAPOURSYNTH 0
+%define CONFIG_ALSA 0
+%define CONFIG_APPKIT 0
+%define CONFIG_AVFOUNDATION 0
+%define CONFIG_BZLIB 0
+%define CONFIG_COREIMAGE 0
+%define CONFIG_ICONV 0
+%define CONFIG_LIBXCB 0
+%define CONFIG_LIBXCB_SHM 0
+%define CONFIG_LIBXCB_SHAPE 0
+%define CONFIG_LIBXCB_XFIXES 0
+%define CONFIG_LZMA 0
+%define CONFIG_MEDIAFOUNDATION 0
+%define CONFIG_METAL 0
+%define CONFIG_SCHANNEL 0
+%define CONFIG_SDL2 0
+%define CONFIG_SECURETRANSPORT 0
+%define CONFIG_SNDIO 0
+%define CONFIG_XLIB 0
+%define CONFIG_ZLIB 0
+%define CONFIG_CUDA_NVCC 0
+%define CONFIG_CUDA_SDK 0
+%define CONFIG_LIBNPP 0
+%define CONFIG_LIBMFX 0
+%define CONFIG_MMAL 0
+%define CONFIG_OMX 0
+%define CONFIG_OPENCL 0
+%define CONFIG_AMF 0
+%define CONFIG_AUDIOTOOLBOX 0
+%define CONFIG_CRYSTALHD 0
+%define CONFIG_CUDA 0
+%define CONFIG_CUDA_LLVM 0
+%define CONFIG_CUVID 0
+%define CONFIG_D3D11VA 0
+%define CONFIG_DXVA2 0
+%define CONFIG_FFNVCODEC 0
+%define CONFIG_NVDEC 0
+%define CONFIG_NVENC 0
+%define CONFIG_VAAPI 0
+%define CONFIG_VDPAU 0
+%define CONFIG_VIDEOTOOLBOX 0
+%define CONFIG_VULKAN 0
+%define CONFIG_V4L2_M2M 0
+%define CONFIG_FTRAPV 0
+%define CONFIG_GRAY 0
+%define CONFIG_HARDCODED_TABLES 0
+%define CONFIG_OMX_RPI 0
+%define CONFIG_RUNTIME_CPUDETECT 1
+%define CONFIG_SAFE_BITSTREAM_READER 1
+%define CONFIG_SHARED 1
+%define CONFIG_SMALL 0
+%define CONFIG_STATIC 0
+%define CONFIG_SWSCALE_ALPHA 1
+%define CONFIG_GPL 0
+%define CONFIG_NONFREE 0
+%define CONFIG_VERSION3 0
+%define CONFIG_AVDEVICE 0
+%define CONFIG_AVFILTER 0
+%define CONFIG_SWSCALE 0
+%define CONFIG_POSTPROC 0
+%define CONFIG_AVFORMAT 0
+%define CONFIG_AVCODEC 1
+%define CONFIG_SWRESAMPLE 0
+%define CONFIG_AVUTIL 1
+%define CONFIG_FFPLAY 0
+%define CONFIG_FFPROBE 0
+%define CONFIG_FFMPEG 0
+%define CONFIG_DCT 1
+%define CONFIG_DWT 0
+%define CONFIG_ERROR_RESILIENCE 0
+%define CONFIG_FAAN 1
+%define CONFIG_FAST_UNALIGNED 1
+%define CONFIG_FFT 1
+%define CONFIG_LSP 0
+%define CONFIG_MDCT 0
+%define CONFIG_PIXELUTILS 0
+%define CONFIG_NETWORK 0
+%define CONFIG_RDFT 1
+%define CONFIG_AUTODETECT 0
+%define CONFIG_FONTCONFIG 0
+%define CONFIG_LARGE_TESTS 1
+%define CONFIG_LINUX_PERF 0
+%define CONFIG_MACOS_KPERF 0
+%define CONFIG_MEMORY_POISONING 0
+%define CONFIG_NEON_CLOBBER_TEST 0
+%define CONFIG_OSSFUZZ 0
+%define CONFIG_PIC 1
+%define CONFIG_PTX_COMPRESSION 0
+%define CONFIG_THUMB 0
+%define CONFIG_VALGRIND_BACKTRACE 0
+%define CONFIG_XMM_CLOBBER_TEST 0
+%define CONFIG_BSFS 0
+%define CONFIG_DECODERS 1
+%define CONFIG_ENCODERS 0
+%define CONFIG_HWACCELS 0
+%define CONFIG_PARSERS 1
+%define CONFIG_INDEVS 0
+%define CONFIG_OUTDEVS 0
+%define CONFIG_FILTERS 0
+%define CONFIG_DEMUXERS 0
+%define CONFIG_MUXERS 0
+%define CONFIG_PROTOCOLS 0
+%define CONFIG_AANDCTTABLES 0
+%define CONFIG_AC3DSP 0
+%define CONFIG_ADTS_HEADER 0
+%define CONFIG_ATSC_A53 0
+%define CONFIG_AUDIO_FRAME_QUEUE 0
+%define CONFIG_AUDIODSP 0
+%define CONFIG_BLOCKDSP 0
+%define CONFIG_BSWAPDSP 0
+%define CONFIG_CABAC 0
+%define CONFIG_CBS 0
+%define CONFIG_CBS_AV1 0
+%define CONFIG_CBS_H264 0
+%define CONFIG_CBS_H265 0
+%define CONFIG_CBS_JPEG 0
+%define CONFIG_CBS_MPEG2 0
+%define CONFIG_CBS_VP9 0
+%define CONFIG_DEFLATE_WRAPPER 0
+%define CONFIG_DIRAC_PARSE 0
+%define CONFIG_DNN 0
+%define CONFIG_DOVI_RPU 0
+%define CONFIG_DVPROFILE 0
+%define CONFIG_EXIF 0
+%define CONFIG_FAANDCT 1
+%define CONFIG_FAANIDCT 1
+%define CONFIG_FDCTDSP 1
+%define CONFIG_FLACDSP 1
+%define CONFIG_FMTCONVERT 0
+%define CONFIG_FRAME_THREAD_ENCODER 0
+%define CONFIG_G722DSP 0
+%define CONFIG_GOLOMB 0
+%define CONFIG_GPLV3 0
+%define CONFIG_H263DSP 0
+%define CONFIG_H264CHROMA 0
+%define CONFIG_H264DSP 0
+%define CONFIG_H264PARSE 0
+%define CONFIG_H264PRED 1
+%define CONFIG_H264QPEL 0
+%define CONFIG_HEVCPARSE 0
+%define CONFIG_HPELDSP 0
+%define CONFIG_HUFFMAN 0
+%define CONFIG_HUFFYUVDSP 0
+%define CONFIG_HUFFYUVENCDSP 0
+%define CONFIG_IDCTDSP 1
+%define CONFIG_IIRFILTER 0
+%define CONFIG_MDCT15 0
+%define CONFIG_INFLATE_WRAPPER 0
+%define CONFIG_INTRAX8 0
+%define CONFIG_ISO_MEDIA 0
+%define CONFIG_IVIDSP 0
+%define CONFIG_JPEGTABLES 0
+%define CONFIG_LGPLV3 0
+%define CONFIG_LIBX262 0
+%define CONFIG_LLAUDDSP 0
+%define CONFIG_LLVIDDSP 0
+%define CONFIG_LLVIDENCDSP 0
+%define CONFIG_LPC 0
+%define CONFIG_LZF 0
+%define CONFIG_ME_CMP 0
+%define CONFIG_MPEG_ER 0
+%define CONFIG_MPEGAUDIO 1
+%define CONFIG_MPEGAUDIODSP 1
+%define CONFIG_MPEGAUDIOHEADER 1
+%define CONFIG_MPEG4AUDIO 0
+%define CONFIG_MPEGVIDEO 0
+%define CONFIG_MPEGVIDEODEC 0
+%define CONFIG_MPEGVIDEOENC 0
+%define CONFIG_MSS34DSP 0
+%define CONFIG_PIXBLOCKDSP 0
+%define CONFIG_QPELDSP 0
+%define CONFIG_QSV 0
+%define CONFIG_QSVDEC 0
+%define CONFIG_QSVENC 0
+%define CONFIG_QSVVPP 0
+%define CONFIG_RANGECODER 0
+%define CONFIG_RIFFDEC 0
+%define CONFIG_RIFFENC 0
+%define CONFIG_RTPDEC 0
+%define CONFIG_RTPENC_CHAIN 0
+%define CONFIG_RV34DSP 0
+%define CONFIG_SCENE_SAD 0
+%define CONFIG_SINEWIN 0
+%define CONFIG_SNAPPY 0
+%define CONFIG_SRTP 0
+%define CONFIG_STARTCODE 0
+%define CONFIG_TEXTUREDSP 0
+%define CONFIG_TEXTUREDSPENC 0
+%define CONFIG_TPELDSP 0
+%define CONFIG_VAAPI_1 0
+%define CONFIG_VAAPI_ENCODE 0
+%define CONFIG_VC1DSP 0
+%define CONFIG_VIDEODSP 1
+%define CONFIG_VP3DSP 0
+%define CONFIG_VP56DSP 0
+%define CONFIG_VP8DSP 1
+%define CONFIG_WMA_FREQS 0
+%define CONFIG_WMV2DSP 0
diff --git a/media/ffvpx/config_win64.h b/media/ffvpx/config_win64.h
new file mode 100644
index 0000000000..557e7cdb76
--- /dev/null
+++ b/media/ffvpx/config_win64.h
@@ -0,0 +1,735 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-decoder='vp8,vp9,mp3,flac' --enable-parser='vp8,vp9' --disable-static --enable-shared --disable-autodetect --enable-w32threads --toolchain=msvc"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2022
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Microsoft (R) C/C++ Optimizing Compiler Version 19.29.30133 for x64"
+#define OS_NAME win32
+#define av_restrict restrict
+#define EXTERN_PREFIX ""
+#define EXTERN_ASM
+#define BUILDSUF ""
+#define SLIBSUF ".dll"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 0
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_LOONGARCH 0
+#define ARCH_LOONGARCH32 0
+#define ARCH_LOONGARCH64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_RISCV 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 1
+#define ARCH_X86_32 0
+#define ARCH_X86_64 1
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 0
+#define HAVE_NEON 0
+#define HAVE_VFP 0
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 1
+#define HAVE_AMD3DNOW 1
+#define HAVE_AMD3DNOWEXT 1
+#define HAVE_AVX 1
+#define HAVE_AVX2 1
+#define HAVE_AVX512 1
+#define HAVE_AVX512ICL 1
+#define HAVE_FMA3 1
+#define HAVE_FMA4 1
+#define HAVE_MMX 1
+#define HAVE_MMXEXT 1
+#define HAVE_SSE 1
+#define HAVE_SSE2 1
+#define HAVE_SSE3 1
+#define HAVE_SSE4 1
+#define HAVE_SSE42 1
+#define HAVE_SSSE3 1
+#define HAVE_XOP 1
+#define HAVE_CPUNOP 0
+#define HAVE_I686 1
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_LSX 0
+#define HAVE_LASX 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 0
+#define HAVE_NEON_EXTERNAL 0
+#define HAVE_VFP_EXTERNAL 0
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 1
+#define HAVE_AMD3DNOW_EXTERNAL 1
+#define HAVE_AMD3DNOWEXT_EXTERNAL 1
+#define HAVE_AVX_EXTERNAL 1
+#define HAVE_AVX2_EXTERNAL 1
+#define HAVE_AVX512_EXTERNAL 1
+#define HAVE_AVX512ICL_EXTERNAL 1
+#define HAVE_FMA3_EXTERNAL 1
+#define HAVE_FMA4_EXTERNAL 1
+#define HAVE_MMX_EXTERNAL 1
+#define HAVE_MMXEXT_EXTERNAL 1
+#define HAVE_SSE_EXTERNAL 1
+#define HAVE_SSE2_EXTERNAL 1
+#define HAVE_SSE3_EXTERNAL 1
+#define HAVE_SSE4_EXTERNAL 1
+#define HAVE_SSE42_EXTERNAL 1
+#define HAVE_SSSE3_EXTERNAL 1
+#define HAVE_XOP_EXTERNAL 1
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_LSX_EXTERNAL 0
+#define HAVE_LASX_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 0
+#define HAVE_NEON_INLINE 0
+#define HAVE_VFP_INLINE 0
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_AVX512_INLINE 0
+#define HAVE_AVX512ICL_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_LSX_INLINE 0
+#define HAVE_LASX_INLINE 0
+#define HAVE_ALIGNED_STACK 1
+#define HAVE_FAST_64BIT 1
+#define HAVE_FAST_CLZ 1
+#define HAVE_FAST_CMOV 1
+#define HAVE_LOCAL_ALIGNED 1
+#define HAVE_SIMD_ALIGN_16 1
+#define HAVE_SIMD_ALIGN_32 1
+#define HAVE_SIMD_ALIGN_64 1
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 1
+#define HAVE_MM_EMPTY 0
+#define HAVE_RDTSC 1
+#define HAVE_SEM_TIMEDWAIT 0
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 0
+#define HAVE_CABS 0
+#define HAVE_CEXP 0
+#define HAVE_INLINE_ASM 0
+#define HAVE_SYMVER 0
+#define HAVE_X86ASM 1
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 1
+#define HAVE_ARPA_INET_H 0
+#define HAVE_ASM_TYPES_H 0
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 1
+#define HAVE_DIRENT_H 0
+#define HAVE_DXGIDEBUG_H 1
+#define HAVE_DXVA_H 1
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 1
+#define HAVE_LINUX_DMA_BUF_H 0
+#define HAVE_LINUX_PERF_EVENT_H 0
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 0
+#define HAVE_SYS_PARAM_H 0
+#define HAVE_SYS_RESOURCE_H 0
+#define HAVE_SYS_SELECT_H 0
+#define HAVE_SYS_SOUNDCARD_H 0
+#define HAVE_SYS_TIME_H 0
+#define HAVE_SYS_UN_H 0
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 0
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 0
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 1
+#define HAVE_WINSOCK2_H 1
+#define HAVE_INTRINSICS_NEON 0
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 1
+#define HAVE_LIBC_MSVCRT 1
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 0
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 1
+#define HAVE_CLOCK_GETTIME 0
+#define HAVE_CLOSESOCKET 1
+#define HAVE_COMMANDLINETOARGVW 1
+#define HAVE_FCNTL 0
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETAUXVAL 0
+#define HAVE_GETENV 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 0
+#define HAVE_GETMODULEHANDLE 1
+#define HAVE_GETPROCESSAFFINITYMASK 1
+#define HAVE_GETPROCESSMEMORYINFO 1
+#define HAVE_GETPROCESSTIMES 1
+#define HAVE_GETRUSAGE 0
+#define HAVE_GETSTDHANDLE 1
+#define HAVE_GETSYSTEMTIMEASFILETIME 1
+#define HAVE_GETTIMEOFDAY 0
+#define HAVE_GLOB 0
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 0
+#define HAVE_INET_ATON 0
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 1
+#define HAVE_LSTAT 0
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 1
+#define HAVE_MKSTEMP 0
+#define HAVE_MMAP 0
+#define HAVE_MPROTECT 0
+#define HAVE_NANOSLEEP 0
+#define HAVE_PEEKNAMEDPIPE 1
+#define HAVE_PTHREAD_CANCEL 0
+#define HAVE_SCHED_GETAFFINITY 0
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 1
+#define HAVE_SETCONSOLECTRLHANDLER 1
+#define HAVE_SETDLLDIRECTORY 1
+#define HAVE_SETMODE 1
+#define HAVE_SETRLIMIT 0
+#define HAVE_SLEEP 1
+#define HAVE_STRERROR_R 0
+#define HAVE_SYSCONF 0
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 0
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 1
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 1
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 0
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 1
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 0
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 0
+#define HAVE_EBP_AVAILABLE 0
+#define HAVE_EBX_AVAILABLE 0
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0
+#define HAVE_INLINE_ASM_LABELS 0
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 0
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 0
+#define HAVE_SYMVER_ASM_LABEL 0
+#define HAVE_SYMVER_GNU_ASM 0
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0
+#define HAVE_KCMVIDEOCODECTYPE_VP9 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0
+#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0
+#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 0
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_GZIP 1
+#define HAVE_LIBDRM_GETFB2 0
+#define HAVE_MAKEINFO 0
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define HAVE_XMLLINT 1
+#define HAVE_ZLIB_GZIP 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 0
+#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LCMS2 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGLSLANG 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBJXL 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPENVINO 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPLACEBO 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRABBITMQ 0
+#define CONFIG_LIBRAV1E 0
+#define CONFIG_LIBRIST 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHADERC 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBSVTAV1 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBUAVS3D 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_MEDIAFOUNDATION 0
+#define CONFIG_METAL 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_VULKAN 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 0
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 1
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LARGE_TESTS 1
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MACOS_KPERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_PTX_COMPRESSION 0
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 0
+#define CONFIG_DECODERS 1
+#define CONFIG_ENCODERS 0
+#define CONFIG_HWACCELS 0
+#define CONFIG_PARSERS 1
+#define CONFIG_INDEVS 0
+#define CONFIG_OUTDEVS 0
+#define CONFIG_FILTERS 0
+#define CONFIG_DEMUXERS 0
+#define CONFIG_MUXERS 0
+#define CONFIG_PROTOCOLS 0
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_ATSC_A53 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DEFLATE_WRAPPER 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DOVI_RPU 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 1
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INFLATE_WRAPPER 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEG4AUDIO 0
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEODEC 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 1
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 1
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/config_win64_aarch64.h b/media/ffvpx/config_win64_aarch64.h
new file mode 100644
index 0000000000..575bf7903d
--- /dev/null
+++ b/media/ffvpx/config_win64_aarch64.h
@@ -0,0 +1,680 @@
+/* Automatically generated by configure - do not modify! */
+#ifndef FFMPEG_CONFIG_H
+#define FFMPEG_CONFIG_H
+#define FFMPEG_CONFIGURATION "--disable-all --enable-avcodec --enable-parser='vp8,vp9' --enable-decoder='vp8,vp9,mp3,flac' --disable-static --enable-shared --disable-autodetect --toolchain=msvc --enable-cross-compile --target-os=win32 --arch=arm64"
+#define FFMPEG_LICENSE "LGPL version 2.1 or later"
+#define CONFIG_THIS_YEAR 2019
+#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
+#define AVCONV_DATADIR "/usr/local/share/ffmpeg"
+#define CC_IDENT "Microsoft (R) C/C++ Optimizing Compiler Version 19.24.28315 for ARM64"
+#define av_restrict __restrict
+#define EXTERN_PREFIX ""
+#define EXTERN_ASM
+#define BUILDSUF ""
+#define SLIBSUF ".dll"
+#define HAVE_MMX2 HAVE_MMXEXT
+#define SWS_MAX_FILTER_SIZE 256
+#define ARCH_AARCH64 1
+#define ARCH_ALPHA 0
+#define ARCH_ARM 0
+#define ARCH_AVR32 0
+#define ARCH_AVR32_AP 0
+#define ARCH_AVR32_UC 0
+#define ARCH_BFIN 0
+#define ARCH_IA64 0
+#define ARCH_M68K 0
+#define ARCH_MIPS 0
+#define ARCH_MIPS64 0
+#define ARCH_PARISC 0
+#define ARCH_PPC 0
+#define ARCH_PPC64 0
+#define ARCH_S390 0
+#define ARCH_SH4 0
+#define ARCH_SPARC 0
+#define ARCH_SPARC64 0
+#define ARCH_TILEGX 0
+#define ARCH_TILEPRO 0
+#define ARCH_TOMI 0
+#define ARCH_X86 0
+#define ARCH_X86_32 0
+#define ARCH_X86_64 0
+#define HAVE_ARMV5TE 0
+#define HAVE_ARMV6 0
+#define HAVE_ARMV6T2 0
+#define HAVE_ARMV8 1
+#define HAVE_NEON 1
+#define HAVE_VFP 1
+#define HAVE_VFPV3 0
+#define HAVE_SETEND 0
+#define HAVE_ALTIVEC 0
+#define HAVE_DCBZL 0
+#define HAVE_LDBRX 0
+#define HAVE_POWER8 0
+#define HAVE_PPC4XX 0
+#define HAVE_VSX 0
+#define HAVE_AESNI 0
+#define HAVE_AMD3DNOW 0
+#define HAVE_AMD3DNOWEXT 0
+#define HAVE_AVX 0
+#define HAVE_AVX2 0
+#define HAVE_AVX512 0
+#define HAVE_FMA3 0
+#define HAVE_FMA4 0
+#define HAVE_MMX 0
+#define HAVE_MMXEXT 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
+#define HAVE_SSE3 0
+#define HAVE_SSE4 0
+#define HAVE_SSE42 0
+#define HAVE_SSSE3 0
+#define HAVE_XOP 0
+#define HAVE_CPUNOP 0
+#define HAVE_I686 0
+#define HAVE_MIPSFPU 0
+#define HAVE_MIPS32R2 0
+#define HAVE_MIPS32R5 0
+#define HAVE_MIPS64R2 0
+#define HAVE_MIPS32R6 0
+#define HAVE_MIPS64R6 0
+#define HAVE_MIPSDSP 0
+#define HAVE_MIPSDSPR2 0
+#define HAVE_MSA 0
+#define HAVE_MSA2 0
+#define HAVE_LOONGSON2 0
+#define HAVE_LOONGSON3 0
+#define HAVE_MMI 0
+#define HAVE_ARMV5TE_EXTERNAL 0
+#define HAVE_ARMV6_EXTERNAL 0
+#define HAVE_ARMV6T2_EXTERNAL 0
+#define HAVE_ARMV8_EXTERNAL 1
+#define HAVE_NEON_EXTERNAL 1
+#define HAVE_VFP_EXTERNAL 1
+#define HAVE_VFPV3_EXTERNAL 0
+#define HAVE_SETEND_EXTERNAL 0
+#define HAVE_ALTIVEC_EXTERNAL 0
+#define HAVE_DCBZL_EXTERNAL 0
+#define HAVE_LDBRX_EXTERNAL 0
+#define HAVE_POWER8_EXTERNAL 0
+#define HAVE_PPC4XX_EXTERNAL 0
+#define HAVE_VSX_EXTERNAL 0
+#define HAVE_AESNI_EXTERNAL 0
+#define HAVE_AMD3DNOW_EXTERNAL 0
+#define HAVE_AMD3DNOWEXT_EXTERNAL 0
+#define HAVE_AVX_EXTERNAL 0
+#define HAVE_AVX2_EXTERNAL 0
+#define HAVE_AVX512_EXTERNAL 0
+#define HAVE_FMA3_EXTERNAL 0
+#define HAVE_FMA4_EXTERNAL 0
+#define HAVE_MMX_EXTERNAL 0
+#define HAVE_MMXEXT_EXTERNAL 0
+#define HAVE_SSE_EXTERNAL 0
+#define HAVE_SSE2_EXTERNAL 0
+#define HAVE_SSE3_EXTERNAL 0
+#define HAVE_SSE4_EXTERNAL 0
+#define HAVE_SSE42_EXTERNAL 0
+#define HAVE_SSSE3_EXTERNAL 0
+#define HAVE_XOP_EXTERNAL 0
+#define HAVE_CPUNOP_EXTERNAL 0
+#define HAVE_I686_EXTERNAL 0
+#define HAVE_MIPSFPU_EXTERNAL 0
+#define HAVE_MIPS32R2_EXTERNAL 0
+#define HAVE_MIPS32R5_EXTERNAL 0
+#define HAVE_MIPS64R2_EXTERNAL 0
+#define HAVE_MIPS32R6_EXTERNAL 0
+#define HAVE_MIPS64R6_EXTERNAL 0
+#define HAVE_MIPSDSP_EXTERNAL 0
+#define HAVE_MIPSDSPR2_EXTERNAL 0
+#define HAVE_MSA_EXTERNAL 0
+#define HAVE_MSA2_EXTERNAL 0
+#define HAVE_LOONGSON2_EXTERNAL 0
+#define HAVE_LOONGSON3_EXTERNAL 0
+#define HAVE_MMI_EXTERNAL 0
+#define HAVE_ARMV5TE_INLINE 0
+#define HAVE_ARMV6_INLINE 0
+#define HAVE_ARMV6T2_INLINE 0
+#define HAVE_ARMV8_INLINE 0
+#define HAVE_NEON_INLINE 0
+#define HAVE_VFP_INLINE 0
+#define HAVE_VFPV3_INLINE 0
+#define HAVE_SETEND_INLINE 0
+#define HAVE_ALTIVEC_INLINE 0
+#define HAVE_DCBZL_INLINE 0
+#define HAVE_LDBRX_INLINE 0
+#define HAVE_POWER8_INLINE 0
+#define HAVE_PPC4XX_INLINE 0
+#define HAVE_VSX_INLINE 0
+#define HAVE_AESNI_INLINE 0
+#define HAVE_AMD3DNOW_INLINE 0
+#define HAVE_AMD3DNOWEXT_INLINE 0
+#define HAVE_AVX_INLINE 0
+#define HAVE_AVX2_INLINE 0
+#define HAVE_AVX512_INLINE 0
+#define HAVE_FMA3_INLINE 0
+#define HAVE_FMA4_INLINE 0
+#define HAVE_MMX_INLINE 0
+#define HAVE_MMXEXT_INLINE 0
+#define HAVE_SSE_INLINE 0
+#define HAVE_SSE2_INLINE 0
+#define HAVE_SSE3_INLINE 0
+#define HAVE_SSE4_INLINE 0
+#define HAVE_SSE42_INLINE 0
+#define HAVE_SSSE3_INLINE 0
+#define HAVE_XOP_INLINE 0
+#define HAVE_CPUNOP_INLINE 0
+#define HAVE_I686_INLINE 0
+#define HAVE_MIPSFPU_INLINE 0
+#define HAVE_MIPS32R2_INLINE 0
+#define HAVE_MIPS32R5_INLINE 0
+#define HAVE_MIPS64R2_INLINE 0
+#define HAVE_MIPS32R6_INLINE 0
+#define HAVE_MIPS64R6_INLINE 0
+#define HAVE_MIPSDSP_INLINE 0
+#define HAVE_MIPSDSPR2_INLINE 0
+#define HAVE_MSA_INLINE 0
+#define HAVE_MSA2_INLINE 0
+#define HAVE_LOONGSON2_INLINE 0
+#define HAVE_LOONGSON3_INLINE 0
+#define HAVE_MMI_INLINE 0
+#define HAVE_ALIGNED_STACK 1
+#define HAVE_FAST_64BIT 1
+#define HAVE_FAST_CLZ 0
+#define HAVE_FAST_CMOV 0
+#define HAVE_LOCAL_ALIGNED 0
+#define HAVE_SIMD_ALIGN_16 1
+#define HAVE_SIMD_ALIGN_32 0
+#define HAVE_SIMD_ALIGN_64 0
+#define HAVE_ATOMIC_CAS_PTR 0
+#define HAVE_MACHINE_RW_BARRIER 0
+#define HAVE_MEMORYBARRIER 1
+#define HAVE_MM_EMPTY 0
+#define HAVE_RDTSC 0
+#define HAVE_SEM_TIMEDWAIT 0
+#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 0
+#define HAVE_CABS 0
+#define HAVE_CEXP 0
+#define HAVE_INLINE_ASM 0
+#define HAVE_SYMVER 0
+#define HAVE_X86ASM 0
+#define HAVE_BIGENDIAN 0
+#define HAVE_FAST_UNALIGNED 1
+#define HAVE_ARPA_INET_H 0
+#define HAVE_ASM_TYPES_H 0
+#define HAVE_CDIO_PARANOIA_H 0
+#define HAVE_CDIO_PARANOIA_PARANOIA_H 0
+#define HAVE_CUDA_H 0
+#define HAVE_DISPATCH_DISPATCH_H 0
+#define HAVE_DEV_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0
+#define HAVE_DEV_IC_BT8XX_H 0
+#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0
+#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0
+#define HAVE_DIRECT_H 1
+#define HAVE_DIRENT_H 0
+#define HAVE_DXGIDEBUG_H 1
+#define HAVE_DXVA_H 1
+#define HAVE_ES2_GL_H 0
+#define HAVE_GSM_H 0
+#define HAVE_IO_H 1
+#define HAVE_LINUX_PERF_EVENT_H 0
+#define HAVE_MACHINE_IOCTL_BT848_H 0
+#define HAVE_MACHINE_IOCTL_METEOR_H 0
+#define HAVE_OPENCV2_CORE_CORE_C_H 0
+#define HAVE_OPENGL_GL3_H 0
+#define HAVE_POLL_H 0
+#define HAVE_SYS_PARAM_H 0
+#define HAVE_SYS_RESOURCE_H 0
+#define HAVE_SYS_SELECT_H 0
+#define HAVE_SYS_SOUNDCARD_H 0
+#define HAVE_SYS_TIME_H 0
+#define HAVE_SYS_UN_H 0
+#define HAVE_SYS_VIDEOIO_H 0
+#define HAVE_TERMIOS_H 0
+#define HAVE_UDPLITE_H 0
+#define HAVE_UNISTD_H 0
+#define HAVE_VALGRIND_VALGRIND_H 0
+#define HAVE_WINDOWS_H 1
+#define HAVE_WINSOCK2_H 1
+#define HAVE_INTRINSICS_NEON 1
+#define HAVE_ATANF 1
+#define HAVE_ATAN2F 1
+#define HAVE_CBRT 1
+#define HAVE_CBRTF 1
+#define HAVE_COPYSIGN 1
+#define HAVE_COSF 1
+#define HAVE_ERF 1
+#define HAVE_EXP2 1
+#define HAVE_EXP2F 1
+#define HAVE_EXPF 1
+#define HAVE_HYPOT 1
+#define HAVE_ISFINITE 1
+#define HAVE_ISINF 1
+#define HAVE_ISNAN 1
+#define HAVE_LDEXPF 1
+#define HAVE_LLRINT 1
+#define HAVE_LLRINTF 1
+#define HAVE_LOG2 1
+#define HAVE_LOG2F 1
+#define HAVE_LOG10F 1
+#define HAVE_LRINT 1
+#define HAVE_LRINTF 1
+#define HAVE_POWF 1
+#define HAVE_RINT 1
+#define HAVE_ROUND 1
+#define HAVE_ROUNDF 1
+#define HAVE_SINF 1
+#define HAVE_TRUNC 1
+#define HAVE_TRUNCF 1
+#define HAVE_DOS_PATHS 1
+#define HAVE_LIBC_MSVCRT 1
+#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0
+#define HAVE_SECTION_DATA_REL_RO 0
+#define HAVE_THREADS 1
+#define HAVE_UWP 0
+#define HAVE_WINRT 0
+#define HAVE_ACCESS 1
+#define HAVE_ALIGNED_MALLOC 1
+#define HAVE_CLOCK_GETTIME 0
+#define HAVE_CLOSESOCKET 1
+#define HAVE_COMMANDLINETOARGVW 1
+#define HAVE_FCNTL 0
+#define HAVE_GETADDRINFO 1
+#define HAVE_GETHRTIME 0
+#define HAVE_GETOPT 0
+#define HAVE_GETPROCESSAFFINITYMASK 1
+#define HAVE_GETPROCESSMEMORYINFO 1
+#define HAVE_GETPROCESSTIMES 1
+#define HAVE_GETRUSAGE 0
+#define HAVE_GETSYSTEMTIMEASFILETIME 1
+#define HAVE_GETTIMEOFDAY 0
+#define HAVE_GLOB 0
+#define HAVE_GLXGETPROCADDRESS 0
+#define HAVE_GMTIME_R 0
+#define HAVE_INET_ATON 0
+#define HAVE_ISATTY 1
+#define HAVE_KBHIT 1
+#define HAVE_LSTAT 0
+#define HAVE_LZO1X_999_COMPRESS 0
+#define HAVE_MACH_ABSOLUTE_TIME 0
+#define HAVE_MAPVIEWOFFILE 1
+#define HAVE_MKSTEMP 0
+#define HAVE_MMAP 0
+#define HAVE_MPROTECT 0
+#define HAVE_NANOSLEEP 0
+#define HAVE_PEEKNAMEDPIPE 1
+#define HAVE_PTHREAD_CANCEL 0
+#define HAVE_SCHED_GETAFFINITY 0
+#define HAVE_SECITEMIMPORT 0
+#define HAVE_SETCONSOLETEXTATTRIBUTE 1
+#define HAVE_SETCONSOLECTRLHANDLER 1
+#define HAVE_SETMODE 1
+#define HAVE_SETRLIMIT 0
+#define HAVE_SLEEP 1
+#define HAVE_STRERROR_R 0
+#define HAVE_SYSCONF 0
+#define HAVE_SYSCTL 0
+#define HAVE_USLEEP 0
+#define HAVE_UTGETOSTYPEFROMSTRING 0
+#define HAVE_VIRTUALALLOC 1
+#define HAVE_WGLGETPROCADDRESS 0
+#define HAVE_BCRYPT 1
+#define HAVE_VAAPI_DRM 0
+#define HAVE_VAAPI_X11 0
+#define HAVE_VDPAU_X11 0
+#define HAVE_PTHREADS 0
+#define HAVE_OS2THREADS 0
+#define HAVE_W32THREADS 1
+#define HAVE_AS_ARCH_DIRECTIVE 0
+#define HAVE_AS_DN_DIRECTIVE 0
+#define HAVE_AS_FPU_DIRECTIVE 0
+#define HAVE_AS_FUNC 1
+#define HAVE_AS_OBJECT_ARCH 0
+#define HAVE_ASM_MOD_Q 0
+#define HAVE_BLOCKS_EXTENSION 0
+#define HAVE_EBP_AVAILABLE 0
+#define HAVE_EBX_AVAILABLE 0
+#define HAVE_GNU_AS 0
+#define HAVE_GNU_WINDRES 0
+#define HAVE_IBM_ASM 0
+#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0
+#define HAVE_INLINE_ASM_LABELS 0
+#define HAVE_INLINE_ASM_NONLOCAL_LABELS 0
+#define HAVE_PRAGMA_DEPRECATED 1
+#define HAVE_RSYNC_CONTIMEOUT 1
+#define HAVE_SYMVER_ASM_LABEL 0
+#define HAVE_SYMVER_GNU_ASM 0
+#define HAVE_VFP_ARGS 0
+#define HAVE_XFORM_ASM 0
+#define HAVE_XMM_CLOBBERS 0
+#define HAVE_KCMVIDEOCODECTYPE_HEVC 0
+#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0
+#define HAVE_SOCKLEN_T 1
+#define HAVE_STRUCT_ADDRINFO 1
+#define HAVE_STRUCT_GROUP_SOURCE_REQ 1
+#define HAVE_STRUCT_IP_MREQ_SOURCE 1
+#define HAVE_STRUCT_IPV6_MREQ 1
+#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0
+#define HAVE_STRUCT_POLLFD 1
+#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 0
+#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0
+#define HAVE_STRUCT_SOCKADDR_IN6 1
+#define HAVE_STRUCT_SOCKADDR_SA_LEN 0
+#define HAVE_STRUCT_SOCKADDR_STORAGE 1
+#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0
+#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0
+#define HAVE_MAKEINFO 0
+#define HAVE_MAKEINFO_HTML 0
+#define HAVE_OPENCL_D3D11 0
+#define HAVE_OPENCL_DRM_ARM 0
+#define HAVE_OPENCL_DRM_BEIGNET 0
+#define HAVE_OPENCL_DXVA2 0
+#define HAVE_OPENCL_VAAPI_BEIGNET 0
+#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0
+#define HAVE_PERL 1
+#define HAVE_POD2MAN 1
+#define HAVE_TEXI2HTML 0
+#define CONFIG_DOC 0
+#define CONFIG_HTMLPAGES 0
+#define CONFIG_MANPAGES 1
+#define CONFIG_PODPAGES 1
+#define CONFIG_TXTPAGES 0
+#define CONFIG_AVIO_DIR_CMD_EXAMPLE 1
+#define CONFIG_AVIO_READING_EXAMPLE 1
+#define CONFIG_DECODE_AUDIO_EXAMPLE 1
+#define CONFIG_DECODE_VIDEO_EXAMPLE 1
+#define CONFIG_DEMUXING_DECODING_EXAMPLE 0
+#define CONFIG_ENCODE_AUDIO_EXAMPLE 1
+#define CONFIG_ENCODE_VIDEO_EXAMPLE 1
+#define CONFIG_EXTRACT_MVS_EXAMPLE 0
+#define CONFIG_FILTER_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_AUDIO_EXAMPLE 0
+#define CONFIG_FILTERING_VIDEO_EXAMPLE 0
+#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 0
+#define CONFIG_HW_DECODE_EXAMPLE 0
+#define CONFIG_METADATA_EXAMPLE 0
+#define CONFIG_MUXING_EXAMPLE 0
+#define CONFIG_QSVDEC_EXAMPLE 0
+#define CONFIG_REMUXING_EXAMPLE 0
+#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 0
+#define CONFIG_SCALING_VIDEO_EXAMPLE 0
+#define CONFIG_TRANSCODE_AAC_EXAMPLE 0
+#define CONFIG_TRANSCODING_EXAMPLE 0
+#define CONFIG_VAAPI_ENCODE_EXAMPLE 0
+#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0
+#define CONFIG_AVISYNTH 0
+#define CONFIG_FREI0R 0
+#define CONFIG_LIBCDIO 0
+#define CONFIG_LIBDAVS2 0
+#define CONFIG_LIBRUBBERBAND 0
+#define CONFIG_LIBVIDSTAB 0
+#define CONFIG_LIBX264 0
+#define CONFIG_LIBX265 0
+#define CONFIG_LIBXAVS 0
+#define CONFIG_LIBXAVS2 0
+#define CONFIG_LIBXVID 0
+#define CONFIG_DECKLINK 0
+#define CONFIG_LIBFDK_AAC 0
+#define CONFIG_OPENSSL 0
+#define CONFIG_LIBTLS 0
+#define CONFIG_GMP 0
+#define CONFIG_LIBARIBB24 0
+#define CONFIG_LIBLENSFUN 0
+#define CONFIG_LIBOPENCORE_AMRNB 0
+#define CONFIG_LIBOPENCORE_AMRWB 0
+#define CONFIG_LIBVMAF 0
+#define CONFIG_LIBVO_AMRWBENC 0
+#define CONFIG_MBEDTLS 0
+#define CONFIG_RKMPP 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_CHROMAPRINT 0
+#define CONFIG_GCRYPT 0
+#define CONFIG_GNUTLS 0
+#define CONFIG_JNI 0
+#define CONFIG_LADSPA 0
+#define CONFIG_LIBAOM 0
+#define CONFIG_LIBASS 0
+#define CONFIG_LIBBLURAY 0
+#define CONFIG_LIBBS2B 0
+#define CONFIG_LIBCACA 0
+#define CONFIG_LIBCELT 0
+#define CONFIG_LIBCODEC2 0
+#define CONFIG_LIBDAV1D 0
+#define CONFIG_LIBDC1394 0
+#define CONFIG_LIBDRM 0
+#define CONFIG_LIBFLITE 0
+#define CONFIG_LIBFONTCONFIG 0
+#define CONFIG_LIBFREETYPE 0
+#define CONFIG_LIBFRIBIDI 0
+#define CONFIG_LIBGME 0
+#define CONFIG_LIBGSM 0
+#define CONFIG_LIBIEC61883 0
+#define CONFIG_LIBILBC 0
+#define CONFIG_LIBJACK 0
+#define CONFIG_LIBKLVANC 0
+#define CONFIG_LIBKVAZAAR 0
+#define CONFIG_LIBMODPLUG 0
+#define CONFIG_LIBMP3LAME 0
+#define CONFIG_LIBMYSOFA 0
+#define CONFIG_LIBOPENCV 0
+#define CONFIG_LIBOPENH264 0
+#define CONFIG_LIBOPENJPEG 0
+#define CONFIG_LIBOPENMPT 0
+#define CONFIG_LIBOPUS 0
+#define CONFIG_LIBPULSE 0
+#define CONFIG_LIBRSVG 0
+#define CONFIG_LIBRTMP 0
+#define CONFIG_LIBSHINE 0
+#define CONFIG_LIBSMBCLIENT 0
+#define CONFIG_LIBSNAPPY 0
+#define CONFIG_LIBSOXR 0
+#define CONFIG_LIBSPEEX 0
+#define CONFIG_LIBSRT 0
+#define CONFIG_LIBSSH 0
+#define CONFIG_LIBTENSORFLOW 0
+#define CONFIG_LIBTESSERACT 0
+#define CONFIG_LIBTHEORA 0
+#define CONFIG_LIBTWOLAME 0
+#define CONFIG_LIBV4L2 0
+#define CONFIG_LIBVORBIS 0
+#define CONFIG_LIBVPX 0
+#define CONFIG_LIBWAVPACK 0
+#define CONFIG_LIBWEBP 0
+#define CONFIG_LIBXML2 0
+#define CONFIG_LIBZIMG 0
+#define CONFIG_LIBZMQ 0
+#define CONFIG_LIBZVBI 0
+#define CONFIG_LV2 0
+#define CONFIG_MEDIACODEC 0
+#define CONFIG_OPENAL 0
+#define CONFIG_OPENGL 0
+#define CONFIG_POCKETSPHINX 0
+#define CONFIG_VAPOURSYNTH 0
+#define CONFIG_ALSA 0
+#define CONFIG_APPKIT 0
+#define CONFIG_AVFOUNDATION 0
+#define CONFIG_BZLIB 0
+#define CONFIG_COREIMAGE 0
+#define CONFIG_ICONV 0
+#define CONFIG_LIBXCB 0
+#define CONFIG_LIBXCB_SHM 0
+#define CONFIG_LIBXCB_SHAPE 0
+#define CONFIG_LIBXCB_XFIXES 0
+#define CONFIG_LZMA 0
+#define CONFIG_SCHANNEL 0
+#define CONFIG_SDL2 0
+#define CONFIG_SECURETRANSPORT 0
+#define CONFIG_SNDIO 0
+#define CONFIG_XLIB 0
+#define CONFIG_ZLIB 0
+#define CONFIG_CUDA_NVCC 0
+#define CONFIG_CUDA_SDK 0
+#define CONFIG_LIBNPP 0
+#define CONFIG_LIBMFX 0
+#define CONFIG_MMAL 0
+#define CONFIG_OMX 0
+#define CONFIG_OPENCL 0
+#define CONFIG_AMF 0
+#define CONFIG_AUDIOTOOLBOX 0
+#define CONFIG_CRYSTALHD 0
+#define CONFIG_CUDA 0
+#define CONFIG_CUDA_LLVM 0
+#define CONFIG_CUVID 0
+#define CONFIG_D3D11VA 0
+#define CONFIG_DXVA2 0
+#define CONFIG_FFNVCODEC 0
+#define CONFIG_NVDEC 0
+#define CONFIG_NVENC 0
+#define CONFIG_VAAPI 0
+#define CONFIG_VDPAU 0
+#define CONFIG_VIDEOTOOLBOX 0
+#define CONFIG_V4L2_M2M 0
+#define CONFIG_XVMC 0
+#define CONFIG_FTRAPV 0
+#define CONFIG_GRAY 0
+#define CONFIG_HARDCODED_TABLES 0
+#define CONFIG_OMX_RPI 0
+#define CONFIG_RUNTIME_CPUDETECT 1
+#define CONFIG_SAFE_BITSTREAM_READER 1
+#define CONFIG_SHARED 1
+#define CONFIG_SMALL 0
+#define CONFIG_STATIC 0
+#define CONFIG_SWSCALE_ALPHA 1
+#define CONFIG_GPL 0
+#define CONFIG_NONFREE 0
+#define CONFIG_VERSION3 0
+#define CONFIG_AVDEVICE 0
+#define CONFIG_AVFILTER 0
+#define CONFIG_SWSCALE 0
+#define CONFIG_POSTPROC 0
+#define CONFIG_AVFORMAT 0
+#define CONFIG_AVCODEC 1
+#define CONFIG_SWRESAMPLE 0
+#define CONFIG_AVRESAMPLE 0
+#define CONFIG_AVUTIL 1
+#define CONFIG_FFPLAY 0
+#define CONFIG_FFPROBE 0
+#define CONFIG_FFMPEG 0
+#define CONFIG_DCT 1
+#define CONFIG_DWT 0
+#define CONFIG_ERROR_RESILIENCE 0
+#define CONFIG_FAAN 1
+#define CONFIG_FAST_UNALIGNED 1
+#define CONFIG_FFT 1
+#define CONFIG_LSP 0
+#define CONFIG_LZO 0
+#define CONFIG_MDCT 0
+#define CONFIG_PIXELUTILS 0
+#define CONFIG_NETWORK 0
+#define CONFIG_RDFT 1
+#define CONFIG_AUTODETECT 0
+#define CONFIG_FONTCONFIG 0
+#define CONFIG_LINUX_PERF 0
+#define CONFIG_MEMORY_POISONING 0
+#define CONFIG_NEON_CLOBBER_TEST 0
+#define CONFIG_OSSFUZZ 0
+#define CONFIG_PIC 1
+#define CONFIG_THUMB 0
+#define CONFIG_VALGRIND_BACKTRACE 0
+#define CONFIG_XMM_CLOBBER_TEST 0
+#define CONFIG_BSFS 1
+#define CONFIG_DECODERS 1
+#define CONFIG_PARSERS 1
+#define CONFIG_AANDCTTABLES 0
+#define CONFIG_AC3DSP 0
+#define CONFIG_ADTS_HEADER 0
+#define CONFIG_AUDIO_FRAME_QUEUE 0
+#define CONFIG_AUDIODSP 0
+#define CONFIG_BLOCKDSP 0
+#define CONFIG_BSWAPDSP 0
+#define CONFIG_CABAC 0
+#define CONFIG_CBS 0
+#define CONFIG_CBS_AV1 0
+#define CONFIG_CBS_H264 0
+#define CONFIG_CBS_H265 0
+#define CONFIG_CBS_JPEG 0
+#define CONFIG_CBS_MPEG2 0
+#define CONFIG_CBS_VP9 0
+#define CONFIG_DIRAC_PARSE 0
+#define CONFIG_DNN 0
+#define CONFIG_DVPROFILE 0
+#define CONFIG_EXIF 0
+#define CONFIG_FAANDCT 1
+#define CONFIG_FAANIDCT 1
+#define CONFIG_FDCTDSP 1
+#define CONFIG_FLACDSP 1
+#define CONFIG_FMTCONVERT 0
+#define CONFIG_FRAME_THREAD_ENCODER 0
+#define CONFIG_G722DSP 0
+#define CONFIG_GOLOMB 0
+#define CONFIG_GPLV3 0
+#define CONFIG_H263DSP 0
+#define CONFIG_H264CHROMA 0
+#define CONFIG_H264DSP 0
+#define CONFIG_H264PARSE 0
+#define CONFIG_H264PRED 1
+#define CONFIG_H264QPEL 0
+#define CONFIG_HEVCPARSE 0
+#define CONFIG_HPELDSP 0
+#define CONFIG_HUFFMAN 0
+#define CONFIG_HUFFYUVDSP 0
+#define CONFIG_HUFFYUVENCDSP 0
+#define CONFIG_IDCTDSP 1
+#define CONFIG_IIRFILTER 0
+#define CONFIG_MDCT15 0
+#define CONFIG_INTRAX8 0
+#define CONFIG_ISO_MEDIA 0
+#define CONFIG_IVIDSP 0
+#define CONFIG_JPEGTABLES 0
+#define CONFIG_LGPLV3 0
+#define CONFIG_LIBX262 0
+#define CONFIG_LLAUDDSP 0
+#define CONFIG_LLVIDDSP 0
+#define CONFIG_LLVIDENCDSP 0
+#define CONFIG_LPC 0
+#define CONFIG_LZF 0
+#define CONFIG_ME_CMP 0
+#define CONFIG_MPEG_ER 0
+#define CONFIG_MPEGAUDIO 1
+#define CONFIG_MPEGAUDIODSP 1
+#define CONFIG_MPEGAUDIOHEADER 1
+#define CONFIG_MPEGVIDEO 0
+#define CONFIG_MPEGVIDEOENC 0
+#define CONFIG_MSS34DSP 0
+#define CONFIG_PIXBLOCKDSP 0
+#define CONFIG_QPELDSP 0
+#define CONFIG_QSV 0
+#define CONFIG_QSVDEC 0
+#define CONFIG_QSVENC 0
+#define CONFIG_QSVVPP 0
+#define CONFIG_RANGECODER 0
+#define CONFIG_RIFFDEC 0
+#define CONFIG_RIFFENC 0
+#define CONFIG_RTPDEC 0
+#define CONFIG_RTPENC_CHAIN 0
+#define CONFIG_RV34DSP 0
+#define CONFIG_SCENE_SAD 0
+#define CONFIG_SINEWIN 0
+#define CONFIG_SNAPPY 0
+#define CONFIG_SRTP 0
+#define CONFIG_STARTCODE 0
+#define CONFIG_TEXTUREDSP 0
+#define CONFIG_TEXTUREDSPENC 0
+#define CONFIG_TPELDSP 0
+#define CONFIG_VAAPI_1 0
+#define CONFIG_VAAPI_ENCODE 0
+#define CONFIG_VC1DSP 0
+#define CONFIG_VIDEODSP 1
+#define CONFIG_VP3DSP 0
+#define CONFIG_VP56DSP 0
+#define CONFIG_VP8DSP 1
+#define CONFIG_WMA_FREQS 0
+#define CONFIG_WMV2DSP 0
+#define CONFIG_NULL_BSF 1
+#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 1
+#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_DECODER 1
+#define CONFIG_FLAC_DECODER 1
+#define CONFIG_MP3_DECODER 1
+#define CONFIG_VP8_PARSER 1
+#define CONFIG_VP9_PARSER 1
+#endif /* FFMPEG_CONFIG_H */
diff --git a/media/ffvpx/ffvpxcommon.mozbuild b/media/ffvpx/ffvpxcommon.mozbuild
new file mode 100644
index 0000000000..3035d9da0e
--- /dev/null
+++ b/media/ffvpx/ffvpxcommon.mozbuild
@@ -0,0 +1,71 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Add assembler flags and includes
+if CONFIG['CPU_ARCH'] != 'aarch64':
+    ASFLAGS += CONFIG['FFVPX_ASFLAGS']
+    ASFLAGS += ['-I%s/media/ffvpx/' % TOPSRCDIR]
+    ASFLAGS += ['-I%s/media/ffvpx/libavcodec/x86/' % TOPSRCDIR]
+    ASFLAGS += ['-I%s/media/ffvpx/libavutil/x86/' % TOPSRCDIR]
+
+if CONFIG['FFVPX_ASFLAGS']:
+    if CONFIG['FFVPX_USE_NASM']:
+        USE_NASM = True
+
+    if CONFIG['OS_ARCH'] == 'WINNT':
+        # Fix inline symbols and math defines for windows.
+        DEFINES['_USE_MATH_DEFINES'] = True
+        DEFINES['inline'] = "__inline"
+
+LOCAL_INCLUDES += ['/media/ffvpx']
+
+# We allow warnings for third-party code that can be updated from upstream.
+AllowCompilerWarnings()
+
+# Suppress warnings in third-party code.
+CFLAGS += [
+    '-Wno-parentheses',
+    '-Wno-pointer-sign',
+    '-Wno-sign-compare',
+    '-Wno-switch',
+    '-Wno-type-limits',
+    '-Wno-unused-function',
+    # XXX This does not seem to have any effect on some versions of GCC.
+    '-Wno-deprecated-declarations',
+]
+if CONFIG['CC_TYPE'] in ('clang', 'clang-cl'):
+    CFLAGS += [
+        '-Wno-absolute-value',
+        '-Wno-incompatible-pointer-types',
+        '-Wno-string-conversion',
+        '-Wno-visibility',
+    ]
+    if CONFIG['CC_TYPE'] == 'clang-cl':
+        CFLAGS += [
+            '-Wno-inconsistent-dllimport',
+            '-Wno-macro-redefined', # 'WIN32_LEAN_AND_MEAN' macro redefined
+        ]
+else:
+    CFLAGS += [
+        '-Wno-discarded-qualifiers',
+        '-Wno-maybe-uninitialized',
+    ]
+if CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+    # Force visibility of cpu and av_log symbols.
+    CFLAGS += ['-include', 'libavutil_visibility.h']
+if CONFIG['CC_TYPE'] == 'clang-cl':
+    LOCAL_INCLUDES += ['/media/ffvpx/compat/atomics/win32']
+DEFINES['HAVE_AV_CONFIG_H'] = True
+
+if CONFIG['MOZ_DEBUG']:
+    # Enable all assertions in debug builds.
+    DEFINES['ASSERT_LEVEL'] = 2
+elif not CONFIG['RELEASE_OR_BETA']:
+    # Enable fast assertions in opt builds of Nightly and Aurora.
+    DEFINES['ASSERT_LEVEL'] = 1
+
+# Add libFuzzer configuration directives
+include('/tools/fuzzing/libfuzzer-config.mozbuild')
diff --git a/media/ffvpx/libavcodec/aarch64/fft_init_aarch64.c b/media/ffvpx/libavcodec/aarch64/fft_init_aarch64.c
new file mode 100644
index 0000000000..77f5607960
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/fft_init_aarch64.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/aarch64/cpu.h"
+
+#include "libavcodec/fft.h"
+
+void ff_fft_permute_neon(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_neon(FFTContext *s, FFTComplex *z);
+
+void ff_imdct_calc_neon(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_neon(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_mdct_calc_neon(FFTContext *s, FFTSample *output, const FFTSample *input);
+
+av_cold void ff_fft_init_aarch64(FFTContext *s)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+ if (s->nbits < 17) {
+ s->fft_permute = ff_fft_permute_neon;
+ s->fft_calc = ff_fft_calc_neon;
+ }
+#if CONFIG_MDCT
+ s->imdct_calc = ff_imdct_calc_neon;
+ s->imdct_half = ff_imdct_half_neon;
+ s->mdct_calc = ff_mdct_calc_neon;
+ s->mdct_permutation = FF_MDCT_PERM_INTERLEAVE;
+#endif
+ }
+}
diff --git a/media/ffvpx/libavcodec/aarch64/fft_neon.S b/media/ffvpx/libavcodec/aarch64/fft_neon.S
new file mode 100644
index 0000000000..d7225511dd
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/fft_neon.S
@@ -0,0 +1,447 @@
+/*
+ * ARM NEON optimised FFT
+ *
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2009 Naotoshi Nojiri
+ * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This algorithm (though not any of the implementation details) is
+ * based on libdjbfft by D. J. Bernstein.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+#define M_SQRT1_2 0.70710678118654752440
+
+.macro transpose d0, d1, s0, s1
+ trn1 \d0, \s0, \s1
+ trn2 \d1, \s0, \s1
+.endm
+
+
+function fft4_neon
+ AARCH64_VALID_JUMP_TARGET
+ ld1 {v0.2s,v1.2s,v2.2s,v3.2s}, [x0]
+
+ fadd v4.2s, v0.2s, v1.2s // r0+r1,i0+i1
+ fsub v6.2s, v0.2s, v1.2s // r0-r1,i0-i1
+
+ ext v16.8b, v2.8b, v3.8b, #4
+ ext v17.8b, v3.8b, v2.8b, #4
+
+ fadd v5.2s, v2.2s, v3.2s // i2+i3,r2+r3
+ fsub v7.2s, v16.2s, v17.2s // r3-r2,i2-i3
+
+ fadd v0.2s, v4.2s, v5.2s
+ fsub v2.2s, v4.2s, v5.2s
+ fadd v1.2s, v6.2s, v7.2s
+ fsub v3.2s, v6.2s, v7.2s
+
+ st1 {v0.2s,v1.2s,v2.2s,v3.2s}, [x0]
+
+ ret
+endfunc
+
+function fft8_neon
+ AARCH64_VALID_JUMP_TARGET
+ mov x1, x0
+ ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32
+ ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0]
+ ext v22.8b, v2.8b, v3.8b, #4
+ ext v23.8b, v3.8b, v2.8b, #4
+ fadd v4.2s, v16.2s, v17.2s // r4+r5,i4+i5
+ fadd v5.2s, v18.2s, v19.2s // r6+r7,i6+i7
+ fsub v17.2s, v16.2s, v17.2s // r4-r5,i4-i5
+ fsub v19.2s, v18.2s, v19.2s // r6-r7,i6-i7
+ rev64 v27.2s, v28.2s // {w,-w} from mppm {-w,w}, w = M_SQRT1_2
+ fadd v20.2s, v0.2s, v1.2s // r0+r1,i0+i1
+ fadd v21.2s, v2.2s, v3.2s // r2+r3,i2+i3
+ fmul v26.2s, v17.2s, v28.2s // -a2r*w,a2i*w
+ ext v6.8b, v4.8b, v5.8b, #4
+ ext v7.8b, v5.8b, v4.8b, #4
+ fmul v27.2s, v19.2s, v27.2s // a3r*w,-a3i*w
+ fsub v23.2s, v22.2s, v23.2s // i2-i3,r3-r2
+ fsub v22.2s, v0.2s, v1.2s // r0-r1,i0-i1
+ fmul v24.2s, v17.2s, v28.s[1] // a2r*w,a2i*w
+ fmul v25.2s, v19.2s, v28.s[1] // a3r*w,a3i*w
+ fadd v0.2s, v20.2s, v21.2s
+ fsub v2.2s, v20.2s, v21.2s
+ fadd v1.2s, v22.2s, v23.2s
+ rev64 v26.2s, v26.2s
+ rev64 v27.2s, v27.2s
+ fsub v3.2s, v22.2s, v23.2s
+ fsub v6.2s, v6.2s, v7.2s
+ fadd v24.2s, v24.2s, v26.2s // a2r+a2i,a2i-a2r t1,t2
+ fadd v25.2s, v25.2s, v27.2s // a3r-a3i,a3i+a3r t5,t6
+ fadd v7.2s, v4.2s, v5.2s
+ fsub v18.2s, v2.2s, v6.2s
+ ext v26.8b, v24.8b, v25.8b, #4
+ ext v27.8b, v25.8b, v24.8b, #4
+ fadd v2.2s, v2.2s, v6.2s
+ fsub v16.2s, v0.2s, v7.2s
+ fadd v5.2s, v25.2s, v24.2s
+ fsub v4.2s, v26.2s, v27.2s
+ fadd v0.2s, v0.2s, v7.2s
+ fsub v17.2s, v1.2s, v5.2s
+ fsub v19.2s, v3.2s, v4.2s
+ fadd v3.2s, v3.2s, v4.2s
+ fadd v1.2s, v1.2s, v5.2s
+
+ st1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0]
+ st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x1]
+
+ ret
+endfunc
+
+function fft16_neon
+ AARCH64_VALID_JUMP_TARGET
+ mov x1, x0
+ ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32
+ ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0], #32
+ ext v22.8b, v2.8b, v3.8b, #4
+ ext v23.8b, v3.8b, v2.8b, #4
+ fadd v4.2s, v16.2s, v17.2s // r4+r5,i4+i5
+ fadd v5.2s, v18.2s, v19.2s // r6+r7,i6+i7
+ fsub v17.2s, v16.2s, v17.2s // r4-r5,i4-i5
+ fsub v19.2s, v18.2s, v19.2s // r6-r7,i6-i7
+ rev64 v27.2s, v28.2s // {w,-w} from mppm {-w,w}, w = M_SQRT1_2
+ fadd v20.2s, v0.2s, v1.2s // r0+r1,i0+i1
+ fadd v21.2s, v2.2s, v3.2s // r2+r3,i2+i3
+ fmul v26.2s, v17.2s, v28.2s // -a2r*w,a2i*w
+ ext v6.8b, v4.8b, v5.8b, #4
+ ext v7.8b, v5.8b, v4.8b, #4
+ fmul v27.2s, v19.2s, v27.2s // a3r*w,-a3i*w
+ fsub v23.2s, v22.2s, v23.2s // i2-i3,r3-r2
+ fsub v22.2s, v0.2s, v1.2s // r0-r1,i0-i1
+ fmul v24.2s, v17.2s, v28.s[1] // a2r*w,a2i*w
+ fmul v25.2s, v19.2s, v28.s[1] // a3r*w,a3i*w
+ fadd v0.2s, v20.2s, v21.2s
+ fsub v2.2s, v20.2s, v21.2s
+ fadd v1.2s, v22.2s, v23.2s
+ rev64 v26.2s, v26.2s
+ rev64 v27.2s, v27.2s
+ fsub v3.2s, v22.2s, v23.2s
+ fsub v6.2s, v6.2s, v7.2s
+ fadd v24.2s, v24.2s, v26.2s // a2r+a2i,a2i-a2r t1,t2
+ fadd v25.2s, v25.2s, v27.2s // a3r-a3i,a3i+a3r t5,t6
+ fadd v7.2s, v4.2s, v5.2s
+ fsub v18.2s, v2.2s, v6.2s
+ ld1 {v20.4s,v21.4s}, [x0], #32
+ ld1 {v22.4s,v23.4s}, [x0], #32
+ ext v26.8b, v24.8b, v25.8b, #4
+ ext v27.8b, v25.8b, v24.8b, #4
+ fadd v2.2s, v2.2s, v6.2s
+ fsub v16.2s, v0.2s, v7.2s
+ fadd v5.2s, v25.2s, v24.2s
+ fsub v4.2s, v26.2s, v27.2s
+ transpose v24.2d, v25.2d, v20.2d, v22.2d
+ transpose v26.2d, v27.2d, v21.2d, v23.2d
+ fadd v0.2s, v0.2s, v7.2s
+ fsub v17.2s, v1.2s, v5.2s
+ fsub v19.2s, v3.2s, v4.2s
+ fadd v3.2s, v3.2s, v4.2s
+ fadd v1.2s, v1.2s, v5.2s
+ ext v20.16b, v21.16b, v21.16b, #4
+ ext v21.16b, v23.16b, v23.16b, #4
+
+ zip1 v0.2d, v0.2d, v1.2d // {z[0], z[1]}
+ zip1 v1.2d, v2.2d, v3.2d // {z[2], z[3]}
+ zip1 v2.2d, v16.2d, v17.2d // {z[o1], z[o1+1]}
+ zip1 v3.2d, v18.2d, v19.2d // {z[o1+2],z[o1+3]}
+
+ // 2 x fft4
+ transpose v22.2d, v23.2d, v20.2d, v21.2d
+
+ fadd v4.4s, v24.4s, v25.4s
+ fadd v5.4s, v26.4s, v27.4s
+ fsub v6.4s, v24.4s, v25.4s
+ fsub v7.4s, v22.4s, v23.4s
+
+ ld1 {v23.4s}, [x14]
+
+ fadd v24.4s, v4.4s, v5.4s // {z[o2+0],z[o2+1]}
+ fsub v26.4s, v4.4s, v5.4s // {z[o2+2],z[o2+3]}
+ fadd v25.4s, v6.4s, v7.4s // {z[o3+0],z[o3+1]}
+ fsub v27.4s, v6.4s, v7.4s // {z[o3+2],z[o3+3]}
+
+ //fft_pass_neon_16
+ rev64 v7.4s, v25.4s
+ fmul v25.4s, v25.4s, v23.s[1]
+ fmul v7.4s, v7.4s, v29.4s
+ fmla v25.4s, v7.4s, v23.s[3] // {t1a,t2a,t5a,t6a}
+
+ zip1 v20.4s, v24.4s, v25.4s
+ zip2 v21.4s, v24.4s, v25.4s
+ fneg v22.4s, v20.4s
+ fadd v4.4s, v21.4s, v20.4s
+ fsub v6.4s, v20.4s, v21.4s // just the second half
+ fadd v5.4s, v21.4s, v22.4s // just the first half
+
+ tbl v4.16b, {v4.16b}, v30.16b // trans4_float
+ tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float
+
+ fsub v20.4s, v0.4s, v4.4s // {z[o2],z[o2+1]}
+ fadd v16.4s, v0.4s, v4.4s // {z[0], z[1]}
+ fsub v22.4s, v2.4s, v5.4s // {z[o3],z[o3+1]}
+ fadd v18.4s, v2.4s, v5.4s // {z[o1],z[o1+1]}
+
+//second half
+ rev64 v6.4s, v26.4s
+ fmul v26.4s, v26.4s, v23.s[2]
+ rev64 v7.4s, v27.4s
+ fmul v27.4s, v27.4s, v23.s[3]
+ fmul v6.4s, v6.4s, v29.4s
+ fmul v7.4s, v7.4s, v29.4s
+ fmla v26.4s, v6.4s, v23.s[2] // {t1,t2,t5,t6}
+ fmla v27.4s, v7.4s, v23.s[1] // {t1a,t2a,t5a,t6a}
+
+ zip1 v24.4s, v26.4s, v27.4s
+ zip2 v25.4s, v26.4s, v27.4s
+ fneg v26.4s, v24.4s
+ fadd v4.4s, v25.4s, v24.4s
+ fsub v6.4s, v24.4s, v25.4s // just the second half
+ fadd v5.4s, v25.4s, v26.4s // just the first half
+
+ tbl v4.16b, {v4.16b}, v30.16b // trans4_float
+ tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float
+
+ fadd v17.4s, v1.4s, v4.4s // {z[2], z[3]}
+ fsub v21.4s, v1.4s, v4.4s // {z[o2+2],z[o2+3]}
+ fadd v19.4s, v3.4s, v5.4s // {z[o1+2],z[o1+3]}
+ fsub v23.4s, v3.4s, v5.4s // {z[o3+2],z[o3+3]}
+
+ st1 {v16.4s,v17.4s}, [x1], #32
+ st1 {v18.4s,v19.4s}, [x1], #32
+ st1 {v20.4s,v21.4s}, [x1], #32
+ st1 {v22.4s,v23.4s}, [x1], #32
+
+ ret
+endfunc
+
+
+const trans4_float, align=4
+ .byte 0, 1, 2, 3
+ .byte 8, 9, 10, 11
+ .byte 4, 5, 6, 7
+ .byte 12, 13, 14, 15
+endconst
+
+const trans8_float, align=4
+ .byte 24, 25, 26, 27
+ .byte 0, 1, 2, 3
+ .byte 28, 29, 30, 31
+ .byte 4, 5, 6, 7
+endconst
+
+function fft_pass_neon
+ sub x6, x2, #1 // n - 1, loop counter
+ lsl x5, x2, #3 // 2 * n * sizeof FFTSample
+ lsl x1, x2, #4 // 2 * n * sizeof FFTComplex
+ add x5, x4, x5 // wim
+ add x3, x1, x2, lsl #5 // 4 * n * sizeof FFTComplex
+ add x2, x0, x2, lsl #5 // &z[o2]
+ add x3, x0, x3 // &z[o3]
+ add x1, x0, x1 // &z[o1]
+ ld1 {v20.4s},[x2] // {z[o2],z[o2+1]}
+ ld1 {v22.4s},[x3] // {z[o3],z[o3+1]}
+ ld1 {v4.2s}, [x4], #8 // {wre[0],wre[1]}
+ trn2 v25.2d, v20.2d, v22.2d
+ sub x5, x5, #4 // wim--
+ trn1 v24.2d, v20.2d, v22.2d
+ ld1 {v5.s}[0], [x5], x7 // d5[0] = wim[-1]
+ rev64 v7.4s, v25.4s
+ fmul v25.4s, v25.4s, v4.s[1]
+ ld1 {v16.4s}, [x0] // {z[0],z[1]}
+ fmul v7.4s, v7.4s, v29.4s
+ ld1 {v17.4s}, [x1] // {z[o1],z[o1+1]}
+ prfm pldl1keep, [x2, #16]
+ prfm pldl1keep, [x3, #16]
+ fmla v25.4s, v7.4s, v5.s[0] // {t1a,t2a,t5a,t6a}
+ prfm pldl1keep, [x0, #16]
+ prfm pldl1keep, [x1, #16]
+
+ zip1 v20.4s, v24.4s, v25.4s
+ zip2 v21.4s, v24.4s, v25.4s
+ fneg v22.4s, v20.4s
+ fadd v4.4s, v21.4s, v20.4s
+ fsub v6.4s, v20.4s, v21.4s // just the second half
+ fadd v5.4s, v21.4s, v22.4s // just the first half
+
+ tbl v4.16b, {v4.16b}, v30.16b // trans4_float
+ tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float
+
+ fadd v20.4s, v16.4s, v4.4s
+ fsub v22.4s, v16.4s, v4.4s
+ fadd v21.4s, v17.4s, v5.4s
+ st1 {v20.4s}, [x0], #16 // {z[0], z[1]}
+ fsub v23.4s, v17.4s, v5.4s
+
+ st1 {v21.4s}, [x1], #16 // {z[o1],z[o1+1]}
+ st1 {v22.4s}, [x2], #16 // {z[o2],z[o2+1]}
+ st1 {v23.4s}, [x3], #16 // {z[o3],z[o3+1]}
+1:
+ ld1 {v20.4s},[x2] // {z[o2],z[o2+1]}
+ ld1 {v22.4s},[x3] // {z[o3],z[o3+1]}
+ ld1 {v4.2s}, [x4], #8 // {wre[0],wre[1]}
+ transpose v26.2d, v27.2d, v20.2d, v22.2d
+ ld1 {v5.2s}, [x5], x7 // {wim[-1],wim[0]}
+ rev64 v6.4s, v26.4s
+ fmul v26.4s, v26.4s, v4.s[0]
+ rev64 v7.4s, v27.4s
+ fmul v27.4s, v27.4s, v4.s[1]
+ fmul v6.4s, v6.4s, v29.4s
+ fmul v7.4s, v7.4s, v29.4s
+ ld1 {v16.4s},[x0] // {z[0],z[1]}
+ fmla v26.4s, v6.4s, v5.s[1] // {t1,t2,t5,t6}
+ fmla v27.4s, v7.4s, v5.s[0] // {t1a,t2a,t5a,t6a}
+ ld1 {v17.4s},[x1] // {z[o1],z[o1+1]}
+
+ subs x6, x6, #1 // n--
+
+ zip1 v20.4s, v26.4s, v27.4s
+ zip2 v21.4s, v26.4s, v27.4s
+ fneg v22.4s, v20.4s
+ fadd v4.4s, v21.4s, v20.4s
+ fsub v6.4s, v20.4s, v21.4s // just the second half
+ fadd v5.4s, v21.4s, v22.4s // just the first half
+
+ tbl v4.16b, {v4.16b}, v30.16b // trans4_float
+ tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float
+
+ fadd v20.4s, v16.4s, v4.4s
+ fsub v22.4s, v16.4s, v4.4s
+ fadd v21.4s, v17.4s, v5.4s
+ st1 {v20.4s}, [x0], #16 // {z[0], z[1]}
+ fsub v23.4s, v17.4s, v5.4s
+
+ st1 {v21.4s}, [x1], #16 // {z[o1],z[o1+1]}
+ st1 {v22.4s}, [x2], #16 // {z[o2],z[o2+1]}
+ st1 {v23.4s}, [x3], #16 // {z[o3],z[o3+1]}
+ b.ne 1b
+
+ ret
+endfunc
+
+.macro def_fft n, n2, n4
+function fft\n\()_neon, align=6
+ AARCH64_VALID_JUMP_TARGET
+ AARCH64_SIGN_LINK_REGISTER
+ stp x28, x30, [sp, #-16]!
+ add x28, x0, #\n4*2*8
+ bl fft\n2\()_neon
+ mov x0, x28
+ bl fft\n4\()_neon
+ add x0, x28, #\n4*1*8
+ bl fft\n4\()_neon
+ sub x0, x28, #\n4*2*8
+ ldp x28, x30, [sp], #16
+ AARCH64_VALIDATE_LINK_REGISTER
+ movrel x4, X(ff_cos_\n)
+ mov x2, #\n4>>1
+ b fft_pass_neon
+endfunc
+.endm
+
+ def_fft 32, 16, 8
+ def_fft 64, 32, 16
+ def_fft 128, 64, 32
+ def_fft 256, 128, 64
+ def_fft 512, 256, 128
+ def_fft 1024, 512, 256
+ def_fft 2048, 1024, 512
+ def_fft 4096, 2048, 1024
+ def_fft 8192, 4096, 2048
+ def_fft 16384, 8192, 4096
+ def_fft 32768, 16384, 8192
+ def_fft 65536, 32768, 16384
+
+function ff_fft_calc_neon, export=1
+ prfm pldl1keep, [x1]
+ movrel x10, trans4_float
+ ldr w2, [x0]
+ movrel x11, trans8_float
+ sub w2, w2, #2
+ movrel x3, fft_tab_neon
+ ld1 {v30.16b}, [x10]
+ mov x7, #-8
+ movrel x12, pmmp
+ ldr x3, [x3, x2, lsl #3]
+ movrel x13, mppm
+ movrel x14, X(ff_cos_16)
+ ld1 {v31.16b}, [x11]
+ mov x0, x1
+ ld1 {v29.4s}, [x12] // pmmp
+ ld1 {v28.4s}, [x13]
+ br x3
+endfunc
+
+function ff_fft_permute_neon, export=1
+ mov x6, #1
+ ldr w2, [x0] // nbits
+ ldr x3, [x0, #16] // tmp_buf
+ ldr x0, [x0, #8] // revtab
+ lsl x6, x6, x2
+ mov x2, x6
+1:
+ ld1 {v0.2s,v1.2s}, [x1], #16
+ ldr w4, [x0], #4
+ uxth w5, w4
+ lsr w4, w4, #16
+ add x5, x3, x5, lsl #3
+ add x4, x3, x4, lsl #3
+ st1 {v0.2s}, [x5]
+ st1 {v1.2s}, [x4]
+ subs x6, x6, #2
+ b.gt 1b
+
+ sub x1, x1, x2, lsl #3
+1:
+ ld1 {v0.4s,v1.4s}, [x3], #32
+ st1 {v0.4s,v1.4s}, [x1], #32
+ subs x2, x2, #4
+ b.gt 1b
+
+ ret
+endfunc
+
+const fft_tab_neon, relocate=1
+ .quad fft4_neon
+ .quad fft8_neon
+ .quad fft16_neon
+ .quad fft32_neon
+ .quad fft64_neon
+ .quad fft128_neon
+ .quad fft256_neon
+ .quad fft512_neon
+ .quad fft1024_neon
+ .quad fft2048_neon
+ .quad fft4096_neon
+ .quad fft8192_neon
+ .quad fft16384_neon
+ .quad fft32768_neon
+ .quad fft65536_neon
+endconst
+
+const pmmp, align=4
+ .float +1.0, -1.0, -1.0, +1.0
+endconst
+
+const mppm, align=4
+ .float -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
+endconst
diff --git a/media/ffvpx/libavcodec/aarch64/h264chroma_init_aarch64.c b/media/ffvpx/libavcodec/aarch64/h264chroma_init_aarch64.c
new file mode 100644
index 0000000000..00fc7b20f1
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/h264chroma_init_aarch64.c
@@ -0,0 +1,59 @@
+/*
+ * ARM NEON optimised H.264 chroma functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavcodec/h264chroma.h"
+
+#include "config.h"
+
+void ff_put_h264_chroma_mc8_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_put_h264_chroma_mc4_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_put_h264_chroma_mc2_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+
+void ff_avg_h264_chroma_mc8_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_avg_h264_chroma_mc4_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_avg_h264_chroma_mc2_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+
+av_cold void ff_h264chroma_init_aarch64(H264ChromaContext *c, int bit_depth)
+{
+ const int high_bit_depth = bit_depth > 8;
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags) && !high_bit_depth) {
+ c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_neon;
+ c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_neon;
+ c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_neon;
+
+ c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_neon;
+ c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_neon;
+ c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_neon;
+ }
+}
diff --git a/media/ffvpx/libavcodec/aarch64/h264cmc_neon.S b/media/ffvpx/libavcodec/aarch64/h264cmc_neon.S
new file mode 100644
index 0000000000..88ccd727d0
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/h264cmc_neon.S
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config_components.h"
+
+#include "libavutil/aarch64/asm.S"
+
+/* chroma_mc8(uint8_t *dst, const uint8_t *src, ptrdiff_t stride, int h, int x, int y) */
+.macro h264_chroma_mc8 type, codec=h264
+function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
+ .ifc \type,avg
+ mov x8, x0
+ .endif
+ prfm pldl1strm, [x1]
+ prfm pldl1strm, [x1, x2]
+ .ifc \codec,rv40
+ movrel x6, rv40bias
+ lsr w9, w5, #1
+ lsr w10, w4, #1
+ lsl w9, w9, #3
+ lsl w10, w10, #1
+ add w9, w9, w10
+ add x6, x6, w9, UXTW
+ ld1r {v22.8H}, [x6]
+ .endif
+ .ifc \codec,vc1
+ movi v22.8H, #28
+ .endif
+ mul w7, w4, w5
+ lsl w14, w5, #3
+ lsl w13, w4, #3
+ cmp w7, #0
+ sub w6, w14, w7
+ sub w12, w13, w7
+ sub w4, w7, w13
+ sub w4, w4, w14
+ add w4, w4, #64
+ b.eq 2f
+
+ dup v0.8B, w4
+ dup v1.8B, w12
+ ld1 {v4.8B, v5.8B}, [x1], x2
+ dup v2.8B, w6
+ dup v3.8B, w7
+ ext v5.8B, v4.8B, v5.8B, #1
+1: ld1 {v6.8B, v7.8B}, [x1], x2
+ umull v16.8H, v4.8B, v0.8B
+ umlal v16.8H, v5.8B, v1.8B
+ ext v7.8B, v6.8B, v7.8B, #1
+ ld1 {v4.8B, v5.8B}, [x1], x2
+ umlal v16.8H, v6.8B, v2.8B
+ prfm pldl1strm, [x1]
+ ext v5.8B, v4.8B, v5.8B, #1
+ umlal v16.8H, v7.8B, v3.8B
+ umull v17.8H, v6.8B, v0.8B
+ subs w3, w3, #2
+ umlal v17.8H, v7.8B, v1.8B
+ umlal v17.8H, v4.8B, v2.8B
+ umlal v17.8H, v5.8B, v3.8B
+ prfm pldl1strm, [x1, x2]
+ .ifc \codec,h264
+ rshrn v16.8B, v16.8H, #6
+ rshrn v17.8B, v17.8H, #6
+ .else
+ add v16.8H, v16.8H, v22.8H
+ add v17.8H, v17.8H, v22.8H
+ shrn v16.8B, v16.8H, #6
+ shrn v17.8B, v17.8H, #6
+ .endif
+ .ifc \type,avg
+ ld1 {v20.8B}, [x8], x2
+ ld1 {v21.8B}, [x8], x2
+ urhadd v16.8B, v16.8B, v20.8B
+ urhadd v17.8B, v17.8B, v21.8B
+ .endif
+ st1 {v16.8B}, [x0], x2
+ st1 {v17.8B}, [x0], x2
+ b.gt 1b
+ ret
+
+2: adds w12, w12, w6
+ dup v0.8B, w4
+ b.eq 5f
+ tst w6, w6
+ dup v1.8B, w12
+ b.eq 4f
+
+ ld1 {v4.8B}, [x1], x2
+3: ld1 {v6.8B}, [x1], x2
+ umull v16.8H, v4.8B, v0.8B
+ umlal v16.8H, v6.8B, v1.8B
+ ld1 {v4.8B}, [x1], x2
+ umull v17.8H, v6.8B, v0.8B
+ umlal v17.8H, v4.8B, v1.8B
+ prfm pldl1strm, [x1]
+ .ifc \codec,h264
+ rshrn v16.8B, v16.8H, #6
+ rshrn v17.8B, v17.8H, #6
+ .else
+ add v16.8H, v16.8H, v22.8H
+ add v17.8H, v17.8H, v22.8H
+ shrn v16.8B, v16.8H, #6
+ shrn v17.8B, v17.8H, #6
+ .endif
+ prfm pldl1strm, [x1, x2]
+ .ifc \type,avg
+ ld1 {v20.8B}, [x8], x2
+ ld1 {v21.8B}, [x8], x2
+ urhadd v16.8B, v16.8B, v20.8B
+ urhadd v17.8B, v17.8B, v21.8B
+ .endif
+ subs w3, w3, #2
+ st1 {v16.8B}, [x0], x2
+ st1 {v17.8B}, [x0], x2
+ b.gt 3b
+ ret
+
+4: ld1 {v4.8B, v5.8B}, [x1], x2
+ ld1 {v6.8B, v7.8B}, [x1], x2
+ ext v5.8B, v4.8B, v5.8B, #1
+ ext v7.8B, v6.8B, v7.8B, #1
+ prfm pldl1strm, [x1]
+ subs w3, w3, #2
+ umull v16.8H, v4.8B, v0.8B
+ umlal v16.8H, v5.8B, v1.8B
+ umull v17.8H, v6.8B, v0.8B
+ umlal v17.8H, v7.8B, v1.8B
+ prfm pldl1strm, [x1, x2]
+ .ifc \codec,h264
+ rshrn v16.8B, v16.8H, #6
+ rshrn v17.8B, v17.8H, #6
+ .else
+ add v16.8H, v16.8H, v22.8H
+ add v17.8H, v17.8H, v22.8H
+ shrn v16.8B, v16.8H, #6
+ shrn v17.8B, v17.8H, #6
+ .endif
+ .ifc \type,avg
+ ld1 {v20.8B}, [x8], x2
+ ld1 {v21.8B}, [x8], x2
+ urhadd v16.8B, v16.8B, v20.8B
+ urhadd v17.8B, v17.8B, v21.8B
+ .endif
+ st1 {v16.8B}, [x0], x2
+ st1 {v17.8B}, [x0], x2
+ b.gt 4b
+ ret
+
+5: ld1 {v4.8B}, [x1], x2
+ ld1 {v5.8B}, [x1], x2
+ prfm pldl1strm, [x1]
+ subs w3, w3, #2
+ umull v16.8H, v4.8B, v0.8B
+ umull v17.8H, v5.8B, v0.8B
+ prfm pldl1strm, [x1, x2]
+ .ifc \codec,h264
+ rshrn v16.8B, v16.8H, #6
+ rshrn v17.8B, v17.8H, #6
+ .else
+ add v16.8H, v16.8H, v22.8H
+ add v17.8H, v17.8H, v22.8H
+ shrn v16.8B, v16.8H, #6
+ shrn v17.8B, v17.8H, #6
+ .endif
+ .ifc \type,avg
+ ld1 {v20.8B}, [x8], x2
+ ld1 {v21.8B}, [x8], x2
+ urhadd v16.8B, v16.8B, v20.8B
+ urhadd v17.8B, v17.8B, v21.8B
+ .endif
+ st1 {v16.8B}, [x0], x2
+ st1 {v17.8B}, [x0], x2
+ b.gt 5b
+ ret
+endfunc
+.endm
+
+/* chroma_mc4(uint8_t *dst, const uint8_t *src, ptrdiff_t stride, int h, int x, int y) */
+.macro h264_chroma_mc4 type, codec=h264
+function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
+ .ifc \type,avg
+ mov x8, x0
+ .endif
+ prfm pldl1strm, [x1]
+ prfm pldl1strm, [x1, x2]
+ .ifc \codec,rv40
+ movrel x6, rv40bias
+ lsr w9, w5, #1
+ lsr w10, w4, #1
+ lsl w9, w9, #3
+ lsl w10, w10, #1
+ add w9, w9, w10
+ add x6, x6, w9, UXTW
+ ld1r {v22.8H}, [x6]
+ .endif
+ .ifc \codec,vc1
+ movi v22.8H, #28
+ .endif
+ mul w7, w4, w5
+ lsl w14, w5, #3
+ lsl w13, w4, #3
+ cmp w7, #0
+ sub w6, w14, w7
+ sub w12, w13, w7
+ sub w4, w7, w13
+ sub w4, w4, w14
+ add w4, w4, #64
+ b.eq 2f
+
+ dup v24.8B, w4
+ dup v25.8B, w12
+ ld1 {v4.8B}, [x1], x2
+ dup v26.8B, w6
+ dup v27.8B, w7
+ ext v5.8B, v4.8B, v5.8B, #1
+ trn1 v0.2S, v24.2S, v25.2S
+ trn1 v2.2S, v26.2S, v27.2S
+ trn1 v4.2S, v4.2S, v5.2S
+1: ld1 {v6.8B}, [x1], x2
+ ext v7.8B, v6.8B, v7.8B, #1
+ trn1 v6.2S, v6.2S, v7.2S
+ umull v18.8H, v4.8B, v0.8B
+ umlal v18.8H, v6.8B, v2.8B
+ ld1 {v4.8B}, [x1], x2
+ ext v5.8B, v4.8B, v5.8B, #1
+ trn1 v4.2S, v4.2S, v5.2S
+ prfm pldl1strm, [x1]
+ umull v19.8H, v6.8B, v0.8B
+ umlal v19.8H, v4.8B, v2.8B
+ trn1 v30.2D, v18.2D, v19.2D
+ trn2 v31.2D, v18.2D, v19.2D
+ add v18.8H, v30.8H, v31.8H
+ .ifc \codec,h264
+ rshrn v16.8B, v18.8H, #6
+ .else
+ add v18.8H, v18.8H, v22.8H
+ shrn v16.8B, v18.8H, #6
+ .endif
+ subs w3, w3, #2
+ prfm pldl1strm, [x1, x2]
+ .ifc \type,avg
+ ld1 {v20.S}[0], [x8], x2
+ ld1 {v20.S}[1], [x8], x2
+ urhadd v16.8B, v16.8B, v20.8B
+ .endif
+ st1 {v16.S}[0], [x0], x2
+ st1 {v16.S}[1], [x0], x2
+ b.gt 1b
+ ret
+
+2: adds w12, w12, w6
+ dup v30.8B, w4
+ b.eq 5f
+ tst w6, w6
+ dup v31.8B, w12
+ trn1 v0.2S, v30.2S, v31.2S
+ trn2 v1.2S, v30.2S, v31.2S
+ b.eq 4f
+
+ ext v1.8B, v0.8B, v1.8B, #4
+ ld1 {v4.S}[0], [x1], x2
+3: ld1 {v4.S}[1], [x1], x2
+ umull v18.8H, v4.8B, v0.8B
+ ld1 {v4.S}[0], [x1], x2
+ umull v19.8H, v4.8B, v1.8B
+ trn1 v30.2D, v18.2D, v19.2D
+ trn2 v31.2D, v18.2D, v19.2D
+ add v18.8H, v30.8H, v31.8H
+ prfm pldl1strm, [x1]
+ .ifc \codec,h264
+ rshrn v16.8B, v18.8H, #6
+ .else
+ add v18.8H, v18.8H, v22.8H
+ shrn v16.8B, v18.8H, #6
+ .endif
+ .ifc \type,avg
+ ld1 {v20.S}[0], [x8], x2
+ ld1 {v20.S}[1], [x8], x2
+ urhadd v16.8B, v16.8B, v20.8B
+ .endif
+ subs w3, w3, #2
+ prfm pldl1strm, [x1, x2]
+ st1 {v16.S}[0], [x0], x2
+ st1 {v16.S}[1], [x0], x2
+ b.gt 3b
+ ret
+
+4: ld1 {v4.8B}, [x1], x2
+ ld1 {v6.8B}, [x1], x2
+ ext v5.8B, v4.8B, v5.8B, #1
+ ext v7.8B, v6.8B, v7.8B, #1
+ trn1 v4.2S, v4.2S, v5.2S
+ trn1 v6.2S, v6.2S, v7.2S
+ umull v18.8H, v4.8B, v0.8B
+ umull v19.8H, v6.8B, v0.8B
+ subs w3, w3, #2
+ trn1 v30.2D, v18.2D, v19.2D
+ trn2 v31.2D, v18.2D, v19.2D
+ add v18.8H, v30.8H, v31.8H
+ prfm pldl1strm, [x1]
+ .ifc \codec,h264
+ rshrn v16.8B, v18.8H, #6
+ .else
+ add v18.8H, v18.8H, v22.8H
+ shrn v16.8B, v18.8H, #6
+ .endif
+ .ifc \type,avg
+ ld1 {v20.S}[0], [x8], x2
+ ld1 {v20.S}[1], [x8], x2
+ urhadd v16.8B, v16.8B, v20.8B
+ .endif
+ prfm pldl1strm, [x1]
+ st1 {v16.S}[0], [x0], x2
+ st1 {v16.S}[1], [x0], x2
+ b.gt 4b
+ ret
+
+5: ld1 {v4.S}[0], [x1], x2
+ ld1 {v4.S}[1], [x1], x2
+ umull v18.8H, v4.8B, v30.8B
+ subs w3, w3, #2
+ prfm pldl1strm, [x1]
+ .ifc \codec,h264
+ rshrn v16.8B, v18.8H, #6
+ .else
+ add v18.8H, v18.8H, v22.8H
+ shrn v16.8B, v18.8H, #6
+ .endif
+ .ifc \type,avg
+ ld1 {v20.S}[0], [x8], x2
+ ld1 {v20.S}[1], [x8], x2
+ urhadd v16.8B, v16.8B, v20.8B
+ .endif
+ prfm pldl1strm, [x1]
+ st1 {v16.S}[0], [x0], x2
+ st1 {v16.S}[1], [x0], x2
+ b.gt 5b
+ ret
+endfunc
+.endm
+
+.macro h264_chroma_mc2 type
+function ff_\type\()_h264_chroma_mc2_neon, export=1
+ prfm pldl1strm, [x1]
+ prfm pldl1strm, [x1, x2]
+ orr w7, w4, w5
+ cbz w7, 2f
+
+ mul w7, w4, w5
+ lsl w14, w5, #3
+ lsl w13, w4, #3
+ sub w6, w14, w7
+ sub w12, w13, w7
+ sub w4, w7, w13
+ sub w4, w4, w14
+ add w4, w4, #64
+ dup v0.8B, w4
+ dup v2.8B, w12
+ dup v1.8B, w6
+ dup v3.8B, w7
+ trn1 v0.4H, v0.4H, v2.4H
+ trn1 v1.4H, v1.4H, v3.4H
+1:
+ ld1 {v4.S}[0], [x1], x2
+ ld1 {v4.S}[1], [x1], x2
+ rev64 v5.2S, v4.2S
+ ld1 {v5.S}[1], [x1]
+ ext v6.8B, v4.8B, v5.8B, #1
+ ext v7.8B, v5.8B, v4.8B, #1
+ trn1 v4.4H, v4.4H, v6.4H
+ trn1 v5.4H, v5.4H, v7.4H
+ umull v16.8H, v4.8B, v0.8B
+ umlal v16.8H, v5.8B, v1.8B
+ .ifc \type,avg
+ ld1 {v18.H}[0], [x0], x2
+ ld1 {v18.H}[2], [x0]
+ sub x0, x0, x2
+ .endif
+ rev64 v17.4S, v16.4S
+ add v16.8H, v16.8H, v17.8H
+ rshrn v16.8B, v16.8H, #6
+ .ifc \type,avg
+ urhadd v16.8B, v16.8B, v18.8B
+ .endif
+ st1 {v16.H}[0], [x0], x2
+ st1 {v16.H}[2], [x0], x2
+ subs w3, w3, #2
+ b.gt 1b
+ ret
+
+2:
+ ld1 {v16.H}[0], [x1], x2
+ ld1 {v16.H}[1], [x1], x2
+ .ifc \type,avg
+ ld1 {v18.H}[0], [x0], x2
+ ld1 {v18.H}[1], [x0]
+ sub x0, x0, x2
+ urhadd v16.8B, v16.8B, v18.8B
+ .endif
+ st1 {v16.H}[0], [x0], x2
+ st1 {v16.H}[1], [x0], x2
+ subs w3, w3, #2
+ b.gt 2b
+ ret
+endfunc
+.endm
+
+ h264_chroma_mc8 put
+ h264_chroma_mc8 avg
+ h264_chroma_mc4 put
+ h264_chroma_mc4 avg
+ h264_chroma_mc2 put
+ h264_chroma_mc2 avg
+
+#if CONFIG_RV40_DECODER
+const rv40bias
+ .short 0, 16, 32, 16
+ .short 32, 28, 32, 28
+ .short 0, 32, 16, 32
+ .short 32, 28, 32, 28
+endconst
+
+ h264_chroma_mc8 put, rv40
+ h264_chroma_mc8 avg, rv40
+ h264_chroma_mc4 put, rv40
+ h264_chroma_mc4 avg, rv40
+#endif
+
+#if CONFIG_VC1DSP
+ h264_chroma_mc8 put, vc1
+ h264_chroma_mc8 avg, vc1
+ h264_chroma_mc4 put, vc1
+ h264_chroma_mc4 avg, vc1
+#endif
diff --git a/media/ffvpx/libavcodec/aarch64/h264dsp_init_aarch64.c b/media/ffvpx/libavcodec/aarch64/h264dsp_init_aarch64.c
new file mode 100644
index 0000000000..6bf3ecb8a1
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/h264dsp_init_aarch64.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavcodec/h264dsp.h"
+
+void ff_h264_v_loop_filter_luma_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta, int8_t *tc0);
+void ff_h264_h_loop_filter_luma_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta, int8_t *tc0);
+void ff_h264_v_loop_filter_luma_intra_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta);
+void ff_h264_h_loop_filter_luma_intra_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta);
+void ff_h264_v_loop_filter_chroma_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta, int8_t *tc0);
+void ff_h264_h_loop_filter_chroma_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta, int8_t *tc0);
+void ff_h264_h_loop_filter_chroma422_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta, int8_t *tc0);
+void ff_h264_v_loop_filter_chroma_intra_neon(uint8_t *pix, ptrdiff_t stride,
+ int alpha, int beta);
+void ff_h264_h_loop_filter_chroma_intra_neon(uint8_t *pix, ptrdiff_t stride,
+ int alpha, int beta);
+void ff_h264_h_loop_filter_chroma422_intra_neon(uint8_t *pix, ptrdiff_t stride,
+ int alpha, int beta);
+void ff_h264_h_loop_filter_chroma_mbaff_intra_neon(uint8_t *pix, ptrdiff_t stride,
+ int alpha, int beta);
+
+void ff_weight_h264_pixels_16_neon(uint8_t *dst, ptrdiff_t stride, int height,
+ int log2_den, int weight, int offset);
+void ff_weight_h264_pixels_8_neon(uint8_t *dst, ptrdiff_t stride, int height,
+ int log2_den, int weight, int offset);
+void ff_weight_h264_pixels_4_neon(uint8_t *dst, ptrdiff_t stride, int height,
+ int log2_den, int weight, int offset);
+
+void ff_biweight_h264_pixels_16_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
+ int height, int log2_den, int weightd,
+ int weights, int offset);
+void ff_biweight_h264_pixels_8_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
+ int height, int log2_den, int weightd,
+ int weights, int offset);
+void ff_biweight_h264_pixels_4_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
+ int height, int log2_den, int weightd,
+ int weights, int offset);
+
+void ff_h264_idct_add_neon(uint8_t *dst, int16_t *block, int stride);
+void ff_h264_idct_dc_add_neon(uint8_t *dst, int16_t *block, int stride);
+void ff_h264_idct_add16_neon(uint8_t *dst, const int *block_offset,
+ int16_t *block, int stride,
+ const uint8_t nnzc[5 * 8]);
+void ff_h264_idct_add16intra_neon(uint8_t *dst, const int *block_offset,
+ int16_t *block, int stride,
+ const uint8_t nnzc[5 * 8]);
+void ff_h264_idct_add8_neon(uint8_t **dest, const int *block_offset,
+ int16_t *block, int stride,
+ const uint8_t nnzc[15 * 8]);
+
+void ff_h264_idct8_add_neon(uint8_t *dst, int16_t *block, int stride);
+void ff_h264_idct8_dc_add_neon(uint8_t *dst, int16_t *block, int stride);
+void ff_h264_idct8_add4_neon(uint8_t *dst, const int *block_offset,
+ int16_t *block, int stride,
+ const uint8_t nnzc[5 * 8]);
+
+void ff_h264_v_loop_filter_luma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta, int8_t *tc0);
+void ff_h264_h_loop_filter_luma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta, int8_t *tc0);
+void ff_h264_v_loop_filter_luma_intra_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta);
+void ff_h264_h_loop_filter_luma_intra_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta);
+void ff_h264_v_loop_filter_chroma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta, int8_t *tc0);
+void ff_h264_h_loop_filter_chroma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta, int8_t *tc0);
+void ff_h264_h_loop_filter_chroma422_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
+ int beta, int8_t *tc0);
+void ff_h264_v_loop_filter_chroma_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
+ int alpha, int beta);
+void ff_h264_h_loop_filter_chroma_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
+ int alpha, int beta);
+void ff_h264_h_loop_filter_chroma422_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
+ int alpha, int beta);
+void ff_h264_h_loop_filter_chroma_mbaff_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
+ int alpha, int beta);
+
+av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
+ const int chroma_format_idc)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags) && bit_depth == 8) {
+ c->h264_v_loop_filter_luma = ff_h264_v_loop_filter_luma_neon;
+ c->h264_h_loop_filter_luma = ff_h264_h_loop_filter_luma_neon;
+ c->h264_v_loop_filter_luma_intra= ff_h264_v_loop_filter_luma_intra_neon;
+ c->h264_h_loop_filter_luma_intra= ff_h264_h_loop_filter_luma_intra_neon;
+
+ c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon;
+ c->h264_v_loop_filter_chroma_intra = ff_h264_v_loop_filter_chroma_intra_neon;
+
+ if (chroma_format_idc <= 1) {
+ c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon;
+ c->h264_h_loop_filter_chroma_intra = ff_h264_h_loop_filter_chroma_intra_neon;
+ c->h264_h_loop_filter_chroma_mbaff_intra = ff_h264_h_loop_filter_chroma_mbaff_intra_neon;
+ } else {
+ c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma422_neon;
+ c->h264_h_loop_filter_chroma_mbaff = ff_h264_h_loop_filter_chroma_neon;
+ c->h264_h_loop_filter_chroma_intra = ff_h264_h_loop_filter_chroma422_intra_neon;
+ c->h264_h_loop_filter_chroma_mbaff_intra = ff_h264_h_loop_filter_chroma_intra_neon;
+ }
+
+ c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels_16_neon;
+ c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels_8_neon;
+ c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels_4_neon;
+
+ c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels_16_neon;
+ c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels_8_neon;
+ c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels_4_neon;
+
+ c->h264_idct_add = ff_h264_idct_add_neon;
+ c->h264_idct_dc_add = ff_h264_idct_dc_add_neon;
+ c->h264_idct_add16 = ff_h264_idct_add16_neon;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_neon;
+ if (chroma_format_idc <= 1)
+ c->h264_idct_add8 = ff_h264_idct_add8_neon;
+ c->h264_idct8_add = ff_h264_idct8_add_neon;
+ c->h264_idct8_dc_add = ff_h264_idct8_dc_add_neon;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_neon;
+ } else if (have_neon(cpu_flags) && bit_depth == 10) {
+ c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon_10;
+ c->h264_v_loop_filter_chroma_intra = ff_h264_v_loop_filter_chroma_intra_neon_10;
+
+ if (chroma_format_idc <= 1) {
+ c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon_10;
+ c->h264_h_loop_filter_chroma_intra = ff_h264_h_loop_filter_chroma_intra_neon_10;
+ c->h264_h_loop_filter_chroma_mbaff_intra = ff_h264_h_loop_filter_chroma_mbaff_intra_neon_10;
+ } else {
+ c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma422_neon_10;
+ c->h264_h_loop_filter_chroma_mbaff = ff_h264_h_loop_filter_chroma_neon_10;
+ c->h264_h_loop_filter_chroma_intra = ff_h264_h_loop_filter_chroma422_intra_neon_10;
+ c->h264_h_loop_filter_chroma_mbaff_intra = ff_h264_h_loop_filter_chroma_intra_neon_10;
+ }
+ }
+}
diff --git a/media/ffvpx/libavcodec/aarch64/h264dsp_neon.S b/media/ffvpx/libavcodec/aarch64/h264dsp_neon.S
new file mode 100644
index 0000000000..ea221e6862
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/h264dsp_neon.S
@@ -0,0 +1,1076 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net>
+ * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+#include "neon.S"
+
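+// Shared prologue for the tc0-based (non-intra) loop filters below: pack the four
+// tc0 values from x4 into v24 and return to the caller early when there is nothing
+// to filter (thresholds disable filtering, or all four tc0 values are negative).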
+.macro h264_loop_filter_start
+ cmp w2, #0
+ ldr w6, [x4]
+ ccmp w3, #0, #0, ne
+ mov v24.S[0], w6
+ and w8, w6, w6, lsl #16
+ b.eq 1f
+ ands w8, w8, w8, lsl #8
+ b.ge 2f
+1:
+ ret
+2:
+.endm
+
+.macro h264_loop_filter_luma
+ dup v22.16B, w2 // alpha
+ uxtl v24.8H, v24.8B
+ uabd v21.16B, v16.16B, v0.16B // abs(p0 - q0)
+ uxtl v24.4S, v24.4H
+ uabd v28.16B, v18.16B, v16.16B // abs(p1 - p0)
+ sli v24.8H, v24.8H, #8
+ uabd v30.16B, v2.16B, v0.16B // abs(q1 - q0)
+ sli v24.4S, v24.4S, #16
+ cmhi v21.16B, v22.16B, v21.16B // < alpha
+ dup v22.16B, w3 // beta
+ cmlt v23.16B, v24.16B, #0
+ cmhi v28.16B, v22.16B, v28.16B // < beta
+ cmhi v30.16B, v22.16B, v30.16B // < beta
+ bic v21.16B, v21.16B, v23.16B
+ uabd v17.16B, v20.16B, v16.16B // abs(p2 - p0)
+ and v21.16B, v21.16B, v28.16B
+ uabd v19.16B, v4.16B, v0.16B // abs(q2 - q0)
+ and v21.16B, v21.16B, v30.16B // < beta
+ shrn v30.8b, v21.8h, #4
+ mov x7, v30.d[0]
+ cmhi v17.16B, v22.16B, v17.16B // < beta
+ cmhi v19.16B, v22.16B, v19.16B // < beta
+ cbz x7, 9f
+ and v17.16B, v17.16B, v21.16B
+ and v19.16B, v19.16B, v21.16B
+ and v24.16B, v24.16B, v21.16B
+ urhadd v28.16B, v16.16B, v0.16B
+ sub v21.16B, v24.16B, v17.16B
+ uqadd v23.16B, v18.16B, v24.16B
+ uhadd v20.16B, v20.16B, v28.16B
+ sub v21.16B, v21.16B, v19.16B
+ uhadd v28.16B, v4.16B, v28.16B
+ umin v23.16B, v23.16B, v20.16B
+ uqsub v22.16B, v18.16B, v24.16B
+ uqadd v4.16B, v2.16B, v24.16B
+ umax v23.16B, v23.16B, v22.16B
+ uqsub v22.16B, v2.16B, v24.16B
+ umin v28.16B, v4.16B, v28.16B
+ uxtl v4.8H, v0.8B
+ umax v28.16B, v28.16B, v22.16B
+ uxtl2 v20.8H, v0.16B
+ usubw v4.8H, v4.8H, v16.8B
+ usubw2 v20.8H, v20.8H, v16.16B
+ shl v4.8H, v4.8H, #2
+ shl v20.8H, v20.8H, #2
+ uaddw v4.8H, v4.8H, v18.8B
+ uaddw2 v20.8H, v20.8H, v18.16B
+ usubw v4.8H, v4.8H, v2.8B
+ usubw2 v20.8H, v20.8H, v2.16B
+ rshrn v4.8B, v4.8H, #3
+ rshrn2 v4.16B, v20.8H, #3
+ bsl v17.16B, v23.16B, v18.16B
+ bsl v19.16B, v28.16B, v2.16B
+ neg v23.16B, v21.16B
+ uxtl v28.8H, v16.8B
+ smin v4.16B, v4.16B, v21.16B
+ uxtl2 v21.8H, v16.16B
+ smax v4.16B, v4.16B, v23.16B
+ uxtl v22.8H, v0.8B
+ uxtl2 v24.8H, v0.16B
+ saddw v28.8H, v28.8H, v4.8B
+ saddw2 v21.8H, v21.8H, v4.16B
+ ssubw v22.8H, v22.8H, v4.8B
+ ssubw2 v24.8H, v24.8H, v4.16B
+ sqxtun v16.8B, v28.8H
+ sqxtun2 v16.16B, v21.8H
+ sqxtun v0.8B, v22.8H
+ sqxtun2 v0.16B, v24.8H
+.endm
+
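+// Luma deblocking, AAPCS64 arguments: x0 = pix, x1 = stride, w2 = alpha,
+// w3 = beta, x4 = tc0.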
+function ff_h264_v_loop_filter_luma_neon, export=1
+ h264_loop_filter_start
+
+ ld1 {v0.16B}, [x0], x1
+ ld1 {v2.16B}, [x0], x1
+ ld1 {v4.16B}, [x0], x1
+ sub x0, x0, x1, lsl #2
+ sub x0, x0, x1, lsl #1
+ ld1 {v20.16B}, [x0], x1
+ ld1 {v18.16B}, [x0], x1
+ ld1 {v16.16B}, [x0], x1
+
+ h264_loop_filter_luma
+
+ sub x0, x0, x1, lsl #1
+ st1 {v17.16B}, [x0], x1
+ st1 {v16.16B}, [x0], x1
+ st1 {v0.16B}, [x0], x1
+ st1 {v19.16B}, [x0]
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_luma_neon, export=1
+ h264_loop_filter_start
+
+ sub x0, x0, #4
+ ld1 {v6.8B}, [x0], x1
+ ld1 {v20.8B}, [x0], x1
+ ld1 {v18.8B}, [x0], x1
+ ld1 {v16.8B}, [x0], x1
+ ld1 {v0.8B}, [x0], x1
+ ld1 {v2.8B}, [x0], x1
+ ld1 {v4.8B}, [x0], x1
+ ld1 {v26.8B}, [x0], x1
+ ld1 {v6.D}[1], [x0], x1
+ ld1 {v20.D}[1], [x0], x1
+ ld1 {v18.D}[1], [x0], x1
+ ld1 {v16.D}[1], [x0], x1
+ ld1 {v0.D}[1], [x0], x1
+ ld1 {v2.D}[1], [x0], x1
+ ld1 {v4.D}[1], [x0], x1
+ ld1 {v26.D}[1], [x0], x1
+
+ transpose_8x16B v6, v20, v18, v16, v0, v2, v4, v26, v21, v23
+
+ h264_loop_filter_luma
+
+ transpose_4x16B v17, v16, v0, v19, v21, v23, v25, v27
+
+ sub x0, x0, x1, lsl #4
+ add x0, x0, #2
+ st1 {v17.S}[0], [x0], x1
+ st1 {v16.S}[0], [x0], x1
+ st1 {v0.S}[0], [x0], x1
+ st1 {v19.S}[0], [x0], x1
+ st1 {v17.S}[1], [x0], x1
+ st1 {v16.S}[1], [x0], x1
+ st1 {v0.S}[1], [x0], x1
+ st1 {v19.S}[1], [x0], x1
+ st1 {v17.S}[2], [x0], x1
+ st1 {v16.S}[2], [x0], x1
+ st1 {v0.S}[2], [x0], x1
+ st1 {v19.S}[2], [x0], x1
+ st1 {v17.S}[3], [x0], x1
+ st1 {v16.S}[3], [x0], x1
+ st1 {v0.S}[3], [x0], x1
+ st1 {v19.S}[3], [x0], x1
+9:
+ ret
+endfunc
+
+
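+// The intra filters receive only alpha/beta; return immediately when both are zero.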
+.macro h264_loop_filter_start_intra
+ orr w4, w2, w3
+ cbnz w4, 1f
+ ret
+1:
+ dup v30.16b, w2 // alpha
+ dup v31.16b, w3 // beta
+.endm
+
+.macro h264_loop_filter_luma_intra
+ uabd v16.16b, v7.16b, v0.16b // abs(p0 - q0)
+ uabd v17.16b, v6.16b, v7.16b // abs(p1 - p0)
+ uabd v18.16b, v1.16b, v0.16b // abs(q1 - q0)
+ cmhi v19.16b, v30.16b, v16.16b // < alpha
+ cmhi v17.16b, v31.16b, v17.16b // < beta
+ cmhi v18.16b, v31.16b, v18.16b // < beta
+
+ movi v29.16b, #2
+ ushr v30.16b, v30.16b, #2 // alpha >> 2
+ add v30.16b, v30.16b, v29.16b // (alpha >> 2) + 2
+ cmhi v16.16b, v30.16b, v16.16b // < (alpha >> 2) + 2
+
+ and v19.16b, v19.16b, v17.16b
+ and v19.16b, v19.16b, v18.16b
+ shrn v20.8b, v19.8h, #4
+ mov x4, v20.d[0]
+ cbz x4, 9f
+
+ ushll v20.8h, v6.8b, #1
+ ushll v22.8h, v1.8b, #1
+ ushll2 v21.8h, v6.16b, #1
+ ushll2 v23.8h, v1.16b, #1
+ uaddw v20.8h, v20.8h, v7.8b
+ uaddw v22.8h, v22.8h, v0.8b
+ uaddw2 v21.8h, v21.8h, v7.16b
+ uaddw2 v23.8h, v23.8h, v0.16b
+ uaddw v20.8h, v20.8h, v1.8b
+ uaddw v22.8h, v22.8h, v6.8b
+ uaddw2 v21.8h, v21.8h, v1.16b
+ uaddw2 v23.8h, v23.8h, v6.16b
+
+ rshrn v24.8b, v20.8h, #2 // p0'_1
+ rshrn v25.8b, v22.8h, #2 // q0'_1
+ rshrn2 v24.16b, v21.8h, #2 // p0'_1
+ rshrn2 v25.16b, v23.8h, #2 // q0'_1
+
+ uabd v17.16b, v5.16b, v7.16b // abs(p2 - p0)
+ uabd v18.16b, v2.16b, v0.16b // abs(q2 - q0)
+ cmhi v17.16b, v31.16b, v17.16b // < beta
+ cmhi v18.16b, v31.16b, v18.16b // < beta
+
+ and v17.16b, v16.16b, v17.16b // if_2 && if_3
+ and v18.16b, v16.16b, v18.16b // if_2 && if_4
+
+ not v30.16b, v17.16b
+ not v31.16b, v18.16b
+
+ and v30.16b, v30.16b, v19.16b // if_1 && !(if_2 && if_3)
+ and v31.16b, v31.16b, v19.16b // if_1 && !(if_2 && if_4)
+
+ and v17.16b, v19.16b, v17.16b // if_1 && if_2 && if_3
+ and v18.16b, v19.16b, v18.16b // if_1 && if_2 && if_4
+
+ //calc p, v7, v6, v5, v4, v17, v7, v6, v5, v4
+ uaddl v26.8h, v5.8b, v7.8b
+ uaddl2 v27.8h, v5.16b, v7.16b
+ uaddw v26.8h, v26.8h, v0.8b
+ uaddw2 v27.8h, v27.8h, v0.16b
+ add v20.8h, v20.8h, v26.8h
+ add v21.8h, v21.8h, v27.8h
+ uaddw v20.8h, v20.8h, v0.8b
+ uaddw2 v21.8h, v21.8h, v0.16b
+ rshrn v20.8b, v20.8h, #3 // p0'_2
+ rshrn2 v20.16b, v21.8h, #3 // p0'_2
+ uaddw v26.8h, v26.8h, v6.8b
+ uaddw2 v27.8h, v27.8h, v6.16b
+ rshrn v21.8b, v26.8h, #2 // p1'_2
+ rshrn2 v21.16b, v27.8h, #2 // p1'_2
+ uaddl v28.8h, v4.8b, v5.8b
+ uaddl2 v29.8h, v4.16b, v5.16b
+ shl v28.8h, v28.8h, #1
+ shl v29.8h, v29.8h, #1
+ add v28.8h, v28.8h, v26.8h
+ add v29.8h, v29.8h, v27.8h
+ rshrn v19.8b, v28.8h, #3 // p2'_2
+ rshrn2 v19.16b, v29.8h, #3 // p2'_2
+
+ //calc q, v0, v1, v2, v3, v18, v0, v1, v2, v3
+ uaddl v26.8h, v2.8b, v0.8b
+ uaddl2 v27.8h, v2.16b, v0.16b
+ uaddw v26.8h, v26.8h, v7.8b
+ uaddw2 v27.8h, v27.8h, v7.16b
+ add v22.8h, v22.8h, v26.8h
+ add v23.8h, v23.8h, v27.8h
+ uaddw v22.8h, v22.8h, v7.8b
+ uaddw2 v23.8h, v23.8h, v7.16b
+ rshrn v22.8b, v22.8h, #3 // q0'_2
+ rshrn2 v22.16b, v23.8h, #3 // q0'_2
+ uaddw v26.8h, v26.8h, v1.8b
+ uaddw2 v27.8h, v27.8h, v1.16b
+ rshrn v23.8b, v26.8h, #2 // q1'_2
+ rshrn2 v23.16b, v27.8h, #2 // q1'_2
+ uaddl v28.8h, v2.8b, v3.8b
+ uaddl2 v29.8h, v2.16b, v3.16b
+ shl v28.8h, v28.8h, #1
+ shl v29.8h, v29.8h, #1
+ add v28.8h, v28.8h, v26.8h
+ add v29.8h, v29.8h, v27.8h
+ rshrn v26.8b, v28.8h, #3 // q2'_2
+ rshrn2 v26.16b, v29.8h, #3 // q2'_2
+
+ bit v7.16b, v24.16b, v30.16b // p0'_1
+ bit v0.16b, v25.16b, v31.16b // q0'_1
+ bit v7.16b, v20.16b, v17.16b // p0'_2
+ bit v6.16b, v21.16b, v17.16b // p1'_2
+ bit v5.16b, v19.16b, v17.16b // p2'_2
+ bit v0.16b, v22.16b, v18.16b // q0'_2
+ bit v1.16b, v23.16b, v18.16b // q1'_2
+ bit v2.16b, v26.16b, v18.16b // q2'_2
+.endm
+
+function ff_h264_v_loop_filter_luma_intra_neon, export=1
+ h264_loop_filter_start_intra
+
+ ld1 {v0.16b}, [x0], x1 // q0
+ ld1 {v1.16b}, [x0], x1 // q1
+ ld1 {v2.16b}, [x0], x1 // q2
+ ld1 {v3.16b}, [x0], x1 // q3
+ sub x0, x0, x1, lsl #3
+ ld1 {v4.16b}, [x0], x1 // p3
+ ld1 {v5.16b}, [x0], x1 // p2
+ ld1 {v6.16b}, [x0], x1 // p1
+ ld1 {v7.16b}, [x0] // p0
+
+ h264_loop_filter_luma_intra
+
+ sub x0, x0, x1, lsl #1
+ st1 {v5.16b}, [x0], x1 // p2
+ st1 {v6.16b}, [x0], x1 // p1
+ st1 {v7.16b}, [x0], x1 // p0
+ st1 {v0.16b}, [x0], x1 // q0
+ st1 {v1.16b}, [x0], x1 // q1
+ st1 {v2.16b}, [x0] // q2
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_luma_intra_neon, export=1
+ h264_loop_filter_start_intra
+
+ sub x0, x0, #4
+ ld1 {v4.8b}, [x0], x1
+ ld1 {v5.8b}, [x0], x1
+ ld1 {v6.8b}, [x0], x1
+ ld1 {v7.8b}, [x0], x1
+ ld1 {v0.8b}, [x0], x1
+ ld1 {v1.8b}, [x0], x1
+ ld1 {v2.8b}, [x0], x1
+ ld1 {v3.8b}, [x0], x1
+ ld1 {v4.d}[1], [x0], x1
+ ld1 {v5.d}[1], [x0], x1
+ ld1 {v6.d}[1], [x0], x1
+ ld1 {v7.d}[1], [x0], x1
+ ld1 {v0.d}[1], [x0], x1
+ ld1 {v1.d}[1], [x0], x1
+ ld1 {v2.d}[1], [x0], x1
+ ld1 {v3.d}[1], [x0], x1
+
+ transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
+
+ h264_loop_filter_luma_intra
+
+ transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
+
+ sub x0, x0, x1, lsl #4
+ st1 {v4.8b}, [x0], x1
+ st1 {v5.8b}, [x0], x1
+ st1 {v6.8b}, [x0], x1
+ st1 {v7.8b}, [x0], x1
+ st1 {v0.8b}, [x0], x1
+ st1 {v1.8b}, [x0], x1
+ st1 {v2.8b}, [x0], x1
+ st1 {v3.8b}, [x0], x1
+ st1 {v4.d}[1], [x0], x1
+ st1 {v5.d}[1], [x0], x1
+ st1 {v6.d}[1], [x0], x1
+ st1 {v7.d}[1], [x0], x1
+ st1 {v0.d}[1], [x0], x1
+ st1 {v1.d}[1], [x0], x1
+ st1 {v2.d}[1], [x0], x1
+ st1 {v3.d}[1], [x0], x1
+9:
+ ret
+endfunc
+
+.macro h264_loop_filter_chroma
+ dup v22.8B, w2 // alpha
+ dup v23.8B, w3 // beta
+ uxtl v24.8H, v24.8B
+ uabd v26.8B, v16.8B, v0.8B // abs(p0 - q0)
+ uabd v28.8B, v18.8B, v16.8B // abs(p1 - p0)
+ uabd v30.8B, v2.8B, v0.8B // abs(q1 - q0)
+ cmhi v26.8B, v22.8B, v26.8B // < alpha
+ cmhi v28.8B, v23.8B, v28.8B // < beta
+ cmhi v30.8B, v23.8B, v30.8B // < beta
+ uxtl v4.8H, v0.8B
+ and v26.8B, v26.8B, v28.8B
+ usubw v4.8H, v4.8H, v16.8B
+ and v26.8B, v26.8B, v30.8B
+ shl v4.8H, v4.8H, #2
+ mov x8, v26.d[0]
+ sli v24.8H, v24.8H, #8
+ uaddw v4.8H, v4.8H, v18.8B
+ cbz x8, 9f
+ usubw v4.8H, v4.8H, v2.8B
+ rshrn v4.8B, v4.8H, #3
+ smin v4.8B, v4.8B, v24.8B
+ neg v25.8B, v24.8B
+ smax v4.8B, v4.8B, v25.8B
+ uxtl v22.8H, v0.8B
+ and v4.8B, v4.8B, v26.8B
+ uxtl v28.8H, v16.8B
+ saddw v28.8H, v28.8H, v4.8B
+ ssubw v22.8H, v22.8H, v4.8B
+ sqxtun v16.8B, v28.8H
+ sqxtun v0.8B, v22.8H
+.endm
+
+function ff_h264_v_loop_filter_chroma_neon, export=1
+ h264_loop_filter_start
+
+ sub x0, x0, x1, lsl #1
+ ld1 {v18.8B}, [x0], x1
+ ld1 {v16.8B}, [x0], x1
+ ld1 {v0.8B}, [x0], x1
+ ld1 {v2.8B}, [x0]
+
+ h264_loop_filter_chroma
+
+ sub x0, x0, x1, lsl #1
+ st1 {v16.8B}, [x0], x1
+ st1 {v0.8B}, [x0], x1
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_chroma_neon, export=1
+ h264_loop_filter_start
+
+ sub x0, x0, #2
+h_loop_filter_chroma420:
+ ld1 {v18.S}[0], [x0], x1
+ ld1 {v16.S}[0], [x0], x1
+ ld1 {v0.S}[0], [x0], x1
+ ld1 {v2.S}[0], [x0], x1
+ ld1 {v18.S}[1], [x0], x1
+ ld1 {v16.S}[1], [x0], x1
+ ld1 {v0.S}[1], [x0], x1
+ ld1 {v2.S}[1], [x0], x1
+
+ transpose_4x8B v18, v16, v0, v2, v28, v29, v30, v31
+
+ h264_loop_filter_chroma
+
+ transpose_4x8B v18, v16, v0, v2, v28, v29, v30, v31
+
+ sub x0, x0, x1, lsl #3
+ st1 {v18.S}[0], [x0], x1
+ st1 {v16.S}[0], [x0], x1
+ st1 {v0.S}[0], [x0], x1
+ st1 {v2.S}[0], [x0], x1
+ st1 {v18.S}[1], [x0], x1
+ st1 {v16.S}[1], [x0], x1
+ st1 {v0.S}[1], [x0], x1
+ st1 {v2.S}[1], [x0], x1
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_chroma422_neon, export=1
+ h264_loop_filter_start
+ add x5, x0, x1
+ sub x0, x0, #2
+ add x1, x1, x1
+ mov x7, x30
+ bl h_loop_filter_chroma420
+ mov x30, x7
+ sub x0, x5, #2
+ mov v24.s[0], w6
+ b h_loop_filter_chroma420
+endfunc
+
+.macro h264_loop_filter_chroma_intra
+ uabd v26.8b, v16.8b, v17.8b // abs(p0 - q0)
+ uabd v27.8b, v18.8b, v16.8b // abs(p1 - p0)
+ uabd v28.8b, v19.8b, v17.8b // abs(q1 - q0)
+ cmhi v26.8b, v30.8b, v26.8b // < alpha
+ cmhi v27.8b, v31.8b, v27.8b // < beta
+ cmhi v28.8b, v31.8b, v28.8b // < beta
+ and v26.8b, v26.8b, v27.8b
+ and v26.8b, v26.8b, v28.8b
+ mov x2, v26.d[0]
+
+ ushll v4.8h, v18.8b, #1
+ ushll v6.8h, v19.8b, #1
+ cbz x2, 9f
+ uaddl v20.8h, v16.8b, v19.8b
+ uaddl v22.8h, v17.8b, v18.8b
+ add v20.8h, v20.8h, v4.8h
+ add v22.8h, v22.8h, v6.8h
+ uqrshrn v24.8b, v20.8h, #2
+ uqrshrn v25.8b, v22.8h, #2
+ bit v16.8b, v24.8b, v26.8b
+ bit v17.8b, v25.8b, v26.8b
+.endm
+
+function ff_h264_v_loop_filter_chroma_intra_neon, export=1
+ h264_loop_filter_start_intra
+
+ sub x0, x0, x1, lsl #1
+ ld1 {v18.8b}, [x0], x1
+ ld1 {v16.8b}, [x0], x1
+ ld1 {v17.8b}, [x0], x1
+ ld1 {v19.8b}, [x0]
+
+ h264_loop_filter_chroma_intra
+
+ sub x0, x0, x1, lsl #1
+ st1 {v16.8b}, [x0], x1
+ st1 {v17.8b}, [x0], x1
+
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_chroma_mbaff_intra_neon, export=1
+ h264_loop_filter_start_intra
+
+ sub x4, x0, #2
+ sub x0, x0, #1
+ ld1 {v18.8b}, [x4], x1
+ ld1 {v16.8b}, [x4], x1
+ ld1 {v17.8b}, [x4], x1
+ ld1 {v19.8b}, [x4], x1
+
+ transpose_4x8B v18, v16, v17, v19, v26, v27, v28, v29
+
+ h264_loop_filter_chroma_intra
+
+ st2 {v16.b,v17.b}[0], [x0], x1
+ st2 {v16.b,v17.b}[1], [x0], x1
+ st2 {v16.b,v17.b}[2], [x0], x1
+ st2 {v16.b,v17.b}[3], [x0], x1
+
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_chroma_intra_neon, export=1
+ h264_loop_filter_start_intra
+
+ sub x4, x0, #2
+ sub x0, x0, #1
+h_loop_filter_chroma420_intra:
+ ld1 {v18.8b}, [x4], x1
+ ld1 {v16.8b}, [x4], x1
+ ld1 {v17.8b}, [x4], x1
+ ld1 {v19.8b}, [x4], x1
+ ld1 {v18.s}[1], [x4], x1
+ ld1 {v16.s}[1], [x4], x1
+ ld1 {v17.s}[1], [x4], x1
+ ld1 {v19.s}[1], [x4], x1
+
+ transpose_4x8B v18, v16, v17, v19, v26, v27, v28, v29
+
+ h264_loop_filter_chroma_intra
+
+ st2 {v16.b,v17.b}[0], [x0], x1
+ st2 {v16.b,v17.b}[1], [x0], x1
+ st2 {v16.b,v17.b}[2], [x0], x1
+ st2 {v16.b,v17.b}[3], [x0], x1
+ st2 {v16.b,v17.b}[4], [x0], x1
+ st2 {v16.b,v17.b}[5], [x0], x1
+ st2 {v16.b,v17.b}[6], [x0], x1
+ st2 {v16.b,v17.b}[7], [x0], x1
+
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_chroma422_intra_neon, export=1
+ h264_loop_filter_start_intra
+ sub x4, x0, #2
+ add x5, x0, x1, lsl #3
+ sub x0, x0, #1
+ mov x7, x30
+ bl h_loop_filter_chroma420_intra
+ sub x0, x5, #1
+ mov x30, x7
+ b h_loop_filter_chroma420_intra
+endfunc
+
+.macro biweight_16 macs, macd
+ dup v0.16B, w5
+ dup v1.16B, w6
+ mov v4.16B, v16.16B
+ mov v6.16B, v16.16B
+1: subs w3, w3, #2
+ ld1 {v20.16B}, [x0], x2
+ \macd v4.8H, v0.8B, v20.8B
+ \macd\()2 v6.8H, v0.16B, v20.16B
+ ld1 {v22.16B}, [x1], x2
+ \macs v4.8H, v1.8B, v22.8B
+ \macs\()2 v6.8H, v1.16B, v22.16B
+ mov v24.16B, v16.16B
+ ld1 {v28.16B}, [x0], x2
+ mov v26.16B, v16.16B
+ \macd v24.8H, v0.8B, v28.8B
+ \macd\()2 v26.8H, v0.16B, v28.16B
+ ld1 {v30.16B}, [x1], x2
+ \macs v24.8H, v1.8B, v30.8B
+ \macs\()2 v26.8H, v1.16B, v30.16B
+ sshl v4.8H, v4.8H, v18.8H
+ sshl v6.8H, v6.8H, v18.8H
+ sqxtun v4.8B, v4.8H
+ sqxtun2 v4.16B, v6.8H
+ sshl v24.8H, v24.8H, v18.8H
+ sshl v26.8H, v26.8H, v18.8H
+ sqxtun v24.8B, v24.8H
+ sqxtun2 v24.16B, v26.8H
+ mov v6.16B, v16.16B
+ st1 {v4.16B}, [x7], x2
+ mov v4.16B, v16.16B
+ st1 {v24.16B}, [x7], x2
+ b.ne 1b
+ ret
+.endm
+
+.macro biweight_8 macs, macd
+ dup v0.8B, w5
+ dup v1.8B, w6
+ mov v2.16B, v16.16B
+ mov v20.16B, v16.16B
+1: subs w3, w3, #2
+ ld1 {v4.8B}, [x0], x2
+ \macd v2.8H, v0.8B, v4.8B
+ ld1 {v5.8B}, [x1], x2
+ \macs v2.8H, v1.8B, v5.8B
+ ld1 {v6.8B}, [x0], x2
+ \macd v20.8H, v0.8B, v6.8B
+ ld1 {v7.8B}, [x1], x2
+ \macs v20.8H, v1.8B, v7.8B
+ sshl v2.8H, v2.8H, v18.8H
+ sqxtun v2.8B, v2.8H
+ sshl v20.8H, v20.8H, v18.8H
+ sqxtun v4.8B, v20.8H
+ mov v20.16B, v16.16B
+ st1 {v2.8B}, [x7], x2
+ mov v2.16B, v16.16B
+ st1 {v4.8B}, [x7], x2
+ b.ne 1b
+ ret
+.endm
+
+.macro biweight_4 macs, macd
+ dup v0.8B, w5
+ dup v1.8B, w6
+ mov v2.16B, v16.16B
+ mov v20.16B,v16.16B
+1: subs w3, w3, #4
+ ld1 {v4.S}[0], [x0], x2
+ ld1 {v4.S}[1], [x0], x2
+ \macd v2.8H, v0.8B, v4.8B
+ ld1 {v5.S}[0], [x1], x2
+ ld1 {v5.S}[1], [x1], x2
+ \macs v2.8H, v1.8B, v5.8B
+ b.lt 2f
+ ld1 {v6.S}[0], [x0], x2
+ ld1 {v6.S}[1], [x0], x2
+ \macd v20.8H, v0.8B, v6.8B
+ ld1 {v7.S}[0], [x1], x2
+ ld1 {v7.S}[1], [x1], x2
+ \macs v20.8H, v1.8B, v7.8B
+ sshl v2.8H, v2.8H, v18.8H
+ sqxtun v2.8B, v2.8H
+ sshl v20.8H, v20.8H, v18.8H
+ sqxtun v4.8B, v20.8H
+ mov v20.16B, v16.16B
+ st1 {v2.S}[0], [x7], x2
+ st1 {v2.S}[1], [x7], x2
+ mov v2.16B, v16.16B
+ st1 {v4.S}[0], [x7], x2
+ st1 {v4.S}[1], [x7], x2
+ b.ne 1b
+ ret
+2: sshl v2.8H, v2.8H, v18.8H
+ sqxtun v2.8B, v2.8H
+ st1 {v2.S}[0], [x7], x2
+ st1 {v2.S}[1], [x7], x2
+ ret
+.endm
+
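+// Bi-directional weighted prediction entry points: the sign bits of the two
+// weights select one of four umlal/umlsl accumulation variants.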
+.macro biweight_func w
+function ff_biweight_h264_pixels_\w\()_neon, export=1
+ lsr w8, w5, #31
+ add w7, w7, #1
+ eor w8, w8, w6, lsr #30
+ orr w7, w7, #1
+ dup v18.8H, w4
+ lsl w7, w7, w4
+ not v18.16B, v18.16B
+ dup v16.8H, w7
+ mov x7, x0
+ cbz w8, 10f
+ subs w8, w8, #1
+ b.eq 20f
+ subs w8, w8, #1
+ b.eq 30f
+ b 40f
+10: biweight_\w umlal, umlal
+20: neg w5, w5
+ biweight_\w umlal, umlsl
+30: neg w5, w5
+ neg w6, w6
+ biweight_\w umlsl, umlsl
+40: neg w6, w6
+ biweight_\w umlsl, umlal
+endfunc
+.endm
+
+ biweight_func 16
+ biweight_func 8
+ biweight_func 4
+
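+// Uni-directional weighted prediction: separate code paths handle negative
+// weights and small log2 denominators.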
+.macro weight_16 add
+ dup v0.16B, w4
+1: subs w2, w2, #2
+ ld1 {v20.16B}, [x0], x1
+ umull v4.8H, v0.8B, v20.8B
+ umull2 v6.8H, v0.16B, v20.16B
+ ld1 {v28.16B}, [x0], x1
+ umull v24.8H, v0.8B, v28.8B
+ umull2 v26.8H, v0.16B, v28.16B
+ \add v4.8H, v16.8H, v4.8H
+ srshl v4.8H, v4.8H, v18.8H
+ \add v6.8H, v16.8H, v6.8H
+ srshl v6.8H, v6.8H, v18.8H
+ sqxtun v4.8B, v4.8H
+ sqxtun2 v4.16B, v6.8H
+ \add v24.8H, v16.8H, v24.8H
+ srshl v24.8H, v24.8H, v18.8H
+ \add v26.8H, v16.8H, v26.8H
+ srshl v26.8H, v26.8H, v18.8H
+ sqxtun v24.8B, v24.8H
+ sqxtun2 v24.16B, v26.8H
+ st1 {v4.16B}, [x5], x1
+ st1 {v24.16B}, [x5], x1
+ b.ne 1b
+ ret
+.endm
+
+.macro weight_8 add
+ dup v0.8B, w4
+1: subs w2, w2, #2
+ ld1 {v4.8B}, [x0], x1
+ umull v2.8H, v0.8B, v4.8B
+ ld1 {v6.8B}, [x0], x1
+ umull v20.8H, v0.8B, v6.8B
+ \add v2.8H, v16.8H, v2.8H
+ srshl v2.8H, v2.8H, v18.8H
+ sqxtun v2.8B, v2.8H
+ \add v20.8H, v16.8H, v20.8H
+ srshl v20.8H, v20.8H, v18.8H
+ sqxtun v4.8B, v20.8H
+ st1 {v2.8B}, [x5], x1
+ st1 {v4.8B}, [x5], x1
+ b.ne 1b
+ ret
+.endm
+
+.macro weight_4 add
+ dup v0.8B, w4
+1: subs w2, w2, #4
+ ld1 {v4.S}[0], [x0], x1
+ ld1 {v4.S}[1], [x0], x1
+ umull v2.8H, v0.8B, v4.8B
+ b.lt 2f
+ ld1 {v6.S}[0], [x0], x1
+ ld1 {v6.S}[1], [x0], x1
+ umull v20.8H, v0.8B, v6.8B
+ \add v2.8H, v16.8H, v2.8H
+ srshl v2.8H, v2.8H, v18.8H
+ sqxtun v2.8B, v2.8H
+ \add v20.8H, v16.8H, v20.8H
+ srshl v20.8H, v20.8h, v18.8H
+ sqxtun v4.8B, v20.8H
+ st1 {v2.S}[0], [x5], x1
+ st1 {v2.S}[1], [x5], x1
+ st1 {v4.S}[0], [x5], x1
+ st1 {v4.S}[1], [x5], x1
+ b.ne 1b
+ ret
+2: \add v2.8H, v16.8H, v2.8H
+ srshl v2.8H, v2.8H, v18.8H
+ sqxtun v2.8B, v2.8H
+ st1 {v2.S}[0], [x5], x1
+ st1 {v2.S}[1], [x5], x1
+ ret
+.endm
+
+.macro weight_func w
+function ff_weight_h264_pixels_\w\()_neon, export=1
+ cmp w3, #1
+ mov w6, #1
+ lsl w5, w5, w3
+ dup v16.8H, w5
+ mov x5, x0
+ b.le 20f
+ sub w6, w6, w3
+ dup v18.8H, w6
+ cmp w4, #0
+ b.lt 10f
+ weight_\w shadd
+10: neg w4, w4
+ weight_\w shsub
+20: neg w6, w3
+ dup v18.8H, w6
+ cmp w4, #0
+ b.lt 10f
+ weight_\w add
+10: neg w4, w4
+ weight_\w sub
+endfunc
+.endm
+
+ weight_func 16
+ weight_func 8
+ weight_func 4
+
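+// 10-bit variants: the 8-bit-range alpha/beta thresholds are scaled up by
+// BIT_DEPTH - 8 = 2 on entry.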
+.macro h264_loop_filter_start_10
+ cmp w2, #0
+ ldr w6, [x4]
+ ccmp w3, #0, #0, ne
+ lsl w2, w2, #2
+ mov v24.S[0], w6
+ lsl w3, w3, #2
+ and w8, w6, w6, lsl #16
+ b.eq 1f
+ ands w8, w8, w8, lsl #8
+ b.ge 2f
+1:
+ ret
+2:
+.endm
+
+.macro h264_loop_filter_start_intra_10
+ orr w4, w2, w3
+ cbnz w4, 1f
+ ret
+1:
+ lsl w2, w2, #2
+ lsl w3, w3, #2
+ dup v30.8h, w2 // alpha
+ dup v31.8h, w3 // beta
+.endm
+
+.macro h264_loop_filter_chroma_10
+ dup v22.8h, w2 // alpha
+ dup v23.8h, w3 // beta
+ uxtl v24.8h, v24.8b // tc0
+
+ uabd v26.8h, v16.8h, v0.8h // abs(p0 - q0)
+ uabd v28.8h, v18.8h, v16.8h // abs(p1 - p0)
+ uabd v30.8h, v2.8h, v0.8h // abs(q1 - q0)
+ cmhi v26.8h, v22.8h, v26.8h // < alpha
+ cmhi v28.8h, v23.8h, v28.8h // < beta
+ cmhi v30.8h, v23.8h, v30.8h // < beta
+
+ and v26.16b, v26.16b, v28.16b
+ mov v4.16b, v0.16b
+ sub v4.8h, v4.8h, v16.8h
+ and v26.16b, v26.16b, v30.16b
+ shl v4.8h, v4.8h, #2
+ mov x8, v26.d[0]
+ mov x9, v26.d[1]
+ sli v24.8h, v24.8h, #8
+ uxtl v24.8h, v24.8b
+ add v4.8h, v4.8h, v18.8h
+ adds x8, x8, x9
+ shl v24.8h, v24.8h, #2
+
+ b.eq 9f
+
+ movi v31.8h, #3 // ((tc0 - 1) << (BIT_DEPTH - 8)) + 1
+ uqsub v24.8h, v24.8h, v31.8h
+ sub v4.8h, v4.8h, v2.8h
+ srshr v4.8h, v4.8h, #3
+ smin v4.8h, v4.8h, v24.8h
+ neg v25.8h, v24.8h
+ smax v4.8h, v4.8h, v25.8h
+ and v4.16b, v4.16b, v26.16b
+ add v16.8h, v16.8h, v4.8h
+ sub v0.8h, v0.8h, v4.8h
+
+ mvni v4.8h, #0xFC, lsl #8 // 1023 for clipping
+ movi v5.8h, #0
+ smin v0.8h, v0.8h, v4.8h
+ smin v16.8h, v16.8h, v4.8h
+ smax v0.8h, v0.8h, v5.8h
+ smax v16.8h, v16.8h, v5.8h
+.endm
+
+function ff_h264_v_loop_filter_chroma_neon_10, export=1
+ h264_loop_filter_start_10
+
+ mov x10, x0
+ sub x0, x0, x1, lsl #1
+ ld1 {v18.8h}, [x0 ], x1
+ ld1 {v0.8h}, [x10], x1
+ ld1 {v16.8h}, [x0 ], x1
+ ld1 {v2.8h}, [x10]
+
+ h264_loop_filter_chroma_10
+
+ sub x0, x10, x1, lsl #1
+ st1 {v16.8h}, [x0], x1
+ st1 {v0.8h}, [x0], x1
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_chroma_neon_10, export=1
+ h264_loop_filter_start_10
+
+ sub x0, x0, #4 // access the 2nd left pixel
+h_loop_filter_chroma420_10:
+ add x10, x0, x1, lsl #2
+ ld1 {v18.d}[0], [x0 ], x1
+ ld1 {v18.d}[1], [x10], x1
+ ld1 {v16.d}[0], [x0 ], x1
+ ld1 {v16.d}[1], [x10], x1
+ ld1 {v0.d}[0], [x0 ], x1
+ ld1 {v0.d}[1], [x10], x1
+ ld1 {v2.d}[0], [x0 ], x1
+ ld1 {v2.d}[1], [x10], x1
+
+ transpose_4x8H v18, v16, v0, v2, v28, v29, v30, v31
+
+ h264_loop_filter_chroma_10
+
+ transpose_4x8H v18, v16, v0, v2, v28, v29, v30, v31
+
+ sub x0, x10, x1, lsl #3
+ st1 {v18.d}[0], [x0], x1
+ st1 {v16.d}[0], [x0], x1
+ st1 {v0.d}[0], [x0], x1
+ st1 {v2.d}[0], [x0], x1
+ st1 {v18.d}[1], [x0], x1
+ st1 {v16.d}[1], [x0], x1
+ st1 {v0.d}[1], [x0], x1
+ st1 {v2.d}[1], [x0], x1
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_chroma422_neon_10, export=1
+ h264_loop_filter_start_10
+ add x5, x0, x1
+ sub x0, x0, #4
+ add x1, x1, x1
+ mov x7, x30
+ bl h_loop_filter_chroma420_10
+ mov x30, x7
+ sub x0, x5, #4
+ mov v24.s[0], w6
+ b h_loop_filter_chroma420_10
+endfunc
+
+.macro h264_loop_filter_chroma_intra_10
+ uabd v26.8h, v16.8h, v17.8h // abs(p0 - q0)
+ uabd v27.8h, v18.8h, v16.8h // abs(p1 - p0)
+ uabd v28.8h, v19.8h, v17.8h // abs(q1 - q0)
+ cmhi v26.8h, v30.8h, v26.8h // < alpha
+ cmhi v27.8h, v31.8h, v27.8h // < beta
+ cmhi v28.8h, v31.8h, v28.8h // < beta
+ and v26.16b, v26.16b, v27.16b
+ and v26.16b, v26.16b, v28.16b
+ mov x2, v26.d[0]
+ mov x3, v26.d[1]
+
+ shl v4.8h, v18.8h, #1
+ shl v6.8h, v19.8h, #1
+
+ adds x2, x2, x3
+ b.eq 9f
+
+ add v20.8h, v16.8h, v19.8h
+ add v22.8h, v17.8h, v18.8h
+ add v20.8h, v20.8h, v4.8h
+ add v22.8h, v22.8h, v6.8h
+ urshr v24.8h, v20.8h, #2
+ urshr v25.8h, v22.8h, #2
+ bit v16.16b, v24.16b, v26.16b
+ bit v17.16b, v25.16b, v26.16b
+.endm
+
+function ff_h264_v_loop_filter_chroma_intra_neon_10, export=1
+ h264_loop_filter_start_intra_10
+ mov x9, x0
+ sub x0, x0, x1, lsl #1
+ ld1 {v18.8h}, [x0], x1
+ ld1 {v17.8h}, [x9], x1
+ ld1 {v16.8h}, [x0], x1
+ ld1 {v19.8h}, [x9]
+
+ h264_loop_filter_chroma_intra_10
+
+ sub x0, x9, x1, lsl #1
+ st1 {v16.8h}, [x0], x1
+ st1 {v17.8h}, [x0], x1
+
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_chroma_mbaff_intra_neon_10, export=1
+ h264_loop_filter_start_intra_10
+
+ sub x4, x0, #4
+ sub x0, x0, #2
+ add x9, x4, x1, lsl #1
+ ld1 {v18.8h}, [x4], x1
+ ld1 {v17.8h}, [x9], x1
+ ld1 {v16.8h}, [x4], x1
+ ld1 {v19.8h}, [x9], x1
+
+ transpose_4x8H v18, v16, v17, v19, v26, v27, v28, v29
+
+ h264_loop_filter_chroma_intra_10
+
+ st2 {v16.h,v17.h}[0], [x0], x1
+ st2 {v16.h,v17.h}[1], [x0], x1
+ st2 {v16.h,v17.h}[2], [x0], x1
+ st2 {v16.h,v17.h}[3], [x0], x1
+
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_chroma_intra_neon_10, export=1
+ h264_loop_filter_start_intra_10
+ sub x4, x0, #4
+ sub x0, x0, #2
+h_loop_filter_chroma420_intra_10:
+ add x9, x4, x1, lsl #2
+ ld1 {v18.4h}, [x4], x1
+ ld1 {v18.d}[1], [x9], x1
+ ld1 {v16.4h}, [x4], x1
+ ld1 {v16.d}[1], [x9], x1
+ ld1 {v17.4h}, [x4], x1
+ ld1 {v17.d}[1], [x9], x1
+ ld1 {v19.4h}, [x4], x1
+ ld1 {v19.d}[1], [x9], x1
+
+ transpose_4x8H v18, v16, v17, v19, v26, v27, v28, v29
+
+ h264_loop_filter_chroma_intra_10
+
+ st2 {v16.h,v17.h}[0], [x0], x1
+ st2 {v16.h,v17.h}[1], [x0], x1
+ st2 {v16.h,v17.h}[2], [x0], x1
+ st2 {v16.h,v17.h}[3], [x0], x1
+ st2 {v16.h,v17.h}[4], [x0], x1
+ st2 {v16.h,v17.h}[5], [x0], x1
+ st2 {v16.h,v17.h}[6], [x0], x1
+ st2 {v16.h,v17.h}[7], [x0], x1
+
+9:
+ ret
+endfunc
+
+function ff_h264_h_loop_filter_chroma422_intra_neon_10, export=1
+ h264_loop_filter_start_intra_10
+ sub x4, x0, #4
+ add x5, x0, x1, lsl #3
+ sub x0, x0, #2
+ mov x7, x30
+ bl h_loop_filter_chroma420_intra_10
+ mov x4, x9
+ sub x0, x5, #2
+ mov x30, x7
+ b h_loop_filter_chroma420_intra_10
+endfunc
diff --git a/media/ffvpx/libavcodec/aarch64/h264idct_neon.S b/media/ffvpx/libavcodec/aarch64/h264idct_neon.S
new file mode 100644
index 0000000000..375da31d65
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/h264idct_neon.S
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+#include "neon.S"
+
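+// 4x4 residual IDCT + add: x0 = dst, x1 = block (cleared on return), w2 = stride.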
+function ff_h264_idct_add_neon, export=1
+.L_ff_h264_idct_add_neon:
+ AARCH64_VALID_CALL_TARGET
+ ld1 {v0.4H, v1.4H, v2.4H, v3.4H}, [x1]
+ sxtw x2, w2
+ movi v30.8H, #0
+
+ add v4.4H, v0.4H, v2.4H
+ sshr v16.4H, v1.4H, #1
+ st1 {v30.8H}, [x1], #16
+ sshr v17.4H, v3.4H, #1
+ st1 {v30.8H}, [x1], #16
+ sub v5.4H, v0.4H, v2.4H
+ sub v6.4H, v16.4H, v3.4H
+ add v7.4H, v1.4H, v17.4H
+ add v0.4H, v4.4H, v7.4H
+ add v1.4H, v5.4H, v6.4H
+ sub v2.4H, v5.4H, v6.4H
+ sub v3.4H, v4.4H, v7.4H
+
+ transpose_4x4H v0, v1, v2, v3, v4, v5, v6, v7
+
+ add v4.4H, v0.4H, v2.4H
+ ld1 {v18.S}[0], [x0], x2
+ sshr v16.4H, v3.4H, #1
+ sshr v17.4H, v1.4H, #1
+ ld1 {v18.S}[1], [x0], x2
+ sub v5.4H, v0.4H, v2.4H
+ ld1 {v19.S}[1], [x0], x2
+ add v6.4H, v16.4H, v1.4H
+ ins v4.D[1], v5.D[0]
+ sub v7.4H, v17.4H, v3.4H
+ ld1 {v19.S}[0], [x0], x2
+ ins v6.D[1], v7.D[0]
+ sub x0, x0, x2, lsl #2
+ add v0.8H, v4.8H, v6.8H
+ sub v1.8H, v4.8H, v6.8H
+
+ srshr v0.8H, v0.8H, #6
+ srshr v1.8H, v1.8H, #6
+
+ uaddw v0.8H, v0.8H, v18.8B
+ uaddw v1.8H, v1.8H, v19.8B
+
+ sqxtun v0.8B, v0.8H
+ sqxtun v1.8B, v1.8H
+
+ st1 {v0.S}[0], [x0], x2
+ st1 {v0.S}[1], [x0], x2
+ st1 {v1.S}[1], [x0], x2
+ st1 {v1.S}[0], [x0], x2
+
+ sub x1, x1, #32
+ ret
+endfunc
+
+function ff_h264_idct_dc_add_neon, export=1
+.L_ff_h264_idct_dc_add_neon:
+ AARCH64_VALID_CALL_TARGET
+ sxtw x2, w2
+ mov w3, #0
+ ld1r {v2.8H}, [x1]
+ strh w3, [x1]
+ srshr v2.8H, v2.8H, #6
+ ld1 {v0.S}[0], [x0], x2
+ ld1 {v0.S}[1], [x0], x2
+ uaddw v3.8H, v2.8H, v0.8B
+ ld1 {v1.S}[0], [x0], x2
+ ld1 {v1.S}[1], [x0], x2
+ uaddw v4.8H, v2.8H, v1.8B
+ sqxtun v0.8B, v3.8H
+ sqxtun v1.8B, v4.8H
+ sub x0, x0, x2, lsl #2
+ st1 {v0.S}[0], [x0], x2
+ st1 {v0.S}[1], [x0], x2
+ st1 {v1.S}[0], [x0], x2
+ st1 {v1.S}[1], [x0], x2
+ ret
+endfunc
+
+function ff_h264_idct_add16_neon, export=1
+ mov x12, x30
+ mov x6, x0 // dest
+ mov x5, x1 // block_offset
+ mov x1, x2 // block
+ mov w9, w3 // stride
+ movrel x7, scan8
+ mov x10, #16
+ movrel x13, .L_ff_h264_idct_dc_add_neon
+ movrel x14, .L_ff_h264_idct_add_neon
+1: mov w2, w9
+ ldrb w3, [x7], #1
+ ldrsw x0, [x5], #4
+ ldrb w3, [x4, w3, uxtw]
+ subs w3, w3, #1
+ b.lt 2f
+ ldrsh w3, [x1]
+ add x0, x0, x6
+ ccmp w3, #0, #4, eq
+ csel x15, x13, x14, ne
+ blr x15
+2: subs x10, x10, #1
+ add x1, x1, #32
+ b.ne 1b
+ ret x12
+endfunc
+
+function ff_h264_idct_add16intra_neon, export=1
+ mov x12, x30
+ mov x6, x0 // dest
+ mov x5, x1 // block_offset
+ mov x1, x2 // block
+ mov w9, w3 // stride
+ movrel x7, scan8
+ mov x10, #16
+ movrel x13, .L_ff_h264_idct_dc_add_neon
+ movrel x14, .L_ff_h264_idct_add_neon
+1: mov w2, w9
+ ldrb w3, [x7], #1
+ ldrsw x0, [x5], #4
+ ldrb w3, [x4, w3, uxtw]
+ add x0, x0, x6
+ cmp w3, #0
+ ldrsh w3, [x1]
+ csel x15, x13, x14, eq
+ ccmp w3, #0, #0, eq
+ b.eq 2f
+ blr x15
+2: subs x10, x10, #1
+ add x1, x1, #32
+ b.ne 1b
+ ret x12
+endfunc
+
+function ff_h264_idct_add8_neon, export=1
+ stp x19, x20, [sp, #-0x40]!
+ mov x12, x30
+ ldp x6, x15, [x0] // dest[0], dest[1]
+ add x5, x1, #16*4 // block_offset
+ add x9, x2, #16*32 // block
+ mov w19, w3 // stride
+ movrel x13, .L_ff_h264_idct_dc_add_neon
+ movrel x14, .L_ff_h264_idct_add_neon
+ movrel x7, scan8, 16
+ mov x10, #0
+ mov x11, #16
+1: mov w2, w19
+ ldrb w3, [x7, x10] // scan8[i]
+ ldrsw x0, [x5, x10, lsl #2] // block_offset[i]
+ ldrb w3, [x4, w3, uxtw] // nnzc[ scan8[i] ]
+ add x0, x0, x6 // block_offset[i] + dst[j-1]
+ add x1, x9, x10, lsl #5 // block + i * 16
+ cmp w3, #0
+ ldrsh w3, [x1] // block[i*16]
+ csel x20, x13, x14, eq
+ ccmp w3, #0, #0, eq
+ b.eq 2f
+ blr x20
+2: add x10, x10, #1
+ cmp x10, #4
+ csel x10, x11, x10, eq // mov x10, #16
+ csel x6, x15, x6, eq
+ cmp x10, #20
+ b.lt 1b
+ ldp x19, x20, [sp], #0x40
+ ret x12
+endfunc
+
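+// One pass of the 8x8 IDCT butterfly; the macro is applied once before and once
+// after the 8x8 transpose to complete the two-dimensional transform.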
+.macro idct8x8_cols pass
+ .if \pass == 0
+ va .req v18
+ vb .req v30
+ sshr v18.8H, v26.8H, #1
+ add v16.8H, v24.8H, v28.8H
+ ld1 {v30.8H, v31.8H}, [x1]
+ st1 {v19.8H}, [x1], #16
+ st1 {v19.8H}, [x1], #16
+ sub v17.8H, v24.8H, v28.8H
+ sshr v19.8H, v30.8H, #1
+ sub v18.8H, v18.8H, v30.8H
+ add v19.8H, v19.8H, v26.8H
+ .else
+ va .req v30
+ vb .req v18
+ sshr v30.8H, v26.8H, #1
+ sshr v19.8H, v18.8H, #1
+ add v16.8H, v24.8H, v28.8H
+ sub v17.8H, v24.8H, v28.8H
+ sub v30.8H, v30.8H, v18.8H
+ add v19.8H, v19.8H, v26.8H
+ .endif
+ add v26.8H, v17.8H, va.8H
+ sub v28.8H, v17.8H, va.8H
+ add v24.8H, v16.8H, v19.8H
+ sub vb.8H, v16.8H, v19.8H
+ sub v16.8H, v29.8H, v27.8H
+ add v17.8H, v31.8H, v25.8H
+ sub va.8H, v31.8H, v25.8H
+ add v19.8H, v29.8H, v27.8H
+ sub v16.8H, v16.8H, v31.8H
+ sub v17.8H, v17.8H, v27.8H
+ add va.8H, va.8H, v29.8H
+ add v19.8H, v19.8H, v25.8H
+ sshr v25.8H, v25.8H, #1
+ sshr v27.8H, v27.8H, #1
+ sshr v29.8H, v29.8H, #1
+ sshr v31.8H, v31.8H, #1
+ sub v16.8H, v16.8H, v31.8H
+ sub v17.8H, v17.8H, v27.8H
+ add va.8H, va.8H, v29.8H
+ add v19.8H, v19.8H, v25.8H
+ sshr v25.8H, v16.8H, #2
+ sshr v27.8H, v17.8H, #2
+ sshr v29.8H, va.8H, #2
+ sshr v31.8H, v19.8H, #2
+ sub v19.8H, v19.8H, v25.8H
+ sub va.8H, v27.8H, va.8H
+ add v17.8H, v17.8H, v29.8H
+ add v16.8H, v16.8H, v31.8H
+ .if \pass == 0
+ sub v31.8H, v24.8H, v19.8H
+ add v24.8H, v24.8H, v19.8H
+ add v25.8H, v26.8H, v18.8H
+ sub v18.8H, v26.8H, v18.8H
+ add v26.8H, v28.8H, v17.8H
+ add v27.8H, v30.8H, v16.8H
+ sub v29.8H, v28.8H, v17.8H
+ sub v28.8H, v30.8H, v16.8H
+ .else
+ sub v31.8H, v24.8H, v19.8H
+ add v24.8H, v24.8H, v19.8H
+ add v25.8H, v26.8H, v30.8H
+ sub v30.8H, v26.8H, v30.8H
+ add v26.8H, v28.8H, v17.8H
+ sub v29.8H, v28.8H, v17.8H
+ add v27.8H, v18.8H, v16.8H
+ sub v28.8H, v18.8H, v16.8H
+ .endif
+ .unreq va
+ .unreq vb
+.endm
+
+function ff_h264_idct8_add_neon, export=1
+.L_ff_h264_idct8_add_neon:
+ AARCH64_VALID_CALL_TARGET
+ movi v19.8H, #0
+ sxtw x2, w2
+ ld1 {v24.8H, v25.8H}, [x1]
+ st1 {v19.8H}, [x1], #16
+ st1 {v19.8H}, [x1], #16
+ ld1 {v26.8H, v27.8H}, [x1]
+ st1 {v19.8H}, [x1], #16
+ st1 {v19.8H}, [x1], #16
+ ld1 {v28.8H, v29.8H}, [x1]
+ st1 {v19.8H}, [x1], #16
+ st1 {v19.8H}, [x1], #16
+
+ idct8x8_cols 0
+ transpose_8x8H v24, v25, v26, v27, v28, v29, v18, v31, v6, v7
+ idct8x8_cols 1
+
+ mov x3, x0
+ srshr v24.8H, v24.8H, #6
+ ld1 {v0.8B}, [x0], x2
+ srshr v25.8H, v25.8H, #6
+ ld1 {v1.8B}, [x0], x2
+ srshr v26.8H, v26.8H, #6
+ ld1 {v2.8B}, [x0], x2
+ srshr v27.8H, v27.8H, #6
+ ld1 {v3.8B}, [x0], x2
+ srshr v28.8H, v28.8H, #6
+ ld1 {v4.8B}, [x0], x2
+ srshr v29.8H, v29.8H, #6
+ ld1 {v5.8B}, [x0], x2
+ srshr v30.8H, v30.8H, #6
+ ld1 {v6.8B}, [x0], x2
+ srshr v31.8H, v31.8H, #6
+ ld1 {v7.8B}, [x0], x2
+ uaddw v24.8H, v24.8H, v0.8B
+ uaddw v25.8H, v25.8H, v1.8B
+ uaddw v26.8H, v26.8H, v2.8B
+ sqxtun v0.8B, v24.8H
+ uaddw v27.8H, v27.8H, v3.8B
+ sqxtun v1.8B, v25.8H
+ uaddw v28.8H, v28.8H, v4.8B
+ sqxtun v2.8B, v26.8H
+ st1 {v0.8B}, [x3], x2
+ uaddw v29.8H, v29.8H, v5.8B
+ sqxtun v3.8B, v27.8H
+ st1 {v1.8B}, [x3], x2
+ uaddw v30.8H, v30.8H, v6.8B
+ sqxtun v4.8B, v28.8H
+ st1 {v2.8B}, [x3], x2
+ uaddw v31.8H, v31.8H, v7.8B
+ sqxtun v5.8B, v29.8H
+ st1 {v3.8B}, [x3], x2
+ sqxtun v6.8B, v30.8H
+ sqxtun v7.8B, v31.8H
+ st1 {v4.8B}, [x3], x2
+ st1 {v5.8B}, [x3], x2
+ st1 {v6.8B}, [x3], x2
+ st1 {v7.8B}, [x3], x2
+
+ sub x1, x1, #128
+ ret
+endfunc
+
+function ff_h264_idct8_dc_add_neon, export=1
+.L_ff_h264_idct8_dc_add_neon:
+ AARCH64_VALID_CALL_TARGET
+ mov w3, #0
+ sxtw x2, w2
+ ld1r {v31.8H}, [x1]
+ strh w3, [x1]
+ ld1 {v0.8B}, [x0], x2
+ srshr v31.8H, v31.8H, #6
+ ld1 {v1.8B}, [x0], x2
+ ld1 {v2.8B}, [x0], x2
+ uaddw v24.8H, v31.8H, v0.8B
+ ld1 {v3.8B}, [x0], x2
+ uaddw v25.8H, v31.8H, v1.8B
+ ld1 {v4.8B}, [x0], x2
+ uaddw v26.8H, v31.8H, v2.8B
+ ld1 {v5.8B}, [x0], x2
+ uaddw v27.8H, v31.8H, v3.8B
+ ld1 {v6.8B}, [x0], x2
+ uaddw v28.8H, v31.8H, v4.8B
+ ld1 {v7.8B}, [x0], x2
+ uaddw v29.8H, v31.8H, v5.8B
+ uaddw v30.8H, v31.8H, v6.8B
+ uaddw v31.8H, v31.8H, v7.8B
+ sqxtun v0.8B, v24.8H
+ sqxtun v1.8B, v25.8H
+ sqxtun v2.8B, v26.8H
+ sqxtun v3.8B, v27.8H
+ sub x0, x0, x2, lsl #3
+ st1 {v0.8B}, [x0], x2
+ sqxtun v4.8B, v28.8H
+ st1 {v1.8B}, [x0], x2
+ sqxtun v5.8B, v29.8H
+ st1 {v2.8B}, [x0], x2
+ sqxtun v6.8B, v30.8H
+ st1 {v3.8B}, [x0], x2
+ sqxtun v7.8B, v31.8H
+ st1 {v4.8B}, [x0], x2
+ st1 {v5.8B}, [x0], x2
+ st1 {v6.8B}, [x0], x2
+ st1 {v7.8B}, [x0], x2
+ ret
+endfunc
+
+function ff_h264_idct8_add4_neon, export=1
+ mov x12, x30
+ mov x6, x0
+ mov x5, x1
+ mov x1, x2
+ mov w2, w3
+ movrel x7, scan8
+ mov w10, #16
+ movrel x13, .L_ff_h264_idct8_dc_add_neon
+ movrel x14, .L_ff_h264_idct8_add_neon
+1: ldrb w9, [x7], #4
+ ldrsw x0, [x5], #16
+ ldrb w9, [x4, w9, UXTW]
+ subs w9, w9, #1
+ b.lt 2f
+ ldrsh w11, [x1]
+ add x0, x6, x0
+ ccmp w11, #0, #4, eq
+ csel x15, x13, x14, ne
+ blr x15
+2: subs w10, w10, #4
+ add x1, x1, #128
+ b.ne 1b
+ ret x12
+endfunc
+
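+// scan8[]: maps a block index to its position in the 8-wide non_zero_count cache
+// that the nnzc[] argument of the add16/add16intra/add8/add4 functions indexes.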
+const scan8
+ .byte 4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
+ .byte 6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
+ .byte 4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
+ .byte 6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8
+ .byte 4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8
+ .byte 6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8
+ .byte 4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8
+ .byte 6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8
+ .byte 4+11*8, 5+11*8, 4+12*8, 5+12*8
+ .byte 6+11*8, 7+11*8, 6+12*8, 7+12*8
+ .byte 4+13*8, 5+13*8, 4+14*8, 5+14*8
+ .byte 6+13*8, 7+13*8, 6+14*8, 7+14*8
+endconst
diff --git a/media/ffvpx/libavcodec/aarch64/h264pred_init.c b/media/ffvpx/libavcodec/aarch64/h264pred_init.c
new file mode 100644
index 0000000000..0ae8f70d23
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/h264pred_init.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/h264pred.h"
+
+void ff_pred16x16_vert_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred16x16_hor_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred16x16_plane_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred16x16_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred16x16_128_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred16x16_left_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred16x16_top_dc_neon(uint8_t *src, ptrdiff_t stride);
+
+void ff_pred8x8_vert_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_hor_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_plane_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_128_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_left_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_top_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_l0t_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_0lt_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_l00_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_0l0_dc_neon(uint8_t *src, ptrdiff_t stride);
+
+void ff_pred16x16_vert_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred16x16_hor_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred16x16_plane_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred16x16_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred16x16_top_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+
+void ff_pred8x8_vert_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_hor_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_plane_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_128_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_left_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_top_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_l0t_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_0lt_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_l00_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_0l0_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+
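+/* Install the NEON intra predictors. The 8x8 chroma predictors are only used for
+ * chroma_format_idc <= 1 (4:2:0/4:0:0), and codecs whose prediction rules differ
+ * from H.264 (SVQ3, RV40, VP7/VP8) keep the default implementations for the
+ * affected modes. */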
+static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id,
+ const int bit_depth,
+ const int chroma_format_idc)
+{
+ if (bit_depth == 8) {
+ if (chroma_format_idc <= 1) {
+ h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon;
+ h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_hor_neon;
+ if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
+ h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon;
+ h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon;
+ if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
+ codec_id != AV_CODEC_ID_VP8) {
+ h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon;
+ h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon;
+ h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon;
+ h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon;
+ h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon;
+ h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon;
+ h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon;
+ }
+ }
+
+ h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_neon;
+ h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vert_neon;
+ h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_hor_neon;
+ h->pred16x16[LEFT_DC_PRED8x8] = ff_pred16x16_left_dc_neon;
+ h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon;
+ h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_neon;
+ if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 &&
+ codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
+ h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon;
+ }
+ if (bit_depth == 10) {
+ if (chroma_format_idc <= 1) {
+ h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon_10;
+ h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_hor_neon_10;
+ if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
+ h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon_10;
+ h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon_10;
+ if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
+ codec_id != AV_CODEC_ID_VP8) {
+ h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon_10;
+ h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon_10;
+ h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon_10;
+ h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon_10;
+ h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon_10;
+ h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon_10;
+ h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon_10;
+ }
+ }
+
+ h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_neon_10;
+ h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vert_neon_10;
+ h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_hor_neon_10;
+ h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon_10;
+ if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 &&
+ codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
+ h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon_10;
+ }
+}
+
+av_cold void ff_h264_pred_init_aarch64(H264PredContext *h, int codec_id,
+ int bit_depth, const int chroma_format_idc)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags))
+ h264_pred_init_neon(h, codec_id, bit_depth, chroma_format_idc);
+}
diff --git a/media/ffvpx/libavcodec/aarch64/h264pred_neon.S b/media/ffvpx/libavcodec/aarch64/h264pred_neon.S
new file mode 100644
index 0000000000..ea37689f34
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/h264pred_neon.S
@@ -0,0 +1,765 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
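+// ldcol.8: load an n-element column of bytes, one element per row, stepping the
+// address register by the stride after each load.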
+.macro ldcol.8 rd, rs, rt, n=8, hi=0
+.if \n >= 8 || \hi == 0
+ ld1 {\rd\().b}[0], [\rs], \rt
+ ld1 {\rd\().b}[1], [\rs], \rt
+ ld1 {\rd\().b}[2], [\rs], \rt
+ ld1 {\rd\().b}[3], [\rs], \rt
+.endif
+.if \n >= 8 || \hi == 1
+ ld1 {\rd\().b}[4], [\rs], \rt
+ ld1 {\rd\().b}[5], [\rs], \rt
+ ld1 {\rd\().b}[6], [\rs], \rt
+ ld1 {\rd\().b}[7], [\rs], \rt
+.endif
+.if \n == 16
+ ld1 {\rd\().b}[8], [\rs], \rt
+ ld1 {\rd\().b}[9], [\rs], \rt
+ ld1 {\rd\().b}[10], [\rs], \rt
+ ld1 {\rd\().b}[11], [\rs], \rt
+ ld1 {\rd\().b}[12], [\rs], \rt
+ ld1 {\rd\().b}[13], [\rs], \rt
+ ld1 {\rd\().b}[14], [\rs], \rt
+ ld1 {\rd\().b}[15], [\rs], \rt
+.endif
+.endm
+
+function ff_pred16x16_128_dc_neon, export=1
+ movi v0.16b, #128
+ b .L_pred16x16_dc_end
+endfunc
+
+function ff_pred16x16_top_dc_neon, export=1
+ sub x2, x0, x1
+ ld1 {v0.16b}, [x2]
+ uaddlv h0, v0.16b
+ rshrn v0.8b, v0.8h, #4
+ dup v0.16b, v0.b[0]
+ b .L_pred16x16_dc_end
+endfunc
+
+function ff_pred16x16_left_dc_neon, export=1
+ sub x2, x0, #1
+ ldcol.8 v0, x2, x1, 16
+ uaddlv h0, v0.16b
+ rshrn v0.8b, v0.8h, #4
+ dup v0.16b, v0.b[0]
+ b .L_pred16x16_dc_end
+endfunc
+
+function ff_pred16x16_dc_neon, export=1
+ sub x2, x0, x1
+ sub x3, x0, #1
+ ld1 {v0.16b}, [x2]
+ ldcol.8 v1, x3, x1, 16
+ uaddlv h0, v0.16b
+ uaddlv h1, v1.16b
+ add v0.4h, v0.4h, v1.4h
+ rshrn v0.8b, v0.8h, #5
+ dup v0.16b, v0.b[0]
+.L_pred16x16_dc_end:
+ mov w3, #8
+6: st1 {v0.16b}, [x0], x1
+ subs w3, w3, #1
+ st1 {v0.16b}, [x0], x1
+ b.ne 6b
+ ret
+endfunc
+
+function ff_pred16x16_hor_neon, export=1
+ sub x2, x0, #1
+ mov w3, #16
+1: ld1r {v0.16b}, [x2], x1
+ subs w3, w3, #1
+ st1 {v0.16b}, [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred16x16_vert_neon, export=1
+ sub x2, x0, x1
+ add x1, x1, x1
+ ld1 {v0.16b}, [x2], x1
+ mov w3, #8
+1: subs w3, w3, #1
+ st1 {v0.16b}, [x0], x1
+ st1 {v0.16b}, [x2], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred16x16_plane_neon, export=1
+ sub x3, x0, x1
+ movrel x4, p16weight
+ add x2, x3, #8
+ sub x3, x3, #1
+ ld1 {v0.8b}, [x3]
+ ld1 {v2.8b}, [x2], x1
+ ldcol.8 v1, x3, x1
+ add x3, x3, x1
+ ldcol.8 v3, x3, x1
+ rev64 v0.8b, v0.8b
+ rev64 v1.8b, v1.8b
+ uaddl v7.8h, v2.8b, v3.8b
+ usubl v2.8h, v2.8b, v0.8b
+ usubl v3.8h, v3.8b, v1.8b
+ ld1 {v0.8h}, [x4]
+ mul v2.8h, v2.8h, v0.8h
+ mul v3.8h, v3.8h, v0.8h
+ addp v2.8h, v2.8h, v3.8h
+ addp v2.8h, v2.8h, v2.8h
+ addp v2.4h, v2.4h, v2.4h
+ sshll v3.4s, v2.4h, #2
+ saddw v2.4s, v3.4s, v2.4h
+ rshrn v4.4h, v2.4s, #6
+ trn2 v5.4h, v4.4h, v4.4h
+ add v2.4h, v4.4h, v5.4h
+ shl v3.4h, v2.4h, #3
+ ext v7.16b, v7.16b, v7.16b, #14
+ sub v3.4h, v3.4h, v2.4h // 7 * (b + c)
+ add v7.4h, v7.4h, v0.4h
+ shl v2.4h, v7.4h, #4
+ sub v2.4h, v2.4h, v3.4h
+ shl v3.4h, v4.4h, #4
+ ext v0.16b, v0.16b, v0.16b, #14
+ sub v6.4h, v5.4h, v3.4h
+ mov v0.h[0], wzr
+ mul v0.8h, v0.8h, v4.h[0]
+ dup v1.8h, v2.h[0]
+ dup v2.8h, v4.h[0]
+ dup v3.8h, v6.h[0]
+ shl v2.8h, v2.8h, #3
+ add v1.8h, v1.8h, v0.8h
+ add v3.8h, v3.8h, v2.8h
+ mov w3, #16
+1:
+ sqshrun v0.8b, v1.8h, #5
+ add v1.8h, v1.8h, v2.8h
+ sqshrun2 v0.16b, v1.8h, #5
+ add v1.8h, v1.8h, v3.8h
+ subs w3, w3, #1
+ st1 {v0.16b}, [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+const p16weight, align=4
+ .short 1,2,3,4,5,6,7,8
+endconst
+const p8weight, align=4
+ .short 1,2,3,4,1,2,3,4
+endconst
+
+function ff_pred8x8_hor_neon, export=1
+ sub x2, x0, #1
+ mov w3, #8
+1: ld1r {v0.8b}, [x2], x1
+ subs w3, w3, #1
+ st1 {v0.8b}, [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred8x8_vert_neon, export=1
+ sub x2, x0, x1
+ lsl x1, x1, #1
+ ld1 {v0.8b}, [x2], x1
+ mov w3, #4
+1: subs w3, w3, #1
+ st1 {v0.8b}, [x0], x1
+ st1 {v0.8b}, [x2], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred8x8_plane_neon, export=1
+ sub x3, x0, x1
+ movrel x4, p8weight
+ movrel x5, p16weight
+ add x2, x3, #4
+ sub x3, x3, #1
+ ld1 {v0.s}[0], [x3]
+ ld1 {v2.s}[0], [x2], x1
+ ldcol.8 v0, x3, x1, 4, hi=1
+ add x3, x3, x1
+ ldcol.8 v3, x3, x1, 4
+ uaddl v7.8h, v2.8b, v3.8b
+ rev32 v0.8b, v0.8b
+ trn1 v2.2s, v2.2s, v3.2s
+ usubl v2.8h, v2.8b, v0.8b
+ ld1 {v6.8h}, [x4]
+ mul v2.8h, v2.8h, v6.8h
+ ld1 {v0.8h}, [x5]
+ saddlp v2.4s, v2.8h
+ addp v2.4s, v2.4s, v2.4s
+ shl v3.4s, v2.4s, #4
+ add v2.4s, v3.4s, v2.4s
+ rshrn v5.4h, v2.4s, #5
+ addp v2.4h, v5.4h, v5.4h
+ shl v3.4h, v2.4h, #1
+ add v3.4h, v3.4h, v2.4h
+ rev64 v7.4h, v7.4h
+ add v7.4h, v7.4h, v0.4h
+ shl v2.4h, v7.4h, #4
+ sub v2.4h, v2.4h, v3.4h
+ ext v0.16b, v0.16b, v0.16b, #14
+ mov v0.h[0], wzr
+ mul v0.8h, v0.8h, v5.h[0]
+ dup v1.8h, v2.h[0]
+ dup v2.8h, v5.h[1]
+ add v1.8h, v1.8h, v0.8h
+ mov w3, #8
+1:
+ sqshrun v0.8b, v1.8h, #5
+ subs w3, w3, #1
+ add v1.8h, v1.8h, v2.8h
+ st1 {v0.8b}, [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred8x8_128_dc_neon, export=1
+ movi v0.8b, #128
+ movi v1.8b, #128
+ b .L_pred8x8_dc_end
+endfunc
+
+function ff_pred8x8_top_dc_neon, export=1
+ sub x2, x0, x1
+ ld1 {v0.8b}, [x2]
+ uaddlp v0.4h, v0.8b
+ addp v0.4h, v0.4h, v0.4h
+ zip1 v0.8h, v0.8h, v0.8h
+ rshrn v2.8b, v0.8h, #2
+ zip1 v0.8b, v2.8b, v2.8b
+ zip1 v1.8b, v2.8b, v2.8b
+ b .L_pred8x8_dc_end
+endfunc
+
+function ff_pred8x8_left_dc_neon, export=1
+ sub x2, x0, #1
+ ldcol.8 v0, x2, x1
+ uaddlp v0.4h, v0.8b
+ addp v0.4h, v0.4h, v0.4h
+ rshrn v2.8b, v0.8h, #2
+ dup v1.8b, v2.b[1]
+ dup v0.8b, v2.b[0]
+ b .L_pred8x8_dc_end
+endfunc
+
+function ff_pred8x8_dc_neon, export=1
+ sub x2, x0, x1
+ sub x3, x0, #1
+ ld1 {v0.8b}, [x2]
+ ldcol.8 v1, x3, x1
+ uaddlp v0.4h, v0.8b
+ uaddlp v1.4h, v1.8b
+ trn1 v2.2s, v0.2s, v1.2s
+ trn2 v3.2s, v0.2s, v1.2s
+ addp v4.4h, v2.4h, v3.4h
+ addp v5.4h, v4.4h, v4.4h
+ rshrn v6.8b, v5.8h, #3
+ rshrn v7.8b, v4.8h, #2
+ dup v0.8b, v6.b[0]
+ dup v2.8b, v7.b[2]
+ dup v1.8b, v7.b[3]
+ dup v3.8b, v6.b[1]
+ zip1 v0.2s, v0.2s, v2.2s
+ zip1 v1.2s, v1.2s, v3.2s
+.L_pred8x8_dc_end:
+ mov w3, #4
+ add x2, x0, x1, lsl #2
+6: subs w3, w3, #1
+ st1 {v0.8b}, [x0], x1
+ st1 {v1.8b}, [x2], x1
+ b.ne 6b
+ ret
+endfunc
+
+function ff_pred8x8_l0t_dc_neon, export=1
+ sub x2, x0, x1
+ sub x3, x0, #1
+ ld1 {v0.8b}, [x2]
+ ldcol.8 v1, x3, x1, 4
+ zip1 v0.4s, v0.4s, v1.4s
+ uaddlp v0.8h, v0.16b
+ addp v0.8h, v0.8h, v0.8h
+ addp v1.4h, v0.4h, v0.4h
+ rshrn v2.8b, v0.8h, #2
+ rshrn v3.8b, v1.8h, #3
+ dup v4.8b, v3.b[0]
+ dup v6.8b, v2.b[2]
+ dup v5.8b, v2.b[0]
+ zip1 v0.2s, v4.2s, v6.2s
+ zip1 v1.2s, v5.2s, v6.2s
+ b .L_pred8x8_dc_end
+endfunc
+
+function ff_pred8x8_l00_dc_neon, export=1
+ sub x2, x0, #1
+ ldcol.8 v0, x2, x1, 4
+ uaddlp v0.4h, v0.8b
+ addp v0.4h, v0.4h, v0.4h
+ rshrn v0.8b, v0.8h, #2
+ movi v1.8b, #128
+ dup v0.8b, v0.b[0]
+ b .L_pred8x8_dc_end
+endfunc
+
+function ff_pred8x8_0lt_dc_neon, export=1
+ add x3, x0, x1, lsl #2
+ sub x2, x0, x1
+ sub x3, x3, #1
+ ld1 {v0.8b}, [x2]
+ ldcol.8 v1, x3, x1, 4, hi=1
+ zip1 v0.4s, v0.4s, v1.4s
+ uaddlp v0.8h, v0.16b
+ addp v0.8h, v0.8h, v0.8h
+ addp v1.4h, v0.4h, v0.4h
+ rshrn v2.8b, v0.8h, #2
+ rshrn v3.8b, v1.8h, #3
+ dup v4.8b, v2.b[0]
+ dup v5.8b, v2.b[3]
+ dup v6.8b, v2.b[2]
+ dup v7.8b, v3.b[1]
+ zip1 v0.2s, v4.2s, v6.2s
+ zip1 v1.2s, v5.2s, v7.2s
+ b .L_pred8x8_dc_end
+endfunc
+
+function ff_pred8x8_0l0_dc_neon, export=1
+ add x2, x0, x1, lsl #2
+ sub x2, x2, #1
+ ldcol.8 v1, x2, x1, 4
+ uaddlp v2.4h, v1.8b
+ addp v2.4h, v2.4h, v2.4h
+ rshrn v1.8b, v2.8h, #2
+ movi v0.8b, #128
+ dup v1.8b, v1.b[0]
+ b .L_pred8x8_dc_end
+endfunc
+
+.macro ldcol.16 rd, rs, rt, n=4, hi=0
+.if \n >= 4 && \hi == 0
+ ld1 {\rd\().h}[0], [\rs], \rt
+ ld1 {\rd\().h}[1], [\rs], \rt
+ ld1 {\rd\().h}[2], [\rs], \rt
+ ld1 {\rd\().h}[3], [\rs], \rt
+.endif
+.if \n == 8 || \hi == 1
+ ld1 {\rd\().h}[4], [\rs], \rt
+ ld1 {\rd\().h}[5], [\rs], \rt
+ ld1 {\rd\().h}[6], [\rs], \rt
+ ld1 {\rd\().h}[7], [\rs], \rt
+.endif
+.endm
+
+// slower than C
+/*
+function ff_pred16x16_128_dc_neon_10, export=1
+ movi v0.8h, #2, lsl #8 // 512, 1 << (bit_depth - 1)
+
+ b .L_pred16x16_dc_10_end
+endfunc
+*/
+
+function ff_pred16x16_top_dc_neon_10, export=1
+ sub x2, x0, x1
+
+ ld1 {v0.8h, v1.8h}, [x2]
+
+ add v0.8h, v0.8h, v1.8h
+ addv h0, v0.8h
+
+ urshr v0.4h, v0.4h, #4
+ dup v0.8h, v0.h[0]
+ b .L_pred16x16_dc_10_end
+endfunc
+
+// slower than C
+/*
+function ff_pred16x16_left_dc_neon_10, export=1
+ sub x2, x0, #2 // access to the "left" column
+ ldcol.16 v0, x2, x1, 8
+ ldcol.16 v1, x2, x1, 8 // load "left" column
+
+ add v0.8h, v0.8h, v1.8h
+ addv h0, v0.8h
+
+ urshr v0.4h, v0.4h, #4
+ dup v0.8h, v0.h[0]
+ b .L_pred16x16_dc_10_end
+endfunc
+*/
+
+function ff_pred16x16_dc_neon_10, export=1
+ sub x2, x0, x1 // access to the "top" row
+ sub x3, x0, #2 // access to the "left" column
+
+ ld1 {v0.8h, v1.8h}, [x2]
+ ldcol.16 v2, x3, x1, 8
+ ldcol.16 v3, x3, x1, 8 // load pixels in "top" row and "left" col
+
+ add v0.8h, v0.8h, v1.8h
+ add v2.8h, v2.8h, v3.8h
+ add v0.8h, v0.8h, v2.8h
+ addv h0, v0.8h
+
+ urshr v0.4h, v0.4h, #5
+ dup v0.8h, v0.h[0]
+.L_pred16x16_dc_10_end:
+ mov v1.16b, v0.16b
+ mov w3, #8
+6: st1 {v0.8h, v1.8h}, [x0], x1
+ subs w3, w3, #1
+ st1 {v0.8h, v1.8h}, [x0], x1
+ b.ne 6b
+ ret
+endfunc
+
+function ff_pred16x16_hor_neon_10, export=1
+ sub x2, x0, #2
+ add x3, x0, #16
+
+ mov w4, #16
+1: ld1r {v0.8h}, [x2], x1
+ subs w4, w4, #1
+ st1 {v0.8h}, [x0], x1
+ st1 {v0.8h}, [x3], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred16x16_vert_neon_10, export=1
+ sub x2, x0, x1
+ add x1, x1, x1
+
+ ld1 {v0.8h, v1.8h}, [x2], x1
+
+ mov w3, #8
+1: subs w3, w3, #1
+ st1 {v0.8h, v1.8h}, [x0], x1
+ st1 {v0.8h, v1.8h}, [x2], x1
+
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred16x16_plane_neon_10, export=1
+ sub x3, x0, x1
+ movrel x4, p16weight
+ add x2, x3, #16
+ sub x3, x3, #2
+ ld1 {v0.8h}, [x3]
+ ld1 {v2.8h}, [x2], x1
+ ldcol.16 v1, x3, x1, 8
+ add x3, x3, x1
+ ldcol.16 v3, x3, x1, 8
+
+ rev64 v16.8h, v0.8h
+ rev64 v17.8h, v1.8h
+ ext v0.16b, v16.16b, v16.16b, #8
+ ext v1.16b, v17.16b, v17.16b, #8
+
+ add v7.8h, v2.8h, v3.8h
+ sub v2.8h, v2.8h, v0.8h
+ sub v3.8h, v3.8h, v1.8h
+ ld1 {v0.8h}, [x4]
+ mul v2.8h, v2.8h, v0.8h
+ mul v3.8h, v3.8h, v0.8h
+ addp v2.8h, v2.8h, v3.8h
+ addp v2.8h, v2.8h, v2.8h
+ addp v2.4h, v2.4h, v2.4h
+ sshll v3.4s, v2.4h, #2
+ saddw v2.4s, v3.4s, v2.4h
+ rshrn v4.4h, v2.4s, #6
+ trn2 v5.4h, v4.4h, v4.4h
+ add v2.4h, v4.4h, v5.4h
+ shl v3.4h, v2.4h, #3
+ ext v7.16b, v7.16b, v7.16b, #14
+ sub v3.4h, v3.4h, v2.4h // 7 * (b + c)
+ add v7.4h, v7.4h, v0.4h
+ shl v2.4h, v7.4h, #4
+ ssubl v2.4s, v2.4h, v3.4h
+ shl v3.4h, v4.4h, #4
+ ext v0.16b, v0.16b, v0.16b, #14
+ ssubl v6.4s, v5.4h, v3.4h
+
+ mov v0.h[0], wzr
+ mul v0.8h, v0.8h, v4.h[0]
+ dup v16.4s, v2.s[0]
+ dup v17.4s, v2.s[0]
+ dup v2.8h, v4.h[0]
+ dup v3.4s, v6.s[0]
+ shl v2.8h, v2.8h, #3
+ saddw v16.4s, v16.4s, v0.4h
+ saddw2 v17.4s, v17.4s, v0.8h
+ saddw v3.4s, v3.4s, v2.4h
+
+ mov w3, #16
+ mvni v4.8h, #0xFC, lsl #8 // 1023 for clipping
+1:
+ sqshrun v0.4h, v16.4s, #5
+ sqshrun2 v0.8h, v17.4s, #5
+ saddw v16.4s, v16.4s, v2.4h
+ saddw v17.4s, v17.4s, v2.4h
+ sqshrun v1.4h, v16.4s, #5
+ sqshrun2 v1.8h, v17.4s, #5
+ add v16.4s, v16.4s, v3.4s
+ add v17.4s, v17.4s, v3.4s
+
+ subs w3, w3, #1
+
+ smin v0.8h, v0.8h, v4.8h
+ smin v1.8h, v1.8h, v4.8h
+
+ st1 {v0.8h, v1.8h}, [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred8x8_hor_neon_10, export=1
+ sub x2, x0, #2
+ mov w3, #8
+
+1: ld1r {v0.8h}, [x2], x1
+ subs w3, w3, #1
+ st1 {v0.8h}, [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred8x8_vert_neon_10, export=1
+ sub x2, x0, x1
+ lsl x1, x1, #1
+
+ ld1 {v0.8h}, [x2], x1
+ mov w3, #4
+1: subs w3, w3, #1
+ st1 {v0.8h}, [x0], x1
+ st1 {v0.8h}, [x2], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred8x8_plane_neon_10, export=1
+ sub x3, x0, x1
+ movrel x4, p8weight
+ movrel x5, p16weight
+ add x2, x3, #8
+ sub x3, x3, #2
+ ld1 {v0.d}[0], [x3]
+ ld1 {v2.d}[0], [x2], x1
+ ldcol.16 v0, x3, x1, hi=1
+ add x3, x3, x1
+ ldcol.16 v3, x3, x1, 4
+ add v7.8h, v2.8h, v3.8h
+ rev64 v0.8h, v0.8h
+ trn1 v2.2d, v2.2d, v3.2d
+ sub v2.8h, v2.8h, v0.8h
+ ld1 {v6.8h}, [x4]
+ mul v2.8h, v2.8h, v6.8h
+ ld1 {v0.8h}, [x5]
+ saddlp v2.4s, v2.8h
+ addp v2.4s, v2.4s, v2.4s
+ shl v3.4s, v2.4s, #4
+ add v2.4s, v3.4s, v2.4s
+ rshrn v5.4h, v2.4s, #5
+ addp v2.4h, v5.4h, v5.4h
+ shl v3.4h, v2.4h, #1
+ add v3.4h, v3.4h, v2.4h
+ rev64 v7.4h, v7.4h
+ add v7.4h, v7.4h, v0.4h
+ shl v2.4h, v7.4h, #4
+ ssubl v2.4s, v2.4h, v3.4h
+ ext v0.16b, v0.16b, v0.16b, #14
+ mov v0.h[0], wzr
+ mul v0.8h, v0.8h, v5.h[0]
+ dup v1.4s, v2.s[0]
+ dup v2.4s, v2.s[0]
+ dup v3.8h, v5.h[1]
+ saddw v1.4s, v1.4s, v0.4h
+ saddw2 v2.4s, v2.4s, v0.8h
+ mov w3, #8
+ mvni v4.8h, #0xFC, lsl #8 // 1023 for clipping
+1:
+ sqshrun v0.4h, v1.4s, #5
+ sqshrun2 v0.8h, v2.4s, #5
+
+ saddw v1.4s, v1.4s, v3.4h
+ saddw v2.4s, v2.4s, v3.4h
+
+ subs w3, w3, #1
+
+ smin v0.8h, v0.8h, v4.8h
+
+ st1 {v0.8h}, [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_pred8x8_128_dc_neon_10, export=1
+ movi v0.8h, #2, lsl #8 // 512, 1 << (bit_depth - 1)
+ movi v1.8h, #2, lsl #8
+ b .L_pred8x8_dc_10_end
+endfunc
+
+function ff_pred8x8_top_dc_neon_10, export=1
+ sub x2, x0, x1
+ ld1 {v0.8h}, [x2]
+
+ addp v0.8h, v0.8h, v0.8h
+ addp v0.4h, v0.4h, v0.4h
+ zip1 v0.4h, v0.4h, v0.4h
+ urshr v2.4h, v0.4h, #2
+ zip1 v0.8h, v2.8h, v2.8h
+ zip1 v1.8h, v2.8h, v2.8h
+ b .L_pred8x8_dc_10_end
+endfunc
+
+function ff_pred8x8_left_dc_neon_10, export=1
+ sub x2, x0, #2
+ ldcol.16 v0, x2, x1, 8
+
+ addp v0.8h, v0.8h, v0.8h
+ addp v0.4h, v0.4h, v0.4h
+ urshr v2.4h, v0.4h, #2
+ dup v1.8h, v2.h[1]
+ dup v0.8h, v2.h[0]
+ b .L_pred8x8_dc_10_end
+endfunc
+
+function ff_pred8x8_dc_neon_10, export=1
+ sub x2, x0, x1
+ sub x3, x0, #2
+
+ ld1 {v0.8h}, [x2]
+ ldcol.16 v1, x3, x1, 8
+
+ addp v0.8h, v0.8h, v0.8h
+ addp v1.8h, v1.8h, v1.8h
+ trn1 v2.2s, v0.2s, v1.2s
+ trn2 v3.2s, v0.2s, v1.2s
+ addp v4.4h, v2.4h, v3.4h
+ addp v5.4h, v4.4h, v4.4h
+ urshr v6.4h, v5.4h, #3
+ urshr v7.4h, v4.4h, #2
+ dup v0.8h, v6.h[0]
+ dup v2.8h, v7.h[2]
+ dup v1.8h, v7.h[3]
+ dup v3.8h, v6.h[1]
+ zip1 v0.2d, v0.2d, v2.2d
+ zip1 v1.2d, v1.2d, v3.2d
+.L_pred8x8_dc_10_end:
+ mov w3, #4
+ add x2, x0, x1, lsl #2
+
+6: st1 {v0.8h}, [x0], x1
+ subs w3, w3, #1
+ st1 {v1.8h}, [x2], x1
+ b.ne 6b
+ ret
+endfunc
+
+function ff_pred8x8_l0t_dc_neon_10, export=1
+ sub x2, x0, x1
+ sub x3, x0, #2
+
+ ld1 {v0.8h}, [x2]
+ ldcol.16 v1, x3, x1, 4
+
+ addp v0.8h, v0.8h, v0.8h
+ addp v1.4h, v1.4h, v1.4h
+ addp v0.4h, v0.4h, v0.4h
+ addp v1.4h, v1.4h, v1.4h
+ add v1.4h, v1.4h, v0.4h
+
+ urshr v2.4h, v0.4h, #2
+ urshr v3.4h, v1.4h, #3 // the pred4x4 part
+
+ dup v4.4h, v3.h[0]
+ dup v5.4h, v2.h[0]
+ dup v6.4h, v2.h[1]
+
+ zip1 v0.2d, v4.2d, v6.2d
+ zip1 v1.2d, v5.2d, v6.2d
+ b .L_pred8x8_dc_10_end
+endfunc
+
+function ff_pred8x8_l00_dc_neon_10, export=1
+ sub x2, x0, #2
+
+ ldcol.16 v0, x2, x1, 4
+
+ addp v0.4h, v0.4h, v0.4h
+ addp v0.4h, v0.4h, v0.4h
+ urshr v0.4h, v0.4h, #2
+
+ movi v1.8h, #2, lsl #8 // 512
+ dup v0.8h, v0.h[0]
+ b .L_pred8x8_dc_10_end
+endfunc
+
+function ff_pred8x8_0lt_dc_neon_10, export=1
+ add x3, x0, x1, lsl #2
+ sub x2, x0, x1
+ sub x3, x3, #2
+
+ ld1 {v0.8h}, [x2]
+ ldcol.16 v1, x3, x1, hi=1
+
+ addp v0.8h, v0.8h, v0.8h
+ addp v1.8h, v1.8h, v1.8h
+ addp v0.4h, v0.4h, v0.4h
+ addp v1.4h, v1.4h, v1.4h
+ zip1 v0.2s, v0.2s, v1.2s
+ add v1.4h, v0.4h, v1.4h
+
+ urshr v2.4h, v0.4h, #2
+ urshr v3.4h, v1.4h, #3
+
+ dup v4.4h, v2.h[0]
+ dup v5.4h, v2.h[3]
+ dup v6.4h, v2.h[1]
+ dup v7.4h, v3.h[1]
+
+ zip1 v0.2d, v4.2d, v6.2d
+ zip1 v1.2d, v5.2d, v7.2d
+ b .L_pred8x8_dc_10_end
+endfunc
+
+function ff_pred8x8_0l0_dc_neon_10, export=1
+ add x2, x0, x1, lsl #2
+ sub x2, x2, #2
+
+ ldcol.16 v1, x2, x1, 4
+
+ addp v2.8h, v1.8h, v1.8h
+ addp v2.4h, v2.4h, v2.4h
+ urshr v1.4h, v2.4h, #2
+
+ movi v0.8h, #2, lsl #8 // 512
+ dup v1.8h, v1.h[0]
+ b .L_pred8x8_dc_10_end
+endfunc
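A note on the 10-bit DC predictors above: each one reduces the neighbouring top row and/or left column with pairwise adds and a rounding shift (urshr #2 for a 4-sample average, #3 where 8 samples contribute), then broadcasts the results over the 8x8 block. A minimal scalar sketch of the top-DC case, illustrative only and not part of the patch (element stride for simplicity; the real entry point takes a byte pointer and byte stride):

    #include <stddef.h>
    #include <stdint.h>

    /* What ff_pred8x8_top_dc_neon_10 computes: the left and right 4-wide
     * halves of the 8x8 block are filled with the rounded average of the
     * four neighbours directly above each half. */
    void pred8x8_top_dc_10_ref(uint16_t *src, ptrdiff_t stride)
    {
        const uint16_t *top = src - stride;
        unsigned dc0 = 0, dc1 = 0;

        for (int i = 0; i < 4; i++) {
            dc0 += top[i];
            dc1 += top[4 + i];
        }
        dc0 = (dc0 + 2) >> 2;   /* urshr #2 */
        dc1 = (dc1 + 2) >> 2;

        for (int y = 0; y < 8; y++)
            for (int x = 0; x < 8; x++)
                src[y * stride + x] = x < 4 ? dc0 : dc1;
    }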
diff --git a/media/ffvpx/libavcodec/aarch64/hpeldsp_init_aarch64.c b/media/ffvpx/libavcodec/aarch64/hpeldsp_init_aarch64.c
new file mode 100644
index 0000000000..144ae2bcc4
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/hpeldsp_init_aarch64.c
@@ -0,0 +1,123 @@
+/*
+ * ARM NEON optimised DSP functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "config.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavcodec/hpeldsp.h"
+
+void ff_put_pixels16_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels16_x2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels16_y2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels16_xy2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels8_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels8_x2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels8_y2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels8_xy2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+
+void ff_put_pixels16_x2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels16_y2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels16_xy2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels8_x2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels8_y2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_put_pixels8_xy2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+
+void ff_avg_pixels16_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_avg_pixels16_x2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_avg_pixels16_y2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_avg_pixels16_xy2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_avg_pixels8_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_avg_pixels8_x2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_avg_pixels8_y2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_avg_pixels8_xy2_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+
+void ff_avg_pixels16_x2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_avg_pixels16_y2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+void ff_avg_pixels16_xy2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h);
+
+av_cold void ff_hpeldsp_init_aarch64(HpelDSPContext *c, int flags)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+ c->put_pixels_tab[0][0] = ff_put_pixels16_neon;
+ c->put_pixels_tab[0][1] = ff_put_pixels16_x2_neon;
+ c->put_pixels_tab[0][2] = ff_put_pixels16_y2_neon;
+ c->put_pixels_tab[0][3] = ff_put_pixels16_xy2_neon;
+ c->put_pixels_tab[1][0] = ff_put_pixels8_neon;
+ c->put_pixels_tab[1][1] = ff_put_pixels8_x2_neon;
+ c->put_pixels_tab[1][2] = ff_put_pixels8_y2_neon;
+ c->put_pixels_tab[1][3] = ff_put_pixels8_xy2_neon;
+
+ c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_neon;
+ c->put_no_rnd_pixels_tab[0][1] = ff_put_pixels16_x2_no_rnd_neon;
+ c->put_no_rnd_pixels_tab[0][2] = ff_put_pixels16_y2_no_rnd_neon;
+ c->put_no_rnd_pixels_tab[0][3] = ff_put_pixels16_xy2_no_rnd_neon;
+ c->put_no_rnd_pixels_tab[1][0] = ff_put_pixels8_neon;
+ c->put_no_rnd_pixels_tab[1][1] = ff_put_pixels8_x2_no_rnd_neon;
+ c->put_no_rnd_pixels_tab[1][2] = ff_put_pixels8_y2_no_rnd_neon;
+ c->put_no_rnd_pixels_tab[1][3] = ff_put_pixels8_xy2_no_rnd_neon;
+
+ c->avg_pixels_tab[0][0] = ff_avg_pixels16_neon;
+ c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_neon;
+ c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_neon;
+ c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_neon;
+ c->avg_pixels_tab[1][0] = ff_avg_pixels8_neon;
+ c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_neon;
+ c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_neon;
+ c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_neon;
+
+ c->avg_no_rnd_pixels_tab[0] = ff_avg_pixels16_neon;
+ c->avg_no_rnd_pixels_tab[1] = ff_avg_pixels16_x2_no_rnd_neon;
+ c->avg_no_rnd_pixels_tab[2] = ff_avg_pixels16_y2_no_rnd_neon;
+ c->avg_no_rnd_pixels_tab[3] = ff_avg_pixels16_xy2_no_rnd_neon;
+ }
+}
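For context on the table wiring above (illustrative commentary, not part of the patch): the first index of put_pixels_tab/avg_pixels_tab selects the block width (0 = 16 pixels, 1 = 8), the second the half-pel position (0 = integer, 1 = horizontal half, 2 = vertical half, 3 = both). The _no_rnd variants differ only in how the average is rounded, as in this scalar sketch of the horizontal-half case:

    #include <stddef.h>
    #include <stdint.h>

    /* Scalar model of put_pixels8_x2 (rnd = 1, urhadd) and its no_rnd
     * variant (rnd = 0, uhadd); the rnd flag here is illustrative, the
     * real code generates two separate functions. */
    void put_pixels8_x2_ref(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h, int rnd)
    {
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < 8; x++) {
                int a = pixels[x], b = pixels[x + 1];
                block[x] = rnd ? (a + b + 1) >> 1 : (a + b) >> 1;
            }
            pixels += line_size;
            block  += line_size;
        }
    }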
diff --git a/media/ffvpx/libavcodec/aarch64/hpeldsp_neon.S b/media/ffvpx/libavcodec/aarch64/hpeldsp_neon.S
new file mode 100644
index 0000000000..a491c173bb
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/hpeldsp_neon.S
@@ -0,0 +1,397 @@
+/*
+ * ARM NEON optimised DSP functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+.macro pixels16 rnd=1, avg=0
+ .if \avg
+ mov x12, x0
+ .endif
+1: ld1 {v0.16B}, [x1], x2
+ ld1 {v1.16B}, [x1], x2
+ ld1 {v2.16B}, [x1], x2
+ ld1 {v3.16B}, [x1], x2
+ .if \avg
+ ld1 {v4.16B}, [x12], x2
+ urhadd v0.16B, v0.16B, v4.16B
+ ld1 {v5.16B}, [x12], x2
+ urhadd v1.16B, v1.16B, v5.16B
+ ld1 {v6.16B}, [x12], x2
+ urhadd v2.16B, v2.16B, v6.16B
+ ld1 {v7.16B}, [x12], x2
+ urhadd v3.16B, v3.16B, v7.16B
+ .endif
+ subs w3, w3, #4
+ st1 {v0.16B}, [x0], x2
+ st1 {v1.16B}, [x0], x2
+ st1 {v2.16B}, [x0], x2
+ st1 {v3.16B}, [x0], x2
+ b.ne 1b
+ ret
+.endm
+
+.macro pixels16_x2 rnd=1, avg=0
+1: ld1 {v0.16B, v1.16B}, [x1], x2
+ ld1 {v2.16B, v3.16B}, [x1], x2
+ subs w3, w3, #2
+ ext v1.16B, v0.16B, v1.16B, #1
+ avg v0.16B, v0.16B, v1.16B
+ ext v3.16B, v2.16B, v3.16B, #1
+ avg v2.16B, v2.16B, v3.16B
+ .if \avg
+ ld1 {v1.16B}, [x0], x2
+ ld1 {v3.16B}, [x0]
+ urhadd v0.16B, v0.16B, v1.16B
+ urhadd v2.16B, v2.16B, v3.16B
+ sub x0, x0, x2
+ .endif
+ st1 {v0.16B}, [x0], x2
+ st1 {v2.16B}, [x0], x2
+ b.ne 1b
+ ret
+.endm
+
+.macro pixels16_y2 rnd=1, avg=0
+ sub w3, w3, #2
+ ld1 {v0.16B}, [x1], x2
+ ld1 {v1.16B}, [x1], x2
+1: subs w3, w3, #2
+ avg v2.16B, v0.16B, v1.16B
+ ld1 {v0.16B}, [x1], x2
+ avg v3.16B, v0.16B, v1.16B
+ ld1 {v1.16B}, [x1], x2
+ .if \avg
+ ld1 {v4.16B}, [x0], x2
+ ld1 {v5.16B}, [x0]
+ urhadd v2.16B, v2.16B, v4.16B
+ urhadd v3.16B, v3.16B, v5.16B
+ sub x0, x0, x2
+ .endif
+ st1 {v2.16B}, [x0], x2
+ st1 {v3.16B}, [x0], x2
+ b.ne 1b
+
+ avg v2.16B, v0.16B, v1.16B
+ ld1 {v0.16B}, [x1], x2
+ avg v3.16B, v0.16B, v1.16B
+ .if \avg
+ ld1 {v4.16B}, [x0], x2
+ ld1 {v5.16B}, [x0]
+ urhadd v2.16B, v2.16B, v4.16B
+ urhadd v3.16B, v3.16B, v5.16B
+ sub x0, x0, x2
+ .endif
+ st1 {v2.16B}, [x0], x2
+ st1 {v3.16B}, [x0], x2
+
+ ret
+.endm
+
+.macro pixels16_xy2 rnd=1, avg=0
+ sub w3, w3, #2
+ ld1 {v0.16B, v1.16B}, [x1], x2
+ ld1 {v4.16B, v5.16B}, [x1], x2
+NRND movi v26.8H, #1
+ ext v1.16B, v0.16B, v1.16B, #1
+ ext v5.16B, v4.16B, v5.16B, #1
+ uaddl v16.8H, v0.8B, v1.8B
+ uaddl2 v20.8H, v0.16B, v1.16B
+ uaddl v18.8H, v4.8B, v5.8B
+ uaddl2 v22.8H, v4.16B, v5.16B
+1: subs w3, w3, #2
+ ld1 {v0.16B, v1.16B}, [x1], x2
+ add v24.8H, v16.8H, v18.8H
+NRND add v24.8H, v24.8H, v26.8H
+ ext v30.16B, v0.16B, v1.16B, #1
+ add v1.8H, v20.8H, v22.8H
+ mshrn v28.8B, v24.8H, #2
+NRND add v1.8H, v1.8H, v26.8H
+ mshrn2 v28.16B, v1.8H, #2
+ .if \avg
+ ld1 {v16.16B}, [x0]
+ urhadd v28.16B, v28.16B, v16.16B
+ .endif
+ uaddl v16.8H, v0.8B, v30.8B
+ ld1 {v2.16B, v3.16B}, [x1], x2
+ uaddl2 v20.8H, v0.16B, v30.16B
+ st1 {v28.16B}, [x0], x2
+ add v24.8H, v16.8H, v18.8H
+NRND add v24.8H, v24.8H, v26.8H
+ ext v3.16B, v2.16B, v3.16B, #1
+ add v0.8H, v20.8H, v22.8H
+ mshrn v30.8B, v24.8H, #2
+NRND add v0.8H, v0.8H, v26.8H
+ mshrn2 v30.16B, v0.8H, #2
+ .if \avg
+ ld1 {v18.16B}, [x0]
+ urhadd v30.16B, v30.16B, v18.16B
+ .endif
+ uaddl v18.8H, v2.8B, v3.8B
+ uaddl2 v22.8H, v2.16B, v3.16B
+ st1 {v30.16B}, [x0], x2
+ b.gt 1b
+
+ ld1 {v0.16B, v1.16B}, [x1], x2
+ add v24.8H, v16.8H, v18.8H
+NRND add v24.8H, v24.8H, v26.8H
+ ext v30.16B, v0.16B, v1.16B, #1
+ add v1.8H, v20.8H, v22.8H
+ mshrn v28.8B, v24.8H, #2
+NRND add v1.8H, v1.8H, v26.8H
+ mshrn2 v28.16B, v1.8H, #2
+ .if \avg
+ ld1 {v16.16B}, [x0]
+ urhadd v28.16B, v28.16B, v16.16B
+ .endif
+ uaddl v16.8H, v0.8B, v30.8B
+ uaddl2 v20.8H, v0.16B, v30.16B
+ st1 {v28.16B}, [x0], x2
+ add v24.8H, v16.8H, v18.8H
+NRND add v24.8H, v24.8H, v26.8H
+ add v0.8H, v20.8H, v22.8H
+ mshrn v30.8B, v24.8H, #2
+NRND add v0.8H, v0.8H, v26.8H
+ mshrn2 v30.16B, v0.8H, #2
+ .if \avg
+ ld1 {v18.16B}, [x0]
+ urhadd v30.16B, v30.16B, v18.16B
+ .endif
+ st1 {v30.16B}, [x0], x2
+
+ ret
+.endm
+
+.macro pixels8 rnd=1, avg=0
+1: ld1 {v0.8B}, [x1], x2
+ ld1 {v1.8B}, [x1], x2
+ ld1 {v2.8B}, [x1], x2
+ ld1 {v3.8B}, [x1], x2
+ .if \avg
+ ld1 {v4.8B}, [x0], x2
+ urhadd v0.8B, v0.8B, v4.8B
+ ld1 {v5.8B}, [x0], x2
+ urhadd v1.8B, v1.8B, v5.8B
+ ld1 {v6.8B}, [x0], x2
+ urhadd v2.8B, v2.8B, v6.8B
+ ld1 {v7.8B}, [x0], x2
+ urhadd v3.8B, v3.8B, v7.8B
+ sub x0, x0, x2, lsl #2
+ .endif
+ subs w3, w3, #4
+ st1 {v0.8B}, [x0], x2
+ st1 {v1.8B}, [x0], x2
+ st1 {v2.8B}, [x0], x2
+ st1 {v3.8B}, [x0], x2
+ b.ne 1b
+ ret
+.endm
+
+.macro pixels8_x2 rnd=1, avg=0
+1: ld1 {v0.8B, v1.8B}, [x1], x2
+ ext v1.8B, v0.8B, v1.8B, #1
+ ld1 {v2.8B, v3.8B}, [x1], x2
+ ext v3.8B, v2.8B, v3.8B, #1
+ subs w3, w3, #2
+ avg v0.8B, v0.8B, v1.8B
+ avg v2.8B, v2.8B, v3.8B
+ .if \avg
+ ld1 {v4.8B}, [x0], x2
+ ld1 {v5.8B}, [x0]
+ urhadd v0.8B, v0.8B, v4.8B
+ urhadd v2.8B, v2.8B, v5.8B
+ sub x0, x0, x2
+ .endif
+ st1 {v0.8B}, [x0], x2
+ st1 {v2.8B}, [x0], x2
+ b.ne 1b
+ ret
+.endm
+
+.macro pixels8_y2 rnd=1, avg=0
+ sub w3, w3, #2
+ ld1 {v0.8B}, [x1], x2
+ ld1 {v1.8B}, [x1], x2
+1: subs w3, w3, #2
+ avg v4.8B, v0.8B, v1.8B
+ ld1 {v0.8B}, [x1], x2
+ avg v5.8B, v0.8B, v1.8B
+ ld1 {v1.8B}, [x1], x2
+ .if \avg
+ ld1 {v2.8B}, [x0], x2
+ ld1 {v3.8B}, [x0]
+ urhadd v4.8B, v4.8B, v2.8B
+ urhadd v5.8B, v5.8B, v3.8B
+ sub x0, x0, x2
+ .endif
+ st1 {v4.8B}, [x0], x2
+ st1 {v5.8B}, [x0], x2
+ b.ne 1b
+
+ avg v4.8B, v0.8B, v1.8B
+ ld1 {v0.8B}, [x1], x2
+ avg v5.8B, v0.8B, v1.8B
+ .if \avg
+ ld1 {v2.8B}, [x0], x2
+ ld1 {v3.8B}, [x0]
+ urhadd v4.8B, v4.8B, v2.8B
+ urhadd v5.8B, v5.8B, v3.8B
+ sub x0, x0, x2
+ .endif
+ st1 {v4.8B}, [x0], x2
+ st1 {v5.8B}, [x0], x2
+
+ ret
+.endm
+
+.macro pixels8_xy2 rnd=1, avg=0
+ sub w3, w3, #2
+ ld1 {v0.16B}, [x1], x2
+ ld1 {v1.16B}, [x1], x2
+NRND movi v19.8H, #1
+ ext v4.16B, v0.16B, v4.16B, #1
+ ext v6.16B, v1.16B, v6.16B, #1
+ uaddl v16.8H, v0.8B, v4.8B
+ uaddl v17.8H, v1.8B, v6.8B
+1: subs w3, w3, #2
+ ld1 {v0.16B}, [x1], x2
+ add v18.8H, v16.8H, v17.8H
+ ext v4.16B, v0.16B, v4.16B, #1
+NRND add v18.8H, v18.8H, v19.8H
+ uaddl v16.8H, v0.8B, v4.8B
+ mshrn v5.8B, v18.8H, #2
+ ld1 {v1.16B}, [x1], x2
+ add v18.8H, v16.8H, v17.8H
+ .if \avg
+ ld1 {v7.8B}, [x0]
+ urhadd v5.8B, v5.8B, v7.8B
+ .endif
+NRND add v18.8H, v18.8H, v19.8H
+ st1 {v5.8B}, [x0], x2
+ mshrn v7.8B, v18.8H, #2
+ .if \avg
+ ld1 {v5.8B}, [x0]
+ urhadd v7.8B, v7.8B, v5.8B
+ .endif
+ ext v6.16B, v1.16B, v6.16B, #1
+ uaddl v17.8H, v1.8B, v6.8B
+ st1 {v7.8B}, [x0], x2
+ b.gt 1b
+
+ ld1 {v0.16B}, [x1], x2
+ add v18.8H, v16.8H, v17.8H
+ ext v4.16B, v0.16B, v4.16B, #1
+NRND add v18.8H, v18.8H, v19.8H
+ uaddl v16.8H, v0.8B, v4.8B
+ mshrn v5.8B, v18.8H, #2
+ add v18.8H, v16.8H, v17.8H
+ .if \avg
+ ld1 {v7.8B}, [x0]
+ urhadd v5.8B, v5.8B, v7.8B
+ .endif
+NRND add v18.8H, v18.8H, v19.8H
+ st1 {v5.8B}, [x0], x2
+ mshrn v7.8B, v18.8H, #2
+ .if \avg
+ ld1 {v5.8B}, [x0]
+ urhadd v7.8B, v7.8B, v5.8B
+ .endif
+ st1 {v7.8B}, [x0], x2
+
+ ret
+.endm
+
+.macro pixfunc pfx, name, suf, rnd=1, avg=0
+ .if \rnd
+ .macro avg rd, rn, rm
+ urhadd \rd, \rn, \rm
+ .endm
+ .macro mshrn rd, rn, rm
+ rshrn \rd, \rn, \rm
+ .endm
+ .macro mshrn2 rd, rn, rm
+ rshrn2 \rd, \rn, \rm
+ .endm
+ .macro NRND insn:vararg
+ .endm
+ .else
+ .macro avg rd, rn, rm
+ uhadd \rd, \rn, \rm
+ .endm
+ .macro mshrn rd, rn, rm
+ shrn \rd, \rn, \rm
+ .endm
+ .macro mshrn2 rd, rn, rm
+ shrn2 \rd, \rn, \rm
+ .endm
+ .macro NRND insn:vararg
+ \insn
+ .endm
+ .endif
+function ff_\pfx\name\suf\()_neon, export=1
+ \name \rnd, \avg
+endfunc
+ .purgem avg
+ .purgem mshrn
+ .purgem mshrn2
+ .purgem NRND
+.endm
+
+.macro pixfunc2 pfx, name, avg=0
+ pixfunc \pfx, \name, rnd=1, avg=\avg
+ pixfunc \pfx, \name, _no_rnd, rnd=0, avg=\avg
+.endm
+
+function ff_put_h264_qpel16_mc00_neon, export=1
+ mov w3, #16
+endfunc
+
+ pixfunc put_, pixels16, avg=0
+ pixfunc2 put_, pixels16_x2, avg=0
+ pixfunc2 put_, pixels16_y2, avg=0
+ pixfunc2 put_, pixels16_xy2, avg=0
+
+function ff_avg_h264_qpel16_mc00_neon, export=1
+ mov w3, #16
+endfunc
+
+ pixfunc avg_, pixels16, avg=1
+ pixfunc2 avg_, pixels16_x2, avg=1
+ pixfunc2 avg_, pixels16_y2, avg=1
+ pixfunc2 avg_, pixels16_xy2, avg=1
+
+function ff_put_h264_qpel8_mc00_neon, export=1
+ mov w3, #8
+endfunc
+
+ pixfunc put_, pixels8, avg=0
+ pixfunc2 put_, pixels8_x2, avg=0
+ pixfunc2 put_, pixels8_y2, avg=0
+ pixfunc2 put_, pixels8_xy2, avg=0
+
+function ff_avg_h264_qpel8_mc00_neon, export=1
+ mov w3, #8
+endfunc
+
+ pixfunc avg_, pixels8, avg=1
+ pixfunc avg_, pixels8_x2, avg=1
+ pixfunc avg_, pixels8_y2, avg=1
+ pixfunc avg_, pixels8_xy2, avg=1
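Two things worth noting about the file above, as illustrative commentary rather than part of the patch: the pixfunc/pixfunc2 macros stamp out rounded and non-rounded variants of each kernel by redefining avg, mshrn and NRND (urhadd/rshrn with the NRND lines dropped for rnd=1; uhadd/shrn plus an extra +1 bias for rnd=0), and the ff_*_h264_qpel*_mc00_neon entry points simply load the block height into w3 and fall through into the pixels function generated right after them. A scalar sketch of the xy2 (half-pel in both directions) kernel and its two rounding modes:

    #include <stddef.h>
    #include <stdint.h>

    /* 2x2 bilinear average: bias 2 for the rounded variant (rshrn #2),
     * bias 1 for no_rnd (NRND add of 1 followed by shrn #2). */
    void put_pixels8_xy2_ref(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h, int rnd)
    {
        int bias = rnd ? 2 : 1;

        for (int y = 0; y < h; y++) {
            for (int x = 0; x < 8; x++) {
                int sum = pixels[x] + pixels[x + 1] +
                          pixels[x + line_size] + pixels[x + line_size + 1];
                block[x] = (sum + bias) >> 2;
            }
            pixels += line_size;
            block  += line_size;
        }
    }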
diff --git a/media/ffvpx/libavcodec/aarch64/idct.h b/media/ffvpx/libavcodec/aarch64/idct.h
new file mode 100644
index 0000000000..97ee0a64af
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/idct.h
@@ -0,0 +1,29 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AARCH64_IDCT_H
+#define AVCODEC_AARCH64_IDCT_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+void ff_simple_idct_neon(int16_t *data);
+void ff_simple_idct_put_neon(uint8_t *dest, ptrdiff_t line_size, int16_t *data);
+void ff_simple_idct_add_neon(uint8_t *dest, ptrdiff_t line_size, int16_t *data);
+
+#endif /* AVCODEC_AARCH64_IDCT_H */
diff --git a/media/ffvpx/libavcodec/aarch64/idctdsp_init_aarch64.c b/media/ffvpx/libavcodec/aarch64/idctdsp_init_aarch64.c
new file mode 100644
index 0000000000..eec21aa5a2
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/idctdsp_init_aarch64.c
@@ -0,0 +1,55 @@
+/*
+ * ARM-NEON-optimized IDCT functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2017 Matthieu Bouron <matthieu.bouron@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/arm/cpu.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/idctdsp.h"
+#include "idct.h"
+
+void ff_put_pixels_clamped_neon(const int16_t *, uint8_t *, ptrdiff_t);
+void ff_put_signed_pixels_clamped_neon(const int16_t *, uint8_t *, ptrdiff_t);
+void ff_add_pixels_clamped_neon(const int16_t *, uint8_t *, ptrdiff_t);
+
+av_cold void ff_idctdsp_init_aarch64(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+ if (!avctx->lowres && !high_bit_depth) {
+ if (avctx->idct_algo == FF_IDCT_AUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLENEON) {
+ c->idct_put = ff_simple_idct_put_neon;
+ c->idct_add = ff_simple_idct_add_neon;
+ c->idct = ff_simple_idct_neon;
+ c->perm_type = FF_IDCT_PERM_PARTTRANS;
+ }
+ }
+
+ c->add_pixels_clamped = ff_add_pixels_clamped_neon;
+ c->put_pixels_clamped = ff_put_pixels_clamped_neon;
+ c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_neon;
+ }
+}
diff --git a/media/ffvpx/libavcodec/aarch64/idctdsp_neon.S b/media/ffvpx/libavcodec/aarch64/idctdsp_neon.S
new file mode 100644
index 0000000000..7f47611206
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/idctdsp_neon.S
@@ -0,0 +1,130 @@
+/*
+ * IDCT AArch64 NEON optimisations
+ *
+ * Copyright (c) 2022 Ben Avison <bavison@riscosopen.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+// Clamp 16-bit signed block coefficients to unsigned 8-bit
+// On entry:
+// x0 -> array of 64x 16-bit coefficients
+// x1 -> 8-bit results
+// x2 = row stride for results, bytes
+function ff_put_pixels_clamped_neon, export=1
+ ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], #64
+ ld1 {v4.16b, v5.16b, v6.16b, v7.16b}, [x0]
+ sqxtun v0.8b, v0.8h
+ sqxtun v1.8b, v1.8h
+ sqxtun v2.8b, v2.8h
+ sqxtun v3.8b, v3.8h
+ sqxtun v4.8b, v4.8h
+ st1 {v0.8b}, [x1], x2
+ sqxtun v0.8b, v5.8h
+ st1 {v1.8b}, [x1], x2
+ sqxtun v1.8b, v6.8h
+ st1 {v2.8b}, [x1], x2
+ sqxtun v2.8b, v7.8h
+ st1 {v3.8b}, [x1], x2
+ st1 {v4.8b}, [x1], x2
+ st1 {v0.8b}, [x1], x2
+ st1 {v1.8b}, [x1], x2
+ st1 {v2.8b}, [x1]
+ ret
+endfunc
+
+// Clamp 16-bit signed block coefficients to signed 8-bit (biased by 128)
+// On entry:
+// x0 -> array of 64x 16-bit coefficients
+// x1 -> 8-bit results
+// x2 = row stride for results, bytes
+function ff_put_signed_pixels_clamped_neon, export=1
+ ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], #64
+ movi v4.8b, #128
+ ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x0]
+ sqxtn v0.8b, v0.8h
+ sqxtn v1.8b, v1.8h
+ sqxtn v2.8b, v2.8h
+ sqxtn v3.8b, v3.8h
+ sqxtn v5.8b, v16.8h
+ add v0.8b, v0.8b, v4.8b
+ sqxtn v6.8b, v17.8h
+ add v1.8b, v1.8b, v4.8b
+ sqxtn v7.8b, v18.8h
+ add v2.8b, v2.8b, v4.8b
+ sqxtn v16.8b, v19.8h
+ add v3.8b, v3.8b, v4.8b
+ st1 {v0.8b}, [x1], x2
+ add v0.8b, v5.8b, v4.8b
+ st1 {v1.8b}, [x1], x2
+ add v1.8b, v6.8b, v4.8b
+ st1 {v2.8b}, [x1], x2
+ add v2.8b, v7.8b, v4.8b
+ st1 {v3.8b}, [x1], x2
+ add v3.8b, v16.8b, v4.8b
+ st1 {v0.8b}, [x1], x2
+ st1 {v1.8b}, [x1], x2
+ st1 {v2.8b}, [x1], x2
+ st1 {v3.8b}, [x1]
+ ret
+endfunc
+
+// Add 16-bit signed block coefficients to unsigned 8-bit
+// On entry:
+// x0 -> array of 64x 16-bit coefficients
+// x1 -> 8-bit input and results
+// x2 = row stride for 8-bit input and results, bytes
+function ff_add_pixels_clamped_neon, export=1
+ ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], #64
+ mov x3, x1
+ ld1 {v4.8b}, [x1], x2
+ ld1 {v5.8b}, [x1], x2
+ ld1 {v6.8b}, [x1], x2
+ ld1 {v7.8b}, [x1], x2
+ ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x0]
+ uaddw v0.8h, v0.8h, v4.8b
+ uaddw v1.8h, v1.8h, v5.8b
+ uaddw v2.8h, v2.8h, v6.8b
+ ld1 {v4.8b}, [x1], x2
+ uaddw v3.8h, v3.8h, v7.8b
+ ld1 {v5.8b}, [x1], x2
+ sqxtun v0.8b, v0.8h
+ ld1 {v6.8b}, [x1], x2
+ sqxtun v1.8b, v1.8h
+ ld1 {v7.8b}, [x1]
+ sqxtun v2.8b, v2.8h
+ sqxtun v3.8b, v3.8h
+ uaddw v4.8h, v16.8h, v4.8b
+ st1 {v0.8b}, [x3], x2
+ uaddw v0.8h, v17.8h, v5.8b
+ st1 {v1.8b}, [x3], x2
+ uaddw v1.8h, v18.8h, v6.8b
+ st1 {v2.8b}, [x3], x2
+ uaddw v2.8h, v19.8h, v7.8b
+ sqxtun v4.8b, v4.8h
+ sqxtun v0.8b, v0.8h
+ st1 {v3.8b}, [x3], x2
+ sqxtun v1.8b, v1.8h
+ sqxtun v2.8b, v2.8h
+ st1 {v4.8b}, [x3], x2
+ st1 {v0.8b}, [x3], x2
+ st1 {v1.8b}, [x3], x2
+ st1 {v2.8b}, [x3]
+ ret
+endfunc
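As a reading aid for the uaddw/sqxtun pattern above (illustrative only): ff_add_pixels_clamped_neon adds an 8x8 block of 16-bit coefficients to the existing 8-bit pixels and saturates to [0, 255]; ff_put_pixels_clamped_neon writes the clamped coefficients directly, and the _signed_ variant clamps to [-128, 127] and re-biases by 128. Scalar model of the add case:

    #include <stddef.h>
    #include <stdint.h>

    void add_pixels_clamped_ref(const int16_t *block, uint8_t *pixels,
                                ptrdiff_t line_size)
    {
        for (int y = 0; y < 8; y++) {
            for (int x = 0; x < 8; x++) {
                int v = pixels[x] + block[x];
                pixels[x] = v < 0 ? 0 : v > 255 ? 255 : v;
            }
            block  += 8;
            pixels += line_size;
        }
    }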
diff --git a/media/ffvpx/libavcodec/aarch64/mdct_neon.S b/media/ffvpx/libavcodec/aarch64/mdct_neon.S
new file mode 100644
index 0000000000..98b09bf1ab
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/mdct_neon.S
@@ -0,0 +1,326 @@
+/*
+ * AArch64 NEON optimised MDCT
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+function ff_imdct_half_neon, export=1
+ stp x19, x20, [sp, #-32]!
+ AARCH64_SIGN_LINK_REGISTER
+ str x30, [sp, #16]
+ mov x12, #1
+ ldr w14, [x0, #28] // mdct_bits
+ ldr x4, [x0, #32] // tcos
+ ldr x3, [x0, #8] // revtab
+ lsl x12, x12, x14 // n = 1 << nbits
+ lsr x14, x12, #2 // n4 = n >> 2
+ add x7, x2, x12, lsl #1
+ mov x12, #-16
+ sub x7, x7, #16
+
+ ld2 {v16.2s,v17.2s}, [x7], x12 // d16=x,n1 d17=x,n0
+ ld2 {v0.2s,v1.2s}, [x2], #16 // d0 =m0,x d1 =m1,x
+ rev64 v17.2s, v17.2s
+ ld2 {v2.2s,v3.2s}, [x4], #16 // d2=c0,c1 d3=s0,s1
+ fmul v6.2s, v17.2s, v2.2s
+ fmul v7.2s, v0.2s, v2.2s
+1:
+ subs x14, x14, #2
+ ldr w6, [x3], #4
+ fmul v4.2s, v0.2s, v3.2s
+ fmul v5.2s, v17.2s, v3.2s
+ fsub v4.2s, v6.2s, v4.2s
+ fadd v5.2s, v5.2s, v7.2s
+ ubfm x8, x6, #16, #31
+ ubfm x6, x6, #0, #15
+ add x8, x1, x8, lsl #3
+ add x6, x1, x6, lsl #3
+ b.eq 2f
+ ld2 {v16.2s,v17.2s}, [x7], x12
+ ld2 {v0.2s,v1.2s}, [x2], #16
+ rev64 v17.2s, v17.2s
+ ld2 {v2.2s,v3.2s}, [x4], #16 // d2=c0,c1 d3=s0,s1
+ fmul v6.2s, v17.2s, v2.2s
+ fmul v7.2s, v0.2s, v2.2s
+ st2 {v4.s,v5.s}[0], [x6]
+ st2 {v4.s,v5.s}[1], [x8]
+ b 1b
+2:
+ st2 {v4.s,v5.s}[0], [x6]
+ st2 {v4.s,v5.s}[1], [x8]
+
+ mov x19, x0
+ mov x20, x1
+ bl X(ff_fft_calc_neon)
+
+ mov x12, #1
+ ldr w14, [x19, #28] // mdct_bits
+ ldr x4, [x19, #32] // tcos
+ lsl x12, x12, x14 // n = 1 << nbits
+ lsr x14, x12, #3 // n8 = n >> 3
+
+ add x4, x4, x14, lsl #3
+ add x6, x20, x14, lsl #3
+ sub x1, x4, #16
+ sub x3, x6, #16
+
+ mov x7, #-16
+ mov x8, x6
+ mov x0, x3
+
+ ld2 {v0.2s,v1.2s}, [x3], x7 // d0 =i1,r1 d1 =i0,r0
+ ld2 {v20.2s,v21.2s},[x6], #16 // d20=i2,r2 d21=i3,r3
+ ld2 {v16.2s,v17.2s},[x1], x7 // d16=c1,c0 d18=s1,s0
+3:
+ subs x14, x14, #2
+ fmul v7.2s, v0.2s, v17.2s
+ ld2 {v18.2s,v19.2s},[x4], #16 // d17=c2,c3 d19=s2,s3
+ fmul v4.2s, v1.2s, v17.2s
+ fmul v6.2s, v21.2s, v19.2s
+ fmul v5.2s, v20.2s, v19.2s
+ fmul v22.2s, v1.2s, v16.2s
+ fmul v23.2s, v21.2s, v18.2s
+ fmul v24.2s, v0.2s, v16.2s
+ fmul v25.2s, v20.2s, v18.2s
+ fadd v7.2s, v7.2s, v22.2s
+ fadd v5.2s, v5.2s, v23.2s
+ fsub v4.2s, v4.2s, v24.2s
+ fsub v6.2s, v6.2s, v25.2s
+ b.eq 4f
+ ld2 {v0.2s,v1.2s}, [x3], x7
+ ld2 {v20.2s,v21.2s},[x6], #16
+ ld2 {v16.2s,v17.2s},[x1], x7 // d16=c1,c0 d18=s1,s0
+ rev64 v5.2s, v5.2s
+ rev64 v7.2s, v7.2s
+ st2 {v4.2s,v5.2s}, [x0], x7
+ st2 {v6.2s,v7.2s}, [x8], #16
+ b 3b
+4:
+ rev64 v5.2s, v5.2s
+ rev64 v7.2s, v7.2s
+ st2 {v4.2s,v5.2s}, [x0]
+ st2 {v6.2s,v7.2s}, [x8]
+
+ ldr x30, [sp, #16]
+ AARCH64_VALIDATE_LINK_REGISTER
+ ldp x19, x20, [sp], #32
+
+ ret
+endfunc
+
+function ff_imdct_calc_neon, export=1
+ stp x19, x20, [sp, #-32]!
+ AARCH64_SIGN_LINK_REGISTER
+ str x30, [sp, #16]
+ ldr w3, [x0, #28] // mdct_bits
+ mov x19, #1
+ mov x20, x1
+ lsl x19, x19, x3
+ add x1, x1, x19
+
+ bl X(ff_imdct_half_neon)
+
+ add x0, x20, x19, lsl #2
+ add x1, x20, x19, lsl #1
+ sub x0, x0, #8
+ sub x2, x1, #16
+ mov x3, #-16
+ mov x6, #-8
+1:
+ ld1 {v0.4s}, [x2], x3
+ prfum pldl1keep, [x0, #-16]
+ rev64 v0.4s, v0.4s
+ ld1 {v2.2s,v3.2s}, [x1], #16
+ fneg v4.4s, v0.4s
+ prfum pldl1keep, [x2, #-16]
+ rev64 v2.2s, v2.2s
+ rev64 v3.2s, v3.2s
+ ext v4.16b, v4.16b, v4.16b, #8
+ st1 {v2.2s}, [x0], x6
+ st1 {v3.2s}, [x0], x6
+ st1 {v4.4s}, [x20], #16
+ subs x19, x19, #16
+ b.gt 1b
+
+ ldr x30, [sp, #16]
+ AARCH64_VALIDATE_LINK_REGISTER
+ ldp x19, x20, [sp], #32
+
+ ret
+endfunc
+
+
+function ff_mdct_calc_neon, export=1
+ stp x19, x20, [sp, #-32]!
+ AARCH64_SIGN_LINK_REGISTER
+ str x30, [sp, #16]
+
+ mov x12, #1
+ ldr w14, [x0, #28] // mdct_bits
+ ldr x4, [x0, #32] // tcos
+ ldr x3, [x0, #8] // revtab
+ lsl x14, x12, x14 // n = 1 << nbits
+ add x7, x2, x14 // in4u
+ sub x9, x7, #16 // in4d
+ add x2, x7, x14, lsl #1 // in3u
+ add x8, x9, x14, lsl #1 // in3d
+ add x5, x4, x14, lsl #1
+ sub x5, x5, #16
+ sub x3, x3, #4
+ mov x12, #-16
+ lsr x13, x14, #1
+
+ ld2 {v16.2s,v17.2s}, [x9], x12 // in0u0,in0u1 in4d1,in4d0
+ ld2 {v18.2s,v19.2s}, [x8], x12 // in2u0,in2u1 in3d1,in3d0
+ ld2 {v0.2s, v1.2s}, [x7], #16 // in4u0,in4u1 in2d1,in2d0
+ rev64 v17.2s, v17.2s // in4d0,in4d1 in3d0,in3d1
+ rev64 v19.2s, v19.2s // in4d0,in4d1 in3d0,in3d1
+ ld2 {v2.2s, v3.2s}, [x2], #16 // in3u0,in3u1 in1d1,in1d0
+ fsub v0.2s, v17.2s, v0.2s // in4d-in4u I
+ ld2 {v20.2s,v21.2s}, [x4], #16 // c0,c1 s0,s1
+ rev64 v1.2s, v1.2s // in2d0,in2d1 in1d0,in1d1
+ rev64 v3.2s, v3.2s // in2d0,in2d1 in1d0,in1d1
+ ld2 {v30.2s,v31.2s}, [x5], x12 // c2,c3 s2,s3
+ fadd v2.2s, v2.2s, v19.2s // in3u+in3d -R
+ fsub v16.2s, v16.2s, v1.2s // in0u-in2d R
+ fadd v18.2s, v18.2s, v3.2s // in2u+in1d -I
+1:
+ fmul v7.2s, v0.2s, v21.2s // I*s
+ ldr w10, [x3, x13]
+ fmul v6.2s, v2.2s, v20.2s // -R*c
+ ldr w6, [x3, #4]!
+ fmul v4.2s, v2.2s, v21.2s // -R*s
+ fmul v5.2s, v0.2s, v20.2s // I*c
+ fmul v24.2s, v16.2s, v30.2s // R*c
+ fmul v25.2s, v18.2s, v31.2s // -I*s
+ fmul v22.2s, v16.2s, v31.2s // R*s
+ fmul v23.2s, v18.2s, v30.2s // I*c
+ subs x14, x14, #16
+ subs x13, x13, #8
+ fsub v6.2s, v6.2s, v7.2s // -R*c-I*s
+ fadd v7.2s, v4.2s, v5.2s // -R*s+I*c
+ fsub v24.2s, v25.2s, v24.2s // I*s-R*c
+ fadd v25.2s, v22.2s, v23.2s // R*s-I*c
+ b.eq 1f
+ mov x12, #-16
+ ld2 {v16.2s,v17.2s}, [x9], x12 // in0u0,in0u1 in4d1,in4d0
+ ld2 {v18.2s,v19.2s}, [x8], x12 // in2u0,in2u1 in3d1,in3d0
+ fneg v7.2s, v7.2s // R*s-I*c
+ ld2 {v0.2s, v1.2s}, [x7], #16 // in4u0,in4u1 in2d1,in2d0
+ rev64 v17.2s, v17.2s // in4d0,in4d1 in3d0,in3d1
+ rev64 v19.2s, v19.2s // in4d0,in4d1 in3d0,in3d1
+ ld2 {v2.2s, v3.2s}, [x2], #16 // in3u0,in3u1 in1d1,in1d0
+ fsub v0.2s, v17.2s, v0.2s // in4d-in4u I
+ ld2 {v20.2s,v21.2s}, [x4], #16 // c0,c1 s0,s1
+ rev64 v1.2s, v1.2s // in2d0,in2d1 in1d0,in1d1
+ rev64 v3.2s, v3.2s // in2d0,in2d1 in1d0,in1d1
+ ld2 {v30.2s,v31.2s}, [x5], x12 // c2,c3 s2,s3
+ fadd v2.2s, v2.2s, v19.2s // in3u+in3d -R
+ fsub v16.2s, v16.2s, v1.2s // in0u-in2d R
+ fadd v18.2s, v18.2s, v3.2s // in2u+in1d -I
+ ubfm x12, x6, #16, #31
+ ubfm x6, x6, #0, #15
+ add x12, x1, x12, lsl #3
+ add x6, x1, x6, lsl #3
+ st2 {v6.s,v7.s}[0], [x6]
+ st2 {v6.s,v7.s}[1], [x12]
+ ubfm x6, x10, #16, #31
+ ubfm x10, x10, #0, #15
+ add x6 , x1, x6, lsl #3
+ add x10, x1, x10, lsl #3
+ st2 {v24.s,v25.s}[0], [x10]
+ st2 {v24.s,v25.s}[1], [x6]
+ b 1b
+1:
+ fneg v7.2s, v7.2s // R*s-I*c
+ ubfm x12, x6, #16, #31
+ ubfm x6, x6, #0, #15
+ add x12, x1, x12, lsl #3
+ add x6, x1, x6, lsl #3
+ st2 {v6.s,v7.s}[0], [x6]
+ st2 {v6.s,v7.s}[1], [x12]
+ ubfm x6, x10, #16, #31
+ ubfm x10, x10, #0, #15
+ add x6 , x1, x6, lsl #3
+ add x10, x1, x10, lsl #3
+ st2 {v24.s,v25.s}[0], [x10]
+ st2 {v24.s,v25.s}[1], [x6]
+
+ mov x19, x0
+ mov x20, x1
+ bl X(ff_fft_calc_neon)
+
+ mov x12, #1
+ ldr w14, [x19, #28] // mdct_bits
+ ldr x4, [x19, #32] // tcos
+ lsl x12, x12, x14 // n = 1 << nbits
+ lsr x14, x12, #3 // n8 = n >> 3
+
+ add x4, x4, x14, lsl #3
+ add x6, x20, x14, lsl #3
+ sub x1, x4, #16
+ sub x3, x6, #16
+
+ mov x7, #-16
+ mov x8, x6
+ mov x0, x3
+
+ ld2 {v0.2s,v1.2s}, [x3], x7 // d0 =r1,i1 d1 =r0,i0
+ ld2 {v20.2s,v21.2s}, [x6], #16 // d20=r2,i2 d21=r3,i3
+ ld2 {v16.2s,v17.2s}, [x1], x7 // c1,c0 s1,s0
+1:
+ subs x14, x14, #2
+ fmul v7.2s, v0.2s, v17.2s // r1*s1,r0*s0
+ ld2 {v18.2s,v19.2s}, [x4], #16 // c2,c3 s2,s3
+ fmul v4.2s, v1.2s, v17.2s // i1*s1,i0*s0
+ fmul v6.2s, v21.2s, v19.2s // i2*s2,i3*s3
+ fmul v5.2s, v20.2s, v19.2s // r2*s2,r3*s3
+ fmul v24.2s, v0.2s, v16.2s // r1*c1,r0*c0
+ fmul v25.2s, v20.2s, v18.2s // r2*c2,r3*c3
+ fmul v22.2s, v21.2s, v18.2s // i2*c2,i3*c3
+ fmul v23.2s, v1.2s, v16.2s // i1*c1,i0*c0
+ fadd v4.2s, v4.2s, v24.2s // i1*s1+r1*c1,i0*s0+r0*c0
+ fadd v6.2s, v6.2s, v25.2s // i2*s2+r2*c2,i3*s3+r3*c3
+ fsub v5.2s, v22.2s, v5.2s // i2*c2-r2*s2,i3*c3-r3*s3
+ fsub v7.2s, v23.2s, v7.2s // i1*c1-r1*s1,i0*c0-r0*s0
+ fneg v4.2s, v4.2s
+ fneg v6.2s, v6.2s
+ b.eq 1f
+ ld2 {v0.2s, v1.2s}, [x3], x7
+ ld2 {v20.2s,v21.2s}, [x6], #16
+ ld2 {v16.2s,v17.2s}, [x1], x7 // c1,c0 s1,s0
+ rev64 v5.2s, v5.2s
+ rev64 v7.2s, v7.2s
+ st2 {v4.2s,v5.2s}, [x0], x7
+ st2 {v6.2s,v7.2s}, [x8], #16
+ b 1b
+1:
+ rev64 v5.2s, v5.2s
+ rev64 v7.2s, v7.2s
+ st2 {v4.2s,v5.2s}, [x0]
+ st2 {v6.2s,v7.2s}, [x8]
+
+ ldr x30, [sp, #16]
+ AARCH64_VALIDATE_LINK_REGISTER
+ ldp x19, x20, [sp], #32
+
+ ret
+endfunc
diff --git a/media/ffvpx/libavcodec/aarch64/moz.build b/media/ffvpx/libavcodec/aarch64/moz.build
new file mode 100644
index 0000000000..7126a39648
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/moz.build
@@ -0,0 +1,59 @@
+## -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+## vim: set filetype=python:
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+if not CONFIG['MOZ_FFVPX_AUDIOONLY']:
+ SOURCES += [
+ 'fft_init_aarch64.c',
+ 'fft_neon.S',
+ 'h264chroma_init_aarch64.c',
+ 'h264cmc_neon.S',
+ 'h264dsp_init_aarch64.c',
+ 'h264dsp_neon.S',
+ 'h264idct_neon.S',
+ 'h264pred_init.c',
+ 'h264pred_neon.S',
+ 'hpeldsp_init_aarch64.c',
+ 'hpeldsp_neon.S',
+ 'idctdsp_init_aarch64.c',
+ 'idctdsp_neon.S',
+ 'mdct_neon.S',
+ 'mpegaudiodsp_init.c',
+ 'mpegaudiodsp_neon.S',
+ 'neon.S',
+ 'simple_idct_neon.S',
+ 'videodsp.S',
+ 'videodsp_init.c',
+ 'vp8dsp_init_aarch64.c',
+ 'vp8dsp_neon.S',
+ 'vp9dsp_init_10bpp_aarch64.c',
+ 'vp9dsp_init_12bpp_aarch64.c',
+ 'vp9dsp_init_aarch64.c',
+ 'vp9itxfm_16bpp_neon.S',
+ 'vp9itxfm_neon.S',
+ 'vp9lpf_16bpp_neon.S',
+ 'vp9lpf_neon.S',
+ 'vp9mc_16bpp_neon.S',
+ 'vp9mc_aarch64.S',
+ 'vp9mc_neon.S',
+ ]
+else:
+ SOURCES += [
+ 'fft_init_aarch64.c',
+ 'fft_neon.S',
+ 'idctdsp_init_aarch64.c',
+ 'idctdsp_neon.S',
+ 'mpegaudiodsp_init.c',
+ 'mpegaudiodsp_neon.S',
+ 'simple_idct_neon.S',
+ ]
+
+if CONFIG['OS_ARCH'] == 'WINNT':
+ USE_INTEGRATED_CLANGCL_AS = True
+ DEFINES['EXTERN_ASM'] = ''
+
+FINAL_LIBRARY = 'mozavcodec'
+
+include('/media/ffvpx/ffvpxcommon.mozbuild')
diff --git a/media/ffvpx/libavcodec/aarch64/mpegaudiodsp_init.c b/media/ffvpx/libavcodec/aarch64/mpegaudiodsp_init.c
new file mode 100644
index 0000000000..5d966af5f4
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/mpegaudiodsp_init.c
@@ -0,0 +1,40 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavcodec/mpegaudiodsp.h"
+#include "config.h"
+
+void ff_mpadsp_apply_window_fixed_neon(int32_t *synth_buf, int32_t *window,
+ int *dither, int16_t *samples, ptrdiff_t incr);
+void ff_mpadsp_apply_window_float_neon(float *synth_buf, float *window,
+ int *dither, float *samples, ptrdiff_t incr);
+
+av_cold void ff_mpadsp_init_aarch64(MPADSPContext *s)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+ s->apply_window_fixed = ff_mpadsp_apply_window_fixed_neon;
+ s->apply_window_float = ff_mpadsp_apply_window_float_neon;
+ }
+}
diff --git a/media/ffvpx/libavcodec/aarch64/mpegaudiodsp_neon.S b/media/ffvpx/libavcodec/aarch64/mpegaudiodsp_neon.S
new file mode 100644
index 0000000000..b6ef131228
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/mpegaudiodsp_neon.S
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+#define FRAC_BITS 23 // fractional bits for sb_samples and dct
+#define WFRAC_BITS 16 // fractional bits for window
+#define OUT_SHIFT (WFRAC_BITS + FRAC_BITS - 15)
+
+const tbl_rev128_s, align=4
+ .byte 12, 13, 14, 15
+ .byte 8, 9, 10, 11
+ .byte 4, 5, 6, 7
+ .byte 0, 1, 2, 3
+endconst
+
+.macro apply_window type, st
+function ff_mpadsp_apply_window_\type\()_neon, export=1
+ mov x7, x0
+ add x8, x0, #512<<2
+ ld1 {v0.4s,v1.4s,v2.4s,v3.4s}, [x7], #64
+ ld1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x7], #64
+ st1 {v0.4s,v1.4s,v2.4s,v3.4s}, [x8], #64
+ st1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x8], #64
+ movrel x15, tbl_rev128_s
+ ld1 {v27.4s}, [x15]
+.ifc \type, fixed
+ lsl x4, x4, #1
+.else
+ lsl x4, x4, #2
+.endif
+ add x10, x0, #45<<2
+ add x0, x0, #16<<2
+ add x1, x1, #16<<2
+ add x5, x3, x4, lsl #5
+ sub x5, x5, x4 // samples2
+ neg x13, x4 // -incr
+ mov x9, #64<<2
+.ifc \type, fixed
+ ld1r {v16.2s}, [x2] // dither_state
+ sxtl v16.2d, v16.2s
+ movi v29.2d, #0
+ movi v30.2d, #(1<<OUT_SHIFT)-1
+ trn1 v31.2d, v29.2d, v30.2d
+ trn2 v30.2d, v30.2d, v29.2d
+ trn1 v16.2d, v16.2d, v29.2d
+.else
+ movi v16.4s, #0
+ movi v28.4s, #0
+.endif
+ mov x14, #4
+1:
+ mov x8, x0
+ sub x7, x1, #3<<2
+ sub x6, x1, x14, lsl #4
+ add x7, x7, x14, lsl #4
+ add x11, x6, #(32)<<2 // w + 32
+ add x12, x7, #(32)<<2 // w2 + 32
+ mov x15, #8
+ movi v17.2d, #0
+ movi v18.2d, #0
+ movi v19.2d, #0
+2:
+ subs x15, x15, #1
+ ld1 {v0.4s}, [x8], x9
+ ld1 {v1.4s}, [x10], x9
+ ld1 {v2.4s}, [x6], x9
+ ld1 {v3.4s}, [x7], x9
+ tbl v6.16b, {v0.16b}, v27.16b
+ tbl v7.16b, {v1.16b}, v27.16b
+ ld1 {v4.4s}, [x11], x9
+ ld1 {v5.4s}, [x12], x9
+ MLA v16, v2, v0
+ MLA2 v17, v2, v0
+ MLS v18, v3, v6
+ MLS2 v19, v3, v6
+ MLS v16, v4, v7
+ MLS2 v17, v4, v7
+ MLS v18, v5, v1
+ MLS2 v19, v5, v1
+ b.gt 2b
+
+ cmp x14, #4
+ sub x10, x10, #64<<5 // 64 * 8 * sizeof(int32_t)
+
+.ifc \type, fixed
+ and v28.16b, v16.16b, v30.16b
+ ext v28.16b, v29.16b, v28.16b, #8
+
+ b.eq 4f
+ round_sample v19, 1, 1
+4:
+ round_sample v16, 1, 0
+ shrn v16.2s, v16.2d, #OUT_SHIFT
+ round_sample v19, 0, 0
+ shrn v19.2s, v19.2d, #OUT_SHIFT
+ round_sample v17, 0, 1
+ round_sample v18, 1, 1
+ round_sample v17, 1, 0
+ shrn2 v16.4s, v17.2d, #OUT_SHIFT
+ round_sample v18, 0, 0
+ shrn2 v19.4s, v18.2d, #OUT_SHIFT
+ sqxtn v16.4h, v16.4s
+ sqxtn v18.4h, v19.4s
+.else
+ ext v18.16b, v18.16b, v18.16b, #8
+.endif
+
+ st1 {v16.\st\()}[0], [x3], x4
+ b.eq 4f
+ st1 {v18.\st\()}[1], [x5], x13
+4:
+ st1 {v16.\st\()}[1], [x3], x4
+ st1 {v18.\st\()}[0], [x5], x13
+ st1 {v16.\st\()}[2], [x3], x4
+ st1 {v18.\st\()}[3], [x5], x13
+ st1 {v16.\st\()}[3], [x3], x4
+ st1 {v18.\st\()}[2], [x5], x13
+
+ mov v16.16b, v28.16b
+
+ subs x14, x14, #1
+ add x0, x0, #4<<2
+ sub x10, x10, #4<<2
+ b.gt 1b
+
+// computing samples[16]
+ add x6, x1, #32<<2
+ ld1 {v0.2s}, [x6], x9
+ ld1 {v1.2s}, [x0], x9
+.rept 3
+ ld1 {v2.2s}, [x6], x9
+ ld1 {v3.2s}, [x0], x9
+ MLS v16, v0, v1
+ ld1 {v0.2s}, [x6], x9
+ ld1 {v1.2s}, [x0], x9
+ MLS v16, v2, v3
+.endr
+ ld1 {v2.2s}, [x6], x9
+ ld1 {v3.2s}, [x0], x9
+ MLS v16, v0, v1
+ MLS v16, v2, v3
+
+.ifc \type, fixed
+ and v28.16b, v16.16b, v30.16b
+ shrn v20.2s, v16.2d, #OUT_SHIFT
+ xtn v28.2s, v28.2d
+ sqxtn v20.4h, v20.4s
+ st1 {v28.s}[0], [x2] // save dither_state
+ st1 {v20.h}[0], [x3]
+.else
+ st1 {v16.s}[0], [x3]
+.endif
+
+ ret
+endfunc
+.purgem round_sample
+.purgem MLA
+.purgem MLA2
+.purgem MLS
+.purgem MLS2
+.endm
+
+
+.macro round_sample r, idx, next
+ add \r\().2d, \r\().2d, v28.2d
+.if \idx == 0
+ and v28.16b, \r\().16b, v30.16b
+.else // \idx == 1
+ and v28.16b, \r\().16b, v31.16b
+.endif
+.if \idx != \next
+ .if \next == 0
+ ext v28.16b, v28.16b, v29.16b, #8
+ .else
+ ext v28.16b, v29.16b, v28.16b, #8
+ .endif
+.endif
+.endm
+.macro MLA d, s1, s2
+ smlal \d\().2d, \s1\().2s, \s2\().2s
+.endm
+.macro MLA2 d, s1, s2
+ smlal2 \d\().2d, \s1\().4s, \s2\().4s
+.endm
+.macro MLS d, s1, s2
+ smlsl \d\().2d, \s1\().2s, \s2\().2s
+.endm
+.macro MLS2 d, s1, s2
+ smlsl2 \d\().2d, \s1\().4s, \s2\().4s
+.endm
+apply_window fixed, h
+
+
+// nothing to do for round_sample and ML{A,S}2
+.macro round_sample r, idx, next
+.endm
+.macro MLA2 d, s1, s2
+.endm
+.macro MLS2 d, s1, s2
+.endm
+.macro MLA d, s1, s2
+ fmla \d\().4s, \s1\().4s, \s2\().4s
+.endm
+.macro MLS d, s1, s2
+ fmls \d\().4s, \s1\().4s, \s2\().4s
+.endm
+apply_window float, s
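The fixed-point window above keeps the bits shifted out of each 64-bit accumulator and carries them into the next sample (the dither_state loaded with ld1r at the start and stored back at the end); the float path needs no such carry, so its round_sample macro is empty, and fmla on .4s already covers the lanes the fixed path needs MLA2/MLS2 for. A hedged scalar model of the fixed-point rounding step (names made up for illustration, not the FFmpeg C implementation):

    #include <stdint.h>

    #define FRAC_BITS  23
    #define WFRAC_BITS 16
    #define OUT_SHIFT  (WFRAC_BITS + FRAC_BITS - 15)

    /* Shift the accumulator down to 16 bits with saturation (sqxtn in the
     * asm) and keep the low OUT_SHIFT bits as carry for the next sample. */
    int16_t round_sample_ref(int64_t *sum)
    {
        int64_t out = *sum >> OUT_SHIFT;

        *sum &= (1LL << OUT_SHIFT) - 1;
        if (out >  32767) out =  32767;
        if (out < -32768) out = -32768;
        return (int16_t)out;
    }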
diff --git a/media/ffvpx/libavcodec/aarch64/neon.S b/media/ffvpx/libavcodec/aarch64/neon.S
new file mode 100644
index 0000000000..1ad32c359d
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/neon.S
@@ -0,0 +1,162 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+.macro transpose_8x8B r0, r1, r2, r3, r4, r5, r6, r7, r8, r9
+ trn1 \r8\().8B, \r0\().8B, \r1\().8B
+ trn2 \r9\().8B, \r0\().8B, \r1\().8B
+ trn1 \r1\().8B, \r2\().8B, \r3\().8B
+ trn2 \r3\().8B, \r2\().8B, \r3\().8B
+ trn1 \r0\().8B, \r4\().8B, \r5\().8B
+ trn2 \r5\().8B, \r4\().8B, \r5\().8B
+ trn1 \r2\().8B, \r6\().8B, \r7\().8B
+ trn2 \r7\().8B, \r6\().8B, \r7\().8B
+
+ trn1 \r4\().4H, \r0\().4H, \r2\().4H
+ trn2 \r2\().4H, \r0\().4H, \r2\().4H
+ trn1 \r6\().4H, \r5\().4H, \r7\().4H
+ trn2 \r7\().4H, \r5\().4H, \r7\().4H
+ trn1 \r5\().4H, \r9\().4H, \r3\().4H
+ trn2 \r9\().4H, \r9\().4H, \r3\().4H
+ trn1 \r3\().4H, \r8\().4H, \r1\().4H
+ trn2 \r8\().4H, \r8\().4H, \r1\().4H
+
+ trn1 \r0\().2S, \r3\().2S, \r4\().2S
+ trn2 \r4\().2S, \r3\().2S, \r4\().2S
+
+ trn1 \r1\().2S, \r5\().2S, \r6\().2S
+ trn2 \r5\().2S, \r5\().2S, \r6\().2S
+
+ trn2 \r6\().2S, \r8\().2S, \r2\().2S
+ trn1 \r2\().2S, \r8\().2S, \r2\().2S
+
+ trn1 \r3\().2S, \r9\().2S, \r7\().2S
+ trn2 \r7\().2S, \r9\().2S, \r7\().2S
+.endm
+
+.macro transpose_8x16B r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
+ trn1 \t0\().16B, \r0\().16B, \r1\().16B
+ trn2 \t1\().16B, \r0\().16B, \r1\().16B
+ trn1 \r1\().16B, \r2\().16B, \r3\().16B
+ trn2 \r3\().16B, \r2\().16B, \r3\().16B
+ trn1 \r0\().16B, \r4\().16B, \r5\().16B
+ trn2 \r5\().16B, \r4\().16B, \r5\().16B
+ trn1 \r2\().16B, \r6\().16B, \r7\().16B
+ trn2 \r7\().16B, \r6\().16B, \r7\().16B
+
+ trn1 \r4\().8H, \r0\().8H, \r2\().8H
+ trn2 \r2\().8H, \r0\().8H, \r2\().8H
+ trn1 \r6\().8H, \r5\().8H, \r7\().8H
+ trn2 \r7\().8H, \r5\().8H, \r7\().8H
+ trn1 \r5\().8H, \t1\().8H, \r3\().8H
+ trn2 \t1\().8H, \t1\().8H, \r3\().8H
+ trn1 \r3\().8H, \t0\().8H, \r1\().8H
+ trn2 \t0\().8H, \t0\().8H, \r1\().8H
+
+ trn1 \r0\().4S, \r3\().4S, \r4\().4S
+ trn2 \r4\().4S, \r3\().4S, \r4\().4S
+
+ trn1 \r1\().4S, \r5\().4S, \r6\().4S
+ trn2 \r5\().4S, \r5\().4S, \r6\().4S
+
+ trn2 \r6\().4S, \t0\().4S, \r2\().4S
+ trn1 \r2\().4S, \t0\().4S, \r2\().4S
+
+ trn1 \r3\().4S, \t1\().4S, \r7\().4S
+ trn2 \r7\().4S, \t1\().4S, \r7\().4S
+.endm
+
+.macro transpose_4x16B r0, r1, r2, r3, t4, t5, t6, t7
+ trn1 \t4\().16B, \r0\().16B, \r1\().16B
+ trn2 \t5\().16B, \r0\().16B, \r1\().16B
+ trn1 \t6\().16B, \r2\().16B, \r3\().16B
+ trn2 \t7\().16B, \r2\().16B, \r3\().16B
+
+ trn1 \r0\().8H, \t4\().8H, \t6\().8H
+ trn2 \r2\().8H, \t4\().8H, \t6\().8H
+ trn1 \r1\().8H, \t5\().8H, \t7\().8H
+ trn2 \r3\().8H, \t5\().8H, \t7\().8H
+.endm
+
+.macro transpose_4x8B r0, r1, r2, r3, t4, t5, t6, t7
+ trn1 \t4\().8B, \r0\().8B, \r1\().8B
+ trn2 \t5\().8B, \r0\().8B, \r1\().8B
+ trn1 \t6\().8B, \r2\().8B, \r3\().8B
+ trn2 \t7\().8B, \r2\().8B, \r3\().8B
+
+ trn1 \r0\().4H, \t4\().4H, \t6\().4H
+ trn2 \r2\().4H, \t4\().4H, \t6\().4H
+ trn1 \r1\().4H, \t5\().4H, \t7\().4H
+ trn2 \r3\().4H, \t5\().4H, \t7\().4H
+.endm
+
+.macro transpose_4x4H r0, r1, r2, r3, r4, r5, r6, r7
+ trn1 \r4\().4H, \r0\().4H, \r1\().4H
+ trn2 \r5\().4H, \r0\().4H, \r1\().4H
+ trn1 \r6\().4H, \r2\().4H, \r3\().4H
+ trn2 \r7\().4H, \r2\().4H, \r3\().4H
+
+ trn1 \r0\().2S, \r4\().2S, \r6\().2S
+ trn2 \r2\().2S, \r4\().2S, \r6\().2S
+ trn1 \r1\().2S, \r5\().2S, \r7\().2S
+ trn2 \r3\().2S, \r5\().2S, \r7\().2S
+.endm
+
+.macro transpose_4x8H r0, r1, r2, r3, t4, t5, t6, t7
+ trn1 \t4\().8H, \r0\().8H, \r1\().8H
+ trn2 \t5\().8H, \r0\().8H, \r1\().8H
+ trn1 \t6\().8H, \r2\().8H, \r3\().8H
+ trn2 \t7\().8H, \r2\().8H, \r3\().8H
+
+ trn1 \r0\().4S, \t4\().4S, \t6\().4S
+ trn2 \r2\().4S, \t4\().4S, \t6\().4S
+ trn1 \r1\().4S, \t5\().4S, \t7\().4S
+ trn2 \r3\().4S, \t5\().4S, \t7\().4S
+.endm
+
+.macro transpose_8x8H r0, r1, r2, r3, r4, r5, r6, r7, r8, r9
+ trn1 \r8\().8H, \r0\().8H, \r1\().8H
+ trn2 \r9\().8H, \r0\().8H, \r1\().8H
+ trn1 \r1\().8H, \r2\().8H, \r3\().8H
+ trn2 \r3\().8H, \r2\().8H, \r3\().8H
+ trn1 \r0\().8H, \r4\().8H, \r5\().8H
+ trn2 \r5\().8H, \r4\().8H, \r5\().8H
+ trn1 \r2\().8H, \r6\().8H, \r7\().8H
+ trn2 \r7\().8H, \r6\().8H, \r7\().8H
+
+ trn1 \r4\().4S, \r0\().4S, \r2\().4S
+ trn2 \r2\().4S, \r0\().4S, \r2\().4S
+ trn1 \r6\().4S, \r5\().4S, \r7\().4S
+ trn2 \r7\().4S, \r5\().4S, \r7\().4S
+ trn1 \r5\().4S, \r9\().4S, \r3\().4S
+ trn2 \r9\().4S, \r9\().4S, \r3\().4S
+ trn1 \r3\().4S, \r8\().4S, \r1\().4S
+ trn2 \r8\().4S, \r8\().4S, \r1\().4S
+
+ trn1 \r0\().2D, \r3\().2D, \r4\().2D
+ trn2 \r4\().2D, \r3\().2D, \r4\().2D
+
+ trn1 \r1\().2D, \r5\().2D, \r6\().2D
+ trn2 \r5\().2D, \r5\().2D, \r6\().2D
+
+ trn2 \r6\().2D, \r8\().2D, \r2\().2D
+ trn1 \r2\().2D, \r8\().2D, \r2\().2D
+
+ trn1 \r3\().2D, \r9\().2D, \r7\().2D
+ trn2 \r7\().2D, \r9\().2D, \r7\().2D
+
+.endm
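The macros above are shared transpose helpers: three rounds of trn1/trn2 at increasing element width (bytes, then halfwords, then words or doublewords) leave the transposed rows back in r0..r7. A scalar reference for the 8x8 byte case, purely illustrative:

    #include <stdint.h>

    /* End result of transpose_8x8B: m[i][j] and m[j][i] swapped. */
    void transpose_8x8_ref(uint8_t m[8][8])
    {
        for (int i = 0; i < 8; i++)
            for (int j = i + 1; j < 8; j++) {
                uint8_t t = m[i][j];
                m[i][j] = m[j][i];
                m[j][i] = t;
            }
    }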
diff --git a/media/ffvpx/libavcodec/aarch64/simple_idct_neon.S b/media/ffvpx/libavcodec/aarch64/simple_idct_neon.S
new file mode 100644
index 0000000000..210182ff21
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/simple_idct_neon.S
@@ -0,0 +1,362 @@
+/*
+ * ARM NEON IDCT
+ *
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2017 Matthieu Bouron <matthieu.bouron@gmail.com>
+ *
+ * Based on Simple IDCT
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+#define Z1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define Z2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define Z3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define Z4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define Z5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define Z6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define Z7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define Z4c ((1<<(COL_SHIFT-1))/Z4)
+#define ROW_SHIFT 11
+#define COL_SHIFT 20
+
+#define z1 v0.H[0]
+#define z2 v0.H[1]
+#define z3 v0.H[2]
+#define z4 v0.H[3]
+#define z5 v0.H[4]
+#define z6 v0.H[5]
+#define z7 v0.H[6]
+#define z4c v0.H[7]
+
+const idct_coeff_neon, align=4
+ .short Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z4c
+endconst
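The constants above can be reproduced from the formula in their comments; a small stand-alone program (illustrative, not part of the patch) prints 22725, 21407, 19266, 16384, 12873, 8867 and 4520, i.e. the table matches the rounded values except that Z4 is stored as 16383 rather than 16384:

    #include <math.h>
    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    int main(void)
    {
        /* round(cos(i*M_PI/16) * sqrt(2) * (1 << 14)) for i = 1..7 */
        for (int i = 1; i <= 7; i++)
            printf("Z%d = %d\n", i,
                   (int)(cos(i * M_PI / 16.0) * sqrt(2.0) * (1 << 14) + 0.5));
        return 0;
    }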
+
+.macro idct_start data
+ prfm pldl1keep, [\data]
+ mov x10, x30
+ movrel x3, idct_coeff_neon
+ ld1 {v0.2D}, [x3]
+.endm
+
+.macro idct_end
+ ret x10
+.endm
+
+.macro smull1 a, b, c
+ smull \a, \b, \c
+.endm
+
+.macro smlal1 a, b, c
+ smlal \a, \b, \c
+.endm
+
+.macro smlsl1 a, b, c
+ smlsl \a, \b, \c
+.endm
+
+.macro idct_col4_top y1, y2, y3, y4, i, l
+ smull\i v7.4S, \y3\l, z2
+ smull\i v16.4S, \y3\l, z6
+ smull\i v17.4S, \y2\l, z1
+ add v19.4S, v23.4S, v7.4S
+ smull\i v18.4S, \y2\l, z3
+ add v20.4S, v23.4S, v16.4S
+ smull\i v5.4S, \y2\l, z5
+ sub v21.4S, v23.4S, v16.4S
+ smull\i v6.4S, \y2\l, z7
+ sub v22.4S, v23.4S, v7.4S
+
+ smlal\i v17.4S, \y4\l, z3
+ smlsl\i v18.4S, \y4\l, z7
+ smlsl\i v5.4S, \y4\l, z1
+ smlsl\i v6.4S, \y4\l, z5
+.endm
+
+.macro idct_row4_neon y1, y2, y3, y4, pass
+ ld1 {\y1\().2D,\y2\().2D}, [x2], #32
+ movi v23.4S, #1<<2, lsl #8
+ orr v5.16B, \y1\().16B, \y2\().16B
+ ld1 {\y3\().2D,\y4\().2D}, [x2], #32
+ orr v6.16B, \y3\().16B, \y4\().16B
+ orr v5.16B, v5.16B, v6.16B
+ mov x3, v5.D[1]
+ smlal v23.4S, \y1\().4H, z4
+
+ idct_col4_top \y1, \y2, \y3, \y4, 1, .4H
+
+ cmp x3, #0
+ b.eq \pass\()f
+
+ smull2 v7.4S, \y1\().8H, z4
+ smlal2 v17.4S, \y2\().8H, z5
+ smlsl2 v18.4S, \y2\().8H, z1
+ smull2 v16.4S, \y3\().8H, z2
+ smlal2 v5.4S, \y2\().8H, z7
+ add v19.4S, v19.4S, v7.4S
+ sub v20.4S, v20.4S, v7.4S
+ sub v21.4S, v21.4S, v7.4S
+ add v22.4S, v22.4S, v7.4S
+ smlal2 v6.4S, \y2\().8H, z3
+ smull2 v7.4S, \y3\().8H, z6
+ smlal2 v17.4S, \y4\().8H, z7
+ smlsl2 v18.4S, \y4\().8H, z5
+ smlal2 v5.4S, \y4\().8H, z3
+ smlsl2 v6.4S, \y4\().8H, z1
+ add v19.4S, v19.4S, v7.4S
+ sub v20.4S, v20.4S, v16.4S
+ add v21.4S, v21.4S, v16.4S
+ sub v22.4S, v22.4S, v7.4S
+
+\pass: add \y3\().4S, v19.4S, v17.4S
+ add \y4\().4S, v20.4S, v18.4S
+ shrn \y1\().4H, \y3\().4S, #ROW_SHIFT
+ shrn \y2\().4H, \y4\().4S, #ROW_SHIFT
+ add v7.4S, v21.4S, v5.4S
+ add v16.4S, v22.4S, v6.4S
+ shrn \y3\().4H, v7.4S, #ROW_SHIFT
+ shrn \y4\().4H, v16.4S, #ROW_SHIFT
+ sub v22.4S, v22.4S, v6.4S
+ sub v19.4S, v19.4S, v17.4S
+ sub v21.4S, v21.4S, v5.4S
+ shrn2 \y1\().8H, v22.4S, #ROW_SHIFT
+ sub v20.4S, v20.4S, v18.4S
+ shrn2 \y2\().8H, v21.4S, #ROW_SHIFT
+ shrn2 \y3\().8H, v20.4S, #ROW_SHIFT
+ shrn2 \y4\().8H, v19.4S, #ROW_SHIFT
+
+ trn1 v16.8H, \y1\().8H, \y2\().8H
+ trn2 v17.8H, \y1\().8H, \y2\().8H
+ trn1 v18.8H, \y3\().8H, \y4\().8H
+ trn2 v19.8H, \y3\().8H, \y4\().8H
+ trn1 \y1\().4S, v16.4S, v18.4S
+ trn1 \y2\().4S, v17.4S, v19.4S
+ trn2 \y3\().4S, v16.4S, v18.4S
+ trn2 \y4\().4S, v17.4S, v19.4S
+.endm
+
+.macro declare_idct_col4_neon i, l
+function idct_col4_neon\i
+ dup v23.4H, z4c
+.if \i == 1
+ add v23.4H, v23.4H, v24.4H
+.else
+ mov v5.D[0], v24.D[1]
+ add v23.4H, v23.4H, v5.4H
+.endif
+ smull v23.4S, v23.4H, z4
+
+ idct_col4_top v24, v25, v26, v27, \i, \l
+
+ mov x4, v28.D[\i - 1]
+ mov x5, v29.D[\i - 1]
+ cmp x4, #0
+ b.eq 1f
+
+ smull\i v7.4S, v28\l, z4
+ add v19.4S, v19.4S, v7.4S
+ sub v20.4S, v20.4S, v7.4S
+ sub v21.4S, v21.4S, v7.4S
+ add v22.4S, v22.4S, v7.4S
+
+1: mov x4, v30.D[\i - 1]
+ cmp x5, #0
+ b.eq 2f
+
+ smlal\i v17.4S, v29\l, z5
+ smlsl\i v18.4S, v29\l, z1
+ smlal\i v5.4S, v29\l, z7
+ smlal\i v6.4S, v29\l, z3
+
+2: mov x5, v31.D[\i - 1]
+ cmp x4, #0
+ b.eq 3f
+
+ smull\i v7.4S, v30\l, z6
+ smull\i v16.4S, v30\l, z2
+ add v19.4S, v19.4S, v7.4S
+ sub v22.4S, v22.4S, v7.4S
+ sub v20.4S, v20.4S, v16.4S
+ add v21.4S, v21.4S, v16.4S
+
+3: cmp x5, #0
+ b.eq 4f
+
+ smlal\i v17.4S, v31\l, z7
+ smlsl\i v18.4S, v31\l, z5
+ smlal\i v5.4S, v31\l, z3
+ smlsl\i v6.4S, v31\l, z1
+
+4: addhn v7.4H, v19.4S, v17.4S
+ addhn2 v7.8H, v20.4S, v18.4S
+ subhn v18.4H, v20.4S, v18.4S
+ subhn2 v18.8H, v19.4S, v17.4S
+
+ addhn v16.4H, v21.4S, v5.4S
+ addhn2 v16.8H, v22.4S, v6.4S
+ subhn v17.4H, v22.4S, v6.4S
+ subhn2 v17.8H, v21.4S, v5.4S
+
+ ret
+endfunc
+.endm
+
+declare_idct_col4_neon 1, .4H
+declare_idct_col4_neon 2, .8H
+
+function ff_simple_idct_put_neon, export=1
+ idct_start x2
+
+ idct_row4_neon v24, v25, v26, v27, 1
+ idct_row4_neon v28, v29, v30, v31, 2
+ bl idct_col4_neon1
+
+ sqshrun v1.8B, v7.8H, #COL_SHIFT-16
+ sqshrun2 v1.16B, v16.8H, #COL_SHIFT-16
+ sqshrun v3.8B, v17.8H, #COL_SHIFT-16
+ sqshrun2 v3.16B, v18.8H, #COL_SHIFT-16
+
+ bl idct_col4_neon2
+
+ sqshrun v2.8B, v7.8H, #COL_SHIFT-16
+ sqshrun2 v2.16B, v16.8H, #COL_SHIFT-16
+ sqshrun v4.8B, v17.8H, #COL_SHIFT-16
+ sqshrun2 v4.16B, v18.8H, #COL_SHIFT-16
+
+ zip1 v16.4S, v1.4S, v2.4S
+ zip2 v17.4S, v1.4S, v2.4S
+
+ st1 {v16.D}[0], [x0], x1
+ st1 {v16.D}[1], [x0], x1
+
+ zip1 v18.4S, v3.4S, v4.4S
+ zip2 v19.4S, v3.4S, v4.4S
+
+ st1 {v17.D}[0], [x0], x1
+ st1 {v17.D}[1], [x0], x1
+ st1 {v18.D}[0], [x0], x1
+ st1 {v18.D}[1], [x0], x1
+ st1 {v19.D}[0], [x0], x1
+ st1 {v19.D}[1], [x0], x1
+
+ idct_end
+endfunc
+
+function ff_simple_idct_add_neon, export=1
+ idct_start x2
+
+ idct_row4_neon v24, v25, v26, v27, 1
+ idct_row4_neon v28, v29, v30, v31, 2
+ bl idct_col4_neon1
+
+ sshr v1.8H, v7.8H, #COL_SHIFT-16
+ sshr v2.8H, v16.8H, #COL_SHIFT-16
+ sshr v3.8H, v17.8H, #COL_SHIFT-16
+ sshr v4.8H, v18.8H, #COL_SHIFT-16
+
+ bl idct_col4_neon2
+
+ sshr v7.8H, v7.8H, #COL_SHIFT-16
+ sshr v16.8H, v16.8H, #COL_SHIFT-16
+ sshr v17.8H, v17.8H, #COL_SHIFT-16
+ sshr v18.8H, v18.8H, #COL_SHIFT-16
+
+ mov x9, x0
+ ld1 {v19.D}[0], [x0], x1
+ zip1 v23.2D, v1.2D, v7.2D
+ zip2 v24.2D, v1.2D, v7.2D
+ ld1 {v19.D}[1], [x0], x1
+ zip1 v25.2D, v2.2D, v16.2D
+ zip2 v26.2D, v2.2D, v16.2D
+ ld1 {v20.D}[0], [x0], x1
+ zip1 v27.2D, v3.2D, v17.2D
+ zip2 v28.2D, v3.2D, v17.2D
+ ld1 {v20.D}[1], [x0], x1
+ zip1 v29.2D, v4.2D, v18.2D
+ zip2 v30.2D, v4.2D, v18.2D
+ ld1 {v21.D}[0], [x0], x1
+ uaddw v23.8H, v23.8H, v19.8B
+ uaddw2 v24.8H, v24.8H, v19.16B
+ ld1 {v21.D}[1], [x0], x1
+ sqxtun v23.8B, v23.8H
+ sqxtun2 v23.16B, v24.8H
+ ld1 {v22.D}[0], [x0], x1
+ uaddw v24.8H, v25.8H, v20.8B
+ uaddw2 v25.8H, v26.8H, v20.16B
+ ld1 {v22.D}[1], [x0], x1
+ sqxtun v24.8B, v24.8H
+ sqxtun2 v24.16B, v25.8H
+ st1 {v23.D}[0], [x9], x1
+ uaddw v25.8H, v27.8H, v21.8B
+ uaddw2 v26.8H, v28.8H, v21.16B
+ st1 {v23.D}[1], [x9], x1
+ sqxtun v25.8B, v25.8H
+ sqxtun2 v25.16B, v26.8H
+ st1 {v24.D}[0], [x9], x1
+ uaddw v26.8H, v29.8H, v22.8B
+ uaddw2 v27.8H, v30.8H, v22.16B
+ st1 {v24.D}[1], [x9], x1
+ sqxtun v26.8B, v26.8H
+ sqxtun2 v26.16B, v27.8H
+ st1 {v25.D}[0], [x9], x1
+ st1 {v25.D}[1], [x9], x1
+ st1 {v26.D}[0], [x9], x1
+ st1 {v26.D}[1], [x9], x1
+
+ idct_end
+endfunc
+
+function ff_simple_idct_neon, export=1
+ idct_start x0
+
+ mov x2, x0
+ idct_row4_neon v24, v25, v26, v27, 1
+ idct_row4_neon v28, v29, v30, v31, 2
+ sub x2, x2, #128
+ bl idct_col4_neon1
+
+ sshr v1.8H, v7.8H, #COL_SHIFT-16
+ sshr v2.8H, v16.8H, #COL_SHIFT-16
+ sshr v3.8H, v17.8H, #COL_SHIFT-16
+ sshr v4.8H, v18.8H, #COL_SHIFT-16
+
+ bl idct_col4_neon2
+
+ sshr v7.8H, v7.8H, #COL_SHIFT-16
+ sshr v16.8H, v16.8H, #COL_SHIFT-16
+ sshr v17.8H, v17.8H, #COL_SHIFT-16
+ sshr v18.8H, v18.8H, #COL_SHIFT-16
+
+ zip1 v23.2D, v1.2D, v7.2D
+ zip2 v24.2D, v1.2D, v7.2D
+ st1 {v23.2D,v24.2D}, [x2], #32
+ zip1 v25.2D, v2.2D, v16.2D
+ zip2 v26.2D, v2.2D, v16.2D
+ st1 {v25.2D,v26.2D}, [x2], #32
+ zip1 v27.2D, v3.2D, v17.2D
+ zip2 v28.2D, v3.2D, v17.2D
+ st1 {v27.2D,v28.2D}, [x2], #32
+ zip1 v29.2D, v4.2D, v18.2D
+ zip2 v30.2D, v4.2D, v18.2D
+ st1 {v29.2D,v30.2D}, [x2], #32
+
+ idct_end
+endfunc
diff --git a/media/ffvpx/libavcodec/aarch64/vc1dsp_init_aarch64.c b/media/ffvpx/libavcodec/aarch64/vc1dsp_init_aarch64.c
new file mode 100644
index 0000000000..3bc0bd17ee
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vc1dsp_init_aarch64.c
@@ -0,0 +1,141 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavutil/intreadwrite.h"
+#include "libavcodec/vc1dsp.h"
+
+#include "config.h"
+
+void ff_vc1_inv_trans_8x8_neon(int16_t *block);
+void ff_vc1_inv_trans_8x4_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_4x8_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_4x4_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+
+void ff_vc1_inv_trans_8x8_dc_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_8x4_dc_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_4x8_dc_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_4x4_dc_neon(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+
+void ff_vc1_v_loop_filter4_neon(uint8_t *src, ptrdiff_t stride, int pq);
+void ff_vc1_h_loop_filter4_neon(uint8_t *src, ptrdiff_t stride, int pq);
+void ff_vc1_v_loop_filter8_neon(uint8_t *src, ptrdiff_t stride, int pq);
+void ff_vc1_h_loop_filter8_neon(uint8_t *src, ptrdiff_t stride, int pq);
+void ff_vc1_v_loop_filter16_neon(uint8_t *src, ptrdiff_t stride, int pq);
+void ff_vc1_h_loop_filter16_neon(uint8_t *src, ptrdiff_t stride, int pq);
+
+void ff_put_vc1_chroma_mc8_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_avg_vc1_chroma_mc8_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_put_vc1_chroma_mc4_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_avg_vc1_chroma_mc4_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+
+int ff_vc1_unescape_buffer_helper_neon(const uint8_t *src, int size, uint8_t *dst);
+
+static int vc1_unescape_buffer_neon(const uint8_t *src, int size, uint8_t *dst)
+{
+ /* Dealing with starting and stopping, and removing escape bytes, are
+ * comparatively less time-sensitive, so are more clearly expressed using
+ * a C wrapper around the assembly inner loop. Note that we assume a
+ * little-endian machine that supports unaligned loads. */
+ int dsize = 0;
+ while (size >= 4)
+ {
+ int found = 0;
+ while (!found && (((uintptr_t) dst) & 7) && size >= 4)
+ {
+ found = (AV_RL32(src) &~ 0x03000000) == 0x00030000;
+ if (!found)
+ {
+ *dst++ = *src++;
+ --size;
+ ++dsize;
+ }
+ }
+ if (!found)
+ {
+ int skip = size - ff_vc1_unescape_buffer_helper_neon(src, size, dst);
+ dst += skip;
+ src += skip;
+ size -= skip;
+ dsize += skip;
+ while (!found && size >= 4)
+ {
+ found = (AV_RL32(src) &~ 0x03000000) == 0x00030000;
+ if (!found)
+ {
+ *dst++ = *src++;
+ --size;
+ ++dsize;
+ }
+ }
+ }
+ if (found)
+ {
+ *dst++ = *src++;
+ *dst++ = *src++;
+ ++src;
+ size -= 3;
+ dsize += 2;
+ }
+ }
+ while (size > 0)
+ {
+ *dst++ = *src++;
+ --size;
+ ++dsize;
+ }
+ return dsize;
+}
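+
+/* For illustration only (a scalar sketch, not part of upstream): the test
+ * (AV_RL32(src) & ~0x03000000) == 0x00030000 above matches the little-endian
+ * byte pattern 00 00 03 xx with xx <= 03, and the 03 emulation-prevention
+ * byte is the one that gets dropped.  An equivalent helper-free version:
+ *
+ *   static int vc1_unescape_buffer_scalar(const uint8_t *src, int size,
+ *                                         uint8_t *dst)
+ *   {
+ *       int dsize = 0;
+ *       while (size >= 4) {
+ *           if ((AV_RL32(src) & ~0x03000000) == 0x00030000) {
+ *               dst[dsize++] = *src++;   // first 00
+ *               dst[dsize++] = *src++;   // second 00
+ *               src++;                   // drop the 03 escape byte
+ *               size -= 3;
+ *           } else {
+ *               dst[dsize++] = *src++;
+ *               size--;
+ *           }
+ *       }
+ *       while (size-- > 0)
+ *           dst[dsize++] = *src++;
+ *       return dsize;
+ *   }
+ */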
+
+av_cold void ff_vc1dsp_init_aarch64(VC1DSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+ dsp->vc1_inv_trans_8x8 = ff_vc1_inv_trans_8x8_neon;
+ dsp->vc1_inv_trans_8x4 = ff_vc1_inv_trans_8x4_neon;
+ dsp->vc1_inv_trans_4x8 = ff_vc1_inv_trans_4x8_neon;
+ dsp->vc1_inv_trans_4x4 = ff_vc1_inv_trans_4x4_neon;
+ dsp->vc1_inv_trans_8x8_dc = ff_vc1_inv_trans_8x8_dc_neon;
+ dsp->vc1_inv_trans_8x4_dc = ff_vc1_inv_trans_8x4_dc_neon;
+ dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_neon;
+ dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_neon;
+
+ dsp->vc1_v_loop_filter4 = ff_vc1_v_loop_filter4_neon;
+ dsp->vc1_h_loop_filter4 = ff_vc1_h_loop_filter4_neon;
+ dsp->vc1_v_loop_filter8 = ff_vc1_v_loop_filter8_neon;
+ dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_neon;
+ dsp->vc1_v_loop_filter16 = ff_vc1_v_loop_filter16_neon;
+ dsp->vc1_h_loop_filter16 = ff_vc1_h_loop_filter16_neon;
+
+ dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_neon;
+ dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_neon;
+ dsp->put_no_rnd_vc1_chroma_pixels_tab[1] = ff_put_vc1_chroma_mc4_neon;
+ dsp->avg_no_rnd_vc1_chroma_pixels_tab[1] = ff_avg_vc1_chroma_mc4_neon;
+
+ dsp->vc1_unescape_buffer = vc1_unescape_buffer_neon;
+ }
+}
diff --git a/media/ffvpx/libavcodec/aarch64/videodsp.S b/media/ffvpx/libavcodec/aarch64/videodsp.S
new file mode 100644
index 0000000000..fe2da0658e
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/videodsp.S
@@ -0,0 +1,29 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+function ff_prefetch_aarch64, export=1
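+        // x0 = mem, x1 = stride, w2 = h (matching the prototype declared in
+        // videodsp_init.c); two rows are prefetched per loop iteration.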
+1:
+ subs w2, w2, #2
+ prfm pldl1strm, [x0]
+ prfm pldl1strm, [x0, x1]
+ add x0, x0, x1, lsl #1
+ b.gt 1b
+ ret
+endfunc
diff --git a/media/ffvpx/libavcodec/aarch64/videodsp_init.c b/media/ffvpx/libavcodec/aarch64/videodsp_init.c
new file mode 100644
index 0000000000..1f77a918d7
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/videodsp_init.c
@@ -0,0 +1,32 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavcodec/videodsp.h"
+
+void ff_prefetch_aarch64(const uint8_t *mem, ptrdiff_t stride, int h);
+
+av_cold void ff_videodsp_init_aarch64(VideoDSPContext *ctx, int bpc)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_armv8(cpu_flags))
+ ctx->prefetch = ff_prefetch_aarch64;
+}
diff --git a/media/ffvpx/libavcodec/aarch64/vp8dsp.h b/media/ffvpx/libavcodec/aarch64/vp8dsp.h
new file mode 100644
index 0000000000..4e59de28b1
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp8dsp.h
@@ -0,0 +1,75 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AARCH64_VP8DSP_H
+#define AVCODEC_AARCH64_VP8DSP_H
+
+#include "libavcodec/vp8dsp.h"
+
+#define VP8_LF_Y(hv, inner, opt) \
+ void ff_vp8_##hv##_loop_filter16##inner##_##opt(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int flim_E, int flim_I, \
+ int hev_thresh)
+
+#define VP8_LF_UV(hv, inner, opt) \
+ void ff_vp8_##hv##_loop_filter8uv##inner##_##opt(uint8_t *dstU, \
+ uint8_t *dstV, \
+ ptrdiff_t stride, \
+ int flim_E, int flim_I, \
+ int hev_thresh)
+
+#define VP8_LF_SIMPLE(hv, opt) \
+ void ff_vp8_##hv##_loop_filter16_simple_##opt(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int flim)
+
+#define VP8_LF_HV(inner, opt) \
+ VP8_LF_Y(h, inner, opt); \
+ VP8_LF_Y(v, inner, opt); \
+ VP8_LF_UV(h, inner, opt); \
+ VP8_LF_UV(v, inner, opt)
+
+#define VP8_LF(opt) \
+ VP8_LF_HV(, opt); \
+ VP8_LF_HV(_inner, opt); \
+ VP8_LF_SIMPLE(h, opt); \
+ VP8_LF_SIMPLE(v, opt)
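+
+/* As an example of the expansion, VP8_LF(neon) declares
+ * ff_vp8_{h,v}_loop_filter16_neon and the _inner variants,
+ * ff_vp8_{h,v}_loop_filter8uv_neon and the _inner variants, and
+ * ff_vp8_{h,v}_loop_filter16_simple_neon. */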
+
+#define VP8_MC(n, opt) \
+ void ff_put_vp8_##n##_##opt(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, ptrdiff_t srcstride,\
+ int h, int x, int y)
+
+#define VP8_EPEL(w, opt) \
+ VP8_MC(pixels ## w, opt); \
+ VP8_MC(epel ## w ## _h4, opt); \
+ VP8_MC(epel ## w ## _h6, opt); \
+ VP8_MC(epel ## w ## _v4, opt); \
+ VP8_MC(epel ## w ## _h4v4, opt); \
+ VP8_MC(epel ## w ## _h6v4, opt); \
+ VP8_MC(epel ## w ## _v6, opt); \
+ VP8_MC(epel ## w ## _h4v6, opt); \
+ VP8_MC(epel ## w ## _h6v6, opt)
+
+#define VP8_BILIN(w, opt) \
+ VP8_MC(bilin ## w ## _h, opt); \
+ VP8_MC(bilin ## w ## _v, opt); \
+ VP8_MC(bilin ## w ## _hv, opt)
+
+#endif /* AVCODEC_AARCH64_VP8DSP_H */
diff --git a/media/ffvpx/libavcodec/aarch64/vp8dsp_init_aarch64.c b/media/ffvpx/libavcodec/aarch64/vp8dsp_init_aarch64.c
new file mode 100644
index 0000000000..fc7e831d17
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp8dsp_init_aarch64.c
@@ -0,0 +1,124 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavcodec/vp8dsp.h"
+#include "vp8dsp.h"
+
+void ff_vp8_luma_dc_wht_neon(int16_t block[4][4][16], int16_t dc[16]);
+
+void ff_vp8_idct_add_neon(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
+void ff_vp8_idct_dc_add_neon(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
+void ff_vp8_idct_dc_add4y_neon(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);
+void ff_vp8_idct_dc_add4uv_neon(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);
+
+VP8_LF(neon);
+
+VP8_EPEL(16, neon);
+VP8_EPEL(8, neon);
+VP8_EPEL(4, neon);
+
+VP8_BILIN(16, neon);
+VP8_BILIN(8, neon);
+VP8_BILIN(4, neon);
+
+av_cold void ff_vp78dsp_init_aarch64(VP8DSPContext *dsp)
+{
+ if (!have_neon(av_get_cpu_flags()))
+ return;
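+    /* Table indices are [block width 16, 8 or 4][vertical filter: none,
+     * 4-tap, 6-tap][horizontal filter: none, 4-tap, 6-tap], as the function
+     * names assigned below spell out. */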
+ dsp->put_vp8_epel_pixels_tab[0][0][0] = ff_put_vp8_pixels16_neon;
+ dsp->put_vp8_epel_pixels_tab[0][0][2] = ff_put_vp8_epel16_h6_neon;
+ dsp->put_vp8_epel_pixels_tab[0][2][0] = ff_put_vp8_epel16_v6_neon;
+ dsp->put_vp8_epel_pixels_tab[0][2][2] = ff_put_vp8_epel16_h6v6_neon;
+
+ dsp->put_vp8_epel_pixels_tab[1][0][0] = ff_put_vp8_pixels8_neon;
+ dsp->put_vp8_epel_pixels_tab[1][0][1] = ff_put_vp8_epel8_h4_neon;
+ dsp->put_vp8_epel_pixels_tab[1][0][2] = ff_put_vp8_epel8_h6_neon;
+ dsp->put_vp8_epel_pixels_tab[1][1][0] = ff_put_vp8_epel8_v4_neon;
+ dsp->put_vp8_epel_pixels_tab[1][1][1] = ff_put_vp8_epel8_h4v4_neon;
+ dsp->put_vp8_epel_pixels_tab[1][1][2] = ff_put_vp8_epel8_h6v4_neon;
+ dsp->put_vp8_epel_pixels_tab[1][2][0] = ff_put_vp8_epel8_v6_neon;
+ dsp->put_vp8_epel_pixels_tab[1][2][1] = ff_put_vp8_epel8_h4v6_neon;
+ dsp->put_vp8_epel_pixels_tab[1][2][2] = ff_put_vp8_epel8_h6v6_neon;
+
+ dsp->put_vp8_epel_pixels_tab[2][0][1] = ff_put_vp8_epel4_h4_neon;
+ dsp->put_vp8_epel_pixels_tab[2][0][2] = ff_put_vp8_epel4_h6_neon;
+ dsp->put_vp8_epel_pixels_tab[2][1][0] = ff_put_vp8_epel4_v4_neon;
+ dsp->put_vp8_epel_pixels_tab[2][1][1] = ff_put_vp8_epel4_h4v4_neon;
+ dsp->put_vp8_epel_pixels_tab[2][1][2] = ff_put_vp8_epel4_h6v4_neon;
+ dsp->put_vp8_epel_pixels_tab[2][2][0] = ff_put_vp8_epel4_v6_neon;
+ dsp->put_vp8_epel_pixels_tab[2][2][1] = ff_put_vp8_epel4_h4v6_neon;
+ dsp->put_vp8_epel_pixels_tab[2][2][2] = ff_put_vp8_epel4_h6v6_neon;
+
+ dsp->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_neon;
+ dsp->put_vp8_bilinear_pixels_tab[0][0][1] = ff_put_vp8_bilin16_h_neon;
+ dsp->put_vp8_bilinear_pixels_tab[0][0][2] = ff_put_vp8_bilin16_h_neon;
+ dsp->put_vp8_bilinear_pixels_tab[0][1][0] = ff_put_vp8_bilin16_v_neon;
+ dsp->put_vp8_bilinear_pixels_tab[0][1][1] = ff_put_vp8_bilin16_hv_neon;
+ dsp->put_vp8_bilinear_pixels_tab[0][1][2] = ff_put_vp8_bilin16_hv_neon;
+ dsp->put_vp8_bilinear_pixels_tab[0][2][0] = ff_put_vp8_bilin16_v_neon;
+ dsp->put_vp8_bilinear_pixels_tab[0][2][1] = ff_put_vp8_bilin16_hv_neon;
+ dsp->put_vp8_bilinear_pixels_tab[0][2][2] = ff_put_vp8_bilin16_hv_neon;
+
+ dsp->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_neon;
+ dsp->put_vp8_bilinear_pixels_tab[1][0][1] = ff_put_vp8_bilin8_h_neon;
+ dsp->put_vp8_bilinear_pixels_tab[1][0][2] = ff_put_vp8_bilin8_h_neon;
+ dsp->put_vp8_bilinear_pixels_tab[1][1][0] = ff_put_vp8_bilin8_v_neon;
+ dsp->put_vp8_bilinear_pixels_tab[1][1][1] = ff_put_vp8_bilin8_hv_neon;
+ dsp->put_vp8_bilinear_pixels_tab[1][1][2] = ff_put_vp8_bilin8_hv_neon;
+ dsp->put_vp8_bilinear_pixels_tab[1][2][0] = ff_put_vp8_bilin8_v_neon;
+ dsp->put_vp8_bilinear_pixels_tab[1][2][1] = ff_put_vp8_bilin8_hv_neon;
+ dsp->put_vp8_bilinear_pixels_tab[1][2][2] = ff_put_vp8_bilin8_hv_neon;
+
+ dsp->put_vp8_bilinear_pixels_tab[2][0][1] = ff_put_vp8_bilin4_h_neon;
+ dsp->put_vp8_bilinear_pixels_tab[2][0][2] = ff_put_vp8_bilin4_h_neon;
+ dsp->put_vp8_bilinear_pixels_tab[2][1][0] = ff_put_vp8_bilin4_v_neon;
+ dsp->put_vp8_bilinear_pixels_tab[2][1][1] = ff_put_vp8_bilin4_hv_neon;
+ dsp->put_vp8_bilinear_pixels_tab[2][1][2] = ff_put_vp8_bilin4_hv_neon;
+ dsp->put_vp8_bilinear_pixels_tab[2][2][0] = ff_put_vp8_bilin4_v_neon;
+ dsp->put_vp8_bilinear_pixels_tab[2][2][1] = ff_put_vp8_bilin4_hv_neon;
+ dsp->put_vp8_bilinear_pixels_tab[2][2][2] = ff_put_vp8_bilin4_hv_neon;
+}
+
+av_cold void ff_vp8dsp_init_aarch64(VP8DSPContext *dsp)
+{
+ if (!have_neon(av_get_cpu_flags()))
+ return;
+ dsp->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_neon;
+
+ dsp->vp8_idct_add = ff_vp8_idct_add_neon;
+ dsp->vp8_idct_dc_add = ff_vp8_idct_dc_add_neon;
+ dsp->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_neon;
+ dsp->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_neon;
+
+ dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_neon;
+ dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_neon;
+ dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_neon;
+ dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_neon;
+
+ dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_neon;
+ dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_neon;
+ dsp->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_neon;
+ dsp->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_neon;
+
+ dsp->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_neon;
+ dsp->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_neon;
+}
diff --git a/media/ffvpx/libavcodec/aarch64/vp8dsp_neon.S b/media/ffvpx/libavcodec/aarch64/vp8dsp_neon.S
new file mode 100644
index 0000000000..4bbf16d1a4
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp8dsp_neon.S
@@ -0,0 +1,1790 @@
+/*
+ * VP8 NEON optimisations
+ *
+ * Copyright (c) 2010 Rob Clark <rob@ti.com>
+ * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2018 Magnus Röös <mla2.roos@gmail.com>
+ * Copyright (c) 2019 Martin Storsjo <martin@martin.st>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+#include "neon.S"
+
+function ff_vp8_luma_dc_wht_neon, export=1
+ ld1 {v0.4h - v3.4h}, [x1]
+ movi v30.8h, #0
+
+ add v4.4h, v0.4h, v3.4h
+ add v6.4h, v1.4h, v2.4h
+ st1 {v30.8h}, [x1], #16
+ sub v7.4h, v1.4h, v2.4h
+ sub v5.4h, v0.4h, v3.4h
+ st1 {v30.8h}, [x1]
+ add v0.4h, v4.4h, v6.4h
+ add v1.4h, v5.4h, v7.4h
+ sub v2.4h, v4.4h, v6.4h
+ sub v3.4h, v5.4h, v7.4h
+
+ movi v16.4h, #3
+
+ transpose_4x4H v0, v1, v2, v3, v4, v5, v6, v7
+
+ add v0.4h, v0.4h, v16.4h
+
+ add v4.4h, v0.4h, v3.4h
+ add v6.4h, v1.4h, v2.4h
+ sub v7.4h, v1.4h, v2.4h
+ sub v5.4h, v0.4h, v3.4h
+ add v0.4h, v4.4h, v6.4h
+ add v1.4h, v5.4h, v7.4h
+ sub v2.4h, v4.4h, v6.4h
+ sub v3.4h, v5.4h, v7.4h
+
+ sshr v0.4h, v0.4h, #3
+ sshr v1.4h, v1.4h, #3
+ sshr v2.4h, v2.4h, #3
+ sshr v3.4h, v3.4h, #3
+
+ mov x3, #32
+ st1 {v0.h}[0], [x0], x3
+ st1 {v1.h}[0], [x0], x3
+ st1 {v2.h}[0], [x0], x3
+ st1 {v3.h}[0], [x0], x3
+ st1 {v0.h}[1], [x0], x3
+ st1 {v1.h}[1], [x0], x3
+ st1 {v2.h}[1], [x0], x3
+ st1 {v3.h}[1], [x0], x3
+ st1 {v0.h}[2], [x0], x3
+ st1 {v1.h}[2], [x0], x3
+ st1 {v2.h}[2], [x0], x3
+ st1 {v3.h}[2], [x0], x3
+ st1 {v0.h}[3], [x0], x3
+ st1 {v1.h}[3], [x0], x3
+ st1 {v2.h}[3], [x0], x3
+ st1 {v3.h}[3], [x0], x3
+
+ ret
+endfunc
+
+function ff_vp8_idct_add_neon, export=1
+ ld1 {v0.8b - v3.8b}, [x1]
+ mov w4, #20091
+ movk w4, #35468/2, lsl #16
+ dup v4.2s, w4
+
+ smull v26.4s, v1.4h, v4.h[0]
+ smull v27.4s, v3.4h, v4.h[0]
+ sqdmulh v20.4h, v1.4h, v4.h[1]
+ sqdmulh v23.4h, v3.4h, v4.h[1]
+ shrn v21.4h, v26.4s, #16
+ shrn v22.4h, v27.4s, #16
+ add v21.4h, v21.4h, v1.4h
+ add v22.4h, v22.4h, v3.4h
+
+ add v16.4h, v0.4h, v2.4h
+ sub v17.4h, v0.4h, v2.4h
+
+ add v18.4h, v21.4h, v23.4h
+ sub v19.4h, v20.4h, v22.4h
+
+ add v0.4h, v16.4h, v18.4h
+ add v1.4h, v17.4h, v19.4h
+ sub v3.4h, v16.4h, v18.4h
+ sub v2.4h, v17.4h, v19.4h
+
+ transpose_4x4H v0, v1, v2, v3, v24, v5, v6, v7
+
+ movi v29.8h, #0
+ smull v26.4s, v1.4h, v4.h[0]
+ st1 {v29.8h}, [x1], #16
+ smull v27.4s, v3.4h, v4.h[0]
+ st1 {v29.16b}, [x1]
+ sqdmulh v21.4h, v1.4h, v4.h[1]
+ sqdmulh v23.4h, v3.4h, v4.h[1]
+ shrn v20.4h, v26.4s, #16
+ shrn v22.4h, v27.4s, #16
+ add v20.4h, v20.4h, v1.4h
+ add v22.4h, v22.4h, v3.4h
+ add v16.4h, v0.4h, v2.4h
+ sub v17.4h, v0.4h, v2.4h
+
+ add v18.4h, v20.4h, v23.4h
+ ld1 {v24.s}[0], [x0], x2
+ sub v19.4h, v21.4h, v22.4h
+ ld1 {v25.s}[0], [x0], x2
+ add v0.4h, v16.4h, v18.4h
+ add v1.4h, v17.4h, v19.4h
+ ld1 {v26.s}[0], [x0], x2
+ sub v3.4h, v16.4h, v18.4h
+ sub v2.4h, v17.4h, v19.4h
+ ld1 {v27.s}[0], [x0], x2
+ srshr v0.4h, v0.4h, #3
+ srshr v1.4h, v1.4h, #3
+ srshr v2.4h, v2.4h, #3
+ srshr v3.4h, v3.4h, #3
+
+ sub x0, x0, x2, lsl #2
+
+ transpose_4x4H v0, v1, v2, v3, v5, v6, v7, v16
+
+ uaddw v0.8h, v0.8h, v24.8b
+ uaddw v1.8h, v1.8h, v25.8b
+ uaddw v2.8h, v2.8h, v26.8b
+ uaddw v3.8h, v3.8h, v27.8b
+ sqxtun v0.8b, v0.8h
+ sqxtun v1.8b, v1.8h
+ sqxtun v2.8b, v2.8h
+ sqxtun v3.8b, v3.8h
+
+ st1 {v0.s}[0], [x0], x2
+ st1 {v1.s}[0], [x0], x2
+ st1 {v2.s}[0], [x0], x2
+ st1 {v3.s}[0], [x0], x2
+
+ ret
+endfunc
+
+function ff_vp8_idct_dc_add4uv_neon, export=1
+ movi v0.4h, #0
+ mov x3, #32
+ ld1r {v16.4h}, [x1]
+ st1 {v0.h}[0], [x1], x3
+ ld1r {v17.4h}, [x1]
+ st1 {v0.h}[0], [x1], x3
+ ld1r {v18.4h}, [x1]
+ st1 {v0.h}[0], [x1], x3
+ ld1r {v19.4h}, [x1]
+ st1 {v0.h}[0], [x1], x3
+ ins v16.d[1], v17.d[0]
+ ins v18.d[1], v19.d[0]
+ mov x3, x0
+ srshr v16.8h, v16.8h, #3 // dc >>= 3
+ ld1 {v0.8b}, [x0], x2
+ srshr v18.8h, v18.8h, #3
+ ld1 {v1.8b}, [x0], x2
+ uaddw v20.8h, v16.8h, v0.8b
+ ld1 {v2.8b}, [x0], x2
+ uaddw v0.8h, v16.8h, v1.8b
+ ld1 {v3.8b}, [x0], x2
+ uaddw v22.8h, v16.8h, v2.8b
+ ld1 {v4.8b}, [x0], x2
+ uaddw v2.8h, v16.8h, v3.8b
+ ld1 {v5.8b}, [x0], x2
+ uaddw v24.8h, v18.8h, v4.8b
+ ld1 {v6.8b}, [x0], x2
+ uaddw v4.8h, v18.8h, v5.8b
+ ld1 {v7.8b}, [x0], x2
+ uaddw v26.8h, v18.8h, v6.8b
+ sqxtun v20.8b, v20.8h
+ uaddw v6.8h, v18.8h, v7.8b
+ sqxtun v21.8b, v0.8h
+ sqxtun v22.8b, v22.8h
+ st1 {v20.8b}, [x3], x2
+ sqxtun v23.8b, v2.8h
+ st1 {v21.8b}, [x3], x2
+ sqxtun v24.8b, v24.8h
+ st1 {v22.8b}, [x3], x2
+ sqxtun v25.8b, v4.8h
+ st1 {v23.8b}, [x3], x2
+ sqxtun v26.8b, v26.8h
+ st1 {v24.8b}, [x3], x2
+ sqxtun v27.8b, v6.8h
+ st1 {v25.8b}, [x3], x2
+ st1 {v26.8b}, [x3], x2
+ st1 {v27.8b}, [x3], x2
+
+ ret
+endfunc
+
+function ff_vp8_idct_dc_add4y_neon, export=1
+ movi v0.16b, #0
+ mov x3, #32
+ ld1r {v16.4h}, [x1]
+ st1 {v0.h}[0], [x1], x3
+ ld1r {v17.4h}, [x1]
+ st1 {v0.h}[0], [x1], x3
+ zip1 v16.2d, v16.2d, v17.2d
+ ld1r {v18.4h}, [x1]
+ st1 {v0.h}[0], [x1], x3
+ ld1r {v19.4h}, [x1]
+ st1 {v0.h}[0], [x1], x3
+ zip1 v18.2d, v18.2d, v19.2d
+ srshr v16.8h, v16.8h, #3 // dc >>= 3
+ ld1 {v0.16b}, [x0], x2
+ srshr v18.8h, v18.8h, #3
+ ld1 {v1.16b}, [x0], x2
+ uaddw v20.8h, v16.8h, v0.8b
+ ld1 {v2.16b}, [x0], x2
+ uaddw2 v0.8h, v18.8h, v0.16b
+ ld1 {v3.16b}, [x0], x2
+ uaddw v21.8h, v16.8h, v1.8b
+ uaddw2 v1.8h, v18.8h, v1.16b
+ uaddw v22.8h, v16.8h, v2.8b
+ uaddw2 v2.8h, v18.8h, v2.16b
+ uaddw v23.8h, v16.8h, v3.8b
+ uaddw2 v3.8h, v18.8h, v3.16b
+ sub x0, x0, x2, lsl #2
+ sqxtun v20.8b, v20.8h
+ sqxtun2 v20.16b, v0.8h
+ sqxtun v21.8b, v21.8h
+ sqxtun2 v21.16b, v1.8h
+ sqxtun v22.8b, v22.8h
+ st1 {v20.16b}, [x0], x2
+ sqxtun2 v22.16b, v2.8h
+ st1 {v21.16b}, [x0], x2
+ sqxtun v23.8b, v23.8h
+ st1 {v22.16b}, [x0], x2
+ sqxtun2 v23.16b, v3.8h
+ st1 {v23.16b}, [x0], x2
+
+ ret
+endfunc
+
+function ff_vp8_idct_dc_add_neon, export=1
+ mov w3, #0
+ ld1r {v2.8h}, [x1]
+ strh w3, [x1]
+ srshr v2.8h, v2.8h, #3
+ ld1 {v0.s}[0], [x0], x2
+ ld1 {v0.s}[1], [x0], x2
+ uaddw v3.8h, v2.8h, v0.8b
+ ld1 {v1.s}[0], [x0], x2
+ ld1 {v1.s}[1], [x0], x2
+ uaddw v4.8h, v2.8h, v1.8b
+ sqxtun v0.8b, v3.8h
+ sqxtun v1.8b, v4.8h
+ sub x0, x0, x2, lsl #2
+ st1 {v0.s}[0], [x0], x2
+ st1 {v0.s}[1], [x0], x2
+ st1 {v1.s}[0], [x0], x2
+ st1 {v1.s}[1], [x0], x2
+ ret
+endfunc
+
+// Register layout:
+// P3..Q3 -> v0..v7
+// flim_E -> v22
+// flim_I -> v23
+// hev_thresh -> x5
+//
+.macro vp8_loop_filter, inner=0, simple=0, hev_thresh
+ .if \simple
+ uabd v17.16b, v3.16b, v4.16b // abs(P0-Q0)
+ uabd v23.16b, v2.16b, v5.16b // abs(P1-Q1)
+ uqadd v17.16b, v17.16b, v17.16b // abs(P0-Q0) * 2
+ ushr v18.16b, v23.16b, #1 // abs(P1-Q1) / 2
+ uqadd v19.16b, v17.16b, v18.16b // (abs(P0-Q0)*2) + (abs(P1-Q1)/2)
+ movi v21.16b, #0x80
+ cmhs v16.16b, v22.16b, v19.16b // (abs(P0-Q0)*2) + (abs(P1-Q1)/2) <= flim
+ .else
+ // calculate hev and normal_limit:
+ uabd v20.16b, v2.16b, v3.16b // abs(P1-P0)
+ uabd v21.16b, v5.16b, v4.16b // abs(Q1-Q0)
+ uabd v18.16b, v0.16b, v1.16b // abs(P3-P2)
+ uabd v19.16b, v1.16b, v2.16b // abs(P2-P1)
+ cmhs v16.16b, v23.16b, v20.16b // abs(P1-P0) <= flim_I
+ cmhs v17.16b, v23.16b, v21.16b // abs(Q1-Q0) <= flim_I
+ cmhs v18.16b, v23.16b, v18.16b // abs(P3-P2) <= flim_I
+ cmhs v19.16b, v23.16b, v19.16b // abs(P2-P1) <= flim_I
+ and v16.16b, v17.16b, v16.16b
+ uabd v17.16b, v7.16b, v6.16b // abs(Q3-Q2)
+ and v16.16b, v16.16b, v19.16b
+ uabd v19.16b, v6.16b, v5.16b // abs(Q2-Q1)
+ and v16.16b, v16.16b, v18.16b
+ cmhs v18.16b, v23.16b, v17.16b // abs(Q3-Q2) <= flim_I
+ cmhs v19.16b, v23.16b, v19.16b // abs(Q2-Q1) <= flim_I
+ uabd v17.16b, v3.16b, v4.16b // abs(P0-Q0)
+ uabd v23.16b, v2.16b, v5.16b // abs(P1-Q1)
+ and v16.16b, v16.16b, v18.16b
+ uqadd v17.16b, v17.16b, v17.16b // abs(P0-Q0) * 2
+ and v16.16b, v16.16b, v19.16b
+ ushr v18.16b, v23.16b, #1 // abs(P1-Q1) / 2
+ dup v23.16b, \hev_thresh // hev_thresh
+ uqadd v19.16b, v17.16b, v18.16b // (abs(P0-Q0)*2) + (abs(P1-Q1)/2)
+ cmhi v20.16b, v20.16b, v23.16b // abs(P1-P0) > hev_thresh
+ cmhs v19.16b, v22.16b, v19.16b // (abs(P0-Q0)*2) + (abs(P1-Q1)/2) <= flim_E
+ cmhi v22.16b, v21.16b, v23.16b // abs(Q1-Q0) > hev_thresh
+ and v16.16b, v16.16b, v19.16b
+ movi v21.16b, #0x80
+ orr v17.16b, v20.16b, v22.16b
+ .endif
+
+ // at this point:
+ // v16: normal_limit
+ // v17: hev
+
+ // convert to signed value:
+ eor v3.16b, v3.16b, v21.16b // PS0 = P0 ^ 0x80
+ eor v4.16b, v4.16b, v21.16b // QS0 = Q0 ^ 0x80
+
+ movi v20.8h, #3
+ ssubl v18.8h, v4.8b, v3.8b // QS0 - PS0
+ ssubl2 v19.8h, v4.16b, v3.16b // (widened to 16bit)
+ eor v2.16b, v2.16b, v21.16b // PS1 = P1 ^ 0x80
+ eor v5.16b, v5.16b, v21.16b // QS1 = Q1 ^ 0x80
+ mul v18.8h, v18.8h, v20.8h // w = 3 * (QS0 - PS0)
+ mul v19.8h, v19.8h, v20.8h
+
+ sqsub v20.16b, v2.16b, v5.16b // clamp(PS1-QS1)
+ movi v22.16b, #4
+ movi v23.16b, #3
+ .if \inner
+ and v20.16b, v20.16b, v17.16b // if(hev) w += clamp(PS1-QS1)
+ .endif
+ saddw v18.8h, v18.8h, v20.8b // w += clamp(PS1-QS1)
+ saddw2 v19.8h, v19.8h, v20.16b
+ sqxtn v18.8b, v18.8h // narrow result back into v18
+ sqxtn2 v18.16b, v19.8h
+ .if !\inner && !\simple
+ eor v1.16b, v1.16b, v21.16b // PS2 = P2 ^ 0x80
+ eor v6.16b, v6.16b, v21.16b // QS2 = Q2 ^ 0x80
+ .endif
+ and v18.16b, v18.16b, v16.16b // w &= normal_limit
+
+ // registers used at this point..
+ // v0 -> P3 (don't corrupt)
+ // v1-v6 -> PS2-QS2
+ // v7 -> Q3 (don't corrupt)
+ // v17 -> hev
+ // v18 -> w
+ // v21 -> #0x80
+ // v22 -> #4
+ // v23 -> #3
+ // v16, v19, v29 -> unused
+ //
+ // filter_common: is4tap==1
+ // c1 = clamp(w + 4) >> 3;
+ // c2 = clamp(w + 3) >> 3;
+ // Q0 = s2u(QS0 - c1);
+ // P0 = s2u(PS0 + c2);
+
+ .if \simple
+        sqadd v19.16b, v18.16b, v22.16b // c1 = clamp(w+4)
+        sqadd v20.16b, v18.16b, v23.16b // c2 = clamp(w+3)
+ sshr v19.16b, v19.16b, #3 // c1 >>= 3
+ sshr v20.16b, v20.16b, #3 // c2 >>= 3
+ sqsub v4.16b, v4.16b, v19.16b // QS0 = clamp(QS0-c1)
+ sqadd v3.16b, v3.16b, v20.16b // PS0 = clamp(PS0+c2)
+ eor v4.16b, v4.16b, v21.16b // Q0 = QS0 ^ 0x80
+ eor v3.16b, v3.16b, v21.16b // P0 = PS0 ^ 0x80
+ eor v5.16b, v5.16b, v21.16b // Q1 = QS1 ^ 0x80
+ eor v2.16b, v2.16b, v21.16b // P1 = PS1 ^ 0x80
+ .elseif \inner
+ // the !is4tap case of filter_common, only used for inner blocks
+ // c3 = ((c1&~hev) + 1) >> 1;
+ // Q1 = s2u(QS1 - c3);
+ // P1 = s2u(PS1 + c3);
+        sqadd v19.16b, v18.16b, v22.16b // c1 = clamp(w+4)
+        sqadd v20.16b, v18.16b, v23.16b // c2 = clamp(w+3)
+ sshr v19.16b, v19.16b, #3 // c1 >>= 3
+ sshr v20.16b, v20.16b, #3 // c2 >>= 3
+ sqsub v4.16b, v4.16b, v19.16b // QS0 = clamp(QS0-c1)
+ sqadd v3.16b, v3.16b, v20.16b // PS0 = clamp(PS0+c2)
+ bic v19.16b, v19.16b, v17.16b // c1 & ~hev
+ eor v4.16b, v4.16b, v21.16b // Q0 = QS0 ^ 0x80
+ srshr v19.16b, v19.16b, #1 // c3 >>= 1
+ eor v3.16b, v3.16b, v21.16b // P0 = PS0 ^ 0x80
+ sqsub v5.16b, v5.16b, v19.16b // QS1 = clamp(QS1-c3)
+ sqadd v2.16b, v2.16b, v19.16b // PS1 = clamp(PS1+c3)
+ eor v5.16b, v5.16b, v21.16b // Q1 = QS1 ^ 0x80
+ eor v2.16b, v2.16b, v21.16b // P1 = PS1 ^ 0x80
+ .else
+ and v20.16b, v18.16b, v17.16b // w & hev
+ sqadd v19.16b, v20.16b, v22.16b // c1 = clamp((w&hev)+4)
+ sqadd v20.16b, v20.16b, v23.16b // c2 = clamp((w&hev)+3)
+ sshr v19.16b, v19.16b, #3 // c1 >>= 3
+ sshr v20.16b, v20.16b, #3 // c2 >>= 3
+ bic v18.16b, v18.16b, v17.16b // w &= ~hev
+ sqsub v4.16b, v4.16b, v19.16b // QS0 = clamp(QS0-c1)
+ sqadd v3.16b, v3.16b, v20.16b // PS0 = clamp(PS0+c2)
+
+ // filter_mbedge:
+ // a = clamp((27*w + 63) >> 7);
+ // Q0 = s2u(QS0 - a);
+ // P0 = s2u(PS0 + a);
+ // a = clamp((18*w + 63) >> 7);
+ // Q1 = s2u(QS1 - a);
+ // P1 = s2u(PS1 + a);
+ // a = clamp((9*w + 63) >> 7);
+ // Q2 = s2u(QS2 - a);
+ // P2 = s2u(PS2 + a);
+ movi v17.8h, #63
+ sshll v22.8h, v18.8b, #3
+ sshll2 v23.8h, v18.16b, #3
+ saddw v22.8h, v22.8h, v18.8b
+ saddw2 v23.8h, v23.8h, v18.16b
+ add v16.8h, v17.8h, v22.8h
+ add v17.8h, v17.8h, v23.8h // 9*w + 63
+ add v19.8h, v16.8h, v22.8h
+ add v20.8h, v17.8h, v23.8h // 18*w + 63
+ add v22.8h, v19.8h, v22.8h
+ add v23.8h, v20.8h, v23.8h // 27*w + 63
+ sqshrn v16.8b, v16.8h, #7
+ sqshrn2 v16.16b, v17.8h, #7 // clamp(( 9*w + 63)>>7)
+ sqshrn v19.8b, v19.8h, #7
+ sqshrn2 v19.16b, v20.8h, #7 // clamp((18*w + 63)>>7)
+ sqshrn v22.8b, v22.8h, #7
+ sqshrn2 v22.16b, v23.8h, #7 // clamp((27*w + 63)>>7)
+ sqadd v1.16b, v1.16b, v16.16b // PS2 = clamp(PS2+a)
+ sqsub v6.16b, v6.16b, v16.16b // QS2 = clamp(QS2-a)
+ sqadd v2.16b, v2.16b, v19.16b // PS1 = clamp(PS1+a)
+ sqsub v5.16b, v5.16b, v19.16b // QS1 = clamp(QS1-a)
+ sqadd v3.16b, v3.16b, v22.16b // PS0 = clamp(PS0+a)
+ sqsub v4.16b, v4.16b, v22.16b // QS0 = clamp(QS0-a)
+ eor v3.16b, v3.16b, v21.16b // P0 = PS0 ^ 0x80
+ eor v4.16b, v4.16b, v21.16b // Q0 = QS0 ^ 0x80
+ eor v2.16b, v2.16b, v21.16b // P1 = PS1 ^ 0x80
+ eor v5.16b, v5.16b, v21.16b // Q1 = QS1 ^ 0x80
+ eor v1.16b, v1.16b, v21.16b // P2 = PS2 ^ 0x80
+ eor v6.16b, v6.16b, v21.16b // Q2 = QS2 ^ 0x80
+ .endif
+.endm
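+
+// For reference, a scalar sketch of the is4tap == 1 filter_common step above
+// (illustration only; av_clip_int8() stands in for the saturating sqadd/sqsub
+// arithmetic, and the final eor with 0x80 is the s2u() conversion):
+//
+//     static inline void filter_common(int8_t *ps0, int8_t *qs0, int w)
+//     {
+//         int c1 = av_clip_int8(w + 4) >> 3;
+//         int c2 = av_clip_int8(w + 3) >> 3;
+//         *qs0   = av_clip_int8(*qs0 - c1);
+//         *ps0   = av_clip_int8(*ps0 + c2);
+//     }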
+
+.macro vp8_v_loop_filter16 name, inner=0, simple=0
+function ff_vp8_v_loop_filter16\name\()_neon, export=1
+ sub x0, x0, x1, lsl #1+!\simple
+
+ // Load pixels:
+ .if !\simple
+ ld1 {v0.16b}, [x0], x1 // P3
+ ld1 {v1.16b}, [x0], x1 // P2
+ .endif
+ ld1 {v2.16b}, [x0], x1 // P1
+ ld1 {v3.16b}, [x0], x1 // P0
+ ld1 {v4.16b}, [x0], x1 // Q0
+ ld1 {v5.16b}, [x0], x1 // Q1
+ .if !\simple
+ ld1 {v6.16b}, [x0], x1 // Q2
+ ld1 {v7.16b}, [x0] // Q3
+ dup v23.16b, w3 // flim_I
+ .endif
+ dup v22.16b, w2 // flim_E
+
+ vp8_loop_filter inner=\inner, simple=\simple, hev_thresh=w4
+
+ // back up to P2: dst -= stride * 6
+ sub x0, x0, x1, lsl #2
+ .if !\simple
+ sub x0, x0, x1, lsl #1
+
+ // Store pixels:
+ st1 {v1.16b}, [x0], x1 // P2
+ .endif
+ st1 {v2.16b}, [x0], x1 // P1
+ st1 {v3.16b}, [x0], x1 // P0
+ st1 {v4.16b}, [x0], x1 // Q0
+ st1 {v5.16b}, [x0], x1 // Q1
+ .if !\simple
+ st1 {v6.16b}, [x0] // Q2
+ .endif
+
+ ret
+endfunc
+.endm
+
+vp8_v_loop_filter16
+vp8_v_loop_filter16 _inner, inner=1
+vp8_v_loop_filter16 _simple, simple=1
+
+.macro vp8_v_loop_filter8uv name, inner=0
+function ff_vp8_v_loop_filter8uv\name\()_neon, export=1
+ sub x0, x0, x2, lsl #2
+ sub x1, x1, x2, lsl #2
+ // Load pixels:
+ ld1 {v0.d}[0], [x0], x2 // P3
+ ld1 {v0.d}[1], [x1], x2 // P3
+ ld1 {v1.d}[0], [x0], x2 // P2
+ ld1 {v1.d}[1], [x1], x2 // P2
+ ld1 {v2.d}[0], [x0], x2 // P1
+ ld1 {v2.d}[1], [x1], x2 // P1
+ ld1 {v3.d}[0], [x0], x2 // P0
+ ld1 {v3.d}[1], [x1], x2 // P0
+ ld1 {v4.d}[0], [x0], x2 // Q0
+ ld1 {v4.d}[1], [x1], x2 // Q0
+ ld1 {v5.d}[0], [x0], x2 // Q1
+ ld1 {v5.d}[1], [x1], x2 // Q1
+ ld1 {v6.d}[0], [x0], x2 // Q2
+ ld1 {v6.d}[1], [x1], x2 // Q2
+ ld1 {v7.d}[0], [x0] // Q3
+ ld1 {v7.d}[1], [x1] // Q3
+
+ dup v22.16b, w3 // flim_E
+ dup v23.16b, w4 // flim_I
+
+ vp8_loop_filter inner=\inner, hev_thresh=w5
+
+ // back up to P2: u,v -= stride * 6
+ sub x0, x0, x2, lsl #2
+ sub x1, x1, x2, lsl #2
+ sub x0, x0, x2, lsl #1
+ sub x1, x1, x2, lsl #1
+
+ // Store pixels:
+
+ st1 {v1.d}[0], [x0], x2 // P2
+ st1 {v1.d}[1], [x1], x2 // P2
+ st1 {v2.d}[0], [x0], x2 // P1
+ st1 {v2.d}[1], [x1], x2 // P1
+ st1 {v3.d}[0], [x0], x2 // P0
+ st1 {v3.d}[1], [x1], x2 // P0
+ st1 {v4.d}[0], [x0], x2 // Q0
+ st1 {v4.d}[1], [x1], x2 // Q0
+ st1 {v5.d}[0], [x0], x2 // Q1
+ st1 {v5.d}[1], [x1], x2 // Q1
+ st1 {v6.d}[0], [x0] // Q2
+ st1 {v6.d}[1], [x1] // Q2
+
+ ret
+endfunc
+.endm
+
+vp8_v_loop_filter8uv
+vp8_v_loop_filter8uv _inner, inner=1
+
+.macro vp8_h_loop_filter16 name, inner=0, simple=0
+function ff_vp8_h_loop_filter16\name\()_neon, export=1
+
+ sub x0, x0, #4
+ // Load pixels:
+ ld1 {v0.d}[0], [x0], x1
+ ld1 {v1.d}[0], [x0], x1
+ ld1 {v2.d}[0], [x0], x1
+ ld1 {v3.d}[0], [x0], x1
+ ld1 {v4.d}[0], [x0], x1
+ ld1 {v5.d}[0], [x0], x1
+ ld1 {v6.d}[0], [x0], x1
+ ld1 {v7.d}[0], [x0], x1
+ ld1 {v0.d}[1], [x0], x1
+ ld1 {v1.d}[1], [x0], x1
+ ld1 {v2.d}[1], [x0], x1
+ ld1 {v3.d}[1], [x0], x1
+ ld1 {v4.d}[1], [x0], x1
+ ld1 {v5.d}[1], [x0], x1
+ ld1 {v6.d}[1], [x0], x1
+ ld1 {v7.d}[1], [x0], x1
+
+ transpose_8x16B v0, v1, v2, v3, v4, v5, v6, v7, v30, v31
+
+ dup v22.16b, w2 // flim_E
+ .if !\simple
+ dup v23.16b, w3 // flim_I
+ .endif
+
+ vp8_loop_filter inner=\inner, simple=\simple, hev_thresh=w4
+
+ sub x0, x0, x1, lsl #4 // backup 16 rows
+
+ transpose_8x16B v0, v1, v2, v3, v4, v5, v6, v7, v30, v31
+
+ // Store pixels:
+ st1 {v0.d}[0], [x0], x1
+ st1 {v1.d}[0], [x0], x1
+ st1 {v2.d}[0], [x0], x1
+ st1 {v3.d}[0], [x0], x1
+ st1 {v4.d}[0], [x0], x1
+ st1 {v5.d}[0], [x0], x1
+ st1 {v6.d}[0], [x0], x1
+ st1 {v7.d}[0], [x0], x1
+ st1 {v0.d}[1], [x0], x1
+ st1 {v1.d}[1], [x0], x1
+ st1 {v2.d}[1], [x0], x1
+ st1 {v3.d}[1], [x0], x1
+ st1 {v4.d}[1], [x0], x1
+ st1 {v5.d}[1], [x0], x1
+ st1 {v6.d}[1], [x0], x1
+ st1 {v7.d}[1], [x0]
+
+ ret
+endfunc
+.endm
+
+vp8_h_loop_filter16
+vp8_h_loop_filter16 _inner, inner=1
+vp8_h_loop_filter16 _simple, simple=1
+
+.macro vp8_h_loop_filter8uv name, inner=0
+function ff_vp8_h_loop_filter8uv\name\()_neon, export=1
+ sub x0, x0, #4
+ sub x1, x1, #4
+
+ // Load pixels:
+ ld1 {v0.d}[0], [x0], x2 // load u
+ ld1 {v0.d}[1], [x1], x2 // load v
+ ld1 {v1.d}[0], [x0], x2
+ ld1 {v1.d}[1], [x1], x2
+ ld1 {v2.d}[0], [x0], x2
+ ld1 {v2.d}[1], [x1], x2
+ ld1 {v3.d}[0], [x0], x2
+ ld1 {v3.d}[1], [x1], x2
+ ld1 {v4.d}[0], [x0], x2
+ ld1 {v4.d}[1], [x1], x2
+ ld1 {v5.d}[0], [x0], x2
+ ld1 {v5.d}[1], [x1], x2
+ ld1 {v6.d}[0], [x0], x2
+ ld1 {v6.d}[1], [x1], x2
+ ld1 {v7.d}[0], [x0], x2
+ ld1 {v7.d}[1], [x1], x2
+
+ transpose_8x16B v0, v1, v2, v3, v4, v5, v6, v7, v30, v31
+
+ dup v22.16b, w3 // flim_E
+ dup v23.16b, w4 // flim_I
+
+ vp8_loop_filter inner=\inner, hev_thresh=w5
+
+ sub x0, x0, x2, lsl #3 // backup u 8 rows
+ sub x1, x1, x2, lsl #3 // backup v 8 rows
+
+ transpose_8x16B v0, v1, v2, v3, v4, v5, v6, v7, v30, v31
+
+ // Store pixels:
+        st1 {v0.d}[0], [x0], x2 // store u
+        st1 {v0.d}[1], [x1], x2 // store v
+ st1 {v1.d}[0], [x0], x2
+ st1 {v1.d}[1], [x1], x2
+ st1 {v2.d}[0], [x0], x2
+ st1 {v2.d}[1], [x1], x2
+ st1 {v3.d}[0], [x0], x2
+ st1 {v3.d}[1], [x1], x2
+ st1 {v4.d}[0], [x0], x2
+ st1 {v4.d}[1], [x1], x2
+ st1 {v5.d}[0], [x0], x2
+ st1 {v5.d}[1], [x1], x2
+ st1 {v6.d}[0], [x0], x2
+ st1 {v6.d}[1], [x1], x2
+ st1 {v7.d}[0], [x0]
+ st1 {v7.d}[1], [x1]
+
+ ret
+
+endfunc
+.endm
+
+vp8_h_loop_filter8uv
+vp8_h_loop_filter8uv _inner, inner=1
+
+
+function ff_put_vp8_pixels16_neon, export=1
+1:
+ subs w4, w4, #4
+ ld1 {v0.16b}, [x2], x3
+ ld1 {v1.16b}, [x2], x3
+ ld1 {v2.16b}, [x2], x3
+ ld1 {v3.16b}, [x2], x3
+ st1 {v0.16b}, [x0], x1
+ st1 {v1.16b}, [x0], x1
+ st1 {v2.16b}, [x0], x1
+ st1 {v3.16b}, [x0], x1
+ b.gt 1b
+ ret
+endfunc
+
+function ff_put_vp8_pixels8_neon, export=1
+1:
+ subs w4, w4, #4
+ ld1 {v0.8b}, [x2], x3
+ ld1 {v0.d}[1], [x2], x3
+ ld1 {v1.8b}, [x2], x3
+ ld1 {v1.d}[1], [x2], x3
+ st1 {v0.8b}, [x0], x1
+ st1 {v0.d}[1], [x0], x1
+ st1 {v1.8b}, [x0], x1
+ st1 {v1.d}[1], [x0], x1
+ b.gt 1b
+ ret
+endfunc
+
+/* 4/6-tap 8th-pel MC */
+
+.macro vp8_epel8_h6 d, s0, s1
+ ext v22.8b, \s0\().8b, \s1\().8b, #1
+ uxtl v18.8h, \s0\().8b
+ ext v23.8b, \s0\().8b, \s1\().8b, #2
+ uxtl v19.8h, v22.8b
+ ext v24.8b, \s0\().8b, \s1\().8b, #3
+ uxtl v21.8h, v23.8b
+ ext v25.8b, \s0\().8b, \s1\().8b, #4
+ uxtl v22.8h, v24.8b
+ ext v26.8b, \s0\().8b, \s1\().8b, #5
+ uxtl v25.8h, v25.8b
+ mul v21.8h, v21.8h, v0.h[2]
+ uxtl v26.8h, v26.8b
+ mul v22.8h, v22.8h, v0.h[3]
+ mls v21.8h, v19.8h, v0.h[1]
+ mls v22.8h, v25.8h, v0.h[4]
+ mla v21.8h, v18.8h, v0.h[0]
+ mla v22.8h, v26.8h, v0.h[5]
+ sqadd v22.8h, v21.8h, v22.8h
+ sqrshrun \d\().8b, v22.8h, #7
+.endm
+
+.macro vp8_epel16_h6 d0, v0, v1
+ ext v22.16b, \v0\().16b, \v1\().16b, #3
+ ext v23.16b, \v0\().16b, \v1\().16b, #4
+ uxtl v19.8h, v22.8b
+ uxtl2 v22.8h, v22.16b
+ ext v3.16b, \v0\().16b, \v1\().16b, #2
+ uxtl v20.8h, v23.8b
+ uxtl2 v23.8h, v23.16b
+ ext v16.16b, \v0\().16b, \v1\().16b, #1
+ uxtl v18.8h, v3.8b
+ uxtl2 v3.8h, v3.16b
+ ext v2.16b, \v0\().16b, \v1\().16b, #5
+ uxtl v21.8h, v2.8b
+ uxtl2 v2.8h, v2.16b
+ uxtl v17.8h, v16.8b
+ uxtl2 v16.8h, v16.16b
+ mul v19.8h, v19.8h, v0.h[3]
+ mul v18.8h, v18.8h, v0.h[2]
+ mul v3.8h, v3.8h, v0.h[2]
+ mul v22.8h, v22.8h, v0.h[3]
+ mls v19.8h, v20.8h, v0.h[4]
+ uxtl v20.8h, \v0\().8b
+ uxtl2 v1.8h, \v0\().16b
+ mls v18.8h, v17.8h, v0.h[1]
+ mls v3.8h, v16.8h, v0.h[1]
+ mls v22.8h, v23.8h, v0.h[4]
+ mla v18.8h, v20.8h, v0.h[0]
+ mla v19.8h, v21.8h, v0.h[5]
+ mla v3.8h, v1.8h, v0.h[0]
+ mla v22.8h, v2.8h, v0.h[5]
+ sqadd v19.8h, v18.8h, v19.8h
+ sqadd v22.8h, v3.8h, v22.8h
+ sqrshrun \d0\().8b, v19.8h, #7
+ sqrshrun2 \d0\().16b, v22.8h, #7
+.endm
+
+.macro vp8_epel8_v6_y2 d0, d1, s0, s1, s2, s3, s4, s5, s6
+ uxtl \s0\().8h, \s0\().8b
+ uxtl \s3\().8h, \s3\().8b
+ uxtl \s6\().8h, \s6\().8b
+ uxtl \s1\().8h, \s1\().8b
+ uxtl \s4\().8h, \s4\().8b
+ uxtl \s2\().8h, \s2\().8b
+ uxtl \s5\().8h, \s5\().8b
+ mul \s0\().8h, \s0\().8h, v0.h[0]
+ mul v31.8h , \s3\().8h, v0.h[3]
+ mul \s3\().8h, \s3\().8h, v0.h[2]
+ mul \s6\().8h, \s6\().8h, v0.h[5]
+
+ mls \s0\().8h, \s1\().8h, v0.h[1]
+ mls v31.8h , \s4\().8h, v0.h[4]
+ mls \s3\().8h, \s2\().8h, v0.h[1]
+ mls \s6\().8h, \s5\().8h, v0.h[4]
+
+ mla \s0\().8h, \s2\().8h, v0.h[2]
+ mla v31.8h , \s5\().8h, v0.h[5]
+ mla \s3\().8h, \s1\().8h, v0.h[0]
+ mla \s6\().8h, \s4\().8h, v0.h[3]
+ sqadd v31.8h , \s0\().8h, v31.8h
+ sqadd \s6\().8h, \s3\().8h, \s6\().8h
+ sqrshrun \d0\().8b, v31.8h, #7
+ sqrshrun \d1\().8b, \s6\().8h, #7
+.endm
+
+.macro vp8_epel8_h4 d, v0, v1
+ ext v22.8b, \v0\().8b, \v1\().8b, #1
+ uxtl v19.8h, \v0\().8b
+ ext v23.8b, \v0\().8b, \v1\().8b, #2
+ uxtl v20.8h, v22.8b
+ ext v25.8b, \v0\().8b, \v1\().8b, #3
+ uxtl v22.8h, v23.8b
+ uxtl v25.8h, v25.8b
+ mul v20.8h, v20.8h, v0.h[2]
+ mul v22.8h, v22.8h, v0.h[3]
+ mls v20.8h, v19.8h, v0.h[1]
+ mls v22.8h, v25.8h, v0.h[4]
+ sqadd v22.8h, v20.8h, v22.8h
+ sqrshrun \d\().8b, v22.8h, #7
+.endm
+
+.macro vp8_epel8_v4_y2 d0, s0, s1, s2, s3, s4
+ uxtl \s0\().8h, \s0\().8b
+ uxtl \s1\().8h, \s1\().8b
+ uxtl \s2\().8h, \s2\().8b
+ uxtl \s3\().8h, \s3\().8b
+ uxtl \s4\().8h, \s4\().8b
+ mul v21.8h, \s1\().8h, v0.h[2]
+ mul v23.8h, \s2\().8h, v0.h[3]
+ mul \s2\().8h, \s2\().8h, v0.h[2]
+ mul v22.8h, \s3\().8h, v0.h[3]
+ mls v21.8h, \s0\().8h, v0.h[1]
+ mls v23.8h, \s3\().8h, v0.h[4]
+ mls \s2\().8h, \s1\().8h, v0.h[1]
+ mls v22.8h, \s4\().8h, v0.h[4]
+ sqadd v21.8h, v21.8h, v23.8h
+ sqadd \s2\().8h, \s2\().8h, v22.8h
+ sqrshrun \d0\().8b, v21.8h, #7
+ sqrshrun2 \d0\().16b, \s2\().8h, #7
+.endm
+
+
+// note: worst case sum of all 6-tap filter values * 255 is 0x7f80 so 16 bit
+// arithmetic can be used to apply filters
+const subpel_filters, align=4
+ .short 0, 6, 123, 12, 1, 0, 0, 0
+ .short 2, 11, 108, 36, 8, 1, 0, 0
+ .short 0, 9, 93, 50, 6, 0, 0, 0
+ .short 3, 16, 77, 77, 16, 3, 0, 0
+ .short 0, 6, 50, 93, 9, 0, 0, 0
+ .short 1, 8, 36, 108, 11, 2, 0, 0
+ .short 0, 1, 12, 123, 6, 0, 0, 0
+endconst
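+
+// The rows are 16 bytes apart and correspond to subpel positions 1-7;
+// position 0 means no filtering (handled by the pixels functions), which is
+// why the callers use "movrel xN, subpel_filters, -16" and add position << 4.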
+
+function ff_put_vp8_epel16_v6_neon, export=1
+ sub x2, x2, x3, lsl #1
+
+ sxtw x4, w4
+ sxtw x6, w6
+ movrel x17, subpel_filters, -16
+ add x6, x17, x6, lsl #4 // y
+ ld1 {v0.8h}, [x6]
+1:
+ ld1 {v1.1d - v2.1d}, [x2], x3
+ ld1 {v3.1d - v4.1d}, [x2], x3
+ ld1 {v16.1d - v17.1d}, [x2], x3
+ ld1 {v18.1d - v19.1d}, [x2], x3
+ ld1 {v20.1d - v21.1d}, [x2], x3
+ ld1 {v22.1d - v23.1d}, [x2], x3
+ ld1 {v24.1d - v25.1d}, [x2]
+ sub x2, x2, x3, lsl #2
+
+ vp8_epel8_v6_y2 v1, v3, v1, v3, v16, v18, v20, v22, v24
+ vp8_epel8_v6_y2 v2, v4, v2, v4, v17, v19, v21, v23, v25
+
+ st1 {v1.1d - v2.1d}, [x0], x1
+ st1 {v3.1d - v4.1d}, [x0], x1
+ subs x4, x4, #2
+ b.ne 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_epel16_h6_neon, export=1
+ sub x2, x2, #2
+ sxtw x5, w5 // x
+
+ // first pass (horizontal):
+ movrel x17, subpel_filters, -16
+ add x5, x17, x5, lsl #4 // x
+ ld1 {v0.8h}, [x5]
+1:
+ ld1 {v1.16b, v2.16b}, [x2], x3
+ vp8_epel16_h6 v1, v1, v2
+ st1 {v1.16b}, [x0], x1
+
+ subs w4, w4, #1
+ b.ne 1b
+ ret
+endfunc
+
+
+function ff_put_vp8_epel16_h6v6_neon, export=1
+ sub x2, x2, x3, lsl #1
+ sub x2, x2, #2
+
+ // first pass (horizontal):
+ movrel x17, subpel_filters, -16
+ sxtw x5, w5 // x
+ add x16, x17, x5, lsl #4 // x
+ sub sp, sp, #336+16
+ ld1 {v0.8h}, [x16]
+ add x7, sp, #15
+ sxtw x4, w4
+ add x16, x4, #5 // h
+ bic x7, x7, #15
+1:
+ ld1 {v1.16b, v2.16b}, [x2], x3
+ vp8_epel16_h6 v1, v1, v2
+ st1 {v1.16b}, [x7], #16
+ subs x16, x16, #1
+ b.ne 1b
+
+
+ // second pass (vertical):
+ sxtw x6, w6
+ add x6, x17, x6, lsl #4 // y
+ add x7, sp, #15
+ ld1 {v0.8h}, [x6]
+ bic x7, x7, #15
+2:
+ ld1 {v1.8b - v4.8b}, [x7], #32
+ ld1 {v16.8b - v19.8b}, [x7], #32
+ ld1 {v20.8b - v23.8b}, [x7], #32
+ ld1 {v24.8b - v25.8b}, [x7]
+ sub x7, x7, #64
+
+ vp8_epel8_v6_y2 v1, v3, v1, v3, v16, v18, v20, v22, v24
+ vp8_epel8_v6_y2 v2, v4, v2, v4, v17, v19, v21, v23, v25
+ trn1 v1.2d, v1.2d, v2.2d
+ trn1 v3.2d, v3.2d, v4.2d
+
+ st1 {v1.16b}, [x0], x1
+ st1 {v3.16b}, [x0], x1
+ subs x4, x4, #2
+ b.ne 2b
+
+ add sp, sp, #336+16
+ ret
+endfunc
+
+function ff_put_vp8_epel8_v6_neon, export=1
+ sub x2, x2, x3, lsl #1
+
+ movrel x7, subpel_filters, -16
+ add x6, x7, w6, uxtw #4
+ ld1 {v0.8h}, [x6]
+1:
+ ld1 {v2.8b}, [x2], x3
+ ld1 {v3.8b}, [x2], x3
+ ld1 {v4.8b}, [x2], x3
+ ld1 {v5.8b}, [x2], x3
+ ld1 {v6.8b}, [x2], x3
+ ld1 {v7.8b}, [x2], x3
+ ld1 {v28.8b}, [x2]
+
+ sub x2, x2, x3, lsl #2
+
+ vp8_epel8_v6_y2 v2, v3, v2, v3, v4, v5, v6, v7, v28
+
+ st1 {v2.8b}, [x0], x1
+ st1 {v3.8b}, [x0], x1
+ subs w4, w4, #2
+ b.ne 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_epel8_h6_neon, export=1
+ sub x2, x2, #2
+
+ movrel x7, subpel_filters, -16
+ add x5, x7, w5, uxtw #4
+ ld1 {v0.8h}, [x5]
+1:
+ ld1 {v2.8b, v3.8b}, [x2], x3
+
+ vp8_epel8_h6 v2, v2, v3
+
+ st1 {v2.8b}, [x0], x1
+ subs w4, w4, #1
+ b.ne 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_epel8_h6v6_neon, export=1
+ sub x2, x2, x3, lsl #1
+ sub x2, x2, #2
+ sxtw x4, w4
+
+ // first pass (horizontal):
+ movrel x17, subpel_filters, -16
+ sxtw x5, w5
+ add x5, x17, x5, lsl #4 // x
+ sub sp, sp, #168+16
+ ld1 {v0.8h}, [x5]
+ add x7, sp, #15
+ add x16, x4, #5 // h
+ bic x7, x7, #15
+1:
+ ld1 {v1.8b, v2.8b}, [x2], x3
+
+ vp8_epel8_h6 v1, v1, v2
+
+ st1 {v1.8b}, [x7], #8
+ subs x16, x16, #1
+ b.ne 1b
+
+ // second pass (vertical):
+ sxtw x6, w6
+ add x6, x17, x6, lsl #4 // y
+ add x7, sp, #15
+ ld1 {v0.8h}, [x6]
+ bic x7, x7, #15
+2:
+ ld1 {v1.8b - v4.8b}, [x7], #32
+ ld1 {v5.8b - v7.8b}, [x7]
+
+ sub x7, x7, #16
+
+ vp8_epel8_v6_y2 v1, v2, v1, v2, v3, v4, v5, v6, v7
+
+ st1 {v1.8b}, [x0], x1
+ st1 {v2.8b}, [x0], x1
+ subs x4, x4, #2
+ b.ne 2b
+
+ add sp, sp, #168+16
+ ret
+endfunc
+
+function ff_put_vp8_epel8_v4_neon, export=1
+ sub x2, x2, x3
+
+ movrel x7, subpel_filters, -16
+ add x6, x7, w6, uxtw #4
+ ld1 {v0.8h}, [x6]
+1:
+ ld1 {v2.8b}, [x2], x3
+ ld1 {v3.8b}, [x2], x3
+ ld1 {v4.8b}, [x2], x3
+ ld1 {v5.8b}, [x2], x3
+ ld1 {v6.8b}, [x2]
+ sub x2, x2, x3, lsl #1
+
+ vp8_epel8_v4_y2 v2, v2, v3, v4, v5, v6
+
+ st1 {v2.d}[0], [x0], x1
+ st1 {v2.d}[1], [x0], x1
+ subs w4, w4, #2
+ b.ne 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_epel8_h4_neon, export=1
+ sub x2, x2, #1
+
+ movrel x7, subpel_filters, -16
+ add x5, x7, w5, uxtw #4
+ ld1 {v0.8h}, [x5]
+1:
+ ld1 {v2.8b,v3.8b}, [x2], x3
+
+ vp8_epel8_h4 v2, v2, v3
+
+ st1 {v2.8b}, [x0], x1
+ subs w4, w4, #1
+ b.ne 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_epel8_h4v6_neon, export=1
+ sub x2, x2, x3, lsl #1
+ sub x2, x2, #1
+ sxtw x4, w4
+
+ // first pass (horizontal):
+ movrel x17, subpel_filters, -16
+ sxtw x5, w5
+ add x5, x17, x5, lsl #4 // x
+ sub sp, sp, #168+16
+ ld1 {v0.8h}, [x5]
+ add x7, sp, #15
+ add x16, x4, #5 // h
+ bic x7, x7, #15
+1:
+ ld1 {v1.8b, v2.8b}, [x2], x3
+
+ vp8_epel8_h4 v1, v1, v2
+
+ st1 {v1.8b}, [x7], #8
+ subs x16, x16, #1
+ b.ne 1b
+
+ // second pass (vertical):
+ sxtw x6, w6
+ add x6, x17, x6, lsl #4 // y
+ add x7, sp, #15
+ ld1 {v0.8h}, [x6]
+ bic x7, x7, #15
+2:
+ ld1 {v1.8b - v4.8b}, [x7], #32
+ ld1 {v5.8b - v7.8b}, [x7]
+
+ sub x7, x7, #16
+
+ vp8_epel8_v6_y2 v1, v2, v1, v2, v3, v4, v5, v6, v7
+
+ st1 {v1.8b}, [x0], x1
+ st1 {v2.8b}, [x0], x1
+ subs x4, x4, #2
+ b.ne 2b
+
+ add sp, sp, #168+16
+ ret
+endfunc
+
+function ff_put_vp8_epel8_h4v4_neon, export=1
+ sub x2, x2, x3
+ sub x2, x2, #1
+ sxtw x4, w4
+
+
+ // first pass (horizontal):
+ movrel x17, subpel_filters, -16
+ sxtw x5, w5
+ add x5, x17, x5, lsl #4 // x
+ sub sp, sp, #168+16
+ ld1 {v0.8h}, [x5]
+ add x7, sp, #15
+ add x16, x4, #3 // h
+ bic x7, x7, #15
+1:
+ ld1 {v1.8b, v2.8b}, [x2], x3
+
+ vp8_epel8_h4 v1, v1, v2
+
+ st1 {v1.8b}, [x7], #8
+ subs x16, x16, #1
+ b.ne 1b
+
+ // second pass (vertical):
+ sxtw x6, w6
+ add x6, x17, x6, lsl #4 // y
+ add x7, sp, #15
+ ld1 {v0.8h}, [x6]
+ bic x7, x7, #15
+2:
+ ld1 {v1.8b - v2.8b}, [x7], #16
+ ld1 {v3.8b - v5.8b}, [x7]
+
+ vp8_epel8_v4_y2 v1, v1, v2, v3, v4, v5
+
+ st1 {v1.d}[0], [x0], x1
+ st1 {v1.d}[1], [x0], x1
+ subs x4, x4, #2
+ b.ne 2b
+
+ add sp, sp, #168+16
+ ret
+endfunc
+
+function ff_put_vp8_epel8_h6v4_neon, export=1
+ sub x2, x2, x3
+ sub x2, x2, #2
+ sxtw x4, w4
+
+
+ // first pass (horizontal):
+ movrel x17, subpel_filters, -16
+ sxtw x5, w5
+ add x5, x17, x5, lsl #4 // x
+ sub sp, sp, #168+16
+ ld1 {v0.8h}, [x5]
+ add x7, sp, #15
+ add x16, x4, #3 // h
+ bic x7, x7, #15
+1:
+ ld1 {v1.8b, v2.8b}, [x2], x3
+
+ vp8_epel8_h6 v1, v1, v2
+
+ st1 {v1.8b}, [x7], #8
+ subs x16, x16, #1
+ b.ne 1b
+
+ // second pass (vertical):
+ sxtw x6, w6
+ add x6, x17, x6, lsl #4 // y
+ add x7, sp, #15
+ ld1 {v0.8h}, [x6]
+ bic x7, x7, #15
+2:
+ ld1 {v1.8b - v2.8b}, [x7], #16
+ ld1 {v3.8b - v5.8b}, [x7]
+
+ vp8_epel8_v4_y2 v1, v1, v2, v3, v4, v5
+
+ st1 {v1.d}[0], [x0], x1
+ st1 {v1.d}[1], [x0], x1
+ subs x4, x4, #2
+ b.ne 2b
+
+ add sp, sp, #168+16
+ ret
+endfunc
+
+function ff_put_vp8_epel4_v6_neon, export=1
+ sub x2, x2, x3, lsl #1
+
+ movrel x7, subpel_filters, -16
+ add x6, x7, w6, uxtw #4
+ ld1 {v0.8h}, [x6]
+1:
+ ld1r {v2.2s}, [x2], x3
+ ld1r {v3.2s}, [x2], x3
+ ld1r {v4.2s}, [x2], x3
+ ld1r {v5.2s}, [x2], x3
+ ld1r {v6.2s}, [x2], x3
+ ld1r {v7.2s}, [x2], x3
+ ld1r {v28.2s}, [x2]
+ sub x2, x2, x3, lsl #2
+ ld1 {v2.s}[1], [x2], x3
+ ld1 {v3.s}[1], [x2], x3
+ ld1 {v4.s}[1], [x2], x3
+ ld1 {v5.s}[1], [x2], x3
+ ld1 {v6.s}[1], [x2], x3
+ ld1 {v7.s}[1], [x2], x3
+ ld1 {v28.s}[1], [x2]
+ sub x2, x2, x3, lsl #2
+
+ vp8_epel8_v6_y2 v2, v3, v2, v3, v4, v5, v6, v7, v28
+
+ st1 {v2.s}[0], [x0], x1
+ st1 {v3.s}[0], [x0], x1
+ st1 {v2.s}[1], [x0], x1
+ st1 {v3.s}[1], [x0], x1
+ subs w4, w4, #4
+ b.ne 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_epel4_h6_neon, export=1
+ sub x2, x2, #2
+
+ movrel x7, subpel_filters, -16
+ add x5, x7, w5, uxtw #4
+ ld1 {v0.8h}, [x5]
+1:
+ ld1 {v2.8b,v3.8b}, [x2], x3
+ vp8_epel8_h6 v2, v2, v3
+ st1 {v2.s}[0], [x0], x1
+ subs w4, w4, #1
+ b.ne 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_epel4_h6v6_neon, export=1
+ sub x2, x2, x3, lsl #1
+ sub x2, x2, #2
+
+ movrel x7, subpel_filters, -16
+ add x5, x7, w5, uxtw #4
+ ld1 {v0.8h}, [x5]
+
+ sub sp, sp, #52
+ add w8, w4, #5
+ mov x9, sp
+1:
+ ld1 {v2.8b,v3.8b}, [x2], x3
+ vp8_epel8_h6 v2, v2, v3
+ st1 {v2.s}[0], [x9], #4
+ subs w8, w8, #1
+ b.ne 1b
+
+ add x6, x7, w6, uxtw #4
+ ld1 {v0.8h}, [x6]
+ mov x9, sp
+2:
+ ld1 {v2.8b,v3.8b}, [x9], #16
+ ld1 {v6.8b}, [x9], #8
+ ld1r {v28.2s}, [x9]
+ sub x9, x9, #16
+ ld1 {v4.8b,v5.8b}, [x9], #16
+ ld1 {v7.8b}, [x9], #8
+ ld1 {v28.s}[1], [x9]
+ sub x9, x9, #16
+ trn1 v1.2s, v2.2s, v4.2s
+ trn2 v4.2s, v2.2s, v4.2s
+ trn1 v2.2s, v3.2s, v5.2s
+ trn2 v5.2s, v3.2s, v5.2s
+ trn1 v3.2s, v6.2s, v7.2s
+ trn2 v7.2s, v6.2s, v7.2s
+ vp8_epel8_v6_y2 v2, v3, v1, v4, v2, v5, v3, v7, v28
+ st1 {v2.s}[0], [x0], x1
+ st1 {v3.s}[0], [x0], x1
+ st1 {v2.s}[1], [x0], x1
+ st1 {v3.s}[1], [x0], x1
+ subs w4, w4, #4
+ b.ne 2b
+
+ add sp, sp, #52
+ ret
+endfunc
+
+function ff_put_vp8_epel4_h4v6_neon, export=1
+ sub x2, x2, x3, lsl #1
+ sub x2, x2, #1
+
+ movrel x7, subpel_filters, -16
+ add x5, x7, w5, uxtw #4
+ ld1 {v0.8h}, [x5]
+
+ sub sp, sp, #52
+ add w8, w4, #5
+ mov x9, sp
+1:
+ ld1 {v2.8b}, [x2], x3
+ vp8_epel8_h4 v2, v2, v2
+ st1 {v2.s}[0], [x9], #4
+ subs w8, w8, #1
+ b.ne 1b
+
+ add x6, x7, w6, uxtw #4
+ ld1 {v0.8h}, [x6]
+ mov x9, sp
+2:
+ ld1 {v2.8b,v3.8b}, [x9], #16
+ ld1 {v6.8b}, [x9], #8
+ ld1r {v28.2s}, [x9]
+ sub x9, x9, #16
+ ld1 {v4.8b,v5.8b}, [x9], #16
+ ld1 {v7.8b}, [x9], #8
+ ld1 {v28.s}[1], [x9]
+ sub x9, x9, #16
+ trn1 v1.2s, v2.2s, v4.2s
+ trn2 v4.2s, v2.2s, v4.2s
+ trn1 v2.2s, v3.2s, v5.2s
+ trn2 v5.2s, v3.2s, v5.2s
+ trn1 v3.2s, v6.2s, v7.2s
+ trn2 v7.2s, v6.2s, v7.2s
+ vp8_epel8_v6_y2 v2, v3, v1, v4, v2, v5, v3, v7, v28
+ st1 {v2.s}[0], [x0], x1
+ st1 {v3.s}[0], [x0], x1
+ st1 {v2.s}[1], [x0], x1
+ st1 {v3.s}[1], [x0], x1
+ subs w4, w4, #4
+ b.ne 2b
+
+ add sp, sp, #52
+ ret
+endfunc
+
+function ff_put_vp8_epel4_h6v4_neon, export=1
+ sub x2, x2, x3
+ sub x2, x2, #2
+
+ movrel x7, subpel_filters, -16
+ add x5, x7, w5, uxtw #4
+ ld1 {v0.8h}, [x5]
+
+ sub sp, sp, #44
+ add w8, w4, #3
+ mov x9, sp
+1:
+ ld1 {v2.8b,v3.8b}, [x2], x3
+ vp8_epel8_h6 v2, v2, v3
+ st1 {v2.s}[0], [x9], #4
+ subs w8, w8, #1
+ b.ne 1b
+
+ add x6, x7, w6, uxtw #4
+ ld1 {v0.8h}, [x6]
+ mov x9, sp
+2:
+ ld1 {v2.8b,v3.8b}, [x9], #16
+ ld1r {v6.2s}, [x9]
+ sub x9, x9, #8
+ ld1 {v4.8b,v5.8b}, [x9], #16
+ ld1 {v6.s}[1], [x9]
+ sub x9, x9, #8
+ trn1 v1.2s, v2.2s, v4.2s
+ trn2 v4.2s, v2.2s, v4.2s
+ trn1 v2.2s, v3.2s, v5.2s
+ trn2 v5.2s, v3.2s, v5.2s
+ vp8_epel8_v4_y2 v1, v1, v4, v2, v5, v6
+ st1 {v1.s}[0], [x0], x1
+ st1 {v1.s}[2], [x0], x1
+ st1 {v1.s}[1], [x0], x1
+ st1 {v1.s}[3], [x0], x1
+ subs w4, w4, #4
+ b.ne 2b
+
+ add sp, sp, #44
+ ret
+endfunc
+
+function ff_put_vp8_epel4_h4_neon, export=1
+ sub x2, x2, #1
+
+ movrel x7, subpel_filters, -16
+ add x5, x7, w5, uxtw #4
+ ld1 {v0.8h}, [x5]
+1:
+ ld1 {v2.8b}, [x2], x3
+ vp8_epel8_h4 v2, v2, v2
+ st1 {v2.s}[0], [x0], x1
+ subs w4, w4, #1
+ b.ne 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_epel4_v4_neon, export=1
+ sub x2, x2, x3
+
+ movrel x7, subpel_filters, -16
+ add x6, x7, w6, uxtw #4
+ ld1 {v0.8h}, [x6]
+1:
+ ld1r {v2.2s}, [x2], x3
+ ld1r {v3.2s}, [x2], x3
+ ld1r {v4.2s}, [x2], x3
+ ld1r {v5.2s}, [x2], x3
+ ld1r {v6.2s}, [x2]
+ sub x2, x2, x3, lsl #1
+ ld1 {v2.s}[1], [x2], x3
+ ld1 {v3.s}[1], [x2], x3
+ ld1 {v4.s}[1], [x2], x3
+ ld1 {v5.s}[1], [x2], x3
+ ld1 {v6.s}[1], [x2]
+ sub x2, x2, x3, lsl #1
+
+ vp8_epel8_v4_y2 v2, v2, v3, v4, v5, v6
+
+ st1 {v2.s}[0], [x0], x1
+ st1 {v2.s}[2], [x0], x1
+ st1 {v2.s}[1], [x0], x1
+ st1 {v2.s}[3], [x0], x1
+ subs w4, w4, #4
+ b.ne 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_epel4_h4v4_neon, export=1
+ sub x2, x2, x3
+ sub x2, x2, #1
+
+ movrel x7, subpel_filters, -16
+ add x5, x7, w5, uxtw #4
+ ld1 {v0.8h}, [x5]
+
+ sub sp, sp, #44
+ add w8, w4, #3
+ mov x9, sp
+1:
+ ld1 {v2.8b}, [x2], x3
+ vp8_epel8_h4 v2, v2, v3
+ st1 {v2.s}[0], [x9], #4
+ subs w8, w8, #1
+ b.ne 1b
+
+ add x6, x7, w6, uxtw #4
+ ld1 {v0.8h}, [x6]
+ mov x9, sp
+2:
+ ld1 {v2.8b,v3.8b}, [x9], #16
+ ld1r {v6.2s}, [x9]
+ sub x9, x9, #8
+ ld1 {v4.8b,v5.8b}, [x9], #16
+ ld1 {v6.s}[1], [x9]
+ sub x9, x9, #8
+ trn1 v1.2s, v2.2s, v4.2s
+ trn2 v4.2s, v2.2s, v4.2s
+ trn1 v2.2s, v3.2s, v5.2s
+ trn2 v5.2s, v3.2s, v5.2s
+ vp8_epel8_v4_y2 v1, v1, v4, v2, v5, v6
+ st1 {v1.s}[0], [x0], x1
+ st1 {v1.s}[2], [x0], x1
+ st1 {v1.s}[1], [x0], x1
+ st1 {v1.s}[3], [x0], x1
+ subs w4, w4, #4
+ b.ne 2b
+
+ add sp, sp, #44
+ ret
+endfunc
+
+/* Bilinear MC */
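+//
+// Each routine below computes out = (a*(8 - frac) + b*frac + 4) >> 3, with
+// frac = mx (w5) for the _h variants and frac = my (w6) for the _v variants;
+// the "+ 4" rounding comes from rshrn #3.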
+
+function ff_put_vp8_bilin16_h_neon, export=1
+ mov w7, #8
+ dup v0.8b, w5
+ sub w5, w7, w5
+ dup v1.8b, w5
+1:
+ subs w4, w4, #2
+ ld1 {v2.8b,v3.8b,v4.8b}, [x2], x3
+ ext v5.8b, v3.8b, v4.8b, #1
+ ext v4.8b, v2.8b, v3.8b, #1
+ umull v16.8h, v2.8b, v1.8b
+ umlal v16.8h, v4.8b, v0.8b
+ ld1 {v18.8b,v19.8b,v20.8b}, [x2], x3
+ umull v6.8h, v3.8b, v1.8b
+ umlal v6.8h, v5.8b, v0.8b
+ ext v21.8b, v19.8b, v20.8b, #1
+ ext v20.8b, v18.8b, v19.8b, #1
+ umull v22.8h, v18.8b, v1.8b
+ umlal v22.8h, v20.8b, v0.8b
+ umull v24.8h, v19.8b, v1.8b
+ umlal v24.8h, v21.8b, v0.8b
+ rshrn v4.8b, v16.8h, #3
+ rshrn2 v4.16b, v6.8h, #3
+ rshrn v6.8b, v22.8h, #3
+ rshrn2 v6.16b, v24.8h, #3
+ st1 {v4.16b}, [x0], x1
+ st1 {v6.16b}, [x0], x1
+ b.gt 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_bilin16_v_neon, export=1
+ mov w7, #8
+ dup v0.16b, w6
+ sub w6, w7, w6
+ dup v1.16b, w6
+
+ ld1 {v2.16b}, [x2], x3
+1:
+ subs w4, w4, #2
+ ld1 {v4.16b}, [x2], x3
+ umull v6.8h, v2.8b, v1.8b
+ umlal v6.8h, v4.8b, v0.8b
+ umull2 v16.8h, v2.16b, v1.16b
+ umlal2 v16.8h, v4.16b, v0.16b
+ ld1 {v2.16b}, [x2], x3
+ umull v18.8h, v4.8b, v1.8b
+ umlal v18.8h, v2.8b, v0.8b
+ umull2 v20.8h, v4.16b, v1.16b
+ umlal2 v20.8h, v2.16b, v0.16b
+ rshrn v4.8b, v6.8h, #3
+ rshrn2 v4.16b, v16.8h, #3
+ rshrn v6.8b, v18.8h, #3
+ rshrn2 v6.16b, v20.8h, #3
+ st1 {v4.16b}, [x0], x1
+ st1 {v6.16b}, [x0], x1
+ b.gt 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_bilin16_hv_neon, export=1
+ mov w7, #8
+ dup v0.8b, w5 // mx
+ sub w5, w7, w5
+ dup v1.8b, w5
+ dup v2.16b, w6 // my
+ sub w6, w7, w6
+ dup v3.16b, w6
+
+ ld1 {v4.8b,v5.8b,v6.8b}, [x2], x3
+
+ ext v7.8b, v5.8b, v6.8b, #1
+ ext v6.8b, v4.8b, v5.8b, #1
+ umull v16.8h, v4.8b, v1.8b
+ umlal v16.8h, v6.8b, v0.8b
+ umull v18.8h, v5.8b, v1.8b
+ umlal v18.8h, v7.8b, v0.8b
+ rshrn v4.8b, v16.8h, #3
+ rshrn2 v4.16b, v18.8h, #3
+1:
+ subs w4, w4, #2
+ ld1 {v18.8b,v19.8b,v20.8b}, [x2], x3
+ ext v21.8b, v19.8b, v20.8b, #1
+ ext v20.8b, v18.8b, v19.8b, #1
+ umull v22.8h, v18.8b, v1.8b
+ umlal v22.8h, v20.8b, v0.8b
+ ld1 {v26.8b,v27.8b,v28.8b}, [x2], x3
+ umull v24.8h, v19.8b, v1.8b
+ umlal v24.8h, v21.8b, v0.8b
+ ext v29.8b, v27.8b, v28.8b, #1
+ ext v28.8b, v26.8b, v27.8b, #1
+ umull v16.8h, v26.8b, v1.8b
+ umlal v16.8h, v28.8b, v0.8b
+ umull v18.8h, v27.8b, v1.8b
+ umlal v18.8h, v29.8b, v0.8b
+ rshrn v6.8b, v22.8h, #3
+ rshrn2 v6.16b, v24.8h, #3
+ umull v24.8h, v4.8b, v3.8b
+ umlal v24.8h, v6.8b, v2.8b
+ umull2 v30.8h, v4.16b, v3.16b
+ umlal2 v30.8h, v6.16b, v2.16b
+ rshrn v4.8b, v16.8h, #3
+ rshrn2 v4.16b, v18.8h, #3
+ umull v20.8h, v6.8b, v3.8b
+ umlal v20.8h, v4.8b, v2.8b
+ umull2 v22.8h, v6.16b, v3.16b
+ umlal2 v22.8h, v4.16b, v2.16b
+ rshrn v24.8b, v24.8h, #3
+ rshrn2 v24.16b, v30.8h, #3
+ st1 {v24.16b}, [x0], x1
+ rshrn v20.8b, v20.8h, #3
+ rshrn2 v20.16b, v22.8h, #3
+ st1 {v20.16b}, [x0], x1
+ b.gt 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_bilin8_h_neon, export=1
+ mov w7, #8
+ dup v0.8b, w5
+ sub w5, w7, w5
+ dup v1.8b, w5
+1:
+ subs w4, w4, #2
+ ld1 {v2.8b,v3.8b}, [x2], x3
+ ext v3.8b, v2.8b, v3.8b, #1
+ umull v4.8h, v2.8b, v1.8b
+ umlal v4.8h, v3.8b, v0.8b
+ ld1 {v6.8b,v7.8b}, [x2], x3
+ ext v7.8b, v6.8b, v7.8b, #1
+ umull v16.8h, v6.8b, v1.8b
+ umlal v16.8h, v7.8b, v0.8b
+ rshrn v4.8b, v4.8h, #3
+ rshrn v16.8b, v16.8h, #3
+ st1 {v4.8b}, [x0], x1
+ st1 {v16.8b}, [x0], x1
+ b.gt 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_bilin8_v_neon, export=1
+ mov w7, #8
+ dup v0.8b, w6
+ sub w6, w7, w6
+ dup v1.8b, w6
+
+ ld1 {v2.8b}, [x2], x3
+1:
+ subs w4, w4, #2
+ ld1 {v3.8b}, [x2], x3
+ umull v4.8h, v2.8b, v1.8b
+ umlal v4.8h, v3.8b, v0.8b
+ ld1 {v2.8b}, [x2], x3
+ umull v6.8h, v3.8b, v1.8b
+ umlal v6.8h, v2.8b, v0.8b
+ rshrn v4.8b, v4.8h, #3
+ rshrn v6.8b, v6.8h, #3
+ st1 {v4.8b}, [x0], x1
+ st1 {v6.8b}, [x0], x1
+ b.gt 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_bilin8_hv_neon, export=1
+ mov w7, #8
+ dup v0.8b, w5 // mx
+ sub w5, w7, w5
+ dup v1.8b, w5
+ dup v2.8b, w6 // my
+ sub w6, w7, w6
+ dup v3.8b, w6
+
+ ld1 {v4.8b,v5.8b}, [x2], x3
+ ext v5.8b, v4.8b, v5.8b, #1
+ umull v18.8h, v4.8b, v1.8b
+ umlal v18.8h, v5.8b, v0.8b
+ rshrn v22.8b, v18.8h, #3
+1:
+ subs w4, w4, #2
+ ld1 {v6.8b,v7.8b}, [x2], x3
+ ext v7.8b, v6.8b, v7.8b, #1
+ umull v16.8h, v6.8b, v1.8b
+ umlal v16.8h, v7.8b, v0.8b
+ ld1 {v4.8b,v5.8b}, [x2], x3
+ ext v5.8b, v4.8b, v5.8b, #1
+ umull v18.8h, v4.8b, v1.8b
+ umlal v18.8h, v5.8b, v0.8b
+ rshrn v16.8b, v16.8h, #3
+ umull v20.8h, v22.8b, v3.8b
+ umlal v20.8h, v16.8b, v2.8b
+ rshrn v22.8b, v18.8h, #3
+ umull v24.8h, v16.8b, v3.8b
+ umlal v24.8h, v22.8b, v2.8b
+ rshrn v20.8b, v20.8h, #3
+ st1 {v20.8b}, [x0], x1
+ rshrn v23.8b, v24.8h, #3
+ st1 {v23.8b}, [x0], x1
+ b.gt 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_bilin4_h_neon, export=1
+ mov w7, #8
+ dup v0.8b, w5
+ sub w5, w7, w5
+ dup v1.8b, w5
+1:
+ subs w4, w4, #2
+ ld1 {v2.8b}, [x2], x3
+ ext v3.8b, v2.8b, v3.8b, #1
+ ld1 {v6.8b}, [x2], x3
+ ext v7.8b, v6.8b, v7.8b, #1
+ trn1 v2.2s, v2.2s, v6.2s
+ trn1 v3.2s, v3.2s, v7.2s
+ umull v4.8h, v2.8b, v1.8b
+ umlal v4.8h, v3.8b, v0.8b
+ rshrn v4.8b, v4.8h, #3
+ st1 {v4.s}[0], [x0], x1
+ st1 {v4.s}[1], [x0], x1
+ b.gt 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_bilin4_v_neon, export=1
+ mov w7, #8
+ dup v0.8b, w6
+ sub w6, w7, w6
+ dup v1.8b, w6
+
+ ld1r {v2.2s}, [x2], x3
+1:
+ ld1r {v3.2s}, [x2]
+ ld1 {v2.s}[1], [x2], x3
+ ld1 {v3.s}[1], [x2], x3
+ umull v4.8h, v2.8b, v1.8b
+ umlal v4.8h, v3.8b, v0.8b
+ trn2 v2.2s, v3.2s, v2.2s
+ rshrn v4.8b, v4.8h, #3
+ st1 {v4.s}[0], [x0], x1
+ st1 {v4.s}[1], [x0], x1
+ subs w4, w4, #2
+ b.gt 1b
+
+ ret
+endfunc
+
+function ff_put_vp8_bilin4_hv_neon, export=1
+ mov w7, #8
+ dup v0.8b, w5 // mx
+ sub w5, w7, w5
+ dup v1.8b, w5
+ dup v2.8b, w6 // my
+ sub w6, w7, w6
+ dup v3.8b, w6
+
+ ld1 {v4.8b}, [x2], x3
+ ext v5.8b, v4.8b, v4.8b, #1
+ umull v18.8h, v4.8b, v1.8b
+ umlal v18.8h, v5.8b, v0.8b
+ rshrn v22.8b, v18.8h, #3
+1:
+ subs w4, w4, #2
+ ld1 {v6.8b}, [x2], x3
+ ext v7.8b, v6.8b, v6.8b, #1
+ ld1 {v4.8b}, [x2], x3
+ ext v5.8b, v4.8b, v4.8b, #1
+ trn1 v6.2s, v6.2s, v4.2s
+ trn1 v7.2s, v7.2s, v5.2s
+ umull v16.8h, v6.8b, v1.8b
+ umlal v16.8h, v7.8b, v0.8b
+ rshrn v16.8b, v16.8h, #3
+ umull v20.8h, v16.8b, v2.8b
+ trn1 v22.2s, v22.2s, v16.2s
+ umlal v20.8h, v22.8b, v3.8b
+ rev64 v22.2s, v16.2s
+ rshrn v20.8b, v20.8h, #3
+ st1 {v20.s}[0], [x0], x1
+ st1 {v20.s}[1], [x0], x1
+ b.gt 1b
+
+ ret
+endfunc
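For reference, the bilinear functions above all evaluate the same two-tap VP8 interpolation: each output pixel is a weighted sum of two neighbouring source pixels with weights (8 - frac) and frac, rounded and shifted down by 3 (which is what the rounding rshrn #3 instructions implement). A minimal scalar sketch of the horizontal case, with an explicit width parameter instead of the per-width specialisations above; the function name is illustrative only, not part of FFmpeg:

#include <stddef.h>
#include <stdint.h>

/* Scalar model of the ff_put_vp8_bilin*_h_neon kernels:
 * dst[x] = (src[x] * (8 - mx) + src[x + 1] * mx + 4) >> 3 */
static void put_vp8_bilin_h_c(uint8_t *dst, ptrdiff_t dst_stride,
                              const uint8_t *src, ptrdiff_t src_stride,
                              int w, int h, int mx)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++)
            dst[x] = (src[x] * (8 - mx) + src[x + 1] * mx + 4) >> 3;
        dst += dst_stride;
        src += src_stride;
    }
}

The vertical and hv variants follow the same pattern, weighting across rows with my / (8 - my) and, in the hv case, feeding the horizontally filtered rows through the vertical pass.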
diff --git a/media/ffvpx/libavcodec/aarch64/vp9dsp_init.h b/media/ffvpx/libavcodec/aarch64/vp9dsp_init.h
new file mode 100644
index 0000000000..9df1752c62
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9dsp_init.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AARCH64_VP9DSP_INIT_H
+#define AVCODEC_AARCH64_VP9DSP_INIT_H
+
+#include "libavcodec/vp9dsp.h"
+
+void ff_vp9dsp_init_10bpp_aarch64(VP9DSPContext *dsp);
+void ff_vp9dsp_init_12bpp_aarch64(VP9DSPContext *dsp);
+
+#endif /* AVCODEC_AARCH64_VP9DSP_INIT_H */
diff --git a/media/ffvpx/libavcodec/aarch64/vp9dsp_init_10bpp_aarch64.c b/media/ffvpx/libavcodec/aarch64/vp9dsp_init_10bpp_aarch64.c
new file mode 100644
index 0000000000..0fa0d7f8c2
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9dsp_init_10bpp_aarch64.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BPP 10
+#define INIT_FUNC ff_vp9dsp_init_10bpp_aarch64
+#include "vp9dsp_init_16bpp_aarch64_template.c"
diff --git a/media/ffvpx/libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c b/media/ffvpx/libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c
new file mode 100644
index 0000000000..dae2232403
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BPP 12
+#define INIT_FUNC ff_vp9dsp_init_12bpp_aarch64
+#include "vp9dsp_init_16bpp_aarch64_template.c"
diff --git a/media/ffvpx/libavcodec/aarch64/vp9dsp_init_16bpp_aarch64_template.c b/media/ffvpx/libavcodec/aarch64/vp9dsp_init_16bpp_aarch64_template.c
new file mode 100644
index 0000000000..d2a4e90b3a
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9dsp_init_16bpp_aarch64_template.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/internal.h"
+#include "libavutil/mem_internal.h"
+#include "libavutil/aarch64/cpu.h"
+#include "vp9dsp_init.h"
+
+#define declare_fpel(type, sz, suffix) \
+void ff_vp9_##type##sz##suffix##_neon(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my)
+
+#define decl_mc_func(op, filter, dir, sz, bpp) \
+void ff_vp9_##op##_##filter##sz##_##dir##_##bpp##_neon(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my)
+
+#define define_8tap_2d_fn(op, filter, sz, bpp) \
+static void op##_##filter##sz##_hv_##bpp##_neon(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, \
+ ptrdiff_t src_stride, \
+ int h, int mx, int my) \
+{ \
+ LOCAL_ALIGNED_16(uint8_t, temp, [((1 + (sz < 64)) * sz + 8) * sz * 2]); \
+ /* We only need h + 7 lines, but the horizontal filter assumes an \
+ * even number of rows, so filter h + 8 lines here. */ \
+ ff_vp9_put_##filter##sz##_h_##bpp##_neon(temp, 2 * sz, \
+ src - 3 * src_stride, src_stride, \
+ h + 8, mx, 0); \
+ ff_vp9_##op##_##filter##sz##_v_##bpp##_neon(dst, dst_stride, \
+ temp + 3 * 2 * sz, 2 * sz, \
+ h, 0, my); \
+}
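In scalar terms, the wrapper defined above builds the 2-D filter from the two 1-D passes: run the horizontal 8-tap filter into an on-stack buffer starting three rows above the block (so the vertical taps have context), then run the vertical filter over that buffer. The (1 + (sz < 64)) factor in the temp size appears to allow for block heights up to twice the width on the smaller sizes. A rough C model of the composition, assuming hypothetical 1-D helpers hfilter8/vfilter8 and strides counted in pixels rather than bytes:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical 1-D 8-tap helpers, standing in for the _h_ / _v_ NEON kernels. */
void hfilter8(uint16_t *dst, ptrdiff_t dst_stride,
              const uint16_t *src, ptrdiff_t src_stride, int w, int h, int mx);
void vfilter8(uint16_t *dst, ptrdiff_t dst_stride,
              const uint16_t *src, ptrdiff_t src_stride, int w, int h, int my);

static void filter8_hv(uint16_t *dst, ptrdiff_t dst_stride,
                       const uint16_t *src, ptrdiff_t src_stride,
                       int w, int h, int mx, int my)
{
    uint16_t temp[(64 + 8) * 64];  /* 72 rows of up to 64 samples covers all sizes */

    /* Horizontal pass, starting 3 rows early; h + 8 keeps the row count even. */
    hfilter8(temp, w, src - 3 * src_stride, src_stride, w, h + 8, mx);
    /* Vertical pass, reading from the row that corresponds to the block top. */
    vfilter8(dst, dst_stride, temp + 3 * w, w, w, h, my);
}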
+
+#define decl_filter_funcs(op, dir, sz, bpp) \
+ decl_mc_func(op, regular, dir, sz, bpp); \
+ decl_mc_func(op, sharp, dir, sz, bpp); \
+ decl_mc_func(op, smooth, dir, sz, bpp)
+
+#define decl_mc_funcs(sz, bpp) \
+ decl_filter_funcs(put, h, sz, bpp); \
+ decl_filter_funcs(avg, h, sz, bpp); \
+ decl_filter_funcs(put, v, sz, bpp); \
+ decl_filter_funcs(avg, v, sz, bpp); \
+ decl_filter_funcs(put, hv, sz, bpp); \
+ decl_filter_funcs(avg, hv, sz, bpp)
+
+#define ff_vp9_copy32_neon ff_vp9_copy32_aarch64
+#define ff_vp9_copy64_neon ff_vp9_copy64_aarch64
+#define ff_vp9_copy128_neon ff_vp9_copy128_aarch64
+
+declare_fpel(copy, 128, );
+declare_fpel(copy, 64, );
+declare_fpel(copy, 32, );
+declare_fpel(copy, 16, );
+declare_fpel(copy, 8, );
+declare_fpel(avg, 64, _16);
+declare_fpel(avg, 32, _16);
+declare_fpel(avg, 16, _16);
+declare_fpel(avg, 8, _16);
+declare_fpel(avg, 4, _16);
+
+decl_mc_funcs(64, BPP);
+decl_mc_funcs(32, BPP);
+decl_mc_funcs(16, BPP);
+decl_mc_funcs(8, BPP);
+decl_mc_funcs(4, BPP);
+
+#define define_8tap_2d_funcs(sz, bpp) \
+ define_8tap_2d_fn(put, regular, sz, bpp) \
+ define_8tap_2d_fn(put, sharp, sz, bpp) \
+ define_8tap_2d_fn(put, smooth, sz, bpp) \
+ define_8tap_2d_fn(avg, regular, sz, bpp) \
+ define_8tap_2d_fn(avg, sharp, sz, bpp) \
+ define_8tap_2d_fn(avg, smooth, sz, bpp)
+
+define_8tap_2d_funcs(64, BPP)
+define_8tap_2d_funcs(32, BPP)
+define_8tap_2d_funcs(16, BPP)
+define_8tap_2d_funcs(8, BPP)
+define_8tap_2d_funcs(4, BPP)
+
+static av_cold void vp9dsp_mc_init_aarch64(VP9DSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+#define init_fpel(idx1, idx2, sz, type, suffix) \
+ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
+ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
+ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = \
+ dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = ff_vp9_##type##sz##suffix
+
+#define init_copy(idx, sz, suffix) \
+ init_fpel(idx, 0, sz, copy, suffix)
+
+#define init_avg(idx, sz, suffix) \
+ init_fpel(idx, 1, sz, avg, suffix)
+
+#define init_copy_avg(idx, sz1, sz2) \
+ init_copy(idx, sz2, _neon); \
+ init_avg (idx, sz1, _16_neon)
+
+ if (have_armv8(cpu_flags)) {
+ init_copy(0, 128, _aarch64);
+ init_copy(1, 64, _aarch64);
+ init_copy(2, 32, _aarch64);
+ }
+
+ if (have_neon(cpu_flags)) {
+#define init_mc_func(idx1, idx2, op, filter, fname, dir, mx, my, sz, pfx, bpp) \
+ dsp->mc[idx1][filter][idx2][mx][my] = pfx##op##_##fname##sz##_##dir##_##bpp##_neon
+
+#define init_mc_funcs(idx, dir, mx, my, sz, pfx, bpp) \
+ init_mc_func(idx, 0, put, FILTER_8TAP_REGULAR, regular, dir, mx, my, sz, pfx, bpp); \
+ init_mc_func(idx, 0, put, FILTER_8TAP_SHARP, sharp, dir, mx, my, sz, pfx, bpp); \
+ init_mc_func(idx, 0, put, FILTER_8TAP_SMOOTH, smooth, dir, mx, my, sz, pfx, bpp); \
+ init_mc_func(idx, 1, avg, FILTER_8TAP_REGULAR, regular, dir, mx, my, sz, pfx, bpp); \
+ init_mc_func(idx, 1, avg, FILTER_8TAP_SHARP, sharp, dir, mx, my, sz, pfx, bpp); \
+ init_mc_func(idx, 1, avg, FILTER_8TAP_SMOOTH, smooth, dir, mx, my, sz, pfx, bpp)
+
+#define init_mc_funcs_dirs(idx, sz, bpp) \
+ init_mc_funcs(idx, v, 0, 1, sz, ff_vp9_, bpp); \
+ init_mc_funcs(idx, h, 1, 0, sz, ff_vp9_, bpp); \
+ init_mc_funcs(idx, hv, 1, 1, sz, , bpp)
+
+
+ init_avg(0, 64, _16_neon);
+ init_avg(1, 32, _16_neon);
+ init_avg(2, 16, _16_neon);
+ init_copy_avg(3, 8, 16);
+ init_copy_avg(4, 4, 8);
+
+ init_mc_funcs_dirs(0, 64, BPP);
+ init_mc_funcs_dirs(1, 32, BPP);
+ init_mc_funcs_dirs(2, 16, BPP);
+ init_mc_funcs_dirs(3, 8, BPP);
+ init_mc_funcs_dirs(4, 4, BPP);
+ }
+}
+
+#define define_itxfm2(type_a, type_b, sz, bpp) \
+void ff_vp9_##type_a##_##type_b##_##sz##x##sz##_add_##bpp##_neon(uint8_t *_dst, \
+ ptrdiff_t stride, \
+ int16_t *_block, int eob)
+#define define_itxfm(type_a, type_b, sz, bpp) define_itxfm2(type_a, type_b, sz, bpp)
+
+#define define_itxfm_funcs(sz, bpp) \
+ define_itxfm(idct, idct, sz, bpp); \
+ define_itxfm(iadst, idct, sz, bpp); \
+ define_itxfm(idct, iadst, sz, bpp); \
+ define_itxfm(iadst, iadst, sz, bpp)
+
+define_itxfm_funcs(4, BPP);
+define_itxfm_funcs(8, BPP);
+define_itxfm_funcs(16, BPP);
+define_itxfm(idct, idct, 32, BPP);
+define_itxfm(iwht, iwht, 4, BPP);
+
+
+static av_cold void vp9dsp_itxfm_init_aarch64(VP9DSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+#define init_itxfm2(tx, sz, bpp) \
+ dsp->itxfm_add[tx][DCT_DCT] = ff_vp9_idct_idct_##sz##_add_##bpp##_neon; \
+ dsp->itxfm_add[tx][DCT_ADST] = ff_vp9_iadst_idct_##sz##_add_##bpp##_neon; \
+ dsp->itxfm_add[tx][ADST_DCT] = ff_vp9_idct_iadst_##sz##_add_##bpp##_neon; \
+ dsp->itxfm_add[tx][ADST_ADST] = ff_vp9_iadst_iadst_##sz##_add_##bpp##_neon
+#define init_itxfm(tx, sz, bpp) init_itxfm2(tx, sz, bpp)
+
+#define init_idct2(tx, nm, bpp) \
+ dsp->itxfm_add[tx][DCT_DCT] = \
+ dsp->itxfm_add[tx][ADST_DCT] = \
+ dsp->itxfm_add[tx][DCT_ADST] = \
+ dsp->itxfm_add[tx][ADST_ADST] = ff_vp9_##nm##_add_##bpp##_neon
+#define init_idct(tx, nm, bpp) init_idct2(tx, nm, bpp)
+
+ init_itxfm(TX_4X4, 4x4, BPP);
+ init_itxfm(TX_8X8, 8x8, BPP);
+ init_itxfm(TX_16X16, 16x16, BPP);
+ init_idct(TX_32X32, idct_idct_32x32, BPP);
+ init_idct(4, iwht_iwht_4x4, BPP);
+ }
+}
+
+#define define_loop_filter(dir, wd, size, bpp) \
+void ff_vp9_loop_filter_##dir##_##wd##_##size##_##bpp##_neon(uint8_t *dst, ptrdiff_t stride, int E, int I, int H)
+
+#define define_loop_filters(wd, size, bpp) \
+ define_loop_filter(h, wd, size, bpp); \
+ define_loop_filter(v, wd, size, bpp)
+
+define_loop_filters(4, 8, BPP);
+define_loop_filters(8, 8, BPP);
+define_loop_filters(16, 8, BPP);
+
+define_loop_filters(16, 16, BPP);
+
+define_loop_filters(44, 16, BPP);
+define_loop_filters(48, 16, BPP);
+define_loop_filters(84, 16, BPP);
+define_loop_filters(88, 16, BPP);
+
+static av_cold void vp9dsp_loopfilter_init_aarch64(VP9DSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+#define init_lpf_func_8(idx1, idx2, dir, wd, bpp) \
+ dsp->loop_filter_8[idx1][idx2] = ff_vp9_loop_filter_##dir##_##wd##_8_##bpp##_neon
+
+#define init_lpf_func_16(idx, dir, bpp) \
+ dsp->loop_filter_16[idx] = ff_vp9_loop_filter_##dir##_16_16_##bpp##_neon
+
+#define init_lpf_func_mix2(idx1, idx2, idx3, dir, wd, bpp) \
+ dsp->loop_filter_mix2[idx1][idx2][idx3] = ff_vp9_loop_filter_##dir##_##wd##_16_##bpp##_neon
+
+#define init_lpf_funcs_8_wd(idx, wd, bpp) \
+ init_lpf_func_8(idx, 0, h, wd, bpp); \
+ init_lpf_func_8(idx, 1, v, wd, bpp)
+
+#define init_lpf_funcs_16(bpp) \
+ init_lpf_func_16(0, h, bpp); \
+ init_lpf_func_16(1, v, bpp)
+
+#define init_lpf_funcs_mix2_wd(idx1, idx2, wd, bpp) \
+ init_lpf_func_mix2(idx1, idx2, 0, h, wd, bpp); \
+ init_lpf_func_mix2(idx1, idx2, 1, v, wd, bpp)
+
+#define init_lpf_funcs_8(bpp) \
+ init_lpf_funcs_8_wd(0, 4, bpp); \
+ init_lpf_funcs_8_wd(1, 8, bpp); \
+ init_lpf_funcs_8_wd(2, 16, bpp)
+
+#define init_lpf_funcs_mix2(bpp) \
+ init_lpf_funcs_mix2_wd(0, 0, 44, bpp); \
+ init_lpf_funcs_mix2_wd(0, 1, 48, bpp); \
+ init_lpf_funcs_mix2_wd(1, 0, 84, bpp); \
+ init_lpf_funcs_mix2_wd(1, 1, 88, bpp)
+
+ init_lpf_funcs_8(BPP);
+ init_lpf_funcs_16(BPP);
+ init_lpf_funcs_mix2(BPP);
+ }
+}
+
+av_cold void INIT_FUNC(VP9DSPContext *dsp)
+{
+ vp9dsp_mc_init_aarch64(dsp);
+ vp9dsp_loopfilter_init_aarch64(dsp);
+ vp9dsp_itxfm_init_aarch64(dsp);
+}
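As a rough usage sketch of the table filled in above (indices inferred from the init code; this fragment is meant to be compiled inside the ffvpx/FFmpeg tree, and the buffers and subpel values are placeholders): the first mc index is the block-width slot (0 = 64 down to 4 = 4), the third selects put (0) or avg (1), and the last two flag whether mx and my are non-zero.

#include <stddef.h>
#include <stdint.h>

#include "libavcodec/vp9dsp.h"
#include "vp9dsp_init.h"

/* Illustrative only: call the 8-tap "sharp" put function for a 32-pixel-wide
 * block with both subpel offsets non-zero, at 10 bits per pixel. */
static void example_mc_call(uint8_t *dst, ptrdiff_t dst_stride,
                            const uint8_t *src, ptrdiff_t src_stride,
                            int h, int mx, int my)
{
    VP9DSPContext dsp = { 0 };

    ff_vp9dsp_init_10bpp_aarch64(&dsp);
    dsp.mc[1][FILTER_8TAP_SHARP][0][1][1](dst, dst_stride,
                                          src, src_stride, h, mx, my);
}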
diff --git a/media/ffvpx/libavcodec/aarch64/vp9dsp_init_aarch64.c b/media/ffvpx/libavcodec/aarch64/vp9dsp_init_aarch64.c
new file mode 100644
index 0000000000..4d1fee62de
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9dsp_init_aarch64.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2016 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/internal.h"
+#include "libavutil/mem_internal.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavcodec/vp9dsp.h"
+#include "vp9dsp_init.h"
+
+#define declare_fpel(type, sz) \
+void ff_vp9_##type##sz##_neon(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my)
+
+#define declare_copy_avg(sz) \
+ declare_fpel(copy, sz); \
+ declare_fpel(avg , sz)
+
+#define decl_mc_func(op, filter, dir, sz) \
+void ff_vp9_##op##_##filter##sz##_##dir##_neon(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my)
+
+#define define_8tap_2d_fn(op, filter, sz) \
+static void op##_##filter##sz##_hv_neon(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my) \
+{ \
+ LOCAL_ALIGNED_16(uint8_t, temp, [((1 + (sz < 64)) * sz + 8) * sz]); \
+ /* We only need h + 7 lines, but the horizontal filter assumes an \
+ * even number of rows, so filter h + 8 lines here. */ \
+ ff_vp9_put_##filter##sz##_h_neon(temp, sz, \
+ src - 3 * src_stride, src_stride, \
+ h + 8, mx, 0); \
+ ff_vp9_##op##_##filter##sz##_v_neon(dst, dst_stride, \
+ temp + 3 * sz, sz, \
+ h, 0, my); \
+}
+
+#define decl_filter_funcs(op, dir, sz) \
+ decl_mc_func(op, regular, dir, sz); \
+ decl_mc_func(op, sharp, dir, sz); \
+ decl_mc_func(op, smooth, dir, sz)
+
+#define decl_mc_funcs(sz) \
+ decl_filter_funcs(put, h, sz); \
+ decl_filter_funcs(avg, h, sz); \
+ decl_filter_funcs(put, v, sz); \
+ decl_filter_funcs(avg, v, sz); \
+ decl_filter_funcs(put, hv, sz); \
+ decl_filter_funcs(avg, hv, sz)
+
+#define ff_vp9_copy32_neon ff_vp9_copy32_aarch64
+#define ff_vp9_copy64_neon ff_vp9_copy64_aarch64
+
+declare_copy_avg(64);
+declare_copy_avg(32);
+declare_copy_avg(16);
+declare_copy_avg(8);
+declare_copy_avg(4);
+
+decl_mc_funcs(64);
+decl_mc_funcs(32);
+decl_mc_funcs(16);
+decl_mc_funcs(8);
+decl_mc_funcs(4);
+
+#define define_8tap_2d_funcs(sz) \
+ define_8tap_2d_fn(put, regular, sz) \
+ define_8tap_2d_fn(put, sharp, sz) \
+ define_8tap_2d_fn(put, smooth, sz) \
+ define_8tap_2d_fn(avg, regular, sz) \
+ define_8tap_2d_fn(avg, sharp, sz) \
+ define_8tap_2d_fn(avg, smooth, sz)
+
+define_8tap_2d_funcs(64)
+define_8tap_2d_funcs(32)
+define_8tap_2d_funcs(16)
+define_8tap_2d_funcs(8)
+define_8tap_2d_funcs(4)
+
+static av_cold void vp9dsp_mc_init_aarch64(VP9DSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+#define init_fpel(idx1, idx2, sz, type, suffix) \
+ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
+ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
+ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = \
+ dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = ff_vp9_##type##sz##suffix
+
+#define init_copy(idx, sz, suffix) \
+ init_fpel(idx, 0, sz, copy, suffix)
+
+#define init_avg(idx, sz, suffix) \
+ init_fpel(idx, 1, sz, avg, suffix)
+
+#define init_copy_avg(idx, sz) \
+ init_copy(idx, sz, _neon); \
+ init_avg (idx, sz, _neon)
+
+ if (have_armv8(cpu_flags)) {
+ init_copy(0, 64, _aarch64);
+ init_copy(1, 32, _aarch64);
+ }
+
+ if (have_neon(cpu_flags)) {
+#define init_mc_func(idx1, idx2, op, filter, fname, dir, mx, my, sz, pfx) \
+ dsp->mc[idx1][filter][idx2][mx][my] = pfx##op##_##fname##sz##_##dir##_neon
+
+#define init_mc_funcs(idx, dir, mx, my, sz, pfx) \
+ init_mc_func(idx, 0, put, FILTER_8TAP_REGULAR, regular, dir, mx, my, sz, pfx); \
+ init_mc_func(idx, 0, put, FILTER_8TAP_SHARP, sharp, dir, mx, my, sz, pfx); \
+ init_mc_func(idx, 0, put, FILTER_8TAP_SMOOTH, smooth, dir, mx, my, sz, pfx); \
+ init_mc_func(idx, 1, avg, FILTER_8TAP_REGULAR, regular, dir, mx, my, sz, pfx); \
+ init_mc_func(idx, 1, avg, FILTER_8TAP_SHARP, sharp, dir, mx, my, sz, pfx); \
+ init_mc_func(idx, 1, avg, FILTER_8TAP_SMOOTH, smooth, dir, mx, my, sz, pfx)
+
+#define init_mc_funcs_dirs(idx, sz) \
+ init_mc_funcs(idx, h, 1, 0, sz, ff_vp9_); \
+ init_mc_funcs(idx, v, 0, 1, sz, ff_vp9_); \
+ init_mc_funcs(idx, hv, 1, 1, sz,)
+
+ init_avg(0, 64, _neon);
+ init_avg(1, 32, _neon);
+ init_copy_avg(2, 16);
+ init_copy_avg(3, 8);
+ init_copy_avg(4, 4);
+
+ init_mc_funcs_dirs(0, 64);
+ init_mc_funcs_dirs(1, 32);
+ init_mc_funcs_dirs(2, 16);
+ init_mc_funcs_dirs(3, 8);
+ init_mc_funcs_dirs(4, 4);
+ }
+}
+
+#define define_itxfm(type_a, type_b, sz) \
+void ff_vp9_##type_a##_##type_b##_##sz##x##sz##_add_neon(uint8_t *_dst, \
+ ptrdiff_t stride, \
+ int16_t *_block, int eob)
+
+#define define_itxfm_funcs(sz) \
+ define_itxfm(idct, idct, sz); \
+ define_itxfm(iadst, idct, sz); \
+ define_itxfm(idct, iadst, sz); \
+ define_itxfm(iadst, iadst, sz)
+
+define_itxfm_funcs(4);
+define_itxfm_funcs(8);
+define_itxfm_funcs(16);
+define_itxfm(idct, idct, 32);
+define_itxfm(iwht, iwht, 4);
+
+
+static av_cold void vp9dsp_itxfm_init_aarch64(VP9DSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+#define init_itxfm(tx, sz) \
+ dsp->itxfm_add[tx][DCT_DCT] = ff_vp9_idct_idct_##sz##_add_neon; \
+ dsp->itxfm_add[tx][DCT_ADST] = ff_vp9_iadst_idct_##sz##_add_neon; \
+ dsp->itxfm_add[tx][ADST_DCT] = ff_vp9_idct_iadst_##sz##_add_neon; \
+ dsp->itxfm_add[tx][ADST_ADST] = ff_vp9_iadst_iadst_##sz##_add_neon
+
+#define init_idct(tx, nm) \
+ dsp->itxfm_add[tx][DCT_DCT] = \
+ dsp->itxfm_add[tx][ADST_DCT] = \
+ dsp->itxfm_add[tx][DCT_ADST] = \
+ dsp->itxfm_add[tx][ADST_ADST] = ff_vp9_##nm##_add_neon
+
+ init_itxfm(TX_4X4, 4x4);
+ init_itxfm(TX_8X8, 8x8);
+ init_itxfm(TX_16X16, 16x16);
+ init_idct(TX_32X32, idct_idct_32x32);
+ init_idct(4, iwht_iwht_4x4);
+ }
+}
+
+#define define_loop_filter(dir, wd, len) \
+void ff_vp9_loop_filter_##dir##_##wd##_##len##_neon(uint8_t *dst, ptrdiff_t stride, int E, int I, int H)
+
+#define define_loop_filters(wd, len) \
+ define_loop_filter(h, wd, len); \
+ define_loop_filter(v, wd, len)
+
+define_loop_filters(4, 8);
+define_loop_filters(8, 8);
+define_loop_filters(16, 8);
+
+define_loop_filters(16, 16);
+
+define_loop_filters(44, 16);
+define_loop_filters(48, 16);
+define_loop_filters(84, 16);
+define_loop_filters(88, 16);
+
+static av_cold void vp9dsp_loopfilter_init_aarch64(VP9DSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+ dsp->loop_filter_8[0][1] = ff_vp9_loop_filter_v_4_8_neon;
+ dsp->loop_filter_8[0][0] = ff_vp9_loop_filter_h_4_8_neon;
+ dsp->loop_filter_8[1][1] = ff_vp9_loop_filter_v_8_8_neon;
+ dsp->loop_filter_8[1][0] = ff_vp9_loop_filter_h_8_8_neon;
+ dsp->loop_filter_8[2][1] = ff_vp9_loop_filter_v_16_8_neon;
+ dsp->loop_filter_8[2][0] = ff_vp9_loop_filter_h_16_8_neon;
+
+ dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_neon;
+ dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_neon;
+
+ dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_neon;
+ dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_neon;
+ dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_neon;
+ dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_neon;
+ dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_neon;
+ dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_neon;
+ dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_neon;
+ dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_neon;
+ }
+}
+
+av_cold void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int bpp)
+{
+ if (bpp == 10) {
+ ff_vp9dsp_init_10bpp_aarch64(dsp);
+ return;
+ } else if (bpp == 12) {
+ ff_vp9dsp_init_12bpp_aarch64(dsp);
+ return;
+ } else if (bpp != 8)
+ return;
+
+ vp9dsp_mc_init_aarch64(dsp);
+ vp9dsp_loopfilter_init_aarch64(dsp);
+ vp9dsp_itxfm_init_aarch64(dsp);
+}
diff --git a/media/ffvpx/libavcodec/aarch64/vp9itxfm_16bpp_neon.S b/media/ffvpx/libavcodec/aarch64/vp9itxfm_16bpp_neon.S
new file mode 100644
index 0000000000..c5f43d36a3
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9itxfm_16bpp_neon.S
@@ -0,0 +1,2017 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+#include "neon.S"
+
+const itxfm4_coeffs, align=4
+ .short 11585, 0, 6270, 15137
+iadst4_coeffs:
+ .short 5283, 15212, 9929, 13377
+endconst
+
+const iadst8_coeffs, align=4
+ .short 16305, 1606, 14449, 7723, 10394, 12665, 4756, 15679
+idct_coeffs:
+ .short 11585, 0, 6270, 15137, 3196, 16069, 13623, 9102
+ .short 1606, 16305, 12665, 10394, 7723, 14449, 15679, 4756
+ .short 804, 16364, 12140, 11003, 7005, 14811, 15426, 5520
+ .short 3981, 15893, 14053, 8423, 9760, 13160, 16207, 2404
+endconst
+
+const iadst16_coeffs, align=4
+ .short 16364, 804, 15893, 3981, 11003, 12140, 8423, 14053
+ .short 14811, 7005, 13160, 9760, 5520, 15426, 2404, 16207
+endconst
+
+.macro transpose_4x4s r0, r1, r2, r3, r4, r5, r6, r7
+ trn1 \r4\().4s, \r0\().4s, \r1\().4s
+ trn2 \r5\().4s, \r0\().4s, \r1\().4s
+ trn1 \r6\().4s, \r2\().4s, \r3\().4s
+ trn2 \r7\().4s, \r2\().4s, \r3\().4s
+ trn1 \r0\().2d, \r4\().2d, \r6\().2d
+ trn2 \r2\().2d, \r4\().2d, \r6\().2d
+ trn1 \r1\().2d, \r5\().2d, \r7\().2d
+ trn2 \r3\().2d, \r5\().2d, \r7\().2d
+.endm
+
+// Transpose an 8x8 matrix of 32 bit elements, where each row is spread out
+// over two registers.
+.macro transpose_8x8s r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15, t0, t1, t2, t3
+ transpose_4x4s \r0, \r2, \r4, \r6, \t0, \t1, \t2, \t3
+ transpose_4x4s \r9, \r11, \r13, \r15, \t0, \t1, \t2, \t3
+
+ // Do 4x4 transposes of r1,r3,r5,r7 and r8,r10,r12,r14
+ // while swapping the two 4x4 matrices between each other
+
+ // First step of the 4x4 transpose of r1-r7, into t0-t3
+ trn1 \t0\().4s, \r1\().4s, \r3\().4s
+ trn2 \t1\().4s, \r1\().4s, \r3\().4s
+ trn1 \t2\().4s, \r5\().4s, \r7\().4s
+ trn2 \t3\().4s, \r5\().4s, \r7\().4s
+
+ // First step of the 4x4 transpose of r8-r14, into r1-r7
+ trn1 \r1\().4s, \r8\().4s, \r10\().4s
+ trn2 \r3\().4s, \r8\().4s, \r10\().4s
+ trn1 \r5\().4s, \r12\().4s, \r14\().4s
+ trn2 \r7\().4s, \r12\().4s, \r14\().4s
+
+ // Second step of the 4x4 transpose of r1-r7 (now in t0-t3), into r8-r14
+ trn1 \r8\().2d, \t0\().2d, \t2\().2d
+ trn2 \r12\().2d, \t0\().2d, \t2\().2d
+ trn1 \r10\().2d, \t1\().2d, \t3\().2d
+ trn2 \r14\().2d, \t1\().2d, \t3\().2d
+
+ // Second step of the 4x4 transpose of r8-r14 (now in r1-r7), in place as far as possible
+ trn1 \t0\().2d, \r1\().2d, \r5\().2d
+ trn2 \r5\().2d, \r1\().2d, \r5\().2d
+ trn1 \t1\().2d, \r3\().2d, \r7\().2d
+ trn2 \r7\().2d, \r3\().2d, \r7\().2d
+
+ // Move the outputs of trn1 back in place
+ mov \r1\().16b, \t0\().16b
+ mov \r3\().16b, \t1\().16b
+.endm
+
+// out1 = ((in1 + in2) * v0.s[0] + (1 << 13)) >> 14
+// out2 = ((in1 - in2) * v0.s[0] + (1 << 13)) >> 14
+// in/out are .4s registers; this can work with 4 temp registers, but is
+// more efficient if 6 temp registers are available.
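+// (v0.s[0] is 11585 = round(16384 / sqrt(2)), so out1/out2 are
+// (in1 +/- in2) / sqrt(2) in Q14 fixed point; the rounding rshrn by 14
+// supplies the + (1 << 13) term shown above.)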
+.macro dmbutterfly0 out1, out2, in1, in2, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, neg=0
+.if \neg > 0
+ neg \tmp4\().4s, v0.4s
+.endif
+ add \tmp1\().4s, \in1\().4s, \in2\().4s
+ sub \tmp2\().4s, \in1\().4s, \in2\().4s
+.if \neg > 0
+ smull \tmp3\().2d, \tmp1\().2s, \tmp4\().s[0]
+ smull2 \tmp4\().2d, \tmp1\().4s, \tmp4\().s[0]
+.else
+ smull \tmp3\().2d, \tmp1\().2s, v0.s[0]
+ smull2 \tmp4\().2d, \tmp1\().4s, v0.s[0]
+.endif
+.ifb \tmp5
+ rshrn \out1\().2s, \tmp3\().2d, #14
+ rshrn2 \out1\().4s, \tmp4\().2d, #14
+ smull \tmp3\().2d, \tmp2\().2s, v0.s[0]
+ smull2 \tmp4\().2d, \tmp2\().4s, v0.s[0]
+ rshrn \out2\().2s, \tmp3\().2d, #14
+ rshrn2 \out2\().4s, \tmp4\().2d, #14
+.else
+ smull \tmp5\().2d, \tmp2\().2s, v0.s[0]
+ smull2 \tmp6\().2d, \tmp2\().4s, v0.s[0]
+ rshrn \out1\().2s, \tmp3\().2d, #14
+ rshrn2 \out1\().4s, \tmp4\().2d, #14
+ rshrn \out2\().2s, \tmp5\().2d, #14
+ rshrn2 \out2\().4s, \tmp6\().2d, #14
+.endif
+.endm
+
+// Same as dmbutterfly0 above, but treating the input in in2 as zero,
+// writing the same output into both out1 and out2.
+.macro dmbutterfly0_h out1, out2, in1, in2, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6
+ smull \tmp1\().2d, \in1\().2s, v0.s[0]
+ smull2 \tmp2\().2d, \in1\().4s, v0.s[0]
+ rshrn \out1\().2s, \tmp1\().2d, #14
+ rshrn2 \out1\().4s, \tmp2\().2d, #14
+ rshrn \out2\().2s, \tmp1\().2d, #14
+ rshrn2 \out2\().4s, \tmp2\().2d, #14
+.endm
+
+// out1,out2 = in1 * coef1 - in2 * coef2
+// out3,out4 = in1 * coef2 + in2 * coef1
+// out are 4 x .2d registers, in are 2 x .4s registers
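+// (i.e. a Q14 rotation of (in1, in2), with coef1 and coef2 acting as the
+// cosine and sine of the twiddle angle; results stay in 64 bit until the
+// caller's rounding narrow by 14)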
+.macro dmbutterfly_l out1, out2, out3, out4, in1, in2, coef1, coef2
+ smull \out1\().2d, \in1\().2s, \coef1
+ smull2 \out2\().2d, \in1\().4s, \coef1
+ smull \out3\().2d, \in1\().2s, \coef2
+ smull2 \out4\().2d, \in1\().4s, \coef2
+ smlsl \out1\().2d, \in2\().2s, \coef2
+ smlsl2 \out2\().2d, \in2\().4s, \coef2
+ smlal \out3\().2d, \in2\().2s, \coef1
+ smlal2 \out4\().2d, \in2\().4s, \coef1
+.endm
+
+// inout1 = (inout1 * coef1 - inout2 * coef2 + (1 << 13)) >> 14
+// inout2 = (inout1 * coef2 + inout2 * coef1 + (1 << 13)) >> 14
+// inout are 2 x .4s registers
+.macro dmbutterfly inout1, inout2, coef1, coef2, tmp1, tmp2, tmp3, tmp4, neg=0
+ dmbutterfly_l \tmp1, \tmp2, \tmp3, \tmp4, \inout1, \inout2, \coef1, \coef2
+.if \neg > 0
+ neg \tmp3\().2d, \tmp3\().2d
+ neg \tmp4\().2d, \tmp4\().2d
+.endif
+ rshrn \inout1\().2s, \tmp1\().2d, #14
+ rshrn2 \inout1\().4s, \tmp2\().2d, #14
+ rshrn \inout2\().2s, \tmp3\().2d, #14
+ rshrn2 \inout2\().4s, \tmp4\().2d, #14
+.endm
+
+// Same as dmbutterfly above, but treating the input in inout2 as zero
+.macro dmbutterfly_h1 inout1, inout2, coef1, coef2, tmp1, tmp2, tmp3, tmp4
+ smull \tmp1\().2d, \inout1\().2s, \coef1
+ smull2 \tmp2\().2d, \inout1\().4s, \coef1
+ smull \tmp3\().2d, \inout1\().2s, \coef2
+ smull2 \tmp4\().2d, \inout1\().4s, \coef2
+ rshrn \inout1\().2s, \tmp1\().2d, #14
+ rshrn2 \inout1\().4s, \tmp2\().2d, #14
+ rshrn \inout2\().2s, \tmp3\().2d, #14
+ rshrn2 \inout2\().4s, \tmp4\().2d, #14
+.endm
+
+// Same as dmbutterfly above, but treating the input in inout1 as zero
+.macro dmbutterfly_h2 inout1, inout2, coef1, coef2, tmp1, tmp2, tmp3, tmp4
+ smull \tmp1\().2d, \inout2\().2s, \coef2
+ smull2 \tmp2\().2d, \inout2\().4s, \coef2
+ smull \tmp3\().2d, \inout2\().2s, \coef1
+ smull2 \tmp4\().2d, \inout2\().4s, \coef1
+ neg \tmp1\().2d, \tmp1\().2d
+ neg \tmp2\().2d, \tmp2\().2d
+ rshrn \inout2\().2s, \tmp3\().2d, #14
+ rshrn2 \inout2\().4s, \tmp4\().2d, #14
+ rshrn \inout1\().2s, \tmp1\().2d, #14
+ rshrn2 \inout1\().4s, \tmp2\().2d, #14
+.endm
+
+.macro dsmull_h out1, out2, in, coef
+ smull \out1\().2d, \in\().2s, \coef
+ smull2 \out2\().2d, \in\().4s, \coef
+.endm
+
+.macro drshrn_h out, in1, in2, shift
+ rshrn \out\().2s, \in1\().2d, \shift
+ rshrn2 \out\().4s, \in2\().2d, \shift
+.endm
+
+
+// out1 = in1 + in2
+// out2 = in1 - in2
+.macro butterfly_4s out1, out2, in1, in2
+ add \out1\().4s, \in1\().4s, \in2\().4s
+ sub \out2\().4s, \in1\().4s, \in2\().4s
+.endm
+
+// out1 = in1 - in2
+// out2 = in1 + in2
+.macro butterfly_4s_r out1, out2, in1, in2
+ sub \out1\().4s, \in1\().4s, \in2\().4s
+ add \out2\().4s, \in1\().4s, \in2\().4s
+.endm
+
+// out1 = (in1,in2 + in3,in4 + (1 << 13)) >> 14
+// out2 = (in1,in2 - in3,in4 + (1 << 13)) >> 14
+// out are 2 x .4s registers, in are 4 x .2d registers
+.macro dbutterfly_n out1, out2, in1, in2, in3, in4, tmp1, tmp2, tmp3, tmp4
+ add \tmp1\().2d, \in1\().2d, \in3\().2d
+ add \tmp2\().2d, \in2\().2d, \in4\().2d
+ sub \tmp3\().2d, \in1\().2d, \in3\().2d
+ sub \tmp4\().2d, \in2\().2d, \in4\().2d
+ rshrn \out1\().2s, \tmp1\().2d, #14
+ rshrn2 \out1\().4s, \tmp2\().2d, #14
+ rshrn \out2\().2s, \tmp3\().2d, #14
+ rshrn2 \out2\().4s, \tmp4\().2d, #14
+.endm
+
+.macro iwht4_10 c0, c1, c2, c3
+ add \c0\().4s, \c0\().4s, \c1\().4s
+ sub v17.4s, \c2\().4s, \c3\().4s
+ sub v16.4s, \c0\().4s, v17.4s
+ sshr v16.4s, v16.4s, #1
+ sub \c2\().4s, v16.4s, \c1\().4s
+ sub \c1\().4s, v16.4s, \c3\().4s
+ add \c3\().4s, v17.4s, \c2\().4s
+ sub \c0\().4s, \c0\().4s, \c1\().4s
+.endm
+
+.macro iwht4_12 c0, c1, c2, c3
+ iwht4_10 \c0, \c1, \c2, \c3
+.endm
+
+.macro idct4_10 c0, c1, c2, c3
+ mul v22.4s, \c1\().4s, v0.s[3]
+ mul v20.4s, \c1\().4s, v0.s[2]
+ add v16.4s, \c0\().4s, \c2\().4s
+ sub v17.4s, \c0\().4s, \c2\().4s
+ mla v22.4s, \c3\().4s, v0.s[2]
+ mul v18.4s, v16.4s, v0.s[0]
+ mul v24.4s, v17.4s, v0.s[0]
+ mls v20.4s, \c3\().4s, v0.s[3]
+ srshr v22.4s, v22.4s, #14
+ srshr v18.4s, v18.4s, #14
+ srshr v24.4s, v24.4s, #14
+ srshr v20.4s, v20.4s, #14
+ add \c0\().4s, v18.4s, v22.4s
+ sub \c3\().4s, v18.4s, v22.4s
+ add \c1\().4s, v24.4s, v20.4s
+ sub \c2\().4s, v24.4s, v20.4s
+.endm
+
+.macro idct4_12 c0, c1, c2, c3
+ smull v22.2d, \c1\().2s, v0.s[3]
+ smull2 v23.2d, \c1\().4s, v0.s[3]
+ smull v20.2d, \c1\().2s, v0.s[2]
+ smull2 v21.2d, \c1\().4s, v0.s[2]
+ add v16.4s, \c0\().4s, \c2\().4s
+ sub v17.4s, \c0\().4s, \c2\().4s
+ smlal v22.2d, \c3\().2s, v0.s[2]
+ smlal2 v23.2d, \c3\().4s, v0.s[2]
+ smull v18.2d, v16.2s, v0.s[0]
+ smull2 v19.2d, v16.4s, v0.s[0]
+ smull v24.2d, v17.2s, v0.s[0]
+ smull2 v25.2d, v17.4s, v0.s[0]
+ smlsl v20.2d, \c3\().2s, v0.s[3]
+ smlsl2 v21.2d, \c3\().4s, v0.s[3]
+ rshrn v22.2s, v22.2d, #14
+ rshrn2 v22.4s, v23.2d, #14
+ rshrn v18.2s, v18.2d, #14
+ rshrn2 v18.4s, v19.2d, #14
+ rshrn v24.2s, v24.2d, #14
+ rshrn2 v24.4s, v25.2d, #14
+ rshrn v20.2s, v20.2d, #14
+ rshrn2 v20.4s, v21.2d, #14
+ add \c0\().4s, v18.4s, v22.4s
+ sub \c3\().4s, v18.4s, v22.4s
+ add \c1\().4s, v24.4s, v20.4s
+ sub \c2\().4s, v24.4s, v20.4s
+.endm
+
+.macro iadst4_10 c0, c1, c2, c3
+ mul v16.4s, \c0\().4s, v1.s[0]
+ mla v16.4s, \c2\().4s, v1.s[1]
+ mla v16.4s, \c3\().4s, v1.s[2]
+ mul v18.4s, \c0\().4s, v1.s[2]
+ mls v18.4s, \c2\().4s, v1.s[0]
+ sub \c0\().4s, \c0\().4s, \c2\().4s
+ mls v18.4s, \c3\().4s, v1.s[1]
+ add \c0\().4s, \c0\().4s, \c3\().4s
+ mul v22.4s, \c1\().4s, v1.s[3]
+ mul v20.4s, \c0\().4s, v1.s[3]
+ add v24.4s, v16.4s, v22.4s
+ add v26.4s, v18.4s, v22.4s
+ srshr \c0\().4s, v24.4s, #14
+ add v16.4s, v16.4s, v18.4s
+ srshr \c1\().4s, v26.4s, #14
+ sub v16.4s, v16.4s, v22.4s
+ srshr \c2\().4s, v20.4s, #14
+ srshr \c3\().4s, v16.4s, #14
+.endm
+
+.macro iadst4_12 c0, c1, c2, c3
+ smull v16.2d, \c0\().2s, v1.s[0]
+ smull2 v17.2d, \c0\().4s, v1.s[0]
+ smlal v16.2d, \c2\().2s, v1.s[1]
+ smlal2 v17.2d, \c2\().4s, v1.s[1]
+ smlal v16.2d, \c3\().2s, v1.s[2]
+ smlal2 v17.2d, \c3\().4s, v1.s[2]
+ smull v18.2d, \c0\().2s, v1.s[2]
+ smull2 v19.2d, \c0\().4s, v1.s[2]
+ smlsl v18.2d, \c2\().2s, v1.s[0]
+ smlsl2 v19.2d, \c2\().4s, v1.s[0]
+ sub \c0\().4s, \c0\().4s, \c2\().4s
+ smlsl v18.2d, \c3\().2s, v1.s[1]
+ smlsl2 v19.2d, \c3\().4s, v1.s[1]
+ add \c0\().4s, \c0\().4s, \c3\().4s
+ smull v22.2d, \c1\().2s, v1.s[3]
+ smull2 v23.2d, \c1\().4s, v1.s[3]
+ smull v20.2d, \c0\().2s, v1.s[3]
+ smull2 v21.2d, \c0\().4s, v1.s[3]
+ add v24.2d, v16.2d, v22.2d
+ add v25.2d, v17.2d, v23.2d
+ add v26.2d, v18.2d, v22.2d
+ add v27.2d, v19.2d, v23.2d
+ rshrn \c0\().2s, v24.2d, #14
+ rshrn2 \c0\().4s, v25.2d, #14
+ add v16.2d, v16.2d, v18.2d
+ add v17.2d, v17.2d, v19.2d
+ rshrn \c1\().2s, v26.2d, #14
+ rshrn2 \c1\().4s, v27.2d, #14
+ sub v16.2d, v16.2d, v22.2d
+ sub v17.2d, v17.2d, v23.2d
+ rshrn \c2\().2s, v20.2d, #14
+ rshrn2 \c2\().4s, v21.2d, #14
+ rshrn \c3\().2s, v16.2d, #14
+ rshrn2 \c3\().4s, v17.2d, #14
+.endm
+
+// The public functions in this file have the following signature:
+// void itxfm_add(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
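+// dst and stride address 16 bit pixels at these bit depths, block holds
+// 32 bit coefficients (hence the .4s loads below) despite the nominal
+// int16_t pointer type, and eob is the number of non-zero coefficients;
+// eob == 1 selects the DC-only fast paths.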
+
+.macro itxfm_func4x4 txfm1, txfm2, bpp
+function ff_vp9_\txfm1\()_\txfm2\()_4x4_add_\bpp\()_neon, export=1
+.ifc \txfm1,\txfm2
+.ifc \txfm1,idct
+ movrel x4, itxfm4_coeffs
+ ld1 {v0.4h}, [x4]
+ sxtl v0.4s, v0.4h
+.endif
+.ifc \txfm1,iadst
+ movrel x4, iadst4_coeffs
+ ld1 {v0.d}[1], [x4]
+ sxtl2 v1.4s, v0.8h
+.endif
+.else
+ movrel x4, itxfm4_coeffs
+ ld1 {v0.8h}, [x4]
+ sxtl2 v1.4s, v0.8h
+ sxtl v0.4s, v0.4h
+.endif
+
+ movi v30.4s, #0
+ movi v31.4s, #0
+.ifc \txfm1\()_\txfm2,idct_idct
+ cmp w3, #1
+ b.ne 1f
+ // DC-only for idct/idct
+ ld1 {v2.s}[0], [x2]
+ smull v2.2d, v2.2s, v0.s[0]
+ rshrn v2.2s, v2.2d, #14
+ smull v2.2d, v2.2s, v0.s[0]
+ rshrn v2.2s, v2.2d, #14
+ st1 {v31.s}[0], [x2]
+ dup v4.4s, v2.s[0]
+ mov v5.16b, v4.16b
+ mov v6.16b, v4.16b
+ mov v7.16b, v4.16b
+ b 2f
+.endif
+
+1:
+ ld1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x2]
+ st1 {v30.4s,v31.4s}, [x2], #32
+
+.ifc \txfm1,iwht
+ sshr v4.4s, v4.4s, #2
+ sshr v5.4s, v5.4s, #2
+ sshr v6.4s, v6.4s, #2
+ sshr v7.4s, v7.4s, #2
+.endif
+
+ \txfm1\()4_\bpp v4, v5, v6, v7
+
+ st1 {v30.4s,v31.4s}, [x2], #32
+ // Transpose 4x4 with 32 bit elements
+ transpose_4x4s v4, v5, v6, v7, v16, v17, v18, v19
+
+ \txfm2\()4_\bpp v4, v5, v6, v7
+2:
+ mvni v31.8h, #((0xff << (\bpp - 8)) & 0xff), lsl #8
+ ld1 {v0.4h}, [x0], x1
+ ld1 {v1.4h}, [x0], x1
+.ifnc \txfm1,iwht
+ srshr v4.4s, v4.4s, #4
+ srshr v5.4s, v5.4s, #4
+ srshr v6.4s, v6.4s, #4
+ srshr v7.4s, v7.4s, #4
+.endif
+ uaddw v4.4s, v4.4s, v0.4h
+ uaddw v5.4s, v5.4s, v1.4h
+ ld1 {v2.4h}, [x0], x1
+ ld1 {v3.4h}, [x0], x1
+ sqxtun v0.4h, v4.4s
+ sqxtun2 v0.8h, v5.4s
+ sub x0, x0, x1, lsl #2
+
+ uaddw v6.4s, v6.4s, v2.4h
+ umin v0.8h, v0.8h, v31.8h
+ uaddw v7.4s, v7.4s, v3.4h
+ st1 {v0.4h}, [x0], x1
+ sqxtun v2.4h, v6.4s
+ sqxtun2 v2.8h, v7.4s
+ umin v2.8h, v2.8h, v31.8h
+
+ st1 {v0.d}[1], [x0], x1
+ st1 {v2.4h}, [x0], x1
+ st1 {v2.d}[1], [x0], x1
+
+ ret
+endfunc
+.endm
+
+.macro itxfm_funcs4x4 bpp
+itxfm_func4x4 idct, idct, \bpp
+itxfm_func4x4 iadst, idct, \bpp
+itxfm_func4x4 idct, iadst, \bpp
+itxfm_func4x4 iadst, iadst, \bpp
+itxfm_func4x4 iwht, iwht, \bpp
+.endm
+
+itxfm_funcs4x4 10
+itxfm_funcs4x4 12
+
+function idct8x8_dc_add_neon
+ movrel x4, idct_coeffs
+ ld1 {v0.4h}, [x4]
+
+ movi v1.4h, #0
+ sxtl v0.4s, v0.4h
+
+ ld1 {v2.s}[0], [x2]
+ smull v2.2d, v2.2s, v0.s[0]
+ rshrn v2.2s, v2.2d, #14
+ smull v2.2d, v2.2s, v0.s[0]
+ rshrn v2.2s, v2.2d, #14
+ st1 {v1.s}[0], [x2]
+ dup v2.4s, v2.s[0]
+
+ srshr v2.4s, v2.4s, #5
+
+ mov x4, #8
+ mov x3, x0
+ dup v31.8h, w5
+1:
+ // Loop to add the constant from v2 into all 8x8 outputs
+ subs x4, x4, #2
+ ld1 {v3.8h}, [x0], x1
+ ld1 {v4.8h}, [x0], x1
+ uaddw v16.4s, v2.4s, v3.4h
+ uaddw2 v17.4s, v2.4s, v3.8h
+ uaddw v18.4s, v2.4s, v4.4h
+ uaddw2 v19.4s, v2.4s, v4.8h
+ sqxtun v3.4h, v16.4s
+ sqxtun2 v3.8h, v17.4s
+ sqxtun v4.4h, v18.4s
+ sqxtun2 v4.8h, v19.4s
+ umin v3.8h, v3.8h, v31.8h
+ umin v4.8h, v4.8h, v31.8h
+ st1 {v3.8h}, [x3], x1
+ st1 {v4.8h}, [x3], x1
+ b.ne 1b
+
+ ret
+endfunc
+
+.macro idct8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1, t2, t3, t4, t5
+ dmbutterfly0 \r0, \r4, \r0, \r4, \t0, \t1, \t2, \t3, \t4, \t5 // r0 = t0a, r4 = t1a
+ dmbutterfly \r2, \r6, v0.s[2], v0.s[3], \t0, \t1, \t2, \t3 // r2 = t2a, r6 = t3a
+ dmbutterfly \r1, \r7, v1.s[0], v1.s[1], \t0, \t1, \t2, \t3 // r1 = t4a, r7 = t7a
+ dmbutterfly \r5, \r3, v1.s[2], v1.s[3], \t0, \t1, \t2, \t3 // r5 = t5a, r3 = t6a
+
+ butterfly_4s \t0, \t1, \r0, \r6 // t0 = t0, t1 = t3
+ butterfly_4s \t2, \r5, \r1, \r5 // t2 = t4, r5 = t5a
+ butterfly_4s \t3, \r6, \r7, \r3 // t3 = t7, r6 = t6a
+ butterfly_4s \r7, \r4, \r4, \r2 // r7 = t1, r4 = t2
+
+ dmbutterfly0 \r6, \r5, \r6, \r5, \r0, \r1, \r2, \r3, \t4, \t5 // r6 = t6, r5 = t5
+
+ butterfly_4s \r1, \r6, \r7, \r6 // r1 = out[1], r6 = out[6]
+ butterfly_4s \r0, \r7, \t0, \t3 // r0 = out[0], r7 = out[7]
+ butterfly_4s \r2, \r5, \r4, \r5 // r2 = out[2], r5 = out[5]
+ butterfly_4s \r3, \r4, \t1, \t2 // r3 = out[3], r4 = out[4]
+.endm
+
+.macro iadst8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1, t2, t3, t4, t5
+ dmbutterfly_l \t2, \t3, \t0, \t1, \r7, \r0, v2.s[1], v2.s[0] // t2,t3 = t1a, t0,t1 = t0a
+ dmbutterfly_l \r0, \r7, \t4, \t5, \r3, \r4, v3.s[1], v3.s[0] // r0,r7 = t5a, t4,t5 = t4a
+
+ dbutterfly_n \r3, \t0, \t0, \t1, \t4, \t5, \r3, \r4, \t0, \t1 // r3 = t0, t0 = t4
+ dbutterfly_n \r4, \t1, \t2, \t3, \r0, \r7, \r4, \t1, \t4, \t5 // r4 = t1, t1 = t5
+
+ dmbutterfly_l \t4, \t5, \t2, \t3, \r5, \r2, v2.s[3], v2.s[2] // t4,t5 = t3a, t2,t3 = t2a
+ dmbutterfly_l \r2, \r5, \r0, \r7, \r1, \r6, v3.s[3], v3.s[2] // r2,r5 = t7a, r0,r7 = t6a
+
+ dbutterfly_n \r1, \t2, \t2, \t3, \r0, \r7, \r1, \r6, \t2, \t3 // r1 = t2, t2 = t6
+ dbutterfly_n \r0, \t4, \t4, \t5, \r2, \r5, \r0, \r7, \t4, \t5 // r0 = t3, t4 = t7
+
+ butterfly_4s \r7, \r4, \r4, \r0 // r7 = -out[7], r4 = t3
+ neg \r7\().4s, \r7\().4s // r7 = out[7]
+ butterfly_4s \r0, \r1, \r3, \r1 // r0 = out[0], r1 = t2
+
+ dmbutterfly_l \r2, \r3, \t3, \t5, \t0, \t1, v0.s[2], v0.s[3] // r2,r3 = t5a, t3,t5 = t4a
+ dmbutterfly_l \t0, \t1, \r5, \r6, \t4, \t2, v0.s[3], v0.s[2] // t0,t1 = t6a, r5,r6 = t7a
+
+ dbutterfly_n \r6, \t2, \r2, \r3, \r5, \r6, \t2, \t4, \r2, \r3 // r6 = out[6], t2 = t7
+
+ dmbutterfly0 \r3, \r4, \r1, \r4, \t4, \r5, \r1, \r2 // r3 = -out[3], r4 = out[4]
+ neg \r3\().4s, \r3\().4s // r3 = out[3]
+
+ dbutterfly_n \r1, \t0, \t3, \t5, \t0, \t1, \r1, \r2, \t0, \t1 // r1 = -out[1], t0 = t6
+ neg \r1\().4s, \r1\().4s // r1 = out[1]
+
+ dmbutterfly0 \r2, \r5, \t0, \t2, \t1, \t3, \t4, \t5 // r2 = out[2], r5 = -out[5]
+ neg \r5\().4s, \r5\().4s // r5 = out[5]
+.endm
+
+
+.macro itxfm_func8x8 txfm1, txfm2
+function vp9_\txfm1\()_\txfm2\()_8x8_add_16_neon
+.ifc \txfm1\()_\txfm2,idct_idct
+ cmp w3, #1
+ b.eq idct8x8_dc_add_neon
+.endif
+ // The iadst also uses a few coefficients from
+ // idct, so those always need to be loaded.
+.ifc \txfm1\()_\txfm2,idct_idct
+ movrel x4, idct_coeffs
+.else
+ movrel x4, iadst8_coeffs
+ ld1 {v1.8h}, [x4], #16
+ stp d8, d9, [sp, #-0x10]!
+ sxtl2 v3.4s, v1.8h
+ sxtl v2.4s, v1.4h
+.endif
+ ld1 {v0.8h}, [x4]
+ sxtl2 v1.4s, v0.8h
+ sxtl v0.4s, v0.4h
+
+ movi v4.4s, #0
+ movi v5.4s, #0
+ movi v6.4s, #0
+ movi v7.4s, #0
+
+1:
+ ld1 {v16.4s,v17.4s,v18.4s,v19.4s}, [x2], #64
+ ld1 {v20.4s,v21.4s,v22.4s,v23.4s}, [x2], #64
+ ld1 {v24.4s,v25.4s,v26.4s,v27.4s}, [x2], #64
+ ld1 {v28.4s,v29.4s,v30.4s,v31.4s}, [x2], #64
+ sub x2, x2, #256
+ st1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x2], #64
+ st1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x2], #64
+ st1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x2], #64
+ st1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x2], #64
+
+.ifc \txfm1\()_\txfm2,idct_idct
+ idct8 v16, v18, v20, v22, v24, v26, v28, v30, v2, v3, v4, v5, v6, v7
+ idct8 v17, v19, v21, v23, v25, v27, v29, v31, v2, v3, v4, v5, v6, v7
+.else
+ \txfm1\()8 v16, v18, v20, v22, v24, v26, v28, v30, v4, v5, v6, v7, v8, v9
+ \txfm1\()8 v17, v19, v21, v23, v25, v27, v29, v31, v4, v5, v6, v7, v8, v9
+.endif
+
+ // Transpose 8x8 with 32 bit elements (each row spread over two registers)
+ transpose_8x8s v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v4, v5, v6, v7
+
+.ifc \txfm1\()_\txfm2,idct_idct
+ idct8 v16, v18, v20, v22, v24, v26, v28, v30, v2, v3, v4, v5, v6, v7
+ idct8 v17, v19, v21, v23, v25, v27, v29, v31, v2, v3, v4, v5, v6, v7
+.else
+ \txfm2\()8 v16, v18, v20, v22, v24, v26, v28, v30, v4, v5, v6, v7, v8, v9
+ \txfm2\()8 v17, v19, v21, v23, v25, v27, v29, v31, v4, v5, v6, v7, v8, v9
+.endif
+2:
+ mov x3, x0
+ // Add into the destination
+ ld1 {v0.8h}, [x0], x1
+ srshr v16.4s, v16.4s, #5
+ srshr v17.4s, v17.4s, #5
+ ld1 {v1.8h}, [x0], x1
+ srshr v18.4s, v18.4s, #5
+ srshr v19.4s, v19.4s, #5
+ ld1 {v2.8h}, [x0], x1
+ srshr v20.4s, v20.4s, #5
+ srshr v21.4s, v21.4s, #5
+ uaddw v16.4s, v16.4s, v0.4h
+ uaddw2 v17.4s, v17.4s, v0.8h
+ ld1 {v3.8h}, [x0], x1
+ srshr v22.4s, v22.4s, #5
+ srshr v23.4s, v23.4s, #5
+ uaddw v18.4s, v18.4s, v1.4h
+ uaddw2 v19.4s, v19.4s, v1.8h
+ ld1 {v4.8h}, [x0], x1
+ srshr v24.4s, v24.4s, #5
+ srshr v25.4s, v25.4s, #5
+ uaddw v20.4s, v20.4s, v2.4h
+ uaddw2 v21.4s, v21.4s, v2.8h
+ sqxtun v0.4h, v16.4s
+ sqxtun2 v0.8h, v17.4s
+ dup v16.8h, w5
+ ld1 {v5.8h}, [x0], x1
+ srshr v26.4s, v26.4s, #5
+ srshr v27.4s, v27.4s, #5
+ uaddw v22.4s, v22.4s, v3.4h
+ uaddw2 v23.4s, v23.4s, v3.8h
+ sqxtun v1.4h, v18.4s
+ sqxtun2 v1.8h, v19.4s
+ umin v0.8h, v0.8h, v16.8h
+ ld1 {v6.8h}, [x0], x1
+ srshr v28.4s, v28.4s, #5
+ srshr v29.4s, v29.4s, #5
+ uaddw v24.4s, v24.4s, v4.4h
+ uaddw2 v25.4s, v25.4s, v4.8h
+ sqxtun v2.4h, v20.4s
+ sqxtun2 v2.8h, v21.4s
+ umin v1.8h, v1.8h, v16.8h
+ ld1 {v7.8h}, [x0], x1
+ srshr v30.4s, v30.4s, #5
+ srshr v31.4s, v31.4s, #5
+ uaddw v26.4s, v26.4s, v5.4h
+ uaddw2 v27.4s, v27.4s, v5.8h
+ sqxtun v3.4h, v22.4s
+ sqxtun2 v3.8h, v23.4s
+ umin v2.8h, v2.8h, v16.8h
+
+ st1 {v0.8h}, [x3], x1
+ uaddw v28.4s, v28.4s, v6.4h
+ uaddw2 v29.4s, v29.4s, v6.8h
+ st1 {v1.8h}, [x3], x1
+ sqxtun v4.4h, v24.4s
+ sqxtun2 v4.8h, v25.4s
+ umin v3.8h, v3.8h, v16.8h
+ st1 {v2.8h}, [x3], x1
+ uaddw v30.4s, v30.4s, v7.4h
+ uaddw2 v31.4s, v31.4s, v7.8h
+ st1 {v3.8h}, [x3], x1
+ sqxtun v5.4h, v26.4s
+ sqxtun2 v5.8h, v27.4s
+ umin v4.8h, v4.8h, v16.8h
+ st1 {v4.8h}, [x3], x1
+ sqxtun v6.4h, v28.4s
+ sqxtun2 v6.8h, v29.4s
+ umin v5.8h, v5.8h, v16.8h
+ st1 {v5.8h}, [x3], x1
+ sqxtun v7.4h, v30.4s
+ sqxtun2 v7.8h, v31.4s
+ umin v6.8h, v6.8h, v16.8h
+
+ st1 {v6.8h}, [x3], x1
+ umin v7.8h, v7.8h, v16.8h
+ st1 {v7.8h}, [x3], x1
+
+.ifnc \txfm1\()_\txfm2,idct_idct
+ ldp d8, d9, [sp], 0x10
+.endif
+ ret
+endfunc
+
+function ff_vp9_\txfm1\()_\txfm2\()_8x8_add_10_neon, export=1
+ mov x5, #0x03ff
+ b vp9_\txfm1\()_\txfm2\()_8x8_add_16_neon
+endfunc
+
+function ff_vp9_\txfm1\()_\txfm2\()_8x8_add_12_neon, export=1
+ mov x5, #0x0fff
+ b vp9_\txfm1\()_\txfm2\()_8x8_add_16_neon
+endfunc
+.endm
+
+itxfm_func8x8 idct, idct
+itxfm_func8x8 iadst, idct
+itxfm_func8x8 idct, iadst
+itxfm_func8x8 iadst, iadst
+
+
+function idct16x16_dc_add_neon
+ movrel x4, idct_coeffs
+ ld1 {v0.4h}, [x4]
+ sxtl v0.4s, v0.4h
+
+ movi v1.4h, #0
+
+ ld1 {v2.s}[0], [x2]
+ smull v2.2d, v2.2s, v0.s[0]
+ rshrn v2.2s, v2.2d, #14
+ smull v2.2d, v2.2s, v0.s[0]
+ rshrn v2.2s, v2.2d, #14
+ st1 {v1.s}[0], [x2]
+ dup v2.4s, v2.s[0]
+
+ srshr v0.4s, v2.4s, #6
+
+ mov x3, x0
+ mov x4, #16
+ dup v31.8h, w13
+1:
+ // Loop to add the constant from v0 into all 16x16 outputs
+ subs x4, x4, #2
+ ld1 {v1.8h,v2.8h}, [x0], x1
+ uaddw v16.4s, v0.4s, v1.4h
+ uaddw2 v17.4s, v0.4s, v1.8h
+ ld1 {v3.8h,v4.8h}, [x0], x1
+ uaddw v18.4s, v0.4s, v2.4h
+ uaddw2 v19.4s, v0.4s, v2.8h
+ uaddw v20.4s, v0.4s, v3.4h
+ uaddw2 v21.4s, v0.4s, v3.8h
+ uaddw v22.4s, v0.4s, v4.4h
+ uaddw2 v23.4s, v0.4s, v4.8h
+ sqxtun v1.4h, v16.4s
+ sqxtun2 v1.8h, v17.4s
+ sqxtun v2.4h, v18.4s
+ sqxtun2 v2.8h, v19.4s
+ sqxtun v3.4h, v20.4s
+ sqxtun2 v3.8h, v21.4s
+ sqxtun v4.4h, v22.4s
+ sqxtun2 v4.8h, v23.4s
+ umin v1.8h, v1.8h, v31.8h
+ umin v2.8h, v2.8h, v31.8h
+ st1 {v1.8h,v2.8h}, [x3], x1
+ umin v3.8h, v3.8h, v31.8h
+ umin v4.8h, v4.8h, v31.8h
+ st1 {v3.8h,v4.8h}, [x3], x1
+ b.ne 1b
+
+ ret
+endfunc
+
+.macro idct16_end
+ butterfly_4s v18, v7, v4, v7 // v18 = t0a, v7 = t7a
+ butterfly_4s v19, v22, v5, v22 // v19 = t1a, v22 = t6
+ butterfly_4s v4, v26, v20, v26 // v4 = t2a, v26 = t5
+ butterfly_4s v5, v6, v28, v6 // v5 = t3a, v6 = t4
+ butterfly_4s v20, v28, v16, v24 // v20 = t8a, v28 = t11a
+ butterfly_4s v24, v21, v23, v21 // v24 = t9, v21 = t10
+ butterfly_4s v23, v27, v25, v27 // v23 = t14, v27 = t13
+ butterfly_4s v25, v29, v29, v17 // v25 = t15a, v29 = t12a
+
+ dmbutterfly0 v8, v9, v27, v21, v8, v9, v16, v17, v30, v31 // v8 = t13a, v9 = t10a
+ dmbutterfly0 v28, v27, v29, v28, v21, v29, v16, v17, v30, v31 // v28 = t12, v27 = t11
+
+ butterfly_4s v16, v31, v18, v25 // v16 = out[0], v31 = out[15]
+ butterfly_4s v17, v30, v19, v23 // v17 = out[1], v30 = out[14]
+ butterfly_4s_r v25, v22, v22, v24 // v25 = out[9], v22 = out[6]
+ butterfly_4s v23, v24, v7, v20 // v23 = out[7], v24 = out[8]
+ butterfly_4s v18, v29, v4, v8 // v18 = out[2], v29 = out[13]
+ butterfly_4s v19, v28, v5, v28 // v19 = out[3], v28 = out[12]
+ butterfly_4s v20, v27, v6, v27 // v20 = out[4], v27 = out[11]
+ butterfly_4s v21, v26, v26, v9 // v21 = out[5], v26 = out[10]
+ ret
+.endm
+
+function idct16
+ dmbutterfly0 v16, v24, v16, v24, v4, v5, v6, v7, v8, v9 // v16 = t0a, v24 = t1a
+ dmbutterfly v20, v28, v0.s[2], v0.s[3], v4, v5, v6, v7 // v20 = t2a, v28 = t3a
+ dmbutterfly v18, v30, v1.s[0], v1.s[1], v4, v5, v6, v7 // v18 = t4a, v30 = t7a
+ dmbutterfly v26, v22, v1.s[2], v1.s[3], v4, v5, v6, v7 // v26 = t5a, v22 = t6a
+ dmbutterfly v17, v31, v2.s[0], v2.s[1], v4, v5, v6, v7 // v17 = t8a, v31 = t15a
+ dmbutterfly v25, v23, v2.s[2], v2.s[3], v4, v5, v6, v7 // v25 = t9a, v23 = t14a
+ dmbutterfly v21, v27, v3.s[0], v3.s[1], v4, v5, v6, v7 // v21 = t10a, v27 = t13a
+ dmbutterfly v29, v19, v3.s[2], v3.s[3], v4, v5, v6, v7 // v29 = t11a, v19 = t12a
+
+ butterfly_4s v4, v28, v16, v28 // v4 = t0, v28 = t3
+ butterfly_4s v5, v20, v24, v20 // v5 = t1, v20 = t2
+ butterfly_4s v6, v26, v18, v26 // v6 = t4, v26 = t5
+ butterfly_4s v7, v22, v30, v22 // v7 = t7, v22 = t6
+ butterfly_4s v16, v25, v17, v25 // v16 = t8, v25 = t9
+ butterfly_4s v24, v21, v29, v21 // v24 = t11, v21 = t10
+ butterfly_4s v17, v27, v19, v27 // v17 = t12, v27 = t13
+ butterfly_4s v29, v23, v31, v23 // v29 = t15, v23 = t14
+
+ dmbutterfly0 v22, v26, v22, v26, v8, v9, v18, v19, v30, v31 // v22 = t6a, v26 = t5a
+ dmbutterfly v23, v25, v0.s[2], v0.s[3], v18, v19, v30, v31 // v23 = t9a, v25 = t14a
+ dmbutterfly v27, v21, v0.s[2], v0.s[3], v18, v19, v30, v31, neg=1 // v27 = t13a, v21 = t10a
+ idct16_end
+endfunc
+
+function idct16_half
+ dmbutterfly0_h v16, v24, v16, v24, v4, v5, v6, v7, v8, v9 // v16 = t0a, v24 = t1a
+ dmbutterfly_h1 v20, v28, v0.s[2], v0.s[3], v4, v5, v6, v7 // v20 = t2a, v28 = t3a
+ dmbutterfly_h1 v18, v30, v1.s[0], v1.s[1], v4, v5, v6, v7 // v18 = t4a, v30 = t7a
+ dmbutterfly_h2 v26, v22, v1.s[2], v1.s[3], v4, v5, v6, v7 // v26 = t5a, v22 = t6a
+ dmbutterfly_h1 v17, v31, v2.s[0], v2.s[1], v4, v5, v6, v7 // v17 = t8a, v31 = t15a
+ dmbutterfly_h2 v25, v23, v2.s[2], v2.s[3], v4, v5, v6, v7 // v25 = t9a, v23 = t14a
+ dmbutterfly_h1 v21, v27, v3.s[0], v3.s[1], v4, v5, v6, v7 // v21 = t10a, v27 = t13a
+ dmbutterfly_h2 v29, v19, v3.s[2], v3.s[3], v4, v5, v6, v7 // v29 = t11a, v19 = t12a
+
+ butterfly_4s v4, v28, v16, v28 // v4 = t0, v28 = t3
+ butterfly_4s v5, v20, v24, v20 // v5 = t1, v20 = t2
+ butterfly_4s v6, v26, v18, v26 // v6 = t4, v26 = t5
+ butterfly_4s v7, v22, v30, v22 // v7 = t7, v22 = t6
+ butterfly_4s v16, v25, v17, v25 // v16 = t8, v25 = t9
+ butterfly_4s v24, v21, v29, v21 // v24 = t11, v21 = t10
+ butterfly_4s v17, v27, v19, v27 // v17 = t12, v27 = t13
+ butterfly_4s v29, v23, v31, v23 // v29 = t15, v23 = t14
+
+ dmbutterfly0 v22, v26, v22, v26, v8, v9, v18, v19, v30, v31 // v22 = t6a, v26 = t5a
+ dmbutterfly v23, v25, v0.s[2], v0.s[3], v18, v19, v30, v31 // v23 = t9a, v25 = t14a
+ dmbutterfly v27, v21, v0.s[2], v0.s[3], v18, v19, v30, v31, neg=1 // v27 = t13a, v21 = t10a
+ idct16_end
+endfunc
+
+function idct16_quarter
+ dsmull_h v24, v25, v19, v3.s[3]
+ dsmull_h v4, v5, v17, v2.s[0]
+ dsmull_h v7, v6, v18, v1.s[1]
+ dsmull_h v30, v31, v18, v1.s[0]
+ neg v24.2d, v24.2d
+ neg v25.2d, v25.2d
+ dsmull_h v29, v28, v17, v2.s[1]
+ dsmull_h v26, v27, v19, v3.s[2]
+ dsmull_h v22, v23, v16, v0.s[0]
+ drshrn_h v24, v24, v25, #14
+ drshrn_h v16, v4, v5, #14
+ drshrn_h v7, v7, v6, #14
+ drshrn_h v6, v30, v31, #14
+ drshrn_h v29, v29, v28, #14
+ drshrn_h v17, v26, v27, #14
+ drshrn_h v28, v22, v23, #14
+
+ dmbutterfly_l v20, v21, v22, v23, v17, v24, v0.s[2], v0.s[3]
+ dmbutterfly_l v18, v19, v30, v31, v29, v16, v0.s[2], v0.s[3]
+ neg v22.2d, v22.2d
+ neg v23.2d, v23.2d
+ drshrn_h v27, v20, v21, #14
+ drshrn_h v21, v22, v23, #14
+ drshrn_h v23, v18, v19, #14
+ drshrn_h v25, v30, v31, #14
+ mov v4.16b, v28.16b
+ mov v5.16b, v28.16b
+ dmbutterfly0 v22, v26, v7, v6, v18, v19, v30, v31
+ mov v20.16b, v28.16b
+ idct16_end
+endfunc
+
+function iadst16
+ ld1 {v0.8h,v1.8h}, [x11]
+ sxtl v2.4s, v1.4h
+ sxtl2 v3.4s, v1.8h
+ sxtl2 v1.4s, v0.8h
+ sxtl v0.4s, v0.4h
+
+ dmbutterfly_l v6, v7, v4, v5, v31, v16, v0.s[1], v0.s[0] // v6,v7 = t1, v4,v5 = t0
+ dmbutterfly_l v10, v11, v8, v9, v23, v24, v1.s[1], v1.s[0] // v10,v11 = t9, v8,v9 = t8
+ dbutterfly_n v31, v24, v6, v7, v10, v11, v12, v13, v10, v11 // v31 = t1a, v24 = t9a
+ dmbutterfly_l v14, v15, v12, v13, v29, v18, v0.s[3], v0.s[2] // v14,v15 = t3, v12,v13 = t2
+ dbutterfly_n v16, v23, v4, v5, v8, v9, v6, v7, v8, v9 // v16 = t0a, v23 = t8a
+
+ dmbutterfly_l v6, v7, v4, v5, v21, v26, v1.s[3], v1.s[2] // v6,v7 = t11, v4,v5 = t10
+ dbutterfly_n v29, v26, v14, v15, v6, v7, v8, v9, v6, v7 // v29 = t3a, v26 = t11a
+ dmbutterfly_l v10, v11, v8, v9, v27, v20, v2.s[1], v2.s[0] // v10,v11 = t5, v8,v9 = t4
+ dbutterfly_n v18, v21, v12, v13, v4, v5, v6, v7, v4, v5 // v18 = t2a, v21 = t10a
+
+ dmbutterfly_l v14, v15, v12, v13, v19, v28, v3.s[1], v3.s[0] // v14,v15 = t13, v12,v13 = t12
+ dbutterfly_n v20, v28, v10, v11, v14, v15, v4, v5, v14, v15 // v20 = t5a, v28 = t13a
+ dmbutterfly_l v6, v7, v4, v5, v25, v22, v2.s[3], v2.s[2] // v6,v7 = t7, v4,v5 = t6
+ dbutterfly_n v27, v19, v8, v9, v12, v13, v10, v11, v12, v13 // v27 = t4a, v19 = t12a
+
+ dmbutterfly_l v10, v11, v8, v9, v17, v30, v3.s[3], v3.s[2] // v10,v11 = t15, v8,v9 = t14
+ ld1 {v0.8h}, [x10]
+ dbutterfly_n v22, v30, v6, v7, v10, v11, v12, v13, v10, v11 // v22 = t7a, v30 = t15a
+ sxtl2 v1.4s, v0.8h
+ sxtl v0.4s, v0.4h
+ dmbutterfly_l v14, v15, v12, v13, v23, v24, v1.s[0], v1.s[1] // v14,v15 = t9, v12,v13 = t8
+ dbutterfly_n v25, v17, v4, v5, v8, v9, v6, v7, v8, v9 // v25 = t6a, v17 = t14a
+
+ dmbutterfly_l v4, v5, v6, v7, v28, v19, v1.s[1], v1.s[0] // v4,v5 = t12, v6,v7 = t13
+ dbutterfly_n v23, v19, v12, v13, v4, v5, v8, v9, v4, v5 // v23 = t8a, v19 = t12a
+ dmbutterfly_l v10, v11, v8, v9, v21, v26, v1.s[2], v1.s[3] // v10,v11 = t11, v8,v9 = t10
+ butterfly_4s_r v4, v27, v16, v27 // v4 = t4, v27 = t0
+ dbutterfly_n v24, v28, v14, v15, v6, v7, v12, v13, v6, v7 // v24 = t9a, v28 = t13a
+
+ dmbutterfly_l v12, v13, v14, v15, v30, v17, v1.s[3], v1.s[2] // v12,v13 = t14, v14,v15 = t15
+ butterfly_4s_r v5, v20, v31, v20 // v5 = t5, v20 = t1
+ dbutterfly_n v21, v17, v8, v9, v12, v13, v6, v7, v12, v13 // v21 = t10a, v17 = t14a
+ dbutterfly_n v26, v30, v10, v11, v14, v15, v8, v9, v14, v15 // v26 = t11a, v30 = t15a
+
+ butterfly_4s_r v6, v25, v18, v25 // v6 = t6, v25 = t2
+ butterfly_4s_r v7, v22, v29, v22 // v7 = t7, v22 = t3
+
+ dmbutterfly_l v10, v11, v8, v9, v19, v28, v0.s[2], v0.s[3] // v10,v11 = t13, v8,v9 = t12
+ dmbutterfly_l v12, v13, v14, v15, v30, v17, v0.s[3], v0.s[2] // v12,v13 = t14, v14,v15 = t15
+
+ dbutterfly_n v18, v30, v8, v9, v12, v13, v16, v17, v12, v13 // v18 = out[2], v30 = t14a
+ dbutterfly_n v29, v17, v10, v11, v14, v15, v12, v13, v14, v15 // v29 = -out[13], v17 = t15a
+ neg v29.4s, v29.4s // v29 = out[13]
+
+ dmbutterfly_l v10, v11, v8, v9, v4, v5, v0.s[2], v0.s[3] // v10,v11 = t5a, v8,v9 = t4a
+ dmbutterfly_l v12, v13, v14, v15, v7, v6, v0.s[3], v0.s[2] // v12,v13 = t6a, v14,v15 = t7a
+
+ butterfly_4s v2, v6, v27, v25 // v2 = out[0], v6 = t2a
+ butterfly_4s v3, v7, v23, v21 // v3 =-out[1], v7 = t10
+
+ dbutterfly_n v19, v31, v8, v9, v12, v13, v4, v5, v8, v9 // v19 = -out[3], v31 = t6
+ neg v19.4s, v19.4s // v19 = out[3]
+ dbutterfly_n v28, v16, v10, v11, v14, v15, v4, v5, v10, v11 // v28 = out[12], v16 = t7
+
+ butterfly_4s v5, v8, v20, v22 // v5 =-out[15],v8 = t3a
+ butterfly_4s v4, v9, v24, v26 // v4 = out[14],v9 = t11
+
+ dmbutterfly0 v23, v24, v6, v8, v10, v11, v12, v13, v14, v15, 1 // v23 = out[7], v24 = out[8]
+ dmbutterfly0 v21, v26, v30, v17, v10, v11, v12, v13, v14, v15, 1 // v21 = out[5], v26 = out[10]
+ dmbutterfly0 v20, v27, v16, v31, v10, v11, v12, v13, v14, v15 // v20 = out[4], v27 = out[11]
+ dmbutterfly0 v22, v25, v9, v7, v10, v11, v12, v13, v14, v15 // v22 = out[6], v25 = out[9]
+
+ neg v31.4s, v5.4s // v31 = out[15]
+ neg v17.4s, v3.4s // v17 = out[1]
+
+ mov v16.16b, v2.16b
+ mov v30.16b, v4.16b
+ ret
+endfunc
+
+// Helper macros; we can't use these expressions directly within
+// e.g. .irp due to the extra concatenation \(). Therefore wrap
+// them in macros to allow using .irp below.
+.macro load i, src, inc
+ ld1 {v\i\().4s}, [\src], \inc
+.endm
+.macro store i, dst, inc
+ st1 {v\i\().4s}, [\dst], \inc
+.endm
+.macro movi_v i, size, imm
+ movi v\i\()\size, \imm
+.endm
+.macro load_clear i, src, inc
+ ld1 {v\i\().4s}, [\src]
+ st1 {v4.4s}, [\src], \inc
+.endm
+
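+// Round the 32 bit coefficients in \coef0-\coef7 down by 6 bits, add them to
+// 16 bit destination pixels loaded alternately via x0 and x3 (four pixels per
+// row), saturate to unsigned and clamp against the max pixel value in v8.8h,
+// then store the rows back.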
+.macro load_add_store coef0, coef1, coef2, coef3, coef4, coef5, coef6, coef7
+ srshr \coef0, \coef0, #6
+ ld1 {v4.4h}, [x0], x1
+ srshr \coef1, \coef1, #6
+ ld1 {v4.d}[1], [x3], x1
+ srshr \coef2, \coef2, #6
+ ld1 {v5.4h}, [x0], x1
+ srshr \coef3, \coef3, #6
+ uaddw \coef0, \coef0, v4.4h
+ ld1 {v5.d}[1], [x3], x1
+ srshr \coef4, \coef4, #6
+ uaddw2 \coef1, \coef1, v4.8h
+ ld1 {v6.4h}, [x0], x1
+ srshr \coef5, \coef5, #6
+ uaddw \coef2, \coef2, v5.4h
+ ld1 {v6.d}[1], [x3], x1
+ sqxtun v4.4h, \coef0
+ srshr \coef6, \coef6, #6
+ uaddw2 \coef3, \coef3, v5.8h
+ ld1 {v7.4h}, [x0], x1
+ sqxtun2 v4.8h, \coef1
+ srshr \coef7, \coef7, #6
+ uaddw \coef4, \coef4, v6.4h
+ ld1 {v7.d}[1], [x3], x1
+ umin v4.8h, v4.8h, v8.8h
+ sub x0, x0, x1, lsl #2
+ sub x3, x3, x1, lsl #2
+ sqxtun v5.4h, \coef2
+ uaddw2 \coef5, \coef5, v6.8h
+ st1 {v4.4h}, [x0], x1
+ sqxtun2 v5.8h, \coef3
+ uaddw \coef6, \coef6, v7.4h
+ st1 {v4.d}[1], [x3], x1
+ umin v5.8h, v5.8h, v8.8h
+ sqxtun v6.4h, \coef4
+ uaddw2 \coef7, \coef7, v7.8h
+ st1 {v5.4h}, [x0], x1
+ sqxtun2 v6.8h, \coef5
+ st1 {v5.d}[1], [x3], x1
+ umin v6.8h, v6.8h, v8.8h
+ sqxtun v7.4h, \coef6
+ st1 {v6.4h}, [x0], x1
+ sqxtun2 v7.8h, \coef7
+ st1 {v6.d}[1], [x3], x1
+ umin v7.8h, v7.8h, v8.8h
+ st1 {v7.4h}, [x0], x1
+ st1 {v7.d}[1], [x3], x1
+.endm
+
+// Read a vertical 4x16 slice out of a 16x16 matrix, do a transform on it,
+// transpose into a horizontal 16x4 slice and store.
+// x0 = dst (temp buffer)
+// x1 = slice offset
+// x2 = src
+// x9 = input stride
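+//
+// As a rough scalar model of this first pass (illustrative C only, not part
+// of this file; transform1d and pass1 are our names, standing in for
+// idct16/iadst16 and this function):
+//
+//   void pass1(int32_t *tmp, int32_t *src, int col)
+//   {
+//       int32_t in[16], out[16];
+//       for (int i = 0; i < 16; i++) {   // read one column and clear it
+//           in[i] = src[i * 16 + col];
+//           src[i * 16 + col] = 0;
+//       }
+//       transform1d(out, in);            // idct16 or iadst16
+//       for (int i = 0; i < 16; i++)     // transposed store: column becomes row
+//           tmp[col * 16 + i] = out[i];
+//   }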
+.macro itxfm16_1d_funcs txfm
+function \txfm\()16_1d_4x16_pass1_neon
+ mov x14, x30
+
+ movi v4.4s, #0
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ load_clear \i, x2, x9
+.endr
+
+ bl \txfm\()16
+
+ // Do four 4x4 transposes. Originally, v16-v31 contain the
+ // 16 rows. Afterwards, v16-v19, v20-v23, v24-v27 and v28-v31
+ // contain the four transposed 4x4 blocks.
+ transpose_4x4s v16, v17, v18, v19, v4, v5, v6, v7
+ transpose_4x4s v20, v21, v22, v23, v4, v5, v6, v7
+ transpose_4x4s v24, v25, v26, v27, v4, v5, v6, v7
+ transpose_4x4s v28, v29, v30, v31, v4, v5, v6, v7
+
+ // Store the transposed 4x4 blocks horizontally.
+ cmp x1, #12
+ b.eq 1f
+.irp i, 16, 20, 24, 28, 17, 21, 25, 29, 18, 22, 26, 30, 19, 23, 27, 31
+ store \i, x0, #16
+.endr
+ ret x14
+1:
+ // Special case: For the last input column (x1 == 12),
+ // which would be stored as the last row in the temp buffer,
+ // don't store the first 4x4 block, but keep it in registers
+ // for the first slice of the second pass (where it is the
+ // last 4x4 block).
+ add x0, x0, #16
+ st1 {v20.4s}, [x0], #16
+ st1 {v24.4s}, [x0], #16
+ st1 {v28.4s}, [x0], #16
+ add x0, x0, #16
+ st1 {v21.4s}, [x0], #16
+ st1 {v25.4s}, [x0], #16
+ st1 {v29.4s}, [x0], #16
+ add x0, x0, #16
+ st1 {v22.4s}, [x0], #16
+ st1 {v26.4s}, [x0], #16
+ st1 {v30.4s}, [x0], #16
+ add x0, x0, #16
+ st1 {v23.4s}, [x0], #16
+ st1 {v27.4s}, [x0], #16
+ st1 {v31.4s}, [x0], #16
+
+ mov v28.16b, v16.16b
+ mov v29.16b, v17.16b
+ mov v30.16b, v18.16b
+ mov v31.16b, v19.16b
+ ret x14
+endfunc
+
+// Read a vertical 4x16 slice out of a 16x16 matrix, do a transform on it,
+// load the destination pixels (from a similar 4x16 slice), add and store back.
+// x0 = dst
+// x1 = dst stride
+// x2 = src (temp buffer)
+// x3 = slice offset
+// x9 = temp buffer stride
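+//
+// Scalar model of this second pass (illustrative C only, our naming; stride
+// is in pixels here, and max is (1 << bpp) - 1, i.e. the value duplicated
+// into v8 from w13):
+//
+//   void pass2(uint16_t *dst, ptrdiff_t stride, const int32_t *tmp,
+//              int col, int max)
+//   {
+//       int32_t in[16], out[16];
+//       for (int i = 0; i < 16; i++)     // read one column of the temp buffer
+//           in[i] = tmp[i * 16 + col];
+//       transform1d(out, in);            // idct16 or iadst16
+//       for (int i = 0; i < 16; i++) {   // add into dst, round and clamp
+//           int v = dst[i * stride + col] + ((out[i] + 32) >> 6);
+//           dst[i * stride + col] = v < 0 ? 0 : v > max ? max : v;
+//       }
+//   }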
+function \txfm\()16_1d_4x16_pass2_neon
+ mov x14, x30
+
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27
+ load \i, x2, x9
+.endr
+ cbz x3, 1f
+.irp i, 28, 29, 30, 31
+ load \i, x2, x9
+.endr
+1:
+
+ add x3, x0, x1
+ lsl x1, x1, #1
+ bl \txfm\()16
+
+ dup v8.8h, w13
+ load_add_store v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
+ load_add_store v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
+
+ ret x14
+endfunc
+.endm
+
+itxfm16_1d_funcs idct
+itxfm16_1d_funcs iadst
+
+// This is the minimum eob value for each subpartition, in increments of 4
+const min_eob_idct_idct_16, align=4
+ .short 0, 10, 38, 89
+endconst
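+
+// The thresholds above drive how much of the transform is pruned: eob is the
+// number of nonzero coefficients in scan order, and a small eob guarantees
+// that the later 4-column slices are all zero, so their first pass can be
+// skipped. The same values (10, 38) also select the quarter/half code paths
+// in the 16x16 function below. Roughly (illustrative C, our naming):
+//
+//   static const int16_t min_eob[4] = { 0, 10, 38, 89 };
+//   int slices_to_transform(int eob)
+//   {
+//       for (int i = 1; i < 4; i++)
+//           if (eob <= min_eob[i])
+//               return i;    // only the first i 4-column slices are nonzero
+//       return 4;
+//   }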
+
+.macro itxfm_func16x16 txfm1, txfm2
+function vp9_\txfm1\()_\txfm2\()_16x16_add_16_neon
+.ifc \txfm1\()_\txfm2,idct_idct
+ cmp w3, #1
+ b.eq idct16x16_dc_add_neon
+.endif
+ mov x15, x30
+ // iadst16 requires clobbering v8-v15, idct16 only clobbers v8-v9.
+.ifnc \txfm1\()_\txfm2,idct_idct
+ stp d14, d15, [sp, #-0x10]!
+ stp d12, d13, [sp, #-0x10]!
+ stp d10, d11, [sp, #-0x10]!
+.endif
+ stp d8, d9, [sp, #-0x10]!
+
+ sub sp, sp, #1024
+
+ mov x4, x0
+ mov x5, x1
+ mov x6, x2
+
+ movrel x10, idct_coeffs
+.ifnc \txfm1\()_\txfm2,idct_idct
+ movrel x11, iadst16_coeffs
+.endif
+.ifc \txfm1,idct
+ ld1 {v0.8h,v1.8h}, [x10]
+ sxtl v2.4s, v1.4h
+ sxtl2 v3.4s, v1.8h
+ sxtl2 v1.4s, v0.8h
+ sxtl v0.4s, v0.4h
+.endif
+ mov x9, #64
+
+.ifc \txfm1\()_\txfm2,idct_idct
+ cmp w3, #10
+ b.le idct16x16_quarter_add_16_neon
+ cmp w3, #38
+ b.le idct16x16_half_add_16_neon
+
+ movrel x12, min_eob_idct_idct_16, 2
+.endif
+
+.irp i, 0, 4, 8, 12
+ add x0, sp, #(\i*64)
+.ifc \txfm1\()_\txfm2,idct_idct
+.if \i > 0
+ ldrh w1, [x12], #2
+ cmp w3, w1
+ mov x1, #(16 - \i)/4
+ b.le 1f
+.endif
+.endif
+ mov x1, #\i
+ add x2, x6, #(\i*4)
+ bl \txfm1\()16_1d_4x16_pass1_neon
+.endr
+.ifc \txfm1\()_\txfm2,iadst_idct
+ ld1 {v0.8h,v1.8h}, [x10]
+ sxtl v2.4s, v1.4h
+ sxtl2 v3.4s, v1.8h
+ sxtl2 v1.4s, v0.8h
+ sxtl v0.4s, v0.4h
+.endif
+
+.ifc \txfm1\()_\txfm2,idct_idct
+ b 3f
+1:
+ // Set v28-v31 to zero, for the in-register passthrough of
+ // coefficients to pass 2.
+ movi v28.4s, #0
+ movi v29.4s, #0
+ movi v30.4s, #0
+ movi v31.4s, #0
+2:
+ subs x1, x1, #1
+.rept 4
+ st1 {v28.4s,v29.4s,v30.4s,v31.4s}, [x0], x9
+.endr
+ b.ne 2b
+3:
+.endif
+
+.irp i, 0, 4, 8, 12
+ add x0, x4, #(\i*2)
+ mov x1, x5
+ add x2, sp, #(\i*4)
+ mov x3, #\i
+ bl \txfm2\()16_1d_4x16_pass2_neon
+.endr
+
+ add sp, sp, #1024
+ ldp d8, d9, [sp], 0x10
+.ifnc \txfm1\()_\txfm2,idct_idct
+ ldp d10, d11, [sp], 0x10
+ ldp d12, d13, [sp], 0x10
+ ldp d14, d15, [sp], 0x10
+.endif
+ ret x15
+endfunc
+
+function ff_vp9_\txfm1\()_\txfm2\()_16x16_add_10_neon, export=1
+ mov x13, #0x03ff
+ b vp9_\txfm1\()_\txfm2\()_16x16_add_16_neon
+endfunc
+
+function ff_vp9_\txfm1\()_\txfm2\()_16x16_add_12_neon, export=1
+ mov x13, #0x0fff
+ b vp9_\txfm1\()_\txfm2\()_16x16_add_16_neon
+endfunc
+.endm
+
+itxfm_func16x16 idct, idct
+itxfm_func16x16 iadst, idct
+itxfm_func16x16 idct, iadst
+itxfm_func16x16 iadst, iadst
+
+function idct16_1d_4x16_pass1_quarter_neon
+ mov x14, x30
+
+ movi v4.4s, #0
+.irp i, 16, 17, 18, 19
+ load_clear \i, x2, x9
+.endr
+
+ bl idct16_quarter
+
+ // Do four 4x4 transposes. Originally, v16-v31 contain the
+ // 16 rows. Afterwards, v16-v19, v20-v23, v24-v27 and v28-v31
+ // contain the four transposed 4x4 blocks.
+ transpose_4x4s v16, v17, v18, v19, v4, v5, v6, v7
+ transpose_4x4s v20, v21, v22, v23, v4, v5, v6, v7
+ transpose_4x4s v24, v25, v26, v27, v4, v5, v6, v7
+ transpose_4x4s v28, v29, v30, v31, v4, v5, v6, v7
+
+ // Store the transposed 4x4 blocks horizontally.
+ // The first 4x4 block is kept in registers for the second pass,
+ // store the rest in the temp buffer.
+ add x0, x0, #16
+ st1 {v20.4s}, [x0], #16
+ st1 {v24.4s}, [x0], #16
+ st1 {v28.4s}, [x0], #16
+ add x0, x0, #16
+ st1 {v21.4s}, [x0], #16
+ st1 {v25.4s}, [x0], #16
+ st1 {v29.4s}, [x0], #16
+ add x0, x0, #16
+ st1 {v22.4s}, [x0], #16
+ st1 {v26.4s}, [x0], #16
+ st1 {v30.4s}, [x0], #16
+ add x0, x0, #16
+ st1 {v23.4s}, [x0], #16
+ st1 {v27.4s}, [x0], #16
+ st1 {v31.4s}, [x0], #16
+ ret x14
+endfunc
+
+function idct16_1d_4x16_pass2_quarter_neon
+ mov x14, x30
+
+ // Only load the top 4 lines, and only do it for the later slices.
+        // For the first slice, v16-v19 are kept in registers from the first pass.
+ cbz x3, 1f
+.irp i, 16, 17, 18, 19
+ load \i, x2, x9
+.endr
+1:
+
+ add x3, x0, x1
+ lsl x1, x1, #1
+ bl idct16_quarter
+
+ dup v8.8h, w13
+ load_add_store v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
+ load_add_store v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
+
+ ret x14
+endfunc
+
+function idct16_1d_4x16_pass1_half_neon
+ mov x14, x30
+
+ movi v4.4s, #0
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load_clear \i, x2, x9
+.endr
+
+ bl idct16_half
+
+ // Do four 4x4 transposes. Originally, v16-v31 contain the
+ // 16 rows. Afterwards, v16-v19, v20-v23, v24-v27 and v28-v31
+ // contain the four transposed 4x4 blocks.
+ transpose_4x4s v16, v17, v18, v19, v4, v5, v6, v7
+ transpose_4x4s v20, v21, v22, v23, v4, v5, v6, v7
+ transpose_4x4s v24, v25, v26, v27, v4, v5, v6, v7
+ transpose_4x4s v28, v29, v30, v31, v4, v5, v6, v7
+
+ // Store the transposed 4x4 blocks horizontally.
+ cmp x1, #4
+ b.eq 1f
+.irp i, 16, 20, 24, 28, 17, 21, 25, 29, 18, 22, 26, 30, 19, 23, 27, 31
+ store \i, x0, #16
+.endr
+ ret x14
+1:
+        // Special case: For the second input column (x1 == 4),
+ // which would be stored as the second row in the temp buffer,
+ // don't store the first 4x4 block, but keep it in registers
+ // for the first slice of the second pass (where it is the
+ // second 4x4 block).
+ add x0, x0, #16
+ st1 {v20.4s}, [x0], #16
+ st1 {v24.4s}, [x0], #16
+ st1 {v28.4s}, [x0], #16
+ add x0, x0, #16
+ st1 {v21.4s}, [x0], #16
+ st1 {v25.4s}, [x0], #16
+ st1 {v29.4s}, [x0], #16
+ add x0, x0, #16
+ st1 {v22.4s}, [x0], #16
+ st1 {v26.4s}, [x0], #16
+ st1 {v30.4s}, [x0], #16
+ add x0, x0, #16
+ st1 {v23.4s}, [x0], #16
+ st1 {v27.4s}, [x0], #16
+ st1 {v31.4s}, [x0], #16
+
+ mov v20.16b, v16.16b
+ mov v21.16b, v17.16b
+ mov v22.16b, v18.16b
+ mov v23.16b, v19.16b
+ ret x14
+endfunc
+
+function idct16_1d_4x16_pass2_half_neon
+ mov x14, x30
+
+.irp i, 16, 17, 18, 19
+ load \i, x2, x9
+.endr
+ cbz x3, 1f
+.irp i, 20, 21, 22, 23
+ load \i, x2, x9
+.endr
+1:
+
+ add x3, x0, x1
+ lsl x1, x1, #1
+ bl idct16_half
+
+ dup v8.8h, w13
+ load_add_store v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
+ load_add_store v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
+
+ ret x14
+endfunc
+
+.macro idct16_partial size
+function idct16x16_\size\()_add_16_neon
+ add x0, sp, #(0*64)
+ mov x1, #0
+ add x2, x6, #(0*4)
+ bl idct16_1d_4x16_pass1_\size\()_neon
+.ifc \size,half
+ add x0, sp, #(4*64)
+ mov x1, #4
+ add x2, x6, #(4*4)
+ bl idct16_1d_4x16_pass1_\size\()_neon
+.endif
+
+.irp i, 0, 4, 8, 12
+ add x0, x4, #(\i*2)
+ mov x1, x5
+ add x2, sp, #(\i*4)
+ mov x3, #\i
+ bl idct16_1d_4x16_pass2_\size\()_neon
+.endr
+
+ add sp, sp, #1024
+ ldp d8, d9, [sp], 0x10
+ ret x15
+endfunc
+.endm
+
+idct16_partial quarter
+idct16_partial half
+
+function idct32x32_dc_add_neon
+ movrel x4, idct_coeffs
+ ld1 {v0.4h}, [x4]
+ sxtl v0.4s, v0.4h
+
+ movi v1.4h, #0
+
+ ld1 {v2.s}[0], [x2]
+ smull v2.2d, v2.2s, v0.s[0]
+ rshrn v2.2s, v2.2d, #14
+ smull v2.2d, v2.2s, v0.s[0]
+ rshrn v2.2s, v2.2d, #14
+ st1 {v1.s}[0], [x2]
+ dup v2.4s, v2.s[0]
+
+ srshr v0.4s, v2.4s, #6
+
+ mov x3, x0
+ mov x4, #32
+ sub x1, x1, #32
+ dup v31.8h, w13
+1:
+ // Loop to add the constant v0 into all 32x32 outputs
+ subs x4, x4, #1
+ ld1 {v1.8h,v2.8h}, [x0], #32
+ uaddw v16.4s, v0.4s, v1.4h
+ uaddw2 v17.4s, v0.4s, v1.8h
+ ld1 {v3.8h,v4.8h}, [x0], x1
+ uaddw v18.4s, v0.4s, v2.4h
+ uaddw2 v19.4s, v0.4s, v2.8h
+ uaddw v20.4s, v0.4s, v3.4h
+ uaddw2 v21.4s, v0.4s, v3.8h
+ uaddw v22.4s, v0.4s, v4.4h
+ uaddw2 v23.4s, v0.4s, v4.8h
+ sqxtun v1.4h, v16.4s
+ sqxtun2 v1.8h, v17.4s
+ sqxtun v2.4h, v18.4s
+ sqxtun2 v2.8h, v19.4s
+ sqxtun v3.4h, v20.4s
+ sqxtun2 v3.8h, v21.4s
+ sqxtun v4.4h, v22.4s
+ sqxtun2 v4.8h, v23.4s
+ umin v1.8h, v1.8h, v31.8h
+ umin v2.8h, v2.8h, v31.8h
+ st1 {v1.8h,v2.8h}, [x3], #32
+ umin v3.8h, v3.8h, v31.8h
+ umin v4.8h, v4.8h, v31.8h
+ st1 {v3.8h,v4.8h}, [x3], x1
+ b.ne 1b
+
+ ret
+endfunc
+
+.macro idct32_end
+ butterfly_4s v16, v5, v4, v5 // v16 = t16a, v5 = t19a
+ butterfly_4s v17, v20, v23, v20 // v17 = t17, v20 = t18
+ butterfly_4s v18, v6, v7, v6 // v18 = t23a, v6 = t20a
+ butterfly_4s v19, v21, v22, v21 // v19 = t22, v21 = t21
+ butterfly_4s v4, v28, v28, v30 // v4 = t24a, v28 = t27a
+ butterfly_4s v23, v26, v25, v26 // v23 = t25, v26 = t26
+        butterfly_4s    v7,  v8,  v29, v31             // v7  = t31a, v8  = t28a
+ butterfly_4s v22, v27, v24, v27 // v22 = t30, v27 = t29
+
+ dmbutterfly v27, v20, v0.s[2], v0.s[3], v24, v25, v30, v31 // v27 = t18a, v20 = t29a
+        dmbutterfly     v8,  v5,  v0.s[2], v0.s[3], v24, v25, v30, v31        // v8  = t19,  v5  = t28
+ dmbutterfly v28, v6, v0.s[2], v0.s[3], v24, v25, v30, v31, neg=1 // v28 = t27, v6 = t20
+ dmbutterfly v26, v21, v0.s[2], v0.s[3], v24, v25, v30, v31, neg=1 // v26 = t26a, v21 = t21a
+
+ butterfly_4s v31, v24, v7, v4 // v31 = t31, v24 = t24
+ butterfly_4s v30, v25, v22, v23 // v30 = t30a, v25 = t25a
+ butterfly_4s_r v23, v16, v16, v18 // v23 = t23, v16 = t16
+ butterfly_4s_r v22, v17, v17, v19 // v22 = t22a, v17 = t17a
+ butterfly_4s v18, v21, v27, v21 // v18 = t18, v21 = t21
+ butterfly_4s_r v27, v28, v5, v28 // v27 = t27a, v28 = t28a
+ butterfly_4s v29, v26, v20, v26 // v29 = t29, v26 = t26
+ butterfly_4s v19, v20, v8, v6 // v19 = t19a, v20 = t20
+
+ dmbutterfly0 v27, v20, v27, v20, v4, v5, v6, v7, v8, v9 // v27 = t27, v20 = t20
+ dmbutterfly0 v26, v21, v26, v21, v4, v5, v6, v7, v8, v9 // v26 = t26a, v21 = t21a
+ dmbutterfly0 v25, v22, v25, v22, v4, v5, v6, v7, v8, v9 // v25 = t25, v22 = t22
+ dmbutterfly0 v24, v23, v24, v23, v4, v5, v6, v7, v8, v9 // v24 = t24a, v23 = t23a
+ ret
+.endm
+
+function idct32_odd
+ dmbutterfly v16, v31, v10.s[0], v10.s[1], v4, v5, v6, v7 // v16 = t16a, v31 = t31a
+ dmbutterfly v24, v23, v10.s[2], v10.s[3], v4, v5, v6, v7 // v24 = t17a, v23 = t30a
+ dmbutterfly v20, v27, v11.s[0], v11.s[1], v4, v5, v6, v7 // v20 = t18a, v27 = t29a
+ dmbutterfly v28, v19, v11.s[2], v11.s[3], v4, v5, v6, v7 // v28 = t19a, v19 = t28a
+ dmbutterfly v18, v29, v12.s[0], v12.s[1], v4, v5, v6, v7 // v18 = t20a, v29 = t27a
+ dmbutterfly v26, v21, v12.s[2], v12.s[3], v4, v5, v6, v7 // v26 = t21a, v21 = t26a
+ dmbutterfly v22, v25, v13.s[0], v13.s[1], v4, v5, v6, v7 // v22 = t22a, v25 = t25a
+ dmbutterfly v30, v17, v13.s[2], v13.s[3], v4, v5, v6, v7 // v30 = t23a, v17 = t24a
+
+ butterfly_4s v4, v24, v16, v24 // v4 = t16, v24 = t17
+ butterfly_4s v5, v20, v28, v20 // v5 = t19, v20 = t18
+ butterfly_4s v6, v26, v18, v26 // v6 = t20, v26 = t21
+ butterfly_4s v7, v22, v30, v22 // v7 = t23, v22 = t22
+ butterfly_4s v28, v25, v17, v25 // v28 = t24, v25 = t25
+ butterfly_4s v30, v21, v29, v21 // v30 = t27, v21 = t26
+ butterfly_4s v29, v23, v31, v23 // v29 = t31, v23 = t30
+ butterfly_4s v31, v27, v19, v27 // v31 = t28, v27 = t29
+
+ dmbutterfly v23, v24, v1.s[0], v1.s[1], v16, v17, v18, v19 // v23 = t17a, v24 = t30a
+ dmbutterfly v27, v20, v1.s[0], v1.s[1], v16, v17, v18, v19, neg=1 // v27 = t29a, v20 = t18a
+ dmbutterfly v21, v26, v1.s[2], v1.s[3], v16, v17, v18, v19 // v21 = t21a, v26 = t26a
+ dmbutterfly v25, v22, v1.s[2], v1.s[3], v16, v17, v18, v19, neg=1 // v25 = t25a, v22 = t22a
+ idct32_end
+endfunc
+
+function idct32_odd_half
+ dmbutterfly_h1 v16, v31, v10.s[0], v10.s[1], v4, v5, v6, v7 // v16 = t16a, v31 = t31a
+ dmbutterfly_h2 v24, v23, v10.s[2], v10.s[3], v4, v5, v6, v7 // v24 = t17a, v23 = t30a
+ dmbutterfly_h1 v20, v27, v11.s[0], v11.s[1], v4, v5, v6, v7 // v20 = t18a, v27 = t29a
+ dmbutterfly_h2 v28, v19, v11.s[2], v11.s[3], v4, v5, v6, v7 // v28 = t19a, v19 = t28a
+ dmbutterfly_h1 v18, v29, v12.s[0], v12.s[1], v4, v5, v6, v7 // v18 = t20a, v29 = t27a
+ dmbutterfly_h2 v26, v21, v12.s[2], v12.s[3], v4, v5, v6, v7 // v26 = t21a, v21 = t26a
+ dmbutterfly_h1 v22, v25, v13.s[0], v13.s[1], v4, v5, v6, v7 // v22 = t22a, v25 = t25a
+ dmbutterfly_h2 v30, v17, v13.s[2], v13.s[3], v4, v5, v6, v7 // v30 = t23a, v17 = t24a
+
+ butterfly_4s v4, v24, v16, v24 // v4 = t16, v24 = t17
+ butterfly_4s v5, v20, v28, v20 // v5 = t19, v20 = t18
+ butterfly_4s v6, v26, v18, v26 // v6 = t20, v26 = t21
+ butterfly_4s v7, v22, v30, v22 // v7 = t23, v22 = t22
+ butterfly_4s v28, v25, v17, v25 // v28 = t24, v25 = t25
+ butterfly_4s v30, v21, v29, v21 // v30 = t27, v21 = t26
+ butterfly_4s v29, v23, v31, v23 // v29 = t31, v23 = t30
+ butterfly_4s v31, v27, v19, v27 // v31 = t28, v27 = t29
+
+ dmbutterfly v23, v24, v1.s[0], v1.s[1], v16, v17, v18, v19 // v23 = t17a, v24 = t30a
+ dmbutterfly v27, v20, v1.s[0], v1.s[1], v16, v17, v18, v19, neg=1 // v27 = t29a, v20 = t18a
+ dmbutterfly v21, v26, v1.s[2], v1.s[3], v16, v17, v18, v19 // v21 = t21a, v26 = t26a
+ dmbutterfly v25, v22, v1.s[2], v1.s[3], v16, v17, v18, v19, neg=1 // v25 = t25a, v22 = t22a
+ idct32_end
+endfunc
+
+function idct32_odd_quarter
+ dsmull_h v4, v5, v16, v10.s[0]
+ dsmull_h v28, v29, v19, v11.s[3]
+ dsmull_h v30, v31, v16, v10.s[1]
+ dsmull_h v22, v23, v17, v13.s[2]
+ dsmull_h v7, v6, v17, v13.s[3]
+ dsmull_h v26, v27, v19, v11.s[2]
+ dsmull_h v20, v21, v18, v12.s[0]
+ dsmull_h v24, v25, v18, v12.s[1]
+
+ neg v28.2d, v28.2d
+ neg v29.2d, v29.2d
+ neg v7.2d, v7.2d
+ neg v6.2d, v6.2d
+
+ drshrn_h v4, v4, v5, #14
+ drshrn_h v5, v28, v29, #14
+ drshrn_h v29, v30, v31, #14
+ drshrn_h v28, v22, v23, #14
+ drshrn_h v7, v7, v6, #14
+ drshrn_h v31, v26, v27, #14
+ drshrn_h v6, v20, v21, #14
+ drshrn_h v30, v24, v25, #14
+
+ dmbutterfly_l v16, v17, v18, v19, v29, v4, v1.s[0], v1.s[1]
+ dmbutterfly_l v27, v26, v20, v21, v31, v5, v1.s[0], v1.s[1]
+ drshrn_h v23, v16, v17, #14
+ drshrn_h v24, v18, v19, #14
+ neg v20.2d, v20.2d
+ neg v21.2d, v21.2d
+ drshrn_h v27, v27, v26, #14
+ drshrn_h v20, v20, v21, #14
+ dmbutterfly_l v16, v17, v18, v19, v30, v6, v1.s[2], v1.s[3]
+ drshrn_h v21, v16, v17, #14
+ drshrn_h v26, v18, v19, #14
+ dmbutterfly_l v16, v17, v18, v19, v28, v7, v1.s[2], v1.s[3]
+ drshrn_h v25, v16, v17, #14
+ neg v18.2d, v18.2d
+ neg v19.2d, v19.2d
+ drshrn_h v22, v18, v19, #14
+
+ idct32_end
+endfunc
+
+.macro idct32_funcs suffix
+// Do a 32-point IDCT of a 4x32 slice out of a 32x32 matrix.
+// The 32-point IDCT can be decomposed into two 16-point IDCTs;
+// a normal IDCT16 with every other input component (the even ones, with
+// each output written twice), followed by a separate 16-point IDCT
+// of the odd inputs, added/subtracted onto the outputs of the first idct16.
+// x0 = dst (temp buffer)
+// x1 = unused
+// x2 = src
+// x9 = double input stride
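+//
+// A scalar sketch of that even/odd split (illustrative C only; idct16_1d and
+// idct32_odd_1d are hypothetical scalar helpers for the two halves):
+//
+//   void idct32_1d(int32_t *out, const int32_t *in)
+//   {
+//       int32_t even[16], odd[16], e[16], o[16];
+//       for (int i = 0; i < 16; i++) {
+//           even[i] = in[2 * i];
+//           odd[i]  = in[2 * i + 1];
+//       }
+//       idct16_1d(e, even);
+//       idct32_odd_1d(o, odd);
+//       for (int i = 0; i < 16; i++) {
+//           out[i]      = e[i] + o[i];    // first half: even + odd
+//           out[31 - i] = e[i] - o[i];    // mirrored half: even - odd
+//       }
+//   }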
+function idct32_1d_4x32_pass1\suffix\()_neon
+ mov x14, x30
+
+ movi v4.4s, #0
+
+ // v16 = IN(0), v17 = IN(2) ... v31 = IN(30)
+.ifb \suffix
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ load_clear \i, x2, x9
+.endr
+.endif
+.ifc \suffix,_quarter
+.irp i, 16, 17, 18, 19
+ load_clear \i, x2, x9
+.endr
+.endif
+.ifc \suffix,_half
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load_clear \i, x2, x9
+.endr
+.endif
+
+ bl idct16\suffix
+
+ // Do four 4x4 transposes. Originally, v16-v31 contain the
+ // 16 rows. Afterwards, v16-v19, v20-v23, v24-v27 and v28-v31
+ // contain the four transposed 4x4 blocks.
+ transpose_4x4s v16, v17, v18, v19, v4, v5, v6, v7
+ transpose_4x4s v20, v21, v22, v23, v4, v5, v6, v7
+ transpose_4x4s v24, v25, v26, v27, v4, v5, v6, v7
+ transpose_4x4s v28, v29, v30, v31, v4, v5, v6, v7
+
+ // Store the registers a, b, c, d horizontally, followed by the
+ // same registers d, c, b, a mirrored.
+.macro store_rev a, b, c, d
+ // There's no rev128 instruction, but we reverse each 64 bit
+ // half, and then flip them using an ext with 8 bytes offset.
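+        // For example, with 32 bit lanes {0,1,2,3}: rev64 gives {1,0,3,2},
+        // and the ext by 8 bytes then swaps the halves, giving {3,2,1,0}.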
+ rev64 v7.4s, \d
+ st1 {\a}, [x0], #16
+ ext v7.16b, v7.16b, v7.16b, #8
+ st1 {\b}, [x0], #16
+ rev64 v6.4s, \c
+ st1 {\c}, [x0], #16
+ ext v6.16b, v6.16b, v6.16b, #8
+ st1 {\d}, [x0], #16
+ rev64 v5.4s, \b
+ st1 {v7.4s}, [x0], #16
+ ext v5.16b, v5.16b, v5.16b, #8
+ st1 {v6.4s}, [x0], #16
+ rev64 v4.4s, \a
+ st1 {v5.4s}, [x0], #16
+ ext v4.16b, v4.16b, v4.16b, #8
+ st1 {v4.4s}, [x0], #16
+.endm
+ store_rev v16.4s, v20.4s, v24.4s, v28.4s
+ store_rev v17.4s, v21.4s, v25.4s, v29.4s
+ store_rev v18.4s, v22.4s, v26.4s, v30.4s
+ store_rev v19.4s, v23.4s, v27.4s, v31.4s
+ sub x0, x0, #512
+.purgem store_rev
+
+ // Move x2 back to the start of the input, and move
+ // to the first odd row
+.ifb \suffix
+ sub x2, x2, x9, lsl #4
+.endif
+.ifc \suffix,_quarter
+ sub x2, x2, x9, lsl #2
+.endif
+.ifc \suffix,_half
+ sub x2, x2, x9, lsl #3
+.endif
+ add x2, x2, #128
+
+ movi v4.4s, #0
+ // v16 = IN(1), v17 = IN(3) ... v31 = IN(31)
+.ifb \suffix
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ load_clear \i, x2, x9
+.endr
+.endif
+.ifc \suffix,_quarter
+.irp i, 16, 17, 18, 19
+ load_clear \i, x2, x9
+.endr
+.endif
+.ifc \suffix,_half
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load_clear \i, x2, x9
+.endr
+.endif
+
+ bl idct32_odd\suffix
+
+ transpose_4x4s v31, v30, v29, v28, v4, v5, v6, v7
+ transpose_4x4s v27, v26, v25, v24, v4, v5, v6, v7
+ transpose_4x4s v23, v22, v21, v20, v4, v5, v6, v7
+ transpose_4x4s v19, v18, v17, v16, v4, v5, v6, v7
+
+// Store the registers a, b, c, d horizontally, added into the
+// output first, followed by the mirrored registers d, c, b, a,
+// subtracted from the output.
+.macro store_rev a, b, c, d, a16b, b16b
+ ld1 {v4.4s}, [x0]
+ rev64 v9.4s, \d
+ add v4.4s, v4.4s, \a
+ st1 {v4.4s}, [x0], #16
+ rev64 v8.4s, \c
+ ld1 {v4.4s}, [x0]
+ ext v9.16b, v9.16b, v9.16b, #8
+ add v4.4s, v4.4s, \b
+ st1 {v4.4s}, [x0], #16
+ ext v8.16b, v8.16b, v8.16b, #8
+ ld1 {v4.4s}, [x0]
+ rev64 \b, \b
+ add v4.4s, v4.4s, \c
+ st1 {v4.4s}, [x0], #16
+ rev64 \a, \a
+ ld1 {v4.4s}, [x0]
+ ext \b16b, \b16b, \b16b, #8
+ add v4.4s, v4.4s, \d
+ st1 {v4.4s}, [x0], #16
+ ext \a16b, \a16b, \a16b, #8
+ ld1 {v4.4s}, [x0]
+ sub v4.4s, v4.4s, v9.4s
+ st1 {v4.4s}, [x0], #16
+ ld1 {v4.4s}, [x0]
+ sub v4.4s, v4.4s, v8.4s
+ st1 {v4.4s}, [x0], #16
+ ld1 {v4.4s}, [x0]
+ sub v4.4s, v4.4s, \b
+ st1 {v4.4s}, [x0], #16
+ ld1 {v4.4s}, [x0]
+ sub v4.4s, v4.4s, \a
+ st1 {v4.4s}, [x0], #16
+.endm
+
+ store_rev v31.4s, v27.4s, v23.4s, v19.4s, v31.16b, v27.16b
+ store_rev v30.4s, v26.4s, v22.4s, v18.4s, v30.16b, v26.16b
+ store_rev v29.4s, v25.4s, v21.4s, v17.4s, v29.16b, v25.16b
+ store_rev v28.4s, v24.4s, v20.4s, v16.4s, v28.16b, v24.16b
+.purgem store_rev
+ ret x14
+endfunc
+
+// This is mostly the same as 4x32_pass1, but without the transpose;
+// it uses the source as a temp buffer between the two idct passes, and
+// adds into the destination.
+// x0 = dst
+// x1 = dst stride
+// x2 = src (temp buffer)
+// x7 = negative double temp buffer stride
+// x9 = double temp buffer stride
+function idct32_1d_4x32_pass2\suffix\()_neon
+ mov x14, x30
+
+ // v16 = IN(0), v17 = IN(2) ... v31 = IN(30)
+.ifb \suffix
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #4
+.endif
+.ifc \suffix,_quarter
+.irp i, 16, 17, 18, 19
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #2
+.endif
+.ifc \suffix,_half
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #3
+.endif
+
+ bl idct16\suffix
+
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ store \i, x2, x9
+.endr
+
+ sub x2, x2, x9, lsl #4
+ add x2, x2, #128
+
+ // v16 = IN(1), v17 = IN(3) ... v31 = IN(31)
+.ifb \suffix
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #4
+.endif
+.ifc \suffix,_quarter
+.irp i, 16, 17, 18, 19
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #2
+.endif
+.ifc \suffix,_half
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #3
+.endif
+ sub x2, x2, #128
+
+ bl idct32_odd\suffix
+
+.macro load_acc_store a, b, c, d, neg=0
+.if \neg == 0
+ ld1 {v4.4s}, [x2], x9
+ ld1 {v5.4s}, [x2], x9
+ add v4.4s, v4.4s, \a
+ ld1 {v6.4s}, [x2], x9
+ add v5.4s, v5.4s, \b
+ ld1 {v7.4s}, [x2], x9
+ add v6.4s, v6.4s, \c
+ add v7.4s, v7.4s, \d
+.else
+ ld1 {v4.4s}, [x2], x7
+ ld1 {v5.4s}, [x2], x7
+ sub v4.4s, v4.4s, \a
+ ld1 {v6.4s}, [x2], x7
+ sub v5.4s, v5.4s, \b
+ ld1 {v7.4s}, [x2], x7
+ sub v6.4s, v6.4s, \c
+ sub v7.4s, v7.4s, \d
+.endif
+ ld1 {v8.4h}, [x0], x1
+ ld1 {v8.d}[1], [x0], x1
+ srshr v4.4s, v4.4s, #6
+ ld1 {v9.4h}, [x0], x1
+ srshr v5.4s, v5.4s, #6
+ uaddw v4.4s, v4.4s, v8.4h
+ ld1 {v9.d}[1], [x0], x1
+ srshr v6.4s, v6.4s, #6
+ uaddw2 v5.4s, v5.4s, v8.8h
+ srshr v7.4s, v7.4s, #6
+ sub x0, x0, x1, lsl #2
+ uaddw v6.4s, v6.4s, v9.4h
+ sqxtun v4.4h, v4.4s
+ uaddw2 v7.4s, v7.4s, v9.8h
+ sqxtun2 v4.8h, v5.4s
+ umin v4.8h, v4.8h, v15.8h
+ st1 {v4.4h}, [x0], x1
+ sqxtun v5.4h, v6.4s
+ st1 {v4.d}[1], [x0], x1
+ sqxtun2 v5.8h, v7.4s
+ umin v5.8h, v5.8h, v15.8h
+ st1 {v5.4h}, [x0], x1
+ st1 {v5.d}[1], [x0], x1
+.endm
+ load_acc_store v31.4s, v30.4s, v29.4s, v28.4s
+ load_acc_store v27.4s, v26.4s, v25.4s, v24.4s
+ load_acc_store v23.4s, v22.4s, v21.4s, v20.4s
+ load_acc_store v19.4s, v18.4s, v17.4s, v16.4s
+ sub x2, x2, x9
+ load_acc_store v16.4s, v17.4s, v18.4s, v19.4s, 1
+ load_acc_store v20.4s, v21.4s, v22.4s, v23.4s, 1
+ load_acc_store v24.4s, v25.4s, v26.4s, v27.4s, 1
+ load_acc_store v28.4s, v29.4s, v30.4s, v31.4s, 1
+.purgem load_acc_store
+ ret x14
+endfunc
+.endm
+
+idct32_funcs
+idct32_funcs _quarter
+idct32_funcs _half
+
+const min_eob_idct_idct_32, align=4
+ .short 0, 9, 34, 70, 135, 240, 336, 448
+endconst
+
+function vp9_idct_idct_32x32_add_16_neon
+ cmp w3, #1
+ b.eq idct32x32_dc_add_neon
+
+ movrel x10, idct_coeffs
+
+ mov x15, x30
+ stp d8, d9, [sp, #-0x10]!
+ stp d10, d11, [sp, #-0x10]!
+ stp d12, d13, [sp, #-0x10]!
+ stp d14, d15, [sp, #-0x10]!
+
+ sub sp, sp, #4096
+
+ mov x4, x0
+ mov x5, x1
+ mov x6, x2
+
+ // Double stride of the input, since we only read every other line
+ mov x9, #256
+ neg x7, x9
+
+ ld1 {v0.8h,v1.8h}, [x10], #32
+ sxtl v2.4s, v1.4h
+ sxtl2 v3.4s, v1.8h
+ sxtl2 v1.4s, v0.8h
+ sxtl v0.4s, v0.4h
+ ld1 {v10.8h,v11.8h}, [x10]
+ sxtl v12.4s, v11.4h
+ sxtl2 v13.4s, v11.8h
+ sxtl2 v11.4s, v10.8h
+ sxtl v10.4s, v10.4h
+
+ dup v15.8h, w13
+
+ cmp w3, #34
+ b.le idct32x32_quarter_add_16_neon
+ cmp w3, #135
+ b.le idct32x32_half_add_16_neon
+
+ movrel x12, min_eob_idct_idct_32, 2
+
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add x0, sp, #(\i*128)
+.if \i > 0
+ ldrh w1, [x12], #2
+ cmp w3, w1
+ mov x1, #(32 - \i)/4
+ b.le 1f
+.endif
+ add x2, x6, #(\i*4)
+ bl idct32_1d_4x32_pass1_neon
+.endr
+ b 3f
+
+1:
+ // Write zeros to the temp buffer for pass 2
+ movi v16.4s, #0
+ movi v17.4s, #0
+ movi v18.4s, #0
+ movi v19.4s, #0
+2:
+ subs x1, x1, #1
+.rept 4
+ st1 {v16.4s,v17.4s,v18.4s,v19.4s}, [x0], #64
+ st1 {v16.4s,v17.4s,v18.4s,v19.4s}, [x0], #64
+.endr
+ b.ne 2b
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add x0, x4, #(\i*2)
+ mov x1, x5
+ add x2, sp, #(\i*4)
+ bl idct32_1d_4x32_pass2_neon
+.endr
+
+ add sp, sp, #4096
+ ldp d14, d15, [sp], 0x10
+ ldp d12, d13, [sp], 0x10
+ ldp d10, d11, [sp], 0x10
+ ldp d8, d9, [sp], 0x10
+
+ ret x15
+endfunc
+
+function ff_vp9_idct_idct_32x32_add_10_neon, export=1
+ mov x13, #0x03ff
+ b vp9_idct_idct_32x32_add_16_neon
+endfunc
+
+function ff_vp9_idct_idct_32x32_add_12_neon, export=1
+ mov x13, #0x0fff
+ b vp9_idct_idct_32x32_add_16_neon
+endfunc
+
+.macro idct32_partial size
+function idct32x32_\size\()_add_16_neon
+.irp i, 0, 4
+ add x0, sp, #(\i*128)
+.ifc \size,quarter
+.if \i == 4
+ cmp w3, #9
+ b.le 1f
+.endif
+.endif
+ add x2, x6, #(\i*4)
+ bl idct32_1d_4x32_pass1_\size\()_neon
+.endr
+
+.ifc \size,half
+.irp i, 8, 12
+ add x0, sp, #(\i*128)
+.if \i == 12
+ cmp w3, #70
+ b.le 1f
+.endif
+ add x2, x6, #(\i*4)
+ bl idct32_1d_4x32_pass1_\size\()_neon
+.endr
+.endif
+ b 3f
+
+1:
+ // Write zeros to the temp buffer for pass 2
+ movi v16.4s, #0
+ movi v17.4s, #0
+ movi v18.4s, #0
+ movi v19.4s, #0
+
+.rept 4
+ st1 {v16.4s,v17.4s,v18.4s,v19.4s}, [x0], #64
+ st1 {v16.4s,v17.4s,v18.4s,v19.4s}, [x0], #64
+.endr
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add x0, x4, #(\i*2)
+ mov x1, x5
+ add x2, sp, #(\i*4)
+ bl idct32_1d_4x32_pass2_\size\()_neon
+.endr
+
+ add sp, sp, #4096
+ ldp d14, d15, [sp], 0x10
+ ldp d12, d13, [sp], 0x10
+ ldp d10, d11, [sp], 0x10
+ ldp d8, d9, [sp], 0x10
+
+ ret x15
+endfunc
+.endm
+
+idct32_partial quarter
+idct32_partial half
diff --git a/media/ffvpx/libavcodec/aarch64/vp9itxfm_neon.S b/media/ffvpx/libavcodec/aarch64/vp9itxfm_neon.S
new file mode 100644
index 0000000000..a27f7b8ae5
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9itxfm_neon.S
@@ -0,0 +1,1580 @@
+/*
+ * Copyright (c) 2016 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+#include "neon.S"
+
+const itxfm4_coeffs, align=4
+ .short 11585, 0, 6270, 15137
+iadst4_coeffs:
+ .short 5283, 15212, 9929, 13377
+endconst
+
+const iadst8_coeffs, align=4
+ .short 16305, 1606, 14449, 7723, 10394, 12665, 4756, 15679
+idct_coeffs:
+ .short 11585, 0, 6270, 15137, 3196, 16069, 13623, 9102
+ .short 1606, 16305, 12665, 10394, 7723, 14449, 15679, 4756
+ .short 804, 16364, 12140, 11003, 7005, 14811, 15426, 5520
+ .short 3981, 15893, 14053, 8423, 9760, 13160, 16207, 2404
+endconst
+
+const iadst16_coeffs, align=4
+ .short 16364, 804, 15893, 3981, 11003, 12140, 8423, 14053
+ .short 14811, 7005, 13160, 9760, 5520, 15426, 2404, 16207
+endconst
+
+// out1 = ((in1 + in2) * v0[0] + (1 << 13)) >> 14
+// out2 = ((in1 - in2) * v0[0] + (1 << 13)) >> 14
+// in/out are .8h registers; this can make do with 4 temp registers, but is
+// more efficient if 6 temp registers are available.
+.macro dmbutterfly0 out1, out2, in1, in2, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, neg=0
+.if \neg > 0
+ neg \tmp4\().4h, v0.4h
+.endif
+ add \tmp1\().8h, \in1\().8h, \in2\().8h
+ sub \tmp2\().8h, \in1\().8h, \in2\().8h
+.if \neg > 0
+ smull \tmp3\().4s, \tmp1\().4h, \tmp4\().h[0]
+ smull2 \tmp4\().4s, \tmp1\().8h, \tmp4\().h[0]
+.else
+ smull \tmp3\().4s, \tmp1\().4h, v0.h[0]
+ smull2 \tmp4\().4s, \tmp1\().8h, v0.h[0]
+.endif
+.ifb \tmp5
+ rshrn \out1\().4h, \tmp3\().4s, #14
+ rshrn2 \out1\().8h, \tmp4\().4s, #14
+ smull \tmp3\().4s, \tmp2\().4h, v0.h[0]
+ smull2 \tmp4\().4s, \tmp2\().8h, v0.h[0]
+ rshrn \out2\().4h, \tmp3\().4s, #14
+ rshrn2 \out2\().8h, \tmp4\().4s, #14
+.else
+ smull \tmp5\().4s, \tmp2\().4h, v0.h[0]
+ smull2 \tmp6\().4s, \tmp2\().8h, v0.h[0]
+ rshrn \out1\().4h, \tmp3\().4s, #14
+ rshrn2 \out1\().8h, \tmp4\().4s, #14
+ rshrn \out2\().4h, \tmp5\().4s, #14
+ rshrn2 \out2\().8h, \tmp6\().4s, #14
+.endif
+.endm
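+
+// For reference, the same operation per lane in scalar C (illustrative only,
+// our naming; v0.h[0] holds 11585, the Q14 value of cos(pi/4), once
+// idct_coeffs or itxfm4_coeffs has been loaded):
+//
+//   static void dmbutterfly0_c(int16_t *out1, int16_t *out2, int in1, int in2)
+//   {
+//       *out1 = (int16_t)(((in1 + in2) * 11585 + (1 << 13)) >> 14);
+//       *out2 = (int16_t)(((in1 - in2) * 11585 + (1 << 13)) >> 14);
+//   }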
+
+// Same as dmbutterfly0 above, but treating the input in in2 as zero,
+// writing the same output into both out1 and out2.
+.macro dmbutterfly0_h out1, out2, in1, in2, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6
+ smull \tmp1\().4s, \in1\().4h, v0.h[0]
+ smull2 \tmp2\().4s, \in1\().8h, v0.h[0]
+ rshrn \out1\().4h, \tmp1\().4s, #14
+ rshrn2 \out1\().8h, \tmp2\().4s, #14
+ rshrn \out2\().4h, \tmp1\().4s, #14
+ rshrn2 \out2\().8h, \tmp2\().4s, #14
+.endm
+
+// out1,out2 = in1 * coef1 - in2 * coef2
+// out3,out4 = in1 * coef2 + in2 * coef1
+// out are 4 x .4s registers, in are 2 x .8h registers
+.macro dmbutterfly_l out1, out2, out3, out4, in1, in2, coef1, coef2
+ smull \out1\().4s, \in1\().4h, \coef1
+ smull2 \out2\().4s, \in1\().8h, \coef1
+ smull \out3\().4s, \in1\().4h, \coef2
+ smull2 \out4\().4s, \in1\().8h, \coef2
+ smlsl \out1\().4s, \in2\().4h, \coef2
+ smlsl2 \out2\().4s, \in2\().8h, \coef2
+ smlal \out3\().4s, \in2\().4h, \coef1
+ smlal2 \out4\().4s, \in2\().8h, \coef1
+.endm
+
+// inout1 = (inout1 * coef1 - inout2 * coef2 + (1 << 13)) >> 14
+// inout2 = (inout1 * coef2 + inout2 * coef1 + (1 << 13)) >> 14
+// inout are 2 x .8h registers
+.macro dmbutterfly inout1, inout2, coef1, coef2, tmp1, tmp2, tmp3, tmp4, neg=0
+ dmbutterfly_l \tmp1, \tmp2, \tmp3, \tmp4, \inout1, \inout2, \coef1, \coef2
+.if \neg > 0
+ neg \tmp3\().4s, \tmp3\().4s
+ neg \tmp4\().4s, \tmp4\().4s
+.endif
+ rshrn \inout1\().4h, \tmp1\().4s, #14
+ rshrn2 \inout1\().8h, \tmp2\().4s, #14
+ rshrn \inout2\().4h, \tmp3\().4s, #14
+ rshrn2 \inout2\().8h, \tmp4\().4s, #14
+.endm
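+
+// Per lane, the macro above computes the following rotation (illustrative
+// scalar C, our naming; coef1/coef2 are Q14 cos/sin constants from the
+// coefficient tables, and neg=1 negates the second pair of products before
+// the rounding shift):
+//
+//   static void dmbutterfly_c(int16_t *a, int16_t *b, int coef1, int coef2)
+//   {
+//       int ta = *a, tb = *b;
+//       *a = (int16_t)((ta * coef1 - tb * coef2 + (1 << 13)) >> 14);
+//       *b = (int16_t)((ta * coef2 + tb * coef1 + (1 << 13)) >> 14);
+//   }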
+
+// Same as dmbutterfly above, but treating the input in inout2 as zero
+.macro dmbutterfly_h1 inout1, inout2, coef1, coef2, tmp1, tmp2, tmp3, tmp4
+ smull \tmp1\().4s, \inout1\().4h, \coef1
+ smull2 \tmp2\().4s, \inout1\().8h, \coef1
+ smull \tmp3\().4s, \inout1\().4h, \coef2
+ smull2 \tmp4\().4s, \inout1\().8h, \coef2
+ rshrn \inout1\().4h, \tmp1\().4s, #14
+ rshrn2 \inout1\().8h, \tmp2\().4s, #14
+ rshrn \inout2\().4h, \tmp3\().4s, #14
+ rshrn2 \inout2\().8h, \tmp4\().4s, #14
+.endm
+
+// Same as dmbutterfly above, but treating the input in inout1 as zero
+.macro dmbutterfly_h2 inout1, inout2, coef1, coef2, tmp1, tmp2, tmp3, tmp4
+ smull \tmp1\().4s, \inout2\().4h, \coef2
+ smull2 \tmp2\().4s, \inout2\().8h, \coef2
+ smull \tmp3\().4s, \inout2\().4h, \coef1
+ smull2 \tmp4\().4s, \inout2\().8h, \coef1
+ neg \tmp1\().4s, \tmp1\().4s
+ neg \tmp2\().4s, \tmp2\().4s
+ rshrn \inout2\().4h, \tmp3\().4s, #14
+ rshrn2 \inout2\().8h, \tmp4\().4s, #14
+ rshrn \inout1\().4h, \tmp1\().4s, #14
+ rshrn2 \inout1\().8h, \tmp2\().4s, #14
+.endm
+
+.macro dsmull_h out1, out2, in, coef
+ smull \out1\().4s, \in\().4h, \coef
+ smull2 \out2\().4s, \in\().8h, \coef
+.endm
+
+.macro drshrn_h out, in1, in2, shift
+ rshrn \out\().4h, \in1\().4s, \shift
+ rshrn2 \out\().8h, \in2\().4s, \shift
+.endm
+
+
+// out1 = in1 + in2
+// out2 = in1 - in2
+.macro butterfly_8h out1, out2, in1, in2
+ add \out1\().8h, \in1\().8h, \in2\().8h
+ sub \out2\().8h, \in1\().8h, \in2\().8h
+.endm
+
+// out1 = in1 - in2
+// out2 = in1 + in2
+.macro butterfly_8h_r out1, out2, in1, in2
+ sub \out1\().8h, \in1\().8h, \in2\().8h
+ add \out2\().8h, \in1\().8h, \in2\().8h
+.endm
+
+// out1 = (in1,in2 + in3,in4 + (1 << 13)) >> 14
+// out2 = (in1,in2 - in3,in4 + (1 << 13)) >> 14
+// out are 2 x .8h registers, in are 4 x .4s registers
+.macro dbutterfly_n out1, out2, in1, in2, in3, in4, tmp1, tmp2, tmp3, tmp4
+ add \tmp1\().4s, \in1\().4s, \in3\().4s
+ add \tmp2\().4s, \in2\().4s, \in4\().4s
+ sub \tmp3\().4s, \in1\().4s, \in3\().4s
+ sub \tmp4\().4s, \in2\().4s, \in4\().4s
+ rshrn \out1\().4h, \tmp1\().4s, #14
+ rshrn2 \out1\().8h, \tmp2\().4s, #14
+ rshrn \out2\().4h, \tmp3\().4s, #14
+ rshrn2 \out2\().8h, \tmp4\().4s, #14
+.endm
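+
+// Per lane this is simply a rounded add/sub of two 32 bit intermediates back
+// down to 16 bits (illustrative scalar C, our naming):
+//
+//   static void dbutterfly_n_c(int16_t *out1, int16_t *out2, int32_t a, int32_t b)
+//   {
+//       *out1 = (int16_t)((a + b + (1 << 13)) >> 14);
+//       *out2 = (int16_t)((a - b + (1 << 13)) >> 14);
+//   }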
+
+.macro iwht4 c0, c1, c2, c3
+ add \c0\().4h, \c0\().4h, \c1\().4h
+ sub v17.4h, \c2\().4h, \c3\().4h
+ sub v16.4h, \c0\().4h, v17.4h
+ sshr v16.4h, v16.4h, #1
+ sub \c2\().4h, v16.4h, \c1\().4h
+ sub \c1\().4h, v16.4h, \c3\().4h
+ add \c3\().4h, v17.4h, \c2\().4h
+ sub \c0\().4h, \c0\().4h, \c1\().4h
+.endm
+
+.macro idct4 c0, c1, c2, c3
+ smull v22.4s, \c1\().4h, v0.h[3]
+ smull v20.4s, \c1\().4h, v0.h[2]
+ add v16.4h, \c0\().4h, \c2\().4h
+ sub v17.4h, \c0\().4h, \c2\().4h
+ smlal v22.4s, \c3\().4h, v0.h[2]
+ smull v18.4s, v16.4h, v0.h[0]
+ smull v19.4s, v17.4h, v0.h[0]
+ smlsl v20.4s, \c3\().4h, v0.h[3]
+ rshrn v22.4h, v22.4s, #14
+ rshrn v18.4h, v18.4s, #14
+ rshrn v19.4h, v19.4s, #14
+ rshrn v20.4h, v20.4s, #14
+ add \c0\().4h, v18.4h, v22.4h
+ sub \c3\().4h, v18.4h, v22.4h
+ add \c1\().4h, v19.4h, v20.4h
+ sub \c2\().4h, v19.4h, v20.4h
+.endm
+
+.macro iadst4 c0, c1, c2, c3
+ smull v16.4s, \c0\().4h, v0.h[4]
+ smlal v16.4s, \c2\().4h, v0.h[5]
+ smlal v16.4s, \c3\().4h, v0.h[6]
+ smull v17.4s, \c0\().4h, v0.h[6]
+ smlsl v17.4s, \c2\().4h, v0.h[4]
+ sub \c0\().4h, \c0\().4h, \c2\().4h
+ smlsl v17.4s, \c3\().4h, v0.h[5]
+ add \c0\().4h, \c0\().4h, \c3\().4h
+ smull v19.4s, \c1\().4h, v0.h[7]
+ smull v18.4s, \c0\().4h, v0.h[7]
+ add v20.4s, v16.4s, v19.4s
+ add v21.4s, v17.4s, v19.4s
+ rshrn \c0\().4h, v20.4s, #14
+ add v16.4s, v16.4s, v17.4s
+ rshrn \c1\().4h, v21.4s, #14
+ sub v16.4s, v16.4s, v19.4s
+ rshrn \c2\().4h, v18.4s, #14
+ rshrn \c3\().4h, v16.4s, #14
+.endm
+
+// The public functions in this file have the following signature:
+// void itxfm_add(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
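+//
+// For example, the idct/idct 4x4 variant generated by itxfm_func4x4 below is
+// exported as ff_vp9_idct_idct_4x4_add_neon and could be called from C roughly
+// like this (illustrative; the coefficient block is cleared by the call):
+//
+//   void ff_vp9_idct_idct_4x4_add_neon(uint8_t *dst, ptrdiff_t stride,
+//                                      int16_t *block, int eob);
+//
+//   if (eob)
+//       ff_vp9_idct_idct_4x4_add_neon(dst, stride, block, eob);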
+
+.macro itxfm_func4x4 txfm1, txfm2
+function ff_vp9_\txfm1\()_\txfm2\()_4x4_add_neon, export=1
+.ifc \txfm1,\txfm2
+.ifc \txfm1,idct
+ movrel x4, itxfm4_coeffs
+ ld1 {v0.4h}, [x4]
+.endif
+.ifc \txfm1,iadst
+ movrel x4, iadst4_coeffs
+ ld1 {v0.d}[1], [x4]
+.endif
+.else
+ movrel x4, itxfm4_coeffs
+ ld1 {v0.8h}, [x4]
+.endif
+
+ movi v31.8h, #0
+.ifc \txfm1\()_\txfm2,idct_idct
+ cmp w3, #1
+ b.ne 1f
+ // DC-only for idct/idct
+ ld1 {v2.h}[0], [x2]
+ smull v2.4s, v2.4h, v0.h[0]
+ rshrn v2.4h, v2.4s, #14
+ smull v2.4s, v2.4h, v0.h[0]
+ rshrn v2.4h, v2.4s, #14
+ st1 {v31.h}[0], [x2]
+ dup v4.4h, v2.h[0]
+ mov v5.16b, v4.16b
+ mov v6.16b, v4.16b
+ mov v7.16b, v4.16b
+ b 2f
+.endif
+
+1:
+ ld1 {v4.4h,v5.4h,v6.4h,v7.4h}, [x2]
+ st1 {v31.8h}, [x2], #16
+
+.ifc \txfm1,iwht
+ sshr v4.4h, v4.4h, #2
+ sshr v5.4h, v5.4h, #2
+ sshr v6.4h, v6.4h, #2
+ sshr v7.4h, v7.4h, #2
+.endif
+
+ \txfm1\()4 v4, v5, v6, v7
+
+ st1 {v31.8h}, [x2], #16
+ // Transpose 4x4 with 16 bit elements
+ transpose_4x4H v4, v5, v6, v7, v16, v17, v18, v19
+
+ \txfm2\()4 v4, v5, v6, v7
+2:
+ ld1 {v0.s}[0], [x0], x1
+ ld1 {v1.s}[0], [x0], x1
+.ifnc \txfm1,iwht
+ srshr v4.4h, v4.4h, #4
+ srshr v5.4h, v5.4h, #4
+ srshr v6.4h, v6.4h, #4
+ srshr v7.4h, v7.4h, #4
+.endif
+ uaddw v4.8h, v4.8h, v0.8b
+ uaddw v5.8h, v5.8h, v1.8b
+ ld1 {v2.s}[0], [x0], x1
+ ld1 {v3.s}[0], [x0], x1
+ sqxtun v0.8b, v4.8h
+ sqxtun v1.8b, v5.8h
+ sub x0, x0, x1, lsl #2
+
+ uaddw v6.8h, v6.8h, v2.8b
+ uaddw v7.8h, v7.8h, v3.8b
+ st1 {v0.s}[0], [x0], x1
+ sqxtun v2.8b, v6.8h
+ sqxtun v3.8b, v7.8h
+
+ st1 {v1.s}[0], [x0], x1
+ st1 {v2.s}[0], [x0], x1
+ st1 {v3.s}[0], [x0], x1
+
+ ret
+endfunc
+.endm
+
+itxfm_func4x4 idct, idct
+itxfm_func4x4 iadst, idct
+itxfm_func4x4 idct, iadst
+itxfm_func4x4 iadst, iadst
+itxfm_func4x4 iwht, iwht
+
+
+.macro idct8
+ dmbutterfly0 v16, v20, v16, v20, v2, v3, v4, v5, v6, v7 // v16 = t0a, v20 = t1a
+ dmbutterfly v18, v22, v0.h[2], v0.h[3], v2, v3, v4, v5 // v18 = t2a, v22 = t3a
+ dmbutterfly v17, v23, v0.h[4], v0.h[5], v2, v3, v4, v5 // v17 = t4a, v23 = t7a
+ dmbutterfly v21, v19, v0.h[6], v0.h[7], v2, v3, v4, v5 // v21 = t5a, v19 = t6a
+
+ butterfly_8h v24, v25, v16, v22 // v24 = t0, v25 = t3
+ butterfly_8h v28, v29, v17, v21 // v28 = t4, v29 = t5a
+ butterfly_8h v30, v31, v23, v19 // v30 = t7, v31 = t6a
+ butterfly_8h v26, v27, v20, v18 // v26 = t1, v27 = t2
+
+ dmbutterfly0 v31, v29, v31, v29, v2, v3, v4, v5, v6, v7 // v31 = t6, v29 = t5
+
+ butterfly_8h v16, v23, v24, v30 // v16 = out[0], v23 = out[7]
+ butterfly_8h v17, v22, v26, v31 // v17 = out[1], v22 = out[6]
+        butterfly_8h    v18, v21, v27, v29                 // v18 = out[2], v21 = out[5]
+        butterfly_8h    v19, v20, v25, v28                 // v19 = out[3], v20 = out[4]
+.endm
+
+.macro iadst8
+ dmbutterfly_l v24, v25, v26, v27, v23, v16, v1.h[1], v1.h[0] // v24,v25 = t1a, v26,v27 = t0a
+ dmbutterfly_l v28, v29, v30, v31, v21, v18, v1.h[3], v1.h[2] // v28,v29 = t3a, v30,v31 = t2a
+ dmbutterfly_l v2, v3, v4, v5, v19, v20, v1.h[5], v1.h[4] // v2,v3 = t5a, v4,v5 = t4a
+ dmbutterfly_l v16, v18, v21, v23, v17, v22, v1.h[7], v1.h[6] // v16,v18 = t7a, v21,v23 = t6a
+
+ dbutterfly_n v4, v5, v26, v27, v4, v5, v6, v7, v26, v27 // v4 = t0, v5 = t4
+ dbutterfly_n v2, v3, v24, v25, v2, v3, v6, v7, v26, v27 // v2 = t1, v3 = t5
+ dbutterfly_n v24, v25, v30, v31, v21, v23, v6, v7, v26, v27 // v24 = t2, v25 = t6
+ dbutterfly_n v30, v31, v28, v29, v16, v18, v6, v7, v26, v27 // v30 = t3, v31 = t7
+
+ butterfly_8h v16, v6, v4, v24 // v16 = out[0], v6 = t2
+ butterfly_8h v23, v7, v2, v30 // v23 = -out[7], v7 = t3
+ neg v23.8h, v23.8h // v23 = out[7]
+
+ dmbutterfly0 v19, v20, v6, v7, v24, v26, v27, v28, v29, v30 // v19 = -out[3], v20 = out[4]
+ neg v19.8h, v19.8h // v19 = out[3]
+
+ dmbutterfly_l v26, v27, v28, v29, v5, v3, v0.h[2], v0.h[3] // v26,v27 = t5a, v28,v29 = t4a
+ dmbutterfly_l v2, v3, v4, v5, v31, v25, v0.h[3], v0.h[2] // v2,v3 = t6a, v4,v5 = t7a
+
+ dbutterfly_n v17, v30, v28, v29, v2, v3, v6, v7, v24, v25 // v17 = -out[1], v30 = t6
+ dbutterfly_n v22, v31, v26, v27, v4, v5, v6, v7, v24, v25 // v22 = out[6], v31 = t7
+ neg v17.8h, v17.8h // v17 = out[1]
+
+ dmbutterfly0 v18, v21, v30, v31, v2, v3, v4, v5, v6, v7 // v18 = out[2], v21 = -out[5]
+ neg v21.8h, v21.8h // v21 = out[5]
+.endm
+
+
+.macro itxfm_func8x8 txfm1, txfm2
+function ff_vp9_\txfm1\()_\txfm2\()_8x8_add_neon, export=1
+ // The iadst also uses a few coefficients from
+ // idct, so those always need to be loaded.
+.ifc \txfm1\()_\txfm2,idct_idct
+ movrel x4, idct_coeffs
+.else
+ movrel x4, iadst8_coeffs
+ ld1 {v1.8h}, [x4], #16
+.endif
+ ld1 {v0.8h}, [x4]
+
+ movi v2.8h, #0
+ movi v3.8h, #0
+ movi v4.8h, #0
+ movi v5.8h, #0
+
+.ifc \txfm1\()_\txfm2,idct_idct
+ cmp w3, #1
+ b.ne 1f
+ // DC-only for idct/idct
+ ld1 {v2.h}[0], [x2]
+ smull v2.4s, v2.4h, v0.h[0]
+ rshrn v2.4h, v2.4s, #14
+ smull v2.4s, v2.4h, v0.h[0]
+ rshrn v2.4h, v2.4s, #14
+ st1 {v3.h}[0], [x2]
+ dup v16.8h, v2.h[0]
+ mov v17.16b, v16.16b
+ mov v18.16b, v16.16b
+ mov v19.16b, v16.16b
+ mov v20.16b, v16.16b
+ mov v21.16b, v16.16b
+ mov v22.16b, v16.16b
+ mov v23.16b, v16.16b
+ b 2f
+.endif
+1:
+ ld1 {v16.8h,v17.8h,v18.8h,v19.8h}, [x2], #64
+ ld1 {v20.8h,v21.8h,v22.8h,v23.8h}, [x2], #64
+ sub x2, x2, #128
+ st1 {v2.8h,v3.8h,v4.8h,v5.8h}, [x2], #64
+ st1 {v2.8h,v3.8h,v4.8h,v5.8h}, [x2], #64
+
+ \txfm1\()8
+
+ // Transpose 8x8 with 16 bit elements
+ transpose_8x8H v16, v17, v18, v19, v20, v21, v22, v23, v24, v25
+
+ \txfm2\()8
+2:
+ mov x3, x0
+ // Add into the destination
+ ld1 {v0.8b}, [x0], x1
+ srshr v16.8h, v16.8h, #5
+ ld1 {v1.8b}, [x0], x1
+ srshr v17.8h, v17.8h, #5
+ ld1 {v2.8b}, [x0], x1
+ srshr v18.8h, v18.8h, #5
+ uaddw v16.8h, v16.8h, v0.8b
+ ld1 {v3.8b}, [x0], x1
+ srshr v19.8h, v19.8h, #5
+ uaddw v17.8h, v17.8h, v1.8b
+ ld1 {v4.8b}, [x0], x1
+ srshr v20.8h, v20.8h, #5
+ uaddw v18.8h, v18.8h, v2.8b
+ sqxtun v0.8b, v16.8h
+ ld1 {v5.8b}, [x0], x1
+ srshr v21.8h, v21.8h, #5
+ uaddw v19.8h, v19.8h, v3.8b
+ sqxtun v1.8b, v17.8h
+ ld1 {v6.8b}, [x0], x1
+ srshr v22.8h, v22.8h, #5
+ uaddw v20.8h, v20.8h, v4.8b
+ sqxtun v2.8b, v18.8h
+ ld1 {v7.8b}, [x0], x1
+ srshr v23.8h, v23.8h, #5
+ uaddw v21.8h, v21.8h, v5.8b
+ sqxtun v3.8b, v19.8h
+
+ st1 {v0.8b}, [x3], x1
+ uaddw v22.8h, v22.8h, v6.8b
+ st1 {v1.8b}, [x3], x1
+ sqxtun v4.8b, v20.8h
+ st1 {v2.8b}, [x3], x1
+ uaddw v23.8h, v23.8h, v7.8b
+ st1 {v3.8b}, [x3], x1
+ sqxtun v5.8b, v21.8h
+ st1 {v4.8b}, [x3], x1
+ sqxtun v6.8b, v22.8h
+ st1 {v5.8b}, [x3], x1
+ sqxtun v7.8b, v23.8h
+
+ st1 {v6.8b}, [x3], x1
+ st1 {v7.8b}, [x3], x1
+
+ ret
+endfunc
+.endm
+
+itxfm_func8x8 idct, idct
+itxfm_func8x8 iadst, idct
+itxfm_func8x8 idct, iadst
+itxfm_func8x8 iadst, iadst
+
+
+function idct16x16_dc_add_neon
+ movrel x4, idct_coeffs
+ ld1 {v0.4h}, [x4]
+
+ movi v1.4h, #0
+
+ ld1 {v2.h}[0], [x2]
+ smull v2.4s, v2.4h, v0.h[0]
+ rshrn v2.4h, v2.4s, #14
+ smull v2.4s, v2.4h, v0.h[0]
+ rshrn v2.4h, v2.4s, #14
+ dup v2.8h, v2.h[0]
+ st1 {v1.h}[0], [x2]
+
+ srshr v2.8h, v2.8h, #6
+
+ mov x3, x0
+ mov x4, #16
+1:
+ // Loop to add the constant from v2 into all 16x16 outputs
+ subs x4, x4, #2
+ ld1 {v3.16b}, [x0], x1
+ ld1 {v4.16b}, [x0], x1
+ uaddw v16.8h, v2.8h, v3.8b
+ uaddw2 v17.8h, v2.8h, v3.16b
+ uaddw v18.8h, v2.8h, v4.8b
+ uaddw2 v19.8h, v2.8h, v4.16b
+ sqxtun v3.8b, v16.8h
+ sqxtun2 v3.16b, v17.8h
+ sqxtun v4.8b, v18.8h
+ sqxtun2 v4.16b, v19.8h
+ st1 {v3.16b}, [x3], x1
+ st1 {v4.16b}, [x3], x1
+ b.ne 1b
+
+ ret
+endfunc
+
+.macro idct16_end
+ butterfly_8h v18, v7, v4, v7 // v18 = t0a, v7 = t7a
+ butterfly_8h v19, v22, v5, v22 // v19 = t1a, v22 = t6
+ butterfly_8h v4, v26, v20, v26 // v4 = t2a, v26 = t5
+ butterfly_8h v5, v6, v28, v6 // v5 = t3a, v6 = t4
+ butterfly_8h v20, v28, v16, v24 // v20 = t8a, v28 = t11a
+ butterfly_8h v24, v21, v23, v21 // v24 = t9, v21 = t10
+ butterfly_8h v23, v27, v25, v27 // v23 = t14, v27 = t13
+ butterfly_8h v25, v29, v29, v17 // v25 = t15a, v29 = t12a
+
+ dmbutterfly0 v2, v3, v27, v21, v2, v3, v16, v17, v30, v31 // v2 = t13a, v3 = t10a
+ dmbutterfly0 v28, v27, v29, v28, v21, v29, v16, v17, v30, v31 // v28 = t12, v27 = t11
+
+ butterfly_8h v16, v31, v18, v25 // v16 = out[0], v31 = out[15]
+ butterfly_8h v17, v30, v19, v23 // v17 = out[1], v30 = out[14]
+ butterfly_8h_r v25, v22, v22, v24 // v25 = out[9], v22 = out[6]
+ butterfly_8h v23, v24, v7, v20 // v23 = out[7], v24 = out[8]
+ butterfly_8h v18, v29, v4, v2 // v18 = out[2], v29 = out[13]
+ butterfly_8h v19, v28, v5, v28 // v19 = out[3], v28 = out[12]
+ butterfly_8h v20, v27, v6, v27 // v20 = out[4], v27 = out[11]
+ butterfly_8h v21, v26, v26, v3 // v21 = out[5], v26 = out[10]
+ ret
+.endm
+
+function idct16
+ dmbutterfly0 v16, v24, v16, v24, v2, v3, v4, v5, v6, v7 // v16 = t0a, v24 = t1a
+ dmbutterfly v20, v28, v0.h[2], v0.h[3], v2, v3, v4, v5 // v20 = t2a, v28 = t3a
+ dmbutterfly v18, v30, v0.h[4], v0.h[5], v2, v3, v4, v5 // v18 = t4a, v30 = t7a
+ dmbutterfly v26, v22, v0.h[6], v0.h[7], v2, v3, v4, v5 // v26 = t5a, v22 = t6a
+ dmbutterfly v17, v31, v1.h[0], v1.h[1], v2, v3, v4, v5 // v17 = t8a, v31 = t15a
+ dmbutterfly v25, v23, v1.h[2], v1.h[3], v2, v3, v4, v5 // v25 = t9a, v23 = t14a
+ dmbutterfly v21, v27, v1.h[4], v1.h[5], v2, v3, v4, v5 // v21 = t10a, v27 = t13a
+ dmbutterfly v29, v19, v1.h[6], v1.h[7], v2, v3, v4, v5 // v29 = t11a, v19 = t12a
+
+ butterfly_8h v4, v28, v16, v28 // v4 = t0, v28 = t3
+ butterfly_8h v5, v20, v24, v20 // v5 = t1, v20 = t2
+ butterfly_8h v6, v26, v18, v26 // v6 = t4, v26 = t5
+ butterfly_8h v7, v22, v30, v22 // v7 = t7, v22 = t6
+ butterfly_8h v16, v25, v17, v25 // v16 = t8, v25 = t9
+ butterfly_8h v24, v21, v29, v21 // v24 = t11, v21 = t10
+ butterfly_8h v17, v27, v19, v27 // v17 = t12, v27 = t13
+ butterfly_8h v29, v23, v31, v23 // v29 = t15, v23 = t14
+
+ dmbutterfly0 v22, v26, v22, v26, v2, v3, v18, v19, v30, v31 // v22 = t6a, v26 = t5a
+ dmbutterfly v23, v25, v0.h[2], v0.h[3], v18, v19, v30, v31 // v23 = t9a, v25 = t14a
+ dmbutterfly v27, v21, v0.h[2], v0.h[3], v18, v19, v30, v31, neg=1 // v27 = t13a, v21 = t10a
+ idct16_end
+endfunc
+
+function idct16_half
+ dmbutterfly0_h v16, v24, v16, v24, v2, v3, v4, v5, v6, v7 // v16 = t0a, v24 = t1a
+ dmbutterfly_h1 v20, v28, v0.h[2], v0.h[3], v2, v3, v4, v5 // v20 = t2a, v28 = t3a
+ dmbutterfly_h1 v18, v30, v0.h[4], v0.h[5], v2, v3, v4, v5 // v18 = t4a, v30 = t7a
+ dmbutterfly_h2 v26, v22, v0.h[6], v0.h[7], v2, v3, v4, v5 // v26 = t5a, v22 = t6a
+ dmbutterfly_h1 v17, v31, v1.h[0], v1.h[1], v2, v3, v4, v5 // v17 = t8a, v31 = t15a
+ dmbutterfly_h2 v25, v23, v1.h[2], v1.h[3], v2, v3, v4, v5 // v25 = t9a, v23 = t14a
+ dmbutterfly_h1 v21, v27, v1.h[4], v1.h[5], v2, v3, v4, v5 // v21 = t10a, v27 = t13a
+ dmbutterfly_h2 v29, v19, v1.h[6], v1.h[7], v2, v3, v4, v5 // v29 = t11a, v19 = t12a
+
+ butterfly_8h v4, v28, v16, v28 // v4 = t0, v28 = t3
+ butterfly_8h v5, v20, v24, v20 // v5 = t1, v20 = t2
+ butterfly_8h v6, v26, v18, v26 // v6 = t4, v26 = t5
+ butterfly_8h v7, v22, v30, v22 // v7 = t7, v22 = t6
+ butterfly_8h v16, v25, v17, v25 // v16 = t8, v25 = t9
+ butterfly_8h v24, v21, v29, v21 // v24 = t11, v21 = t10
+ butterfly_8h v17, v27, v19, v27 // v17 = t12, v27 = t13
+ butterfly_8h v29, v23, v31, v23 // v29 = t15, v23 = t14
+
+ dmbutterfly0 v22, v26, v22, v26, v2, v3, v18, v19, v30, v31 // v22 = t6a, v26 = t5a
+ dmbutterfly v23, v25, v0.h[2], v0.h[3], v18, v19, v30, v31 // v23 = t9a, v25 = t14a
+ dmbutterfly v27, v21, v0.h[2], v0.h[3], v18, v19, v30, v31, neg=1 // v27 = t13a, v21 = t10a
+ idct16_end
+endfunc
+
+function idct16_quarter
+ dsmull_h v24, v25, v19, v1.h[7]
+ dsmull_h v4, v5, v17, v1.h[0]
+ dsmull_h v7, v6, v18, v0.h[5]
+ dsmull_h v30, v31, v18, v0.h[4]
+ neg v24.4s, v24.4s
+ neg v25.4s, v25.4s
+ dsmull_h v29, v28, v17, v1.h[1]
+ dsmull_h v26, v27, v19, v1.h[6]
+ dsmull_h v22, v23, v16, v0.h[0]
+ drshrn_h v24, v24, v25, #14
+ drshrn_h v16, v4, v5, #14
+ drshrn_h v7, v7, v6, #14
+ drshrn_h v6, v30, v31, #14
+ drshrn_h v29, v29, v28, #14
+ drshrn_h v17, v26, v27, #14
+ drshrn_h v28, v22, v23, #14
+
+ dmbutterfly_l v20, v21, v22, v23, v17, v24, v0.h[2], v0.h[3]
+ dmbutterfly_l v18, v19, v30, v31, v29, v16, v0.h[2], v0.h[3]
+ neg v22.4s, v22.4s
+ neg v23.4s, v23.4s
+ drshrn_h v27, v20, v21, #14
+ drshrn_h v21, v22, v23, #14
+ drshrn_h v23, v18, v19, #14
+ drshrn_h v25, v30, v31, #14
+ mov v4.16b, v28.16b
+ mov v5.16b, v28.16b
+ dmbutterfly0 v22, v26, v7, v6, v18, v19, v30, v31
+ mov v20.16b, v28.16b
+ idct16_end
+endfunc
+
+function iadst16
+ ld1 {v0.8h,v1.8h}, [x11]
+
+ dmbutterfly_l v6, v7, v4, v5, v31, v16, v0.h[1], v0.h[0] // v6,v7 = t1, v4,v5 = t0
+ dmbutterfly_l v10, v11, v8, v9, v23, v24, v0.h[5], v0.h[4] // v10,v11 = t9, v8,v9 = t8
+ dbutterfly_n v31, v24, v6, v7, v10, v11, v12, v13, v10, v11 // v31 = t1a, v24 = t9a
+ dmbutterfly_l v14, v15, v12, v13, v29, v18, v0.h[3], v0.h[2] // v14,v15 = t3, v12,v13 = t2
+ dbutterfly_n v16, v23, v4, v5, v8, v9, v6, v7, v8, v9 // v16 = t0a, v23 = t8a
+
+ dmbutterfly_l v6, v7, v4, v5, v21, v26, v0.h[7], v0.h[6] // v6,v7 = t11, v4,v5 = t10
+ dbutterfly_n v29, v26, v14, v15, v6, v7, v8, v9, v6, v7 // v29 = t3a, v26 = t11a
+ dmbutterfly_l v10, v11, v8, v9, v27, v20, v1.h[1], v1.h[0] // v10,v11 = t5, v8,v9 = t4
+ dbutterfly_n v18, v21, v12, v13, v4, v5, v6, v7, v4, v5 // v18 = t2a, v21 = t10a
+
+ dmbutterfly_l v14, v15, v12, v13, v19, v28, v1.h[5], v1.h[4] // v14,v15 = t13, v12,v13 = t12
+ dbutterfly_n v20, v28, v10, v11, v14, v15, v4, v5, v14, v15 // v20 = t5a, v28 = t13a
+ dmbutterfly_l v6, v7, v4, v5, v25, v22, v1.h[3], v1.h[2] // v6,v7 = t7, v4,v5 = t6
+ dbutterfly_n v27, v19, v8, v9, v12, v13, v10, v11, v12, v13 // v27 = t4a, v19 = t12a
+
+ dmbutterfly_l v10, v11, v8, v9, v17, v30, v1.h[7], v1.h[6] // v10,v11 = t15, v8,v9 = t14
+ ld1 {v0.8h}, [x10]
+ dbutterfly_n v22, v30, v6, v7, v10, v11, v12, v13, v10, v11 // v22 = t7a, v30 = t15a
+ dmbutterfly_l v14, v15, v12, v13, v23, v24, v0.h[4], v0.h[5] // v14,v15 = t9, v12,v13 = t8
+ dbutterfly_n v25, v17, v4, v5, v8, v9, v6, v7, v8, v9 // v25 = t6a, v17 = t14a
+
+ dmbutterfly_l v4, v5, v6, v7, v28, v19, v0.h[5], v0.h[4] // v4,v5 = t12, v6,v7 = t13
+ dbutterfly_n v23, v19, v12, v13, v4, v5, v8, v9, v4, v5 // v23 = t8a, v19 = t12a
+ dmbutterfly_l v10, v11, v8, v9, v21, v26, v0.h[6], v0.h[7] // v10,v11 = t11, v8,v9 = t10
+ butterfly_8h_r v4, v27, v16, v27 // v4 = t4, v27 = t0
+ dbutterfly_n v24, v28, v14, v15, v6, v7, v12, v13, v6, v7 // v24 = t9a, v28 = t13a
+
+ dmbutterfly_l v12, v13, v14, v15, v30, v17, v0.h[7], v0.h[6] // v12,v13 = t14, v14,v15 = t15
+ butterfly_8h_r v5, v20, v31, v20 // v5 = t5, v20 = t1
+ dbutterfly_n v21, v17, v8, v9, v12, v13, v6, v7, v12, v13 // v21 = t10a, v17 = t14a
+ dbutterfly_n v26, v30, v10, v11, v14, v15, v8, v9, v14, v15 // v26 = t11a, v30 = t15a
+
+ butterfly_8h_r v6, v25, v18, v25 // v6 = t6, v25 = t2
+ butterfly_8h_r v7, v22, v29, v22 // v7 = t7, v22 = t3
+
+ dmbutterfly_l v10, v11, v8, v9, v19, v28, v0.h[2], v0.h[3] // v10,v11 = t13, v8,v9 = t12
+ dmbutterfly_l v12, v13, v14, v15, v30, v17, v0.h[3], v0.h[2] // v12,v13 = t14, v14,v15 = t15
+
+ dbutterfly_n v18, v30, v8, v9, v12, v13, v16, v17, v12, v13 // v18 = out[2], v30 = t14a
+ dbutterfly_n v29, v17, v10, v11, v14, v15, v12, v13, v14, v15 // v29 = -out[13], v17 = t15a
+ neg v29.8h, v29.8h // v29 = out[13]
+
+ dmbutterfly_l v10, v11, v8, v9, v4, v5, v0.h[2], v0.h[3] // v10,v11 = t5a, v8,v9 = t4a
+ dmbutterfly_l v12, v13, v14, v15, v7, v6, v0.h[3], v0.h[2] // v12,v13 = t6a, v14,v15 = t7a
+
+ butterfly_8h v2, v6, v27, v25 // v2 = out[0], v6 = t2a
+ butterfly_8h v3, v7, v23, v21 // v3 =-out[1], v7 = t10
+
+ dbutterfly_n v19, v31, v8, v9, v12, v13, v4, v5, v8, v9 // v19 = -out[3], v31 = t6
+ neg v19.8h, v19.8h // v19 = out[3]
+ dbutterfly_n v28, v16, v10, v11, v14, v15, v4, v5, v10, v11 // v28 = out[12], v16 = t7
+
+ butterfly_8h v5, v8, v20, v22 // v5 =-out[15],v8 = t3a
+ butterfly_8h v4, v9, v24, v26 // v4 = out[14],v9 = t11
+
+ dmbutterfly0 v23, v24, v6, v8, v10, v11, v12, v13, v14, v15, 1 // v23 = out[7], v24 = out[8]
+ dmbutterfly0 v21, v26, v30, v17, v10, v11, v12, v13, v14, v15, 1 // v21 = out[5], v26 = out[10]
+ dmbutterfly0 v20, v27, v16, v31, v10, v11, v12, v13, v14, v15 // v20 = out[4], v27 = out[11]
+ dmbutterfly0 v22, v25, v9, v7, v10, v11, v12, v13, v14, v15 // v22 = out[6], v25 = out[9]
+
+ neg v31.8h, v5.8h // v31 = out[15]
+ neg v17.8h, v3.8h // v17 = out[1]
+
+ mov v16.16b, v2.16b
+ mov v30.16b, v4.16b
+ ret
+endfunc
+
+// Helper macros; we can't use these expressions directly within
+// e.g. .irp due to the extra concatenation \(). Therefore wrap
+// them in macros to allow using .irp below.
+.macro load i, src, inc
+ ld1 {v\i\().8h}, [\src], \inc
+.endm
+.macro store i, dst, inc
+ st1 {v\i\().8h}, [\dst], \inc
+.endm
+.macro movi_v i, size, imm
+ movi v\i\()\size, \imm
+.endm
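+// load_clear also writes back a row of zeros (the callers keep a zero
+// vector in v2), so the coefficient block is cleared as it is read.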
+.macro load_clear i, src, inc
+ ld1 {v\i\().8h}, [\src]
+ st1 {v2.8h}, [\src], \inc
+.endm
+
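+// Shift the 8 coefficient rows down by 6 with rounding, add them to the
+// corresponding destination rows (widening add, then saturating narrow
+// back to 8 bit) and store the result, using x0 and x3 as row pointers.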
+.macro load_add_store coef0, coef1, coef2, coef3, coef4, coef5, coef6, coef7, tmp1, tmp2
+ srshr \coef0, \coef0, #6
+ ld1 {v2.8b}, [x0], x1
+ srshr \coef1, \coef1, #6
+ ld1 {v3.8b}, [x3], x1
+ srshr \coef2, \coef2, #6
+ ld1 {v4.8b}, [x0], x1
+ srshr \coef3, \coef3, #6
+ uaddw \coef0, \coef0, v2.8b
+ ld1 {v5.8b}, [x3], x1
+ uaddw \coef1, \coef1, v3.8b
+ srshr \coef4, \coef4, #6
+ ld1 {v6.8b}, [x0], x1
+ srshr \coef5, \coef5, #6
+ ld1 {v7.8b}, [x3], x1
+ sqxtun v2.8b, \coef0
+ srshr \coef6, \coef6, #6
+ sqxtun v3.8b, \coef1
+ srshr \coef7, \coef7, #6
+ uaddw \coef2, \coef2, v4.8b
+ ld1 {\tmp1}, [x0], x1
+ uaddw \coef3, \coef3, v5.8b
+ ld1 {\tmp2}, [x3], x1
+ sqxtun v4.8b, \coef2
+ sub x0, x0, x1, lsl #2
+ sub x3, x3, x1, lsl #2
+ sqxtun v5.8b, \coef3
+ uaddw \coef4, \coef4, v6.8b
+ st1 {v2.8b}, [x0], x1
+ uaddw \coef5, \coef5, v7.8b
+ st1 {v3.8b}, [x3], x1
+ sqxtun v6.8b, \coef4
+ st1 {v4.8b}, [x0], x1
+ sqxtun v7.8b, \coef5
+ st1 {v5.8b}, [x3], x1
+ uaddw \coef6, \coef6, \tmp1
+ st1 {v6.8b}, [x0], x1
+ uaddw \coef7, \coef7, \tmp2
+ st1 {v7.8b}, [x3], x1
+ sqxtun \tmp1, \coef6
+ sqxtun \tmp2, \coef7
+ st1 {\tmp1}, [x0], x1
+ st1 {\tmp2}, [x3], x1
+.endm
+
+// Read a vertical 8x16 slice out of a 16x16 matrix, do a transform on it,
+// transpose into a horizontal 16x8 slice and store.
+// x0 = dst (temp buffer)
+// x1 = slice offset
+// x2 = src
+// x9 = input stride
+.macro itxfm16_1d_funcs txfm
+function \txfm\()16_1d_8x16_pass1_neon
+ mov x14, x30
+
+ movi v2.8h, #0
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ load_clear \i, x2, x9
+.endr
+
+ bl \txfm\()16
+
+ // Do two 8x8 transposes. Originally, v16-v31 contain the
+ // 16 rows. Afterwards, v16-v23 and v24-v31 contain the two
+ // transposed 8x8 blocks.
+ transpose_8x8H v16, v17, v18, v19, v20, v21, v22, v23, v2, v3
+ transpose_8x8H v24, v25, v26, v27, v28, v29, v30, v31, v2, v3
+
+ // Store the transposed 8x8 blocks horizontally.
+ cmp x1, #8
+ b.eq 1f
+.irp i, 16, 24, 17, 25, 18, 26, 19, 27, 20, 28, 21, 29, 22, 30, 23, 31
+ store \i, x0, #16
+.endr
+ ret x14
+1:
+ // Special case: For the last input column (x1 == 8),
+ // which would be stored as the last row in the temp buffer,
+ // don't store the first 8x8 block, but keep it in registers
+ // for the first slice of the second pass (where it is the
+ // last 8x8 block).
+.irp i, 24, 25, 26, 27, 28, 29, 30, 31
+ add x0, x0, #16
+ store \i, x0, #16
+.endr
+ mov v24.16b, v16.16b
+ mov v25.16b, v17.16b
+ mov v26.16b, v18.16b
+ mov v27.16b, v19.16b
+ mov v28.16b, v20.16b
+ mov v29.16b, v21.16b
+ mov v30.16b, v22.16b
+ mov v31.16b, v23.16b
+ ret x14
+endfunc
+
+// Read a vertical 8x16 slice out of a 16x16 matrix, do a transform on it,
+// load the destination pixels (from a similar 8x16 slice), add and store back.
+// x0 = dst
+// x1 = dst stride
+// x2 = src (temp buffer)
+// x3 = slice offset
+// x9 = temp buffer stride
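+// For the first slice (x3 == 0), v24-v31 are not reloaded here; pass1 left
+// the corresponding 8x8 block in those registers (see the special case
+// above).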
+function \txfm\()16_1d_8x16_pass2_neon
+ mov x14, x30
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load \i, x2, x9
+.endr
+ cbz x3, 1f
+.irp i, 24, 25, 26, 27, 28, 29, 30, 31
+ load \i, x2, x9
+.endr
+1:
+
+ add x3, x0, x1
+ lsl x1, x1, #1
+ bl \txfm\()16
+
+ load_add_store v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v16.8b, v17.8b
+ load_add_store v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h, v16.8b, v17.8b
+
+ ret x14
+endfunc
+.endm
+
+itxfm16_1d_funcs idct
+itxfm16_1d_funcs iadst
+
+.macro itxfm_func16x16 txfm1, txfm2
+function ff_vp9_\txfm1\()_\txfm2\()_16x16_add_neon, export=1
+.ifc \txfm1\()_\txfm2,idct_idct
+ cmp w3, #1
+ b.eq idct16x16_dc_add_neon
+.endif
+ mov x15, x30
+ // iadst16 requires clobbering v8-v15, but idct16 doesn't need to.
+.ifnc \txfm1\()_\txfm2,idct_idct
+ stp d8, d9, [sp, #-0x40]!
+ stp d14, d15, [sp, #0x30]
+ stp d12, d13, [sp, #0x20]
+ stp d10, d11, [sp, #0x10]
+.endif
+
+ sub sp, sp, #512
+
+ mov x4, x0
+ mov x5, x1
+ mov x6, x2
+
+ movrel x10, idct_coeffs
+.ifnc \txfm1\()_\txfm2,idct_idct
+ movrel x11, iadst16_coeffs
+.endif
+.ifc \txfm1,idct
+ ld1 {v0.8h,v1.8h}, [x10]
+.endif
+ mov x9, #32
+
+.ifc \txfm1\()_\txfm2,idct_idct
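+        // For small eob values, all nonzero coefficients lie in the top
+        // left 4x4 (eob <= 10) or 8x8 (eob <= 38) corner, so a reduced
+        // quarter/half version of the idct is sufficient.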
+ cmp w3, #10
+ b.le idct16x16_quarter_add_neon
+ cmp w3, #38
+ b.le idct16x16_half_add_neon
+.endif
+
+.irp i, 0, 8
+ add x0, sp, #(\i*32)
+.ifc \txfm1\()_\txfm2,idct_idct
+.if \i == 8
+ cmp w3, #38
+ b.le 1f
+.endif
+.endif
+ mov x1, #\i
+ add x2, x6, #(\i*2)
+ bl \txfm1\()16_1d_8x16_pass1_neon
+.endr
+.ifc \txfm1\()_\txfm2,iadst_idct
+ ld1 {v0.8h,v1.8h}, [x10]
+.endif
+
+.ifc \txfm1\()_\txfm2,idct_idct
+ b 3f
+1:
+ // Set v24-v31 to zero, for the in-register passthrough of
+ // coefficients to pass 2. Since we only do two slices, this can
+ // only ever happen for the second slice. So we only need to store
+ // zeros to the temp buffer for the second half of the buffer.
+ // Move x0 to the second half, and use x9 == 32 as increment.
+ add x0, x0, #16
+.irp i, 24, 25, 26, 27, 28, 29, 30, 31
+ movi_v \i, .16b, #0
+ st1 {v24.8h}, [x0], x9
+.endr
+3:
+.endif
+
+.irp i, 0, 8
+ add x0, x4, #(\i)
+ mov x1, x5
+ add x2, sp, #(\i*2)
+ mov x3, #\i
+ bl \txfm2\()16_1d_8x16_pass2_neon
+.endr
+
+ add sp, sp, #512
+.ifnc \txfm1\()_\txfm2,idct_idct
+ ldp d10, d11, [sp, #0x10]
+ ldp d12, d13, [sp, #0x20]
+ ldp d14, d15, [sp, #0x30]
+ ldp d8, d9, [sp], #0x40
+.endif
+ ret x15
+endfunc
+.endm
+
+itxfm_func16x16 idct, idct
+itxfm_func16x16 iadst, idct
+itxfm_func16x16 idct, iadst
+itxfm_func16x16 iadst, iadst
+
+function idct16_1d_8x16_pass1_quarter_neon
+ mov x14, x30
+ movi v2.8h, #0
+.irp i, 16, 17, 18, 19
+ load_clear \i, x2, x9
+.endr
+
+ bl idct16_quarter
+
+ // Do two 8x8 transposes. Originally, v16-v31 contain the
+ // 16 rows. Afterwards, v16-v23 and v24-v31 contain the two
+ // transposed 8x8 blocks.
+ transpose_8x8H v16, v17, v18, v19, v20, v21, v22, v23, v2, v3
+ transpose_8x8H v24, v25, v26, v27, v28, v29, v30, v31, v2, v3
+
+ // Store the transposed 8x8 blocks horizontally.
+ // The first 8x8 block is kept in registers for the second pass,
+ // store the rest in the temp buffer.
+ // Since only a 4x4 part of the input was nonzero, this means that
+ // only 4 rows are nonzero after transposing, and the second pass
+ // only reads the topmost 4 rows. Therefore only store the topmost
+ // 4 rows.
+ add x0, x0, #16
+.irp i, 24, 25, 26, 27
+ store \i, x0, x9
+.endr
+ ret x14
+endfunc
+
+function idct16_1d_8x16_pass2_quarter_neon
+ mov x14, x30
+ cbz x3, 1f
+.irp i, 16, 17, 18, 19
+ load \i, x2, x9
+.endr
+1:
+
+ add x3, x0, x1
+ lsl x1, x1, #1
+ bl idct16_quarter
+
+ load_add_store v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v16.8b, v17.8b
+ load_add_store v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h, v16.8b, v17.8b
+
+ ret x14
+endfunc
+
+function idct16_1d_8x16_pass1_half_neon
+ mov x14, x30
+ movi v2.8h, #0
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load_clear \i, x2, x9
+.endr
+
+ bl idct16_half
+
+ // Do two 8x8 transposes. Originally, v16-v31 contain the
+ // 16 rows. Afterwards, v16-v23 and v24-v31 contain the two
+ // transposed 8x8 blocks.
+ transpose_8x8H v16, v17, v18, v19, v20, v21, v22, v23, v2, v3
+ transpose_8x8H v24, v25, v26, v27, v28, v29, v30, v31, v2, v3
+
+ // Store the transposed 8x8 blocks horizontally.
+ // The first 8x8 block is kept in registers for the second pass,
+ // store the rest in the temp buffer.
+ add x0, x0, #16
+.irp i, 24, 25, 26, 27, 28, 29, 30, 31
+ store \i, x0, x9
+.endr
+ ret x14
+endfunc
+
+function idct16_1d_8x16_pass2_half_neon
+ mov x14, x30
+ cbz x3, 1f
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load \i, x2, x9
+.endr
+1:
+
+ add x3, x0, x1
+ lsl x1, x1, #1
+ bl idct16_half
+
+ load_add_store v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v16.8b, v17.8b
+ load_add_store v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h, v16.8b, v17.8b
+
+ ret x14
+endfunc
+
+.macro idct16_partial size
+function idct16x16_\size\()_add_neon
+ add x0, sp, #(0*32)
+ add x2, x6, #(0*2)
+ bl idct16_1d_8x16_pass1_\size\()_neon
+.irp i, 0, 8
+ add x0, x4, #(\i)
+ mov x1, x5
+ add x2, sp, #(\i*2)
+ mov x3, #\i
+ bl idct16_1d_8x16_pass2_\size\()_neon
+.endr
+
+ add sp, sp, #512
+ ret x15
+endfunc
+.endm
+
+idct16_partial quarter
+idct16_partial half
+
+function idct32x32_dc_add_neon
+ movrel x4, idct_coeffs
+ ld1 {v0.4h}, [x4]
+
+ movi v1.4h, #0
+
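+        // Multiply the single DC coefficient by the first idct coefficient
+        // twice (once per 1-D pass), with a rounding narrowing shift by 14
+        // after each multiplication, then broadcast the result to all lanes.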
+ ld1 {v2.h}[0], [x2]
+ smull v2.4s, v2.4h, v0.h[0]
+ rshrn v2.4h, v2.4s, #14
+ smull v2.4s, v2.4h, v0.h[0]
+ rshrn v2.4h, v2.4s, #14
+ dup v2.8h, v2.h[0]
+ st1 {v1.h}[0], [x2]
+
+ srshr v0.8h, v2.8h, #6
+
+ mov x3, x0
+ mov x4, #32
+1:
+ // Loop to add the constant v0 into all 32x32 outputs
+ subs x4, x4, #2
+ ld1 {v1.16b,v2.16b}, [x0], x1
+ uaddw v16.8h, v0.8h, v1.8b
+ uaddw2 v17.8h, v0.8h, v1.16b
+ ld1 {v3.16b,v4.16b}, [x0], x1
+ uaddw v18.8h, v0.8h, v2.8b
+ uaddw2 v19.8h, v0.8h, v2.16b
+ uaddw v20.8h, v0.8h, v3.8b
+ uaddw2 v21.8h, v0.8h, v3.16b
+ uaddw v22.8h, v0.8h, v4.8b
+ uaddw2 v23.8h, v0.8h, v4.16b
+ sqxtun v1.8b, v16.8h
+ sqxtun2 v1.16b, v17.8h
+ sqxtun v2.8b, v18.8h
+ sqxtun2 v2.16b, v19.8h
+ sqxtun v3.8b, v20.8h
+ sqxtun2 v3.16b, v21.8h
+ st1 {v1.16b,v2.16b}, [x3], x1
+ sqxtun v4.8b, v22.8h
+ sqxtun2 v4.16b, v23.8h
+ st1 {v3.16b,v4.16b}, [x3], x1
+ b.ne 1b
+
+ ret
+endfunc
+
+.macro idct32_end
+ butterfly_8h v16, v5, v4, v5 // v16 = t16a, v5 = t19a
+ butterfly_8h v17, v20, v23, v20 // v17 = t17, v20 = t18
+ butterfly_8h v18, v6, v7, v6 // v18 = t23a, v6 = t20a
+ butterfly_8h v19, v21, v22, v21 // v19 = t22, v21 = t21
+ butterfly_8h v4, v28, v28, v30 // v4 = t24a, v28 = t27a
+ butterfly_8h v23, v26, v25, v26 // v23 = t25, v26 = t26
+ butterfly_8h v7, v3, v29, v31 // v7 = t31a, v3 = t28a
+ butterfly_8h v22, v27, v24, v27 // v22 = t30, v27 = t29
+
+ dmbutterfly v27, v20, v0.h[2], v0.h[3], v24, v25, v30, v31 // v27 = t18a, v20 = t29a
+ dmbutterfly v3, v5, v0.h[2], v0.h[3], v24, v25, v30, v31 // v3 = t19, v5 = t28
+ dmbutterfly v28, v6, v0.h[2], v0.h[3], v24, v25, v30, v31, neg=1 // v28 = t27, v6 = t20
+ dmbutterfly v26, v21, v0.h[2], v0.h[3], v24, v25, v30, v31, neg=1 // v26 = t26a, v21 = t21a
+
+ butterfly_8h v31, v24, v7, v4 // v31 = t31, v24 = t24
+ butterfly_8h v30, v25, v22, v23 // v30 = t30a, v25 = t25a
+ butterfly_8h_r v23, v16, v16, v18 // v23 = t23, v16 = t16
+ butterfly_8h_r v22, v17, v17, v19 // v22 = t22a, v17 = t17a
+ butterfly_8h v18, v21, v27, v21 // v18 = t18, v21 = t21
+ butterfly_8h_r v27, v28, v5, v28 // v27 = t27a, v28 = t28a
+ butterfly_8h v29, v26, v20, v26 // v29 = t29, v26 = t26
+ butterfly_8h v19, v20, v3, v6 // v19 = t19a, v20 = t20
+
+ dmbutterfly0 v27, v20, v27, v20, v2, v3, v4, v5, v6, v7 // v27 = t27, v20 = t20
+ dmbutterfly0 v26, v21, v26, v21, v2, v3, v4, v5, v6, v7 // v26 = t26a, v21 = t21a
+ dmbutterfly0 v25, v22, v25, v22, v2, v3, v4, v5, v6, v7 // v25 = t25, v22 = t22
+ dmbutterfly0 v24, v23, v24, v23, v2, v3, v4, v5, v6, v7 // v24 = t24a, v23 = t23a
+ ret
+.endm
+
+function idct32_odd
+ dmbutterfly v16, v31, v8.h[0], v8.h[1], v4, v5, v6, v7 // v16 = t16a, v31 = t31a
+ dmbutterfly v24, v23, v8.h[2], v8.h[3], v4, v5, v6, v7 // v24 = t17a, v23 = t30a
+ dmbutterfly v20, v27, v8.h[4], v8.h[5], v4, v5, v6, v7 // v20 = t18a, v27 = t29a
+ dmbutterfly v28, v19, v8.h[6], v8.h[7], v4, v5, v6, v7 // v28 = t19a, v19 = t28a
+ dmbutterfly v18, v29, v9.h[0], v9.h[1], v4, v5, v6, v7 // v18 = t20a, v29 = t27a
+ dmbutterfly v26, v21, v9.h[2], v9.h[3], v4, v5, v6, v7 // v26 = t21a, v21 = t26a
+ dmbutterfly v22, v25, v9.h[4], v9.h[5], v4, v5, v6, v7 // v22 = t22a, v25 = t25a
+ dmbutterfly v30, v17, v9.h[6], v9.h[7], v4, v5, v6, v7 // v30 = t23a, v17 = t24a
+
+ butterfly_8h v4, v24, v16, v24 // v4 = t16, v24 = t17
+ butterfly_8h v5, v20, v28, v20 // v5 = t19, v20 = t18
+ butterfly_8h v6, v26, v18, v26 // v6 = t20, v26 = t21
+ butterfly_8h v7, v22, v30, v22 // v7 = t23, v22 = t22
+ butterfly_8h v28, v25, v17, v25 // v28 = t24, v25 = t25
+ butterfly_8h v30, v21, v29, v21 // v30 = t27, v21 = t26
+ butterfly_8h v29, v23, v31, v23 // v29 = t31, v23 = t30
+ butterfly_8h v31, v27, v19, v27 // v31 = t28, v27 = t29
+
+ dmbutterfly v23, v24, v0.h[4], v0.h[5], v16, v17, v18, v19 // v23 = t17a, v24 = t30a
+ dmbutterfly v27, v20, v0.h[4], v0.h[5], v16, v17, v18, v19, neg=1 // v27 = t29a, v20 = t18a
+ dmbutterfly v21, v26, v0.h[6], v0.h[7], v16, v17, v18, v19 // v21 = t21a, v26 = t26a
+ dmbutterfly v25, v22, v0.h[6], v0.h[7], v16, v17, v18, v19, neg=1 // v25 = t25a, v22 = t22a
+ idct32_end
+endfunc
+
+function idct32_odd_half
+ dmbutterfly_h1 v16, v31, v8.h[0], v8.h[1], v4, v5, v6, v7 // v16 = t16a, v31 = t31a
+ dmbutterfly_h2 v24, v23, v8.h[2], v8.h[3], v4, v5, v6, v7 // v24 = t17a, v23 = t30a
+ dmbutterfly_h1 v20, v27, v8.h[4], v8.h[5], v4, v5, v6, v7 // v20 = t18a, v27 = t29a
+ dmbutterfly_h2 v28, v19, v8.h[6], v8.h[7], v4, v5, v6, v7 // v28 = t19a, v19 = t28a
+ dmbutterfly_h1 v18, v29, v9.h[0], v9.h[1], v4, v5, v6, v7 // v18 = t20a, v29 = t27a
+ dmbutterfly_h2 v26, v21, v9.h[2], v9.h[3], v4, v5, v6, v7 // v26 = t21a, v21 = t26a
+ dmbutterfly_h1 v22, v25, v9.h[4], v9.h[5], v4, v5, v6, v7 // v22 = t22a, v25 = t25a
+ dmbutterfly_h2 v30, v17, v9.h[6], v9.h[7], v4, v5, v6, v7 // v30 = t23a, v17 = t24a
+
+ butterfly_8h v4, v24, v16, v24 // v4 = t16, v24 = t17
+ butterfly_8h v5, v20, v28, v20 // v5 = t19, v20 = t18
+ butterfly_8h v6, v26, v18, v26 // v6 = t20, v26 = t21
+ butterfly_8h v7, v22, v30, v22 // v7 = t23, v22 = t22
+ butterfly_8h v28, v25, v17, v25 // v28 = t24, v25 = t25
+ butterfly_8h v30, v21, v29, v21 // v30 = t27, v21 = t26
+ butterfly_8h v29, v23, v31, v23 // v29 = t31, v23 = t30
+ butterfly_8h v31, v27, v19, v27 // v31 = t28, v27 = t29
+
+ dmbutterfly v23, v24, v0.h[4], v0.h[5], v16, v17, v18, v19 // v23 = t17a, v24 = t30a
+ dmbutterfly v27, v20, v0.h[4], v0.h[5], v16, v17, v18, v19, neg=1 // v27 = t29a, v20 = t18a
+ dmbutterfly v21, v26, v0.h[6], v0.h[7], v16, v17, v18, v19 // v21 = t21a, v26 = t26a
+ dmbutterfly v25, v22, v0.h[6], v0.h[7], v16, v17, v18, v19, neg=1 // v25 = t25a, v22 = t22a
+ idct32_end
+endfunc
+
+function idct32_odd_quarter
+ dsmull_h v4, v5, v16, v8.h[0]
+ dsmull_h v28, v29, v19, v8.h[7]
+ dsmull_h v30, v31, v16, v8.h[1]
+ dsmull_h v22, v23, v17, v9.h[6]
+ dsmull_h v7, v6, v17, v9.h[7]
+ dsmull_h v26, v27, v19, v8.h[6]
+ dsmull_h v20, v21, v18, v9.h[0]
+ dsmull_h v24, v25, v18, v9.h[1]
+
+ neg v28.4s, v28.4s
+ neg v29.4s, v29.4s
+ neg v7.4s, v7.4s
+ neg v6.4s, v6.4s
+
+ drshrn_h v4, v4, v5, #14
+ drshrn_h v5, v28, v29, #14
+ drshrn_h v29, v30, v31, #14
+ drshrn_h v28, v22, v23, #14
+ drshrn_h v7, v7, v6, #14
+ drshrn_h v31, v26, v27, #14
+ drshrn_h v6, v20, v21, #14
+ drshrn_h v30, v24, v25, #14
+
+ dmbutterfly_l v16, v17, v18, v19, v29, v4, v0.h[4], v0.h[5]
+ dmbutterfly_l v27, v26, v20, v21, v31, v5, v0.h[4], v0.h[5]
+ drshrn_h v23, v16, v17, #14
+ drshrn_h v24, v18, v19, #14
+ neg v20.4s, v20.4s
+ neg v21.4s, v21.4s
+ drshrn_h v27, v27, v26, #14
+ drshrn_h v20, v20, v21, #14
+ dmbutterfly_l v16, v17, v18, v19, v30, v6, v0.h[6], v0.h[7]
+ drshrn_h v21, v16, v17, #14
+ drshrn_h v26, v18, v19, #14
+ dmbutterfly_l v16, v17, v18, v19, v28, v7, v0.h[6], v0.h[7]
+ drshrn_h v25, v16, v17, #14
+ neg v18.4s, v18.4s
+ neg v19.4s, v19.4s
+ drshrn_h v22, v18, v19, #14
+
+ idct32_end
+endfunc
+
+.macro idct32_funcs suffix
+// Do a 32-point IDCT of an 8x32 slice out of a 32x32 matrix.
+// The 32-point IDCT can be decomposed into two 16-point IDCTs;
+// a normal IDCT16 with every other input component (the even ones, with
+// each output written twice), followed by a separate 16-point IDCT
+// of the odd inputs, added/subtracted onto the outputs of the first idct16.
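+// In other words, roughly out[k] = even[k] + odd[k] and
+// out[31-k] = even[k] - odd[k] for k = 0..15, which is what the mirrored
+// add/subtract storage below implements.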
+// x0 = dst (temp buffer)
+// x1 = unused
+// x2 = src
+// x9 = double input stride
+function idct32_1d_8x32_pass1\suffix\()_neon
+ mov x14, x30
+ movi v2.8h, #0
+
+ // v16 = IN(0), v17 = IN(2) ... v31 = IN(30)
+.ifb \suffix
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ load_clear \i, x2, x9
+.endr
+.endif
+.ifc \suffix,_quarter
+.irp i, 16, 17, 18, 19
+ load_clear \i, x2, x9
+.endr
+.endif
+.ifc \suffix,_half
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load_clear \i, x2, x9
+.endr
+.endif
+
+ bl idct16\suffix
+
+ // Do two 8x8 transposes. Originally, v16-v31 contain the
+ // 16 rows. Afterwards, v16-v23 and v24-v31 contain the
+ // two transposed 8x8 blocks.
+ transpose_8x8H v16, v17, v18, v19, v20, v21, v22, v23, v2, v3
+ transpose_8x8H v24, v25, v26, v27, v28, v29, v30, v31, v2, v3
+
+ // Store the registers a, b horizontally, followed by the
+ // same registers b, a mirrored.
+.macro store_rev a, b
+ // There's no rev128 instruction, but we reverse each 64 bit
+ // half, and then flip them using an ext with 8 bytes offset.
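+        // E.g. rev64 turns the halfwords {0,1,2,3,4,5,6,7} into
+        // {3,2,1,0,7,6,5,4}, and the ext with #8 then rotates that into
+        // the fully reversed {7,6,5,4,3,2,1,0}.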
+ rev64 v3.8h, \b
+ st1 {\a}, [x0], #16
+ rev64 v2.8h, \a
+ ext v3.16b, v3.16b, v3.16b, #8
+ st1 {\b}, [x0], #16
+ ext v2.16b, v2.16b, v2.16b, #8
+ st1 {v3.8h}, [x0], #16
+ st1 {v2.8h}, [x0], #16
+.endm
+ store_rev v16.8h, v24.8h
+ store_rev v17.8h, v25.8h
+ store_rev v18.8h, v26.8h
+ store_rev v19.8h, v27.8h
+ store_rev v20.8h, v28.8h
+ store_rev v21.8h, v29.8h
+ store_rev v22.8h, v30.8h
+ store_rev v23.8h, v31.8h
+ sub x0, x0, #512
+.purgem store_rev
+
+ // Move x2 back to the start of the input, and move
+ // to the first odd row
+.ifb \suffix
+ sub x2, x2, x9, lsl #4
+.endif
+.ifc \suffix,_quarter
+ sub x2, x2, x9, lsl #2
+.endif
+.ifc \suffix,_half
+ sub x2, x2, x9, lsl #3
+.endif
+ add x2, x2, #64
+
+ movi v2.8h, #0
+ // v16 = IN(1), v17 = IN(3) ... v31 = IN(31)
+.ifb \suffix
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ load_clear \i, x2, x9
+.endr
+.endif
+.ifc \suffix,_quarter
+.irp i, 16, 17, 18, 19
+ load_clear \i, x2, x9
+.endr
+.endif
+.ifc \suffix,_half
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load_clear \i, x2, x9
+.endr
+.endif
+
+ bl idct32_odd\suffix
+
+ transpose_8x8H v31, v30, v29, v28, v27, v26, v25, v24, v2, v3
+ transpose_8x8H v23, v22, v21, v20, v19, v18, v17, v16, v2, v3
+
+        // Store the registers a, b horizontally, adding into the output
+        // first, and then store the same registers mirrored, subtracted
+        // from the output.
+.macro store_rev a, b
+ ld1 {v4.8h}, [x0]
+ rev64 v3.8h, \b
+ add v4.8h, v4.8h, \a
+ rev64 v2.8h, \a
+ st1 {v4.8h}, [x0], #16
+ ext v3.16b, v3.16b, v3.16b, #8
+ ld1 {v5.8h}, [x0]
+ ext v2.16b, v2.16b, v2.16b, #8
+ add v5.8h, v5.8h, \b
+ st1 {v5.8h}, [x0], #16
+ ld1 {v6.8h}, [x0]
+ sub v6.8h, v6.8h, v3.8h
+ st1 {v6.8h}, [x0], #16
+ ld1 {v7.8h}, [x0]
+ sub v7.8h, v7.8h, v2.8h
+ st1 {v7.8h}, [x0], #16
+.endm
+
+ store_rev v31.8h, v23.8h
+ store_rev v30.8h, v22.8h
+ store_rev v29.8h, v21.8h
+ store_rev v28.8h, v20.8h
+ store_rev v27.8h, v19.8h
+ store_rev v26.8h, v18.8h
+ store_rev v25.8h, v17.8h
+ store_rev v24.8h, v16.8h
+.purgem store_rev
+ ret x14
+endfunc
+
+// This is mostly the same as 8x32_pass1, but without the transpose;
+// it uses the source as a temp buffer between the two idct passes, and
+// adds into the destination.
+// x0 = dst
+// x1 = dst stride
+// x2 = src (temp buffer)
+// x7 = negative double temp buffer stride
+// x9 = double temp buffer stride
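+// The outputs of the first idct16 are written back into the temp buffer
+// and re-read by the load_acc_store macro below, where they are combined
+// with the outputs of idct32_odd.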
+function idct32_1d_8x32_pass2\suffix\()_neon
+ mov x14, x30
+ // v16 = IN(0), v17 = IN(2) ... v31 = IN(30)
+.ifb \suffix
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #4
+.endif
+.ifc \suffix,_quarter
+.irp i, 16, 17, 18, 19
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #2
+.endif
+.ifc \suffix,_half
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #3
+.endif
+
+ bl idct16\suffix
+
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ store \i, x2, x9
+.endr
+
+ sub x2, x2, x9, lsl #4
+ add x2, x2, #64
+
+ // v16 = IN(1), v17 = IN(3) ... v31 = IN(31)
+.ifb \suffix
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #4
+.endif
+.ifc \suffix,_quarter
+.irp i, 16, 17, 18, 19
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #2
+.endif
+.ifc \suffix,_half
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23
+ load \i, x2, x9
+.endr
+ sub x2, x2, x9, lsl #3
+.endif
+ sub x2, x2, #64
+
+ bl idct32_odd\suffix
+
+.macro load_acc_store a, b, c, d, neg=0
+.if \neg == 0
+ ld1 {v4.8h}, [x2], x9
+ ld1 {v5.8h}, [x2], x9
+ add v4.8h, v4.8h, \a
+ ld1 {v6.8h}, [x2], x9
+ add v5.8h, v5.8h, \b
+ ld1 {v7.8h}, [x2], x9
+ add v6.8h, v6.8h, \c
+ add v7.8h, v7.8h, \d
+.else
+ ld1 {v4.8h}, [x2], x7
+ ld1 {v5.8h}, [x2], x7
+ sub v4.8h, v4.8h, \a
+ ld1 {v6.8h}, [x2], x7
+ sub v5.8h, v5.8h, \b
+ ld1 {v7.8h}, [x2], x7
+ sub v6.8h, v6.8h, \c
+ sub v7.8h, v7.8h, \d
+.endif
+ ld1 {v10.8b}, [x0], x1
+ ld1 {v11.8b}, [x0], x1
+ srshr v4.8h, v4.8h, #6
+ ld1 {v2.8b}, [x0], x1
+ srshr v5.8h, v5.8h, #6
+ uaddw v4.8h, v4.8h, v10.8b
+ ld1 {v3.8b}, [x0], x1
+ srshr v6.8h, v6.8h, #6
+ uaddw v5.8h, v5.8h, v11.8b
+ srshr v7.8h, v7.8h, #6
+ sub x0, x0, x1, lsl #2
+ uaddw v6.8h, v6.8h, v2.8b
+ sqxtun v4.8b, v4.8h
+ uaddw v7.8h, v7.8h, v3.8b
+ sqxtun v5.8b, v5.8h
+ st1 {v4.8b}, [x0], x1
+ sqxtun v6.8b, v6.8h
+ st1 {v5.8b}, [x0], x1
+ sqxtun v7.8b, v7.8h
+ st1 {v6.8b}, [x0], x1
+ st1 {v7.8b}, [x0], x1
+.endm
+ load_acc_store v31.8h, v30.8h, v29.8h, v28.8h
+ load_acc_store v27.8h, v26.8h, v25.8h, v24.8h
+ load_acc_store v23.8h, v22.8h, v21.8h, v20.8h
+ load_acc_store v19.8h, v18.8h, v17.8h, v16.8h
+ sub x2, x2, x9
+ load_acc_store v16.8h, v17.8h, v18.8h, v19.8h, 1
+ load_acc_store v20.8h, v21.8h, v22.8h, v23.8h, 1
+ load_acc_store v24.8h, v25.8h, v26.8h, v27.8h, 1
+ load_acc_store v28.8h, v29.8h, v30.8h, v31.8h, 1
+.purgem load_acc_store
+ ret x14
+endfunc
+.endm
+
+idct32_funcs
+idct32_funcs _quarter
+idct32_funcs _half
+
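+// eob thresholds: if the eob is at most the n'th entry, the n'th and all
+// later 8-column slices of pass 1 contain only zero coefficients and are
+// skipped (zero-filled) in the 32x32 function below.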
+const min_eob_idct_idct_32, align=4
+ .short 0, 34, 135, 336
+endconst
+
+function ff_vp9_idct_idct_32x32_add_neon, export=1
+ cmp w3, #1
+ b.eq idct32x32_dc_add_neon
+
+ movrel x10, idct_coeffs
+
+ mov x15, x30
+
+ stp d8, d9, [sp, #-0x20]!
+ stp d10, d11, [sp, #0x10]
+
+ sub sp, sp, #2048
+
+ mov x4, x0
+ mov x5, x1
+ mov x6, x2
+
+ // Double stride of the input, since we only read every other line
+ mov x9, #128
+ neg x7, x9
+
+ ld1 {v0.8h,v1.8h}, [x10], #32
+ ld1 {v8.8h,v9.8h}, [x10]
+
+ cmp w3, #34
+ b.le idct32x32_quarter_add_neon
+ cmp w3, #135
+ b.le idct32x32_half_add_neon
+
+ movrel x12, min_eob_idct_idct_32, 2
+
+.irp i, 0, 8, 16, 24
+ add x0, sp, #(\i*64)
+.if \i > 0
+ ldrh w1, [x12], #2
+ cmp w3, w1
+ mov x1, #(32 - \i)/4
+ b.le 1f
+.endif
+ add x2, x6, #(\i*2)
+ bl idct32_1d_8x32_pass1_neon
+.endr
+ b 3f
+
+1:
+ // Write zeros to the temp buffer for pass 2
+ movi v16.8h, #0
+ movi v17.8h, #0
+ movi v18.8h, #0
+ movi v19.8h, #0
+2:
+ subs x1, x1, #1
+.rept 4
+ st1 {v16.8h,v17.8h,v18.8h,v19.8h}, [x0], #64
+.endr
+ b.ne 2b
+3:
+.irp i, 0, 8, 16, 24
+ add x0, x4, #(\i)
+ mov x1, x5
+ add x2, sp, #(\i*2)
+ bl idct32_1d_8x32_pass2_neon
+.endr
+
+ add sp, sp, #2048
+
+ ldp d10, d11, [sp, #0x10]
+ ldp d8, d9, [sp], #0x20
+
+ ret x15
+endfunc
+
+.macro idct32_partial size
+function idct32x32_\size\()_add_neon
+ add x0, sp, #(0*64)
+ add x2, x6, #(0*2)
+ bl idct32_1d_8x32_pass1_\size\()_neon
+.ifc \size,half
+ add x0, sp, #(8*64)
+ add x2, x6, #(8*2)
+ bl idct32_1d_8x32_pass1_\size\()_neon
+.endif
+.irp i, 0, 8, 16, 24
+ add x0, x4, #(\i)
+ mov x1, x5
+ add x2, sp, #(\i*2)
+ bl idct32_1d_8x32_pass2_\size\()_neon
+.endr
+
+ add sp, sp, #2048
+
+ ldp d10, d11, [sp, #0x10]
+ ldp d8, d9, [sp], #0x20
+
+ ret x15
+endfunc
+.endm
+
+idct32_partial quarter
+idct32_partial half
diff --git a/media/ffvpx/libavcodec/aarch64/vp9lpf_16bpp_neon.S b/media/ffvpx/libavcodec/aarch64/vp9lpf_16bpp_neon.S
new file mode 100644
index 0000000000..e3e70491c6
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9lpf_16bpp_neon.S
@@ -0,0 +1,861 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+#include "neon.S"
+
+
+// The input to and output from this macro is in the registers v16-v31,
+// and v0-v7 are used as scratch registers.
+// p7 = v16 .. p3 = v20, p0 = v23, q0 = v24, q3 = v27, q7 = v31
+// Depending on the width of the loop filter, we either use v16-v19
+// and v28-v31 as temp registers, or v8-v15.
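+// The filtering is done in stages: "fm" selects which pixels get filtered
+// at all, "hev" (high edge variance) restricts the normal filter to p0/q0
+// only, "flat8in" replaces p2..q2 with a flat averaging filter, and for
+// wd=16 "flat8out" additionally replaces p6..q6 with the wide flat filter.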
+.macro loop_filter wd, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8
+ dup v0.8h, w2 // E
+ dup v2.8h, w3 // I
+ dup v3.8h, w4 // H
+
+ uabd v4.8h, v20.8h, v21.8h // abs(p3 - p2)
+ uabd v5.8h, v21.8h, v22.8h // abs(p2 - p1)
+ uabd v6.8h, v22.8h, v23.8h // abs(p1 - p0)
+ uabd v7.8h, v24.8h, v25.8h // abs(q0 - q1)
+ uabd \tmp1\().8h, v25.8h, v26.8h // abs(q1 - q2)
+ uabd \tmp2\().8h, v26.8h, v27.8h // abs(q2 - q3)
+ umax v4.8h, v4.8h, v5.8h
+ umax v5.8h, v6.8h, v7.8h
+ umax \tmp1\().8h, \tmp1\().8h, \tmp2\().8h
+ uabd v6.8h, v23.8h, v24.8h // abs(p0 - q0)
+ umax v4.8h, v4.8h, v5.8h
+ add v6.8h, v6.8h, v6.8h // abs(p0 - q0) * 2
+ uabd v5.8h, v22.8h, v25.8h // abs(p1 - q1)
+ umax v4.8h, v4.8h, \tmp1\().8h // max(abs(p3 - p2), ..., abs(q2 - q3))
+ ushr v5.8h, v5.8h, #1
+ cmhs v4.8h, v2.8h, v4.8h // max(abs()) <= I
+ add v6.8h, v6.8h, v5.8h // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
+ cmhs v6.8h, v0.8h, v6.8h
+ and v4.16b, v4.16b, v6.16b // fm
+
+ // If no pixels need filtering, just exit as soon as possible
+ mov x11, v4.d[0]
+ mov x12, v4.d[1]
+ adds x11, x11, x12
+ b.ne 1f
+ ret x10
+1:
+
+.if \wd >= 8
+ dup v0.8h, w5
+
+ uabd v6.8h, v20.8h, v23.8h // abs(p3 - p0)
+ uabd v2.8h, v21.8h, v23.8h // abs(p2 - p0)
+ uabd v1.8h, v22.8h, v23.8h // abs(p1 - p0)
+ uabd \tmp1\().8h, v25.8h, v24.8h // abs(q1 - q0)
+ uabd \tmp2\().8h, v26.8h, v24.8h // abs(q2 - q0)
+ uabd \tmp3\().8h, v27.8h, v24.8h // abs(q3 - q0)
+ umax v6.8h, v6.8h, v2.8h
+ umax v1.8h, v1.8h, \tmp1\().8h
+ umax \tmp2\().8h, \tmp2\().8h, \tmp3\().8h
+.if \wd == 16
+ uabd v7.8h, v16.8h, v23.8h // abs(p7 - p0)
+ umax v6.8h, v6.8h, v1.8h
+ uabd v2.8h, v17.8h, v23.8h // abs(p6 - p0)
+ umax v6.8h, v6.8h, \tmp2\().8h
+ uabd v1.8h, v18.8h, v23.8h // abs(p5 - p0)
+ cmhs v6.8h, v0.8h, v6.8h // flat8in
+ uabd v8.8h, v19.8h, v23.8h // abs(p4 - p0)
+ and v6.16b, v6.16b, v4.16b // flat8in && fm
+ uabd v9.8h, v28.8h, v24.8h // abs(q4 - q0)
+ bic v4.16b, v4.16b, v6.16b // fm && !flat8in
+ uabd v10.8h, v29.8h, v24.8h // abs(q5 - q0)
+ uabd v11.8h, v30.8h, v24.8h // abs(q6 - q0)
+ uabd v12.8h, v31.8h, v24.8h // abs(q7 - q0)
+
+ umax v7.8h, v7.8h, v2.8h
+ umax v1.8h, v1.8h, v8.8h
+ umax v9.8h, v9.8h, v10.8h
+ umax v11.8h, v11.8h, v12.8h
+ // The rest of the calculation of flat8out is interleaved below
+.else
+ // The rest of the calculation of flat8in is interleaved below
+.endif
+.endif
+
+ // Calculate the normal inner loop filter for 2 or 4 pixels
+ uabd v5.8h, v22.8h, v23.8h // abs(p1 - p0)
+.if \wd == 16
+ umax v7.8h, v7.8h, v1.8h
+ umax v9.8h, v9.8h, v11.8h
+.elseif \wd == 8
+ umax v6.8h, v6.8h, v1.8h
+.endif
+ uabd v1.8h, v25.8h, v24.8h // abs(q1 - q0)
+.if \wd == 16
+ umax v7.8h, v7.8h, v9.8h
+.elseif \wd == 8
+ umax v6.8h, v6.8h, \tmp2\().8h
+.endif
+ dup \tmp2\().8h, w6 // left shift for saturation
+ sub \tmp1\().8h, v22.8h, v25.8h // p1 - q1
+ neg \tmp6\().8h, \tmp2\().8h // negative left shift after saturation
+ umax v5.8h, v5.8h, v1.8h // max(abs(p1 - p0), abs(q1 - q0))
+ sub \tmp3\().8h, v24.8h, v23.8h // q0 - p0
+ movi \tmp5\().8h, #3
+.if \wd == 8
+ cmhs v6.8h, v0.8h, v6.8h // flat8in
+.endif
+ cmhs v5.8h, v3.8h, v5.8h // !hev
+.if \wd == 8
+ and v6.16b, v6.16b, v4.16b // flat8in && fm
+.endif
+ sqshl \tmp1\().8h, \tmp1\().8h, \tmp2\().8h
+.if \wd == 16
+ cmhs v7.8h, v0.8h, v7.8h // flat8out
+.elseif \wd == 8
+ bic v4.16b, v4.16b, v6.16b // fm && !flat8in
+.endif
+ and v5.16b, v5.16b, v4.16b // !hev && fm && !flat8in
+.if \wd == 16
+ and v7.16b, v7.16b, v6.16b // flat8out && flat8in && fm
+.endif
+ sshl \tmp1\().8h, \tmp1\().8h, \tmp6\().8h // av_clip_int2p(p1 - q1, BIT_DEPTH - 1)
+
+ mul \tmp3\().8h, \tmp3\().8h, \tmp5\().8h // 3 * (q0 - p0)
+ bic \tmp1\().16b, \tmp1\().16b, v5.16b // if (!hev) av_clip_int8 = 0
+ movi v2.8h, #4
+ add \tmp3\().8h, \tmp3\().8h, \tmp1\().8h // 3 * (q0 - p0) [+ av_clip_int8(p1 - q1)]
+ movi v3.8h, #3
+ sqshl \tmp1\().8h, \tmp3\().8h, \tmp2\().8h
+ movi \tmp5\().8h, #0
+ sshl \tmp1\().8h, \tmp1\().8h, \tmp6\().8h // av_clip_int2p(3 * (q0 - p0) [+ av_clip_int2p(p1 - q1)], BIT_DEPTH - 1) = f
+ dup \tmp6\().8h, w7 // max pixel value
+.if \wd == 16
+ bic v6.16b, v6.16b, v7.16b // fm && flat8in && !flat8out
+.endif
+
+ ushr \tmp2\().8h, \tmp6\().8h, #1 // (1 << (BIT_DEPTH - 1)) - 1
+
+ add \tmp3\().8h, \tmp1\().8h, v2.8h // f + 4
+ add \tmp4\().8h, \tmp1\().8h, v3.8h // f + 3
+ smin \tmp3\().8h, \tmp3\().8h, \tmp2\().8h // FFMIN(f + 4, (1 << (BIT_DEPTH - 1)) - 1)
+ smin \tmp4\().8h, \tmp4\().8h, \tmp2\().8h // FFMIN(f + 3, (1 << (BIT_DEPTH - 1)) - 1)
+ sshr \tmp3\().8h, \tmp3\().8h, #3 // f1
+ sshr \tmp4\().8h, \tmp4\().8h, #3 // f2
+
+ add v0.8h, v23.8h, \tmp4\().8h // p0 + f2
+ sub v2.8h, v24.8h, \tmp3\().8h // q0 - f1
+ smin v0.8h, v0.8h, \tmp6\().8h
+ smin v2.8h, v2.8h, \tmp6\().8h
+ srshr \tmp3\().8h, \tmp3\().8h, #1 // f = (f1 + 1) >> 1
+ smax v0.8h, v0.8h, \tmp5\().8h // out p0
+ smax v2.8h, v2.8h, \tmp5\().8h // out q0
+ bit v23.16b, v0.16b, v4.16b // if (fm && !flat8in)
+ bit v24.16b, v2.16b, v4.16b
+
+ add v0.8h, v22.8h, \tmp3\().8h // p1 + f
+ sub v2.8h, v25.8h, \tmp3\().8h // q1 - f
+.if \wd >= 8
+ mov x11, v6.d[0]
+.endif
+ smin v0.8h, v0.8h, \tmp6\().8h
+ smin v2.8h, v2.8h, \tmp6\().8h
+.if \wd >= 8
+ mov x12, v6.d[1]
+.endif
+ smax v0.8h, v0.8h, \tmp5\().8h // out p1
+ smax v2.8h, v2.8h, \tmp5\().8h // out q1
+.if \wd >= 8
+ adds x11, x11, x12
+.endif
+ bit v22.16b, v0.16b, v5.16b // if (!hev && fm && !flat8in)
+ bit v25.16b, v2.16b, v5.16b
+
+ // If no pixels need flat8in, jump to flat8out
+ // (or to a writeout of the inner 4 pixels, for wd=8)
+.if \wd >= 8
+.if \wd == 16
+ b.eq 6f
+.else
+ b.ne 1f
+ ret x13
+1:
+.endif
+
+ // flat8in
+ add \tmp1\().8h, v20.8h, v21.8h
+ add \tmp3\().8h, v22.8h, v25.8h
+ add \tmp5\().8h, v20.8h, v22.8h
+ add \tmp7\().8h, v23.8h, v26.8h
+ add v0.8h, \tmp1\().8h, \tmp1\().8h
+ add v0.8h, v0.8h, v23.8h
+ add v0.8h, v0.8h, v24.8h
+ add v0.8h, v0.8h, \tmp5\().8h
+ sub \tmp3\().8h, \tmp3\().8h, \tmp1\().8h
+ sub \tmp7\().8h, \tmp7\().8h, \tmp5\().8h
+ urshr v2.8h, v0.8h, #3 // out p2
+
+ add v0.8h, v0.8h, \tmp3\().8h
+ add \tmp1\().8h, v20.8h, v23.8h
+ add \tmp3\().8h, v24.8h, v27.8h
+ urshr v3.8h, v0.8h, #3 // out p1
+
+ add v0.8h, v0.8h, \tmp7\().8h
+ sub \tmp3\().8h, \tmp3\().8h, \tmp1\().8h
+ add \tmp5\().8h, v21.8h, v24.8h
+ add \tmp7\().8h, v25.8h, v27.8h
+ urshr v4.8h, v0.8h, #3 // out p0
+
+ add v0.8h, v0.8h, \tmp3\().8h
+ sub \tmp7\().8h, \tmp7\().8h, \tmp5\().8h
+ add \tmp1\().8h, v22.8h, v25.8h
+ add \tmp3\().8h, v26.8h, v27.8h
+ urshr v5.8h, v0.8h, #3 // out q0
+
+ add v0.8h, v0.8h, \tmp7\().8h
+ sub \tmp3\().8h, \tmp3\().8h, \tmp1\().8h
+ urshr \tmp5\().8h, v0.8h, #3 // out q1
+
+ add v0.8h, v0.8h, \tmp3\().8h
+ // The output here is written back into the input registers. This doesn't
+        // matter for the flat8out part below, since we only update those pixels
+ // which won't be touched below.
+ bit v21.16b, v2.16b, v6.16b
+ bit v22.16b, v3.16b, v6.16b
+ bit v23.16b, v4.16b, v6.16b
+ urshr \tmp6\().8h, v0.8h, #3 // out q2
+ bit v24.16b, v5.16b, v6.16b
+ bit v25.16b, \tmp5\().16b, v6.16b
+ bit v26.16b, \tmp6\().16b, v6.16b
+.endif
+.if \wd == 16
+6:
+ orr v2.16b, v6.16b, v7.16b
+ mov x11, v2.d[0]
+ mov x12, v2.d[1]
+ adds x11, x11, x12
+ b.ne 1f
+ // If no pixels needed flat8in nor flat8out, jump to a
+ // writeout of the inner 4 pixels
+ ret x14
+1:
+
+ mov x11, v7.d[0]
+ mov x12, v7.d[1]
+ adds x11, x11, x12
+ b.ne 1f
+ // If no pixels need flat8out, jump to a writeout of the inner 6 pixels
+ ret x15
+
+1:
+ // flat8out
+        // This writes all outputs into v2-v17 (skipping v7 and v16).
+ // If this part is skipped, the output is read from v21-v26 (which is the input
+ // to this section).
+ shl v0.8h, v16.8h, #3 // 8 * v16
+ sub v0.8h, v0.8h, v16.8h // 7 * v16
+ add v0.8h, v0.8h, v17.8h
+ add v8.8h, v17.8h, v18.8h
+ add v10.8h, v19.8h, v20.8h
+ add v0.8h, v0.8h, v8.8h
+ add v8.8h, v16.8h, v17.8h
+ add v12.8h, v21.8h, v22.8h
+ add v0.8h, v0.8h, v10.8h
+ add v10.8h, v18.8h, v25.8h
+ add v14.8h, v23.8h, v24.8h
+ sub v10.8h, v10.8h, v8.8h
+ add v0.8h, v0.8h, v12.8h
+ add v0.8h, v0.8h, v14.8h
+ add v12.8h, v16.8h, v18.8h
+ add v14.8h, v19.8h, v26.8h
+ urshr v2.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v10.8h
+ add v8.8h, v16.8h, v19.8h
+ add v10.8h, v20.8h, v27.8h
+ sub v14.8h, v14.8h, v12.8h
+ bif v2.16b, v17.16b, v7.16b
+ urshr v3.8h , v0.8h, #4
+
+ add v0.8h, v0.8h, v14.8h
+ add v12.8h, v16.8h, v20.8h
+ add v14.8h, v21.8h, v28.8h
+ sub v10.8h, v10.8h, v8.8h
+ bif v3.16b, v18.16b, v7.16b
+ urshr v4.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v10.8h
+ add v8.8h, v16.8h, v21.8h
+ add v10.8h, v22.8h, v29.8h
+ sub v14.8h, v14.8h, v12.8h
+ bif v4.16b, v19.16b, v7.16b
+ urshr v5.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v14.8h
+ add v12.8h, v16.8h, v22.8h
+ add v14.8h, v23.8h, v30.8h
+ sub v10.8h, v10.8h, v8.8h
+ bif v5.16b, v20.16b, v7.16b
+ urshr v6.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v10.8h
+ add v10.8h, v16.8h, v23.8h
+ sub v14.8h, v14.8h, v12.8h
+ add v12.8h, v24.8h, v31.8h
+ bif v6.16b, v21.16b, v7.16b
+ urshr v8.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v14.8h
+ sub v10.8h, v12.8h, v10.8h
+ add v12.8h, v17.8h, v24.8h
+ add v14.8h, v25.8h, v31.8h
+ bif v8.16b, v22.16b, v7.16b
+ urshr v9.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v10.8h
+ sub v14.8h, v14.8h, v12.8h
+ add v12.8h, v26.8h, v31.8h
+ bif v9.16b, v23.16b, v7.16b
+ urshr v10.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v14.8h
+ add v14.8h, v18.8h, v25.8h
+ add v18.8h, v19.8h, v26.8h
+ sub v12.8h, v12.8h, v14.8h
+ add v14.8h, v27.8h, v31.8h
+ bif v10.16b, v24.16b, v7.16b
+ urshr v11.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v12.8h
+ add v12.8h, v20.8h, v27.8h
+ sub v14.8h, v14.8h, v18.8h
+ add v18.8h, v28.8h, v31.8h
+ bif v11.16b, v25.16b, v7.16b
+ sub v18.8h, v18.8h, v12.8h
+ urshr v12.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v14.8h
+ add v14.8h, v21.8h, v28.8h
+ add v20.8h, v29.8h, v31.8h
+ bif v12.16b, v26.16b, v7.16b
+ urshr v13.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v18.8h
+ sub v20.8h, v20.8h, v14.8h
+ add v18.8h, v22.8h, v29.8h
+ add v22.8h, v30.8h, v31.8h
+ bif v13.16b, v27.16b, v7.16b
+ urshr v14.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v20.8h
+ sub v22.8h, v22.8h, v18.8h
+ bif v14.16b, v28.16b, v7.16b
+ urshr v15.8h, v0.8h, #4
+
+ add v0.8h, v0.8h, v22.8h
+ bif v15.16b, v29.16b, v7.16b
+ urshr v17.8h, v0.8h, #4
+ bif v17.16b, v30.16b, v7.16b
+.endif
+.endm
+
+// For wd <= 8, we use v16-v19 and v28-v31 as temp registers.
+// For wd=16 those registers are needed as inputs/outputs, so v8-v15
+// are used as temp registers there instead.
+function vp9_loop_filter_4
+ loop_filter 4, v16, v17, v18, v19, v28, v29, v30, v31
+ ret
+endfunc
+
+function vp9_loop_filter_8
+ loop_filter 8, v16, v17, v18, v19, v28, v29, v30, v31
+ ret
+endfunc
+
+function vp9_loop_filter_16
+ loop_filter 16, v8, v9, v10, v11, v12, v13, v14, v15
+ ret
+endfunc
+
+.macro loop_filter_4
+ bl vp9_loop_filter_4
+.endm
+
+.macro loop_filter_8
+ // calculate alternative 'return' targets
+ adr x13, 6f
+ bl vp9_loop_filter_8
+.endm
+
+.macro loop_filter_16
+ // calculate alternative 'return' targets
+ adr x14, 7f
+ adr x15, 8f
+ bl vp9_loop_filter_16
+.endm
+
+
+// The public functions in this file have the following signature:
+// void loop_filter(uint8_t *dst, ptrdiff_t stride, int mb_lim, int lim, int hev_thr);
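+// The 10/12 bpp entry points below scale the E, I and H thresholds up by
+// (bpp - 8) bits and pass the bit depth dependent constants (flat
+// threshold, saturation shift, max pixel value) in x5-x7 before calling
+// the shared 16 bpp core functions.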
+
+.macro bpp_frontend func, bpp, push
+function ff_\func\()_\bpp\()_neon, export=1
+.if \push
+ mov x16, x30
+ stp d8, d9, [sp, #-0x40]!
+ stp d14, d15, [sp, #0x30]
+ stp d12, d13, [sp, #0x20]
+ stp d10, d11, [sp, #0x10]
+.endif
+ lsl w2, w2, #\bpp - 8
+ lsl w3, w3, #\bpp - 8
+ lsl w4, w4, #\bpp - 8
+ mov x5, #1 << (\bpp - 8)
+ mov x6, #16 - \bpp
+ mov x7, #((1 << \bpp) - 1)
+.if \push
+ bl \func\()_16_neon
+ ldp d10, d11, [sp, #0x10]
+ ldp d12, d13, [sp, #0x20]
+ ldp d14, d15, [sp, #0x30]
+ ldp d8, d9, [sp], #0x40
+ ret x16
+.else
+ b \func\()_16_neon
+.endif
+endfunc
+.endm
+
+.macro bpp_frontends func, push=0
+ bpp_frontend \func, 10, \push
+ bpp_frontend \func, 12, \push
+.endm
+
+.macro bpp_frontend_rep func, suffix, int_suffix, dir, bpp, push
+function ff_\func\()_\suffix\()_\bpp\()_neon, export=1
+ mov x16, x30
+.if \push
+ stp d8, d9, [sp, #-0x40]!
+ stp d14, d15, [sp, #0x30]
+ stp d12, d13, [sp, #0x20]
+ stp d10, d11, [sp, #0x10]
+.endif
+ lsl w2, w2, #\bpp - 8
+ lsl w3, w3, #\bpp - 8
+ lsl w4, w4, #\bpp - 8
+ mov x5, #1 << (\bpp - 8)
+ mov x6, #16 - \bpp
+ mov x7, #((1 << \bpp) - 1)
+ bl \func\()_\int_suffix\()_16_neon
+.ifc \dir,h
+ add x0, x0, x1, lsl #3
+.else
+ add x0, x0, #16
+.endif
+ bl \func\()_\int_suffix\()_16_neon
+.if \push
+ ldp d10, d11, [sp, #0x10]
+ ldp d12, d13, [sp, #0x20]
+ ldp d14, d15, [sp, #0x30]
+ ldp d8, d9, [sp], 0x40
+.endif
+ ret x16
+endfunc
+.endm
+
+.macro bpp_frontends_rep func, suffix, int_suffix, dir, push=0
+ bpp_frontend_rep \func, \suffix, \int_suffix, \dir, 10, \push
+ bpp_frontend_rep \func, \suffix, \int_suffix, \dir, 12, \push
+.endm
+
+.macro bpp_frontend_mix2 wd1, wd2, dir, bpp
+function ff_vp9_loop_filter_\dir\()_\wd1\()\wd2\()_16_\bpp\()_neon, export=1
+ mov x16, x30
+ lsr w8, w2, #8
+ lsr w14, w3, #8
+ lsr w15, w4, #8
+ and w2, w2, #0xff
+ and w3, w3, #0xff
+ and w4, w4, #0xff
+ lsl w2, w2, #\bpp - 8
+ lsl w3, w3, #\bpp - 8
+ lsl w4, w4, #\bpp - 8
+ mov x5, #1 << (\bpp - 8)
+ mov x6, #16 - \bpp
+ mov x7, #((1 << \bpp) - 1)
+ bl vp9_loop_filter_\dir\()_\wd1\()_8_16_neon
+.ifc \dir,h
+ add x0, x0, x1, lsl #3
+.else
+ add x0, x0, #16
+.endif
+ lsl w2, w8, #\bpp - 8
+ lsl w3, w14, #\bpp - 8
+ lsl w4, w15, #\bpp - 8
+ bl vp9_loop_filter_\dir\()_\wd2\()_8_16_neon
+ ret x16
+endfunc
+.endm
+
+.macro bpp_frontends_mix2 wd1, wd2
+ bpp_frontend_mix2 \wd1, \wd2, v, 10
+ bpp_frontend_mix2 \wd1, \wd2, v, 12
+ bpp_frontend_mix2 \wd1, \wd2, h, 10
+ bpp_frontend_mix2 \wd1, \wd2, h, 12
+.endm
+
+function vp9_loop_filter_v_4_8_16_neon
+ mov x10, x30
+ sub x9, x0, x1, lsl #2
+ ld1 {v20.8h}, [x9], x1 // p3
+ ld1 {v24.8h}, [x0], x1 // q0
+ ld1 {v21.8h}, [x9], x1 // p2
+ ld1 {v25.8h}, [x0], x1 // q1
+ ld1 {v22.8h}, [x9], x1 // p1
+ ld1 {v26.8h}, [x0], x1 // q2
+ ld1 {v23.8h}, [x9], x1 // p0
+ ld1 {v27.8h}, [x0], x1 // q3
+ sub x0, x0, x1, lsl #2
+ sub x9, x9, x1, lsl #1
+
+ loop_filter_4
+
+ st1 {v22.8h}, [x9], x1
+ st1 {v24.8h}, [x0], x1
+ st1 {v23.8h}, [x9], x1
+ st1 {v25.8h}, [x0], x1
+ sub x0, x0, x1, lsl #1
+
+ ret x10
+endfunc
+
+bpp_frontends vp9_loop_filter_v_4_8
+
+function vp9_loop_filter_h_4_8_16_neon
+ mov x10, x30
+ sub x9, x0, #8
+ add x0, x9, x1, lsl #2
+ ld1 {v20.8h}, [x9], x1
+ ld1 {v24.8h}, [x0], x1
+ ld1 {v21.8h}, [x9], x1
+ ld1 {v25.8h}, [x0], x1
+ ld1 {v22.8h}, [x9], x1
+ ld1 {v26.8h}, [x0], x1
+ ld1 {v23.8h}, [x9], x1
+ ld1 {v27.8h}, [x0], x1
+
+ sub x9, x9, x1, lsl #2
+ sub x0, x0, x1, lsl #3
+ add x0, x0, #8
+
+ transpose_8x8H v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ loop_filter_4
+
+ // Move x9 forward by 2 pixels; we don't need to rewrite the
+ // outermost 2 pixels since they aren't changed.
+ add x9, x9, #4
+ add x0, x9, x1, lsl #2
+
+        // We will only write the mid 4 pixels back; after the loop filter,
+        // these are in v22, v23, v24, v25, ordered as rows (8x4 pixels).
+        // We need to transpose them to columns, which is done with a 4x8
+        // transpose (in practice two 4x4 transposes of the two 4x4 halves
+        // of the 8x4 pixels, yielding 4x8 pixels).
+ transpose_4x8H v22, v23, v24, v25, v26, v27, v28, v29
+ st1 {v22.d}[0], [x9], x1
+ st1 {v22.d}[1], [x0], x1
+ st1 {v23.d}[0], [x9], x1
+ st1 {v23.d}[1], [x0], x1
+ st1 {v24.d}[0], [x9], x1
+ st1 {v24.d}[1], [x0], x1
+ st1 {v25.d}[0], [x9], x1
+ st1 {v25.d}[1], [x0], x1
+ sub x0, x0, x1, lsl #3
+ add x0, x0, #4
+
+ ret x10
+endfunc
+
+bpp_frontends vp9_loop_filter_h_4_8
+
+function vp9_loop_filter_v_8_8_16_neon
+ mov x10, x30
+ sub x9, x0, x1, lsl #2
+ ld1 {v20.8h}, [x9], x1 // p3
+ ld1 {v24.8h}, [x0], x1 // q0
+ ld1 {v21.8h}, [x9], x1 // p2
+ ld1 {v25.8h}, [x0], x1 // q1
+ ld1 {v22.8h}, [x9], x1 // p1
+ ld1 {v26.8h}, [x0], x1 // q2
+ ld1 {v23.8h}, [x9], x1 // p0
+ ld1 {v27.8h}, [x0], x1 // q3
+ sub x9, x9, x1, lsl #2
+ sub x0, x0, x1, lsl #2
+ add x9, x9, x1
+
+ loop_filter_8
+
+ st1 {v21.8h}, [x9], x1
+ st1 {v24.8h}, [x0], x1
+ st1 {v22.8h}, [x9], x1
+ st1 {v25.8h}, [x0], x1
+ st1 {v23.8h}, [x9], x1
+ st1 {v26.8h}, [x0], x1
+ sub x0, x0, x1, lsl #1
+ sub x0, x0, x1
+
+ ret x10
+6:
+ sub x9, x0, x1, lsl #1
+ st1 {v22.8h}, [x9], x1
+ st1 {v24.8h}, [x0], x1
+ st1 {v23.8h}, [x9], x1
+ st1 {v25.8h}, [x0], x1
+ sub x0, x0, x1, lsl #1
+ ret x10
+endfunc
+
+bpp_frontends vp9_loop_filter_v_8_8
+
+function vp9_loop_filter_h_8_8_16_neon
+ mov x10, x30
+ sub x9, x0, #8
+ add x0, x9, x1, lsl #2
+ ld1 {v20.8h}, [x9], x1
+ ld1 {v24.8h}, [x0], x1
+ ld1 {v21.8h}, [x9], x1
+ ld1 {v25.8h}, [x0], x1
+ ld1 {v22.8h}, [x9], x1
+ ld1 {v26.8h}, [x0], x1
+ ld1 {v23.8h}, [x9], x1
+ ld1 {v27.8h}, [x0], x1
+
+ sub x9, x9, x1, lsl #2
+ sub x0, x0, x1, lsl #3
+ add x0, x0, #8
+
+ transpose_8x8H v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ loop_filter_8
+
+ add x0, x9, x1, lsl #2
+
+ // Even though only 6 pixels per row have been changed, we write the
+ // full 8 pixel registers.
+ transpose_8x8H v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ st1 {v20.8h}, [x9], x1
+ st1 {v24.8h}, [x0], x1
+ st1 {v21.8h}, [x9], x1
+ st1 {v25.8h}, [x0], x1
+ st1 {v22.8h}, [x9], x1
+ st1 {v26.8h}, [x0], x1
+ st1 {v23.8h}, [x9], x1
+ st1 {v27.8h}, [x0], x1
+ sub x0, x0, x1, lsl #3
+ add x0, x0, #8
+
+ ret x10
+6:
+ // If we didn't need to do the flat8in part, we use the same writeback
+ // as in loop_filter_h_4_8.
+ add x9, x9, #4
+ add x0, x9, x1, lsl #2
+ transpose_4x8H v22, v23, v24, v25, v26, v27, v28, v29
+ st1 {v22.d}[0], [x9], x1
+ st1 {v22.d}[1], [x0], x1
+ st1 {v23.d}[0], [x9], x1
+ st1 {v23.d}[1], [x0], x1
+ st1 {v24.d}[0], [x9], x1
+ st1 {v24.d}[1], [x0], x1
+ st1 {v25.d}[0], [x9], x1
+ st1 {v25.d}[1], [x0], x1
+ sub x0, x0, x1, lsl #3
+ add x0, x0, #4
+ ret x10
+endfunc
+
+bpp_frontends vp9_loop_filter_h_8_8
+
+bpp_frontends_mix2 4, 4
+bpp_frontends_mix2 4, 8
+bpp_frontends_mix2 8, 4
+bpp_frontends_mix2 8, 8
+
+function vp9_loop_filter_v_16_8_16_neon
+ mov x10, x30
+ sub x9, x0, x1, lsl #3
+ ld1 {v16.8h}, [x9], x1 // p7
+ ld1 {v24.8h}, [x0], x1 // q0
+ ld1 {v17.8h}, [x9], x1 // p6
+ ld1 {v25.8h}, [x0], x1 // q1
+ ld1 {v18.8h}, [x9], x1 // p5
+ ld1 {v26.8h}, [x0], x1 // q2
+ ld1 {v19.8h}, [x9], x1 // p4
+ ld1 {v27.8h}, [x0], x1 // q3
+ ld1 {v20.8h}, [x9], x1 // p3
+ ld1 {v28.8h}, [x0], x1 // q4
+ ld1 {v21.8h}, [x9], x1 // p2
+ ld1 {v29.8h}, [x0], x1 // q5
+ ld1 {v22.8h}, [x9], x1 // p1
+ ld1 {v30.8h}, [x0], x1 // q6
+ ld1 {v23.8h}, [x9], x1 // p0
+ ld1 {v31.8h}, [x0], x1 // q7
+ sub x9, x9, x1, lsl #3
+ sub x0, x0, x1, lsl #3
+ add x9, x9, x1
+
+ loop_filter_16
+
+ // If we did the flat8out part, we get the output in
+ // v2-v17 (skipping v7 and v16). x9 points to x0 - 7 * stride,
+ // store v2-v9 there, and v10-v17 into x0.
+ st1 {v2.8h}, [x9], x1
+ st1 {v10.8h}, [x0], x1
+ st1 {v3.8h}, [x9], x1
+ st1 {v11.8h}, [x0], x1
+ st1 {v4.8h}, [x9], x1
+ st1 {v12.8h}, [x0], x1
+ st1 {v5.8h}, [x9], x1
+ st1 {v13.8h}, [x0], x1
+ st1 {v6.8h}, [x9], x1
+ st1 {v14.8h}, [x0], x1
+ st1 {v8.8h}, [x9], x1
+ st1 {v15.8h}, [x0], x1
+ st1 {v9.8h}, [x9], x1
+ st1 {v17.8h}, [x0], x1
+ sub x0, x0, x1, lsl #3
+ add x0, x0, x1
+
+ ret x10
+8:
+ add x9, x9, x1, lsl #2
+ // If we didn't do the flat8out part, the output is left in the
+ // input registers.
+ st1 {v21.8h}, [x9], x1
+ st1 {v24.8h}, [x0], x1
+ st1 {v22.8h}, [x9], x1
+ st1 {v25.8h}, [x0], x1
+ st1 {v23.8h}, [x9], x1
+ st1 {v26.8h}, [x0], x1
+ sub x0, x0, x1, lsl #1
+ sub x0, x0, x1
+ ret x10
+7:
+ sub x9, x0, x1, lsl #1
+ st1 {v22.8h}, [x9], x1
+ st1 {v24.8h}, [x0], x1
+ st1 {v23.8h}, [x9], x1
+ st1 {v25.8h}, [x0], x1
+ sub x0, x0, x1, lsl #1
+ ret x10
+endfunc
+
+bpp_frontends vp9_loop_filter_v_16_8, push=1
+bpp_frontends_rep vp9_loop_filter_v_16, 16, 8, v, push=1
+
+function vp9_loop_filter_h_16_8_16_neon
+ mov x10, x30
+ sub x9, x0, #16
+ ld1 {v16.8h}, [x9], x1
+ ld1 {v24.8h}, [x0], x1
+ ld1 {v17.8h}, [x9], x1
+ ld1 {v25.8h}, [x0], x1
+ ld1 {v18.8h}, [x9], x1
+ ld1 {v26.8h}, [x0], x1
+ ld1 {v19.8h}, [x9], x1
+ ld1 {v27.8h}, [x0], x1
+ ld1 {v20.8h}, [x9], x1
+ ld1 {v28.8h}, [x0], x1
+ ld1 {v21.8h}, [x9], x1
+ ld1 {v29.8h}, [x0], x1
+ ld1 {v22.8h}, [x9], x1
+ ld1 {v30.8h}, [x0], x1
+ ld1 {v23.8h}, [x9], x1
+ ld1 {v31.8h}, [x0], x1
+ sub x0, x0, x1, lsl #3
+ sub x9, x9, x1, lsl #3
+
+        // The 16x8 pixels read above are in two 8x8 blocks: the left
+        // half in v16-v23 and the right half in v24-v31. Do two 8x8
+        // transposes of these to get one column per register.
+ transpose_8x8H v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
+ transpose_8x8H v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
+
+ loop_filter_16
+
+ transpose_8x8H v16, v2, v3, v4, v5, v6, v8, v9, v0, v1
+ transpose_8x8H v10, v11, v12, v13, v14, v15, v17, v31, v0, v1
+
+ st1 {v16.8h}, [x9], x1
+ st1 {v10.8h}, [x0], x1
+ st1 {v2.8h}, [x9], x1
+ st1 {v11.8h}, [x0], x1
+ st1 {v3.8h}, [x9], x1
+ st1 {v12.8h}, [x0], x1
+ st1 {v4.8h}, [x9], x1
+ st1 {v13.8h}, [x0], x1
+ st1 {v5.8h}, [x9], x1
+ st1 {v14.8h}, [x0], x1
+ st1 {v6.8h}, [x9], x1
+ st1 {v15.8h}, [x0], x1
+ st1 {v8.8h}, [x9], x1
+ st1 {v17.8h}, [x0], x1
+ st1 {v9.8h}, [x9], x1
+ st1 {v31.8h}, [x0], x1
+ sub x0, x0, x1, lsl #3
+
+ ret x10
+8:
+ // The same writeback as in loop_filter_h_8_8
+ sub x9, x0, #8
+ add x0, x9, x1, lsl #2
+ transpose_8x8H v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ st1 {v20.8h}, [x9], x1
+ st1 {v24.8h}, [x0], x1
+ st1 {v21.8h}, [x9], x1
+ st1 {v25.8h}, [x0], x1
+ st1 {v22.8h}, [x9], x1
+ st1 {v26.8h}, [x0], x1
+ st1 {v23.8h}, [x9], x1
+ st1 {v27.8h}, [x0], x1
+ sub x0, x0, x1, lsl #3
+ add x0, x0, #8
+ ret x10
+7:
+ // The same writeback as in loop_filter_h_4_8
+ sub x9, x0, #4
+ add x0, x9, x1, lsl #2
+ transpose_4x8H v22, v23, v24, v25, v26, v27, v28, v29
+ st1 {v22.d}[0], [x9], x1
+ st1 {v22.d}[1], [x0], x1
+ st1 {v23.d}[0], [x9], x1
+ st1 {v23.d}[1], [x0], x1
+ st1 {v24.d}[0], [x9], x1
+ st1 {v24.d}[1], [x0], x1
+ st1 {v25.d}[0], [x9], x1
+ st1 {v25.d}[1], [x0], x1
+ sub x0, x0, x1, lsl #3
+ add x0, x0, #4
+ ret x10
+endfunc
+
+bpp_frontends vp9_loop_filter_h_16_8, push=1
+bpp_frontends_rep vp9_loop_filter_h_16, 16, 8, h, push=1
diff --git a/media/ffvpx/libavcodec/aarch64/vp9lpf_neon.S b/media/ffvpx/libavcodec/aarch64/vp9lpf_neon.S
new file mode 100644
index 0000000000..9a79f48df3
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9lpf_neon.S
@@ -0,0 +1,1334 @@
+/*
+ * Copyright (c) 2016 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+#include "neon.S"
+
+
+// The main loop filter macro is templated and can produce filters for
+// vectors of 8 or 16 bytes. The register mapping throughout the filter
+// is close to identical to the arm version (please try to maintain this,
+// if either is changed!). When the arm version uses e.g. d20 for the
+// input variable p3, the aarch64 version uses v20.8b or v20.16b, depending
+// on vector length.
+//
+// The number of elements in the vector is passed in via the macro parameter
+// \sz, which is either .8b or .16b. For simple instructions that don't
+// lengthen or narrow things, this can easily be templated like this:
+// uabd v4\sz, v20\sz, v21\sz
+//
+// For instructions that lengthen or narrow content, the arm version would
+// have used q registers. For these instructions, we have macros that expand
+// into either a single e.g. uaddl instruction, or into a uaddl + uaddl2
+// pair, depending on the \sz parameter. Wherever the arm version would have
+// used a q register, these macros instead take two v registers, i.e. q3
+// is mapped to v6+v7. For the case with 8 byte input vectors, such a
+// lengthening operation is only stored in v6.8h (what was in q3 in the arm
+// case), while the 16 byte input vectors will use v6.8h + v7.8h.
+// Such a macro invocation would look like this:
+// uaddl_sz v8.8h, v9.8h, v17, v18, \sz
+//
+// That is, in the 8 byte input vector case, the second register in these
+// register pairs will be unused.
+// Unfortunately, this makes the code quite hard to read. For readability,
+// see the arm version instead.
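+// As an example, "uaddl_sz v8.8h, v9.8h, v17, v18, .8b" expands to just
+//     uaddl           v8.8h, v17.8b, v18.8b
+// while the same invocation with .16b also emits the second half:
+//     uaddl2          v9.8h, v17.16b, v18.16b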
+
+
+.macro add_sz dst1, dst2, in1, in2, in3, in4, sz
+ add \dst1, \in1, \in3
+.ifc \sz, .16b
+ add \dst2, \in2, \in4
+.endif
+.endm
+
+.macro sub_sz dst1, dst2, in1, in2, in3, in4, sz
+ sub \dst1, \in1, \in3
+.ifc \sz, .16b
+ sub \dst2, \in2, \in4
+.endif
+.endm
+
+.macro uaddw_sz dst1, dst2, in1, in2, in3, sz
+ uaddw \dst1, \in1, \in3\().8b
+.ifc \sz, .16b
+ uaddw2 \dst2, \in2, \in3\().16b
+.endif
+.endm
+
+.macro usubw_sz dst1, dst2, in1, in2, in3, sz
+ usubw \dst1, \in1, \in3\().8b
+.ifc \sz, .16b
+ usubw2 \dst2, \in2, \in3\().16b
+.endif
+.endm
+
+.macro usubl_sz dst1, dst2, in1, in2, sz
+ usubl \dst1, \in1\().8b, \in2\().8b
+.ifc \sz, .16b
+ usubl2 \dst2, \in1\().16b, \in2\().16b
+.endif
+.endm
+
+.macro sqxtn_sz dst, in1, in2, sz
+ sqxtn \dst\().8b, \in1
+.ifc \sz, .16b
+ sqxtn2 \dst\().16b, \in2
+.endif
+.endm
+
+.macro sqxtun_sz dst, in1, in2, sz
+ sqxtun \dst\().8b, \in1
+.ifc \sz, .16b
+ sqxtun2 \dst\().16b, \in2
+.endif
+.endm
+
+.macro mul_sz dst1, dst2, in1, in2, in3, in4, sz
+ mul \dst1, \in1, \in3
+.ifc \sz, .16b
+ mul \dst2, \in2, \in4
+.endif
+.endm
+
+.macro saddw_sz dst1, dst2, in1, in2, in3, sz
+ saddw \dst1, \in1, \in3\().8b
+.ifc \sz, .16b
+ saddw2 \dst2, \in2, \in3\().16b
+.endif
+.endm
+
+.macro ssubw_sz dst1, dst2, in1, in2, in3, sz
+ ssubw \dst1, \in1, \in3\().8b
+.ifc \sz, .16b
+ ssubw2 \dst2, \in2, \in3\().16b
+.endif
+.endm
+
+.macro uxtl_sz dst1, dst2, in, sz
+ uxtl \dst1, \in\().8b
+.ifc \sz, .16b
+ uxtl2 \dst2, \in\().16b
+.endif
+.endm
+
+.macro uaddl_sz dst1, dst2, in1, in2, sz
+ uaddl \dst1, \in1\().8b, \in2\().8b
+.ifc \sz, .16b
+ uaddl2 \dst2, \in1\().16b, \in2\().16b
+.endif
+.endm
+
+.macro rshrn_sz dst, in1, in2, shift, sz
+ rshrn \dst\().8b, \in1, \shift
+.ifc \sz, .16b
+ rshrn2 \dst\().16b, \in2, \shift
+.endif
+.endm
+
+.macro ushll_sz dst1, dst2, in, shift, sz
+ ushll \dst1, \in\().8b, \shift
+.ifc \sz, .16b
+ ushll2 \dst2, \in\().16b, \shift
+.endif
+.endm
+
+// The input to and output from this macro is in the registers v16-v31,
+// and v0-v7 are used as scratch registers.
+// p7 = v16 .. p3 = v20, p0 = v23, q0 = v24, q3 = v27, q7 = v31
+// Depending on the width of the loop filter, we either use v16-v19
+// and v28-v31 as temp registers, or v8-v15.
+// When comparing to the arm version, tmpq1 == tmp1 + tmp2,
+// tmpq2 == tmp3 + tmp4, etc.
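+// The \mix parameter handles the 4/8 and 8/4 cases, where the two 8-byte
+// halves of a 16-byte vector use different filter widths; the E, I and H
+// values for the two halves are then packed as two bytes each in w2-w4
+// and unpacked per half at the start of the macro.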
+.macro loop_filter wd, sz, mix, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8
+.if \mix == 0
+ dup v0\sz, w2 // E
+ dup v2\sz, w3 // I
+ dup v3\sz, w4 // H
+.else
+ dup v0.8h, w2 // E
+ dup v2.8h, w3 // I
+ dup v3.8h, w4 // H
+ rev16 v1.16b, v0.16b // E
+ rev16 v4.16b, v2.16b // I
+ rev16 v5.16b, v3.16b // H
+ uzp1 v0.16b, v0.16b, v1.16b
+ uzp1 v2.16b, v2.16b, v4.16b
+ uzp1 v3.16b, v3.16b, v5.16b
+.endif
+
+ uabd v4\sz, v20\sz, v21\sz // abs(p3 - p2)
+ uabd v5\sz, v21\sz, v22\sz // abs(p2 - p1)
+ uabd v6\sz, v22\sz, v23\sz // abs(p1 - p0)
+ uabd v7\sz, v24\sz, v25\sz // abs(q0 - q1)
+ uabd \tmp1\sz, v25\sz, v26\sz // abs(q1 - q2)
+ uabd \tmp2\sz, v26\sz, v27\sz // abs(q2 - q3)
+ umax v4\sz, v4\sz, v5\sz
+ umax v5\sz, v6\sz, v7\sz
+ umax \tmp1\sz, \tmp1\sz, \tmp2\sz
+ uabd v6\sz, v23\sz, v24\sz // abs(p0 - q0)
+ umax v4\sz, v4\sz, v5\sz
+ uqadd v6\sz, v6\sz, v6\sz // abs(p0 - q0) * 2
+ uabd v5\sz, v22\sz, v25\sz // abs(p1 - q1)
+ umax v4\sz, v4\sz, \tmp1\sz // max(abs(p3 - p2), ..., abs(q2 - q3))
+ ushr v5\sz, v5\sz, #1
+ cmhs v4\sz, v2\sz, v4\sz // max(abs()) <= I
+ uqadd v6\sz, v6\sz, v5\sz // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
+ cmhs v5\sz, v0\sz, v6\sz
+ and v4\sz, v4\sz, v5\sz // fm
+
+ // If no pixels need filtering, just exit as soon as possible
+ mov x5, v4.d[0]
+.ifc \sz, .16b
+ mov x6, v4.d[1]
+ adds x5, x5, x6
+ b.eq 9f
+.else
+ cbz x5, 9f
+.endif
+
+.if \wd >= 8
+ movi v0\sz, #1
+
+ uabd v6\sz, v20\sz, v23\sz // abs(p3 - p0)
+ uabd v2\sz, v21\sz, v23\sz // abs(p2 - p0)
+ uabd v1\sz, v22\sz, v23\sz // abs(p1 - p0)
+ uabd \tmp1\sz, v25\sz, v24\sz // abs(q1 - q0)
+ uabd \tmp2\sz, v26\sz, v24\sz // abs(q2 - q0)
+ uabd \tmp3\sz, v27\sz, v24\sz // abs(q3 - q0)
+ umax v6\sz, v6\sz, v2\sz
+ umax v1\sz, v1\sz, \tmp1\sz
+ umax \tmp2\sz, \tmp2\sz, \tmp3\sz
+.if \wd == 16
+ uabd v7\sz, v16\sz, v23\sz // abs(p7 - p0)
+ umax v6\sz, v6\sz, v1\sz
+ uabd v2\sz, v17\sz, v23\sz // abs(p6 - p0)
+ umax v6\sz, v6\sz, \tmp2\sz
+ uabd v1\sz, v18\sz, v23\sz // abs(p5 - p0)
+ cmhs v6\sz, v0\sz, v6\sz // flat8in
+ uabd v8\sz, v19\sz, v23\sz // abs(p4 - p0)
+ and v6\sz, v6\sz, v4\sz // flat8in && fm
+ uabd v9\sz, v28\sz, v24\sz // abs(q4 - q0)
+ bic v4\sz, v4\sz, v6\sz // fm && !flat8in
+ uabd v10\sz, v29\sz, v24\sz // abs(q5 - q0)
+ uabd v11\sz, v30\sz, v24\sz // abs(q6 - q0)
+ uabd v12\sz, v31\sz, v24\sz // abs(q7 - q0)
+
+ umax v7\sz, v7\sz, v2\sz
+ umax v1\sz, v1\sz, v8\sz
+ umax v9\sz, v9\sz, v10\sz
+ umax v11\sz, v11\sz, v12\sz
+ // The rest of the calculation of flat8out is interleaved below
+.else
+ // The rest of the calculation of flat8in is interleaved below
+.endif
+.endif
+
+ // Calculate the normal inner loop filter for 2 or 4 pixels
+ uabd v5\sz, v22\sz, v23\sz // abs(p1 - p0)
+.if \wd == 16
+ umax v7\sz, v7\sz, v1\sz
+ umax v9\sz, v9\sz, v11\sz
+.elseif \wd == 8
+ umax v6\sz, v6\sz, v1\sz
+.endif
+ uabd v1\sz, v25\sz, v24\sz // abs(q1 - q0)
+.if \wd == 16
+ umax v7\sz, v7\sz, v9\sz
+.elseif \wd == 8
+ umax v6\sz, v6\sz, \tmp2\sz
+.endif
+ usubl_sz \tmp1\().8h, \tmp2\().8h, v22, v25, \sz // p1 - q1
+ umax v5\sz, v5\sz, v1\sz // max(abs(p1 - p0), abs(q1 - q0))
+.if \mix != 0
+ mov v1.d[0], x11
+.endif
+ usubl_sz \tmp3\().8h, \tmp4\().8h, v24, v23, \sz // q0 - p0
+ movi \tmp5\().8h, #3
+.if \wd == 8
+ cmhs v6\sz, v0\sz, v6\sz // flat8in
+.endif
+.if \mix != 0
+ sxtl v1.8h, v1.8b
+.endif
+ cmhs v5\sz, v3\sz, v5\sz // !hev
+.if \wd == 8
+ // If a 4/8 or 8/4 mix is used, clear the relevant half of v6
+.if \mix != 0
+ and v6\sz, v6\sz, v1.16b
+.endif
+ and v6\sz, v6\sz, v4\sz // flat8in && fm
+.endif
+ sqxtn_sz \tmp1, \tmp1\().8h, \tmp2\().8h, \sz // av_clip_int8(p1 - q1)
+.if \wd == 16
+ cmhs v7\sz, v0\sz, v7\sz // flat8out
+.elseif \wd == 8
+ bic v4\sz, v4\sz, v6\sz // fm && !flat8in
+.endif
+ and v5\sz, v5\sz, v4\sz // !hev && fm && !flat8in
+.if \wd == 16
+ and v7\sz, v7\sz, v6\sz // flat8out && flat8in && fm
+.endif
+
+ mul_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp5\().8h, \tmp5\().8h, \sz // 3 * (q0 - p0)
+ bic \tmp1\sz, \tmp1\sz, v5\sz // if (!hev) av_clip_int8 = 0
+ movi v2\sz, #4
+ saddw_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1, \sz // 3 * (q0 - p0) [+ av_clip_int8(p1 - q1)]
+ movi v3\sz, #3
+ sqxtn_sz \tmp1, \tmp3\().8h, \tmp4\().8h, \sz // f
+.if \wd == 16
+ bic v6\sz, v6\sz, v7\sz // fm && flat8in && !flat8out
+.endif
+
+ sqadd \tmp3\sz, \tmp1\sz, v2\sz // FFMIN(f + 4, 127)
+ sqadd \tmp4\sz, \tmp1\sz, v3\sz // FFMIN(f + 3, 127)
+ uxtl_sz v0.8h, v1.8h, v23, \sz // p0
+ sshr \tmp3\sz, \tmp3\sz, #3 // f1
+ sshr \tmp4\sz, \tmp4\sz, #3 // f2
+
+ uxtl_sz v2.8h, v3.8h, v24, \sz // q0
+ saddw_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp4, \sz // p0 + f2
+ ssubw_sz v2.8h, v3.8h, v2.8h, v3.8h, \tmp3, \sz // q0 - f1
+ sqxtun_sz v0, v0.8h, v1.8h, \sz // out p0
+ sqxtun_sz v1, v2.8h, v3.8h, \sz // out q0
+ srshr \tmp3\sz, \tmp3\sz, #1 // f = (f1 + 1) >> 1
+ bit v23\sz, v0\sz, v4\sz // if (fm && !flat8in)
+ bit v24\sz, v1\sz, v4\sz
+
+ uxtl_sz v0.8h, v1.8h, v22, \sz // p1
+ uxtl_sz v2.8h, v3.8h, v25, \sz // q1
+.if \wd >= 8
+ mov x5, v6.d[0]
+.ifc \sz, .16b
+ mov x6, v6.d[1]
+.endif
+.endif
+ saddw_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3, \sz // p1 + f
+ ssubw_sz v2.8h, v3.8h, v2.8h, v3.8h, \tmp3, \sz // q1 - f
+ sqxtun_sz v0, v0.8h, v1.8h, \sz // out p1
+ sqxtun_sz v2, v2.8h, v3.8h, \sz // out q1
+.if \wd >= 8
+.ifc \sz, .16b
+ adds x5, x5, x6
+.endif
+.endif
+ bit v22\sz, v0\sz, v5\sz // if (!hev && fm && !flat8in)
+ bit v25\sz, v2\sz, v5\sz
+
+ // If no pixels need flat8in, jump to flat8out
+ // (or to a writeout of the inner 4 pixels, for wd=8)
+.if \wd >= 8
+.ifc \sz, .16b
+ b.eq 6f
+.else
+ cbz x5, 6f
+.endif
+
+ // flat8in
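+ // For reference, the scalar outputs computed (interleaved) below are
+ // rounded averages over a sliding 8-tap window; the +4 and >>3 come
+ // from the rshrn #3:
+ //   out p2 = (3*p3 + 2*p2 + p1 + p0 + q0 + 4) >> 3
+ //   out p1 = (2*p3 + p2 + 2*p1 + p0 + q0 + q1 + 4) >> 3
+ //   out p0 = (p3 + p2 + p1 + 2*p0 + q0 + q1 + q2 + 4) >> 3
+ //   out q0 = (p2 + p1 + p0 + 2*q0 + q1 + q2 + q3 + 4) >> 3
+ //   out q1 = (p1 + p0 + q0 + 2*q1 + q2 + 2*q3 + 4) >> 3
+ //   out q2 = (p0 + q0 + q1 + 2*q2 + 3*q3 + 4) >> 3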
+ uaddl_sz \tmp1\().8h, \tmp2\().8h, v20, v21, \sz
+ uaddl_sz \tmp3\().8h, \tmp4\().8h, v22, v25, \sz
+ uaddl_sz \tmp5\().8h, \tmp6\().8h, v20, v22, \sz
+ uaddl_sz \tmp7\().8h, \tmp8\().8h, v23, v26, \sz
+ add_sz v0.8h, v1.8h, \tmp1\().8h, \tmp2\().8h, \tmp1\().8h, \tmp2\().8h, \sz
+ uaddw_sz v0.8h, v1.8h, v0.8h, v1.8h, v23, \sz
+ uaddw_sz v0.8h, v1.8h, v0.8h, v1.8h, v24, \sz
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp5\().8h, \tmp6\().8h, \sz
+ sub_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1\().8h, \tmp2\().8h, \sz
+ sub_sz \tmp7\().8h, \tmp8\().8h, \tmp7\().8h, \tmp8\().8h, \tmp5\().8h, \tmp6\().8h, \sz
+ rshrn_sz v2, v0.8h, v1.8h, #3, \sz // out p2
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3\().8h, \tmp4\().8h, \sz
+ uaddl_sz \tmp1\().8h, \tmp2\().8h, v20, v23, \sz
+ uaddl_sz \tmp3\().8h, \tmp4\().8h, v24, v27, \sz
+ rshrn_sz v3, v0.8h, v1.8h, #3, \sz // out p1
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp7\().8h, \tmp8\().8h, \sz
+ sub_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1\().8h, \tmp2\().8h, \sz
+ uaddl_sz \tmp5\().8h, \tmp6\().8h, v21, v24, \sz
+ uaddl_sz \tmp7\().8h, \tmp8\().8h, v25, v27, \sz
+ rshrn_sz v4, v0.8h, v1.8h, #3, \sz // out p0
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3\().8h, \tmp4\().8h, \sz
+ sub_sz \tmp7\().8h, \tmp8\().8h, \tmp7\().8h, \tmp8\().8h, \tmp5\().8h, \tmp6\().8h, \sz
+ uaddl_sz \tmp1\().8h, \tmp2\().8h, v22, v25, \sz
+ uaddl_sz \tmp3\().8h, \tmp4\().8h, v26, v27, \sz
+ rshrn_sz v5, v0.8h, v1.8h, #3, \sz // out q0
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp7\().8h, \tmp8\().8h, \sz
+ sub_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1\().8h, \tmp2\().8h, \sz
+ rshrn_sz \tmp5, v0.8h, v1.8h, #3, \sz // out q1
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3\().8h, \tmp4\().8h, \sz
+ // The output here is written back into the input registers. This doesn't
+ // matter for the flat8out part below, since we only update those pixels
+ // which won't be touched below.
+ bit v21\sz, v2\sz, v6\sz
+ bit v22\sz, v3\sz, v6\sz
+ bit v23\sz, v4\sz, v6\sz
+ rshrn_sz \tmp6, v0.8h, v1.8h, #3, \sz // out q2
+ bit v24\sz, v5\sz, v6\sz
+ bit v25\sz, \tmp5\sz, v6\sz
+ bit v26\sz, \tmp6\sz, v6\sz
+.endif
+.if \wd == 16
+6:
+ orr v2\sz, v6\sz, v7\sz
+ mov x5, v2.d[0]
+.ifc \sz, .16b
+ mov x6, v2.d[1]
+ adds x5, x5, x6
+ b.ne 1f
+.else
+ cbnz x5, 1f
+.endif
+ // If no pixels needed flat8in or flat8out, jump to a
+ // writeout of the inner 4 pixels
+ ret x14
+1:
+
+ mov x5, v7.d[0]
+.ifc \sz, .16b
+ mov x6, v7.d[1]
+ adds x5, x5, x6
+ b.ne 1f
+.else
+ cbnz x5, 1f
+.endif
+ // If no pixels need flat8out, jump to a writeout of the inner 6 pixels
+ ret x15
+
+1:
+ // flat8out
+ // This writes all outputs into v2-v17 (skipping v7 and v16).
+ // If this part is skipped, the output is read from v21-v26 (which is the input
+ // to this section).
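+ // Each output below is a rounded average over a 16-weight window that
+ // slides from the p7 side towards q7; the first one computed is
+ //   out p6 = (7*p7 + 2*p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 + 8) >> 4
+ // and each later output subtracts two old samples and adds two new
+ // ones. The +8 and >>4 come from the rshrn #4.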
+ ushll_sz v0.8h, v1.8h, v16, #3, \sz // 8 * v16
+ usubw_sz v0.8h, v1.8h, v0.8h, v1.8h, v16, \sz // 7 * v16
+ uaddw_sz v0.8h, v1.8h, v0.8h, v1.8h, v17, \sz
+ uaddl_sz v8.8h, v9.8h, v17, v18, \sz
+ uaddl_sz v10.8h, v11.8h, v19, v20, \sz
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v8.8h, v9.8h, \sz
+ uaddl_sz v8.8h, v9.8h, v16, v17, \sz
+ uaddl_sz v12.8h, v13.8h, v21, v22, \sz
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
+ uaddl_sz v10.8h, v11.8h, v18, v25, \sz
+ uaddl_sz v14.8h, v15.8h, v23, v24, \sz
+ sub_sz v10.8h, v11.8h, v10.8h, v11.8h, v8.8h, v9.8h, \sz
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v12.8h, v13.8h, \sz
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
+ uaddl_sz v12.8h, v13.8h, v16, v18, \sz
+ uaddl_sz v14.8h, v15.8h, v19, v26, \sz
+ rshrn_sz v2, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
+ uaddl_sz v8.8h, v9.8h, v16, v19, \sz
+ uaddl_sz v10.8h, v11.8h, v20, v27, \sz
+ sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
+ bif v2\sz, v17\sz, v7\sz
+ rshrn_sz v3, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
+ uaddl_sz v12.8h, v13.8h, v16, v20, \sz
+ uaddl_sz v14.8h, v15.8h, v21, v28, \sz
+ sub_sz v10.8h, v11.8h, v10.8h, v11.8h, v8.8h, v9.8h, \sz
+ bif v3\sz, v18\sz, v7\sz
+ rshrn_sz v4, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
+ uaddl_sz v8.8h, v9.8h, v16, v21, \sz
+ uaddl_sz v10.8h, v11.8h, v22, v29, \sz
+ sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
+ bif v4\sz, v19\sz, v7\sz
+ rshrn_sz v5, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
+ uaddl_sz v12.8h, v13.8h, v16, v22, \sz
+ uaddl_sz v14.8h, v15.8h, v23, v30, \sz
+ sub_sz v10.8h, v11.8h, v10.8h, v11.8h, v8.8h, v9.8h, \sz
+ bif v5\sz, v20\sz, v7\sz
+ rshrn_sz v6, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
+ uaddl_sz v10.8h, v11.8h, v16, v23, \sz
+ sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
+ uaddl_sz v12.8h, v13.8h, v24, v31, \sz
+ bif v6\sz, v21\sz, v7\sz
+ rshrn_sz v8, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
+ sub_sz v10.8h, v11.8h, v12.8h, v13.8h, v10.8h, v11.8h, \sz
+ uaddl_sz v12.8h, v13.8h, v17, v24, \sz
+ uaddl_sz v14.8h, v15.8h, v25, v31, \sz
+ bif v8\sz, v22\sz, v7\sz
+ rshrn_sz v9, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
+ sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
+ uaddl_sz v12.8h, v13.8h, v26, v31, \sz
+ bif v9\sz, v23\sz, v7\sz
+ rshrn_sz v10, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
+ uaddl_sz v14.8h, v15.8h, v18, v25, \sz
+ uaddl_sz v18.8h, v19.8h, v19, v26, \sz
+ sub_sz v12.8h, v13.8h, v12.8h, v13.8h, v14.8h, v15.8h, \sz
+ uaddl_sz v14.8h, v15.8h, v27, v31, \sz
+ bif v10\sz, v24\sz, v7\sz
+ rshrn_sz v11, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v12.8h, v13.8h, \sz
+ uaddl_sz v12.8h, v13.8h, v20, v27, \sz
+ sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v18.8h, v19.8h, \sz
+ uaddl_sz v18.8h, v19.8h, v28, v31, \sz
+ bif v11\sz, v25\sz, v7\sz
+ sub_sz v18.8h, v19.8h, v18.8h, v19.8h, v12.8h, v13.8h, \sz
+ rshrn_sz v12, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
+ uaddl_sz v14.8h, v15.8h, v21, v28, \sz
+ uaddl_sz v20.8h, v21.8h, v29, v31, \sz
+ bif v12\sz, v26\sz, v7\sz
+ rshrn_sz v13, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v18.8h, v19.8h, \sz
+ sub_sz v20.8h, v21.8h, v20.8h, v21.8h, v14.8h, v15.8h, \sz
+ uaddl_sz v18.8h, v19.8h, v22, v29, \sz
+ uaddl_sz v22.8h, v23.8h, v30, v31, \sz
+ bif v13\sz, v27\sz, v7\sz
+ rshrn_sz v14, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v20.8h, v21.8h, \sz
+ sub_sz v22.8h, v23.8h, v22.8h, v23.8h, v18.8h, v19.8h, \sz
+ bif v14\sz, v28\sz, v7\sz
+ rshrn_sz v15, v0.8h, v1.8h, #4, \sz
+
+ add_sz v0.8h, v1.8h, v0.8h, v1.8h, v22.8h, v23.8h, \sz
+ bif v15\sz, v29\sz, v7\sz
+ rshrn_sz v17, v0.8h, v1.8h, #4, \sz
+ bif v17\sz, v30\sz, v7\sz
+.endif
+.endm
+
+// For wd <= 8, we use v16-v19 and v28-v31 as temp registers.
+// For wd=16 those are needed for inputs/outputs, so v8-v15 are
+// used as temp registers there instead.
+function vp9_loop_filter_4
+ loop_filter 4, .8b, 0, v16, v17, v18, v19, v28, v29, v30, v31
+ ret
+9:
+ ret x10
+endfunc
+
+function vp9_loop_filter_4_16b_mix_44
+ loop_filter 4, .16b, 44, v16, v17, v18, v19, v28, v29, v30, v31
+ ret
+9:
+ ret x10
+endfunc
+
+function vp9_loop_filter_8
+ loop_filter 8, .8b, 0, v16, v17, v18, v19, v28, v29, v30, v31
+ ret
+6:
+ ret x13
+9:
+ ret x10
+endfunc
+
+function vp9_loop_filter_8_16b_mix
+ loop_filter 8, .16b, 88, v16, v17, v18, v19, v28, v29, v30, v31
+ ret
+6:
+ ret x13
+9:
+ ret x10
+endfunc
+
+function vp9_loop_filter_16
+ loop_filter 16, .8b, 0, v8, v9, v10, v11, v12, v13, v14, v15
+ ret
+9:
+ ldp d10, d11, [sp, #0x10]
+ ldp d12, d13, [sp, #0x20]
+ ldp d14, d15, [sp, #0x30]
+ ldp d8, d9, [sp], #0x40
+ ret x10
+endfunc
+
+function vp9_loop_filter_16_16b
+ loop_filter 16, .16b, 0, v8, v9, v10, v11, v12, v13, v14, v15
+ ret
+9:
+ ldp d10, d11, [sp, #0x10]
+ ldp d12, d13, [sp, #0x20]
+ ldp d14, d15, [sp, #0x30]
+ ldp d8, d9, [sp], #0x40
+ ret x10
+endfunc
+
+.macro loop_filter_4
+ bl vp9_loop_filter_4
+.endm
+
+.macro loop_filter_4_16b_mix mix
+ bl vp9_loop_filter_4_16b_mix_\mix
+.endm
+
+.macro loop_filter_8
+ // calculate alternative 'return' targets
+ adr x13, 6f
+ bl vp9_loop_filter_8
+.endm
+
+.macro loop_filter_8_16b_mix mix
+ // calculate alternative 'return' targets
+ adr x13, 6f
+.if \mix == 48
+ mov x11, #0xffffffff00000000
+.elseif \mix == 84
+ mov x11, #0x00000000ffffffff
+.else
+ mov x11, #0xffffffffffffffff
+.endif
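+ // x11 is consumed by the loop_filter macro (mov v1.d[0], x11;
+ // sxtl v1.8h, v1.8b; and v6, v6, v1.16b): each 0x00/0xff byte
+ // widens to an all-zeros/all-ones lane, so for mix == 48 the
+ // flat8in mask is cleared for the first 8 pixels and kept for the
+ // last 8 (the first block only ever gets the wd=4 inner filter),
+ // and the other way around for mix == 84.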
+ bl vp9_loop_filter_8_16b_mix
+.endm
+
+.macro loop_filter_16
+ // calculate alternative 'return' targets
+ adr x14, 7f
+ adr x15, 8f
+ bl vp9_loop_filter_16
+.endm
+
+.macro loop_filter_16_16b
+ // calculate alternative 'return' targets
+ adr x14, 7f
+ adr x15, 8f
+ bl vp9_loop_filter_16_16b
+.endm
+
+
+// The public functions in this file have the following signature:
+// void loop_filter(uint8_t *dst, ptrdiff_t stride, int mb_lim, int lim, int hev_thr);
+
+function ff_vp9_loop_filter_v_4_8_neon, export=1
+ mov x10, x30
+ sub x9, x0, x1, lsl #2
+ ld1 {v20.8b}, [x9], x1 // p3
+ ld1 {v24.8b}, [x0], x1 // q0
+ ld1 {v21.8b}, [x9], x1 // p2
+ ld1 {v25.8b}, [x0], x1 // q1
+ ld1 {v22.8b}, [x9], x1 // p1
+ ld1 {v26.8b}, [x0], x1 // q2
+ ld1 {v23.8b}, [x9], x1 // p0
+ ld1 {v27.8b}, [x0], x1 // q3
+ sub x0, x0, x1, lsl #2
+ sub x9, x9, x1, lsl #1
+
+ loop_filter_4
+
+ st1 {v22.8b}, [x9], x1
+ st1 {v24.8b}, [x0], x1
+ st1 {v23.8b}, [x9], x1
+ st1 {v25.8b}, [x0], x1
+
+ ret x10
+endfunc
+
+function ff_vp9_loop_filter_v_44_16_neon, export=1
+ mov x10, x30
+ sub x9, x0, x1, lsl #2
+ ld1 {v20.16b}, [x9], x1 // p3
+ ld1 {v24.16b}, [x0], x1 // q0
+ ld1 {v21.16b}, [x9], x1 // p2
+ ld1 {v25.16b}, [x0], x1 // q1
+ ld1 {v22.16b}, [x9], x1 // p1
+ ld1 {v26.16b}, [x0], x1 // q2
+ ld1 {v23.16b}, [x9], x1 // p0
+ ld1 {v27.16b}, [x0], x1 // q3
+ sub x0, x0, x1, lsl #2
+ sub x9, x9, x1, lsl #1
+
+ loop_filter_4_16b_mix 44
+
+ st1 {v22.16b}, [x9], x1
+ st1 {v24.16b}, [x0], x1
+ st1 {v23.16b}, [x9], x1
+ st1 {v25.16b}, [x0], x1
+
+ ret x10
+endfunc
+
+function ff_vp9_loop_filter_h_4_8_neon, export=1
+ mov x10, x30
+ sub x9, x0, #4
+ add x0, x9, x1, lsl #2
+ ld1 {v20.8b}, [x9], x1
+ ld1 {v24.8b}, [x0], x1
+ ld1 {v21.8b}, [x9], x1
+ ld1 {v25.8b}, [x0], x1
+ ld1 {v22.8b}, [x9], x1
+ ld1 {v26.8b}, [x0], x1
+ ld1 {v23.8b}, [x9], x1
+ ld1 {v27.8b}, [x0], x1
+
+ sub x9, x9, x1, lsl #2
+ sub x0, x0, x1, lsl #2
+ // Move x0/x9 forward by 2 pixels; we don't need to rewrite the
+ // outermost 2 pixels since they aren't changed.
+ add x9, x9, #2
+ add x0, x0, #2
+
+ transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ loop_filter_4
+
+ // We will only write the middle 4 pixels back; after the loop filter,
+ // these are in v22, v23, v24, v25, ordered as rows (8x4 pixels).
+ // We need to transpose them to columns, done with a 4x8 transpose
+ // (in practice, two 4x4 transposes of the two 4x4 halves of the
+ // 8x4 block, giving 4x8 pixels).
+ transpose_4x8B v22, v23, v24, v25, v26, v27, v28, v29
+ st1 {v22.s}[0], [x9], x1
+ st1 {v22.s}[1], [x0], x1
+ st1 {v23.s}[0], [x9], x1
+ st1 {v23.s}[1], [x0], x1
+ st1 {v24.s}[0], [x9], x1
+ st1 {v24.s}[1], [x0], x1
+ st1 {v25.s}[0], [x9], x1
+ st1 {v25.s}[1], [x0], x1
+
+ ret x10
+endfunc
+
+function ff_vp9_loop_filter_h_44_16_neon, export=1
+ mov x10, x30
+ sub x9, x0, #4
+ add x0, x9, x1, lsl #3
+ ld1 {v20.8b}, [x9], x1
+ ld1 {v20.d}[1], [x0], x1
+ ld1 {v21.8b}, [x9], x1
+ ld1 {v21.d}[1], [x0], x1
+ ld1 {v22.8b}, [x9], x1
+ ld1 {v22.d}[1], [x0], x1
+ ld1 {v23.8b}, [x9], x1
+ ld1 {v23.d}[1], [x0], x1
+ ld1 {v24.8b}, [x9], x1
+ ld1 {v24.d}[1], [x0], x1
+ ld1 {v25.8b}, [x9], x1
+ ld1 {v25.d}[1], [x0], x1
+ ld1 {v26.8b}, [x9], x1
+ ld1 {v26.d}[1], [x0], x1
+ ld1 {v27.8b}, [x9], x1
+ ld1 {v27.d}[1], [x0], x1
+
+ sub x9, x9, x1, lsl #3
+ sub x0, x0, x1, lsl #3
+ add x9, x9, #2
+ add x0, x0, #2
+
+ transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ loop_filter_4_16b_mix 44
+
+ transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
+
+ st1 {v22.s}[0], [x9], x1
+ st1 {v22.s}[2], [x0], x1
+ st1 {v23.s}[0], [x9], x1
+ st1 {v23.s}[2], [x0], x1
+ st1 {v24.s}[0], [x9], x1
+ st1 {v24.s}[2], [x0], x1
+ st1 {v25.s}[0], [x9], x1
+ st1 {v25.s}[2], [x0], x1
+ st1 {v22.s}[1], [x9], x1
+ st1 {v22.s}[3], [x0], x1
+ st1 {v23.s}[1], [x9], x1
+ st1 {v23.s}[3], [x0], x1
+ st1 {v24.s}[1], [x9], x1
+ st1 {v24.s}[3], [x0], x1
+ st1 {v25.s}[1], [x9], x1
+ st1 {v25.s}[3], [x0], x1
+
+ ret x10
+endfunc
+
+function ff_vp9_loop_filter_v_8_8_neon, export=1
+ mov x10, x30
+ sub x9, x0, x1, lsl #2
+ ld1 {v20.8b}, [x9], x1 // p3
+ ld1 {v24.8b}, [x0], x1 // q0
+ ld1 {v21.8b}, [x9], x1 // p2
+ ld1 {v25.8b}, [x0], x1 // q1
+ ld1 {v22.8b}, [x9], x1 // p1
+ ld1 {v26.8b}, [x0], x1 // q2
+ ld1 {v23.8b}, [x9], x1 // p0
+ ld1 {v27.8b}, [x0], x1 // q3
+ sub x9, x9, x1, lsl #2
+ sub x0, x0, x1, lsl #2
+ add x9, x9, x1
+
+ loop_filter_8
+
+ st1 {v21.8b}, [x9], x1
+ st1 {v24.8b}, [x0], x1
+ st1 {v22.8b}, [x9], x1
+ st1 {v25.8b}, [x0], x1
+ st1 {v23.8b}, [x9], x1
+ st1 {v26.8b}, [x0], x1
+
+ ret x10
+6:
+ sub x9, x0, x1, lsl #1
+ st1 {v22.8b}, [x9], x1
+ st1 {v24.8b}, [x0], x1
+ st1 {v23.8b}, [x9], x1
+ st1 {v25.8b}, [x0], x1
+ ret x10
+endfunc
+
+.macro mix_v_16 mix
+function ff_vp9_loop_filter_v_\mix\()_16_neon, export=1
+ mov x10, x30
+ sub x9, x0, x1, lsl #2
+ ld1 {v20.16b}, [x9], x1 // p3
+ ld1 {v24.16b}, [x0], x1 // q0
+ ld1 {v21.16b}, [x9], x1 // p2
+ ld1 {v25.16b}, [x0], x1 // q1
+ ld1 {v22.16b}, [x9], x1 // p1
+ ld1 {v26.16b}, [x0], x1 // q2
+ ld1 {v23.16b}, [x9], x1 // p0
+ ld1 {v27.16b}, [x0], x1 // q3
+ sub x9, x9, x1, lsl #2
+ sub x0, x0, x1, lsl #2
+ add x9, x9, x1
+
+ loop_filter_8_16b_mix \mix
+
+ st1 {v21.16b}, [x9], x1
+ st1 {v24.16b}, [x0], x1
+ st1 {v22.16b}, [x9], x1
+ st1 {v25.16b}, [x0], x1
+ st1 {v23.16b}, [x9], x1
+ st1 {v26.16b}, [x0], x1
+
+ ret x10
+6:
+ sub x9, x0, x1, lsl #1
+ st1 {v22.16b}, [x9], x1
+ st1 {v24.16b}, [x0], x1
+ st1 {v23.16b}, [x9], x1
+ st1 {v25.16b}, [x0], x1
+ ret x10
+endfunc
+.endm
+
+mix_v_16 48
+mix_v_16 84
+mix_v_16 88
+
+function ff_vp9_loop_filter_h_8_8_neon, export=1
+ mov x10, x30
+ sub x9, x0, #4
+ add x0, x9, x1, lsl #2
+ ld1 {v20.8b}, [x9], x1
+ ld1 {v24.8b}, [x0], x1
+ ld1 {v21.8b}, [x9], x1
+ ld1 {v25.8b}, [x0], x1
+ ld1 {v22.8b}, [x9], x1
+ ld1 {v26.8b}, [x0], x1
+ ld1 {v23.8b}, [x9], x1
+ ld1 {v27.8b}, [x0], x1
+
+ sub x9, x9, x1, lsl #2
+ sub x0, x0, x1, lsl #2
+
+ transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ loop_filter_8
+
+ // Even though only 6 pixels per row have been changed, we write the
+ // full 8 pixel registers.
+ transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ st1 {v20.8b}, [x9], x1
+ st1 {v24.8b}, [x0], x1
+ st1 {v21.8b}, [x9], x1
+ st1 {v25.8b}, [x0], x1
+ st1 {v22.8b}, [x9], x1
+ st1 {v26.8b}, [x0], x1
+ st1 {v23.8b}, [x9], x1
+ st1 {v27.8b}, [x0], x1
+
+ ret x10
+6:
+ // If we didn't need to do the flat8in part, we use the same writeback
+ // as in loop_filter_h_4_8.
+ add x9, x9, #2
+ add x0, x0, #2
+ transpose_4x8B v22, v23, v24, v25, v26, v27, v28, v29
+ st1 {v22.s}[0], [x9], x1
+ st1 {v22.s}[1], [x0], x1
+ st1 {v23.s}[0], [x9], x1
+ st1 {v23.s}[1], [x0], x1
+ st1 {v24.s}[0], [x9], x1
+ st1 {v24.s}[1], [x0], x1
+ st1 {v25.s}[0], [x9], x1
+ st1 {v25.s}[1], [x0], x1
+ ret x10
+endfunc
+
+.macro mix_h_16 mix
+function ff_vp9_loop_filter_h_\mix\()_16_neon, export=1
+ mov x10, x30
+ sub x9, x0, #4
+ add x0, x9, x1, lsl #3
+ ld1 {v20.8b}, [x9], x1
+ ld1 {v20.d}[1], [x0], x1
+ ld1 {v21.8b}, [x9], x1
+ ld1 {v21.d}[1], [x0], x1
+ ld1 {v22.8b}, [x9], x1
+ ld1 {v22.d}[1], [x0], x1
+ ld1 {v23.8b}, [x9], x1
+ ld1 {v23.d}[1], [x0], x1
+ ld1 {v24.8b}, [x9], x1
+ ld1 {v24.d}[1], [x0], x1
+ ld1 {v25.8b}, [x9], x1
+ ld1 {v25.d}[1], [x0], x1
+ ld1 {v26.8b}, [x9], x1
+ ld1 {v26.d}[1], [x0], x1
+ ld1 {v27.8b}, [x9], x1
+ ld1 {v27.d}[1], [x0], x1
+
+ sub x9, x9, x1, lsl #3
+ sub x0, x0, x1, lsl #3
+
+ transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ loop_filter_8_16b_mix \mix
+
+ transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ st1 {v20.8b}, [x9], x1
+ st1 {v20.d}[1], [x0], x1
+ st1 {v21.8b}, [x9], x1
+ st1 {v21.d}[1], [x0], x1
+ st1 {v22.8b}, [x9], x1
+ st1 {v22.d}[1], [x0], x1
+ st1 {v23.8b}, [x9], x1
+ st1 {v23.d}[1], [x0], x1
+ st1 {v24.8b}, [x9], x1
+ st1 {v24.d}[1], [x0], x1
+ st1 {v25.8b}, [x9], x1
+ st1 {v25.d}[1], [x0], x1
+ st1 {v26.8b}, [x9], x1
+ st1 {v26.d}[1], [x0], x1
+ st1 {v27.8b}, [x9], x1
+ st1 {v27.d}[1], [x0], x1
+
+ ret x10
+6:
+ add x9, x9, #2
+ add x0, x0, #2
+ transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
+ st1 {v22.s}[0], [x9], x1
+ st1 {v22.s}[2], [x0], x1
+ st1 {v23.s}[0], [x9], x1
+ st1 {v23.s}[2], [x0], x1
+ st1 {v24.s}[0], [x9], x1
+ st1 {v24.s}[2], [x0], x1
+ st1 {v25.s}[0], [x9], x1
+ st1 {v25.s}[2], [x0], x1
+ st1 {v22.s}[1], [x9], x1
+ st1 {v22.s}[3], [x0], x1
+ st1 {v23.s}[1], [x9], x1
+ st1 {v23.s}[3], [x0], x1
+ st1 {v24.s}[1], [x9], x1
+ st1 {v24.s}[3], [x0], x1
+ st1 {v25.s}[1], [x9], x1
+ st1 {v25.s}[3], [x0], x1
+ ret x10
+endfunc
+.endm
+
+mix_h_16 48
+mix_h_16 84
+mix_h_16 88
+
+function ff_vp9_loop_filter_v_16_8_neon, export=1
+ mov x10, x30
+ stp d8, d9, [sp, #-0x40]!
+ stp d14, d15, [sp, #0x30]
+ stp d12, d13, [sp, #0x20]
+ stp d10, d11, [sp, #0x10]
+ sub x9, x0, x1, lsl #3
+ ld1 {v16.8b}, [x9], x1 // p7
+ ld1 {v24.8b}, [x0], x1 // q0
+ ld1 {v17.8b}, [x9], x1 // p6
+ ld1 {v25.8b}, [x0], x1 // q1
+ ld1 {v18.8b}, [x9], x1 // p5
+ ld1 {v26.8b}, [x0], x1 // q2
+ ld1 {v19.8b}, [x9], x1 // p4
+ ld1 {v27.8b}, [x0], x1 // q3
+ ld1 {v20.8b}, [x9], x1 // p3
+ ld1 {v28.8b}, [x0], x1 // q4
+ ld1 {v21.8b}, [x9], x1 // p2
+ ld1 {v29.8b}, [x0], x1 // q5
+ ld1 {v22.8b}, [x9], x1 // p1
+ ld1 {v30.8b}, [x0], x1 // q6
+ ld1 {v23.8b}, [x9], x1 // p0
+ ld1 {v31.8b}, [x0], x1 // q7
+ sub x9, x9, x1, lsl #3
+ sub x0, x0, x1, lsl #3
+ add x9, x9, x1
+
+ loop_filter_16
+
+ // If we did the flat8out part, we get the output in
+ // v2-v17 (skipping v7 and v16). x9 points to x0 - 7 * stride;
+ // store v2-v9 there, and v10-v17 at x0.
+ st1 {v2.8b}, [x9], x1
+ st1 {v10.8b}, [x0], x1
+ st1 {v3.8b}, [x9], x1
+ st1 {v11.8b}, [x0], x1
+ st1 {v4.8b}, [x9], x1
+ st1 {v12.8b}, [x0], x1
+ st1 {v5.8b}, [x9], x1
+ st1 {v13.8b}, [x0], x1
+ st1 {v6.8b}, [x9], x1
+ st1 {v14.8b}, [x0], x1
+ st1 {v8.8b}, [x9], x1
+ st1 {v15.8b}, [x0], x1
+ st1 {v9.8b}, [x9], x1
+ st1 {v17.8b}, [x0], x1
+9:
+ ldp d10, d11, [sp, #0x10]
+ ldp d12, d13, [sp, #0x20]
+ ldp d14, d15, [sp, #0x30]
+ ldp d8, d9, [sp], #0x40
+ ret x10
+8:
+ add x9, x9, x1, lsl #2
+ // If we didn't do the flat8out part, the output is left in the
+ // input registers.
+ st1 {v21.8b}, [x9], x1
+ st1 {v24.8b}, [x0], x1
+ st1 {v22.8b}, [x9], x1
+ st1 {v25.8b}, [x0], x1
+ st1 {v23.8b}, [x9], x1
+ st1 {v26.8b}, [x0], x1
+ b 9b
+7:
+ sub x9, x0, x1, lsl #1
+ st1 {v22.8b}, [x9], x1
+ st1 {v24.8b}, [x0], x1
+ st1 {v23.8b}, [x9], x1
+ st1 {v25.8b}, [x0], x1
+ b 9b
+endfunc
+
+function ff_vp9_loop_filter_v_16_16_neon, export=1
+ mov x10, x30
+ stp d8, d9, [sp, #-0x40]!
+ stp d14, d15, [sp, #0x30]
+ stp d12, d13, [sp, #0x20]
+ stp d10, d11, [sp, #0x10]
+ sub x9, x0, x1, lsl #3
+ ld1 {v16.16b}, [x9], x1 // p7
+ ld1 {v24.16b}, [x0], x1 // q0
+ ld1 {v17.16b}, [x9], x1 // p6
+ ld1 {v25.16b}, [x0], x1 // q1
+ ld1 {v18.16b}, [x9], x1 // p5
+ ld1 {v26.16b}, [x0], x1 // q2
+ ld1 {v19.16b}, [x9], x1 // p4
+ ld1 {v27.16b}, [x0], x1 // q3
+ ld1 {v20.16b}, [x9], x1 // p3
+ ld1 {v28.16b}, [x0], x1 // q4
+ ld1 {v21.16b}, [x9], x1 // p2
+ ld1 {v29.16b}, [x0], x1 // q5
+ ld1 {v22.16b}, [x9], x1 // p1
+ ld1 {v30.16b}, [x0], x1 // q6
+ ld1 {v23.16b}, [x9], x1 // p0
+ ld1 {v31.16b}, [x0], x1 // q7
+ sub x9, x9, x1, lsl #3
+ sub x0, x0, x1, lsl #3
+ add x9, x9, x1
+
+ loop_filter_16_16b
+
+ st1 {v2.16b}, [x9], x1
+ st1 {v10.16b}, [x0], x1
+ st1 {v3.16b}, [x9], x1
+ st1 {v11.16b}, [x0], x1
+ st1 {v4.16b}, [x9], x1
+ st1 {v12.16b}, [x0], x1
+ st1 {v5.16b}, [x9], x1
+ st1 {v13.16b}, [x0], x1
+ st1 {v6.16b}, [x9], x1
+ st1 {v14.16b}, [x0], x1
+ st1 {v8.16b}, [x9], x1
+ st1 {v15.16b}, [x0], x1
+ st1 {v9.16b}, [x9], x1
+ st1 {v17.16b}, [x0], x1
+9:
+ ldp d10, d11, [sp, #0x10]
+ ldp d12, d13, [sp, #0x20]
+ ldp d14, d15, [sp, #0x30]
+ ldp d8, d9, [sp], #0x40
+ ret x10
+8:
+ add x9, x9, x1, lsl #2
+ st1 {v21.16b}, [x9], x1
+ st1 {v24.16b}, [x0], x1
+ st1 {v22.16b}, [x9], x1
+ st1 {v25.16b}, [x0], x1
+ st1 {v23.16b}, [x9], x1
+ st1 {v26.16b}, [x0], x1
+ b 9b
+7:
+ sub x9, x0, x1, lsl #1
+ st1 {v22.16b}, [x9], x1
+ st1 {v24.16b}, [x0], x1
+ st1 {v23.16b}, [x9], x1
+ st1 {v25.16b}, [x0], x1
+ b 9b
+endfunc
+
+function ff_vp9_loop_filter_h_16_8_neon, export=1
+ mov x10, x30
+ stp d8, d9, [sp, #-0x40]!
+ stp d14, d15, [sp, #0x30]
+ stp d12, d13, [sp, #0x20]
+ stp d10, d11, [sp, #0x10]
+ sub x9, x0, #8
+ ld1 {v16.8b}, [x9], x1
+ ld1 {v24.8b}, [x0], x1
+ ld1 {v17.8b}, [x9], x1
+ ld1 {v25.8b}, [x0], x1
+ ld1 {v18.8b}, [x9], x1
+ ld1 {v26.8b}, [x0], x1
+ ld1 {v19.8b}, [x9], x1
+ ld1 {v27.8b}, [x0], x1
+ ld1 {v20.8b}, [x9], x1
+ ld1 {v28.8b}, [x0], x1
+ ld1 {v21.8b}, [x9], x1
+ ld1 {v29.8b}, [x0], x1
+ ld1 {v22.8b}, [x9], x1
+ ld1 {v30.8b}, [x0], x1
+ ld1 {v23.8b}, [x9], x1
+ ld1 {v31.8b}, [x0], x1
+ sub x0, x0, x1, lsl #3
+ sub x9, x9, x1, lsl #3
+
+ // The 16x8 pixels read above are in two 8x8 blocks; the left
+ // half in v16-v23, and the right half in v24-v31. Do two 8x8 transposes
+ // of this, to get one column per register.
+ transpose_8x8B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
+ transpose_8x8B v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
+
+ loop_filter_16
+
+ transpose_8x8B v16, v2, v3, v4, v5, v6, v8, v9, v0, v1
+ transpose_8x8B v10, v11, v12, v13, v14, v15, v17, v31, v0, v1
+
+ st1 {v16.8b}, [x9], x1
+ st1 {v10.8b}, [x0], x1
+ st1 {v2.8b}, [x9], x1
+ st1 {v11.8b}, [x0], x1
+ st1 {v3.8b}, [x9], x1
+ st1 {v12.8b}, [x0], x1
+ st1 {v4.8b}, [x9], x1
+ st1 {v13.8b}, [x0], x1
+ st1 {v5.8b}, [x9], x1
+ st1 {v14.8b}, [x0], x1
+ st1 {v6.8b}, [x9], x1
+ st1 {v15.8b}, [x0], x1
+ st1 {v8.8b}, [x9], x1
+ st1 {v17.8b}, [x0], x1
+ st1 {v9.8b}, [x9], x1
+ st1 {v31.8b}, [x0], x1
+9:
+ ldp d10, d11, [sp, #0x10]
+ ldp d12, d13, [sp, #0x20]
+ ldp d14, d15, [sp, #0x30]
+ ldp d8, d9, [sp], #0x40
+ ret x10
+8:
+ // The same writeback as in loop_filter_h_8_8
+ sub x9, x0, #4
+ add x0, x9, x1, lsl #2
+ transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ st1 {v20.8b}, [x9], x1
+ st1 {v24.8b}, [x0], x1
+ st1 {v21.8b}, [x9], x1
+ st1 {v25.8b}, [x0], x1
+ st1 {v22.8b}, [x9], x1
+ st1 {v26.8b}, [x0], x1
+ st1 {v23.8b}, [x9], x1
+ st1 {v27.8b}, [x0], x1
+ b 9b
+7:
+ // The same writeback as in loop_filter_h_4_8
+ sub x9, x0, #2
+ add x0, x9, x1, lsl #2
+ transpose_4x8B v22, v23, v24, v25, v26, v27, v28, v29
+ st1 {v22.s}[0], [x9], x1
+ st1 {v22.s}[1], [x0], x1
+ st1 {v23.s}[0], [x9], x1
+ st1 {v23.s}[1], [x0], x1
+ st1 {v24.s}[0], [x9], x1
+ st1 {v24.s}[1], [x0], x1
+ st1 {v25.s}[0], [x9], x1
+ st1 {v25.s}[1], [x0], x1
+ b 9b
+endfunc
+
+function ff_vp9_loop_filter_h_16_16_neon, export=1
+ mov x10, x30
+ stp d8, d9, [sp, #-0x40]!
+ stp d14, d15, [sp, #0x30]
+ stp d12, d13, [sp, #0x20]
+ stp d10, d11, [sp, #0x10]
+ sub x9, x0, #8
+ ld1 {v16.8b}, [x9], x1
+ ld1 {v24.8b}, [x0], x1
+ ld1 {v17.8b}, [x9], x1
+ ld1 {v25.8b}, [x0], x1
+ ld1 {v18.8b}, [x9], x1
+ ld1 {v26.8b}, [x0], x1
+ ld1 {v19.8b}, [x9], x1
+ ld1 {v27.8b}, [x0], x1
+ ld1 {v20.8b}, [x9], x1
+ ld1 {v28.8b}, [x0], x1
+ ld1 {v21.8b}, [x9], x1
+ ld1 {v29.8b}, [x0], x1
+ ld1 {v22.8b}, [x9], x1
+ ld1 {v30.8b}, [x0], x1
+ ld1 {v23.8b}, [x9], x1
+ ld1 {v31.8b}, [x0], x1
+ ld1 {v16.d}[1], [x9], x1
+ ld1 {v24.d}[1], [x0], x1
+ ld1 {v17.d}[1], [x9], x1
+ ld1 {v25.d}[1], [x0], x1
+ ld1 {v18.d}[1], [x9], x1
+ ld1 {v26.d}[1], [x0], x1
+ ld1 {v19.d}[1], [x9], x1
+ ld1 {v27.d}[1], [x0], x1
+ ld1 {v20.d}[1], [x9], x1
+ ld1 {v28.d}[1], [x0], x1
+ ld1 {v21.d}[1], [x9], x1
+ ld1 {v29.d}[1], [x0], x1
+ ld1 {v22.d}[1], [x9], x1
+ ld1 {v30.d}[1], [x0], x1
+ ld1 {v23.d}[1], [x9], x1
+ ld1 {v31.d}[1], [x0], x1
+ sub x0, x0, x1, lsl #4
+ sub x9, x9, x1, lsl #4
+
+ transpose_8x16B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
+ transpose_8x16B v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
+
+ loop_filter_16_16b
+
+ transpose_8x16B v16, v2, v3, v4, v5, v6, v8, v9, v0, v1
+ transpose_8x16B v10, v11, v12, v13, v14, v15, v17, v31, v0, v1
+
+ st1 {v16.8b}, [x9], x1
+ st1 {v10.8b}, [x0], x1
+ st1 {v2.8b}, [x9], x1
+ st1 {v11.8b}, [x0], x1
+ st1 {v3.8b}, [x9], x1
+ st1 {v12.8b}, [x0], x1
+ st1 {v4.8b}, [x9], x1
+ st1 {v13.8b}, [x0], x1
+ st1 {v5.8b}, [x9], x1
+ st1 {v14.8b}, [x0], x1
+ st1 {v6.8b}, [x9], x1
+ st1 {v15.8b}, [x0], x1
+ st1 {v8.8b}, [x9], x1
+ st1 {v17.8b}, [x0], x1
+ st1 {v9.8b}, [x9], x1
+ st1 {v31.8b}, [x0], x1
+ st1 {v16.d}[1], [x9], x1
+ st1 {v10.d}[1], [x0], x1
+ st1 {v2.d}[1], [x9], x1
+ st1 {v11.d}[1], [x0], x1
+ st1 {v3.d}[1], [x9], x1
+ st1 {v12.d}[1], [x0], x1
+ st1 {v4.d}[1], [x9], x1
+ st1 {v13.d}[1], [x0], x1
+ st1 {v5.d}[1], [x9], x1
+ st1 {v14.d}[1], [x0], x1
+ st1 {v6.d}[1], [x9], x1
+ st1 {v15.d}[1], [x0], x1
+ st1 {v8.d}[1], [x9], x1
+ st1 {v17.d}[1], [x0], x1
+ st1 {v9.d}[1], [x9], x1
+ st1 {v31.d}[1], [x0], x1
+9:
+ ldp d10, d11, [sp, #0x10]
+ ldp d12, d13, [sp, #0x20]
+ ldp d14, d15, [sp, #0x30]
+ ldp d8, d9, [sp], #0x40
+ ret x10
+8:
+ sub x9, x0, #4
+ add x0, x9, x1, lsl #3
+ transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
+
+ st1 {v20.8b}, [x9], x1
+ st1 {v20.d}[1], [x0], x1
+ st1 {v21.8b}, [x9], x1
+ st1 {v21.d}[1], [x0], x1
+ st1 {v22.8b}, [x9], x1
+ st1 {v22.d}[1], [x0], x1
+ st1 {v23.8b}, [x9], x1
+ st1 {v23.d}[1], [x0], x1
+ st1 {v24.8b}, [x9], x1
+ st1 {v24.d}[1], [x0], x1
+ st1 {v25.8b}, [x9], x1
+ st1 {v25.d}[1], [x0], x1
+ st1 {v26.8b}, [x9], x1
+ st1 {v26.d}[1], [x0], x1
+ st1 {v27.8b}, [x9], x1
+ st1 {v27.d}[1], [x0], x1
+ b 9b
+7:
+ sub x9, x0, #2
+ add x0, x9, x1, lsl #3
+ transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
+ st1 {v22.s}[0], [x9], x1
+ st1 {v22.s}[2], [x0], x1
+ st1 {v23.s}[0], [x9], x1
+ st1 {v23.s}[2], [x0], x1
+ st1 {v24.s}[0], [x9], x1
+ st1 {v24.s}[2], [x0], x1
+ st1 {v25.s}[0], [x9], x1
+ st1 {v25.s}[2], [x0], x1
+ st1 {v22.s}[1], [x9], x1
+ st1 {v22.s}[3], [x0], x1
+ st1 {v23.s}[1], [x9], x1
+ st1 {v23.s}[3], [x0], x1
+ st1 {v24.s}[1], [x9], x1
+ st1 {v24.s}[3], [x0], x1
+ st1 {v25.s}[1], [x9], x1
+ st1 {v25.s}[3], [x0], x1
+ b 9b
+endfunc
diff --git a/media/ffvpx/libavcodec/aarch64/vp9mc_16bpp_neon.S b/media/ffvpx/libavcodec/aarch64/vp9mc_16bpp_neon.S
new file mode 100644
index 0000000000..53b372c262
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9mc_16bpp_neon.S
@@ -0,0 +1,606 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+// All public functions in this file have the following signature:
+// typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
+// const uint8_t *ref, ptrdiff_t ref_stride,
+// int h, int mx, int my);
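+//
+// The avg variants below average the new block into what is already in
+// dst; per 16 bit pixel, urhadd computes the rounding average
+//   dst[i] = (dst[i] + ref[i] + 1) >> 1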
+
+function ff_vp9_avg64_16_neon, export=1
+ mov x5, x0
+ sub x1, x1, #64
+ sub x3, x3, #64
+1:
+ ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], #64
+ ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
+ ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x2], x3
+ urhadd v0.8h, v0.8h, v4.8h
+ urhadd v1.8h, v1.8h, v5.8h
+ ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x0], x1
+ urhadd v2.8h, v2.8h, v6.8h
+ urhadd v3.8h, v3.8h, v7.8h
+ subs w4, w4, #1
+ urhadd v16.8h, v16.8h, v20.8h
+ urhadd v17.8h, v17.8h, v21.8h
+ st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x5], #64
+ urhadd v18.8h, v18.8h, v22.8h
+ urhadd v19.8h, v19.8h, v23.8h
+ st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x5], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_avg32_16_neon, export=1
+ mov x5, x0
+1:
+ ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], x3
+ ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
+ ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x2], x3
+ urhadd v0.8h, v0.8h, v4.8h
+ urhadd v1.8h, v1.8h, v5.8h
+ ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x0], x1
+ urhadd v2.8h, v2.8h, v6.8h
+ urhadd v3.8h, v3.8h, v7.8h
+ subs w4, w4, #2
+ urhadd v16.8h, v16.8h, v20.8h
+ urhadd v17.8h, v17.8h, v21.8h
+ st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x5], x1
+ urhadd v18.8h, v18.8h, v22.8h
+ urhadd v19.8h, v19.8h, v23.8h
+ st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x5], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_avg16_16_neon, export=1
+1:
+ ld1 {v2.8h, v3.8h}, [x2], x3
+ ld1 {v0.8h, v1.8h}, [x0]
+ urhadd v0.8h, v0.8h, v2.8h
+ urhadd v1.8h, v1.8h, v3.8h
+ subs w4, w4, #1
+ st1 {v0.8h, v1.8h}, [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_avg8_16_neon, export=1
+ mov x5, x0
+1:
+ ld1 {v2.8h}, [x2], x3
+ ld1 {v0.8h}, [x0], x1
+ ld1 {v3.8h}, [x2], x3
+ urhadd v0.8h, v0.8h, v2.8h
+ ld1 {v1.8h}, [x0], x1
+ urhadd v1.8h, v1.8h, v3.8h
+ subs w4, w4, #2
+ st1 {v0.8h}, [x5], x1
+ st1 {v1.8h}, [x5], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_avg4_16_neon, export=1
+ mov x5, x0
+1:
+ ld1 {v2.4h}, [x2], x3
+ ld1 {v0.4h}, [x0], x1
+ ld1 {v3.4h}, [x2], x3
+ urhadd v0.4h, v0.4h, v2.4h
+ ld1 {v1.4h}, [x0], x1
+ urhadd v1.4h, v1.4h, v3.4h
+ subs w4, w4, #2
+ st1 {v0.4h}, [x5], x1
+ st1 {v1.8b}, [x5], x1
+ b.ne 1b
+ ret
+endfunc
+
+
+// Extract a vector from src1-src2 and src4-src5 (src1-src3 and src4-src6
+// for size >= 16), and multiply-accumulate into dst1 and dst5 (or
+// dst1-dst2 and dst5-dst6 for size >= 8 and dst1-dst4 and dst5-dst8
+// for size >= 16)
+.macro extmlal dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8, src1, src2, src3, src4, src5, src6, offset, size
+ ext v20.16b, \src1\().16b, \src2\().16b, #(2*\offset)
+ ext v22.16b, \src4\().16b, \src5\().16b, #(2*\offset)
+ smlal \dst1\().4s, v20.4h, v0.h[\offset]
+ smlal \dst5\().4s, v22.4h, v0.h[\offset]
+.if \size >= 16
+ ext v21.16b, \src2\().16b, \src3\().16b, #(2*\offset)
+ ext v23.16b, \src5\().16b, \src6\().16b, #(2*\offset)
+.endif
+.if \size >= 8
+ smlal2 \dst2\().4s, v20.8h, v0.h[\offset]
+ smlal2 \dst6\().4s, v22.8h, v0.h[\offset]
+.endif
+.if \size >= 16
+ smlal \dst3\().4s, v21.4h, v0.h[\offset]
+ smlal \dst7\().4s, v23.4h, v0.h[\offset]
+ smlal2 \dst4\().4s, v21.8h, v0.h[\offset]
+ smlal2 \dst8\().4s, v23.8h, v0.h[\offset]
+.endif
+.endm
+
+
+// Instantiate a horizontal filter function for the given size.
+// This can work on 4, 8 or 16 pixels in parallel; for larger
+// widths it will do 16 pixels at a time and loop horizontally.
+// The actual width (in bytes) is passed in x5, the height in w4 and
+// the filter coefficients in x9.
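+//
+// For reference, a scalar sketch of one output pixel of the put variant
+// (the avg variant additionally averages with dst, as in the avg
+// functions above). filter[] is the 8-tap subpel filter loaded from x9,
+// src points at the output position before the sub x2, x2, #6 below
+// backs it up by 3 pixels, and pix_max stands for the per-bpp maximum
+// that the entry points further down load into v31:
+//
+//   int32_t sum = 0;
+//   for (int k = 0; k < 8; k++)
+//       sum += filter[k] * src[k - 3];
+//   int out = (sum + 64) >> 7;                          // sqrshrun #7
+//   out = out < 0 ? 0 : out > pix_max ? pix_max : out;  // saturate + umin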
+.macro do_8tap_h type, size
+function \type\()_8tap_\size\()h
+ sub x2, x2, #6
+ add x6, x0, x1
+ add x7, x2, x3
+ add x1, x1, x1
+ add x3, x3, x3
+ // Only size >= 16 loops horizontally and needs
+ // reduced dst stride
+.if \size >= 16
+ sub x1, x1, x5
+.endif
+ // size >= 16 loads two qwords and increments x2,
+ // for size 4/8 it's enough with one qword and no
+ // postincrement
+.if \size >= 16
+ sub x3, x3, x5
+ sub x3, x3, #16
+.endif
+ // Load the filter vector
+ ld1 {v0.8h}, [x9]
+1:
+.if \size >= 16
+ mov x9, x5
+.endif
+ // Load src
+.if \size >= 16
+ ld1 {v5.8h, v6.8h, v7.8h}, [x2], #48
+ ld1 {v16.8h, v17.8h, v18.8h}, [x7], #48
+.else
+ ld1 {v5.8h, v6.8h}, [x2]
+ ld1 {v16.8h, v17.8h}, [x7]
+.endif
+2:
+
+ smull v1.4s, v5.4h, v0.h[0]
+ smull v24.4s, v16.4h, v0.h[0]
+.if \size >= 8
+ smull2 v2.4s, v5.8h, v0.h[0]
+ smull2 v25.4s, v16.8h, v0.h[0]
+.endif
+.if \size >= 16
+ smull v3.4s, v6.4h, v0.h[0]
+ smull v26.4s, v17.4h, v0.h[0]
+ smull2 v4.4s, v6.8h, v0.h[0]
+ smull2 v27.4s, v17.8h, v0.h[0]
+.endif
+ extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 1, \size
+ extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 2, \size
+ extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 3, \size
+ extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 4, \size
+ extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 5, \size
+ extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 6, \size
+ extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 7, \size
+
+ // Round, shift and saturate
+ // The sqrshrun takes care of clamping negative values to zero, but
+ // we still need to clamp to the max pixel value manually with umin.
+ sqrshrun v1.4h, v1.4s, #7
+ sqrshrun v24.4h, v24.4s, #7
+.if \size >= 8
+ sqrshrun2 v1.8h, v2.4s, #7
+ sqrshrun2 v24.8h, v25.4s, #7
+ umin v1.8h, v1.8h, v31.8h
+ umin v24.8h, v24.8h, v31.8h
+.if \size >= 16
+ sqrshrun v2.4h, v3.4s, #7
+ sqrshrun v25.4h, v26.4s, #7
+ sqrshrun2 v2.8h, v4.4s, #7
+ sqrshrun2 v25.8h, v27.4s, #7
+ umin v2.8h, v2.8h, v31.8h
+ umin v25.8h, v25.8h, v31.8h
+.endif
+.else
+ umin v1.4h, v1.4h, v31.4h
+ umin v24.4h, v24.4h, v31.4h
+.endif
+ // Average
+.ifc \type,avg
+.if \size >= 16
+ ld1 {v3.8h, v4.8h}, [x0]
+ ld1 {v29.8h, v30.8h}, [x6]
+ urhadd v1.8h, v1.8h, v3.8h
+ urhadd v2.8h, v2.8h, v4.8h
+ urhadd v24.8h, v24.8h, v29.8h
+ urhadd v25.8h, v25.8h, v30.8h
+.elseif \size >= 8
+ ld1 {v3.8h}, [x0]
+ ld1 {v4.8h}, [x6]
+ urhadd v1.8h, v1.8h, v3.8h
+ urhadd v24.8h, v24.8h, v4.8h
+.else
+ ld1 {v3.4h}, [x0]
+ ld1 {v4.4h}, [x6]
+ urhadd v1.4h, v1.4h, v3.4h
+ urhadd v24.4h, v24.4h, v4.4h
+.endif
+.endif
+ // Store and loop horizontally (for size >= 16)
+.if \size >= 16
+ subs x9, x9, #32
+ st1 {v1.8h, v2.8h}, [x0], #32
+ st1 {v24.8h, v25.8h}, [x6], #32
+ b.eq 3f
+ mov v5.16b, v7.16b
+ mov v16.16b, v18.16b
+ ld1 {v6.8h, v7.8h}, [x2], #32
+ ld1 {v17.8h, v18.8h}, [x7], #32
+ b 2b
+.elseif \size == 8
+ st1 {v1.8h}, [x0]
+ st1 {v24.8h}, [x6]
+.else // \size == 4
+ st1 {v1.4h}, [x0]
+ st1 {v24.4h}, [x6]
+.endif
+3:
+ // Loop vertically
+ add x0, x0, x1
+ add x6, x6, x1
+ add x2, x2, x3
+ add x7, x7, x3
+ subs w4, w4, #2
+ b.ne 1b
+ ret
+endfunc
+.endm
+
+.macro do_8tap_h_size size
+do_8tap_h put, \size
+do_8tap_h avg, \size
+.endm
+
+do_8tap_h_size 4
+do_8tap_h_size 8
+do_8tap_h_size 16
+
+.macro do_8tap_h_func type, filter, offset, size, bpp
+function ff_vp9_\type\()_\filter\()\size\()_h_\bpp\()_neon, export=1
+ mvni v31.8h, #((0xff << (\bpp - 8)) & 0xff), lsl #8
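+ // The mvni sets every lane of v31 to the max pixel value: the
+ // immediate ((0xff << (\bpp - 8)) & 0xff) is shifted left by 8 and
+ // inverted, giving ~0xfc00 = 0x03ff for bpp=10 and
+ // ~0xf000 = 0x0fff for bpp=12, i.e. (1 << bpp) - 1, which the
+ // umin in the filter core clamps against.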
+ movrel x6, X(ff_vp9_subpel_filters), 256*\offset
+ cmp w5, #8
+ add x9, x6, w5, uxtw #4
+ mov x5, #2*\size
+.if \size >= 16
+ b \type\()_8tap_16h
+.else
+ b \type\()_8tap_\size\()h
+.endif
+endfunc
+.endm
+
+.macro do_8tap_h_filters size, bpp
+do_8tap_h_func put, regular, 1, \size, \bpp
+do_8tap_h_func avg, regular, 1, \size, \bpp
+do_8tap_h_func put, sharp, 2, \size, \bpp
+do_8tap_h_func avg, sharp, 2, \size, \bpp
+do_8tap_h_func put, smooth, 0, \size, \bpp
+do_8tap_h_func avg, smooth, 0, \size, \bpp
+.endm
+
+.macro do_8tap_h_filters_bpp bpp
+do_8tap_h_filters 64, \bpp
+do_8tap_h_filters 32, \bpp
+do_8tap_h_filters 16, \bpp
+do_8tap_h_filters 8, \bpp
+do_8tap_h_filters 4, \bpp
+.endm
+
+do_8tap_h_filters_bpp 10
+do_8tap_h_filters_bpp 12
+
+
+// Vertical filters
+
+// Round, shift and saturate and store reg1-reg4
+.macro do_store4 reg1, reg2, reg3, reg4, tmp1, tmp2, tmp3, tmp4, minreg, type
+ sqrshrun \reg1\().4h, \reg1\().4s, #7
+ sqrshrun \reg2\().4h, \reg2\().4s, #7
+ sqrshrun \reg3\().4h, \reg3\().4s, #7
+ sqrshrun \reg4\().4h, \reg4\().4s, #7
+.ifc \type,avg
+ ld1 {\tmp1\().4h}, [x7], x1
+ ld1 {\tmp2\().4h}, [x7], x1
+ ld1 {\tmp3\().4h}, [x7], x1
+ ld1 {\tmp4\().4h}, [x7], x1
+.endif
+ umin \reg1\().4h, \reg1\().4h, \minreg\().4h
+ umin \reg2\().4h, \reg2\().4h, \minreg\().4h
+ umin \reg3\().4h, \reg3\().4h, \minreg\().4h
+ umin \reg4\().4h, \reg4\().4h, \minreg\().4h
+.ifc \type,avg
+ urhadd \reg1\().4h, \reg1\().4h, \tmp1\().4h
+ urhadd \reg2\().4h, \reg2\().4h, \tmp2\().4h
+ urhadd \reg3\().4h, \reg3\().4h, \tmp3\().4h
+ urhadd \reg4\().4h, \reg4\().4h, \tmp4\().4h
+.endif
+ st1 {\reg1\().4h}, [x0], x1
+ st1 {\reg2\().4h}, [x0], x1
+ st1 {\reg3\().4h}, [x0], x1
+ st1 {\reg4\().4h}, [x0], x1
+.endm
+
+// Round, shift and saturate and store reg1-8, where
+// reg1-2, reg3-4 etc pairwise correspond to 4 rows.
+.macro do_store8 reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, minreg, type
+ sqrshrun \reg1\().4h, \reg1\().4s, #7
+ sqrshrun2 \reg1\().8h, \reg2\().4s, #7
+ sqrshrun \reg2\().4h, \reg3\().4s, #7
+ sqrshrun2 \reg2\().8h, \reg4\().4s, #7
+ sqrshrun \reg3\().4h, \reg5\().4s, #7
+ sqrshrun2 \reg3\().8h, \reg6\().4s, #7
+ sqrshrun \reg4\().4h, \reg7\().4s, #7
+ sqrshrun2 \reg4\().8h, \reg8\().4s, #7
+.ifc \type,avg
+ ld1 {\reg5\().8h}, [x7], x1
+ ld1 {\reg6\().8h}, [x7], x1
+ ld1 {\reg7\().8h}, [x7], x1
+ ld1 {\reg8\().8h}, [x7], x1
+.endif
+ umin \reg1\().8h, \reg1\().8h, \minreg\().8h
+ umin \reg2\().8h, \reg2\().8h, \minreg\().8h
+ umin \reg3\().8h, \reg3\().8h, \minreg\().8h
+ umin \reg4\().8h, \reg4\().8h, \minreg\().8h
+.ifc \type,avg
+ urhadd \reg1\().8h, \reg1\().8h, \reg5\().8h
+ urhadd \reg2\().8h, \reg2\().8h, \reg6\().8h
+ urhadd \reg3\().8h, \reg3\().8h, \reg7\().8h
+ urhadd \reg4\().8h, \reg4\().8h, \reg8\().8h
+.endif
+ st1 {\reg1\().8h}, [x0], x1
+ st1 {\reg2\().8h}, [x0], x1
+ st1 {\reg3\().8h}, [x0], x1
+ st1 {\reg4\().8h}, [x0], x1
+.endm
+
+// Evaluate the filter twice in parallel, from the inputs src1-src9 into dst1-dst2
+// (src1-src8 into dst1, src2-src9 into dst2).
+.macro convolve4 dst1, dst2, src1, src2, src3, src4, src5, src6, src7, src8, src9, tmp1, tmp2
+ smull \dst1\().4s, \src1\().4h, v0.h[0]
+ smull \dst2\().4s, \src2\().4h, v0.h[0]
+ smull \tmp1\().4s, \src2\().4h, v0.h[1]
+ smull \tmp2\().4s, \src3\().4h, v0.h[1]
+ smlal \dst1\().4s, \src3\().4h, v0.h[2]
+ smlal \dst2\().4s, \src4\().4h, v0.h[2]
+ smlal \tmp1\().4s, \src4\().4h, v0.h[3]
+ smlal \tmp2\().4s, \src5\().4h, v0.h[3]
+ smlal \dst1\().4s, \src5\().4h, v0.h[4]
+ smlal \dst2\().4s, \src6\().4h, v0.h[4]
+ smlal \tmp1\().4s, \src6\().4h, v0.h[5]
+ smlal \tmp2\().4s, \src7\().4h, v0.h[5]
+ smlal \dst1\().4s, \src7\().4h, v0.h[6]
+ smlal \dst2\().4s, \src8\().4h, v0.h[6]
+ smlal \tmp1\().4s, \src8\().4h, v0.h[7]
+ smlal \tmp2\().4s, \src9\().4h, v0.h[7]
+ add \dst1\().4s, \dst1\().4s, \tmp1\().4s
+ add \dst2\().4s, \dst2\().4s, \tmp2\().4s
+.endm
+
+// Evaluate the filter twice in parallel, from the inputs src1-src9 into dst1-dst4
+// (src1-src8 into dst1-dst2, src2-src9 into dst3-dst4).
+.macro convolve8 dst1, dst2, dst3, dst4, src1, src2, src3, src4, src5, src6, src7, src8, src9
+ smull \dst1\().4s, \src1\().4h, v0.h[0]
+ smull2 \dst2\().4s, \src1\().8h, v0.h[0]
+ smull \dst3\().4s, \src2\().4h, v0.h[0]
+ smull2 \dst4\().4s, \src2\().8h, v0.h[0]
+ smlal \dst1\().4s, \src2\().4h, v0.h[1]
+ smlal2 \dst2\().4s, \src2\().8h, v0.h[1]
+ smlal \dst3\().4s, \src3\().4h, v0.h[1]
+ smlal2 \dst4\().4s, \src3\().8h, v0.h[1]
+ smlal \dst1\().4s, \src3\().4h, v0.h[2]
+ smlal2 \dst2\().4s, \src3\().8h, v0.h[2]
+ smlal \dst3\().4s, \src4\().4h, v0.h[2]
+ smlal2 \dst4\().4s, \src4\().8h, v0.h[2]
+ smlal \dst1\().4s, \src4\().4h, v0.h[3]
+ smlal2 \dst2\().4s, \src4\().8h, v0.h[3]
+ smlal \dst3\().4s, \src5\().4h, v0.h[3]
+ smlal2 \dst4\().4s, \src5\().8h, v0.h[3]
+ smlal \dst1\().4s, \src5\().4h, v0.h[4]
+ smlal2 \dst2\().4s, \src5\().8h, v0.h[4]
+ smlal \dst3\().4s, \src6\().4h, v0.h[4]
+ smlal2 \dst4\().4s, \src6\().8h, v0.h[4]
+ smlal \dst1\().4s, \src6\().4h, v0.h[5]
+ smlal2 \dst2\().4s, \src6\().8h, v0.h[5]
+ smlal \dst3\().4s, \src7\().4h, v0.h[5]
+ smlal2 \dst4\().4s, \src7\().8h, v0.h[5]
+ smlal \dst1\().4s, \src7\().4h, v0.h[6]
+ smlal2 \dst2\().4s, \src7\().8h, v0.h[6]
+ smlal \dst3\().4s, \src8\().4h, v0.h[6]
+ smlal2 \dst4\().4s, \src8\().8h, v0.h[6]
+ smlal \dst1\().4s, \src8\().4h, v0.h[7]
+ smlal2 \dst2\().4s, \src8\().8h, v0.h[7]
+ smlal \dst3\().4s, \src9\().4h, v0.h[7]
+ smlal2 \dst4\().4s, \src9\().8h, v0.h[7]
+.endm
+
+// Instantiate a vertical filter function for filtering 8 pixels at a time.
+// The height is passed in x4, the width in x5 and the filter coefficients
+// in x6.
+.macro do_8tap_8v type
+function \type\()_8tap_8v
+ sub x2, x2, x3, lsl #1
+ sub x2, x2, x3
+ ld1 {v0.8h}, [x6]
+1:
+.ifc \type,avg
+ mov x7, x0
+.endif
+ mov x6, x4
+
+ ld1 {v17.8h}, [x2], x3
+ ld1 {v18.8h}, [x2], x3
+ ld1 {v19.8h}, [x2], x3
+ ld1 {v20.8h}, [x2], x3
+ ld1 {v21.8h}, [x2], x3
+ ld1 {v22.8h}, [x2], x3
+ ld1 {v23.8h}, [x2], x3
+2:
+ ld1 {v24.8h}, [x2], x3
+ ld1 {v25.8h}, [x2], x3
+ ld1 {v26.8h}, [x2], x3
+ ld1 {v27.8h}, [x2], x3
+
+ convolve8 v2, v3, v4, v5, v17, v18, v19, v20, v21, v22, v23, v24, v25
+ convolve8 v6, v7, v30, v31, v19, v20, v21, v22, v23, v24, v25, v26, v27
+ do_store8 v2, v3, v4, v5, v6, v7, v30, v31, v1, \type
+
+ subs x6, x6, #4
+ b.eq 8f
+
+ ld1 {v16.8h}, [x2], x3
+ ld1 {v17.8h}, [x2], x3
+ ld1 {v18.8h}, [x2], x3
+ ld1 {v19.8h}, [x2], x3
+ convolve8 v2, v3, v4, v5, v21, v22, v23, v24, v25, v26, v27, v16, v17
+ convolve8 v6, v7, v20, v21, v23, v24, v25, v26, v27, v16, v17, v18, v19
+ do_store8 v2, v3, v4, v5, v6, v7, v20, v21, v1, \type
+
+ subs x6, x6, #4
+ b.eq 8f
+
+ ld1 {v20.8h}, [x2], x3
+ ld1 {v21.8h}, [x2], x3
+ ld1 {v22.8h}, [x2], x3
+ ld1 {v23.8h}, [x2], x3
+ convolve8 v2, v3, v4, v5, v25, v26, v27, v16, v17, v18, v19, v20, v21
+ convolve8 v6, v7, v24, v25, v27, v16, v17, v18, v19, v20, v21, v22, v23
+ do_store8 v2, v3, v4, v5, v6, v7, v24, v25, v1, \type
+
+ subs x6, x6, #4
+ b.ne 2b
+
+8:
+ subs x5, x5, #8
+ b.eq 9f
+ // x0 -= h * dst_stride
+ msub x0, x1, x4, x0
+ // x2 -= h * src_stride
+ msub x2, x3, x4, x2
+ // x2 -= 8 * src_stride
+ sub x2, x2, x3, lsl #3
+ // x2 += 1 * src_stride
+ add x2, x2, x3
+ add x2, x2, #16
+ add x0, x0, #16
+ b 1b
+9:
+ ret
+endfunc
+.endm
+
+do_8tap_8v put
+do_8tap_8v avg
+
+
+// Instantiate a vertical filter function for filtering a 4-pixel-wide
+// slice. This is only designed to work for 4 or 8 output lines.
+.macro do_8tap_4v type
+function \type\()_8tap_4v
+ sub x2, x2, x3, lsl #1
+ sub x2, x2, x3
+ ld1 {v0.8h}, [x6]
+.ifc \type,avg
+ mov x7, x0
+.endif
+
+ ld1 {v16.4h}, [x2], x3
+ ld1 {v17.4h}, [x2], x3
+ ld1 {v18.4h}, [x2], x3
+ ld1 {v19.4h}, [x2], x3
+ ld1 {v20.4h}, [x2], x3
+ ld1 {v21.4h}, [x2], x3
+ ld1 {v22.4h}, [x2], x3
+ ld1 {v23.4h}, [x2], x3
+ ld1 {v24.4h}, [x2], x3
+ ld1 {v25.4h}, [x2], x3
+ ld1 {v26.4h}, [x2], x3
+
+ convolve4 v2, v3, v16, v17, v18, v19, v20, v21, v22, v23, v24, v30, v31
+ convolve4 v4, v5, v18, v19, v20, v21, v22, v23, v24, v25, v26, v30, v31
+ do_store4 v2, v3, v4, v5, v28, v29, v30, v31, v1, \type
+
+ subs x4, x4, #4
+ b.eq 9f
+
+ ld1 {v27.4h}, [x2], x3
+ ld1 {v28.4h}, [x2], x3
+ ld1 {v29.4h}, [x2], x3
+ ld1 {v30.4h}, [x2], x3
+
+ convolve4 v2, v3, v20, v21, v22, v23, v24, v25, v26, v27, v28, v16, v17
+ convolve4 v4, v5, v22, v23, v24, v25, v26, v27, v28, v29, v30, v16, v17
+ do_store4 v2, v3, v4, v5, v16, v17, v18, v19, v1, \type
+
+9:
+ ret
+endfunc
+.endm
+
+do_8tap_4v put
+do_8tap_4v avg
+
+
+.macro do_8tap_v_func type, filter, offset, size, bpp
+function ff_vp9_\type\()_\filter\()\size\()_v_\bpp\()_neon, export=1
+ uxtw x4, w4
+ mvni v1.8h, #((0xff << (\bpp - 8)) & 0xff), lsl #8
+ movrel x5, X(ff_vp9_subpel_filters), 256*\offset
+ add x6, x5, w6, uxtw #4
+ mov x5, #\size
+.if \size >= 8
+ b \type\()_8tap_8v
+.else
+ b \type\()_8tap_4v
+.endif
+endfunc
+.endm
+
+.macro do_8tap_v_filters size, bpp
+do_8tap_v_func put, regular, 1, \size, \bpp
+do_8tap_v_func avg, regular, 1, \size, \bpp
+do_8tap_v_func put, sharp, 2, \size, \bpp
+do_8tap_v_func avg, sharp, 2, \size, \bpp
+do_8tap_v_func put, smooth, 0, \size, \bpp
+do_8tap_v_func avg, smooth, 0, \size, \bpp
+.endm
+
+.macro do_8tap_v_filters_bpp bpp
+do_8tap_v_filters 64, \bpp
+do_8tap_v_filters 32, \bpp
+do_8tap_v_filters 16, \bpp
+do_8tap_v_filters 8, \bpp
+do_8tap_v_filters 4, \bpp
+.endm
+
+do_8tap_v_filters_bpp 10
+do_8tap_v_filters_bpp 12
diff --git a/media/ffvpx/libavcodec/aarch64/vp9mc_aarch64.S b/media/ffvpx/libavcodec/aarch64/vp9mc_aarch64.S
new file mode 100644
index 0000000000..f17a8cf04a
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9mc_aarch64.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+// All public functions in this file have the following signature:
+// typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
+// const uint8_t *ref, ptrdiff_t ref_stride,
+// int h, int mx, int my);
+
+function ff_vp9_copy128_aarch64, export=1
+1:
+ ldp x5, x6, [x2]
+ ldp x7, x8, [x2, #16]
+ stp x5, x6, [x0]
+ ldp x9, x10, [x2, #32]
+ stp x7, x8, [x0, #16]
+ subs w4, w4, #1
+ ldp x11, x12, [x2, #48]
+ stp x9, x10, [x0, #32]
+ stp x11, x12, [x0, #48]
+ ldp x5, x6, [x2, #64]
+ ldp x7, x8, [x2, #80]
+ stp x5, x6, [x0, #64]
+ ldp x9, x10, [x2, #96]
+ stp x7, x8, [x0, #80]
+ ldp x11, x12, [x2, #112]
+ stp x9, x10, [x0, #96]
+ stp x11, x12, [x0, #112]
+ add x2, x2, x3
+ add x0, x0, x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_copy64_aarch64, export=1
+1:
+ ldp x5, x6, [x2]
+ ldp x7, x8, [x2, #16]
+ stp x5, x6, [x0]
+ ldp x9, x10, [x2, #32]
+ stp x7, x8, [x0, #16]
+ subs w4, w4, #1
+ ldp x11, x12, [x2, #48]
+ stp x9, x10, [x0, #32]
+ stp x11, x12, [x0, #48]
+ add x2, x2, x3
+ add x0, x0, x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_copy32_aarch64, export=1
+1:
+ ldp x5, x6, [x2]
+ ldp x7, x8, [x2, #16]
+ stp x5, x6, [x0]
+ subs w4, w4, #1
+ stp x7, x8, [x0, #16]
+ add x2, x2, x3
+ add x0, x0, x1
+ b.ne 1b
+ ret
+endfunc
diff --git a/media/ffvpx/libavcodec/aarch64/vp9mc_neon.S b/media/ffvpx/libavcodec/aarch64/vp9mc_neon.S
new file mode 100644
index 0000000000..abf2bae9db
--- /dev/null
+++ b/media/ffvpx/libavcodec/aarch64/vp9mc_neon.S
@@ -0,0 +1,657 @@
+/*
+ * Copyright (c) 2016 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+// All public functions in this file have the following signature:
+// typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
+// const uint8_t *ref, ptrdiff_t ref_stride,
+// int h, int mx, int my);
+
+function ff_vp9_avg64_neon, export=1
+ mov x5, x0
+1:
+ ld1 {v4.16b, v5.16b, v6.16b, v7.16b}, [x2], x3
+ ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
+ ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x2], x3
+ urhadd v0.16b, v0.16b, v4.16b
+ urhadd v1.16b, v1.16b, v5.16b
+ ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x0], x1
+ urhadd v2.16b, v2.16b, v6.16b
+ urhadd v3.16b, v3.16b, v7.16b
+ subs w4, w4, #2
+ urhadd v16.16b, v16.16b, v20.16b
+ urhadd v17.16b, v17.16b, v21.16b
+ st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x5], x1
+ urhadd v18.16b, v18.16b, v22.16b
+ urhadd v19.16b, v19.16b, v23.16b
+ st1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x5], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_avg32_neon, export=1
+1:
+ ld1 {v2.16b, v3.16b}, [x2], x3
+ ld1 {v0.16b, v1.16b}, [x0]
+ urhadd v0.16b, v0.16b, v2.16b
+ urhadd v1.16b, v1.16b, v3.16b
+ subs w4, w4, #1
+ st1 {v0.16b, v1.16b}, [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_copy16_neon, export=1
+ add x5, x0, x1
+ lsl x1, x1, #1
+ add x6, x2, x3
+ lsl x3, x3, #1
+1:
+ ld1 {v0.16b}, [x2], x3
+ ld1 {v1.16b}, [x6], x3
+ ld1 {v2.16b}, [x2], x3
+ ld1 {v3.16b}, [x6], x3
+ subs w4, w4, #4
+ st1 {v0.16b}, [x0], x1
+ st1 {v1.16b}, [x5], x1
+ st1 {v2.16b}, [x0], x1
+ st1 {v3.16b}, [x5], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_avg16_neon, export=1
+ mov x5, x0
+1:
+ ld1 {v2.16b}, [x2], x3
+ ld1 {v0.16b}, [x0], x1
+ ld1 {v3.16b}, [x2], x3
+ urhadd v0.16b, v0.16b, v2.16b
+ ld1 {v1.16b}, [x0], x1
+ urhadd v1.16b, v1.16b, v3.16b
+ subs w4, w4, #2
+ st1 {v0.16b}, [x5], x1
+ st1 {v1.16b}, [x5], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_copy8_neon, export=1
+1:
+ ld1 {v0.8b}, [x2], x3
+ ld1 {v1.8b}, [x2], x3
+ subs w4, w4, #2
+ st1 {v0.8b}, [x0], x1
+ st1 {v1.8b}, [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_avg8_neon, export=1
+ mov x5, x0
+1:
+ ld1 {v2.8b}, [x2], x3
+ ld1 {v0.8b}, [x0], x1
+ ld1 {v3.8b}, [x2], x3
+ urhadd v0.8b, v0.8b, v2.8b
+ ld1 {v1.8b}, [x0], x1
+ urhadd v1.8b, v1.8b, v3.8b
+ subs w4, w4, #2
+ st1 {v0.8b}, [x5], x1
+ st1 {v1.8b}, [x5], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_copy4_neon, export=1
+1:
+ ld1 {v0.s}[0], [x2], x3
+ ld1 {v1.s}[0], [x2], x3
+ st1 {v0.s}[0], [x0], x1
+ ld1 {v2.s}[0], [x2], x3
+ st1 {v1.s}[0], [x0], x1
+ ld1 {v3.s}[0], [x2], x3
+ subs w4, w4, #4
+ st1 {v2.s}[0], [x0], x1
+ st1 {v3.s}[0], [x0], x1
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vp9_avg4_neon, export=1
+ mov x5, x0
+1:
+ ld1 {v2.s}[0], [x2], x3
+ ld1 {v0.s}[0], [x0], x1
+ ld1 {v2.s}[1], [x2], x3
+ ld1 {v0.s}[1], [x0], x1
+ ld1 {v3.s}[0], [x2], x3
+ ld1 {v1.s}[0], [x0], x1
+ ld1 {v3.s}[1], [x2], x3
+ ld1 {v1.s}[1], [x0], x1
+ subs w4, w4, #4
+ urhadd v0.8b, v0.8b, v2.8b
+ urhadd v1.8b, v1.8b, v3.8b
+ st1 {v0.s}[0], [x5], x1
+ st1 {v0.s}[1], [x5], x1
+ st1 {v1.s}[0], [x5], x1
+ st1 {v1.s}[1], [x5], x1
+ b.ne 1b
+ ret
+endfunc
+
+
+// Extract a vector from src1-src2 and src4-src5 (src1-src3 and src4-src6
+// for size >= 16), and multiply-accumulate into dst1 and dst3 (or
+// dst1-dst2 and dst3-dst4 for size >= 16)
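+// For a given offset this is the vectorized form of
+//   dst[i] += filter[offset] * src[i + offset]
+// with the shifted source vector produced by ext from the neighbouring
+// (already widened to 16 bit) source registers.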
+.macro extmla dst1, dst2, dst3, dst4, src1, src2, src3, src4, src5, src6, offset, size
+ ext v20.16b, \src1\().16b, \src2\().16b, #(2*\offset)
+ ext v22.16b, \src4\().16b, \src5\().16b, #(2*\offset)
+.if \size >= 16
+ mla \dst1\().8h, v20.8h, v0.h[\offset]
+ ext v21.16b, \src2\().16b, \src3\().16b, #(2*\offset)
+ mla \dst3\().8h, v22.8h, v0.h[\offset]
+ ext v23.16b, \src5\().16b, \src6\().16b, #(2*\offset)
+ mla \dst2\().8h, v21.8h, v0.h[\offset]
+ mla \dst4\().8h, v23.8h, v0.h[\offset]
+.elseif \size == 8
+ mla \dst1\().8h, v20.8h, v0.h[\offset]
+ mla \dst3\().8h, v22.8h, v0.h[\offset]
+.else
+ mla \dst1\().4h, v20.4h, v0.h[\offset]
+ mla \dst3\().4h, v22.4h, v0.h[\offset]
+.endif
+.endm
+// The same as above, but instead of accumulating straight into the
+// destination, use a temp register and accumulate with saturation.
+.macro extmulqadd dst1, dst2, dst3, dst4, src1, src2, src3, src4, src5, src6, offset, size
+ ext v20.16b, \src1\().16b, \src2\().16b, #(2*\offset)
+ ext v22.16b, \src4\().16b, \src5\().16b, #(2*\offset)
+.if \size >= 16
+ mul v20.8h, v20.8h, v0.h[\offset]
+ ext v21.16b, \src2\().16b, \src3\().16b, #(2*\offset)
+ mul v22.8h, v22.8h, v0.h[\offset]
+ ext v23.16b, \src5\().16b, \src6\().16b, #(2*\offset)
+ mul v21.8h, v21.8h, v0.h[\offset]
+ mul v23.8h, v23.8h, v0.h[\offset]
+.elseif \size == 8
+ mul v20.8h, v20.8h, v0.h[\offset]
+ mul v22.8h, v22.8h, v0.h[\offset]
+.else
+ mul v20.4h, v20.4h, v0.h[\offset]
+ mul v22.4h, v22.4h, v0.h[\offset]
+.endif
+.if \size == 4
+ sqadd \dst1\().4h, \dst1\().4h, v20.4h
+ sqadd \dst3\().4h, \dst3\().4h, v22.4h
+.else
+ sqadd \dst1\().8h, \dst1\().8h, v20.8h
+ sqadd \dst3\().8h, \dst3\().8h, v22.8h
+.if \size >= 16
+ sqadd \dst2\().8h, \dst2\().8h, v21.8h
+ sqadd \dst4\().8h, \dst4\().8h, v23.8h
+.endif
+.endif
+.endm
+
+
+// Instantiate a horizontal filter function for the given size.
+// This can work on 4, 8 or 16 pixels in parallel; for larger
+// widths it will do 16 pixels at a time and loop horizontally.
+// The actual width is passed in x5, the height in w4 and the
+// filter coefficients in x9. idx2 is the index of the largest
+// filter coefficient (3 or 4) and idx1 is the other of the two.
+.macro do_8tap_h type, size, idx1, idx2
+function \type\()_8tap_\size\()h_\idx1\idx2
+ sub x2, x2, #3
+ add x6, x0, x1
+ add x7, x2, x3
+ add x1, x1, x1
+ add x3, x3, x3
+ // Only size >= 16 loops horizontally and needs
+ // reduced dst stride
+.if \size >= 16
+ sub x1, x1, x5
+.endif
+ // size >= 16 loads two qwords and increments x2,
+ // for size 4/8 it's enough with one qword and no
+ // postincrement
+.if \size >= 16
+ sub x3, x3, x5
+ sub x3, x3, #8
+.endif
+ // Load the filter vector
+ ld1 {v0.8h}, [x9]
+1:
+.if \size >= 16
+ mov x9, x5
+.endif
+ // Load src
+.if \size >= 16
+ ld1 {v4.8b, v5.8b, v6.8b}, [x2], #24
+ ld1 {v16.8b, v17.8b, v18.8b}, [x7], #24
+.else
+ ld1 {v4.8b, v5.8b}, [x2]
+ ld1 {v16.8b, v17.8b}, [x7]
+.endif
+ uxtl v4.8h, v4.8b
+ uxtl v5.8h, v5.8b
+ uxtl v16.8h, v16.8b
+ uxtl v17.8h, v17.8b
+.if \size >= 16
+ uxtl v6.8h, v6.8b
+ uxtl v18.8h, v18.8b
+.endif
+2:
+
+ // Accumulate, adding idx2 last with a separate
+ // saturating add. The positive filter coefficients
+ // for all indices except idx2 must add up to less
+ // than 127 for this not to overflow.
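+ // (Worst case for the 16 bit accumulation: 255 * 126 = 32130,
+ // which still fits in a signed halfword; the remaining, largest
+ // tap is then added with a saturating sqadd in extmulqadd.)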
+ mul v1.8h, v4.8h, v0.h[0]
+ mul v24.8h, v16.8h, v0.h[0]
+.if \size >= 16
+ mul v2.8h, v5.8h, v0.h[0]
+ mul v25.8h, v17.8h, v0.h[0]
+.endif
+ extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, 1, \size
+ extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, 2, \size
+ extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, \idx1, \size
+ extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, 5, \size
+ extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, 6, \size
+ extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, 7, \size
+ extmulqadd v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, \idx2, \size
+
+ // Round, shift and saturate
+ sqrshrun v1.8b, v1.8h, #7
+ sqrshrun v24.8b, v24.8h, #7
+.if \size >= 16
+ sqrshrun2 v1.16b, v2.8h, #7
+ sqrshrun2 v24.16b, v25.8h, #7
+.endif
+ // Average
+.ifc \type,avg
+.if \size >= 16
+ ld1 {v2.16b}, [x0]
+ ld1 {v3.16b}, [x6]
+ urhadd v1.16b, v1.16b, v2.16b
+ urhadd v24.16b, v24.16b, v3.16b
+.elseif \size == 8
+ ld1 {v2.8b}, [x0]
+ ld1 {v3.8b}, [x6]
+ urhadd v1.8b, v1.8b, v2.8b
+ urhadd v24.8b, v24.8b, v3.8b
+.else
+ ld1 {v2.s}[0], [x0]
+ ld1 {v3.s}[0], [x6]
+ urhadd v1.8b, v1.8b, v2.8b
+ urhadd v24.8b, v24.8b, v3.8b
+.endif
+.endif
+ // Store and loop horizontally (for size >= 16)
+.if \size >= 16
+ subs x9, x9, #16
+ st1 {v1.16b}, [x0], #16
+ st1 {v24.16b}, [x6], #16
+ b.eq 3f
+ mov v4.16b, v6.16b
+ mov v16.16b, v18.16b
+ ld1 {v6.16b}, [x2], #16
+ ld1 {v18.16b}, [x7], #16
+ uxtl v5.8h, v6.8b
+ uxtl2 v6.8h, v6.16b
+ uxtl v17.8h, v18.8b
+ uxtl2 v18.8h, v18.16b
+ b 2b
+.elseif \size == 8
+ st1 {v1.8b}, [x0]
+ st1 {v24.8b}, [x6]
+.else // \size == 4
+ st1 {v1.s}[0], [x0]
+ st1 {v24.s}[0], [x6]
+.endif
+3:
+ // Loop vertically
+ add x0, x0, x1
+ add x6, x6, x1
+ add x2, x2, x3
+ add x7, x7, x3
+ subs w4, w4, #2
+ b.ne 1b
+ ret
+endfunc
+.endm
+
+.macro do_8tap_h_size size
+do_8tap_h put, \size, 3, 4
+do_8tap_h avg, \size, 3, 4
+do_8tap_h put, \size, 4, 3
+do_8tap_h avg, \size, 4, 3
+.endm
+
+do_8tap_h_size 4
+do_8tap_h_size 8
+do_8tap_h_size 16
+
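+// Entry points for the horizontal filters: select the filter set
+// (smooth/regular/sharp, 256 bytes each, i.e. 16 phases of 8 16-bit taps),
+// index the phase passed in w5 (16 bytes per phase) and tail-call the
+// variant whose separately saturated tap matches the phase (index 4 for
+// phases >= 8, index 3 below that).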
+.macro do_8tap_h_func type, filter, offset, size
+function ff_vp9_\type\()_\filter\()\size\()_h_neon, export=1
+ movrel x6, X(ff_vp9_subpel_filters), 256*\offset
+ cmp w5, #8
+ add x9, x6, w5, uxtw #4
+ mov x5, #\size
+.if \size >= 16
+ b.ge \type\()_8tap_16h_34
+ b \type\()_8tap_16h_43
+.else
+ b.ge \type\()_8tap_\size\()h_34
+ b \type\()_8tap_\size\()h_43
+.endif
+endfunc
+.endm
+
+.macro do_8tap_h_filters size
+do_8tap_h_func put, regular, 1, \size
+do_8tap_h_func avg, regular, 1, \size
+do_8tap_h_func put, sharp, 2, \size
+do_8tap_h_func avg, sharp, 2, \size
+do_8tap_h_func put, smooth, 0, \size
+do_8tap_h_func avg, smooth, 0, \size
+.endm
+
+do_8tap_h_filters 64
+do_8tap_h_filters 32
+do_8tap_h_filters 16
+do_8tap_h_filters 8
+do_8tap_h_filters 4
+
+
+// Vertical filters
+
+// Round, shift, saturate and store reg1-reg2 over 4 lines
+.macro do_store4 reg1, reg2, tmp1, tmp2, type
+ sqrshrun \reg1\().8b, \reg1\().8h, #7
+ sqrshrun \reg2\().8b, \reg2\().8h, #7
+.ifc \type,avg
+ ld1 {\tmp1\().s}[0], [x7], x1
+ ld1 {\tmp2\().s}[0], [x7], x1
+ ld1 {\tmp1\().s}[1], [x7], x1
+ ld1 {\tmp2\().s}[1], [x7], x1
+ urhadd \reg1\().8b, \reg1\().8b, \tmp1\().8b
+ urhadd \reg2\().8b, \reg2\().8b, \tmp2\().8b
+.endif
+ st1 {\reg1\().s}[0], [x0], x1
+ st1 {\reg2\().s}[0], [x0], x1
+ st1 {\reg1\().s}[1], [x0], x1
+ st1 {\reg2\().s}[1], [x0], x1
+.endm
+
+// Round, shift, saturate and store reg1-4
+.macro do_store reg1, reg2, reg3, reg4, tmp1, tmp2, tmp3, tmp4, type
+ sqrshrun \reg1\().8b, \reg1\().8h, #7
+ sqrshrun \reg2\().8b, \reg2\().8h, #7
+ sqrshrun \reg3\().8b, \reg3\().8h, #7
+ sqrshrun \reg4\().8b, \reg4\().8h, #7
+.ifc \type,avg
+ ld1 {\tmp1\().8b}, [x7], x1
+ ld1 {\tmp2\().8b}, [x7], x1
+ ld1 {\tmp3\().8b}, [x7], x1
+ ld1 {\tmp4\().8b}, [x7], x1
+ urhadd \reg1\().8b, \reg1\().8b, \tmp1\().8b
+ urhadd \reg2\().8b, \reg2\().8b, \tmp2\().8b
+ urhadd \reg3\().8b, \reg3\().8b, \tmp3\().8b
+ urhadd \reg4\().8b, \reg4\().8b, \tmp4\().8b
+.endif
+ st1 {\reg1\().8b}, [x0], x1
+ st1 {\reg2\().8b}, [x0], x1
+ st1 {\reg3\().8b}, [x0], x1
+ st1 {\reg4\().8b}, [x0], x1
+.endm
+
+// Evaluate the filter twice in parallel, from the inputs src1-src9 into dst1-dst2
+// (src1-src8 into dst1, src2-src9 into dst2), adding idx2 separately
+// at the end with saturation. Indices 0 and 7 always have negative or zero
+// coefficients, so they can be accumulated into tmp1-tmp2 together with the
+// largest coefficient.
+.macro convolve dst1, dst2, src1, src2, src3, src4, src5, src6, src7, src8, src9, idx1, idx2, tmp1, tmp2
+ mul \dst1\().8h, \src2\().8h, v0.h[1]
+ mul \dst2\().8h, \src3\().8h, v0.h[1]
+ mul \tmp1\().8h, \src1\().8h, v0.h[0]
+ mul \tmp2\().8h, \src2\().8h, v0.h[0]
+ mla \dst1\().8h, \src3\().8h, v0.h[2]
+ mla \dst2\().8h, \src4\().8h, v0.h[2]
+.if \idx1 == 3
+ mla \dst1\().8h, \src4\().8h, v0.h[3]
+ mla \dst2\().8h, \src5\().8h, v0.h[3]
+.else
+ mla \dst1\().8h, \src5\().8h, v0.h[4]
+ mla \dst2\().8h, \src6\().8h, v0.h[4]
+.endif
+ mla \dst1\().8h, \src6\().8h, v0.h[5]
+ mla \dst2\().8h, \src7\().8h, v0.h[5]
+ mla \tmp1\().8h, \src8\().8h, v0.h[7]
+ mla \tmp2\().8h, \src9\().8h, v0.h[7]
+ mla \dst1\().8h, \src7\().8h, v0.h[6]
+ mla \dst2\().8h, \src8\().8h, v0.h[6]
+.if \idx2 == 3
+ mla \tmp1\().8h, \src4\().8h, v0.h[3]
+ mla \tmp2\().8h, \src5\().8h, v0.h[3]
+.else
+ mla \tmp1\().8h, \src5\().8h, v0.h[4]
+ mla \tmp2\().8h, \src6\().8h, v0.h[4]
+.endif
+ sqadd \dst1\().8h, \dst1\().8h, \tmp1\().8h
+ sqadd \dst2\().8h, \dst2\().8h, \tmp2\().8h
+.endm
+
+// Load pixels and extend them to 16 bit
+.macro loadl dst1, dst2, dst3, dst4
+ ld1 {v1.8b}, [x2], x3
+ ld1 {v2.8b}, [x2], x3
+ ld1 {v3.8b}, [x2], x3
+.ifnb \dst4
+ ld1 {v4.8b}, [x2], x3
+.endif
+ uxtl \dst1\().8h, v1.8b
+ uxtl \dst2\().8h, v2.8b
+ uxtl \dst3\().8h, v3.8b
+.ifnb \dst4
+ uxtl \dst4\().8h, v4.8b
+.endif
+.endm
+
+// Instantiate a vertical filter function for filtering 8 pixels at a time.
+// The height is passed in x4, the width in x5 and the filter coefficients
+// in x6. idx2 is the index of the largest filter coefficient (3 or 4)
+// and idx1 is the other.
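+// The source rows are kept in a rolling window of registers (v16-v27);
+// each pass through the inner loop loads four new rows and produces four
+// output rows.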
+.macro do_8tap_8v type, idx1, idx2
+function \type\()_8tap_8v_\idx1\idx2
+ sub x2, x2, x3, lsl #1
+ sub x2, x2, x3
+ ld1 {v0.8h}, [x6]
+1:
+.ifc \type,avg
+ mov x7, x0
+.endif
+ mov x6, x4
+
+ loadl v17, v18, v19
+
+ loadl v20, v21, v22, v23
+2:
+ loadl v24, v25, v26, v27
+ convolve v1, v2, v17, v18, v19, v20, v21, v22, v23, v24, v25, \idx1, \idx2, v5, v6
+ convolve v3, v4, v19, v20, v21, v22, v23, v24, v25, v26, v27, \idx1, \idx2, v5, v6
+ do_store v1, v2, v3, v4, v5, v6, v7, v28, \type
+
+ subs x6, x6, #4
+ b.eq 8f
+
+ loadl v16, v17, v18, v19
+ convolve v1, v2, v21, v22, v23, v24, v25, v26, v27, v16, v17, \idx1, \idx2, v5, v6
+ convolve v3, v4, v23, v24, v25, v26, v27, v16, v17, v18, v19, \idx1, \idx2, v5, v6
+ do_store v1, v2, v3, v4, v5, v6, v7, v28, \type
+
+ subs x6, x6, #4
+ b.eq 8f
+
+ loadl v20, v21, v22, v23
+ convolve v1, v2, v25, v26, v27, v16, v17, v18, v19, v20, v21, \idx1, \idx2, v5, v6
+ convolve v3, v4, v27, v16, v17, v18, v19, v20, v21, v22, v23, \idx1, \idx2, v5, v6
+ do_store v1, v2, v3, v4, v5, v6, v7, v28, \type
+
+ subs x6, x6, #4
+ b.ne 2b
+
+8:
+ subs x5, x5, #8
+ b.eq 9f
+ // x0 -= h * dst_stride
+ msub x0, x1, x4, x0
+ // x2 -= h * src_stride
+ msub x2, x3, x4, x2
+ // x2 -= 8 * src_stride
+ sub x2, x2, x3, lsl #3
+ // x2 += 1 * src_stride
+ add x2, x2, x3
+ add x2, x2, #8
+ add x0, x0, #8
+ b 1b
+9:
+ ret
+endfunc
+.endm
+
+do_8tap_8v put, 3, 4
+do_8tap_8v put, 4, 3
+do_8tap_8v avg, 3, 4
+do_8tap_8v avg, 4, 3
+
+
+// Instantiate a vertical filter function for filtering a slice 4 pixels
+// wide. The first half of each register contains one row, while the second
+// half contains the row two steps later (which is also stored in the first
+// half of the register two steps ahead). The convolution produces two
+// outputs at a time: v17-v24 feed one output and v18-v25 the other.
+// The first half of the first output is the first output row and the first
+// half of the second output is the second output row; the second halves
+// hold rows 3 and 4.
+// This is only designed to work for 4 or 8 output lines.
+.macro do_8tap_4v type, idx1, idx2
+function \type\()_8tap_4v_\idx1\idx2
+ sub x2, x2, x3, lsl #1
+ sub x2, x2, x3
+ ld1 {v0.8h}, [x6]
+.ifc \type,avg
+ mov x7, x0
+.endif
+
+ ld1 {v1.s}[0], [x2], x3
+ ld1 {v2.s}[0], [x2], x3
+ ld1 {v3.s}[0], [x2], x3
+ ld1 {v4.s}[0], [x2], x3
+ ld1 {v5.s}[0], [x2], x3
+ ld1 {v6.s}[0], [x2], x3
+ trn1 v1.2s, v1.2s, v3.2s
+ ld1 {v7.s}[0], [x2], x3
+ trn1 v2.2s, v2.2s, v4.2s
+ ld1 {v26.s}[0], [x2], x3
+ uxtl v17.8h, v1.8b
+ trn1 v3.2s, v3.2s, v5.2s
+ ld1 {v27.s}[0], [x2], x3
+ uxtl v18.8h, v2.8b
+ trn1 v4.2s, v4.2s, v6.2s
+ ld1 {v28.s}[0], [x2], x3
+ uxtl v19.8h, v3.8b
+ trn1 v5.2s, v5.2s, v7.2s
+ ld1 {v29.s}[0], [x2], x3
+ uxtl v20.8h, v4.8b
+ trn1 v6.2s, v6.2s, v26.2s
+ uxtl v21.8h, v5.8b
+ trn1 v7.2s, v7.2s, v27.2s
+ uxtl v22.8h, v6.8b
+ trn1 v26.2s, v26.2s, v28.2s
+ uxtl v23.8h, v7.8b
+ trn1 v27.2s, v27.2s, v29.2s
+ uxtl v24.8h, v26.8b
+ uxtl v25.8h, v27.8b
+
+ convolve v1, v2, v17, v18, v19, v20, v21, v22, v23, v24, v25, \idx1, \idx2, v3, v4
+ do_store4 v1, v2, v5, v6, \type
+
+ subs x4, x4, #4
+ b.eq 9f
+
+ ld1 {v1.s}[0], [x2], x3
+ ld1 {v2.s}[0], [x2], x3
+ trn1 v28.2s, v28.2s, v1.2s
+ trn1 v29.2s, v29.2s, v2.2s
+ ld1 {v1.s}[1], [x2], x3
+ uxtl v26.8h, v28.8b
+ ld1 {v2.s}[1], [x2], x3
+ uxtl v27.8h, v29.8b
+ uxtl v28.8h, v1.8b
+ uxtl v29.8h, v2.8b
+
+ convolve v1, v2, v21, v22, v23, v24, v25, v26, v27, v28, v29, \idx1, \idx2, v3, v4
+ do_store4 v1, v2, v5, v6, \type
+
+9:
+ ret
+endfunc
+.endm
+
+do_8tap_4v put, 3, 4
+do_8tap_4v put, 4, 3
+do_8tap_4v avg, 3, 4
+do_8tap_4v avg, 4, 3
+
+
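+// Entry points for the vertical filters: same filter table lookup as for
+// the horizontal ones, but with the phase in w6; widths below 8 take the
+// 4-pixel path.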
+.macro do_8tap_v_func type, filter, offset, size
+function ff_vp9_\type\()_\filter\()\size\()_v_neon, export=1
+ uxtw x4, w4
+ movrel x5, X(ff_vp9_subpel_filters), 256*\offset
+ cmp w6, #8
+ add x6, x5, w6, uxtw #4
+ mov x5, #\size
+.if \size >= 8
+ b.ge \type\()_8tap_8v_34
+ b \type\()_8tap_8v_43
+.else
+ b.ge \type\()_8tap_4v_34
+ b \type\()_8tap_4v_43
+.endif
+endfunc
+.endm
+
+.macro do_8tap_v_filters size
+do_8tap_v_func put, regular, 1, \size
+do_8tap_v_func avg, regular, 1, \size
+do_8tap_v_func put, sharp, 2, \size
+do_8tap_v_func avg, sharp, 2, \size
+do_8tap_v_func put, smooth, 0, \size
+do_8tap_v_func avg, smooth, 0, \size
+.endm
+
+do_8tap_v_filters 64
+do_8tap_v_filters 32
+do_8tap_v_filters 16
+do_8tap_v_filters 8
+do_8tap_v_filters 4
diff --git a/media/ffvpx/libavcodec/allcodecs.c b/media/ffvpx/libavcodec/allcodecs.c
new file mode 100644
index 0000000000..e593ad19af
--- /dev/null
+++ b/media/ffvpx/libavcodec/allcodecs.c
@@ -0,0 +1,995 @@
+/*
+ * Provide registration of all codecs, parsers and bitstream filters for libavcodec.
+ * Copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Provide registration of all codecs, parsers and bitstream filters for libavcodec.
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "config.h"
+#include "config_components.h"
+#include "libavutil/thread.h"
+#include "codec.h"
+#include "codec_id.h"
+#include "codec_internal.h"
+
+extern const FFCodec ff_a64multi_encoder;
+extern const FFCodec ff_a64multi5_encoder;
+extern const FFCodec ff_aasc_decoder;
+extern const FFCodec ff_aic_decoder;
+extern const FFCodec ff_alias_pix_encoder;
+extern const FFCodec ff_alias_pix_decoder;
+extern const FFCodec ff_agm_decoder;
+extern const FFCodec ff_amv_encoder;
+extern const FFCodec ff_amv_decoder;
+extern const FFCodec ff_anm_decoder;
+extern const FFCodec ff_ansi_decoder;
+extern const FFCodec ff_apng_encoder;
+extern const FFCodec ff_apng_decoder;
+extern const FFCodec ff_arbc_decoder;
+extern const FFCodec ff_argo_decoder;
+extern const FFCodec ff_asv1_encoder;
+extern const FFCodec ff_asv1_decoder;
+extern const FFCodec ff_asv2_encoder;
+extern const FFCodec ff_asv2_decoder;
+extern const FFCodec ff_aura_decoder;
+extern const FFCodec ff_aura2_decoder;
+extern const FFCodec ff_avrp_encoder;
+extern const FFCodec ff_avrp_decoder;
+extern const FFCodec ff_avrn_decoder;
+extern const FFCodec ff_avs_decoder;
+extern const FFCodec ff_avui_encoder;
+extern const FFCodec ff_avui_decoder;
+#if FF_API_AYUV_CODECID
+extern const FFCodec ff_ayuv_encoder;
+extern const FFCodec ff_ayuv_decoder;
+#endif
+extern const FFCodec ff_bethsoftvid_decoder;
+extern const FFCodec ff_bfi_decoder;
+extern const FFCodec ff_bink_decoder;
+extern const FFCodec ff_bitpacked_decoder;
+extern const FFCodec ff_bitpacked_encoder;
+extern const FFCodec ff_bmp_encoder;
+extern const FFCodec ff_bmp_decoder;
+extern const FFCodec ff_bmv_video_decoder;
+extern const FFCodec ff_brender_pix_decoder;
+extern const FFCodec ff_c93_decoder;
+extern const FFCodec ff_cavs_decoder;
+extern const FFCodec ff_cdgraphics_decoder;
+extern const FFCodec ff_cdtoons_decoder;
+extern const FFCodec ff_cdxl_decoder;
+extern const FFCodec ff_cfhd_encoder;
+extern const FFCodec ff_cfhd_decoder;
+extern const FFCodec ff_cinepak_encoder;
+extern const FFCodec ff_cinepak_decoder;
+extern const FFCodec ff_clearvideo_decoder;
+extern const FFCodec ff_cljr_encoder;
+extern const FFCodec ff_cljr_decoder;
+extern const FFCodec ff_cllc_decoder;
+extern const FFCodec ff_comfortnoise_encoder;
+extern const FFCodec ff_comfortnoise_decoder;
+extern const FFCodec ff_cpia_decoder;
+extern const FFCodec ff_cri_decoder;
+extern const FFCodec ff_cscd_decoder;
+extern const FFCodec ff_cyuv_decoder;
+extern const FFCodec ff_dds_decoder;
+extern const FFCodec ff_dfa_decoder;
+extern const FFCodec ff_dirac_decoder;
+extern const FFCodec ff_dnxhd_encoder;
+extern const FFCodec ff_dnxhd_decoder;
+extern const FFCodec ff_dpx_encoder;
+extern const FFCodec ff_dpx_decoder;
+extern const FFCodec ff_dsicinvideo_decoder;
+extern const FFCodec ff_dvaudio_decoder;
+extern const FFCodec ff_dvvideo_encoder;
+extern const FFCodec ff_dvvideo_decoder;
+extern const FFCodec ff_dxa_decoder;
+extern const FFCodec ff_dxtory_decoder;
+extern const FFCodec ff_dxv_decoder;
+extern const FFCodec ff_eacmv_decoder;
+extern const FFCodec ff_eamad_decoder;
+extern const FFCodec ff_eatgq_decoder;
+extern const FFCodec ff_eatgv_decoder;
+extern const FFCodec ff_eatqi_decoder;
+extern const FFCodec ff_eightbps_decoder;
+extern const FFCodec ff_eightsvx_exp_decoder;
+extern const FFCodec ff_eightsvx_fib_decoder;
+extern const FFCodec ff_escape124_decoder;
+extern const FFCodec ff_escape130_decoder;
+extern const FFCodec ff_exr_encoder;
+extern const FFCodec ff_exr_decoder;
+extern const FFCodec ff_ffv1_encoder;
+extern const FFCodec ff_ffv1_decoder;
+extern const FFCodec ff_ffvhuff_encoder;
+extern const FFCodec ff_ffvhuff_decoder;
+extern const FFCodec ff_fic_decoder;
+extern const FFCodec ff_fits_encoder;
+extern const FFCodec ff_fits_decoder;
+extern const FFCodec ff_flashsv_encoder;
+extern const FFCodec ff_flashsv_decoder;
+extern const FFCodec ff_flashsv2_encoder;
+extern const FFCodec ff_flashsv2_decoder;
+extern const FFCodec ff_flic_decoder;
+extern const FFCodec ff_flv_encoder;
+extern const FFCodec ff_flv_decoder;
+extern const FFCodec ff_fmvc_decoder;
+extern const FFCodec ff_fourxm_decoder;
+extern const FFCodec ff_fraps_decoder;
+extern const FFCodec ff_frwu_decoder;
+extern const FFCodec ff_g2m_decoder;
+extern const FFCodec ff_gdv_decoder;
+extern const FFCodec ff_gem_decoder;
+extern const FFCodec ff_gif_encoder;
+extern const FFCodec ff_gif_decoder;
+extern const FFCodec ff_h261_encoder;
+extern const FFCodec ff_h261_decoder;
+extern const FFCodec ff_h263_encoder;
+extern const FFCodec ff_h263_decoder;
+extern const FFCodec ff_h263i_decoder;
+extern const FFCodec ff_h263p_encoder;
+extern const FFCodec ff_h263p_decoder;
+extern const FFCodec ff_h263_v4l2m2m_decoder;
+extern const FFCodec ff_h264_decoder;
+extern const FFCodec ff_h264_crystalhd_decoder;
+extern const FFCodec ff_h264_v4l2m2m_decoder;
+extern const FFCodec ff_h264_mediacodec_decoder;
+extern const FFCodec ff_h264_mediacodec_encoder;
+extern const FFCodec ff_h264_mmal_decoder;
+extern const FFCodec ff_h264_qsv_decoder;
+extern const FFCodec ff_h264_rkmpp_decoder;
+extern const FFCodec ff_hap_encoder;
+extern const FFCodec ff_hap_decoder;
+extern const FFCodec ff_hevc_decoder;
+extern const FFCodec ff_hevc_qsv_decoder;
+extern const FFCodec ff_hevc_rkmpp_decoder;
+extern const FFCodec ff_hevc_v4l2m2m_decoder;
+extern const FFCodec ff_hnm4_video_decoder;
+extern const FFCodec ff_hq_hqa_decoder;
+extern const FFCodec ff_hqx_decoder;
+extern const FFCodec ff_huffyuv_encoder;
+extern const FFCodec ff_huffyuv_decoder;
+extern const FFCodec ff_hymt_decoder;
+extern const FFCodec ff_idcin_decoder;
+extern const FFCodec ff_iff_ilbm_decoder;
+extern const FFCodec ff_imm4_decoder;
+extern const FFCodec ff_imm5_decoder;
+extern const FFCodec ff_indeo2_decoder;
+extern const FFCodec ff_indeo3_decoder;
+extern const FFCodec ff_indeo4_decoder;
+extern const FFCodec ff_indeo5_decoder;
+extern const FFCodec ff_interplay_video_decoder;
+extern const FFCodec ff_ipu_decoder;
+extern const FFCodec ff_jpeg2000_encoder;
+extern const FFCodec ff_jpeg2000_decoder;
+extern const FFCodec ff_jpegls_encoder;
+extern const FFCodec ff_jpegls_decoder;
+extern const FFCodec ff_jv_decoder;
+extern const FFCodec ff_kgv1_decoder;
+extern const FFCodec ff_kmvc_decoder;
+extern const FFCodec ff_lagarith_decoder;
+extern const FFCodec ff_ljpeg_encoder;
+extern const FFCodec ff_loco_decoder;
+extern const FFCodec ff_lscr_decoder;
+extern const FFCodec ff_m101_decoder;
+extern const FFCodec ff_magicyuv_encoder;
+extern const FFCodec ff_magicyuv_decoder;
+extern const FFCodec ff_mdec_decoder;
+extern const FFCodec ff_media100_decoder;
+extern const FFCodec ff_mimic_decoder;
+extern const FFCodec ff_mjpeg_encoder;
+extern const FFCodec ff_mjpeg_decoder;
+extern const FFCodec ff_mjpegb_decoder;
+extern const FFCodec ff_mmvideo_decoder;
+extern const FFCodec ff_mobiclip_decoder;
+extern const FFCodec ff_motionpixels_decoder;
+extern const FFCodec ff_mpeg1video_encoder;
+extern const FFCodec ff_mpeg1video_decoder;
+extern const FFCodec ff_mpeg2video_encoder;
+extern const FFCodec ff_mpeg2video_decoder;
+extern const FFCodec ff_mpeg4_encoder;
+extern const FFCodec ff_mpeg4_decoder;
+extern const FFCodec ff_mpeg4_crystalhd_decoder;
+extern const FFCodec ff_mpeg4_v4l2m2m_decoder;
+extern const FFCodec ff_mpeg4_mmal_decoder;
+extern const FFCodec ff_mpegvideo_decoder;
+extern const FFCodec ff_mpeg1_v4l2m2m_decoder;
+extern const FFCodec ff_mpeg2_mmal_decoder;
+extern const FFCodec ff_mpeg2_crystalhd_decoder;
+extern const FFCodec ff_mpeg2_v4l2m2m_decoder;
+extern const FFCodec ff_mpeg2_qsv_decoder;
+extern const FFCodec ff_mpeg2_mediacodec_decoder;
+extern const FFCodec ff_msa1_decoder;
+extern const FFCodec ff_mscc_decoder;
+extern const FFCodec ff_msmpeg4v1_decoder;
+extern const FFCodec ff_msmpeg4v2_encoder;
+extern const FFCodec ff_msmpeg4v2_decoder;
+extern const FFCodec ff_msmpeg4v3_encoder;
+extern const FFCodec ff_msmpeg4v3_decoder;
+extern const FFCodec ff_msmpeg4_crystalhd_decoder;
+extern const FFCodec ff_msp2_decoder;
+extern const FFCodec ff_msrle_decoder;
+extern const FFCodec ff_mss1_decoder;
+extern const FFCodec ff_mss2_decoder;
+extern const FFCodec ff_msvideo1_encoder;
+extern const FFCodec ff_msvideo1_decoder;
+extern const FFCodec ff_mszh_decoder;
+extern const FFCodec ff_mts2_decoder;
+extern const FFCodec ff_mv30_decoder;
+extern const FFCodec ff_mvc1_decoder;
+extern const FFCodec ff_mvc2_decoder;
+extern const FFCodec ff_mvdv_decoder;
+extern const FFCodec ff_mvha_decoder;
+extern const FFCodec ff_mwsc_decoder;
+extern const FFCodec ff_mxpeg_decoder;
+extern const FFCodec ff_notchlc_decoder;
+extern const FFCodec ff_nuv_decoder;
+extern const FFCodec ff_paf_video_decoder;
+extern const FFCodec ff_pam_encoder;
+extern const FFCodec ff_pam_decoder;
+extern const FFCodec ff_pbm_encoder;
+extern const FFCodec ff_pbm_decoder;
+extern const FFCodec ff_pcx_encoder;
+extern const FFCodec ff_pcx_decoder;
+extern const FFCodec ff_pfm_encoder;
+extern const FFCodec ff_pfm_decoder;
+extern const FFCodec ff_pgm_encoder;
+extern const FFCodec ff_pgm_decoder;
+extern const FFCodec ff_pgmyuv_encoder;
+extern const FFCodec ff_pgmyuv_decoder;
+extern const FFCodec ff_pgx_decoder;
+extern const FFCodec ff_phm_encoder;
+extern const FFCodec ff_phm_decoder;
+extern const FFCodec ff_photocd_decoder;
+extern const FFCodec ff_pictor_decoder;
+extern const FFCodec ff_pixlet_decoder;
+extern const FFCodec ff_png_encoder;
+extern const FFCodec ff_png_decoder;
+extern const FFCodec ff_ppm_encoder;
+extern const FFCodec ff_ppm_decoder;
+extern const FFCodec ff_prores_encoder;
+extern const FFCodec ff_prores_decoder;
+extern const FFCodec ff_prores_aw_encoder;
+extern const FFCodec ff_prores_ks_encoder;
+extern const FFCodec ff_prosumer_decoder;
+extern const FFCodec ff_psd_decoder;
+extern const FFCodec ff_ptx_decoder;
+extern const FFCodec ff_qdraw_decoder;
+extern const FFCodec ff_qoi_encoder;
+extern const FFCodec ff_qoi_decoder;
+extern const FFCodec ff_qpeg_decoder;
+extern const FFCodec ff_qtrle_encoder;
+extern const FFCodec ff_qtrle_decoder;
+extern const FFCodec ff_r10k_encoder;
+extern const FFCodec ff_r10k_decoder;
+extern const FFCodec ff_r210_encoder;
+extern const FFCodec ff_r210_decoder;
+extern const FFCodec ff_rasc_decoder;
+extern const FFCodec ff_rawvideo_encoder;
+extern const FFCodec ff_rawvideo_decoder;
+extern const FFCodec ff_rka_decoder;
+extern const FFCodec ff_rl2_decoder;
+extern const FFCodec ff_roq_encoder;
+extern const FFCodec ff_roq_decoder;
+extern const FFCodec ff_rpza_encoder;
+extern const FFCodec ff_rpza_decoder;
+extern const FFCodec ff_rscc_decoder;
+extern const FFCodec ff_rv10_encoder;
+extern const FFCodec ff_rv10_decoder;
+extern const FFCodec ff_rv20_encoder;
+extern const FFCodec ff_rv20_decoder;
+extern const FFCodec ff_rv30_decoder;
+extern const FFCodec ff_rv40_decoder;
+extern const FFCodec ff_s302m_encoder;
+extern const FFCodec ff_s302m_decoder;
+extern const FFCodec ff_sanm_decoder;
+extern const FFCodec ff_scpr_decoder;
+extern const FFCodec ff_screenpresso_decoder;
+extern const FFCodec ff_sga_decoder;
+extern const FFCodec ff_sgi_encoder;
+extern const FFCodec ff_sgi_decoder;
+extern const FFCodec ff_sgirle_decoder;
+extern const FFCodec ff_sheervideo_decoder;
+extern const FFCodec ff_simbiosis_imx_decoder;
+extern const FFCodec ff_smacker_decoder;
+extern const FFCodec ff_smc_encoder;
+extern const FFCodec ff_smc_decoder;
+extern const FFCodec ff_smvjpeg_decoder;
+extern const FFCodec ff_snow_encoder;
+extern const FFCodec ff_snow_decoder;
+extern const FFCodec ff_sp5x_decoder;
+extern const FFCodec ff_speedhq_decoder;
+extern const FFCodec ff_speedhq_encoder;
+extern const FFCodec ff_speex_decoder;
+extern const FFCodec ff_srgc_decoder;
+extern const FFCodec ff_sunrast_encoder;
+extern const FFCodec ff_sunrast_decoder;
+extern const FFCodec ff_svq1_encoder;
+extern const FFCodec ff_svq1_decoder;
+extern const FFCodec ff_svq3_decoder;
+extern const FFCodec ff_targa_encoder;
+extern const FFCodec ff_targa_decoder;
+extern const FFCodec ff_targa_y216_decoder;
+extern const FFCodec ff_tdsc_decoder;
+extern const FFCodec ff_theora_decoder;
+extern const FFCodec ff_thp_decoder;
+extern const FFCodec ff_tiertexseqvideo_decoder;
+extern const FFCodec ff_tiff_encoder;
+extern const FFCodec ff_tiff_decoder;
+extern const FFCodec ff_tmv_decoder;
+extern const FFCodec ff_truemotion1_decoder;
+extern const FFCodec ff_truemotion2_decoder;
+extern const FFCodec ff_truemotion2rt_decoder;
+extern const FFCodec ff_tscc_decoder;
+extern const FFCodec ff_tscc2_decoder;
+extern const FFCodec ff_txd_decoder;
+extern const FFCodec ff_ulti_decoder;
+extern const FFCodec ff_utvideo_encoder;
+extern const FFCodec ff_utvideo_decoder;
+extern const FFCodec ff_v210_encoder;
+extern const FFCodec ff_v210_decoder;
+extern const FFCodec ff_v210x_decoder;
+extern const FFCodec ff_v308_encoder;
+extern const FFCodec ff_v308_decoder;
+extern const FFCodec ff_v408_encoder;
+extern const FFCodec ff_v408_decoder;
+extern const FFCodec ff_v410_encoder;
+extern const FFCodec ff_v410_decoder;
+extern const FFCodec ff_vb_decoder;
+extern const FFCodec ff_vbn_encoder;
+extern const FFCodec ff_vbn_decoder;
+extern const FFCodec ff_vble_decoder;
+extern const FFCodec ff_vc1_decoder;
+extern const FFCodec ff_vc1_crystalhd_decoder;
+extern const FFCodec ff_vc1image_decoder;
+extern const FFCodec ff_vc1_mmal_decoder;
+extern const FFCodec ff_vc1_qsv_decoder;
+extern const FFCodec ff_vc1_v4l2m2m_decoder;
+extern const FFCodec ff_vc2_encoder;
+extern const FFCodec ff_vcr1_decoder;
+extern const FFCodec ff_vmdvideo_decoder;
+extern const FFCodec ff_vmnc_decoder;
+extern const FFCodec ff_vp3_decoder;
+extern const FFCodec ff_vp4_decoder;
+extern const FFCodec ff_vp5_decoder;
+extern const FFCodec ff_vp6_decoder;
+extern const FFCodec ff_vp6a_decoder;
+extern const FFCodec ff_vp6f_decoder;
+extern const FFCodec ff_vp7_decoder;
+extern const FFCodec ff_vp8_decoder;
+extern const FFCodec ff_vp8_rkmpp_decoder;
+extern const FFCodec ff_vp8_v4l2m2m_decoder;
+extern const FFCodec ff_vp9_decoder;
+extern const FFCodec ff_vp9_rkmpp_decoder;
+extern const FFCodec ff_vp9_v4l2m2m_decoder;
+extern const FFCodec ff_vqa_decoder;
+extern const FFCodec ff_vqc_decoder;
+extern const FFCodec ff_wbmp_decoder;
+extern const FFCodec ff_wbmp_encoder;
+extern const FFCodec ff_webp_decoder;
+extern const FFCodec ff_wcmv_decoder;
+extern const FFCodec ff_wrapped_avframe_encoder;
+extern const FFCodec ff_wrapped_avframe_decoder;
+extern const FFCodec ff_wmv1_encoder;
+extern const FFCodec ff_wmv1_decoder;
+extern const FFCodec ff_wmv2_encoder;
+extern const FFCodec ff_wmv2_decoder;
+extern const FFCodec ff_wmv3_decoder;
+extern const FFCodec ff_wmv3_crystalhd_decoder;
+extern const FFCodec ff_wmv3image_decoder;
+extern const FFCodec ff_wnv1_decoder;
+extern const FFCodec ff_xan_wc3_decoder;
+extern const FFCodec ff_xan_wc4_decoder;
+extern const FFCodec ff_xbm_encoder;
+extern const FFCodec ff_xbm_decoder;
+extern const FFCodec ff_xface_encoder;
+extern const FFCodec ff_xface_decoder;
+extern const FFCodec ff_xl_decoder;
+extern const FFCodec ff_xpm_decoder;
+extern const FFCodec ff_xwd_encoder;
+extern const FFCodec ff_xwd_decoder;
+extern const FFCodec ff_y41p_encoder;
+extern const FFCodec ff_y41p_decoder;
+extern const FFCodec ff_ylc_decoder;
+extern const FFCodec ff_yop_decoder;
+extern const FFCodec ff_yuv4_encoder;
+extern const FFCodec ff_yuv4_decoder;
+extern const FFCodec ff_zero12v_decoder;
+extern const FFCodec ff_zerocodec_decoder;
+extern const FFCodec ff_zlib_encoder;
+extern const FFCodec ff_zlib_decoder;
+extern const FFCodec ff_zmbv_encoder;
+extern const FFCodec ff_zmbv_decoder;
+
+/* audio codecs */
+extern const FFCodec ff_aac_encoder;
+extern const FFCodec ff_aac_decoder;
+extern const FFCodec ff_aac_fixed_decoder;
+extern const FFCodec ff_aac_latm_decoder;
+extern const FFCodec ff_ac3_encoder;
+extern const FFCodec ff_ac3_decoder;
+extern const FFCodec ff_ac3_fixed_encoder;
+extern const FFCodec ff_ac3_fixed_decoder;
+extern const FFCodec ff_acelp_kelvin_decoder;
+extern const FFCodec ff_alac_encoder;
+extern const FFCodec ff_alac_decoder;
+extern const FFCodec ff_als_decoder;
+extern const FFCodec ff_amrnb_decoder;
+extern const FFCodec ff_amrwb_decoder;
+extern const FFCodec ff_apac_decoder;
+extern const FFCodec ff_ape_decoder;
+extern const FFCodec ff_aptx_encoder;
+extern const FFCodec ff_aptx_decoder;
+extern const FFCodec ff_aptx_hd_encoder;
+extern const FFCodec ff_aptx_hd_decoder;
+extern const FFCodec ff_atrac1_decoder;
+extern const FFCodec ff_atrac3_decoder;
+extern const FFCodec ff_atrac3al_decoder;
+extern const FFCodec ff_atrac3p_decoder;
+extern const FFCodec ff_atrac3pal_decoder;
+extern const FFCodec ff_atrac9_decoder;
+extern const FFCodec ff_binkaudio_dct_decoder;
+extern const FFCodec ff_binkaudio_rdft_decoder;
+extern const FFCodec ff_bmv_audio_decoder;
+extern const FFCodec ff_bonk_decoder;
+extern const FFCodec ff_cook_decoder;
+extern const FFCodec ff_dca_encoder;
+extern const FFCodec ff_dca_decoder;
+extern const FFCodec ff_dfpwm_encoder;
+extern const FFCodec ff_dfpwm_decoder;
+extern const FFCodec ff_dolby_e_decoder;
+extern const FFCodec ff_dsd_lsbf_decoder;
+extern const FFCodec ff_dsd_msbf_decoder;
+extern const FFCodec ff_dsd_lsbf_planar_decoder;
+extern const FFCodec ff_dsd_msbf_planar_decoder;
+extern const FFCodec ff_dsicinaudio_decoder;
+extern const FFCodec ff_dss_sp_decoder;
+extern const FFCodec ff_dst_decoder;
+extern const FFCodec ff_eac3_encoder;
+extern const FFCodec ff_eac3_decoder;
+extern const FFCodec ff_evrc_decoder;
+extern const FFCodec ff_fastaudio_decoder;
+extern const FFCodec ff_ffwavesynth_decoder;
+extern const FFCodec ff_flac_encoder;
+extern const FFCodec ff_flac_decoder;
+extern const FFCodec ff_ftr_decoder;
+extern const FFCodec ff_g723_1_encoder;
+extern const FFCodec ff_g723_1_decoder;
+extern const FFCodec ff_g729_decoder;
+extern const FFCodec ff_gsm_decoder;
+extern const FFCodec ff_gsm_ms_decoder;
+extern const FFCodec ff_hca_decoder;
+extern const FFCodec ff_hcom_decoder;
+extern const FFCodec ff_hdr_encoder;
+extern const FFCodec ff_hdr_decoder;
+extern const FFCodec ff_iac_decoder;
+extern const FFCodec ff_ilbc_decoder;
+extern const FFCodec ff_imc_decoder;
+extern const FFCodec ff_interplay_acm_decoder;
+extern const FFCodec ff_mace3_decoder;
+extern const FFCodec ff_mace6_decoder;
+extern const FFCodec ff_metasound_decoder;
+extern const FFCodec ff_misc4_decoder;
+extern const FFCodec ff_mlp_encoder;
+extern const FFCodec ff_mlp_decoder;
+extern const FFCodec ff_mp1_decoder;
+extern const FFCodec ff_mp1float_decoder;
+extern const FFCodec ff_mp2_encoder;
+extern const FFCodec ff_mp2_decoder;
+extern const FFCodec ff_mp2float_decoder;
+extern const FFCodec ff_mp2fixed_encoder;
+extern const FFCodec ff_mp3float_decoder;
+extern const FFCodec ff_mp3_decoder;
+extern const FFCodec ff_mp3adufloat_decoder;
+extern const FFCodec ff_mp3adu_decoder;
+extern const FFCodec ff_mp3on4float_decoder;
+extern const FFCodec ff_mp3on4_decoder;
+extern const FFCodec ff_mpc7_decoder;
+extern const FFCodec ff_mpc8_decoder;
+extern const FFCodec ff_msnsiren_decoder;
+extern const FFCodec ff_nellymoser_encoder;
+extern const FFCodec ff_nellymoser_decoder;
+extern const FFCodec ff_on2avc_decoder;
+extern const FFCodec ff_opus_encoder;
+extern const FFCodec ff_opus_decoder;
+extern const FFCodec ff_paf_audio_decoder;
+extern const FFCodec ff_qcelp_decoder;
+extern const FFCodec ff_qdm2_decoder;
+extern const FFCodec ff_qdmc_decoder;
+extern const FFCodec ff_ra_144_encoder;
+extern const FFCodec ff_ra_144_decoder;
+extern const FFCodec ff_ra_288_decoder;
+extern const FFCodec ff_ralf_decoder;
+extern const FFCodec ff_sbc_encoder;
+extern const FFCodec ff_sbc_decoder;
+extern const FFCodec ff_shorten_decoder;
+extern const FFCodec ff_sipr_decoder;
+extern const FFCodec ff_siren_decoder;
+extern const FFCodec ff_smackaud_decoder;
+extern const FFCodec ff_sonic_encoder;
+extern const FFCodec ff_sonic_decoder;
+extern const FFCodec ff_sonic_ls_encoder;
+extern const FFCodec ff_tak_decoder;
+extern const FFCodec ff_truehd_encoder;
+extern const FFCodec ff_truehd_decoder;
+extern const FFCodec ff_truespeech_decoder;
+extern const FFCodec ff_tta_encoder;
+extern const FFCodec ff_tta_decoder;
+extern const FFCodec ff_twinvq_decoder;
+extern const FFCodec ff_vmdaudio_decoder;
+extern const FFCodec ff_vorbis_encoder;
+extern const FFCodec ff_vorbis_decoder;
+extern const FFCodec ff_wavarc_decoder;
+extern const FFCodec ff_wavpack_encoder;
+extern const FFCodec ff_wavpack_decoder;
+extern const FFCodec ff_wmalossless_decoder;
+extern const FFCodec ff_wmapro_decoder;
+extern const FFCodec ff_wmav1_encoder;
+extern const FFCodec ff_wmav1_decoder;
+extern const FFCodec ff_wmav2_encoder;
+extern const FFCodec ff_wmav2_decoder;
+extern const FFCodec ff_wmavoice_decoder;
+extern const FFCodec ff_ws_snd1_decoder;
+extern const FFCodec ff_xma1_decoder;
+extern const FFCodec ff_xma2_decoder;
+
+/* PCM codecs */
+extern const FFCodec ff_pcm_alaw_encoder;
+extern const FFCodec ff_pcm_alaw_decoder;
+extern const FFCodec ff_pcm_bluray_encoder;
+extern const FFCodec ff_pcm_bluray_decoder;
+extern const FFCodec ff_pcm_dvd_encoder;
+extern const FFCodec ff_pcm_dvd_decoder;
+extern const FFCodec ff_pcm_f16le_decoder;
+extern const FFCodec ff_pcm_f24le_decoder;
+extern const FFCodec ff_pcm_f32be_encoder;
+extern const FFCodec ff_pcm_f32be_decoder;
+extern const FFCodec ff_pcm_f32le_encoder;
+extern const FFCodec ff_pcm_f32le_decoder;
+extern const FFCodec ff_pcm_f64be_encoder;
+extern const FFCodec ff_pcm_f64be_decoder;
+extern const FFCodec ff_pcm_f64le_encoder;
+extern const FFCodec ff_pcm_f64le_decoder;
+extern const FFCodec ff_pcm_lxf_decoder;
+extern const FFCodec ff_pcm_mulaw_encoder;
+extern const FFCodec ff_pcm_mulaw_decoder;
+extern const FFCodec ff_pcm_s8_encoder;
+extern const FFCodec ff_pcm_s8_decoder;
+extern const FFCodec ff_pcm_s8_planar_encoder;
+extern const FFCodec ff_pcm_s8_planar_decoder;
+extern const FFCodec ff_pcm_s16be_encoder;
+extern const FFCodec ff_pcm_s16be_decoder;
+extern const FFCodec ff_pcm_s16be_planar_encoder;
+extern const FFCodec ff_pcm_s16be_planar_decoder;
+extern const FFCodec ff_pcm_s16le_encoder;
+extern const FFCodec ff_pcm_s16le_decoder;
+extern const FFCodec ff_pcm_s16le_planar_encoder;
+extern const FFCodec ff_pcm_s16le_planar_decoder;
+extern const FFCodec ff_pcm_s24be_encoder;
+extern const FFCodec ff_pcm_s24be_decoder;
+extern const FFCodec ff_pcm_s24daud_encoder;
+extern const FFCodec ff_pcm_s24daud_decoder;
+extern const FFCodec ff_pcm_s24le_encoder;
+extern const FFCodec ff_pcm_s24le_decoder;
+extern const FFCodec ff_pcm_s24le_planar_encoder;
+extern const FFCodec ff_pcm_s24le_planar_decoder;
+extern const FFCodec ff_pcm_s32be_encoder;
+extern const FFCodec ff_pcm_s32be_decoder;
+extern const FFCodec ff_pcm_s32le_encoder;
+extern const FFCodec ff_pcm_s32le_decoder;
+extern const FFCodec ff_pcm_s32le_planar_encoder;
+extern const FFCodec ff_pcm_s32le_planar_decoder;
+extern const FFCodec ff_pcm_s64be_encoder;
+extern const FFCodec ff_pcm_s64be_decoder;
+extern const FFCodec ff_pcm_s64le_encoder;
+extern const FFCodec ff_pcm_s64le_decoder;
+extern const FFCodec ff_pcm_sga_decoder;
+extern const FFCodec ff_pcm_u8_encoder;
+extern const FFCodec ff_pcm_u8_decoder;
+extern const FFCodec ff_pcm_u16be_encoder;
+extern const FFCodec ff_pcm_u16be_decoder;
+extern const FFCodec ff_pcm_u16le_encoder;
+extern const FFCodec ff_pcm_u16le_decoder;
+extern const FFCodec ff_pcm_u24be_encoder;
+extern const FFCodec ff_pcm_u24be_decoder;
+extern const FFCodec ff_pcm_u24le_encoder;
+extern const FFCodec ff_pcm_u24le_decoder;
+extern const FFCodec ff_pcm_u32be_encoder;
+extern const FFCodec ff_pcm_u32be_decoder;
+extern const FFCodec ff_pcm_u32le_encoder;
+extern const FFCodec ff_pcm_u32le_decoder;
+extern const FFCodec ff_pcm_vidc_encoder;
+extern const FFCodec ff_pcm_vidc_decoder;
+
+/* DPCM codecs */
+extern const FFCodec ff_cbd2_dpcm_decoder;
+extern const FFCodec ff_derf_dpcm_decoder;
+extern const FFCodec ff_gremlin_dpcm_decoder;
+extern const FFCodec ff_interplay_dpcm_decoder;
+extern const FFCodec ff_roq_dpcm_encoder;
+extern const FFCodec ff_roq_dpcm_decoder;
+extern const FFCodec ff_sdx2_dpcm_decoder;
+extern const FFCodec ff_sol_dpcm_decoder;
+extern const FFCodec ff_xan_dpcm_decoder;
+extern const FFCodec ff_wady_dpcm_decoder;
+
+/* ADPCM codecs */
+extern const FFCodec ff_adpcm_4xm_decoder;
+extern const FFCodec ff_adpcm_adx_encoder;
+extern const FFCodec ff_adpcm_adx_decoder;
+extern const FFCodec ff_adpcm_afc_decoder;
+extern const FFCodec ff_adpcm_agm_decoder;
+extern const FFCodec ff_adpcm_aica_decoder;
+extern const FFCodec ff_adpcm_argo_decoder;
+extern const FFCodec ff_adpcm_argo_encoder;
+extern const FFCodec ff_adpcm_ct_decoder;
+extern const FFCodec ff_adpcm_dtk_decoder;
+extern const FFCodec ff_adpcm_ea_decoder;
+extern const FFCodec ff_adpcm_ea_maxis_xa_decoder;
+extern const FFCodec ff_adpcm_ea_r1_decoder;
+extern const FFCodec ff_adpcm_ea_r2_decoder;
+extern const FFCodec ff_adpcm_ea_r3_decoder;
+extern const FFCodec ff_adpcm_ea_xas_decoder;
+extern const FFCodec ff_adpcm_g722_encoder;
+extern const FFCodec ff_adpcm_g722_decoder;
+extern const FFCodec ff_adpcm_g726_encoder;
+extern const FFCodec ff_adpcm_g726_decoder;
+extern const FFCodec ff_adpcm_g726le_encoder;
+extern const FFCodec ff_adpcm_g726le_decoder;
+extern const FFCodec ff_adpcm_ima_acorn_decoder;
+extern const FFCodec ff_adpcm_ima_amv_decoder;
+extern const FFCodec ff_adpcm_ima_amv_encoder;
+extern const FFCodec ff_adpcm_ima_alp_decoder;
+extern const FFCodec ff_adpcm_ima_alp_encoder;
+extern const FFCodec ff_adpcm_ima_apc_decoder;
+extern const FFCodec ff_adpcm_ima_apm_decoder;
+extern const FFCodec ff_adpcm_ima_apm_encoder;
+extern const FFCodec ff_adpcm_ima_cunning_decoder;
+extern const FFCodec ff_adpcm_ima_dat4_decoder;
+extern const FFCodec ff_adpcm_ima_dk3_decoder;
+extern const FFCodec ff_adpcm_ima_dk4_decoder;
+extern const FFCodec ff_adpcm_ima_ea_eacs_decoder;
+extern const FFCodec ff_adpcm_ima_ea_sead_decoder;
+extern const FFCodec ff_adpcm_ima_iss_decoder;
+extern const FFCodec ff_adpcm_ima_moflex_decoder;
+extern const FFCodec ff_adpcm_ima_mtf_decoder;
+extern const FFCodec ff_adpcm_ima_oki_decoder;
+extern const FFCodec ff_adpcm_ima_qt_encoder;
+extern const FFCodec ff_adpcm_ima_qt_decoder;
+extern const FFCodec ff_adpcm_ima_rad_decoder;
+extern const FFCodec ff_adpcm_ima_ssi_decoder;
+extern const FFCodec ff_adpcm_ima_ssi_encoder;
+extern const FFCodec ff_adpcm_ima_smjpeg_decoder;
+extern const FFCodec ff_adpcm_ima_wav_encoder;
+extern const FFCodec ff_adpcm_ima_wav_decoder;
+extern const FFCodec ff_adpcm_ima_ws_encoder;
+extern const FFCodec ff_adpcm_ima_ws_decoder;
+extern const FFCodec ff_adpcm_ms_encoder;
+extern const FFCodec ff_adpcm_ms_decoder;
+extern const FFCodec ff_adpcm_mtaf_decoder;
+extern const FFCodec ff_adpcm_psx_decoder;
+extern const FFCodec ff_adpcm_sbpro_2_decoder;
+extern const FFCodec ff_adpcm_sbpro_3_decoder;
+extern const FFCodec ff_adpcm_sbpro_4_decoder;
+extern const FFCodec ff_adpcm_swf_encoder;
+extern const FFCodec ff_adpcm_swf_decoder;
+extern const FFCodec ff_adpcm_thp_decoder;
+extern const FFCodec ff_adpcm_thp_le_decoder;
+extern const FFCodec ff_adpcm_vima_decoder;
+extern const FFCodec ff_adpcm_xa_decoder;
+extern const FFCodec ff_adpcm_xmd_decoder;
+extern const FFCodec ff_adpcm_yamaha_encoder;
+extern const FFCodec ff_adpcm_yamaha_decoder;
+extern const FFCodec ff_adpcm_zork_decoder;
+
+/* subtitles */
+extern const FFCodec ff_ssa_encoder;
+extern const FFCodec ff_ssa_decoder;
+extern const FFCodec ff_ass_encoder;
+extern const FFCodec ff_ass_decoder;
+extern const FFCodec ff_ccaption_decoder;
+extern const FFCodec ff_dvbsub_encoder;
+extern const FFCodec ff_dvbsub_decoder;
+extern const FFCodec ff_dvdsub_encoder;
+extern const FFCodec ff_dvdsub_decoder;
+extern const FFCodec ff_jacosub_decoder;
+extern const FFCodec ff_microdvd_decoder;
+extern const FFCodec ff_movtext_encoder;
+extern const FFCodec ff_movtext_decoder;
+extern const FFCodec ff_mpl2_decoder;
+extern const FFCodec ff_pgssub_decoder;
+extern const FFCodec ff_pjs_decoder;
+extern const FFCodec ff_realtext_decoder;
+extern const FFCodec ff_sami_decoder;
+extern const FFCodec ff_srt_encoder;
+extern const FFCodec ff_srt_decoder;
+extern const FFCodec ff_stl_decoder;
+extern const FFCodec ff_subrip_encoder;
+extern const FFCodec ff_subrip_decoder;
+extern const FFCodec ff_subviewer_decoder;
+extern const FFCodec ff_subviewer1_decoder;
+extern const FFCodec ff_text_encoder;
+extern const FFCodec ff_text_decoder;
+extern const FFCodec ff_ttml_encoder;
+extern const FFCodec ff_vplayer_decoder;
+extern const FFCodec ff_webvtt_encoder;
+extern const FFCodec ff_webvtt_decoder;
+extern const FFCodec ff_xsub_encoder;
+extern const FFCodec ff_xsub_decoder;
+
+/* external libraries */
+extern const FFCodec ff_aac_at_encoder;
+extern const FFCodec ff_aac_at_decoder;
+extern const FFCodec ff_ac3_at_decoder;
+extern const FFCodec ff_adpcm_ima_qt_at_decoder;
+extern const FFCodec ff_alac_at_encoder;
+extern const FFCodec ff_alac_at_decoder;
+extern const FFCodec ff_amr_nb_at_decoder;
+extern const FFCodec ff_eac3_at_decoder;
+extern const FFCodec ff_gsm_ms_at_decoder;
+extern const FFCodec ff_ilbc_at_encoder;
+extern const FFCodec ff_ilbc_at_decoder;
+extern const FFCodec ff_mp1_at_decoder;
+extern const FFCodec ff_mp2_at_decoder;
+extern const FFCodec ff_mp3_at_decoder;
+extern const FFCodec ff_pcm_alaw_at_encoder;
+extern const FFCodec ff_pcm_alaw_at_decoder;
+extern const FFCodec ff_pcm_mulaw_at_encoder;
+extern const FFCodec ff_pcm_mulaw_at_decoder;
+extern const FFCodec ff_qdmc_at_decoder;
+extern const FFCodec ff_qdm2_at_decoder;
+extern FFCodec ff_libaom_av1_encoder;
+extern const FFCodec ff_libaribb24_decoder;
+extern const FFCodec ff_libcelt_decoder;
+extern const FFCodec ff_libcodec2_encoder;
+extern const FFCodec ff_libcodec2_decoder;
+extern const FFCodec ff_libdav1d_decoder;
+extern const FFCodec ff_libdavs2_decoder;
+extern const FFCodec ff_libfdk_aac_encoder;
+extern const FFCodec ff_libfdk_aac_decoder;
+extern const FFCodec ff_libgsm_encoder;
+extern const FFCodec ff_libgsm_decoder;
+extern const FFCodec ff_libgsm_ms_encoder;
+extern const FFCodec ff_libgsm_ms_decoder;
+extern const FFCodec ff_libilbc_encoder;
+extern const FFCodec ff_libilbc_decoder;
+extern const FFCodec ff_libjxl_decoder;
+extern const FFCodec ff_libjxl_encoder;
+extern const FFCodec ff_libmp3lame_encoder;
+extern const FFCodec ff_libopencore_amrnb_encoder;
+extern const FFCodec ff_libopencore_amrnb_decoder;
+extern const FFCodec ff_libopencore_amrwb_decoder;
+extern const FFCodec ff_libopenjpeg_encoder;
+extern const FFCodec ff_libopenjpeg_decoder;
+extern const FFCodec ff_libopus_encoder;
+extern const FFCodec ff_libopus_decoder;
+extern const FFCodec ff_librav1e_encoder;
+extern const FFCodec ff_librsvg_decoder;
+extern const FFCodec ff_libshine_encoder;
+extern const FFCodec ff_libspeex_encoder;
+extern const FFCodec ff_libspeex_decoder;
+extern const FFCodec ff_libsvtav1_encoder;
+extern const FFCodec ff_libtheora_encoder;
+extern const FFCodec ff_libtwolame_encoder;
+extern const FFCodec ff_libuavs3d_decoder;
+extern const FFCodec ff_libvo_amrwbenc_encoder;
+extern const FFCodec ff_libvorbis_encoder;
+extern const FFCodec ff_libvorbis_decoder;
+extern const FFCodec ff_libvpx_vp8_encoder;
+extern const FFCodec ff_libvpx_vp8_decoder;
+extern FFCodec ff_libvpx_vp9_encoder;
+extern FFCodec ff_libvpx_vp9_decoder;
+/* preferred over libwebp */
+extern const FFCodec ff_libwebp_anim_encoder;
+extern const FFCodec ff_libwebp_encoder;
+extern const FFCodec ff_libx262_encoder;
+#if CONFIG_LIBX264_ENCODER
+#include <x264.h>
+#if X264_BUILD < 153
+#define LIBX264_CONST
+#else
+#define LIBX264_CONST const
+#endif
+extern LIBX264_CONST FFCodec ff_libx264_encoder;
+#endif
+extern const FFCodec ff_libx264rgb_encoder;
+extern FFCodec ff_libx265_encoder;
+extern const FFCodec ff_libxavs_encoder;
+extern const FFCodec ff_libxavs2_encoder;
+extern const FFCodec ff_libxvid_encoder;
+extern const FFCodec ff_libzvbi_teletext_decoder;
+
+/* text */
+extern const FFCodec ff_bintext_decoder;
+extern const FFCodec ff_xbin_decoder;
+extern const FFCodec ff_idf_decoder;
+
+/* external libraries that shouldn't be used by default if one of the
+ * above is available */
+extern const FFCodec ff_aac_mf_encoder;
+extern const FFCodec ff_ac3_mf_encoder;
+extern const FFCodec ff_h263_v4l2m2m_encoder;
+extern const FFCodec ff_libaom_av1_decoder;
+/* hwaccel hooks only, so prefer external decoders */
+extern const FFCodec ff_av1_decoder;
+extern const FFCodec ff_av1_cuvid_decoder;
+extern const FFCodec ff_av1_mediacodec_decoder;
+extern const FFCodec ff_av1_nvenc_encoder;
+extern const FFCodec ff_av1_qsv_decoder;
+extern const FFCodec ff_av1_qsv_encoder;
+extern const FFCodec ff_av1_amf_encoder;
+extern const FFCodec ff_libopenh264_encoder;
+extern const FFCodec ff_libopenh264_decoder;
+extern const FFCodec ff_h264_amf_encoder;
+extern const FFCodec ff_h264_cuvid_decoder;
+extern const FFCodec ff_h264_mf_encoder;
+extern const FFCodec ff_h264_nvenc_encoder;
+extern const FFCodec ff_h264_omx_encoder;
+extern const FFCodec ff_h264_qsv_encoder;
+extern const FFCodec ff_h264_v4l2m2m_encoder;
+extern const FFCodec ff_h264_vaapi_encoder;
+extern const FFCodec ff_h264_videotoolbox_encoder;
+extern const FFCodec ff_hevc_amf_encoder;
+extern const FFCodec ff_hevc_cuvid_decoder;
+extern const FFCodec ff_hevc_mediacodec_decoder;
+extern const FFCodec ff_hevc_mediacodec_encoder;
+extern const FFCodec ff_hevc_mf_encoder;
+extern const FFCodec ff_hevc_nvenc_encoder;
+extern const FFCodec ff_hevc_qsv_encoder;
+extern const FFCodec ff_hevc_v4l2m2m_encoder;
+extern const FFCodec ff_hevc_vaapi_encoder;
+extern const FFCodec ff_hevc_videotoolbox_encoder;
+extern const FFCodec ff_libkvazaar_encoder;
+extern const FFCodec ff_mjpeg_cuvid_decoder;
+extern const FFCodec ff_mjpeg_qsv_encoder;
+extern const FFCodec ff_mjpeg_qsv_decoder;
+extern const FFCodec ff_mjpeg_vaapi_encoder;
+extern const FFCodec ff_mp3_mf_encoder;
+extern const FFCodec ff_mpeg1_cuvid_decoder;
+extern const FFCodec ff_mpeg2_cuvid_decoder;
+extern const FFCodec ff_mpeg2_qsv_encoder;
+extern const FFCodec ff_mpeg2_vaapi_encoder;
+extern const FFCodec ff_mpeg4_cuvid_decoder;
+extern const FFCodec ff_mpeg4_mediacodec_decoder;
+extern const FFCodec ff_mpeg4_omx_encoder;
+extern const FFCodec ff_mpeg4_v4l2m2m_encoder;
+extern const FFCodec ff_prores_videotoolbox_encoder;
+extern const FFCodec ff_vc1_cuvid_decoder;
+extern const FFCodec ff_vp8_cuvid_decoder;
+extern const FFCodec ff_vp8_mediacodec_decoder;
+extern const FFCodec ff_vp8_qsv_decoder;
+extern const FFCodec ff_vp8_v4l2m2m_encoder;
+extern const FFCodec ff_vp8_vaapi_encoder;
+extern const FFCodec ff_vp9_cuvid_decoder;
+extern const FFCodec ff_vp9_mediacodec_decoder;
+extern const FFCodec ff_vp9_qsv_decoder;
+extern const FFCodec ff_vp9_vaapi_encoder;
+extern const FFCodec ff_vp9_qsv_encoder;
+
+// null codecs
+extern const FFCodec ff_vnull_decoder;
+extern const FFCodec ff_vnull_encoder;
+extern const FFCodec ff_anull_decoder;
+extern const FFCodec ff_anull_encoder;
+
+// The iterate API is not usable with ossfuzz due to the excessive size of binaries created
+#if CONFIG_OSSFUZZ
+const FFCodec * codec_list[] = {
+ NULL,
+ NULL,
+ NULL
+};
+#else
+#include "libavcodec/codec_list.c"
+#endif
+
+static AVOnce av_codec_static_init = AV_ONCE_INIT;
+static void av_codec_init_static(void)
+{
+ for (int i = 0; codec_list[i]; i++) {
+ if (codec_list[i]->init_static_data)
+ codec_list[i]->init_static_data((FFCodec*)codec_list[i]);
+ }
+}
+
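+/* The iteration state is just the index into codec_list, carried in the
+ * opaque pointer as an integer. */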
+const AVCodec *av_codec_iterate(void **opaque)
+{
+ uintptr_t i = (uintptr_t)*opaque;
+ const FFCodec *c = codec_list[i];
+
+ ff_thread_once(&av_codec_static_init, av_codec_init_static);
+
+ if (c) {
+ *opaque = (void*)(i + 1);
+ return &c->p;
+ }
+ return NULL;
+}
+
+static enum AVCodecID remap_deprecated_codec_id(enum AVCodecID id)
+{
+ switch(id){
+ // This is for future deprecated codec ids; it has been empty since the
+ // last major bump but will fill up again over time, so please don't remove it.
+ default : return id;
+ }
+}
+
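+/* Return a codec of the requested id for which x() (is_encoder/is_decoder)
+ * holds, preferring non-experimental codecs; an experimental codec is only
+ * returned if no regular one matches. */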
+static const AVCodec *find_codec(enum AVCodecID id, int (*x)(const AVCodec *))
+{
+ const AVCodec *p, *experimental = NULL;
+ void *i = 0;
+
+ id = remap_deprecated_codec_id(id);
+
+ while ((p = av_codec_iterate(&i))) {
+ if (!x(p))
+ continue;
+ if (p->id == id) {
+ if (p->capabilities & AV_CODEC_CAP_EXPERIMENTAL && !experimental) {
+ experimental = p;
+ } else
+ return p;
+ }
+ }
+
+ return experimental;
+}
+
+const AVCodec *avcodec_find_encoder(enum AVCodecID id)
+{
+ return find_codec(id, av_codec_is_encoder);
+}
+
+const AVCodec *avcodec_find_decoder(enum AVCodecID id)
+{
+ return find_codec(id, av_codec_is_decoder);
+}
+
+static const AVCodec *find_codec_by_name(const char *name, int (*x)(const AVCodec *))
+{
+ void *i = 0;
+ const AVCodec *p;
+
+ if (!name)
+ return NULL;
+
+ while ((p = av_codec_iterate(&i))) {
+ if (!x(p))
+ continue;
+ if (strcmp(name, p->name) == 0)
+ return p;
+ }
+
+ return NULL;
+}
+
+const AVCodec *avcodec_find_encoder_by_name(const char *name)
+{
+ return find_codec_by_name(name, av_codec_is_encoder);
+}
+
+const AVCodec *avcodec_find_decoder_by_name(const char *name)
+{
+ return find_codec_by_name(name, av_codec_is_decoder);
+}
diff --git a/media/ffvpx/libavcodec/arm/fft_init_arm.c b/media/ffvpx/libavcodec/arm/fft_init_arm.c
new file mode 100644
index 0000000000..8ae22dfb4e
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/fft_init_arm.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/arm/cpu.h"
+
+#include "libavcodec/fft.h"
+
+void ff_fft_calc_vfp(FFTContext *s, FFTComplex *z);
+
+void ff_fft_permute_neon(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_neon(FFTContext *s, FFTComplex *z);
+
+void ff_imdct_half_vfp(FFTContext *s, FFTSample *output, const FFTSample *input);
+
+void ff_imdct_calc_neon(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_neon(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_mdct_calc_neon(FFTContext *s, FFTSample *output, const FFTSample *input);
+
+av_cold void ff_fft_init_arm(FFTContext *s)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_vfp_vm(cpu_flags)) {
+ s->fft_calc = ff_fft_calc_vfp;
+#if CONFIG_MDCT
+ s->imdct_half = ff_imdct_half_vfp;
+#endif
+ }
+
+ if (have_neon(cpu_flags)) {
+#if CONFIG_FFT
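+ /* The NEON kernels only cover transforms of 4..65536 points (nbits
+ * 2..16); larger sizes keep the previously selected implementation. */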
+ if (s->nbits < 17) {
+ s->fft_permute = ff_fft_permute_neon;
+ s->fft_calc = ff_fft_calc_neon;
+ }
+#endif
+#if CONFIG_MDCT
+ s->imdct_calc = ff_imdct_calc_neon;
+ s->imdct_half = ff_imdct_half_neon;
+ s->mdct_calc = ff_mdct_calc_neon;
+ s->mdct_permutation = FF_MDCT_PERM_INTERLEAVE;
+#endif
+ }
+}
diff --git a/media/ffvpx/libavcodec/arm/fft_neon.S b/media/ffvpx/libavcodec/arm/fft_neon.S
new file mode 100644
index 0000000000..48f8dfc424
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/fft_neon.S
@@ -0,0 +1,375 @@
+/*
+ * ARM NEON optimised FFT
+ *
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2009 Naotoshi Nojiri
+ *
+ * This algorithm (though not any of the implementation details) is
+ * based on libdjbfft by D. J. Bernstein.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+#define M_SQRT1_2 0.70710678118654752440
+
+
+function fft4_neon
+ vld1.32 {d0-d3}, [r0,:128]
+
+ vext.32 q8, q1, q1, #1 @ i2,r3 d3=i3,r2
+ vsub.f32 d6, d0, d1 @ r0-r1,i0-i1
+ vsub.f32 d7, d16, d17 @ r3-r2,i2-i3
+ vadd.f32 d4, d0, d1 @ r0+r1,i0+i1
+ vadd.f32 d5, d2, d3 @ i2+i3,r2+r3
+ vadd.f32 d1, d6, d7
+ vsub.f32 d3, d6, d7
+ vadd.f32 d0, d4, d5
+ vsub.f32 d2, d4, d5
+
+ vst1.32 {d0-d3}, [r0,:128]
+
+ bx lr
+endfunc
+
+function fft8_neon
+ mov r1, r0
+ vld1.32 {d0-d3}, [r1,:128]!
+ vld1.32 {d16-d19}, [r1,:128]
+
+ movw r2, #0x04f3 @ sqrt(1/2)
+ movt r2, #0x3f35
+ eor r3, r2, #1<<31
+ vdup.32 d31, r2
+
+ vext.32 q11, q1, q1, #1 @ i2,r3,i3,r2
+ vadd.f32 d4, d16, d17 @ r4+r5,i4+i5
+ vmov d28, r3, r2
+ vadd.f32 d5, d18, d19 @ r6+r7,i6+i7
+ vsub.f32 d17, d16, d17 @ r4-r5,i4-i5
+ vsub.f32 d19, d18, d19 @ r6-r7,i6-i7
+ vrev64.32 d29, d28
+ vadd.f32 d20, d0, d1 @ r0+r1,i0+i1
+ vadd.f32 d21, d2, d3 @ r2+r3,i2+i3
+ vmul.f32 d26, d17, d28 @ -a2r*w,a2i*w
+ vext.32 q3, q2, q2, #1
+ vmul.f32 d27, d19, d29 @ a3r*w,-a3i*w
+ vsub.f32 d23, d22, d23 @ i2-i3,r3-r2
+ vsub.f32 d22, d0, d1 @ r0-r1,i0-i1
+ vmul.f32 d24, d17, d31 @ a2r*w,a2i*w
+ vmul.f32 d25, d19, d31 @ a3r*w,a3i*w
+ vadd.f32 d0, d20, d21
+ vsub.f32 d2, d20, d21
+ vadd.f32 d1, d22, d23
+ vrev64.32 q13, q13
+ vsub.f32 d3, d22, d23
+ vsub.f32 d6, d6, d7
+ vadd.f32 d24, d24, d26 @ a2r+a2i,a2i-a2r t1,t2
+ vadd.f32 d25, d25, d27 @ a3r-a3i,a3i+a3r t5,t6
+ vadd.f32 d7, d4, d5
+ vsub.f32 d18, d2, d6
+ vext.32 q13, q12, q12, #1
+ vadd.f32 d2, d2, d6
+ vsub.f32 d16, d0, d7
+ vadd.f32 d5, d25, d24
+ vsub.f32 d4, d26, d27
+ vadd.f32 d0, d0, d7
+ vsub.f32 d17, d1, d5
+ vsub.f32 d19, d3, d4
+ vadd.f32 d3, d3, d4
+ vadd.f32 d1, d1, d5
+
+ vst1.32 {d16-d19}, [r1,:128]
+ vst1.32 {d0-d3}, [r0,:128]
+
+ bx lr
+endfunc
+
+function fft16_neon
+ movrel r1, mppm
+ vld1.32 {d16-d19}, [r0,:128]! @ q8{r0,i0,r1,i1} q9{r2,i2,r3,i3}
+ pld [r0, #32]
+ vld1.32 {d2-d3}, [r1,:128]
+ vext.32 q13, q9, q9, #1
+ vld1.32 {d22-d25}, [r0,:128]! @ q11{r4,i4,r5,i5} q12{r6,i6,r7,i7}
+ vadd.f32 d4, d16, d17
+ vsub.f32 d5, d16, d17
+ vadd.f32 d18, d18, d19
+ vsub.f32 d19, d26, d27
+
+ vadd.f32 d20, d22, d23
+ vsub.f32 d22, d22, d23
+ vsub.f32 d23, d24, d25
+ vadd.f32 q8, q2, q9 @ {r0,i0,r1,i1}
+ vadd.f32 d21, d24, d25
+ vmul.f32 d24, d22, d2
+ vsub.f32 q9, q2, q9 @ {r2,i2,r3,i3}
+ vmul.f32 d25, d23, d3
+ vuzp.32 d16, d17 @ {r0,r1,i0,i1}
+ vmul.f32 q1, q11, d2[1]
+ vuzp.32 d18, d19 @ {r2,r3,i2,i3}
+ vrev64.32 q12, q12
+ vadd.f32 q11, q12, q1 @ {t1a,t2a,t5,t6}
+ vld1.32 {d24-d27}, [r0,:128]! @ q12{r8,i8,r9,i9} q13{r10,i10,r11,i11}
+ vzip.32 q10, q11
+ vld1.32 {d28-d31}, [r0,:128] @ q14{r12,i12,r13,i13} q15{r14,i14,r15,i15}
+ vadd.f32 d0, d22, d20
+ vadd.f32 d1, d21, d23
+ vsub.f32 d2, d21, d23
+ vsub.f32 d3, d22, d20
+ sub r0, r0, #96
+ vext.32 q13, q13, q13, #1
+ vsub.f32 q10, q8, q0 @ {r4,r5,i4,i5}
+ vadd.f32 q8, q8, q0 @ {r0,r1,i0,i1}
+ vext.32 q15, q15, q15, #1
+ vsub.f32 q11, q9, q1 @ {r6,r7,i6,i7}
+ vswp d25, d26 @ q12{r8,i8,i10,r11} q13{r9,i9,i11,r10}
+ vadd.f32 q9, q9, q1 @ {r2,r3,i2,i3}
+ vswp d29, d30 @ q14{r12,i12,i14,r15} q15{r13,i13,i15,r14}
+ vadd.f32 q0, q12, q13 @ {t1,t2,t5,t6}
+ vadd.f32 q1, q14, q15 @ {t1a,t2a,t5a,t6a}
+ movrelx r2, X(ff_cos_16)
+ vsub.f32 q13, q12, q13 @ {t3,t4,t7,t8}
+ vrev64.32 d1, d1
+ vsub.f32 q15, q14, q15 @ {t3a,t4a,t7a,t8a}
+ vrev64.32 d3, d3
+ movrel r3, pmmp
+ vswp d1, d26 @ q0{t1,t2,t3,t4} q13{t6,t5,t7,t8}
+ vswp d3, d30 @ q1{t1a,t2a,t3a,t4a} q15{t6a,t5a,t7a,t8a}
+ vadd.f32 q12, q0, q13 @ {r8,i8,r9,i9}
+ vadd.f32 q14, q1, q15 @ {r12,i12,r13,i13}
+ vld1.32 {d4-d5}, [r2,:64]
+ vsub.f32 q13, q0, q13 @ {r10,i10,r11,i11}
+ vsub.f32 q15, q1, q15 @ {r14,i14,r15,i15}
+ vswp d25, d28 @ q12{r8,i8,r12,i12} q14{r9,i9,r13,i13}
+ vld1.32 {d6-d7}, [r3,:128]
+ vrev64.32 q1, q14
+ vmul.f32 q14, q14, d4[1]
+ vmul.f32 q1, q1, q3
+ vmla.f32 q14, q1, d5[1] @ {t1a,t2a,t5a,t6a}
+ vswp d27, d30 @ q13{r10,i10,r14,i14} q15{r11,i11,r15,i15}
+ vzip.32 q12, q14
+ vadd.f32 d0, d28, d24
+ vadd.f32 d1, d25, d29
+ vsub.f32 d2, d25, d29
+ vsub.f32 d3, d28, d24
+ vsub.f32 q12, q8, q0 @ {r8,r9,i8,i9}
+ vadd.f32 q8, q8, q0 @ {r0,r1,i0,i1}
+ vsub.f32 q14, q10, q1 @ {r12,r13,i12,i13}
+ mov r1, #32
+ vadd.f32 q10, q10, q1 @ {r4,r5,i4,i5}
+ vrev64.32 q0, q13
+ vmul.f32 q13, q13, d5[0]
+ vrev64.32 q1, q15
+ vmul.f32 q15, q15, d5[1]
+ vst2.32 {d16-d17},[r0,:128], r1
+ vmul.f32 q0, q0, q3
+ vst2.32 {d20-d21},[r0,:128], r1
+ vmul.f32 q1, q1, q3
+ vmla.f32 q13, q0, d5[0] @ {t1,t2,t5,t6}
+ vmla.f32 q15, q1, d4[1] @ {t1a,t2a,t5a,t6a}
+ vst2.32 {d24-d25},[r0,:128], r1
+ vst2.32 {d28-d29},[r0,:128]
+ vzip.32 q13, q15
+ sub r0, r0, #80
+ vadd.f32 d0, d30, d26
+ vadd.f32 d1, d27, d31
+ vsub.f32 d2, d27, d31
+ vsub.f32 d3, d30, d26
+ vsub.f32 q13, q9, q0 @ {r10,r11,i10,i11}
+ vadd.f32 q9, q9, q0 @ {r2,r3,i2,i3}
+ vsub.f32 q15, q11, q1 @ {r14,r15,i14,i15}
+ vadd.f32 q11, q11, q1 @ {r6,r7,i6,i7}
+ vst2.32 {d18-d19},[r0,:128], r1
+ vst2.32 {d22-d23},[r0,:128], r1
+ vst2.32 {d26-d27},[r0,:128], r1
+ vst2.32 {d30-d31},[r0,:128]
+ bx lr
+endfunc
+
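+@ Combining pass over z[0], z[o1], z[o2] and z[o3]: the real twiddle
+@ factors are read forwards from r1 and the imaginary ones backwards from
+@ r1 + 2*n floats; r2 holds n.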
+function fft_pass_neon
+ push {r4-r6,lr}
+ mov r6, r2 @ n
+ lsl r5, r2, #3 @ 2 * n * sizeof FFTSample
+ lsl r4, r2, #4 @ 2 * n * sizeof FFTComplex
+ lsl r2, r2, #5 @ 4 * n * sizeof FFTComplex
+ add r3, r2, r4
+ add r4, r4, r0 @ &z[o1]
+ add r2, r2, r0 @ &z[o2]
+ add r3, r3, r0 @ &z[o3]
+ vld1.32 {d20-d21},[r2,:128] @ {z[o2],z[o2+1]}
+ movrel r12, pmmp
+ vld1.32 {d22-d23},[r3,:128] @ {z[o3],z[o3+1]}
+ add r5, r5, r1 @ wim
+ vld1.32 {d6-d7}, [r12,:128] @ pmmp
+ vswp d21, d22
+ vld1.32 {d4}, [r1,:64]! @ {wre[0],wre[1]}
+ sub r5, r5, #4 @ wim--
+ vrev64.32 q1, q11
+ vmul.f32 q11, q11, d4[1]
+ vmul.f32 q1, q1, q3
+ vld1.32 {d5[0]}, [r5,:32] @ d5[0] = wim[-1]
+ vmla.f32 q11, q1, d5[0] @ {t1a,t2a,t5a,t6a}
+ vld2.32 {d16-d17},[r0,:128] @ {z[0],z[1]}
+ sub r6, r6, #1 @ n--
+ vld2.32 {d18-d19},[r4,:128] @ {z[o1],z[o1+1]}
+ vzip.32 q10, q11
+ vadd.f32 d0, d22, d20
+ vadd.f32 d1, d21, d23
+ vsub.f32 d2, d21, d23
+ vsub.f32 d3, d22, d20
+ vsub.f32 q10, q8, q0
+ vadd.f32 q8, q8, q0
+ vsub.f32 q11, q9, q1
+ vadd.f32 q9, q9, q1
+ vst2.32 {d20-d21},[r2,:128]! @ {z[o2],z[o2+1]}
+ vst2.32 {d16-d17},[r0,:128]! @ {z[0],z[1]}
+ vst2.32 {d22-d23},[r3,:128]! @ {z[o3],z[o3+1]}
+ vst2.32 {d18-d19},[r4,:128]! @ {z[o1],z[o1+1]}
+ sub r5, r5, #8 @ wim -= 2
+1:
+ vld1.32 {d20-d21},[r2,:128] @ {z[o2],z[o2+1]}
+ vld1.32 {d22-d23},[r3,:128] @ {z[o3],z[o3+1]}
+ vswp d21, d22
+ vld1.32 {d4}, [r1]! @ {wre[0],wre[1]}
+ vrev64.32 q0, q10
+ vmul.f32 q10, q10, d4[0]
+ vrev64.32 q1, q11
+ vmul.f32 q11, q11, d4[1]
+ vld1.32 {d5}, [r5] @ {wim[-1],wim[0]}
+ vmul.f32 q0, q0, q3
+ sub r5, r5, #8 @ wim -= 2
+ vmul.f32 q1, q1, q3
+ vmla.f32 q10, q0, d5[1] @ {t1,t2,t5,t6}
+ vmla.f32 q11, q1, d5[0] @ {t1a,t2a,t5a,t6a}
+ vld2.32 {d16-d17},[r0,:128] @ {z[0],z[1]}
+ subs r6, r6, #1 @ n--
+ vld2.32 {d18-d19},[r4,:128] @ {z[o1],z[o1+1]}
+ vzip.32 q10, q11
+ vadd.f32 d0, d22, d20
+ vadd.f32 d1, d21, d23
+ vsub.f32 d2, d21, d23
+ vsub.f32 d3, d22, d20
+ vsub.f32 q10, q8, q0
+ vadd.f32 q8, q8, q0
+ vsub.f32 q11, q9, q1
+ vadd.f32 q9, q9, q1
+ vst2.32 {d20-d21}, [r2,:128]! @ {z[o2],z[o2+1]}
+ vst2.32 {d16-d17}, [r0,:128]! @ {z[0],z[1]}
+ vst2.32 {d22-d23}, [r3,:128]! @ {z[o3],z[o3+1]}
+ vst2.32 {d18-d19}, [r4,:128]! @ {z[o1],z[o1+1]}
+ bne 1b
+
+ pop {r4-r6,pc}
+endfunc
+
+.macro def_fft n, n2, n4
+ .align 6
+function fft\n\()_neon
+ push {r4, lr}
+ mov r4, r0
+ bl fft\n2\()_neon
+ add r0, r4, #\n4*2*8
+ bl fft\n4\()_neon
+ add r0, r4, #\n4*3*8
+ bl fft\n4\()_neon
+ mov r0, r4
+ pop {r4, lr}
+ movrelx r1, X(ff_cos_\n)
+ mov r2, #\n4/2
+ b fft_pass_neon
+endfunc
+.endm
+
+ def_fft 32, 16, 8
+ def_fft 64, 32, 16
+ def_fft 128, 64, 32
+ def_fft 256, 128, 64
+ def_fft 512, 256, 128
+ def_fft 1024, 512, 256
+ def_fft 2048, 1024, 512
+ def_fft 4096, 2048, 1024
+ def_fft 8192, 4096, 2048
+ def_fft 16384, 8192, 4096
+ def_fft 32768, 16384, 8192
+ def_fft 65536, 32768, 16384
+
+function ff_fft_calc_neon, export=1
+ ldr r2, [r0]
+ sub r2, r2, #2
+ movrel r3, fft_tab_neon
+ ldr r3, [r3, r2, lsl #2]
+ mov r0, r1
+ bx r3
+endfunc
+
+function ff_fft_permute_neon, export=1
+ push {r4,lr}
+ mov r12, #1
+ ldr r2, [r0] @ nbits
+ ldr r3, [r0, #12] @ tmp_buf
+ ldr r0, [r0, #8] @ revtab
+ lsl r12, r12, r2
+ mov r2, r12
+1:
+ vld1.32 {d0-d1}, [r1,:128]!
+ ldr r4, [r0], #4
+ uxth lr, r4
+ uxth r4, r4, ror #16
+ add lr, r3, lr, lsl #3
+ add r4, r3, r4, lsl #3
+ vst1.32 {d0}, [lr,:64]
+ vst1.32 {d1}, [r4,:64]
+ subs r12, r12, #2
+ bgt 1b
+
+ sub r1, r1, r2, lsl #3
+1:
+ vld1.32 {d0-d3}, [r3,:128]!
+ vst1.32 {d0-d3}, [r1,:128]!
+ subs r2, r2, #4
+ bgt 1b
+
+ pop {r4,pc}
+endfunc
+
+const fft_tab_neon, relocate=1
+ .word fft4_neon
+ .word fft8_neon
+ .word fft16_neon
+ .word fft32_neon
+ .word fft64_neon
+ .word fft128_neon
+ .word fft256_neon
+ .word fft512_neon
+ .word fft1024_neon
+ .word fft2048_neon
+ .word fft4096_neon
+ .word fft8192_neon
+ .word fft16384_neon
+ .word fft32768_neon
+ .word fft65536_neon
+endconst
+
+const pmmp, align=4
+ .float +1.0, -1.0, -1.0, +1.0
+endconst
+
+const mppm, align=4
+ .float -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
+endconst
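
For orientation: the def_fft macro and fft_pass_neon above follow the same split-radix structure as FFmpeg's generic C FFT, where an N-point transform is built from one N/2-point transform, two N/4-point transforms on the upper half, and a combining pass over the ff_cos_N twiddle table. The following is a rough C sketch of that recursion, assuming the FFTComplex type and a pass() helper equivalent to fft_pass_neon; it is a reading aid for the assembly, not the exact FFmpeg source.

    typedef struct FFTComplex { float re, im; } FFTComplex;

    extern const float ff_cos_64[];                            /* twiddle table assumed to exist   */
    void fft16(FFTComplex *z);                                 /* smallest hand-unrolled transform */
    void fft32(FFTComplex *z);
    void pass(FFTComplex *z, const float *wre, unsigned n);    /* role of fft_pass_neon            */

    /* What "def_fft 64, 32, 16" expands to: one half-size transform, two
     * quarter-size transforms on the upper half, then one combining pass. */
    void fft64(FFTComplex *z)
    {
        fft32(z);            /* z[0]  .. z[31] */
        fft16(z + 32);       /* z[32] .. z[47] */
        fft16(z + 48);       /* z[48] .. z[63] */
        pass(z, ff_cos_64, 64 / 4 / 2);
    }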
diff --git a/media/ffvpx/libavcodec/arm/fft_vfp.S b/media/ffvpx/libavcodec/arm/fft_vfp.S
new file mode 100644
index 0000000000..ac601325f2
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/fft_vfp.S
@@ -0,0 +1,530 @@
+/*
+ * Copyright (c) 2013 RISC OS Open Ltd
+ * Author: Ben Avison <bavison@riscosopen.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+@ The fftx_internal_vfp versions of the functions obey a modified AAPCS:
+@ VFP is in RunFast mode, vector length 4, stride 1 throughout, and
+@ all single-precision VFP registers may be corrupted on exit. The a2
+@ register may not be clobbered in these functions, as it holds the
+@ stored original FPSCR.
+
+function ff_fft_calc_vfp, export=1
+ ldr ip, [a1, #0] @ nbits
+ mov a1, a2
+ movrel a2, (fft_tab_vfp - 8)
+ ldr pc, [a2, ip, lsl #2]
+endfunc
+const fft_tab_vfp, relocate=1
+ .word fft4_vfp
+ .word fft8_vfp
+ .word X(ff_fft16_vfp) @ this one alone is exported
+ .word fft32_vfp
+ .word fft64_vfp
+ .word fft128_vfp
+ .word fft256_vfp
+ .word fft512_vfp
+ .word fft1024_vfp
+ .word fft2048_vfp
+ .word fft4096_vfp
+ .word fft8192_vfp
+ .word fft16384_vfp
+ .word fft32768_vfp
+ .word fft65536_vfp
+endconst
+
+function fft4_vfp
+ vldr d0, [a1, #0*2*4] @ s0,s1 = z[0]
+ vldr d4, [a1, #1*2*4] @ s8,s9 = z[1]
+ vldr d1, [a1, #2*2*4] @ s2,s3 = z[2]
+ vldr d5, [a1, #3*2*4] @ s10,s11 = z[3]
+ @ stall
+ vadd.f s12, s0, s8 @ i0
+ vadd.f s13, s1, s9 @ i1
+ vadd.f s14, s2, s10 @ i2
+ vadd.f s15, s3, s11 @ i3
+ vsub.f s8, s0, s8 @ i4
+ vsub.f s9, s1, s9 @ i5
+ vsub.f s10, s2, s10 @ i6
+ vsub.f s11, s3, s11 @ i7
+ @ stall
+ @ stall
+ vadd.f s0, s12, s14 @ z[0].re
+ vsub.f s4, s12, s14 @ z[2].re
+ vadd.f s1, s13, s15 @ z[0].im
+ vsub.f s5, s13, s15 @ z[2].im
+ vadd.f s7, s9, s10 @ z[3].im
+ vsub.f s3, s9, s10 @ z[1].im
+ vadd.f s2, s8, s11 @ z[1].re
+ vsub.f s6, s8, s11 @ z[3].re
+ @ stall
+ @ stall
+ vstr d0, [a1, #0*2*4]
+ vstr d2, [a1, #2*2*4]
+ @ stall
+ @ stall
+ vstr d1, [a1, #1*2*4]
+ vstr d3, [a1, #3*2*4]
+
+ bx lr
+endfunc
+
+.macro macro_fft8_head
+ @ FFT4
+ vldr d4, [a1, #0 * 2*4]
+ vldr d6, [a1, #1 * 2*4]
+ vldr d5, [a1, #2 * 2*4]
+ vldr d7, [a1, #3 * 2*4]
+ @ BF
+ vldr d12, [a1, #4 * 2*4]
+ vadd.f s16, s8, s12 @ vector op
+ vldr d14, [a1, #5 * 2*4]
+ vldr d13, [a1, #6 * 2*4]
+ vldr d15, [a1, #7 * 2*4]
+ vsub.f s20, s8, s12 @ vector op
+ vadd.f s0, s16, s18
+ vsub.f s2, s16, s18
+ vadd.f s1, s17, s19
+ vsub.f s3, s17, s19
+ vadd.f s7, s21, s22
+ vsub.f s5, s21, s22
+ vadd.f s4, s20, s23
+ vsub.f s6, s20, s23
+ vsub.f s20, s24, s28 @ vector op
+ vstr d0, [a1, #0 * 2*4] @ transfer s0-s7 to s24-s31 via memory
+ vstr d1, [a1, #1 * 2*4]
+ vldr s0, cos1pi4
+ vadd.f s16, s24, s28 @ vector op
+ vstr d2, [a1, #2 * 2*4]
+ vstr d3, [a1, #3 * 2*4]
+ vldr d12, [a1, #0 * 2*4]
+ @ TRANSFORM
+ vmul.f s20, s20, s0 @ vector x scalar op
+ vldr d13, [a1, #1 * 2*4]
+ vldr d14, [a1, #2 * 2*4]
+ vldr d15, [a1, #3 * 2*4]
+ @ BUTTERFLIES
+ vadd.f s0, s18, s16
+ vadd.f s1, s17, s19
+ vsub.f s2, s17, s19
+ vsub.f s3, s18, s16
+ vadd.f s4, s21, s20
+ vsub.f s5, s21, s20
+ vadd.f s6, s22, s23
+ vsub.f s7, s22, s23
+ vadd.f s8, s0, s24 @ vector op
+ vstr d0, [a1, #0 * 2*4] @ transfer s0-s3 to s12-s15 via memory
+ vstr d1, [a1, #1 * 2*4]
+ vldr d6, [a1, #0 * 2*4]
+ vldr d7, [a1, #1 * 2*4]
+ vadd.f s1, s5, s6
+ vadd.f s0, s7, s4
+ vsub.f s2, s5, s6
+ vsub.f s3, s7, s4
+ vsub.f s12, s24, s12 @ vector op
+ vsub.f s5, s29, s1
+ vsub.f s4, s28, s0
+ vsub.f s6, s30, s2
+ vsub.f s7, s31, s3
+ vadd.f s16, s0, s28 @ vector op
+ vstr d6, [a1, #4 * 2*4]
+ vstr d7, [a1, #6 * 2*4]
+ vstr d4, [a1, #0 * 2*4]
+ vstr d5, [a1, #2 * 2*4]
+ vstr d2, [a1, #5 * 2*4]
+ vstr d3, [a1, #7 * 2*4]
+.endm
+
+.macro macro_fft8_tail
+ vstr d8, [a1, #1 * 2*4]
+ vstr d9, [a1, #3 * 2*4]
+.endm
+
+function .Lfft8_internal_vfp
+ macro_fft8_head
+ macro_fft8_tail
+ bx lr
+endfunc
+
+function fft8_vfp
+ ldr a3, =0x03030000 @ RunFast mode, vector length 4, stride 1
+ fmrx a2, FPSCR
+ fmxr FPSCR, a3
+ vpush {s16-s31}
+ mov ip, lr
+ bl .Lfft8_internal_vfp
+ vpop {s16-s31}
+ fmxr FPSCR, a2
+ bx ip
+endfunc
+
+.align 3
+cos1pi4: @ cos(1*pi/4) = sqrt(2)/2
+ .float 0.707106769084930419921875
+cos1pi8: @ cos(1*pi/8) = sqrt(2+sqrt(2))/2
+ .float 0.92387950420379638671875
+cos3pi8: @ cos(3*pi/8) = sqrt(2-sqrt(2))/2
+ .float 0.3826834261417388916015625
+
+function .Lfft16_internal_vfp
+ macro_fft8_head
+ @ FFT4(z+8)
+ vldr d10, [a1, #8 * 2*4]
+ vldr d12, [a1, #9 * 2*4]
+ vldr d11, [a1, #10 * 2*4]
+ vldr d13, [a1, #11 * 2*4]
+ macro_fft8_tail
+ vadd.f s16, s20, s24 @ vector op
+ @ FFT4(z+12)
+ vldr d4, [a1, #12 * 2*4]
+ vldr d6, [a1, #13 * 2*4]
+ vldr d5, [a1, #14 * 2*4]
+ vsub.f s20, s20, s24 @ vector op
+ vldr d7, [a1, #15 * 2*4]
+ vadd.f s0, s16, s18
+ vsub.f s4, s16, s18
+ vadd.f s1, s17, s19
+ vsub.f s5, s17, s19
+ vadd.f s7, s21, s22
+ vsub.f s3, s21, s22
+ vadd.f s2, s20, s23
+ vsub.f s6, s20, s23
+ vadd.f s16, s8, s12 @ vector op
+ vstr d0, [a1, #8 * 2*4]
+ vstr d2, [a1, #10 * 2*4]
+ vstr d1, [a1, #9 * 2*4]
+ vsub.f s20, s8, s12
+ vstr d3, [a1, #11 * 2*4]
+ @ TRANSFORM(z[2],z[6],z[10],z[14],cos1pi4,cos1pi4)
+ vldr d12, [a1, #10 * 2*4]
+ vadd.f s0, s16, s18
+ vadd.f s1, s17, s19
+ vsub.f s6, s16, s18
+ vsub.f s7, s17, s19
+ vsub.f s3, s21, s22
+ vadd.f s2, s20, s23
+ vadd.f s5, s21, s22
+ vsub.f s4, s20, s23
+ vstr d0, [a1, #12 * 2*4]
+ vmov s0, s6
+ @ TRANSFORM(z[1],z[5],z[9],z[13],cos1pi8,cos3pi8)
+ vldr d6, [a1, #9 * 2*4]
+ vstr d1, [a1, #13 * 2*4]
+ vldr d1, cos1pi4 @ s2 = cos1pi4, s3 = cos1pi8
+ vstr d2, [a1, #15 * 2*4]
+ vldr d7, [a1, #13 * 2*4]
+ vadd.f s4, s25, s24
+ vsub.f s5, s25, s24
+ vsub.f s6, s0, s7
+ vadd.f s7, s0, s7
+ vmul.f s20, s12, s3 @ vector op
+ @ TRANSFORM(z[3],z[7],z[11],z[15],cos3pi8,cos1pi8)
+ vldr d4, [a1, #11 * 2*4]
+ vldr d5, [a1, #15 * 2*4]
+ vldr s1, cos3pi8
+ vmul.f s24, s4, s2 @ vector * scalar op
+ vmul.f s28, s12, s1 @ vector * scalar op
+ vmul.f s12, s8, s1 @ vector * scalar op
+ vadd.f s4, s20, s29
+ vsub.f s5, s21, s28
+ vsub.f s6, s22, s31
+ vadd.f s7, s23, s30
+ vmul.f s8, s8, s3 @ vector * scalar op
+ vldr d8, [a1, #1 * 2*4]
+ vldr d9, [a1, #5 * 2*4]
+ vldr d10, [a1, #3 * 2*4]
+ vldr d11, [a1, #7 * 2*4]
+ vldr d14, [a1, #2 * 2*4]
+ vadd.f s0, s6, s4
+ vadd.f s1, s5, s7
+ vsub.f s2, s5, s7
+ vsub.f s3, s6, s4
+ vadd.f s4, s12, s9
+ vsub.f s5, s13, s8
+ vsub.f s6, s14, s11
+ vadd.f s7, s15, s10
+ vadd.f s12, s0, s16 @ vector op
+ vstr d0, [a1, #1 * 2*4]
+ vstr d1, [a1, #5 * 2*4]
+ vldr d4, [a1, #1 * 2*4]
+ vldr d5, [a1, #5 * 2*4]
+ vadd.f s0, s6, s4
+ vadd.f s1, s5, s7
+ vsub.f s2, s5, s7
+ vsub.f s3, s6, s4
+ vsub.f s8, s16, s8 @ vector op
+ vstr d6, [a1, #1 * 2*4]
+ vstr d7, [a1, #5 * 2*4]
+ vldr d15, [a1, #6 * 2*4]
+ vsub.f s4, s20, s0
+ vsub.f s5, s21, s1
+ vsub.f s6, s22, s2
+ vsub.f s7, s23, s3
+ vadd.f s20, s0, s20 @ vector op
+ vstr d4, [a1, #9 * 2*4]
+ @ TRANSFORM_ZERO(z[0],z[4],z[8],z[12])
+ vldr d6, [a1, #8 * 2*4]
+ vstr d5, [a1, #13 * 2*4]
+ vldr d7, [a1, #12 * 2*4]
+ vstr d2, [a1, #11 * 2*4]
+ vldr d8, [a1, #0 * 2*4]
+ vstr d3, [a1, #15 * 2*4]
+ vldr d9, [a1, #4 * 2*4]
+ vadd.f s0, s26, s24
+ vadd.f s1, s25, s27
+ vsub.f s2, s25, s27
+ vsub.f s3, s26, s24
+ vadd.f s4, s14, s12
+ vadd.f s5, s13, s15
+ vsub.f s6, s13, s15
+ vsub.f s7, s14, s12
+ vadd.f s8, s0, s28 @ vector op
+ vstr d0, [a1, #3 * 2*4]
+ vstr d1, [a1, #7 * 2*4]
+ vldr d6, [a1, #3 * 2*4]
+ vldr d7, [a1, #7 * 2*4]
+ vsub.f s0, s16, s4
+ vsub.f s1, s17, s5
+ vsub.f s2, s18, s6
+ vsub.f s3, s19, s7
+ vsub.f s12, s28, s12 @ vector op
+ vadd.f s16, s4, s16 @ vector op
+ vstr d10, [a1, #3 * 2*4]
+ vstr d11, [a1, #7 * 2*4]
+ vstr d4, [a1, #2 * 2*4]
+ vstr d5, [a1, #6 * 2*4]
+ vstr d0, [a1, #8 * 2*4]
+ vstr d1, [a1, #12 * 2*4]
+ vstr d6, [a1, #10 * 2*4]
+ vstr d7, [a1, #14 * 2*4]
+ vstr d8, [a1, #0 * 2*4]
+ vstr d9, [a1, #4 * 2*4]
+
+ bx lr
+endfunc
+
+function ff_fft16_vfp, export=1
+ ldr a3, =0x03030000 @ RunFast mode, vector length 4, stride 1
+ fmrx a2, FPSCR
+ fmxr FPSCR, a3
+ vpush {s16-s31}
+ mov ip, lr
+ bl .Lfft16_internal_vfp
+ vpop {s16-s31}
+ fmxr FPSCR, a2
+ bx ip
+endfunc
+
+.macro pass n, z0, z1, z2, z3
+ add v6, v5, #4*2*\n
+ @ TRANSFORM_ZERO(z[0],z[o1],z[o2],z[o3])
+ @ TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1])
+ @ TRANSFORM(z[0],z[o1],z[o2],z[o3],wre[0],wim[0])
+ @ TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1])
+ vldr d8, [\z2, #8*(o2+1)] @ s16,s17
+ vldmdb v6!, {s2}
+ vldr d9, [\z3, #8*(o3+1)] @ s18,s19
+ vldmia v5!, {s0,s1} @ s0 is unused
+ vldr s7, [\z2, #8*o2] @ t1
+ vmul.f s20, s16, s2 @ vector * scalar
+ vldr s0, [\z3, #8*o3] @ t5
+ vldr s6, [\z2, #8*o2+4] @ t2
+ vldr s3, [\z3, #8*o3+4] @ t6
+ vmul.f s16, s16, s1 @ vector * scalar
+ ldr a4, =\n-1
+1: add \z0, \z0, #8*2
+ .if \n*4*2 >= 512
+ add \z1, \z1, #8*2
+ .endif
+ .if \n*4*2 >= 256
+ add \z2, \z2, #8*2
+ .endif
+ .if \n*4*2 >= 512
+ add \z3, \z3, #8*2
+ .endif
+ @ up to 2 stalls (VFP vector issuing / waiting for s0)
+ @ depending upon whether this is the first iteration and
+ @ how many add instructions are inserted above
+ vadd.f s4, s0, s7 @ t5
+ vadd.f s5, s6, s3 @ t6
+ vsub.f s6, s6, s3 @ t4
+ vsub.f s7, s0, s7 @ t3
+ vldr d6, [\z0, #8*0-8*2] @ s12,s13
+ vadd.f s0, s16, s21 @ t1
+ vldr d7, [\z1, #8*o1-8*2] @ s14,s15
+ vsub.f s1, s18, s23 @ t5
+ vadd.f s8, s4, s12 @ vector + vector
+ @ stall (VFP vector issuing)
+ @ stall (VFP vector issuing)
+ @ stall (VFP vector issuing)
+ vsub.f s4, s12, s4
+ vsub.f s5, s13, s5
+ vsub.f s6, s14, s6
+ vsub.f s7, s15, s7
+ vsub.f s2, s17, s20 @ t2
+ vadd.f s3, s19, s22 @ t6
+ vstr d4, [\z0, #8*0-8*2] @ s8,s9
+ vstr d5, [\z1, #8*o1-8*2] @ s10,s11
+ @ stall (waiting for s5)
+ vstr d2, [\z2, #8*o2-8*2] @ s4,s5
+ vadd.f s4, s1, s0 @ t5
+ vstr d3, [\z3, #8*o3-8*2] @ s6,s7
+ vsub.f s7, s1, s0 @ t3
+ vadd.f s5, s2, s3 @ t6
+ vsub.f s6, s2, s3 @ t4
+ vldr d6, [\z0, #8*1-8*2] @ s12,s13
+ vldr d7, [\z1, #8*(o1+1)-8*2] @ s14,s15
+ vldr d4, [\z2, #8*o2] @ s8,s9
+ vldmdb v6!, {s2,s3}
+ vldr d5, [\z3, #8*o3] @ s10,s11
+ vadd.f s20, s4, s12 @ vector + vector
+ vldmia v5!, {s0,s1}
+ vldr d8, [\z2, #8*(o2+1)] @ s16,s17
+ @ stall (VFP vector issuing)
+ vsub.f s4, s12, s4
+ vsub.f s5, s13, s5
+ vsub.f s6, s14, s6
+ vsub.f s7, s15, s7
+ vmul.f s12, s8, s3 @ vector * scalar
+ vstr d10, [\z0, #8*1-8*2] @ s20,s21
+ vldr d9, [\z3, #8*(o3+1)] @ s18,s19
+ vstr d11, [\z1, #8*(o1+1)-8*2] @ s22,s23
+ vmul.f s8, s8, s0 @ vector * scalar
+ vstr d2, [\z2, #8*(o2+1)-8*2] @ s4,s5
+ @ stall (waiting for s7)
+ vstr d3, [\z3, #8*(o3+1)-8*2] @ s6,s7
+ vmul.f s20, s16, s2 @ vector * scalar
+ @ stall (VFP vector issuing)
+ @ stall (VFP vector issuing)
+ @ stall (VFP vector issuing)
+ vadd.f s7, s8, s13 @ t1
+ vsub.f s6, s9, s12 @ t2
+ vsub.f s0, s10, s15 @ t5
+ vadd.f s3, s11, s14 @ t6
+ vmul.f s16, s16, s1 @ vector * scalar
+ subs a4, a4, #1
+ bne 1b
+ @ What remains is identical to the first two indentations of
+ @ the above, but without the increment of z
+ vadd.f s4, s0, s7 @ t5
+ vadd.f s5, s6, s3 @ t6
+ vsub.f s6, s6, s3 @ t4
+ vsub.f s7, s0, s7 @ t3
+ vldr d6, [\z0, #8*0] @ s12,s13
+ vadd.f s0, s16, s21 @ t1
+ vldr d7, [\z1, #8*o1] @ s14,s15
+ vsub.f s1, s18, s23 @ t5
+ vadd.f s8, s4, s12 @ vector + vector
+ vsub.f s4, s12, s4
+ vsub.f s5, s13, s5
+ vsub.f s6, s14, s6
+ vsub.f s7, s15, s7
+ vsub.f s2, s17, s20 @ t2
+ vadd.f s3, s19, s22 @ t6
+ vstr d4, [\z0, #8*0] @ s8,s9
+ vstr d5, [\z1, #8*o1] @ s10,s11
+ vstr d2, [\z2, #8*o2] @ s4,s5
+ vadd.f s4, s1, s0 @ t5
+ vstr d3, [\z3, #8*o3] @ s6,s7
+ vsub.f s7, s1, s0 @ t3
+ vadd.f s5, s2, s3 @ t6
+ vsub.f s6, s2, s3 @ t4
+ vldr d6, [\z0, #8*1] @ s12,s13
+ vldr d7, [\z1, #8*(o1+1)] @ s14,s15
+ vadd.f s20, s4, s12 @ vector + vector
+ vsub.f s4, s12, s4
+ vsub.f s5, s13, s5
+ vsub.f s6, s14, s6
+ vsub.f s7, s15, s7
+ vstr d10, [\z0, #8*1] @ s20,s21
+ vstr d11, [\z1, #8*(o1+1)] @ s22,s23
+ vstr d2, [\z2, #8*(o2+1)] @ s4,s5
+ vstr d3, [\z3, #8*(o3+1)] @ s6,s7
+.endm
+
+.macro def_fft n, n2, n4
+function .Lfft\n\()_internal_vfp
+ .if \n >= 512
+ push {v1-v6,lr}
+ .elseif \n >= 256
+ push {v1-v2,v5-v6,lr}
+ .else
+ push {v1,v5-v6,lr}
+ .endif
+ mov v1, a1
+ bl .Lfft\n2\()_internal_vfp
+ add a1, v1, #8*(\n/4)*2
+ bl .Lfft\n4\()_internal_vfp
+ movrelx v5, X(ff_cos_\n), a1
+ add a1, v1, #8*(\n/4)*3
+ bl .Lfft\n4\()_internal_vfp
+ .if \n >= 512
+ .set o1, 0*(\n/4/2)
+ .set o2, 0*(\n/4/2)
+ .set o3, 0*(\n/4/2)
+ add v2, v1, #8*2*(\n/4/2)
+ add v3, v1, #8*4*(\n/4/2)
+ add v4, v1, #8*6*(\n/4/2)
+ pass (\n/4/2), v1, v2, v3, v4
+ pop {v1-v6,pc}
+ .elseif \n >= 256
+ .set o1, 2*(\n/4/2)
+ .set o2, 0*(\n/4/2)
+ .set o3, 2*(\n/4/2)
+ add v2, v1, #8*4*(\n/4/2)
+ pass (\n/4/2), v1, v1, v2, v2
+ pop {v1-v2,v5-v6,pc}
+ .else
+ .set o1, 2*(\n/4/2)
+ .set o2, 4*(\n/4/2)
+ .set o3, 6*(\n/4/2)
+ pass (\n/4/2), v1, v1, v1, v1
+ pop {v1,v5-v6,pc}
+ .endif
+endfunc
+
+function fft\n\()_vfp
+ ldr a3, =0x03030000 /* RunFast mode, vector length 4, stride 1 */
+ fmrx a2, FPSCR
+ fmxr FPSCR, a3
+ vpush {s16-s31}
+ mov ip, lr
+ bl .Lfft\n\()_internal_vfp
+ vpop {s16-s31}
+ fmxr FPSCR, a2
+ bx ip
+endfunc
+
+.ltorg
+.endm
+
+ def_fft 32, 16, 8
+ def_fft 64, 32, 16
+ def_fft 128, 64, 32
+ def_fft 256, 128, 64
+ def_fft 512, 256, 128
+ def_fft 1024, 512, 256
+ def_fft 2048, 1024, 512
+ def_fft 4096, 2048, 1024
+ def_fft 8192, 4096, 2048
+ def_fft 16384, 8192, 4096
+ def_fft 32768, 16384, 8192
+ def_fft 65536, 32768, 16384
diff --git a/media/ffvpx/libavcodec/arm/flacdsp_arm.S b/media/ffvpx/libavcodec/arm/flacdsp_arm.S
new file mode 100644
index 0000000000..f8861c5967
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/flacdsp_arm.S
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2012 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+function flac_lpc_16_1_arm
+ ldr r12, [sp]
+ push {r4, lr}
+ ldr r1, [r1]
+ subs r12, r12, #2
+ ldr lr, [r0], #4
+ beq 2f
+ it lt
+ poplt {r4, pc}
+1:
+ mul r4, lr, r1
+ ldm r0, {r2, lr}
+ add_sh r2, r2, r4, asr r3
+ mul r4, r2, r1
+ subs r12, r12, #2
+ add_sh lr, lr, r4, asr r3
+ stm r0!, {r2, lr}
+ bgt 1b
+ it lt
+ poplt {r4, pc}
+2:
+ mul r4, lr, r1
+ ldr r2, [r0]
+ add_sh r2, r2, r4, asr r3
+ str r2, [r0]
+ pop {r4, pc}
+endfunc
+
+function flac_lpc_16_2_arm
+ ldr r12, [sp]
+ subs r12, r12, r2
+ it le
+ bxle lr
+
+ push {r4-r9, lr}
+ ldm r0!, {r6, r7}
+ ldm r1, {r8, r9}
+ subs r12, r12, #1
+ beq 2f
+1:
+ mul r4, r6, r8
+ mul r5, r7, r8
+ mla r4, r7, r9, r4
+ ldm r0, {r6, r7}
+ add_sh r6, r6, r4, asr r3
+ mla r5, r6, r9, r5
+ add_sh r7, r7, r5, asr r3
+ stm r0!, {r6, r7}
+ subs r12, r12, #2
+ bgt 1b
+ it lt
+ poplt {r4-r9, pc}
+2:
+ mul r4, r6, r8
+ mla r4, r7, r9, r4
+ ldr r5, [r0]
+ add_sh r5, r5, r4, asr r3
+ str r5, [r0]
+ pop {r4-r9, pc}
+endfunc
+
+function ff_flac_lpc_16_arm, export=1
+ cmp r2, #2
+ blt flac_lpc_16_1_arm
+ beq flac_lpc_16_2_arm
+
+ ldr r12, [sp]
+ subs r12, r12, r2
+ it le
+ bxle lr
+
+ push {r4-r9, lr}
+
+ subs r12, r12, #1
+ beq 3f
+1:
+ sub lr, r2, #2
+ mov r4, #0
+ mov r5, #0
+
+ ldr r7, [r0], #4
+ ldr r9, [r1], #4
+2:
+ mla r4, r7, r9, r4
+ ldm r0!, {r6, r7}
+ mla r5, r6, r9, r5
+ ldm r1!, {r8, r9}
+ mla r4, r6, r8, r4
+ subs lr, lr, #2
+ mla r5, r7, r8, r5
+ bgt 2b
+ blt 6f
+
+ mla r4, r7, r9, r4
+ ldr r7, [r0], #4
+ mla r5, r7, r9, r5
+ ldr r9, [r1], #4
+6:
+ mla r4, r7, r9, r4
+ ldm r0, {r6, r7}
+ add_sh r6, r6, r4, asr r3
+ mla r5, r6, r9, r5
+ add_sh r7, r7, r5, asr r3
+ stm r0!, {r6, r7}
+ sub r0, r0, r2, lsl #2
+ sub r1, r1, r2, lsl #2
+
+ subs r12, r12, #2
+ bgt 1b
+ it lt
+ poplt {r4-r9, pc}
+3:
+ mov r4, #0
+4:
+ ldr r5, [r1], #4
+ ldr r6, [r0], #4
+ mla r4, r5, r6, r4
+ subs r2, r2, #1
+ bgt 4b
+ ldr r5, [r0]
+ add_sh r5, r5, r4, asr r3
+ str r5, [r0]
+ pop {r4-r9, pc}
+endfunc
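
For readers coming from the C side: ff_flac_lpc_16_arm and its helpers above restore FLAC residuals in place with the quantized LPC recurrence, unrolled two samples per iteration. Below is a plain scalar sketch of the same recurrence, matching the prototype declared in flacdsp_init_arm.c; the function name is illustrative and this is not FFmpeg's exact C code.

    #include <stdint.h>

    /* Reference loop: add the quantized prediction from the previous `order`
     * decoded samples back onto each residual; add_sh in the assembly is the
     * "accumulate, arithmetic-shift by qlevel, then add" step. */
    static void flac_lpc_16_ref(int32_t *samples, const int coeffs[32],
                                int order, int qlevel, int len)
    {
        for (int i = order; i < len; i++) {
            int sum = 0;
            for (int j = 0; j < order; j++)
                sum += coeffs[j] * samples[i - 1 - j];
            samples[i] += sum >> qlevel;
        }
    }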
diff --git a/media/ffvpx/libavcodec/arm/flacdsp_init_arm.c b/media/ffvpx/libavcodec/arm/flacdsp_init_arm.c
new file mode 100644
index 0000000000..9962cc89f4
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/flacdsp_init_arm.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2012 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavcodec/flacdsp.h"
+
+void ff_flac_lpc_16_arm(int32_t *samples, const int coeffs[32], int order,
+ int qlevel, int len);
+
+av_cold void ff_flacdsp_init_arm(FLACDSPContext *c, enum AVSampleFormat fmt, int channels)
+{
+ c->lpc16 = ff_flac_lpc_16_arm;
+}
diff --git a/media/ffvpx/libavcodec/arm/idct.h b/media/ffvpx/libavcodec/arm/idct.h
new file mode 100644
index 0000000000..6c79a69c5f
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/idct.h
@@ -0,0 +1,41 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ARM_IDCT_H
+#define AVCODEC_ARM_IDCT_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+void ff_j_rev_dct_arm(int16_t *data);
+
+void ff_simple_idct_arm(int16_t *data);
+
+void ff_simple_idct_armv5te(int16_t *data);
+void ff_simple_idct_put_armv5te(uint8_t *dest, ptrdiff_t line_size, int16_t *data);
+void ff_simple_idct_add_armv5te(uint8_t *dest, ptrdiff_t line_size, int16_t *data);
+
+void ff_simple_idct_armv6(int16_t *data);
+void ff_simple_idct_put_armv6(uint8_t *dest, ptrdiff_t line_size, int16_t *data);
+void ff_simple_idct_add_armv6(uint8_t *dest, ptrdiff_t line_size, int16_t *data);
+
+void ff_simple_idct_neon(int16_t *data);
+void ff_simple_idct_put_neon(uint8_t *dest, ptrdiff_t line_size, int16_t *data);
+void ff_simple_idct_add_neon(uint8_t *dest, ptrdiff_t line_size, int16_t *data);
+
+#endif /* AVCODEC_ARM_IDCT_H */
diff --git a/media/ffvpx/libavcodec/arm/idctdsp_arm.S b/media/ffvpx/libavcodec/arm/idctdsp_arm.S
new file mode 100644
index 0000000000..057eff9be8
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/idctdsp_arm.S
@@ -0,0 +1,120 @@
+@
+@ ARMv4-optimized IDCT functions
+@ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp>
+@
+@ This file is part of FFmpeg.
+@
+@ FFmpeg is free software; you can redistribute it and/or
+@ modify it under the terms of the GNU Lesser General Public
+@ License as published by the Free Software Foundation; either
+@ version 2.1 of the License, or (at your option) any later version.
+@
+@ FFmpeg is distributed in the hope that it will be useful,
+@ but WITHOUT ANY WARRANTY; without even the implied warranty of
+@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+@ Lesser General Public License for more details.
+@
+@ You should have received a copy of the GNU Lesser General Public
+@ License along with FFmpeg; if not, write to the Free Software
+@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+@
+
+#include "config.h"
+#include "libavutil/arm/asm.S"
+
+@ void ff_add_pixels_clamped_arm(int16_t *block, uint8_t *dest, ptrdiff_t stride)
+function ff_add_pixels_clamped_arm, export=1, align=5
+ push {r4-r10}
+ mov r10, #8
+1:
+ ldr r4, [r1] /* load dest */
+ /* block[0] and block[1]*/
+ ldrsh r5, [r0]
+ ldrsh r7, [r0, #2]
+ and r6, r4, #0xFF
+ and r8, r4, #0xFF00
+ add r6, r6, r5
+ add r8, r7, r8, lsr #8
+ mvn r5, r5
+ mvn r7, r7
+ tst r6, #0x100
+ it ne
+ movne r6, r5, lsr #24
+ tst r8, #0x100
+ it ne
+ movne r8, r7, lsr #24
+ mov r9, r6
+ ldrsh r5, [r0, #4] /* moved from [A] */
+ orr r9, r9, r8, lsl #8
+ /* block[2] and block[3] */
+ /* [A] */
+ ldrsh r7, [r0, #6]
+ and r6, r4, #0xFF0000
+ and r8, r4, #0xFF000000
+ add r6, r5, r6, lsr #16
+ add r8, r7, r8, lsr #24
+ mvn r5, r5
+ mvn r7, r7
+ tst r6, #0x100
+ it ne
+ movne r6, r5, lsr #24
+ tst r8, #0x100
+ it ne
+ movne r8, r7, lsr #24
+ orr r9, r9, r6, lsl #16
+ ldr r4, [r1, #4] /* moved from [B] */
+ orr r9, r9, r8, lsl #24
+ /* store dest */
+ ldrsh r5, [r0, #8] /* moved from [C] */
+ str r9, [r1]
+
+ /* load dest */
+ /* [B] */
+ /* block[4] and block[5] */
+ /* [C] */
+ ldrsh r7, [r0, #10]
+ and r6, r4, #0xFF
+ and r8, r4, #0xFF00
+ add r6, r6, r5
+ add r8, r7, r8, lsr #8
+ mvn r5, r5
+ mvn r7, r7
+ tst r6, #0x100
+ it ne
+ movne r6, r5, lsr #24
+ tst r8, #0x100
+ it ne
+ movne r8, r7, lsr #24
+ mov r9, r6
+ ldrsh r5, [r0, #12] /* moved from [D] */
+ orr r9, r9, r8, lsl #8
+ /* block[6] and block[7] */
+ /* [D] */
+ ldrsh r7, [r0, #14]
+ and r6, r4, #0xFF0000
+ and r8, r4, #0xFF000000
+ add r6, r5, r6, lsr #16
+ add r8, r7, r8, lsr #24
+ mvn r5, r5
+ mvn r7, r7
+ tst r6, #0x100
+ it ne
+ movne r6, r5, lsr #24
+ tst r8, #0x100
+ it ne
+ movne r8, r7, lsr #24
+ orr r9, r9, r6, lsl #16
+ add r0, r0, #16 /* moved from [E] */
+ orr r9, r9, r8, lsl #24
+ subs r10, r10, #1 /* moved from [F] */
+ /* store dest */
+ str r9, [r1, #4]
+
+ /* [E] */
+ /* [F] */
+ add r1, r1, r2
+ bne 1b
+
+ pop {r4-r10}
+ bx lr
+endfunc
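
The word-at-a-time byte handling above (the tst #0x100 / mvn / lsr #24 sequences) is a branch-free clamp of each sum to 0..255 on a plain ARMv4 core. Functionally it corresponds to the obvious per-pixel loop; here is a portable sketch using an explicit clamp instead of the assembly's bit trick (illustrative, not the FFmpeg C reference verbatim).

    #include <stddef.h>
    #include <stdint.h>

    static inline uint8_t clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* 8x8 block: add the IDCT residual to the destination and saturate. */
    static void add_pixels_clamped_ref(const int16_t *block, uint8_t *dest,
                                       ptrdiff_t stride)
    {
        for (int i = 0; i < 8; i++) {
            for (int j = 0; j < 8; j++)
                dest[j] = clip_uint8(dest[j] + block[j]);
            block += 8;
            dest  += stride;
        }
    }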
diff --git a/media/ffvpx/libavcodec/arm/idctdsp_arm.h b/media/ffvpx/libavcodec/arm/idctdsp_arm.h
new file mode 100644
index 0000000000..d7bc5cd02a
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/idctdsp_arm.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ARM_IDCTDSP_ARM_H
+#define AVCODEC_ARM_IDCTDSP_ARM_H
+
+#include "libavcodec/avcodec.h"
+#include "libavcodec/idctdsp.h"
+
+void ff_idctdsp_init_armv5te(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_idctdsp_init_armv6(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_idctdsp_init_neon(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+
+#endif /* AVCODEC_ARM_IDCTDSP_ARM_H */
diff --git a/media/ffvpx/libavcodec/arm/idctdsp_armv6.S b/media/ffvpx/libavcodec/arm/idctdsp_armv6.S
new file mode 100644
index 0000000000..a6e77d6da1
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/idctdsp_armv6.S
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+function ff_add_pixels_clamped_armv6, export=1
+ push {r4-r8,lr}
+ mov r3, #8
+1:
+ ldm r0!, {r4,r5,r12,lr}
+ ldrd r6, r7, [r1]
+ pkhbt r8, r4, r5, lsl #16
+ pkhtb r5, r5, r4, asr #16
+ pkhbt r4, r12, lr, lsl #16
+ pkhtb lr, lr, r12, asr #16
+ pld [r1, r2]
+ uxtab16 r8, r8, r6
+ uxtab16 r5, r5, r6, ror #8
+ uxtab16 r4, r4, r7
+ uxtab16 lr, lr, r7, ror #8
+ usat16 r8, #8, r8
+ usat16 r5, #8, r5
+ usat16 r4, #8, r4
+ usat16 lr, #8, lr
+ orr r6, r8, r5, lsl #8
+ orr r7, r4, lr, lsl #8
+ subs r3, r3, #1
+ strd_post r6, r7, r1, r2
+ bgt 1b
+ pop {r4-r8,pc}
+endfunc
diff --git a/media/ffvpx/libavcodec/arm/idctdsp_init_arm.c b/media/ffvpx/libavcodec/arm/idctdsp_init_arm.c
new file mode 100644
index 0000000000..ebc90e4b49
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/idctdsp_init_arm.c
@@ -0,0 +1,94 @@
+/*
+ * ARM-optimized IDCT functions
+ * Copyright (c) 2001 Lionel Ulmer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/arm/cpu.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/idctdsp.h"
+#include "idct.h"
+#include "idctdsp_arm.h"
+
+void ff_add_pixels_clamped_arm(const int16_t *block, uint8_t *dest,
+ ptrdiff_t line_size);
+
+/* XXX: these functions should be removed as soon as all IDCTs are
+ * converted */
+static void j_rev_dct_arm_put(uint8_t *dest, ptrdiff_t line_size,
+ int16_t *block)
+{
+ ff_j_rev_dct_arm(block);
+ ff_put_pixels_clamped_c(block, dest, line_size);
+}
+
+static void j_rev_dct_arm_add(uint8_t *dest, ptrdiff_t line_size,
+ int16_t *block)
+{
+ ff_j_rev_dct_arm(block);
+ ff_add_pixels_clamped_arm(block, dest, line_size);
+}
+
+static void simple_idct_arm_put(uint8_t *dest, ptrdiff_t line_size,
+ int16_t *block)
+{
+ ff_simple_idct_arm(block);
+ ff_put_pixels_clamped_c(block, dest, line_size);
+}
+
+static void simple_idct_arm_add(uint8_t *dest, ptrdiff_t line_size,
+ int16_t *block)
+{
+ ff_simple_idct_arm(block);
+ ff_add_pixels_clamped_arm(block, dest, line_size);
+}
+
+av_cold void ff_idctdsp_init_arm(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (!avctx->lowres && !high_bit_depth) {
+ if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) ||
+ avctx->idct_algo == FF_IDCT_ARM) {
+ c->idct_put = j_rev_dct_arm_put;
+ c->idct_add = j_rev_dct_arm_add;
+ c->idct = ff_j_rev_dct_arm;
+ c->perm_type = FF_IDCT_PERM_LIBMPEG2;
+ } else if (avctx->idct_algo == FF_IDCT_SIMPLEARM) {
+ c->idct_put = simple_idct_arm_put;
+ c->idct_add = simple_idct_arm_add;
+ c->idct = ff_simple_idct_arm;
+ c->perm_type = FF_IDCT_PERM_NONE;
+ }
+ }
+
+ c->add_pixels_clamped = ff_add_pixels_clamped_arm;
+
+ if (have_armv5te(cpu_flags))
+ ff_idctdsp_init_armv5te(c, avctx, high_bit_depth);
+ if (have_armv6(cpu_flags))
+ ff_idctdsp_init_armv6(c, avctx, high_bit_depth);
+ if (have_neon(cpu_flags))
+ ff_idctdsp_init_neon(c, avctx, high_bit_depth);
+}
diff --git a/media/ffvpx/libavcodec/arm/idctdsp_init_armv5te.c b/media/ffvpx/libavcodec/arm/idctdsp_init_armv5te.c
new file mode 100644
index 0000000000..3d881e1f18
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/idctdsp_init_armv5te.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/idctdsp.h"
+#include "idct.h"
+#include "idctdsp_arm.h"
+
+av_cold void ff_idctdsp_init_armv5te(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth)
+{
+ if (!avctx->lowres && !high_bit_depth &&
+ (avctx->idct_algo == FF_IDCT_AUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEARMV5TE)) {
+ c->idct_put = ff_simple_idct_put_armv5te;
+ c->idct_add = ff_simple_idct_add_armv5te;
+ c->idct = ff_simple_idct_armv5te;
+ c->perm_type = FF_IDCT_PERM_NONE;
+ }
+}
diff --git a/media/ffvpx/libavcodec/arm/idctdsp_init_armv6.c b/media/ffvpx/libavcodec/arm/idctdsp_init_armv6.c
new file mode 100644
index 0000000000..edf3070e15
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/idctdsp_init_armv6.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/idctdsp.h"
+#include "idct.h"
+#include "idctdsp_arm.h"
+
+void ff_add_pixels_clamped_armv6(const int16_t *block, uint8_t *pixels,
+ ptrdiff_t line_size);
+
+av_cold void ff_idctdsp_init_armv6(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth)
+{
+ if (!avctx->lowres && !high_bit_depth) {
+ if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) ||
+ avctx->idct_algo == FF_IDCT_SIMPLEARMV6) {
+ c->idct_put = ff_simple_idct_put_armv6;
+ c->idct_add = ff_simple_idct_add_armv6;
+ c->idct = ff_simple_idct_armv6;
+ c->perm_type = FF_IDCT_PERM_LIBMPEG2;
+ }
+ }
+ c->add_pixels_clamped = ff_add_pixels_clamped_armv6;
+}
diff --git a/media/ffvpx/libavcodec/arm/idctdsp_init_neon.c b/media/ffvpx/libavcodec/arm/idctdsp_init_neon.c
new file mode 100644
index 0000000000..b70c5b0d44
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/idctdsp_init_neon.c
@@ -0,0 +1,51 @@
+/*
+ * ARM-NEON-optimized IDCT functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/idctdsp.h"
+#include "idct.h"
+#include "idctdsp_arm.h"
+
+void ff_add_pixels_clamped_neon(const int16_t *, uint8_t *, ptrdiff_t);
+void ff_put_pixels_clamped_neon(const int16_t *, uint8_t *, ptrdiff_t);
+void ff_put_signed_pixels_clamped_neon(const int16_t *, uint8_t *, ptrdiff_t);
+
+av_cold void ff_idctdsp_init_neon(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth)
+{
+ if (!avctx->lowres && !high_bit_depth) {
+ if (avctx->idct_algo == FF_IDCT_AUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLENEON) {
+ c->idct_put = ff_simple_idct_put_neon;
+ c->idct_add = ff_simple_idct_add_neon;
+ c->idct = ff_simple_idct_neon;
+ c->perm_type = FF_IDCT_PERM_PARTTRANS;
+ }
+ }
+
+ c->add_pixels_clamped = ff_add_pixels_clamped_neon;
+ c->put_pixels_clamped = ff_put_pixels_clamped_neon;
+ c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_neon;
+}
diff --git a/media/ffvpx/libavcodec/arm/idctdsp_neon.S b/media/ffvpx/libavcodec/arm/idctdsp_neon.S
new file mode 100644
index 0000000000..1911a33468
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/idctdsp_neon.S
@@ -0,0 +1,128 @@
+/*
+ * ARM-NEON-optimized IDCT functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+function ff_put_pixels_clamped_neon, export=1
+ vld1.16 {d16-d19}, [r0,:128]!
+ vqmovun.s16 d0, q8
+ vld1.16 {d20-d23}, [r0,:128]!
+ vqmovun.s16 d1, q9
+ vld1.16 {d24-d27}, [r0,:128]!
+ vqmovun.s16 d2, q10
+ vld1.16 {d28-d31}, [r0,:128]!
+ vqmovun.s16 d3, q11
+ vst1.8 {d0}, [r1,:64], r2
+ vqmovun.s16 d4, q12
+ vst1.8 {d1}, [r1,:64], r2
+ vqmovun.s16 d5, q13
+ vst1.8 {d2}, [r1,:64], r2
+ vqmovun.s16 d6, q14
+ vst1.8 {d3}, [r1,:64], r2
+ vqmovun.s16 d7, q15
+ vst1.8 {d4}, [r1,:64], r2
+ vst1.8 {d5}, [r1,:64], r2
+ vst1.8 {d6}, [r1,:64], r2
+ vst1.8 {d7}, [r1,:64], r2
+ bx lr
+endfunc
+
+function ff_put_signed_pixels_clamped_neon, export=1
+ vmov.u8 d31, #128
+ vld1.16 {d16-d17}, [r0,:128]!
+ vqmovn.s16 d0, q8
+ vld1.16 {d18-d19}, [r0,:128]!
+ vqmovn.s16 d1, q9
+ vld1.16 {d16-d17}, [r0,:128]!
+ vqmovn.s16 d2, q8
+ vld1.16 {d18-d19}, [r0,:128]!
+ vadd.u8 d0, d0, d31
+ vld1.16 {d20-d21}, [r0,:128]!
+ vadd.u8 d1, d1, d31
+ vld1.16 {d22-d23}, [r0,:128]!
+ vadd.u8 d2, d2, d31
+ vst1.8 {d0}, [r1,:64], r2
+ vqmovn.s16 d3, q9
+ vst1.8 {d1}, [r1,:64], r2
+ vqmovn.s16 d4, q10
+ vst1.8 {d2}, [r1,:64], r2
+ vqmovn.s16 d5, q11
+ vld1.16 {d24-d25}, [r0,:128]!
+ vadd.u8 d3, d3, d31
+ vld1.16 {d26-d27}, [r0,:128]!
+ vadd.u8 d4, d4, d31
+ vadd.u8 d5, d5, d31
+ vst1.8 {d3}, [r1,:64], r2
+ vqmovn.s16 d6, q12
+ vst1.8 {d4}, [r1,:64], r2
+ vqmovn.s16 d7, q13
+ vst1.8 {d5}, [r1,:64], r2
+ vadd.u8 d6, d6, d31
+ vadd.u8 d7, d7, d31
+ vst1.8 {d6}, [r1,:64], r2
+ vst1.8 {d7}, [r1,:64], r2
+ bx lr
+endfunc
+
+function ff_add_pixels_clamped_neon, export=1
+ mov r3, r1
+ vld1.8 {d16}, [r1,:64], r2
+ vld1.16 {d0-d1}, [r0,:128]!
+ vaddw.u8 q0, q0, d16
+ vld1.8 {d17}, [r1,:64], r2
+ vld1.16 {d2-d3}, [r0,:128]!
+ vqmovun.s16 d0, q0
+ vld1.8 {d18}, [r1,:64], r2
+ vaddw.u8 q1, q1, d17
+ vld1.16 {d4-d5}, [r0,:128]!
+ vaddw.u8 q2, q2, d18
+ vst1.8 {d0}, [r3,:64], r2
+ vqmovun.s16 d2, q1
+ vld1.8 {d19}, [r1,:64], r2
+ vld1.16 {d6-d7}, [r0,:128]!
+ vaddw.u8 q3, q3, d19
+ vqmovun.s16 d4, q2
+ vst1.8 {d2}, [r3,:64], r2
+ vld1.8 {d16}, [r1,:64], r2
+ vqmovun.s16 d6, q3
+ vld1.16 {d0-d1}, [r0,:128]!
+ vaddw.u8 q0, q0, d16
+ vst1.8 {d4}, [r3,:64], r2
+ vld1.8 {d17}, [r1,:64], r2
+ vld1.16 {d2-d3}, [r0,:128]!
+ vaddw.u8 q1, q1, d17
+ vst1.8 {d6}, [r3,:64], r2
+ vqmovun.s16 d0, q0
+ vld1.8 {d18}, [r1,:64], r2
+ vld1.16 {d4-d5}, [r0,:128]!
+ vaddw.u8 q2, q2, d18
+ vst1.8 {d0}, [r3,:64], r2
+ vqmovun.s16 d2, q1
+ vld1.8 {d19}, [r1,:64], r2
+ vqmovun.s16 d4, q2
+ vld1.16 {d6-d7}, [r0,:128]!
+ vaddw.u8 q3, q3, d19
+ vst1.8 {d2}, [r3,:64], r2
+ vqmovun.s16 d6, q3
+ vst1.8 {d4}, [r3,:64], r2
+ vst1.8 {d6}, [r3,:64], r2
+ bx lr
+endfunc
diff --git a/media/ffvpx/libavcodec/arm/jrevdct_arm.S b/media/ffvpx/libavcodec/arm/jrevdct_arm.S
new file mode 100644
index 0000000000..f951e2af34
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/jrevdct_arm.S
@@ -0,0 +1,383 @@
+/*
+ C-like prototype:
+ void j_rev_dct_arm(DCTBLOCK data)
+
+ With DCTBLOCK being a pointer to an array of 64 'signed shorts'
+
+ Copyright (c) 2001 Lionel Ulmer (lionel.ulmer@free.fr / bbrox@bbrox.org)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+*/
+
+#include "libavutil/arm/asm.S"
+
+#define FIX_0_298631336 2446
+#define FIX_0_541196100 4433
+#define FIX_0_765366865 6270
+#define FIX_1_175875602 9633
+#define FIX_1_501321110 12299
+#define FIX_2_053119869 16819
+#define FIX_3_072711026 25172
+#define FIX_M_0_390180644 -3196
+#define FIX_M_0_899976223 -7373
+#define FIX_M_1_847759065 -15137
+#define FIX_M_1_961570560 -16069
+#define FIX_M_2_562915447 -20995
+#define FIX_0xFFFF 0xFFFF
+
+#define FIX_0_298631336_ID 0
+#define FIX_0_541196100_ID 4
+#define FIX_0_765366865_ID 8
+#define FIX_1_175875602_ID 12
+#define FIX_1_501321110_ID 16
+#define FIX_2_053119869_ID 20
+#define FIX_3_072711026_ID 24
+#define FIX_M_0_390180644_ID 28
+#define FIX_M_0_899976223_ID 32
+#define FIX_M_1_847759065_ID 36
+#define FIX_M_1_961570560_ID 40
+#define FIX_M_2_562915447_ID 44
+#define FIX_0xFFFF_ID 48
+
+function ff_j_rev_dct_arm, export=1
+ push {r0, r4 - r11, lr}
+
+ mov lr, r0 @ lr = pointer to the current row
+ mov r12, #8 @ r12 = row-counter
+ movrel r11, const_array @ r11 = base pointer to the constants array
+row_loop:
+ ldrsh r0, [lr, # 0] @ r0 = 'd0'
+ ldrsh r2, [lr, # 2] @ r2 = 'd2'
+
+ @ Optimization for rows that have all items except the first set to 0
+ @ (this works because the int16_t data is always 4-byte aligned)
+ ldr r5, [lr, # 0]
+ ldr r6, [lr, # 4]
+ ldr r3, [lr, # 8]
+ ldr r4, [lr, #12]
+ orr r3, r3, r4
+ orr r3, r3, r6
+ orrs r5, r3, r5
+ beq end_of_row_loop @ nothing to be done as ALL of them are '0'
+ orrs r3, r3, r2
+ beq empty_row
+
+ ldrsh r1, [lr, # 8] @ r1 = 'd1'
+ ldrsh r4, [lr, # 4] @ r4 = 'd4'
+ ldrsh r6, [lr, # 6] @ r6 = 'd6'
+
+ ldr r3, [r11, #FIX_0_541196100_ID]
+ add r7, r2, r6
+ ldr r5, [r11, #FIX_M_1_847759065_ID]
+ mul r7, r3, r7 @ r7 = z1
+ ldr r3, [r11, #FIX_0_765366865_ID]
+ mla r6, r5, r6, r7 @ r6 = tmp2
+ add r5, r0, r4 @ r5 = tmp0
+ mla r2, r3, r2, r7 @ r2 = tmp3
+ sub r3, r0, r4 @ r3 = tmp1
+
+ add r0, r2, r5, lsl #13 @ r0 = tmp10
+ rsb r2, r2, r5, lsl #13 @ r2 = tmp13
+ add r4, r6, r3, lsl #13 @ r4 = tmp11
+ rsb r3, r6, r3, lsl #13 @ r3 = tmp12
+
+ push {r0, r2, r3, r4} @ save on the stack tmp10, tmp13, tmp12, tmp11
+
+ ldrsh r3, [lr, #10] @ r3 = 'd3'
+ ldrsh r5, [lr, #12] @ r5 = 'd5'
+ ldrsh r7, [lr, #14] @ r7 = 'd7'
+
+ add r0, r3, r5 @ r0 = 'z2'
+ add r2, r1, r7 @ r2 = 'z1'
+ add r4, r3, r7 @ r4 = 'z3'
+ add r6, r1, r5 @ r6 = 'z4'
+ ldr r9, [r11, #FIX_1_175875602_ID]
+ add r8, r4, r6 @ r8 = z3 + z4
+ ldr r10, [r11, #FIX_M_0_899976223_ID]
+ mul r8, r9, r8 @ r8 = 'z5'
+ ldr r9, [r11, #FIX_M_2_562915447_ID]
+ mul r2, r10, r2 @ r2 = 'z1'
+ ldr r10, [r11, #FIX_M_1_961570560_ID]
+ mul r0, r9, r0 @ r0 = 'z2'
+ ldr r9, [r11, #FIX_M_0_390180644_ID]
+ mla r4, r10, r4, r8 @ r4 = 'z3'
+ ldr r10, [r11, #FIX_0_298631336_ID]
+ mla r6, r9, r6, r8 @ r6 = 'z4'
+ ldr r9, [r11, #FIX_2_053119869_ID]
+ mla r7, r10, r7, r2 @ r7 = tmp0 + z1
+ ldr r10, [r11, #FIX_3_072711026_ID]
+ mla r5, r9, r5, r0 @ r5 = tmp1 + z2
+ ldr r9, [r11, #FIX_1_501321110_ID]
+ mla r3, r10, r3, r0 @ r3 = tmp2 + z2
+ add r7, r7, r4 @ r7 = tmp0
+ mla r1, r9, r1, r2 @ r1 = tmp3 + z1
+ add r5, r5, r6 @ r5 = tmp1
+ add r3, r3, r4 @ r3 = tmp2
+ add r1, r1, r6 @ r1 = tmp3
+
+ pop {r0, r2, r4, r6} @ r0 = tmp10 / r2 = tmp13 / r4 = tmp12 / r6 = tmp11
+ @ r1 = tmp3 / r3 = tmp2 / r5 = tmp1 / r7 = tmp0
+
+ @ Compute DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS)
+ add r8, r0, r1
+ add r8, r8, #(1<<10)
+ mov r8, r8, asr #11
+ strh r8, [lr, # 0]
+
+ @ Compute DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS)
+ sub r8, r0, r1
+ add r8, r8, #(1<<10)
+ mov r8, r8, asr #11
+ strh r8, [lr, #14]
+
+ @ Compute DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS)
+ add r8, r6, r3
+ add r8, r8, #(1<<10)
+ mov r8, r8, asr #11
+ strh r8, [lr, # 2]
+
+ @ Compute DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS)
+ sub r8, r6, r3
+ add r8, r8, #(1<<10)
+ mov r8, r8, asr #11
+ strh r8, [lr, #12]
+
+ @ Compute DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS)
+ add r8, r4, r5
+ add r8, r8, #(1<<10)
+ mov r8, r8, asr #11
+ strh r8, [lr, # 4]
+
+ @ Compute DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS)
+ sub r8, r4, r5
+ add r8, r8, #(1<<10)
+ mov r8, r8, asr #11
+ strh r8, [lr, #10]
+
+ @ Compute DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS)
+ add r8, r2, r7
+ add r8, r8, #(1<<10)
+ mov r8, r8, asr #11
+ strh r8, [lr, # 6]
+
+ @ Compute DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS)
+ sub r8, r2, r7
+ add r8, r8, #(1<<10)
+ mov r8, r8, asr #11
+ strh r8, [lr, # 8]
+
+ @ End of row loop
+ add lr, lr, #16
+ subs r12, r12, #1
+ bne row_loop
+ beq start_column_loop
+
+empty_row:
+ ldr r1, [r11, #FIX_0xFFFF_ID]
+ mov r0, r0, lsl #2
+ and r0, r0, r1
+ add r0, r0, r0, lsl #16
+ str r0, [lr, # 0]
+ str r0, [lr, # 4]
+ str r0, [lr, # 8]
+ str r0, [lr, #12]
+
+end_of_row_loop:
+ @ End of loop
+ add lr, lr, #16
+ subs r12, r12, #1
+ bne row_loop
+
+start_column_loop:
+ @ Start of column loop
+ pop {lr}
+ mov r12, #8
+column_loop:
+ ldrsh r0, [lr, #( 0*8)] @ r0 = 'd0'
+ ldrsh r2, [lr, #( 4*8)] @ r2 = 'd2'
+ ldrsh r4, [lr, #( 8*8)] @ r4 = 'd4'
+ ldrsh r6, [lr, #(12*8)] @ r6 = 'd6'
+
+ ldr r3, [r11, #FIX_0_541196100_ID]
+ add r1, r2, r6
+ ldr r5, [r11, #FIX_M_1_847759065_ID]
+ mul r1, r3, r1 @ r1 = z1
+ ldr r3, [r11, #FIX_0_765366865_ID]
+ mla r6, r5, r6, r1 @ r6 = tmp2
+ add r5, r0, r4 @ r5 = tmp0
+ mla r2, r3, r2, r1 @ r2 = tmp3
+ sub r3, r0, r4 @ r3 = tmp1
+
+ add r0, r2, r5, lsl #13 @ r0 = tmp10
+ rsb r2, r2, r5, lsl #13 @ r2 = tmp13
+ add r4, r6, r3, lsl #13 @ r4 = tmp11
+ rsb r6, r6, r3, lsl #13 @ r6 = tmp12
+
+ ldrsh r1, [lr, #( 2*8)] @ r1 = 'd1'
+ ldrsh r3, [lr, #( 6*8)] @ r3 = 'd3'
+ ldrsh r5, [lr, #(10*8)] @ r5 = 'd5'
+ ldrsh r7, [lr, #(14*8)] @ r7 = 'd7'
+
+ @ Check for empty odd column (happens about 20 to 25 % of the time according to my stats)
+ orr r9, r1, r3
+ orr r10, r5, r7
+ orrs r10, r9, r10
+ beq empty_odd_column
+
+ push {r0, r2, r4, r6} @ save on the stack tmp10, tmp13, tmp12, tmp11
+
+ add r0, r3, r5 @ r0 = 'z2'
+ add r2, r1, r7 @ r2 = 'z1'
+ add r4, r3, r7 @ r4 = 'z3'
+ add r6, r1, r5 @ r6 = 'z4'
+ ldr r9, [r11, #FIX_1_175875602_ID]
+ add r8, r4, r6
+ ldr r10, [r11, #FIX_M_0_899976223_ID]
+ mul r8, r9, r8 @ r8 = 'z5'
+ ldr r9, [r11, #FIX_M_2_562915447_ID]
+ mul r2, r10, r2 @ r2 = 'z1'
+ ldr r10, [r11, #FIX_M_1_961570560_ID]
+ mul r0, r9, r0 @ r0 = 'z2'
+ ldr r9, [r11, #FIX_M_0_390180644_ID]
+ mla r4, r10, r4, r8 @ r4 = 'z3'
+ ldr r10, [r11, #FIX_0_298631336_ID]
+ mla r6, r9, r6, r8 @ r6 = 'z4'
+ ldr r9, [r11, #FIX_2_053119869_ID]
+ mla r7, r10, r7, r2 @ r7 = tmp0 + z1
+ ldr r10, [r11, #FIX_3_072711026_ID]
+ mla r5, r9, r5, r0 @ r5 = tmp1 + z2
+ ldr r9, [r11, #FIX_1_501321110_ID]
+ mla r3, r10, r3, r0 @ r3 = tmp2 + z2
+ add r7, r7, r4 @ r7 = tmp0
+ mla r1, r9, r1, r2 @ r1 = tmp3 + z1
+ add r5, r5, r6 @ r5 = tmp1
+ add r3, r3, r4 @ r3 = tmp2
+ add r1, r1, r6 @ r1 = tmp3
+
+ pop {r0, r2, r4, r6} @ r0 = tmp10 / r2 = tmp13 / r4 = tmp11 / r6 = tmp12
+ @ r1 = tmp3 / r3 = tmp2 / r5 = tmp1 / r7 = tmp0
+
+ @ Compute DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3)
+ add r8, r0, r1
+ add r8, r8, #(1<<17)
+ mov r8, r8, asr #18
+ strh r8, [lr, #( 0*8)]
+
+ @ Compute DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3)
+ sub r8, r0, r1
+ add r8, r8, #(1<<17)
+ mov r8, r8, asr #18
+ strh r8, [lr, #(14*8)]
+
+ @ Compute DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3)
+ add r8, r4, r3
+ add r8, r8, #(1<<17)
+ mov r8, r8, asr #18
+ strh r8, [lr, #( 2*8)]
+
+ @ Compute DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3)
+ sub r8, r4, r3
+ add r8, r8, #(1<<17)
+ mov r8, r8, asr #18
+ strh r8, [lr, #(12*8)]
+
+ @ Compute DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3)
+ add r8, r6, r5
+ add r8, r8, #(1<<17)
+ mov r8, r8, asr #18
+ strh r8, [lr, #( 4*8)]
+
+ @ Compute DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3)
+ sub r8, r6, r5
+ add r8, r8, #(1<<17)
+ mov r8, r8, asr #18
+ strh r8, [lr, #(10*8)]
+
+ @ Compute DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3)
+ add r8, r2, r7
+ add r8, r8, #(1<<17)
+ mov r8, r8, asr #18
+ strh r8, [lr, #( 6*8)]
+
+ @ Compute DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3)
+ sub r8, r2, r7
+ add r8, r8, #(1<<17)
+ mov r8, r8, asr #18
+ strh r8, [lr, #( 8*8)]
+
+ @ End of column loop
+ add lr, lr, #2
+ subs r12, r12, #1
+ bne column_loop
+ beq the_end
+
+empty_odd_column:
+ @ Compute DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3)
+ @ Compute DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3)
+ add r0, r0, #(1<<17)
+ mov r0, r0, asr #18
+ strh r0, [lr, #( 0*8)]
+ strh r0, [lr, #(14*8)]
+
+ @ Compute DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3)
+ @ Compute DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3)
+ add r4, r4, #(1<<17)
+ mov r4, r4, asr #18
+ strh r4, [lr, #( 2*8)]
+ strh r4, [lr, #(12*8)]
+
+ @ Compute DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3)
+ @ Compute DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3)
+ add r6, r6, #(1<<17)
+ mov r6, r6, asr #18
+ strh r6, [lr, #( 4*8)]
+ strh r6, [lr, #(10*8)]
+
+ @ Compute DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3)
+ @ Compute DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3)
+ add r2, r2, #(1<<17)
+ mov r2, r2, asr #18
+ strh r2, [lr, #( 6*8)]
+ strh r2, [lr, #( 8*8)]
+
+ @ End of column loop
+ add lr, lr, #2
+ subs r12, r12, #1
+ bne column_loop
+
+the_end:
+ @ The end....
+ pop {r4 - r11, pc}
+endfunc
+
+const const_array
+ .word FIX_0_298631336
+ .word FIX_0_541196100
+ .word FIX_0_765366865
+ .word FIX_1_175875602
+ .word FIX_1_501321110
+ .word FIX_2_053119869
+ .word FIX_3_072711026
+ .word FIX_M_0_390180644
+ .word FIX_M_0_899976223
+ .word FIX_M_1_847759065
+ .word FIX_M_1_961570560
+ .word FIX_M_2_562915447
+ .word FIX_0xFFFF
+endconst
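
The add #(1<<10) / asr #11 pairs in the row pass and the add #(1<<17) / asr #18 pairs in the column pass above are the DESCALE steps of the jpeglib-style integer IDCT: a rounded arithmetic right shift. In C terms, with CONST_BITS and PASS1_BITS inferred from the shift amounts in the assembly:

    #define CONST_BITS 13
    #define PASS1_BITS 2

    /* Rounded arithmetic right shift used by both passes. */
    #define DESCALE(x, n) (((x) + (1 << ((n) - 1))) >> (n))

    /* Row pass:    DESCALE(v, CONST_BITS - PASS1_BITS)     -> add 1<<10, asr #11 */
    /* Column pass: DESCALE(v, CONST_BITS + PASS1_BITS + 3) -> add 1<<17, asr #18 */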
diff --git a/media/ffvpx/libavcodec/arm/mathops.h b/media/ffvpx/libavcodec/arm/mathops.h
new file mode 100644
index 0000000000..dc57c5571c
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/mathops.h
@@ -0,0 +1,108 @@
+/*
+ * simple math operations
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ARM_MATHOPS_H
+#define AVCODEC_ARM_MATHOPS_H
+
+#include <stdint.h>
+#include "config.h"
+#include "libavutil/common.h"
+
+#if HAVE_INLINE_ASM
+
+#if HAVE_ARMV6_INLINE
+#define MULH MULH
+static inline av_const int MULH(int a, int b)
+{
+ int r;
+ __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
+ return r;
+}
+
+#define FASTDIV FASTDIV
+static av_always_inline av_const int FASTDIV(int a, int b)
+{
+ int r;
+ __asm__ ("cmp %2, #2 \n\t"
+ "ldr %0, [%3, %2, lsl #2] \n\t"
+ "ite le \n\t"
+ "lsrle %0, %1, #1 \n\t"
+ "smmulgt %0, %0, %1 \n\t"
+ : "=&r"(r) : "r"(a), "r"(b), "r"(ff_inverse) : "cc");
+ return r;
+}
+
+#else /* HAVE_ARMV6_INLINE */
+
+#define FASTDIV FASTDIV
+static av_always_inline av_const int FASTDIV(int a, int b)
+{
+ int r, t;
+ __asm__ ("umull %1, %0, %2, %3"
+ : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
+ return r;
+}
+#endif
+
+#define MLS64(d, a, b) MAC64(d, -(a), b)
+
+#if HAVE_ARMV5TE_INLINE
+
+/* signed 16x16 -> 32 multiply add accumulate */
+# define MAC16(rt, ra, rb) \
+ __asm__ ("smlabb %0, %1, %2, %0" : "+r"(rt) : "r"(ra), "r"(rb));
+
+/* signed 16x16 -> 32 multiply */
+# define MUL16 MUL16
+static inline av_const int MUL16(int ra, int rb)
+{
+ int rt;
+ __asm__ ("smulbb %0, %1, %2" : "=r"(rt) : "r"(ra), "r"(rb));
+ return rt;
+}
+
+#endif
+
+#define mid_pred mid_pred
+static inline av_const int mid_pred(int a, int b, int c)
+{
+ int m;
+ __asm__ (
+ "mov %0, %2 \n\t"
+ "cmp %1, %2 \n\t"
+ "itt gt \n\t"
+ "movgt %0, %1 \n\t"
+ "movgt %1, %2 \n\t"
+ "cmp %1, %3 \n\t"
+ "it le \n\t"
+ "movle %1, %3 \n\t"
+ "cmp %0, %1 \n\t"
+ "it gt \n\t"
+ "movgt %0, %1 \n\t"
+ : "=&r"(m), "+r"(a)
+ : "r"(b), "r"(c)
+ : "cc");
+ return m;
+}
+
+#endif /* HAVE_INLINE_ASM */
+
+#endif /* AVCODEC_ARM_MATHOPS_H */
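
For reference, the inline-asm helpers above have straightforward portable equivalents: MULH is the high 32 bits of a signed 32x32 multiply, FASTDIV divides by multiplying with a precomputed ~2^32/b reciprocal from the ff_inverse table, and mid_pred is the median of three values. A sketch of those fallbacks follows; it paraphrases the generic versions rather than copying them, and the _ref names are illustrative.

    #include <stdint.h>

    extern const uint32_t ff_inverse[257];   /* table of ~2^32 / b reciprocals */

    static inline int mulh_ref(int a, int b)                   /* MULH */
    {
        return (int)(((int64_t)a * b) >> 32);
    }

    static inline unsigned fastdiv_ref(unsigned a, unsigned b) /* FASTDIV */
    {
        return (unsigned)(((uint64_t)a * ff_inverse[b]) >> 32);
    }

    static inline int mid_pred_ref(int a, int b, int c)        /* mid_pred */
    {
        if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b        */
        if (b > c) b = a > c ? a : c;            /* median of a, b and c */
        return b;
    }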
diff --git a/media/ffvpx/libavcodec/arm/moz.build b/media/ffvpx/libavcodec/arm/moz.build
new file mode 100644
index 0000000000..dafeab21d8
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/moz.build
@@ -0,0 +1,33 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ 'fft_init_arm.c',
+ 'fft_neon.S',
+ 'fft_vfp.S',
+ 'flacdsp_arm.S',
+ 'flacdsp_init_arm.c',
+ 'idctdsp_arm.S',
+ 'idctdsp_armv6.S',
+ 'idctdsp_init_arm.c',
+ 'idctdsp_init_armv5te.c',
+ 'idctdsp_init_armv6.c',
+ 'idctdsp_init_neon.c',
+ 'idctdsp_neon.S',
+ 'jrevdct_arm.S',
+ 'mpegaudiodsp_fixed_armv6.S',
+ 'mpegaudiodsp_init_arm.c',
+ 'rdft_init_arm.c',
+ 'rdft_neon.S',
+ 'simple_idct_arm.S',
+ 'simple_idct_armv5te.S',
+ 'simple_idct_armv6.S',
+ 'simple_idct_neon.S',
+]
+
+FINAL_LIBRARY = 'mozavcodec'
+
+include('/media/ffvpx/ffvpxcommon.mozbuild')
diff --git a/media/ffvpx/libavcodec/arm/mpegaudiodsp_fixed_armv6.S b/media/ffvpx/libavcodec/arm/mpegaudiodsp_fixed_armv6.S
new file mode 100644
index 0000000000..977abb6939
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/mpegaudiodsp_fixed_armv6.S
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+.macro skip args:vararg
+.endm
+
+.macro sum8 lo, hi, w, p, t1, t2, t3, t4, rsb=skip, offs=0
+ ldr \t1, [\w, #4*\offs]
+ ldr \t2, [\p, #4]!
+ \rsb \t1, \t1, #0
+ .irpc i, 135
+ ldr \t3, [\w, #4*64*\i+4*\offs]
+ ldr \t4, [\p, #4*64*\i]
+ smlal \lo, \hi, \t1, \t2
+ \rsb \t3, \t3, #0
+ ldr \t1, [\w, #4*64*(\i+1)+4*\offs]
+ ldr \t2, [\p, #4*64*(\i+1)]
+ smlal \lo, \hi, \t3, \t4
+ \rsb \t1, \t1, #0
+ .endr
+ ldr \t3, [\w, #4*64*7+4*\offs]
+ ldr \t4, [\p, #4*64*7]
+ smlal \lo, \hi, \t1, \t2
+ \rsb \t3, \t3, #0
+ smlal \lo, \hi, \t3, \t4
+.endm
+
+.macro round rd, lo, hi
+ lsr \rd, \lo, #24
+ bic \lo, \lo, #0xff000000
+ orr \rd, \rd, \hi, lsl #8
+ mov \hi, #0
+ ssat \rd, #16, \rd
+.endm
+
+function ff_mpadsp_apply_window_fixed_armv6, export=1
+ push {r2,r4-r11,lr}
+
+ add r4, r0, #4*512 @ synth_buf + 512
+ .rept 4
+ ldm r0!, {r5-r12}
+ stm r4!, {r5-r12}
+ .endr
+
+ ldr r4, [sp, #40] @ incr
+ sub r0, r0, #4*17 @ synth_buf + 16
+ ldr r8, [r2] @ sum:low
+ add r2, r0, #4*32 @ synth_buf + 48
+ rsb r5, r4, r4, lsl #5 @ 31 * incr
+ lsl r4, r4, #1
+ asr r9, r8, #31 @ sum:high
+ add r5, r3, r5, lsl #1 @ samples2
+ add r6, r1, #4*32 @ w2
+ str r4, [sp, #40]
+
+ sum8 r8, r9, r1, r0, r10, r11, r12, lr
+ sum8 r8, r9, r1, r2, r10, r11, r12, lr, rsb, 32
+ round r10, r8, r9
+ strh_post r10, r3, r4
+
+ mov lr, #15
+1:
+ ldr r12, [r0, #4]!
+ ldr r11, [r6, #-4]!
+ ldr r10, [r1, #4]!
+ .irpc i, 0246
+ .if \i
+ ldr r11, [r6, #4*64*\i]
+ ldr r10, [r1, #4*64*\i]
+ .endif
+ rsb r11, r11, #0
+ smlal r8, r9, r10, r12
+ ldr r10, [r0, #4*64*(\i+1)]
+ .ifeq \i
+ smull r4, r7, r11, r12
+ .else
+ smlal r4, r7, r11, r12
+ .endif
+ ldr r11, [r6, #4*64*(\i+1)]
+ ldr r12, [r1, #4*64*(\i+1)]
+ rsb r11, r11, #0
+ smlal r8, r9, r12, r10
+ .iflt \i-6
+ ldr r12, [r0, #4*64*(\i+2)]
+ .else
+ ldr r12, [r2, #-4]!
+ .endif
+ smlal r4, r7, r11, r10
+ .endr
+ .irpc i, 0246
+ ldr r10, [r1, #4*64*\i+4*32]
+ rsb r12, r12, #0
+ ldr r11, [r6, #4*64*\i+4*32]
+ smlal r8, r9, r10, r12
+ ldr r10, [r2, #4*64*(\i+1)]
+ smlal r4, r7, r11, r12
+ ldr r12, [r1, #4*64*(\i+1)+4*32]
+ rsb r10, r10, #0
+ ldr r11, [r6, #4*64*(\i+1)+4*32]
+ smlal r8, r9, r12, r10
+ .iflt \i-6
+ ldr r12, [r2, #4*64*(\i+2)]
+ .else
+ ldr r12, [sp, #40]
+ .endif
+ smlal r4, r7, r11, r10
+ .endr
+ round r10, r8, r9
+ adds r8, r8, r4
+ adc r9, r9, r7
+ strh_post r10, r3, r12
+ round r11, r8, r9
+ subs lr, lr, #1
+ strh_dpost r11, r5, r12
+ bgt 1b
+
+ sum8 r8, r9, r1, r0, r10, r11, r12, lr, rsb, 33
+ pop {r4}
+ round r10, r8, r9
+ str r8, [r4]
+ strh r10, [r3]
+
+ pop {r4-r11,pc}
+endfunc
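
The round macro above converts the running 64-bit fixed-point sum (kept in a lo/hi register pair) into one 16-bit output sample: it shifts out 24 fractional bits, saturates with ssat, and leaves the discarded low 24 bits in the accumulator so they carry into the next sample (the leftover is finally written back through the dither pointer saved on the stack). A hedged C sketch of that step; the function name and the single int64_t accumulator are illustrative, not the upstream C implementation:

#include <stdint.h>

/* acc is the running fixed-point sum, i.e. the lo/hi pair in the asm above */
static int16_t round_sample(int64_t *acc)
{
    int32_t sample = (int32_t)(*acc >> 24);  /* drop 24 fractional bits         */
    *acc &= 0xFFFFFF;                        /* keep the remainder for feedback */
    if (sample >  32767) sample =  32767;    /* ssat rd, #16, rd                */
    if (sample < -32768) sample = -32768;
    return (int16_t)sample;
}
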
diff --git a/media/ffvpx/libavcodec/arm/mpegaudiodsp_init_arm.c b/media/ffvpx/libavcodec/arm/mpegaudiodsp_init_arm.c
new file mode 100644
index 0000000000..d87bd27ad8
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/mpegaudiodsp_init_arm.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/arm/cpu.h"
+#include "libavcodec/mpegaudiodsp.h"
+#include "config.h"
+
+void ff_mpadsp_apply_window_fixed_armv6(int32_t *synth_buf, int32_t *window,
+ int *dither, int16_t *out, ptrdiff_t incr);
+
+av_cold void ff_mpadsp_init_arm(MPADSPContext *s)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_armv6(cpu_flags)) {
+ s->apply_window_fixed = ff_mpadsp_apply_window_fixed_armv6;
+ }
+}
diff --git a/media/ffvpx/libavcodec/arm/rdft_init_arm.c b/media/ffvpx/libavcodec/arm/rdft_init_arm.c
new file mode 100644
index 0000000000..1c5d8beb61
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/rdft_init_arm.c
@@ -0,0 +1,33 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/arm/cpu.h"
+
+#include "libavcodec/rdft.h"
+
+void ff_rdft_calc_neon(struct RDFTContext *s, FFTSample *z);
+
+av_cold void ff_rdft_init_arm(RDFTContext *s)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags))
+ s->rdft_calc = ff_rdft_calc_neon;
+}
diff --git a/media/ffvpx/libavcodec/arm/rdft_neon.S b/media/ffvpx/libavcodec/arm/rdft_neon.S
new file mode 100644
index 0000000000..eabb92b4bd
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/rdft_neon.S
@@ -0,0 +1,155 @@
+/*
+ * ARM NEON optimised RDFT
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+function ff_rdft_calc_neon, export=1
+ push {r4-r8,lr}
+
+ ldr r6, [r0, #4] @ inverse
+ mov r4, r0
+ mov r5, r1
+
+ lsls r6, r6, #31
+ bne 1f
+ add r0, r4, #24
+ bl X(ff_fft_permute_neon)
+ add r0, r4, #24
+ mov r1, r5
+ bl X(ff_fft_calc_neon)
+1:
+ ldr r12, [r4, #0] @ nbits
+ mov r2, #1
+ ldr r8, [r4, #20] @ negative_sin
+ lsl r12, r2, r12
+ add r0, r5, #8
+ lsl r8, r8, #31
+ add r1, r5, r12, lsl #2
+ lsr r12, r12, #2
+ vdup.32 d26, r8
+ ldr r2, [r4, #12] @ tcos
+ sub r12, r12, #2
+ ldr r3, [r4, #16] @ tsin
+ mov r7, r0
+ sub r1, r1, #8
+ mov lr, r1
+ mov r8, #-8
+ vld1.32 {d0}, [r0,:64]! @ d1[0,1]
+ vld1.32 {d1}, [r1,:64], r8 @ d2[0,1]
+ vld1.32 {d4}, [r2,:64]! @ tcos[i]
+ vld1.32 {d5}, [r3,:64]! @ tsin[i]
+ vmov.f32 d18, #0.5 @ k1
+ vdup.32 d19, r6
+ veor d5, d26, d5
+ pld [r0, #32]
+ veor d19, d18, d19 @ k2
+ vmov.i32 d16, #0
+ vmov.i32 d17, #1<<31
+ pld [r1, #-32]
+ vtrn.32 d16, d17
+ pld [r2, #32]
+ vrev64.32 d16, d16 @ d16=1,0 d17=0,1
+ pld [r3, #32]
+2:
+ veor q1, q0, q8 @ -d1[0],d1[1], d2[0],-d2[1]
+ vld1.32 {d24}, [r0,:64]! @ d1[0,1]
+ vadd.f32 d0, d0, d3 @ d1[0]+d2[0], d1[1]-d2[1]
+ vld1.32 {d25}, [r1,:64], r8 @ d2[0,1]
+ vadd.f32 d1, d2, d1 @ -d1[0]+d2[0], d1[1]+d2[1]
+ veor q3, q12, q8 @ -d1[0],d1[1], d2[0],-d2[1]
+ pld [r0, #32]
+ vmul.f32 q10, q0, q9 @ ev.re, ev.im, od.im, od.re
+ pld [r1, #-32]
+ vadd.f32 d0, d24, d7 @ d1[0]+d2[0], d1[1]-d2[1]
+ vadd.f32 d1, d6, d25 @ -d1[0]+d2[0], d1[1]+d2[1]
+ vmul.f32 q11, q0, q9 @ ev.re, ev.im, od.im, od.re
+ veor d7, d21, d16 @ -od.im, od.re
+ vrev64.32 d3, d21 @ od.re, od.im
+ veor d6, d20, d17 @ ev.re,-ev.im
+ veor d2, d3, d16 @ -od.re, od.im
+ vmla.f32 d20, d3, d4[1]
+ vmla.f32 d20, d7, d5[1]
+ vmla.f32 d6, d2, d4[1]
+ vmla.f32 d6, d21, d5[1]
+ vld1.32 {d4}, [r2,:64]! @ tcos[i]
+ veor d7, d23, d16 @ -od.im, od.re
+ vld1.32 {d5}, [r3,:64]! @ tsin[i]
+ veor d24, d22, d17 @ ev.re,-ev.im
+ vrev64.32 d3, d23 @ od.re, od.im
+ veor d5, d26, d5
+ pld [r2, #32]
+ veor d2, d3, d16 @ -od.re, od.im
+ pld [r3, #32]
+ vmla.f32 d22, d3, d4[0]
+ vmla.f32 d22, d7, d5[0]
+ vmla.f32 d24, d2, d4[0]
+ vmla.f32 d24, d23, d5[0]
+ vld1.32 {d0}, [r0,:64]! @ d1[0,1]
+ vld1.32 {d1}, [r1,:64], r8 @ d2[0,1]
+ vst1.32 {d20}, [r7,:64]!
+ vst1.32 {d6}, [lr,:64], r8
+ vst1.32 {d22}, [r7,:64]!
+ vst1.32 {d24}, [lr,:64], r8
+ subs r12, r12, #2
+ bgt 2b
+
+ veor q1, q0, q8 @ -d1[0],d1[1], d2[0],-d2[1]
+ vadd.f32 d0, d0, d3 @ d1[0]+d2[0], d1[1]-d2[1]
+ vadd.f32 d1, d2, d1 @ -d1[0]+d2[0], d1[1]+d2[1]
+ ldr r2, [r4, #8] @ sign_convention
+ vmul.f32 q10, q0, q9 @ ev.re, ev.im, od.im, od.re
+ add r0, r0, #4
+ bfc r2, #0, #31
+ vld1.32 {d0[0]}, [r0,:32]
+ veor d7, d21, d16 @ -od.im, od.re
+ vrev64.32 d3, d21 @ od.re, od.im
+ veor d6, d20, d17 @ ev.re,-ev.im
+ vld1.32 {d22}, [r5,:64]
+ vdup.32 d1, r2
+ vmov d23, d22
+ veor d2, d3, d16 @ -od.re, od.im
+ vtrn.32 d22, d23
+ veor d0, d0, d1
+ veor d23, d23, d17
+ vmla.f32 d20, d3, d4[1]
+ vmla.f32 d20, d7, d5[1]
+ vmla.f32 d6, d2, d4[1]
+ vmla.f32 d6, d21, d5[1]
+ vadd.f32 d22, d22, d23
+ vst1.32 {d20}, [r7,:64]
+ vst1.32 {d6}, [lr,:64]
+ vst1.32 {d0[0]}, [r0,:32]
+ vst1.32 {d22}, [r5,:64]
+
+ cmp r6, #0
+ it eq
+ popeq {r4-r8,pc}
+
+ vmul.f32 d22, d22, d18
+ vst1.32 {d22}, [r5,:64]
+ add r0, r4, #24
+ mov r1, r5
+ bl X(ff_fft_permute_neon)
+ add r0, r4, #24
+ mov r1, r5
+ pop {r4-r8,lr}
+ b X(ff_fft_calc_neon)
+endfunc
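
The twiddle pass above is the standard real-FFT recombination: an N-point real transform is computed as an N/2-point complex FFT over the interleaved samples, then mirror-image bin pairs are split into even/odd parts and rotated by the tcos/tsin twiddles (for the inverse direction the same pass runs before the complex FFT instead, which is what the branches on the inverse flag select). A textbook-style C sketch of the recombination; this is the generic formula and does not reproduce the exact scaling, the negative_sin sign convention, or the in-place packed layout of the NEON code:

#include <complex.h>
#include <math.h>

/* c[] holds the n/2 outputs of the half-size complex FFT over the interleaved
 * real input; out[] receives bins 0..n/2 of the real FFT (the remaining bins
 * follow by conjugate symmetry). */
static void rdft_post(const double complex *c, double complex *out, int n)
{
    const double pi = 3.14159265358979323846;

    out[0]     = creal(c[0]) + cimag(c[0]);   /* DC bin      */
    out[n / 2] = creal(c[0]) - cimag(c[0]);   /* Nyquist bin */
    for (int k = 1; k < n / 2; k++) {
        double complex ev = 0.5 * (c[k] + conj(c[n / 2 - k]));
        double complex od = -0.5 * I * (c[k] - conj(c[n / 2 - k]));
        out[k] = ev + cexp(-2.0 * I * pi * k / n) * od;
    }
}
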
diff --git a/media/ffvpx/libavcodec/arm/simple_idct_arm.S b/media/ffvpx/libavcodec/arm/simple_idct_arm.S
new file mode 100644
index 0000000000..42d79ab95e
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/simple_idct_arm.S
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2002 Frederic 'dilb' Boulay
+ *
+ * Author: Frederic Boulay <dilb@handhelds.org>
+ *
+ * The function defined in this file is derived from the simple_idct function
+ * from the libavcodec library part of the FFmpeg project.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+/* useful constants for the algorithm */
+#define W1 22725
+#define W2 21407
+#define W3 19266
+#define W4 16383
+#define W5 12873
+#define W6 8867
+#define W7 4520
+#define MASK_MSHW 0xFFFF0000
+
+#define ROW_SHIFT 11
+#define ROW_SHIFT2MSHW (16-11)
+#define COL_SHIFT 20
+#define ROW_SHIFTED_1 1024 /* 1<< (ROW_SHIFT-1) */
+#define COL_SHIFTED_1 524288 /* 1<< (COL_SHIFT-1) */
+
+
+function ff_simple_idct_arm, export=1
+ @@ void simple_idct_arm(int16_t *block)
+ @@ save the callee-saved registers needed (take all of them);
+ @@ R0-R3 are scratch regs, so there is no need to save them, but R0 holds the pointer to block,
+ @@ so it must not be clobbered before it has been saved
+ @@ R12 is also a scratch register, so it does not need to be saved either
+ @@ save all registers
+ stmfd sp!, {r4-r11, r14} @ R14 is also called LR
+ @@ at this point, R0=block, other registers are free.
+ add r14, r0, #112 @ R14=&block[8*7], better to start from the last row and decrease until row=0, i.e. R14=block.
+ @@ add 2 temporary variables in the stack: R0 and R14
+ sub sp, sp, #8 @ allow 2 local variables
+ str r0, [sp, #0] @ save block in sp[0]
+ @@ stack status
+ @@ sp+4 free
+ @@ sp+0 R0 (block)
+
+
+ @@ at this point, R0=block, R14=&block[56], R12=__const_ptr_, R1-R11 free
+
+
+__row_loop:
+ @@ read the row and check whether it is null, almost null, or neither; per the StrongARM specs there is no need to optimize the ldr accesses (i.e. split the 32-bit loads into two 16-bit ones), and keeping them 32-bit leaves more registers usable :)
+ ldr r1, [r14, #0] @ R1=(int32)(R12)[0]=ROWr32[0] (relative row cast to a 32b pointer)
+ ldr r2, [r14, #4] @ R2=(int32)(R12)[1]=ROWr32[1]
+ ldr r3, [r14, #8] @ R3=ROWr32[2]
+ ldr r4, [r14, #12] @ R4=ROWr32[3]
+ @@ check whether the words are null: if all of them are null, proceed with the next row (branch __end_row_loop);
+ @@ if ROWr16[0] is the only non-null one, handle that special case (branch __almost_empty_row);
+ @@ otherwise follow the complete algorithm.
+ @@ at this point, R0=block, R14=&block[n], R12=__const_ptr_, R1=ROWr32[0], R2=ROWr32[1],
+ @@ R3=ROWr32[2], R4=ROWr32[3], R5-R11 free
+ orr r5, r4, r3 @ R5=R4 | R3
+ orr r5, r5, r2 @ R5=R4 | R3 | R2
+ orrs r6, r5, r1 @ Test R5 | R1 (the aim is to check if everything is null)
+ beq __end_row_loop
+ mov r7, r1, asr #16 @ R7=R1>>16=ROWr16[1] (evaluate it now, as it could be useful later)
+ ldrsh r6, [r14, #0] @ R6=ROWr16[0]
+ orrs r5, r5, r7 @ R5=R4 | R3 | R2 | R7
+ beq __almost_empty_row
+
+@@ __b_evaluation:
+ @@ at this point, R0=block (temp), R1(free), R2=ROWr32[1], R3=ROWr32[2], R4=ROWr32[3],
+ @@ R5=(temp), R6=ROWr16[0], R7=ROWr16[1], R8-R11 free,
+ @@ R12=__const_ptr_, R14=&block[n]
+ @@ to save some registers/calls, proceed with b0-b3 first, followed by a0-a3
+
+ @@ MUL16(b0, W1, row[1]);
+ @@ MUL16(b1, W3, row[1]);
+ @@ MUL16(b2, W5, row[1]);
+ @@ MUL16(b3, W7, row[1]);
+ @@ MAC16(b0, W3, row[3]);
+ @@ MAC16(b1, -W7, row[3]);
+ @@ MAC16(b2, -W1, row[3]);
+ @@ MAC16(b3, -W5, row[3]);
+ ldr r8, =W1 @ R8=W1
+ mov r2, r2, asr #16 @ R2=ROWr16[3]
+ mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
+ ldr r9, =W3 @ R9=W3
+ ldr r10, =W5 @ R10=W5
+ mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
+ ldr r11, =W7 @ R11=W7
+ mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
+ mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
+ teq r2, #0 @ if null avoid muls
+ itttt ne
+ mlane r0, r9, r2, r0 @ R0+=W3*ROWr16[3]=b0 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
+ rsbne r2, r2, #0 @ R2=-ROWr16[3]
+ mlane r1, r11, r2, r1 @ R1-=W7*ROWr16[3]=b1 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
+ mlane r5, r8, r2, r5 @ R5-=W1*ROWr16[3]=b2 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
+ it ne
+ mlane r7, r10, r2, r7 @ R7-=W5*ROWr16[3]=b3 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
+
+ @@ at this point, R0=b0, R1=b1, R2 (free), R3=ROWr32[2], R4=ROWr32[3],
+ @@ R5=b2, R6=ROWr16[0], R7=b3, R8=W1, R9=W3, R10=W5, R11=W7,
+ @@ R12=__const_ptr_, R14=&block[n]
+ @@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3];
+ @@ if (temp != 0) {}
+ orrs r2, r3, r4 @ R2=ROWr32[2] | ROWr32[3]
+ beq __end_b_evaluation
+
+ @@ at this point, R0=b0, R1=b1, R2 (free), R3=ROWr32[2], R4=ROWr32[3],
+ @@ R5=b2, R6=ROWr16[0], R7=b3, R8=W1, R9=W3, R10=W5, R11=W7,
+ @@ R12=__const_ptr_, R14=&block[n]
+ @@ MAC16(b0, W5, row[5]);
+ @@ MAC16(b2, W7, row[5]);
+ @@ MAC16(b3, W3, row[5]);
+ @@ MAC16(b1, -W1, row[5]);
+ @@ MAC16(b0, W7, row[7]);
+ @@ MAC16(b2, W3, row[7]);
+ @@ MAC16(b3, -W1, row[7]);
+ @@ MAC16(b1, -W5, row[7]);
+ mov r3, r3, asr #16 @ R3=ROWr16[5]
+ teq r3, #0 @ if null avoid muls
+ it ne
+ mlane r0, r10, r3, r0 @ R0+=W5*ROWr16[5]=b0
+ mov r4, r4, asr #16 @ R4=ROWr16[7]
+ itttt ne
+ mlane r5, r11, r3, r5 @ R5+=W7*ROWr16[5]=b2
+ mlane r7, r9, r3, r7 @ R7+=W3*ROWr16[5]=b3
+ rsbne r3, r3, #0 @ R3=-ROWr16[5]
+ mlane r1, r8, r3, r1 @ R1-=W1*ROWr16[5]=b1
+ @@ R3 is free now
+ teq r4, #0 @ if null avoid muls
+ itttt ne
+ mlane r0, r11, r4, r0 @ R0+=W7*ROWr16[7]=b0
+ mlane r5, r9, r4, r5 @ R5+=W3*ROWr16[7]=b2
+ rsbne r4, r4, #0 @ R4=-ROWr16[7]
+ mlane r7, r8, r4, r7 @ R7-=W1*ROWr16[7]=b3
+ it ne
+ mlane r1, r10, r4, r1 @ R1-=W5*ROWr16[7]=b1
+ @@ R4 is free now
+__end_b_evaluation:
+ @@ at this point, R0=b0, R1=b1, R2=ROWr32[2] | ROWr32[3] (tmp), R3 (free), R4 (free),
+ @@ R5=b2, R6=ROWr16[0], R7=b3, R8 (free), R9 (free), R10 (free), R11 (free),
+ @@ R12=__const_ptr_, R14=&block[n]
+
+@@ __a_evaluation:
+ @@ a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1));
+ @@ a1 = a0 + W6 * row[2];
+ @@ a2 = a0 - W6 * row[2];
+ @@ a3 = a0 - W2 * row[2];
+ @@ a0 = a0 + W2 * row[2];
+ ldr r9, =W4 @ R9=W4
+ mul r6, r9, r6 @ R6=W4*ROWr16[0]
+ ldr r10, =W6 @ R10=W6
+ ldrsh r4, [r14, #4] @ R4=ROWr16[2] (a3 not defined yet)
+ add r6, r6, #ROW_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(ROW_SHIFT-1) (a0)
+
+ mul r11, r10, r4 @ R11=W6*ROWr16[2]
+ ldr r8, =W2 @ R8=W2
+ sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2)
+ @@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3];
+ @@ if (temp != 0) {}
+ teq r2, #0
+ beq __end_bef_a_evaluation
+
+ add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1)
+ mul r11, r8, r4 @ R11=W2*ROWr16[2]
+ sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3)
+ add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0)
+
+
+ @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3,
+ @@ R5=b2, R6=a0, R7=b3, R8=W2, R9=W4, R10=W6, R11 (free),
+ @@ R12=__const_ptr_, R14=&block[n]
+
+
+ @@ a0 += W4*row[4]
+ @@ a1 -= W4*row[4]
+ @@ a2 -= W4*row[4]
+ @@ a3 += W4*row[4]
+ ldrsh r11, [r14, #8] @ R11=ROWr16[4]
+ teq r11, #0 @ if null avoid muls
+ it ne
+ mulne r11, r9, r11 @ R11=W4*ROWr16[4]
+ @@ R9 is free now
+ ldrsh r9, [r14, #12] @ R9=ROWr16[6]
+ itttt ne
+ addne r6, r6, r11 @ R6+=W4*ROWr16[4] (a0)
+ subne r2, r2, r11 @ R2-=W4*ROWr16[4] (a1)
+ subne r3, r3, r11 @ R3-=W4*ROWr16[4] (a2)
+ addne r4, r4, r11 @ R4+=W4*ROWr16[4] (a3)
+ @@ W6 alone is no more useful, save W2*ROWr16[6] in it instead
+ teq r9, #0 @ if null avoid muls
+ itttt ne
+ mulne r11, r10, r9 @ R11=W6*ROWr16[6]
+ addne r6, r6, r11 @ R6+=W6*ROWr16[6] (a0)
+ mulne r10, r8, r9 @ R10=W2*ROWr16[6]
+ @@ a0 += W6*row[6];
+ @@ a3 -= W6*row[6];
+ @@ a1 -= W2*row[6];
+ @@ a2 += W2*row[6];
+ subne r4, r4, r11 @ R4-=W6*ROWr16[6] (a3)
+ itt ne
+ subne r2, r2, r10 @ R2-=W2*ROWr16[6] (a1)
+ addne r3, r3, r10 @ R3+=W2*ROWr16[6] (a2)
+
+__end_a_evaluation:
+ @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3,
+ @@ R5=b2, R6=a0, R7=b3, R8 (free), R9 (free), R10 (free), R11 (free),
+ @@ R12=__const_ptr_, R14=&block[n]
+ @@ row[0] = (a0 + b0) >> ROW_SHIFT;
+ @@ row[1] = (a1 + b1) >> ROW_SHIFT;
+ @@ row[2] = (a2 + b2) >> ROW_SHIFT;
+ @@ row[3] = (a3 + b3) >> ROW_SHIFT;
+ @@ row[4] = (a3 - b3) >> ROW_SHIFT;
+ @@ row[5] = (a2 - b2) >> ROW_SHIFT;
+ @@ row[6] = (a1 - b1) >> ROW_SHIFT;
+ @@ row[7] = (a0 - b0) >> ROW_SHIFT;
+ add r8, r6, r0 @ R8=a0+b0
+ add r9, r2, r1 @ R9=a1+b1
+ @@ put two 16-bit half-words in a 32-bit word
+ @@ ROWr32[0]=ROWr16[0] | (ROWr16[1]<<16) (only little-endian compliant then!!!)
+ ldr r10, =MASK_MSHW @ R10=0xFFFF0000
+ and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a1+b1)<<5)
+ mvn r11, r10 @ R11= NOT R10= 0x0000FFFF
+ and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a0+b0)>>11)
+ orr r8, r8, r9
+ str r8, [r14, #0]
+
+ add r8, r3, r5 @ R8=a2+b2
+ add r9, r4, r7 @ R9=a3+b3
+ and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a3+b3)<<5)
+ and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a2+b2)>>11)
+ orr r8, r8, r9
+ str r8, [r14, #4]
+
+ sub r8, r4, r7 @ R8=a3-b3
+ sub r9, r3, r5 @ R9=a2-b2
+ and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a2-b2)<<5)
+ and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a3-b3)>>11)
+ orr r8, r8, r9
+ str r8, [r14, #8]
+
+ sub r8, r2, r1 @ R8=a1-b1
+ sub r9, r6, r0 @ R9=a0-b0
+ and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a0-b0)<<5)
+ and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a1-b1)>>11)
+ orr r8, r8, r9
+ str r8, [r14, #12]
+
+ bal __end_row_loop
+
+__almost_empty_row:
+ @@ the row was empty except for ROWr16[0]; now handle this special case
+ @@ at this point, R0=block, R14=&block[n], R12=__const_ptr_, R1=ROWr32[0], R2=ROWr32[1],
+ @@ R3=ROWr32[2], R4=ROWr32[3], R5=(temp), R6=ROWr16[0], R7=ROWr16[1],
+ @@ R8=0xFFFF (temp), R9-R11 free
+ mov r8, #0x10000 @ build 0xFFFF in two steps; this avoids an ldr (and its load delay)
+ sub r8, r8, #1 @ R8=0xFFFF, now ready
+ and r5, r8, r6, lsl #3 @ R5=R8 & (R6<<3)= (ROWr16[0]<<3) & 0xFFFF
+ orr r5, r5, r5, lsl #16 @ R5=R5 | (R5<<16)
+ str r5, [r14, #0] @ R14[0]=ROWr32[0]=R5
+ str r5, [r14, #4] @ R14[4]=ROWr32[1]=R5
+ str r5, [r14, #8] @ R14[8]=ROWr32[2]=R5
+ str r5, [r14, #12] @ R14[12]=ROWr32[3]=R5
+
+__end_row_loop:
+ @@ at this point, R0-R11 (free)
+ @@ R12=__const_ptr_, R14=&block[n]
+ ldr r0, [sp, #0] @ R0=block
+ teq r0, r14 @ compare current &block[8*n] to block, when block is reached, the loop is finished.
+ sub r14, r14, #16
+ bne __row_loop
+
+
+
+ @@ at this point, R0=block, R1-R11 (free)
+ @@ R12=__const_ptr_, R14=&block[n]
+ add r14, r0, #14 @ R14=&block[7], better start from the last col, and decrease the value until col=0, i.e. R14=block.
+__col_loop:
+
+@@ __b_evaluation2:
+ @@ at this point, R0=block (temp), R1-R11 (free)
+ @@ R12=__const_ptr_, R14=&block[n]
+ @@ proceed with b0-b3 first, followed by a0-a3
+ @@ MUL16(b0, W1, col[8x1]);
+ @@ MUL16(b1, W3, col[8x1]);
+ @@ MUL16(b2, W5, col[8x1]);
+ @@ MUL16(b3, W7, col[8x1]);
+ @@ MAC16(b0, W3, col[8x3]);
+ @@ MAC16(b1, -W7, col[8x3]);
+ @@ MAC16(b2, -W1, col[8x3]);
+ @@ MAC16(b3, -W5, col[8x3]);
+ ldr r8, =W1 @ R8=W1
+ ldrsh r7, [r14, #16]
+ mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
+ ldr r9, =W3 @ R9=W3
+ ldr r10, =W5 @ R10=W5
+ mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
+ ldr r11, =W7 @ R11=W7
+ mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
+ ldrsh r2, [r14, #48]
+ mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
+ teq r2, #0 @ if 0, then avoid muls
+ itttt ne
+ mlane r0, r9, r2, r0 @ R0+=W3*ROWr16[3]=b0 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
+ rsbne r2, r2, #0 @ R2=-ROWr16[3]
+ mlane r1, r11, r2, r1 @ R1-=W7*ROWr16[3]=b1 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
+ mlane r5, r8, r2, r5 @ R5-=W1*ROWr16[3]=b2 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
+ it ne
+ mlane r7, r10, r2, r7 @ R7-=W5*ROWr16[3]=b3 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
+
+ @@ at this point, R0=b0, R1=b1, R2 (free), R3 (free), R4 (free),
+ @@ R5=b2, R6 (free), R7=b3, R8=W1, R9=W3, R10=W5, R11=W7,
+ @@ R12=__const_ptr_, R14=&block[n]
+ @@ MAC16(b0, W5, col[5x8]);
+ @@ MAC16(b2, W7, col[5x8]);
+ @@ MAC16(b3, W3, col[5x8]);
+ @@ MAC16(b1, -W1, col[5x8]);
+ @@ MAC16(b0, W7, col[7x8]);
+ @@ MAC16(b2, W3, col[7x8]);
+ @@ MAC16(b3, -W1, col[7x8]);
+ @@ MAC16(b1, -W5, col[7x8]);
+ ldrsh r3, [r14, #80] @ R3=COLr16[5x8]
+ teq r3, #0 @ if 0 then avoid muls
+ itttt ne
+ mlane r0, r10, r3, r0 @ R0+=W5*ROWr16[5x8]=b0
+ mlane r5, r11, r3, r5 @ R5+=W7*ROWr16[5x8]=b2
+ mlane r7, r9, r3, r7 @ R7+=W3*ROWr16[5x8]=b3
+ rsbne r3, r3, #0 @ R3=-ROWr16[5x8]
+ ldrsh r4, [r14, #112] @ R4=COLr16[7x8]
+ it ne
+ mlane r1, r8, r3, r1 @ R1-=W1*ROWr16[5x8]=b1
+ @@ R3 is free now
+ teq r4, #0 @ if 0 then avoid muls
+ itttt ne
+ mlane r0, r11, r4, r0 @ R0+=W7*ROWr16[7x8]=b0
+ mlane r5, r9, r4, r5 @ R5+=W3*ROWr16[7x8]=b2
+ rsbne r4, r4, #0 @ R4=-ROWr16[7x8]
+ mlane r7, r8, r4, r7 @ R7-=W1*ROWr16[7x8]=b3
+ it ne
+ mlane r1, r10, r4, r1 @ R1-=W5*ROWr16[7x8]=b1
+ @@ R4 is free now
+@@ __end_b_evaluation2:
+ @@ at this point, R0=b0, R1=b1, R2 (free), R3 (free), R4 (free),
+ @@ R5=b2, R6 (free), R7=b3, R8 (free), R9 (free), R10 (free), R11 (free),
+ @@ R12=__const_ptr_, R14=&block[n]
+
+@@ __a_evaluation2:
+ @@ a0 = (W4 * col[8x0]) + (1 << (COL_SHIFT - 1));
+ @@ a1 = a0 + W6 * row[2];
+ @@ a2 = a0 - W6 * row[2];
+ @@ a3 = a0 - W2 * row[2];
+ @@ a0 = a0 + W2 * row[2];
+ ldrsh r6, [r14, #0]
+ ldr r9, =W4 @ R9=W4
+ mul r6, r9, r6 @ R6=W4*ROWr16[0]
+ ldr r10, =W6 @ R10=W6
+ ldrsh r4, [r14, #32] @ R4=ROWr16[2] (a3 not defined yet)
+ add r6, r6, #COL_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(COL_SHIFT-1) (a0)
+ mul r11, r10, r4 @ R11=W6*ROWr16[2]
+ ldr r8, =W2 @ R8=W2
+ add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1)
+ sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2)
+ mul r11, r8, r4 @ R11=W2*ROWr16[2]
+ sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3)
+ add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0)
+
+ @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3,
+ @@ R5=b2, R6=a0, R7=b3, R8=W2, R9=W4, R10=W6, R11 (free),
+ @@ R12=__const_ptr_, R14=&block[n]
+ @@ a0 += W4*row[4]
+ @@ a1 -= W4*row[4]
+ @@ a2 -= W4*row[4]
+ @@ a3 += W4*row[4]
+ ldrsh r11, [r14, #64] @ R11=ROWr16[4]
+ teq r11, #0 @ if null avoid muls
+ itttt ne
+ mulne r11, r9, r11 @ R11=W4*ROWr16[4]
+ @@ R9 is free now
+ addne r6, r6, r11 @ R6+=W4*ROWr16[4] (a0)
+ subne r2, r2, r11 @ R2-=W4*ROWr16[4] (a1)
+ subne r3, r3, r11 @ R3-=W4*ROWr16[4] (a2)
+ ldrsh r9, [r14, #96] @ R9=ROWr16[6]
+ it ne
+ addne r4, r4, r11 @ R4+=W4*ROWr16[4] (a3)
+ @@ W6 alone is no more useful, save W2*ROWr16[6] in it instead
+ teq r9, #0 @ if null avoid muls
+ itttt ne
+ mulne r11, r10, r9 @ R11=W6*ROWr16[6]
+ addne r6, r6, r11 @ R6+=W6*ROWr16[6] (a0)
+ mulne r10, r8, r9 @ R10=W2*ROWr16[6]
+ @@ a0 += W6*row[6];
+ @@ a3 -= W6*row[6];
+ @@ a1 -= W2*row[6];
+ @@ a2 += W2*row[6];
+ subne r4, r4, r11 @ R4-=W6*ROWr16[6] (a3)
+ itt ne
+ subne r2, r2, r10 @ R2-=W2*ROWr16[6] (a1)
+ addne r3, r3, r10 @ R3+=W2*ROWr16[6] (a2)
+@@ __end_a_evaluation2:
+ @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3,
+ @@ R5=b2, R6=a0, R7=b3, R8 (free), R9 (free), R10 (free), R11 (free),
+ @@ R12=__const_ptr_, R14=&block[n]
+ @@ col[0 ] = ((a0 + b0) >> COL_SHIFT);
+ @@ col[8 ] = ((a1 + b1) >> COL_SHIFT);
+ @@ col[16] = ((a2 + b2) >> COL_SHIFT);
+ @@ col[24] = ((a3 + b3) >> COL_SHIFT);
+ @@ col[32] = ((a3 - b3) >> COL_SHIFT);
+ @@ col[40] = ((a2 - b2) >> COL_SHIFT);
+ @@ col[48] = ((a1 - b1) >> COL_SHIFT);
+ @@ col[56] = ((a0 - b0) >> COL_SHIFT);
+ @@@@@ no optimization here @@@@@
+ add r8, r6, r0 @ R8=a0+b0
+ add r9, r2, r1 @ R9=a1+b1
+ mov r8, r8, asr #COL_SHIFT
+ mov r9, r9, asr #COL_SHIFT
+ strh r8, [r14, #0]
+ strh r9, [r14, #16]
+ add r8, r3, r5 @ R8=a2+b2
+ add r9, r4, r7 @ R9=a3+b3
+ mov r8, r8, asr #COL_SHIFT
+ mov r9, r9, asr #COL_SHIFT
+ strh r8, [r14, #32]
+ strh r9, [r14, #48]
+ sub r8, r4, r7 @ R8=a3-b3
+ sub r9, r3, r5 @ R9=a2-b2
+ mov r8, r8, asr #COL_SHIFT
+ mov r9, r9, asr #COL_SHIFT
+ strh r8, [r14, #64]
+ strh r9, [r14, #80]
+ sub r8, r2, r1 @ R8=a1-b1
+ sub r9, r6, r0 @ R9=a0-b0
+ mov r8, r8, asr #COL_SHIFT
+ mov r9, r9, asr #COL_SHIFT
+ strh r8, [r14, #96]
+ strh r9, [r14, #112]
+
+@@ __end_col_loop:
+ @@ at this point, R0-R11 (free)
+ @@ R12=__const_ptr_, R14=&block[n]
+ ldr r0, [sp, #0] @ R0=block
+ teq r0, r14 @ compare current &block[n] to block, when block is reached, the loop is finished.
+ sub r14, r14, #2
+ bne __col_loop
+
+
+
+
+@@ __end_simple_idct_arm:
+ @@ restore registers to previous status!
+ add sp, sp, #8 @@ the local variables!
+ ldmfd sp!, {r4-r11, r15} @@ update PC with LR content.
+
+
+
+@@ a small out-of-line helper, kept here so it does not burden the common path
+__end_bef_a_evaluation:
+ add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1)
+ mul r11, r8, r4 @ R11=W2*ROWr16[2]
+ sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3)
+ add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0)
+ bal __end_a_evaluation
+endfunc
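
The W1..W7 weights used by this and the following IDCT files are fixed-point cosine factors, cos(i*pi/16)*sqrt(2) scaled by 2^14, as the armv5te/armv6/NEON headers state. A small C check of that derivation; note the assembly defines W4 as 16383 rather than the computed 16384:

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double pi = 3.14159265358979323846;
    for (int i = 1; i <= 7; i++) {
        double w = cos(i * pi / 16.0) * sqrt(2.0) * (1 << 14);
        printf("W%d = %d\n", i, (int)(w + 0.5));
    }
    return 0;  /* prints 22725, 21407, 19266, 16384, 12873, 8867, 4520 */
}
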
diff --git a/media/ffvpx/libavcodec/arm/simple_idct_armv5te.S b/media/ffvpx/libavcodec/arm/simple_idct_armv5te.S
new file mode 100644
index 0000000000..a8d03469ab
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/simple_idct_armv5te.S
@@ -0,0 +1,613 @@
+/*
+ * Simple IDCT
+ *
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2006 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+#define W1 22725 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W2 21407 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W3 19266 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W4 16383 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W5 12873 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W6 8867 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W7 4520 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define ROW_SHIFT 11
+#define COL_SHIFT 20
+
+#define W13 (W1 | (W3 << 16))
+#define W26 (W2 | (W6 << 16))
+#define W57 (W5 | (W7 << 16))
+
+function idct_row_armv5te
+ str lr, [sp, #-4]!
+
+ ldrd v1, v2, [a1, #8]
+ ldrd a3, a4, [a1] /* a3 = row[1:0], a4 = row[3:2] */
+ orrs v1, v1, v2
+ itt eq
+ cmpeq v1, a4
+ cmpeq v1, a3, lsr #16
+ beq row_dc_only
+
+ mov v1, #(1<<(ROW_SHIFT-1))
+ mov ip, #16384
+ sub ip, ip, #1 /* ip = W4 */
+ smlabb v1, ip, a3, v1 /* v1 = W4*row[0]+(1<<(RS-1)) */
+ ldr ip, =W26 /* ip = W2 | (W6 << 16) */
+ smultb a2, ip, a4
+ smulbb lr, ip, a4
+ add v2, v1, a2
+ sub v3, v1, a2
+ sub v4, v1, lr
+ add v1, v1, lr
+
+ ldr ip, =W13 /* ip = W1 | (W3 << 16) */
+ ldr lr, =W57 /* lr = W5 | (W7 << 16) */
+ smulbt v5, ip, a3
+ smultt v6, lr, a4
+ smlatt v5, ip, a4, v5
+ smultt a2, ip, a3
+ smulbt v7, lr, a3
+ sub v6, v6, a2
+ smulbt a2, ip, a4
+ smultt fp, lr, a3
+ sub v7, v7, a2
+ smulbt a2, lr, a4
+ ldrd a3, a4, [a1, #8] /* a3=row[5:4] a4=row[7:6] */
+ sub fp, fp, a2
+
+ orrs a2, a3, a4
+ beq 1f
+
+ smlabt v5, lr, a3, v5
+ smlabt v6, ip, a3, v6
+ smlatt v5, lr, a4, v5
+ smlabt v6, lr, a4, v6
+ smlatt v7, lr, a3, v7
+ smlatt fp, ip, a3, fp
+ smulbt a2, ip, a4
+ smlatt v7, ip, a4, v7
+ sub fp, fp, a2
+
+ ldr ip, =W26 /* ip = W2 | (W6 << 16) */
+ mov a2, #16384
+ sub a2, a2, #1 /* a2 = W4 */
+ smulbb a2, a2, a3 /* a2 = W4*row[4] */
+ smultb lr, ip, a4 /* lr = W6*row[6] */
+ add v1, v1, a2 /* v1 += W4*row[4] */
+ add v1, v1, lr /* v1 += W6*row[6] */
+ add v4, v4, a2 /* v4 += W4*row[4] */
+ sub v4, v4, lr /* v4 -= W6*row[6] */
+ smulbb lr, ip, a4 /* lr = W2*row[6] */
+ sub v2, v2, a2 /* v2 -= W4*row[4] */
+ sub v2, v2, lr /* v2 -= W2*row[6] */
+ sub v3, v3, a2 /* v3 -= W4*row[4] */
+ add v3, v3, lr /* v3 += W2*row[6] */
+
+1: add a2, v1, v5
+ mov a3, a2, lsr #11
+ bic a3, a3, #0x1f0000
+ sub a2, v2, v6
+ mov a2, a2, lsr #11
+ add a3, a3, a2, lsl #16
+ add a2, v3, v7
+ mov a4, a2, lsr #11
+ bic a4, a4, #0x1f0000
+ add a2, v4, fp
+ mov a2, a2, lsr #11
+ add a4, a4, a2, lsl #16
+ strd a3, a4, [a1]
+
+ sub a2, v4, fp
+ mov a3, a2, lsr #11
+ bic a3, a3, #0x1f0000
+ sub a2, v3, v7
+ mov a2, a2, lsr #11
+ add a3, a3, a2, lsl #16
+ add a2, v2, v6
+ mov a4, a2, lsr #11
+ bic a4, a4, #0x1f0000
+ sub a2, v1, v5
+ mov a2, a2, lsr #11
+ add a4, a4, a2, lsl #16
+ strd a3, a4, [a1, #8]
+
+ ldr pc, [sp], #4
+
+row_dc_only:
+ orr a3, a3, a3, lsl #16
+ bic a3, a3, #0xe000
+ mov a3, a3, lsl #3
+ mov a4, a3
+ strd a3, a4, [a1]
+ strd a3, a4, [a1, #8]
+
+ ldr pc, [sp], #4
+endfunc
+
+ .macro idct_col
+ ldr a4, [a1] /* a4 = col[1:0] */
+ mov ip, #16384
+ sub ip, ip, #1 /* ip = W4 */
+ mov v1, #((1<<(COL_SHIFT-1))/W4) /* this matches the C version */
+ add v2, v1, a4, asr #16
+ rsb v2, v2, v2, lsl #14
+ mov a4, a4, lsl #16
+ add v1, v1, a4, asr #16
+ ldr a4, [a1, #(16*4)]
+ rsb v1, v1, v1, lsl #14
+
+ smulbb lr, ip, a4
+ smulbt a3, ip, a4
+ sub v3, v1, lr
+ sub v5, v1, lr
+ add v7, v1, lr
+ add v1, v1, lr
+ sub v4, v2, a3
+ sub v6, v2, a3
+ add fp, v2, a3
+ ldr ip, =W26
+ ldr a4, [a1, #(16*2)]
+ add v2, v2, a3
+
+ smulbb lr, ip, a4
+ smultb a3, ip, a4
+ add v1, v1, lr
+ sub v7, v7, lr
+ add v3, v3, a3
+ sub v5, v5, a3
+ smulbt lr, ip, a4
+ smultt a3, ip, a4
+ add v2, v2, lr
+ sub fp, fp, lr
+ add v4, v4, a3
+ ldr a4, [a1, #(16*6)]
+ sub v6, v6, a3
+
+ smultb lr, ip, a4
+ smulbb a3, ip, a4
+ add v1, v1, lr
+ sub v7, v7, lr
+ sub v3, v3, a3
+ add v5, v5, a3
+ smultt lr, ip, a4
+ smulbt a3, ip, a4
+ add v2, v2, lr
+ sub fp, fp, lr
+ sub v4, v4, a3
+ add v6, v6, a3
+
+ stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp}
+
+ ldr ip, =W13
+ ldr a4, [a1, #(16*1)]
+ ldr lr, =W57
+ smulbb v1, ip, a4
+ smultb v3, ip, a4
+ smulbb v5, lr, a4
+ smultb v7, lr, a4
+ smulbt v2, ip, a4
+ smultt v4, ip, a4
+ smulbt v6, lr, a4
+ smultt fp, lr, a4
+ rsb v4, v4, #0
+ ldr a4, [a1, #(16*3)]
+ rsb v3, v3, #0
+
+ smlatb v1, ip, a4, v1
+ smlatb v3, lr, a4, v3
+ smulbb a3, ip, a4
+ smulbb a2, lr, a4
+ sub v5, v5, a3
+ sub v7, v7, a2
+ smlatt v2, ip, a4, v2
+ smlatt v4, lr, a4, v4
+ smulbt a3, ip, a4
+ smulbt a2, lr, a4
+ sub v6, v6, a3
+ ldr a4, [a1, #(16*5)]
+ sub fp, fp, a2
+
+ smlabb v1, lr, a4, v1
+ smlabb v3, ip, a4, v3
+ smlatb v5, lr, a4, v5
+ smlatb v7, ip, a4, v7
+ smlabt v2, lr, a4, v2
+ smlabt v4, ip, a4, v4
+ smlatt v6, lr, a4, v6
+ ldr a3, [a1, #(16*7)]
+ smlatt fp, ip, a4, fp
+
+ smlatb v1, lr, a3, v1
+ smlabb v3, lr, a3, v3
+ smlatb v5, ip, a3, v5
+ smulbb a4, ip, a3
+ smlatt v2, lr, a3, v2
+ sub v7, v7, a4
+ smlabt v4, lr, a3, v4
+ smulbt a4, ip, a3
+ smlatt v6, ip, a3, v6
+ sub fp, fp, a4
+ .endm
+
+function idct_col_armv5te
+ str lr, [sp, #-4]!
+
+ idct_col
+
+ ldmfd sp!, {a3, a4}
+ adds a2, a3, v1
+ mov a2, a2, lsr #20
+ it mi
+ orrmi a2, a2, #0xf000
+ add ip, a4, v2
+ mov ip, ip, asr #20
+ orr a2, a2, ip, lsl #16
+ str a2, [a1]
+ subs a3, a3, v1
+ mov a2, a3, lsr #20
+ it mi
+ orrmi a2, a2, #0xf000
+ sub a4, a4, v2
+ mov a4, a4, asr #20
+ orr a2, a2, a4, lsl #16
+ ldmfd sp!, {a3, a4}
+ str a2, [a1, #(16*7)]
+
+ subs a2, a3, v3
+ mov a2, a2, lsr #20
+ it mi
+ orrmi a2, a2, #0xf000
+ sub ip, a4, v4
+ mov ip, ip, asr #20
+ orr a2, a2, ip, lsl #16
+ str a2, [a1, #(16*1)]
+ adds a3, a3, v3
+ mov a2, a3, lsr #20
+ it mi
+ orrmi a2, a2, #0xf000
+ add a4, a4, v4
+ mov a4, a4, asr #20
+ orr a2, a2, a4, lsl #16
+ ldmfd sp!, {a3, a4}
+ str a2, [a1, #(16*6)]
+
+ adds a2, a3, v5
+ mov a2, a2, lsr #20
+ it mi
+ orrmi a2, a2, #0xf000
+ add ip, a4, v6
+ mov ip, ip, asr #20
+ orr a2, a2, ip, lsl #16
+ str a2, [a1, #(16*2)]
+ subs a3, a3, v5
+ mov a2, a3, lsr #20
+ it mi
+ orrmi a2, a2, #0xf000
+ sub a4, a4, v6
+ mov a4, a4, asr #20
+ orr a2, a2, a4, lsl #16
+ ldmfd sp!, {a3, a4}
+ str a2, [a1, #(16*5)]
+
+ adds a2, a3, v7
+ mov a2, a2, lsr #20
+ it mi
+ orrmi a2, a2, #0xf000
+ add ip, a4, fp
+ mov ip, ip, asr #20
+ orr a2, a2, ip, lsl #16
+ str a2, [a1, #(16*3)]
+ subs a3, a3, v7
+ mov a2, a3, lsr #20
+ it mi
+ orrmi a2, a2, #0xf000
+ sub a4, a4, fp
+ mov a4, a4, asr #20
+ orr a2, a2, a4, lsl #16
+ str a2, [a1, #(16*4)]
+
+ ldr pc, [sp], #4
+endfunc
+
+.macro clip dst, src:vararg
+ movs \dst, \src
+ it mi
+ movmi \dst, #0
+ cmp \dst, #255
+ it gt
+ movgt \dst, #255
+.endm
+
+.macro aclip dst, src:vararg
+ adds \dst, \src
+ it mi
+ movmi \dst, #0
+ cmp \dst, #255
+ it gt
+ movgt \dst, #255
+.endm
+
+function idct_col_put_armv5te
+ str lr, [sp, #-4]!
+
+ idct_col
+
+ ldmfd sp!, {a3, a4}
+ ldr lr, [sp, #32]
+ add a2, a3, v1
+ clip a2, a2, asr #20
+ add ip, a4, v2
+ clip ip, ip, asr #20
+ orr a2, a2, ip, lsl #8
+ sub a3, a3, v1
+ clip a3, a3, asr #20
+ sub a4, a4, v2
+ clip a4, a4, asr #20
+ ldr v1, [sp, #28]
+ strh a2, [v1]
+ add a2, v1, #2
+ str a2, [sp, #28]
+ orr a2, a3, a4, lsl #8
+ rsb v2, lr, lr, lsl #3
+ ldmfd sp!, {a3, a4}
+ strh_pre a2, v2, v1
+
+ sub a2, a3, v3
+ clip a2, a2, asr #20
+ sub ip, a4, v4
+ clip ip, ip, asr #20
+ orr a2, a2, ip, lsl #8
+ strh_pre a2, v1, lr
+ add a3, a3, v3
+ clip a2, a3, asr #20
+ add a4, a4, v4
+ clip a4, a4, asr #20
+ orr a2, a2, a4, lsl #8
+ ldmfd sp!, {a3, a4}
+ strh_dpre a2, v2, lr
+
+ add a2, a3, v5
+ clip a2, a2, asr #20
+ add ip, a4, v6
+ clip ip, ip, asr #20
+ orr a2, a2, ip, lsl #8
+ strh_pre a2, v1, lr
+ sub a3, a3, v5
+ clip a2, a3, asr #20
+ sub a4, a4, v6
+ clip a4, a4, asr #20
+ orr a2, a2, a4, lsl #8
+ ldmfd sp!, {a3, a4}
+ strh_dpre a2, v2, lr
+
+ add a2, a3, v7
+ clip a2, a2, asr #20
+ add ip, a4, fp
+ clip ip, ip, asr #20
+ orr a2, a2, ip, lsl #8
+ strh a2, [v1, lr]
+ sub a3, a3, v7
+ clip a2, a3, asr #20
+ sub a4, a4, fp
+ clip a4, a4, asr #20
+ orr a2, a2, a4, lsl #8
+ strh_dpre a2, v2, lr
+
+ ldr pc, [sp], #4
+endfunc
+
+function idct_col_add_armv5te
+ str lr, [sp, #-4]!
+
+ idct_col
+
+ ldr lr, [sp, #36]
+
+ ldmfd sp!, {a3, a4}
+ ldrh ip, [lr]
+ add a2, a3, v1
+ sub a3, a3, v1
+ and v1, ip, #255
+ aclip a2, v1, a2, asr #20
+ add v1, a4, v2
+ mov v1, v1, asr #20
+ aclip v1, v1, ip, lsr #8
+ orr a2, a2, v1, lsl #8
+ ldr v1, [sp, #32]
+ sub a4, a4, v2
+ rsb v2, v1, v1, lsl #3
+ ldrh_pre ip, v2, lr
+ strh a2, [lr]
+ and a2, ip, #255
+ aclip a3, a2, a3, asr #20
+ mov a4, a4, asr #20
+ aclip a4, a4, ip, lsr #8
+ add a2, lr, #2
+ str a2, [sp, #28]
+ orr a2, a3, a4, lsl #8
+ strh a2, [v2]
+
+ ldmfd sp!, {a3, a4}
+ ldrh_pre ip, lr, v1
+ sub a2, a3, v3
+ add a3, a3, v3
+ and v3, ip, #255
+ aclip a2, v3, a2, asr #20
+ sub v3, a4, v4
+ mov v3, v3, asr #20
+ aclip v3, v3, ip, lsr #8
+ orr a2, a2, v3, lsl #8
+ add a4, a4, v4
+ ldrh_dpre ip, v2, v1
+ strh a2, [lr]
+ and a2, ip, #255
+ aclip a3, a2, a3, asr #20
+ mov a4, a4, asr #20
+ aclip a4, a4, ip, lsr #8
+ orr a2, a3, a4, lsl #8
+ strh a2, [v2]
+
+ ldmfd sp!, {a3, a4}
+ ldrh_pre ip, lr, v1
+ add a2, a3, v5
+ sub a3, a3, v5
+ and v3, ip, #255
+ aclip a2, v3, a2, asr #20
+ add v3, a4, v6
+ mov v3, v3, asr #20
+ aclip v3, v3, ip, lsr #8
+ orr a2, a2, v3, lsl #8
+ sub a4, a4, v6
+ ldrh_dpre ip, v2, v1
+ strh a2, [lr]
+ and a2, ip, #255
+ aclip a3, a2, a3, asr #20
+ mov a4, a4, asr #20
+ aclip a4, a4, ip, lsr #8
+ orr a2, a3, a4, lsl #8
+ strh a2, [v2]
+
+ ldmfd sp!, {a3, a4}
+ ldrh_pre ip, lr, v1
+ add a2, a3, v7
+ sub a3, a3, v7
+ and v3, ip, #255
+ aclip a2, v3, a2, asr #20
+ add v3, a4, fp
+ mov v3, v3, asr #20
+ aclip v3, v3, ip, lsr #8
+ orr a2, a2, v3, lsl #8
+ sub a4, a4, fp
+ ldrh_dpre ip, v2, v1
+ strh a2, [lr]
+ and a2, ip, #255
+ aclip a3, a2, a3, asr #20
+ mov a4, a4, asr #20
+ aclip a4, a4, ip, lsr #8
+ orr a2, a3, a4, lsl #8
+ strh a2, [v2]
+
+ ldr pc, [sp], #4
+endfunc
+
+function ff_simple_idct_armv5te, export=1
+ stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}
+
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+
+ sub a1, a1, #(16*7)
+
+ bl idct_col_armv5te
+ add a1, a1, #4
+ bl idct_col_armv5te
+ add a1, a1, #4
+ bl idct_col_armv5te
+ add a1, a1, #4
+ bl idct_col_armv5te
+
+ ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
+endfunc
+
+function ff_simple_idct_add_armv5te, export=1
+ stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
+
+ mov a1, a3
+
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+
+ sub a1, a1, #(16*7)
+
+ bl idct_col_add_armv5te
+ add a1, a1, #4
+ bl idct_col_add_armv5te
+ add a1, a1, #4
+ bl idct_col_add_armv5te
+ add a1, a1, #4
+ bl idct_col_add_armv5te
+
+ add sp, sp, #8
+ ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
+endfunc
+
+function ff_simple_idct_put_armv5te, export=1
+ stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
+
+ mov a1, a3
+
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+
+ sub a1, a1, #(16*7)
+
+ bl idct_col_put_armv5te
+ add a1, a1, #4
+ bl idct_col_put_armv5te
+ add a1, a1, #4
+ bl idct_col_put_armv5te
+ add a1, a1, #4
+ bl idct_col_put_armv5te
+
+ add sp, sp, #8
+ ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
+endfunc
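
Both the pure-ARM and the ARMv5TE row passes take a shortcut when a row carries no AC coefficients: every output of the row transform is then the same DC value, which the code reduces to row[0] << 3 replicated across the row. A minimal C sketch of that fast path (the function name is illustrative):

#include <stdint.h>

static void idct_row_dc_only(int16_t row[8])
{
    /* with W4 and ROW_SHIFT folded together the whole row collapses to row[0] << 3 */
    int16_t dc = (int16_t)(row[0] * (1 << 3));
    for (int i = 0; i < 8; i++)
        row[i] = dc;
}
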
diff --git a/media/ffvpx/libavcodec/arm/simple_idct_armv6.S b/media/ffvpx/libavcodec/arm/simple_idct_armv6.S
new file mode 100644
index 0000000000..f95c20d295
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/simple_idct_armv6.S
@@ -0,0 +1,425 @@
+/*
+ * Simple IDCT
+ *
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2007 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+#define W1 22725 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W2 21407 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W3 19266 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W4 16383 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W5 12873 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W6 8867 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W7 4520 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define ROW_SHIFT 11
+#define COL_SHIFT 20
+
+#define W13 (W1 | (W3 << 16))
+#define W26 (W2 | (W6 << 16))
+#define W42 (W4 | (W2 << 16))
+#define W42n (-W4&0xffff | (-W2 << 16))
+#define W46 (W4 | (W6 << 16))
+#define W57 (W5 | (W7 << 16))
+
+/*
+ Compute partial IDCT of single row.
+ shift = left-shift amount
+ r0 = source address
+ r2 = row[2,0] <= 2 cycles
+ r3 = row[3,1]
+ ip = w42 <= 2 cycles
+
+ Output in registers r4--r11
+*/
+ .macro idct_row shift
+ ldr lr, =W46 /* lr = W4 | (W6 << 16) */
+ mov r1, #(1<<(\shift-1))
+ smlad r4, r2, ip, r1
+ smlsd r7, r2, ip, r1
+ ldr ip, =W13 /* ip = W1 | (W3 << 16) */
+ ldr r10,=W57 /* r10 = W5 | (W7 << 16) */
+ smlad r5, r2, lr, r1
+ smlsd r6, r2, lr, r1
+
+ smuad r8, r3, ip /* r8 = B0 = W1*row[1] + W3*row[3] */
+ smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */
+ ldr lr, [r0, #12] /* lr = row[7,5] */
+ pkhtb r2, ip, r10,asr #16 /* r2 = W7 | (W3 << 16) */
+ pkhbt r1, ip, r10,lsl #16 /* r1 = W1 | (W5 << 16) */
+ smusdx r9, r2, r3 /* r9 = -B1 = W7*row[3] - W3*row[1] */
+ smlad r8, lr, r10,r8 /* B0 += W5*row[5] + W7*row[7] */
+ smusdx r10,r3, r1 /* r10 = B2 = W5*row[1] - W1*row[3] */
+
+ ldr r3, =W42n /* r3 = -W4 | (-W2 << 16) */
+ smlad r10,lr, r2, r10 /* B2 += W7*row[5] + W3*row[7] */
+ ldr r2, [r0, #4] /* r2 = row[6,4] */
+ smlsdx r11,lr, ip, r11 /* B3 += W3*row[5] - W1*row[7] */
+ ldr ip, =W46 /* ip = W4 | (W6 << 16) */
+ smlad r9, lr, r1, r9 /* B1 -= W1*row[5] + W5*row[7] */
+
+ smlad r5, r2, r3, r5 /* A1 += -W4*row[4] - W2*row[6] */
+ smlsd r6, r2, r3, r6 /* A2 += -W4*row[4] + W2*row[6] */
+ smlad r4, r2, ip, r4 /* A0 += W4*row[4] + W6*row[6] */
+ smlsd r7, r2, ip, r7 /* A3 += W4*row[4] - W6*row[6] */
+ .endm
+
+/*
+ Compute partial IDCT of half row.
+ shift = left-shift amount
+ r2 = row[2,0]
+ r3 = row[3,1]
+ ip = w42
+
+ Output in registers r4--r11
+*/
+ .macro idct_row4 shift
+ ldr lr, =W46 /* lr = W4 | (W6 << 16) */
+ ldr r10,=W57 /* r10 = W5 | (W7 << 16) */
+ mov r1, #(1<<(\shift-1))
+ smlad r4, r2, ip, r1
+ smlsd r7, r2, ip, r1
+ ldr ip, =W13 /* ip = W1 | (W3 << 16) */
+ smlad r5, r2, lr, r1
+ smlsd r6, r2, lr, r1
+ smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */
+ smuad r8, r3, ip /* r8 = B0 = W1*row[1] + W3*row[3] */
+ pkhtb r2, ip, r10,asr #16 /* r2 = W7 | (W3 << 16) */
+ pkhbt r1, ip, r10,lsl #16 /* r1 = W1 | (W5 << 16) */
+ smusdx r9, r2, r3 /* r9 = -B1 = W7*row[3] - W3*row[1] */
+ smusdx r10,r3, r1 /* r10 = B2 = W5*row[1] - W1*row[3] */
+ .endm
+
+/*
+ Compute final part of IDCT single row without shift.
+ Input in registers r4--r11
+ Output in registers ip, r4--r6, lr, r8--r10
+*/
+ .macro idct_finish
+ add ip, r4, r8 /* ip = A0 + B0 */
+ sub lr, r4, r8 /* lr = A0 - B0 */
+ sub r4, r5, r9 /* r4 = A1 + B1 */
+ add r8, r5, r9 /* r8 = A1 - B1 */
+ add r5, r6, r10 /* r5 = A2 + B2 */
+ sub r9, r6, r10 /* r9 = A2 - B2 */
+ add r6, r7, r11 /* r6 = A3 + B3 */
+ sub r10,r7, r11 /* r10 = A3 - B3 */
+ .endm
+
+/*
+ Compute final part of IDCT single row.
+ shift = right-shift amount
+ Input/output in registers r4--r11
+*/
+ .macro idct_finish_shift shift
+ add r3, r4, r8 /* r3 = A0 + B0 */
+ sub r2, r4, r8 /* r2 = A0 - B0 */
+ mov r4, r3, asr #\shift
+ mov r8, r2, asr #\shift
+
+ sub r3, r5, r9 /* r3 = A1 + B1 */
+ add r2, r5, r9 /* r2 = A1 - B1 */
+ mov r5, r3, asr #\shift
+ mov r9, r2, asr #\shift
+
+ add r3, r6, r10 /* r3 = A2 + B2 */
+ sub r2, r6, r10 /* r2 = A2 - B2 */
+ mov r6, r3, asr #\shift
+ mov r10,r2, asr #\shift
+
+ add r3, r7, r11 /* r3 = A3 + B3 */
+ sub r2, r7, r11 /* r2 = A3 - B3 */
+ mov r7, r3, asr #\shift
+ mov r11,r2, asr #\shift
+ .endm
+
+/*
+ Compute final part of IDCT single row, saturating results at 8 bits.
+ shift = right-shift amount
+ Input/output in registers r4--r11
+*/
+ .macro idct_finish_shift_sat shift
+ add r3, r4, r8 /* r3 = A0 + B0 */
+ sub ip, r4, r8 /* ip = A0 - B0 */
+ usat r4, #8, r3, asr #\shift
+ usat r8, #8, ip, asr #\shift
+
+ sub r3, r5, r9 /* r3 = A1 + B1 */
+ add ip, r5, r9 /* ip = A1 - B1 */
+ usat r5, #8, r3, asr #\shift
+ usat r9, #8, ip, asr #\shift
+
+ add r3, r6, r10 /* r3 = A2 + B2 */
+ sub ip, r6, r10 /* ip = A2 - B2 */
+ usat r6, #8, r3, asr #\shift
+ usat r10,#8, ip, asr #\shift
+
+ add r3, r7, r11 /* r3 = A3 + B3 */
+ sub ip, r7, r11 /* ip = A3 - B3 */
+ usat r7, #8, r3, asr #\shift
+ usat r11,#8, ip, asr #\shift
+ .endm
+
+/*
+ Compute IDCT of single row, storing as column.
+ r0 = source
+ r1 = dest
+*/
+function idct_row_armv6
+ push {lr}
+
+ ldr lr, [r0, #12] /* lr = row[7,5] */
+ ldr ip, [r0, #4] /* ip = row[6,4] */
+ ldr r3, [r0, #8] /* r3 = row[3,1] */
+ ldr r2, [r0] /* r2 = row[2,0] */
+ orrs lr, lr, ip
+ itt eq
+ cmpeq lr, r3
+ cmpeq lr, r2, lsr #16
+ beq 1f
+ push {r1}
+ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
+ cmp lr, #0
+ beq 2f
+
+ idct_row ROW_SHIFT
+ b 3f
+
+2: idct_row4 ROW_SHIFT
+
+3: pop {r1}
+ idct_finish_shift ROW_SHIFT
+
+ strh r4, [r1]
+ strh r5, [r1, #(16*2)]
+ strh r6, [r1, #(16*4)]
+ strh r7, [r1, #(16*6)]
+ strh r11,[r1, #(16*1)]
+ strh r10,[r1, #(16*3)]
+ strh r9, [r1, #(16*5)]
+ strh r8, [r1, #(16*7)]
+
+ pop {pc}
+
+1: mov r2, r2, lsl #3
+ strh r2, [r1]
+ strh r2, [r1, #(16*2)]
+ strh r2, [r1, #(16*4)]
+ strh r2, [r1, #(16*6)]
+ strh r2, [r1, #(16*1)]
+ strh r2, [r1, #(16*3)]
+ strh r2, [r1, #(16*5)]
+ strh r2, [r1, #(16*7)]
+ pop {pc}
+endfunc
+
+/*
+ Compute IDCT of single column, read as row.
+ r0 = source
+ r1 = dest
+*/
+function idct_col_armv6
+ push {r1, lr}
+
+ ldr r2, [r0] /* r2 = row[2,0] */
+ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
+ ldr r3, [r0, #8] /* r3 = row[3,1] */
+ idct_row COL_SHIFT
+ pop {r1}
+ idct_finish_shift COL_SHIFT
+
+ strh r4, [r1]
+ strh r5, [r1, #(16*1)]
+ strh r6, [r1, #(16*2)]
+ strh r7, [r1, #(16*3)]
+ strh r11,[r1, #(16*4)]
+ strh r10,[r1, #(16*5)]
+ strh r9, [r1, #(16*6)]
+ strh r8, [r1, #(16*7)]
+
+ pop {pc}
+endfunc
+
+/*
+ Compute IDCT of single column, read as row, store saturated 8-bit.
+ r0 = source
+ r1 = dest
+ r2 = line size
+*/
+function idct_col_put_armv6
+ push {r1, r2, lr}
+
+ ldr r2, [r0] /* r2 = row[2,0] */
+ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
+ ldr r3, [r0, #8] /* r3 = row[3,1] */
+ idct_row COL_SHIFT
+ pop {r1, r2}
+ idct_finish_shift_sat COL_SHIFT
+
+ strb_post r4, r1, r2
+ strb_post r5, r1, r2
+ strb_post r6, r1, r2
+ strb_post r7, r1, r2
+ strb_post r11,r1, r2
+ strb_post r10,r1, r2
+ strb_post r9, r1, r2
+ strb_post r8, r1, r2
+
+ sub r1, r1, r2, lsl #3
+
+ pop {pc}
+endfunc
+
+/*
+ Compute IDCT of single column, read as row, add/store saturated 8-bit.
+ r0 = source
+ r1 = dest
+ r2 = line size
+*/
+function idct_col_add_armv6
+ push {r1, r2, lr}
+
+ ldr r2, [r0] /* r2 = row[2,0] */
+ ldr ip, =W42 /* ip = W4 | (W2 << 16) */
+ ldr r3, [r0, #8] /* r3 = row[3,1] */
+ idct_row COL_SHIFT
+ pop {r1, r2}
+ idct_finish
+
+ ldrb r3, [r1]
+ ldrb r7, [r1, r2]
+ ldrb r11,[r1, r2, lsl #2]
+ add ip, r3, ip, asr #COL_SHIFT
+ usat ip, #8, ip
+ add r4, r7, r4, asr #COL_SHIFT
+ strb_post ip, r1, r2
+ ldrb ip, [r1, r2]
+ usat r4, #8, r4
+ ldrb r11,[r1, r2, lsl #2]
+ add r5, ip, r5, asr #COL_SHIFT
+ usat r5, #8, r5
+ strb_post r4, r1, r2
+ ldrb r3, [r1, r2]
+ ldrb ip, [r1, r2, lsl #2]
+ strb_post r5, r1, r2
+ ldrb r7, [r1, r2]
+ ldrb r4, [r1, r2, lsl #2]
+ add r6, r3, r6, asr #COL_SHIFT
+ usat r6, #8, r6
+ add r10,r7, r10,asr #COL_SHIFT
+ usat r10,#8, r10
+ add r9, r11,r9, asr #COL_SHIFT
+ usat r9, #8, r9
+ add r8, ip, r8, asr #COL_SHIFT
+ usat r8, #8, r8
+ add lr, r4, lr, asr #COL_SHIFT
+ usat lr, #8, lr
+ strb_post r6, r1, r2
+ strb_post r10,r1, r2
+ strb_post r9, r1, r2
+ strb_post r8, r1, r2
+ strb_post lr, r1, r2
+
+ sub r1, r1, r2, lsl #3
+
+ pop {pc}
+endfunc
+
+/*
+ Compute 8 IDCT row transforms.
+ func = IDCT row->col function
+ width = width of columns in bytes
+*/
+ .macro idct_rows func width
+ bl \func
+ add r0, r0, #(16*2)
+ add r1, r1, #\width
+ bl \func
+ add r0, r0, #(16*2)
+ add r1, r1, #\width
+ bl \func
+ add r0, r0, #(16*2)
+ add r1, r1, #\width
+ bl \func
+ sub r0, r0, #(16*5)
+ add r1, r1, #\width
+ bl \func
+ add r0, r0, #(16*2)
+ add r1, r1, #\width
+ bl \func
+ add r0, r0, #(16*2)
+ add r1, r1, #\width
+ bl \func
+ add r0, r0, #(16*2)
+ add r1, r1, #\width
+ bl \func
+
+ sub r0, r0, #(16*7)
+ .endm
+
+/* void ff_simple_idct_armv6(int16_t *data); */
+function ff_simple_idct_armv6, export=1
+ push {r4-r11, lr}
+ sub sp, sp, #128
+
+ mov r1, sp
+ idct_rows idct_row_armv6, 2
+ mov r1, r0
+ mov r0, sp
+ idct_rows idct_col_armv6, 2
+
+ add sp, sp, #128
+ pop {r4-r11, pc}
+endfunc
+
+/* ff_simple_idct_add_armv6(uint8_t *dest, ptrdiff_t line_size, int16_t *data); */
+function ff_simple_idct_add_armv6, export=1
+ push {r0, r1, r4-r11, lr}
+ sub sp, sp, #128
+
+ mov r0, r2
+ mov r1, sp
+ idct_rows idct_row_armv6, 2
+ mov r0, sp
+ ldr r1, [sp, #128]
+ ldr r2, [sp, #(128+4)]
+ idct_rows idct_col_add_armv6, 1
+
+ add sp, sp, #(128+8)
+ pop {r4-r11, pc}
+endfunc
+
+/* ff_simple_idct_put_armv6(uint8_t *dest, ptrdiff_t line_size, int16_t *data); */
+function ff_simple_idct_put_armv6, export=1
+ push {r0, r1, r4-r11, lr}
+ sub sp, sp, #128
+
+ mov r0, r2
+ mov r1, sp
+ idct_rows idct_row_armv6, 2
+ mov r0, sp
+ ldr r1, [sp, #128]
+ ldr r2, [sp, #(128+4)]
+ idct_rows idct_col_put_armv6, 1
+
+ add sp, sp, #(128+8)
+ pop {r4-r11, pc}
+endfunc
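
The idct_finish/idct_finish_shift macros above perform the final butterfly of the 1-D transform: the even-part accumulators A0..A3 and the odd-part accumulators B0..B3 combine pairwise, the sums giving outputs 0..3 and the differences giving outputs 7..4 (the stores then reorder them for the transposed temp layout). A compact C sketch of that step, with illustrative names:

#include <stdint.h>

static void idct_finish_shift(const int32_t a[4], const int32_t b[4],
                              int shift, int16_t out[8])
{
    for (int i = 0; i < 4; i++) {
        out[i]     = (int16_t)((a[i] + b[i]) >> shift);
        out[7 - i] = (int16_t)((a[i] - b[i]) >> shift);
    }
}
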
diff --git a/media/ffvpx/libavcodec/arm/simple_idct_neon.S b/media/ffvpx/libavcodec/arm/simple_idct_neon.S
new file mode 100644
index 0000000000..726d4cbefa
--- /dev/null
+++ b/media/ffvpx/libavcodec/arm/simple_idct_neon.S
@@ -0,0 +1,375 @@
+/*
+ * ARM NEON IDCT
+ *
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * Based on Simple IDCT
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+
+#define W1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W4c ((1<<(COL_SHIFT-1))/W4)
+#define ROW_SHIFT 11
+#define COL_SHIFT 20
+
+#define w1 d0[0]
+#define w2 d0[1]
+#define w3 d0[2]
+#define w4 d0[3]
+#define w5 d1[0]
+#define w6 d1[1]
+#define w7 d1[2]
+#define w4c d1[3]
+
+ .macro idct_col4_top
+ vmull.s16 q7, d6, w2 /* q7 = W2 * col[2] */
+ vmull.s16 q8, d6, w6 /* q8 = W6 * col[2] */
+ vmull.s16 q9, d4, w1 /* q9 = W1 * col[1] */
+ vadd.i32 q11, q15, q7
+ vmull.s16 q10, d4, w3 /* q10 = W3 * col[1] */
+ vadd.i32 q12, q15, q8
+ vmull.s16 q5, d4, w5 /* q5 = W5 * col[1] */
+ vsub.i32 q13, q15, q8
+ vmull.s16 q6, d4, w7 /* q6 = W7 * col[1] */
+ vsub.i32 q14, q15, q7
+
+ vmlal.s16 q9, d8, w3 /* q9 += W3 * col[3] */
+ vmlsl.s16 q10, d8, w7 /* q10 -= W7 * col[3] */
+ vmlsl.s16 q5, d8, w1 /* q5 -= W1 * col[3] */
+ vmlsl.s16 q6, d8, w5 /* q6 -= W5 * col[3] */
+ .endm
+
+ .text
+ .align 6
+
+function idct_row4_pld_neon
+ pld [r0]
+ add r3, r0, r1, lsl #2
+ pld [r0, r1]
+ pld [r0, r1, lsl #1]
+A pld [r3, -r1]
+ pld [r3]
+ pld [r3, r1]
+ add r3, r3, r1, lsl #1
+ pld [r3]
+ pld [r3, r1]
+endfunc
+
+function idct_row4_neon
+ vmov.i32 q15, #(1<<(ROW_SHIFT-1))
+ vld1.64 {d2-d5}, [r2,:128]!
+ vmlal.s16 q15, d2, w4 /* q15 += W4 * col[0] */
+ vld1.64 {d6,d7}, [r2,:128]!
+ vorr d10, d3, d5
+ vld1.64 {d8,d9}, [r2,:128]!
+ add r2, r2, #-64
+
+ vorr d11, d7, d9
+ vorr d10, d10, d11
+ vmov r3, r4, d10
+
+ idct_col4_top
+
+ orrs r3, r3, r4
+ beq 1f
+
+ vmull.s16 q7, d3, w4 /* q7 = W4 * col[4] */
+ vmlal.s16 q9, d5, w5 /* q9 += W5 * col[5] */
+ vmlsl.s16 q10, d5, w1 /* q10 -= W1 * col[5] */
+ vmull.s16 q8, d7, w2 /* q8 = W2 * col[6] */
+ vmlal.s16 q5, d5, w7 /* q5 += W7 * col[5] */
+ vadd.i32 q11, q11, q7
+ vsub.i32 q12, q12, q7
+ vsub.i32 q13, q13, q7
+ vadd.i32 q14, q14, q7
+ vmlal.s16 q6, d5, w3 /* q6 += W3 * col[5] */
+ vmull.s16 q7, d7, w6 /* q7 = W6 * col[6] */
+ vmlal.s16 q9, d9, w7
+ vmlsl.s16 q10, d9, w5
+ vmlal.s16 q5, d9, w3
+ vmlsl.s16 q6, d9, w1
+ vadd.i32 q11, q11, q7
+ vsub.i32 q12, q12, q8
+ vadd.i32 q13, q13, q8
+ vsub.i32 q14, q14, q7
+
+1: vadd.i32 q3, q11, q9
+ vadd.i32 q4, q12, q10
+ vshrn.i32 d2, q3, #ROW_SHIFT
+ vshrn.i32 d4, q4, #ROW_SHIFT
+ vadd.i32 q7, q13, q5
+ vadd.i32 q8, q14, q6
+ vtrn.16 d2, d4
+ vshrn.i32 d6, q7, #ROW_SHIFT
+ vshrn.i32 d8, q8, #ROW_SHIFT
+ vsub.i32 q14, q14, q6
+ vsub.i32 q11, q11, q9
+ vtrn.16 d6, d8
+ vsub.i32 q13, q13, q5
+ vshrn.i32 d3, q14, #ROW_SHIFT
+ vtrn.32 d2, d6
+ vsub.i32 q12, q12, q10
+ vtrn.32 d4, d8
+ vshrn.i32 d5, q13, #ROW_SHIFT
+ vshrn.i32 d7, q12, #ROW_SHIFT
+ vshrn.i32 d9, q11, #ROW_SHIFT
+
+ vtrn.16 d3, d5
+ vtrn.16 d7, d9
+ vtrn.32 d3, d7
+ vtrn.32 d5, d9
+
+ vst1.64 {d2-d5}, [r2,:128]!
+ vst1.64 {d6-d9}, [r2,:128]!
+
+ bx lr
+endfunc
+
+function idct_col4_neon
+ mov ip, #16
+ vld1.64 {d2}, [r2,:64], ip /* d2 = col[0] */
+ vdup.16 d30, w4c
+        vld1.64         {d4}, [r2,:64], ip  /* d4 = col[1] */
+ vadd.i16 d30, d30, d2
+        vld1.64         {d6}, [r2,:64], ip  /* d6 = col[2] */
+ vmull.s16 q15, d30, w4 /* q15 = W4*(col[0]+(1<<COL_SHIFT-1)/W4)*/
+        vld1.64         {d8}, [r2,:64], ip  /* d8 = col[3] */
+
+ ldrd r4, r5, [r2]
+ ldrd r6, r7, [r2, #16]
+ orrs r4, r4, r5
+
+ idct_col4_top
+ it eq
+ addeq r2, r2, #16
+ beq 1f
+
+        vld1.64         {d3}, [r2,:64], ip  /* d3 = col[4] */
+ vmull.s16 q7, d3, w4 /* q7 = W4 * col[4] */
+ vadd.i32 q11, q11, q7
+ vsub.i32 q12, q12, q7
+ vsub.i32 q13, q13, q7
+ vadd.i32 q14, q14, q7
+
+1: orrs r6, r6, r7
+ ldrd r4, r5, [r2, #16]
+ it eq
+ addeq r2, r2, #16
+ beq 2f
+
+        vld1.64         {d5}, [r2,:64], ip  /* d5 = col[5] */
+ vmlal.s16 q9, d5, w5 /* q9 += W5 * col[5] */
+ vmlsl.s16 q10, d5, w1 /* q10 -= W1 * col[5] */
+ vmlal.s16 q5, d5, w7 /* q5 += W7 * col[5] */
+ vmlal.s16 q6, d5, w3 /* q6 += W3 * col[5] */
+
+2: orrs r4, r4, r5
+ ldrd r4, r5, [r2, #16]
+ it eq
+ addeq r2, r2, #16
+ beq 3f
+
+        vld1.64         {d7}, [r2,:64], ip  /* d7 = col[6] */
+ vmull.s16 q7, d7, w6 /* q7 = W6 * col[6] */
+ vmull.s16 q8, d7, w2 /* q8 = W2 * col[6] */
+ vadd.i32 q11, q11, q7
+ vsub.i32 q14, q14, q7
+ vsub.i32 q12, q12, q8
+ vadd.i32 q13, q13, q8
+
+3: orrs r4, r4, r5
+ it eq
+ addeq r2, r2, #16
+ beq 4f
+
+ vld1.64 {d9}, [r2,:64], ip /* d9 = col[7] */
+ vmlal.s16 q9, d9, w7
+ vmlsl.s16 q10, d9, w5
+ vmlal.s16 q5, d9, w3
+ vmlsl.s16 q6, d9, w1
+
+4: vaddhn.i32 d2, q11, q9
+ vaddhn.i32 d3, q12, q10
+ vaddhn.i32 d4, q13, q5
+ vaddhn.i32 d5, q14, q6
+ vsubhn.i32 d9, q11, q9
+ vsubhn.i32 d8, q12, q10
+ vsubhn.i32 d7, q13, q5
+ vsubhn.i32 d6, q14, q6
+
+ bx lr
+endfunc
+
+ .align 6
+
+function idct_col4_st8_neon
+ vqshrun.s16 d2, q1, #COL_SHIFT-16
+ vqshrun.s16 d3, q2, #COL_SHIFT-16
+ vqshrun.s16 d4, q3, #COL_SHIFT-16
+ vqshrun.s16 d5, q4, #COL_SHIFT-16
+ vst1.32 {d2[0]}, [r0,:32], r1
+ vst1.32 {d2[1]}, [r0,:32], r1
+ vst1.32 {d3[0]}, [r0,:32], r1
+ vst1.32 {d3[1]}, [r0,:32], r1
+ vst1.32 {d4[0]}, [r0,:32], r1
+ vst1.32 {d4[1]}, [r0,:32], r1
+ vst1.32 {d5[0]}, [r0,:32], r1
+ vst1.32 {d5[1]}, [r0,:32], r1
+
+ bx lr
+endfunc
+
+const idct_coeff_neon, align=4
+ .short W1, W2, W3, W4, W5, W6, W7, W4c
+endconst
+
+ .macro idct_start data
+ push {r4-r7, lr}
+ pld [\data]
+ pld [\data, #64]
+ vpush {d8-d15}
+ movrel r3, idct_coeff_neon
+ vld1.64 {d0,d1}, [r3,:128]
+ .endm
+
+ .macro idct_end
+ vpop {d8-d15}
+ pop {r4-r7, pc}
+ .endm
+
+/* void ff_simple_idct_put_neon(uint8_t *dst, ptrdiff_t line_size, int16_t *data); */
+function ff_simple_idct_put_neon, export=1
+ idct_start r2
+
+ bl idct_row4_pld_neon
+ bl idct_row4_neon
+ add r2, r2, #-128
+ bl idct_col4_neon
+ bl idct_col4_st8_neon
+ sub r0, r0, r1, lsl #3
+ add r0, r0, #4
+ add r2, r2, #-120
+ bl idct_col4_neon
+ bl idct_col4_st8_neon
+
+ idct_end
+endfunc
+
+ .align 6
+
+function idct_col4_add8_neon
+ mov ip, r0
+
+ vld1.32 {d10[0]}, [r0,:32], r1
+ vshr.s16 q1, q1, #COL_SHIFT-16
+ vld1.32 {d10[1]}, [r0,:32], r1
+ vshr.s16 q2, q2, #COL_SHIFT-16
+ vld1.32 {d11[0]}, [r0,:32], r1
+ vshr.s16 q3, q3, #COL_SHIFT-16
+ vld1.32 {d11[1]}, [r0,:32], r1
+ vshr.s16 q4, q4, #COL_SHIFT-16
+ vld1.32 {d12[0]}, [r0,:32], r1
+ vaddw.u8 q1, q1, d10
+ vld1.32 {d12[1]}, [r0,:32], r1
+ vaddw.u8 q2, q2, d11
+ vld1.32 {d13[0]}, [r0,:32], r1
+ vqmovun.s16 d2, q1
+ vld1.32 {d13[1]}, [r0,:32], r1
+ vaddw.u8 q3, q3, d12
+ vst1.32 {d2[0]}, [ip,:32], r1
+ vqmovun.s16 d3, q2
+ vst1.32 {d2[1]}, [ip,:32], r1
+ vaddw.u8 q4, q4, d13
+ vst1.32 {d3[0]}, [ip,:32], r1
+ vqmovun.s16 d4, q3
+ vst1.32 {d3[1]}, [ip,:32], r1
+ vqmovun.s16 d5, q4
+ vst1.32 {d4[0]}, [ip,:32], r1
+ vst1.32 {d4[1]}, [ip,:32], r1
+ vst1.32 {d5[0]}, [ip,:32], r1
+ vst1.32 {d5[1]}, [ip,:32], r1
+
+ bx lr
+endfunc
+
+/* void ff_simple_idct_add_neon(uint8_t *dst, ptrdiff_t line_size, int16_t *data); */
+function ff_simple_idct_add_neon, export=1
+ idct_start r2
+
+ bl idct_row4_pld_neon
+ bl idct_row4_neon
+ add r2, r2, #-128
+ bl idct_col4_neon
+ bl idct_col4_add8_neon
+ sub r0, r0, r1, lsl #3
+ add r0, r0, #4
+ add r2, r2, #-120
+ bl idct_col4_neon
+ bl idct_col4_add8_neon
+
+ idct_end
+endfunc
+
+ .align 6
+
+function idct_col4_st16_neon
+ mov ip, #16
+
+ vshr.s16 q1, q1, #COL_SHIFT-16
+ vshr.s16 q2, q2, #COL_SHIFT-16
+ vst1.64 {d2}, [r2,:64], ip
+ vshr.s16 q3, q3, #COL_SHIFT-16
+ vst1.64 {d3}, [r2,:64], ip
+ vshr.s16 q4, q4, #COL_SHIFT-16
+ vst1.64 {d4}, [r2,:64], ip
+ vst1.64 {d5}, [r2,:64], ip
+ vst1.64 {d6}, [r2,:64], ip
+ vst1.64 {d7}, [r2,:64], ip
+ vst1.64 {d8}, [r2,:64], ip
+ vst1.64 {d9}, [r2,:64], ip
+
+ bx lr
+endfunc
+
+/* void ff_simple_idct_neon(int16_t *data); */
+function ff_simple_idct_neon, export=1
+ idct_start r0
+
+ mov r2, r0
+ bl idct_row4_neon
+ bl idct_row4_neon
+ add r2, r2, #-128
+ bl idct_col4_neon
+ add r2, r2, #-128
+ bl idct_col4_st16_neon
+ add r2, r2, #-120
+ bl idct_col4_neon
+ add r2, r2, #-128
+ bl idct_col4_st16_neon
+
+ idct_end
+endfunc
diff --git a/media/ffvpx/libavcodec/atsc_a53.c b/media/ffvpx/libavcodec/atsc_a53.c
new file mode 100644
index 0000000000..29ec71bc5f
--- /dev/null
+++ b/media/ffvpx/libavcodec/atsc_a53.c
@@ -0,0 +1,119 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "atsc_a53.h"
+#include "get_bits.h"
+
+int ff_alloc_a53_sei(const AVFrame *frame, size_t prefix_len,
+ void **data, size_t *sei_size)
+{
+ AVFrameSideData *side_data = NULL;
+ uint8_t *sei_data;
+
+ if (frame)
+ side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
+
+ if (!side_data) {
+ *data = NULL;
+ return 0;
+ }
+
+ *sei_size = side_data->size + 11;
+ *data = av_mallocz(*sei_size + prefix_len);
+ if (!*data)
+ return AVERROR(ENOMEM);
+ sei_data = (uint8_t*)*data + prefix_len;
+
+ // country code
+ sei_data[0] = 181;
+ sei_data[1] = 0;
+ sei_data[2] = 49;
+
+ /**
+ * 'GA94' is standard in North America for ATSC, but hard coding
+ * this style may not be the right thing to do -- other formats
+ * do exist. This information is not available in the side_data
+ * so we are going with this right now.
+ */
+ AV_WL32(sei_data + 3, MKTAG('G', 'A', '9', '4'));
+ sei_data[7] = 3;
+ sei_data[8] = ((side_data->size/3) & 0x1f) | 0x40;
+ sei_data[9] = 0;
+
+ memcpy(sei_data + 10, side_data->data, side_data->size);
+
+ sei_data[side_data->size+10] = 255;
+
+ return 0;
+}
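+/*
+ * For reference, the fixed 11-byte overhead added to side_data->size above
+ * corresponds to the ATSC A/53 user-data framing written by this function:
+ * country_code (1 byte, 0xB5), provider_code (2 bytes, 0x0031),
+ * user_identifier (4 bytes, 'GA94'), user_data_type_code (1 byte, 0x03),
+ * the flags/cc_count byte, one reserved byte, and the trailing marker byte
+ * (0xFF) that follows the cc_data.
+ */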
+
+int ff_parse_a53_cc(AVBufferRef **pbuf, const uint8_t *data, int size)
+{
+ AVBufferRef *buf = *pbuf;
+ GetBitContext gb;
+ size_t new_size, old_size = buf ? buf->size : 0;
+ int ret, cc_count;
+
+ if (size < 3)
+ return AVERROR_INVALIDDATA;
+
+ ret = init_get_bits8(&gb, data, size);
+ if (ret < 0)
+ return ret;
+
+ if (get_bits(&gb, 8) != 0x3) // user_data_type_code
+ return 0;
+
+ skip_bits(&gb, 1); // reserved
+ if (!get_bits(&gb, 1)) // process_cc_data_flag
+ return 0;
+
+ skip_bits(&gb, 1); // zero bit
+ cc_count = get_bits(&gb, 5);
+ if (!cc_count)
+ return 0;
+
+ skip_bits(&gb, 8); // reserved
+
+ /* 3 bytes per CC plus one byte marker_bits at the end */
+ if (cc_count * 3 >= (get_bits_left(&gb) >> 3))
+ return AVERROR_INVALIDDATA;
+
+ new_size = (old_size + cc_count * 3);
+
+ if (new_size > INT_MAX)
+ return AVERROR_INVALIDDATA;
+
+ /* Allow merging of the cc data from two fields. */
+ ret = av_buffer_realloc(pbuf, new_size);
+ if (ret < 0)
+ return ret;
+
+ buf = *pbuf;
+ /* Use of av_buffer_realloc assumes buffer is writeable */
+ for (int i = 0; i < cc_count; i++) {
+ buf->data[old_size++] = get_bits(&gb, 8);
+ buf->data[old_size++] = get_bits(&gb, 8);
+ buf->data[old_size++] = get_bits(&gb, 8);
+ }
+
+ return cc_count;
+}
diff --git a/media/ffvpx/libavcodec/atsc_a53.h b/media/ffvpx/libavcodec/atsc_a53.h
new file mode 100644
index 0000000000..0622a55549
--- /dev/null
+++ b/media/ffvpx/libavcodec/atsc_a53.h
@@ -0,0 +1,56 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ATSC_A53_H
+#define AVCODEC_ATSC_A53_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/buffer.h"
+#include "libavutil/frame.h"
+
+/**
+ * Check AVFrame for A53 side data and allocate and fill SEI message with A53 info
+ *
+ * @param frame Raw frame to get A53 side data from
+ * @param prefix_len Number of bytes to allocate before SEI message
+ * @param data Pointer to a variable to store allocated memory
+ * Upon return the variable will hold NULL on error or if frame has no A53 info.
+ * Otherwise it will point to prefix_len uninitialized bytes followed by
+ *                   the *sei_size bytes of the SEI message
+ * @param sei_size Pointer to a variable to store generated SEI message length
+ * @return Zero on success, negative error code on failure
+ */
+int ff_alloc_a53_sei(const AVFrame *frame, size_t prefix_len,
+ void **data, size_t *sei_size);
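+/*
+ * Minimal usage sketch (hypothetical caller, error handling trimmed) for the
+ * function above:
+ *
+ *     size_t sei_size;
+ *     void *sei = NULL;
+ *     int ret = ff_alloc_a53_sei(frame, 0, &sei, &sei_size);
+ *     if (ret < 0)
+ *         return ret;
+ *     if (sei) {
+ *         // write sei_size bytes starting at sei into the bitstream, then:
+ *         av_free(sei);
+ *     }
+ */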
+
+/**
+ * Parse a data array for ATSC A53 Part 4 Closed Captions and store them in an AVBufferRef.
+ *
+ * @param pbuf Pointer to an AVBufferRef to append the closed captions. *pbuf may be NULL, in
+ * which case a new buffer will be allocated and put in it.
+ * @param data The data array containing the raw A53 data.
+ * @param size Size of the data array in bytes.
+ *
+ * @return Number of closed captions parsed on success, negative error code on failure.
+ * If no Closed Captions are parsed, *pbuf is untouched.
+ */
+int ff_parse_a53_cc(AVBufferRef **pbuf, const uint8_t *data, int size);
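+/*
+ * Illustrative call pattern (hypothetical decoder-side caller): a53_buf would
+ * typically live in the decoder context so captions from both fields of a
+ * frame can be merged.
+ *
+ *     AVBufferRef *a53_buf = NULL;
+ *     int ret = ff_parse_a53_cc(&a53_buf, payload, payload_size);
+ *     if (ret > 0) {
+ *         // a53_buf now holds ret * 3 additional bytes of cc_data
+ *     }
+ */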
+
+#endif /* AVCODEC_ATSC_A53_H */
diff --git a/media/ffvpx/libavcodec/av1.h b/media/ffvpx/libavcodec/av1.h
new file mode 100644
index 0000000000..384f7cddc7
--- /dev/null
+++ b/media/ffvpx/libavcodec/av1.h
@@ -0,0 +1,184 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AV1 common definitions
+ */
+
+#ifndef AVCODEC_AV1_H
+#define AVCODEC_AV1_H
+
+// OBU types (section 6.2.2).
+typedef enum {
+ // 0 reserved.
+ AV1_OBU_SEQUENCE_HEADER = 1,
+ AV1_OBU_TEMPORAL_DELIMITER = 2,
+ AV1_OBU_FRAME_HEADER = 3,
+ AV1_OBU_TILE_GROUP = 4,
+ AV1_OBU_METADATA = 5,
+ AV1_OBU_FRAME = 6,
+ AV1_OBU_REDUNDANT_FRAME_HEADER = 7,
+ AV1_OBU_TILE_LIST = 8,
+ // 9-14 reserved.
+ AV1_OBU_PADDING = 15,
+} AV1_OBU_Type;
+
+// Metadata types (section 6.7.1).
+enum {
+ AV1_METADATA_TYPE_HDR_CLL = 1,
+ AV1_METADATA_TYPE_HDR_MDCV = 2,
+ AV1_METADATA_TYPE_SCALABILITY = 3,
+ AV1_METADATA_TYPE_ITUT_T35 = 4,
+ AV1_METADATA_TYPE_TIMECODE = 5,
+};
+
+// Frame types (section 6.8.2).
+enum {
+ AV1_FRAME_KEY = 0,
+ AV1_FRAME_INTER = 1,
+ AV1_FRAME_INTRA_ONLY = 2,
+ AV1_FRAME_SWITCH = 3,
+};
+
+// Reference frames (section 6.10.24).
+enum {
+ AV1_REF_FRAME_INTRA = 0,
+ AV1_REF_FRAME_LAST = 1,
+ AV1_REF_FRAME_LAST2 = 2,
+ AV1_REF_FRAME_LAST3 = 3,
+ AV1_REF_FRAME_GOLDEN = 4,
+ AV1_REF_FRAME_BWDREF = 5,
+ AV1_REF_FRAME_ALTREF2 = 6,
+ AV1_REF_FRAME_ALTREF = 7,
+};
+
+// Constants (section 3).
+enum {
+ AV1_MAX_OPERATING_POINTS = 32,
+
+ AV1_MAX_SB_SIZE = 128,
+ AV1_MI_SIZE = 4,
+
+ AV1_MAX_TILE_WIDTH = 4096,
+ AV1_MAX_TILE_AREA = 4096 * 2304,
+ AV1_MAX_TILE_ROWS = 64,
+ AV1_MAX_TILE_COLS = 64,
+
+ AV1_NUM_REF_FRAMES = 8,
+ AV1_REFS_PER_FRAME = 7,
+ AV1_TOTAL_REFS_PER_FRAME = 8,
+ AV1_PRIMARY_REF_NONE = 7,
+
+ AV1_MAX_SEGMENTS = 8,
+ AV1_SEG_LVL_MAX = 8,
+
+ AV1_SEG_LVL_ALT_Q = 0,
+ AV1_SEG_LVL_ALT_LF_Y_V = 1,
+ AV1_SEG_LVL_REF_FRAME = 5,
+ AV1_SEG_LVL_SKIP = 6,
+ AV1_SEG_LVL_GLOBAL_MV = 7,
+
+ AV1_SELECT_SCREEN_CONTENT_TOOLS = 2,
+ AV1_SELECT_INTEGER_MV = 2,
+
+ AV1_SUPERRES_NUM = 8,
+ AV1_SUPERRES_DENOM_MIN = 9,
+
+ AV1_INTERPOLATION_FILTER_SWITCHABLE = 4,
+
+ AV1_GM_ABS_ALPHA_BITS = 12,
+ AV1_GM_ALPHA_PREC_BITS = 15,
+ AV1_GM_ABS_TRANS_ONLY_BITS = 9,
+ AV1_GM_TRANS_ONLY_PREC_BITS = 3,
+ AV1_GM_ABS_TRANS_BITS = 12,
+ AV1_GM_TRANS_PREC_BITS = 6,
+ AV1_WARPEDMODEL_PREC_BITS = 16,
+
+ AV1_WARP_MODEL_IDENTITY = 0,
+ AV1_WARP_MODEL_TRANSLATION = 1,
+ AV1_WARP_MODEL_ROTZOOM = 2,
+ AV1_WARP_MODEL_AFFINE = 3,
+ AV1_WARP_PARAM_REDUCE_BITS = 6,
+
+ AV1_DIV_LUT_BITS = 8,
+ AV1_DIV_LUT_PREC_BITS = 14,
+ AV1_DIV_LUT_NUM = 257,
+
+ AV1_MAX_LOOP_FILTER = 63,
+};
+
+
+// The main colour configuration information uses the same ISO/IEC 23001-8
+// (H.273) enums as FFmpeg does, so separate definitions are not required.
+
+// Chroma sample position.
+enum {
+ AV1_CSP_UNKNOWN = 0,
+ AV1_CSP_VERTICAL = 1, // -> AVCHROMA_LOC_LEFT.
+ AV1_CSP_COLOCATED = 2, // -> AVCHROMA_LOC_TOPLEFT.
+};
+
+// Scalability modes (section 6.7.5)
+enum {
+ AV1_SCALABILITY_L1T2 = 0,
+ AV1_SCALABILITY_L1T3 = 1,
+ AV1_SCALABILITY_L2T1 = 2,
+ AV1_SCALABILITY_L2T2 = 3,
+ AV1_SCALABILITY_L2T3 = 4,
+ AV1_SCALABILITY_S2T1 = 5,
+ AV1_SCALABILITY_S2T2 = 6,
+ AV1_SCALABILITY_S2T3 = 7,
+ AV1_SCALABILITY_L2T1h = 8,
+ AV1_SCALABILITY_L2T2h = 9,
+ AV1_SCALABILITY_L2T3h = 10,
+ AV1_SCALABILITY_S2T1h = 11,
+ AV1_SCALABILITY_S2T2h = 12,
+ AV1_SCALABILITY_S2T3h = 13,
+ AV1_SCALABILITY_SS = 14,
+ AV1_SCALABILITY_L3T1 = 15,
+ AV1_SCALABILITY_L3T2 = 16,
+ AV1_SCALABILITY_L3T3 = 17,
+ AV1_SCALABILITY_S3T1 = 18,
+ AV1_SCALABILITY_S3T2 = 19,
+ AV1_SCALABILITY_S3T3 = 20,
+ AV1_SCALABILITY_L3T2_KEY = 21,
+ AV1_SCALABILITY_L3T3_KEY = 22,
+ AV1_SCALABILITY_L4T5_KEY = 23,
+ AV1_SCALABILITY_L4T7_KEY = 24,
+ AV1_SCALABILITY_L3T2_KEY_SHIFT = 25,
+ AV1_SCALABILITY_L3T3_KEY_SHIFT = 26,
+ AV1_SCALABILITY_L4T5_KEY_SHIFT = 27,
+ AV1_SCALABILITY_L4T7_KEY_SHIFT = 28,
+};
+
+// Frame Restoration types (section 6.10.15)
+enum {
+ AV1_RESTORE_NONE = 0,
+ AV1_RESTORE_WIENER = 1,
+ AV1_RESTORE_SGRPROJ = 2,
+ AV1_RESTORE_SWITCHABLE = 3,
+};
+
+// Sequence Headers are actually unbounded because one can use
+// an arbitrary number of leading zeroes when encoding via uvlc.
+// The following estimate is based on using the lowest number
+// of bits for uvlc encoding.
+#define AV1_SANE_SEQUENCE_HEADER_MAX_BITS 3138
+
+#endif /* AVCODEC_AV1_H */
diff --git a/media/ffvpx/libavcodec/av1_frame_split_bsf.c b/media/ffvpx/libavcodec/av1_frame_split_bsf.c
new file mode 100644
index 0000000000..5f6a40316c
--- /dev/null
+++ b/media/ffvpx/libavcodec/av1_frame_split_bsf.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2019 James Almer <jamrial@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * This bitstream filter splits AV1 Temporal Units into packets containing
+ * just one frame, plus any leading and trailing OBUs that may be present at
+ * the beginning or end, respectively.
+ *
+ * Temporal Units already containing only one frame will be passed through
+ * unchanged. When splitting can't be performed, the Temporal Unit will be
+ * passed through containing only the remaining OBUs starting from the first
+ * one after the last successfully split frame.
+ */
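+
+/*
+ * Illustrative sketch (not part of this file) of driving the filter through
+ * the public bitstream filter API, assuming it is registered under the name
+ * used below; error handling is omitted and "in_pkt", "out_pkt" and
+ * "stream_codecpar" are hypothetical caller-owned objects:
+ *
+ *     const AVBitStreamFilter *f = av_bsf_get_by_name("av1_frame_split");
+ *     AVBSFContext *bsf;
+ *     av_bsf_alloc(f, &bsf);
+ *     avcodec_parameters_copy(bsf->par_in, stream_codecpar);
+ *     av_bsf_init(bsf);
+ *     av_bsf_send_packet(bsf, in_pkt);
+ *     while (av_bsf_receive_packet(bsf, out_pkt) == 0) {
+ *         // each out_pkt holds a single frame plus leading/trailing OBUs
+ *         av_packet_unref(out_pkt);
+ *     }
+ */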
+
+#include "libavutil/avassert.h"
+
+#include "bsf.h"
+#include "bsf_internal.h"
+#include "cbs.h"
+#include "cbs_av1.h"
+
+typedef struct AV1FSplitContext {
+ AVPacket *buffer_pkt;
+ CodedBitstreamContext *cbc;
+ CodedBitstreamFragment temporal_unit;
+
+ int nb_frames;
+ int cur_frame;
+ int cur_frame_idx;
+ int last_frame_idx;
+} AV1FSplitContext;
+
+static int av1_frame_split_filter(AVBSFContext *ctx, AVPacket *out)
+{
+ AV1FSplitContext *s = ctx->priv_data;
+ CodedBitstreamFragment *td = &s->temporal_unit;
+ int i, ret;
+ int split = !!s->buffer_pkt->data;
+
+ if (!s->buffer_pkt->data) {
+ int nb_frames = 0;
+
+ ret = ff_bsf_get_packet_ref(ctx, s->buffer_pkt);
+ if (ret < 0)
+ return ret;
+
+ ret = ff_cbs_read_packet(s->cbc, td, s->buffer_pkt);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_WARNING, "Failed to parse temporal unit.\n");
+ goto passthrough;
+ }
+
+ for (i = 0; i < td->nb_units; i++) {
+ CodedBitstreamUnit *unit = &td->units[i];
+
+ if (unit->type == AV1_OBU_FRAME ||
+ unit->type == AV1_OBU_FRAME_HEADER)
+ nb_frames++;
+ else if (unit->type == AV1_OBU_TILE_LIST) {
+ av_log(ctx, AV_LOG_VERBOSE, "Large scale tiles are unsupported.\n");
+ goto passthrough;
+ }
+ }
+ if (nb_frames > 1) {
+ s->cur_frame = 0;
+ s->cur_frame_idx = s->last_frame_idx = 0;
+ s->nb_frames = nb_frames;
+ split = 1;
+ }
+ }
+
+ if (split) {
+ AV1RawFrameHeader *frame = NULL;
+ int cur_frame_type = -1, size = 0;
+
+ for (i = s->cur_frame_idx; i < td->nb_units; i++) {
+ CodedBitstreamUnit *unit = &td->units[i];
+
+ size += unit->data_size;
+ if (unit->type == AV1_OBU_FRAME) {
+ AV1RawOBU *obu = unit->content;
+
+ if (frame) {
+ av_log(ctx, AV_LOG_WARNING, "Frame OBU found when Tile data for a "
+ "previous frame was expected.\n");
+ goto passthrough;
+ }
+
+ frame = &obu->obu.frame.header;
+ cur_frame_type = obu->header.obu_type;
+ s->last_frame_idx = s->cur_frame_idx;
+ s->cur_frame_idx = i + 1;
+ s->cur_frame++;
+
+ // split here unless it's the last frame, in which case
+ // include every trailing OBU
+ if (s->cur_frame < s->nb_frames)
+ break;
+ } else if (unit->type == AV1_OBU_FRAME_HEADER) {
+ AV1RawOBU *obu = unit->content;
+
+ if (frame) {
+ av_log(ctx, AV_LOG_WARNING, "Frame Header OBU found when Tile data for a "
+ "previous frame was expected.\n");
+ goto passthrough;
+ }
+
+ frame = &obu->obu.frame_header;
+ cur_frame_type = obu->header.obu_type;
+ s->last_frame_idx = s->cur_frame_idx;
+ s->cur_frame++;
+
+ // split here if show_existing_frame unless it's the last
+ // frame, in which case include every trailing OBU
+ if (frame->show_existing_frame &&
+ s->cur_frame < s->nb_frames) {
+ s->cur_frame_idx = i + 1;
+ break;
+ }
+ } else if (unit->type == AV1_OBU_TILE_GROUP) {
+ AV1RawOBU *obu = unit->content;
+ AV1RawTileGroup *group = &obu->obu.tile_group;
+
+ if (!frame || cur_frame_type != AV1_OBU_FRAME_HEADER) {
+ av_log(ctx, AV_LOG_WARNING, "Unexpected Tile Group OBU found before a "
+ "Frame Header.\n");
+ goto passthrough;
+ }
+
+ if ((group->tg_end == (frame->tile_cols * frame->tile_rows) - 1) &&
+ // include every trailing OBU with the last frame
+ s->cur_frame < s->nb_frames) {
+ s->cur_frame_idx = i + 1;
+ break;
+ }
+ }
+ }
+ av_assert0(frame && s->cur_frame <= s->nb_frames);
+
+ ret = av_packet_ref(out, s->buffer_pkt);
+ if (ret < 0)
+ goto fail;
+
+ out->data = (uint8_t *)td->units[s->last_frame_idx].data;
+ out->size = size;
+
+ // skip the frame in the buffer packet if it's split successfully, so it's not present
+ // if the packet is passed through in case of failure when splitting another frame.
+ s->buffer_pkt->data += size;
+ s->buffer_pkt->size -= size;
+
+ if (!frame->show_existing_frame && !frame->show_frame)
+ out->pts = AV_NOPTS_VALUE;
+
+ if (s->cur_frame == s->nb_frames) {
+ av_packet_unref(s->buffer_pkt);
+ ff_cbs_fragment_reset(td);
+ }
+
+ return 0;
+ }
+
+passthrough:
+ av_packet_move_ref(out, s->buffer_pkt);
+
+ ret = 0;
+fail:
+ if (ret < 0) {
+ av_packet_unref(out);
+ av_packet_unref(s->buffer_pkt);
+ }
+ ff_cbs_fragment_reset(td);
+
+ return ret;
+}
+
+static const CodedBitstreamUnitType decompose_unit_types[] = {
+ AV1_OBU_TEMPORAL_DELIMITER,
+ AV1_OBU_SEQUENCE_HEADER,
+ AV1_OBU_FRAME_HEADER,
+ AV1_OBU_TILE_GROUP,
+ AV1_OBU_FRAME,
+};
+
+static int av1_frame_split_init(AVBSFContext *ctx)
+{
+ AV1FSplitContext *s = ctx->priv_data;
+ CodedBitstreamFragment *td = &s->temporal_unit;
+ int ret;
+
+ s->buffer_pkt = av_packet_alloc();
+ if (!s->buffer_pkt)
+ return AVERROR(ENOMEM);
+
+ ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_AV1, ctx);
+ if (ret < 0)
+ return ret;
+
+ s->cbc->decompose_unit_types = decompose_unit_types;
+ s->cbc->nb_decompose_unit_types = FF_ARRAY_ELEMS(decompose_unit_types);
+
+ if (!ctx->par_in->extradata_size)
+ return 0;
+
+ ret = ff_cbs_read_extradata(s->cbc, td, ctx->par_in);
+ if (ret < 0)
+ av_log(ctx, AV_LOG_WARNING, "Failed to parse extradata.\n");
+
+ ff_cbs_fragment_reset(td);
+
+ return 0;
+}
+
+static void av1_frame_split_flush(AVBSFContext *ctx)
+{
+ AV1FSplitContext *s = ctx->priv_data;
+
+ av_packet_unref(s->buffer_pkt);
+ ff_cbs_fragment_reset(&s->temporal_unit);
+}
+
+static void av1_frame_split_close(AVBSFContext *ctx)
+{
+ AV1FSplitContext *s = ctx->priv_data;
+
+ av_packet_free(&s->buffer_pkt);
+ ff_cbs_fragment_free(&s->temporal_unit);
+ ff_cbs_close(&s->cbc);
+}
+
+static const enum AVCodecID av1_frame_split_codec_ids[] = {
+ AV_CODEC_ID_AV1, AV_CODEC_ID_NONE,
+};
+
+const FFBitStreamFilter ff_av1_frame_split_bsf = {
+ .p.name = "av1_frame_split",
+ .p.codec_ids = av1_frame_split_codec_ids,
+ .priv_data_size = sizeof(AV1FSplitContext),
+ .init = av1_frame_split_init,
+ .flush = av1_frame_split_flush,
+ .close = av1_frame_split_close,
+ .filter = av1_frame_split_filter,
+};
diff --git a/media/ffvpx/libavcodec/av1_parse.h b/media/ffvpx/libavcodec/av1_parse.h
new file mode 100644
index 0000000000..f4a5d2830e
--- /dev/null
+++ b/media/ffvpx/libavcodec/av1_parse.h
@@ -0,0 +1,184 @@
+/*
+ * AV1 common parsing code
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AV1_PARSE_H
+#define AVCODEC_AV1_PARSE_H
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "libavutil/error.h"
+#include "libavutil/intmath.h"
+#include "libavutil/macros.h"
+
+#include "av1.h"
+#include "get_bits.h"
+
+// OBU header fields + max leb128 length
+#define MAX_OBU_HEADER_SIZE (2 + 8)
+
+typedef struct AV1OBU {
+ /** Size of payload */
+ int size;
+ const uint8_t *data;
+
+ /**
+ * Size, in bits, of just the data, excluding the trailing_one_bit and
+ * any trailing padding.
+ */
+ int size_bits;
+
+ /** Size of entire OBU, including header */
+ int raw_size;
+ const uint8_t *raw_data;
+
+ /** GetBitContext initialized to the start of the payload */
+ GetBitContext gb;
+
+ int type;
+
+ int temporal_id;
+ int spatial_id;
+} AV1OBU;
+
+/** An input packet split into OBUs */
+typedef struct AV1Packet {
+ AV1OBU *obus;
+ int nb_obus;
+ int obus_allocated;
+ unsigned obus_allocated_size;
+} AV1Packet;
+
+/**
+ * Extract an OBU from a raw bitstream.
+ *
+ * @note This function does not copy or store any bitstream data. All
+ * the pointers in the AV1OBU structure will be valid as long
+ * as the input buffer also is.
+ */
+int ff_av1_extract_obu(AV1OBU *obu, const uint8_t *buf, int length,
+ void *logctx);
+
+/**
+ * Split an input packet into OBUs.
+ *
+ * @note This function does not copy or store any bitstream data. All
+ * the pointers in the AV1Packet structure will be valid as
+ * long as the input buffer also is.
+ */
+int ff_av1_packet_split(AV1Packet *pkt, const uint8_t *buf, int length,
+ void *logctx);
+
+/**
+ * Free all the allocated memory in the packet.
+ */
+void ff_av1_packet_uninit(AV1Packet *pkt);
+
+static inline int64_t leb128(GetBitContext *gb) {
+ int64_t ret = 0;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ int byte = get_bits(gb, 8);
+ ret |= (int64_t)(byte & 0x7f) << (i * 7);
+ if (!(byte & 0x80))
+ break;
+ }
+ return ret;
+}
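+/*
+ * Worked example: the byte sequence 0xE5 0x0A decodes to
+ * (0xE5 & 0x7f) | ((0x0A & 0x7f) << 7) = 101 + 1280 = 1381; reading stops at
+ * the second byte because its continuation bit (0x80) is clear.
+ */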
+
+static inline int parse_obu_header(const uint8_t *buf, int buf_size,
+ int64_t *obu_size, int *start_pos, int *type,
+ int *temporal_id, int *spatial_id)
+{
+ GetBitContext gb;
+ int ret, extension_flag, has_size_flag;
+ int64_t size;
+
+ ret = init_get_bits8(&gb, buf, FFMIN(buf_size, MAX_OBU_HEADER_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (get_bits1(&gb) != 0) // obu_forbidden_bit
+ return AVERROR_INVALIDDATA;
+
+ *type = get_bits(&gb, 4);
+ extension_flag = get_bits1(&gb);
+ has_size_flag = get_bits1(&gb);
+ skip_bits1(&gb); // obu_reserved_1bit
+
+ if (extension_flag) {
+ *temporal_id = get_bits(&gb, 3);
+ *spatial_id = get_bits(&gb, 2);
+ skip_bits(&gb, 3); // extension_header_reserved_3bits
+ } else {
+ *temporal_id = *spatial_id = 0;
+ }
+
+ *obu_size = has_size_flag ? leb128(&gb)
+ : buf_size - 1 - extension_flag;
+
+ if (get_bits_left(&gb) < 0)
+ return AVERROR_INVALIDDATA;
+
+ *start_pos = get_bits_count(&gb) / 8;
+
+ size = *obu_size + *start_pos;
+
+ if (size > buf_size)
+ return AVERROR_INVALIDDATA;
+
+ return size;
+}
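+/*
+ * Illustrative only -- splitting a buffer into OBUs with parse_obu_header(),
+ * roughly mirroring how the packet-splitting helpers use it; error handling
+ * omitted:
+ *
+ *     while (buf_size > 0) {
+ *         int64_t obu_size;
+ *         int start_pos, type, temporal_id, spatial_id;
+ *         int len = parse_obu_header(buf, buf_size, &obu_size, &start_pos,
+ *                                    &type, &temporal_id, &spatial_id);
+ *         if (len < 0)
+ *             break;
+ *         // the OBU payload is obu_size bytes starting at buf + start_pos
+ *         buf      += len;
+ *         buf_size -= len;
+ *     }
+ */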
+
+static inline int get_obu_bit_length(const uint8_t *buf, int size, int type)
+{
+ int v;
+
+ /* There are no trailing bits on these */
+ if (type == AV1_OBU_TILE_GROUP ||
+ type == AV1_OBU_TILE_LIST ||
+ type == AV1_OBU_FRAME) {
+ if (size > INT_MAX / 8)
+ return AVERROR(ERANGE);
+ else
+ return size * 8;
+ }
+
+ while (size > 0 && buf[size - 1] == 0)
+ size--;
+
+ if (!size)
+ return 0;
+
+ v = buf[size - 1];
+
+ if (size > INT_MAX / 8)
+ return AVERROR(ERANGE);
+ size *= 8;
+
+ /* Remove the trailing_one_bit and following trailing zeros */
+ if (v)
+ size -= ff_ctz(v) + 1;
+
+ return size;
+}
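+/*
+ * Example: for an OBU type that does carry trailing bits, a 2-byte payload
+ * ending in 0x80 has its trailing_one_bit in the MSB of the last byte, so the
+ * function returns 2 * 8 - (ff_ctz(0x80) + 1) = 16 - 8 = 8 bits of data.
+ */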
+
+#endif /* AVCODEC_AV1_PARSE_H */
diff --git a/media/ffvpx/libavcodec/av1_parser.c b/media/ffvpx/libavcodec/av1_parser.c
new file mode 100644
index 0000000000..14dae92fe9
--- /dev/null
+++ b/media/ffvpx/libavcodec/av1_parser.c
@@ -0,0 +1,216 @@
+/*
+ * AV1 parser
+ *
+ * Copyright (C) 2018 James Almer <jamrial@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "cbs.h"
+#include "cbs_av1.h"
+#include "parser.h"
+
+typedef struct AV1ParseContext {
+ CodedBitstreamContext *cbc;
+ CodedBitstreamFragment temporal_unit;
+ int parsed_extradata;
+} AV1ParseContext;
+
+static const enum AVPixelFormat pix_fmts_8bit[2][2] = {
+ { AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE },
+ { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P },
+};
+static const enum AVPixelFormat pix_fmts_10bit[2][2] = {
+ { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_NONE },
+ { AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10 },
+};
+static const enum AVPixelFormat pix_fmts_12bit[2][2] = {
+ { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_NONE },
+ { AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12 },
+};
+
+static const enum AVPixelFormat pix_fmts_rgb[3] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
+};
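+/* The pix_fmts_*bit tables above are indexed as [subsampling_x][subsampling_y]:
+ * [0][0] -> 4:4:4, [1][0] -> 4:2:2, [1][1] -> 4:2:0; AV1 does not allow
+ * subsampling_x == 0 with subsampling_y == 1, hence the AV_PIX_FMT_NONE entry.
+ * pix_fmts_rgb is indexed by high_bitdepth + twelve_bit (0/1/2 for 8/10/12 bit). */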
+
+static int av1_parser_parse(AVCodecParserContext *ctx,
+ AVCodecContext *avctx,
+ const uint8_t **out_data, int *out_size,
+ const uint8_t *data, int size)
+{
+ AV1ParseContext *s = ctx->priv_data;
+ CodedBitstreamFragment *td = &s->temporal_unit;
+ const CodedBitstreamAV1Context *av1 = s->cbc->priv_data;
+ const AV1RawSequenceHeader *seq;
+ const AV1RawColorConfig *color;
+ int ret;
+
+ *out_data = data;
+ *out_size = size;
+
+ ctx->key_frame = -1;
+ ctx->pict_type = AV_PICTURE_TYPE_NONE;
+ ctx->picture_structure = AV_PICTURE_STRUCTURE_UNKNOWN;
+
+ s->cbc->log_ctx = avctx;
+
+ if (avctx->extradata_size && !s->parsed_extradata) {
+ s->parsed_extradata = 1;
+
+ ret = ff_cbs_read_extradata_from_codec(s->cbc, td, avctx);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_WARNING, "Failed to parse extradata.\n");
+ }
+
+ ff_cbs_fragment_reset(td);
+ }
+
+ ret = ff_cbs_read(s->cbc, td, data, size);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to parse temporal unit.\n");
+ goto end;
+ }
+
+ if (!av1->sequence_header) {
+ av_log(avctx, AV_LOG_ERROR, "No sequence header available\n");
+ goto end;
+ }
+
+ seq = av1->sequence_header;
+ color = &seq->color_config;
+
+ for (int i = 0; i < td->nb_units; i++) {
+ const CodedBitstreamUnit *unit = &td->units[i];
+ const AV1RawOBU *obu = unit->content;
+ const AV1RawFrameHeader *frame;
+
+ if (unit->type == AV1_OBU_FRAME)
+ frame = &obu->obu.frame.header;
+ else if (unit->type == AV1_OBU_FRAME_HEADER)
+ frame = &obu->obu.frame_header;
+ else
+ continue;
+
+ if (obu->header.spatial_id > 0)
+ continue;
+
+ if (!frame->show_frame && !frame->show_existing_frame)
+ continue;
+
+ ctx->width = frame->frame_width_minus_1 + 1;
+ ctx->height = frame->frame_height_minus_1 + 1;
+
+ ctx->key_frame = frame->frame_type == AV1_FRAME_KEY && !frame->show_existing_frame;
+
+ switch (frame->frame_type) {
+ case AV1_FRAME_KEY:
+ case AV1_FRAME_INTRA_ONLY:
+ ctx->pict_type = AV_PICTURE_TYPE_I;
+ break;
+ case AV1_FRAME_INTER:
+ ctx->pict_type = AV_PICTURE_TYPE_P;
+ break;
+ case AV1_FRAME_SWITCH:
+ ctx->pict_type = AV_PICTURE_TYPE_SP;
+ break;
+ }
+ ctx->picture_structure = AV_PICTURE_STRUCTURE_FRAME;
+ }
+
+ switch (av1->bit_depth) {
+ case 8:
+ ctx->format = color->mono_chrome ? AV_PIX_FMT_GRAY8
+ : pix_fmts_8bit [color->subsampling_x][color->subsampling_y];
+ break;
+ case 10:
+ ctx->format = color->mono_chrome ? AV_PIX_FMT_GRAY10
+ : pix_fmts_10bit[color->subsampling_x][color->subsampling_y];
+ break;
+ case 12:
+ ctx->format = color->mono_chrome ? AV_PIX_FMT_GRAY12
+ : pix_fmts_12bit[color->subsampling_x][color->subsampling_y];
+ break;
+ }
+ av_assert2(ctx->format != AV_PIX_FMT_NONE);
+
+ if (!color->subsampling_x && !color->subsampling_y &&
+ color->matrix_coefficients == AVCOL_SPC_RGB &&
+ color->color_primaries == AVCOL_PRI_BT709 &&
+ color->transfer_characteristics == AVCOL_TRC_IEC61966_2_1)
+ ctx->format = pix_fmts_rgb[color->high_bitdepth + color->twelve_bit];
+
+ avctx->profile = seq->seq_profile;
+ avctx->level = seq->seq_level_idx[0];
+
+ avctx->colorspace = (enum AVColorSpace) color->matrix_coefficients;
+ avctx->color_primaries = (enum AVColorPrimaries) color->color_primaries;
+ avctx->color_trc = (enum AVColorTransferCharacteristic) color->transfer_characteristics;
+ avctx->color_range = color->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+
+ if (seq->timing_info_present_flag) {
+ const AV1RawTimingInfo *timing = &seq->timing_info;
+ av_reduce(&avctx->framerate.den, &avctx->framerate.num,
+ timing->num_units_in_display_tick, timing->time_scale, INT_MAX);
+ }
+
+end:
+ ff_cbs_fragment_reset(td);
+
+ s->cbc->log_ctx = NULL;
+
+ return size;
+}
+
+static const CodedBitstreamUnitType decompose_unit_types[] = {
+ AV1_OBU_TEMPORAL_DELIMITER,
+ AV1_OBU_SEQUENCE_HEADER,
+ AV1_OBU_FRAME_HEADER,
+ AV1_OBU_TILE_GROUP,
+ AV1_OBU_FRAME,
+};
+
+static av_cold int av1_parser_init(AVCodecParserContext *ctx)
+{
+ AV1ParseContext *s = ctx->priv_data;
+ int ret;
+
+ ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_AV1, NULL);
+ if (ret < 0)
+ return ret;
+
+ s->cbc->decompose_unit_types = decompose_unit_types;
+ s->cbc->nb_decompose_unit_types = FF_ARRAY_ELEMS(decompose_unit_types);
+
+ return 0;
+}
+
+static void av1_parser_close(AVCodecParserContext *ctx)
+{
+ AV1ParseContext *s = ctx->priv_data;
+
+ ff_cbs_fragment_free(&s->temporal_unit);
+ ff_cbs_close(&s->cbc);
+}
+
+const AVCodecParser ff_av1_parser = {
+ .codec_ids = { AV_CODEC_ID_AV1 },
+ .priv_data_size = sizeof(AV1ParseContext),
+ .parser_init = av1_parser_init,
+ .parser_close = av1_parser_close,
+ .parser_parse = av1_parser_parse,
+};
diff --git a/media/ffvpx/libavcodec/av1dec.c b/media/ffvpx/libavcodec/av1dec.c
new file mode 100644
index 0000000000..d83c902f1f
--- /dev/null
+++ b/media/ffvpx/libavcodec/av1dec.c
@@ -0,0 +1,1286 @@
+/*
+ * AV1 video decoder
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config_components.h"
+
+#include "libavutil/film_grain_params.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "av1dec.h"
+#include "bytestream.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "hwconfig.h"
+#include "profiles.h"
+#include "thread.h"
+
+/** Same as Div_Lut defined in spec 7.11.3.7 */
+static const uint16_t div_lut[AV1_DIV_LUT_NUM] = {
+ 16384, 16320, 16257, 16194, 16132, 16070, 16009, 15948, 15888, 15828, 15768,
+ 15709, 15650, 15592, 15534, 15477, 15420, 15364, 15308, 15252, 15197, 15142,
+ 15087, 15033, 14980, 14926, 14873, 14821, 14769, 14717, 14665, 14614, 14564,
+ 14513, 14463, 14413, 14364, 14315, 14266, 14218, 14170, 14122, 14075, 14028,
+ 13981, 13935, 13888, 13843, 13797, 13752, 13707, 13662, 13618, 13574, 13530,
+ 13487, 13443, 13400, 13358, 13315, 13273, 13231, 13190, 13148, 13107, 13066,
+ 13026, 12985, 12945, 12906, 12866, 12827, 12788, 12749, 12710, 12672, 12633,
+ 12596, 12558, 12520, 12483, 12446, 12409, 12373, 12336, 12300, 12264, 12228,
+ 12193, 12157, 12122, 12087, 12053, 12018, 11984, 11950, 11916, 11882, 11848,
+ 11815, 11782, 11749, 11716, 11683, 11651, 11619, 11586, 11555, 11523, 11491,
+ 11460, 11429, 11398, 11367, 11336, 11305, 11275, 11245, 11215, 11185, 11155,
+ 11125, 11096, 11067, 11038, 11009, 10980, 10951, 10923, 10894, 10866, 10838,
+ 10810, 10782, 10755, 10727, 10700, 10673, 10645, 10618, 10592, 10565, 10538,
+ 10512, 10486, 10460, 10434, 10408, 10382, 10356, 10331, 10305, 10280, 10255,
+ 10230, 10205, 10180, 10156, 10131, 10107, 10082, 10058, 10034, 10010, 9986,
+ 9963, 9939, 9916, 9892, 9869, 9846, 9823, 9800, 9777, 9754, 9732,
+ 9709, 9687, 9664, 9642, 9620, 9598, 9576, 9554, 9533, 9511, 9489,
+ 9468, 9447, 9425, 9404, 9383, 9362, 9341, 9321, 9300, 9279, 9259,
+ 9239, 9218, 9198, 9178, 9158, 9138, 9118, 9098, 9079, 9059, 9039,
+ 9020, 9001, 8981, 8962, 8943, 8924, 8905, 8886, 8867, 8849, 8830,
+ 8812, 8793, 8775, 8756, 8738, 8720, 8702, 8684, 8666, 8648, 8630,
+ 8613, 8595, 8577, 8560, 8542, 8525, 8508, 8490, 8473, 8456, 8439,
+ 8422, 8405, 8389, 8372, 8355, 8339, 8322, 8306, 8289, 8273, 8257,
+ 8240, 8224, 8208, 8192
+};
+
+static uint32_t inverse_recenter(int r, uint32_t v)
+{
+ if (v > 2 * r)
+ return v;
+ else if (v & 1)
+ return r - ((v + 1) >> 1);
+ else
+ return r + (v >> 1);
+}
+
+static uint32_t decode_unsigned_subexp_with_ref(uint32_t sub_exp,
+ int mx, int r)
+{
+ if ((r << 1) <= mx) {
+ return inverse_recenter(r, sub_exp);
+ } else {
+ return mx - 1 - inverse_recenter(mx - 1 - r, sub_exp);
+ }
+}
+
+static int32_t decode_signed_subexp_with_ref(uint32_t sub_exp, int low,
+ int high, int r)
+{
+ int32_t x = decode_unsigned_subexp_with_ref(sub_exp, high - low, r - low);
+ return x + low;
+}
+
+static void read_global_param(AV1DecContext *s, int type, int ref, int idx)
+{
+ uint8_t primary_frame, prev_frame;
+ uint32_t abs_bits, prec_bits, round, prec_diff, sub, mx;
+ int32_t r, prev_gm_param;
+
+ primary_frame = s->raw_frame_header->primary_ref_frame;
+ prev_frame = s->raw_frame_header->ref_frame_idx[primary_frame];
+ abs_bits = AV1_GM_ABS_ALPHA_BITS;
+ prec_bits = AV1_GM_ALPHA_PREC_BITS;
+
+ /* setup_past_independence() sets PrevGmParams to default values. We can
+ * simply point to the current's frame gm_params as they will be initialized
+ * with defaults at this point.
+ */
+ if (s->raw_frame_header->primary_ref_frame == AV1_PRIMARY_REF_NONE)
+ prev_gm_param = s->cur_frame.gm_params[ref][idx];
+ else
+ prev_gm_param = s->ref[prev_frame].gm_params[ref][idx];
+
+ if (idx < 2) {
+ if (type == AV1_WARP_MODEL_TRANSLATION) {
+ abs_bits = AV1_GM_ABS_TRANS_ONLY_BITS -
+ !s->raw_frame_header->allow_high_precision_mv;
+ prec_bits = AV1_GM_TRANS_ONLY_PREC_BITS -
+ !s->raw_frame_header->allow_high_precision_mv;
+ } else {
+ abs_bits = AV1_GM_ABS_TRANS_BITS;
+ prec_bits = AV1_GM_TRANS_PREC_BITS;
+ }
+ }
+ round = (idx % 3) == 2 ? (1 << AV1_WARPEDMODEL_PREC_BITS) : 0;
+ prec_diff = AV1_WARPEDMODEL_PREC_BITS - prec_bits;
+ sub = (idx % 3) == 2 ? (1 << prec_bits) : 0;
+ mx = 1 << abs_bits;
+ r = (prev_gm_param >> prec_diff) - sub;
+
+ s->cur_frame.gm_params[ref][idx] =
+ (decode_signed_subexp_with_ref(s->raw_frame_header->gm_params[ref][idx],
+ -mx, mx + 1, r) << prec_diff) + round;
+}
+
+static uint64_t round_two(uint64_t x, uint16_t n)
+{
+ if (n == 0)
+ return x;
+ return ((x + ((uint64_t)1 << (n - 1))) >> n);
+}
+
+static int64_t round_two_signed(int64_t x, uint16_t n)
+{
+ return ((x<0) ? -((int64_t)round_two(-x, n)) : (int64_t)round_two(x, n));
+}
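+/* e.g. round_two(5, 1) = (5 + 1) >> 1 = 3 and round_two_signed(-5, 1) = -3 */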
+
+/**
+ * Resolve divisor process.
+ * see spec 7.11.3.7
+ */
+static int16_t resolve_divisor(uint32_t d, uint16_t *shift)
+{
+ int32_t e, f;
+
+ *shift = av_log2(d);
+ e = d - (1 << (*shift));
+ if (*shift > AV1_DIV_LUT_BITS)
+ f = round_two(e, *shift - AV1_DIV_LUT_BITS);
+ else
+ f = e << (AV1_DIV_LUT_BITS - (*shift));
+
+ *shift += AV1_DIV_LUT_PREC_BITS;
+
+ return div_lut[f];
+}
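+/*
+ * Worked example: resolve_divisor(3, &shift) sets shift = av_log2(3) = 1,
+ * e = 1, f = 1 << (AV1_DIV_LUT_BITS - 1) = 128 and returns div_lut[128] =
+ * 10923 with shift updated to 1 + AV1_DIV_LUT_PREC_BITS = 15, i.e. 1/3 is
+ * approximated as 10923 / (1 << 15).
+ */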
+
+/**
+ * Check whether the global motion params are valid.
+ * See spec 7.11.3.6.
+ */
+static uint8_t get_shear_params_valid(AV1DecContext *s, int idx)
+{
+ int16_t alpha, beta, gamma, delta, divf, divs;
+ int64_t v, w;
+ int32_t *param = &s->cur_frame.gm_params[idx][0];
+ if (param[2] < 0)
+ return 0;
+
+ alpha = av_clip_int16(param[2] - (1 << AV1_WARPEDMODEL_PREC_BITS));
+ beta = av_clip_int16(param[3]);
+ divf = resolve_divisor(abs(param[2]), &divs);
+ v = (int64_t)param[4] * (1 << AV1_WARPEDMODEL_PREC_BITS);
+ w = (int64_t)param[3] * param[4];
+ gamma = av_clip_int16((int)round_two_signed((v * divf), divs));
+ delta = av_clip_int16(param[5] - (int)round_two_signed((w * divf), divs) - (1 << AV1_WARPEDMODEL_PREC_BITS));
+
+ alpha = round_two_signed(alpha, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS;
+ beta = round_two_signed(beta, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS;
+ gamma = round_two_signed(gamma, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS;
+ delta = round_two_signed(delta, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS;
+
+ if ((4 * abs(alpha) + 7 * abs(beta)) >= (1 << AV1_WARPEDMODEL_PREC_BITS) ||
+ (4 * abs(gamma) + 4 * abs(delta)) >= (1 << AV1_WARPEDMODEL_PREC_BITS))
+ return 0;
+
+ return 1;
+}
+
+/**
+ * Update the global motion type/params. CBS already implements part of this
+ * process, so the spec does not need to be implemented in full here.
+ */
+static void global_motion_params(AV1DecContext *s)
+{
+ const AV1RawFrameHeader *header = s->raw_frame_header;
+ int type, ref;
+
+ for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
+ s->cur_frame.gm_type[ref] = AV1_WARP_MODEL_IDENTITY;
+ for (int i = 0; i < 6; i++)
+ s->cur_frame.gm_params[ref][i] = (i % 3 == 2) ?
+ 1 << AV1_WARPEDMODEL_PREC_BITS : 0;
+ }
+ if (header->frame_type == AV1_FRAME_KEY ||
+ header->frame_type == AV1_FRAME_INTRA_ONLY)
+ return;
+
+ for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
+ if (header->is_global[ref]) {
+ if (header->is_rot_zoom[ref]) {
+ type = AV1_WARP_MODEL_ROTZOOM;
+ } else {
+ type = header->is_translation[ref] ? AV1_WARP_MODEL_TRANSLATION
+ : AV1_WARP_MODEL_AFFINE;
+ }
+ } else {
+ type = AV1_WARP_MODEL_IDENTITY;
+ }
+ s->cur_frame.gm_type[ref] = type;
+
+ if (type >= AV1_WARP_MODEL_ROTZOOM) {
+ read_global_param(s, type, ref, 2);
+ read_global_param(s, type, ref, 3);
+ if (type == AV1_WARP_MODEL_AFFINE) {
+ read_global_param(s, type, ref, 4);
+ read_global_param(s, type, ref, 5);
+ } else {
+ s->cur_frame.gm_params[ref][4] = -s->cur_frame.gm_params[ref][3];
+ s->cur_frame.gm_params[ref][5] = s->cur_frame.gm_params[ref][2];
+ }
+ }
+ if (type >= AV1_WARP_MODEL_TRANSLATION) {
+ read_global_param(s, type, ref, 0);
+ read_global_param(s, type, ref, 1);
+ }
+ if (type <= AV1_WARP_MODEL_AFFINE) {
+ s->cur_frame.gm_invalid[ref] = !get_shear_params_valid(s, ref);
+ }
+ }
+}
+
+static int get_relative_dist(const AV1RawSequenceHeader *seq,
+ unsigned int a, unsigned int b)
+{
+ unsigned int diff = a - b;
+ unsigned int m = 1 << seq->order_hint_bits_minus_1;
+ return (diff & (m - 1)) - (diff & m);
+}
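+/*
+ * Example: with 8-bit order hints (order_hint_bits_minus_1 = 7, so m = 128),
+ * get_relative_dist(seq, 2, 255) = 3 and get_relative_dist(seq, 255, 2) = -3,
+ * i.e. order hint 2 is treated as three frames after 255 across the wrap.
+ */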
+
+static void skip_mode_params(AV1DecContext *s)
+{
+ const AV1RawFrameHeader *header = s->raw_frame_header;
+ const AV1RawSequenceHeader *seq = s->raw_seq;
+
+ int forward_idx, backward_idx;
+ int forward_hint, backward_hint;
+ int second_forward_idx, second_forward_hint;
+ int ref_hint, dist, i;
+
+ if (!header->skip_mode_present)
+ return;
+
+ forward_idx = -1;
+ backward_idx = -1;
+ for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
+ ref_hint = s->ref[header->ref_frame_idx[i]].raw_frame_header->order_hint;
+ dist = get_relative_dist(seq, ref_hint, header->order_hint);
+ if (dist < 0) {
+ if (forward_idx < 0 ||
+ get_relative_dist(seq, ref_hint, forward_hint) > 0) {
+ forward_idx = i;
+ forward_hint = ref_hint;
+ }
+ } else if (dist > 0) {
+ if (backward_idx < 0 ||
+ get_relative_dist(seq, ref_hint, backward_hint) < 0) {
+ backward_idx = i;
+ backward_hint = ref_hint;
+ }
+ }
+ }
+
+ if (forward_idx < 0) {
+ return;
+ } else if (backward_idx >= 0) {
+ s->cur_frame.skip_mode_frame_idx[0] =
+ AV1_REF_FRAME_LAST + FFMIN(forward_idx, backward_idx);
+ s->cur_frame.skip_mode_frame_idx[1] =
+ AV1_REF_FRAME_LAST + FFMAX(forward_idx, backward_idx);
+ return;
+ }
+
+ second_forward_idx = -1;
+ for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
+ ref_hint = s->ref[header->ref_frame_idx[i]].raw_frame_header->order_hint;
+ if (get_relative_dist(seq, ref_hint, forward_hint) < 0) {
+ if (second_forward_idx < 0 ||
+ get_relative_dist(seq, ref_hint, second_forward_hint) > 0) {
+ second_forward_idx = i;
+ second_forward_hint = ref_hint;
+ }
+ }
+ }
+
+ if (second_forward_idx < 0)
+ return;
+
+ s->cur_frame.skip_mode_frame_idx[0] =
+ AV1_REF_FRAME_LAST + FFMIN(forward_idx, second_forward_idx);
+ s->cur_frame.skip_mode_frame_idx[1] =
+ AV1_REF_FRAME_LAST + FFMAX(forward_idx, second_forward_idx);
+}
+
+static void coded_lossless_param(AV1DecContext *s)
+{
+ const AV1RawFrameHeader *header = s->raw_frame_header;
+ int i;
+
+ if (header->delta_q_y_dc || header->delta_q_u_ac ||
+ header->delta_q_u_dc || header->delta_q_v_ac ||
+ header->delta_q_v_dc) {
+ s->cur_frame.coded_lossless = 0;
+ return;
+ }
+
+ s->cur_frame.coded_lossless = 1;
+ for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
+ int qindex;
+ if (header->feature_enabled[i][AV1_SEG_LVL_ALT_Q]) {
+ qindex = (header->base_q_idx +
+ header->feature_value[i][AV1_SEG_LVL_ALT_Q]);
+ } else {
+ qindex = header->base_q_idx;
+ }
+ qindex = av_clip_uintp2(qindex, 8);
+
+ if (qindex) {
+ s->cur_frame.coded_lossless = 0;
+ return;
+ }
+ }
+}
+
+static void load_grain_params(AV1DecContext *s)
+{
+ const AV1RawFrameHeader *header = s->raw_frame_header;
+ const AV1RawFilmGrainParams *film_grain = &header->film_grain, *src;
+ AV1RawFilmGrainParams *dst = &s->cur_frame.film_grain;
+
+ if (!film_grain->apply_grain)
+ return;
+
+ if (film_grain->update_grain) {
+ memcpy(dst, film_grain, sizeof(*dst));
+ return;
+ }
+
+ src = &s->ref[film_grain->film_grain_params_ref_idx].film_grain;
+
+ memcpy(dst, src, sizeof(*dst));
+ dst->grain_seed = film_grain->grain_seed;
+}
+
+static int init_tile_data(AV1DecContext *s)
+{
+ int cur_tile_num =
+ s->raw_frame_header->tile_cols * s->raw_frame_header->tile_rows;
+ if (s->tile_num < cur_tile_num) {
+ int ret = av_reallocp_array(&s->tile_group_info, cur_tile_num,
+ sizeof(TileGroupInfo));
+ if (ret < 0) {
+ s->tile_num = 0;
+ return ret;
+ }
+ }
+ s->tile_num = cur_tile_num;
+
+ return 0;
+}
+
+static int get_tiles_info(AVCodecContext *avctx, const AV1RawTileGroup *tile_group)
+{
+ AV1DecContext *s = avctx->priv_data;
+ GetByteContext gb;
+ uint16_t tile_num, tile_row, tile_col;
+ uint32_t size = 0, size_bytes = 0;
+
+ bytestream2_init(&gb, tile_group->tile_data.data,
+ tile_group->tile_data.data_size);
+ s->tg_start = tile_group->tg_start;
+ s->tg_end = tile_group->tg_end;
+
+ for (tile_num = tile_group->tg_start; tile_num <= tile_group->tg_end; tile_num++) {
+ tile_row = tile_num / s->raw_frame_header->tile_cols;
+ tile_col = tile_num % s->raw_frame_header->tile_cols;
+
+ if (tile_num == tile_group->tg_end) {
+ s->tile_group_info[tile_num].tile_size = bytestream2_get_bytes_left(&gb);
+ s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
+ s->tile_group_info[tile_num].tile_row = tile_row;
+ s->tile_group_info[tile_num].tile_column = tile_col;
+ return 0;
+ }
+ size_bytes = s->raw_frame_header->tile_size_bytes_minus1 + 1;
+ if (bytestream2_get_bytes_left(&gb) < size_bytes)
+ return AVERROR_INVALIDDATA;
+ size = 0;
+ for (int i = 0; i < size_bytes; i++)
+ size |= bytestream2_get_byteu(&gb) << 8 * i;
+ if (bytestream2_get_bytes_left(&gb) <= size)
+ return AVERROR_INVALIDDATA;
+ size++;
+
+ s->tile_group_info[tile_num].tile_size = size;
+ s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
+ s->tile_group_info[tile_num].tile_row = tile_row;
+ s->tile_group_info[tile_num].tile_column = tile_col;
+
+ bytestream2_skipu(&gb, size);
+ }
+
+ return 0;
+}
+
+static int get_pixel_format(AVCodecContext *avctx)
+{
+ AV1DecContext *s = avctx->priv_data;
+ const AV1RawSequenceHeader *seq = s->raw_seq;
+ uint8_t bit_depth;
+ int ret;
+ enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
+#define HWACCEL_MAX (CONFIG_AV1_DXVA2_HWACCEL + \
+ CONFIG_AV1_D3D11VA_HWACCEL * 2 + \
+ CONFIG_AV1_NVDEC_HWACCEL + \
+ CONFIG_AV1_VAAPI_HWACCEL + \
+ CONFIG_AV1_VDPAU_HWACCEL)
+ enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
+
+ if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
+ bit_depth = seq->color_config.twelve_bit ? 12 : 10;
+ else if (seq->seq_profile <= 2)
+ bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
+ else {
+ av_log(avctx, AV_LOG_ERROR,
+ "Unknown AV1 profile %d.\n", seq->seq_profile);
+ return -1;
+ }
+
+ if (!seq->color_config.mono_chrome) {
+ // 4:4:4 x:0 y:0, 4:2:2 x:1 y:0, 4:2:0 x:1 y:1
+ if (seq->color_config.subsampling_x == 0 &&
+ seq->color_config.subsampling_y == 0) {
+ if (bit_depth == 8)
+ pix_fmt = AV_PIX_FMT_YUV444P;
+ else if (bit_depth == 10)
+ pix_fmt = AV_PIX_FMT_YUV444P10;
+ else if (bit_depth == 12)
+ pix_fmt = AV_PIX_FMT_YUV444P12;
+ else
+ av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
+ } else if (seq->color_config.subsampling_x == 1 &&
+ seq->color_config.subsampling_y == 0) {
+ if (bit_depth == 8)
+ pix_fmt = AV_PIX_FMT_YUV422P;
+ else if (bit_depth == 10)
+ pix_fmt = AV_PIX_FMT_YUV422P10;
+ else if (bit_depth == 12)
+ pix_fmt = AV_PIX_FMT_YUV422P12;
+ else
+ av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
+ } else if (seq->color_config.subsampling_x == 1 &&
+ seq->color_config.subsampling_y == 1) {
+ if (bit_depth == 8)
+ pix_fmt = AV_PIX_FMT_YUV420P;
+ else if (bit_depth == 10)
+ pix_fmt = AV_PIX_FMT_YUV420P10;
+ else if (bit_depth == 12)
+ pix_fmt = AV_PIX_FMT_YUV420P12;
+ else
+ av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
+ }
+ } else {
+ if (bit_depth == 8)
+ pix_fmt = AV_PIX_FMT_GRAY8;
+ else if (bit_depth == 10)
+ pix_fmt = AV_PIX_FMT_GRAY10;
+ else if (bit_depth == 12)
+ pix_fmt = AV_PIX_FMT_GRAY12;
+ else
+ av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "AV1 decode get format: %s.\n",
+ av_get_pix_fmt_name(pix_fmt));
+
+ if (pix_fmt == AV_PIX_FMT_NONE)
+ return -1;
+
+ switch (pix_fmt) {
+ case AV_PIX_FMT_YUV420P:
+#if CONFIG_AV1_DXVA2_HWACCEL
+ *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
+#endif
+#if CONFIG_AV1_D3D11VA_HWACCEL
+ *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
+ *fmtp++ = AV_PIX_FMT_D3D11;
+#endif
+#if CONFIG_AV1_NVDEC_HWACCEL
+ *fmtp++ = AV_PIX_FMT_CUDA;
+#endif
+#if CONFIG_AV1_VAAPI_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VAAPI;
+#endif
+#if CONFIG_AV1_VDPAU_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VDPAU;
+#endif
+ break;
+ case AV_PIX_FMT_YUV420P10:
+#if CONFIG_AV1_DXVA2_HWACCEL
+ *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
+#endif
+#if CONFIG_AV1_D3D11VA_HWACCEL
+ *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
+ *fmtp++ = AV_PIX_FMT_D3D11;
+#endif
+#if CONFIG_AV1_NVDEC_HWACCEL
+ *fmtp++ = AV_PIX_FMT_CUDA;
+#endif
+#if CONFIG_AV1_VAAPI_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VAAPI;
+#endif
+#if CONFIG_AV1_VDPAU_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VDPAU;
+#endif
+ break;
+ case AV_PIX_FMT_GRAY8:
+#if CONFIG_AV1_NVDEC_HWACCEL
+ *fmtp++ = AV_PIX_FMT_CUDA;
+#endif
+ break;
+ case AV_PIX_FMT_GRAY10:
+#if CONFIG_AV1_NVDEC_HWACCEL
+ *fmtp++ = AV_PIX_FMT_CUDA;
+#endif
+ break;
+ }
+
+ *fmtp++ = pix_fmt;
+ *fmtp = AV_PIX_FMT_NONE;
+
+ ret = ff_thread_get_format(avctx, pix_fmts);
+ if (ret < 0)
+ return ret;
+
+    /**
+     * Check whether the hardware accelerator was initialized correctly; if
+     * not, return "not implemented". The AV1 decoder currently has no native
+     * software decoding path, so this check must be removed if one is added
+     * in the future.
+     */
+ if (!avctx->hwaccel) {
+ av_log(avctx, AV_LOG_ERROR, "Your platform doesn't support"
+ " hardware accelerated AV1 decoding.\n");
+ return AVERROR(ENOSYS);
+ }
+
+ s->pix_fmt = pix_fmt;
+ avctx->pix_fmt = ret;
+
+ return 0;
+}
+
+static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
+{
+ ff_thread_release_buffer(avctx, f->f);
+ av_buffer_unref(&f->hwaccel_priv_buf);
+ f->hwaccel_picture_private = NULL;
+ av_buffer_unref(&f->header_ref);
+ f->raw_frame_header = NULL;
+ f->spatial_id = f->temporal_id = 0;
+ memset(f->skip_mode_frame_idx, 0,
+ 2 * sizeof(uint8_t));
+ memset(&f->film_grain, 0, sizeof(f->film_grain));
+ f->coded_lossless = 0;
+}
+
+static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *src)
+{
+ int ret;
+
+ ret = av_buffer_replace(&dst->header_ref, src->header_ref);
+ if (ret < 0)
+ return ret;
+
+ dst->raw_frame_header = src->raw_frame_header;
+
+ if (!src->f->buf[0])
+ return 0;
+
+ ret = av_frame_ref(dst->f, src->f);
+ if (ret < 0)
+ goto fail;
+
+ if (src->hwaccel_picture_private) {
+ dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
+ if (!dst->hwaccel_priv_buf)
+ goto fail;
+ dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
+ }
+
+ dst->spatial_id = src->spatial_id;
+ dst->temporal_id = src->temporal_id;
+ memcpy(dst->gm_invalid,
+ src->gm_invalid,
+ AV1_NUM_REF_FRAMES * sizeof(uint8_t));
+ memcpy(dst->gm_type,
+ src->gm_type,
+ AV1_NUM_REF_FRAMES * sizeof(uint8_t));
+ memcpy(dst->gm_params,
+ src->gm_params,
+ AV1_NUM_REF_FRAMES * 6 * sizeof(int32_t));
+ memcpy(dst->skip_mode_frame_idx,
+ src->skip_mode_frame_idx,
+ 2 * sizeof(uint8_t));
+ memcpy(&dst->film_grain,
+ &src->film_grain,
+ sizeof(dst->film_grain));
+ dst->coded_lossless = src->coded_lossless;
+
+ return 0;
+
+fail:
+ av1_frame_unref(avctx, dst);
+ return AVERROR(ENOMEM);
+}
+
+static av_cold int av1_decode_free(AVCodecContext *avctx)
+{
+ AV1DecContext *s = avctx->priv_data;
+
+ for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
+ av1_frame_unref(avctx, &s->ref[i]);
+ av_frame_free(&s->ref[i].f);
+ }
+ av1_frame_unref(avctx, &s->cur_frame);
+ av_frame_free(&s->cur_frame.f);
+
+ av_buffer_unref(&s->seq_ref);
+ av_buffer_unref(&s->header_ref);
+ av_freep(&s->tile_group_info);
+
+ ff_cbs_fragment_free(&s->current_obu);
+ ff_cbs_close(&s->cbc);
+
+ return 0;
+}
+
+static int set_context_with_sequence(AVCodecContext *avctx,
+ const AV1RawSequenceHeader *seq)
+{
+ int width = seq->max_frame_width_minus_1 + 1;
+ int height = seq->max_frame_height_minus_1 + 1;
+
+ avctx->profile = seq->seq_profile;
+ avctx->level = seq->seq_level_idx[0];
+
+ avctx->color_range =
+ seq->color_config.color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+ avctx->color_primaries = seq->color_config.color_primaries;
+    avctx->colorspace = seq->color_config.matrix_coefficients;
+ avctx->color_trc = seq->color_config.transfer_characteristics;
+
+ switch (seq->color_config.chroma_sample_position) {
+ case AV1_CSP_VERTICAL:
+ avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
+ break;
+ case AV1_CSP_COLOCATED:
+ avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
+ break;
+ }
+
+ if (seq->film_grain_params_present)
+ avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
+ else
+ avctx->properties &= ~FF_CODEC_PROPERTY_FILM_GRAIN;
+
+ if (avctx->width != width || avctx->height != height) {
+ int ret = ff_set_dimensions(avctx, width, height);
+ if (ret < 0)
+ return ret;
+ }
+ avctx->sample_aspect_ratio = (AVRational) { 1, 1 };
+
+ if (seq->timing_info.num_units_in_display_tick &&
+ seq->timing_info.time_scale) {
+ av_reduce(&avctx->framerate.den, &avctx->framerate.num,
+ seq->timing_info.num_units_in_display_tick,
+ seq->timing_info.time_scale,
+ INT_MAX);
+ if (seq->timing_info.equal_picture_interval)
+ avctx->ticks_per_frame = seq->timing_info.num_ticks_per_picture_minus_1 + 1;
+ }
+
+ return 0;
+}
+
+static int update_context_with_frame_header(AVCodecContext *avctx,
+ const AV1RawFrameHeader *header)
+{
+ AVRational aspect_ratio;
+ int width = header->frame_width_minus_1 + 1;
+ int height = header->frame_height_minus_1 + 1;
+ int r_width = header->render_width_minus_1 + 1;
+ int r_height = header->render_height_minus_1 + 1;
+ int ret;
+
+ if (avctx->width != width || avctx->height != height) {
+ ret = ff_set_dimensions(avctx, width, height);
+ if (ret < 0)
+ return ret;
+ }
+
+ av_reduce(&aspect_ratio.num, &aspect_ratio.den,
+ (int64_t)height * r_width,
+ (int64_t)width * r_height,
+ INT_MAX);
+
+ if (av_cmp_q(avctx->sample_aspect_ratio, aspect_ratio)) {
+ ret = ff_set_sar(avctx, aspect_ratio);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static av_cold int av1_decode_init(AVCodecContext *avctx)
+{
+ AV1DecContext *s = avctx->priv_data;
+ AV1RawSequenceHeader *seq;
+ int ret;
+
+ s->avctx = avctx;
+ s->pix_fmt = AV_PIX_FMT_NONE;
+
+ for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
+ s->ref[i].f = av_frame_alloc();
+ if (!s->ref[i].f) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to allocate reference frame buffer %d.\n", i);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ s->cur_frame.f = av_frame_alloc();
+ if (!s->cur_frame.f) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to allocate current frame buffer.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_AV1, avctx);
+ if (ret < 0)
+ return ret;
+
+ av_opt_set_int(s->cbc->priv_data, "operating_point", s->operating_point, 0);
+
+ if (avctx->extradata && avctx->extradata_size) {
+ ret = ff_cbs_read_extradata_from_codec(s->cbc,
+ &s->current_obu,
+ avctx);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_WARNING, "Failed to read extradata.\n");
+ return ret;
+ }
+
+ seq = ((CodedBitstreamAV1Context *)(s->cbc->priv_data))->sequence_header;
+ if (!seq) {
+ av_log(avctx, AV_LOG_WARNING, "No sequence header available.\n");
+ goto end;
+ }
+
+ ret = set_context_with_sequence(avctx, seq);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_WARNING, "Failed to set decoder context.\n");
+ goto end;
+ }
+
+ end:
+ ff_cbs_fragment_reset(&s->current_obu);
+ }
+
+ return ret;
+}
+
+static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
+{
+ AV1DecContext *s = avctx->priv_data;
+ AV1RawFrameHeader *header = s->raw_frame_header;
+ AVFrame *frame;
+ int ret;
+
+ ret = update_context_with_frame_header(avctx, header);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to update context with frame header\n");
+ return ret;
+ }
+
+ if ((ret = ff_thread_get_buffer(avctx, f->f, AV_GET_BUFFER_FLAG_REF)) < 0)
+ goto fail;
+
+ frame = f->f;
+ frame->key_frame = header->frame_type == AV1_FRAME_KEY;
+
+ switch (header->frame_type) {
+ case AV1_FRAME_KEY:
+ case AV1_FRAME_INTRA_ONLY:
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ break;
+ case AV1_FRAME_INTER:
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ break;
+ case AV1_FRAME_SWITCH:
+ frame->pict_type = AV_PICTURE_TYPE_SP;
+ break;
+ }
+
+ if (avctx->hwaccel) {
+ const AVHWAccel *hwaccel = avctx->hwaccel;
+ if (hwaccel->frame_priv_data_size) {
+ f->hwaccel_priv_buf =
+ av_buffer_allocz(hwaccel->frame_priv_data_size);
+ if (!f->hwaccel_priv_buf) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
+ }
+ }
+ return 0;
+
+fail:
+ av1_frame_unref(avctx, f);
+ return ret;
+}
+
+static int export_film_grain(AVCodecContext *avctx, AVFrame *frame)
+{
+ AV1DecContext *s = avctx->priv_data;
+ const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
+ AVFilmGrainParams *fgp;
+ AVFilmGrainAOMParams *aom;
+
+ if (!film_grain->apply_grain)
+ return 0;
+
+ fgp = av_film_grain_params_create_side_data(frame);
+ if (!fgp)
+ return AVERROR(ENOMEM);
+
+ fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
+ fgp->seed = film_grain->grain_seed;
+
+ aom = &fgp->codec.aom;
+ aom->chroma_scaling_from_luma = film_grain->chroma_scaling_from_luma;
+ aom->scaling_shift = film_grain->grain_scaling_minus_8 + 8;
+ aom->ar_coeff_lag = film_grain->ar_coeff_lag;
+ aom->ar_coeff_shift = film_grain->ar_coeff_shift_minus_6 + 6;
+ aom->grain_scale_shift = film_grain->grain_scale_shift;
+ aom->overlap_flag = film_grain->overlap_flag;
+ aom->limit_output_range = film_grain->clip_to_restricted_range;
+
+ aom->num_y_points = film_grain->num_y_points;
+ for (int i = 0; i < film_grain->num_y_points; i++) {
+ aom->y_points[i][0] = film_grain->point_y_value[i];
+ aom->y_points[i][1] = film_grain->point_y_scaling[i];
+ }
+ aom->num_uv_points[0] = film_grain->num_cb_points;
+ for (int i = 0; i < film_grain->num_cb_points; i++) {
+ aom->uv_points[0][i][0] = film_grain->point_cb_value[i];
+ aom->uv_points[0][i][1] = film_grain->point_cb_scaling[i];
+ }
+ aom->num_uv_points[1] = film_grain->num_cr_points;
+ for (int i = 0; i < film_grain->num_cr_points; i++) {
+ aom->uv_points[1][i][0] = film_grain->point_cr_value[i];
+ aom->uv_points[1][i][1] = film_grain->point_cr_scaling[i];
+ }
+
+ for (int i = 0; i < 24; i++) {
+ aom->ar_coeffs_y[i] = film_grain->ar_coeffs_y_plus_128[i] - 128;
+ }
+ for (int i = 0; i < 25; i++) {
+ aom->ar_coeffs_uv[0][i] = film_grain->ar_coeffs_cb_plus_128[i] - 128;
+ aom->ar_coeffs_uv[1][i] = film_grain->ar_coeffs_cr_plus_128[i] - 128;
+ }
+
+ aom->uv_mult[0] = film_grain->cb_mult;
+ aom->uv_mult[1] = film_grain->cr_mult;
+ aom->uv_mult_luma[0] = film_grain->cb_luma_mult;
+ aom->uv_mult_luma[1] = film_grain->cr_luma_mult;
+ aom->uv_offset[0] = film_grain->cb_offset;
+ aom->uv_offset[1] = film_grain->cr_offset;
+
+ return 0;
+}
+
+static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
+ const AVPacket *pkt, int *got_frame)
+{
+ AV1DecContext *s = avctx->priv_data;
+ const AVFrame *srcframe = s->cur_frame.f;
+ int ret;
+
+ // TODO: all layers
+ if (s->operating_point_idc &&
+ av_log2(s->operating_point_idc >> 8) > s->cur_frame.spatial_id)
+ return 0;
+
+ ret = av_frame_ref(frame, srcframe);
+ if (ret < 0)
+ return ret;
+
+ if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) {
+ ret = export_film_grain(avctx, frame);
+ if (ret < 0) {
+ av_frame_unref(frame);
+ return ret;
+ }
+ }
+
+ frame->pts = pkt->pts;
+ frame->pkt_dts = pkt->dts;
+ frame->pkt_size = pkt->size;
+
+ *got_frame = 1;
+
+ return 0;
+}
+
+static int update_reference_list(AVCodecContext *avctx)
+{
+ AV1DecContext *s = avctx->priv_data;
+ const AV1RawFrameHeader *header = s->raw_frame_header;
+ int ret;
+
+ for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ if (header->refresh_frame_flags & (1 << i)) {
+ av1_frame_unref(avctx, &s->ref[i]);
+ if ((ret = av1_frame_ref(avctx, &s->ref[i], &s->cur_frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to update frame %d in reference list\n", i);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static int get_current_frame(AVCodecContext *avctx)
+{
+ AV1DecContext *s = avctx->priv_data;
+ int ret;
+
+ av1_frame_unref(avctx, &s->cur_frame);
+
+ s->cur_frame.header_ref = av_buffer_ref(s->header_ref);
+ if (!s->cur_frame.header_ref)
+ return AVERROR(ENOMEM);
+
+ s->cur_frame.raw_frame_header = s->raw_frame_header;
+
+ ret = init_tile_data(s);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to init tile data.\n");
+ return ret;
+ }
+
+ if ((avctx->skip_frame >= AVDISCARD_NONINTRA &&
+ (s->raw_frame_header->frame_type != AV1_FRAME_KEY &&
+ s->raw_frame_header->frame_type != AV1_FRAME_INTRA_ONLY)) ||
+ (avctx->skip_frame >= AVDISCARD_NONKEY &&
+ s->raw_frame_header->frame_type != AV1_FRAME_KEY) ||
+ avctx->skip_frame >= AVDISCARD_ALL)
+ return 0;
+
+ ret = av1_frame_alloc(avctx, &s->cur_frame);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to allocate space for current frame.\n");
+ return ret;
+ }
+
+ global_motion_params(s);
+ skip_mode_params(s);
+ coded_lossless_param(s);
+ load_grain_params(s);
+
+ return ret;
+}
+
+static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, AVPacket *pkt)
+{
+ AV1DecContext *s = avctx->priv_data;
+ AV1RawTileGroup *raw_tile_group = NULL;
+ int ret;
+
+ ret = ff_cbs_read_packet(s->cbc, &s->current_obu, pkt);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n");
+ goto end;
+ }
+ av_log(avctx, AV_LOG_DEBUG, "Total obu for this frame:%d.\n",
+ s->current_obu.nb_units);
+
+ for (int i = 0; i < s->current_obu.nb_units; i++) {
+ CodedBitstreamUnit *unit = &s->current_obu.units[i];
+ AV1RawOBU *obu = unit->content;
+ const AV1RawOBUHeader *header;
+
+ if (!obu)
+ continue;
+
+ header = &obu->header;
+ av_log(avctx, AV_LOG_DEBUG, "Obu idx:%d, obu type:%d.\n", i, unit->type);
+
+ switch (unit->type) {
+ case AV1_OBU_SEQUENCE_HEADER:
+ av_buffer_unref(&s->seq_ref);
+ s->seq_ref = av_buffer_ref(unit->content_ref);
+ if (!s->seq_ref) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ s->raw_seq = &obu->obu.sequence_header;
+
+ ret = set_context_with_sequence(avctx, s->raw_seq);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to set context.\n");
+ s->raw_seq = NULL;
+ goto end;
+ }
+
+ s->operating_point_idc = s->raw_seq->operating_point_idc[s->operating_point];
+
+ if (s->pix_fmt == AV_PIX_FMT_NONE) {
+ ret = get_pixel_format(avctx);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to get pixel format.\n");
+ s->raw_seq = NULL;
+ goto end;
+ }
+ }
+
+ if (avctx->hwaccel && avctx->hwaccel->decode_params) {
+ ret = avctx->hwaccel->decode_params(avctx, unit->type, unit->data,
+ unit->data_size);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "HW accel decode params fail.\n");
+ s->raw_seq = NULL;
+ goto end;
+ }
+ }
+ break;
+ case AV1_OBU_REDUNDANT_FRAME_HEADER:
+ if (s->raw_frame_header)
+ break;
+ // fall-through
+ case AV1_OBU_FRAME:
+ case AV1_OBU_FRAME_HEADER:
+ if (!s->raw_seq) {
+ av_log(avctx, AV_LOG_ERROR, "Missing Sequence Header.\n");
+ ret = AVERROR_INVALIDDATA;
+ goto end;
+ }
+
+ av_buffer_unref(&s->header_ref);
+ s->header_ref = av_buffer_ref(unit->content_ref);
+ if (!s->header_ref) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ if (unit->type == AV1_OBU_FRAME)
+ s->raw_frame_header = &obu->obu.frame.header;
+ else
+ s->raw_frame_header = &obu->obu.frame_header;
+
+ if (s->raw_frame_header->show_existing_frame) {
+ av1_frame_unref(avctx, &s->cur_frame);
+
+ ret = av1_frame_ref(avctx, &s->cur_frame,
+ &s->ref[s->raw_frame_header->frame_to_show_map_idx]);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to get reference frame.\n");
+ goto end;
+ }
+
+ ret = update_reference_list(avctx);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
+ goto end;
+ }
+
+ if (s->cur_frame.f->buf[0]) {
+ ret = set_output_frame(avctx, frame, pkt, got_frame);
+ if (ret < 0)
+ av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");
+ }
+
+ s->raw_frame_header = NULL;
+
+ goto end;
+ }
+
+ ret = get_current_frame(avctx);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Get current frame error\n");
+ goto end;
+ }
+
+ s->cur_frame.spatial_id = header->spatial_id;
+ s->cur_frame.temporal_id = header->temporal_id;
+
+ if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
+ ret = avctx->hwaccel->start_frame(avctx, unit->data,
+ unit->data_size);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "HW accel start frame fail.\n");
+ goto end;
+ }
+ }
+ if (unit->type != AV1_OBU_FRAME)
+ break;
+ // fall-through
+ case AV1_OBU_TILE_GROUP:
+ if (!s->raw_frame_header) {
+ av_log(avctx, AV_LOG_ERROR, "Missing Frame Header.\n");
+ ret = AVERROR_INVALIDDATA;
+ goto end;
+ }
+
+ if (unit->type == AV1_OBU_FRAME)
+ raw_tile_group = &obu->obu.frame.tile_group;
+ else
+ raw_tile_group = &obu->obu.tile_group;
+
+ ret = get_tiles_info(avctx, raw_tile_group);
+ if (ret < 0)
+ goto end;
+
+ if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
+ ret = avctx->hwaccel->decode_slice(avctx,
+ raw_tile_group->tile_data.data,
+ raw_tile_group->tile_data.data_size);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "HW accel decode slice fail.\n");
+ goto end;
+ }
+ }
+ break;
+ case AV1_OBU_TILE_LIST:
+ case AV1_OBU_TEMPORAL_DELIMITER:
+ case AV1_OBU_PADDING:
+ case AV1_OBU_METADATA:
+ break;
+ default:
+ av_log(avctx, AV_LOG_DEBUG,
+ "Unknown obu type: %d (%"SIZE_SPECIFIER" bits).\n",
+ unit->type, unit->data_size);
+ }
+
+ if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
+ if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
+ ret = avctx->hwaccel->end_frame(avctx);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
+ goto end;
+ }
+ }
+
+ ret = update_reference_list(avctx);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
+ goto end;
+ }
+
+ if (s->raw_frame_header->show_frame && s->cur_frame.f->buf[0]) {
+ ret = set_output_frame(avctx, frame, pkt, got_frame);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
+ goto end;
+ }
+ }
+ raw_tile_group = NULL;
+ s->raw_frame_header = NULL;
+ }
+ }
+
+end:
+ ff_cbs_fragment_reset(&s->current_obu);
+ if (ret < 0)
+ s->raw_frame_header = NULL;
+ return ret;
+}
+
+static void av1_decode_flush(AVCodecContext *avctx)
+{
+ AV1DecContext *s = avctx->priv_data;
+
+ for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++)
+ av1_frame_unref(avctx, &s->ref[i]);
+
+ av1_frame_unref(avctx, &s->cur_frame);
+ s->operating_point_idc = 0;
+ s->raw_frame_header = NULL;
+ s->raw_seq = NULL;
+
+ ff_cbs_flush(s->cbc);
+}
+
+#define OFFSET(x) offsetof(AV1DecContext, x)
+#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+static const AVOption av1_options[] = {
+ { "operating_point", "Select an operating point of the scalable bitstream",
+ OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, AV1_MAX_OPERATING_POINTS - 1, VD },
+ { NULL }
+};
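+
+/*
+ * Usage sketch for the option above (variable names are placeholders): the
+ * operating point is normally selected through an AVDictionary passed to
+ * avcodec_open2().
+ *
+ *     const AVCodec *codec = avcodec_find_decoder_by_name("av1");
+ *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
+ *     AVDictionary *opts = NULL;
+ *
+ *     av_dict_set(&opts, "operating_point", "1", 0); // decode operating point 1
+ *     if (avcodec_open2(avctx, codec, &opts) < 0)
+ *         ; // handle the error
+ *     av_dict_free(&opts);
+ */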
+
+static const AVClass av1_class = {
+ .class_name = "AV1 decoder",
+ .item_name = av_default_item_name,
+ .option = av1_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+const FFCodec ff_av1_decoder = {
+ .p.name = "av1",
+ CODEC_LONG_NAME("Alliance for Open Media AV1"),
+ .p.type = AVMEDIA_TYPE_VIDEO,
+ .p.id = AV_CODEC_ID_AV1,
+ .priv_data_size = sizeof(AV1DecContext),
+ .init = av1_decode_init,
+ .close = av1_decode_free,
+ FF_CODEC_DECODE_CB(av1_decode_frame),
+ .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING,
+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
+ FF_CODEC_CAP_SETS_PKT_DTS,
+ .flush = av1_decode_flush,
+ .p.profiles = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
+ .p.priv_class = &av1_class,
+ .bsfs = "av1_frame_split",
+ .hw_configs = (const AVCodecHWConfigInternal *const []) {
+#if CONFIG_AV1_DXVA2_HWACCEL
+ HWACCEL_DXVA2(av1),
+#endif
+#if CONFIG_AV1_D3D11VA_HWACCEL
+ HWACCEL_D3D11VA(av1),
+#endif
+#if CONFIG_AV1_D3D11VA2_HWACCEL
+ HWACCEL_D3D11VA2(av1),
+#endif
+#if CONFIG_AV1_NVDEC_HWACCEL
+ HWACCEL_NVDEC(av1),
+#endif
+#if CONFIG_AV1_VAAPI_HWACCEL
+ HWACCEL_VAAPI(av1),
+#endif
+#if CONFIG_AV1_VDPAU_HWACCEL
+ HWACCEL_VDPAU(av1),
+#endif
+
+ NULL
+ },
+};
diff --git a/media/ffvpx/libavcodec/av1dec.h b/media/ffvpx/libavcodec/av1dec.h
new file mode 100644
index 0000000000..82c7084e99
--- /dev/null
+++ b/media/ffvpx/libavcodec/av1dec.h
@@ -0,0 +1,89 @@
+/*
+ * AV1 video decoder
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AV1DEC_H
+#define AVCODEC_AV1DEC_H
+
+#include <stdint.h>
+
+#include "libavutil/buffer.h"
+#include "libavutil/frame.h"
+#include "libavutil/pixfmt.h"
+#include "avcodec.h"
+#include "cbs.h"
+#include "cbs_av1.h"
+
+typedef struct AV1Frame {
+ AVFrame *f;
+
+ AVBufferRef *hwaccel_priv_buf;
+ void *hwaccel_picture_private;
+
+ AVBufferRef *header_ref;
+ AV1RawFrameHeader *raw_frame_header;
+
+ int temporal_id;
+ int spatial_id;
+
+ uint8_t gm_invalid[AV1_NUM_REF_FRAMES];
+ uint8_t gm_type[AV1_NUM_REF_FRAMES];
+ int32_t gm_params[AV1_NUM_REF_FRAMES][6];
+
+ uint8_t skip_mode_frame_idx[2];
+
+ AV1RawFilmGrainParams film_grain;
+
+ uint8_t coded_lossless;
+} AV1Frame;
+
+typedef struct TileGroupInfo {
+ uint32_t tile_offset;
+ uint32_t tile_size;
+ uint16_t tile_row;
+ uint16_t tile_column;
+} TileGroupInfo;
+
+typedef struct AV1DecContext {
+ const AVClass *class;
+ AVCodecContext *avctx;
+
+ enum AVPixelFormat pix_fmt;
+ CodedBitstreamContext *cbc;
+ CodedBitstreamFragment current_obu;
+
+ AVBufferRef *seq_ref;
+ AV1RawSequenceHeader *raw_seq;
+ AVBufferRef *header_ref;
+ AV1RawFrameHeader *raw_frame_header;
+ TileGroupInfo *tile_group_info;
+ uint16_t tile_num;
+ uint16_t tg_start;
+ uint16_t tg_end;
+
+ int operating_point_idc;
+
+ AV1Frame ref[AV1_NUM_REF_FRAMES];
+ AV1Frame cur_frame;
+
+ // AVOptions
+ int operating_point;
+} AV1DecContext;
+
+#endif /* AVCODEC_AV1DEC_H */
diff --git a/media/ffvpx/libavcodec/avcodec.c b/media/ffvpx/libavcodec/avcodec.c
new file mode 100644
index 0000000000..fb1362290f
--- /dev/null
+++ b/media/ffvpx/libavcodec/avcodec.c
@@ -0,0 +1,716 @@
+/*
+ * AVCodecContext functions for libavcodec
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AVCodecContext functions for libavcodec
+ */
+
+#include "config.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/fifo.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/thread.h"
+#include "avcodec.h"
+#include "bsf.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "encode.h"
+#include "frame_thread_encoder.h"
+#include "internal.h"
+#include "thread.h"
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ int r = func(c, (char *)arg + i * size);
+ if (ret)
+ ret[i] = r;
+ }
+ emms_c();
+ return 0;
+}
+
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ int r = func(c, arg, i, 0);
+ if (ret)
+ ret[i] = r;
+ }
+ emms_c();
+ return 0;
+}
+
+static AVMutex codec_mutex = AV_MUTEX_INITIALIZER;
+
+static void lock_avcodec(const FFCodec *codec)
+{
+ if (codec->caps_internal & FF_CODEC_CAP_NOT_INIT_THREADSAFE && codec->init)
+ ff_mutex_lock(&codec_mutex);
+}
+
+static void unlock_avcodec(const FFCodec *codec)
+{
+ if (codec->caps_internal & FF_CODEC_CAP_NOT_INIT_THREADSAFE && codec->init)
+ ff_mutex_unlock(&codec_mutex);
+}
+
+static int64_t get_bit_rate(AVCodecContext *ctx)
+{
+ int64_t bit_rate;
+ int bits_per_sample;
+
+ switch (ctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ case AVMEDIA_TYPE_DATA:
+ case AVMEDIA_TYPE_SUBTITLE:
+ case AVMEDIA_TYPE_ATTACHMENT:
+ bit_rate = ctx->bit_rate;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ bits_per_sample = av_get_bits_per_sample(ctx->codec_id);
+ if (bits_per_sample) {
+ bit_rate = ctx->sample_rate * (int64_t)ctx->ch_layout.nb_channels;
+ if (bit_rate > INT64_MAX / bits_per_sample) {
+ bit_rate = 0;
+ } else
+ bit_rate *= bits_per_sample;
+ } else
+ bit_rate = ctx->bit_rate;
+ break;
+ default:
+ bit_rate = 0;
+ break;
+ }
+ return bit_rate;
+}
+
+int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
+{
+ int ret = 0;
+ AVCodecInternal *avci;
+ const FFCodec *codec2;
+
+ if (avcodec_is_open(avctx))
+ return 0;
+
+ if (!codec && !avctx->codec) {
+ av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n");
+ return AVERROR(EINVAL);
+ }
+ if (codec && avctx->codec && codec != avctx->codec) {
+ av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, "
+ "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name);
+ return AVERROR(EINVAL);
+ }
+ if (!codec)
+ codec = avctx->codec;
+ codec2 = ffcodec(codec);
+
+ if ((avctx->codec_type != AVMEDIA_TYPE_UNKNOWN && avctx->codec_type != codec->type) ||
+ (avctx->codec_id != AV_CODEC_ID_NONE && avctx->codec_id != codec->id)) {
+ av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n");
+ return AVERROR(EINVAL);
+ }
+
+ avctx->codec_type = codec->type;
+ avctx->codec_id = codec->id;
+ avctx->codec = codec;
+
+ if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
+ return AVERROR(EINVAL);
+
+ avci = av_mallocz(sizeof(*avci));
+ if (!avci) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ avctx->internal = avci;
+
+ avci->buffer_frame = av_frame_alloc();
+ avci->buffer_pkt = av_packet_alloc();
+ if (!avci->buffer_frame || !avci->buffer_pkt) {
+ ret = AVERROR(ENOMEM);
+ goto free_and_end;
+ }
+
+ if (codec2->priv_data_size > 0) {
+ if (!avctx->priv_data) {
+ avctx->priv_data = av_mallocz(codec2->priv_data_size);
+ if (!avctx->priv_data) {
+ ret = AVERROR(ENOMEM);
+ goto free_and_end;
+ }
+ if (codec->priv_class) {
+ *(const AVClass **)avctx->priv_data = codec->priv_class;
+ av_opt_set_defaults(avctx->priv_data);
+ }
+ }
+ if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, options)) < 0)
+ goto free_and_end;
+ } else {
+ avctx->priv_data = NULL;
+ }
+ if ((ret = av_opt_set_dict(avctx, options)) < 0)
+ goto free_and_end;
+
+ if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "Codec (%s) not on whitelist \'%s\'\n", codec->name, avctx->codec_whitelist);
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+
+ // only call ff_set_dimensions() for non-H.264/VP6F/DXV codecs so as not to overwrite previously set dimensions
+ if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height &&
+ (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F || avctx->codec_id == AV_CODEC_ID_DXV))) {
+ if (avctx->coded_width && avctx->coded_height)
+ ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
+ else if (avctx->width && avctx->height)
+ ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
+ if (ret < 0)
+ goto free_and_end;
+ }
+
+ if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
+ && ( av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0
+ || av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0)) {
+ av_log(avctx, AV_LOG_WARNING, "Ignoring invalid width/height values\n");
+ ff_set_dimensions(avctx, 0, 0);
+ }
+
+ if (avctx->width > 0 && avctx->height > 0) {
+ if (av_image_check_sar(avctx->width, avctx->height,
+ avctx->sample_aspect_ratio) < 0) {
+ av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
+ avctx->sample_aspect_ratio.num,
+ avctx->sample_aspect_ratio.den);
+ avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
+ }
+ }
+
+ if (avctx->sample_rate < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid sample rate: %d\n", avctx->sample_rate);
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+ if (avctx->block_align < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid block align: %d\n", avctx->block_align);
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ /* compat wrapper for old-style callers */
+ if (avctx->channel_layout && !avctx->channels)
+ avctx->channels = av_popcount64(avctx->channel_layout);
+
+ if ((avctx->channels && avctx->ch_layout.nb_channels != avctx->channels) ||
+ (avctx->channel_layout && (avctx->ch_layout.order != AV_CHANNEL_ORDER_NATIVE ||
+ avctx->ch_layout.u.mask != avctx->channel_layout))) {
+ av_channel_layout_uninit(&avctx->ch_layout);
+ if (avctx->channel_layout) {
+ av_channel_layout_from_mask(&avctx->ch_layout, avctx->channel_layout);
+ } else {
+ avctx->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ }
+ avctx->ch_layout.nb_channels = avctx->channels;
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ /* AV_CODEC_CAP_CHANNEL_CONF is a decoder-only flag, so the code below
+ * in particular checks that nb_channels is set for all audio encoders. */
+ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && !avctx->ch_layout.nb_channels
+ && !(codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)) {
+ av_log(avctx, AV_LOG_ERROR, "%s requires channel layout to be set\n",
+ av_codec_is_decoder(codec) ? "Decoder" : "Encoder");
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+ if (avctx->ch_layout.nb_channels && !av_channel_layout_check(&avctx->ch_layout)) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid channel layout\n");
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+ if (avctx->ch_layout.nb_channels > FF_SANE_NB_CHANNELS) {
+ av_log(avctx, AV_LOG_ERROR, "Too many channels: %d\n", avctx->ch_layout.nb_channels);
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+
+ avctx->frame_num = 0;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id);
+
+ if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) &&
+ avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
+ const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder";
+ const AVCodec *codec2;
+ av_log(avctx, AV_LOG_ERROR,
+ "The %s '%s' is experimental but experimental codecs are not enabled, "
+ "add '-strict %d' if you want to use it.\n",
+ codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL);
+ codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id);
+ if (!(codec2->capabilities & AV_CODEC_CAP_EXPERIMENTAL))
+ av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n",
+ codec_string, codec2->name);
+ ret = AVERROR_EXPERIMENTAL;
+ goto free_and_end;
+ }
+
+ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO &&
+ (!avctx->time_base.num || !avctx->time_base.den)) {
+ avctx->time_base.num = 1;
+ avctx->time_base.den = avctx->sample_rate;
+ }
+
+ if (av_codec_is_encoder(avctx->codec))
+ ret = ff_encode_preinit(avctx);
+ else
+ ret = ff_decode_preinit(avctx);
+ if (ret < 0)
+ goto free_and_end;
+
+ if (HAVE_THREADS && !avci->frame_thread_encoder) {
+ /* Frame-threaded decoders call FFCodec.init for their child contexts. */
+ lock_avcodec(codec2);
+ ret = ff_thread_init(avctx);
+ unlock_avcodec(codec2);
+ if (ret < 0) {
+ goto free_and_end;
+ }
+ }
+ if (!HAVE_THREADS && !(codec2->caps_internal & FF_CODEC_CAP_AUTO_THREADS))
+ avctx->thread_count = 1;
+
+ if (!(avctx->active_thread_type & FF_THREAD_FRAME) ||
+ avci->frame_thread_encoder) {
+ if (codec2->init) {
+ lock_avcodec(codec2);
+ ret = codec2->init(avctx);
+ unlock_avcodec(codec2);
+ if (ret < 0) {
+ avci->needs_close = codec2->caps_internal & FF_CODEC_CAP_INIT_CLEANUP;
+ goto free_and_end;
+ }
+ }
+ avci->needs_close = 1;
+ }
+
+ ret = 0;
+
+ if (av_codec_is_decoder(avctx->codec)) {
+ if (!avctx->bit_rate)
+ avctx->bit_rate = get_bit_rate(avctx);
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ /* update the deprecated fields for old-style callers */
+ avctx->channels = avctx->ch_layout.nb_channels;
+ avctx->channel_layout = avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ avctx->ch_layout.u.mask : 0;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ /* validate channel layout from the decoder */
+ if ((avctx->ch_layout.nb_channels && !av_channel_layout_check(&avctx->ch_layout)) ||
+ avctx->ch_layout.nb_channels > FF_SANE_NB_CHANNELS) {
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+ if (avctx->bits_per_coded_sample < 0) {
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+ }
+ if (codec->priv_class)
+ av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class);
+
+end:
+
+ return ret;
+free_and_end:
+ avcodec_close(avctx);
+ goto end;
+}
+
+void avcodec_flush_buffers(AVCodecContext *avctx)
+{
+ AVCodecInternal *avci = avctx->internal;
+
+ if (av_codec_is_encoder(avctx->codec)) {
+ int caps = avctx->codec->capabilities;
+
+ if (!(caps & AV_CODEC_CAP_ENCODER_FLUSH)) {
+ // Only encoders that explicitly declare support for it can be
+ // flushed. Otherwise, this is a no-op.
+ av_log(avctx, AV_LOG_WARNING, "Ignoring attempt to flush encoder "
+ "that doesn't support it\n");
+ return;
+ }
+ if (avci->in_frame)
+ av_frame_unref(avci->in_frame);
+ if (avci->recon_frame)
+ av_frame_unref(avci->recon_frame);
+ } else {
+ av_packet_unref(avci->last_pkt_props);
+ av_packet_unref(avci->in_pkt);
+
+ avctx->pts_correction_last_pts =
+ avctx->pts_correction_last_dts = INT64_MIN;
+
+ av_bsf_flush(avci->bsf);
+ }
+
+ avci->draining = 0;
+ avci->draining_done = 0;
+ avci->nb_draining_errors = 0;
+ av_frame_unref(avci->buffer_frame);
+ av_packet_unref(avci->buffer_pkt);
+
+ if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
+ ff_thread_flush(avctx);
+ else if (ffcodec(avctx->codec)->flush)
+ ffcodec(avctx->codec)->flush(avctx);
+}
+
+void avsubtitle_free(AVSubtitle *sub)
+{
+ int i;
+
+ for (i = 0; i < sub->num_rects; i++) {
+ AVSubtitleRect *const rect = sub->rects[i];
+
+ av_freep(&rect->data[0]);
+ av_freep(&rect->data[1]);
+ av_freep(&rect->data[2]);
+ av_freep(&rect->data[3]);
+ av_freep(&rect->text);
+ av_freep(&rect->ass);
+
+ av_freep(&sub->rects[i]);
+ }
+
+ av_freep(&sub->rects);
+
+ memset(sub, 0, sizeof(*sub));
+}
+
+av_cold int avcodec_close(AVCodecContext *avctx)
+{
+ int i;
+
+ if (!avctx)
+ return 0;
+
+ if (avcodec_is_open(avctx)) {
+ AVCodecInternal *avci = avctx->internal;
+
+ if (CONFIG_FRAME_THREAD_ENCODER &&
+ avci->frame_thread_encoder && avctx->thread_count > 1) {
+ ff_frame_thread_encoder_free(avctx);
+ }
+ if (HAVE_THREADS && avci->thread_ctx)
+ ff_thread_free(avctx);
+ if (avci->needs_close && ffcodec(avctx->codec)->close)
+ ffcodec(avctx->codec)->close(avctx);
+ avci->byte_buffer_size = 0;
+ av_freep(&avci->byte_buffer);
+ av_frame_free(&avci->buffer_frame);
+ av_packet_free(&avci->buffer_pkt);
+ av_packet_free(&avci->last_pkt_props);
+
+ av_packet_free(&avci->in_pkt);
+ av_frame_free(&avci->in_frame);
+ av_frame_free(&avci->recon_frame);
+
+ av_buffer_unref(&avci->pool);
+
+ if (avctx->hwaccel && avctx->hwaccel->uninit)
+ avctx->hwaccel->uninit(avctx);
+ av_freep(&avci->hwaccel_priv_data);
+
+ av_bsf_free(&avci->bsf);
+
+ av_channel_layout_uninit(&avci->initial_ch_layout);
+
+#if CONFIG_LCMS2
+ ff_icc_context_uninit(&avci->icc);
+#endif
+
+ av_freep(&avctx->internal);
+ }
+
+ for (i = 0; i < avctx->nb_coded_side_data; i++)
+ av_freep(&avctx->coded_side_data[i].data);
+ av_freep(&avctx->coded_side_data);
+ avctx->nb_coded_side_data = 0;
+
+ av_buffer_unref(&avctx->hw_frames_ctx);
+ av_buffer_unref(&avctx->hw_device_ctx);
+
+ if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
+ av_opt_free(avctx->priv_data);
+ av_opt_free(avctx);
+ av_freep(&avctx->priv_data);
+ if (av_codec_is_encoder(avctx->codec)) {
+ av_freep(&avctx->extradata);
+ avctx->extradata_size = 0;
+ } else if (av_codec_is_decoder(avctx->codec))
+ av_freep(&avctx->subtitle_header);
+
+ avctx->codec = NULL;
+ avctx->active_thread_type = 0;
+
+ return 0;
+}
+
+static const char *unknown_if_null(const char *str)
+{
+ return str ? str : "unknown";
+}
+
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
+{
+ const char *codec_type;
+ const char *codec_name;
+ const char *profile = NULL;
+ AVBPrint bprint;
+ int64_t bitrate;
+ int new_line = 0;
+ AVRational display_aspect_ratio;
+ const char *separator = enc->dump_separator ? (const char *)enc->dump_separator : ", ";
+ const char *str;
+
+ if (!buf || buf_size <= 0)
+ return;
+ av_bprint_init_for_buffer(&bprint, buf, buf_size);
+ codec_type = av_get_media_type_string(enc->codec_type);
+ codec_name = avcodec_get_name(enc->codec_id);
+ profile = avcodec_profile_name(enc->codec_id, enc->profile);
+
+ av_bprintf(&bprint, "%s: %s", codec_type ? codec_type : "unknown",
+ codec_name);
+ buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */
+
+ if (enc->codec && strcmp(enc->codec->name, codec_name))
+ av_bprintf(&bprint, " (%s)", enc->codec->name);
+
+ if (profile)
+ av_bprintf(&bprint, " (%s)", profile);
+ if ( enc->codec_type == AVMEDIA_TYPE_VIDEO
+ && av_log_get_level() >= AV_LOG_VERBOSE
+ && enc->refs)
+ av_bprintf(&bprint, ", %d reference frame%s",
+ enc->refs, enc->refs > 1 ? "s" : "");
+
+ if (enc->codec_tag)
+ av_bprintf(&bprint, " (%s / 0x%04X)",
+ av_fourcc2str(enc->codec_tag), enc->codec_tag);
+
+ switch (enc->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ {
+ unsigned len;
+
+ av_bprintf(&bprint, "%s%s", separator,
+ enc->pix_fmt == AV_PIX_FMT_NONE ? "none" :
+ unknown_if_null(av_get_pix_fmt_name(enc->pix_fmt)));
+
+ av_bprint_chars(&bprint, '(', 1);
+ len = bprint.len;
+
+ /* The following check ensures that '(' has been written
+ * and therefore allows us to erase it if it turns out
+ * to be unnecessary. */
+ if (!av_bprint_is_complete(&bprint))
+ return;
+
+ if (enc->bits_per_raw_sample && enc->pix_fmt != AV_PIX_FMT_NONE &&
+ enc->bits_per_raw_sample < av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth)
+ av_bprintf(&bprint, "%d bpc, ", enc->bits_per_raw_sample);
+ if (enc->color_range != AVCOL_RANGE_UNSPECIFIED &&
+ (str = av_color_range_name(enc->color_range)))
+ av_bprintf(&bprint, "%s, ", str);
+
+ if (enc->colorspace != AVCOL_SPC_UNSPECIFIED ||
+ enc->color_primaries != AVCOL_PRI_UNSPECIFIED ||
+ enc->color_trc != AVCOL_TRC_UNSPECIFIED) {
+ const char *col = unknown_if_null(av_color_space_name(enc->colorspace));
+ const char *pri = unknown_if_null(av_color_primaries_name(enc->color_primaries));
+ const char *trc = unknown_if_null(av_color_transfer_name(enc->color_trc));
+ if (strcmp(col, pri) || strcmp(col, trc)) {
+ new_line = 1;
+ av_bprintf(&bprint, "%s/%s/%s, ", col, pri, trc);
+ } else
+ av_bprintf(&bprint, "%s, ", col);
+ }
+
+ if (enc->field_order != AV_FIELD_UNKNOWN) {
+ const char *field_order = "progressive";
+ if (enc->field_order == AV_FIELD_TT)
+ field_order = "top first";
+ else if (enc->field_order == AV_FIELD_BB)
+ field_order = "bottom first";
+ else if (enc->field_order == AV_FIELD_TB)
+ field_order = "top coded first (swapped)";
+ else if (enc->field_order == AV_FIELD_BT)
+ field_order = "bottom coded first (swapped)";
+
+ av_bprintf(&bprint, "%s, ", field_order);
+ }
+
+ if (av_log_get_level() >= AV_LOG_VERBOSE &&
+ enc->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED &&
+ (str = av_chroma_location_name(enc->chroma_sample_location)))
+ av_bprintf(&bprint, "%s, ", str);
+
+ if (len == bprint.len) {
+ bprint.str[len - 1] = '\0';
+ bprint.len--;
+ } else {
+ if (bprint.len - 2 < bprint.size) {
+ /* Erase the last ", " */
+ bprint.len -= 2;
+ bprint.str[bprint.len] = '\0';
+ }
+ av_bprint_chars(&bprint, ')', 1);
+ }
+ }
+
+ if (enc->width) {
+ av_bprintf(&bprint, "%s%dx%d", new_line ? separator : ", ",
+ enc->width, enc->height);
+
+ if (av_log_get_level() >= AV_LOG_VERBOSE &&
+ (enc->width != enc->coded_width ||
+ enc->height != enc->coded_height))
+ av_bprintf(&bprint, " (%dx%d)",
+ enc->coded_width, enc->coded_height);
+
+ if (enc->sample_aspect_ratio.num) {
+ av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
+ enc->width * (int64_t)enc->sample_aspect_ratio.num,
+ enc->height * (int64_t)enc->sample_aspect_ratio.den,
+ 1024 * 1024);
+ av_bprintf(&bprint, " [SAR %d:%d DAR %d:%d]",
+ enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den,
+ display_aspect_ratio.num, display_aspect_ratio.den);
+ }
+ if (av_log_get_level() >= AV_LOG_DEBUG) {
+ int g = av_gcd(enc->time_base.num, enc->time_base.den);
+ av_bprintf(&bprint, ", %d/%d",
+ enc->time_base.num / g, enc->time_base.den / g);
+ }
+ }
+ if (encode) {
+ av_bprintf(&bprint, ", q=%d-%d", enc->qmin, enc->qmax);
+ } else {
+ if (enc->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS)
+ av_bprintf(&bprint, ", Closed Captions");
+ if (enc->properties & FF_CODEC_PROPERTY_FILM_GRAIN)
+ av_bprintf(&bprint, ", Film Grain");
+ if (enc->properties & FF_CODEC_PROPERTY_LOSSLESS)
+ av_bprintf(&bprint, ", lossless");
+ }
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ av_bprintf(&bprint, "%s", separator);
+
+ if (enc->sample_rate) {
+ av_bprintf(&bprint, "%d Hz, ", enc->sample_rate);
+ }
+ {
+ char buf[512];
+ int ret = av_channel_layout_describe(&enc->ch_layout, buf, sizeof(buf));
+ if (ret >= 0)
+ av_bprintf(&bprint, "%s", buf);
+ }
+ if (enc->sample_fmt != AV_SAMPLE_FMT_NONE &&
+ (str = av_get_sample_fmt_name(enc->sample_fmt))) {
+ av_bprintf(&bprint, ", %s", str);
+ }
+ if ( enc->bits_per_raw_sample > 0
+ && enc->bits_per_raw_sample != av_get_bytes_per_sample(enc->sample_fmt) * 8)
+ av_bprintf(&bprint, " (%d bit)", enc->bits_per_raw_sample);
+ if (av_log_get_level() >= AV_LOG_VERBOSE) {
+ if (enc->initial_padding)
+ av_bprintf(&bprint, ", delay %d", enc->initial_padding);
+ if (enc->trailing_padding)
+ av_bprintf(&bprint, ", padding %d", enc->trailing_padding);
+ }
+ break;
+ case AVMEDIA_TYPE_DATA:
+ if (av_log_get_level() >= AV_LOG_DEBUG) {
+ int g = av_gcd(enc->time_base.num, enc->time_base.den);
+ if (g)
+ av_bprintf(&bprint, ", %d/%d",
+ enc->time_base.num / g, enc->time_base.den / g);
+ }
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ if (enc->width)
+ av_bprintf(&bprint, ", %dx%d", enc->width, enc->height);
+ break;
+ default:
+ return;
+ }
+ if (encode) {
+ if (enc->flags & AV_CODEC_FLAG_PASS1)
+ av_bprintf(&bprint, ", pass 1");
+ if (enc->flags & AV_CODEC_FLAG_PASS2)
+ av_bprintf(&bprint, ", pass 2");
+ }
+ bitrate = get_bit_rate(enc);
+ if (bitrate != 0) {
+ av_bprintf(&bprint, ", %"PRId64" kb/s", bitrate / 1000);
+ } else if (enc->rc_max_rate > 0) {
+ av_bprintf(&bprint, ", max. %"PRId64" kb/s", enc->rc_max_rate / 1000);
+ }
+}
+
+int avcodec_is_open(AVCodecContext *s)
+{
+ return !!s->internal;
+}
+
+int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ av_frame_unref(frame);
+
+ if (av_codec_is_decoder(avctx->codec))
+ return ff_decode_receive_frame(avctx, frame);
+ return ff_encode_receive_frame(avctx, frame);
+}
diff --git a/media/ffvpx/libavcodec/avcodec.h b/media/ffvpx/libavcodec/avcodec.h
new file mode 100644
index 0000000000..9a0fe97cad
--- /dev/null
+++ b/media/ffvpx/libavcodec/avcodec.h
@@ -0,0 +1,3193 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVCODEC_H
+#define AVCODEC_AVCODEC_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec external API header
+ */
+
+#include "libavutil/samplefmt.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
+#include "libavutil/dict.h"
+#include "libavutil/frame.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+
+#include "codec.h"
+#include "codec_desc.h"
+#include "codec_par.h"
+#include "codec_id.h"
+#include "defs.h"
+#include "packet.h"
+#include "version_major.h"
+#ifndef HAVE_AV_CONFIG_H
+/* When included as part of the ffmpeg build, only include the major version
+ * to avoid unnecessary rebuilds. When included externally, keep including
+ * the full version information. */
+#include "version.h"
+#endif
+
+/**
+ * @defgroup libavc libavcodec
+ * Encoding/Decoding Library
+ *
+ * @{
+ *
+ * @defgroup lavc_decoding Decoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_encoding Encoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_codec Codecs
+ * @{
+ * @defgroup lavc_codec_native Native Codecs
+ * @{
+ * @}
+ * @defgroup lavc_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
+ * @{
+ * @}
+ * @}
+ * @defgroup lavc_internal Internal
+ * @{
+ * @}
+ * @}
+ */
+
+/**
+ * @ingroup libavc
+ * @defgroup lavc_encdec send/receive encoding and decoding API overview
+ * @{
+ *
+ * The avcodec_send_packet()/avcodec_receive_frame()/avcodec_send_frame()/
+ * avcodec_receive_packet() functions provide an encode/decode API, which
+ * decouples input and output.
+ *
+ * The API is very similar for encoding/decoding and audio/video, and works as
+ * follows:
+ * - Set up and open the AVCodecContext as usual.
+ * - Send valid input:
+ * - For decoding, call avcodec_send_packet() to give the decoder raw
+ * compressed data in an AVPacket.
+ * - For encoding, call avcodec_send_frame() to give the encoder an AVFrame
+ * containing uncompressed audio or video.
+ *
+ * In both cases, it is recommended that AVPackets and AVFrames are
+ * refcounted, or libavcodec might have to copy the input data. (libavformat
+ * always returns refcounted AVPackets, and av_frame_get_buffer() allocates
+ * refcounted AVFrames.)
+ * - Receive output in a loop. Periodically call one of the avcodec_receive_*()
+ * functions and process their output:
+ * - For decoding, call avcodec_receive_frame(). On success, it will return
+ * an AVFrame containing uncompressed audio or video data.
+ * - For encoding, call avcodec_receive_packet(). On success, it will return
+ * an AVPacket with a compressed frame.
+ *
+ * Repeat this call until it returns AVERROR(EAGAIN) or an error. The
+ * AVERROR(EAGAIN) return value means that new input data is required to
+ * return new output. In this case, continue with sending input. For each
+ * input frame/packet, the codec will typically return 1 output frame/packet,
+ * but it can also be 0 or more than 1.
+ *
+ * At the beginning of decoding or encoding, the codec might accept multiple
+ * input frames/packets without returning a frame, until its internal buffers
+ * are filled. This situation is handled transparently if you follow the steps
+ * outlined above.
+ *
+ * In theory, sending input can result in EAGAIN - this should happen only if
+ * not all output was received. You can use this to structure alternative decode
+ * or encode loops other than the one suggested above. For example, you could
+ * try sending new input on each iteration, and try to receive output if that
+ * returns EAGAIN.
+ *
+ * End of stream situations. These require "flushing" (aka draining) the codec,
+ * as the codec might buffer multiple frames or packets internally for
+ * performance or out of necessity (consider B-frames).
+ * This is handled as follows:
+ * - Instead of valid input, send NULL to the avcodec_send_packet() (decoding)
+ * or avcodec_send_frame() (encoding) functions. This will enter draining
+ * mode.
+ * - Call avcodec_receive_frame() (decoding) or avcodec_receive_packet()
+ * (encoding) in a loop until AVERROR_EOF is returned. The functions will
+ * not return AVERROR(EAGAIN), unless you forgot to enter draining mode.
+ * - Before decoding can be resumed again, the codec has to be reset with
+ * avcodec_flush_buffers().
+ *
+ * Using the API as outlined above is highly recommended. But it is also
+ * possible to call functions outside of this rigid schema. For example, you can
+ * call avcodec_send_packet() repeatedly without calling
+ * avcodec_receive_frame(). In this case, avcodec_send_packet() will succeed
+ * until the codec's internal buffer has been filled up (which is typically of
+ * size 1 per output frame, after initial input), and then reject input with
+ * AVERROR(EAGAIN). Once it starts rejecting input, you have no choice but to
+ * read at least some output.
+ *
+ * Not all codecs will follow a rigid and predictable dataflow; the only
+ * guarantee is that an AVERROR(EAGAIN) return value on a send/receive call on
+ * one end implies that a receive/send call on the other end will succeed, or
+ * at least will not fail with AVERROR(EAGAIN). In general, no codec will
+ * permit unlimited buffering of input or output.
+ *
+ * A codec is not allowed to return AVERROR(EAGAIN) for both sending and receiving. This
+ * would be an invalid state, which could put the codec user into an endless
+ * loop. The API has no concept of time either: it cannot happen that trying to
+ * do avcodec_send_packet() results in AVERROR(EAGAIN), but a repeated call 1 second
+ * later accepts the packet (with no other receive/flush API calls involved).
+ * The API is a strict state machine, and the passage of time is not supposed
+ * to influence it. Some timing-dependent behavior might still be deemed
+ * acceptable in certain cases. But it must never result in both send/receive
+ * returning EAGAIN at the same time at any point. It must also absolutely be
+ * avoided that the current state is "unstable" and can "flip-flop" between
+ * the send/receive APIs allowing progress. For example, it's not allowed that
+ * the codec randomly decides that it actually wants to consume a packet now
+ * instead of returning a frame, after it just returned AVERROR(EAGAIN) on an
+ * avcodec_send_packet() call.
+ * @}
+ */
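+
+/*
+ * A minimal decode loop following the scheme above, for illustration only:
+ * error handling is abbreviated and handle_frame() is a placeholder for
+ * whatever the caller does with each decoded frame.
+ *
+ *     int decode(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
+ *     {
+ *         // pkt == NULL enters draining mode at end of stream
+ *         int ret = avcodec_send_packet(dec_ctx, pkt);
+ *         if (ret < 0)
+ *             return ret;
+ *
+ *         while (ret >= 0) {
+ *             ret = avcodec_receive_frame(dec_ctx, frame);
+ *             if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ *                 return 0;        // need more input, or fully drained
+ *             if (ret < 0)
+ *                 return ret;      // a real decoding error
+ *             handle_frame(frame); // placeholder for caller-side processing
+ *             av_frame_unref(frame);
+ *         }
+ *         return 0;
+ *     }
+ */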
+
+/**
+ * @defgroup lavc_core Core functions/structures.
+ * @ingroup libavc
+ *
+ * Basic definitions, functions for querying libavcodec capabilities,
+ * allocating core structures, etc.
+ * @{
+ */
+
+/**
+ * @ingroup lavc_encoding
+ * minimum encoding buffer size
+ * Used to avoid some checks during header writing.
+ */
+#define AV_INPUT_BUFFER_MIN_SIZE 16384
+
+/**
+ * @ingroup lavc_encoding
+ */
+typedef struct RcOverride{
+ int start_frame;
+ int end_frame;
+ int qscale; // If this is 0 then quality_factor will be used instead.
+ float quality_factor;
+} RcOverride;
+
+/* encoding support
+ These flags can be passed in AVCodecContext.flags before initialization.
+ Note: Not everything is supported yet.
+*/
+
+/**
+ * Allow decoders to produce frames with data planes that are not aligned
+ * to CPU requirements (e.g. due to cropping).
+ */
+#define AV_CODEC_FLAG_UNALIGNED (1 << 0)
+/**
+ * Use fixed qscale.
+ */
+#define AV_CODEC_FLAG_QSCALE (1 << 1)
+/**
+ * 4 MV per MB allowed / advanced prediction for H.263.
+ */
+#define AV_CODEC_FLAG_4MV (1 << 2)
+/**
+ * Output even those frames that might be corrupted.
+ */
+#define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3)
+/**
+ * Use qpel MC.
+ */
+#define AV_CODEC_FLAG_QPEL (1 << 4)
+/**
+ * Don't output frames whose parameters differ from the first
+ * decoded frame in the stream.
+ */
+#define AV_CODEC_FLAG_DROPCHANGED (1 << 5)
+/**
+ * Request the encoder to output reconstructed frames, i.e.\ frames that would
+ * be produced by decoding the encoded bitstream. These frames may be retrieved
+ * by calling avcodec_receive_frame() immediately after a successful call to
+ * avcodec_receive_packet().
+ *
+ * Should only be used with encoders flagged with the
+ * @ref AV_CODEC_CAP_ENCODER_RECON_FRAME capability.
+ */
+#define AV_CODEC_FLAG_RECON_FRAME (1 << 6)
+/**
+ * @par decoding
+ * Request the decoder to propagate each packet's AVPacket.opaque and
+ * AVPacket.opaque_ref to its corresponding output AVFrame.
+ *
+ * @par encoding:
+ * Request the encoder to propagate each frame's AVFrame.opaque and
+ * AVFrame.opaque_ref values to its corresponding output AVPacket.
+ *
+ * @par
+ * May only be set on encoders that have the
+ * @ref AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability flag.
+ *
+ * @note
+ * While in typical cases one input frame produces exactly one output packet
+ * (perhaps after a delay), in general the mapping of frames to packets is
+ * M-to-N, so
+ * - Any number of input frames may be associated with any given output packet.
+ * This includes zero - e.g. some encoders may output packets that carry only
+ * metadata about the whole stream.
+ * - A given input frame may be associated with any number of output packets.
+ * Again this includes zero - e.g. some encoders may drop frames under certain
+ * conditions.
+ * .
+ * This implies that when using this flag, the caller must NOT assume that
+ * - a given input frame's opaques will necessarily appear on some output packet;
+ * - every output packet will have some non-NULL opaque value.
+ * .
+ * When an output packet contains multiple frames, the opaque values will be
+ * taken from the first of those.
+ *
+ * @note
+ * The converse holds for decoders, with frames and packets switched.
+ */
+#define AV_CODEC_FLAG_COPY_OPAQUE (1 << 7)
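+
+/*
+ * Decoding-side sketch for the flag above (my_tag is a placeholder for any
+ * caller-owned pointer): tag each input packet and read the tag back from the
+ * frame it produced.
+ *
+ *     dec_ctx->flags |= AV_CODEC_FLAG_COPY_OPAQUE;
+ *     ...
+ *     pkt->opaque = my_tag;
+ *     avcodec_send_packet(dec_ctx, pkt);
+ *     ...
+ *     if (avcodec_receive_frame(dec_ctx, frame) == 0)
+ *         my_tag = frame->opaque; // propagated from the matching packet
+ */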
+/**
+ * Signal to the encoder that the values of AVFrame.duration are valid and
+ * should be used (typically for transferring them to output packets).
+ *
+ * If this flag is not set, frame durations are ignored.
+ */
+#define AV_CODEC_FLAG_FRAME_DURATION (1 << 8)
+/**
+ * Use internal 2pass ratecontrol in first pass mode.
+ */
+#define AV_CODEC_FLAG_PASS1 (1 << 9)
+/**
+ * Use internal 2pass ratecontrol in second pass mode.
+ */
+#define AV_CODEC_FLAG_PASS2 (1 << 10)
+/**
+ * loop filter.
+ */
+#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11)
+/**
+ * Only decode/encode grayscale.
+ */
+#define AV_CODEC_FLAG_GRAY (1 << 13)
+/**
+ * error[?] variables will be set during encoding.
+ */
+#define AV_CODEC_FLAG_PSNR (1 << 15)
+/**
+ * Use interlaced DCT.
+ */
+#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18)
+/**
+ * Force low delay.
+ */
+#define AV_CODEC_FLAG_LOW_DELAY (1 << 19)
+/**
+ * Place global headers in extradata instead of every keyframe.
+ */
+#define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22)
+/**
+ * Use only bitexact stuff (except (I)DCT).
+ */
+#define AV_CODEC_FLAG_BITEXACT (1 << 23)
+/* Fx : Flag for H.263+ extra options */
+/**
+ * H.263 advanced intra coding / MPEG-4 AC prediction
+ */
+#define AV_CODEC_FLAG_AC_PRED (1 << 24)
+/**
+ * interlaced motion estimation
+ */
+#define AV_CODEC_FLAG_INTERLACED_ME (1 << 29)
+#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31)
+
+/**
+ * Allow non-spec-compliant speedup tricks.
+ */
+#define AV_CODEC_FLAG2_FAST (1 << 0)
+/**
+ * Skip bitstream encoding.
+ */
+#define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2)
+/**
+ * Place global headers at every keyframe instead of in extradata.
+ */
+#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3)
+
+/**
+ * Input bitstream might be truncated at packet boundaries
+ * instead of only at frame boundaries.
+ */
+#define AV_CODEC_FLAG2_CHUNKS (1 << 15)
+/**
+ * Discard cropping information from SPS.
+ */
+#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16)
+
+/**
+ * Show all frames before the first keyframe
+ */
+#define AV_CODEC_FLAG2_SHOW_ALL (1 << 22)
+/**
+ * Export motion vectors through frame side data
+ */
+#define AV_CODEC_FLAG2_EXPORT_MVS (1 << 28)
+/**
+ * Do not skip samples and export skip information as frame side data
+ */
+#define AV_CODEC_FLAG2_SKIP_MANUAL (1 << 29)
+/**
+ * Do not reset ASS ReadOrder field on flush (subtitles decoding)
+ */
+#define AV_CODEC_FLAG2_RO_FLUSH_NOOP (1 << 30)
+/**
+ * Generate/parse ICC profiles on encode/decode, as appropriate for the type of
+ * file. No effect on codecs which cannot contain embedded ICC profiles, or
+ * when compiled without support for lcms2.
+ */
+#define AV_CODEC_FLAG2_ICC_PROFILES (1U << 31)
+
+/* Exported side data.
+ These flags can be passed in AVCodecContext.export_side_data before initialization.
+*/
+/**
+ * Export motion vectors through frame side data
+ */
+#define AV_CODEC_EXPORT_DATA_MVS (1 << 0)
+/**
+ * Export encoder Producer Reference Time through packet side data
+ */
+#define AV_CODEC_EXPORT_DATA_PRFT (1 << 1)
+/**
+ * Decoding only.
+ * Export the AVVideoEncParams structure through frame side data.
+ */
+#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS (1 << 2)
+/**
+ * Decoding only.
+ * Do not apply film grain, export it instead.
+ */
+#define AV_CODEC_EXPORT_DATA_FILM_GRAIN (1 << 3)
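+
+/*
+ * Usage sketch for the flag above (variable names are placeholders): the
+ * exported parameters arrive as AV_FRAME_DATA_FILM_GRAIN_PARAMS side data on
+ * the decoded frame (see libavutil/film_grain_params.h).
+ *
+ *     dec_ctx->export_side_data |= AV_CODEC_EXPORT_DATA_FILM_GRAIN;
+ *     // ... avcodec_open2(), avcodec_send_packet()/avcodec_receive_frame() ...
+ *     AVFrameSideData *sd =
+ *         av_frame_get_side_data(frame, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
+ *     if (sd) {
+ *         const AVFilmGrainParams *fgp = (const AVFilmGrainParams *)sd->data;
+ *         // fgp->type, fgp->seed, fgp->codec.aom, ...
+ *     }
+ */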
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
+
+/**
+ * The encoder will keep a reference to the packet and may reuse it later.
+ */
+#define AV_GET_ENCODE_BUFFER_FLAG_REF (1 << 0)
+
+struct AVCodecInternal;
+
+/**
+ * main external API structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * You can use AVOptions (av_opt* / av_set/get*()) to access these fields from user
+ * applications.
+ * The name string for AVOptions options matches the associated command line
+ * parameter name and can be found in libavcodec/options_table.h
+ * The AVOption/command line parameter names differ in some cases from the C
+ * structure field names for historic reasons or brevity.
+ * sizeof(AVCodecContext) must not be used outside libav*.
+ */
+typedef struct AVCodecContext {
+ /**
+ * information on struct for av_log
+ * - set by avcodec_alloc_context3
+ */
+ const AVClass *av_class;
+ int log_level_offset;
+
+ enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
+ const struct AVCodec *codec;
+ enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
+
+ /**
+ * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * A demuxer should set this to what is stored in the field used to identify the codec.
+ * If there are multiple such fields in a container then the demuxer should choose the one
+ * which maximizes the information about the used codec.
+ * If the codec tag field in a container is larger than 32 bits then the demuxer should
+ * remap the longer ID to 32 bits with a table or other structure. Alternatively a new
+ * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
+ * first.
+ * - encoding: Set by user, if not then the default based on codec_id will be used.
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int codec_tag;
+
+ void *priv_data;
+
+ /**
+ * Private context used for internal data.
+ *
+ * Unlike priv_data, this is not codec-specific. It is used in general
+ * libavcodec functions.
+ */
+ struct AVCodecInternal *internal;
+
+ /**
+ * Private data of the user, can be used to carry app specific stuff.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ void *opaque;
+
+ /**
+ * the average bitrate
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: Set by user, may be overwritten by libavcodec
+ * if this info is available in the stream
+ */
+ int64_t bit_rate;
+
+ /**
+ * number of bits the bitstream is allowed to diverge from the reference.
+ * the reference can be CBR (for CBR pass1) or VBR (for pass2)
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: unused
+ */
+ int bit_rate_tolerance;
+
+ /**
+ * Global quality for codecs which cannot change it per frame.
+ * This should be proportional to MPEG-1/2/4 qscale.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int global_quality;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int compression_level;
+#define FF_COMPRESSION_DEFAULT -1
+
+ /**
+ * AV_CODEC_FLAG_*.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags;
+
+ /**
+ * AV_CODEC_FLAG2_*
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags2;
+
+ /**
+ * some codecs need / can use extradata like Huffman tables.
+ * MJPEG: Huffman tables
+ * rv10: additional flags
+ * MPEG-4: global headers (they can be in the bitstream or here)
+ * The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger
+ * than extradata_size to avoid problems if it is read with the bitstream reader.
+ * The bytewise contents of extradata must not depend on the architecture or CPU endianness.
+ * Must be allocated with the av_malloc() family of functions.
+ * - encoding: Set/allocated/freed by libavcodec.
+ * - decoding: Set/allocated/freed by user.
+ */
+ uint8_t *extradata;
+ int extradata_size;
+
+ /**
+ * This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+ * This often, but not always, is the inverse of the frame rate or field rate
+ * for video. 1/time_base is not the average frame rate if the frame rate is not
+ * constant.
+ *
+ * Like containers, elementary streams also can store timestamps; 1/time_base
+ * is the unit in which these timestamps are specified.
+ * As an example of such a codec time base, see ISO/IEC 14496-2:2001(E)
+ * vop_time_increment_resolution and fixed_vop_rate
+ * (fixed_vop_rate == 0 implies that it is different from the framerate).
+ *
+ * - encoding: MUST be set by user.
+ * - decoding: unused.
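+ *
+ * A minimal sketch (not part of the upstream text), assuming fixed 25 fps
+ * content and an allocated context named "avctx":
+ * @code
+ * avctx->time_base = (AVRational){1, 25};   // one tick per frame
+ * @endcode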
+ */
+ AVRational time_base;
+
+ /**
+ * For some codecs, the time base is closer to the field rate than the frame rate.
+ * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
+ * if no telecine is used ...
+ *
+ * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
+ */
+ int ticks_per_frame;
+
+ /**
+ * Codec delay.
+ *
+ * Encoding: Number of frames of delay there will be from the encoder input to
+ * the decoder output (assuming the decoder matches the spec).
+ * Decoding: Number of frames of delay in addition to what a standard decoder,
+ * as specified in the spec, would produce.
+ *
+ * Video:
+ * Number of frames the decoded output will be delayed relative to the
+ * encoded input.
+ *
+ * Audio:
+ * For encoding, this field is unused (see initial_padding).
+ *
+ * For decoding, this is the number of samples the decoder needs to
+ * output before the decoder's output is valid. When seeking, you should
+ * start decoding this many samples prior to your desired seek point.
+ *
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int delay;
+
+
+ /* video only */
+ /**
+ * picture width / height.
+ *
+ * @note Those fields may not match the values of the last
+ * AVFrame output by avcodec_receive_frame() due to frame
+ * reordering.
+ *
+ * - encoding: MUST be set by user.
+ * - decoding: May be set by the user before opening the decoder if known e.g.
+ * from the container. Some decoders will require the dimensions
+ * to be set by the caller. During decoding, the decoder may
+ * overwrite those values as required while parsing the data.
+ */
+ int width, height;
+
+ /**
+ * Bitstream width / height, may be different from width/height e.g. when
+ * the decoded frame is cropped before being output or lowres is enabled.
+ *
+ * @note Those fields may not match the value of the last
+ * AVFrame output by avcodec_receive_frame() due to frame
+ * reordering.
+ *
+ * - encoding: unused
+ * - decoding: May be set by the user before opening the decoder if known
+ * e.g. from the container. During decoding, the decoder may
+ * overwrite those values as required while parsing the data.
+ */
+ int coded_width, coded_height;
+
+ /**
+ * the number of pictures in a group of pictures, or 0 for intra_only
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int gop_size;
+
+ /**
+ * Pixel format, see AV_PIX_FMT_xxx.
+ * May be set by the demuxer if known from headers.
+ * May be overridden by the decoder if it knows better.
+ *
+ * @note This field may not match the value of the last
+ * AVFrame output by avcodec_receive_frame() due to frame
+ * reordering.
+ *
+ * - encoding: Set by user.
+ * - decoding: Set by user if known, overridden by libavcodec while
+ * parsing the data.
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * If non-NULL, 'draw_horiz_band' is called by the libavcodec
+ * decoder to draw a horizontal band. It improves cache usage. Not
+ * all codecs can do that. You must check the codec capabilities
+ * beforehand.
+ * When multithreading is used, it may be called from multiple threads
+ * at the same time; threads might draw different parts of the same AVFrame,
+ * or multiple AVFrames, and there is no guarantee that slices will be drawn
+ * in order.
+ * The function is also used by hardware acceleration APIs.
+ * It is called at least once during frame decoding to pass
+ * the data needed for hardware render.
+ * In that mode, instead of pixel data, AVFrame points to
+ * a structure specific to the acceleration API. The application
+ * reads the structure and can change some fields to indicate progress
+ * or mark state.
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @param height the height of the slice
+ * @param y the y position of the slice
+ * @param type 1->top field, 2->bottom field, 3->frame
+ * @param offset offset into the AVFrame.data from which the slice should be read
+ */
+ void (*draw_horiz_band)(struct AVCodecContext *s,
+ const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
+ int y, int type, int height);
+
+ /**
+ * Callback to negotiate the pixel format. Decoding only, may be set by the
+ * caller before avcodec_open2().
+ *
+ * Called by some decoders to select the pixel format that will be used for
+ * the output frames. This is mainly used to set up hardware acceleration; in
+ * that case the provided format list contains the corresponding hwaccel pixel
+ * formats alongside the "software" one. The software pixel format may also
+ * be retrieved from \ref sw_pix_fmt.
+ *
+ * This callback will be called when the coded frame properties (such as
+ * resolution, pixel format, etc.) change and more than one output format is
+ * supported for those new properties. If a hardware pixel format is chosen
+ * and initialization for it fails, the callback may be called again
+ * immediately.
+ *
+ * This callback may be called from different threads if the decoder is
+ * multi-threaded, but not from more than one thread simultaneously.
+ *
+ * @param fmt list of formats which may be used in the current
+ * configuration, terminated by AV_PIX_FMT_NONE.
+ * @warning Behavior is undefined if the callback returns a value other
+ * than one of the formats in fmt or AV_PIX_FMT_NONE.
+ * @return the chosen format or AV_PIX_FMT_NONE
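+ *
+ * A minimal sketch of such a callback (illustration only, not upstream
+ * documentation; AV_PIX_FMT_VAAPI merely stands in for whatever hwaccel
+ * format the caller prefers):
+ * @code
+ * static enum AVPixelFormat pick_format(AVCodecContext *s,
+ *                                       const enum AVPixelFormat *fmt)
+ * {
+ *     for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
+ *         if (fmt[i] == AV_PIX_FMT_VAAPI)
+ *             return fmt[i];      // hardware path is offered, take it
+ *     }
+ *     return AV_PIX_FMT_NONE;     // nothing usable found
+ * }
+ * @endcode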
+ */
+ enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+
+ /**
+ * maximum number of B-frames between non-B-frames
+ * Note: The output will be delayed by max_b_frames+1 relative to the input.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_b_frames;
+
+ /**
+ * qscale factor between IP and B-frames
+ * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_factor;
+
+ /**
+ * qscale offset between IP and B-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_offset;
+
+ /**
+ * Size of the frame reordering buffer in the decoder.
+ * For MPEG-2 it is 1 IPB or 0 low delay IP.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int has_b_frames;
+
+ /**
+ * qscale factor between P- and I-frames
+ * If > 0 then the last P-frame quantizer will be used (q = lastp_q * factor + offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_factor;
+
+ /**
+ * qscale offset between P and I-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_offset;
+
+ /**
+ * luminance masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float lumi_masking;
+
+ /**
+ * temporal complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float temporal_cplx_masking;
+
+ /**
+ * spatial complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float spatial_cplx_masking;
+
+ /**
+ * p block masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float p_masking;
+
+ /**
+ * darkness masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float dark_masking;
+
+ /**
+ * slice count
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user (or 0).
+ */
+ int slice_count;
+
+ /**
+ * slice offsets in the frame in bytes
+ * - encoding: Set/allocated by libavcodec.
+ * - decoding: Set/allocated by user (or NULL).
+ */
+ int *slice_offset;
+
+ /**
+ * sample aspect ratio (0 if unknown)
+ * That is the width of a pixel divided by the height of the pixel.
+ * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_cmp;
+ /**
+ * subpixel motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_sub_cmp;
+ /**
+ * macroblock comparison function (not supported yet)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_cmp;
+ /**
+ * interlaced DCT comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int ildct_cmp;
+#define FF_CMP_SAD 0
+#define FF_CMP_SSE 1
+#define FF_CMP_SATD 2
+#define FF_CMP_DCT 3
+#define FF_CMP_PSNR 4
+#define FF_CMP_BIT 5
+#define FF_CMP_RD 6
+#define FF_CMP_ZERO 7
+#define FF_CMP_VSAD 8
+#define FF_CMP_VSSE 9
+#define FF_CMP_NSSE 10
+#define FF_CMP_W53 11
+#define FF_CMP_W97 12
+#define FF_CMP_DCTMAX 13
+#define FF_CMP_DCT264 14
+#define FF_CMP_MEDIAN_SAD 15
+#define FF_CMP_CHROMA 256
+
+ /**
+ * ME diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dia_size;
+
+ /**
+ * number of previous MV predictors (2a+1 x 2a+1 square)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int last_predictor_count;
+
+ /**
+ * motion estimation prepass comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_pre_cmp;
+
+ /**
+ * ME prepass diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_dia_size;
+
+ /**
+ * subpel ME quality
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_subpel_quality;
+
+ /**
+ * maximum motion estimation search range in subpel units
+ * If 0 then no limit.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_range;
+
+ /**
+ * slice flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int slice_flags;
+#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display
+#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG-2 field pics)
+#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
+
+ /**
+ * macroblock decision mode
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_decision;
+#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp
+#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits
+#define FF_MB_DECISION_RD 2 ///< rate distortion
+
+ /**
+ * custom intra quantization matrix
+ * Must be allocated with the av_malloc() family of functions, and will be freed in
+ * avcodec_free_context().
+ * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL.
+ * - decoding: Set/allocated/freed by libavcodec.
+ */
+ uint16_t *intra_matrix;
+
+ /**
+ * custom inter quantization matrix
+ * Must be allocated with the av_malloc() family of functions, and will be freed in
+ * avcodec_free_context().
+ * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL.
+ * - decoding: Set/allocated/freed by libavcodec.
+ */
+ uint16_t *inter_matrix;
+
+ /**
+ * precision of the intra DC coefficient - 8
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec
+ */
+ int intra_dc_precision;
+
+ /**
+ * Number of macroblock rows at the top which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_top;
+
+ /**
+ * Number of macroblock rows at the bottom which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_bottom;
+
+ /**
+ * minimum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmin;
+
+ /**
+ * maximum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmax;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int bidir_refine;
+
+ /**
+ * minimum GOP size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int keyint_min;
+
+ /**
+ * number of reference frames
+ * - encoding: Set by user.
+ * - decoding: Set by lavc.
+ */
+ int refs;
+
+ /**
+ * Note: Value depends upon the compare function used for fullpel ME.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mv0_threshold;
+
+ /**
+ * Chromaticity coordinates of the source primaries.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorPrimaries color_primaries;
+
+ /**
+ * Color Transfer Characteristic.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ /**
+ * This defines the location of chroma samples.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVChromaLocation chroma_sample_location;
+
+ /**
+ * Number of slices.
+ * Indicates number of picture subdivisions. Used for parallelized
+ * decoding.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ int slices;
+
+ /** Field order
+ * - encoding: set by libavcodec
+ * - decoding: Set by user.
+ */
+ enum AVFieldOrder field_order;
+
+ /* audio only */
+ int sample_rate; ///< samples per second
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * number of audio channels
+ * @deprecated use ch_layout.nb_channels
+ */
+ attribute_deprecated
+ int channels;
+#endif
+
+ /**
+ * audio sample format
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVSampleFormat sample_fmt; ///< sample format
+
+ /* The following data should not be initialized. */
+ /**
+ * Number of samples per channel in an audio frame.
+ *
+ * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame
+ * except the last must contain exactly frame_size samples per channel.
+ * May be 0 when the codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE set, then the
+ * frame size is not restricted.
+ * - decoding: may be set by some decoders to indicate constant frame size
+ */
+ int frame_size;
+
+#if FF_API_AVCTX_FRAME_NUMBER
+ /**
+ * Frame counter, set by libavcodec.
+ *
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
+ * @deprecated use frame_num instead
+ */
+ attribute_deprecated
+ int frame_number;
+#endif
+
+ /**
+ * number of bytes per packet if constant and known or 0
+ * Used by some WAV based audio codecs.
+ */
+ int block_align;
+
+ /**
+ * Audio cutoff bandwidth (0 means "automatic")
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int cutoff;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * Audio channel layout.
+ * - encoding: set by user.
+ * - decoding: set by user, may be overwritten by libavcodec.
+ * @deprecated use ch_layout
+ */
+ attribute_deprecated
+ uint64_t channel_layout;
+
+ /**
+ * Request decoder to use this channel layout if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @deprecated use "downmix" codec private option
+ */
+ attribute_deprecated
+ uint64_t request_channel_layout;
+#endif
+
+ /**
+ * Type of service that the audio stream conveys.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVAudioServiceType audio_service_type;
+
+ /**
+ * desired sample format
+ * - encoding: Not used.
+ * - decoding: Set by user.
+ * Decoder will decode to this format if it can.
+ */
+ enum AVSampleFormat request_sample_fmt;
+
+ /**
+ * This callback is called at the beginning of each frame to get data
+ * buffer(s) for it. There may be one contiguous buffer for all the data or
+ * there may be a buffer per each data plane or anything in between. What
+ * this means is, you may set however many entries in buf[] you feel necessary.
+ * Each buffer must be reference-counted using the AVBuffer API (see description
+ * of buf[] below).
+ *
+ * The following fields will be set in the frame before this callback is
+ * called:
+ * - format
+ * - width, height (video only)
+ * - sample_rate, channel_layout, nb_samples (audio only)
+ * Their values may differ from the corresponding values in
+ * AVCodecContext. This callback must use the frame values, not the codec
+ * context values, to calculate the required buffer size.
+ *
+ * This callback must fill the following fields in the frame:
+ * - data[]
+ * - linesize[]
+ * - extended_data:
+ * * if the data is planar audio with more than 8 channels, then this
+ * callback must allocate and fill extended_data to contain all pointers
+ * to all data planes. data[] must hold as many pointers as it can.
+ * extended_data must be allocated with av_malloc() and will be freed in
+ * av_frame_unref().
+ * * otherwise extended_data must point to data
+ * - buf[] must contain one or more pointers to AVBufferRef structures. Each of
+ * the frame's data and extended_data pointers must be contained in these. That
+ * is, one AVBufferRef for each allocated chunk of memory, not necessarily one
+ * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(),
+ * and av_buffer_ref().
+ * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+ * this callback and filled with the extra buffers if there are more
+ * buffers than buf[] can hold. extended_buf will be freed in
+ * av_frame_unref().
+ *
+ * If AV_CODEC_CAP_DR1 is not set then get_buffer2() must call
+ * avcodec_default_get_buffer2() instead of providing buffers allocated by
+ * some other means.
+ *
+ * Each data plane must be aligned to the maximum required by the target
+ * CPU.
+ *
+ * @see avcodec_default_get_buffer2()
+ *
+ * Video:
+ *
+ * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
+ *
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * Some decoders do not support linesizes changing between frames.
+ *
+ * If frame multithreading is used, this callback may be called from a
+ * different thread, but not from more than one at once. Does not need to be
+ * reentrant.
+ *
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
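+ *
+ * A deliberately minimal sketch (not from the upstream docs): a custom
+ * callback that only defers to the default allocator, which is a reasonable
+ * starting point before adding real pool logic:
+ * @code
+ * static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
+ * {
+ *     // custom bookkeeping could go here (counters, pool lookups, ...)
+ *     return avcodec_default_get_buffer2(s, frame, flags);
+ * }
+ * @endcode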
+ */
+ int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
+
+ /* - encoding parameters */
+ float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
+ float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
+
+ /**
+ * minimum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmin;
+
+ /**
+ * maximum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmax;
+
+ /**
+ * maximum quantizer difference between frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_qdiff;
+
+ /**
+ * decoder bitstream buffer size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_buffer_size;
+
+ /**
+ * ratecontrol override, see RcOverride
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ int rc_override_count;
+ RcOverride *rc_override;
+
+ /**
+ * maximum bitrate
+ * - encoding: Set by user.
+ * - decoding: Set by user, may be overwritten by libavcodec.
+ */
+ int64_t rc_max_rate;
+
+ /**
+ * minimum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int64_t rc_min_rate;
+
+ /**
+ * Rate control: attempt to use, at most, <value> of what can be used without an underflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_max_available_vbv_use;
+
+ /**
+ * Rate control: attempt to use at least <value> times the amount needed to prevent a VBV overflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_min_vbv_overflow_use;
+
+ /**
+ * Number of bits which should be loaded into the rc buffer before decoding starts.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_initial_buffer_occupancy;
+
+ /**
+ * trellis RD quantization
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int trellis;
+
+ /**
+ * pass1 encoding statistics output buffer
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ char *stats_out;
+
+ /**
+ * pass2 encoding statistics input buffer
+ * Concatenated stuff from stats_out of pass1 should be placed here.
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ char *stats_in;
+
+ /**
+ * Work around bugs in encoders which sometimes cannot be detected automatically.
+ * - encoding: Set by user
+ * - decoding: Set by user
+ */
+ int workaround_bugs;
+#define FF_BUG_AUTODETECT 1 ///< autodetection
+#define FF_BUG_XVID_ILACE 4
+#define FF_BUG_UMP4 8
+#define FF_BUG_NO_PADDING 16
+#define FF_BUG_AMV 32
+#define FF_BUG_QPEL_CHROMA 64
+#define FF_BUG_STD_QPEL 128
+#define FF_BUG_QPEL_CHROMA2 256
+#define FF_BUG_DIRECT_BLOCKSIZE 512
+#define FF_BUG_EDGE 1024
+#define FF_BUG_HPEL_CHROMA 2048
+#define FF_BUG_DC_CLIP 4096
+#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders.
+#define FF_BUG_TRUNCATED 16384
+#define FF_BUG_IEDGE 32768
+
+ /**
+ * strictly follow the standard (MPEG-4, ...).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ * Setting this to STRICT or higher means the encoder and decoder will
+ * generally do stupid things, whereas setting it to unofficial or lower
+ * will mean the encoder might produce output that is not supported by all
+ * spec-compliant decoders. Decoders don't differentiate between normal,
+ * unofficial and experimental (that is, they always try to decode things
+ * when they can) unless they are explicitly asked to behave stupidly
+ * (=strictly conform to the specs)
+ * This may only be set to one of the FF_COMPLIANCE_* values in defs.h.
+ */
+ int strict_std_compliance;
+
+ /**
+ * error concealment flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int error_concealment;
+#define FF_EC_GUESS_MVS 1
+#define FF_EC_DEBLOCK 2
+#define FF_EC_FAVOR_INTER 256
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug;
+#define FF_DEBUG_PICT_INFO 1
+#define FF_DEBUG_RC 2
+#define FF_DEBUG_BITSTREAM 4
+#define FF_DEBUG_MB_TYPE 8
+#define FF_DEBUG_QP 16
+#define FF_DEBUG_DCT_COEFF 0x00000040
+#define FF_DEBUG_SKIP 0x00000080
+#define FF_DEBUG_STARTCODE 0x00000100
+#define FF_DEBUG_ER 0x00000400
+#define FF_DEBUG_MMCO 0x00000800
+#define FF_DEBUG_BUGS 0x00001000
+#define FF_DEBUG_BUFFERS 0x00008000
+#define FF_DEBUG_THREADS 0x00010000
+#define FF_DEBUG_GREEN_MD 0x00800000
+#define FF_DEBUG_NOMC 0x01000000
+
+ /**
+ * Error recognition; may misdetect some more or less valid parts as errors.
+ * This is a bitfield of the AV_EF_* values defined in defs.h.
+ *
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int err_recognition;
+
+#if FF_API_REORDERED_OPAQUE
+ /**
+ * opaque 64-bit number (generally a PTS) that will be reordered and
+ * output in AVFrame.reordered_opaque
+ * - encoding: Set by libavcodec to the reordered_opaque of the input
+ * frame corresponding to the last returned packet. Only
+ * supported by encoders with the
+ * AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability.
+ * - decoding: Set by user.
+ *
+ * @deprecated Use AV_CODEC_FLAG_COPY_OPAQUE instead
+ */
+ attribute_deprecated
+ int64_t reordered_opaque;
+#endif
+
+ /**
+ * Hardware accelerator in use
+ * - encoding: unused.
+ * - decoding: Set by libavcodec
+ */
+ const struct AVHWAccel *hwaccel;
+
+ /**
+ * Legacy hardware accelerator context.
+ *
+ * For some hardware acceleration methods, the caller may use this field to
+ * signal hwaccel-specific data to the codec. The struct pointed to by this
+ * pointer is hwaccel-dependent and defined in the respective header. Please
+ * refer to the FFmpeg HW accelerator documentation to know how to fill
+ * this.
+ *
+ * In most cases this field is optional - the necessary information may also
+ * be provided to libavcodec through @ref hw_frames_ctx or @ref
+ * hw_device_ctx (see avcodec_get_hw_config()). However, in some cases it
+ * may be the only method of signalling some (optional) information.
+ *
+ * The struct and its contents are owned by the caller.
+ *
+ * - encoding: May be set by the caller before avcodec_open2(). Must remain
+ * valid until avcodec_free_context().
+ * - decoding: May be set by the caller in the get_format() callback.
+ * Must remain valid until the next get_format() call,
+ * or avcodec_free_context() (whichever comes first).
+ */
+ void *hwaccel_context;
+
+ /**
+ * error
+ * - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+ /**
+ * DCT algorithm, see FF_DCT_* below
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dct_algo;
+#define FF_DCT_AUTO 0
+#define FF_DCT_FASTINT 1
+#define FF_DCT_INT 2
+#define FF_DCT_MMX 3
+#define FF_DCT_ALTIVEC 5
+#define FF_DCT_FAAN 6
+
+ /**
+ * IDCT algorithm, see FF_IDCT_* below.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int idct_algo;
+#define FF_IDCT_AUTO 0
+#define FF_IDCT_INT 1
+#define FF_IDCT_SIMPLE 2
+#define FF_IDCT_SIMPLEMMX 3
+#define FF_IDCT_ARM 7
+#define FF_IDCT_ALTIVEC 8
+#define FF_IDCT_SIMPLEARM 10
+#define FF_IDCT_XVID 14
+#define FF_IDCT_SIMPLEARMV5TE 16
+#define FF_IDCT_SIMPLEARMV6 17
+#define FF_IDCT_FAAN 20
+#define FF_IDCT_SIMPLENEON 22
+#if FF_API_IDCT_NONE
+// formerly used by xvmc
+#define FF_IDCT_NONE 24
+#endif
+#define FF_IDCT_SIMPLEAUTO 128
+
+ /**
+ * bits per sample/pixel from the demuxer (needed for huffyuv).
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * Bits per sample/pixel of internal libavcodec pixel/sample format.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int lowres;
+
+ /**
+ * thread count
+ * is used to decide how many independent tasks should be passed to execute()
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_count;
+
+ /**
+ * Which multithreading methods to use.
+ * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,
+ * so clients which cannot provide future frames should not use it.
+ *
+ * - encoding: Set by user, otherwise the default is used.
+ * - decoding: Set by user, otherwise the default is used.
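+ *
+ * Sketch of a common configuration, set before avcodec_open2() (the values
+ * are only an example):
+ * @code
+ * avctx->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;
+ * avctx->thread_count = 0;   // 0 lets libavcodec pick a thread count itself
+ * @endcode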
+ */
+ int thread_type;
+#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once
+#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once
+
+ /**
+ * Which multithreading methods are in use by the codec.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int active_thread_type;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * @param count the number of things to execute
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * @param c context passed also to func
+ * @param count the number of things to execute
+ * @param arg2 argument passed unchanged to func
+ * @param ret return values of executed functions, must have space for "count" values. May be NULL.
+ * @param func function that will be called count times, with jobnr from 0 to count-1.
+ * threadnr will be in the range 0 to c->thread_count-1 (< MAX_THREADS), and no
+ * two instances of func executing at the same time will have the same threadnr.
+ * @return always 0 currently, but code should handle a future improvement where,
+ * if any call to func returns < 0, no further calls to func may be made and < 0 is returned.
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);
+
+ /**
+ * noise vs. sse weight for the nsse comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int nsse_weight;
+
+ /**
+ * profile
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int profile;
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_RESERVED -100
+
+#define FF_PROFILE_AAC_MAIN 0
+#define FF_PROFILE_AAC_LOW 1
+#define FF_PROFILE_AAC_SSR 2
+#define FF_PROFILE_AAC_LTP 3
+#define FF_PROFILE_AAC_HE 4
+#define FF_PROFILE_AAC_HE_V2 28
+#define FF_PROFILE_AAC_LD 22
+#define FF_PROFILE_AAC_ELD 38
+#define FF_PROFILE_MPEG2_AAC_LOW 128
+#define FF_PROFILE_MPEG2_AAC_HE 131
+
+#define FF_PROFILE_DNXHD 0
+#define FF_PROFILE_DNXHR_LB 1
+#define FF_PROFILE_DNXHR_SQ 2
+#define FF_PROFILE_DNXHR_HQ 3
+#define FF_PROFILE_DNXHR_HQX 4
+#define FF_PROFILE_DNXHR_444 5
+
+#define FF_PROFILE_DTS 20
+#define FF_PROFILE_DTS_ES 30
+#define FF_PROFILE_DTS_96_24 40
+#define FF_PROFILE_DTS_HD_HRA 50
+#define FF_PROFILE_DTS_HD_MA 60
+#define FF_PROFILE_DTS_EXPRESS 70
+
+#define FF_PROFILE_MPEG2_422 0
+#define FF_PROFILE_MPEG2_HIGH 1
+#define FF_PROFILE_MPEG2_SS 2
+#define FF_PROFILE_MPEG2_SNR_SCALABLE 3
+#define FF_PROFILE_MPEG2_MAIN 4
+#define FF_PROFILE_MPEG2_SIMPLE 5
+
+#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag
+#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag
+
+#define FF_PROFILE_H264_BASELINE 66
+#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED)
+#define FF_PROFILE_H264_MAIN 77
+#define FF_PROFILE_H264_EXTENDED 88
+#define FF_PROFILE_H264_HIGH 100
+#define FF_PROFILE_H264_HIGH_10 110
+#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_MULTIVIEW_HIGH 118
+#define FF_PROFILE_H264_HIGH_422 122
+#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_STEREO_HIGH 128
+#define FF_PROFILE_H264_HIGH_444 144
+#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244
+#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_CAVLC_444 44
+
+#define FF_PROFILE_VC1_SIMPLE 0
+#define FF_PROFILE_VC1_MAIN 1
+#define FF_PROFILE_VC1_COMPLEX 2
+#define FF_PROFILE_VC1_ADVANCED 3
+
+#define FF_PROFILE_MPEG4_SIMPLE 0
+#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1
+#define FF_PROFILE_MPEG4_CORE 2
+#define FF_PROFILE_MPEG4_MAIN 3
+#define FF_PROFILE_MPEG4_N_BIT 4
+#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5
+#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
+#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
+#define FF_PROFILE_MPEG4_HYBRID 8
+#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
+#define FF_PROFILE_MPEG4_CORE_SCALABLE 10
+#define FF_PROFILE_MPEG4_ADVANCED_CODING 11
+#define FF_PROFILE_MPEG4_ADVANCED_CORE 12
+#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14
+#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15
+
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 1
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 2
+#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 32768
+#define FF_PROFILE_JPEG2000_DCINEMA_2K 3
+#define FF_PROFILE_JPEG2000_DCINEMA_4K 4
+
+#define FF_PROFILE_VP9_0 0
+#define FF_PROFILE_VP9_1 1
+#define FF_PROFILE_VP9_2 2
+#define FF_PROFILE_VP9_3 3
+
+#define FF_PROFILE_HEVC_MAIN 1
+#define FF_PROFILE_HEVC_MAIN_10 2
+#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
+#define FF_PROFILE_HEVC_REXT 4
+#define FF_PROFILE_HEVC_SCC 9
+
+#define FF_PROFILE_VVC_MAIN_10 1
+#define FF_PROFILE_VVC_MAIN_10_444 33
+
+#define FF_PROFILE_AV1_MAIN 0
+#define FF_PROFILE_AV1_HIGH 1
+#define FF_PROFILE_AV1_PROFESSIONAL 2
+
+#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0
+#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1
+#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2
+#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3
+#define FF_PROFILE_MJPEG_JPEG_LS 0xf7
+
+#define FF_PROFILE_SBC_MSBC 1
+
+#define FF_PROFILE_PRORES_PROXY 0
+#define FF_PROFILE_PRORES_LT 1
+#define FF_PROFILE_PRORES_STANDARD 2
+#define FF_PROFILE_PRORES_HQ 3
+#define FF_PROFILE_PRORES_4444 4
+#define FF_PROFILE_PRORES_XQ 5
+
+#define FF_PROFILE_ARIB_PROFILE_A 0
+#define FF_PROFILE_ARIB_PROFILE_C 1
+
+#define FF_PROFILE_KLVA_SYNC 0
+#define FF_PROFILE_KLVA_ASYNC 1
+
+ /**
+ * level
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int level;
+#define FF_LEVEL_UNKNOWN -99
+
+ /**
+ * Skip loop filtering for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_loop_filter;
+
+ /**
+ * Skip IDCT/dequantization for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_idct;
+
+ /**
+ * Skip decoding for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_frame;
+
+ /**
+ * Header containing style information for text subtitles.
+ * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
+ * [Script Info] and [V4+ Styles] section, plus the [Events] line and
+ * the Format line following. It shouldn't include any Dialogue line.
+ * - encoding: Set/allocated/freed by user (before avcodec_open2())
+ * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
+ */
+ uint8_t *subtitle_header;
+ int subtitle_header_size;
+
+ /**
+ * Audio only. The number of "priming" samples (padding) inserted by the
+ * encoder at the beginning of the audio. I.e. this number of leading
+ * decoded samples must be discarded by the caller to get the original audio
+ * without leading padding.
+ *
+ * - decoding: unused
+ * - encoding: Set by libavcodec. The timestamps on the output packets are
+ * adjusted by the encoder so that they always refer to the
+ * first sample of the data actually contained in the packet,
+ * including any added padding. E.g. if the timebase is
+ * 1/samplerate and the timestamp of the first input sample is
+ * 0, the timestamp of the first output packet will be
+ * -initial_padding.
+ */
+ int initial_padding;
+
+ /**
+ * - decoding: For codecs that store a framerate value in the compressed
+ * bitstream, the decoder may export it here. { 0, 1} when
+ * unknown.
+ * - encoding: May be used to signal the framerate of CFR content to an
+ * encoder.
+ */
+ AVRational framerate;
+
+ /**
+ * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
+ * - encoding: unused.
+ * - decoding: Set by libavcodec before calling get_format()
+ */
+ enum AVPixelFormat sw_pix_fmt;
+
+ /**
+ * Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
+ * - encoding unused.
+ * - decoding set by user.
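+ *
+ * Illustrative sketch only: converting a decoded frame's pts, expressed in
+ * this timebase, to seconds (assumes pkt_timebase was set and the pts is
+ * not AV_NOPTS_VALUE):
+ * @code
+ * double t = frame->pts * av_q2d(avctx->pkt_timebase);
+ * @endcode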
+ */
+ AVRational pkt_timebase;
+
+ /**
+ * AVCodecDescriptor
+ * - encoding: unused.
+ * - decoding: set by libavcodec.
+ */
+ const AVCodecDescriptor *codec_descriptor;
+
+ /**
+ * Current statistics for PTS correction.
+ * - decoding: maintained and used by libavcodec, not intended to be used by user apps
+ * - encoding: unused
+ */
+ int64_t pts_correction_num_faulty_pts; ///< Number of incorrect PTS values so far
+ int64_t pts_correction_num_faulty_dts; ///< Number of incorrect DTS values so far
+ int64_t pts_correction_last_pts; ///< PTS of the last frame
+ int64_t pts_correction_last_dts; ///< DTS of the last frame
+
+ /**
+ * Character encoding of the input subtitles file.
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ char *sub_charenc;
+
+ /**
+ * Subtitles character encoding mode. Formats or codecs might be adjusting
+ * this setting (if they are doing the conversion themselves for instance).
+ * - decoding: set by libavcodec
+ * - encoding: unused
+ */
+ int sub_charenc_mode;
+#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance)
+#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself
+#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv
+#define FF_SUB_CHARENC_MODE_IGNORE 2 ///< neither convert the subtitles, nor check them for valid UTF-8
+
+ /**
+ * Skip processing alpha if supported by codec.
+ * Note that if the format uses pre-multiplied alpha (common with VP6,
+ * and recommended due to better video quality/compression)
+ * the image will look as if alpha-blended onto a black background.
+ * However for formats that do not use pre-multiplied alpha
+ * there might be serious artefacts (though e.g. libswscale currently
+ * assumes pre-multiplied alpha anyway).
+ *
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ int skip_alpha;
+
+ /**
+ * Number of samples to skip after a discontinuity
+ * - decoding: unused
+ * - encoding: set by libavcodec
+ */
+ int seek_preroll;
+
+ /**
+ * custom intra quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: unused.
+ */
+ uint16_t *chroma_intra_matrix;
+
+ /**
+ * dump format separator.
+ * can be ", " or "\n " or anything else
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ uint8_t *dump_separator;
+
+ /**
+ * ',' separated list of allowed decoders.
+ * If NULL then all are allowed
+ * - encoding: unused
+ * - decoding: set by user
+ */
+ char *codec_whitelist;
+
+ /**
+ * Properties of the stream that gets decoded
+ * - encoding: unused
+ * - decoding: set by libavcodec
+ */
+ unsigned properties;
+#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001
+#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002
+#define FF_CODEC_PROPERTY_FILM_GRAIN 0x00000004
+
+ /**
+ * Additional data associated with the entire coded stream.
+ *
+ * - decoding: unused
+ * - encoding: may be set by libavcodec after avcodec_open2().
+ */
+ AVPacketSideData *coded_side_data;
+ int nb_coded_side_data;
+
+ /**
+ * A reference to the AVHWFramesContext describing the input (for encoding)
+ * or output (decoding) frames. The reference is set by the caller and
+ * afterwards owned (and freed) by libavcodec - it should never be read by
+ * the caller after being set.
+ *
+ * - decoding: This field should be set by the caller from the get_format()
+ * callback. The previous reference (if any) will always be
+ * unreffed by libavcodec before the get_format() call.
+ *
+ * If the default get_buffer2() is used with a hwaccel pixel
+ * format, then this AVHWFramesContext will be used for
+ * allocating the frame buffers.
+ *
+ * - encoding: For hardware encoders configured to use a hwaccel pixel
+ * format, this field should be set by the caller to a reference
+ * to the AVHWFramesContext describing input frames.
+ * AVHWFramesContext.format must be equal to
+ * AVCodecContext.pix_fmt.
+ *
+ * This field should be set before avcodec_open2() is called.
+ */
+ AVBufferRef *hw_frames_ctx;
+
+ /**
+ * Audio only. The amount of padding (in samples) appended by the encoder to
+ * the end of the audio. I.e. this number of decoded samples must be
+ * discarded by the caller from the end of the stream to get the original
+ * audio without any trailing padding.
+ *
+ * - decoding: unused
+ * - encoding: unused
+ */
+ int trailing_padding;
+
+ /**
+ * The maximum number of pixels per image to accept.
+ *
+ * - decoding: set by user
+ * - encoding: set by user
+ */
+ int64_t max_pixels;
+
+ /**
+ * A reference to the AVHWDeviceContext describing the device which will
+ * be used by a hardware encoder/decoder. The reference is set by the
+ * caller and afterwards owned (and freed) by libavcodec.
+ *
+ * This should be used if either the codec device does not require
+ * hardware frames or any that are used are to be allocated internally by
+ * libavcodec. If the user wishes to supply any of the frames used as
+ * encoder input or decoder output then hw_frames_ctx should be used
+ * instead. When hw_frames_ctx is set in get_format() for a decoder, this
+ * field will be ignored while decoding the associated stream segment, but
+ * may again be used on a following one after another get_format() call.
+ *
+ * For both encoders and decoders this field should be set before
+ * avcodec_open2() is called and must not be written to thereafter.
+ *
+ * Note that some decoders may require this field to be set initially in
+ * order to support hw_frames_ctx at all - in that case, all frames
+ * contexts used must be created on the same device.
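+ *
+ * Minimal sketch (AV_HWDEVICE_TYPE_VAAPI is only an example; any other
+ * supported device type is used the same way):
+ * @code
+ * int err = av_hwdevice_ctx_create(&avctx->hw_device_ctx,
+ *                                  AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0);
+ * if (err < 0)
+ *     return err;   // no device available; the caller may fall back to software
+ * @endcode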
+ */
+ AVBufferRef *hw_device_ctx;
+
+ /**
+ * Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated
+ * decoding (if active).
+ * - encoding: unused
+ * - decoding: Set by user (either before avcodec_open2(), or in the
+ * AVCodecContext.get_format callback)
+ */
+ int hwaccel_flags;
+
+ /**
+ * Video decoding only. Certain video codecs support cropping, meaning that
+ * only a sub-rectangle of the decoded frame is intended for display. This
+ * option controls how cropping is handled by libavcodec.
+ *
+ * When set to 1 (the default), libavcodec will apply cropping internally.
+ * I.e. it will modify the output frame width/height fields and offset the
+ * data pointers (only by as much as possible while preserving alignment, or
+ * by the full amount if the AV_CODEC_FLAG_UNALIGNED flag is set) so that
+ * the frames output by the decoder refer only to the cropped area. The
+ * crop_* fields of the output frames will be zero.
+ *
+ * When set to 0, the width/height fields of the output frames will be set
+ * to the coded dimensions and the crop_* fields will describe the cropping
+ * rectangle. Applying the cropping is left to the caller.
+ *
+ * @warning When hardware acceleration with opaque output frames is used,
+ * libavcodec is unable to apply cropping from the top/left border.
+ *
+ * @note when this option is set to zero, the width/height fields of the
+ * AVCodecContext and output AVFrames have different meanings. The codec
+ * context fields store display dimensions (with the coded dimensions in
+ * coded_width/height), while the frame fields store the coded dimensions
+ * (with the display dimensions being determined by the crop_* fields).
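+ *
+ * Sketch (illustration only): recovering the visible region of a decoded
+ * frame when this field is set to 0:
+ * @code
+ * int vis_w = frame->width  - (int)(frame->crop_left + frame->crop_right);
+ * int vis_h = frame->height - (int)(frame->crop_top  + frame->crop_bottom);
+ * @endcode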
+ */
+ int apply_cropping;
+
+ /**
+ * Video decoding only. Sets the number of extra hardware frames which
+ * the decoder will allocate for use by the caller. This must be set
+ * before avcodec_open2() is called.
+ *
+ * Some hardware decoders require all frames that they will use for
+ * output to be defined in advance before decoding starts. For such
+ * decoders, the hardware frame pool must therefore be of a fixed size.
+ * The extra frames set here are on top of any number that the decoder
+ * needs internally in order to operate normally (for example, frames
+ * used as reference pictures).
+ */
+ int extra_hw_frames;
+
+ /**
+ * The percentage of damaged samples above which a frame is discarded.
+ *
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ int discard_damaged_percentage;
+
+ /**
+ * The maximum number of samples per frame to accept.
+ *
+ * - decoding: set by user
+ * - encoding: set by user
+ */
+ int64_t max_samples;
+
+ /**
+ * Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of
+ * metadata exported in frame, packet, or coded stream side data by
+ * decoders and encoders.
+ *
+ * - decoding: set by user
+ * - encoding: set by user
+ */
+ int export_side_data;
+
+ /**
+ * This callback is called at the beginning of each packet to get a data
+ * buffer for it.
+ *
+ * The following field will be set in the packet before this callback is
+ * called:
+ * - size
+ * This callback must use the above value to calculate the required buffer size,
+ * which must be padded by at least AV_INPUT_BUFFER_PADDING_SIZE bytes.
+ *
+ * In some specific cases, the encoder may not use the entire buffer allocated by this
+ * callback. This will be reflected in the size value in the packet once returned by
+ * avcodec_receive_packet().
+ *
+ * This callback must fill the following fields in the packet:
+ * - data: alignment requirements for AVPacket apply, if any. Some architectures and
+ * encoders may benefit from having aligned data.
+ * - buf: must contain a pointer to an AVBufferRef structure. The packet's
+ * data pointer must be contained in it. See: av_buffer_create(), av_buffer_alloc(),
+ * and av_buffer_ref().
+ *
+ * If AV_CODEC_CAP_DR1 is not set then get_encode_buffer() must call
+ * avcodec_default_get_encode_buffer() instead of providing a buffer allocated by
+ * some other means.
+ *
+ * The flags field may contain a combination of AV_GET_ENCODE_BUFFER_FLAG_ flags.
+ * They may be used for example to hint what use the buffer may get after being
+ * created.
+ * Implementations of this callback may ignore flags they don't understand.
+ * If AV_GET_ENCODE_BUFFER_FLAG_REF is set in flags then the packet may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
+ *
+ * This callback must be thread-safe, as when frame threading is used, it may
+ * be called from multiple threads simultaneously.
+ *
+ * @see avcodec_default_get_encode_buffer()
+ *
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: unused
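+ *
+ * Minimal sketch (not from the upstream docs) of a custom callback that
+ * defers to the default implementation:
+ * @code
+ * static int my_get_encode_buffer(AVCodecContext *s, AVPacket *pkt, int flags)
+ * {
+ *     // a real implementation might hand out packets from its own buffer pool
+ *     return avcodec_default_get_encode_buffer(s, pkt, flags);
+ * }
+ * @endcode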
+ */
+ int (*get_encode_buffer)(struct AVCodecContext *s, AVPacket *pkt, int flags);
+
+ /**
+ * Audio channel layout.
+ * - encoding: must be set by the caller, to one of AVCodec.ch_layouts.
+ * - decoding: may be set by the caller if known e.g. from the container.
+ * The decoder can then override during decoding as needed.
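+ *
+ * Sketch of setting a plain stereo layout before opening an encoder
+ * (illustration only):
+ * @code
+ * av_channel_layout_default(&avctx->ch_layout, 2);   // native order, 2 channels
+ * @endcode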
+ */
+ AVChannelLayout ch_layout;
+
+ /**
+ * Frame counter, set by libavcodec.
+ *
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
+ */
+ int64_t frame_num;
+} AVCodecContext;
+
+/**
+ * @defgroup lavc_hwaccel AVHWAccel
+ *
+ * @note Nothing in this structure should be accessed by the user. At some
+ * point in future it will not be externally visible at all.
+ *
+ * @{
+ */
+typedef struct AVHWAccel {
+ /**
+ * Name of the hardware accelerated codec.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ */
+ const char *name;
+
+ /**
+ * Type of codec implemented by the hardware accelerator.
+ *
+ * See AVMEDIA_TYPE_xxx
+ */
+ enum AVMediaType type;
+
+ /**
+ * Codec implemented by the hardware accelerator.
+ *
+ * See AV_CODEC_ID_xxx
+ */
+ enum AVCodecID id;
+
+ /**
+ * Supported pixel format.
+ *
+ * Only hardware accelerated formats are supported here.
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Hardware accelerated codec capabilities.
+ * see AV_HWACCEL_CODEC_CAP_*
+ */
+ int capabilities;
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavcodec and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+
+ /**
+ * Allocate a custom buffer
+ */
+ int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame);
+
+ /**
+ * Called at the beginning of each frame or field picture.
+ *
+ * Meaningful frame information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * Note that buf can be NULL together with buf_size set to 0.
+ * Otherwise, the whole frame is available at this point.
+ *
+ * @param avctx the codec context
+ * @param buf the frame data buffer base
+ * @param buf_size the size of the frame in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Callback for parameter data (SPS/PPS/VPS etc).
+ *
+ * Useful for hardware decoders which keep persistent state about the
+ * video parameters, and need to receive any changes to update that state.
+ *
+ * @param avctx the codec context
+ * @param type the nal unit type
+ * @param buf the nal unit data buffer
+ * @param buf_size the size of the nal unit in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*decode_params)(AVCodecContext *avctx, int type, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Callback for each slice.
+ *
+ * Meaningful slice information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @param buf the slice data buffer base
+ * @param buf_size the size of the slice in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Called at the end of each frame or field picture.
+ *
+ * The whole picture is parsed at this point and can now be sent
+ * to the hardware accelerator. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*end_frame)(AVCodecContext *avctx);
+
+ /**
+ * Size of per-frame hardware accelerator private data.
+ *
+ * Private data is allocated with av_mallocz() before
+ * AVCodecContext.get_buffer() and deallocated after
+ * AVCodecContext.release_buffer().
+ */
+ int frame_priv_data_size;
+
+ /**
+ * Initialize the hwaccel private data.
+ *
+ * This will be called from ff_get_format(), after hwaccel and
+ * hwaccel_context are set and the hwaccel private data in AVCodecInternal
+ * is allocated.
+ */
+ int (*init)(AVCodecContext *avctx);
+
+ /**
+ * Uninitialize the hwaccel private data.
+ *
+ * This will be called from get_format() or avcodec_close(), after hwaccel
+ * and hwaccel_context are already uninitialized.
+ */
+ int (*uninit)(AVCodecContext *avctx);
+
+ /**
+ * Size of the private data to allocate in
+ * AVCodecInternal.hwaccel_priv_data.
+ */
+ int priv_data_size;
+
+ /**
+ * Internal hwaccel capabilities.
+ */
+ int caps_internal;
+
+ /**
+ * Fill the given hw_frames context with current codec parameters. Called
+ * from get_format. Refer to avcodec_get_hw_frames_parameters() for
+ * details.
+ *
+ * This CAN be called before AVHWAccel.init is called, and you must assume
+ * that avctx->hwaccel_priv_data is invalid.
+ */
+ int (*frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx);
+} AVHWAccel;
+
+/**
+ * HWAccel is experimental and is thus avoided in favor of non-experimental
+ * codecs
+ */
+#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200
+
+/**
+ * Hardware acceleration should be used for decoding even if the codec level
+ * used is unknown or higher than the maximum supported level reported by the
+ * hardware driver.
+ *
+ * It's generally a good idea to pass this flag unless you have a specific
+ * reason not to, as hardware tends to under-report supported levels.
+ */
+#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0)
+
+/**
+ * Hardware acceleration can output YUV pixel formats with a different chroma
+ * sampling than 4:2:0 and/or other than 8 bits per component.
+ */
+#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1)
+
+/**
+ * Hardware acceleration should still be attempted for decoding when the
+ * codec profile does not match the reported capabilities of the hardware.
+ *
+ * For example, this can be used to try to decode baseline profile H.264
+ * streams in hardware - it will often succeed, because many streams marked
+ * as baseline profile actually conform to constrained baseline profile.
+ *
+ * @warning If the stream is actually not supported then the behaviour is
+ * undefined, and may include returning entirely incorrect output
+ * while indicating success.
+ */
+#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH (1 << 2)
+
+/**
+ * Some hardware decoders (namely nvdec) can either output direct decoder
+ * surfaces, or make an on-device copy and return said copy.
+ * There is a hard limit on how many decoder surfaces there can be, and it
+ * cannot be accurately guessed ahead of time.
+ * For some processing chains, this can be okay, but others will run into the
+ * limit and in turn produce very confusing errors that require fine tuning of
+ * more or less obscure options by the user, or in extreme cases cannot be
+ * resolved at all without inserting an avfilter that forces a copy.
+ *
+ * Thus, the hwaccel will by default make a copy for safety and resilience.
+ * If a user really wants to minimize the number of copies, they can set this
+ * flag and ensure their processing chain does not exhaust the surface pool.
+ */
+#define AV_HWACCEL_FLAG_UNSAFE_OUTPUT (1 << 3)
+
+/**
+ * @}
+ */
+
+enum AVSubtitleType {
+ SUBTITLE_NONE,
+
+ SUBTITLE_BITMAP, ///< A bitmap, pict will be set
+
+ /**
+ * Plain text, the text field must be set by the decoder and is
+ * authoritative. ass and pict fields may contain approximations.
+ */
+ SUBTITLE_TEXT,
+
+ /**
+ * Formatted text, the ass field must be set by the decoder and is
+ * authoritative. pict and text fields may contain approximations.
+ */
+ SUBTITLE_ASS,
+};
+
+#define AV_SUBTITLE_FLAG_FORCED 0x00000001
+
+typedef struct AVSubtitleRect {
+ int x; ///< top left corner of pict, undefined when pict is not set
+ int y; ///< top left corner of pict, undefined when pict is not set
+ int w; ///< width of pict, undefined when pict is not set
+ int h; ///< height of pict, undefined when pict is not set
+ int nb_colors; ///< number of colors in pict, undefined when pict is not set
+
+ /**
+ * data+linesize for the bitmap of this subtitle.
+ * Can be set for text/ass as well once they are rendered.
+ */
+ uint8_t *data[4];
+ int linesize[4];
+
+ enum AVSubtitleType type;
+
+ char *text; ///< 0 terminated plain UTF-8 text
+
+ /**
+ * 0 terminated ASS/SSA compatible event line.
+ * The presentation of this is unaffected by the other values in this
+ * struct.
+ */
+ char *ass;
+
+ int flags;
+} AVSubtitleRect;
+
+typedef struct AVSubtitle {
+ uint16_t format; /* 0 = graphics */
+ uint32_t start_display_time; /* relative to packet pts, in ms */
+ uint32_t end_display_time; /* relative to packet pts, in ms */
+ unsigned num_rects;
+ AVSubtitleRect **rects;
+ int64_t pts; ///< Same as packet pts, in AV_TIME_BASE
+} AVSubtitle;
+
+/**
+ * Return the LIBAVCODEC_VERSION_INT constant.
+ */
+unsigned avcodec_version(void);
+
+/**
+ * Return the libavcodec build-time configuration.
+ */
+const char *avcodec_configuration(void);
+
+/**
+ * Return the libavcodec license.
+ */
+const char *avcodec_license(void);
+
+/**
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct should be freed with avcodec_free_context().
+ *
+ * @param codec if non-NULL, allocate private data and initialize defaults
+ * for the given codec. It is illegal to then call avcodec_open2()
+ * with a different codec.
+ * If NULL, then the codec-specific defaults won't be initialized,
+ * which may result in suboptimal default settings (this is
+ * important mainly for encoders, e.g. libx264).
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ */
+AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);
+
+/**
+ * Free the codec context and everything associated with it and write NULL to
+ * the provided pointer.
+ */
+void avcodec_free_context(AVCodecContext **avctx);
+
+/**
+ * Get the AVClass for AVCodecContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_class(void);
+
+/**
+ * Get the AVClass for AVSubtitleRect. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_subtitle_rect_class(void);
+
+/**
+ * Fill the parameters struct based on the values from the supplied codec
+ * context. Any allocated fields in par are freed and replaced with duplicates
+ * of the corresponding fields in codec.
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure
+ */
+int avcodec_parameters_from_context(AVCodecParameters *par,
+ const AVCodecContext *codec);
+
+/**
+ * Fill the codec context based on the values from the supplied codec
+ * parameters. Any allocated fields in codec that have a corresponding field in
+ * par are freed and replaced with duplicates of the corresponding field in par.
+ * Fields in codec that do not have a counterpart in par are not touched.
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure.
+ */
+int avcodec_parameters_to_context(AVCodecContext *codec,
+ const AVCodecParameters *par);
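+
+/*
+ * A short sketch of the usual decoder-setup pattern built on
+ * avcodec_parameters_to_context(); it assumes `par` is an AVCodecParameters
+ * describing the stream to be decoded (e.g. obtained from a demuxer).
+ *
+ * @code
+ * const AVCodec *codec = avcodec_find_decoder(par->codec_id);
+ * AVCodecContext *ctx = avcodec_alloc_context3(codec);
+ * if (!codec || !ctx)
+ *     exit(1);
+ * if (avcodec_parameters_to_context(ctx, par) < 0)
+ *     exit(1);
+ * if (avcodec_open2(ctx, codec, NULL) < 0)
+ *     exit(1);
+ * @endcode
+ */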
+
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @note Always call this function before using decoding routines (such as
+ * @ref avcodec_receive_frame()).
+ *
+ * @code
+ * AVDictionary *opts = NULL;
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * if (!codec)
+ *     exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, &opts) < 0)
+ *     exit(1);
+ * @endcode
+ *
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has been
+ *              previously passed to avcodec_alloc_context3() for this
+ *              context, then this parameter MUST be either NULL or
+ *              equal to the previously passed codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private options.
+ * On return this object will be filled with options that were not found.
+ *
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
+ * av_dict_set(), av_opt_find().
+ */
+int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
+
+/**
+ * Close a given AVCodecContext and free all the data associated with it
+ * (but not the AVCodecContext itself).
+ *
+ * Calling this function on an AVCodecContext that hasn't been opened will free
+ * the codec-specific data allocated in avcodec_alloc_context3() with a non-NULL
+ * codec. Subsequent calls will do nothing.
+ *
+ * @note Do not use this function. Use avcodec_free_context() to destroy a
+ * codec context (either open or closed). Opening and closing a codec context
+ * multiple times is not supported anymore -- use multiple codec contexts
+ * instead.
+ */
+int avcodec_close(AVCodecContext *avctx);
+
+/**
+ * Free all allocated data in the given subtitle struct.
+ *
+ * @param sub AVSubtitle to free.
+ */
+void avsubtitle_free(AVSubtitle *sub);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_decoding
+ * @{
+ */
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * AV_CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
+
+/**
+ * The default callback for AVCodecContext.get_encode_buffer(). It is made public so
+ * it can be called by custom get_encode_buffer() implementations for encoders without
+ * AV_CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_encode_buffer(AVCodecContext *s, AVPacket *pkt, int flags);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you do not use any horizontal
+ * padding.
+ *
+ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you also ensure that all
+ * line sizes are a multiple of the respective linesize_align[i].
+ *
+ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
+ int linesize_align[AV_NUM_DATA_POINTERS]);
+
+#ifdef FF_API_AVCODEC_CHROMA_POS
+/**
+ * Converts AVChromaLocation to swscale x/y chroma position.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) representing the origin and luma (1,1) representing 256,256.
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ * @deprecated Use av_chroma_location_enum_to_pos() instead.
+ */
+ attribute_deprecated
+int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos);
+
+/**
+ * Converts swscale x/y chroma position to AVChromaLocation.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) representing the origin and luma (1,1) representing 256,256.
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ * @deprecated Use av_chroma_location_pos_to_enum() instead.
+ */
+ attribute_deprecated
+enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos);
+#endif
+
+/**
+ * Decode a subtitle message.
+ * Return a negative value on error, otherwise return the number of bytes used.
+ * If no subtitle could be decompressed, got_sub_ptr is zero.
+ * Otherwise, the subtitle is stored in *sub.
+ * Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is for
+ * simplicity, because the performance difference is expected to be negligible
+ * and reusing a get_buffer written for video codecs would probably perform badly
+ * due to a potentially very different allocation pattern.
+ *
+ * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input
+ * and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning subtitles. It is safe to flush even those decoders that are not
+ * marked with AV_CODEC_CAP_DELAY; in that case no subtitles will be returned.
+ *
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
+ * @param avctx the codec context
+ * @param[out] sub The preallocated AVSubtitle in which the decoded subtitle will be stored,
+ * must be freed with avsubtitle_free if *got_sub_ptr is set.
+ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed; otherwise it is nonzero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ */
+int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
+ int *got_sub_ptr, const AVPacket *avpkt);
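+
+/*
+ * A minimal sketch of decoding one subtitle packet, assuming `avctx` is an
+ * opened subtitle decoder and `pkt` is a demuxed packet for it:
+ *
+ * @code
+ * AVSubtitle sub;
+ * int got_sub = 0;
+ * if (avcodec_decode_subtitle2(avctx, &sub, &got_sub, pkt) < 0)
+ *     exit(1);
+ * if (got_sub) {
+ *     // use sub.rects[0 .. sub.num_rects - 1] here
+ *     avsubtitle_free(&sub);
+ * }
+ * @endcode
+ */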
+
+/**
+ * Supply raw packet data as input to a decoder.
+ *
+ * Internally, this call will copy relevant AVCodecContext fields, which can
+ * influence decoding per-packet, and apply them when the packet is actually
+ * decoded. (For example AVCodecContext.skip_frame, which might direct the
+ * decoder to drop the frame contained by the packet sent with this function.)
+ *
+ * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE
+ * larger than the actual read bytes because some optimized bitstream
+ * readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
+ * @param avctx codec context
+ * @param[in] avpkt The input AVPacket. Usually, this will be a single video
+ * frame, or several complete audio frames.
+ * Ownership of the packet remains with the caller, and the
+ * decoder will not write to the packet. The decoder may create
+ * a reference to the packet data (or copy it if the packet is
+ * not reference-counted).
+ * Unlike with older APIs, the packet is always fully consumed,
+ * and if it contains multiple frames (e.g. some audio codecs),
+ *                  it will require you to call avcodec_receive_frame() multiple
+ * times afterwards before you can send a new packet.
+ * It can be NULL (or an AVPacket with data set to NULL and
+ * size set to 0); in this case, it is considered a flush
+ * packet, which signals the end of the stream. Sending the
+ * first flush packet will return success. Subsequent ones are
+ * unnecessary and will return AVERROR_EOF. If the decoder
+ * still has frames buffered, it will return them after sending
+ * a flush packet.
+ *
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) input is not accepted in the current state - user
+ * must read output with avcodec_receive_frame() (once
+ * all output is read, the packet should be resent,
+ * and the call will not fail with EAGAIN).
+ * @retval AVERROR_EOF the decoder has been flushed, and no new packets can be
+ * sent to it (also returned if more than 1 flush
+ * packet is sent)
+ * @retval AVERROR(EINVAL) codec not opened, it is an encoder, or requires flush
+ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or similar
+ * @retval "another negative error code" legitimate decoding errors
+ */
+int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);
+
+/**
+ * Return decoded output data from a decoder or encoder (when the
+ * AV_CODEC_FLAG_RECON_FRAME flag is used).
+ *
+ * @param avctx codec context
+ * @param frame This will be set to a reference-counted video or audio
+ * frame (depending on the decoder type) allocated by the
+ * codec. Note that the function will always call
+ * av_frame_unref(frame) before doing anything else.
+ *
+ * @retval 0 success, a frame was returned
+ * @retval AVERROR(EAGAIN) output is not available in this state - user must
+ * try to send new input
+ * @retval AVERROR_EOF the codec has been fully flushed, and there will be
+ * no more output frames
+ * @retval AVERROR(EINVAL) codec not opened, or it is an encoder without the
+ * AV_CODEC_FLAG_RECON_FRAME flag enabled
+ * @retval AVERROR_INPUT_CHANGED current decoded frame has changed parameters with
+ * respect to first decoded frame. Applicable when flag
+ * AV_CODEC_FLAG_DROPCHANGED is set.
+ * @retval "other negative error code" legitimate decoding errors
+ */
+int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);
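+
+/*
+ * A minimal decode loop built on avcodec_send_packet() and
+ * avcodec_receive_frame(); this is a sketch only, assuming dec_ctx has been
+ * opened with avcodec_open2() and frame was allocated with av_frame_alloc().
+ * Passing a NULL pkt at end of stream drains the remaining frames.
+ *
+ * @code
+ * static void decode(AVCodecContext *dec_ctx, AVFrame *frame, const AVPacket *pkt)
+ * {
+ *     int ret = avcodec_send_packet(dec_ctx, pkt);
+ *     if (ret < 0)
+ *         exit(1);
+ *     while (ret >= 0) {
+ *         ret = avcodec_receive_frame(dec_ctx, frame);
+ *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ *             return;
+ *         else if (ret < 0)
+ *             exit(1);
+ *         // consume the decoded frame here, then drop the reference
+ *         av_frame_unref(frame);
+ *     }
+ * }
+ * @endcode
+ */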
+
+/**
+ * Supply a raw video or audio frame to the encoder. Use avcodec_receive_packet()
+ * to retrieve buffered output packets.
+ *
+ * @param avctx codec context
+ * @param[in] frame AVFrame containing the raw audio or video frame to be encoded.
+ * Ownership of the frame remains with the caller, and the
+ * encoder will not write to the frame. The encoder may create
+ * a reference to the frame data (or copy it if the frame is
+ * not reference-counted).
+ * It can be NULL, in which case it is considered a flush
+ * packet. This signals the end of the stream. If the encoder
+ * still has packets buffered, it will return them after this
+ * call. Once flushing mode has been entered, additional flush
+ * packets are ignored, and sending frames will return
+ * AVERROR_EOF.
+ *
+ * For audio:
+ * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ * can have any number of samples.
+ * If it is not set, frame->nb_samples must be equal to
+ * avctx->frame_size for all frames except the last.
+ * The final frame may be smaller than avctx->frame_size.
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) input is not accepted in the current state - user must
+ * read output with avcodec_receive_packet() (once all
+ * output is read, the packet should be resent, and the
+ * call will not fail with EAGAIN).
+ * @retval AVERROR_EOF the encoder has been flushed, and no new frames can
+ * be sent to it
+ * @retval AVERROR(EINVAL) codec not opened, it is a decoder, or requires flush
+ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or similar
+ * @retval "another negative error code" legitimate encoding errors
+ */
+int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame);
+
+/**
+ * Read encoded data from the encoder.
+ *
+ * @param avctx codec context
+ * @param avpkt This will be set to a reference-counted packet allocated by the
+ * encoder. Note that the function will always call
+ * av_packet_unref(avpkt) before doing anything else.
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) output is not available in the current state - user must
+ * try to send input
+ * @retval AVERROR_EOF the encoder has been fully flushed, and there will be no
+ * more output packets
+ * @retval AVERROR(EINVAL) codec not opened, or it is a decoder
+ * @retval "another negative error code" legitimate encoding errors
+ */
+int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt);
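+
+/*
+ * A minimal encode loop built on avcodec_send_frame() and
+ * avcodec_receive_packet(); a sketch only, assuming enc_ctx has been opened
+ * with avcodec_open2() and pkt was allocated with av_packet_alloc(). Passing
+ * a NULL frame drains the encoder at end of stream.
+ *
+ * @code
+ * static void encode(AVCodecContext *enc_ctx, const AVFrame *frame, AVPacket *pkt)
+ * {
+ *     int ret = avcodec_send_frame(enc_ctx, frame);
+ *     if (ret < 0)
+ *         exit(1);
+ *     while (ret >= 0) {
+ *         ret = avcodec_receive_packet(enc_ctx, pkt);
+ *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ *             return;
+ *         else if (ret < 0)
+ *             exit(1);
+ *         // write pkt->data / pkt->size to the output, then drop the reference
+ *         av_packet_unref(pkt);
+ *     }
+ * }
+ * @endcode
+ */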
+
+/**
+ * Create and return an AVHWFramesContext with values adequate for hardware
+ * decoding. This is meant to be called from the get_format callback, and is
+ * a helper for preparing an AVHWFramesContext for AVCodecContext.hw_frames_ctx.
+ * This API is for decoding with certain hardware acceleration modes/APIs only.
+ *
+ * The returned AVHWFramesContext is not initialized. The caller must do this
+ * with av_hwframe_ctx_init().
+ *
+ * Calling this function is not a requirement, but makes it simpler to avoid
+ * codec or hardware API specific details when manually allocating frames.
+ *
+ * As an alternative, an API user can set AVCodecContext.hw_device_ctx,
+ * which sets up AVCodecContext.hw_frames_ctx fully automatically and makes
+ * it unnecessary to call this function or to care about
+ * AVHWFramesContext initialization at all.
+ *
+ * There are a number of requirements for calling this function:
+ *
+ * - It must be called from get_format with the same avctx parameter that was
+ * passed to get_format. Calling it outside of get_format is not allowed, and
+ * can trigger undefined behavior.
+ * - The function is not always supported (see description of return values).
+ * Even if this function returns successfully, hwaccel initialization could
+ * fail later. (The degree to which implementations check whether the stream
+ * is actually supported varies. Some do this check only after the user's
+ * get_format callback returns.)
+ * - The hw_pix_fmt must be one of the choices suggested by get_format. If the
+ * user decides to use a AVHWFramesContext prepared with this API function,
+ * the user must return the same hw_pix_fmt from get_format.
+ * - The device_ref passed to this function must support the given hw_pix_fmt.
+ * - After calling this API function, it is the user's responsibility to
+ * initialize the AVHWFramesContext (returned by the out_frames_ref parameter),
+ * and to set AVCodecContext.hw_frames_ctx to it. If done, this must be done
+ * before returning from get_format (this is implied by the normal
+ * AVCodecContext.hw_frames_ctx API rules).
+ * - The AVHWFramesContext parameters may change every time get_format is
+ * called. Also, AVCodecContext.hw_frames_ctx is reset before get_format. So
+ * you are inherently required to go through this process again on every
+ * get_format call.
+ * - It is perfectly possible to call this function without actually using
+ * the resulting AVHWFramesContext. One use-case might be trying to reuse a
+ * previously initialized AVHWFramesContext, and calling this API function
+ * only to test whether the required frame parameters have changed.
+ * - Fields that use dynamically allocated values of any kind must not be set
+ * by the user unless setting them is explicitly allowed by the documentation.
+ * If the user sets AVHWFramesContext.free and AVHWFramesContext.user_opaque,
+ * the new free callback must call the potentially set previous free callback.
+ * This API call may set any dynamically allocated fields, including the free
+ * callback.
+ *
+ * The function will set at least the following fields on AVHWFramesContext
+ * (potentially more, depending on hwaccel API):
+ *
+ * - All fields set by av_hwframe_ctx_alloc().
+ * - Set the format field to hw_pix_fmt.
+ * - Set the sw_format field to the most suited and most versatile format. (An
+ * implication is that this will prefer generic formats over opaque formats
+ * with arbitrary restrictions, if possible.)
+ * - Set the width/height fields to the coded frame size, rounded up to the
+ * API-specific minimum alignment.
+ * - Only _if_ the hwaccel requires a pre-allocated pool: set the initial_pool_size
+ *   field to the maximum number of reference surfaces possible with the codec,
+ *   plus 1 surface for the user to work with (meaning the user can safely reference
+ * at most 1 decoded surface at a time), plus additional buffering introduced
+ * by frame threading. If the hwaccel does not require pre-allocation, the
+ * field is left to 0, and the decoder will allocate new surfaces on demand
+ * during decoding.
+ * - Possibly AVHWFramesContext.hwctx fields, depending on the underlying
+ * hardware API.
+ *
+ * Essentially, out_frames_ref returns the same as av_hwframe_ctx_alloc(), but
+ * with basic frame parameters set.
+ *
+ * The function is stateless, and does not change the AVCodecContext or the
+ * device_ref AVHWDeviceContext.
+ *
+ * @param avctx The context which is currently calling get_format, and which
+ * implicitly contains all state needed for filling the returned
+ * AVHWFramesContext properly.
+ * @param device_ref A reference to the AVHWDeviceContext describing the device
+ * which will be used by the hardware decoder.
+ * @param hw_pix_fmt The hwaccel format you are going to return from get_format.
+ * @param out_frames_ref On success, set to a reference to an _uninitialized_
+ * AVHWFramesContext, created from the given device_ref.
+ * Fields will be set to values required for decoding.
+ * Not changed if an error is returned.
+ * @return zero on success, a negative value on error. The following error codes
+ * have special semantics:
+ * AVERROR(ENOENT): the decoder does not support this functionality. Setup
+ * is always manual, or it is a decoder which does not
+ * support setting AVCodecContext.hw_frames_ctx at all,
+ * or it is a software format.
+ * AVERROR(EINVAL): it is known that hardware decoding is not supported for
+ * this configuration, or the device_ref is not supported
+ * for the hwaccel referenced by hw_pix_fmt.
+ */
+int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
+ AVBufferRef *device_ref,
+ enum AVPixelFormat hw_pix_fmt,
+ AVBufferRef **out_frames_ref);
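+
+/*
+ * A minimal sketch of a get_format callback that uses
+ * avcodec_get_hw_frames_parameters(). Assumptions: the caller stashed an
+ * AVHWDeviceContext reference in avctx->opaque, and AV_PIX_FMT_VAAPI is the
+ * hwaccel format being targeted; a real callback would fall back to one of the
+ * software formats in the list on failure.
+ *
+ * @code
+ * static enum AVPixelFormat get_hw_format(AVCodecContext *avctx,
+ *                                         const enum AVPixelFormat *fmts)
+ * {
+ *     AVBufferRef *device_ref = avctx->opaque;
+ *     for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++) {
+ *         AVBufferRef *frames_ref = NULL;
+ *         if (*p != AV_PIX_FMT_VAAPI)
+ *             continue;
+ *         if (avcodec_get_hw_frames_parameters(avctx, device_ref, *p,
+ *                                              &frames_ref) < 0)
+ *             break;
+ *         if (av_hwframe_ctx_init(frames_ref) < 0) {
+ *             av_buffer_unref(&frames_ref);
+ *             break;
+ *         }
+ *         avctx->hw_frames_ctx = frames_ref;
+ *         return *p;
+ *     }
+ *     return AV_PIX_FMT_NONE; // give up; see note above about software fallback
+ * }
+ * @endcode
+ */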
+
+
+
+/**
+ * @defgroup lavc_parsing Frame parsing
+ * @{
+ */
+
+enum AVPictureStructure {
+ AV_PICTURE_STRUCTURE_UNKNOWN, ///< unknown
+ AV_PICTURE_STRUCTURE_TOP_FIELD, ///< coded as top field
+ AV_PICTURE_STRUCTURE_BOTTOM_FIELD, ///< coded as bottom field
+ AV_PICTURE_STRUCTURE_FRAME, ///< coded as frame
+};
+
+typedef struct AVCodecParserContext {
+ void *priv_data;
+ const struct AVCodecParser *parser;
+ int64_t frame_offset; /* offset of the current frame */
+ int64_t cur_offset; /* current offset
+ (incremented by each av_parser_parse()) */
+ int64_t next_frame_offset; /* offset of the next frame */
+ /* video info */
+ int pict_type; /* XXX: Put it back in AVCodecContext. */
+ /**
+ * This field is used for proper frame duration computation in lavf.
+     * It signals how much longer the frame duration of the current frame
+ * is compared to normal frame duration.
+ *
+ * frame_duration = (1 + repeat_pict) * time_base
+ *
+ * It is used by codecs like H.264 to display telecined material.
+ */
+ int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+ int64_t pts; /* pts of the current frame */
+ int64_t dts; /* dts of the current frame */
+
+ /* private data */
+ int64_t last_pts;
+ int64_t last_dts;
+ int fetch_timestamp;
+
+#define AV_PARSER_PTS_NB 4
+ int cur_frame_start_index;
+ int64_t cur_frame_offset[AV_PARSER_PTS_NB];
+ int64_t cur_frame_pts[AV_PARSER_PTS_NB];
+ int64_t cur_frame_dts[AV_PARSER_PTS_NB];
+
+ int flags;
+#define PARSER_FLAG_COMPLETE_FRAMES 0x0001
+#define PARSER_FLAG_ONCE 0x0002
+/// Set if the parser has a valid file offset
+#define PARSER_FLAG_FETCHED_OFFSET 0x0004
+#define PARSER_FLAG_USE_CODEC_TS 0x1000
+
+ int64_t offset; ///< byte offset from starting packet start
+ int64_t cur_frame_end[AV_PARSER_PTS_NB];
+
+ /**
+ * Set by parser to 1 for key frames and 0 for non-key frames.
+ * It is initialized to -1, so if the parser doesn't set this flag,
+ * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
+ * will be used.
+ */
+ int key_frame;
+
+ // Timestamp generation support:
+ /**
+ * Synchronization point for start of timestamp generation.
+ *
+ * Set to >0 for sync point, 0 for no sync point and <0 for undefined
+ * (default).
+ *
+ * For example, this corresponds to presence of H.264 buffering period
+ * SEI message.
+ */
+ int dts_sync_point;
+
+ /**
+ * Offset of the current timestamp against last timestamp sync point in
+ * units of AVCodecContext.time_base.
+ *
+     * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must
+ * contain a valid timestamp offset.
+ *
+     * Note that the timestamp of a sync point usually has a nonzero
+     * dts_ref_dts_delta, which refers to the previous sync point. The offset of
+     * the next frame after a timestamp sync point will usually be 1.
+ *
+ * For example, this corresponds to H.264 cpb_removal_delay.
+ */
+ int dts_ref_dts_delta;
+
+ /**
+ * Presentation delay of current frame in units of AVCodecContext.time_base.
+ *
+     * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must
+     * contain a valid non-negative timestamp delta (the presentation time of a frame
+ * must not lie in the past).
+ *
+ * This delay represents the difference between decoding and presentation
+ * time of the frame.
+ *
+ * For example, this corresponds to H.264 dpb_output_delay.
+ */
+ int pts_dts_delta;
+
+ /**
+ * Position of the packet in file.
+ *
+ * Analogous to cur_frame_pts/dts
+ */
+ int64_t cur_frame_pos[AV_PARSER_PTS_NB];
+
+ /**
+ * Byte position of currently parsed frame in stream.
+ */
+ int64_t pos;
+
+ /**
+ * Previous frame byte position.
+ */
+ int64_t last_pos;
+
+ /**
+ * Duration of the current frame.
+ * For audio, this is in units of 1 / AVCodecContext.sample_rate.
+ * For all other types, this is in units of AVCodecContext.time_base.
+ */
+ int duration;
+
+ enum AVFieldOrder field_order;
+
+ /**
+ * Indicate whether a picture is coded as a frame, top field or bottom field.
+ *
+ * For example, H.264 field_pic_flag equal to 0 corresponds to
+ * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag
+ * equal to 1 and bottom_field_flag equal to 0 corresponds to
+ * AV_PICTURE_STRUCTURE_TOP_FIELD.
+ */
+ enum AVPictureStructure picture_structure;
+
+ /**
+ * Picture number incremented in presentation or output order.
+ * This field may be reinitialized at the first picture of a new sequence.
+ *
+ * For example, this corresponds to H.264 PicOrderCnt.
+ */
+ int output_picture_number;
+
+ /**
+ * Dimensions of the decoded video intended for presentation.
+ */
+ int width;
+ int height;
+
+ /**
+ * Dimensions of the coded video.
+ */
+ int coded_width;
+ int coded_height;
+
+ /**
+     * The format of the coded data; corresponds to enum AVPixelFormat for video
+     * and to enum AVSampleFormat for audio.
+ *
+ * Note that a decoder can have considerable freedom in how exactly it
+ * decodes the data, so the format reported here might be different from the
+ * one returned by a decoder.
+ */
+ int format;
+} AVCodecParserContext;
+
+typedef struct AVCodecParser {
+ int codec_ids[7]; /* several codec IDs are permitted */
+ int priv_data_size;
+ int (*parser_init)(AVCodecParserContext *s);
+    /* This callback never returns an error; a negative value means that
+ * the frame start was in a previous packet. */
+ int (*parser_parse)(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size);
+ void (*parser_close)(AVCodecParserContext *s);
+ int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
+} AVCodecParser;
+
+/**
+ * Iterate over all registered codec parsers.
+ *
+ * @param opaque a pointer where libavcodec will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the next registered codec parser or NULL when the iteration is
+ * finished
+ */
+const AVCodecParser *av_parser_iterate(void **opaque);
+
+AVCodecParserContext *av_parser_init(int codec_id);
+
+/**
+ * Parse a packet.
+ *
+ * @param s parser context.
+ * @param avctx codec context.
+ * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished.
+ * @param poutbuf_size set to size of parsed buffer or zero if not yet finished.
+ * @param buf input buffer.
+ * @param buf_size buffer size in bytes without the padding. I.e. the full buffer
+ *                  size is assumed to be buf_size + AV_INPUT_BUFFER_PADDING_SIZE.
+ *                  To signal EOF, this should be 0 (so that the last frame
+ *                  can be output).
+ * @param pts input presentation timestamp.
+ * @param dts input decoding timestamp.
+ * @param pos input byte position in stream.
+ * @return the number of bytes of the input bitstream used.
+ *
+ * Example:
+ * @code
+ * while(in_len){
+ * len = av_parser_parse2(myparser, AVCodecContext, &data, &size,
+ * in_data, in_len,
+ * pts, dts, pos);
+ * in_data += len;
+ * in_len -= len;
+ *
+ * if(size)
+ * decode_frame(data, size);
+ * }
+ * @endcode
+ */
+int av_parser_parse2(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts,
+ int64_t pos);
+
+void av_parser_close(AVCodecParserContext *s);
+
+/**
+ * @}
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_encoding
+ * @{
+ */
+
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub);
+
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_misc Utility functions
+ * @ingroup libavc
+ *
+ * Miscellaneous utility functions related to both encoding and decoding
+ * (or neither).
+ * @{
+ */
+
+/**
+ * @defgroup lavc_misc_pixfmt Pixel formats
+ *
+ * Functions for working with pixel formats.
+ * @{
+ */
+
+/**
+ * Return a value representing the fourCC code associated with the
+ * pixel format pix_fmt, or 0 if no associated fourCC code can be
+ * found.
+ */
+unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format. When converting from one pixel format to another, information loss
+ * may occur. For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt_of_list() searches
+ * which of the given pixel formats should be used to suffer the least amount
+ * of loss. The pixel formats it chooses from are given by the
+ * pix_fmt_list parameter.
+ *
+ *
+ * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr);
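+
+/*
+ * A short sketch of choosing a target format from a candidate list; the
+ * candidate array and the YUVA420P source format are arbitrary assumptions:
+ *
+ * @code
+ * static const enum AVPixelFormat candidates[] = {
+ *     AV_PIX_FMT_YUV420P, AV_PIX_FMT_NV12, AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
+ * };
+ * int loss = 0;
+ * enum AVPixelFormat best =
+ *     avcodec_find_best_pix_fmt_of_list(candidates, AV_PIX_FMT_YUVA420P,
+ *                                       1, &loss);  // 1: source has alpha
+ * // `loss` now describes what converting to `best` would sacrifice
+ * @endcode
+ */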
+
+enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+
+/**
+ * @}
+ */
+
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
+//FIXME func typedef
+
+/**
+ * Fill AVFrame audio data and linesize pointers.
+ *
+ * The buffer buf must be a preallocated buffer with a size big enough
+ * to contain the specified number of samples. The filled AVFrame data
+ * pointers will point to this buffer.
+ *
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame the AVFrame
+ * frame->nb_samples must be set prior to calling the
+ * function. This function fills in frame->data,
+ * frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt sample format
+ * @param buf buffer to use for frame data
+ * @param buf_size size of buffer
+ * @param align plane size sample alignment (0 = default)
+ * @return >=0 on success, negative error code on failure
+ * @todo return the size in bytes required to store the samples in
+ * case of success, at the next libavutil bump
+ */
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+ enum AVSampleFormat sample_fmt, const uint8_t *buf,
+ int buf_size, int align);
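+
+/*
+ * A minimal sketch of wrapping an existing interleaved S16 stereo buffer in an
+ * AVFrame; `buf`, `buf_size` and `nb_samples` are assumptions supplied by the
+ * caller:
+ *
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * if (!frame)
+ *     exit(1);
+ * frame->nb_samples = nb_samples;
+ * if (avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S16,
+ *                              buf, buf_size, 0) < 0)
+ *     exit(1);
+ * // frame->data[0] and frame->linesize[0] now point into buf
+ * @endcode
+ */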
+
+/**
+ * Reset the internal codec state / flush internal buffers. Should be called
+ * e.g. when seeking or when switching to a different stream.
+ *
+ * @note for decoders, this function just releases any references the decoder
+ * might keep internally, but the caller's references remain valid.
+ *
+ * @note for encoders, this function will only do something if the encoder
+ * declares support for AV_CODEC_CAP_ENCODER_FLUSH. When called, the encoder
+ * will drain any remaining packets, and can then be re-used for a different
+ * stream (as opposed to sending a null frame which will leave the encoder
+ * in a permanent EOF state after draining). This can be desirable if the
+ * cost of tearing down and replacing the encoder instance is high.
+ */
+void avcodec_flush_buffers(AVCodecContext *avctx);
+
+/**
+ * Return audio frame duration.
+ *
+ * @param avctx codec context
+ * @param frame_bytes size of the frame, or 0 if unknown
+ * @return frame duration, in samples, if known. 0 if not able to
+ * determine.
+ */
+int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
+
+/* memory */
+
+/**
+ * Same behaviour as av_fast_malloc(), but the buffer has an additional
+ * AV_INPUT_BUFFER_PADDING_SIZE bytes at the end which will always be 0.
+ *
+ * In addition, the whole buffer will be 0-initialized initially and after
+ * resizes, so that no uninitialized data will ever appear.
+ */
+void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Same behaviour as av_fast_padded_malloc(), except that the buffer will
+ * always be 0-initialized after the call.
+ */
+void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size);
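+
+/*
+ * A short sketch of the usual pattern for these helpers; `s->buf` and
+ * `s->buf_size` are an assumed reusable scratch buffer owned by the caller:
+ *
+ * @code
+ * av_fast_padded_malloc(&s->buf, &s->buf_size, payload_size);
+ * if (!s->buf)
+ *     return AVERROR(ENOMEM);
+ * memcpy(s->buf, payload, payload_size);
+ * // the padding bytes past payload_size remain zeroed
+ * @endcode
+ */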
+
+/**
+ * @return a positive value if s is open (i.e. avcodec_open2() was called on it
+ * with no corresponding avcodec_close()), 0 otherwise.
+ */
+int avcodec_is_open(AVCodecContext *s);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVCODEC_H */
diff --git a/media/ffvpx/libavcodec/avcodec.symbols b/media/ffvpx/libavcodec/avcodec.symbols
new file mode 100644
index 0000000000..b15862fa50
--- /dev/null
+++ b/media/ffvpx/libavcodec/avcodec.symbols
@@ -0,0 +1,81 @@
+av_codec_ffversion
+av_codec_is_decoder
+av_codec_is_encoder
+av_codec_iterate
+av_fast_padded_malloc
+av_fast_padded_mallocz
+av_get_audio_frame_duration
+av_get_bits_per_sample
+av_get_exact_bits_per_sample
+av_get_pcm_codec
+av_get_profile_name
+av_grow_packet
+av_init_packet
+av_new_packet
+av_packet_copy_props
+av_packet_free_side_data
+av_packet_from_data
+av_packet_get_side_data
+av_packet_move_ref
+av_packet_new_side_data
+av_packet_pack_dictionary
+av_packet_ref
+av_packet_rescale_ts
+av_packet_shrink_side_data
+av_packet_side_data_name
+av_packet_unpack_dictionary
+av_packet_unref
+av_parser_close
+av_parser_init
+av_parser_parse2
+#ifdef MOZ_LIBAV_FFT
+av_rdft_calc
+av_rdft_end
+av_rdft_init
+#endif
+av_shrink_packet
+av_vorbis_parse_frame
+av_vorbis_parse_frame_flags
+av_vorbis_parse_free
+av_vorbis_parse_init
+av_vorbis_parse_reset
+av_xiphlacing
+avcodec_align_dimensions
+avcodec_align_dimensions2
+avcodec_alloc_context3
+avcodec_chroma_pos_to_enum
+avcodec_close
+avcodec_configuration
+avcodec_decode_subtitle2
+avcodec_default_execute
+avcodec_default_execute2
+avcodec_default_get_buffer2
+avcodec_default_get_format
+avcodec_descriptor_get
+avcodec_descriptor_get_by_name
+avcodec_descriptor_next
+avcodec_enum_to_chroma_pos
+avcodec_fill_audio_frame
+avcodec_find_decoder
+avcodec_find_decoder_by_name
+avcodec_find_encoder
+avcodec_find_encoder_by_name
+avcodec_flush_buffers
+avcodec_free_context
+avcodec_get_class
+avcodec_get_hw_config
+avcodec_get_name
+avcodec_get_subtitle_rect_class
+avcodec_get_type
+avcodec_is_open
+avcodec_license
+avcodec_open2
+avcodec_string
+avcodec_version
+avsubtitle_free
+avcodec_send_packet
+avcodec_receive_frame
+ff_init_vlc_from_lengths
+ff_init_vlc_sparse
+ff_mpa_freq_tab
+ff_mpa_bitrate_tab
diff --git a/media/ffvpx/libavcodec/avdct.c b/media/ffvpx/libavcodec/avdct.c
new file mode 100644
index 0000000000..e8fa41f73b
--- /dev/null
+++ b/media/ffvpx/libavcodec/avdct.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2014 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "idctdsp.h"
+#include "fdctdsp.h"
+#include "pixblockdsp.h"
+#include "avdct.h"
+
+#define OFFSET(x) offsetof(AVDCT,x)
+#define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
+//these names are too long to be readable
+#define V AV_OPT_FLAG_VIDEO_PARAM
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define E AV_OPT_FLAG_ENCODING_PARAM
+#define D AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption avdct_options[] = {
+{"dct", "DCT algorithm", OFFSET(dct_algo), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, V|E, "dct"},
+{"auto", "autoselect a good one", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_AUTO }, INT_MIN, INT_MAX, V|E, "dct"},
+{"fastint", "fast integer (experimental / for debugging)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_FASTINT }, INT_MIN, INT_MAX, V|E, "dct"},
+{"int", "accurate integer", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_INT }, INT_MIN, INT_MAX, V|E, "dct"},
+{"mmx", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_MMX }, INT_MIN, INT_MAX, V|E, "dct"},
+{"altivec", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_ALTIVEC }, INT_MIN, INT_MAX, V|E, "dct"},
+{"faan", "floating point AAN DCT (experimental / for debugging)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_FAAN }, INT_MIN, INT_MAX, V|E, "dct"},
+
+{"idct", "select IDCT implementation", OFFSET(idct_algo), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, V|E|D, "idct"},
+{"auto", "autoselect a good one", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_AUTO }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"int", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_INT }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simple", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLE }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplemmx", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEMMX }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"arm", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_ARM }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"altivec", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_ALTIVEC }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplearm", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEARM }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplearmv5te", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEARMV5TE }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplearmv6", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEARMV6 }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simpleneon", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLENEON }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"xvid", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_XVID }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"xvidmmx", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_XVID }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"faani", "floating point AAN IDCT (experimental / for debugging)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_FAAN }, INT_MIN, INT_MAX, V|D|E, "idct"},
+{"simpleauto", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEAUTO }, INT_MIN, INT_MAX, V|E|D, "idct"},
+
+{"bits_per_sample", "", OFFSET(bits_per_sample), AV_OPT_TYPE_INT, {.i64 = 8 }, 0, 14, 0,},
+{NULL},
+};
+
+static const AVClass avdct_class = {
+ .class_name = "AVDCT",
+ .option = avdct_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+const AVClass *avcodec_dct_get_class(void)
+{
+ return &avdct_class;
+}
+
+AVDCT *avcodec_dct_alloc(void)
+{
+ AVDCT *dsp = av_mallocz(sizeof(AVDCT));
+
+ if (!dsp)
+ return NULL;
+
+ dsp->av_class = &avdct_class;
+ av_opt_set_defaults(dsp);
+
+ return dsp;
+}
+
+int avcodec_dct_init(AVDCT *dsp)
+{
+ AVCodecContext *avctx = avcodec_alloc_context3(NULL);
+
+ if (!avctx)
+ return AVERROR(ENOMEM);
+
+ avctx->idct_algo = dsp->idct_algo;
+ avctx->dct_algo = dsp->dct_algo;
+ avctx->bits_per_raw_sample = dsp->bits_per_sample;
+
+#define COPY(src, name) memcpy(&dsp->name, &src.name, sizeof(dsp->name))
+
+#if CONFIG_IDCTDSP
+ {
+ IDCTDSPContext idsp = {0};
+ ff_idctdsp_init(&idsp, avctx);
+ COPY(idsp, idct);
+ COPY(idsp, idct_permutation);
+ }
+#endif
+
+#if CONFIG_FDCTDSP
+ {
+ FDCTDSPContext fdsp;
+ ff_fdctdsp_init(&fdsp, avctx);
+ COPY(fdsp, fdct);
+ }
+#endif
+
+#if CONFIG_PIXBLOCKDSP
+ {
+ PixblockDSPContext pdsp;
+ ff_pixblockdsp_init(&pdsp, avctx);
+ COPY(pdsp, get_pixels);
+ COPY(pdsp, get_pixels_unaligned);
+ }
+#endif
+
+ avcodec_free_context(&avctx);
+
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/avdct.h b/media/ffvpx/libavcodec/avdct.h
new file mode 100644
index 0000000000..6411fab6f6
--- /dev/null
+++ b/media/ffvpx/libavcodec/avdct.h
@@ -0,0 +1,88 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVDCT_H
+#define AVCODEC_AVDCT_H
+
+#include "libavutil/opt.h"
+
+/**
+ * AVDCT context.
+ * @note function pointers can be NULL if the specific features have been
+ * disabled at build time.
+ */
+typedef struct AVDCT {
+ const AVClass *av_class;
+
+ void (*idct)(int16_t *block /* align 16 */);
+
+ /**
+ * IDCT input permutation.
+ * Several optimized IDCTs need a permutated input (relative to the
+ * normal order of the reference IDCT).
+ * This permutation must be performed before the idct_put/add.
+ * Note, normally this can be merged with the zigzag/alternate scan<br>
+ * An example to avoid confusion:
+ * - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...)
+ * - (x -> reference DCT -> reference IDCT -> x)
+ * - (x -> reference DCT -> simple_mmx_perm = idct_permutation
+ * -> simple_idct_mmx -> x)
+ * - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant
+ * -> simple_idct_mmx -> ...)
+ */
+ uint8_t idct_permutation[64];
+
+ void (*fdct)(int16_t *block /* align 16 */);
+
+
+ /**
+ * DCT algorithm.
+     * Must use AVOptions to set this field.
+ */
+ int dct_algo;
+
+ /**
+ * IDCT algorithm.
+     * Must use AVOptions to set this field.
+ */
+ int idct_algo;
+
+ void (*get_pixels)(int16_t *block /* align 16 */,
+ const uint8_t *pixels /* align 8 */,
+ ptrdiff_t line_size);
+
+ int bits_per_sample;
+
+ void (*get_pixels_unaligned)(int16_t *block /* align 16 */,
+ const uint8_t *pixels,
+ ptrdiff_t line_size);
+} AVDCT;
+
+/**
+ * Allocates an AVDCT context.
+ * This needs to be initialized with avcodec_dct_init() after optionally
+ * configuring it with AVOptions.
+ *
+ * To free it, use av_free().
+ */
+AVDCT *avcodec_dct_alloc(void);
+int avcodec_dct_init(AVDCT *);
+
+const AVClass *avcodec_dct_get_class(void);
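+
+/*
+ * A minimal sketch of using this API: allocate, optionally tune via AVOptions
+ * (the option names come from this API's AVClass), initialize, then call the
+ * function pointers that were filled in. `block` is an assumed int16_t[64],
+ * 16-byte aligned.
+ *
+ * @code
+ * AVDCT *dct = avcodec_dct_alloc();
+ * if (!dct)
+ *     exit(1);
+ * av_opt_set(dct, "idct", "simple", 0);
+ * if (avcodec_dct_init(dct) < 0)
+ *     exit(1);
+ * if (dct->idct)
+ *     dct->idct(block);
+ * av_free(dct);
+ * @endcode
+ */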
+
+#endif /* AVCODEC_AVDCT_H */
diff --git a/media/ffvpx/libavcodec/avfft.c b/media/ffvpx/libavcodec/avfft.c
new file mode 100644
index 0000000000..2200f37708
--- /dev/null
+++ b/media/ffvpx/libavcodec/avfft.c
@@ -0,0 +1,145 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/mem.h"
+#include "avfft.h"
+#include "fft.h"
+#include "rdft.h"
+#include "dct.h"
+
+/* FFT */
+
+FFTContext *av_fft_init(int nbits, int inverse)
+{
+ FFTContext *s = av_mallocz(sizeof(*s));
+
+ if (s && ff_fft_init(s, nbits, inverse))
+ av_freep(&s);
+
+ return s;
+}
+
+void av_fft_permute(FFTContext *s, FFTComplex *z)
+{
+ s->fft_permute(s, z);
+}
+
+void av_fft_calc(FFTContext *s, FFTComplex *z)
+{
+ s->fft_calc(s, z);
+}
+
+av_cold void av_fft_end(FFTContext *s)
+{
+ if (s) {
+ ff_fft_end(s);
+ av_free(s);
+ }
+}
+
+#if CONFIG_MDCT
+
+FFTContext *av_mdct_init(int nbits, int inverse, double scale)
+{
+ FFTContext *s = av_malloc(sizeof(*s));
+
+ if (s && ff_mdct_init(s, nbits, inverse, scale))
+ av_freep(&s);
+
+ return s;
+}
+
+void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ s->imdct_calc(s, output, input);
+}
+
+void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ s->imdct_half(s, output, input);
+}
+
+void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ s->mdct_calc(s, output, input);
+}
+
+av_cold void av_mdct_end(FFTContext *s)
+{
+ if (s) {
+ ff_mdct_end(s);
+ av_free(s);
+ }
+}
+
+#endif /* CONFIG_MDCT */
+
+#if CONFIG_RDFT
+
+RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans)
+{
+ RDFTContext *s = av_malloc(sizeof(*s));
+
+ if (s && ff_rdft_init(s, nbits, trans))
+ av_freep(&s);
+
+ return s;
+}
+
+void av_rdft_calc(RDFTContext *s, FFTSample *data)
+{
+ s->rdft_calc(s, data);
+}
+
+av_cold void av_rdft_end(RDFTContext *s)
+{
+ if (s) {
+ ff_rdft_end(s);
+ av_free(s);
+ }
+}
+
+#endif /* CONFIG_RDFT */
+
+#if CONFIG_DCT
+
+DCTContext *av_dct_init(int nbits, enum DCTTransformType inverse)
+{
+ DCTContext *s = av_malloc(sizeof(*s));
+
+ if (s && ff_dct_init(s, nbits, inverse))
+ av_freep(&s);
+
+ return s;
+}
+
+void av_dct_calc(DCTContext *s, FFTSample *data)
+{
+ s->dct_calc(s, data);
+}
+
+av_cold void av_dct_end(DCTContext *s)
+{
+ if (s) {
+ ff_dct_end(s);
+ av_free(s);
+ }
+}
+
+#endif /* CONFIG_DCT */
diff --git a/media/ffvpx/libavcodec/avfft.h b/media/ffvpx/libavcodec/avfft.h
new file mode 100644
index 0000000000..0c0f9b8d8d
--- /dev/null
+++ b/media/ffvpx/libavcodec/avfft.h
@@ -0,0 +1,118 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVFFT_H
+#define AVCODEC_AVFFT_H
+
+/**
+ * @file
+ * @ingroup lavc_fft
+ * FFT functions
+ */
+
+/**
+ * @defgroup lavc_fft FFT functions
+ * @ingroup lavc_misc
+ *
+ * @{
+ */
+
+typedef float FFTSample;
+
+typedef struct FFTComplex {
+ FFTSample re, im;
+} FFTComplex;
+
+typedef struct FFTContext FFTContext;
+
+/**
+ * Set up a complex FFT.
+ * @param nbits log2 of the length of the input array
+ * @param inverse if 0 perform the forward transform, if 1 perform the inverse
+ */
+FFTContext *av_fft_init(int nbits, int inverse);
+
+/**
+ * Do the permutation needed BEFORE calling av_fft_calc().
+ */
+void av_fft_permute(FFTContext *s, FFTComplex *z);
+
+/**
+ * Do a complex FFT with the parameters defined in av_fft_init(). The
+ * input data must be permuted beforehand. No 1.0/sqrt(n) normalization is done.
+ */
+void av_fft_calc(FFTContext *s, FFTComplex *z);
+
+void av_fft_end(FFTContext *s);
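+
+/*
+ * A minimal sketch of a forward complex FFT; the input samples in `buf` are an
+ * assumption (filled by the caller), and nbits = 10 gives a 1024-point
+ * transform:
+ *
+ * @code
+ * FFTContext *fft = av_fft_init(10, 0);
+ * FFTComplex buf[1 << 10];
+ * if (!fft)
+ *     exit(1);
+ * // ... fill buf with input samples ...
+ * av_fft_permute(fft, buf);
+ * av_fft_calc(fft, buf);    // in-place, no 1.0/sqrt(n) normalization
+ * av_fft_end(fft);
+ * @endcode
+ */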
+
+FFTContext *av_mdct_init(int nbits, int inverse, double scale);
+void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_end(FFTContext *s);
+
+/* Real Discrete Fourier Transform */
+
+enum RDFTransformType {
+ DFT_R2C,
+ IDFT_C2R,
+ IDFT_R2C,
+ DFT_C2R,
+};
+
+typedef struct RDFTContext RDFTContext;
+
+/**
+ * Set up a real FFT.
+ * @param nbits log2 of the length of the input array
+ * @param trans the type of transform
+ */
+RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans);
+void av_rdft_calc(RDFTContext *s, FFTSample *data);
+void av_rdft_end(RDFTContext *s);
+
+/* Discrete Cosine Transform */
+
+typedef struct DCTContext DCTContext;
+
+enum DCTTransformType {
+ DCT_II = 0,
+ DCT_III,
+ DCT_I,
+ DST_I,
+};
+
+/**
+ * Set up DCT.
+ *
+ * @param nbits size of the input array:
+ * (1 << nbits) for DCT-II, DCT-III and DST-I
+ * (1 << nbits) + 1 for DCT-I
+ * @param type the type of transform
+ *
+ * @note the first element of the input of DST-I is ignored
+ */
+DCTContext *av_dct_init(int nbits, enum DCTTransformType type);
+void av_dct_calc(DCTContext *s, FFTSample *data);
+void av_dct_end (DCTContext *s);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVFFT_H */
diff --git a/media/ffvpx/libavcodec/avpacket.c b/media/ffvpx/libavcodec/avpacket.c
new file mode 100644
index 0000000000..5fef65e97a
--- /dev/null
+++ b/media/ffvpx/libavcodec/avpacket.c
@@ -0,0 +1,647 @@
+/*
+ * AVPacket functions for libavcodec
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/mem.h"
+#include "libavutil/rational.h"
+
+#include "defs.h"
+#include "packet.h"
+#include "packet_internal.h"
+
+#if FF_API_INIT_PACKET
+void av_init_packet(AVPacket *pkt)
+{
+ pkt->pts = AV_NOPTS_VALUE;
+ pkt->dts = AV_NOPTS_VALUE;
+ pkt->pos = -1;
+ pkt->duration = 0;
+ pkt->flags = 0;
+ pkt->stream_index = 0;
+ pkt->buf = NULL;
+ pkt->side_data = NULL;
+ pkt->side_data_elems = 0;
+ pkt->opaque = NULL;
+ pkt->opaque_ref = NULL;
+ pkt->time_base = av_make_q(0, 1);
+}
+#endif
+
+static void get_packet_defaults(AVPacket *pkt)
+{
+ memset(pkt, 0, sizeof(*pkt));
+
+ pkt->pts = AV_NOPTS_VALUE;
+ pkt->dts = AV_NOPTS_VALUE;
+ pkt->pos = -1;
+ pkt->time_base = av_make_q(0, 1);
+}
+
+AVPacket *av_packet_alloc(void)
+{
+ AVPacket *pkt = av_malloc(sizeof(AVPacket));
+ if (!pkt)
+ return pkt;
+
+ get_packet_defaults(pkt);
+
+ return pkt;
+}
+
+void av_packet_free(AVPacket **pkt)
+{
+ if (!pkt || !*pkt)
+ return;
+
+ av_packet_unref(*pkt);
+ av_freep(pkt);
+}
+
+static int packet_alloc(AVBufferRef **buf, int size)
+{
+ int ret;
+ if (size < 0 || size >= INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR(EINVAL);
+
+ ret = av_buffer_realloc(buf, size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (ret < 0)
+ return ret;
+
+ memset((*buf)->data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+ return 0;
+}
+
+int av_new_packet(AVPacket *pkt, int size)
+{
+ AVBufferRef *buf = NULL;
+ int ret = packet_alloc(&buf, size);
+ if (ret < 0)
+ return ret;
+
+ get_packet_defaults(pkt);
+ pkt->buf = buf;
+ pkt->data = buf->data;
+ pkt->size = size;
+
+ return 0;
+}
+
+void av_shrink_packet(AVPacket *pkt, int size)
+{
+ if (pkt->size <= size)
+ return;
+ pkt->size = size;
+ memset(pkt->data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+}
+
+int av_grow_packet(AVPacket *pkt, int grow_by)
+{
+ int new_size;
+ av_assert0((unsigned)pkt->size <= INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
+ if ((unsigned)grow_by >
+ INT_MAX - (pkt->size + AV_INPUT_BUFFER_PADDING_SIZE))
+ return AVERROR(ENOMEM);
+
+ new_size = pkt->size + grow_by + AV_INPUT_BUFFER_PADDING_SIZE;
+ if (pkt->buf) {
+ size_t data_offset;
+ uint8_t *old_data = pkt->data;
+ if (pkt->data == NULL) {
+ data_offset = 0;
+ pkt->data = pkt->buf->data;
+ } else {
+ data_offset = pkt->data - pkt->buf->data;
+ if (data_offset > INT_MAX - new_size)
+ return AVERROR(ENOMEM);
+ }
+
+ if (new_size + data_offset > pkt->buf->size ||
+ !av_buffer_is_writable(pkt->buf)) {
+ int ret;
+
+ // allocate slightly more than requested to avoid excessive
+ // reallocations
+ if (new_size + data_offset < INT_MAX - new_size/16)
+ new_size += new_size/16;
+
+ ret = av_buffer_realloc(&pkt->buf, new_size + data_offset);
+ if (ret < 0) {
+ pkt->data = old_data;
+ return ret;
+ }
+ pkt->data = pkt->buf->data + data_offset;
+ }
+ } else {
+ pkt->buf = av_buffer_alloc(new_size);
+ if (!pkt->buf)
+ return AVERROR(ENOMEM);
+ if (pkt->size > 0)
+ memcpy(pkt->buf->data, pkt->data, pkt->size);
+ pkt->data = pkt->buf->data;
+ }
+ pkt->size += grow_by;
+ memset(pkt->data + pkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+ return 0;
+}
+
+int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size)
+{
+ if (size >= INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR(EINVAL);
+
+ pkt->buf = av_buffer_create(data, size + AV_INPUT_BUFFER_PADDING_SIZE,
+ av_buffer_default_free, NULL, 0);
+ if (!pkt->buf)
+ return AVERROR(ENOMEM);
+
+ pkt->data = data;
+ pkt->size = size;
+
+ return 0;
+}
+
+void av_packet_free_side_data(AVPacket *pkt)
+{
+ int i;
+ for (i = 0; i < pkt->side_data_elems; i++)
+ av_freep(&pkt->side_data[i].data);
+ av_freep(&pkt->side_data);
+ pkt->side_data_elems = 0;
+}
+
+int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ uint8_t *data, size_t size)
+{
+ AVPacketSideData *tmp;
+ int i, elems = pkt->side_data_elems;
+
+ for (i = 0; i < elems; i++) {
+ AVPacketSideData *sd = &pkt->side_data[i];
+
+ if (sd->type == type) {
+ av_free(sd->data);
+ sd->data = data;
+ sd->size = size;
+ return 0;
+ }
+ }
+
+ if ((unsigned)elems + 1 > AV_PKT_DATA_NB)
+ return AVERROR(ERANGE);
+
+ tmp = av_realloc(pkt->side_data, (elems + 1) * sizeof(*tmp));
+ if (!tmp)
+ return AVERROR(ENOMEM);
+
+ pkt->side_data = tmp;
+ pkt->side_data[elems].data = data;
+ pkt->side_data[elems].size = size;
+ pkt->side_data[elems].type = type;
+ pkt->side_data_elems++;
+
+ return 0;
+}
+
+
+uint8_t *av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ size_t size)
+{
+ int ret;
+ uint8_t *data;
+
+ if (size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
+ return NULL;
+ data = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!data)
+ return NULL;
+
+ ret = av_packet_add_side_data(pkt, type, data, size);
+ if (ret < 0) {
+ av_freep(&data);
+ return NULL;
+ }
+
+ return data;
+}
+
+uint8_t *av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type,
+ size_t *size)
+{
+ int i;
+
+ for (i = 0; i < pkt->side_data_elems; i++) {
+ if (pkt->side_data[i].type == type) {
+ if (size)
+ *size = pkt->side_data[i].size;
+ return pkt->side_data[i].data;
+ }
+ }
+ if (size)
+ *size = 0;
+ return NULL;
+}
+
+const char *av_packet_side_data_name(enum AVPacketSideDataType type)
+{
+ switch(type) {
+ case AV_PKT_DATA_PALETTE: return "Palette";
+ case AV_PKT_DATA_NEW_EXTRADATA: return "New Extradata";
+ case AV_PKT_DATA_PARAM_CHANGE: return "Param Change";
+ case AV_PKT_DATA_H263_MB_INFO: return "H263 MB Info";
+ case AV_PKT_DATA_REPLAYGAIN: return "Replay Gain";
+ case AV_PKT_DATA_DISPLAYMATRIX: return "Display Matrix";
+ case AV_PKT_DATA_STEREO3D: return "Stereo 3D";
+ case AV_PKT_DATA_AUDIO_SERVICE_TYPE: return "Audio Service Type";
+ case AV_PKT_DATA_QUALITY_STATS: return "Quality stats";
+ case AV_PKT_DATA_FALLBACK_TRACK: return "Fallback track";
+ case AV_PKT_DATA_CPB_PROPERTIES: return "CPB properties";
+ case AV_PKT_DATA_SKIP_SAMPLES: return "Skip Samples";
+ case AV_PKT_DATA_JP_DUALMONO: return "JP Dual Mono";
+ case AV_PKT_DATA_STRINGS_METADATA: return "Strings Metadata";
+ case AV_PKT_DATA_SUBTITLE_POSITION: return "Subtitle Position";
+ case AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL: return "Matroska BlockAdditional";
+ case AV_PKT_DATA_WEBVTT_IDENTIFIER: return "WebVTT ID";
+ case AV_PKT_DATA_WEBVTT_SETTINGS: return "WebVTT Settings";
+ case AV_PKT_DATA_METADATA_UPDATE: return "Metadata Update";
+ case AV_PKT_DATA_MPEGTS_STREAM_ID: return "MPEGTS Stream ID";
+ case AV_PKT_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
+ case AV_PKT_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata";
+ case AV_PKT_DATA_SPHERICAL: return "Spherical Mapping";
+ case AV_PKT_DATA_A53_CC: return "A53 Closed Captions";
+ case AV_PKT_DATA_ENCRYPTION_INIT_INFO: return "Encryption initialization data";
+ case AV_PKT_DATA_ENCRYPTION_INFO: return "Encryption info";
+ case AV_PKT_DATA_AFD: return "Active Format Description data";
+ case AV_PKT_DATA_PRFT: return "Producer Reference Time";
+ case AV_PKT_DATA_ICC_PROFILE: return "ICC Profile";
+ case AV_PKT_DATA_DOVI_CONF: return "DOVI configuration record";
+ case AV_PKT_DATA_S12M_TIMECODE: return "SMPTE ST 12-1:2014 timecode";
+ case AV_PKT_DATA_DYNAMIC_HDR10_PLUS: return "HDR10+ Dynamic Metadata (SMPTE 2094-40)";
+ }
+ return NULL;
+}
+
+uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size)
+{
+ uint8_t *data = NULL;
+ *size = 0;
+
+ if (!dict)
+ return NULL;
+
+ for (int pass = 0; pass < 2; pass++) {
+ const AVDictionaryEntry *t = NULL;
+ size_t total_length = 0;
+
+ while ((t = av_dict_iterate(dict, t))) {
+ for (int i = 0; i < 2; i++) {
+ const char *str = i ? t->value : t->key;
+ const size_t len = strlen(str) + 1;
+
+ if (pass)
+ memcpy(data + total_length, str, len);
+ else if (len > SIZE_MAX - total_length)
+ return NULL;
+ total_length += len;
+ }
+ }
+ if (pass)
+ break;
+ data = av_malloc(total_length);
+ if (!data)
+ return NULL;
+ *size = total_length;
+ }
+
+ return data;
+}
+
+int av_packet_unpack_dictionary(const uint8_t *data, size_t size,
+ AVDictionary **dict)
+{
+ const uint8_t *end;
+ int ret;
+
+ if (!dict || !data || !size)
+ return 0;
+ end = data + size;
+ if (size && end[-1])
+ return AVERROR_INVALIDDATA;
+ while (data < end) {
+ const uint8_t *key = data;
+ const uint8_t *val = data + strlen(key) + 1;
+
+ if (val >= end || !*key)
+ return AVERROR_INVALIDDATA;
+
+ ret = av_dict_set(dict, key, val, 0);
+ if (ret < 0)
+ return ret;
+ data = val + strlen(val) + 1;
+ }
+
+ return 0;
+}
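
For illustration, a minimal round-trip sketch of the two functions above: pack an AVDictionary into the flat NUL-separated key/value buffer, attach it as AV_PKT_DATA_STRINGS_METADATA side data, then unpack it again. The metadata_roundtrip() name is invented and error handling is trimmed.

    #include "libavutil/dict.h"
    #include "packet.h"

    static int metadata_roundtrip(AVPacket *pkt, AVDictionary **out)
    {
        AVDictionary *meta = NULL;
        size_t size;
        uint8_t *buf;
        int ret;

        av_dict_set(&meta, "title", "example", 0);
        buf = av_packet_pack_dictionary(meta, &size);
        av_dict_free(&meta);
        if (!buf)
            return AVERROR(ENOMEM);

        ret = av_packet_add_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, buf, size);
        if (ret < 0) {
            av_free(buf);               /* side data did not take ownership */
            return ret;
        }

        buf = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
        return av_packet_unpack_dictionary(buf, size, out);
    }
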
+
+int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ size_t size)
+{
+ int i;
+
+ for (i = 0; i < pkt->side_data_elems; i++) {
+ if (pkt->side_data[i].type == type) {
+ if (size > pkt->side_data[i].size)
+ return AVERROR(ENOMEM);
+ pkt->side_data[i].size = size;
+ return 0;
+ }
+ }
+ return AVERROR(ENOENT);
+}
+
+int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
+{
+ int i, ret;
+
+ dst->pts = src->pts;
+ dst->dts = src->dts;
+ dst->pos = src->pos;
+ dst->duration = src->duration;
+ dst->flags = src->flags;
+ dst->stream_index = src->stream_index;
+ dst->opaque = src->opaque;
+ dst->time_base = src->time_base;
+ dst->opaque_ref = NULL;
+ dst->side_data = NULL;
+ dst->side_data_elems = 0;
+
+ ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < src->side_data_elems; i++) {
+ enum AVPacketSideDataType type = src->side_data[i].type;
+ size_t size = src->side_data[i].size;
+ uint8_t *src_data = src->side_data[i].data;
+ uint8_t *dst_data = av_packet_new_side_data(dst, type, size);
+
+ if (!dst_data) {
+ av_buffer_unref(&dst->opaque_ref);
+ av_packet_free_side_data(dst);
+ return AVERROR(ENOMEM);
+ }
+ memcpy(dst_data, src_data, size);
+ }
+
+ return 0;
+}
+
+void av_packet_unref(AVPacket *pkt)
+{
+ av_packet_free_side_data(pkt);
+ av_buffer_unref(&pkt->opaque_ref);
+ av_buffer_unref(&pkt->buf);
+ get_packet_defaults(pkt);
+}
+
+int av_packet_ref(AVPacket *dst, const AVPacket *src)
+{
+ int ret;
+
+ dst->buf = NULL;
+
+ ret = av_packet_copy_props(dst, src);
+ if (ret < 0)
+ goto fail;
+
+ if (!src->buf) {
+ ret = packet_alloc(&dst->buf, src->size);
+ if (ret < 0)
+ goto fail;
+ av_assert1(!src->size || src->data);
+ if (src->size)
+ memcpy(dst->buf->data, src->data, src->size);
+
+ dst->data = dst->buf->data;
+ } else {
+ dst->buf = av_buffer_ref(src->buf);
+ if (!dst->buf) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ dst->data = src->data;
+ }
+
+ dst->size = src->size;
+
+ return 0;
+fail:
+ av_packet_unref(dst);
+ return ret;
+}
+
+AVPacket *av_packet_clone(const AVPacket *src)
+{
+ AVPacket *ret = av_packet_alloc();
+
+ if (!ret)
+ return ret;
+
+ if (av_packet_ref(ret, src))
+ av_packet_free(&ret);
+
+ return ret;
+}
+
+void av_packet_move_ref(AVPacket *dst, AVPacket *src)
+{
+ *dst = *src;
+ get_packet_defaults(src);
+}
+
+int av_packet_make_refcounted(AVPacket *pkt)
+{
+ int ret;
+
+ if (pkt->buf)
+ return 0;
+
+ ret = packet_alloc(&pkt->buf, pkt->size);
+ if (ret < 0)
+ return ret;
+ av_assert1(!pkt->size || pkt->data);
+ if (pkt->size)
+ memcpy(pkt->buf->data, pkt->data, pkt->size);
+
+ pkt->data = pkt->buf->data;
+
+ return 0;
+}
+
+int av_packet_make_writable(AVPacket *pkt)
+{
+ AVBufferRef *buf = NULL;
+ int ret;
+
+ if (pkt->buf && av_buffer_is_writable(pkt->buf))
+ return 0;
+
+ ret = packet_alloc(&buf, pkt->size);
+ if (ret < 0)
+ return ret;
+ av_assert1(!pkt->size || pkt->data);
+ if (pkt->size)
+ memcpy(buf->data, pkt->data, pkt->size);
+
+ av_buffer_unref(&pkt->buf);
+ pkt->buf = buf;
+ pkt->data = buf->data;
+
+ return 0;
+}
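
For illustration, a sketch of the intended call pattern before modifying packet payload in place: make the buffer writable (which also leaves the packet refcounted), then it is safe to touch pkt->data. The scramble_packet() name and the XOR body are invented for the example.

    static int scramble_packet(AVPacket *pkt)
    {
        int ret = av_packet_make_writable(pkt);
        if (ret < 0)
            return ret;
        for (int i = 0; i < pkt->size; i++)
            pkt->data[i] ^= 0xFF;       /* safe: no other reference sees this buffer */
        return 0;
    }
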
+
+void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
+{
+ if (pkt->pts != AV_NOPTS_VALUE)
+ pkt->pts = av_rescale_q(pkt->pts, src_tb, dst_tb);
+ if (pkt->dts != AV_NOPTS_VALUE)
+ pkt->dts = av_rescale_q(pkt->dts, src_tb, dst_tb);
+ if (pkt->duration > 0)
+ pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
+}
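
For example, rescaling a packet from a 90 kHz timebase to milliseconds is a one-liner with the helper above (the timebase values are chosen purely for illustration):

    av_packet_rescale_ts(pkt, (AVRational){1, 90000}, (AVRational){1, 1000});
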
+
+int avpriv_packet_list_put(PacketList *packet_buffer,
+ AVPacket *pkt,
+ int (*copy)(AVPacket *dst, const AVPacket *src),
+ int flags)
+{
+ PacketListEntry *pktl = av_malloc(sizeof(*pktl));
+ int ret;
+
+ if (!pktl)
+ return AVERROR(ENOMEM);
+
+ if (copy) {
+ get_packet_defaults(&pktl->pkt);
+ ret = copy(&pktl->pkt, pkt);
+ if (ret < 0) {
+ av_free(pktl);
+ return ret;
+ }
+ } else {
+ ret = av_packet_make_refcounted(pkt);
+ if (ret < 0) {
+ av_free(pktl);
+ return ret;
+ }
+ av_packet_move_ref(&pktl->pkt, pkt);
+ }
+
+ pktl->next = NULL;
+
+ if (packet_buffer->head)
+ packet_buffer->tail->next = pktl;
+ else
+ packet_buffer->head = pktl;
+
+    /* Append the packet to the buffered packet list. */
+ packet_buffer->tail = pktl;
+ return 0;
+}
+
+int avpriv_packet_list_get(PacketList *pkt_buffer,
+ AVPacket *pkt)
+{
+ PacketListEntry *pktl = pkt_buffer->head;
+ if (!pktl)
+ return AVERROR(EAGAIN);
+ *pkt = pktl->pkt;
+ pkt_buffer->head = pktl->next;
+ if (!pkt_buffer->head)
+ pkt_buffer->tail = NULL;
+ av_freep(&pktl);
+ return 0;
+}
+
+void avpriv_packet_list_free(PacketList *pkt_buf)
+{
+ PacketListEntry *tmp = pkt_buf->head;
+
+ while (tmp) {
+ PacketListEntry *pktl = tmp;
+ tmp = pktl->next;
+ av_packet_unref(&pktl->pkt);
+ av_freep(&pktl);
+ }
+ pkt_buf->head = pkt_buf->tail = NULL;
+}
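
A sketch of the FIFO pattern these avpriv_ helpers implement; PacketList and PacketListEntry are assumed to come from packet_internal.h, and av_packet_ref is passed as the copy callback so the caller keeps ownership of its own packet. The queue_and_drain() name is invented.

    static int queue_and_drain(AVPacket *in)
    {
        PacketList q = { 0 };           /* head and tail start out NULL */
        AVPacket out;
        int ret;

        ret = avpriv_packet_list_put(&q, in, av_packet_ref, 0);
        if (ret < 0)
            return ret;

        while (avpriv_packet_list_get(&q, &out) >= 0) {
            /* consume 'out' here, then release it */
            av_packet_unref(&out);
        }
        avpriv_packet_list_free(&q);    /* frees anything still queued */
        return 0;
    }
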
+
+int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
+{
+ uint8_t *side_data;
+ size_t side_data_size;
+ int i;
+
+ side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, &side_data_size);
+ if (!side_data) {
+ side_data_size = 4+4+8*error_count;
+ side_data = av_packet_new_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
+ side_data_size);
+ }
+
+ if (!side_data || side_data_size < 4+4+8*error_count)
+ return AVERROR(ENOMEM);
+
+ AV_WL32(side_data , quality );
+ side_data[4] = pict_type;
+ side_data[5] = error_count;
+ for (i = 0; i<error_count; i++)
+ AV_WL64(side_data+8 + 8*i , error[i]);
+
+ return 0;
+}
+
+int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp)
+{
+ AVProducerReferenceTime *prft;
+ uint8_t *side_data;
+ size_t side_data_size;
+
+ side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_PRFT, &side_data_size);
+ if (!side_data) {
+ side_data_size = sizeof(AVProducerReferenceTime);
+ side_data = av_packet_new_side_data(pkt, AV_PKT_DATA_PRFT, side_data_size);
+ }
+
+ if (!side_data || side_data_size < sizeof(AVProducerReferenceTime))
+ return AVERROR(ENOMEM);
+
+ prft = (AVProducerReferenceTime *)side_data;
+ prft->wallclock = timestamp;
+ prft->flags = 0;
+
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/avpicture.c b/media/ffvpx/libavcodec/avpicture.c
new file mode 100644
index 0000000000..56435f4fc9
--- /dev/null
+++ b/media/ffvpx/libavcodec/avpicture.c
@@ -0,0 +1,82 @@
+/*
+ * AVPicture management routines
+ * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AVPicture management routines
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "libavutil/common.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/colorspace.h"
+
+#if FF_API_AVPICTURE
+FF_DISABLE_DEPRECATION_WARNINGS
+int avpicture_fill(AVPicture *picture, const uint8_t *ptr,
+ enum AVPixelFormat pix_fmt, int width, int height)
+{
+ return av_image_fill_arrays(picture->data, picture->linesize,
+ ptr, pix_fmt, width, height, 1);
+}
+
+int avpicture_layout(const AVPicture* src, enum AVPixelFormat pix_fmt, int width, int height,
+ unsigned char *dest, int dest_size)
+{
+ return av_image_copy_to_buffer(dest, dest_size,
+ (const uint8_t * const*)src->data, src->linesize,
+ pix_fmt, width, height, 1);
+}
+
+int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height)
+{
+ return av_image_get_buffer_size(pix_fmt, width, height, 1);
+}
+
+int avpicture_alloc(AVPicture *picture,
+ enum AVPixelFormat pix_fmt, int width, int height)
+{
+ int ret = av_image_alloc(picture->data, picture->linesize,
+ width, height, pix_fmt, 1);
+ if (ret < 0) {
+ memset(picture, 0, sizeof(AVPicture));
+ return ret;
+ }
+
+ return 0;
+}
+
+void avpicture_free(AVPicture *picture)
+{
+ av_freep(&picture->data[0]);
+}
+
+void av_picture_copy(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int width, int height)
+{
+ av_image_copy(dst->data, dst->linesize, (const uint8_t **)src->data,
+ src->linesize, pix_fmt, width, height);
+}
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif /* FF_API_AVPICTURE */
diff --git a/media/ffvpx/libavcodec/bit_depth_template.c b/media/ffvpx/libavcodec/bit_depth_template.c
new file mode 100644
index 0000000000..d44d47ea45
--- /dev/null
+++ b/media/ffvpx/libavcodec/bit_depth_template.c
@@ -0,0 +1,108 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "mathops.h"
+#include "rnd_avg.h"
+#include "libavutil/intreadwrite.h"
+
+#ifndef BIT_DEPTH
+#define BIT_DEPTH 8
+#endif
+
+#ifdef AVCODEC_BIT_DEPTH_TEMPLATE_C
+# undef pixel
+# undef pixel2
+# undef pixel4
+# undef dctcoef
+# undef idctin
+# undef INIT_CLIP
+# undef no_rnd_avg_pixel4
+# undef rnd_avg_pixel4
+# undef AV_RN2P
+# undef AV_RN4P
+# undef AV_RN4PA
+# undef AV_WN2P
+# undef AV_WN4P
+# undef AV_WN4PA
+# undef CLIP
+# undef FUNC
+# undef FUNCC
+# undef av_clip_pixel
+# undef PIXEL_SPLAT_X4
+#else
+# define AVCODEC_BIT_DEPTH_TEMPLATE_C
+#endif
+
+#if BIT_DEPTH > 8
+# define pixel uint16_t
+# define pixel2 uint32_t
+# define pixel4 uint64_t
+# define dctcoef int32_t
+
+#ifdef IN_IDCT_DEPTH
+#if IN_IDCT_DEPTH == 32
+# define idctin int32_t
+#else
+# define idctin int16_t
+#endif
+#else
+# define idctin int16_t
+#endif
+
+# define INIT_CLIP
+# define no_rnd_avg_pixel4 no_rnd_avg64
+# define rnd_avg_pixel4 rnd_avg64
+# define AV_RN2P AV_RN32
+# define AV_RN4P AV_RN64
+# define AV_RN4PA AV_RN64A
+# define AV_WN2P AV_WN32
+# define AV_WN4P AV_WN64
+# define AV_WN4PA AV_WN64A
+# define PIXEL_SPLAT_X4(x) ((x)*0x0001000100010001ULL)
+
+# define av_clip_pixel(a) av_clip_uintp2(a, BIT_DEPTH)
+# define CLIP(a) av_clip_uintp2(a, BIT_DEPTH)
+#else
+# define pixel uint8_t
+# define pixel2 uint16_t
+# define pixel4 uint32_t
+# define dctcoef int16_t
+# define idctin int16_t
+
+# define INIT_CLIP
+# define no_rnd_avg_pixel4 no_rnd_avg32
+# define rnd_avg_pixel4 rnd_avg32
+# define AV_RN2P AV_RN16
+# define AV_RN4P AV_RN32
+# define AV_RN4PA AV_RN32A
+# define AV_WN2P AV_WN16
+# define AV_WN4P AV_WN32
+# define AV_WN4PA AV_WN32A
+# define PIXEL_SPLAT_X4(x) ((x)*0x01010101U)
+
+# define av_clip_pixel(a) av_clip_uint8(a)
+# define CLIP(a) av_clip_uint8(a)
+#endif
+
+#define FUNC3(a, b, c) a ## _ ## b ## c
+#define FUNC2(a, b, c) FUNC3(a, b, c)
+#define FUNC(a) FUNC2(a, BIT_DEPTH,)
+#define FUNCC(a) FUNC2(a, BIT_DEPTH, _c)
+#define FUNC4(a, b, c) a ## _int ## b ## _ ## c ## bit
+#define FUNC5(a, b, c) FUNC4(a, b, c)
+#define FUNC6(a) FUNC5(a, IN_IDCT_DEPTH, BIT_DEPTH)
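
A hedged sketch of how this template is normally consumed: a hypothetical foo_template.c is written against the pixel/FUNCC macros and then included once per bit depth, so that clear_row_8_c and clear_row_10_c are both generated. All file and function names here are invented for illustration.

    /* foo_template.c (hypothetical) */
    #include "bit_depth_template.c"

    static void FUNCC(clear_row)(uint8_t *_row, int n)
    {
        pixel *row = (pixel *)_row;     /* uint8_t for BIT_DEPTH 8, uint16_t above */
        for (int i = 0; i < n; i++)
            row[i] = 0;
    }

    /* foo.c (hypothetical) */
    #define BIT_DEPTH 8
    #include "foo_template.c"
    #undef  BIT_DEPTH

    #define BIT_DEPTH 10
    #include "foo_template.c"
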
diff --git a/media/ffvpx/libavcodec/bitstream.c b/media/ffvpx/libavcodec/bitstream.c
new file mode 100644
index 0000000000..3606575055
--- /dev/null
+++ b/media/ffvpx/libavcodec/bitstream.c
@@ -0,0 +1,72 @@
+/*
+ * Common bit i/o utils
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2010 Loren Merritt
+ *
+ * alternative bitstream reader & writer by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Bitstream writing helpers.
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "config.h"
+#include "libavutil/avassert.h"
+#include "libavutil/intreadwrite.h"
+#include "put_bits.h"
+
+void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
+{
+ while (*string) {
+ put_bits(pb, 8, *string);
+ string++;
+ }
+ if (terminate_string)
+ put_bits(pb, 8, 0);
+}
+
+void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
+{
+ int words = length >> 4;
+ int bits = length & 15;
+ int i;
+
+ if (length == 0)
+ return;
+
+ av_assert0(length <= put_bits_left(pb));
+
+ if (CONFIG_SMALL || words < 16 || put_bits_count(pb) & 7) {
+ for (i = 0; i < words; i++)
+ put_bits(pb, 16, AV_RB16(src + 2 * i));
+ } else {
+ for (i = 0; put_bits_count(pb) & 31; i++)
+ put_bits(pb, 8, src[i]);
+ flush_put_bits(pb);
+ memcpy(put_bits_ptr(pb), src + i, 2 * words - i);
+ skip_put_bytes(pb, 2 * words - i);
+ }
+
+ put_bits(pb, bits, AV_RB16(src + 2 * words) >> (16 - bits));
+}
diff --git a/media/ffvpx/libavcodec/bitstream_filters.c b/media/ffvpx/libavcodec/bitstream_filters.c
new file mode 100644
index 0000000000..e8216819ca
--- /dev/null
+++ b/media/ffvpx/libavcodec/bitstream_filters.c
@@ -0,0 +1,109 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "libavutil/log.h"
+
+#include "bsf.h"
+#include "bsf_internal.h"
+
+extern const FFBitStreamFilter ff_aac_adtstoasc_bsf;
+extern const FFBitStreamFilter ff_av1_frame_merge_bsf;
+extern const FFBitStreamFilter ff_av1_frame_split_bsf;
+extern const FFBitStreamFilter ff_av1_metadata_bsf;
+extern const FFBitStreamFilter ff_chomp_bsf;
+extern const FFBitStreamFilter ff_dump_extradata_bsf;
+extern const FFBitStreamFilter ff_dca_core_bsf;
+extern const FFBitStreamFilter ff_dts2pts_bsf;
+extern const FFBitStreamFilter ff_dv_error_marker_bsf;
+extern const FFBitStreamFilter ff_eac3_core_bsf;
+extern const FFBitStreamFilter ff_extract_extradata_bsf;
+extern const FFBitStreamFilter ff_filter_units_bsf;
+extern const FFBitStreamFilter ff_h264_metadata_bsf;
+extern const FFBitStreamFilter ff_h264_mp4toannexb_bsf;
+extern const FFBitStreamFilter ff_h264_redundant_pps_bsf;
+extern const FFBitStreamFilter ff_hapqa_extract_bsf;
+extern const FFBitStreamFilter ff_hevc_metadata_bsf;
+extern const FFBitStreamFilter ff_hevc_mp4toannexb_bsf;
+extern const FFBitStreamFilter ff_imx_dump_header_bsf;
+extern const FFBitStreamFilter ff_media100_to_mjpegb_bsf;
+extern const FFBitStreamFilter ff_mjpeg2jpeg_bsf;
+extern const FFBitStreamFilter ff_mjpega_dump_header_bsf;
+extern const FFBitStreamFilter ff_mp3_header_decompress_bsf;
+extern const FFBitStreamFilter ff_mpeg2_metadata_bsf;
+extern const FFBitStreamFilter ff_mpeg4_unpack_bframes_bsf;
+extern const FFBitStreamFilter ff_mov2textsub_bsf;
+extern const FFBitStreamFilter ff_noise_bsf;
+extern const FFBitStreamFilter ff_null_bsf;
+extern const FFBitStreamFilter ff_opus_metadata_bsf;
+extern const FFBitStreamFilter ff_pcm_rechunk_bsf;
+extern const FFBitStreamFilter ff_pgs_frame_merge_bsf;
+extern const FFBitStreamFilter ff_prores_metadata_bsf;
+extern const FFBitStreamFilter ff_remove_extradata_bsf;
+extern const FFBitStreamFilter ff_setts_bsf;
+extern const FFBitStreamFilter ff_text2movsub_bsf;
+extern const FFBitStreamFilter ff_trace_headers_bsf;
+extern const FFBitStreamFilter ff_truehd_core_bsf;
+extern const FFBitStreamFilter ff_vp9_metadata_bsf;
+extern const FFBitStreamFilter ff_vp9_raw_reorder_bsf;
+extern const FFBitStreamFilter ff_vp9_superframe_bsf;
+extern const FFBitStreamFilter ff_vp9_superframe_split_bsf;
+
+#include "libavcodec/bsf_list.c"
+
+const AVBitStreamFilter *av_bsf_iterate(void **opaque)
+{
+ uintptr_t i = (uintptr_t)*opaque;
+ const FFBitStreamFilter *f = bitstream_filters[i];
+
+ if (f) {
+ *opaque = (void*)(i + 1);
+ return &f->p;
+ }
+ return NULL;
+}
+
+const AVBitStreamFilter *av_bsf_get_by_name(const char *name)
+{
+ const AVBitStreamFilter *f = NULL;
+ void *i = 0;
+
+ if (!name)
+ return NULL;
+
+ while ((f = av_bsf_iterate(&i))) {
+ if (!strcmp(f->name, name))
+ return f;
+ }
+
+ return NULL;
+}
+
+const AVClass *ff_bsf_child_class_iterate(void **opaque)
+{
+ const AVBitStreamFilter *f;
+
+ /* find next filter with priv options */
+ while ((f = av_bsf_iterate(opaque))) {
+ if (f->priv_class)
+ return f->priv_class;
+ }
+ return NULL;
+}
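
For illustration, walking the registry above from calling code; with the trimmed bsf_list.c used in this tree only a few names are expected. The list_bitstream_filters() name is invented for the example.

    #include <stdio.h>
    #include "bsf.h"

    static void list_bitstream_filters(void)
    {
        const AVBitStreamFilter *f = NULL;
        void *it = NULL;

        while ((f = av_bsf_iterate(&it)))
            printf("%s\n", f->name);
    }
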
diff --git a/media/ffvpx/libavcodec/blockdsp.h b/media/ffvpx/libavcodec/blockdsp.h
new file mode 100644
index 0000000000..d853adada2
--- /dev/null
+++ b/media/ffvpx/libavcodec/blockdsp.h
@@ -0,0 +1,47 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_BLOCKDSP_H
+#define AVCODEC_BLOCKDSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* add and put pixel (decoding)
+ * Block sizes for op_pixels_func are 8x4, 8x8, 16x8 and 16x16.
+ * h for op_pixels_func is limited to { width / 2, width },
+ * but never larger than 16 and never smaller than 4. */
+typedef void (*op_fill_func)(uint8_t *block /* align width (8 or 16) */,
+ uint8_t value, ptrdiff_t line_size, int h);
+
+typedef struct BlockDSPContext {
+ void (*clear_block)(int16_t *block /* align 32 */);
+ void (*clear_blocks)(int16_t *blocks /* align 32 */);
+
+ op_fill_func fill_block_tab[2];
+} BlockDSPContext;
+
+void ff_blockdsp_init(BlockDSPContext *c);
+
+void ff_blockdsp_init_alpha(BlockDSPContext *c);
+void ff_blockdsp_init_arm(BlockDSPContext *c);
+void ff_blockdsp_init_ppc(BlockDSPContext *c);
+void ff_blockdsp_init_x86(BlockDSPContext *c);
+void ff_blockdsp_init_mips(BlockDSPContext *c);
+
+#endif /* AVCODEC_BLOCKDSP_H */
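
For illustration, typical decoder-side use of this context: the function pointers are filled in by ff_blockdsp_init() and expect 32-byte-aligned coefficient blocks. C11 alignas and the clear_one_block() name are assumptions of this sketch.

    #include <stdalign.h>
    #include "blockdsp.h"

    static void clear_one_block(void)
    {
        BlockDSPContext bdsp;
        alignas(32) int16_t block[64];

        ff_blockdsp_init(&bdsp);
        bdsp.clear_block(block);        /* zero one 8x8 block of coefficients */
    }
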
diff --git a/media/ffvpx/libavcodec/bsf.c b/media/ffvpx/libavcodec/bsf.c
new file mode 100644
index 0000000000..42cc1b5ab0
--- /dev/null
+++ b/media/ffvpx/libavcodec/bsf.c
@@ -0,0 +1,562 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+
+#include "config_components.h"
+
+#include "libavutil/avassert.h"
+#include "libavutil/log.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+
+#include "bsf.h"
+#include "bsf_internal.h"
+#include "codec_desc.h"
+#include "codec_par.h"
+
+#define IS_EMPTY(pkt) (!(pkt)->data && !(pkt)->side_data_elems)
+
+static av_always_inline const FFBitStreamFilter *ff_bsf(const AVBitStreamFilter *bsf)
+{
+ return (const FFBitStreamFilter*)bsf;
+}
+
+typedef struct FFBSFContext {
+ AVBSFContext pub;
+ AVPacket *buffer_pkt;
+ int eof;
+} FFBSFContext;
+
+static av_always_inline FFBSFContext *ffbsfcontext(AVBSFContext *ctx)
+{
+ return (FFBSFContext *)ctx;
+}
+
+void av_bsf_free(AVBSFContext **pctx)
+{
+ AVBSFContext *ctx;
+ FFBSFContext *bsfi;
+
+ if (!pctx || !*pctx)
+ return;
+ ctx = *pctx;
+ bsfi = ffbsfcontext(ctx);
+
+ if (ctx->priv_data) {
+ if (ff_bsf(ctx->filter)->close)
+ ff_bsf(ctx->filter)->close(ctx);
+ if (ctx->filter->priv_class)
+ av_opt_free(ctx->priv_data);
+ av_freep(&ctx->priv_data);
+ }
+ av_packet_free(&bsfi->buffer_pkt);
+
+ avcodec_parameters_free(&ctx->par_in);
+ avcodec_parameters_free(&ctx->par_out);
+
+ av_freep(pctx);
+}
+
+static void *bsf_child_next(void *obj, void *prev)
+{
+ AVBSFContext *ctx = obj;
+ if (!prev && ctx->filter->priv_class)
+ return ctx->priv_data;
+ return NULL;
+}
+
+static const char *bsf_to_name(void *bsf)
+{
+ return ((AVBSFContext *)bsf)->filter->name;
+}
+
+static const AVClass bsf_class = {
+ .class_name = "AVBSFContext",
+ .item_name = bsf_to_name,
+ .version = LIBAVUTIL_VERSION_INT,
+ .child_next = bsf_child_next,
+ .child_class_iterate = ff_bsf_child_class_iterate,
+ .category = AV_CLASS_CATEGORY_BITSTREAM_FILTER,
+};
+
+const AVClass *av_bsf_get_class(void)
+{
+ return &bsf_class;
+}
+
+int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
+{
+ AVBSFContext *ctx;
+ FFBSFContext *bsfi;
+ int ret;
+
+ bsfi = av_mallocz(sizeof(*bsfi));
+ if (!bsfi)
+ return AVERROR(ENOMEM);
+ ctx = &bsfi->pub;
+
+ ctx->av_class = &bsf_class;
+ ctx->filter = filter;
+
+ ctx->par_in = avcodec_parameters_alloc();
+ ctx->par_out = avcodec_parameters_alloc();
+ if (!ctx->par_in || !ctx->par_out) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ /* allocate priv data and init private options */
+ if (ff_bsf(filter)->priv_data_size) {
+ ctx->priv_data = av_mallocz(ff_bsf(filter)->priv_data_size);
+ if (!ctx->priv_data) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ if (filter->priv_class) {
+ *(const AVClass **)ctx->priv_data = filter->priv_class;
+ av_opt_set_defaults(ctx->priv_data);
+ }
+ }
+ bsfi->buffer_pkt = av_packet_alloc();
+ if (!bsfi->buffer_pkt) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ *pctx = ctx;
+ return 0;
+fail:
+ av_bsf_free(&ctx);
+ return ret;
+}
+
+int av_bsf_init(AVBSFContext *ctx)
+{
+ int ret, i;
+
+ /* check that the codec is supported */
+ if (ctx->filter->codec_ids) {
+ for (i = 0; ctx->filter->codec_ids[i] != AV_CODEC_ID_NONE; i++)
+ if (ctx->par_in->codec_id == ctx->filter->codec_ids[i])
+ break;
+ if (ctx->filter->codec_ids[i] == AV_CODEC_ID_NONE) {
+ const AVCodecDescriptor *desc = avcodec_descriptor_get(ctx->par_in->codec_id);
+ av_log(ctx, AV_LOG_ERROR, "Codec '%s' (%d) is not supported by the "
+ "bitstream filter '%s'. Supported codecs are: ",
+ desc ? desc->name : "unknown", ctx->par_in->codec_id, ctx->filter->name);
+ for (i = 0; ctx->filter->codec_ids[i] != AV_CODEC_ID_NONE; i++) {
+ enum AVCodecID codec_id = ctx->filter->codec_ids[i];
+ av_log(ctx, AV_LOG_ERROR, "%s (%d) ",
+ avcodec_get_name(codec_id), codec_id);
+ }
+ av_log(ctx, AV_LOG_ERROR, "\n");
+ return AVERROR(EINVAL);
+ }
+ }
+
+    /* Initialize the output parameters to be the same as the input;
+     * the init callback below may overwrite them. */
+ ret = avcodec_parameters_copy(ctx->par_out, ctx->par_in);
+ if (ret < 0)
+ return ret;
+
+ ctx->time_base_out = ctx->time_base_in;
+
+ if (ff_bsf(ctx->filter)->init) {
+ ret = ff_bsf(ctx->filter)->init(ctx);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+void av_bsf_flush(AVBSFContext *ctx)
+{
+ FFBSFContext *const bsfi = ffbsfcontext(ctx);
+
+ bsfi->eof = 0;
+
+ av_packet_unref(bsfi->buffer_pkt);
+
+ if (ff_bsf(ctx->filter)->flush)
+ ff_bsf(ctx->filter)->flush(ctx);
+}
+
+int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
+{
+ FFBSFContext *const bsfi = ffbsfcontext(ctx);
+ int ret;
+
+ if (!pkt || IS_EMPTY(pkt)) {
+ if (pkt)
+ av_packet_unref(pkt);
+ bsfi->eof = 1;
+ return 0;
+ }
+
+ if (bsfi->eof) {
+ av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!IS_EMPTY(bsfi->buffer_pkt))
+ return AVERROR(EAGAIN);
+
+ ret = av_packet_make_refcounted(pkt);
+ if (ret < 0)
+ return ret;
+ av_packet_move_ref(bsfi->buffer_pkt, pkt);
+
+ return 0;
+}
+
+int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
+{
+ return ff_bsf(ctx->filter)->filter(ctx, pkt);
+}
+
+int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt)
+{
+ FFBSFContext *const bsfi = ffbsfcontext(ctx);
+ AVPacket *tmp_pkt;
+
+ if (bsfi->eof)
+ return AVERROR_EOF;
+
+ if (IS_EMPTY(bsfi->buffer_pkt))
+ return AVERROR(EAGAIN);
+
+ tmp_pkt = av_packet_alloc();
+ if (!tmp_pkt)
+ return AVERROR(ENOMEM);
+
+ *pkt = bsfi->buffer_pkt;
+ bsfi->buffer_pkt = tmp_pkt;
+
+ return 0;
+}
+
+int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt)
+{
+ FFBSFContext *const bsfi = ffbsfcontext(ctx);
+
+ if (bsfi->eof)
+ return AVERROR_EOF;
+
+ if (IS_EMPTY(bsfi->buffer_pkt))
+ return AVERROR(EAGAIN);
+
+ av_packet_move_ref(pkt, bsfi->buffer_pkt);
+
+ return 0;
+}
+
+typedef struct BSFListContext {
+ const AVClass *class;
+
+ AVBSFContext **bsfs;
+ int nb_bsfs;
+
+ unsigned idx; // index of currently processed BSF
+
+ char * item_name;
+} BSFListContext;
+
+
+static int bsf_list_init(AVBSFContext *bsf)
+{
+ BSFListContext *lst = bsf->priv_data;
+ int ret, i;
+ const AVCodecParameters *cod_par = bsf->par_in;
+ AVRational tb = bsf->time_base_in;
+
+ for (i = 0; i < lst->nb_bsfs; ++i) {
+ ret = avcodec_parameters_copy(lst->bsfs[i]->par_in, cod_par);
+ if (ret < 0)
+ goto fail;
+
+ lst->bsfs[i]->time_base_in = tb;
+
+ ret = av_bsf_init(lst->bsfs[i]);
+ if (ret < 0)
+ goto fail;
+
+ cod_par = lst->bsfs[i]->par_out;
+ tb = lst->bsfs[i]->time_base_out;
+ }
+
+ bsf->time_base_out = tb;
+ ret = avcodec_parameters_copy(bsf->par_out, cod_par);
+
+fail:
+ return ret;
+}
+
+static int bsf_list_filter(AVBSFContext *bsf, AVPacket *out)
+{
+ BSFListContext *lst = bsf->priv_data;
+ int ret, eof = 0;
+
+ if (!lst->nb_bsfs)
+ return ff_bsf_get_packet_ref(bsf, out);
+
+ while (1) {
+ /* get a packet from the previous filter up the chain */
+ if (lst->idx)
+ ret = av_bsf_receive_packet(lst->bsfs[lst->idx-1], out);
+ else
+ ret = ff_bsf_get_packet_ref(bsf, out);
+ if (ret == AVERROR(EAGAIN)) {
+ if (!lst->idx)
+ return ret;
+ lst->idx--;
+ continue;
+ } else if (ret == AVERROR_EOF) {
+ eof = 1;
+ } else if (ret < 0)
+ return ret;
+
+ /* send it to the next filter down the chain */
+ if (lst->idx < lst->nb_bsfs) {
+ ret = av_bsf_send_packet(lst->bsfs[lst->idx], eof ? NULL : out);
+ av_assert1(ret != AVERROR(EAGAIN));
+ if (ret < 0) {
+ av_packet_unref(out);
+ return ret;
+ }
+ lst->idx++;
+ eof = 0;
+ } else if (eof) {
+ return ret;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static void bsf_list_flush(AVBSFContext *bsf)
+{
+ BSFListContext *lst = bsf->priv_data;
+
+ for (int i = 0; i < lst->nb_bsfs; i++)
+ av_bsf_flush(lst->bsfs[i]);
+ lst->idx = 0;
+}
+
+static void bsf_list_close(AVBSFContext *bsf)
+{
+ BSFListContext *lst = bsf->priv_data;
+ int i;
+
+ for (i = 0; i < lst->nb_bsfs; ++i)
+ av_bsf_free(&lst->bsfs[i]);
+ av_freep(&lst->bsfs);
+ av_freep(&lst->item_name);
+}
+
+static const char *bsf_list_item_name(void *ctx)
+{
+ static const char *null_filter_name = "null";
+ AVBSFContext *bsf_ctx = ctx;
+ BSFListContext *lst = bsf_ctx->priv_data;
+
+ if (!lst->nb_bsfs)
+ return null_filter_name;
+
+ if (!lst->item_name) {
+ int i;
+ AVBPrint bp;
+ av_bprint_init(&bp, 16, 128);
+
+ av_bprintf(&bp, "bsf_list(");
+ for (i = 0; i < lst->nb_bsfs; i++)
+ av_bprintf(&bp, i ? ",%s" : "%s", lst->bsfs[i]->filter->name);
+ av_bprintf(&bp, ")");
+
+ av_bprint_finalize(&bp, &lst->item_name);
+ }
+
+ return lst->item_name;
+}
+
+static const AVClass bsf_list_class = {
+ .class_name = "bsf_list",
+ .item_name = bsf_list_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static const FFBitStreamFilter list_bsf = {
+ .p.name = "bsf_list",
+ .p.priv_class = &bsf_list_class,
+ .priv_data_size = sizeof(BSFListContext),
+ .init = bsf_list_init,
+ .filter = bsf_list_filter,
+ .flush = bsf_list_flush,
+ .close = bsf_list_close,
+};
+
+struct AVBSFList {
+ AVBSFContext **bsfs;
+ int nb_bsfs;
+};
+
+AVBSFList *av_bsf_list_alloc(void)
+{
+ return av_mallocz(sizeof(AVBSFList));
+}
+
+void av_bsf_list_free(AVBSFList **lst)
+{
+ int i;
+
+ if (!*lst)
+ return;
+
+ for (i = 0; i < (*lst)->nb_bsfs; ++i)
+ av_bsf_free(&(*lst)->bsfs[i]);
+ av_free((*lst)->bsfs);
+ av_freep(lst);
+}
+
+int av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf)
+{
+ return av_dynarray_add_nofree(&lst->bsfs, &lst->nb_bsfs, bsf);
+}
+
+static int bsf_list_append_internal(AVBSFList *lst, const char *bsf_name, const char *options, AVDictionary ** options_dict)
+{
+ int ret;
+ const AVBitStreamFilter *filter;
+ AVBSFContext *bsf;
+
+ filter = av_bsf_get_by_name(bsf_name);
+ if (!filter)
+ return AVERROR_BSF_NOT_FOUND;
+
+ ret = av_bsf_alloc(filter, &bsf);
+ if (ret < 0)
+ return ret;
+
+ if (options && filter->priv_class) {
+ const AVOption *opt = av_opt_next(bsf->priv_data, NULL);
+ const char * shorthand[2] = {NULL};
+
+ if (opt)
+ shorthand[0] = opt->name;
+
+ ret = av_opt_set_from_string(bsf->priv_data, options, shorthand, "=", ":");
+ if (ret < 0)
+ goto end;
+ }
+
+ if (options_dict) {
+ ret = av_opt_set_dict2(bsf, options_dict, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0)
+ goto end;
+ }
+
+ ret = av_bsf_list_append(lst, bsf);
+
+end:
+ if (ret < 0)
+ av_bsf_free(&bsf);
+
+ return ret;
+}
+
+int av_bsf_list_append2(AVBSFList *lst, const char *bsf_name, AVDictionary ** options)
+{
+ return bsf_list_append_internal(lst, bsf_name, NULL, options);
+}
+
+int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf)
+{
+ int ret = 0;
+ BSFListContext *ctx;
+
+ if ((*lst)->nb_bsfs == 1) {
+ *bsf = (*lst)->bsfs[0];
+ av_freep(&(*lst)->bsfs);
+ (*lst)->nb_bsfs = 0;
+ goto end;
+ }
+
+ ret = av_bsf_alloc(&list_bsf.p, bsf);
+ if (ret < 0)
+ return ret;
+
+ ctx = (*bsf)->priv_data;
+
+ ctx->bsfs = (*lst)->bsfs;
+ ctx->nb_bsfs = (*lst)->nb_bsfs;
+
+end:
+ av_freep(lst);
+ return ret;
+}
+
+static int bsf_parse_single(char *str, AVBSFList *bsf_lst)
+{
+ char *bsf_name, *bsf_options_str;
+
+ bsf_name = av_strtok(str, "=", &bsf_options_str);
+ if (!bsf_name)
+ return AVERROR(EINVAL);
+
+ return bsf_list_append_internal(bsf_lst, bsf_name, bsf_options_str, NULL);
+}
+
+int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
+{
+ AVBSFList *lst;
+ int ret;
+
+ if (!str)
+ return av_bsf_get_null_filter(bsf_lst);
+
+ lst = av_bsf_list_alloc();
+ if (!lst)
+ return AVERROR(ENOMEM);
+
+ do {
+ char *bsf_str = av_get_token(&str, ",");
+ ret = bsf_parse_single(bsf_str, lst);
+ av_free(bsf_str);
+ if (ret < 0)
+ goto end;
+ } while (*str && *++str);
+
+ ret = av_bsf_list_finalize(&lst, bsf_lst);
+end:
+ if (ret < 0)
+ av_bsf_list_free(&lst);
+ return ret;
+}
+
+int av_bsf_get_null_filter(AVBSFContext **bsf)
+{
+#if CONFIG_NULL_BSF
+ extern const FFBitStreamFilter ff_null_bsf;
+ return av_bsf_alloc(&ff_null_bsf.p, bsf);
+#else
+ return av_bsf_alloc(&list_bsf.p, bsf);
+#endif
+}
diff --git a/media/ffvpx/libavcodec/bsf.h b/media/ffvpx/libavcodec/bsf.h
new file mode 100644
index 0000000000..a09c69f242
--- /dev/null
+++ b/media/ffvpx/libavcodec/bsf.h
@@ -0,0 +1,332 @@
+/*
+ * Bitstream filters public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_BSF_H
+#define AVCODEC_BSF_H
+
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+#include "libavutil/rational.h"
+
+#include "codec_id.h"
+#include "codec_par.h"
+#include "packet.h"
+
+/**
+ * @defgroup lavc_bsf Bitstream filters
+ * @ingroup libavc
+ *
+ * Bitstream filters transform encoded media data without decoding it. This
+ * allows e.g. manipulating various header values. Bitstream filters operate on
+ * @ref AVPacket "AVPackets".
+ *
+ * The bitstream filtering API is centered around two structures:
+ * AVBitStreamFilter and AVBSFContext. The former represents a bitstream filter
+ * in abstract, the latter a specific filtering process. Obtain an
+ * AVBitStreamFilter using av_bsf_get_by_name() or av_bsf_iterate(), then pass
+ * it to av_bsf_alloc() to create an AVBSFContext. Fill in the user-settable
+ * AVBSFContext fields, as described in its documentation, then call
+ * av_bsf_init() to prepare the filter context for use.
+ *
+ * Submit packets for filtering using av_bsf_send_packet(), obtain filtered
+ * results with av_bsf_receive_packet(). When no more input packets will be
+ * sent, submit a NULL AVPacket to signal the end of the stream to the filter.
+ * av_bsf_receive_packet() will then return trailing packets, if any are
+ * produced by the filter.
+ *
+ * Finally, free the filter context with av_bsf_free().
+ * @{
+ */
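
A minimal sketch of the workflow just described, assuming 'par' holds the input stream's codec parameters; the "null" filter and the filter_one_packet() name are stand-ins for this example, and error handling is abbreviated.

    #include "libavutil/error.h"
    #include "libavcodec/bsf.h"

    static int filter_one_packet(const AVCodecParameters *par, AVPacket *in, AVPacket *out)
    {
        const AVBitStreamFilter *f = av_bsf_get_by_name("null");
        AVBSFContext *bsf = NULL;
        int ret;

        if (!f)
            return AVERROR_BSF_NOT_FOUND;
        if ((ret = av_bsf_alloc(f, &bsf)) < 0)
            return ret;
        if ((ret = avcodec_parameters_copy(bsf->par_in, par)) < 0 ||
            (ret = av_bsf_init(bsf)) < 0 ||
            (ret = av_bsf_send_packet(bsf, in)) < 0)
            goto end;

        /* drain everything the filter produced for this input packet */
        while ((ret = av_bsf_receive_packet(bsf, out)) == 0)
            av_packet_unref(out);       /* a real caller would use 'out' first */
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            ret = 0;
    end:
        av_bsf_free(&bsf);
        return ret;
    }
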
+
+/**
+ * The bitstream filter state.
+ *
+ * This struct must be allocated with av_bsf_alloc() and freed with
+ * av_bsf_free().
+ *
+ * The fields in the struct will only be changed (by the caller or by the
+ * filter) as described in their documentation, and are to be considered
+ * immutable otherwise.
+ */
+typedef struct AVBSFContext {
+ /**
+ * A class for logging and AVOptions
+ */
+ const AVClass *av_class;
+
+ /**
+ * The bitstream filter this context is an instance of.
+ */
+ const struct AVBitStreamFilter *filter;
+
+ /**
+ * Opaque filter-specific private data. If filter->priv_class is non-NULL,
+ * this is an AVOptions-enabled struct.
+ */
+ void *priv_data;
+
+ /**
+ * Parameters of the input stream. This field is allocated in
+ * av_bsf_alloc(), it needs to be filled by the caller before
+ * av_bsf_init().
+ */
+ AVCodecParameters *par_in;
+
+ /**
+ * Parameters of the output stream. This field is allocated in
+ * av_bsf_alloc(), it is set by the filter in av_bsf_init().
+ */
+ AVCodecParameters *par_out;
+
+ /**
+ * The timebase used for the timestamps of the input packets. Set by the
+ * caller before av_bsf_init().
+ */
+ AVRational time_base_in;
+
+ /**
+ * The timebase used for the timestamps of the output packets. Set by the
+ * filter in av_bsf_init().
+ */
+ AVRational time_base_out;
+} AVBSFContext;
+
+typedef struct AVBitStreamFilter {
+ const char *name;
+
+ /**
+ * A list of codec ids supported by the filter, terminated by
+ * AV_CODEC_ID_NONE.
+     * May be NULL, in which case the bitstream filter works with any codec id.
+ */
+ const enum AVCodecID *codec_ids;
+
+ /**
+ * A class for the private data, used to declare bitstream filter private
+ * AVOptions. This field is NULL for bitstream filters that do not declare
+ * any options.
+ *
+ * If this field is non-NULL, the first member of the filter private data
+ * must be a pointer to AVClass, which will be set by libavcodec generic
+ * code to this class.
+ */
+ const AVClass *priv_class;
+} AVBitStreamFilter;
+
+/**
+ * @return a bitstream filter with the specified name or NULL if no such
+ * bitstream filter exists.
+ */
+const AVBitStreamFilter *av_bsf_get_by_name(const char *name);
+
+/**
+ * Iterate over all registered bitstream filters.
+ *
+ * @param opaque a pointer where libavcodec will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the next registered bitstream filter or NULL when the iteration is
+ * finished
+ */
+const AVBitStreamFilter *av_bsf_iterate(void **opaque);
+
+/**
+ * Allocate a context for a given bitstream filter. The caller must fill in the
+ * context parameters as described in the documentation and then call
+ * av_bsf_init() before sending any data to the filter.
+ *
+ * @param filter the filter for which to allocate an instance.
+ * @param[out] ctx a pointer into which the pointer to the newly-allocated context
+ * will be written. It must be freed with av_bsf_free() after the
+ * filtering is done.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx);
+
+/**
+ * Prepare the filter for use, after all the parameters and options have been
+ * set.
+ *
+ * @param ctx an AVBSFContext previously allocated with av_bsf_alloc()
+ */
+int av_bsf_init(AVBSFContext *ctx);
+
+/**
+ * Submit a packet for filtering.
+ *
+ * After sending each packet, the filter must be completely drained by calling
+ * av_bsf_receive_packet() repeatedly until it returns AVERROR(EAGAIN) or
+ * AVERROR_EOF.
+ *
+ * @param ctx an initialized AVBSFContext
+ * @param pkt the packet to filter. The bitstream filter will take ownership of
+ * the packet and reset the contents of pkt. pkt is not touched if an error occurs.
+ * If pkt is empty (i.e. NULL, or pkt->data is NULL and pkt->side_data_elems zero),
+ * it signals the end of the stream (i.e. no more non-empty packets will be sent;
+ * sending more empty packets does nothing) and will cause the filter to output
+ * any packets it may have buffered internally.
+ *
+ * @return
+ * - 0 on success.
+ * - AVERROR(EAGAIN) if packets need to be retrieved from the filter (using
+ * av_bsf_receive_packet()) before new input can be consumed.
+ * - Another negative AVERROR value if an error occurs.
+ */
+int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);
+
+/**
+ * Retrieve a filtered packet.
+ *
+ * @param ctx an initialized AVBSFContext
+ * @param[out] pkt this struct will be filled with the contents of the filtered
+ * packet. It is owned by the caller and must be freed using
+ * av_packet_unref() when it is no longer needed.
+ * This parameter should be "clean" (i.e. freshly allocated
+ * with av_packet_alloc() or unreffed with av_packet_unref())
+ * when this function is called. If this function returns
+ * successfully, the contents of pkt will be completely
+ * overwritten by the returned data. On failure, pkt is not
+ * touched.
+ *
+ * @return
+ * - 0 on success.
+ * - AVERROR(EAGAIN) if more packets need to be sent to the filter (using
+ * av_bsf_send_packet()) to get more output.
+ * - AVERROR_EOF if there will be no further output from the filter.
+ * - Another negative AVERROR value if an error occurs.
+ *
+ * @note one input packet may result in several output packets, so after sending
+ * a packet with av_bsf_send_packet(), this function needs to be called
+ * repeatedly until it stops returning 0. It is also possible for a filter to
+ * output fewer packets than were sent to it, so this function may return
+ * AVERROR(EAGAIN) immediately after a successful av_bsf_send_packet() call.
+ */
+int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt);
+
+/**
+ * Reset the internal bitstream filter state. Should be called e.g. when seeking.
+ */
+void av_bsf_flush(AVBSFContext *ctx);
+
+/**
+ * Free a bitstream filter context and everything associated with it; write NULL
+ * into the supplied pointer.
+ */
+void av_bsf_free(AVBSFContext **ctx);
+
+/**
+ * Get the AVClass for AVBSFContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *av_bsf_get_class(void);
+
+/**
+ * Structure for a chain/list of bitstream filters.
+ * An empty list can be allocated with av_bsf_list_alloc().
+ */
+typedef struct AVBSFList AVBSFList;
+
+/**
+ * Allocate an empty list of bitstream filters.
+ * The list must later be freed with av_bsf_list_free()
+ * or finalized with av_bsf_list_finalize().
+ *
+ * @return Pointer to @ref AVBSFList on success, NULL in case of failure
+ */
+AVBSFList *av_bsf_list_alloc(void);
+
+/**
+ * Free a list of bitstream filters.
+ *
+ * @param lst Pointer to pointer returned by av_bsf_list_alloc()
+ */
+void av_bsf_list_free(AVBSFList **lst);
+
+/**
+ * Append a bitstream filter to the list of bitstream filters.
+ *
+ * @param lst List to append to
+ * @param bsf Filter context to be appended
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf);
+
+/**
+ * Construct a new bitstream filter context given its name and options
+ * and append it to the list of bitstream filters.
+ *
+ * @param lst List to append to
+ * @param bsf_name Name of the bitstream filter
+ * @param options Options for the bitstream filter, can be set to NULL
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_append2(AVBSFList *lst, const char * bsf_name, AVDictionary **options);
+/**
+ * Finalize list of bitstream filters.
+ *
+ * This function transforms the @ref AVBSFList into a single @ref AVBSFContext,
+ * so the whole chain of bitstream filters can be treated as a single filter
+ * freshly allocated by av_bsf_alloc().
+ * If the call is successful, the @ref AVBSFList structure is freed and lst
+ * is set to NULL. In case of failure, the caller is responsible for
+ * freeing the structure with av_bsf_list_free().
+ *
+ * @param lst Filter list structure to be transformed
+ * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure
+ * representing the chain of bitstream filters
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf);
+
+/**
+ * Parse a string describing a list of bitstream filters and create a single
+ * @ref AVBSFContext describing the whole chain of bitstream filters.
+ * The resulting @ref AVBSFContext can be treated as any other @ref AVBSFContext freshly
+ * allocated by av_bsf_alloc().
+ *
+ * @param str String describing chain of bitstream filters in format
+ * `bsf1[=opt1=val1:opt2=val2][,bsf2]`
+ * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure
+ * representing the chain of bitstream filters
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf);
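
For example, a chain string using two filter names that exist in this tree (whether they are compiled in depends on the build configuration):

    AVBSFContext *chain = NULL;
    int ret = av_bsf_list_parse_str("vp9_superframe_split,null", &chain);
    /* the caller still fills chain->par_in and calls av_bsf_init() afterwards */
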
+
+/**
+ * Get the null/pass-through bitstream filter.
+ *
+ * @param[out] bsf Pointer to be set to a new instance of the pass-through bitstream filter
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_bsf_get_null_filter(AVBSFContext **bsf);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_BSF_H
diff --git a/media/ffvpx/libavcodec/bsf_internal.h b/media/ffvpx/libavcodec/bsf_internal.h
new file mode 100644
index 0000000000..922b03c01b
--- /dev/null
+++ b/media/ffvpx/libavcodec/bsf_internal.h
@@ -0,0 +1,60 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_BSF_INTERNAL_H
+#define AVCODEC_BSF_INTERNAL_H
+
+#include "libavutil/log.h"
+
+#include "bsf.h"
+#include "packet.h"
+
+typedef struct FFBitStreamFilter {
+ /**
+ * The public AVBitStreamFilter. See bsf.h for it.
+ */
+ AVBitStreamFilter p;
+
+ int priv_data_size;
+ int (*init)(AVBSFContext *ctx);
+ int (*filter)(AVBSFContext *ctx, AVPacket *pkt);
+ void (*close)(AVBSFContext *ctx);
+ void (*flush)(AVBSFContext *ctx);
+} FFBitStreamFilter;
+
+/**
+ * Called by the bitstream filters to get the next packet for filtering.
+ * The filter is responsible for either freeing the packet or passing it to the
+ * caller.
+ */
+int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt);
+
+/**
+ * Called by bitstream filters to get a packet for filtering.
+ * The reference to the packet is moved into the provided packet structure.
+ *
+ * @param ctx pointer to AVBSFContext of filter
+ * @param pkt pointer to packet to move reference to
+ *
+ * @return 0 on success, negative AVERROR in case of failure
+ */
+int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt);
+
+const AVClass *ff_bsf_child_class_iterate(void **opaque);
+
+#endif /* AVCODEC_BSF_INTERNAL_H */
diff --git a/media/ffvpx/libavcodec/bsf_list.c b/media/ffvpx/libavcodec/bsf_list.c
new file mode 100644
index 0000000000..4050b41fde
--- /dev/null
+++ b/media/ffvpx/libavcodec/bsf_list.c
@@ -0,0 +1,11 @@
+#include "config_components.h"
+
+static const FFBitStreamFilter * const bitstream_filters[] = {
+#if CONFIG_VP9_SUPERFRAME_SPLIT_BSF
+ &ff_vp9_superframe_split_bsf,
+#endif
+#if CONFIG_AV1_VAAPI_HWACCEL
+ &ff_av1_frame_split_bsf,
+#endif
+ &ff_null_bsf,
+ NULL };
diff --git a/media/ffvpx/libavcodec/bytestream.h b/media/ffvpx/libavcodec/bytestream.h
new file mode 100644
index 0000000000..d0033f14f3
--- /dev/null
+++ b/media/ffvpx/libavcodec/bytestream.h
@@ -0,0 +1,380 @@
+/*
+ * Bytestream functions
+ * copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
+ * Copyright (c) 2012 Aneesh Dogra (lionaneesh) <lionaneesh@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_BYTESTREAM_H
+#define AVCODEC_BYTESTREAM_H
+
+#include <stdint.h>
+#include <string.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/intreadwrite.h"
+
+typedef struct GetByteContext {
+ const uint8_t *buffer, *buffer_end, *buffer_start;
+} GetByteContext;
+
+typedef struct PutByteContext {
+ uint8_t *buffer, *buffer_end, *buffer_start;
+ int eof;
+} PutByteContext;
+
+#define DEF(type, name, bytes, read, write) \
+static av_always_inline type bytestream_get_ ## name(const uint8_t **b) \
+{ \
+ (*b) += bytes; \
+ return read(*b - bytes); \
+} \
+static av_always_inline void bytestream_put_ ## name(uint8_t **b, \
+ const type value) \
+{ \
+ write(*b, value); \
+ (*b) += bytes; \
+} \
+static av_always_inline void bytestream2_put_ ## name ## u(PutByteContext *p, \
+ const type value) \
+{ \
+ bytestream_put_ ## name(&p->buffer, value); \
+} \
+static av_always_inline void bytestream2_put_ ## name(PutByteContext *p, \
+ const type value) \
+{ \
+ if (!p->eof && (p->buffer_end - p->buffer >= bytes)) { \
+ write(p->buffer, value); \
+ p->buffer += bytes; \
+ } else \
+ p->eof = 1; \
+} \
+static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g) \
+{ \
+ return bytestream_get_ ## name(&g->buffer); \
+} \
+static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \
+{ \
+ if (g->buffer_end - g->buffer < bytes) { \
+ g->buffer = g->buffer_end; \
+ return 0; \
+ } \
+ return bytestream2_get_ ## name ## u(g); \
+} \
+static av_always_inline type bytestream2_peek_ ## name ## u(GetByteContext *g) \
+{ \
+ return read(g->buffer); \
+} \
+static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \
+{ \
+ if (g->buffer_end - g->buffer < bytes) \
+ return 0; \
+ return bytestream2_peek_ ## name ## u(g); \
+}
+
+DEF(uint64_t, le64, 8, AV_RL64, AV_WL64)
+DEF(unsigned int, le32, 4, AV_RL32, AV_WL32)
+DEF(unsigned int, le24, 3, AV_RL24, AV_WL24)
+DEF(unsigned int, le16, 2, AV_RL16, AV_WL16)
+DEF(uint64_t, be64, 8, AV_RB64, AV_WB64)
+DEF(unsigned int, be32, 4, AV_RB32, AV_WB32)
+DEF(unsigned int, be24, 3, AV_RB24, AV_WB24)
+DEF(unsigned int, be16, 2, AV_RB16, AV_WB16)
+DEF(unsigned int, byte, 1, AV_RB8 , AV_WB8)
+
+#if AV_HAVE_BIGENDIAN
+# define bytestream2_get_ne16 bytestream2_get_be16
+# define bytestream2_get_ne24 bytestream2_get_be24
+# define bytestream2_get_ne32 bytestream2_get_be32
+# define bytestream2_get_ne64 bytestream2_get_be64
+# define bytestream2_get_ne16u bytestream2_get_be16u
+# define bytestream2_get_ne24u bytestream2_get_be24u
+# define bytestream2_get_ne32u bytestream2_get_be32u
+# define bytestream2_get_ne64u bytestream2_get_be64u
+# define bytestream2_put_ne16 bytestream2_put_be16
+# define bytestream2_put_ne24 bytestream2_put_be24
+# define bytestream2_put_ne32 bytestream2_put_be32
+# define bytestream2_put_ne64 bytestream2_put_be64
+# define bytestream2_peek_ne16 bytestream2_peek_be16
+# define bytestream2_peek_ne24 bytestream2_peek_be24
+# define bytestream2_peek_ne32 bytestream2_peek_be32
+# define bytestream2_peek_ne64 bytestream2_peek_be64
+#else
+# define bytestream2_get_ne16 bytestream2_get_le16
+# define bytestream2_get_ne24 bytestream2_get_le24
+# define bytestream2_get_ne32 bytestream2_get_le32
+# define bytestream2_get_ne64 bytestream2_get_le64
+# define bytestream2_get_ne16u bytestream2_get_le16u
+# define bytestream2_get_ne24u bytestream2_get_le24u
+# define bytestream2_get_ne32u bytestream2_get_le32u
+# define bytestream2_get_ne64u bytestream2_get_le64u
+# define bytestream2_put_ne16 bytestream2_put_le16
+# define bytestream2_put_ne24 bytestream2_put_le24
+# define bytestream2_put_ne32 bytestream2_put_le32
+# define bytestream2_put_ne64 bytestream2_put_le64
+# define bytestream2_peek_ne16 bytestream2_peek_le16
+# define bytestream2_peek_ne24 bytestream2_peek_le24
+# define bytestream2_peek_ne32 bytestream2_peek_le32
+# define bytestream2_peek_ne64 bytestream2_peek_le64
+#endif
+
+static av_always_inline void bytestream2_init(GetByteContext *g,
+ const uint8_t *buf,
+ int buf_size)
+{
+ av_assert0(buf_size >= 0);
+ g->buffer = buf;
+ g->buffer_start = buf;
+ g->buffer_end = buf + buf_size;
+}
+
+static av_always_inline void bytestream2_init_writer(PutByteContext *p,
+ uint8_t *buf,
+ int buf_size)
+{
+ av_assert0(buf_size >= 0);
+ p->buffer = buf;
+ p->buffer_start = buf;
+ p->buffer_end = buf + buf_size;
+ p->eof = 0;
+}
+
+static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
+{
+ return g->buffer_end - g->buffer;
+}
+
+static av_always_inline int bytestream2_get_bytes_left_p(PutByteContext *p)
+{
+ return p->buffer_end - p->buffer;
+}
+
+static av_always_inline void bytestream2_skip(GetByteContext *g,
+ unsigned int size)
+{
+ g->buffer += FFMIN(g->buffer_end - g->buffer, size);
+}
+
+static av_always_inline void bytestream2_skipu(GetByteContext *g,
+ unsigned int size)
+{
+ g->buffer += size;
+}
+
+static av_always_inline void bytestream2_skip_p(PutByteContext *p,
+ unsigned int size)
+{
+ int size2;
+ if (p->eof)
+ return;
+ size2 = FFMIN(p->buffer_end - p->buffer, size);
+ if (size2 != size)
+ p->eof = 1;
+ p->buffer += size2;
+}
+
+static av_always_inline int bytestream2_tell(GetByteContext *g)
+{
+ return (int)(g->buffer - g->buffer_start);
+}
+
+static av_always_inline int bytestream2_tell_p(PutByteContext *p)
+{
+ return (int)(p->buffer - p->buffer_start);
+}
+
+static av_always_inline int bytestream2_size(GetByteContext *g)
+{
+ return (int)(g->buffer_end - g->buffer_start);
+}
+
+static av_always_inline int bytestream2_size_p(PutByteContext *p)
+{
+ return (int)(p->buffer_end - p->buffer_start);
+}
+
+static av_always_inline int bytestream2_seek(GetByteContext *g,
+ int offset,
+ int whence)
+{
+ switch (whence) {
+ case SEEK_CUR:
+ offset = av_clip(offset, -(g->buffer - g->buffer_start),
+ g->buffer_end - g->buffer);
+ g->buffer += offset;
+ break;
+ case SEEK_END:
+ offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0);
+ g->buffer = g->buffer_end + offset;
+ break;
+ case SEEK_SET:
+ offset = av_clip(offset, 0, g->buffer_end - g->buffer_start);
+ g->buffer = g->buffer_start + offset;
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+ return bytestream2_tell(g);
+}
+
+static av_always_inline int bytestream2_seek_p(PutByteContext *p,
+ int offset,
+ int whence)
+{
+ p->eof = 0;
+ switch (whence) {
+ case SEEK_CUR:
+ if (p->buffer_end - p->buffer < offset)
+ p->eof = 1;
+ offset = av_clip(offset, -(p->buffer - p->buffer_start),
+ p->buffer_end - p->buffer);
+ p->buffer += offset;
+ break;
+ case SEEK_END:
+ if (offset > 0)
+ p->eof = 1;
+ offset = av_clip(offset, -(p->buffer_end - p->buffer_start), 0);
+ p->buffer = p->buffer_end + offset;
+ break;
+ case SEEK_SET:
+ if (p->buffer_end - p->buffer_start < offset)
+ p->eof = 1;
+ offset = av_clip(offset, 0, p->buffer_end - p->buffer_start);
+ p->buffer = p->buffer_start + offset;
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+ return bytestream2_tell_p(p);
+}
+
+static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
+ uint8_t *dst,
+ unsigned int size)
+{
+ int size2 = FFMIN(g->buffer_end - g->buffer, size);
+ memcpy(dst, g->buffer, size2);
+ g->buffer += size2;
+ return size2;
+}
+
+static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g,
+ uint8_t *dst,
+ unsigned int size)
+{
+ memcpy(dst, g->buffer, size);
+ g->buffer += size;
+ return size;
+}
+
+static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p,
+ const uint8_t *src,
+ unsigned int size)
+{
+ int size2;
+ if (p->eof)
+ return 0;
+ size2 = FFMIN(p->buffer_end - p->buffer, size);
+ if (size2 != size)
+ p->eof = 1;
+ memcpy(p->buffer, src, size2);
+ p->buffer += size2;
+ return size2;
+}
+
+static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p,
+ const uint8_t *src,
+ unsigned int size)
+{
+ memcpy(p->buffer, src, size);
+ p->buffer += size;
+ return size;
+}
+
+static av_always_inline void bytestream2_set_buffer(PutByteContext *p,
+ const uint8_t c,
+ unsigned int size)
+{
+ int size2;
+ if (p->eof)
+ return;
+ size2 = FFMIN(p->buffer_end - p->buffer, size);
+ if (size2 != size)
+ p->eof = 1;
+ memset(p->buffer, c, size2);
+ p->buffer += size2;
+}
+
+static av_always_inline void bytestream2_set_bufferu(PutByteContext *p,
+ const uint8_t c,
+ unsigned int size)
+{
+ memset(p->buffer, c, size);
+ p->buffer += size;
+}
+
+static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
+{
+ return p->eof;
+}
+
+static av_always_inline unsigned int bytestream2_copy_bufferu(PutByteContext *p,
+ GetByteContext *g,
+ unsigned int size)
+{
+ memcpy(p->buffer, g->buffer, size);
+ p->buffer += size;
+ g->buffer += size;
+ return size;
+}
+
+static av_always_inline unsigned int bytestream2_copy_buffer(PutByteContext *p,
+ GetByteContext *g,
+ unsigned int size)
+{
+ int size2;
+
+ if (p->eof)
+ return 0;
+ size = FFMIN(g->buffer_end - g->buffer, size);
+ size2 = FFMIN(p->buffer_end - p->buffer, size);
+ if (size2 != size)
+ p->eof = 1;
+
+ return bytestream2_copy_bufferu(p, g, size2);
+}
+
+static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b,
+ uint8_t *dst,
+ unsigned int size)
+{
+ memcpy(dst, *b, size);
+ (*b) += size;
+ return size;
+}
+
+static av_always_inline void bytestream_put_buffer(uint8_t **b,
+ const uint8_t *src,
+ unsigned int size)
+{
+ memcpy(*b, src, size);
+ (*b) += size;
+}
+
+#endif /* AVCODEC_BYTESTREAM_H */
diff --git a/media/ffvpx/libavcodec/cbs.c b/media/ffvpx/libavcodec/cbs.c
new file mode 100644
index 0000000000..504197e06d
--- /dev/null
+++ b/media/ffvpx/libavcodec/cbs.c
@@ -0,0 +1,1028 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+
+#include "config.h"
+
+#include "libavutil/avassert.h"
+#include "libavutil/buffer.h"
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+
+#include "avcodec.h"
+#include "cbs.h"
+#include "cbs_internal.h"
+
+
+static const CodedBitstreamType *const cbs_type_table[] = {
+#if CONFIG_CBS_AV1
+ &ff_cbs_type_av1,
+#endif
+#if CONFIG_CBS_H264
+ &ff_cbs_type_h264,
+#endif
+#if CONFIG_CBS_H265
+ &ff_cbs_type_h265,
+#endif
+#if CONFIG_CBS_JPEG
+ &ff_cbs_type_jpeg,
+#endif
+#if CONFIG_CBS_MPEG2
+ &ff_cbs_type_mpeg2,
+#endif
+#if CONFIG_CBS_VP9
+ &ff_cbs_type_vp9,
+#endif
+};
+
+const enum AVCodecID ff_cbs_all_codec_ids[] = {
+#if CONFIG_CBS_AV1
+ AV_CODEC_ID_AV1,
+#endif
+#if CONFIG_CBS_H264
+ AV_CODEC_ID_H264,
+#endif
+#if CONFIG_CBS_H265
+ AV_CODEC_ID_H265,
+#endif
+#if CONFIG_CBS_JPEG
+ AV_CODEC_ID_MJPEG,
+#endif
+#if CONFIG_CBS_MPEG2
+ AV_CODEC_ID_MPEG2VIDEO,
+#endif
+#if CONFIG_CBS_VP9
+ AV_CODEC_ID_VP9,
+#endif
+ AV_CODEC_ID_NONE
+};
+
+av_cold int ff_cbs_init(CodedBitstreamContext **ctx_ptr,
+ enum AVCodecID codec_id, void *log_ctx)
+{
+ CodedBitstreamContext *ctx;
+ const CodedBitstreamType *type;
+ int i;
+
+ type = NULL;
+ for (i = 0; i < FF_ARRAY_ELEMS(cbs_type_table); i++) {
+ if (cbs_type_table[i]->codec_id == codec_id) {
+ type = cbs_type_table[i];
+ break;
+ }
+ }
+ if (!type)
+ return AVERROR(EINVAL);
+
+ ctx = av_mallocz(sizeof(*ctx));
+ if (!ctx)
+ return AVERROR(ENOMEM);
+
+ ctx->log_ctx = log_ctx;
+ ctx->codec = type; /* Must be before any error */
+
+ if (type->priv_data_size) {
+ ctx->priv_data = av_mallocz(ctx->codec->priv_data_size);
+ if (!ctx->priv_data) {
+ av_freep(&ctx);
+ return AVERROR(ENOMEM);
+ }
+ if (type->priv_class) {
+ *(const AVClass **)ctx->priv_data = type->priv_class;
+ av_opt_set_defaults(ctx->priv_data);
+ }
+ }
+
+ ctx->decompose_unit_types = NULL;
+
+ ctx->trace_enable = 0;
+ ctx->trace_level = AV_LOG_TRACE;
+
+ *ctx_ptr = ctx;
+ return 0;
+}
+
+av_cold void ff_cbs_flush(CodedBitstreamContext *ctx)
+{
+ if (ctx->codec->flush)
+ ctx->codec->flush(ctx);
+}
+
+av_cold void ff_cbs_close(CodedBitstreamContext **ctx_ptr)
+{
+ CodedBitstreamContext *ctx = *ctx_ptr;
+
+ if (!ctx)
+ return;
+
+ if (ctx->codec->close)
+ ctx->codec->close(ctx);
+
+ av_freep(&ctx->write_buffer);
+
+ if (ctx->codec->priv_class && ctx->priv_data)
+ av_opt_free(ctx->priv_data);
+
+ av_freep(&ctx->priv_data);
+ av_freep(ctx_ptr);
+}
+
+static void cbs_unit_uninit(CodedBitstreamUnit *unit)
+{
+ av_buffer_unref(&unit->content_ref);
+ unit->content = NULL;
+
+ av_buffer_unref(&unit->data_ref);
+ unit->data = NULL;
+ unit->data_size = 0;
+ unit->data_bit_padding = 0;
+}
+
+void ff_cbs_fragment_reset(CodedBitstreamFragment *frag)
+{
+ int i;
+
+ for (i = 0; i < frag->nb_units; i++)
+ cbs_unit_uninit(&frag->units[i]);
+ frag->nb_units = 0;
+
+ av_buffer_unref(&frag->data_ref);
+ frag->data = NULL;
+ frag->data_size = 0;
+ frag->data_bit_padding = 0;
+}
+
+av_cold void ff_cbs_fragment_free(CodedBitstreamFragment *frag)
+{
+ ff_cbs_fragment_reset(frag);
+
+ av_freep(&frag->units);
+ frag->nb_units_allocated = 0;
+}
+
+static int cbs_read_fragment_content(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag)
+{
+ int err, i, j;
+
+ for (i = 0; i < frag->nb_units; i++) {
+ CodedBitstreamUnit *unit = &frag->units[i];
+
+ if (ctx->decompose_unit_types) {
+ for (j = 0; j < ctx->nb_decompose_unit_types; j++) {
+ if (ctx->decompose_unit_types[j] == unit->type)
+ break;
+ }
+ if (j >= ctx->nb_decompose_unit_types)
+ continue;
+ }
+
+ av_buffer_unref(&unit->content_ref);
+ unit->content = NULL;
+
+ av_assert0(unit->data && unit->data_ref);
+
+ err = ctx->codec->read_unit(ctx, unit);
+ if (err == AVERROR(ENOSYS)) {
+ av_log(ctx->log_ctx, AV_LOG_VERBOSE,
+ "Decomposition unimplemented for unit %d "
+ "(type %"PRIu32").\n", i, unit->type);
+ } else if (err == AVERROR(EAGAIN)) {
+ av_log(ctx->log_ctx, AV_LOG_VERBOSE,
+ "Skipping decomposition of unit %d "
+ "(type %"PRIu32").\n", i, unit->type);
+ av_buffer_unref(&unit->content_ref);
+ unit->content = NULL;
+ } else if (err < 0) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Failed to read unit %d "
+ "(type %"PRIu32").\n", i, unit->type);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int cbs_fill_fragment_data(CodedBitstreamFragment *frag,
+ const uint8_t *data, size_t size)
+{
+ av_assert0(!frag->data && !frag->data_ref);
+
+ frag->data_ref =
+ av_buffer_alloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!frag->data_ref)
+ return AVERROR(ENOMEM);
+
+ frag->data = frag->data_ref->data;
+ frag->data_size = size;
+
+ memcpy(frag->data, data, size);
+ memset(frag->data + size, 0,
+ AV_INPUT_BUFFER_PADDING_SIZE);
+
+ return 0;
+}
+
+static int cbs_read_data(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ AVBufferRef *buf,
+ const uint8_t *data, size_t size,
+ int header)
+{
+ int err;
+
+ if (buf) {
+ frag->data_ref = av_buffer_ref(buf);
+ if (!frag->data_ref)
+ return AVERROR(ENOMEM);
+
+ frag->data = (uint8_t *)data;
+ frag->data_size = size;
+
+ } else {
+ err = cbs_fill_fragment_data(frag, data, size);
+ if (err < 0)
+ return err;
+ }
+
+ err = ctx->codec->split_fragment(ctx, frag, header);
+ if (err < 0)
+ return err;
+
+ return cbs_read_fragment_content(ctx, frag);
+}
+
+int ff_cbs_read_extradata(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ const AVCodecParameters *par)
+{
+ return cbs_read_data(ctx, frag, NULL,
+ par->extradata,
+ par->extradata_size, 1);
+}
+
+int ff_cbs_read_extradata_from_codec(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ const AVCodecContext *avctx)
+{
+ return cbs_read_data(ctx, frag, NULL,
+ avctx->extradata,
+ avctx->extradata_size, 1);
+}
+
+int ff_cbs_read_packet(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ const AVPacket *pkt)
+{
+ return cbs_read_data(ctx, frag, pkt->buf,
+ pkt->data, pkt->size, 0);
+}
+
+int ff_cbs_read_packet_side_data(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ const AVPacket *pkt)
+{
+ size_t side_data_size;
+ const uint8_t *side_data =
+ av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
+ &side_data_size);
+
+ return cbs_read_data(ctx, frag, NULL,
+ side_data, side_data_size, 1);
+}
+
+int ff_cbs_read(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ const uint8_t *data, size_t size)
+{
+ return cbs_read_data(ctx, frag, NULL,
+ data, size, 0);
+}
+
+/**
+ * Allocate a new internal data buffer of the given size in the unit.
+ *
+ * The data buffer will have input padding.
+ */
+static int cbs_alloc_unit_data(CodedBitstreamUnit *unit,
+ size_t size)
+{
+ av_assert0(!unit->data && !unit->data_ref);
+
+ unit->data_ref = av_buffer_alloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!unit->data_ref)
+ return AVERROR(ENOMEM);
+
+ unit->data = unit->data_ref->data;
+ unit->data_size = size;
+
+ memset(unit->data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+ return 0;
+}
+
+static int cbs_write_unit_data(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit)
+{
+ PutBitContext pbc;
+ int ret;
+
+ if (!ctx->write_buffer) {
+ // Initial write buffer size is 1MB.
+ ctx->write_buffer_size = 1024 * 1024;
+
+ reallocate_and_try_again:
+ ret = av_reallocp(&ctx->write_buffer, ctx->write_buffer_size);
+ if (ret < 0) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
+ "sufficiently large write buffer (last attempt "
+ "%"SIZE_SPECIFIER" bytes).\n", ctx->write_buffer_size);
+ return ret;
+ }
+ }
+
+ init_put_bits(&pbc, ctx->write_buffer, ctx->write_buffer_size);
+
+ ret = ctx->codec->write_unit(ctx, unit, &pbc);
+ if (ret < 0) {
+ if (ret == AVERROR(ENOSPC)) {
+ // Overflow.
+ if (ctx->write_buffer_size == INT_MAX / 8)
+ return AVERROR(ENOMEM);
+ ctx->write_buffer_size = FFMIN(2 * ctx->write_buffer_size, INT_MAX / 8);
+ goto reallocate_and_try_again;
+ }
+ // Write failed for some other reason.
+ return ret;
+ }
+
+ // Assert that the write did not silently overflow the buffer.

+ av_assert0(put_bits_count(&pbc) <= 8 * ctx->write_buffer_size);
+
+ if (put_bits_count(&pbc) % 8)
+ unit->data_bit_padding = 8 - put_bits_count(&pbc) % 8;
+ else
+ unit->data_bit_padding = 0;
+
+ flush_put_bits(&pbc);
+
+ ret = cbs_alloc_unit_data(unit, put_bytes_output(&pbc));
+ if (ret < 0)
+ return ret;
+
+ memcpy(unit->data, ctx->write_buffer, unit->data_size);
+
+ return 0;
+}
+
+int ff_cbs_write_fragment_data(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag)
+{
+ int err, i;
+
+ for (i = 0; i < frag->nb_units; i++) {
+ CodedBitstreamUnit *unit = &frag->units[i];
+
+ if (!unit->content)
+ continue;
+
+ av_buffer_unref(&unit->data_ref);
+ unit->data = NULL;
+
+ err = cbs_write_unit_data(ctx, unit);
+ if (err < 0) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Failed to write unit %d "
+ "(type %"PRIu32").\n", i, unit->type);
+ return err;
+ }
+ av_assert0(unit->data && unit->data_ref);
+ }
+
+ av_buffer_unref(&frag->data_ref);
+ frag->data = NULL;
+
+ err = ctx->codec->assemble_fragment(ctx, frag);
+ if (err < 0) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Failed to assemble fragment.\n");
+ return err;
+ }
+ av_assert0(frag->data && frag->data_ref);
+
+ return 0;
+}
+
+int ff_cbs_write_extradata(CodedBitstreamContext *ctx,
+ AVCodecParameters *par,
+ CodedBitstreamFragment *frag)
+{
+ int err;
+
+ err = ff_cbs_write_fragment_data(ctx, frag);
+ if (err < 0)
+ return err;
+
+ av_freep(&par->extradata);
+ par->extradata_size = 0;
+
+ if (!frag->data_size)
+ return 0;
+
+ par->extradata = av_malloc(frag->data_size +
+ AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!par->extradata)
+ return AVERROR(ENOMEM);
+
+ memcpy(par->extradata, frag->data, frag->data_size);
+ memset(par->extradata + frag->data_size, 0,
+ AV_INPUT_BUFFER_PADDING_SIZE);
+ par->extradata_size = frag->data_size;
+
+ return 0;
+}
+
+int ff_cbs_write_packet(CodedBitstreamContext *ctx,
+ AVPacket *pkt,
+ CodedBitstreamFragment *frag)
+{
+ AVBufferRef *buf;
+ int err;
+
+ err = ff_cbs_write_fragment_data(ctx, frag);
+ if (err < 0)
+ return err;
+
+ buf = av_buffer_ref(frag->data_ref);
+ if (!buf)
+ return AVERROR(ENOMEM);
+
+ av_buffer_unref(&pkt->buf);
+
+ pkt->buf = buf;
+ pkt->data = frag->data;
+ pkt->size = frag->data_size;
+
+ return 0;
+}
+
+
+void ff_cbs_trace_header(CodedBitstreamContext *ctx,
+ const char *name)
+{
+ if (!ctx->trace_enable)
+ return;
+
+ av_log(ctx->log_ctx, ctx->trace_level, "%s\n", name);
+}
+
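+// Builds the printable element name by copying str and substituting the
+// values from subscripts[] into each "[...]" placeholder, then logs the
+// bit position, name, raw bits and decoded value at trace_level.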
+void ff_cbs_trace_syntax_element(CodedBitstreamContext *ctx, int position,
+ const char *str, const int *subscripts,
+ const char *bits, int64_t value)
+{
+ char name[256];
+ size_t name_len, bits_len;
+ int pad, subs, i, j, k, n;
+
+ if (!ctx->trace_enable)
+ return;
+
+ av_assert0(value >= INT_MIN && value <= UINT32_MAX);
+
+ subs = subscripts ? subscripts[0] : 0;
+ n = 0;
+ for (i = j = 0; str[i];) {
+ if (str[i] == '[') {
+ if (n < subs) {
+ ++n;
+ k = snprintf(name + j, sizeof(name) - j, "[%d", subscripts[n]);
+ av_assert0(k > 0 && j + k < sizeof(name));
+ j += k;
+ for (++i; str[i] && str[i] != ']'; i++);
+ av_assert0(str[i] == ']');
+ } else {
+ while (str[i] && str[i] != ']')
+ name[j++] = str[i++];
+ av_assert0(str[i] == ']');
+ }
+ } else {
+ av_assert0(j + 1 < sizeof(name));
+ name[j++] = str[i++];
+ }
+ }
+ av_assert0(j + 1 < sizeof(name));
+ name[j] = 0;
+ av_assert0(n == subs);
+
+ name_len = strlen(name);
+ bits_len = strlen(bits);
+
+ if (name_len + bits_len > 60)
+ pad = bits_len + 2;
+ else
+ pad = 61 - name_len;
+
+ av_log(ctx->log_ctx, ctx->trace_level, "%-10d %s%*s = %"PRId64"\n",
+ position, name, pad, bits, value);
+}
+
+int ff_cbs_read_unsigned(CodedBitstreamContext *ctx, GetBitContext *gbc,
+ int width, const char *name,
+ const int *subscripts, uint32_t *write_to,
+ uint32_t range_min, uint32_t range_max)
+{
+ uint32_t value;
+ int position;
+
+ av_assert0(width > 0 && width <= 32);
+
+ if (get_bits_left(gbc) < width) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid value at "
+ "%s: bitstream ended.\n", name);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (ctx->trace_enable)
+ position = get_bits_count(gbc);
+
+ value = get_bits_long(gbc, width);
+
+ if (ctx->trace_enable) {
+ char bits[33];
+ int i;
+ for (i = 0; i < width; i++)
+ bits[i] = value >> (width - i - 1) & 1 ? '1' : '0';
+ bits[i] = 0;
+
+ ff_cbs_trace_syntax_element(ctx, position, name, subscripts,
+ bits, value);
+ }
+
+ if (value < range_min || value > range_max) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
+ "%"PRIu32", but must be in [%"PRIu32",%"PRIu32"].\n",
+ name, value, range_min, range_max);
+ return AVERROR_INVALIDDATA;
+ }
+
+ *write_to = value;
+ return 0;
+}
+
+int ff_cbs_write_unsigned(CodedBitstreamContext *ctx, PutBitContext *pbc,
+ int width, const char *name,
+ const int *subscripts, uint32_t value,
+ uint32_t range_min, uint32_t range_max)
+{
+ av_assert0(width > 0 && width <= 32);
+
+ if (value < range_min || value > range_max) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
+ "%"PRIu32", but must be in [%"PRIu32",%"PRIu32"].\n",
+ name, value, range_min, range_max);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (put_bits_left(pbc) < width)
+ return AVERROR(ENOSPC);
+
+ if (ctx->trace_enable) {
+ char bits[33];
+ int i;
+ for (i = 0; i < width; i++)
+ bits[i] = value >> (width - i - 1) & 1 ? '1' : '0';
+ bits[i] = 0;
+
+ ff_cbs_trace_syntax_element(ctx, put_bits_count(pbc),
+ name, subscripts, bits, value);
+ }
+
+ if (width < 32)
+ put_bits(pbc, width, value);
+ else
+ put_bits32(pbc, value);
+
+ return 0;
+}
+
+int ff_cbs_read_signed(CodedBitstreamContext *ctx, GetBitContext *gbc,
+ int width, const char *name,
+ const int *subscripts, int32_t *write_to,
+ int32_t range_min, int32_t range_max)
+{
+ int32_t value;
+ int position;
+
+ av_assert0(width > 0 && width <= 32);
+
+ if (get_bits_left(gbc) < width) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid value at "
+ "%s: bitstream ended.\n", name);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (ctx->trace_enable)
+ position = get_bits_count(gbc);
+
+ value = get_sbits_long(gbc, width);
+
+ if (ctx->trace_enable) {
+ char bits[33];
+ int i;
+ for (i = 0; i < width; i++)
+ bits[i] = value & (1U << (width - i - 1)) ? '1' : '0';
+ bits[i] = 0;
+
+ ff_cbs_trace_syntax_element(ctx, position, name, subscripts,
+ bits, value);
+ }
+
+ if (value < range_min || value > range_max) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
+ "%"PRId32", but must be in [%"PRId32",%"PRId32"].\n",
+ name, value, range_min, range_max);
+ return AVERROR_INVALIDDATA;
+ }
+
+ *write_to = value;
+ return 0;
+}
+
+int ff_cbs_write_signed(CodedBitstreamContext *ctx, PutBitContext *pbc,
+ int width, const char *name,
+ const int *subscripts, int32_t value,
+ int32_t range_min, int32_t range_max)
+{
+ av_assert0(width > 0 && width <= 32);
+
+ if (value < range_min || value > range_max) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
+ "%"PRId32", but must be in [%"PRId32",%"PRId32"].\n",
+ name, value, range_min, range_max);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (put_bits_left(pbc) < width)
+ return AVERROR(ENOSPC);
+
+ if (ctx->trace_enable) {
+ char bits[33];
+ int i;
+ for (i = 0; i < width; i++)
+ bits[i] = value & (1U << (width - i - 1)) ? '1' : '0';
+ bits[i] = 0;
+
+ ff_cbs_trace_syntax_element(ctx, put_bits_count(pbc),
+ name, subscripts, bits, value);
+ }
+
+ if (width < 32)
+ put_sbits(pbc, width, value);
+ else
+ put_bits32(pbc, value);
+
+ return 0;
+}
+
+
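+// Makes room for one unit at the given position.  If the existing array
+// has spare capacity the tail is shifted up in place; otherwise a new
+// array of (2 * allocated + 1) entries is allocated and the units before
+// and after the insertion point are copied across.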
+static int cbs_insert_unit(CodedBitstreamFragment *frag,
+ int position)
+{
+ CodedBitstreamUnit *units;
+
+ if (frag->nb_units < frag->nb_units_allocated) {
+ units = frag->units;
+
+ if (position < frag->nb_units)
+ memmove(units + position + 1, units + position,
+ (frag->nb_units - position) * sizeof(*units));
+ } else {
+ units = av_malloc_array(frag->nb_units*2 + 1, sizeof(*units));
+ if (!units)
+ return AVERROR(ENOMEM);
+
+ frag->nb_units_allocated = 2*frag->nb_units_allocated + 1;
+
+ if (position > 0)
+ memcpy(units, frag->units, position * sizeof(*units));
+
+ if (position < frag->nb_units)
+ memcpy(units + position + 1, frag->units + position,
+ (frag->nb_units - position) * sizeof(*units));
+ }
+
+ memset(units + position, 0, sizeof(*units));
+
+ if (units != frag->units) {
+ av_free(frag->units);
+ frag->units = units;
+ }
+
+ ++frag->nb_units;
+
+ return 0;
+}
+
+int ff_cbs_insert_unit_content(CodedBitstreamFragment *frag,
+ int position,
+ CodedBitstreamUnitType type,
+ void *content,
+ AVBufferRef *content_buf)
+{
+ CodedBitstreamUnit *unit;
+ AVBufferRef *content_ref;
+ int err;
+
+ if (position == -1)
+ position = frag->nb_units;
+ av_assert0(position >= 0 && position <= frag->nb_units);
+
+ if (content_buf) {
+ content_ref = av_buffer_ref(content_buf);
+ if (!content_ref)
+ return AVERROR(ENOMEM);
+ } else {
+ content_ref = NULL;
+ }
+
+ err = cbs_insert_unit(frag, position);
+ if (err < 0) {
+ av_buffer_unref(&content_ref);
+ return err;
+ }
+
+ unit = &frag->units[position];
+ unit->type = type;
+ unit->content = content;
+ unit->content_ref = content_ref;
+
+ return 0;
+}
+
+static int cbs_insert_unit_data(CodedBitstreamFragment *frag,
+ CodedBitstreamUnitType type,
+ uint8_t *data, size_t data_size,
+ AVBufferRef *data_buf,
+ int position)
+{
+ CodedBitstreamUnit *unit;
+ AVBufferRef *data_ref;
+ int err;
+
+ av_assert0(position >= 0 && position <= frag->nb_units);
+
+ if (data_buf)
+ data_ref = av_buffer_ref(data_buf);
+ else
+ data_ref = av_buffer_create(data, data_size, NULL, NULL, 0);
+ if (!data_ref) {
+ if (!data_buf)
+ av_free(data);
+ return AVERROR(ENOMEM);
+ }
+
+ err = cbs_insert_unit(frag, position);
+ if (err < 0) {
+ av_buffer_unref(&data_ref);
+ return err;
+ }
+
+ unit = &frag->units[position];
+ unit->type = type;
+ unit->data = data;
+ unit->data_size = data_size;
+ unit->data_ref = data_ref;
+
+ return 0;
+}
+
+int ff_cbs_append_unit_data(CodedBitstreamFragment *frag,
+ CodedBitstreamUnitType type,
+ uint8_t *data, size_t data_size,
+ AVBufferRef *data_buf)
+{
+ return cbs_insert_unit_data(frag, type,
+ data, data_size, data_buf,
+ frag->nb_units);
+}
+
+void ff_cbs_delete_unit(CodedBitstreamFragment *frag,
+ int position)
+{
+ av_assert0(0 <= position && position < frag->nb_units
+ && "Unit to be deleted not in fragment.");
+
+ cbs_unit_uninit(&frag->units[position]);
+
+ --frag->nb_units;
+
+ if (frag->nb_units > 0)
+ memmove(frag->units + position,
+ frag->units + position + 1,
+ (frag->nb_units - position) * sizeof(*frag->units));
+}
+
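+// Default free callback for unit content using internal references: each
+// entry in desc->type.ref.offsets points at a data pointer inside the
+// content struct which is immediately followed by its AVBufferRef *, so
+// every such buffer reference is released before the struct itself.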
+static void cbs_default_free_unit_content(void *opaque, uint8_t *data)
+{
+ const CodedBitstreamUnitTypeDescriptor *desc = opaque;
+
+ for (int i = 0; i < desc->type.ref.nb_offsets; i++) {
+ void **ptr = (void**)(data + desc->type.ref.offsets[i]);
+ av_buffer_unref((AVBufferRef**)(ptr + 1));
+ }
+ av_free(data);
+}
+
+static const CodedBitstreamUnitTypeDescriptor
+ *cbs_find_unit_type_desc(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit)
+{
+ const CodedBitstreamUnitTypeDescriptor *desc;
+ int i, j;
+
+ if (!ctx->codec->unit_types)
+ return NULL;
+
+ for (i = 0;; i++) {
+ desc = &ctx->codec->unit_types[i];
+ if (desc->nb_unit_types == 0)
+ break;
+ if (desc->nb_unit_types == CBS_UNIT_TYPE_RANGE) {
+ if (unit->type >= desc->unit_type.range.start &&
+ unit->type <= desc->unit_type.range.end)
+ return desc;
+ } else {
+ for (j = 0; j < desc->nb_unit_types; j++) {
+ if (desc->unit_type.list[j] == unit->type)
+ return desc;
+ }
+ }
+ }
+ return NULL;
+}
+
+int ff_cbs_alloc_unit_content(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit)
+{
+ const CodedBitstreamUnitTypeDescriptor *desc;
+
+ av_assert0(!unit->content && !unit->content_ref);
+
+ desc = cbs_find_unit_type_desc(ctx, unit);
+ if (!desc)
+ return AVERROR(ENOSYS);
+
+ unit->content = av_mallocz(desc->content_size);
+ if (!unit->content)
+ return AVERROR(ENOMEM);
+
+ unit->content_ref =
+ av_buffer_create(unit->content, desc->content_size,
+ desc->content_type == CBS_CONTENT_TYPE_COMPLEX
+ ? desc->type.complex.content_free
+ : cbs_default_free_unit_content,
+ (void*)desc, 0);
+ if (!unit->content_ref) {
+ av_freep(&unit->content);
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
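+// Deep-clones unit content that uses internal references: the content
+// struct is duplicated and a new reference is taken on every refcounted
+// buffer listed in the descriptor.  A non-refcounted internal pointer
+// cannot be cloned and makes the call fail with EINVAL.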
+static int cbs_clone_internal_refs_unit_content(AVBufferRef **clone_ref,
+ const CodedBitstreamUnit *unit,
+ const CodedBitstreamUnitTypeDescriptor *desc)
+{
+ const uint8_t *src;
+ uint8_t *copy;
+ int err, i;
+
+ av_assert0(unit->content);
+ src = unit->content;
+
+ copy = av_memdup(src, desc->content_size);
+ if (!copy)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < desc->type.ref.nb_offsets; i++) {
+ const uint8_t *const *src_ptr = (const uint8_t* const*)(src + desc->type.ref.offsets[i]);
+ const AVBufferRef *src_buf = *(AVBufferRef**)(src_ptr + 1);
+ uint8_t **copy_ptr = (uint8_t**)(copy + desc->type.ref.offsets[i]);
+ AVBufferRef **copy_buf = (AVBufferRef**)(copy_ptr + 1);
+
+ if (!*src_ptr) {
+ av_assert0(!src_buf);
+ continue;
+ }
+ if (!src_buf) {
+ // We can't handle a non-refcounted pointer here - we don't
+ // have enough information to handle whatever structure lies
+ // at the other end of it.
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ *copy_buf = av_buffer_ref(src_buf);
+ if (!*copy_buf) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+
+ *clone_ref = av_buffer_create(copy, desc->content_size,
+ cbs_default_free_unit_content,
+ (void*)desc, 0);
+ if (!*clone_ref) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ for (--i; i >= 0; i--)
+ av_buffer_unref((AVBufferRef**)(copy + desc->type.ref.offsets[i]));
+ av_freep(&copy);
+ *clone_ref = NULL;
+ return err;
+}
+
+/*
+ * On success, unit->content and unit->content_ref are updated with
+ * the new content; unit is untouched on failure.
+ * Any old content_ref is simply overwritten and not freed.
+ */
+static int cbs_clone_unit_content(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit)
+{
+ const CodedBitstreamUnitTypeDescriptor *desc;
+ AVBufferRef *ref;
+ int err;
+
+ desc = cbs_find_unit_type_desc(ctx, unit);
+ if (!desc)
+ return AVERROR(ENOSYS);
+
+ switch (desc->content_type) {
+ case CBS_CONTENT_TYPE_INTERNAL_REFS:
+ err = cbs_clone_internal_refs_unit_content(&ref, unit, desc);
+ break;
+
+ case CBS_CONTENT_TYPE_COMPLEX:
+ if (!desc->type.complex.content_clone)
+ return AVERROR_PATCHWELCOME;
+ err = desc->type.complex.content_clone(&ref, unit);
+ break;
+
+ default:
+ av_assert0(0 && "Invalid content type.");
+ }
+
+ if (err < 0)
+ return err;
+
+ unit->content_ref = ref;
+ unit->content = ref->data;
+ return 0;
+}
+
+int ff_cbs_make_unit_refcounted(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit)
+{
+ av_assert0(unit->content);
+ if (unit->content_ref)
+ return 0;
+ return cbs_clone_unit_content(ctx, unit);
+}
+
+int ff_cbs_make_unit_writable(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit)
+{
+ AVBufferRef *ref = unit->content_ref;
+ int err;
+
+ av_assert0(unit->content);
+ if (ref && av_buffer_is_writable(ref))
+ return 0;
+
+ err = cbs_clone_unit_content(ctx, unit);
+ if (err < 0)
+ return err;
+ av_buffer_unref(&ref);
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/cbs.h b/media/ffvpx/libavcodec/cbs.h
new file mode 100644
index 0000000000..ee21623dac
--- /dev/null
+++ b/media/ffvpx/libavcodec/cbs.h
@@ -0,0 +1,436 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CBS_H
+#define AVCODEC_CBS_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/buffer.h"
+
+#include "codec_id.h"
+#include "codec_par.h"
+#include "packet.h"
+
+
+/*
+ * This defines a framework for converting between a coded bitstream
+ * and structures defining all individual syntax elements found in
+ * such a stream.
+ *
+ * Conversion in both directions is possible. Given a coded bitstream
+ * (any meaningful fragment), it can be parsed and decomposed into
+ * syntax elements stored in a set of codec-specific structures.
+ * Similarly, given a set of those same codec-specific structures the
+ * syntax elements can be serialised and combined to create a coded
+ * bitstream.
+ */
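+
+/*
+ * Typical usage, sketched (error handling and the actual editing of the
+ * decomposed unit content are omitted; log_ctx and pkt stand for the
+ * caller's logging context and AVPacket):
+ *
+ *     CodedBitstreamContext *cbs  = NULL;
+ *     CodedBitstreamFragment frag = { 0 };
+ *
+ *     ff_cbs_init(&cbs, AV_CODEC_ID_AV1, log_ctx);
+ *     ff_cbs_read_packet(cbs, &frag, pkt);
+ *     // ... inspect or modify frag.units[i].content ...
+ *     ff_cbs_write_packet(cbs, pkt, &frag);
+ *
+ *     ff_cbs_fragment_reset(&frag);   // reuse between packets
+ *     ff_cbs_fragment_free(&frag);    // final cleanup
+ *     ff_cbs_close(&cbs);
+ */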
+
+struct AVCodecContext;
+struct CodedBitstreamType;
+
+/**
+ * The codec-specific type of a bitstream unit.
+ *
+ * AV1: obu_type
+ * H.264 / AVC: nal_unit_type
+ * H.265 / HEVC: nal_unit_type
+ * JPEG: marker value (without 0xff prefix)
+ * MPEG-2: start code value (without prefix)
+ * VP9: unused, set to zero (every unit is a frame)
+ */
+typedef uint32_t CodedBitstreamUnitType;
+
+/**
+ * Coded bitstream unit structure.
+ *
+ * A bitstream unit is the smallest element of a bitstream which
+ * is meaningful on its own. For example, an H.264 NAL unit.
+ *
+ * See the codec-specific header for the meaning of this for any
+ * particular codec.
+ */
+typedef struct CodedBitstreamUnit {
+ /**
+ * Codec-specific type of this unit.
+ */
+ CodedBitstreamUnitType type;
+
+ /**
+ * Pointer to the directly-parsable bitstream form of this unit.
+ *
+ * May be NULL if the unit currently only exists in decomposed form.
+ */
+ uint8_t *data;
+ /**
+ * The number of bytes in the bitstream (including any padding bits
+ * in the final byte).
+ */
+ size_t data_size;
+ /**
+ * The number of bits which should be ignored in the final byte.
+ *
+ * This supports non-byte-aligned bitstreams.
+ */
+ size_t data_bit_padding;
+ /**
+ * A reference to the buffer containing data.
+ *
+ * Must be set if data is not NULL.
+ */
+ AVBufferRef *data_ref;
+
+ /**
+ * Pointer to the decomposed form of this unit.
+ *
+ * The type of this structure depends on both the codec and the
+ * type of this unit. May be NULL if the unit only exists in
+ * bitstream form.
+ */
+ void *content;
+ /**
+ * If content is reference counted, a reference to the buffer containing
+ * content. Null if content is not reference counted.
+ */
+ AVBufferRef *content_ref;
+} CodedBitstreamUnit;
+
+/**
+ * Coded bitstream fragment structure, combining one or more units.
+ *
+ * This is any sequence of units. It need not form some greater whole,
+ * though in many cases it will. For example, an H.264 access unit,
+ * which is composed of a sequence of H.264 NAL units.
+ */
+typedef struct CodedBitstreamFragment {
+ /**
+ * Pointer to the bitstream form of this fragment.
+ *
+ * May be NULL if the fragment only exists as component units.
+ */
+ uint8_t *data;
+ /**
+ * The number of bytes in the bitstream (including any padding bits
+ * in the final byte).
+ */
+ size_t data_size;
+ /**
+ * The number of bits which should be ignored in the final byte.
+ */
+ size_t data_bit_padding;
+ /**
+ * A reference to the buffer containing data.
+ *
+ * Must be set if data is not NULL.
+ */
+ AVBufferRef *data_ref;
+
+ /**
+ * Number of units in this fragment.
+ *
+ * This may be zero if the fragment only exists in bitstream form
+ * and has not been decomposed.
+ */
+ int nb_units;
+
+ /**
+ * Number of allocated units.
+ *
+ * Must always be >= nb_units; designed for internal use by cbs.
+ */
+ int nb_units_allocated;
+
+ /**
+ * Pointer to an array of units of length nb_units_allocated.
+ * Only the first nb_units are valid.
+ *
+ * Must be NULL if nb_units_allocated is zero.
+ */
+ CodedBitstreamUnit *units;
+} CodedBitstreamFragment;
+
+/**
+ * Context structure for coded bitstream operations.
+ */
+typedef struct CodedBitstreamContext {
+ /**
+ * Logging context to be passed to all av_log() calls associated
+ * with this context.
+ */
+ void *log_ctx;
+
+ /**
+ * Internal codec-specific hooks.
+ */
+ const struct CodedBitstreamType *codec;
+
+ /**
+ * Internal codec-specific data.
+ *
+ * This contains any information needed when reading/writing
+ * bitstreams which will not necessarily be present in a fragment.
+ * For example, for H.264 it contains all currently visible
+ * parameter sets - they are required to determine the bitstream
+ * syntax but need not be present in every access unit.
+ */
+ void *priv_data;
+
+ /**
+ * Array of unit types which should be decomposed when reading.
+ *
+ * Types not in this list will be available in bitstream form only.
+ * If NULL, all supported types will be decomposed.
+ */
+ const CodedBitstreamUnitType *decompose_unit_types;
+ /**
+ * Length of the decompose_unit_types array.
+ */
+ int nb_decompose_unit_types;
+
+ /**
+ * Enable trace output during read/write operations.
+ */
+ int trace_enable;
+ /**
+ * Log level to use for trace output.
+ *
+ * From AV_LOG_*; defaults to AV_LOG_TRACE.
+ */
+ int trace_level;
+
+ /**
+ * Write buffer. Used as intermediate buffer when writing units.
+ * For internal use of cbs only.
+ */
+ uint8_t *write_buffer;
+ size_t write_buffer_size;
+} CodedBitstreamContext;
+
+
+/**
+ * Table of all supported codec IDs.
+ *
+ * Terminated by AV_CODEC_ID_NONE.
+ */
+extern const enum AVCodecID ff_cbs_all_codec_ids[];
+
+
+/**
+ * Create and initialise a new context for the given codec.
+ */
+int ff_cbs_init(CodedBitstreamContext **ctx,
+ enum AVCodecID codec_id, void *log_ctx);
+
+/**
+ * Reset all internal state in a context.
+ */
+void ff_cbs_flush(CodedBitstreamContext *ctx);
+
+/**
+ * Close a context and free all internal state.
+ */
+void ff_cbs_close(CodedBitstreamContext **ctx);
+
+
+/**
+ * Read the extradata bitstream found in codec parameters into a
+ * fragment, then split into units and decompose.
+ *
+ * This also updates the internal state, so will need to be called for
+ * codecs with extradata to read parameter sets necessary for further
+ * parsing even if the fragment itself is not desired.
+ *
+ * The fragment must have been zeroed or reset via ff_cbs_fragment_reset
+ * before use.
+ */
+int ff_cbs_read_extradata(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ const AVCodecParameters *par);
+
+/**
+ * Read the extradata bitstream found in a codec context into a
+ * fragment, then split into units and decompose.
+ *
+ * This acts identically to ff_cbs_read_extradata() for the case where
+ * you already have a codec context.
+ */
+int ff_cbs_read_extradata_from_codec(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ const struct AVCodecContext *avctx);
+
+int ff_cbs_read_packet_side_data(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ const AVPacket *pkt);
+
+/**
+ * Read the data bitstream from a packet into a fragment, then
+ * split into units and decompose.
+ *
+ * This also updates the internal state of the coded bitstream context
+ * with any persistent data from the fragment which may be required to
+ * read following fragments (e.g. parameter sets).
+ *
+ * The fragment must have been zeroed or reset via ff_cbs_fragment_reset
+ * before use.
+ */
+int ff_cbs_read_packet(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ const AVPacket *pkt);
+
+/**
+ * Read a bitstream from a memory region into a fragment, then
+ * split into units and decompose.
+ *
+ * This also updates the internal state of the coded bitstream context
+ * with any persistent data from the fragment which may be required to
+ * read following fragments (e.g. parameter sets).
+ *
+ * The fragment must have been zeroed or reset via ff_cbs_fragment_reset
+ * before use.
+ */
+int ff_cbs_read(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ const uint8_t *data, size_t size);
+
+
+/**
+ * Write the content of the fragment to its own internal buffer.
+ *
+ * Writes the content of all units and then assembles them into a new
+ * data buffer. When modifying the content of decomposed units, this
+ * can be used to regenerate the bitstream form of units or the whole
+ * fragment so that it can be extracted for other use.
+ *
+ * This also updates the internal state of the coded bitstream context
+ * with any persistent data from the fragment which may be required to
+ * write following fragments (e.g. parameter sets).
+ */
+int ff_cbs_write_fragment_data(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag);
+
+/**
+ * Write the bitstream of a fragment to the extradata in codec parameters.
+ *
+ * Modifies context and fragment as ff_cbs_write_fragment_data does and
+ * replaces any existing extradata in the structure.
+ */
+int ff_cbs_write_extradata(CodedBitstreamContext *ctx,
+ AVCodecParameters *par,
+ CodedBitstreamFragment *frag);
+
+/**
+ * Write the bitstream of a fragment to a packet.
+ *
+ * Modifies context and fragment as ff_cbs_write_fragment_data does.
+ *
+ * On success, the packet's buf is unreferenced and its buf, data and
+ * size fields are set to the corresponding values from the newly updated
+ * fragment; other fields are not touched. On failure, the packet is not
+ * touched at all.
+ */
+int ff_cbs_write_packet(CodedBitstreamContext *ctx,
+ AVPacket *pkt,
+ CodedBitstreamFragment *frag);
+
+
+/**
+ * Free the units contained in a fragment as well as the fragment's
+ * own data buffer, but not the units array itself.
+ */
+void ff_cbs_fragment_reset(CodedBitstreamFragment *frag);
+
+/**
+ * Free the units array of a fragment in addition to what
+ * ff_cbs_fragment_reset does.
+ */
+void ff_cbs_fragment_free(CodedBitstreamFragment *frag);
+
+/**
+ * Allocate a new internal content buffer matching the type of the unit.
+ *
+ * The content will be zeroed.
+ */
+int ff_cbs_alloc_unit_content(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit);
+
+/**
+ * Insert a new unit into a fragment with the given content.
+ *
+ * The content structure continues to be owned by the caller if
+ * content_buf is not supplied.
+ */
+int ff_cbs_insert_unit_content(CodedBitstreamFragment *frag,
+ int position,
+ CodedBitstreamUnitType type,
+ void *content,
+ AVBufferRef *content_buf);
+
+/**
+ * Add a new unit to a fragment with the given data bitstream.
+ *
+ * If data_buf is not supplied then data must have been allocated with
+ * av_malloc() and will on success become owned by the unit after this
+ * call or freed on error.
+ */
+int ff_cbs_append_unit_data(CodedBitstreamFragment *frag,
+ CodedBitstreamUnitType type,
+ uint8_t *data, size_t data_size,
+ AVBufferRef *data_buf);
+
+/**
+ * Delete a unit from a fragment and free all memory it uses.
+ *
+ * Requires position to be >= 0 and < frag->nb_units.
+ */
+void ff_cbs_delete_unit(CodedBitstreamFragment *frag,
+ int position);
+
+
+/**
+ * Make the content of a unit refcounted.
+ *
+ * If the unit is not refcounted, this will do a deep copy of the unit
+ * content to new refcounted buffers.
+ *
+ * It is not valid to call this function on a unit which does not have
+ * decomposed content.
+ */
+int ff_cbs_make_unit_refcounted(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit);
+
+/**
+ * Make the content of a unit writable so that internal fields can be
+ * modified.
+ *
+ * If it is known that there are no other references to the content of
+ * the unit, does nothing and returns success. Otherwise (including the
+ * case where the unit content is not refcounted), it does a full clone
+ * of the content (including any internal buffers) to make a new copy,
+ * and replaces the existing references inside the unit with that.
+ *
+ * It is not valid to call this function on a unit which does not have
+ * decomposed content.
+ */
+int ff_cbs_make_unit_writable(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit);
+
+
+#endif /* AVCODEC_CBS_H */
diff --git a/media/ffvpx/libavcodec/cbs_av1.c b/media/ffvpx/libavcodec/cbs_av1.c
new file mode 100644
index 0000000000..45e1288a51
--- /dev/null
+++ b/media/ffvpx/libavcodec/cbs_av1.c
@@ -0,0 +1,1366 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixfmt.h"
+
+#include "avcodec.h"
+#include "cbs.h"
+#include "cbs_internal.h"
+#include "cbs_av1.h"
+
+
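+// uvlc(): the AV1 variable-length code.  A run of z leading zero bits is
+// followed by a 1 and then z literal bits; the decoded value is
+// literal + 2^z - 1.  A run of 32 or more zeroes maps to UINT32_MAX.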
+static int cbs_av1_read_uvlc(CodedBitstreamContext *ctx, GetBitContext *gbc,
+ const char *name, uint32_t *write_to,
+ uint32_t range_min, uint32_t range_max)
+{
+ uint32_t zeroes, bits_value, value;
+ int position;
+
+ if (ctx->trace_enable)
+ position = get_bits_count(gbc);
+
+ zeroes = 0;
+ while (1) {
+ if (get_bits_left(gbc) < 1) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid uvlc code at "
+ "%s: bitstream ended.\n", name);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (get_bits1(gbc))
+ break;
+ ++zeroes;
+ }
+
+ if (zeroes >= 32) {
+ value = MAX_UINT_BITS(32);
+ } else {
+ if (get_bits_left(gbc) < zeroes) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid uvlc code at "
+ "%s: bitstream ended.\n", name);
+ return AVERROR_INVALIDDATA;
+ }
+
+ bits_value = get_bits_long(gbc, zeroes);
+ value = bits_value + (UINT32_C(1) << zeroes) - 1;
+ }
+
+ if (ctx->trace_enable) {
+ char bits[65];
+ int i, j, k;
+
+ if (zeroes >= 32) {
+ while (zeroes > 32) {
+ k = FFMIN(zeroes - 32, 32);
+ for (i = 0; i < k; i++)
+ bits[i] = '0';
+ bits[i] = 0;
+ ff_cbs_trace_syntax_element(ctx, position, name,
+ NULL, bits, 0);
+ zeroes -= k;
+ position += k;
+ }
+ }
+
+ for (i = 0; i < zeroes; i++)
+ bits[i] = '0';
+ bits[i++] = '1';
+
+ if (zeroes < 32) {
+ for (j = 0; j < zeroes; j++)
+ bits[i++] = (bits_value >> (zeroes - j - 1) & 1) ? '1' : '0';
+ }
+
+ bits[i] = 0;
+ ff_cbs_trace_syntax_element(ctx, position, name,
+ NULL, bits, value);
+ }
+
+ if (value < range_min || value > range_max) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
+ "%"PRIu32", but must be in [%"PRIu32",%"PRIu32"].\n",
+ name, value, range_min, range_max);
+ return AVERROR_INVALIDDATA;
+ }
+
+ *write_to = value;
+ return 0;
+}
+
+static int cbs_av1_write_uvlc(CodedBitstreamContext *ctx, PutBitContext *pbc,
+ const char *name, uint32_t value,
+ uint32_t range_min, uint32_t range_max)
+{
+ uint32_t v;
+ int position, zeroes;
+
+ if (value < range_min || value > range_max) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
+ "%"PRIu32", but must be in [%"PRIu32",%"PRIu32"].\n",
+ name, value, range_min, range_max);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (ctx->trace_enable)
+ position = put_bits_count(pbc);
+
+ zeroes = av_log2(value + 1);
+ v = value - (1U << zeroes) + 1;
+ put_bits(pbc, zeroes, 0);
+ put_bits(pbc, 1, 1);
+ put_bits(pbc, zeroes, v);
+
+ if (ctx->trace_enable) {
+ char bits[65];
+ int i, j;
+ i = 0;
+ for (j = 0; j < zeroes; j++)
+ bits[i++] = '0';
+ bits[i++] = '1';
+ for (j = 0; j < zeroes; j++)
+ bits[i++] = (v >> (zeroes - j - 1) & 1) ? '1' : '0';
+ bits[i++] = 0;
+ ff_cbs_trace_syntax_element(ctx, position, name, NULL,
+ bits, value);
+ }
+
+ return 0;
+}
+
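+// leb128(): little-endian base-128 as used for OBU sizes.  Up to eight
+// bytes are read; each byte contributes its low seven bits and its top
+// bit flags continuation.  Values above UINT32_MAX are rejected here.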
+static int cbs_av1_read_leb128(CodedBitstreamContext *ctx, GetBitContext *gbc,
+ const char *name, uint64_t *write_to)
+{
+ uint64_t value;
+ int position, err, i;
+
+ if (ctx->trace_enable)
+ position = get_bits_count(gbc);
+
+ value = 0;
+ for (i = 0; i < 8; i++) {
+ int subscript[2] = { 1, i };
+ uint32_t byte;
+ err = ff_cbs_read_unsigned(ctx, gbc, 8, "leb128_byte[i]", subscript,
+ &byte, 0x00, 0xff);
+ if (err < 0)
+ return err;
+
+ value |= (uint64_t)(byte & 0x7f) << (i * 7);
+ if (!(byte & 0x80))
+ break;
+ }
+
+ if (value > UINT32_MAX)
+ return AVERROR_INVALIDDATA;
+
+ if (ctx->trace_enable)
+ ff_cbs_trace_syntax_element(ctx, position, name, NULL, "", value);
+
+ *write_to = value;
+ return 0;
+}
+
+static int cbs_av1_write_leb128(CodedBitstreamContext *ctx, PutBitContext *pbc,
+ const char *name, uint64_t value)
+{
+ int position, err, len, i;
+ uint8_t byte;
+
+ len = (av_log2(value) + 7) / 7;
+
+ if (ctx->trace_enable)
+ position = put_bits_count(pbc);
+
+ for (i = 0; i < len; i++) {
+ int subscript[2] = { 1, i };
+
+ byte = value >> (7 * i) & 0x7f;
+ if (i < len - 1)
+ byte |= 0x80;
+
+ err = ff_cbs_write_unsigned(ctx, pbc, 8, "leb128_byte[i]", subscript,
+ byte, 0x00, 0xff);
+ if (err < 0)
+ return err;
+ }
+
+ if (ctx->trace_enable)
+ ff_cbs_trace_syntax_element(ctx, position, name, NULL, "", value);
+
+ return 0;
+}
+
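+// ns(n): the AV1 non-symmetric code.  With w = floor(log2(n)) + 1 and
+// m = 2^w - n, the first w - 1 bits give v; if v < m the value is v,
+// otherwise one extra bit is read and the value is (v << 1) - m + bit.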
+static int cbs_av1_read_ns(CodedBitstreamContext *ctx, GetBitContext *gbc,
+ uint32_t n, const char *name,
+ const int *subscripts, uint32_t *write_to)
+{
+ uint32_t m, v, extra_bit, value;
+ int position, w;
+
+ av_assert0(n > 0);
+
+ if (ctx->trace_enable)
+ position = get_bits_count(gbc);
+
+ w = av_log2(n) + 1;
+ m = (1 << w) - n;
+
+ if (get_bits_left(gbc) < w) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid non-symmetric value at "
+ "%s: bitstream ended.\n", name);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (w - 1 > 0)
+ v = get_bits(gbc, w - 1);
+ else
+ v = 0;
+
+ if (v < m) {
+ value = v;
+ } else {
+ extra_bit = get_bits1(gbc);
+ value = (v << 1) - m + extra_bit;
+ }
+
+ if (ctx->trace_enable) {
+ char bits[33];
+ int i;
+ for (i = 0; i < w - 1; i++)
+ bits[i] = (v >> i & 1) ? '1' : '0';
+ if (v >= m)
+ bits[i++] = extra_bit ? '1' : '0';
+ bits[i] = 0;
+
+ ff_cbs_trace_syntax_element(ctx, position,
+ name, subscripts, bits, value);
+ }
+
+ *write_to = value;
+ return 0;
+}
+
+static int cbs_av1_write_ns(CodedBitstreamContext *ctx, PutBitContext *pbc,
+ uint32_t n, const char *name,
+ const int *subscripts, uint32_t value)
+{
+ uint32_t w, m, v, extra_bit;
+ int position;
+
+ if (value > n) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
+ "%"PRIu32", but must be in [0,%"PRIu32"].\n",
+ name, value, n);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (ctx->trace_enable)
+ position = put_bits_count(pbc);
+
+ w = av_log2(n) + 1;
+ m = (1 << w) - n;
+
+ if (put_bits_left(pbc) < w)
+ return AVERROR(ENOSPC);
+
+ if (value < m) {
+ v = value;
+ put_bits(pbc, w - 1, v);
+ } else {
+ v = m + ((value - m) >> 1);
+ extra_bit = (value - m) & 1;
+ put_bits(pbc, w - 1, v);
+ put_bits(pbc, 1, extra_bit);
+ }
+
+ if (ctx->trace_enable) {
+ char bits[33];
+ int i;
+ for (i = 0; i < w - 1; i++)
+ bits[i] = (v >> i & 1) ? '1' : '0';
+ if (value >= m)
+ bits[i++] = extra_bit ? '1' : '0';
+ bits[i] = 0;
+
+ ff_cbs_trace_syntax_element(ctx, position,
+ name, subscripts, bits, value);
+ }
+
+ return 0;
+}
+
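+// increment(min, max): unary-style code.  The value starts at range_min
+// and each 1 bit read increments it; decoding stops at the first 0 bit
+// or once range_max is reached (in which case no terminating 0 is read).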
+static int cbs_av1_read_increment(CodedBitstreamContext *ctx, GetBitContext *gbc,
+ uint32_t range_min, uint32_t range_max,
+ const char *name, uint32_t *write_to)
+{
+ uint32_t value;
+ int position, i;
+ char bits[33];
+
+ av_assert0(range_min <= range_max && range_max - range_min < sizeof(bits) - 1);
+ if (ctx->trace_enable)
+ position = get_bits_count(gbc);
+
+ for (i = 0, value = range_min; value < range_max;) {
+ if (get_bits_left(gbc) < 1) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid increment value at "
+ "%s: bitstream ended.\n", name);
+ return AVERROR_INVALIDDATA;
+ }
+ if (get_bits1(gbc)) {
+ bits[i++] = '1';
+ ++value;
+ } else {
+ bits[i++] = '0';
+ break;
+ }
+ }
+
+ if (ctx->trace_enable) {
+ bits[i] = 0;
+ ff_cbs_trace_syntax_element(ctx, position,
+ name, NULL, bits, value);
+ }
+
+ *write_to = value;
+ return 0;
+}
+
+static int cbs_av1_write_increment(CodedBitstreamContext *ctx, PutBitContext *pbc,
+ uint32_t range_min, uint32_t range_max,
+ const char *name, uint32_t value)
+{
+ int len;
+
+ av_assert0(range_min <= range_max && range_max - range_min < 32);
+ if (value < range_min || value > range_max) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
+ "%"PRIu32", but must be in [%"PRIu32",%"PRIu32"].\n",
+ name, value, range_min, range_max);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (value == range_max)
+ len = range_max - range_min;
+ else
+ len = value - range_min + 1;
+ if (put_bits_left(pbc) < len)
+ return AVERROR(ENOSPC);
+
+ if (ctx->trace_enable) {
+ char bits[33];
+ int i;
+ for (i = 0; i < len; i++) {
+ if (range_min + i == value)
+ bits[i] = '0';
+ else
+ bits[i] = '1';
+ }
+ bits[i] = 0;
+ ff_cbs_trace_syntax_element(ctx, put_bits_count(pbc),
+ name, NULL, bits, value);
+ }
+
+ if (len > 0)
+ put_bits(pbc, len, (1 << len) - 1 - (value != range_max));
+
+ return 0;
+}
+
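+// subexp(max): the AV1 subexponential code.  An increment code first
+// selects a length class; class 0 uses three literal bits, class k > 0
+// uses k + 2 bits offset by 2^(k + 2), and the final class falls back
+// to ns() for whatever range remains.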
+static int cbs_av1_read_subexp(CodedBitstreamContext *ctx, GetBitContext *gbc,
+ uint32_t range_max, const char *name,
+ const int *subscripts, uint32_t *write_to)
+{
+ uint32_t value;
+ int position, err;
+ uint32_t max_len, len, range_offset, range_bits;
+
+ if (ctx->trace_enable)
+ position = get_bits_count(gbc);
+
+ av_assert0(range_max > 0);
+ max_len = av_log2(range_max - 1) - 3;
+
+ err = cbs_av1_read_increment(ctx, gbc, 0, max_len,
+ "subexp_more_bits", &len);
+ if (err < 0)
+ return err;
+
+ if (len) {
+ range_bits = 2 + len;
+ range_offset = 1 << range_bits;
+ } else {
+ range_bits = 3;
+ range_offset = 0;
+ }
+
+ if (len < max_len) {
+ err = ff_cbs_read_unsigned(ctx, gbc, range_bits,
+ "subexp_bits", NULL, &value,
+ 0, MAX_UINT_BITS(range_bits));
+ if (err < 0)
+ return err;
+
+ } else {
+ err = cbs_av1_read_ns(ctx, gbc, range_max - range_offset,
+ "subexp_final_bits", NULL, &value);
+ if (err < 0)
+ return err;
+ }
+ value += range_offset;
+
+ if (ctx->trace_enable)
+ ff_cbs_trace_syntax_element(ctx, position,
+ name, subscripts, "", value);
+
+ *write_to = value;
+ return err;
+}
+
+static int cbs_av1_write_subexp(CodedBitstreamContext *ctx, PutBitContext *pbc,
+ uint32_t range_max, const char *name,
+ const int *subscripts, uint32_t value)
+{
+ int position, err;
+ uint32_t max_len, len, range_offset, range_bits;
+
+ if (value > range_max) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
+ "%"PRIu32", but must be in [0,%"PRIu32"].\n",
+ name, value, range_max);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (ctx->trace_enable)
+ position = put_bits_count(pbc);
+
+ av_assert0(range_max > 0);
+ max_len = av_log2(range_max - 1) - 3;
+
+ if (value < 8) {
+ range_bits = 3;
+ range_offset = 0;
+ len = 0;
+ } else {
+ range_bits = av_log2(value);
+ len = range_bits - 2;
+ if (len > max_len) {
+ // The top bin is combined with the one below it.
+ av_assert0(len == max_len + 1);
+ --range_bits;
+ len = max_len;
+ }
+ range_offset = 1 << range_bits;
+ }
+
+ err = cbs_av1_write_increment(ctx, pbc, 0, max_len,
+ "subexp_more_bits", len);
+ if (err < 0)
+ return err;
+
+ if (len < max_len) {
+ err = ff_cbs_write_unsigned(ctx, pbc, range_bits,
+ "subexp_bits", NULL,
+ value - range_offset,
+ 0, MAX_UINT_BITS(range_bits));
+ if (err < 0)
+ return err;
+
+ } else {
+ err = cbs_av1_write_ns(ctx, pbc, range_max - range_offset,
+ "subexp_final_bits", NULL,
+ value - range_offset);
+ if (err < 0)
+ return err;
+ }
+
+ if (ctx->trace_enable)
+ ff_cbs_trace_syntax_element(ctx, position,
+ name, subscripts, "", value);
+
+ return err;
+}
+
+
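+// Returns the smallest k such that (blksize << k) >= target, matching
+// the tile_log2() helper from the AV1 specification.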
+static int cbs_av1_tile_log2(int blksize, int target)
+{
+ int k;
+ for (k = 0; (blksize << k) < target; k++);
+ return k;
+}
+
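+// Computes the signed distance between two order hints modulo the order
+// hint range, as get_relative_dist() in the AV1 specification; returns 0
+// when order hints are disabled.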
+static int cbs_av1_get_relative_dist(const AV1RawSequenceHeader *seq,
+ unsigned int a, unsigned int b)
+{
+ unsigned int diff, m;
+ if (!seq->enable_order_hint)
+ return 0;
+ diff = a - b;
+ m = 1 << seq->order_hint_bits_minus_1;
+ diff = (diff & (m - 1)) - (diff & m);
+ return diff;
+}
+
+static size_t cbs_av1_get_payload_bytes_left(GetBitContext *gbc)
+{
+ GetBitContext tmp = *gbc;
+ size_t size = 0;
+ for (int i = 0; get_bits_left(&tmp) >= 8; i++) {
+ if (get_bits(&tmp, 8))
+ size = i;
+ }
+ return size;
+}
+
+
+#define HEADER(name) do { \
+ ff_cbs_trace_header(ctx, name); \
+ } while (0)
+
+#define CHECK(call) do { \
+ err = (call); \
+ if (err < 0) \
+ return err; \
+ } while (0)
+
+#define FUNC_NAME(rw, codec, name) cbs_ ## codec ## _ ## rw ## _ ## name
+#define FUNC_AV1(rw, name) FUNC_NAME(rw, av1, name)
+#define FUNC(name) FUNC_AV1(READWRITE, name)
+
+#define SUBSCRIPTS(subs, ...) (subs > 0 ? ((int[subs + 1]){ subs, __VA_ARGS__ }) : NULL)
+
+#define fb(width, name) \
+ xf(width, name, current->name, 0, MAX_UINT_BITS(width), 0, )
+#define fc(width, name, range_min, range_max) \
+ xf(width, name, current->name, range_min, range_max, 0, )
+#define flag(name) fb(1, name)
+#define su(width, name) \
+ xsu(width, name, current->name, 0, )
+
+#define fbs(width, name, subs, ...) \
+ xf(width, name, current->name, 0, MAX_UINT_BITS(width), subs, __VA_ARGS__)
+#define fcs(width, name, range_min, range_max, subs, ...) \
+ xf(width, name, current->name, range_min, range_max, subs, __VA_ARGS__)
+#define flags(name, subs, ...) \
+ xf(1, name, current->name, 0, 1, subs, __VA_ARGS__)
+#define sus(width, name, subs, ...) \
+ xsu(width, name, current->name, subs, __VA_ARGS__)
+
+#define fixed(width, name, value) do { \
+ av_unused uint32_t fixed_value = value; \
+ xf(width, name, fixed_value, value, value, 0, ); \
+ } while (0)
+
+
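+// The syntax template below is included twice: first with the READ set
+// of macros, which map every syntax element onto the cbs read helpers,
+// and then again with the WRITE set, which map the same template onto
+// the corresponding write helpers.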
+#define READ
+#define READWRITE read
+#define RWContext GetBitContext
+
+#define xf(width, name, var, range_min, range_max, subs, ...) do { \
+ uint32_t value; \
+ CHECK(ff_cbs_read_unsigned(ctx, rw, width, #name, \
+ SUBSCRIPTS(subs, __VA_ARGS__), \
+ &value, range_min, range_max)); \
+ var = value; \
+ } while (0)
+
+#define xsu(width, name, var, subs, ...) do { \
+ int32_t value; \
+ CHECK(ff_cbs_read_signed(ctx, rw, width, #name, \
+ SUBSCRIPTS(subs, __VA_ARGS__), &value, \
+ MIN_INT_BITS(width), \
+ MAX_INT_BITS(width))); \
+ var = value; \
+ } while (0)
+
+#define uvlc(name, range_min, range_max) do { \
+ uint32_t value; \
+ CHECK(cbs_av1_read_uvlc(ctx, rw, #name, \
+ &value, range_min, range_max)); \
+ current->name = value; \
+ } while (0)
+
+#define ns(max_value, name, subs, ...) do { \
+ uint32_t value; \
+ CHECK(cbs_av1_read_ns(ctx, rw, max_value, #name, \
+ SUBSCRIPTS(subs, __VA_ARGS__), &value)); \
+ current->name = value; \
+ } while (0)
+
+#define increment(name, min, max) do { \
+ uint32_t value; \
+ CHECK(cbs_av1_read_increment(ctx, rw, min, max, #name, &value)); \
+ current->name = value; \
+ } while (0)
+
+#define subexp(name, max, subs, ...) do { \
+ uint32_t value; \
+ CHECK(cbs_av1_read_subexp(ctx, rw, max, #name, \
+ SUBSCRIPTS(subs, __VA_ARGS__), &value)); \
+ current->name = value; \
+ } while (0)
+
+#define delta_q(name) do { \
+ uint8_t delta_coded; \
+ int8_t delta_q; \
+ xf(1, name.delta_coded, delta_coded, 0, 1, 0, ); \
+ if (delta_coded) \
+ xsu(1 + 6, name.delta_q, delta_q, 0, ); \
+ else \
+ delta_q = 0; \
+ current->name = delta_q; \
+ } while (0)
+
+#define leb128(name) do { \
+ uint64_t value; \
+ CHECK(cbs_av1_read_leb128(ctx, rw, #name, &value)); \
+ current->name = value; \
+ } while (0)
+
+#define infer(name, value) do { \
+ current->name = value; \
+ } while (0)
+
+#define byte_alignment(rw) (get_bits_count(rw) % 8)
+
+#include "cbs_av1_syntax_template.c"
+
+#undef READ
+#undef READWRITE
+#undef RWContext
+#undef xf
+#undef xsu
+#undef uvlc
+#undef ns
+#undef increment
+#undef subexp
+#undef delta_q
+#undef leb128
+#undef infer
+#undef byte_alignment
+
+
+#define WRITE
+#define READWRITE write
+#define RWContext PutBitContext
+
+#define xf(width, name, var, range_min, range_max, subs, ...) do { \
+ CHECK(ff_cbs_write_unsigned(ctx, rw, width, #name, \
+ SUBSCRIPTS(subs, __VA_ARGS__), \
+ var, range_min, range_max)); \
+ } while (0)
+
+#define xsu(width, name, var, subs, ...) do { \
+ CHECK(ff_cbs_write_signed(ctx, rw, width, #name, \
+ SUBSCRIPTS(subs, __VA_ARGS__), var, \
+ MIN_INT_BITS(width), \
+ MAX_INT_BITS(width))); \
+ } while (0)
+
+#define uvlc(name, range_min, range_max) do { \
+ CHECK(cbs_av1_write_uvlc(ctx, rw, #name, current->name, \
+ range_min, range_max)); \
+ } while (0)
+
+#define ns(max_value, name, subs, ...) do { \
+ CHECK(cbs_av1_write_ns(ctx, rw, max_value, #name, \
+ SUBSCRIPTS(subs, __VA_ARGS__), \
+ current->name)); \
+ } while (0)
+
+#define increment(name, min, max) do { \
+ CHECK(cbs_av1_write_increment(ctx, rw, min, max, #name, \
+ current->name)); \
+ } while (0)
+
+#define subexp(name, max, subs, ...) do { \
+ CHECK(cbs_av1_write_subexp(ctx, rw, max, #name, \
+ SUBSCRIPTS(subs, __VA_ARGS__), \
+ current->name)); \
+ } while (0)
+
+#define delta_q(name) do { \
+ xf(1, name.delta_coded, current->name != 0, 0, 1, 0, ); \
+ if (current->name) \
+ xsu(1 + 6, name.delta_q, current->name, 0, ); \
+ } while (0)
+
+#define leb128(name) do { \
+ CHECK(cbs_av1_write_leb128(ctx, rw, #name, current->name)); \
+ } while (0)
+
+#define infer(name, value) do { \
+ if (current->name != (value)) { \
+ av_log(ctx->log_ctx, AV_LOG_ERROR, \
+ "%s does not match inferred value: " \
+ "%"PRId64", but should be %"PRId64".\n", \
+ #name, (int64_t)current->name, (int64_t)(value)); \
+ return AVERROR_INVALIDDATA; \
+ } \
+ } while (0)
+
+#define byte_alignment(rw) (put_bits_count(rw) % 8)
+
+#include "cbs_av1_syntax_template.c"
+
+#undef WRITE
+#undef READWRITE
+#undef RWContext
+#undef xf
+#undef xsu
+#undef uvlc
+#undef ns
+#undef increment
+#undef subexp
+#undef delta_q
+#undef leb128
+#undef infer
+#undef byte_alignment
+
+
+static int cbs_av1_split_fragment(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ int header)
+{
+ GetBitContext gbc;
+ uint8_t *data;
+ size_t size;
+ uint64_t obu_length;
+ int pos, err, trace;
+
+ // Don't include this parsing in trace output.
+ trace = ctx->trace_enable;
+ ctx->trace_enable = 0;
+
+ data = frag->data;
+ size = frag->data_size;
+
+ if (INT_MAX / 8 < size) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid fragment: "
+ "too large (%"SIZE_SPECIFIER" bytes).\n", size);
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ if (header && size && data[0] & 0x80) {
+ // The first bit is nonzero, so the extradata does not consist purely
+ // of OBUs; expect an MP4/Matroska AV1CodecConfigurationRecord.
+ int config_record_version = data[0] & 0x7f;
+
+ if (config_record_version != 1) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR,
+ "Unknown version %d of AV1CodecConfigurationRecord "
+ "found!\n",
+ config_record_version);
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ if (size <= 4) {
+ if (size < 4) {
+ av_log(ctx->log_ctx, AV_LOG_WARNING,
+ "Undersized AV1CodecConfigurationRecord v%d found!\n",
+ config_record_version);
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ goto success;
+ }
+
+ // In AV1CodecConfigurationRecord v1 the actual OBUs start after the
+ // first four bytes, so advance past them before parsing.
+ data += 4;
+ size -= 4;
+ }
+
+ while (size > 0) {
+ AV1RawOBUHeader header;
+ uint64_t obu_size;
+
+ init_get_bits(&gbc, data, 8 * size);
+
+ err = cbs_av1_read_obu_header(ctx, &gbc, &header);
+ if (err < 0)
+ goto fail;
+
+ if (header.obu_has_size_field) {
+ if (get_bits_left(&gbc) < 8) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid OBU: fragment "
+ "too short (%"SIZE_SPECIFIER" bytes).\n", size);
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ err = cbs_av1_read_leb128(ctx, &gbc, "obu_size", &obu_size);
+ if (err < 0)
+ goto fail;
+ } else
+ obu_size = size - 1 - header.obu_extension_flag;
+
+ pos = get_bits_count(&gbc);
+ av_assert0(pos % 8 == 0 && pos / 8 <= size);
+
+ obu_length = pos / 8 + obu_size;
+
+ if (size < obu_length) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid OBU length: "
+ "%"PRIu64", but only %"SIZE_SPECIFIER" bytes remaining in fragment.\n",
+ obu_length, size);
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ err = ff_cbs_append_unit_data(frag, header.obu_type,
+ data, obu_length, frag->data_ref);
+ if (err < 0)
+ goto fail;
+
+ data += obu_length;
+ size -= obu_length;
+ }
+
+success:
+ err = 0;
+fail:
+ ctx->trace_enable = trace;
+ return err;
+}
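+
+/*
+ * For reference, a minimal standalone sketch of the leb128() coding used for
+ * the obu_size fields above (illustrative only, not part of this patch):
+ * each byte carries seven value bits, least-significant group first, and its
+ * top bit flags that another byte follows; the AV1 spec allows at most eight
+ * bytes. The helper name demo_read_leb128 is made up for this sketch.
+ */
+static int demo_read_leb128(const uint8_t *buf, size_t len, uint64_t *out)
+{
+    uint64_t value = 0;
+    size_t i;
+
+    for (i = 0; i < 8 && i < len; i++) {
+        uint8_t byte = buf[i];
+        value |= (uint64_t)(byte & 0x7f) << (7 * i);
+        if (!(byte & 0x80)) {
+            *out = value;
+            return (int)(i + 1);   // number of bytes consumed
+        }
+    }
+    return -1;                     // truncated or overlong encoding
+}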
+
+static int cbs_av1_ref_tile_data(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit,
+ GetBitContext *gbc,
+ AV1RawTileData *td)
+{
+ int pos;
+
+ pos = get_bits_count(gbc);
+ if (pos >= 8 * unit->data_size) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Bitstream ended before "
+ "any data in tile group (%d bits read).\n", pos);
+ return AVERROR_INVALIDDATA;
+ }
+ // Must be byte-aligned at this point.
+ av_assert0(pos % 8 == 0);
+
+ td->data_ref = av_buffer_ref(unit->data_ref);
+ if (!td->data_ref)
+ return AVERROR(ENOMEM);
+
+ td->data = unit->data + pos / 8;
+ td->data_size = unit->data_size - pos / 8;
+
+ return 0;
+}
+
+static int cbs_av1_read_unit(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ AV1RawOBU *obu;
+ GetBitContext gbc;
+ int err, start_pos, end_pos;
+
+ err = ff_cbs_alloc_unit_content(ctx, unit);
+ if (err < 0)
+ return err;
+ obu = unit->content;
+
+ err = init_get_bits(&gbc, unit->data, 8 * unit->data_size);
+ if (err < 0)
+ return err;
+
+ err = cbs_av1_read_obu_header(ctx, &gbc, &obu->header);
+ if (err < 0)
+ return err;
+ av_assert0(obu->header.obu_type == unit->type);
+
+ if (obu->header.obu_has_size_field) {
+ uint64_t obu_size;
+ err = cbs_av1_read_leb128(ctx, &gbc, "obu_size", &obu_size);
+ if (err < 0)
+ return err;
+ obu->obu_size = obu_size;
+ } else {
+ if (unit->data_size < 1 + obu->header.obu_extension_flag) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid OBU length: "
+ "unit too short (%"SIZE_SPECIFIER").\n", unit->data_size);
+ return AVERROR_INVALIDDATA;
+ }
+ obu->obu_size = unit->data_size - 1 - obu->header.obu_extension_flag;
+ }
+
+ start_pos = get_bits_count(&gbc);
+
+ if (obu->header.obu_extension_flag) {
+ if (obu->header.obu_type != AV1_OBU_SEQUENCE_HEADER &&
+ obu->header.obu_type != AV1_OBU_TEMPORAL_DELIMITER &&
+ priv->operating_point_idc) {
+ int in_temporal_layer =
+ (priv->operating_point_idc >> priv->temporal_id ) & 1;
+ int in_spatial_layer =
+ (priv->operating_point_idc >> (priv->spatial_id + 8)) & 1;
+ if (!in_temporal_layer || !in_spatial_layer) {
+ return AVERROR(EAGAIN); // drop_obu()
+ }
+ }
+ }
+
+ switch (obu->header.obu_type) {
+ case AV1_OBU_SEQUENCE_HEADER:
+ {
+ err = cbs_av1_read_sequence_header_obu(ctx, &gbc,
+ &obu->obu.sequence_header);
+ if (err < 0)
+ return err;
+
+ if (priv->operating_point >= 0) {
+ AV1RawSequenceHeader *sequence_header = &obu->obu.sequence_header;
+
+ if (priv->operating_point > sequence_header->operating_points_cnt_minus_1) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid Operating Point %d requested. "
+ "Must not be higher than %u.\n",
+ priv->operating_point, sequence_header->operating_points_cnt_minus_1);
+ return AVERROR(EINVAL);
+ }
+ priv->operating_point_idc = sequence_header->operating_point_idc[priv->operating_point];
+ }
+
+ av_buffer_unref(&priv->sequence_header_ref);
+ priv->sequence_header = NULL;
+
+ priv->sequence_header_ref = av_buffer_ref(unit->content_ref);
+ if (!priv->sequence_header_ref)
+ return AVERROR(ENOMEM);
+ priv->sequence_header = &obu->obu.sequence_header;
+ }
+ break;
+ case AV1_OBU_TEMPORAL_DELIMITER:
+ {
+ err = cbs_av1_read_temporal_delimiter_obu(ctx, &gbc);
+ if (err < 0)
+ return err;
+ }
+ break;
+ case AV1_OBU_FRAME_HEADER:
+ case AV1_OBU_REDUNDANT_FRAME_HEADER:
+ {
+ err = cbs_av1_read_frame_header_obu(ctx, &gbc,
+ &obu->obu.frame_header,
+ obu->header.obu_type ==
+ AV1_OBU_REDUNDANT_FRAME_HEADER,
+ unit->data_ref);
+ if (err < 0)
+ return err;
+ }
+ break;
+ case AV1_OBU_TILE_GROUP:
+ {
+ err = cbs_av1_read_tile_group_obu(ctx, &gbc,
+ &obu->obu.tile_group);
+ if (err < 0)
+ return err;
+
+ err = cbs_av1_ref_tile_data(ctx, unit, &gbc,
+ &obu->obu.tile_group.tile_data);
+ if (err < 0)
+ return err;
+ }
+ break;
+ case AV1_OBU_FRAME:
+ {
+ err = cbs_av1_read_frame_obu(ctx, &gbc, &obu->obu.frame,
+ unit->data_ref);
+ if (err < 0)
+ return err;
+
+ err = cbs_av1_ref_tile_data(ctx, unit, &gbc,
+ &obu->obu.frame.tile_group.tile_data);
+ if (err < 0)
+ return err;
+ }
+ break;
+ case AV1_OBU_TILE_LIST:
+ {
+ err = cbs_av1_read_tile_list_obu(ctx, &gbc,
+ &obu->obu.tile_list);
+ if (err < 0)
+ return err;
+
+ err = cbs_av1_ref_tile_data(ctx, unit, &gbc,
+ &obu->obu.tile_list.tile_data);
+ if (err < 0)
+ return err;
+ }
+ break;
+ case AV1_OBU_METADATA:
+ {
+ err = cbs_av1_read_metadata_obu(ctx, &gbc, &obu->obu.metadata);
+ if (err < 0)
+ return err;
+ }
+ break;
+ case AV1_OBU_PADDING:
+ {
+ err = cbs_av1_read_padding_obu(ctx, &gbc, &obu->obu.padding);
+ if (err < 0)
+ return err;
+ }
+ break;
+ default:
+ return AVERROR(ENOSYS);
+ }
+
+ end_pos = get_bits_count(&gbc);
+ av_assert0(end_pos <= unit->data_size * 8);
+
+ if (obu->obu_size > 0 &&
+ obu->header.obu_type != AV1_OBU_TILE_GROUP &&
+ obu->header.obu_type != AV1_OBU_TILE_LIST &&
+ obu->header.obu_type != AV1_OBU_FRAME) {
+ int nb_bits = obu->obu_size * 8 + start_pos - end_pos;
+
+ if (nb_bits <= 0)
+ return AVERROR_INVALIDDATA;
+
+ err = cbs_av1_read_trailing_bits(ctx, &gbc, nb_bits);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
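+
+/*
+ * For orientation (illustrative only, not part of this patch): the fixed
+ * header byte that cbs_av1_read_unit() starts from packs, MSB first, the
+ * forbidden bit, the 4-bit OBU type, the extension flag, the has-size flag
+ * and one reserved bit. demo_parse_obu_header_byte is a made-up name.
+ */
+static void demo_parse_obu_header_byte(uint8_t b, AV1RawOBUHeader *hdr)
+{
+    hdr->obu_forbidden_bit  = (b >> 7) & 1;
+    hdr->obu_type           = (b >> 3) & 0xf;
+    hdr->obu_extension_flag = (b >> 2) & 1;
+    hdr->obu_has_size_field = (b >> 1) & 1;
+    hdr->obu_reserved_1bit  =  b       & 1;
+}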
+
+static int cbs_av1_write_obu(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit,
+ PutBitContext *pbc)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ AV1RawOBU *obu = unit->content;
+ PutBitContext pbc_tmp;
+ AV1RawTileData *td;
+ size_t header_size;
+ int err, start_pos, end_pos, data_pos;
+ CodedBitstreamAV1Context av1ctx;
+
+ // OBUs in the normal bitstream format must contain a size field
+ // in every OBU (in annex B it is optional, but we don't support
+ // writing that).
+ obu->header.obu_has_size_field = 1;
+ av1ctx = *priv;
+
+ if (priv->sequence_header_ref) {
+ av1ctx.sequence_header_ref = av_buffer_ref(priv->sequence_header_ref);
+ if (!av1ctx.sequence_header_ref)
+ return AVERROR(ENOMEM);
+ }
+
+ if (priv->frame_header_ref) {
+ av1ctx.frame_header_ref = av_buffer_ref(priv->frame_header_ref);
+ if (!av1ctx.frame_header_ref) {
+ err = AVERROR(ENOMEM);
+ goto error;
+ }
+ }
+
+ err = cbs_av1_write_obu_header(ctx, pbc, &obu->header);
+ if (err < 0)
+ goto error;
+
+ if (obu->header.obu_has_size_field) {
+ pbc_tmp = *pbc;
+ // Add space for the size field to fill later.
+ put_bits32(pbc, 0);
+ put_bits32(pbc, 0);
+ }
+
+ td = NULL;
+ start_pos = put_bits_count(pbc);
+
+ switch (obu->header.obu_type) {
+ case AV1_OBU_SEQUENCE_HEADER:
+ {
+ err = cbs_av1_write_sequence_header_obu(ctx, pbc,
+ &obu->obu.sequence_header);
+ if (err < 0)
+ goto error;
+
+ av_buffer_unref(&priv->sequence_header_ref);
+ priv->sequence_header = NULL;
+
+ err = ff_cbs_make_unit_refcounted(ctx, unit);
+ if (err < 0)
+ goto error;
+
+ priv->sequence_header_ref = av_buffer_ref(unit->content_ref);
+ if (!priv->sequence_header_ref) {
+ err = AVERROR(ENOMEM);
+ goto error;
+ }
+
+ priv->sequence_header = &obu->obu.sequence_header;
+ }
+ break;
+ case AV1_OBU_TEMPORAL_DELIMITER:
+ {
+ err = cbs_av1_write_temporal_delimiter_obu(ctx, pbc);
+ if (err < 0)
+ goto error;
+ }
+ break;
+ case AV1_OBU_FRAME_HEADER:
+ case AV1_OBU_REDUNDANT_FRAME_HEADER:
+ {
+ err = cbs_av1_write_frame_header_obu(ctx, pbc,
+ &obu->obu.frame_header,
+ obu->header.obu_type ==
+ AV1_OBU_REDUNDANT_FRAME_HEADER,
+ NULL);
+ if (err < 0)
+ goto error;
+ }
+ break;
+ case AV1_OBU_TILE_GROUP:
+ {
+ err = cbs_av1_write_tile_group_obu(ctx, pbc,
+ &obu->obu.tile_group);
+ if (err < 0)
+ goto error;
+
+ td = &obu->obu.tile_group.tile_data;
+ }
+ break;
+ case AV1_OBU_FRAME:
+ {
+ err = cbs_av1_write_frame_obu(ctx, pbc, &obu->obu.frame, NULL);
+ if (err < 0)
+ goto error;
+
+ td = &obu->obu.frame.tile_group.tile_data;
+ }
+ break;
+ case AV1_OBU_TILE_LIST:
+ {
+ err = cbs_av1_write_tile_list_obu(ctx, pbc, &obu->obu.tile_list);
+ if (err < 0)
+ goto error;
+
+ td = &obu->obu.tile_list.tile_data;
+ }
+ break;
+ case AV1_OBU_METADATA:
+ {
+ err = cbs_av1_write_metadata_obu(ctx, pbc, &obu->obu.metadata);
+ if (err < 0)
+ goto error;
+ }
+ break;
+ case AV1_OBU_PADDING:
+ {
+ err = cbs_av1_write_padding_obu(ctx, pbc, &obu->obu.padding);
+ if (err < 0)
+ goto error;
+ }
+ break;
+ default:
+ err = AVERROR(ENOSYS);
+ goto error;
+ }
+
+ end_pos = put_bits_count(pbc);
+ header_size = (end_pos - start_pos + 7) / 8;
+ if (td) {
+ obu->obu_size = header_size + td->data_size;
+ } else if (header_size > 0) {
+ // Add trailing bits and recalculate.
+ err = cbs_av1_write_trailing_bits(ctx, pbc, 8 - end_pos % 8);
+ if (err < 0)
+ goto error;
+ end_pos = put_bits_count(pbc);
+ obu->obu_size = header_size = (end_pos - start_pos + 7) / 8;
+ } else {
+ // Empty OBU.
+ obu->obu_size = 0;
+ }
+
+ end_pos = put_bits_count(pbc);
+ // Must now be byte-aligned.
+ av_assert0(end_pos % 8 == 0);
+ flush_put_bits(pbc);
+ start_pos /= 8;
+ end_pos /= 8;
+
+ *pbc = pbc_tmp;
+ err = cbs_av1_write_leb128(ctx, pbc, "obu_size", obu->obu_size);
+ if (err < 0)
+ goto error;
+
+ data_pos = put_bits_count(pbc) / 8;
+ flush_put_bits(pbc);
+ av_assert0(data_pos <= start_pos);
+
+ if (8 * obu->obu_size > put_bits_left(pbc)) {
+ av_buffer_unref(&priv->sequence_header_ref);
+ av_buffer_unref(&priv->frame_header_ref);
+ *priv = av1ctx;
+
+ return AVERROR(ENOSPC);
+ }
+
+ if (obu->obu_size > 0) {
+ memmove(pbc->buf + data_pos,
+ pbc->buf + start_pos, header_size);
+ skip_put_bytes(pbc, header_size);
+
+ if (td) {
+ memcpy(pbc->buf + data_pos + header_size,
+ td->data, td->data_size);
+ skip_put_bytes(pbc, td->data_size);
+ }
+ }
+
+ // OBU data must be byte-aligned.
+ av_assert0(put_bits_count(pbc) % 8 == 0);
+ err = 0;
+
+error:
+ av_buffer_unref(&av1ctx.sequence_header_ref);
+ av_buffer_unref(&av1ctx.frame_header_ref);
+
+ return err;
+}
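+
+/*
+ * A minimal sketch of leb128 encoding as used when the reserved obu_size
+ * bytes are filled in above (illustrative only, not part of this patch):
+ * emit seven bits per byte, least-significant group first, setting the top
+ * bit on every byte except the last. demo_write_leb128 is a made-up name.
+ */
+static int demo_write_leb128(uint8_t *buf, size_t len, uint64_t value)
+{
+    int i = 0;
+
+    do {
+        uint8_t byte = value & 0x7f;
+        value >>= 7;
+        if (value)
+            byte |= 0x80;
+        if ((size_t)i >= len)
+            return -1;          // not enough space
+        buf[i++] = byte;
+    } while (value);
+
+    return i;                   // number of bytes written
+}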
+
+static int cbs_av1_assemble_fragment(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag)
+{
+ size_t size, pos;
+ int i;
+
+ size = 0;
+ for (i = 0; i < frag->nb_units; i++)
+ size += frag->units[i].data_size;
+
+ frag->data_ref = av_buffer_alloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!frag->data_ref)
+ return AVERROR(ENOMEM);
+ frag->data = frag->data_ref->data;
+ memset(frag->data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+ pos = 0;
+ for (i = 0; i < frag->nb_units; i++) {
+ memcpy(frag->data + pos, frag->units[i].data,
+ frag->units[i].data_size);
+ pos += frag->units[i].data_size;
+ }
+ av_assert0(pos == size);
+ frag->data_size = size;
+
+ return 0;
+}
+
+static void cbs_av1_flush(CodedBitstreamContext *ctx)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+
+ av_buffer_unref(&priv->frame_header_ref);
+ priv->sequence_header = NULL;
+ priv->frame_header = NULL;
+
+ memset(priv->ref, 0, sizeof(priv->ref));
+ priv->operating_point_idc = 0;
+ priv->seen_frame_header = 0;
+ priv->tile_num = 0;
+}
+
+static void cbs_av1_close(CodedBitstreamContext *ctx)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+
+ av_buffer_unref(&priv->sequence_header_ref);
+ av_buffer_unref(&priv->frame_header_ref);
+}
+
+static void cbs_av1_free_metadata(void *unit, uint8_t *content)
+{
+ AV1RawOBU *obu = (AV1RawOBU*)content;
+ AV1RawMetadata *md;
+
+ av_assert0(obu->header.obu_type == AV1_OBU_METADATA);
+ md = &obu->obu.metadata;
+
+ switch (md->metadata_type) {
+ case AV1_METADATA_TYPE_ITUT_T35:
+ av_buffer_unref(&md->metadata.itut_t35.payload_ref);
+ break;
+ }
+ av_free(content);
+}
+
+static const CodedBitstreamUnitTypeDescriptor cbs_av1_unit_types[] = {
+ CBS_UNIT_TYPE_POD(AV1_OBU_SEQUENCE_HEADER, AV1RawOBU),
+ CBS_UNIT_TYPE_POD(AV1_OBU_TEMPORAL_DELIMITER, AV1RawOBU),
+ CBS_UNIT_TYPE_POD(AV1_OBU_FRAME_HEADER, AV1RawOBU),
+ CBS_UNIT_TYPE_POD(AV1_OBU_REDUNDANT_FRAME_HEADER, AV1RawOBU),
+
+ CBS_UNIT_TYPE_INTERNAL_REF(AV1_OBU_TILE_GROUP, AV1RawOBU,
+ obu.tile_group.tile_data.data),
+ CBS_UNIT_TYPE_INTERNAL_REF(AV1_OBU_FRAME, AV1RawOBU,
+ obu.frame.tile_group.tile_data.data),
+ CBS_UNIT_TYPE_INTERNAL_REF(AV1_OBU_TILE_LIST, AV1RawOBU,
+ obu.tile_list.tile_data.data),
+ CBS_UNIT_TYPE_INTERNAL_REF(AV1_OBU_PADDING, AV1RawOBU,
+ obu.padding.payload),
+
+ CBS_UNIT_TYPE_COMPLEX(AV1_OBU_METADATA, AV1RawOBU,
+ &cbs_av1_free_metadata),
+
+ CBS_UNIT_TYPE_END_OF_LIST
+};
+
+#define OFFSET(x) offsetof(CodedBitstreamAV1Context, x)
+static const AVOption cbs_av1_options[] = {
+ { "operating_point", "Set operating point to select layers to parse from a scalable bitstream",
+ OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AV1_MAX_OPERATING_POINTS - 1, 0 },
+ { NULL }
+};
+
+static const AVClass cbs_av1_class = {
+ .class_name = "cbs_av1",
+ .item_name = av_default_item_name,
+ .option = cbs_av1_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+const CodedBitstreamType ff_cbs_type_av1 = {
+ .codec_id = AV_CODEC_ID_AV1,
+
+ .priv_class = &cbs_av1_class,
+ .priv_data_size = sizeof(CodedBitstreamAV1Context),
+
+ .unit_types = cbs_av1_unit_types,
+
+ .split_fragment = &cbs_av1_split_fragment,
+ .read_unit = &cbs_av1_read_unit,
+ .write_unit = &cbs_av1_write_obu,
+ .assemble_fragment = &cbs_av1_assemble_fragment,
+
+ .flush = &cbs_av1_flush,
+ .close = &cbs_av1_close,
+};
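+
+/*
+ * Typical driver usage, sketched for orientation only (not part of this
+ * patch): a CBS context bound to AV_CODEC_ID_AV1 dispatches packet parsing
+ * through the callbacks registered above. demo_parse_av1_packet, log_ctx
+ * and pkt are made-up names for this sketch.
+ */
+static int demo_parse_av1_packet(void *log_ctx, const AVPacket *pkt)
+{
+    CodedBitstreamContext *cbc = NULL;
+    CodedBitstreamFragment frag = { 0 };
+    int err;
+
+    err = ff_cbs_init(&cbc, AV_CODEC_ID_AV1, log_ctx);
+    if (err < 0)
+        return err;
+
+    err = ff_cbs_read_packet(cbc, &frag, pkt);
+    if (err >= 0) {
+        // frag.units[] now holds one parsed AV1RawOBU per OBU in the packet.
+        err = 0;
+    }
+
+    ff_cbs_fragment_free(&frag);
+    ff_cbs_close(&cbc);
+    return err;
+}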
diff --git a/media/ffvpx/libavcodec/cbs_av1.h b/media/ffvpx/libavcodec/cbs_av1.h
new file mode 100644
index 0000000000..1fc80dcfa0
--- /dev/null
+++ b/media/ffvpx/libavcodec/cbs_av1.h
@@ -0,0 +1,464 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CBS_AV1_H
+#define AVCODEC_CBS_AV1_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "av1.h"
+#include "cbs.h"
+
+
+typedef struct AV1RawOBUHeader {
+ uint8_t obu_forbidden_bit;
+ uint8_t obu_type;
+ uint8_t obu_extension_flag;
+ uint8_t obu_has_size_field;
+ uint8_t obu_reserved_1bit;
+
+ uint8_t temporal_id;
+ uint8_t spatial_id;
+ uint8_t extension_header_reserved_3bits;
+} AV1RawOBUHeader;
+
+typedef struct AV1RawColorConfig {
+ uint8_t high_bitdepth;
+ uint8_t twelve_bit;
+ uint8_t mono_chrome;
+
+ uint8_t color_description_present_flag;
+ uint8_t color_primaries;
+ uint8_t transfer_characteristics;
+ uint8_t matrix_coefficients;
+
+ uint8_t color_range;
+ uint8_t subsampling_x;
+ uint8_t subsampling_y;
+ uint8_t chroma_sample_position;
+ uint8_t separate_uv_delta_q;
+} AV1RawColorConfig;
+
+typedef struct AV1RawTimingInfo {
+ uint32_t num_units_in_display_tick;
+ uint32_t time_scale;
+
+ uint8_t equal_picture_interval;
+ uint32_t num_ticks_per_picture_minus_1;
+} AV1RawTimingInfo;
+
+typedef struct AV1RawDecoderModelInfo {
+ uint8_t buffer_delay_length_minus_1;
+ uint32_t num_units_in_decoding_tick;
+ uint8_t buffer_removal_time_length_minus_1;
+ uint8_t frame_presentation_time_length_minus_1;
+} AV1RawDecoderModelInfo;
+
+typedef struct AV1RawSequenceHeader {
+ uint8_t seq_profile;
+ uint8_t still_picture;
+ uint8_t reduced_still_picture_header;
+
+ uint8_t timing_info_present_flag;
+ uint8_t decoder_model_info_present_flag;
+ uint8_t initial_display_delay_present_flag;
+ uint8_t operating_points_cnt_minus_1;
+
+ AV1RawTimingInfo timing_info;
+ AV1RawDecoderModelInfo decoder_model_info;
+
+ uint16_t operating_point_idc[AV1_MAX_OPERATING_POINTS];
+ uint8_t seq_level_idx[AV1_MAX_OPERATING_POINTS];
+ uint8_t seq_tier[AV1_MAX_OPERATING_POINTS];
+ uint8_t decoder_model_present_for_this_op[AV1_MAX_OPERATING_POINTS];
+ uint32_t decoder_buffer_delay[AV1_MAX_OPERATING_POINTS];
+ uint32_t encoder_buffer_delay[AV1_MAX_OPERATING_POINTS];
+ uint8_t low_delay_mode_flag[AV1_MAX_OPERATING_POINTS];
+ uint8_t initial_display_delay_present_for_this_op[AV1_MAX_OPERATING_POINTS];
+ uint8_t initial_display_delay_minus_1[AV1_MAX_OPERATING_POINTS];
+
+ uint8_t frame_width_bits_minus_1;
+ uint8_t frame_height_bits_minus_1;
+ uint16_t max_frame_width_minus_1;
+ uint16_t max_frame_height_minus_1;
+
+ uint8_t frame_id_numbers_present_flag;
+ uint8_t delta_frame_id_length_minus_2;
+ uint8_t additional_frame_id_length_minus_1;
+
+ uint8_t use_128x128_superblock;
+ uint8_t enable_filter_intra;
+ uint8_t enable_intra_edge_filter;
+ uint8_t enable_interintra_compound;
+ uint8_t enable_masked_compound;
+ uint8_t enable_warped_motion;
+ uint8_t enable_dual_filter;
+
+ uint8_t enable_order_hint;
+ uint8_t enable_jnt_comp;
+ uint8_t enable_ref_frame_mvs;
+
+ uint8_t seq_choose_screen_content_tools;
+ uint8_t seq_force_screen_content_tools;
+ uint8_t seq_choose_integer_mv;
+ uint8_t seq_force_integer_mv;
+
+ uint8_t order_hint_bits_minus_1;
+
+ uint8_t enable_superres;
+ uint8_t enable_cdef;
+ uint8_t enable_restoration;
+
+ AV1RawColorConfig color_config;
+
+ uint8_t film_grain_params_present;
+} AV1RawSequenceHeader;
+
+typedef struct AV1RawFilmGrainParams {
+ uint8_t apply_grain;
+ uint16_t grain_seed;
+ uint8_t update_grain;
+ uint8_t film_grain_params_ref_idx;
+ uint8_t num_y_points;
+ uint8_t point_y_value[14];
+ uint8_t point_y_scaling[14];
+ uint8_t chroma_scaling_from_luma;
+ uint8_t num_cb_points;
+ uint8_t point_cb_value[10];
+ uint8_t point_cb_scaling[10];
+ uint8_t num_cr_points;
+ uint8_t point_cr_value[10];
+ uint8_t point_cr_scaling[10];
+ uint8_t grain_scaling_minus_8;
+ uint8_t ar_coeff_lag;
+ uint8_t ar_coeffs_y_plus_128[24];
+ uint8_t ar_coeffs_cb_plus_128[25];
+ uint8_t ar_coeffs_cr_plus_128[25];
+ uint8_t ar_coeff_shift_minus_6;
+ uint8_t grain_scale_shift;
+ uint8_t cb_mult;
+ uint8_t cb_luma_mult;
+ uint16_t cb_offset;
+ uint8_t cr_mult;
+ uint8_t cr_luma_mult;
+ uint16_t cr_offset;
+ uint8_t overlap_flag;
+ uint8_t clip_to_restricted_range;
+} AV1RawFilmGrainParams;
+
+typedef struct AV1RawFrameHeader {
+ uint8_t show_existing_frame;
+ uint8_t frame_to_show_map_idx;
+ uint32_t frame_presentation_time;
+ uint32_t display_frame_id;
+
+ uint8_t frame_type;
+ uint8_t show_frame;
+ uint8_t showable_frame;
+
+ uint8_t error_resilient_mode;
+ uint8_t disable_cdf_update;
+ uint8_t allow_screen_content_tools;
+ uint8_t force_integer_mv;
+
+ uint32_t current_frame_id;
+ uint8_t frame_size_override_flag;
+ uint8_t order_hint;
+
+ uint8_t buffer_removal_time_present_flag;
+ uint32_t buffer_removal_time[AV1_MAX_OPERATING_POINTS];
+
+ uint8_t primary_ref_frame;
+ uint16_t frame_width_minus_1;
+ uint16_t frame_height_minus_1;
+ uint8_t use_superres;
+ uint8_t coded_denom;
+ uint8_t render_and_frame_size_different;
+ uint16_t render_width_minus_1;
+ uint16_t render_height_minus_1;
+
+ uint8_t found_ref[AV1_REFS_PER_FRAME];
+
+ uint8_t refresh_frame_flags;
+ uint8_t allow_intrabc;
+ uint8_t ref_order_hint[AV1_NUM_REF_FRAMES];
+ uint8_t frame_refs_short_signaling;
+ uint8_t last_frame_idx;
+ uint8_t golden_frame_idx;
+ int8_t ref_frame_idx[AV1_REFS_PER_FRAME];
+ uint32_t delta_frame_id_minus1[AV1_REFS_PER_FRAME];
+
+ uint8_t allow_high_precision_mv;
+ uint8_t is_filter_switchable;
+ uint8_t interpolation_filter;
+ uint8_t is_motion_mode_switchable;
+ uint8_t use_ref_frame_mvs;
+
+ uint8_t disable_frame_end_update_cdf;
+
+ uint8_t uniform_tile_spacing_flag;
+ uint8_t tile_cols_log2;
+ uint8_t tile_rows_log2;
+ uint8_t width_in_sbs_minus_1[AV1_MAX_TILE_COLS];
+ uint8_t height_in_sbs_minus_1[AV1_MAX_TILE_ROWS];
+ uint16_t context_update_tile_id;
+ uint8_t tile_size_bytes_minus1;
+
+ // These are derived values, but it's very unhelpful to have to
+ // recalculate them all the time so we store them here.
+ uint16_t tile_cols;
+ uint16_t tile_rows;
+
+ uint8_t base_q_idx;
+ int8_t delta_q_y_dc;
+ uint8_t diff_uv_delta;
+ int8_t delta_q_u_dc;
+ int8_t delta_q_u_ac;
+ int8_t delta_q_v_dc;
+ int8_t delta_q_v_ac;
+ uint8_t using_qmatrix;
+ uint8_t qm_y;
+ uint8_t qm_u;
+ uint8_t qm_v;
+
+ uint8_t segmentation_enabled;
+ uint8_t segmentation_update_map;
+ uint8_t segmentation_temporal_update;
+ uint8_t segmentation_update_data;
+ uint8_t feature_enabled[AV1_MAX_SEGMENTS][AV1_SEG_LVL_MAX];
+ int16_t feature_value[AV1_MAX_SEGMENTS][AV1_SEG_LVL_MAX];
+
+ uint8_t delta_q_present;
+ uint8_t delta_q_res;
+ uint8_t delta_lf_present;
+ uint8_t delta_lf_res;
+ uint8_t delta_lf_multi;
+
+ uint8_t loop_filter_level[4];
+ uint8_t loop_filter_sharpness;
+ uint8_t loop_filter_delta_enabled;
+ uint8_t loop_filter_delta_update;
+ uint8_t update_ref_delta[AV1_TOTAL_REFS_PER_FRAME];
+ int8_t loop_filter_ref_deltas[AV1_TOTAL_REFS_PER_FRAME];
+ uint8_t update_mode_delta[2];
+ int8_t loop_filter_mode_deltas[2];
+
+ uint8_t cdef_damping_minus_3;
+ uint8_t cdef_bits;
+ uint8_t cdef_y_pri_strength[8];
+ uint8_t cdef_y_sec_strength[8];
+ uint8_t cdef_uv_pri_strength[8];
+ uint8_t cdef_uv_sec_strength[8];
+
+ uint8_t lr_type[3];
+ uint8_t lr_unit_shift;
+ uint8_t lr_uv_shift;
+
+ uint8_t tx_mode;
+ uint8_t reference_select;
+ uint8_t skip_mode_present;
+
+ uint8_t allow_warped_motion;
+ uint8_t reduced_tx_set;
+
+ uint8_t is_global[AV1_TOTAL_REFS_PER_FRAME];
+ uint8_t is_rot_zoom[AV1_TOTAL_REFS_PER_FRAME];
+ uint8_t is_translation[AV1_TOTAL_REFS_PER_FRAME];
+ //AV1RawSubexp gm_params[AV1_TOTAL_REFS_PER_FRAME][6];
+ uint32_t gm_params[AV1_TOTAL_REFS_PER_FRAME][6];
+
+ AV1RawFilmGrainParams film_grain;
+} AV1RawFrameHeader;
+
+typedef struct AV1RawTileData {
+ uint8_t *data;
+ AVBufferRef *data_ref;
+ size_t data_size;
+} AV1RawTileData;
+
+typedef struct AV1RawTileGroup {
+ uint8_t tile_start_and_end_present_flag;
+ uint16_t tg_start;
+ uint16_t tg_end;
+
+ AV1RawTileData tile_data;
+} AV1RawTileGroup;
+
+typedef struct AV1RawFrame {
+ AV1RawFrameHeader header;
+ AV1RawTileGroup tile_group;
+} AV1RawFrame;
+
+typedef struct AV1RawTileList {
+ uint8_t output_frame_width_in_tiles_minus_1;
+ uint8_t output_frame_height_in_tiles_minus_1;
+ uint16_t tile_count_minus_1;
+
+ AV1RawTileData tile_data;
+} AV1RawTileList;
+
+typedef struct AV1RawMetadataHDRCLL {
+ uint16_t max_cll;
+ uint16_t max_fall;
+} AV1RawMetadataHDRCLL;
+
+typedef struct AV1RawMetadataHDRMDCV {
+ uint16_t primary_chromaticity_x[3];
+ uint16_t primary_chromaticity_y[3];
+ uint16_t white_point_chromaticity_x;
+ uint16_t white_point_chromaticity_y;
+ uint32_t luminance_max;
+ uint32_t luminance_min;
+} AV1RawMetadataHDRMDCV;
+
+typedef struct AV1RawMetadataScalability {
+ uint8_t scalability_mode_idc;
+ uint8_t spatial_layers_cnt_minus_1;
+ uint8_t spatial_layer_dimensions_present_flag;
+ uint8_t spatial_layer_description_present_flag;
+ uint8_t temporal_group_description_present_flag;
+ uint8_t scalability_structure_reserved_3bits;
+ uint16_t spatial_layer_max_width[4];
+ uint16_t spatial_layer_max_height[4];
+ uint8_t spatial_layer_ref_id[4];
+ uint8_t temporal_group_size;
+ uint8_t temporal_group_temporal_id[255];
+ uint8_t temporal_group_temporal_switching_up_point_flag[255];
+ uint8_t temporal_group_spatial_switching_up_point_flag[255];
+ uint8_t temporal_group_ref_cnt[255];
+ uint8_t temporal_group_ref_pic_diff[255][7];
+} AV1RawMetadataScalability;
+
+typedef struct AV1RawMetadataITUTT35 {
+ uint8_t itu_t_t35_country_code;
+ uint8_t itu_t_t35_country_code_extension_byte;
+
+ uint8_t *payload;
+ AVBufferRef *payload_ref;
+ size_t payload_size;
+} AV1RawMetadataITUTT35;
+
+typedef struct AV1RawMetadataTimecode {
+ uint8_t counting_type;
+ uint8_t full_timestamp_flag;
+ uint8_t discontinuity_flag;
+ uint8_t cnt_dropped_flag;
+ uint16_t n_frames;
+ uint8_t seconds_value;
+ uint8_t minutes_value;
+ uint8_t hours_value;
+ uint8_t seconds_flag;
+ uint8_t minutes_flag;
+ uint8_t hours_flag;
+ uint8_t time_offset_length;
+ uint32_t time_offset_value;
+} AV1RawMetadataTimecode;
+
+typedef struct AV1RawMetadata {
+ uint64_t metadata_type;
+ union {
+ AV1RawMetadataHDRCLL hdr_cll;
+ AV1RawMetadataHDRMDCV hdr_mdcv;
+ AV1RawMetadataScalability scalability;
+ AV1RawMetadataITUTT35 itut_t35;
+ AV1RawMetadataTimecode timecode;
+ } metadata;
+} AV1RawMetadata;
+
+typedef struct AV1RawPadding {
+ uint8_t *payload;
+ AVBufferRef *payload_ref;
+ size_t payload_size;
+} AV1RawPadding;
+
+
+typedef struct AV1RawOBU {
+ AV1RawOBUHeader header;
+
+ size_t obu_size;
+
+ union {
+ AV1RawSequenceHeader sequence_header;
+ AV1RawFrameHeader frame_header;
+ AV1RawFrame frame;
+ AV1RawTileGroup tile_group;
+ AV1RawTileList tile_list;
+ AV1RawMetadata metadata;
+ AV1RawPadding padding;
+ } obu;
+} AV1RawOBU;
+
+typedef struct AV1ReferenceFrameState {
+ int valid; // RefValid
+ int frame_id; // RefFrameId
+ int upscaled_width; // RefUpscaledWidth
+ int frame_width; // RefFrameWidth
+ int frame_height; // RefFrameHeight
+ int render_width; // RefRenderWidth
+ int render_height; // RefRenderHeight
+ int frame_type; // RefFrameType
+ int subsampling_x; // RefSubsamplingX
+ int subsampling_y; // RefSubsamplingY
+ int bit_depth; // RefBitDepth
+ int order_hint; // RefOrderHint
+
+ int8_t loop_filter_ref_deltas[AV1_TOTAL_REFS_PER_FRAME];
+ int8_t loop_filter_mode_deltas[2];
+ uint8_t feature_enabled[AV1_MAX_SEGMENTS][AV1_SEG_LVL_MAX];
+ int16_t feature_value[AV1_MAX_SEGMENTS][AV1_SEG_LVL_MAX];
+} AV1ReferenceFrameState;
+
+typedef struct CodedBitstreamAV1Context {
+ const AVClass *class;
+
+ AV1RawSequenceHeader *sequence_header;
+ AVBufferRef *sequence_header_ref;
+
+ int seen_frame_header;
+ AVBufferRef *frame_header_ref;
+ uint8_t *frame_header;
+ size_t frame_header_size;
+
+ int temporal_id;
+ int spatial_id;
+ int operating_point_idc;
+
+ int bit_depth;
+ int order_hint;
+ int frame_width;
+ int frame_height;
+ int upscaled_width;
+ int render_width;
+ int render_height;
+
+ int num_planes;
+ int coded_lossless;
+ int all_lossless;
+ int tile_cols;
+ int tile_rows;
+ int tile_num;
+
+ AV1ReferenceFrameState ref[AV1_NUM_REF_FRAMES];
+
+ // AVOptions
+ int operating_point;
+} CodedBitstreamAV1Context;
+
+
+#endif /* AVCODEC_CBS_AV1_H */
diff --git a/media/ffvpx/libavcodec/cbs_av1_syntax_template.c b/media/ffvpx/libavcodec/cbs_av1_syntax_template.c
new file mode 100644
index 0000000000..e95925a493
--- /dev/null
+++ b/media/ffvpx/libavcodec/cbs_av1_syntax_template.c
@@ -0,0 +1,2050 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+static int FUNC(obu_header)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawOBUHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ int err;
+
+ HEADER("OBU header");
+
+ fc(1, obu_forbidden_bit, 0, 0);
+
+ fc(4, obu_type, 0, AV1_OBU_PADDING);
+ flag(obu_extension_flag);
+ flag(obu_has_size_field);
+
+ fc(1, obu_reserved_1bit, 0, 0);
+
+ if (current->obu_extension_flag) {
+ fb(3, temporal_id);
+ fb(2, spatial_id);
+ fc(3, extension_header_reserved_3bits, 0, 0);
+ } else {
+ infer(temporal_id, 0);
+ infer(spatial_id, 0);
+ }
+
+ priv->temporal_id = current->temporal_id;
+ priv->spatial_id = current->spatial_id;
+
+ return 0;
+}
+
+static int FUNC(trailing_bits)(CodedBitstreamContext *ctx, RWContext *rw, int nb_bits)
+{
+ int err;
+
+ av_assert0(nb_bits > 0);
+
+ fixed(1, trailing_one_bit, 1);
+ --nb_bits;
+
+ while (nb_bits > 0) {
+ fixed(1, trailing_zero_bit, 0);
+ --nb_bits;
+ }
+
+ return 0;
+}
+
+static int FUNC(byte_alignment)(CodedBitstreamContext *ctx, RWContext *rw)
+{
+ int err;
+
+ while (byte_alignment(rw) != 0)
+ fixed(1, zero_bit, 0);
+
+ return 0;
+}
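+
+/*
+ * Worked example (illustrative only, not part of this patch): a reader
+ * sitting at bit position 19 has byte_alignment(rw) == 3, so the loop above
+ * consumes five zero bits to reach the next byte boundary at position 24.
+ */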
+
+static int FUNC(color_config)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawColorConfig *current, int seq_profile)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ int err;
+
+ flag(high_bitdepth);
+
+ if (seq_profile == FF_PROFILE_AV1_PROFESSIONAL &&
+ current->high_bitdepth) {
+ flag(twelve_bit);
+ priv->bit_depth = current->twelve_bit ? 12 : 10;
+ } else {
+ priv->bit_depth = current->high_bitdepth ? 10 : 8;
+ }
+
+ if (seq_profile == FF_PROFILE_AV1_HIGH)
+ infer(mono_chrome, 0);
+ else
+ flag(mono_chrome);
+ priv->num_planes = current->mono_chrome ? 1 : 3;
+
+ flag(color_description_present_flag);
+ if (current->color_description_present_flag) {
+ fb(8, color_primaries);
+ fb(8, transfer_characteristics);
+ fb(8, matrix_coefficients);
+ } else {
+ infer(color_primaries, AVCOL_PRI_UNSPECIFIED);
+ infer(transfer_characteristics, AVCOL_TRC_UNSPECIFIED);
+ infer(matrix_coefficients, AVCOL_SPC_UNSPECIFIED);
+ }
+
+ if (current->mono_chrome) {
+ flag(color_range);
+
+ infer(subsampling_x, 1);
+ infer(subsampling_y, 1);
+ infer(chroma_sample_position, AV1_CSP_UNKNOWN);
+ infer(separate_uv_delta_q, 0);
+
+ } else if (current->color_primaries == AVCOL_PRI_BT709 &&
+ current->transfer_characteristics == AVCOL_TRC_IEC61966_2_1 &&
+ current->matrix_coefficients == AVCOL_SPC_RGB) {
+ infer(color_range, 1);
+ infer(subsampling_x, 0);
+ infer(subsampling_y, 0);
+ flag(separate_uv_delta_q);
+
+ } else {
+ flag(color_range);
+
+ if (seq_profile == FF_PROFILE_AV1_MAIN) {
+ infer(subsampling_x, 1);
+ infer(subsampling_y, 1);
+ } else if (seq_profile == FF_PROFILE_AV1_HIGH) {
+ infer(subsampling_x, 0);
+ infer(subsampling_y, 0);
+ } else {
+ if (priv->bit_depth == 12) {
+ fb(1, subsampling_x);
+ if (current->subsampling_x)
+ fb(1, subsampling_y);
+ else
+ infer(subsampling_y, 0);
+ } else {
+ infer(subsampling_x, 1);
+ infer(subsampling_y, 0);
+ }
+ }
+ if (current->subsampling_x && current->subsampling_y) {
+ fc(2, chroma_sample_position, AV1_CSP_UNKNOWN,
+ AV1_CSP_COLOCATED);
+ }
+
+ flag(separate_uv_delta_q);
+ }
+
+ return 0;
+}
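+
+/*
+ * The bit-depth resolution above collapses to a small mapping; a standalone
+ * restatement for reference (illustrative only, not part of this patch).
+ * demo_av1_bit_depth is a made-up name.
+ */
+static int demo_av1_bit_depth(int seq_profile, int high_bitdepth, int twelve_bit)
+{
+    if (seq_profile == FF_PROFILE_AV1_PROFESSIONAL && high_bitdepth)
+        return twelve_bit ? 12 : 10;
+    return high_bitdepth ? 10 : 8;
+}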
+
+static int FUNC(timing_info)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawTimingInfo *current)
+{
+ int err;
+
+ fc(32, num_units_in_display_tick, 1, MAX_UINT_BITS(32));
+ fc(32, time_scale, 1, MAX_UINT_BITS(32));
+
+ flag(equal_picture_interval);
+ if (current->equal_picture_interval)
+ uvlc(num_ticks_per_picture_minus_1, 0, MAX_UINT_BITS(32) - 1);
+
+ return 0;
+}
+
+static int FUNC(decoder_model_info)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawDecoderModelInfo *current)
+{
+ int err;
+
+ fb(5, buffer_delay_length_minus_1);
+ fb(32, num_units_in_decoding_tick);
+ fb(5, buffer_removal_time_length_minus_1);
+ fb(5, frame_presentation_time_length_minus_1);
+
+ return 0;
+}
+
+static int FUNC(sequence_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawSequenceHeader *current)
+{
+ int i, err;
+
+ HEADER("Sequence Header");
+
+ fc(3, seq_profile, FF_PROFILE_AV1_MAIN,
+ FF_PROFILE_AV1_PROFESSIONAL);
+ flag(still_picture);
+ flag(reduced_still_picture_header);
+
+ if (current->reduced_still_picture_header) {
+ infer(timing_info_present_flag, 0);
+ infer(decoder_model_info_present_flag, 0);
+ infer(initial_display_delay_present_flag, 0);
+ infer(operating_points_cnt_minus_1, 0);
+ infer(operating_point_idc[0], 0);
+
+ fb(5, seq_level_idx[0]);
+
+ infer(seq_tier[0], 0);
+ infer(decoder_model_present_for_this_op[0], 0);
+ infer(initial_display_delay_present_for_this_op[0], 0);
+
+ } else {
+ flag(timing_info_present_flag);
+ if (current->timing_info_present_flag) {
+ CHECK(FUNC(timing_info)(ctx, rw, &current->timing_info));
+
+ flag(decoder_model_info_present_flag);
+ if (current->decoder_model_info_present_flag) {
+ CHECK(FUNC(decoder_model_info)
+ (ctx, rw, &current->decoder_model_info));
+ }
+ } else {
+ infer(decoder_model_info_present_flag, 0);
+ }
+
+ flag(initial_display_delay_present_flag);
+
+ fb(5, operating_points_cnt_minus_1);
+ for (i = 0; i <= current->operating_points_cnt_minus_1; i++) {
+ fbs(12, operating_point_idc[i], 1, i);
+ fbs(5, seq_level_idx[i], 1, i);
+
+ if (current->seq_level_idx[i] > 7)
+ flags(seq_tier[i], 1, i);
+ else
+ infer(seq_tier[i], 0);
+
+ if (current->decoder_model_info_present_flag) {
+ flags(decoder_model_present_for_this_op[i], 1, i);
+ if (current->decoder_model_present_for_this_op[i]) {
+ int n = current->decoder_model_info.buffer_delay_length_minus_1 + 1;
+ fbs(n, decoder_buffer_delay[i], 1, i);
+ fbs(n, encoder_buffer_delay[i], 1, i);
+ flags(low_delay_mode_flag[i], 1, i);
+ }
+ } else {
+ infer(decoder_model_present_for_this_op[i], 0);
+ }
+
+ if (current->initial_display_delay_present_flag) {
+ flags(initial_display_delay_present_for_this_op[i], 1, i);
+ if (current->initial_display_delay_present_for_this_op[i])
+ fbs(4, initial_display_delay_minus_1[i], 1, i);
+ }
+ }
+ }
+
+ fb(4, frame_width_bits_minus_1);
+ fb(4, frame_height_bits_minus_1);
+
+ fb(current->frame_width_bits_minus_1 + 1, max_frame_width_minus_1);
+ fb(current->frame_height_bits_minus_1 + 1, max_frame_height_minus_1);
+
+ if (current->reduced_still_picture_header)
+ infer(frame_id_numbers_present_flag, 0);
+ else
+ flag(frame_id_numbers_present_flag);
+ if (current->frame_id_numbers_present_flag) {
+ fb(4, delta_frame_id_length_minus_2);
+ fb(3, additional_frame_id_length_minus_1);
+ }
+
+ flag(use_128x128_superblock);
+ flag(enable_filter_intra);
+ flag(enable_intra_edge_filter);
+
+ if (current->reduced_still_picture_header) {
+ infer(enable_interintra_compound, 0);
+ infer(enable_masked_compound, 0);
+ infer(enable_warped_motion, 0);
+ infer(enable_dual_filter, 0);
+ infer(enable_order_hint, 0);
+ infer(enable_jnt_comp, 0);
+ infer(enable_ref_frame_mvs, 0);
+
+ infer(seq_force_screen_content_tools,
+ AV1_SELECT_SCREEN_CONTENT_TOOLS);
+ infer(seq_force_integer_mv,
+ AV1_SELECT_INTEGER_MV);
+ } else {
+ flag(enable_interintra_compound);
+ flag(enable_masked_compound);
+ flag(enable_warped_motion);
+ flag(enable_dual_filter);
+
+ flag(enable_order_hint);
+ if (current->enable_order_hint) {
+ flag(enable_jnt_comp);
+ flag(enable_ref_frame_mvs);
+ } else {
+ infer(enable_jnt_comp, 0);
+ infer(enable_ref_frame_mvs, 0);
+ }
+
+ flag(seq_choose_screen_content_tools);
+ if (current->seq_choose_screen_content_tools)
+ infer(seq_force_screen_content_tools,
+ AV1_SELECT_SCREEN_CONTENT_TOOLS);
+ else
+ fb(1, seq_force_screen_content_tools);
+ if (current->seq_force_screen_content_tools > 0) {
+ flag(seq_choose_integer_mv);
+ if (current->seq_choose_integer_mv)
+ infer(seq_force_integer_mv,
+ AV1_SELECT_INTEGER_MV);
+ else
+ fb(1, seq_force_integer_mv);
+ } else {
+ infer(seq_force_integer_mv, AV1_SELECT_INTEGER_MV);
+ }
+
+ if (current->enable_order_hint)
+ fb(3, order_hint_bits_minus_1);
+ }
+
+ flag(enable_superres);
+ flag(enable_cdef);
+ flag(enable_restoration);
+
+ CHECK(FUNC(color_config)(ctx, rw, &current->color_config,
+ current->seq_profile));
+
+ flag(film_grain_params_present);
+
+ return 0;
+}
+
+static int FUNC(temporal_delimiter_obu)(CodedBitstreamContext *ctx, RWContext *rw)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+
+ HEADER("Temporal Delimiter");
+
+ priv->seen_frame_header = 0;
+
+ return 0;
+}
+
+static int FUNC(set_frame_refs)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq = priv->sequence_header;
+ static const uint8_t ref_frame_list[AV1_NUM_REF_FRAMES - 2] = {
+ AV1_REF_FRAME_LAST2, AV1_REF_FRAME_LAST3, AV1_REF_FRAME_BWDREF,
+ AV1_REF_FRAME_ALTREF2, AV1_REF_FRAME_ALTREF
+ };
+ int8_t ref_frame_idx[AV1_REFS_PER_FRAME], used_frame[AV1_NUM_REF_FRAMES];
+ int16_t shifted_order_hints[AV1_NUM_REF_FRAMES];
+ int cur_frame_hint, latest_order_hint, earliest_order_hint, ref;
+ int i, j;
+
+ for (i = 0; i < AV1_REFS_PER_FRAME; i++)
+ ref_frame_idx[i] = -1;
+ ref_frame_idx[AV1_REF_FRAME_LAST - AV1_REF_FRAME_LAST] = current->last_frame_idx;
+ ref_frame_idx[AV1_REF_FRAME_GOLDEN - AV1_REF_FRAME_LAST] = current->golden_frame_idx;
+
+ for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
+ used_frame[i] = 0;
+ used_frame[current->last_frame_idx] = 1;
+ used_frame[current->golden_frame_idx] = 1;
+
+ cur_frame_hint = 1 << (seq->order_hint_bits_minus_1);
+ for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
+ shifted_order_hints[i] = cur_frame_hint +
+ cbs_av1_get_relative_dist(seq, priv->ref[i].order_hint,
+ priv->order_hint);
+
+ latest_order_hint = shifted_order_hints[current->last_frame_idx];
+ earliest_order_hint = shifted_order_hints[current->golden_frame_idx];
+
+ ref = -1;
+ for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ int hint = shifted_order_hints[i];
+ if (!used_frame[i] && hint >= cur_frame_hint &&
+ (ref < 0 || hint >= latest_order_hint)) {
+ ref = i;
+ latest_order_hint = hint;
+ }
+ }
+ if (ref >= 0) {
+ ref_frame_idx[AV1_REF_FRAME_ALTREF - AV1_REF_FRAME_LAST] = ref;
+ used_frame[ref] = 1;
+ }
+
+ ref = -1;
+ for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ int hint = shifted_order_hints[i];
+ if (!used_frame[i] && hint >= cur_frame_hint &&
+ (ref < 0 || hint < earliest_order_hint)) {
+ ref = i;
+ earliest_order_hint = hint;
+ }
+ }
+ if (ref >= 0) {
+ ref_frame_idx[AV1_REF_FRAME_BWDREF - AV1_REF_FRAME_LAST] = ref;
+ used_frame[ref] = 1;
+ }
+
+ ref = -1;
+ for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ int hint = shifted_order_hints[i];
+ if (!used_frame[i] && hint >= cur_frame_hint &&
+ (ref < 0 || hint < earliest_order_hint)) {
+ ref = i;
+ earliest_order_hint = hint;
+ }
+ }
+ if (ref >= 0) {
+ ref_frame_idx[AV1_REF_FRAME_ALTREF2 - AV1_REF_FRAME_LAST] = ref;
+ used_frame[ref] = 1;
+ }
+
+ for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) {
+ int ref_frame = ref_frame_list[i];
+ if (ref_frame_idx[ref_frame - AV1_REF_FRAME_LAST] < 0 ) {
+ ref = -1;
+ for (j = 0; j < AV1_NUM_REF_FRAMES; j++) {
+ int hint = shifted_order_hints[j];
+ if (!used_frame[j] && hint < cur_frame_hint &&
+ (ref < 0 || hint >= latest_order_hint)) {
+ ref = j;
+ latest_order_hint = hint;
+ }
+ }
+ if (ref >= 0) {
+ ref_frame_idx[ref_frame - AV1_REF_FRAME_LAST] = ref;
+ used_frame[ref] = 1;
+ }
+ }
+ }
+
+ ref = -1;
+ for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ int hint = shifted_order_hints[i];
+ if (ref < 0 || hint < earliest_order_hint) {
+ ref = i;
+ earliest_order_hint = hint;
+ }
+ }
+ for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
+ if (ref_frame_idx[i] < 0)
+ ref_frame_idx[i] = ref;
+ infer(ref_frame_idx[i], ref_frame_idx[i]);
+ }
+
+ return 0;
+}
+
+static int FUNC(superres_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq = priv->sequence_header;
+ int denom, err;
+
+ if (seq->enable_superres)
+ flag(use_superres);
+ else
+ infer(use_superres, 0);
+
+ if (current->use_superres) {
+ fb(3, coded_denom);
+ denom = current->coded_denom + AV1_SUPERRES_DENOM_MIN;
+ } else {
+ denom = AV1_SUPERRES_NUM;
+ }
+
+ priv->upscaled_width = priv->frame_width;
+ priv->frame_width = (priv->upscaled_width * AV1_SUPERRES_NUM +
+ denom / 2) / denom;
+
+ return 0;
+}
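+
+/*
+ * Worked example for the scaling above (illustrative only, not part of this
+ * patch): with use_superres set and coded_denom = 7, denom = 7 + 9 = 16, so
+ * an upscaled width of 1920 becomes (1920 * 8 + 16 / 2) / 16 = 960 coded
+ * pixels after integer division.
+ */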
+
+static int FUNC(frame_size)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq = priv->sequence_header;
+ int err;
+
+ if (current->frame_size_override_flag) {
+ fb(seq->frame_width_bits_minus_1 + 1, frame_width_minus_1);
+ fb(seq->frame_height_bits_minus_1 + 1, frame_height_minus_1);
+ } else {
+ infer(frame_width_minus_1, seq->max_frame_width_minus_1);
+ infer(frame_height_minus_1, seq->max_frame_height_minus_1);
+ }
+
+ priv->frame_width = current->frame_width_minus_1 + 1;
+ priv->frame_height = current->frame_height_minus_1 + 1;
+
+ CHECK(FUNC(superres_params)(ctx, rw, current));
+
+ return 0;
+}
+
+static int FUNC(render_size)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ int err;
+
+ flag(render_and_frame_size_different);
+
+ if (current->render_and_frame_size_different) {
+ fb(16, render_width_minus_1);
+ fb(16, render_height_minus_1);
+ } else {
+ infer(render_width_minus_1, current->frame_width_minus_1);
+ infer(render_height_minus_1, current->frame_height_minus_1);
+ }
+
+ priv->render_width = current->render_width_minus_1 + 1;
+ priv->render_height = current->render_height_minus_1 + 1;
+
+ return 0;
+}
+
+static int FUNC(frame_size_with_refs)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ int i, err;
+
+ for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
+ flags(found_ref[i], 1, i);
+ if (current->found_ref[i]) {
+ AV1ReferenceFrameState *ref =
+ &priv->ref[current->ref_frame_idx[i]];
+
+ if (!ref->valid) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR,
+ "Missing reference frame needed for frame size "
+ "(ref = %d, ref_frame_idx = %d).\n",
+ i, current->ref_frame_idx[i]);
+ return AVERROR_INVALIDDATA;
+ }
+
+ infer(frame_width_minus_1, ref->upscaled_width - 1);
+ infer(frame_height_minus_1, ref->frame_height - 1);
+ infer(render_width_minus_1, ref->render_width - 1);
+ infer(render_height_minus_1, ref->render_height - 1);
+
+ priv->upscaled_width = ref->upscaled_width;
+ priv->frame_width = priv->upscaled_width;
+ priv->frame_height = ref->frame_height;
+ priv->render_width = ref->render_width;
+ priv->render_height = ref->render_height;
+ break;
+ }
+ }
+
+ if (i >= AV1_REFS_PER_FRAME) {
+ CHECK(FUNC(frame_size)(ctx, rw, current));
+ CHECK(FUNC(render_size)(ctx, rw, current));
+ } else {
+ CHECK(FUNC(superres_params)(ctx, rw, current));
+ }
+
+ return 0;
+}
+
+static int FUNC(interpolation_filter)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ int err;
+
+ flag(is_filter_switchable);
+ if (current->is_filter_switchable)
+ infer(interpolation_filter,
+ AV1_INTERPOLATION_FILTER_SWITCHABLE);
+ else
+ fb(2, interpolation_filter);
+
+ return 0;
+}
+
+static int FUNC(tile_info)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq = priv->sequence_header;
+ int mi_cols, mi_rows, sb_cols, sb_rows, sb_shift, sb_size;
+ int max_tile_width_sb, max_tile_height_sb, max_tile_area_sb;
+ int min_log2_tile_cols, max_log2_tile_cols, max_log2_tile_rows;
+ int min_log2_tiles, min_log2_tile_rows;
+ int i, err;
+
+ mi_cols = 2 * ((priv->frame_width + 7) >> 3);
+ mi_rows = 2 * ((priv->frame_height + 7) >> 3);
+
+ sb_cols = seq->use_128x128_superblock ? ((mi_cols + 31) >> 5)
+ : ((mi_cols + 15) >> 4);
+ sb_rows = seq->use_128x128_superblock ? ((mi_rows + 31) >> 5)
+ : ((mi_rows + 15) >> 4);
+
+ sb_shift = seq->use_128x128_superblock ? 5 : 4;
+ sb_size = sb_shift + 2;
+
+ max_tile_width_sb = AV1_MAX_TILE_WIDTH >> sb_size;
+ max_tile_area_sb = AV1_MAX_TILE_AREA >> (2 * sb_size);
+
+ min_log2_tile_cols = cbs_av1_tile_log2(max_tile_width_sb, sb_cols);
+ max_log2_tile_cols = cbs_av1_tile_log2(1, FFMIN(sb_cols, AV1_MAX_TILE_COLS));
+ max_log2_tile_rows = cbs_av1_tile_log2(1, FFMIN(sb_rows, AV1_MAX_TILE_ROWS));
+ min_log2_tiles = FFMAX(min_log2_tile_cols,
+ cbs_av1_tile_log2(max_tile_area_sb, sb_rows * sb_cols));
+
+ flag(uniform_tile_spacing_flag);
+
+ if (current->uniform_tile_spacing_flag) {
+ int tile_width_sb, tile_height_sb;
+
+ increment(tile_cols_log2, min_log2_tile_cols, max_log2_tile_cols);
+
+ tile_width_sb = (sb_cols + (1 << current->tile_cols_log2) - 1) >>
+ current->tile_cols_log2;
+ current->tile_cols = (sb_cols + tile_width_sb - 1) / tile_width_sb;
+
+ min_log2_tile_rows = FFMAX(min_log2_tiles - current->tile_cols_log2, 0);
+
+ increment(tile_rows_log2, min_log2_tile_rows, max_log2_tile_rows);
+
+ tile_height_sb = (sb_rows + (1 << current->tile_rows_log2) - 1) >>
+ current->tile_rows_log2;
+ current->tile_rows = (sb_rows + tile_height_sb - 1) / tile_height_sb;
+
+ for (i = 0; i < current->tile_cols - 1; i++)
+ infer(width_in_sbs_minus_1[i], tile_width_sb - 1);
+ infer(width_in_sbs_minus_1[i],
+ sb_cols - (current->tile_cols - 1) * tile_width_sb - 1);
+ for (i = 0; i < current->tile_rows - 1; i++)
+ infer(height_in_sbs_minus_1[i], tile_height_sb - 1);
+ infer(height_in_sbs_minus_1[i],
+ sb_rows - (current->tile_rows - 1) * tile_height_sb - 1);
+
+ } else {
+ int widest_tile_sb, start_sb, size_sb, max_width, max_height;
+
+ widest_tile_sb = 0;
+
+ start_sb = 0;
+ for (i = 0; start_sb < sb_cols && i < AV1_MAX_TILE_COLS; i++) {
+ max_width = FFMIN(sb_cols - start_sb, max_tile_width_sb);
+ ns(max_width, width_in_sbs_minus_1[i], 1, i);
+ size_sb = current->width_in_sbs_minus_1[i] + 1;
+ widest_tile_sb = FFMAX(size_sb, widest_tile_sb);
+ start_sb += size_sb;
+ }
+ current->tile_cols_log2 = cbs_av1_tile_log2(1, i);
+ current->tile_cols = i;
+
+ if (min_log2_tiles > 0)
+ max_tile_area_sb = (sb_rows * sb_cols) >> (min_log2_tiles + 1);
+ else
+ max_tile_area_sb = sb_rows * sb_cols;
+ max_tile_height_sb = FFMAX(max_tile_area_sb / widest_tile_sb, 1);
+
+ start_sb = 0;
+ for (i = 0; start_sb < sb_rows && i < AV1_MAX_TILE_ROWS; i++) {
+ max_height = FFMIN(sb_rows - start_sb, max_tile_height_sb);
+ ns(max_height, height_in_sbs_minus_1[i], 1, i);
+ size_sb = current->height_in_sbs_minus_1[i] + 1;
+ start_sb += size_sb;
+ }
+ current->tile_rows_log2 = cbs_av1_tile_log2(1, i);
+ current->tile_rows = i;
+ }
+
+ if (current->tile_cols_log2 > 0 ||
+ current->tile_rows_log2 > 0) {
+ fb(current->tile_cols_log2 + current->tile_rows_log2,
+ context_update_tile_id);
+ fb(2, tile_size_bytes_minus1);
+ } else {
+ infer(context_update_tile_id, 0);
+ }
+
+ priv->tile_cols = current->tile_cols;
+ priv->tile_rows = current->tile_rows;
+
+ return 0;
+}
+
+static int FUNC(quantization_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq = priv->sequence_header;
+ int err;
+
+ fb(8, base_q_idx);
+
+ delta_q(delta_q_y_dc);
+
+ if (priv->num_planes > 1) {
+ if (seq->color_config.separate_uv_delta_q)
+ flag(diff_uv_delta);
+ else
+ infer(diff_uv_delta, 0);
+
+ delta_q(delta_q_u_dc);
+ delta_q(delta_q_u_ac);
+
+ if (current->diff_uv_delta) {
+ delta_q(delta_q_v_dc);
+ delta_q(delta_q_v_ac);
+ } else {
+ infer(delta_q_v_dc, current->delta_q_u_dc);
+ infer(delta_q_v_ac, current->delta_q_u_ac);
+ }
+ } else {
+ infer(delta_q_u_dc, 0);
+ infer(delta_q_u_ac, 0);
+ infer(delta_q_v_dc, 0);
+ infer(delta_q_v_ac, 0);
+ }
+
+ flag(using_qmatrix);
+ if (current->using_qmatrix) {
+ fb(4, qm_y);
+ fb(4, qm_u);
+ if (seq->color_config.separate_uv_delta_q)
+ fb(4, qm_v);
+ else
+ infer(qm_v, current->qm_u);
+ }
+
+ return 0;
+}
+
+static int FUNC(segmentation_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ static const uint8_t bits[AV1_SEG_LVL_MAX] = { 8, 6, 6, 6, 6, 3, 0, 0 };
+ static const uint8_t sign[AV1_SEG_LVL_MAX] = { 1, 1, 1, 1, 1, 0, 0, 0 };
+ static const uint8_t default_feature_enabled[AV1_SEG_LVL_MAX] = { 0 };
+ static const int16_t default_feature_value[AV1_SEG_LVL_MAX] = { 0 };
+ int i, j, err;
+
+ flag(segmentation_enabled);
+
+ if (current->segmentation_enabled) {
+ if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
+ infer(segmentation_update_map, 1);
+ infer(segmentation_temporal_update, 0);
+ infer(segmentation_update_data, 1);
+ } else {
+ flag(segmentation_update_map);
+ if (current->segmentation_update_map)
+ flag(segmentation_temporal_update);
+ else
+ infer(segmentation_temporal_update, 0);
+ flag(segmentation_update_data);
+ }
+
+ for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
+ const uint8_t *ref_feature_enabled;
+ const int16_t *ref_feature_value;
+
+ if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
+ ref_feature_enabled = default_feature_enabled;
+ ref_feature_value = default_feature_value;
+ } else {
+ ref_feature_enabled =
+ priv->ref[current->ref_frame_idx[current->primary_ref_frame]].feature_enabled[i];
+ ref_feature_value =
+ priv->ref[current->ref_frame_idx[current->primary_ref_frame]].feature_value[i];
+ }
+
+ for (j = 0; j < AV1_SEG_LVL_MAX; j++) {
+ if (current->segmentation_update_data) {
+ flags(feature_enabled[i][j], 2, i, j);
+
+ if (current->feature_enabled[i][j] && bits[j] > 0) {
+ if (sign[j])
+ sus(1 + bits[j], feature_value[i][j], 2, i, j);
+ else
+ fbs(bits[j], feature_value[i][j], 2, i, j);
+ } else {
+ infer(feature_value[i][j], 0);
+ }
+ } else {
+ infer(feature_enabled[i][j], ref_feature_enabled[j]);
+ infer(feature_value[i][j], ref_feature_value[j]);
+ }
+ }
+ }
+ } else {
+ for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
+ for (j = 0; j < AV1_SEG_LVL_MAX; j++) {
+ infer(feature_enabled[i][j], 0);
+ infer(feature_value[i][j], 0);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int FUNC(delta_q_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ int err;
+
+ if (current->base_q_idx > 0)
+ flag(delta_q_present);
+ else
+ infer(delta_q_present, 0);
+
+ if (current->delta_q_present)
+ fb(2, delta_q_res);
+
+ return 0;
+}
+
+static int FUNC(delta_lf_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ int err;
+
+ if (current->delta_q_present) {
+ if (!current->allow_intrabc)
+ flag(delta_lf_present);
+ else
+ infer(delta_lf_present, 0);
+ if (current->delta_lf_present) {
+ fb(2, delta_lf_res);
+ flag(delta_lf_multi);
+ } else {
+ infer(delta_lf_res, 0);
+ infer(delta_lf_multi, 0);
+ }
+ } else {
+ infer(delta_lf_present, 0);
+ infer(delta_lf_res, 0);
+ infer(delta_lf_multi, 0);
+ }
+
+ return 0;
+}
+
+static int FUNC(loop_filter_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ static const int8_t default_loop_filter_ref_deltas[AV1_TOTAL_REFS_PER_FRAME] =
+ { 1, 0, 0, 0, -1, 0, -1, -1 };
+ static const int8_t default_loop_filter_mode_deltas[2] = { 0, 0 };
+ int i, err;
+
+ if (priv->coded_lossless || current->allow_intrabc) {
+ infer(loop_filter_level[0], 0);
+ infer(loop_filter_level[1], 0);
+ infer(loop_filter_ref_deltas[AV1_REF_FRAME_INTRA], 1);
+ infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST], 0);
+ infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST2], 0);
+ infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST3], 0);
+ infer(loop_filter_ref_deltas[AV1_REF_FRAME_BWDREF], 0);
+ infer(loop_filter_ref_deltas[AV1_REF_FRAME_GOLDEN], -1);
+ infer(loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF], -1);
+ infer(loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF2], -1);
+ for (i = 0; i < 2; i++)
+ infer(loop_filter_mode_deltas[i], 0);
+ return 0;
+ }
+
+ fb(6, loop_filter_level[0]);
+ fb(6, loop_filter_level[1]);
+
+ if (priv->num_planes > 1) {
+ if (current->loop_filter_level[0] ||
+ current->loop_filter_level[1]) {
+ fb(6, loop_filter_level[2]);
+ fb(6, loop_filter_level[3]);
+ }
+ }
+
+ fb(3, loop_filter_sharpness);
+
+ flag(loop_filter_delta_enabled);
+ if (current->loop_filter_delta_enabled) {
+ const int8_t *ref_loop_filter_ref_deltas, *ref_loop_filter_mode_deltas;
+
+ if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
+ ref_loop_filter_ref_deltas = default_loop_filter_ref_deltas;
+ ref_loop_filter_mode_deltas = default_loop_filter_mode_deltas;
+ } else {
+ ref_loop_filter_ref_deltas =
+ priv->ref[current->ref_frame_idx[current->primary_ref_frame]].loop_filter_ref_deltas;
+ ref_loop_filter_mode_deltas =
+ priv->ref[current->ref_frame_idx[current->primary_ref_frame]].loop_filter_mode_deltas;
+ }
+
+ flag(loop_filter_delta_update);
+ for (i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
+ if (current->loop_filter_delta_update)
+ flags(update_ref_delta[i], 1, i);
+ else
+ infer(update_ref_delta[i], 0);
+ if (current->update_ref_delta[i])
+ sus(1 + 6, loop_filter_ref_deltas[i], 1, i);
+ else
+ infer(loop_filter_ref_deltas[i], ref_loop_filter_ref_deltas[i]);
+ }
+ for (i = 0; i < 2; i++) {
+ if (current->loop_filter_delta_update)
+ flags(update_mode_delta[i], 1, i);
+ else
+ infer(update_mode_delta[i], 0);
+ if (current->update_mode_delta[i])
+ sus(1 + 6, loop_filter_mode_deltas[i], 1, i);
+ else
+ infer(loop_filter_mode_deltas[i], ref_loop_filter_mode_deltas[i]);
+ }
+ } else {
+ for (i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++)
+ infer(loop_filter_ref_deltas[i], default_loop_filter_ref_deltas[i]);
+ for (i = 0; i < 2; i++)
+ infer(loop_filter_mode_deltas[i], default_loop_filter_mode_deltas[i]);
+ }
+
+ return 0;
+}
+
+static int FUNC(cdef_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq = priv->sequence_header;
+ int i, err;
+
+ if (priv->coded_lossless || current->allow_intrabc ||
+ !seq->enable_cdef) {
+ infer(cdef_damping_minus_3, 0);
+ infer(cdef_bits, 0);
+ infer(cdef_y_pri_strength[0], 0);
+ infer(cdef_y_sec_strength[0], 0);
+ infer(cdef_uv_pri_strength[0], 0);
+ infer(cdef_uv_sec_strength[0], 0);
+
+ return 0;
+ }
+
+ fb(2, cdef_damping_minus_3);
+ fb(2, cdef_bits);
+
+ for (i = 0; i < (1 << current->cdef_bits); i++) {
+ fbs(4, cdef_y_pri_strength[i], 1, i);
+ fbs(2, cdef_y_sec_strength[i], 1, i);
+
+ if (priv->num_planes > 1) {
+ fbs(4, cdef_uv_pri_strength[i], 1, i);
+ fbs(2, cdef_uv_sec_strength[i], 1, i);
+ }
+ }
+
+ return 0;
+}
+
+static int FUNC(lr_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq = priv->sequence_header;
+ int uses_lr, uses_chroma_lr;
+ int i, err;
+
+ if (priv->all_lossless || current->allow_intrabc ||
+ !seq->enable_restoration) {
+ return 0;
+ }
+
+ uses_lr = uses_chroma_lr = 0;
+ for (i = 0; i < priv->num_planes; i++) {
+ fbs(2, lr_type[i], 1, i);
+
+ if (current->lr_type[i] != AV1_RESTORE_NONE) {
+ uses_lr = 1;
+ if (i > 0)
+ uses_chroma_lr = 1;
+ }
+ }
+
+ if (uses_lr) {
+ if (seq->use_128x128_superblock)
+ increment(lr_unit_shift, 1, 2);
+ else
+ increment(lr_unit_shift, 0, 2);
+
+        if (seq->color_config.subsampling_x &&
+ seq->color_config.subsampling_y && uses_chroma_lr) {
+ fb(1, lr_uv_shift);
+ } else {
+ infer(lr_uv_shift, 0);
+ }
+ }
+
+ return 0;
+}
+
+static int FUNC(read_tx_mode)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ int err;
+
+ if (priv->coded_lossless)
+ infer(tx_mode, 0);
+ else
+ increment(tx_mode, 1, 2);
+
+ return 0;
+}
+
+static int FUNC(frame_reference_mode)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ int err;
+
+ if (current->frame_type == AV1_FRAME_INTRA_ONLY ||
+ current->frame_type == AV1_FRAME_KEY)
+ infer(reference_select, 0);
+ else
+ flag(reference_select);
+
+ return 0;
+}
+
+static int FUNC(skip_mode_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq = priv->sequence_header;
+ int skip_mode_allowed;
+ int err;
+
+ if (current->frame_type == AV1_FRAME_KEY ||
+ current->frame_type == AV1_FRAME_INTRA_ONLY ||
+ !current->reference_select || !seq->enable_order_hint) {
+ skip_mode_allowed = 0;
+ } else {
+ int forward_idx, backward_idx;
+ int forward_hint, backward_hint;
+ int ref_hint, dist, i;
+
+ forward_idx = -1;
+ backward_idx = -1;
+ for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
+ ref_hint = priv->ref[current->ref_frame_idx[i]].order_hint;
+ dist = cbs_av1_get_relative_dist(seq, ref_hint,
+ priv->order_hint);
+ if (dist < 0) {
+ if (forward_idx < 0 ||
+ cbs_av1_get_relative_dist(seq, ref_hint,
+ forward_hint) > 0) {
+ forward_idx = i;
+ forward_hint = ref_hint;
+ }
+ } else if (dist > 0) {
+ if (backward_idx < 0 ||
+ cbs_av1_get_relative_dist(seq, ref_hint,
+ backward_hint) < 0) {
+ backward_idx = i;
+ backward_hint = ref_hint;
+ }
+ }
+ }
+
+ if (forward_idx < 0) {
+ skip_mode_allowed = 0;
+ } else if (backward_idx >= 0) {
+ skip_mode_allowed = 1;
+ // Frames for skip mode are forward_idx and backward_idx.
+ } else {
+ int second_forward_idx;
+ int second_forward_hint;
+
+ second_forward_idx = -1;
+ for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
+ ref_hint = priv->ref[current->ref_frame_idx[i]].order_hint;
+ if (cbs_av1_get_relative_dist(seq, ref_hint,
+ forward_hint) < 0) {
+ if (second_forward_idx < 0 ||
+ cbs_av1_get_relative_dist(seq, ref_hint,
+ second_forward_hint) > 0) {
+ second_forward_idx = i;
+ second_forward_hint = ref_hint;
+ }
+ }
+ }
+
+ if (second_forward_idx < 0) {
+ skip_mode_allowed = 0;
+ } else {
+ skip_mode_allowed = 1;
+ // Frames for skip mode are forward_idx and second_forward_idx.
+ }
+ }
+ }
+
+ if (skip_mode_allowed)
+ flag(skip_mode_present);
+ else
+ infer(skip_mode_present, 0);
+
+ return 0;
+}
+
+static int FUNC(global_motion_param)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current,
+ int type, int ref, int idx)
+{
+ uint32_t abs_bits, prec_bits, num_syms;
+ int err;
+
+ if (idx < 2) {
+ if (type == AV1_WARP_MODEL_TRANSLATION) {
+ abs_bits = AV1_GM_ABS_TRANS_ONLY_BITS - !current->allow_high_precision_mv;
+ prec_bits = AV1_GM_TRANS_ONLY_PREC_BITS - !current->allow_high_precision_mv;
+ } else {
+ abs_bits = AV1_GM_ABS_TRANS_BITS;
+ prec_bits = AV1_GM_TRANS_PREC_BITS;
+ }
+ } else {
+ abs_bits = AV1_GM_ABS_ALPHA_BITS;
+ prec_bits = AV1_GM_ALPHA_PREC_BITS;
+ }
+
+ num_syms = 2 * (1 << abs_bits) + 1;
+ subexp(gm_params[ref][idx], num_syms, 2, ref, idx);
+
+ // Actual gm_params value is not reconstructed here.
+ (void)prec_bits;
+
+ return 0;
+}
+
+static int FUNC(global_motion_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ int ref, type;
+ int err;
+
+ if (current->frame_type == AV1_FRAME_KEY ||
+ current->frame_type == AV1_FRAME_INTRA_ONLY)
+ return 0;
+
+ for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
+ flags(is_global[ref], 1, ref);
+ if (current->is_global[ref]) {
+ flags(is_rot_zoom[ref], 1, ref);
+ if (current->is_rot_zoom[ref]) {
+ type = AV1_WARP_MODEL_ROTZOOM;
+ } else {
+ flags(is_translation[ref], 1, ref);
+ type = current->is_translation[ref] ? AV1_WARP_MODEL_TRANSLATION
+ : AV1_WARP_MODEL_AFFINE;
+ }
+ } else {
+ type = AV1_WARP_MODEL_IDENTITY;
+ }
+
+ if (type >= AV1_WARP_MODEL_ROTZOOM) {
+ CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 2));
+ CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 3));
+ if (type == AV1_WARP_MODEL_AFFINE) {
+ CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 4));
+ CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 5));
+ } else {
+ // gm_params[ref][4] = -gm_params[ref][3]
+ // gm_params[ref][5] = gm_params[ref][2]
+ }
+ }
+ if (type >= AV1_WARP_MODEL_TRANSLATION) {
+ CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 0));
+ CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 1));
+ }
+ }
+
+ return 0;
+}
+
+static int FUNC(film_grain_params)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFilmGrainParams *current,
+ AV1RawFrameHeader *frame_header)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq = priv->sequence_header;
+ int num_pos_luma, num_pos_chroma;
+ int i, err;
+
+ if (!seq->film_grain_params_present ||
+ (!frame_header->show_frame && !frame_header->showable_frame))
+ return 0;
+
+ flag(apply_grain);
+
+ if (!current->apply_grain)
+ return 0;
+
+ fb(16, grain_seed);
+
+ if (frame_header->frame_type == AV1_FRAME_INTER)
+ flag(update_grain);
+ else
+ infer(update_grain, 1);
+
+ if (!current->update_grain) {
+ fb(3, film_grain_params_ref_idx);
+ return 0;
+ }
+
+ fc(4, num_y_points, 0, 14);
+ for (i = 0; i < current->num_y_points; i++) {
+ fcs(8, point_y_value[i],
+ i ? current->point_y_value[i - 1] + 1 : 0,
+ MAX_UINT_BITS(8) - (current->num_y_points - i - 1),
+ 1, i);
+ fbs(8, point_y_scaling[i], 1, i);
+ }
+
+ if (seq->color_config.mono_chrome)
+ infer(chroma_scaling_from_luma, 0);
+ else
+ flag(chroma_scaling_from_luma);
+
+ if (seq->color_config.mono_chrome ||
+ current->chroma_scaling_from_luma ||
+ (seq->color_config.subsampling_x == 1 &&
+ seq->color_config.subsampling_y == 1 &&
+ current->num_y_points == 0)) {
+ infer(num_cb_points, 0);
+ infer(num_cr_points, 0);
+ } else {
+ fc(4, num_cb_points, 0, 10);
+ for (i = 0; i < current->num_cb_points; i++) {
+ fcs(8, point_cb_value[i],
+ i ? current->point_cb_value[i - 1] + 1 : 0,
+ MAX_UINT_BITS(8) - (current->num_cb_points - i - 1),
+ 1, i);
+ fbs(8, point_cb_scaling[i], 1, i);
+ }
+ fc(4, num_cr_points, 0, 10);
+ for (i = 0; i < current->num_cr_points; i++) {
+ fcs(8, point_cr_value[i],
+ i ? current->point_cr_value[i - 1] + 1 : 0,
+ MAX_UINT_BITS(8) - (current->num_cr_points - i - 1),
+ 1, i);
+ fbs(8, point_cr_scaling[i], 1, i);
+ }
+ }
+
+ fb(2, grain_scaling_minus_8);
+ fb(2, ar_coeff_lag);
+ num_pos_luma = 2 * current->ar_coeff_lag * (current->ar_coeff_lag + 1);
+ if (current->num_y_points) {
+ num_pos_chroma = num_pos_luma + 1;
+ for (i = 0; i < num_pos_luma; i++)
+ fbs(8, ar_coeffs_y_plus_128[i], 1, i);
+ } else {
+ num_pos_chroma = num_pos_luma;
+ }
+ if (current->chroma_scaling_from_luma || current->num_cb_points) {
+ for (i = 0; i < num_pos_chroma; i++)
+ fbs(8, ar_coeffs_cb_plus_128[i], 1, i);
+ }
+ if (current->chroma_scaling_from_luma || current->num_cr_points) {
+ for (i = 0; i < num_pos_chroma; i++)
+ fbs(8, ar_coeffs_cr_plus_128[i], 1, i);
+ }
+ fb(2, ar_coeff_shift_minus_6);
+ fb(2, grain_scale_shift);
+ if (current->num_cb_points) {
+ fb(8, cb_mult);
+ fb(8, cb_luma_mult);
+ fb(9, cb_offset);
+ }
+ if (current->num_cr_points) {
+ fb(8, cr_mult);
+ fb(8, cr_luma_mult);
+ fb(9, cr_offset);
+ }
+
+ flag(overlap_flag);
+ flag(clip_to_restricted_range);
+
+ return 0;
+}
+
+static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq;
+ int id_len, diff_len, all_frames, frame_is_intra, order_hint_bits;
+ int i, err;
+
+ if (!priv->sequence_header) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "No sequence header available: "
+ "unable to decode frame header.\n");
+ return AVERROR_INVALIDDATA;
+ }
+ seq = priv->sequence_header;
+
+ id_len = seq->additional_frame_id_length_minus_1 +
+ seq->delta_frame_id_length_minus_2 + 3;
+ all_frames = (1 << AV1_NUM_REF_FRAMES) - 1;
+
+ if (seq->reduced_still_picture_header) {
+ infer(show_existing_frame, 0);
+ infer(frame_type, AV1_FRAME_KEY);
+ infer(show_frame, 1);
+ infer(showable_frame, 0);
+ frame_is_intra = 1;
+
+ } else {
+ flag(show_existing_frame);
+
+ if (current->show_existing_frame) {
+ AV1ReferenceFrameState *ref;
+
+ fb(3, frame_to_show_map_idx);
+ ref = &priv->ref[current->frame_to_show_map_idx];
+
+ if (!ref->valid) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Missing reference frame needed for "
+ "show_existing_frame (frame_to_show_map_idx = %d).\n",
+ current->frame_to_show_map_idx);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (seq->decoder_model_info_present_flag &&
+ !seq->timing_info.equal_picture_interval) {
+ fb(seq->decoder_model_info.frame_presentation_time_length_minus_1 + 1,
+ frame_presentation_time);
+ }
+
+ if (seq->frame_id_numbers_present_flag)
+ fb(id_len, display_frame_id);
+
+ infer(frame_type, ref->frame_type);
+ if (current->frame_type == AV1_FRAME_KEY) {
+ infer(refresh_frame_flags, all_frames);
+
+ // Section 7.21
+ infer(current_frame_id, ref->frame_id);
+ priv->upscaled_width = ref->upscaled_width;
+ priv->frame_width = ref->frame_width;
+ priv->frame_height = ref->frame_height;
+ priv->render_width = ref->render_width;
+ priv->render_height = ref->render_height;
+ priv->bit_depth = ref->bit_depth;
+ priv->order_hint = ref->order_hint;
+ } else
+ infer(refresh_frame_flags, 0);
+
+ infer(frame_width_minus_1, ref->upscaled_width - 1);
+ infer(frame_height_minus_1, ref->frame_height - 1);
+ infer(render_width_minus_1, ref->render_width - 1);
+ infer(render_height_minus_1, ref->render_height - 1);
+
+ // Section 7.20
+ goto update_refs;
+ }
+
+ fb(2, frame_type);
+ frame_is_intra = (current->frame_type == AV1_FRAME_INTRA_ONLY ||
+ current->frame_type == AV1_FRAME_KEY);
+
+ flag(show_frame);
+ if (current->show_frame &&
+ seq->decoder_model_info_present_flag &&
+ !seq->timing_info.equal_picture_interval) {
+ fb(seq->decoder_model_info.frame_presentation_time_length_minus_1 + 1,
+ frame_presentation_time);
+ }
+ if (current->show_frame)
+ infer(showable_frame, current->frame_type != AV1_FRAME_KEY);
+ else
+ flag(showable_frame);
+
+ if (current->frame_type == AV1_FRAME_SWITCH ||
+ (current->frame_type == AV1_FRAME_KEY && current->show_frame))
+ infer(error_resilient_mode, 1);
+ else
+ flag(error_resilient_mode);
+ }
+
+ if (current->frame_type == AV1_FRAME_KEY && current->show_frame) {
+ for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ priv->ref[i].valid = 0;
+ priv->ref[i].order_hint = 0;
+ }
+ }
+
+ flag(disable_cdf_update);
+
+ if (seq->seq_force_screen_content_tools ==
+ AV1_SELECT_SCREEN_CONTENT_TOOLS) {
+ flag(allow_screen_content_tools);
+ } else {
+ infer(allow_screen_content_tools,
+ seq->seq_force_screen_content_tools);
+ }
+ if (current->allow_screen_content_tools) {
+ if (seq->seq_force_integer_mv == AV1_SELECT_INTEGER_MV)
+ flag(force_integer_mv);
+ else
+ infer(force_integer_mv, seq->seq_force_integer_mv);
+ } else {
+ infer(force_integer_mv, 0);
+ }
+
+ if (seq->frame_id_numbers_present_flag) {
+ fb(id_len, current_frame_id);
+
+ diff_len = seq->delta_frame_id_length_minus_2 + 2;
+ for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ if (current->current_frame_id > (1 << diff_len)) {
+ if (priv->ref[i].frame_id > current->current_frame_id ||
+ priv->ref[i].frame_id < (current->current_frame_id -
+ (1 << diff_len)))
+ priv->ref[i].valid = 0;
+ } else {
+ if (priv->ref[i].frame_id > current->current_frame_id &&
+ priv->ref[i].frame_id < ((1 << id_len) +
+ current->current_frame_id -
+ (1 << diff_len)))
+ priv->ref[i].valid = 0;
+ }
+ }
+ } else {
+ infer(current_frame_id, 0);
+ }
+
+ if (current->frame_type == AV1_FRAME_SWITCH)
+ infer(frame_size_override_flag, 1);
+    else if (seq->reduced_still_picture_header)
+ infer(frame_size_override_flag, 0);
+ else
+ flag(frame_size_override_flag);
+
+ order_hint_bits =
+ seq->enable_order_hint ? seq->order_hint_bits_minus_1 + 1 : 0;
+ if (order_hint_bits > 0)
+ fb(order_hint_bits, order_hint);
+ else
+ infer(order_hint, 0);
+ priv->order_hint = current->order_hint;
+
+ if (frame_is_intra || current->error_resilient_mode)
+ infer(primary_ref_frame, AV1_PRIMARY_REF_NONE);
+ else
+ fb(3, primary_ref_frame);
+
+ if (seq->decoder_model_info_present_flag) {
+ flag(buffer_removal_time_present_flag);
+ if (current->buffer_removal_time_present_flag) {
+ for (i = 0; i <= seq->operating_points_cnt_minus_1; i++) {
+ if (seq->decoder_model_present_for_this_op[i]) {
+ int op_pt_idc = seq->operating_point_idc[i];
+ int in_temporal_layer = (op_pt_idc >> priv->temporal_id ) & 1;
+ int in_spatial_layer = (op_pt_idc >> (priv->spatial_id + 8)) & 1;
+ if (seq->operating_point_idc[i] == 0 ||
+ (in_temporal_layer && in_spatial_layer)) {
+ fbs(seq->decoder_model_info.buffer_removal_time_length_minus_1 + 1,
+ buffer_removal_time[i], 1, i);
+ }
+ }
+ }
+ }
+ }
+
+ if (current->frame_type == AV1_FRAME_SWITCH ||
+ (current->frame_type == AV1_FRAME_KEY && current->show_frame))
+ infer(refresh_frame_flags, all_frames);
+ else
+ fb(8, refresh_frame_flags);
+
+ if (!frame_is_intra || current->refresh_frame_flags != all_frames) {
+ if (seq->enable_order_hint) {
+ for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ if (current->error_resilient_mode)
+ fbs(order_hint_bits, ref_order_hint[i], 1, i);
+ else
+ infer(ref_order_hint[i], priv->ref[i].order_hint);
+ if (current->ref_order_hint[i] != priv->ref[i].order_hint)
+ priv->ref[i].valid = 0;
+ }
+ }
+ }
+
+ if (current->frame_type == AV1_FRAME_KEY ||
+ current->frame_type == AV1_FRAME_INTRA_ONLY) {
+ CHECK(FUNC(frame_size)(ctx, rw, current));
+ CHECK(FUNC(render_size)(ctx, rw, current));
+
+ if (current->allow_screen_content_tools &&
+ priv->upscaled_width == priv->frame_width)
+ flag(allow_intrabc);
+ else
+ infer(allow_intrabc, 0);
+
+ } else {
+ if (!seq->enable_order_hint) {
+ infer(frame_refs_short_signaling, 0);
+ } else {
+ flag(frame_refs_short_signaling);
+ if (current->frame_refs_short_signaling) {
+ fb(3, last_frame_idx);
+ fb(3, golden_frame_idx);
+ CHECK(FUNC(set_frame_refs)(ctx, rw, current));
+ }
+ }
+
+ for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
+ if (!current->frame_refs_short_signaling)
+ fbs(3, ref_frame_idx[i], 1, i);
+ if (seq->frame_id_numbers_present_flag) {
+ fbs(seq->delta_frame_id_length_minus_2 + 2,
+ delta_frame_id_minus1[i], 1, i);
+ }
+ }
+
+ if (current->frame_size_override_flag &&
+ !current->error_resilient_mode) {
+ CHECK(FUNC(frame_size_with_refs)(ctx, rw, current));
+ } else {
+ CHECK(FUNC(frame_size)(ctx, rw, current));
+ CHECK(FUNC(render_size)(ctx, rw, current));
+ }
+
+ if (current->force_integer_mv)
+ infer(allow_high_precision_mv, 0);
+ else
+ flag(allow_high_precision_mv);
+
+ CHECK(FUNC(interpolation_filter)(ctx, rw, current));
+
+ flag(is_motion_mode_switchable);
+
+ if (current->error_resilient_mode ||
+ !seq->enable_ref_frame_mvs)
+ infer(use_ref_frame_mvs, 0);
+ else
+ flag(use_ref_frame_mvs);
+
+ infer(allow_intrabc, 0);
+ }
+
+ if (!frame_is_intra) {
+ // Derive reference frame sign biases.
+ }
+
+ if (seq->reduced_still_picture_header || current->disable_cdf_update)
+ infer(disable_frame_end_update_cdf, 1);
+ else
+ flag(disable_frame_end_update_cdf);
+
+ if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
+ // Init non-coeff CDFs.
+ // Setup past independence.
+ } else {
+ // Load CDF tables from previous frame.
+ // Load params from previous frame.
+ }
+
+ if (current->use_ref_frame_mvs) {
+ // Perform motion field estimation process.
+ }
+
+ CHECK(FUNC(tile_info)(ctx, rw, current));
+
+ CHECK(FUNC(quantization_params)(ctx, rw, current));
+
+ CHECK(FUNC(segmentation_params)(ctx, rw, current));
+
+ CHECK(FUNC(delta_q_params)(ctx, rw, current));
+
+ CHECK(FUNC(delta_lf_params)(ctx, rw, current));
+
+ // Init coeff CDFs / load previous segments.
+
+ priv->coded_lossless = 1;
+ for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
+ int qindex;
+ if (current->feature_enabled[i][AV1_SEG_LVL_ALT_Q]) {
+ qindex = (current->base_q_idx +
+ current->feature_value[i][AV1_SEG_LVL_ALT_Q]);
+ } else {
+ qindex = current->base_q_idx;
+ }
+ qindex = av_clip_uintp2(qindex, 8);
+
+ if (qindex || current->delta_q_y_dc ||
+ current->delta_q_u_ac || current->delta_q_u_dc ||
+ current->delta_q_v_ac || current->delta_q_v_dc) {
+ priv->coded_lossless = 0;
+ }
+ }
+ priv->all_lossless = priv->coded_lossless &&
+ priv->frame_width == priv->upscaled_width;
+
+ CHECK(FUNC(loop_filter_params)(ctx, rw, current));
+
+ CHECK(FUNC(cdef_params)(ctx, rw, current));
+
+ CHECK(FUNC(lr_params)(ctx, rw, current));
+
+ CHECK(FUNC(read_tx_mode)(ctx, rw, current));
+
+ CHECK(FUNC(frame_reference_mode)(ctx, rw, current));
+
+ CHECK(FUNC(skip_mode_params)(ctx, rw, current));
+
+ if (frame_is_intra || current->error_resilient_mode ||
+ !seq->enable_warped_motion)
+ infer(allow_warped_motion, 0);
+ else
+ flag(allow_warped_motion);
+
+ flag(reduced_tx_set);
+
+ CHECK(FUNC(global_motion_params)(ctx, rw, current));
+
+ CHECK(FUNC(film_grain_params)(ctx, rw, &current->film_grain, current));
+
+ av_log(ctx->log_ctx, AV_LOG_DEBUG, "Frame %d: size %dx%d "
+ "upscaled %d render %dx%d subsample %dx%d "
+ "bitdepth %d tiles %dx%d.\n", priv->order_hint,
+ priv->frame_width, priv->frame_height, priv->upscaled_width,
+ priv->render_width, priv->render_height,
+ seq->color_config.subsampling_x + 1,
+ seq->color_config.subsampling_y + 1, priv->bit_depth,
+ priv->tile_rows, priv->tile_cols);
+
+update_refs:
+ for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ if (current->refresh_frame_flags & (1 << i)) {
+ priv->ref[i] = (AV1ReferenceFrameState) {
+ .valid = 1,
+ .frame_id = current->current_frame_id,
+ .upscaled_width = priv->upscaled_width,
+ .frame_width = priv->frame_width,
+ .frame_height = priv->frame_height,
+ .render_width = priv->render_width,
+ .render_height = priv->render_height,
+ .frame_type = current->frame_type,
+ .subsampling_x = seq->color_config.subsampling_x,
+ .subsampling_y = seq->color_config.subsampling_y,
+ .bit_depth = priv->bit_depth,
+ .order_hint = priv->order_hint,
+ };
+ memcpy(priv->ref[i].loop_filter_ref_deltas, current->loop_filter_ref_deltas,
+ sizeof(current->loop_filter_ref_deltas));
+ memcpy(priv->ref[i].loop_filter_mode_deltas, current->loop_filter_mode_deltas,
+ sizeof(current->loop_filter_mode_deltas));
+ memcpy(priv->ref[i].feature_enabled, current->feature_enabled,
+ sizeof(current->feature_enabled));
+ memcpy(priv->ref[i].feature_value, current->feature_value,
+ sizeof(current->feature_value));
+ }
+ }
+
+ return 0;
+}
+
+static int FUNC(frame_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrameHeader *current, int redundant,
+ AVBufferRef *rw_buffer_ref)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ int start_pos, fh_bits, fh_bytes, err;
+ uint8_t *fh_start;
+
+ if (priv->seen_frame_header) {
+ if (!redundant) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid repeated "
+ "frame header OBU.\n");
+ return AVERROR_INVALIDDATA;
+ } else {
+ GetBitContext fh;
+ size_t i, b;
+ uint32_t val;
+
+ HEADER("Redundant Frame Header");
+
+ av_assert0(priv->frame_header_ref && priv->frame_header);
+
+ init_get_bits(&fh, priv->frame_header,
+ priv->frame_header_size);
+ for (i = 0; i < priv->frame_header_size; i += 8) {
+ b = FFMIN(priv->frame_header_size - i, 8);
+ val = get_bits(&fh, b);
+ xf(b, frame_header_copy[i],
+ val, val, val, 1, i / 8);
+ }
+ }
+ } else {
+ if (redundant)
+ HEADER("Redundant Frame Header (used as Frame Header)");
+ else
+ HEADER("Frame Header");
+
+#ifdef READ
+ start_pos = get_bits_count(rw);
+#else
+ start_pos = put_bits_count(rw);
+#endif
+
+ CHECK(FUNC(uncompressed_header)(ctx, rw, current));
+
+ priv->tile_num = 0;
+
+ if (current->show_existing_frame) {
+ priv->seen_frame_header = 0;
+ } else {
+ priv->seen_frame_header = 1;
+
+ av_buffer_unref(&priv->frame_header_ref);
+
+#ifdef READ
+ fh_bits = get_bits_count(rw) - start_pos;
+ fh_start = (uint8_t*)rw->buffer + start_pos / 8;
+#else
+ // Need to flush the bitwriter so that we can copy its output,
+ // but use a copy so we don't affect the caller's structure.
+ {
+ PutBitContext tmp = *rw;
+ flush_put_bits(&tmp);
+ }
+
+ fh_bits = put_bits_count(rw) - start_pos;
+ fh_start = rw->buf + start_pos / 8;
+#endif
+ fh_bytes = (fh_bits + 7) / 8;
+
+ priv->frame_header_size = fh_bits;
+
+ if (rw_buffer_ref) {
+ priv->frame_header_ref = av_buffer_ref(rw_buffer_ref);
+ if (!priv->frame_header_ref)
+ return AVERROR(ENOMEM);
+ priv->frame_header = fh_start;
+ } else {
+ priv->frame_header_ref =
+ av_buffer_alloc(fh_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!priv->frame_header_ref)
+ return AVERROR(ENOMEM);
+ priv->frame_header = priv->frame_header_ref->data;
+ memcpy(priv->frame_header, fh_start, fh_bytes);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int FUNC(tile_group_obu)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawTileGroup *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ int num_tiles, tile_bits;
+ int err;
+
+ HEADER("Tile Group");
+
+ num_tiles = priv->tile_cols * priv->tile_rows;
+ if (num_tiles > 1)
+ flag(tile_start_and_end_present_flag);
+ else
+ infer(tile_start_and_end_present_flag, 0);
+
+ if (num_tiles == 1 || !current->tile_start_and_end_present_flag) {
+ infer(tg_start, 0);
+ infer(tg_end, num_tiles - 1);
+ } else {
+ tile_bits = cbs_av1_tile_log2(1, priv->tile_cols) +
+ cbs_av1_tile_log2(1, priv->tile_rows);
+ fc(tile_bits, tg_start, priv->tile_num, num_tiles - 1);
+ fc(tile_bits, tg_end, current->tg_start, num_tiles - 1);
+ }
+
+ priv->tile_num = current->tg_end + 1;
+
+ CHECK(FUNC(byte_alignment)(ctx, rw));
+
+ // Reset header for next frame.
+ if (current->tg_end == num_tiles - 1)
+ priv->seen_frame_header = 0;
+
+ // Tile data follows.
+
+ return 0;
+}
+
+static int FUNC(frame_obu)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawFrame *current,
+ AVBufferRef *rw_buffer_ref)
+{
+ int err;
+
+ CHECK(FUNC(frame_header_obu)(ctx, rw, &current->header,
+ 0, rw_buffer_ref));
+
+ CHECK(FUNC(byte_alignment)(ctx, rw));
+
+ CHECK(FUNC(tile_group_obu)(ctx, rw, &current->tile_group));
+
+ return 0;
+}
+
+static int FUNC(tile_list_obu)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawTileList *current)
+{
+ int err;
+
+ fb(8, output_frame_width_in_tiles_minus_1);
+ fb(8, output_frame_height_in_tiles_minus_1);
+
+ fb(16, tile_count_minus_1);
+
+ // Tile data follows.
+
+ return 0;
+}
+
+static int FUNC(metadata_hdr_cll)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawMetadataHDRCLL *current)
+{
+ int err;
+
+ fb(16, max_cll);
+ fb(16, max_fall);
+
+ return 0;
+}
+
+static int FUNC(metadata_hdr_mdcv)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawMetadataHDRMDCV *current)
+{
+ int err, i;
+
+ for (i = 0; i < 3; i++) {
+ fbs(16, primary_chromaticity_x[i], 1, i);
+ fbs(16, primary_chromaticity_y[i], 1, i);
+ }
+
+ fb(16, white_point_chromaticity_x);
+ fb(16, white_point_chromaticity_y);
+
+ fb(32, luminance_max);
+ fb(32, luminance_min);
+
+ return 0;
+}
+
+static int FUNC(scalability_structure)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawMetadataScalability *current)
+{
+ CodedBitstreamAV1Context *priv = ctx->priv_data;
+ const AV1RawSequenceHeader *seq;
+ int err, i, j;
+
+ if (!priv->sequence_header) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "No sequence header available: "
+ "unable to parse scalability metadata.\n");
+ return AVERROR_INVALIDDATA;
+ }
+ seq = priv->sequence_header;
+
+ fb(2, spatial_layers_cnt_minus_1);
+ flag(spatial_layer_dimensions_present_flag);
+ flag(spatial_layer_description_present_flag);
+ flag(temporal_group_description_present_flag);
+ fc(3, scalability_structure_reserved_3bits, 0, 0);
+ if (current->spatial_layer_dimensions_present_flag) {
+ for (i = 0; i <= current->spatial_layers_cnt_minus_1; i++) {
+ fcs(16, spatial_layer_max_width[i],
+ 0, seq->max_frame_width_minus_1 + 1, 1, i);
+ fcs(16, spatial_layer_max_height[i],
+ 0, seq->max_frame_height_minus_1 + 1, 1, i);
+ }
+ }
+ if (current->spatial_layer_description_present_flag) {
+ for (i = 0; i <= current->spatial_layers_cnt_minus_1; i++)
+ fbs(8, spatial_layer_ref_id[i], 1, i);
+ }
+ if (current->temporal_group_description_present_flag) {
+ fb(8, temporal_group_size);
+ for (i = 0; i < current->temporal_group_size; i++) {
+ fbs(3, temporal_group_temporal_id[i], 1, i);
+ flags(temporal_group_temporal_switching_up_point_flag[i], 1, i);
+ flags(temporal_group_spatial_switching_up_point_flag[i], 1, i);
+ fbs(3, temporal_group_ref_cnt[i], 1, i);
+ for (j = 0; j < current->temporal_group_ref_cnt[i]; j++) {
+ fbs(8, temporal_group_ref_pic_diff[i][j], 2, i, j);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int FUNC(metadata_scalability)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawMetadataScalability *current)
+{
+ int err;
+
+ fb(8, scalability_mode_idc);
+
+ if (current->scalability_mode_idc == AV1_SCALABILITY_SS)
+ CHECK(FUNC(scalability_structure)(ctx, rw, current));
+
+ return 0;
+}
+
+static int FUNC(metadata_itut_t35)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawMetadataITUTT35 *current)
+{
+ int err;
+ size_t i;
+
+ fb(8, itu_t_t35_country_code);
+ if (current->itu_t_t35_country_code == 0xff)
+ fb(8, itu_t_t35_country_code_extension_byte);
+
+#ifdef READ
+ // The payload runs up to the start of the trailing bits, but there might
+ // be arbitrarily many trailing zeroes so we need to read through twice.
+ current->payload_size = cbs_av1_get_payload_bytes_left(rw);
+
+ current->payload_ref = av_buffer_alloc(current->payload_size);
+ if (!current->payload_ref)
+ return AVERROR(ENOMEM);
+ current->payload = current->payload_ref->data;
+#endif
+
+ for (i = 0; i < current->payload_size; i++)
+ xf(8, itu_t_t35_payload_bytes[i], current->payload[i],
+ 0x00, 0xff, 1, i);
+
+ return 0;
+}
+
+static int FUNC(metadata_timecode)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawMetadataTimecode *current)
+{
+ int err;
+
+ fb(5, counting_type);
+ flag(full_timestamp_flag);
+ flag(discontinuity_flag);
+ flag(cnt_dropped_flag);
+ fb(9, n_frames);
+
+ if (current->full_timestamp_flag) {
+ fc(6, seconds_value, 0, 59);
+ fc(6, minutes_value, 0, 59);
+ fc(5, hours_value, 0, 23);
+ } else {
+ flag(seconds_flag);
+ if (current->seconds_flag) {
+ fc(6, seconds_value, 0, 59);
+ flag(minutes_flag);
+ if (current->minutes_flag) {
+ fc(6, minutes_value, 0, 59);
+ flag(hours_flag);
+ if (current->hours_flag)
+ fc(5, hours_value, 0, 23);
+ }
+ }
+ }
+
+ fb(5, time_offset_length);
+ if (current->time_offset_length > 0)
+ fb(current->time_offset_length, time_offset_value);
+ else
+ infer(time_offset_length, 0);
+
+ return 0;
+}
+
+static int FUNC(metadata_obu)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawMetadata *current)
+{
+ int err;
+
+ leb128(metadata_type);
+
+ switch (current->metadata_type) {
+ case AV1_METADATA_TYPE_HDR_CLL:
+ CHECK(FUNC(metadata_hdr_cll)(ctx, rw, &current->metadata.hdr_cll));
+ break;
+ case AV1_METADATA_TYPE_HDR_MDCV:
+ CHECK(FUNC(metadata_hdr_mdcv)(ctx, rw, &current->metadata.hdr_mdcv));
+ break;
+ case AV1_METADATA_TYPE_SCALABILITY:
+ CHECK(FUNC(metadata_scalability)(ctx, rw, &current->metadata.scalability));
+ break;
+ case AV1_METADATA_TYPE_ITUT_T35:
+ CHECK(FUNC(metadata_itut_t35)(ctx, rw, &current->metadata.itut_t35));
+ break;
+ case AV1_METADATA_TYPE_TIMECODE:
+ CHECK(FUNC(metadata_timecode)(ctx, rw, &current->metadata.timecode));
+ break;
+ default:
+ // Unknown metadata type.
+ return AVERROR_PATCHWELCOME;
+ }
+
+ return 0;
+}
+
+static int FUNC(padding_obu)(CodedBitstreamContext *ctx, RWContext *rw,
+ AV1RawPadding *current)
+{
+ int i, err;
+
+ HEADER("Padding");
+
+#ifdef READ
+ // The payload runs up to the start of the trailing bits, but there might
+ // be arbitrarily many trailing zeroes so we need to read through twice.
+ current->payload_size = cbs_av1_get_payload_bytes_left(rw);
+
+ current->payload_ref = av_buffer_alloc(current->payload_size);
+ if (!current->payload_ref)
+ return AVERROR(ENOMEM);
+ current->payload = current->payload_ref->data;
+#endif
+
+ for (i = 0; i < current->payload_size; i++)
+ xf(8, obu_padding_byte[i], current->payload[i], 0x00, 0xff, 1, i);
+
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/cbs_internal.h b/media/ffvpx/libavcodec/cbs_internal.h
new file mode 100644
index 0000000000..e585c77934
--- /dev/null
+++ b/media/ffvpx/libavcodec/cbs_internal.h
@@ -0,0 +1,253 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CBS_INTERNAL_H
+#define AVCODEC_CBS_INTERNAL_H
+
+#include <stdint.h>
+
+#include "libavutil/buffer.h"
+#include "libavutil/log.h"
+
+#include "cbs.h"
+#include "codec_id.h"
+#include "get_bits.h"
+#include "put_bits.h"
+
+
+enum CBSContentType {
+ // Unit content may contain some references to other structures, but all
+ // managed via buffer reference counting. The descriptor defines the
+ // structure offsets of every buffer reference.
+ CBS_CONTENT_TYPE_INTERNAL_REFS,
+ // Unit content is something more complex. The descriptor defines
+ // special functions to manage the content.
+ CBS_CONTENT_TYPE_COMPLEX,
+};
+
+enum {
+ // Maximum number of unit types described by the same non-range
+ // unit type descriptor.
+ CBS_MAX_LIST_UNIT_TYPES = 3,
+ // Maximum number of reference buffer offsets in any one unit.
+ CBS_MAX_REF_OFFSETS = 2,
+ // Special value used in a unit type descriptor to indicate that it
+ // applies to a large range of types rather than a set of discrete
+ // values.
+ CBS_UNIT_TYPE_RANGE = -1,
+};
+
+typedef const struct CodedBitstreamUnitTypeDescriptor {
+ // Number of entries in the unit_types array, or the special value
+ // CBS_UNIT_TYPE_RANGE to indicate that the range fields should be
+ // used instead.
+ int nb_unit_types;
+
+ union {
+ // Array of unit types that this entry describes.
+ CodedBitstreamUnitType list[CBS_MAX_LIST_UNIT_TYPES];
+ // Start and end of unit type range, used if nb_unit_types is
+ // CBS_UNIT_TYPE_RANGE.
+ struct {
+ CodedBitstreamUnitType start;
+ CodedBitstreamUnitType end;
+ } range;
+ } unit_type;
+
+ // The type of content described.
+ enum CBSContentType content_type;
+ // The size of the structure which should be allocated to contain
+ // the decomposed content of this type of unit.
+ size_t content_size;
+
+ union {
+ // This union's state is determined by content_type:
+ // ref for CBS_CONTENT_TYPE_INTERNAL_REFS,
+ // complex for CBS_CONTENT_TYPE_COMPLEX.
+ struct {
+ // Number of entries in the ref_offsets array.
+            // May be zero, in which case the structure is POD-like.
+ int nb_offsets;
+ // The structure must contain two adjacent elements:
+ // type *field;
+ // AVBufferRef *field_ref;
+ // where field points to something in the buffer referred to by
+ // field_ref. This offset is then set to offsetof(struct, field).
+ size_t offsets[CBS_MAX_REF_OFFSETS];
+ } ref;
+
+ struct {
+ void (*content_free)(void *opaque, uint8_t *data);
+ int (*content_clone)(AVBufferRef **ref, CodedBitstreamUnit *unit);
+ } complex;
+ } type;
+} CodedBitstreamUnitTypeDescriptor;
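+
+// Illustrative sketch of the internal-refs layout convention described
+// above (the struct and field names are hypothetical, not taken from any
+// actual codec): the content structure keeps the data pointer and its
+// owning AVBufferRef adjacent, and the descriptor records the offset of
+// the pointer member.
+//
+//     typedef struct FooRawUnit {
+//         uint8_t     *payload;       // points into the buffer below
+//         AVBufferRef *payload_ref;   // reference owning that buffer
+//         size_t       payload_size;
+//     } FooRawUnit;
+//
+//     // In the descriptor for this unit type:
+//     // .type.ref = { .nb_offsets = 1,
+//     //               .offsets = { offsetof(FooRawUnit, payload) } }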
+
+typedef struct CodedBitstreamType {
+ enum AVCodecID codec_id;
+
+ // A class for the private data, used to declare private AVOptions.
+ // This field is NULL for types that do not declare any options.
+    // If this field is non-NULL, the first member of the codec-specific
+    // private data must be a pointer to AVClass.
+ const AVClass *priv_class;
+
+ size_t priv_data_size;
+
+ // List of unit type descriptors for this codec.
+ // Terminated by a descriptor with nb_unit_types equal to zero.
+ const CodedBitstreamUnitTypeDescriptor *unit_types;
+
+ // Split frag->data into coded bitstream units, creating the
+ // frag->units array. Fill data but not content on each unit.
+ // The header argument should be set if the fragment came from
+ // a header block, which may require different parsing for some
+ // codecs (e.g. the AVCC header in H.264).
+ int (*split_fragment)(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag,
+ int header);
+
+ // Read the unit->data bitstream and decompose it, creating
+ // unit->content.
+ int (*read_unit)(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit);
+
+ // Write the data bitstream from unit->content into pbc.
+ // Return value AVERROR(ENOSPC) indicates that pbc was too small.
+ int (*write_unit)(CodedBitstreamContext *ctx,
+ CodedBitstreamUnit *unit,
+ PutBitContext *pbc);
+
+ // Read the data from all of frag->units and assemble it into
+ // a bitstream for the whole fragment.
+ int (*assemble_fragment)(CodedBitstreamContext *ctx,
+ CodedBitstreamFragment *frag);
+
+ // Reset the codec internal state.
+ void (*flush)(CodedBitstreamContext *ctx);
+
+ // Free the codec internal state.
+ void (*close)(CodedBitstreamContext *ctx);
+} CodedBitstreamType;
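+
+// A rough sketch of how a codec backend wires these callbacks together
+// (all names here are hypothetical placeholders, not an actual backend):
+//
+//     const CodedBitstreamType ff_cbs_type_foo = {
+//         .codec_id          = AV_CODEC_ID_FOO,
+//         .priv_data_size    = sizeof(CodedBitstreamFooContext),
+//         .unit_types        = cbs_foo_unit_types,
+//         .split_fragment    = &cbs_foo_split_fragment,
+//         .read_unit         = &cbs_foo_read_unit,
+//         .write_unit        = &cbs_foo_write_unit,
+//         .assemble_fragment = &cbs_foo_assemble_fragment,
+//         .close             = &cbs_foo_close,
+//     };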
+
+
+// Helper functions for trace output.
+
+void ff_cbs_trace_header(CodedBitstreamContext *ctx,
+ const char *name);
+
+void ff_cbs_trace_syntax_element(CodedBitstreamContext *ctx, int position,
+ const char *name, const int *subscripts,
+ const char *bitstring, int64_t value);
+
+
+// Helper functions for read/write of common bitstream elements, including
+// generation of trace output.
+
+int ff_cbs_read_unsigned(CodedBitstreamContext *ctx, GetBitContext *gbc,
+ int width, const char *name,
+ const int *subscripts, uint32_t *write_to,
+ uint32_t range_min, uint32_t range_max);
+
+int ff_cbs_write_unsigned(CodedBitstreamContext *ctx, PutBitContext *pbc,
+ int width, const char *name,
+ const int *subscripts, uint32_t value,
+ uint32_t range_min, uint32_t range_max);
+
+int ff_cbs_read_signed(CodedBitstreamContext *ctx, GetBitContext *gbc,
+ int width, const char *name,
+ const int *subscripts, int32_t *write_to,
+ int32_t range_min, int32_t range_max);
+
+int ff_cbs_write_signed(CodedBitstreamContext *ctx, PutBitContext *pbc,
+ int width, const char *name,
+ const int *subscripts, int32_t value,
+ int32_t range_min, int32_t range_max);
+
+// The largest unsigned value representable in N bits, suitable for use as
+// range_max in the above functions.
+#define MAX_UINT_BITS(length) ((UINT64_C(1) << (length)) - 1)
+
+// The largest signed value representable in N bits, suitable for use as
+// range_max in the above functions.
+#define MAX_INT_BITS(length) ((INT64_C(1) << ((length) - 1)) - 1)
+
+// The smallest signed value representable in N bits, suitable for use as
+// range_min in the above functions.
+#define MIN_INT_BITS(length) (-(INT64_C(1) << ((length) - 1)))
+
+#define TYPE_LIST(...) { __VA_ARGS__ }
+#define CBS_UNIT_TYPE_POD(type_, structure) { \
+ .nb_unit_types = 1, \
+ .unit_type.list = { type_ }, \
+ .content_type = CBS_CONTENT_TYPE_INTERNAL_REFS, \
+ .content_size = sizeof(structure), \
+ .type.ref = { .nb_offsets = 0 }, \
+ }
+#define CBS_UNIT_RANGE_POD(range_start, range_end, structure) { \
+ .nb_unit_types = CBS_UNIT_TYPE_RANGE, \
+ .unit_type.range.start = range_start, \
+ .unit_type.range.end = range_end, \
+ .content_type = CBS_CONTENT_TYPE_INTERNAL_REFS, \
+ .content_size = sizeof(structure), \
+ .type.ref = { .nb_offsets = 0 }, \
+ }
+
+#define CBS_UNIT_TYPES_INTERNAL_REF(types, structure, ref_field) { \
+ .nb_unit_types = FF_ARRAY_ELEMS((CodedBitstreamUnitType[])TYPE_LIST types), \
+ .unit_type.list = TYPE_LIST types, \
+ .content_type = CBS_CONTENT_TYPE_INTERNAL_REFS, \
+ .content_size = sizeof(structure), \
+ .type.ref = { .nb_offsets = 1, \
+ .offsets = { offsetof(structure, ref_field) } }, \
+ }
+#define CBS_UNIT_TYPE_INTERNAL_REF(type, structure, ref_field) \
+ CBS_UNIT_TYPES_INTERNAL_REF((type), structure, ref_field)
+
+#define CBS_UNIT_RANGE_INTERNAL_REF(range_start, range_end, structure, ref_field) { \
+ .nb_unit_types = CBS_UNIT_TYPE_RANGE, \
+ .unit_type.range.start = range_start, \
+ .unit_type.range.end = range_end, \
+ .content_type = CBS_CONTENT_TYPE_INTERNAL_REFS, \
+ .content_size = sizeof(structure), \
+ .type.ref = { .nb_offsets = 1, \
+ .offsets = { offsetof(structure, ref_field) } }, \
+ }
+
+#define CBS_UNIT_TYPES_COMPLEX(types, structure, free_func) { \
+ .nb_unit_types = FF_ARRAY_ELEMS((CodedBitstreamUnitType[])TYPE_LIST types), \
+ .unit_type.list = TYPE_LIST types, \
+ .content_type = CBS_CONTENT_TYPE_COMPLEX, \
+ .content_size = sizeof(structure), \
+ .type.complex = { .content_free = free_func }, \
+ }
+#define CBS_UNIT_TYPE_COMPLEX(type, structure, free_func) \
+ CBS_UNIT_TYPES_COMPLEX((type), structure, free_func)
+
+#define CBS_UNIT_TYPE_END_OF_LIST { .nb_unit_types = 0 }
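+
+// A minimal usage sketch of the macros above (unit type values and content
+// structures are hypothetical): a codec lists its unit types in a descriptor
+// array terminated by CBS_UNIT_TYPE_END_OF_LIST, which is then pointed to by
+// CodedBitstreamType.unit_types.
+//
+//     static const CodedBitstreamUnitTypeDescriptor cbs_foo_unit_types[] = {
+//         CBS_UNIT_TYPE_POD(FOO_UNIT_HEADER, FooRawHeader),
+//         CBS_UNIT_TYPE_INTERNAL_REF(FOO_UNIT_SLICE, FooRawSlice, data),
+//         CBS_UNIT_TYPE_END_OF_LIST
+//     };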
+
+
+extern const CodedBitstreamType ff_cbs_type_av1;
+extern const CodedBitstreamType ff_cbs_type_h264;
+extern const CodedBitstreamType ff_cbs_type_h265;
+extern const CodedBitstreamType ff_cbs_type_jpeg;
+extern const CodedBitstreamType ff_cbs_type_mpeg2;
+extern const CodedBitstreamType ff_cbs_type_vp9;
+
+
+#endif /* AVCODEC_CBS_INTERNAL_H */
diff --git a/media/ffvpx/libavcodec/codec.h b/media/ffvpx/libavcodec/codec.h
new file mode 100644
index 0000000000..3b1995bcfe
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec.h
@@ -0,0 +1,375 @@
+/*
+ * AVCodec public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_H
+#define AVCODEC_CODEC_H
+
+#include <stdint.h>
+
+#include "libavutil/avutil.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+#include "libavutil/samplefmt.h"
+
+#include "libavcodec/codec_id.h"
+#include "libavcodec/version_major.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * Decoder can use draw_horiz_band callback.
+ */
+#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0)
+/**
+ * Codec uses get_buffer() or get_encode_buffer() for allocating buffers and
+ * supports custom allocators.
+ * If not set, it might not use get_buffer() or get_encode_buffer() at all, or
+ * use operations that assume the buffer was allocated by
+ * avcodec_default_get_buffer2 or avcodec_default_get_encode_buffer.
+ */
+#define AV_CODEC_CAP_DR1 (1 << 1)
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed
+ *       with NULL data. The user can still send NULL data to the public encode
+ * or decode function, but libavcodec will not pass it along to the codec
+ * unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ * flag also means that the encoder must set the pts and duration for
+ * each output packet. If this flag is not set, the pts and duration will
+ * be determined by libavcodec from the input frame.
+ */
+#define AV_CODEC_CAP_DELAY (1 << 5)
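+/*
+ * Illustrative sketch (not part of this header): a decoder advertising
+ * AV_CODEC_CAP_DELAY is drained by sending a NULL packet and then reading
+ * frames until no more are returned. Error handling is omitted; avctx and
+ * frame are assumed to exist and process_frame() is a hypothetical callback.
+ *
+ *     avcodec_send_packet(avctx, NULL);            // enter draining mode
+ *     while (avcodec_receive_frame(avctx, frame) >= 0)
+ *         process_frame(frame);                    // consume delayed frames
+ */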
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
+
+/**
+ * Codec can output multiple frames per AVPacket.
+ * Normally demuxers return one frame at a time; demuxers which do not do
+ * so are connected to a parser to split what they return into proper frames.
+ * This flag is reserved for the very rare category of codecs which have a
+ * bitstream that cannot be split into frames without time-consuming
+ * operations like full decoding. Demuxers carrying such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages, like
+ * prohibiting stream copy in many cases, so it should only be considered
+ * as a last resort.
+ */
+#define AV_CODEC_CAP_SUBFRAMES (1 << 8)
+/**
+ * Codec is experimental and is thus avoided in favor of non-experimental
+ * encoders.
+ */
+#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9)
+/**
+ * Codec should fill in channel configuration and sample rate instead of the container.
+ */
+#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10)
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define AV_CODEC_CAP_FRAME_THREADS (1 << 12)
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define AV_CODEC_CAP_SLICE_THREADS (1 << 13)
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14)
+/**
+ * Codec supports multithreading through a method other than slice- or
+ * frame-level multithreading. Typically this marks wrappers around
+ * multithreading-capable external libraries.
+ */
+#define AV_CODEC_CAP_OTHER_THREADS (1 << 15)
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
+/**
+ * Decoder is not a preferred choice for probing.
+ * This indicates that the decoder could, for example, be a hardware
+ * decoder that is expensive to spin up, or that it simply does not
+ * provide a lot of useful information about the stream.
+ * A decoder marked with this flag should only be used as a last-resort
+ * choice for probing.
+ */
+#define AV_CODEC_CAP_AVOID_PROBING (1 << 17)
+
+/**
+ * Codec is backed by a hardware implementation. Typically used to
+ * identify a non-hwaccel hardware decoder. For information about hwaccels, use
+ * avcodec_get_hw_config() instead.
+ */
+#define AV_CODEC_CAP_HARDWARE (1 << 18)
+
+/**
+ * Codec is potentially backed by a hardware implementation, but not
+ * necessarily. This is used instead of AV_CODEC_CAP_HARDWARE, if the
+ * implementation provides some sort of internal fallback.
+ */
+#define AV_CODEC_CAP_HYBRID (1 << 19)
+
+/**
+ * This encoder can reorder user opaque values from input AVFrames and return
+ * them with corresponding output packets.
+ * @see AV_CODEC_FLAG_COPY_OPAQUE
+ */
+#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20)
+
+/**
+ * This encoder can be flushed using avcodec_flush_buffers(). If this flag is
+ * not set, the encoder must be closed and reopened to ensure that no frames
+ * remain pending.
+ */
+#define AV_CODEC_CAP_ENCODER_FLUSH (1 << 21)
+
+/**
+ * The encoder is able to output reconstructed frame data, i.e. raw frames that
+ * would be produced by decoding the encoded bitstream.
+ *
+ * Reconstructed frame output is enabled by the AV_CODEC_FLAG_RECON_FRAME flag.
+ */
+#define AV_CODEC_CAP_ENCODER_RECON_FRAME (1 << 22)
+
+/**
+ * AVProfile.
+ */
+typedef struct AVProfile {
+ int profile;
+ const char *name; ///< short name for the profile
+} AVProfile;
+
+/**
+ * AVCodec.
+ */
+typedef struct AVCodec {
+ /**
+ * Name of the codec implementation.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ * This is the primary way to find a codec from the user perspective.
+ */
+ const char *name;
+ /**
+ * Descriptive name for the codec, meant to be more human readable than name.
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char *long_name;
+ enum AVMediaType type;
+ enum AVCodecID id;
+ /**
+ * Codec capabilities.
+ * see AV_CODEC_CAP_*
+ */
+ int capabilities;
+ uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
+ const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
+ const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
+ const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
+ const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * @deprecated use ch_layouts instead
+ */
+ attribute_deprecated
+    const uint64_t *channel_layouts; ///< array of supported channel layouts, or NULL if unknown; array is terminated by 0
+#endif
+ const AVClass *priv_class; ///< AVClass for the private context
+ const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
+
+ /**
+ * Group name of the codec implementation.
+ * This is a short symbolic name of the wrapper backing this codec. A
+ * wrapper uses some kind of external implementation for the codec, such
+ * as an external library, or a codec implementation provided by the OS or
+ * the hardware.
+ * If this field is NULL, this is a builtin, libavcodec native codec.
+ * If non-NULL, this will be the suffix in AVCodec.name in most cases
+ * (usually AVCodec.name will be of the form "<codec_name>_<wrapper_name>").
+ */
+ const char *wrapper_name;
+
+ /**
+ * Array of supported channel layouts, terminated with a zeroed layout.
+ */
+ const AVChannelLayout *ch_layouts;
+} AVCodec;
+
+/**
+ * Iterate over all registered codecs.
+ *
+ * @param opaque a pointer where libavcodec will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the next registered codec or NULL when the iteration is
+ * finished
+ */
+const AVCodec *av_codec_iterate(void **opaque);
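+/*
+ * Illustrative usage sketch: enumerate every registered codec, starting the
+ * iteration from a NULL state pointer (printf stands in for real output
+ * handling).
+ *
+ *     void *iter = NULL;
+ *     const AVCodec *c;
+ *     while ((c = av_codec_iterate(&iter)))
+ *         printf("%s\n", c->name);
+ */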
+
+/**
+ * Find a registered decoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+const AVCodec *avcodec_find_decoder(enum AVCodecID id);
+
+/**
+ * Find a registered decoder with the specified name.
+ *
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+const AVCodec *avcodec_find_decoder_by_name(const char *name);
+
+/**
+ * Find a registered encoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+const AVCodec *avcodec_find_encoder(enum AVCodecID id);
+
+/**
+ * Find a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+const AVCodec *avcodec_find_encoder_by_name(const char *name);
+/**
+ * @return a non-zero number if codec is an encoder, zero otherwise
+ */
+int av_codec_is_encoder(const AVCodec *codec);
+
+/**
+ * @return a non-zero number if codec is a decoder, zero otherwise
+ */
+int av_codec_is_decoder(const AVCodec *codec);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec the codec that is searched for the given profile
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ */
+const char *av_get_profile_name(const AVCodec *codec, int profile);
+
+enum {
+ /**
+ * The codec supports this format via the hw_device_ctx interface.
+ *
+ * When selecting this format, AVCodecContext.hw_device_ctx should
+ * have been set to a device of the specified type before calling
+ * avcodec_open2().
+ */
+ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX = 0x01,
+ /**
+ * The codec supports this format via the hw_frames_ctx interface.
+ *
+ * When selecting this format for a decoder,
+ * AVCodecContext.hw_frames_ctx should be set to a suitable frames
+ * context inside the get_format() callback. The frames context
+ * must have been created on a device of the specified type.
+ *
+ * When selecting this format for an encoder,
+ * AVCodecContext.hw_frames_ctx should be set to the context which
+ * will be used for the input frames before calling avcodec_open2().
+ */
+ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX = 0x02,
+ /**
+ * The codec supports this format by some internal method.
+ *
+ * This format can be selected without any additional configuration -
+ * no device or frames context is required.
+ */
+ AV_CODEC_HW_CONFIG_METHOD_INTERNAL = 0x04,
+ /**
+ * The codec supports this format by some ad-hoc method.
+ *
+ * Additional settings and/or function calls are required. See the
+ * codec-specific documentation for details. (Methods requiring
+ * this sort of configuration are deprecated and others should be
+ * used in preference.)
+ */
+ AV_CODEC_HW_CONFIG_METHOD_AD_HOC = 0x08,
+};
+
+typedef struct AVCodecHWConfig {
+ /**
+ * For decoders, a hardware pixel format which that decoder may be
+ * able to decode to if suitable hardware is available.
+ *
+ * For encoders, a pixel format which the encoder may be able to
+ * accept. If set to AV_PIX_FMT_NONE, this applies to all pixel
+ * formats supported by the codec.
+ */
+ enum AVPixelFormat pix_fmt;
+ /**
+ * Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible
+ * setup methods which can be used with this configuration.
+ */
+ int methods;
+ /**
+ * The device type associated with the configuration.
+ *
+ * Must be set for AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX and
+ * AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, otherwise unused.
+ */
+ enum AVHWDeviceType device_type;
+} AVCodecHWConfig;
+
+/**
+ * Retrieve supported hardware configurations for a codec.
+ *
+ * Values of index from zero to some maximum return the indexed configuration
+ * descriptor; all other values return NULL. If the codec does not support
+ * any hardware configurations then it will always return NULL.
+ */
+const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index);
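+/*
+ * Illustrative usage sketch: probe a codec's hardware configurations by
+ * increasing index until NULL is returned, here picking one usable through
+ * the hw_device_ctx interface for an assumed device type (VAAPI is only an
+ * example); the snippet is imagined inside a function returning a pixel
+ * format, e.g. a get_format() callback.
+ *
+ *     for (int i = 0;; i++) {
+ *         const AVCodecHWConfig *cfg = avcodec_get_hw_config(codec, i);
+ *         if (!cfg)
+ *             break;                    // no matching configuration found
+ *         if ((cfg->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) &&
+ *             cfg->device_type == AV_HWDEVICE_TYPE_VAAPI)
+ *             return cfg->pix_fmt;      // use this pixel format
+ *     }
+ */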
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_CODEC_H */
diff --git a/media/ffvpx/libavcodec/codec_desc.c b/media/ffvpx/libavcodec/codec_desc.c
new file mode 100644
index 0000000000..199f62df15
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_desc.c
@@ -0,0 +1,3693 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * This table was generated from the long and short names of AVCodecs;
+ * please see the respective codec sources for authorship.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "libavutil/internal.h"
+#include "libavutil/macros.h"
+
+#include "codec_id.h"
+#include "codec_desc.h"
+#include "profiles.h"
+
+#define MT(...) (const char *const[]){ __VA_ARGS__, NULL }
+
+static const AVCodecDescriptor codec_descriptors[] = {
+ /* video codecs */
+ {
+ .id = AV_CODEC_ID_MPEG1VIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mpeg1video",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ },
+ {
+ .id = AV_CODEC_ID_MPEG2VIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mpeg2video",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_mpeg2_video_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_H261,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "h261",
+ .long_name = NULL_IF_CONFIG_SMALL("H.261"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_H263,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "h263",
+ .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ },
+ {
+ .id = AV_CODEC_ID_RV10,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "rv10",
+ .long_name = NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_RV20,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "rv20",
+ .long_name = NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ },
+ {
+ .id = AV_CODEC_ID_MJPEG,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mjpeg",
+ .long_name = NULL_IF_CONFIG_SMALL("Motion JPEG"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ .mime_types= MT("image/jpeg"),
+ .profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_MJPEGB,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mjpegb",
+ .long_name = NULL_IF_CONFIG_SMALL("Apple MJPEG-B"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_LJPEG,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ljpeg",
+ .long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_SP5X,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "sp5x",
+ .long_name = NULL_IF_CONFIG_SMALL("Sunplus JPEG (SP5X)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_JPEGLS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "jpegls",
+ .long_name = NULL_IF_CONFIG_SMALL("JPEG-LS"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY |
+ AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MPEG4,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mpeg4",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_mpeg4_video_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_RAWVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "rawvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("raw video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MSMPEG4V1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "msmpeg4v1",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MSMPEG4V2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "msmpeg4v2",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MSMPEG4V3,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "msmpeg4v3",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WMV1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "wmv1",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WMV2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "wmv2",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_H263P,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "h263p",
+ .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ },
+ {
+ .id = AV_CODEC_ID_H263I,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "h263i",
+ .long_name = NULL_IF_CONFIG_SMALL("Intel H.263"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ },
+ {
+ .id = AV_CODEC_ID_FLV1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "flv1",
+ .long_name = NULL_IF_CONFIG_SMALL("FLV / Sorenson Spark / Sorenson H.263 (Flash Video)"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SVQ1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "svq1",
+ .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SVQ3,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "svq3",
+ .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ },
+ {
+ .id = AV_CODEC_ID_DVVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "dvvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_HUFFYUV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "huffyuv",
+ .long_name = NULL_IF_CONFIG_SMALL("HuffYUV"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_CYUV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cyuv",
+ .long_name = NULL_IF_CONFIG_SMALL("Creative YUV (CYUV)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_H264,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "h264",
+ .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS | AV_CODEC_PROP_REORDER,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_INDEO3,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "indeo3",
+ .long_name = NULL_IF_CONFIG_SMALL("Intel Indeo 3"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VP3,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vp3",
+ .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_THEORA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "theora",
+ .long_name = NULL_IF_CONFIG_SMALL("Theora"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ASV1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "asv1",
+ .long_name = NULL_IF_CONFIG_SMALL("ASUS V1"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ASV2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "asv2",
+ .long_name = NULL_IF_CONFIG_SMALL("ASUS V2"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FFV1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ffv1",
+ .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_4XM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "4xm",
+ .long_name = NULL_IF_CONFIG_SMALL("4X Movie"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VCR1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vcr1",
+ .long_name = NULL_IF_CONFIG_SMALL("ATI VCR1"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CLJR,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cljr",
+ .long_name = NULL_IF_CONFIG_SMALL("Cirrus Logic AccuPak"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MDEC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mdec",
+ .long_name = NULL_IF_CONFIG_SMALL("Sony PlayStation MDEC (Motion DECoder)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ROQ,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "roq",
+ .long_name = NULL_IF_CONFIG_SMALL("id RoQ video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_INTERPLAY_VIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "interplayvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("Interplay MVE video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_XAN_WC3,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "xan_wc3",
+ .long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_XAN_WC4,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "xan_wc4",
+ .long_name = NULL_IF_CONFIG_SMALL("Wing Commander IV / Xxan"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_RPZA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "rpza",
+ .long_name = NULL_IF_CONFIG_SMALL("QuickTime video (RPZA)"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CINEPAK,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cinepak",
+ .long_name = NULL_IF_CONFIG_SMALL("Cinepak"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WS_VQA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ws_vqa",
+ .long_name = NULL_IF_CONFIG_SMALL("Westwood Studios VQA (Vector Quantized Animation) video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MSRLE,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "msrle",
+ .long_name = NULL_IF_CONFIG_SMALL("Microsoft RLE"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MSVIDEO1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "msvideo1",
+ .long_name = NULL_IF_CONFIG_SMALL("Microsoft Video 1"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_IDCIN,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "idcin",
+ .long_name = NULL_IF_CONFIG_SMALL("id Quake II CIN video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_8BPS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "8bps",
+ .long_name = NULL_IF_CONFIG_SMALL("QuickTime 8BPS video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_SMC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "smc",
+ .long_name = NULL_IF_CONFIG_SMALL("QuickTime Graphics (SMC)"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FLIC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "flic",
+ .long_name = NULL_IF_CONFIG_SMALL("Autodesk Animator Flic video"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_TRUEMOTION1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "truemotion1",
+ .long_name = NULL_IF_CONFIG_SMALL("Duck TrueMotion 1.0"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VMDVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vmdvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MSZH,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mszh",
+ .long_name = NULL_IF_CONFIG_SMALL("LCL (LossLess Codec Library) MSZH"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_ZLIB,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "zlib",
+ .long_name = NULL_IF_CONFIG_SMALL("LCL (LossLess Codec Library) ZLIB"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_QTRLE,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "qtrle",
+ .long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_TSCC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "tscc",
+ .long_name = NULL_IF_CONFIG_SMALL("TechSmith Screen Capture Codec"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_ULTI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ulti",
+ .long_name = NULL_IF_CONFIG_SMALL("IBM UltiMotion"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_QDRAW,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "qdraw",
+ .long_name = NULL_IF_CONFIG_SMALL("Apple QuickDraw"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_VIXL,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vixl",
+ .long_name = NULL_IF_CONFIG_SMALL("Miro VideoXL"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_QPEG,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "qpeg",
+ .long_name = NULL_IF_CONFIG_SMALL("Q-team QPEG"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PNG,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "png",
+ .long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/png"),
+ },
+ {
+ .id = AV_CODEC_ID_PPM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ppm",
+ .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PBM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pbm",
+ .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PGM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pgm",
+ .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PGMYUV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pgmyuv",
+ .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PAM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pam",
+ .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/x-portable-pixmap"),
+ },
+ {
+ .id = AV_CODEC_ID_FFVHUFF,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ffvhuff",
+ .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_RV30,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "rv30",
+ .long_name = NULL_IF_CONFIG_SMALL("RealVideo 3.0"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ },
+ {
+ .id = AV_CODEC_ID_RV40,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "rv40",
+ .long_name = NULL_IF_CONFIG_SMALL("RealVideo 4.0"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ },
+ {
+ .id = AV_CODEC_ID_VC1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vc1",
+ .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_vc1_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_WMV3,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "wmv3",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_vc1_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_LOCO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "loco",
+ .long_name = NULL_IF_CONFIG_SMALL("LOCO"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_WNV1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "wnv1",
+ .long_name = NULL_IF_CONFIG_SMALL("Winnov WNV1"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_AASC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "aasc",
+ .long_name = NULL_IF_CONFIG_SMALL("Autodesk RLE"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_INDEO2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "indeo2",
+ .long_name = NULL_IF_CONFIG_SMALL("Intel Indeo 2"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FRAPS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "fraps",
+ .long_name = NULL_IF_CONFIG_SMALL("Fraps"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_TRUEMOTION2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "truemotion2",
+ .long_name = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_BMP,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "bmp",
+ .long_name = NULL_IF_CONFIG_SMALL("BMP (Windows and OS/2 bitmap)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/x-ms-bmp"),
+ },
+ {
+ .id = AV_CODEC_ID_CSCD,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cscd",
+ .long_name = NULL_IF_CONFIG_SMALL("CamStudio"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MMVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mmvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("American Laser Games MM Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ZMBV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "zmbv",
+ .long_name = NULL_IF_CONFIG_SMALL("Zip Motion Blocks Video"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_AVS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "avs",
+ .long_name = NULL_IF_CONFIG_SMALL("AVS (Audio Video Standard) video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SMACKVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "smackvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("Smacker video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_NUV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "nuv",
+ .long_name = NULL_IF_CONFIG_SMALL("NuppelVideo/RTJPEG"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_KMVC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "kmvc",
+ .long_name = NULL_IF_CONFIG_SMALL("Karl Morton's video codec"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FLASHSV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "flashsv",
+ .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video v1"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_CAVS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cavs",
+ .long_name = NULL_IF_CONFIG_SMALL("Chinese AVS (Audio Video Standard) (AVS1-P2, JiZhun profile)"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ },
+ {
+ .id = AV_CODEC_ID_JPEG2000,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "jpeg2000",
+ .long_name = NULL_IF_CONFIG_SMALL("JPEG 2000"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY |
+ AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/jp2"),
+ .profiles = NULL_IF_CONFIG_SMALL(ff_jpeg2000_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_VMNC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vmnc",
+ .long_name = NULL_IF_CONFIG_SMALL("VMware Screen Codec / VMware Video"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_VP5,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vp5",
+ .long_name = NULL_IF_CONFIG_SMALL("On2 VP5"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VP6,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vp6",
+ .long_name = NULL_IF_CONFIG_SMALL("On2 VP6"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VP6F,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vp6f",
+ .long_name = NULL_IF_CONFIG_SMALL("On2 VP6 (Flash version)"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TARGA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "targa",
+ .long_name = NULL_IF_CONFIG_SMALL("Truevision Targa image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/x-targa", "image/x-tga"),
+ },
+ {
+ .id = AV_CODEC_ID_DSICINVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "dsicinvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TIERTEXSEQVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "tiertexseqvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("Tiertex Limited SEQ video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TIFF,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "tiff",
+ .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/tiff"),
+ },
+ {
+ .id = AV_CODEC_ID_GIF,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "gif",
+ .long_name = NULL_IF_CONFIG_SMALL("CompuServe GIF (Graphics Interchange Format)"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/gif"),
+ },
+ {
+ .id = AV_CODEC_ID_DXA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "dxa",
+ .long_name = NULL_IF_CONFIG_SMALL("Feeble Files/ScummVM DXA"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_DNXHD,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "dnxhd",
+ .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_dnxhd_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_THP,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "thp",
+ .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SGI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "sgi",
+ .long_name = NULL_IF_CONFIG_SMALL("SGI image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_C93,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "c93",
+ .long_name = NULL_IF_CONFIG_SMALL("Interplay C93"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_BETHSOFTVID,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "bethsoftvid",
+ .long_name = NULL_IF_CONFIG_SMALL("Bethesda VID video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PTX,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ptx",
+ .long_name = NULL_IF_CONFIG_SMALL("V.Flash PTX image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TXD,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "txd",
+ .long_name = NULL_IF_CONFIG_SMALL("Renderware TXD (TeXture Dictionary) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VP6A,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vp6a",
+ .long_name = NULL_IF_CONFIG_SMALL("On2 VP6 (Flash version, with alpha channel)"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_AMV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "amv",
+ .long_name = NULL_IF_CONFIG_SMALL("AMV Video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VB,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vb",
+ .long_name = NULL_IF_CONFIG_SMALL("Beam Software VB"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PCX,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pcx",
+ .long_name = NULL_IF_CONFIG_SMALL("PC Paintbrush PCX image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/x-pcx"),
+ },
+ {
+ .id = AV_CODEC_ID_SUNRAST,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "sunrast",
+ .long_name = NULL_IF_CONFIG_SMALL("Sun Rasterfile image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_INDEO4,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "indeo4",
+ .long_name = NULL_IF_CONFIG_SMALL("Intel Indeo Video Interactive 4"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_INDEO5,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "indeo5",
+ .long_name = NULL_IF_CONFIG_SMALL("Intel Indeo Video Interactive 5"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MIMIC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mimic",
+ .long_name = NULL_IF_CONFIG_SMALL("Mimic"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_RL2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "rl2",
+ .long_name = NULL_IF_CONFIG_SMALL("RL2 video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ESCAPE124,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "escape124",
+ .long_name = NULL_IF_CONFIG_SMALL("Escape 124"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DIRAC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "dirac",
+ .long_name = NULL_IF_CONFIG_SMALL("Dirac"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS | AV_CODEC_PROP_REORDER,
+ },
+ {
+ .id = AV_CODEC_ID_BFI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "bfi",
+ .long_name = NULL_IF_CONFIG_SMALL("Brute Force & Ignorance"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CMV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cmv",
+ .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts CMV video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MOTIONPIXELS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "motionpixels",
+ .long_name = NULL_IF_CONFIG_SMALL("Motion Pixels video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TGV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "tgv",
+ .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGV video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TGQ,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "tgq",
+ .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGQ video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TQI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "tqi",
+ .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TQI video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_AURA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "aura",
+ .long_name = NULL_IF_CONFIG_SMALL("Auravision AURA"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_AURA2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "aura2",
+ .long_name = NULL_IF_CONFIG_SMALL("Auravision Aura 2"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_V210X,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "v210x",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_TMV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "tmv",
+ .long_name = NULL_IF_CONFIG_SMALL("8088flex TMV"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_V210,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "v210",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_DPX,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "dpx",
+ .long_name = NULL_IF_CONFIG_SMALL("DPX (Digital Picture Exchange) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MAD,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mad",
+ .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts Madcow Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FRWU,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "frwu",
+ .long_name = NULL_IF_CONFIG_SMALL("Forward Uncompressed"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_FLASHSV2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "flashsv2",
+ .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video v2"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CDGRAPHICS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cdgraphics",
+ .long_name = NULL_IF_CONFIG_SMALL("CD Graphics video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_R210,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "r210",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed RGB 10-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_ANM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "anm",
+ .long_name = NULL_IF_CONFIG_SMALL("Deluxe Paint Animation"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_BINKVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "binkvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("Bink video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_IFF_ILBM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "iff_ilbm",
+ .long_name = NULL_IF_CONFIG_SMALL("IFF ACBM/ANIM/DEEP/ILBM/PBM/RGB8/RGBN"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_KGV1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "kgv1",
+ .long_name = NULL_IF_CONFIG_SMALL("Kega Game Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_YOP,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "yop",
+ .long_name = NULL_IF_CONFIG_SMALL("Psygnosis YOP Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VP8,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vp8",
+ .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PICTOR,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pictor",
+ .long_name = NULL_IF_CONFIG_SMALL("Pictor/PC Paint"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ANSI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ansi",
+ .long_name = NULL_IF_CONFIG_SMALL("ASCII/ANSI art"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_A64_MULTI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "a64_multi",
+ .long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_A64_MULTI5,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "a64_multi5",
+ .long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_R10K,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "r10k",
+ .long_name = NULL_IF_CONFIG_SMALL("AJA Kona 10-bit RGB Codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MXPEG,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mxpeg",
+ .long_name = NULL_IF_CONFIG_SMALL("Mobotix MxPEG video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_LAGARITH,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "lagarith",
+ .long_name = NULL_IF_CONFIG_SMALL("Lagarith lossless"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PRORES,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "prores",
+ .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_JV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "jv",
+ .long_name = NULL_IF_CONFIG_SMALL("Bitmap Brothers JV video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DFA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "dfa",
+ .long_name = NULL_IF_CONFIG_SMALL("Chronomaster DFA"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WMV3IMAGE,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "wmv3image",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VC1IMAGE,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vc1image",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_UTVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "utvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_BMV_VIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "bmv_video",
+ .long_name = NULL_IF_CONFIG_SMALL("Discworld II BMV video"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_VBLE,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vble",
+ .long_name = NULL_IF_CONFIG_SMALL("VBLE Lossless Codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_DXTORY,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "dxtory",
+ .long_name = NULL_IF_CONFIG_SMALL("Dxtory"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_V410,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "v410",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:4:4 10-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_XWD,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "xwd",
+ .long_name = NULL_IF_CONFIG_SMALL("XWD (X Window Dump) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/x-xwindowdump"),
+ },
+ {
+ .id = AV_CODEC_ID_CDXL,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cdxl",
+ .long_name = NULL_IF_CONFIG_SMALL("Commodore CDXL video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_XBM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "xbm",
+ .long_name = NULL_IF_CONFIG_SMALL("XBM (X BitMap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/x-xbitmap"),
+ },
+ {
+ .id = AV_CODEC_ID_ZEROCODEC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "zerocodec",
+ .long_name = NULL_IF_CONFIG_SMALL("ZeroCodec Lossless Video"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MSS1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mss1",
+ .long_name = NULL_IF_CONFIG_SMALL("MS Screen 1"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MSA1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "msa1",
+ .long_name = NULL_IF_CONFIG_SMALL("MS ATC Screen"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TSCC2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "tscc2",
+ .long_name = NULL_IF_CONFIG_SMALL("TechSmith Screen Codec 2"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MTS2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mts2",
+ .long_name = NULL_IF_CONFIG_SMALL("MS Expression Encoder Screen"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CLLC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cllc",
+ .long_name = NULL_IF_CONFIG_SMALL("Canopus Lossless Codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MSS2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mss2",
+ .long_name = NULL_IF_CONFIG_SMALL("MS Windows Media Video V9 Screen"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VP9,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vp9",
+ .long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
+ .props = AV_CODEC_PROP_LOSSY,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_AIC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "aic",
+ .long_name = NULL_IF_CONFIG_SMALL("Apple Intermediate Codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ESCAPE130,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "escape130",
+ .long_name = NULL_IF_CONFIG_SMALL("Escape 130"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_G2M,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "g2m",
+ .long_name = NULL_IF_CONFIG_SMALL("Go2Meeting"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WEBP,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "webp",
+ .long_name = NULL_IF_CONFIG_SMALL("WebP"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY |
+ AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/webp"),
+ },
+ {
+ .id = AV_CODEC_ID_HNM4_VIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "hnm4video",
+ .long_name = NULL_IF_CONFIG_SMALL("HNM 4 video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_HEVC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "hevc",
+ .long_name = NULL_IF_CONFIG_SMALL("H.265 / HEVC (High Efficiency Video Coding)"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_FIC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "fic",
+ .long_name = NULL_IF_CONFIG_SMALL("Mirillis FIC"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ALIAS_PIX,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "alias_pix",
+ .long_name = NULL_IF_CONFIG_SMALL("Alias/Wavefront PIX image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_BRENDER_PIX,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "brender_pix",
+ .long_name = NULL_IF_CONFIG_SMALL("BRender PIX image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PAF_VIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "paf_video",
+ .long_name = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_EXR,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "exr",
+ .long_name = NULL_IF_CONFIG_SMALL("OpenEXR image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY |
+ AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_VP7,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vp7",
+ .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SANM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "sanm",
+ .long_name = NULL_IF_CONFIG_SMALL("LucasArts SANM/SMUSH video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SGIRLE,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "sgirle",
+ .long_name = NULL_IF_CONFIG_SMALL("SGI RLE 8-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MVC1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mvc1",
+ .long_name = NULL_IF_CONFIG_SMALL("Silicon Graphics Motion Video Compressor 1"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MVC2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mvc2",
+ .long_name = NULL_IF_CONFIG_SMALL("Silicon Graphics Motion Video Compressor 2"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_HQX,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "hqx",
+ .long_name = NULL_IF_CONFIG_SMALL("Canopus HQX"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TDSC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "tdsc",
+ .long_name = NULL_IF_CONFIG_SMALL("TDSC"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_HQ_HQA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "hq_hqa",
+ .long_name = NULL_IF_CONFIG_SMALL("Canopus HQ/HQA"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_HAP,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "hap",
+ .long_name = NULL_IF_CONFIG_SMALL("Vidvox Hap"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DDS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "dds",
+ .long_name = NULL_IF_CONFIG_SMALL("DirectDraw Surface image decoder"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY |
+ AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_DXV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "dxv",
+ .long_name = NULL_IF_CONFIG_SMALL("Resolume DXV"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SCREENPRESSO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "screenpresso",
+ .long_name = NULL_IF_CONFIG_SMALL("Screenpresso"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_RSCC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "rscc",
+ .long_name = NULL_IF_CONFIG_SMALL("innoHeim/Rsupport Screen Capture Codec"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_AVS2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "avs2",
+ .long_name = NULL_IF_CONFIG_SMALL("AVS2-P2/IEEE1857.4"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PGX,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pgx",
+ .long_name = NULL_IF_CONFIG_SMALL("PGX (JPEG2000 Test Format)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_AVS3,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "avs3",
+ .long_name = NULL_IF_CONFIG_SMALL("AVS3-P2/IEEE1857.10"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MSP2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "msp2",
+ .long_name = NULL_IF_CONFIG_SMALL("Microsoft Paint (MSP) version 2"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_VVC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vvc",
+ .long_name = NULL_IF_CONFIG_SMALL("H.266 / VVC (Versatile Video Coding)"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_vvc_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_Y41P,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "y41p",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed YUV 4:1:1 12-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_AVRP,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "avrp",
+ .long_name = NULL_IF_CONFIG_SMALL("Avid 1:1 10-bit RGB Packer"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_012V,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "012v",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_AVUI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "avui",
+ .long_name = NULL_IF_CONFIG_SMALL("Avid Meridien Uncompressed"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+#if FF_API_AYUV_CODECID
+ {
+ .id = AV_CODEC_ID_AYUV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ayuv",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed packed MS 4:4:4:4"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+#endif
+ {
+ .id = AV_CODEC_ID_TARGA_Y216,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "targa_y216",
+ .long_name = NULL_IF_CONFIG_SMALL("Pinnacle TARGA CineWave YUV16"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_V308,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "v308",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed packed 4:4:4"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_V408,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "v408",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed packed QT 4:4:4:4"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_YUV4,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "yuv4",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed packed 4:2:0"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_AVRN,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "avrn",
+ .long_name = NULL_IF_CONFIG_SMALL("Avid AVI Codec"),
+ },
+ {
+ .id = AV_CODEC_ID_CPIA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cpia",
+ .long_name = NULL_IF_CONFIG_SMALL("CPiA video format"),
+ },
+ {
+ .id = AV_CODEC_ID_XFACE,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "xface",
+ .long_name = NULL_IF_CONFIG_SMALL("X-face image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SNOW,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "snow",
+ .long_name = NULL_IF_CONFIG_SMALL("Snow"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_SMVJPEG,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "smvjpeg",
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ .long_name = NULL_IF_CONFIG_SMALL("Sigmatel Motion Video"),
+ },
+ {
+ .id = AV_CODEC_ID_APNG,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "apng",
+ .long_name = NULL_IF_CONFIG_SMALL("APNG (Animated Portable Network Graphics) image"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/png"),
+ },
+ {
+ .id = AV_CODEC_ID_DAALA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "daala",
+ .long_name = NULL_IF_CONFIG_SMALL("Daala"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_CFHD,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cfhd",
+ .long_name = NULL_IF_CONFIG_SMALL("GoPro CineForm HD"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TRUEMOTION2RT,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "truemotion2rt",
+ .long_name = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0 Real Time"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_M101,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "m101",
+ .long_name = NULL_IF_CONFIG_SMALL("Matrox Uncompressed SD"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MAGICYUV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "magicyuv",
+ .long_name = NULL_IF_CONFIG_SMALL("MagicYUV video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_SHEERVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "sheervideo",
+ .long_name = NULL_IF_CONFIG_SMALL("BitJazz SheerVideo"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_YLC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ylc",
+ .long_name = NULL_IF_CONFIG_SMALL("YUY2 Lossless Codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PSD,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "psd",
+ .long_name = NULL_IF_CONFIG_SMALL("Photoshop PSD file"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PIXLET,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pixlet",
+ .long_name = NULL_IF_CONFIG_SMALL("Apple Pixlet"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SPEEDHQ,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "speedhq",
+ .long_name = NULL_IF_CONFIG_SMALL("NewTek SpeedHQ"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FMVC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "fmvc",
+ .long_name = NULL_IF_CONFIG_SMALL("FM Screen Capture Codec"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_SCPR,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "scpr",
+ .long_name = NULL_IF_CONFIG_SMALL("ScreenPressor"),
+ .props = AV_CODEC_PROP_LOSSLESS | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CLEARVIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "clearvideo",
+ .long_name = NULL_IF_CONFIG_SMALL("Iterated Systems ClearVideo"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_XPM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "xpm",
+ .long_name = NULL_IF_CONFIG_SMALL("XPM (X PixMap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/x-xpixmap"),
+ },
+ {
+ .id = AV_CODEC_ID_AV1,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "av1",
+ .long_name = NULL_IF_CONFIG_SMALL("Alliance for Open Media AV1"),
+ .props = AV_CODEC_PROP_LOSSY,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_BITPACKED,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "bitpacked",
+ .long_name = NULL_IF_CONFIG_SMALL("Bitpacked"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MSCC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mscc",
+ .long_name = NULL_IF_CONFIG_SMALL("Mandsoft Screen Capture Codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_SRGC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "srgc",
+ .long_name = NULL_IF_CONFIG_SMALL("Screen Recorder Gold Codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_SVG,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "svg",
+ .long_name = NULL_IF_CONFIG_SMALL("Scalable Vector Graphics"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/svg+xml"),
+ },
+ {
+ .id = AV_CODEC_ID_GDV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "gdv",
+ .long_name = NULL_IF_CONFIG_SMALL("Gremlin Digital Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FITS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "fits",
+ .long_name = NULL_IF_CONFIG_SMALL("FITS (Flexible Image Transport System)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_IMM4,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "imm4",
+ .long_name = NULL_IF_CONFIG_SMALL("Infinity IMM4"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PROSUMER,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "prosumer",
+ .long_name = NULL_IF_CONFIG_SMALL("Brooktree ProSumer Video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MWSC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mwsc",
+ .long_name = NULL_IF_CONFIG_SMALL("MatchWare Screen Capture Codec"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_WCMV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "wcmv",
+ .long_name = NULL_IF_CONFIG_SMALL("WinCAM Motion Video"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_RASC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "rasc",
+ .long_name = NULL_IF_CONFIG_SMALL("RemotelyAnywhere Screen Capture"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_HYMT,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "hymt",
+ .long_name = NULL_IF_CONFIG_SMALL("HuffYUV MT"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_ARBC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "arbc",
+ .long_name = NULL_IF_CONFIG_SMALL("Gryphon's Anim Compressor"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_AGM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "agm",
+ .long_name = NULL_IF_CONFIG_SMALL("Amuse Graphics Movie"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_LSCR,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "lscr",
+ .long_name = NULL_IF_CONFIG_SMALL("LEAD Screen Capture"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VP4,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vp4",
+ .long_name = NULL_IF_CONFIG_SMALL("On2 VP4"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_IMM5,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "imm5",
+ .long_name = NULL_IF_CONFIG_SMALL("Infinity IMM5"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MVDV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mvdv",
+ .long_name = NULL_IF_CONFIG_SMALL("MidiVid VQ"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MVHA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mvha",
+ .long_name = NULL_IF_CONFIG_SMALL("MidiVid Archive Codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CDTOONS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cdtoons",
+ .long_name = NULL_IF_CONFIG_SMALL("CDToons video"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MV30,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mv30",
+ .long_name = NULL_IF_CONFIG_SMALL("MidiVid 3.0"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_NOTCHLC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "notchlc",
+ .long_name = NULL_IF_CONFIG_SMALL("NotchLC"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PFM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pfm",
+ .long_name = NULL_IF_CONFIG_SMALL("PFM (Portable FloatMap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MOBICLIP,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mobiclip",
+ .long_name = NULL_IF_CONFIG_SMALL("MobiClip Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PHOTOCD,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "photocd",
+ .long_name = NULL_IF_CONFIG_SMALL("Kodak Photo CD"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_IPU,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ipu",
+ .long_name = NULL_IF_CONFIG_SMALL("IPU Video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ARGO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "argo",
+ .long_name = NULL_IF_CONFIG_SMALL("Argonaut Games Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CRI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cri",
+ .long_name = NULL_IF_CONFIG_SMALL("Cintel RAW"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_SIMBIOSIS_IMX,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "simbiosis_imx",
+ .long_name = NULL_IF_CONFIG_SMALL("Simbiosis Interactive IMX Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SGA_VIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "sga",
+ .long_name = NULL_IF_CONFIG_SMALL("Digital Pictures SGA Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_GEM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "gem",
+ .long_name = NULL_IF_CONFIG_SMALL("GEM Raster image"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VBN,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vbn",
+ .long_name = NULL_IF_CONFIG_SMALL("Vizrt Binary Image"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_JPEGXL,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "jpegxl",
+ .long_name = NULL_IF_CONFIG_SMALL("JPEG XL"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY |
+ AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/jxl"),
+ },
+ {
+ .id = AV_CODEC_ID_QOI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "qoi",
+ .long_name = NULL_IF_CONFIG_SMALL("QOI (Quite OK Image)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PHM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "phm",
+ .long_name = NULL_IF_CONFIG_SMALL("PHM (Portable HalfFloatMap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_RADIANCE_HDR,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "hdr",
+ .long_name = NULL_IF_CONFIG_SMALL("HDR (Radiance RGBE format) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WBMP,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "wbmp",
+ .long_name = NULL_IF_CONFIG_SMALL("WBMP (Wireless Application Protocol Bitmap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MEDIA100,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "media100",
+ .long_name = NULL_IF_CONFIG_SMALL("Media 100i"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VQC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vqc",
+ .long_name = NULL_IF_CONFIG_SMALL("ViewQuest VQC"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+
+ /* various PCM "codecs" */
+ {
+ .id = AV_CODEC_ID_PCM_S16LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s16le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S16BE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s16be",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit big-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_U16LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_u16le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 16-bit little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_U16BE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_u16be",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 16-bit big-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S8,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s8",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_U8,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_u8",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 8-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_MULAW,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_mulaw",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM mu-law / G.711 mu-law"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_ALAW,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_alaw",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM A-law / G.711 A-law"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S32LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s32le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 32-bit little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S32BE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s32be",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 32-bit big-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_U32LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_u32le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 32-bit little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_U32BE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_u32be",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 32-bit big-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S24LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s24le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 24-bit little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S24BE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s24be",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 24-bit big-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_U24LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_u24le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 24-bit little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_U24BE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_u24be",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 24-bit big-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S24DAUD,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s24daud",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM D-Cinema audio signed 24-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S16LE_PLANAR,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s16le_planar",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit little-endian planar"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_DVD,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_dvd",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 20|24-bit big-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_F32BE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_f32be",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM 32-bit floating point big-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_F32LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_f32le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM 32-bit floating point little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_F64BE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_f64be",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM 64-bit floating point big-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_F64LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_f64le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM 64-bit floating point little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_BLURAY,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_bluray",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16|20|24-bit big-endian for Blu-ray media"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_LXF,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_lxf",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 20-bit little-endian planar"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_S302M,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "s302m",
+ .long_name = NULL_IF_CONFIG_SMALL("SMPTE 302M"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S8_PLANAR,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s8_planar",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S24LE_PLANAR,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s24le_planar",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 24-bit little-endian planar"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S32LE_PLANAR,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s32le_planar",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 32-bit little-endian planar"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S16BE_PLANAR,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s16be_planar",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit big-endian planar"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S64LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s64le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 64-bit little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_S64BE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_s64be",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM signed 64-bit big-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_F16LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_f16le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM 16.8 floating point little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_F24LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_f24le",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM 24.0 floating point little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_VIDC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_vidc",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM Archimedes VIDC"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_SGA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_sga",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM SGA"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+
+ /* various ADPCM codecs */
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_QT,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_qt",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA QuickTime"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_WAV,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_wav",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA WAV"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_DK3,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_dk3",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Duck DK3"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_DK4,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_dk4",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Duck DK4"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_WS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_ws",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Westwood"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_SMJPEG,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_smjpeg",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Loki SDL MJPEG"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_MS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ms",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Microsoft"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_4XM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_4xm",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM 4X Movie"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_XA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_xa",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM CDROM XA"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_ADX,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_adx",
+ .long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_EA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ea",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_G726,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_g726",
+ .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_CT,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ct",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Creative Technology"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_SWF,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_swf",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Shockwave Flash"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_YAMAHA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_yamaha",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Yamaha"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_SBPRO_4,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_sbpro_4",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Sound Blaster Pro 4-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_SBPRO_3,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_sbpro_3",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Sound Blaster Pro 2.6-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_SBPRO_2,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_sbpro_2",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Sound Blaster Pro 2-bit"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_THP,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_thp",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo THP"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_AMV,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_amv",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA AMV"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_EA_R1,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ea_r1",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts R1"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_EA_R3,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ea_r3",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts R3"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_EA_R2,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ea_r2",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts R2"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_ea_sead",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Electronic Arts SEAD"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_EA_EACS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_ea_eacs",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Electronic Arts EACS"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_EA_XAS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ea_xas",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts XAS"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ea_maxis_xa",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts Maxis CDROM XA"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_ISS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_iss",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Funcom ISS"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_G722,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_g722",
+ .long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_APC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_apc",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA CRYO APC"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_VIMA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_vima",
+ .long_name = NULL_IF_CONFIG_SMALL("LucasArts VIMA audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_AFC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_afc",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo Gamecube AFC"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_OKI,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_oki",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Dialogic OKI"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_DTK,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_dtk",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo Gamecube DTK"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_RAD,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_rad",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Radical"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_G726LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_g726le",
+ .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM little-endian"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_THP_LE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_thp_le",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo THP (Little-Endian)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_PSX,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_psx",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Playstation"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_AICA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_aica",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Yamaha AICA"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_DAT4,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_dat4",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Eurocom DAT4"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_MTAF,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_mtaf",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM MTAF"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_AGM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_agm",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM AmuseGraphics Movie AGM"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_ARGO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_argo",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Argonaut Games"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_SSI,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_ssi",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Simon & Schuster Interactive"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_ZORK,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_zork",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Zork"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_APM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_apm",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Ubisoft APM"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_ALP,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_alp",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA High Voltage Software ALP"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_MTF,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_mtf",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Capcom's MT Framework"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_CUNNING,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_cunning",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Cunning Developments"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_MOFLEX,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_moflex",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA MobiClip MOFLEX"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_ACORN,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_acorn",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Acorn Replay"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_XMD,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_xmd",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Konami XMD"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+
+ /* AMR */
+ {
+ .id = AV_CODEC_ID_AMR_NB,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "amr_nb",
+ .long_name = NULL_IF_CONFIG_SMALL("AMR-NB (Adaptive Multi-Rate NarrowBand)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_AMR_WB,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "amr_wb",
+ .long_name = NULL_IF_CONFIG_SMALL("AMR-WB (Adaptive Multi-Rate WideBand)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+
+    /* RealAudio codecs */
+ {
+ .id = AV_CODEC_ID_RA_144,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "ra_144",
+ .long_name = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_RA_288,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "ra_288",
+ .long_name = NULL_IF_CONFIG_SMALL("RealAudio 2.0 (28.8K)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+
+ /* various DPCM codecs */
+ {
+ .id = AV_CODEC_ID_ROQ_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "roq_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM id RoQ"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_INTERPLAY_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "interplay_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Interplay"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_XAN_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "xan_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Xan"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SOL_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "sol_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Sol"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SDX2_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "sdx2_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Squareroot-Delta-Exact"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_GREMLIN_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "gremlin_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Gremlin"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DERF_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "derf_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Xilam DERF"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WADY_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wady_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Marble WADY"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CBD2_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "cbd2_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Cuberoot-Delta-Exact"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+
+ /* audio codecs */
+ {
+ .id = AV_CODEC_ID_MP2,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mp2",
+ .long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MP3,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mp3",
+ .long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_AAC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "aac",
+ .long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_aac_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_AC3,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "ac3",
+ .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DTS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dts",
+ .long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_dca_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_VORBIS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "vorbis",
+ .long_name = NULL_IF_CONFIG_SMALL("Vorbis"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DVAUDIO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dvaudio",
+ .long_name = NULL_IF_CONFIG_SMALL("DV audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WMAV1,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wmav1",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WMAV2,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wmav2",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MACE3,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mace3",
+ .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 3:1"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MACE6,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mace6",
+ .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 6:1"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VMDAUDIO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "vmdaudio",
+ .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FLAC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "flac",
+ .long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MP3ADU,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mp3adu",
+ .long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MP3ON4,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mp3on4",
+ .long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SHORTEN,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "shorten",
+ .long_name = NULL_IF_CONFIG_SMALL("Shorten"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_ALAC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "alac",
+ .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_WESTWOOD_SND1,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "westwood_snd1",
+ .long_name = NULL_IF_CONFIG_SMALL("Westwood Audio (SND1)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_GSM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "gsm",
+ .long_name = NULL_IF_CONFIG_SMALL("GSM"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_QDM2,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "qdm2",
+ .long_name = NULL_IF_CONFIG_SMALL("QDesign Music Codec 2"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_COOK,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "cook",
+ .long_name = NULL_IF_CONFIG_SMALL("Cook / Cooker / Gecko (RealAudio G2)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TRUESPEECH,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "truespeech",
+ .long_name = NULL_IF_CONFIG_SMALL("DSP Group TrueSpeech"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TTA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "tta",
+ .long_name = NULL_IF_CONFIG_SMALL("TTA (True Audio)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_SMACKAUDIO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "smackaudio",
+ .long_name = NULL_IF_CONFIG_SMALL("Smacker audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_QCELP,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "qcelp",
+ .long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WAVPACK,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wavpack",
+ .long_name = NULL_IF_CONFIG_SMALL("WavPack"),
+ .props = AV_CODEC_PROP_INTRA_ONLY |
+ AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_DSICINAUDIO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dsicinaudio",
+ .long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_IMC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "imc",
+ .long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MUSEPACK7,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "musepack7",
+ .long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MLP,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mlp",
+ .long_name = NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_GSM_MS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "gsm_ms",
+ .long_name = NULL_IF_CONFIG_SMALL("GSM Microsoft variant"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ATRAC3,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "atrac3",
+ .long_name = NULL_IF_CONFIG_SMALL("ATRAC3 (Adaptive TRansform Acoustic Coding 3)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_APE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "ape",
+ .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_NELLYMOSER,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "nellymoser",
+ .long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MUSEPACK8,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "musepack8",
+ .long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SPEEX,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "speex",
+ .long_name = NULL_IF_CONFIG_SMALL("Speex"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WMAVOICE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wmavoice",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WMAPRO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wmapro",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WMALOSSLESS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wmalossless",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Lossless"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_ATRAC3P,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "atrac3p",
+ .long_name = NULL_IF_CONFIG_SMALL("ATRAC3+ (Adaptive TRansform Acoustic Coding 3+)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_EAC3,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "eac3",
+ .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SIPR,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "sipr",
+ .long_name = NULL_IF_CONFIG_SMALL("RealAudio SIPR / ACELP.NET"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MP1,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mp1",
+ .long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TWINVQ,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "twinvq",
+ .long_name = NULL_IF_CONFIG_SMALL("VQF TwinVQ"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TRUEHD,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "truehd",
+ .long_name = NULL_IF_CONFIG_SMALL("TrueHD"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MP4ALS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mp4als",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Audio Lossless Coding (ALS)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_ATRAC1,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "atrac1",
+ .long_name = NULL_IF_CONFIG_SMALL("ATRAC1 (Adaptive TRansform Acoustic Coding)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_BINKAUDIO_RDFT,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "binkaudio_rdft",
+ .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (RDFT)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_BINKAUDIO_DCT,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "binkaudio_dct",
+ .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (DCT)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_AAC_LATM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "aac_latm",
+ .long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Coding LATM syntax)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_aac_profiles),
+ },
+ {
+ .id = AV_CODEC_ID_QDMC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "qdmc",
+ .long_name = NULL_IF_CONFIG_SMALL("QDesign Music"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CELT,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "celt",
+ .long_name = NULL_IF_CONFIG_SMALL("Constrained Energy Lapped Transform (CELT)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_G723_1,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "g723_1",
+ .long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_G729,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "g729",
+ .long_name = NULL_IF_CONFIG_SMALL("G.729"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_8SVX_EXP,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "8svx_exp",
+ .long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_8SVX_FIB,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "8svx_fib",
+ .long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_BMV_AUDIO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "bmv_audio",
+ .long_name = NULL_IF_CONFIG_SMALL("Discworld II BMV audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_RALF,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "ralf",
+ .long_name = NULL_IF_CONFIG_SMALL("RealAudio Lossless"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_IAC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "iac",
+ .long_name = NULL_IF_CONFIG_SMALL("IAC (Indeo Audio Coder)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ILBC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "ilbc",
+ .long_name = NULL_IF_CONFIG_SMALL("iLBC (Internet Low Bitrate Codec)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_OPUS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "opus",
+ .long_name = NULL_IF_CONFIG_SMALL("Opus (Opus Interactive Audio Codec)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_COMFORT_NOISE,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "comfortnoise",
+ .long_name = NULL_IF_CONFIG_SMALL("RFC 3389 Comfort Noise"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_TAK,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "tak",
+ .long_name = NULL_IF_CONFIG_SMALL("TAK (Tom's lossless Audio Kompressor)"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_METASOUND,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "metasound",
+ .long_name = NULL_IF_CONFIG_SMALL("Voxware MetaSound"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PAF_AUDIO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "paf_audio",
+ .long_name = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File Audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ON2AVC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "avc",
+ .long_name = NULL_IF_CONFIG_SMALL("On2 Audio for Video Codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DSS_SP,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dss_sp",
+ .long_name = NULL_IF_CONFIG_SMALL("Digital Speech Standard - Standard Play mode (DSS SP)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CODEC2,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "codec2",
+ .long_name = NULL_IF_CONFIG_SMALL("codec2 (very low bitrate speech codec)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FFWAVESYNTH,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wavesynth",
+ .long_name = NULL_IF_CONFIG_SMALL("Wave synthesis pseudo-codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY,
+ },
+ {
+ .id = AV_CODEC_ID_SONIC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "sonic",
+ .long_name = NULL_IF_CONFIG_SMALL("Sonic"),
+ .props = AV_CODEC_PROP_INTRA_ONLY,
+ },
+ {
+ .id = AV_CODEC_ID_SONIC_LS,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "sonicls",
+ .long_name = NULL_IF_CONFIG_SMALL("Sonic lossless"),
+ .props = AV_CODEC_PROP_INTRA_ONLY,
+ },
+ {
+ .id = AV_CODEC_ID_EVRC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "evrc",
+ .long_name = NULL_IF_CONFIG_SMALL("EVRC (Enhanced Variable Rate Codec)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SMV,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "smv",
+ .long_name = NULL_IF_CONFIG_SMALL("SMV (Selectable Mode Vocoder)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DSD_LSBF,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dsd_lsbf",
+ .long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), least significant bit first"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DSD_MSBF,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dsd_msbf",
+ .long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), most significant bit first"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DSD_LSBF_PLANAR,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dsd_lsbf_planar",
+ .long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), least significant bit first, planar"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DSD_MSBF_PLANAR,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dsd_msbf_planar",
+ .long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), most significant bit first, planar"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_4GV,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "4gv",
+ .long_name = NULL_IF_CONFIG_SMALL("4GV (Fourth Generation Vocoder)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_INTERPLAY_ACM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "interplayacm",
+ .long_name = NULL_IF_CONFIG_SMALL("Interplay ACM"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_XMA1,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "xma1",
+ .long_name = NULL_IF_CONFIG_SMALL("Xbox Media Audio 1"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_XMA2,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "xma2",
+ .long_name = NULL_IF_CONFIG_SMALL("Xbox Media Audio 2"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DST,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dst",
+ .long_name = NULL_IF_CONFIG_SMALL("DST (Direct Stream Transfer)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_ATRAC3AL,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "atrac3al",
+ .long_name = NULL_IF_CONFIG_SMALL("ATRAC3 AL (Adaptive TRansform Acoustic Coding 3 Advanced Lossless)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_ATRAC3PAL,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "atrac3pal",
+ .long_name = NULL_IF_CONFIG_SMALL("ATRAC3+ AL (Adaptive TRansform Acoustic Coding 3+ Advanced Lossless)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_DOLBY_E,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dolby_e",
+ .long_name = NULL_IF_CONFIG_SMALL("Dolby E"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_APTX,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "aptx",
+ .long_name = NULL_IF_CONFIG_SMALL("aptX (Audio Processing Technology for Bluetooth)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_APTX_HD,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "aptx_hd",
+ .long_name = NULL_IF_CONFIG_SMALL("aptX HD (Audio Processing Technology for Bluetooth)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SBC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "sbc",
+ .long_name = NULL_IF_CONFIG_SMALL("SBC (low-complexity subband codec)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ATRAC9,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "atrac9",
+ .long_name = NULL_IF_CONFIG_SMALL("ATRAC9 (Adaptive TRansform Acoustic Coding 9)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_HCOM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "hcom",
+ .long_name = NULL_IF_CONFIG_SMALL("HCOM Audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ACELP_KELVIN,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "acelp.kelvin",
+ .long_name = NULL_IF_CONFIG_SMALL("Sipro ACELP.KELVIN"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MPEGH_3D_AUDIO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mpegh_3d_audio",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-H 3D Audio"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SIREN,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "siren",
+ .long_name = NULL_IF_CONFIG_SMALL("Siren"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_HCA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "hca",
+ .long_name = NULL_IF_CONFIG_SMALL("CRI HCA"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FASTAUDIO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "fastaudio",
+ .long_name = NULL_IF_CONFIG_SMALL("MobiClip FastAudio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MSNSIREN,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "msnsiren",
+ .long_name = NULL_IF_CONFIG_SMALL("MSN Siren"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DFPWM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dfpwm",
+ .long_name = NULL_IF_CONFIG_SMALL("DFPWM (Dynamic Filter Pulse Width Modulation)"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_BONK,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "bonk",
+ .long_name = NULL_IF_CONFIG_SMALL("Bonk audio"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MISC4,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "misc4",
+ .long_name = NULL_IF_CONFIG_SMALL("Micronas SC-4 Audio"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_INTRA_ONLY,
+ },
+ {
+ .id = AV_CODEC_ID_APAC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "apac",
+ .long_name = NULL_IF_CONFIG_SMALL("Marian's A-pac audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_FTR,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "ftr",
+ .long_name = NULL_IF_CONFIG_SMALL("FTR Voice"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WAVARC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wavarc",
+ .long_name = NULL_IF_CONFIG_SMALL("Waveform Archiver"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_RKA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "rka",
+ .long_name = NULL_IF_CONFIG_SMALL("RKA (RK Audio)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
+ },
+
+ /* subtitle codecs */
+ {
+ .id = AV_CODEC_ID_DVD_SUBTITLE,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "dvd_subtitle",
+ .long_name = NULL_IF_CONFIG_SMALL("DVD subtitles"),
+ .props = AV_CODEC_PROP_BITMAP_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_DVB_SUBTITLE,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "dvb_subtitle",
+ .long_name = NULL_IF_CONFIG_SMALL("DVB subtitles"),
+ .props = AV_CODEC_PROP_BITMAP_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_TEXT,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "text",
+ .long_name = NULL_IF_CONFIG_SMALL("raw UTF-8 text"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_XSUB,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "xsub",
+ .long_name = NULL_IF_CONFIG_SMALL("XSUB"),
+ .props = AV_CODEC_PROP_BITMAP_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_SSA,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "ssa",
+ .long_name = NULL_IF_CONFIG_SMALL("SSA (SubStation Alpha) subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_MOV_TEXT,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "mov_text",
+ .long_name = NULL_IF_CONFIG_SMALL("MOV text"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_HDMV_PGS_SUBTITLE,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "hdmv_pgs_subtitle",
+ .long_name = NULL_IF_CONFIG_SMALL("HDMV Presentation Graphic Stream subtitles"),
+ .props = AV_CODEC_PROP_BITMAP_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_DVB_TELETEXT,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "dvb_teletext",
+ .long_name = NULL_IF_CONFIG_SMALL("DVB teletext"),
+ },
+ {
+ .id = AV_CODEC_ID_SRT,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "srt",
+ .long_name = NULL_IF_CONFIG_SMALL("SubRip subtitle with embedded timing"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_MICRODVD,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "microdvd",
+ .long_name = NULL_IF_CONFIG_SMALL("MicroDVD subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_EIA_608,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "eia_608",
+ .long_name = NULL_IF_CONFIG_SMALL("EIA-608 closed captions"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_JACOSUB,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "jacosub",
+ .long_name = NULL_IF_CONFIG_SMALL("JACOsub subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_SAMI,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "sami",
+ .long_name = NULL_IF_CONFIG_SMALL("SAMI subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_REALTEXT,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "realtext",
+ .long_name = NULL_IF_CONFIG_SMALL("RealText subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_STL,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "stl",
+ .long_name = NULL_IF_CONFIG_SMALL("Spruce subtitle format"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_SUBVIEWER1,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "subviewer1",
+ .long_name = NULL_IF_CONFIG_SMALL("SubViewer v1 subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_SUBVIEWER,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "subviewer",
+ .long_name = NULL_IF_CONFIG_SMALL("SubViewer subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_SUBRIP,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "subrip",
+ .long_name = NULL_IF_CONFIG_SMALL("SubRip subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_WEBVTT,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "webvtt",
+ .long_name = NULL_IF_CONFIG_SMALL("WebVTT subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_MPL2,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "mpl2",
+ .long_name = NULL_IF_CONFIG_SMALL("MPL2 subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_VPLAYER,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "vplayer",
+ .long_name = NULL_IF_CONFIG_SMALL("VPlayer subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_PJS,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "pjs",
+ .long_name = NULL_IF_CONFIG_SMALL("PJS (Phoenix Japanimation Society) subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_ASS,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "ass",
+ .long_name = NULL_IF_CONFIG_SMALL("ASS (Advanced SSA) subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_HDMV_TEXT_SUBTITLE,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "hdmv_text_subtitle",
+ .long_name = NULL_IF_CONFIG_SMALL("HDMV Text subtitle"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_TTML,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "ttml",
+ .long_name = NULL_IF_CONFIG_SMALL("Timed Text Markup Language"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
+ {
+ .id = AV_CODEC_ID_ARIB_CAPTION,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "arib_caption",
+ .long_name = NULL_IF_CONFIG_SMALL("ARIB STD-B24 caption"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_arib_caption_profiles),
+ },
+
+ /* other kind of codecs and pseudo-codecs */
+ {
+ .id = AV_CODEC_ID_TTF,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "ttf",
+ .long_name = NULL_IF_CONFIG_SMALL("TrueType font"),
+ .mime_types= MT("application/x-truetype-font", "application/x-font"),
+ },
+ {
+ .id = AV_CODEC_ID_SCTE_35,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "scte_35",
+ .long_name = NULL_IF_CONFIG_SMALL("SCTE 35 Message Queue"),
+ },
+ {
+ .id = AV_CODEC_ID_EPG,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "epg",
+ .long_name = NULL_IF_CONFIG_SMALL("Electronic Program Guide"),
+ },
+ {
+ .id = AV_CODEC_ID_BINTEXT,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "bintext",
+ .long_name = NULL_IF_CONFIG_SMALL("Binary text"),
+ .props = AV_CODEC_PROP_INTRA_ONLY,
+ },
+ {
+ .id = AV_CODEC_ID_XBIN,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "xbin",
+ .long_name = NULL_IF_CONFIG_SMALL("eXtended BINary text"),
+ .props = AV_CODEC_PROP_INTRA_ONLY,
+ },
+ {
+ .id = AV_CODEC_ID_IDF,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "idf",
+ .long_name = NULL_IF_CONFIG_SMALL("iCEDraw text"),
+ .props = AV_CODEC_PROP_INTRA_ONLY,
+ },
+ {
+ .id = AV_CODEC_ID_OTF,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "otf",
+ .long_name = NULL_IF_CONFIG_SMALL("OpenType font"),
+ .mime_types= MT("application/vnd.ms-opentype"),
+ },
+ {
+ .id = AV_CODEC_ID_SMPTE_KLV,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "klv",
+ .long_name = NULL_IF_CONFIG_SMALL("SMPTE 336M Key-Length-Value (KLV) metadata"),
+ },
+ {
+ .id = AV_CODEC_ID_DVD_NAV,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "dvd_nav_packet",
+ .long_name = NULL_IF_CONFIG_SMALL("DVD Nav packet"),
+ },
+ {
+ .id = AV_CODEC_ID_TIMED_ID3,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "timed_id3",
+ .long_name = NULL_IF_CONFIG_SMALL("timed ID3 metadata"),
+ },
+ {
+ .id = AV_CODEC_ID_BIN_DATA,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "bin_data",
+ .long_name = NULL_IF_CONFIG_SMALL("binary data"),
+ .mime_types= MT("application/octet-stream"),
+ },
+ {
+ .id = AV_CODEC_ID_MPEG2TS,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "mpegts",
+ .long_name = NULL_IF_CONFIG_SMALL("raw MPEG-TS stream"),
+ .mime_types= MT("application/MP2T"),
+ },
+ {
+ .id = AV_CODEC_ID_WRAPPED_AVFRAME,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "wrapped_avframe",
+ .long_name = NULL_IF_CONFIG_SMALL("AVFrame to AVPacket passthrough"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_VNULL,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vnull",
+ .long_name = NULL_IF_CONFIG_SMALL("Null video codec"),
+ },
+ {
+ .id = AV_CODEC_ID_ANULL,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "anull",
+ .long_name = NULL_IF_CONFIG_SMALL("Null audio codec"),
+ },
+};
+
+static int descriptor_compare(const void *key, const void *member)
+{
+ enum AVCodecID id = *(const enum AVCodecID *) key;
+ const AVCodecDescriptor *desc = member;
+
+ return id - desc->id;
+}
+
+const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id)
+{
+ return bsearch(&id, codec_descriptors, FF_ARRAY_ELEMS(codec_descriptors),
+ sizeof(codec_descriptors[0]), descriptor_compare);
+}
+
+const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev)
+{
+ if (!prev)
+ return &codec_descriptors[0];
+ if (prev - codec_descriptors < FF_ARRAY_ELEMS(codec_descriptors) - 1)
+ return prev + 1;
+ return NULL;
+}
+
+const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name)
+{
+ const AVCodecDescriptor *desc = NULL;
+
+ while ((desc = avcodec_descriptor_next(desc)))
+ if (!strcmp(desc->name, name))
+ return desc;
+ return NULL;
+}
+
+enum AVMediaType avcodec_get_type(enum AVCodecID codec_id)
+{
+ const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
+ return desc ? desc->type : AVMEDIA_TYPE_UNKNOWN;
+}
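
The lookup helpers above rely on codec_descriptors being sorted by id: avcodec_descriptor_get() binary-searches the table with descriptor_compare(), while avcodec_descriptor_get_by_name() falls back to a linear walk driven by avcodec_descriptor_next(). The following minimal sketch is illustrative only and not part of the patch; it assumes the headers are installed under the usual libavcodec/ include path and that the program links against libavcodec.

    #include <stdio.h>
    #include <libavcodec/codec_desc.h>
    #include <libavcodec/codec_id.h>

    int main(void)
    {
        /* O(log n): the descriptor table is sorted by id, so bsearch() applies. */
        const AVCodecDescriptor *d = avcodec_descriptor_get(AV_CODEC_ID_OPUS);
        if (d)
            printf("%s: %s, lossy=%d\n", d->name,
                   d->long_name ? d->long_name : "(no long name)", /* NULL under CONFIG_SMALL */
                   !!(d->props & AV_CODEC_PROP_LOSSY));

        /* O(n): name lookup walks the whole table via avcodec_descriptor_next(). */
        d = avcodec_descriptor_get_by_name("flac");
        if (d)
            printf("flac -> id %d, media type %d\n",
                   (int)d->id, (int)avcodec_get_type(d->id));
        return 0;
    }
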
diff --git a/media/ffvpx/libavcodec/codec_desc.h b/media/ffvpx/libavcodec/codec_desc.h
new file mode 100644
index 0000000000..126b52df47
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_desc.h
@@ -0,0 +1,128 @@
+/*
+ * Codec descriptors public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_DESC_H
+#define AVCODEC_CODEC_DESC_H
+
+#include "libavutil/avutil.h"
+
+#include "codec_id.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * This struct describes the properties of a single codec described by an
+ * AVCodecID.
+ * @see avcodec_descriptor_get()
+ */
+typedef struct AVCodecDescriptor {
+ enum AVCodecID id;
+ enum AVMediaType type;
+ /**
+ * Name of the codec described by this descriptor. It is non-empty and
+ * unique for each codec descriptor. It should contain alphanumeric
+ * characters and '_' only.
+ */
+ const char *name;
+ /**
+ * A more descriptive name for this codec. May be NULL.
+ */
+ const char *long_name;
+ /**
+ * Codec properties, a combination of AV_CODEC_PROP_* flags.
+ */
+ int props;
+ /**
+ * MIME type(s) associated with the codec.
+ * May be NULL; if not, a NULL-terminated array of MIME types.
+ * The first item is always non-NULL and is the preferred MIME type.
+ */
+ const char *const *mime_types;
+ /**
+ * If non-NULL, an array of profiles recognized for this codec.
+ * Terminated with FF_PROFILE_UNKNOWN.
+ */
+ const struct AVProfile *profiles;
+} AVCodecDescriptor;
+
+/**
+ * Codec uses only intra compression.
+ * Video and audio codecs only.
+ */
+#define AV_CODEC_PROP_INTRA_ONLY (1 << 0)
+/**
+ * Codec supports lossy compression. Audio and video codecs only.
+ * @note a codec may support both lossy and lossless
+ * compression modes
+ */
+#define AV_CODEC_PROP_LOSSY (1 << 1)
+/**
+ * Codec supports lossless compression. Audio and video codecs only.
+ */
+#define AV_CODEC_PROP_LOSSLESS (1 << 2)
+/**
+ * Codec supports frame reordering. That is, the coded order (the order in which
+ * the encoded packets are output by the encoders / stored / input to the
+ * decoders) may be different from the presentation order of the corresponding
+ * frames.
+ *
+ * For codecs that do not have this property set, PTS and DTS should always be
+ * equal.
+ */
+#define AV_CODEC_PROP_REORDER (1 << 3)
+/**
+ * Subtitle codec is bitmap based.
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.
+ */
+#define AV_CODEC_PROP_BITMAP_SUB (1 << 16)
+/**
+ * Subtitle codec is text based.
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field.
+ */
+#define AV_CODEC_PROP_TEXT_SUB (1 << 17)
+
+/**
+ * @return descriptor for given codec ID or NULL if no descriptor exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id);
+
+/**
+ * Iterate over all codec descriptors known to libavcodec.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev);
+
+/**
+ * @return codec descriptor with the given name or NULL if no such descriptor
+ * exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_DESC_H
diff --git a/media/ffvpx/libavcodec/codec_id.h b/media/ffvpx/libavcodec/codec_id.h
new file mode 100644
index 0000000000..89a4a0cb89
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_id.h
@@ -0,0 +1,661 @@
+/*
+ * Codec IDs
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_ID_H
+#define AVCODEC_CODEC_ID_H
+
+#include "libavutil/avutil.h"
+#include "libavutil/samplefmt.h"
+
+#include "version_major.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * Identify the syntax and semantics of the bitstream.
+ * The principle is roughly:
+ * Two decoders with the same ID can decode the same streams.
+ * Two encoders with the same ID can encode compatible streams.
+ * There may be slight deviations from the principle due to implementation
+ * details.
+ *
+ * If you add a codec ID to this list, add it so that
+ * 1. no value of an existing codec ID changes (that would break ABI),
+ *    2. it is as close as possible to similar codecs.
+ *
+ * After adding new codec IDs, do not forget to add an entry to the codec
+ * descriptor list and bump libavcodec minor version.
+ */
+enum AVCodecID {
+ AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ AV_CODEC_ID_MPEG1VIDEO,
+ AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+ AV_CODEC_ID_H261,
+ AV_CODEC_ID_H263,
+ AV_CODEC_ID_RV10,
+ AV_CODEC_ID_RV20,
+ AV_CODEC_ID_MJPEG,
+ AV_CODEC_ID_MJPEGB,
+ AV_CODEC_ID_LJPEG,
+ AV_CODEC_ID_SP5X,
+ AV_CODEC_ID_JPEGLS,
+ AV_CODEC_ID_MPEG4,
+ AV_CODEC_ID_RAWVIDEO,
+ AV_CODEC_ID_MSMPEG4V1,
+ AV_CODEC_ID_MSMPEG4V2,
+ AV_CODEC_ID_MSMPEG4V3,
+ AV_CODEC_ID_WMV1,
+ AV_CODEC_ID_WMV2,
+ AV_CODEC_ID_H263P,
+ AV_CODEC_ID_H263I,
+ AV_CODEC_ID_FLV1,
+ AV_CODEC_ID_SVQ1,
+ AV_CODEC_ID_SVQ3,
+ AV_CODEC_ID_DVVIDEO,
+ AV_CODEC_ID_HUFFYUV,
+ AV_CODEC_ID_CYUV,
+ AV_CODEC_ID_H264,
+ AV_CODEC_ID_INDEO3,
+ AV_CODEC_ID_VP3,
+ AV_CODEC_ID_THEORA,
+ AV_CODEC_ID_ASV1,
+ AV_CODEC_ID_ASV2,
+ AV_CODEC_ID_FFV1,
+ AV_CODEC_ID_4XM,
+ AV_CODEC_ID_VCR1,
+ AV_CODEC_ID_CLJR,
+ AV_CODEC_ID_MDEC,
+ AV_CODEC_ID_ROQ,
+ AV_CODEC_ID_INTERPLAY_VIDEO,
+ AV_CODEC_ID_XAN_WC3,
+ AV_CODEC_ID_XAN_WC4,
+ AV_CODEC_ID_RPZA,
+ AV_CODEC_ID_CINEPAK,
+ AV_CODEC_ID_WS_VQA,
+ AV_CODEC_ID_MSRLE,
+ AV_CODEC_ID_MSVIDEO1,
+ AV_CODEC_ID_IDCIN,
+ AV_CODEC_ID_8BPS,
+ AV_CODEC_ID_SMC,
+ AV_CODEC_ID_FLIC,
+ AV_CODEC_ID_TRUEMOTION1,
+ AV_CODEC_ID_VMDVIDEO,
+ AV_CODEC_ID_MSZH,
+ AV_CODEC_ID_ZLIB,
+ AV_CODEC_ID_QTRLE,
+ AV_CODEC_ID_TSCC,
+ AV_CODEC_ID_ULTI,
+ AV_CODEC_ID_QDRAW,
+ AV_CODEC_ID_VIXL,
+ AV_CODEC_ID_QPEG,
+ AV_CODEC_ID_PNG,
+ AV_CODEC_ID_PPM,
+ AV_CODEC_ID_PBM,
+ AV_CODEC_ID_PGM,
+ AV_CODEC_ID_PGMYUV,
+ AV_CODEC_ID_PAM,
+ AV_CODEC_ID_FFVHUFF,
+ AV_CODEC_ID_RV30,
+ AV_CODEC_ID_RV40,
+ AV_CODEC_ID_VC1,
+ AV_CODEC_ID_WMV3,
+ AV_CODEC_ID_LOCO,
+ AV_CODEC_ID_WNV1,
+ AV_CODEC_ID_AASC,
+ AV_CODEC_ID_INDEO2,
+ AV_CODEC_ID_FRAPS,
+ AV_CODEC_ID_TRUEMOTION2,
+ AV_CODEC_ID_BMP,
+ AV_CODEC_ID_CSCD,
+ AV_CODEC_ID_MMVIDEO,
+ AV_CODEC_ID_ZMBV,
+ AV_CODEC_ID_AVS,
+ AV_CODEC_ID_SMACKVIDEO,
+ AV_CODEC_ID_NUV,
+ AV_CODEC_ID_KMVC,
+ AV_CODEC_ID_FLASHSV,
+ AV_CODEC_ID_CAVS,
+ AV_CODEC_ID_JPEG2000,
+ AV_CODEC_ID_VMNC,
+ AV_CODEC_ID_VP5,
+ AV_CODEC_ID_VP6,
+ AV_CODEC_ID_VP6F,
+ AV_CODEC_ID_TARGA,
+ AV_CODEC_ID_DSICINVIDEO,
+ AV_CODEC_ID_TIERTEXSEQVIDEO,
+ AV_CODEC_ID_TIFF,
+ AV_CODEC_ID_GIF,
+ AV_CODEC_ID_DXA,
+ AV_CODEC_ID_DNXHD,
+ AV_CODEC_ID_THP,
+ AV_CODEC_ID_SGI,
+ AV_CODEC_ID_C93,
+ AV_CODEC_ID_BETHSOFTVID,
+ AV_CODEC_ID_PTX,
+ AV_CODEC_ID_TXD,
+ AV_CODEC_ID_VP6A,
+ AV_CODEC_ID_AMV,
+ AV_CODEC_ID_VB,
+ AV_CODEC_ID_PCX,
+ AV_CODEC_ID_SUNRAST,
+ AV_CODEC_ID_INDEO4,
+ AV_CODEC_ID_INDEO5,
+ AV_CODEC_ID_MIMIC,
+ AV_CODEC_ID_RL2,
+ AV_CODEC_ID_ESCAPE124,
+ AV_CODEC_ID_DIRAC,
+ AV_CODEC_ID_BFI,
+ AV_CODEC_ID_CMV,
+ AV_CODEC_ID_MOTIONPIXELS,
+ AV_CODEC_ID_TGV,
+ AV_CODEC_ID_TGQ,
+ AV_CODEC_ID_TQI,
+ AV_CODEC_ID_AURA,
+ AV_CODEC_ID_AURA2,
+ AV_CODEC_ID_V210X,
+ AV_CODEC_ID_TMV,
+ AV_CODEC_ID_V210,
+ AV_CODEC_ID_DPX,
+ AV_CODEC_ID_MAD,
+ AV_CODEC_ID_FRWU,
+ AV_CODEC_ID_FLASHSV2,
+ AV_CODEC_ID_CDGRAPHICS,
+ AV_CODEC_ID_R210,
+ AV_CODEC_ID_ANM,
+ AV_CODEC_ID_BINKVIDEO,
+ AV_CODEC_ID_IFF_ILBM,
+#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM
+ AV_CODEC_ID_KGV1,
+ AV_CODEC_ID_YOP,
+ AV_CODEC_ID_VP8,
+ AV_CODEC_ID_PICTOR,
+ AV_CODEC_ID_ANSI,
+ AV_CODEC_ID_A64_MULTI,
+ AV_CODEC_ID_A64_MULTI5,
+ AV_CODEC_ID_R10K,
+ AV_CODEC_ID_MXPEG,
+ AV_CODEC_ID_LAGARITH,
+ AV_CODEC_ID_PRORES,
+ AV_CODEC_ID_JV,
+ AV_CODEC_ID_DFA,
+ AV_CODEC_ID_WMV3IMAGE,
+ AV_CODEC_ID_VC1IMAGE,
+ AV_CODEC_ID_UTVIDEO,
+ AV_CODEC_ID_BMV_VIDEO,
+ AV_CODEC_ID_VBLE,
+ AV_CODEC_ID_DXTORY,
+ AV_CODEC_ID_V410,
+ AV_CODEC_ID_XWD,
+ AV_CODEC_ID_CDXL,
+ AV_CODEC_ID_XBM,
+ AV_CODEC_ID_ZEROCODEC,
+ AV_CODEC_ID_MSS1,
+ AV_CODEC_ID_MSA1,
+ AV_CODEC_ID_TSCC2,
+ AV_CODEC_ID_MTS2,
+ AV_CODEC_ID_CLLC,
+ AV_CODEC_ID_MSS2,
+ AV_CODEC_ID_VP9,
+ AV_CODEC_ID_AIC,
+ AV_CODEC_ID_ESCAPE130,
+ AV_CODEC_ID_G2M,
+ AV_CODEC_ID_WEBP,
+ AV_CODEC_ID_HNM4_VIDEO,
+ AV_CODEC_ID_HEVC,
+#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC
+ AV_CODEC_ID_FIC,
+ AV_CODEC_ID_ALIAS_PIX,
+ AV_CODEC_ID_BRENDER_PIX,
+ AV_CODEC_ID_PAF_VIDEO,
+ AV_CODEC_ID_EXR,
+ AV_CODEC_ID_VP7,
+ AV_CODEC_ID_SANM,
+ AV_CODEC_ID_SGIRLE,
+ AV_CODEC_ID_MVC1,
+ AV_CODEC_ID_MVC2,
+ AV_CODEC_ID_HQX,
+ AV_CODEC_ID_TDSC,
+ AV_CODEC_ID_HQ_HQA,
+ AV_CODEC_ID_HAP,
+ AV_CODEC_ID_DDS,
+ AV_CODEC_ID_DXV,
+ AV_CODEC_ID_SCREENPRESSO,
+ AV_CODEC_ID_RSCC,
+ AV_CODEC_ID_AVS2,
+ AV_CODEC_ID_PGX,
+ AV_CODEC_ID_AVS3,
+ AV_CODEC_ID_MSP2,
+ AV_CODEC_ID_VVC,
+#define AV_CODEC_ID_H266 AV_CODEC_ID_VVC
+ AV_CODEC_ID_Y41P,
+ AV_CODEC_ID_AVRP,
+ AV_CODEC_ID_012V,
+ AV_CODEC_ID_AVUI,
+#if FF_API_AYUV_CODECID
+ AV_CODEC_ID_AYUV,
+#endif
+ AV_CODEC_ID_TARGA_Y216,
+ AV_CODEC_ID_V308,
+ AV_CODEC_ID_V408,
+ AV_CODEC_ID_YUV4,
+ AV_CODEC_ID_AVRN,
+ AV_CODEC_ID_CPIA,
+ AV_CODEC_ID_XFACE,
+ AV_CODEC_ID_SNOW,
+ AV_CODEC_ID_SMVJPEG,
+ AV_CODEC_ID_APNG,
+ AV_CODEC_ID_DAALA,
+ AV_CODEC_ID_CFHD,
+ AV_CODEC_ID_TRUEMOTION2RT,
+ AV_CODEC_ID_M101,
+ AV_CODEC_ID_MAGICYUV,
+ AV_CODEC_ID_SHEERVIDEO,
+ AV_CODEC_ID_YLC,
+ AV_CODEC_ID_PSD,
+ AV_CODEC_ID_PIXLET,
+ AV_CODEC_ID_SPEEDHQ,
+ AV_CODEC_ID_FMVC,
+ AV_CODEC_ID_SCPR,
+ AV_CODEC_ID_CLEARVIDEO,
+ AV_CODEC_ID_XPM,
+ AV_CODEC_ID_AV1,
+ AV_CODEC_ID_BITPACKED,
+ AV_CODEC_ID_MSCC,
+ AV_CODEC_ID_SRGC,
+ AV_CODEC_ID_SVG,
+ AV_CODEC_ID_GDV,
+ AV_CODEC_ID_FITS,
+ AV_CODEC_ID_IMM4,
+ AV_CODEC_ID_PROSUMER,
+ AV_CODEC_ID_MWSC,
+ AV_CODEC_ID_WCMV,
+ AV_CODEC_ID_RASC,
+ AV_CODEC_ID_HYMT,
+ AV_CODEC_ID_ARBC,
+ AV_CODEC_ID_AGM,
+ AV_CODEC_ID_LSCR,
+ AV_CODEC_ID_VP4,
+ AV_CODEC_ID_IMM5,
+ AV_CODEC_ID_MVDV,
+ AV_CODEC_ID_MVHA,
+ AV_CODEC_ID_CDTOONS,
+ AV_CODEC_ID_MV30,
+ AV_CODEC_ID_NOTCHLC,
+ AV_CODEC_ID_PFM,
+ AV_CODEC_ID_MOBICLIP,
+ AV_CODEC_ID_PHOTOCD,
+ AV_CODEC_ID_IPU,
+ AV_CODEC_ID_ARGO,
+ AV_CODEC_ID_CRI,
+ AV_CODEC_ID_SIMBIOSIS_IMX,
+ AV_CODEC_ID_SGA_VIDEO,
+ AV_CODEC_ID_GEM,
+ AV_CODEC_ID_VBN,
+ AV_CODEC_ID_JPEGXL,
+ AV_CODEC_ID_QOI,
+ AV_CODEC_ID_PHM,
+ AV_CODEC_ID_RADIANCE_HDR,
+ AV_CODEC_ID_WBMP,
+ AV_CODEC_ID_MEDIA100,
+ AV_CODEC_ID_VQC,
+
+ /* various PCM "codecs" */
+ AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy ID pointing at the start of audio codecs.
+ AV_CODEC_ID_PCM_S16LE = 0x10000,
+ AV_CODEC_ID_PCM_S16BE,
+ AV_CODEC_ID_PCM_U16LE,
+ AV_CODEC_ID_PCM_U16BE,
+ AV_CODEC_ID_PCM_S8,
+ AV_CODEC_ID_PCM_U8,
+ AV_CODEC_ID_PCM_MULAW,
+ AV_CODEC_ID_PCM_ALAW,
+ AV_CODEC_ID_PCM_S32LE,
+ AV_CODEC_ID_PCM_S32BE,
+ AV_CODEC_ID_PCM_U32LE,
+ AV_CODEC_ID_PCM_U32BE,
+ AV_CODEC_ID_PCM_S24LE,
+ AV_CODEC_ID_PCM_S24BE,
+ AV_CODEC_ID_PCM_U24LE,
+ AV_CODEC_ID_PCM_U24BE,
+ AV_CODEC_ID_PCM_S24DAUD,
+ AV_CODEC_ID_PCM_ZORK,
+ AV_CODEC_ID_PCM_S16LE_PLANAR,
+ AV_CODEC_ID_PCM_DVD,
+ AV_CODEC_ID_PCM_F32BE,
+ AV_CODEC_ID_PCM_F32LE,
+ AV_CODEC_ID_PCM_F64BE,
+ AV_CODEC_ID_PCM_F64LE,
+ AV_CODEC_ID_PCM_BLURAY,
+ AV_CODEC_ID_PCM_LXF,
+ AV_CODEC_ID_S302M,
+ AV_CODEC_ID_PCM_S8_PLANAR,
+ AV_CODEC_ID_PCM_S24LE_PLANAR,
+ AV_CODEC_ID_PCM_S32LE_PLANAR,
+ AV_CODEC_ID_PCM_S16BE_PLANAR,
+ AV_CODEC_ID_PCM_S64LE,
+ AV_CODEC_ID_PCM_S64BE,
+ AV_CODEC_ID_PCM_F16LE,
+ AV_CODEC_ID_PCM_F24LE,
+ AV_CODEC_ID_PCM_VIDC,
+ AV_CODEC_ID_PCM_SGA,
+
+ /* various ADPCM codecs */
+ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ AV_CODEC_ID_ADPCM_IMA_WAV,
+ AV_CODEC_ID_ADPCM_IMA_DK3,
+ AV_CODEC_ID_ADPCM_IMA_DK4,
+ AV_CODEC_ID_ADPCM_IMA_WS,
+ AV_CODEC_ID_ADPCM_IMA_SMJPEG,
+ AV_CODEC_ID_ADPCM_MS,
+ AV_CODEC_ID_ADPCM_4XM,
+ AV_CODEC_ID_ADPCM_XA,
+ AV_CODEC_ID_ADPCM_ADX,
+ AV_CODEC_ID_ADPCM_EA,
+ AV_CODEC_ID_ADPCM_G726,
+ AV_CODEC_ID_ADPCM_CT,
+ AV_CODEC_ID_ADPCM_SWF,
+ AV_CODEC_ID_ADPCM_YAMAHA,
+ AV_CODEC_ID_ADPCM_SBPRO_4,
+ AV_CODEC_ID_ADPCM_SBPRO_3,
+ AV_CODEC_ID_ADPCM_SBPRO_2,
+ AV_CODEC_ID_ADPCM_THP,
+ AV_CODEC_ID_ADPCM_IMA_AMV,
+ AV_CODEC_ID_ADPCM_EA_R1,
+ AV_CODEC_ID_ADPCM_EA_R3,
+ AV_CODEC_ID_ADPCM_EA_R2,
+ AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
+ AV_CODEC_ID_ADPCM_IMA_EA_EACS,
+ AV_CODEC_ID_ADPCM_EA_XAS,
+ AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
+ AV_CODEC_ID_ADPCM_IMA_ISS,
+ AV_CODEC_ID_ADPCM_G722,
+ AV_CODEC_ID_ADPCM_IMA_APC,
+ AV_CODEC_ID_ADPCM_VIMA,
+ AV_CODEC_ID_ADPCM_AFC,
+ AV_CODEC_ID_ADPCM_IMA_OKI,
+ AV_CODEC_ID_ADPCM_DTK,
+ AV_CODEC_ID_ADPCM_IMA_RAD,
+ AV_CODEC_ID_ADPCM_G726LE,
+ AV_CODEC_ID_ADPCM_THP_LE,
+ AV_CODEC_ID_ADPCM_PSX,
+ AV_CODEC_ID_ADPCM_AICA,
+ AV_CODEC_ID_ADPCM_IMA_DAT4,
+ AV_CODEC_ID_ADPCM_MTAF,
+ AV_CODEC_ID_ADPCM_AGM,
+ AV_CODEC_ID_ADPCM_ARGO,
+ AV_CODEC_ID_ADPCM_IMA_SSI,
+ AV_CODEC_ID_ADPCM_ZORK,
+ AV_CODEC_ID_ADPCM_IMA_APM,
+ AV_CODEC_ID_ADPCM_IMA_ALP,
+ AV_CODEC_ID_ADPCM_IMA_MTF,
+ AV_CODEC_ID_ADPCM_IMA_CUNNING,
+ AV_CODEC_ID_ADPCM_IMA_MOFLEX,
+ AV_CODEC_ID_ADPCM_IMA_ACORN,
+ AV_CODEC_ID_ADPCM_XMD,
+
+ /* AMR */
+ AV_CODEC_ID_AMR_NB = 0x12000,
+ AV_CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs */
+ AV_CODEC_ID_RA_144 = 0x13000,
+ AV_CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ AV_CODEC_ID_ROQ_DPCM = 0x14000,
+ AV_CODEC_ID_INTERPLAY_DPCM,
+ AV_CODEC_ID_XAN_DPCM,
+ AV_CODEC_ID_SOL_DPCM,
+ AV_CODEC_ID_SDX2_DPCM,
+ AV_CODEC_ID_GREMLIN_DPCM,
+ AV_CODEC_ID_DERF_DPCM,
+ AV_CODEC_ID_WADY_DPCM,
+ AV_CODEC_ID_CBD2_DPCM,
+
+ /* audio codecs */
+ AV_CODEC_ID_MP2 = 0x15000,
+ AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ AV_CODEC_ID_AAC,
+ AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS,
+ AV_CODEC_ID_VORBIS,
+ AV_CODEC_ID_DVAUDIO,
+ AV_CODEC_ID_WMAV1,
+ AV_CODEC_ID_WMAV2,
+ AV_CODEC_ID_MACE3,
+ AV_CODEC_ID_MACE6,
+ AV_CODEC_ID_VMDAUDIO,
+ AV_CODEC_ID_FLAC,
+ AV_CODEC_ID_MP3ADU,
+ AV_CODEC_ID_MP3ON4,
+ AV_CODEC_ID_SHORTEN,
+ AV_CODEC_ID_ALAC,
+ AV_CODEC_ID_WESTWOOD_SND1,
+ AV_CODEC_ID_GSM, ///< as in Berlin toast format
+ AV_CODEC_ID_QDM2,
+ AV_CODEC_ID_COOK,
+ AV_CODEC_ID_TRUESPEECH,
+ AV_CODEC_ID_TTA,
+ AV_CODEC_ID_SMACKAUDIO,
+ AV_CODEC_ID_QCELP,
+ AV_CODEC_ID_WAVPACK,
+ AV_CODEC_ID_DSICINAUDIO,
+ AV_CODEC_ID_IMC,
+ AV_CODEC_ID_MUSEPACK7,
+ AV_CODEC_ID_MLP,
+ AV_CODEC_ID_GSM_MS, /* as found in WAV */
+ AV_CODEC_ID_ATRAC3,
+ AV_CODEC_ID_APE,
+ AV_CODEC_ID_NELLYMOSER,
+ AV_CODEC_ID_MUSEPACK8,
+ AV_CODEC_ID_SPEEX,
+ AV_CODEC_ID_WMAVOICE,
+ AV_CODEC_ID_WMAPRO,
+ AV_CODEC_ID_WMALOSSLESS,
+ AV_CODEC_ID_ATRAC3P,
+ AV_CODEC_ID_EAC3,
+ AV_CODEC_ID_SIPR,
+ AV_CODEC_ID_MP1,
+ AV_CODEC_ID_TWINVQ,
+ AV_CODEC_ID_TRUEHD,
+ AV_CODEC_ID_MP4ALS,
+ AV_CODEC_ID_ATRAC1,
+ AV_CODEC_ID_BINKAUDIO_RDFT,
+ AV_CODEC_ID_BINKAUDIO_DCT,
+ AV_CODEC_ID_AAC_LATM,
+ AV_CODEC_ID_QDMC,
+ AV_CODEC_ID_CELT,
+ AV_CODEC_ID_G723_1,
+ AV_CODEC_ID_G729,
+ AV_CODEC_ID_8SVX_EXP,
+ AV_CODEC_ID_8SVX_FIB,
+ AV_CODEC_ID_BMV_AUDIO,
+ AV_CODEC_ID_RALF,
+ AV_CODEC_ID_IAC,
+ AV_CODEC_ID_ILBC,
+ AV_CODEC_ID_OPUS,
+ AV_CODEC_ID_COMFORT_NOISE,
+ AV_CODEC_ID_TAK,
+ AV_CODEC_ID_METASOUND,
+ AV_CODEC_ID_PAF_AUDIO,
+ AV_CODEC_ID_ON2AVC,
+ AV_CODEC_ID_DSS_SP,
+ AV_CODEC_ID_CODEC2,
+ AV_CODEC_ID_FFWAVESYNTH,
+ AV_CODEC_ID_SONIC,
+ AV_CODEC_ID_SONIC_LS,
+ AV_CODEC_ID_EVRC,
+ AV_CODEC_ID_SMV,
+ AV_CODEC_ID_DSD_LSBF,
+ AV_CODEC_ID_DSD_MSBF,
+ AV_CODEC_ID_DSD_LSBF_PLANAR,
+ AV_CODEC_ID_DSD_MSBF_PLANAR,
+ AV_CODEC_ID_4GV,
+ AV_CODEC_ID_INTERPLAY_ACM,
+ AV_CODEC_ID_XMA1,
+ AV_CODEC_ID_XMA2,
+ AV_CODEC_ID_DST,
+ AV_CODEC_ID_ATRAC3AL,
+ AV_CODEC_ID_ATRAC3PAL,
+ AV_CODEC_ID_DOLBY_E,
+ AV_CODEC_ID_APTX,
+ AV_CODEC_ID_APTX_HD,
+ AV_CODEC_ID_SBC,
+ AV_CODEC_ID_ATRAC9,
+ AV_CODEC_ID_HCOM,
+ AV_CODEC_ID_ACELP_KELVIN,
+ AV_CODEC_ID_MPEGH_3D_AUDIO,
+ AV_CODEC_ID_SIREN,
+ AV_CODEC_ID_HCA,
+ AV_CODEC_ID_FASTAUDIO,
+ AV_CODEC_ID_MSNSIREN,
+ AV_CODEC_ID_DFPWM,
+ AV_CODEC_ID_BONK,
+ AV_CODEC_ID_MISC4,
+ AV_CODEC_ID_APAC,
+ AV_CODEC_ID_FTR,
+ AV_CODEC_ID_WAVARC,
+ AV_CODEC_ID_RKA,
+
+ /* subtitle codecs */
+ AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
+ AV_CODEC_ID_DVB_SUBTITLE,
+ AV_CODEC_ID_TEXT, ///< raw UTF-8 text
+ AV_CODEC_ID_XSUB,
+ AV_CODEC_ID_SSA,
+ AV_CODEC_ID_MOV_TEXT,
+ AV_CODEC_ID_HDMV_PGS_SUBTITLE,
+ AV_CODEC_ID_DVB_TELETEXT,
+ AV_CODEC_ID_SRT,
+ AV_CODEC_ID_MICRODVD,
+ AV_CODEC_ID_EIA_608,
+ AV_CODEC_ID_JACOSUB,
+ AV_CODEC_ID_SAMI,
+ AV_CODEC_ID_REALTEXT,
+ AV_CODEC_ID_STL,
+ AV_CODEC_ID_SUBVIEWER1,
+ AV_CODEC_ID_SUBVIEWER,
+ AV_CODEC_ID_SUBRIP,
+ AV_CODEC_ID_WEBVTT,
+ AV_CODEC_ID_MPL2,
+ AV_CODEC_ID_VPLAYER,
+ AV_CODEC_ID_PJS,
+ AV_CODEC_ID_ASS,
+ AV_CODEC_ID_HDMV_TEXT_SUBTITLE,
+ AV_CODEC_ID_TTML,
+ AV_CODEC_ID_ARIB_CAPTION,
+
+ /* other specific kind of codecs (generally used for attachments) */
+ AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ AV_CODEC_ID_TTF = 0x18000,
+
+ AV_CODEC_ID_SCTE_35, ///< Contains timestamps estimated through the PCR of the program stream.
+ AV_CODEC_ID_EPG,
+ AV_CODEC_ID_BINTEXT,
+ AV_CODEC_ID_XBIN,
+ AV_CODEC_ID_IDF,
+ AV_CODEC_ID_OTF,
+ AV_CODEC_ID_SMPTE_KLV,
+ AV_CODEC_ID_DVD_NAV,
+ AV_CODEC_ID_TIMED_ID3,
+ AV_CODEC_ID_BIN_DATA,
+
+
+ AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it
+
+ AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
+ AV_CODEC_ID_WRAPPED_AVFRAME = 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket
+ /**
+ * Dummy null video codec, useful mainly for development and debugging.
+ * The null encoder/decoder discards all input and never returns any output.
+ */
+ AV_CODEC_ID_VNULL,
+ /**
+ * Dummy null audio codec, useful mainly for development and debugging.
+ * The null encoder/decoder discards all input and never returns any output.
+ */
+ AV_CODEC_ID_ANULL,
+};
+
+/**
+ * Get the type of the given codec.
+ */
+enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
+
+/**
+ * Get the name of a codec.
+ * @return a static string identifying the codec; never NULL
+ */
+const char *avcodec_get_name(enum AVCodecID id);
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return codec bits per sample.
+ * Only return non-zero if the bits per sample is exactly correct, not an
+ * approximation.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec_id the ID of the codec to which the requested profile belongs
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ *
+ * @note unlike av_get_profile_name(), which searches a list of profiles
+ * supported by a specific decoder or encoder implementation, this
+ * function searches the list of profiles from the AVCodecDescriptor
+ */
+const char *avcodec_profile_name(enum AVCodecID codec_id, int profile);
+
+/**
+ * Return the PCM codec associated with a sample format.
+ * @param be endianness, 0 for little, 1 for big,
+ * -1 (or anything else) for native
+ * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
+ */
+enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_ID_H
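For orientation, here is a minimal usage sketch for the query helpers declared in codec_id.h above. It is illustrative only and not part of the patch; the includes, the helper name describe_codec, and the example IDs are assumptions, and only functions declared in this header are used:

    #include <stdio.h>
    #include "libavcodec/codec_id.h"
    #include "libavutil/samplefmt.h"

    /* Print a human-readable summary of a codec ID. */
    static void describe_codec(enum AVCodecID id)
    {
        /* avcodec_get_name() is documented above to never return NULL. */
        printf("codec %s, media type %d, bits/sample %d\n",
               avcodec_get_name(id),
               (int)avcodec_get_type(id),
               av_get_bits_per_sample(id));
    }

    /* e.g. describe_codec(AV_CODEC_ID_PCM_S16LE); or
     *      describe_codec(av_get_pcm_codec(AV_SAMPLE_FMT_S16, 0)); */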
diff --git a/media/ffvpx/libavcodec/codec_internal.h b/media/ffvpx/libavcodec/codec_internal.h
new file mode 100644
index 0000000000..130a7dc3cd
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_internal.h
@@ -0,0 +1,330 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_INTERNAL_H
+#define AVCODEC_CODEC_INTERNAL_H
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "codec.h"
+#include "config.h"
+
+/**
+ * The codec is not known to be init-threadsafe (i.e. it might be unsafe
+ * to initialize this codec and another codec concurrently, typically because
+ * the codec calls external APIs that are not known to be thread-safe).
+ * Therefore calling the codec's init function needs to be guarded with a lock.
+ */
+#define FF_CODEC_CAP_NOT_INIT_THREADSAFE (1 << 0)
+/**
+ * The codec allows calling the close function for deallocation even if
+ * the init function returned a failure. Without this capability flag, a
+ * codec does such cleanup internally when returning failures from the
+ * init function and does not expect the close function to be called at
+ * all.
+ */
+#define FF_CODEC_CAP_INIT_CLEANUP (1 << 1)
+/**
+ * Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set
+ * AVFrame.pkt_dts manually. If the flag is set, decode.c won't overwrite
+ * this field. If it's unset, decode.c tries to guess the pkt_dts field
+ * from the input AVPacket.
+ */
+#define FF_CODEC_CAP_SETS_PKT_DTS (1 << 2)
+/**
+ * The decoder extracts and fills its parameters even if the frame is
+ * skipped due to the skip_frame setting.
+ */
+#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM (1 << 3)
+/**
+ * The decoder sets the cropping fields in the output frames manually.
+ * If this cap is set, the generic code will initialize output frame
+ * dimensions to coded rather than display values.
+ */
+#define FF_CODEC_CAP_EXPORTS_CROPPING (1 << 4)
+/**
+ * Codec initializes slice-based threading with a main function
+ */
+#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF (1 << 5)
+/*
+ * The codec supports frame threading and has inter-frame dependencies, so it
+ * uses ff_thread_report/await_progress().
+ */
+#define FF_CODEC_CAP_ALLOCATE_PROGRESS (1 << 6)
+/**
+ * Codec handles avctx->thread_count == 0 (auto) internally.
+ */
+#define FF_CODEC_CAP_AUTO_THREADS (1 << 7)
+/**
+ * Codec handles output frame properties internally instead of letting the
+ * internal logic derive them from AVCodecInternal.last_pkt_props.
+ */
+#define FF_CODEC_CAP_SETS_FRAME_PROPS (1 << 8)
+/**
+ * Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
+ */
+#define FF_CODEC_CAP_ICC_PROFILES (1 << 9)
+/**
+ * The encoder has AV_CODEC_CAP_DELAY set, but does not actually have a delay: it
+ * only wants to be flushed at the end to update some context variables (e.g.
+ * 2-pass stats) or to produce a trailing packet. Apart from that, it immediately
+ * produces exactly one output packet for each input frame, just as no-delay
+ * encoders do.
+ */
+#define FF_CODEC_CAP_EOF_FLUSH (1 << 10)
+
+/**
+ * FFCodec.codec_tags termination value
+ */
+#define FF_CODEC_TAGS_END -1
+
+typedef struct FFCodecDefault {
+ const char *key;
+ const char *value;
+} FFCodecDefault;
+
+struct AVCodecContext;
+struct AVSubtitle;
+struct AVPacket;
+
+enum FFCodecType {
+ /* The codec is a decoder using the decode callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_DECODE,
+ /* The codec is a decoder using the decode_sub callback;
+ * subtitle codecs only. */
+ FF_CODEC_CB_TYPE_DECODE_SUB,
+ /* The codec is a decoder using the receive_frame callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_RECEIVE_FRAME,
+ /* The codec is an encoder using the encode callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_ENCODE,
+ /* The codec is an encoder using the encode_sub callback;
+ * subtitle codecs only. */
+ FF_CODEC_CB_TYPE_ENCODE_SUB,
+ /* The codec is an encoder using the receive_packet callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_RECEIVE_PACKET,
+};
+
+typedef struct FFCodec {
+ /**
+ * The public AVCodec. See codec.h for it.
+ */
+ AVCodec p;
+
+ /**
+ * Internal codec capabilities FF_CODEC_CAP_*.
+ */
+ unsigned caps_internal:29;
+
+ /**
+ * This field determines the type of the codec (decoder/encoder)
+ * and also the exact callback cb implemented by the codec.
+ * cb_type uses enum FFCodecType values.
+ */
+ unsigned cb_type:3;
+
+ int priv_data_size;
+ /**
+ * @name Frame-level threading support functions
+ * @{
+ */
+ /**
+ * Copy necessary context variables from a previous thread context to the current one.
+ * If not defined, the next thread will start automatically; otherwise, the codec
+ * must call ff_thread_finish_setup().
+ *
+ * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
+ */
+ int (*update_thread_context)(struct AVCodecContext *dst, const struct AVCodecContext *src);
+
+ /**
+ * Copy variables back to the user-facing context
+ */
+ int (*update_thread_context_for_user)(struct AVCodecContext *dst, const struct AVCodecContext *src);
+ /** @} */
+
+ /**
+ * Private codec-specific defaults.
+ */
+ const FFCodecDefault *defaults;
+
+ /**
+ * Initialize codec static data, called from av_codec_iterate().
+ *
+ * This is not intended for time consuming operations as it is
+ * run for every codec regardless of that codec being used.
+ */
+ void (*init_static_data)(struct FFCodec *codec);
+
+ int (*init)(struct AVCodecContext *);
+
+ union {
+ /**
+ * Decode to an AVFrame.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_DECODE.
+ *
+ * @param avctx codec context
+ * @param[out] frame AVFrame for output
+ * @param[out] got_frame_ptr decoder sets to 0 or 1 to indicate that
+ * a non-empty frame was returned in frame.
+ * @param[in] avpkt AVPacket containing the data to be decoded
+ * @return amount of bytes read from the packet on success,
+ * negative error code on failure
+ */
+ int (*decode)(struct AVCodecContext *avctx, struct AVFrame *frame,
+ int *got_frame_ptr, struct AVPacket *avpkt);
+ /**
+ * Decode subtitle data to an AVSubtitle.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_DECODE_SUB.
+ *
+ * Apart from that this is like the decode callback.
+ */
+ int (*decode_sub)(struct AVCodecContext *avctx, struct AVSubtitle *sub,
+ int *got_frame_ptr, const struct AVPacket *avpkt);
+ /**
+ * Decode API with decoupled packet/frame dataflow.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_RECEIVE_FRAME.
+ *
+ * This function is called to get one output frame. It should call
+ * ff_decode_get_packet() to obtain input data.
+ */
+ int (*receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame);
+ /**
+ * Encode data to an AVPacket.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_ENCODE
+ *
+ * @param avctx codec context
+ * @param[out] avpkt output AVPacket
+ * @param[in] frame AVFrame containing the input to be encoded
+ * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+ * non-empty packet was returned in avpkt.
+ * @return 0 on success, negative error code on failure
+ */
+ int (*encode)(struct AVCodecContext *avctx, struct AVPacket *avpkt,
+ const struct AVFrame *frame, int *got_packet_ptr);
+ /**
+ * Encode subtitles to a raw buffer.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_ENCODE_SUB.
+ */
+ int (*encode_sub)(struct AVCodecContext *avctx, uint8_t *buf,
+ int buf_size, const struct AVSubtitle *sub);
+ /**
+ * Encode API with decoupled frame/packet dataflow.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_RECEIVE_PACKET.
+ *
+ * This function is called to get one output packet.
+ * It should call ff_encode_get_frame() to obtain input data.
+ */
+ int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt);
+ } cb;
+
+ int (*close)(struct AVCodecContext *);
+
+ /**
+ * Flush buffers.
+ * Will be called when seeking
+ */
+ void (*flush)(struct AVCodecContext *);
+
+ /**
+ * Decoding only, a comma-separated list of bitstream filters to apply to
+ * packets before decoding.
+ */
+ const char *bsfs;
+
+ /**
+ * Array of pointers to hardware configurations supported by the codec,
+ * or NULL if no hardware is supported. The array is terminated by a NULL
+ * pointer.
+ *
+ * The user can only access this field via avcodec_get_hw_config().
+ */
+ const struct AVCodecHWConfigInternal *const *hw_configs;
+
+ /**
+ * List of supported codec_tags, terminated by FF_CODEC_TAGS_END.
+ */
+ const uint32_t *codec_tags;
+} FFCodec;
+
+#if CONFIG_SMALL
+#define CODEC_LONG_NAME(str) .p.long_name = NULL
+#else
+#define CODEC_LONG_NAME(str) .p.long_name = str
+#endif
+
+#if HAVE_THREADS
+#define UPDATE_THREAD_CONTEXT(func) \
+ .update_thread_context = (func)
+#define UPDATE_THREAD_CONTEXT_FOR_USER(func) \
+ .update_thread_context_for_user = (func)
+#else
+#define UPDATE_THREAD_CONTEXT(func) \
+ .update_thread_context = NULL
+#define UPDATE_THREAD_CONTEXT_FOR_USER(func) \
+ .update_thread_context_for_user = NULL
+#endif
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+#define CODEC_OLD_CHANNEL_LAYOUTS(...) CODEC_OLD_CHANNEL_LAYOUTS_ARRAY(((const uint64_t[]) { __VA_ARGS__, 0 }))
+#if defined(__clang__)
+#define CODEC_OLD_CHANNEL_LAYOUTS_ARRAY(array) \
+ FF_DISABLE_DEPRECATION_WARNINGS \
+ .p.channel_layouts = (array), \
+ FF_ENABLE_DEPRECATION_WARNINGS
+#else
+#define CODEC_OLD_CHANNEL_LAYOUTS_ARRAY(array) .p.channel_layouts = (array),
+#endif
+#else
+/* This is only provided to make it possible to test disabling FF_API_OLD_CHANNEL_LAYOUT
+ * without removing all the FF_API_OLD_CHANNEL_LAYOUT code blocks.
+ * It is of course still expected to be removed when FF_API_OLD_CHANNEL_LAYOUT
+ * is finally removed (along with all usages of these macros). */
+#define CODEC_OLD_CHANNEL_LAYOUTS(...)
+#define CODEC_OLD_CHANNEL_LAYOUTS_ARRAY(array)
+#endif
+
+#define FF_CODEC_DECODE_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_DECODE, \
+ .cb.decode = (func)
+#define FF_CODEC_DECODE_SUB_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_DECODE_SUB, \
+ .cb.decode_sub = (func)
+#define FF_CODEC_RECEIVE_FRAME_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_RECEIVE_FRAME, \
+ .cb.receive_frame = (func)
+#define FF_CODEC_ENCODE_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_ENCODE, \
+ .cb.encode = (func)
+#define FF_CODEC_ENCODE_SUB_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_ENCODE_SUB, \
+ .cb.encode_sub = (func)
+#define FF_CODEC_RECEIVE_PACKET_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_RECEIVE_PACKET, \
+ .cb.receive_packet = (func)
+
+static av_always_inline const FFCodec *ffcodec(const AVCodec *codec)
+{
+ return (const FFCodec*)codec;
+}
+
+#endif /* AVCODEC_CODEC_INTERNAL_H */
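To make the cb/cb_type coupling in codec_internal.h concrete, here is a hedged sketch of how a hypothetical decoder entry would be declared with the FF_CODEC_DECODE_CB() convenience macro. The names ff_example_decoder, ExampleContext and my_decode_* are placeholders that do not exist in this patch; only the FFCodec fields and macros declared above are relied on:

    /* Hypothetical decoder table entry; init/close/frame callbacks are placeholders. */
    const FFCodec ff_example_decoder = {
        .p.name         = "example",
        CODEC_LONG_NAME("Example decoder"),
        .p.type         = AVMEDIA_TYPE_VIDEO,
        .p.id           = AV_CODEC_ID_VP8,
        .p.capabilities = AV_CODEC_CAP_DR1,
        .priv_data_size = sizeof(ExampleContext),
        .init           = my_decode_init,
        .close          = my_decode_close,
        FF_CODEC_DECODE_CB(my_decode_frame),   /* sets cb_type and cb.decode together */
        .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    };

Using the FF_CODEC_*_CB() macros keeps cb_type and the union member consistent, which is the reason the raw fields are not set by hand.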
diff --git a/media/ffvpx/libavcodec/codec_list.c b/media/ffvpx/libavcodec/codec_list.c
new file mode 100644
index 0000000000..db49c8fffb
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_list.c
@@ -0,0 +1,20 @@
+static const FFCodec * const codec_list[] = {
+#if CONFIG_VP8_DECODER
+ &ff_vp8_decoder,
+#endif
+#if CONFIG_VP9_DECODER
+ &ff_vp9_decoder,
+#endif
+#if CONFIG_FLAC_DECODER
+ &ff_flac_decoder,
+#endif
+#if CONFIG_MP3_DECODER
+ &ff_mp3_decoder,
+#endif
+#if CONFIG_LIBDAV1D
+ &ff_libdav1d_decoder,
+#endif
+#if CONFIG_AV1_DECODER
+ &ff_av1_decoder,
+#endif
+ NULL };
diff --git a/media/ffvpx/libavcodec/codec_par.c b/media/ffvpx/libavcodec/codec_par.c
new file mode 100644
index 0000000000..abda649aa8
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_par.c
@@ -0,0 +1,263 @@
+/*
+ * AVCodecParameters functions for libavcodec
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AVCodecParameters functions for libavcodec.
+ */
+
+#include <string.h>
+#include "libavutil/mem.h"
+#include "avcodec.h"
+#include "codec_par.h"
+
+static void codec_parameters_reset(AVCodecParameters *par)
+{
+ av_freep(&par->extradata);
+ av_channel_layout_uninit(&par->ch_layout);
+
+ memset(par, 0, sizeof(*par));
+
+ par->codec_type = AVMEDIA_TYPE_UNKNOWN;
+ par->codec_id = AV_CODEC_ID_NONE;
+ par->format = -1;
+ par->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ par->field_order = AV_FIELD_UNKNOWN;
+ par->color_range = AVCOL_RANGE_UNSPECIFIED;
+ par->color_primaries = AVCOL_PRI_UNSPECIFIED;
+ par->color_trc = AVCOL_TRC_UNSPECIFIED;
+ par->color_space = AVCOL_SPC_UNSPECIFIED;
+ par->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
+ par->sample_aspect_ratio = (AVRational){ 0, 1 };
+ par->profile = FF_PROFILE_UNKNOWN;
+ par->level = FF_LEVEL_UNKNOWN;
+}
+
+AVCodecParameters *avcodec_parameters_alloc(void)
+{
+ AVCodecParameters *par = av_mallocz(sizeof(*par));
+
+ if (!par)
+ return NULL;
+ codec_parameters_reset(par);
+ return par;
+}
+
+void avcodec_parameters_free(AVCodecParameters **ppar)
+{
+ AVCodecParameters *par = *ppar;
+
+ if (!par)
+ return;
+ codec_parameters_reset(par);
+
+ av_freep(ppar);
+}
+
+int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
+{
+ int ret;
+
+ codec_parameters_reset(dst);
+ memcpy(dst, src, sizeof(*dst));
+
+ dst->ch_layout = (AVChannelLayout){0};
+ dst->extradata = NULL;
+ dst->extradata_size = 0;
+ if (src->extradata) {
+ dst->extradata = av_mallocz(src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!dst->extradata)
+ return AVERROR(ENOMEM);
+ memcpy(dst->extradata, src->extradata, src->extradata_size);
+ dst->extradata_size = src->extradata_size;
+ }
+
+ ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int avcodec_parameters_from_context(AVCodecParameters *par,
+ const AVCodecContext *codec)
+{
+ int ret;
+
+ codec_parameters_reset(par);
+
+ par->codec_type = codec->codec_type;
+ par->codec_id = codec->codec_id;
+ par->codec_tag = codec->codec_tag;
+
+ par->bit_rate = codec->bit_rate;
+ par->bits_per_coded_sample = codec->bits_per_coded_sample;
+ par->bits_per_raw_sample = codec->bits_per_raw_sample;
+ par->profile = codec->profile;
+ par->level = codec->level;
+
+ switch (par->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ par->format = codec->pix_fmt;
+ par->width = codec->width;
+ par->height = codec->height;
+ par->field_order = codec->field_order;
+ par->color_range = codec->color_range;
+ par->color_primaries = codec->color_primaries;
+ par->color_trc = codec->color_trc;
+ par->color_space = codec->colorspace;
+ par->chroma_location = codec->chroma_sample_location;
+ par->sample_aspect_ratio = codec->sample_aspect_ratio;
+ par->video_delay = codec->has_b_frames;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ par->format = codec->sample_fmt;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ // if the old/new fields are set inconsistently, prefer the old ones
+ if ((codec->channels && codec->channels != codec->ch_layout.nb_channels) ||
+ (codec->channel_layout && (codec->ch_layout.order != AV_CHANNEL_ORDER_NATIVE ||
+ codec->ch_layout.u.mask != codec->channel_layout))) {
+ if (codec->channel_layout)
+ av_channel_layout_from_mask(&par->ch_layout, codec->channel_layout);
+ else {
+ par->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ par->ch_layout.nb_channels = codec->channels;
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+ } else {
+#endif
+ ret = av_channel_layout_copy(&par->ch_layout, &codec->ch_layout);
+ if (ret < 0)
+ return ret;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ }
+ par->channel_layout = par->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ par->ch_layout.u.mask : 0;
+ par->channels = par->ch_layout.nb_channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ par->sample_rate = codec->sample_rate;
+ par->block_align = codec->block_align;
+ par->frame_size = codec->frame_size;
+ par->initial_padding = codec->initial_padding;
+ par->trailing_padding = codec->trailing_padding;
+ par->seek_preroll = codec->seek_preroll;
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ par->width = codec->width;
+ par->height = codec->height;
+ break;
+ }
+
+ if (codec->extradata) {
+ par->extradata = av_mallocz(codec->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!par->extradata)
+ return AVERROR(ENOMEM);
+ memcpy(par->extradata, codec->extradata, codec->extradata_size);
+ par->extradata_size = codec->extradata_size;
+ }
+
+ return 0;
+}
+
+int avcodec_parameters_to_context(AVCodecContext *codec,
+ const AVCodecParameters *par)
+{
+ int ret;
+
+ codec->codec_type = par->codec_type;
+ codec->codec_id = par->codec_id;
+ codec->codec_tag = par->codec_tag;
+
+ codec->bit_rate = par->bit_rate;
+ codec->bits_per_coded_sample = par->bits_per_coded_sample;
+ codec->bits_per_raw_sample = par->bits_per_raw_sample;
+ codec->profile = par->profile;
+ codec->level = par->level;
+
+ switch (par->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ codec->pix_fmt = par->format;
+ codec->width = par->width;
+ codec->height = par->height;
+ codec->field_order = par->field_order;
+ codec->color_range = par->color_range;
+ codec->color_primaries = par->color_primaries;
+ codec->color_trc = par->color_trc;
+ codec->colorspace = par->color_space;
+ codec->chroma_sample_location = par->chroma_location;
+ codec->sample_aspect_ratio = par->sample_aspect_ratio;
+ codec->has_b_frames = par->video_delay;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ codec->sample_fmt = par->format;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ // if the old/new fields are set inconsistently, prefer the old ones
+ if ((par->channels && par->channels != par->ch_layout.nb_channels) ||
+ (par->channel_layout && (par->ch_layout.order != AV_CHANNEL_ORDER_NATIVE ||
+ par->ch_layout.u.mask != par->channel_layout))) {
+ if (par->channel_layout)
+ av_channel_layout_from_mask(&codec->ch_layout, par->channel_layout);
+ else {
+ codec->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ codec->ch_layout.nb_channels = par->channels;
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+ } else {
+#endif
+ ret = av_channel_layout_copy(&codec->ch_layout, &par->ch_layout);
+ if (ret < 0)
+ return ret;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ }
+ codec->channel_layout = codec->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ codec->ch_layout.u.mask : 0;
+ codec->channels = codec->ch_layout.nb_channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ codec->sample_rate = par->sample_rate;
+ codec->block_align = par->block_align;
+ codec->frame_size = par->frame_size;
+ codec->delay =
+ codec->initial_padding = par->initial_padding;
+ codec->trailing_padding = par->trailing_padding;
+ codec->seek_preroll = par->seek_preroll;
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ codec->width = par->width;
+ codec->height = par->height;
+ break;
+ }
+
+ if (par->extradata) {
+ av_freep(&codec->extradata);
+ codec->extradata = av_mallocz(par->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!codec->extradata)
+ return AVERROR(ENOMEM);
+ memcpy(codec->extradata, par->extradata, par->extradata_size);
+ codec->extradata_size = par->extradata_size;
+ }
+
+ return 0;
+}
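A short sketch of the AVCodecParameters lifecycle implemented in codec_par.c above: parameters are extracted from a configured context, then applied to a fresh one, and freed afterwards. This is illustrative only; the helper name snapshot_params and the surrounding contexts are assumptions, and error handling is reduced to return checks:

    #include "libavcodec/avcodec.h"

    /* dec_ctx is an already-configured AVCodecContext; new_ctx is freshly allocated. */
    static int snapshot_params(const AVCodecContext *dec_ctx, AVCodecContext *new_ctx)
    {
        AVCodecParameters *par = avcodec_parameters_alloc();
        int ret;

        if (!par)
            return AVERROR(ENOMEM);

        ret = avcodec_parameters_from_context(par, dec_ctx);
        if (ret >= 0)
            ret = avcodec_parameters_to_context(new_ctx, par);

        avcodec_parameters_free(&par);  /* also releases extradata and ch_layout */
        return ret;
    }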
diff --git a/media/ffvpx/libavcodec/codec_par.h b/media/ffvpx/libavcodec/codec_par.h
new file mode 100644
index 0000000000..f51d27c590
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_par.h
@@ -0,0 +1,247 @@
+/*
+ * Codec parameters public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_PAR_H
+#define AVCODEC_CODEC_PAR_H
+
+#include <stdint.h>
+
+#include "libavutil/avutil.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/rational.h"
+#include "libavutil/pixfmt.h"
+
+#include "codec_id.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+enum AVFieldOrder {
+ AV_FIELD_UNKNOWN,
+ AV_FIELD_PROGRESSIVE,
+ AV_FIELD_TT, ///< Top coded first, top displayed first
+ AV_FIELD_BB, ///< Bottom coded first, bottom displayed first
+ AV_FIELD_TB, ///< Top coded first, bottom displayed first
+ AV_FIELD_BT, ///< Bottom coded first, top displayed first
+};
+
+/**
+ * This struct describes the properties of an encoded stream.
+ *
+ * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must
+ * be allocated with avcodec_parameters_alloc() and freed with
+ * avcodec_parameters_free().
+ */
+typedef struct AVCodecParameters {
+ /**
+ * General type of the encoded data.
+ */
+ enum AVMediaType codec_type;
+ /**
+ * Specific type of the encoded data (the codec used).
+ */
+ enum AVCodecID codec_id;
+ /**
+ * Additional information about the codec (corresponds to the AVI FOURCC).
+ */
+ uint32_t codec_tag;
+
+ /**
+ * Extra binary data needed for initializing the decoder, codec-dependent.
+ *
+ * Must be allocated with av_malloc() and will be freed by
+ * avcodec_parameters_free(). The allocated size of extradata must be at
+ * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding
+ * bytes zeroed.
+ */
+ uint8_t *extradata;
+ /**
+ * Size of the extradata content in bytes.
+ */
+ int extradata_size;
+
+ /**
+ * - video: the pixel format, the value corresponds to enum AVPixelFormat.
+ * - audio: the sample format, the value corresponds to enum AVSampleFormat.
+ */
+ int format;
+
+ /**
+ * The average bitrate of the encoded data (in bits per second).
+ */
+ int64_t bit_rate;
+
+ /**
+ * The number of bits per sample in the codewords.
+ *
+ * This is basically the bit rate per sample. It is mandatory for a number of
+ * formats to actually decode them. It is the number of bits for one sample in
+ * the actual coded bitstream.
+ *
+ * This could be, for example, 4 for ADPCM.
+ * For PCM formats this matches bits_per_raw_sample.
+ * Can be 0.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * This is the number of valid bits in each output sample. If the
+ * sample format has more bits, the least significant bits are additional
+ * padding bits, which are always 0. Use right shifts to reduce the sample
+ * to its actual size. For example, audio formats with 24 bit samples will
+ * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32.
+ * To get the original sample use "(int32_t)sample >> 8".
+ *
+ * For ADPCM this might be 12 or 16 or similar.
+ * Can be 0.
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * Codec-specific bitstream restrictions that the stream conforms to.
+ */
+ int profile;
+ int level;
+
+ /**
+ * Video only. The dimensions of the video frame in pixels.
+ */
+ int width;
+ int height;
+
+ /**
+ * Video only. The aspect ratio (width / height) which a single pixel
+ * should have when displayed.
+ *
+ * When the aspect ratio is unknown / undefined, the numerator should be
+ * set to 0 (the denominator may have any value).
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Video only. The order of the fields in interlaced video.
+ */
+ enum AVFieldOrder field_order;
+
+ /**
+ * Video only. Additional colorspace characteristics.
+ */
+ enum AVColorRange color_range;
+ enum AVColorPrimaries color_primaries;
+ enum AVColorTransferCharacteristic color_trc;
+ enum AVColorSpace color_space;
+ enum AVChromaLocation chroma_location;
+
+ /**
+ * Video only. Number of delayed frames.
+ */
+ int video_delay;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * Audio only. The channel layout bitmask. May be 0 if the channel layout is
+ * unknown or unspecified, otherwise the number of bits set must be equal to
+ * the channels field.
+ * @deprecated use ch_layout
+ */
+ attribute_deprecated
+ uint64_t channel_layout;
+ /**
+ * Audio only. The number of audio channels.
+ * @deprecated use ch_layout.nb_channels
+ */
+ attribute_deprecated
+ int channels;
+#endif
+ /**
+ * Audio only. The number of audio samples per second.
+ */
+ int sample_rate;
+ /**
+ * Audio only. The number of bytes per coded audio frame, required by some
+ * formats.
+ *
+ * Corresponds to nBlockAlign in WAVEFORMATEX.
+ */
+ int block_align;
+ /**
+ * Audio only. Audio frame size, if known. Required by some formats to be static.
+ */
+ int frame_size;
+
+ /**
+ * Audio only. The amount of padding (in samples) inserted by the encoder at
+ * the beginning of the audio. I.e. this number of leading decoded samples
+ * must be discarded by the caller to get the original audio without leading
+ * padding.
+ */
+ int initial_padding;
+ /**
+ * Audio only. The amount of padding (in samples) appended by the encoder to
+ * the end of the audio. I.e. this number of decoded samples must be
+ * discarded by the caller from the end of the stream to get the original
+ * audio without any trailing padding.
+ */
+ int trailing_padding;
+ /**
+ * Audio only. Number of samples to skip after a discontinuity.
+ */
+ int seek_preroll;
+
+ /**
+ * Audio only. The channel layout and number of channels.
+ */
+ AVChannelLayout ch_layout;
+} AVCodecParameters;
+
+/**
+ * Allocate a new AVCodecParameters and set its fields to default values
+ * (unknown/invalid/0). The returned struct must be freed with
+ * avcodec_parameters_free().
+ */
+AVCodecParameters *avcodec_parameters_alloc(void);
+
+/**
+ * Free an AVCodecParameters instance and everything associated with it and
+ * write NULL to the supplied pointer.
+ */
+void avcodec_parameters_free(AVCodecParameters **par);
+
+/**
+ * Copy the contents of src to dst. Any allocated fields in dst are freed and
+ * replaced with newly allocated duplicates of the corresponding fields in src.
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure.
+ */
+int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src);
+
+/**
+ * This function is the same as av_get_audio_frame_duration(), except it works
+ * with AVCodecParameters instead of an AVCodecContext.
+ */
+int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_PAR_H
diff --git a/media/ffvpx/libavcodec/dct.c b/media/ffvpx/libavcodec/dct.c
new file mode 100644
index 0000000000..eeb4d154e0
--- /dev/null
+++ b/media/ffvpx/libavcodec/dct.c
@@ -0,0 +1,228 @@
+/*
+ * (I)DCT Transforms
+ * Copyright (c) 2009 Peter Ross <pross@xvid.org>
+ * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
+ * Copyright (c) 2010 Vitor Sessak
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * (Inverse) Discrete Cosine Transforms. These are also known as the
+ * type II and type III DCTs respectively.
+ */
+
+#include <math.h>
+#include <string.h>
+
+#include "libavutil/error.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/mem.h"
+#include "dct.h"
+#include "dct32.h"
+
+/* sin(M_PI * x / (2 * n)) */
+#define SIN(s, n, x) (s->costab[(n) - (x)])
+
+/* cos(M_PI * x / (2 * n)) */
+#define COS(s, n, x) (s->costab[x])
+
+static void dst_calc_I_c(DCTContext *ctx, FFTSample *data)
+{
+ int n = 1 << ctx->nbits;
+ int i;
+
+ data[0] = 0;
+ for (i = 1; i < n / 2; i++) {
+ float tmp1 = data[i ];
+ float tmp2 = data[n - i];
+ float s = SIN(ctx, n, 2 * i);
+
+ s *= tmp1 + tmp2;
+ tmp1 = (tmp1 - tmp2) * 0.5f;
+ data[i] = s + tmp1;
+ data[n - i] = s - tmp1;
+ }
+
+ data[n / 2] *= 2;
+ ctx->rdft.rdft_calc(&ctx->rdft, data);
+
+ data[0] *= 0.5f;
+
+ for (i = 1; i < n - 2; i += 2) {
+ data[i + 1] += data[i - 1];
+ data[i] = -data[i + 2];
+ }
+
+ data[n - 1] = 0;
+}
+
+static void dct_calc_I_c(DCTContext *ctx, FFTSample *data)
+{
+ int n = 1 << ctx->nbits;
+ int i;
+ float next = -0.5f * (data[0] - data[n]);
+
+ for (i = 0; i < n / 2; i++) {
+ float tmp1 = data[i];
+ float tmp2 = data[n - i];
+ float s = SIN(ctx, n, 2 * i);
+ float c = COS(ctx, n, 2 * i);
+
+ c *= tmp1 - tmp2;
+ s *= tmp1 - tmp2;
+
+ next += c;
+
+ tmp1 = (tmp1 + tmp2) * 0.5f;
+ data[i] = tmp1 - s;
+ data[n - i] = tmp1 + s;
+ }
+
+ ctx->rdft.rdft_calc(&ctx->rdft, data);
+ data[n] = data[1];
+ data[1] = next;
+
+ for (i = 3; i <= n; i += 2)
+ data[i] = data[i - 2] - data[i];
+}
+
+static void dct_calc_III_c(DCTContext *ctx, FFTSample *data)
+{
+ int n = 1 << ctx->nbits;
+ int i;
+
+ float next = data[n - 1];
+ float inv_n = 1.0f / n;
+
+ for (i = n - 2; i >= 2; i -= 2) {
+ float val1 = data[i];
+ float val2 = data[i - 1] - data[i + 1];
+ float c = COS(ctx, n, i);
+ float s = SIN(ctx, n, i);
+
+ data[i] = c * val1 + s * val2;
+ data[i + 1] = s * val1 - c * val2;
+ }
+
+ data[1] = 2 * next;
+
+ ctx->rdft.rdft_calc(&ctx->rdft, data);
+
+ for (i = 0; i < n / 2; i++) {
+ float tmp1 = data[i] * inv_n;
+ float tmp2 = data[n - i - 1] * inv_n;
+ float csc = ctx->csc2[i] * (tmp1 - tmp2);
+
+ tmp1 += tmp2;
+ data[i] = tmp1 + csc;
+ data[n - i - 1] = tmp1 - csc;
+ }
+}
+
+static void dct_calc_II_c(DCTContext *ctx, FFTSample *data)
+{
+ int n = 1 << ctx->nbits;
+ int i;
+ float next;
+
+ for (i = 0; i < n / 2; i++) {
+ float tmp1 = data[i];
+ float tmp2 = data[n - i - 1];
+ float s = SIN(ctx, n, 2 * i + 1);
+
+ s *= tmp1 - tmp2;
+ tmp1 = (tmp1 + tmp2) * 0.5f;
+
+ data[i] = tmp1 + s;
+ data[n-i-1] = tmp1 - s;
+ }
+
+ ctx->rdft.rdft_calc(&ctx->rdft, data);
+
+ next = data[1] * 0.5;
+ data[1] *= -1;
+
+ for (i = n - 2; i >= 0; i -= 2) {
+ float inr = data[i ];
+ float ini = data[i + 1];
+ float c = COS(ctx, n, i);
+ float s = SIN(ctx, n, i);
+
+ data[i] = c * inr + s * ini;
+ data[i + 1] = next;
+
+ next += s * inr - c * ini;
+ }
+}
+
+static void dct32_func(DCTContext *ctx, FFTSample *data)
+{
+ ctx->dct32(data, data);
+}
+
+av_cold int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType inverse)
+{
+ int n = 1 << nbits;
+ int i;
+ int ret;
+
+ memset(s, 0, sizeof(*s));
+
+ s->nbits = nbits;
+ s->inverse = inverse;
+
+ if (inverse == DCT_II && nbits == 5) {
+ s->dct_calc = dct32_func;
+ } else {
+ ff_init_ff_cos_tabs(nbits + 2);
+
+ s->costab = ff_cos_tabs[nbits + 2];
+ s->csc2 = av_malloc_array(n / 2, sizeof(FFTSample));
+ if (!s->csc2)
+ return AVERROR(ENOMEM);
+
+ if ((ret = ff_rdft_init(&s->rdft, nbits, inverse == DCT_III)) < 0) {
+ av_freep(&s->csc2);
+ return ret;
+ }
+
+ for (i = 0; i < n / 2; i++)
+ s->csc2[i] = 0.5 / sin((M_PI / (2 * n) * (2 * i + 1)));
+
+ switch (inverse) {
+ case DCT_I : s->dct_calc = dct_calc_I_c; break;
+ case DCT_II : s->dct_calc = dct_calc_II_c; break;
+ case DCT_III: s->dct_calc = dct_calc_III_c; break;
+ case DST_I : s->dct_calc = dst_calc_I_c; break;
+ }
+ }
+
+ s->dct32 = ff_dct32_float;
+#if ARCH_X86
+ ff_dct_init_x86(s);
+#endif
+
+ return 0;
+}
+
+av_cold void ff_dct_end(DCTContext *s)
+{
+ ff_rdft_end(&s->rdft);
+ av_freep(&s->csc2);
+}
diff --git a/media/ffvpx/libavcodec/dct.h b/media/ffvpx/libavcodec/dct.h
new file mode 100644
index 0000000000..0a03e256d1
--- /dev/null
+++ b/media/ffvpx/libavcodec/dct.h
@@ -0,0 +1,69 @@
+/*
+ * (I)DCT Transforms
+ * Copyright (c) 2009 Peter Ross <pross@xvid.org>
+ * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
+ * Copyright (c) 2010 Vitor Sessak
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#if !defined(AVCODEC_DCT_H) && (!defined(FFT_FLOAT) || FFT_FLOAT)
+#define AVCODEC_DCT_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "rdft.h"
+
+struct DCTContext {
+ int nbits;
+ int inverse;
+ RDFTContext rdft;
+ const float *costab;
+ FFTSample *csc2;
+ void (*dct_calc)(struct DCTContext *s, FFTSample *data);
+ void (*dct32)(FFTSample *out, const FFTSample *in);
+};
+
+/**
+ * Set up DCT.
+ * @param nbits size of the input array:
+ * (1 << nbits) for DCT-II, DCT-III and DST-I
+ * (1 << nbits) + 1 for DCT-I
+ *
+ * @note the first element of the input of DST-I is ignored
+ */
+int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType type);
+void ff_dct_end (DCTContext *s);
+
+void ff_dct_init_x86(DCTContext *s);
+
+void ff_fdct_ifast(int16_t *data);
+void ff_fdct_ifast248(int16_t *data);
+void ff_jpeg_fdct_islow_8(int16_t *data);
+void ff_jpeg_fdct_islow_10(int16_t *data);
+void ff_fdct248_islow_8(int16_t *data);
+void ff_fdct248_islow_10(int16_t *data);
+
+void ff_j_rev_dct(int16_t *data);
+void ff_j_rev_dct4(int16_t *data);
+void ff_j_rev_dct2(int16_t *data);
+void ff_j_rev_dct1(int16_t *data);
+void ff_jref_idct_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_jref_idct_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+
+#endif /* AVCODEC_DCT_H */
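For orientation, a hedged sketch of how the DCTContext declared above is driven: a 32-point in-place DCT-II on a caller-provided buffer. These are internal (ff_-prefixed) helpers normally used only inside libavcodec, so the sketch assumes the internal build tree and headers; the wrapper name run_dct32 is a placeholder:

    #include "libavcodec/dct.h"   /* pulls in RDFTContext/FFTSample and DCTTransformType */

    /* Computes an in-place 32-point DCT-II over data[0..31]. */
    static int run_dct32(FFTSample data[32])
    {
        DCTContext dct;
        int ret = ff_dct_init(&dct, 5, DCT_II);  /* 1 << 5 == 32 samples */

        if (ret < 0)
            return ret;

        dct.dct_calc(&dct, data);  /* transform runs via the selected dct_calc callback */
        ff_dct_end(&dct);
        return 0;
    }

For nbits == 5 and DCT_II, ff_dct_init() routes dct_calc to the dedicated 32-point transform, as shown in dct.c above.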
diff --git a/media/ffvpx/libavcodec/dct32.h b/media/ffvpx/libavcodec/dct32.h
new file mode 100644
index 0000000000..61bf223a8d
--- /dev/null
+++ b/media/ffvpx/libavcodec/dct32.h
@@ -0,0 +1,25 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DCT32_H
+#define AVCODEC_DCT32_H
+
+void ff_dct32_float(float *dst, const float *src);
+void ff_dct32_fixed(int *dst, const int *src);
+
+#endif /* AVCODEC_DCT32_H */
diff --git a/media/ffvpx/libavcodec/dct32_fixed.c b/media/ffvpx/libavcodec/dct32_fixed.c
new file mode 100644
index 0000000000..9025d5efdd
--- /dev/null
+++ b/media/ffvpx/libavcodec/dct32_fixed.c
@@ -0,0 +1,20 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define DCT32_FLOAT 0
+#include "dct32_template.c"
diff --git a/media/ffvpx/libavcodec/dct32_float.c b/media/ffvpx/libavcodec/dct32_float.c
new file mode 100644
index 0000000000..597c9bb639
--- /dev/null
+++ b/media/ffvpx/libavcodec/dct32_float.c
@@ -0,0 +1,20 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define DCT32_FLOAT 1
+#include "dct32_template.c"
diff --git a/media/ffvpx/libavcodec/dct32_template.c b/media/ffvpx/libavcodec/dct32_template.c
new file mode 100644
index 0000000000..51cebc053f
--- /dev/null
+++ b/media/ffvpx/libavcodec/dct32_template.c
@@ -0,0 +1,288 @@
+/*
+ * Template for the Discrete Cosine Transform for 32 samples
+ * Copyright (c) 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dct32.h"
+#include "mathops.h"
+#include "libavutil/internal.h"
+
+#ifdef CHECKED
+#define SUINT int
+#define SUINT32 int32_t
+#else
+#define SUINT unsigned
+#define SUINT32 uint32_t
+#endif
+
+#if DCT32_FLOAT
+# define dct32 ff_dct32_float
+# define FIXHR(x) ((float)(x))
+# define MULH3(x, y, s) ((s)*(y)*(x))
+# define INTFLOAT float
+# define SUINTFLOAT float
+#else
+# define dct32 ff_dct32_fixed
+# define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5))
+# define MULH3(x, y, s) MULH((s)*(x), y)
+# define INTFLOAT int
+# define SUINTFLOAT SUINT
+#endif
+
+
+/* tab[j][k] = 1.0 / (2.0 * cos(pi*(2*k+1) / 2^(6 - j))) */
+
+/* cos(i*pi/64) */
+
+#define COS0_0 FIXHR(0.50060299823519630134/2)
+#define COS0_1 FIXHR(0.50547095989754365998/2)
+#define COS0_2 FIXHR(0.51544730992262454697/2)
+#define COS0_3 FIXHR(0.53104259108978417447/2)
+#define COS0_4 FIXHR(0.55310389603444452782/2)
+#define COS0_5 FIXHR(0.58293496820613387367/2)
+#define COS0_6 FIXHR(0.62250412303566481615/2)
+#define COS0_7 FIXHR(0.67480834145500574602/2)
+#define COS0_8 FIXHR(0.74453627100229844977/2)
+#define COS0_9 FIXHR(0.83934964541552703873/2)
+#define COS0_10 FIXHR(0.97256823786196069369/2)
+#define COS0_11 FIXHR(1.16943993343288495515/4)
+#define COS0_12 FIXHR(1.48416461631416627724/4)
+#define COS0_13 FIXHR(2.05778100995341155085/8)
+#define COS0_14 FIXHR(3.40760841846871878570/8)
+#define COS0_15 FIXHR(10.19000812354805681150/32)
+
+#define COS1_0 FIXHR(0.50241928618815570551/2)
+#define COS1_1 FIXHR(0.52249861493968888062/2)
+#define COS1_2 FIXHR(0.56694403481635770368/2)
+#define COS1_3 FIXHR(0.64682178335999012954/2)
+#define COS1_4 FIXHR(0.78815462345125022473/2)
+#define COS1_5 FIXHR(1.06067768599034747134/4)
+#define COS1_6 FIXHR(1.72244709823833392782/4)
+#define COS1_7 FIXHR(5.10114861868916385802/16)
+
+#define COS2_0 FIXHR(0.50979557910415916894/2)
+#define COS2_1 FIXHR(0.60134488693504528054/2)
+#define COS2_2 FIXHR(0.89997622313641570463/2)
+#define COS2_3 FIXHR(2.56291544774150617881/8)
+
+#define COS3_0 FIXHR(0.54119610014619698439/2)
+#define COS3_1 FIXHR(1.30656296487637652785/4)
+
+#define COS4_0 FIXHR(M_SQRT1_2/2)
+
+/* butterfly operator */
+#define BF(a, b, c, s)\
+{\
+ tmp0 = val##a + val##b;\
+ tmp1 = val##a - val##b;\
+ val##a = tmp0;\
+ val##b = MULH3(tmp1, c, 1<<(s));\
+}
+
+#define BF0(a, b, c, s)\
+{\
+ tmp0 = tab[a] + tab[b];\
+ tmp1 = tab[a] - tab[b];\
+ val##a = tmp0;\
+ val##b = MULH3(tmp1, c, 1<<(s));\
+}
+
+#define BF1(a, b, c, d)\
+{\
+ BF(a, b, COS4_0, 1);\
+ BF(c, d,-COS4_0, 1);\
+ val##c += val##d;\
+}
+
+#define BF2(a, b, c, d)\
+{\
+ BF(a, b, COS4_0, 1);\
+ BF(c, d,-COS4_0, 1);\
+ val##c += val##d;\
+ val##a += val##c;\
+ val##c += val##b;\
+ val##b += val##d;\
+}
+
+#define ADD(a, b) val##a += val##b
+
+/* DCT32 without 1/sqrt(2) coef zero scaling. */
+void dct32(INTFLOAT *out, const INTFLOAT *tab_arg)
+{
+ const SUINTFLOAT *tab = tab_arg;
+ SUINTFLOAT tmp0, tmp1;
+
+ SUINTFLOAT val0 , val1 , val2 , val3 , val4 , val5 , val6 , val7 ,
+ val8 , val9 , val10, val11, val12, val13, val14, val15,
+ val16, val17, val18, val19, val20, val21, val22, val23,
+ val24, val25, val26, val27, val28, val29, val30, val31;
+
+ /* pass 1 */
+ BF0( 0, 31, COS0_0 , 1);
+ BF0(15, 16, COS0_15, 5);
+ /* pass 2 */
+ BF( 0, 15, COS1_0 , 1);
+ BF(16, 31,-COS1_0 , 1);
+ /* pass 1 */
+ BF0( 7, 24, COS0_7 , 1);
+ BF0( 8, 23, COS0_8 , 1);
+ /* pass 2 */
+ BF( 7, 8, COS1_7 , 4);
+ BF(23, 24,-COS1_7 , 4);
+ /* pass 3 */
+ BF( 0, 7, COS2_0 , 1);
+ BF( 8, 15,-COS2_0 , 1);
+ BF(16, 23, COS2_0 , 1);
+ BF(24, 31,-COS2_0 , 1);
+ /* pass 1 */
+ BF0( 3, 28, COS0_3 , 1);
+ BF0(12, 19, COS0_12, 2);
+ /* pass 2 */
+ BF( 3, 12, COS1_3 , 1);
+ BF(19, 28,-COS1_3 , 1);
+ /* pass 1 */
+ BF0( 4, 27, COS0_4 , 1);
+ BF0(11, 20, COS0_11, 2);
+ /* pass 2 */
+ BF( 4, 11, COS1_4 , 1);
+ BF(20, 27,-COS1_4 , 1);
+ /* pass 3 */
+ BF( 3, 4, COS2_3 , 3);
+ BF(11, 12,-COS2_3 , 3);
+ BF(19, 20, COS2_3 , 3);
+ BF(27, 28,-COS2_3 , 3);
+ /* pass 4 */
+ BF( 0, 3, COS3_0 , 1);
+ BF( 4, 7,-COS3_0 , 1);
+ BF( 8, 11, COS3_0 , 1);
+ BF(12, 15,-COS3_0 , 1);
+ BF(16, 19, COS3_0 , 1);
+ BF(20, 23,-COS3_0 , 1);
+ BF(24, 27, COS3_0 , 1);
+ BF(28, 31,-COS3_0 , 1);
+
+
+
+ /* pass 1 */
+ BF0( 1, 30, COS0_1 , 1);
+ BF0(14, 17, COS0_14, 3);
+ /* pass 2 */
+ BF( 1, 14, COS1_1 , 1);
+ BF(17, 30,-COS1_1 , 1);
+ /* pass 1 */
+ BF0( 6, 25, COS0_6 , 1);
+ BF0( 9, 22, COS0_9 , 1);
+ /* pass 2 */
+ BF( 6, 9, COS1_6 , 2);
+ BF(22, 25,-COS1_6 , 2);
+ /* pass 3 */
+ BF( 1, 6, COS2_1 , 1);
+ BF( 9, 14,-COS2_1 , 1);
+ BF(17, 22, COS2_1 , 1);
+ BF(25, 30,-COS2_1 , 1);
+
+ /* pass 1 */
+ BF0( 2, 29, COS0_2 , 1);
+ BF0(13, 18, COS0_13, 3);
+ /* pass 2 */
+ BF( 2, 13, COS1_2 , 1);
+ BF(18, 29,-COS1_2 , 1);
+ /* pass 1 */
+ BF0( 5, 26, COS0_5 , 1);
+ BF0(10, 21, COS0_10, 1);
+ /* pass 2 */
+ BF( 5, 10, COS1_5 , 2);
+ BF(21, 26,-COS1_5 , 2);
+ /* pass 3 */
+ BF( 2, 5, COS2_2 , 1);
+ BF(10, 13,-COS2_2 , 1);
+ BF(18, 21, COS2_2 , 1);
+ BF(26, 29,-COS2_2 , 1);
+ /* pass 4 */
+ BF( 1, 2, COS3_1 , 2);
+ BF( 5, 6,-COS3_1 , 2);
+ BF( 9, 10, COS3_1 , 2);
+ BF(13, 14,-COS3_1 , 2);
+ BF(17, 18, COS3_1 , 2);
+ BF(21, 22,-COS3_1 , 2);
+ BF(25, 26, COS3_1 , 2);
+ BF(29, 30,-COS3_1 , 2);
+
+ /* pass 5 */
+ BF1( 0, 1, 2, 3);
+ BF2( 4, 5, 6, 7);
+ BF1( 8, 9, 10, 11);
+ BF2(12, 13, 14, 15);
+ BF1(16, 17, 18, 19);
+ BF2(20, 21, 22, 23);
+ BF1(24, 25, 26, 27);
+ BF2(28, 29, 30, 31);
+
+ /* pass 6 */
+
+ ADD( 8, 12);
+ ADD(12, 10);
+ ADD(10, 14);
+ ADD(14, 9);
+ ADD( 9, 13);
+ ADD(13, 11);
+ ADD(11, 15);
+
+ out[ 0] = val0;
+ out[16] = val1;
+ out[ 8] = val2;
+ out[24] = val3;
+ out[ 4] = val4;
+ out[20] = val5;
+ out[12] = val6;
+ out[28] = val7;
+ out[ 2] = val8;
+ out[18] = val9;
+ out[10] = val10;
+ out[26] = val11;
+ out[ 6] = val12;
+ out[22] = val13;
+ out[14] = val14;
+ out[30] = val15;
+
+ ADD(24, 28);
+ ADD(28, 26);
+ ADD(26, 30);
+ ADD(30, 25);
+ ADD(25, 29);
+ ADD(29, 27);
+ ADD(27, 31);
+
+ out[ 1] = val16 + val24;
+ out[17] = val17 + val25;
+ out[ 9] = val18 + val26;
+ out[25] = val19 + val27;
+ out[ 5] = val20 + val28;
+ out[21] = val21 + val29;
+ out[13] = val22 + val30;
+ out[29] = val23 + val31;
+ out[ 3] = val24 + val20;
+ out[19] = val25 + val21;
+ out[11] = val26 + val22;
+ out[27] = val27 + val23;
+ out[ 7] = val28 + val18;
+ out[23] = val29 + val19;
+ out[15] = val30 + val17;
+ out[31] = val31;
+}
diff --git a/media/ffvpx/libavcodec/decode.c b/media/ffvpx/libavcodec/decode.c
new file mode 100644
index 0000000000..be2be81089
--- /dev/null
+++ b/media/ffvpx/libavcodec/decode.c
@@ -0,0 +1,1687 @@
+/*
+ * generic decoding-related code
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "config.h"
+
+#if CONFIG_ICONV
+# include <iconv.h>
+#endif
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/common.h"
+#include "libavutil/fifo.h"
+#include "libavutil/frame.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/intmath.h"
+#include "libavutil/opt.h"
+
+#include "avcodec.h"
+#include "bytestream.h"
+#include "bsf.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "hwconfig.h"
+#include "internal.h"
+#include "thread.h"
+
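+/* Layout of AV_PKT_DATA_PARAM_CHANGE side data as parsed below; all fields
+ * are little-endian and the optional fields are present only when the
+ * corresponding flag bit is set in the leading flags word:
+ *   u32 flags
+ *   [u32 channel count]     (FF_API_OLD_CHANNEL_LAYOUT only)
+ *   [u64 channel layout]    (FF_API_OLD_CHANNEL_LAYOUT only)
+ *   [u32 sample rate]
+ *   [u32 width, u32 height]
+ */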
+static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
+{
+ int ret;
+ size_t size;
+ const uint8_t *data;
+ uint32_t flags;
+ int64_t val;
+
+ data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
+ if (!data)
+ return 0;
+
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
+ av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
+ "changes, but PARAM_CHANGE side data was sent to it.\n");
+ ret = AVERROR(EINVAL);
+ goto fail2;
+ }
+
+ if (size < 4)
+ goto fail;
+
+ flags = bytestream_get_le32(&data);
+ size -= 4;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
+ if (size < 4)
+ goto fail;
+ val = bytestream_get_le32(&data);
+ if (val <= 0 || val > INT_MAX) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
+ ret = AVERROR_INVALIDDATA;
+ goto fail2;
+ }
+ avctx->channels = val;
+ size -= 4;
+ }
+ if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
+ if (size < 8)
+ goto fail;
+ avctx->channel_layout = bytestream_get_le64(&data);
+ size -= 8;
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
+ if (size < 4)
+ goto fail;
+ val = bytestream_get_le32(&data);
+ if (val <= 0 || val > INT_MAX) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
+ ret = AVERROR_INVALIDDATA;
+ goto fail2;
+ }
+ avctx->sample_rate = val;
+ size -= 4;
+ }
+ if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
+ if (size < 8)
+ goto fail;
+ avctx->width = bytestream_get_le32(&data);
+ avctx->height = bytestream_get_le32(&data);
+ size -= 8;
+ ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
+ if (ret < 0)
+ goto fail2;
+ }
+
+ return 0;
+fail:
+ av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
+ ret = AVERROR_INVALIDDATA;
+fail2:
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
+ if (avctx->err_recognition & AV_EF_EXPLODE)
+ return ret;
+ }
+ return 0;
+}
+
+static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
+{
+ int ret = 0;
+
+ av_packet_unref(avci->last_pkt_props);
+ if (pkt) {
+ ret = av_packet_copy_props(avci->last_pkt_props, pkt);
+ if (!ret)
+ avci->last_pkt_props->opaque = (void *)(intptr_t)pkt->size; // Needed for ff_decode_frame_props().
+ }
+ return ret;
+}
+
+static int decode_bsfs_init(AVCodecContext *avctx)
+{
+ AVCodecInternal *avci = avctx->internal;
+ const FFCodec *const codec = ffcodec(avctx->codec);
+ int ret;
+
+ if (avci->bsf)
+ return 0;
+
+ ret = av_bsf_list_parse_str(codec->bsfs, &avci->bsf);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", codec->bsfs, av_err2str(ret));
+ if (ret != AVERROR(ENOMEM))
+ ret = AVERROR_BUG;
+ goto fail;
+ }
+
+ /* We do not currently have an API for passing the input timebase into decoders,
+ * but no filters used here should actually need it.
+ * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
+ avci->bsf->time_base_in = (AVRational){ 1, 90000 };
+ ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx);
+ if (ret < 0)
+ goto fail;
+
+ ret = av_bsf_init(avci->bsf);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+fail:
+ av_bsf_free(&avci->bsf);
+ return ret;
+}
+
+int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ if (avci->draining)
+ return AVERROR_EOF;
+
+ ret = av_bsf_receive_packet(avci->bsf, pkt);
+ if (ret == AVERROR_EOF)
+ avci->draining = 1;
+ if (ret < 0)
+ return ret;
+
+ if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
+ ret = extract_packet_props(avctx->internal, pkt);
+ if (ret < 0)
+ goto finish;
+ }
+
+ ret = apply_param_change(avctx, pkt);
+ if (ret < 0)
+ goto finish;
+
+ return 0;
+finish:
+ av_packet_unref(pkt);
+ return ret;
+}
+
+/**
+ * Attempt to guess proper monotonic timestamps for decoded video frames
+ * which might have incorrect times. Input timestamps may wrap around, in
+ * which case the output will as well.
+ *
+ * @param reordered_pts the pts field of the decoded AVPacket, as passed
+ *                      through AVFrame.pts
+ * @param dts the dts field of the decoded AVPacket
+ * @return one of the input values, may be AV_NOPTS_VALUE
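+ *
+ * A hedged illustration of the heuristic (not from the original sources):
+ * the function counts how often each timestamp stream was non-monotonic
+ * (pts_correction_num_faulty_pts / _dts) and prefers the reordered pts as
+ * long as it has been at least as reliable as dts; a muxer that, say,
+ * repeats the same pts on every packet while dts keeps increasing will
+ * quickly push the pts fault counter past the dts one, after which dts is
+ * returned instead.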
+ */
+static int64_t guess_correct_pts(AVCodecContext *ctx,
+ int64_t reordered_pts, int64_t dts)
+{
+ int64_t pts = AV_NOPTS_VALUE;
+
+ if (dts != AV_NOPTS_VALUE) {
+ ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
+ ctx->pts_correction_last_dts = dts;
+ } else if (reordered_pts != AV_NOPTS_VALUE)
+ ctx->pts_correction_last_dts = reordered_pts;
+
+ if (reordered_pts != AV_NOPTS_VALUE) {
+ ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
+ ctx->pts_correction_last_pts = reordered_pts;
+ } else if(dts != AV_NOPTS_VALUE)
+ ctx->pts_correction_last_pts = dts;
+
+ if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
+ && reordered_pts != AV_NOPTS_VALUE)
+ pts = reordered_pts;
+ else
+ pts = dts;
+
+ return pts;
+}
+
+/*
+ * The core of the receive_frame_wrapper for the decoders implementing
+ * the simple API. Certain decoders might consume partial packets without
+ * returning any output, so this function needs to be called in a loop until it
+ * returns EAGAIN.
+ **/
+static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
+{
+ AVCodecInternal *avci = avctx->internal;
+ AVPacket *const pkt = avci->in_pkt;
+ const FFCodec *const codec = ffcodec(avctx->codec);
+ int got_frame, actual_got_frame;
+ int ret;
+
+ if (!pkt->data && !avci->draining) {
+ av_packet_unref(pkt);
+ ret = ff_decode_get_packet(avctx, pkt);
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+ }
+
+ // Some codecs (at least wma lossless) will crash when feeding drain packets
+ // after EOF was signaled.
+ if (avci->draining_done)
+ return AVERROR_EOF;
+
+ if (!pkt->data &&
+ !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
+ avctx->active_thread_type & FF_THREAD_FRAME))
+ return AVERROR_EOF;
+
+ got_frame = 0;
+
+ if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
+ ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
+ } else {
+ ret = codec->cb.decode(avctx, frame, &got_frame, pkt);
+
+ if (!(codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
+ frame->pkt_dts = pkt->dts;
+ if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
+ if(!avctx->has_b_frames)
+ frame->pkt_pos = pkt->pos;
+ //FIXME these should be under if(!avctx->has_b_frames)
+ /* get_buffer is supposed to set frame parameters */
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
+ if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
+ if (!frame->width) frame->width = avctx->width;
+ if (!frame->height) frame->height = avctx->height;
+ if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
+ }
+ }
+ }
+ emms_c();
+ actual_got_frame = got_frame;
+
+ if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
+ if (frame->flags & AV_FRAME_FLAG_DISCARD)
+ got_frame = 0;
+ } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
+ uint8_t *side;
+ size_t side_size;
+ uint32_t discard_padding = 0;
+ uint8_t skip_reason = 0;
+ uint8_t discard_reason = 0;
+
+ if (ret >= 0 && got_frame) {
+ if (frame->format == AV_SAMPLE_FMT_NONE)
+ frame->format = avctx->sample_fmt;
+ if (!frame->ch_layout.nb_channels) {
+ int ret2 = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
+ if (ret2 < 0) {
+ ret = ret2;
+ got_frame = 0;
+ }
+ }
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!frame->channel_layout)
+ frame->channel_layout = avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ avctx->ch_layout.u.mask : 0;
+ if (!frame->channels)
+ frame->channels = avctx->ch_layout.nb_channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ if (!frame->sample_rate)
+ frame->sample_rate = avctx->sample_rate;
+ }
+
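+        /* AV_PKT_DATA_SKIP_SAMPLES side data, as consumed below:
+         *   u32le samples to skip from the start of this frame's output
+         *   u32le samples to discard from the end (discard_padding)
+         *   u8 skip reason, u8 discard reason (both re-exported verbatim as
+         *   AV_FRAME_DATA_SKIP_SAMPLES when AV_CODEC_FLAG2_SKIP_MANUAL is set) */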
+ side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
+ if(side && side_size>=10) {
+ avci->skip_samples = AV_RL32(side);
+ avci->skip_samples = FFMAX(0, avci->skip_samples);
+ discard_padding = AV_RL32(side + 4);
+ av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
+ avci->skip_samples, (int)discard_padding);
+ skip_reason = AV_RL8(side + 8);
+ discard_reason = AV_RL8(side + 9);
+ }
+
+ if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
+ !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+ avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
+ got_frame = 0;
+ *discarded_samples += frame->nb_samples;
+ }
+
+ if (avci->skip_samples > 0 && got_frame &&
+ !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+ if(frame->nb_samples <= avci->skip_samples){
+ got_frame = 0;
+ *discarded_samples += frame->nb_samples;
+ avci->skip_samples -= frame->nb_samples;
+ av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
+ avci->skip_samples);
+ } else {
+ av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
+ frame->nb_samples - avci->skip_samples, avctx->ch_layout.nb_channels, frame->format);
+ if(avctx->pkt_timebase.num && avctx->sample_rate) {
+ int64_t diff_ts = av_rescale_q(avci->skip_samples,
+ (AVRational){1, avctx->sample_rate},
+ avctx->pkt_timebase);
+ if(frame->pts!=AV_NOPTS_VALUE)
+ frame->pts += diff_ts;
+ if(frame->pkt_dts!=AV_NOPTS_VALUE)
+ frame->pkt_dts += diff_ts;
+ if (frame->duration >= diff_ts)
+ frame->duration -= diff_ts;
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
+ }
+ av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
+ avci->skip_samples, frame->nb_samples);
+ *discarded_samples += avci->skip_samples;
+ frame->nb_samples -= avci->skip_samples;
+ avci->skip_samples = 0;
+ }
+ }
+
+ if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
+ !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+ if (discard_padding == frame->nb_samples) {
+ *discarded_samples += frame->nb_samples;
+ got_frame = 0;
+ } else {
+ if(avctx->pkt_timebase.num && avctx->sample_rate) {
+ int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
+ (AVRational){1, avctx->sample_rate},
+ avctx->pkt_timebase);
+ frame->duration = diff_ts;
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
+ }
+ av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
+ (int)discard_padding, frame->nb_samples);
+ frame->nb_samples -= discard_padding;
+ }
+ }
+
+ if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
+ AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
+ if (fside) {
+ AV_WL32(fside->data, avci->skip_samples);
+ AV_WL32(fside->data + 4, discard_padding);
+ AV_WL8(fside->data + 8, skip_reason);
+ AV_WL8(fside->data + 9, discard_reason);
+ avci->skip_samples = 0;
+ }
+ }
+ }
+
+ if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
+ !avci->showed_multi_packet_warning &&
+ ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
+ av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
+ avci->showed_multi_packet_warning = 1;
+ }
+
+ if (!got_frame)
+ av_frame_unref(frame);
+
+ if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO)
+ ret = pkt->size;
+
+ /* do not stop draining when actual_got_frame != 0 or ret < 0 */
+ /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
+ if (avci->draining && !actual_got_frame) {
+ if (ret < 0) {
+            /* prevent an infinite loop if a decoder wrongly always returns an error on draining */
+ /* reasonable nb_errors_max = maximum b frames + thread count */
+ int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
+ avctx->thread_count : 1);
+
+ if (avci->nb_draining_errors++ >= nb_errors_max) {
+ av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
+ "Stop draining and force EOF.\n");
+ avci->draining_done = 1;
+ ret = AVERROR_BUG;
+ }
+ } else {
+ avci->draining_done = 1;
+ }
+ }
+
+ if (ret >= pkt->size || ret < 0) {
+ av_packet_unref(pkt);
+ } else {
+ int consumed = ret;
+
+ pkt->data += consumed;
+ pkt->size -= consumed;
+ pkt->pts = AV_NOPTS_VALUE;
+ pkt->dts = AV_NOPTS_VALUE;
+ if (!(codec->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
+ // See extract_packet_props() comment.
+ avci->last_pkt_props->opaque = (void *)((intptr_t)avci->last_pkt_props->opaque - consumed);
+ avci->last_pkt_props->pts = AV_NOPTS_VALUE;
+ avci->last_pkt_props->dts = AV_NOPTS_VALUE;
+ }
+ }
+
+ if (got_frame)
+ av_assert0(frame->buf[0]);
+
+ return ret < 0 ? ret : 0;
+}
+
+#if CONFIG_LCMS2
+static int detect_colorspace(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+ enum AVColorTransferCharacteristic trc;
+ AVColorPrimariesDesc coeffs;
+ enum AVColorPrimaries prim;
+ cmsHPROFILE profile;
+ AVFrameSideData *sd;
+ int ret;
+ if (!(avctx->flags2 & AV_CODEC_FLAG2_ICC_PROFILES))
+ return 0;
+
+ sd = av_frame_get_side_data(frame, AV_FRAME_DATA_ICC_PROFILE);
+ if (!sd || !sd->size)
+ return 0;
+
+ if (!avci->icc.avctx) {
+ ret = ff_icc_context_init(&avci->icc, avctx);
+ if (ret < 0)
+ return ret;
+ }
+
+ profile = cmsOpenProfileFromMemTHR(avci->icc.ctx, sd->data, sd->size);
+ if (!profile)
+ return AVERROR_INVALIDDATA;
+
+ ret = ff_icc_profile_read_primaries(&avci->icc, profile, &coeffs);
+ if (!ret)
+ ret = ff_icc_profile_detect_transfer(&avci->icc, profile, &trc);
+ cmsCloseProfile(profile);
+ if (ret < 0)
+ return ret;
+
+ prim = av_csp_primaries_id_from_desc(&coeffs);
+ if (prim != AVCOL_PRI_UNSPECIFIED)
+ frame->color_primaries = prim;
+ if (trc != AVCOL_TRC_UNSPECIFIED)
+ frame->color_trc = trc;
+ return 0;
+}
+#else /* !CONFIG_LCMS2 */
+static int detect_colorspace(av_unused AVCodecContext *c, av_unused AVFrame *f)
+{
+ return 0;
+}
+#endif
+
+static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ int ret;
+ int64_t discarded_samples = 0;
+
+ while (!frame->buf[0]) {
+ if (discarded_samples > avctx->max_samples)
+ return AVERROR(EAGAIN);
+ ret = decode_simple_internal(avctx, frame, &discarded_samples);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+ const FFCodec *const codec = ffcodec(avctx->codec);
+ int ret, ok;
+
+ av_assert0(!frame->buf[0]);
+
+ if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME) {
+ ret = codec->cb.receive_frame(avctx, frame);
+ } else
+ ret = decode_simple_receive_frame(avctx, frame);
+
+ if (ret == AVERROR_EOF)
+ avci->draining_done = 1;
+
+ /* preserve ret */
+ ok = detect_colorspace(avctx, frame);
+ if (ok < 0) {
+ av_frame_unref(frame);
+ return ok;
+ }
+
+ if (!ret) {
+ frame->best_effort_timestamp = guess_correct_pts(avctx,
+ frame->pts,
+ frame->pkt_dts);
+
+#if FF_API_PKT_DURATION
+FF_DISABLE_DEPRECATION_WARNINGS
+ frame->pkt_duration = frame->duration;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ /* the only case where decode data is not set should be decoders
+ * that do not call ff_get_buffer() */
+ av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
+ !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
+
+ if (frame->private_ref) {
+ FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
+
+ if (fdd->post_process) {
+ ret = fdd->post_process(avctx, frame);
+ if (ret < 0) {
+ av_frame_unref(frame);
+ return ret;
+ }
+ }
+ }
+ }
+
+ /* free the per-frame decode data */
+ av_buffer_unref(&frame->private_ref);
+
+ return ret;
+}
+
+int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avctx->internal->draining)
+ return AVERROR_EOF;
+
+ if (avpkt && !avpkt->size && avpkt->data)
+ return AVERROR(EINVAL);
+
+ av_packet_unref(avci->buffer_pkt);
+ if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
+ ret = av_packet_ref(avci->buffer_pkt, avpkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
+ if (ret < 0) {
+ av_packet_unref(avci->buffer_pkt);
+ return ret;
+ }
+
+ if (!avci->buffer_frame->buf[0]) {
+ ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
+ if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+ return ret;
+ }
+
+ return 0;
+}
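+
+/* Hedged caller-side sketch (not part of this file): avcodec_send_packet()
+ * is normally paired with avcodec_receive_frame() in a loop along these
+ * lines, with pkt == NULL used to enter draining mode:
+ *
+ *     if ((ret = avcodec_send_packet(avctx, pkt)) < 0)
+ *         return ret;
+ *     while ((ret = avcodec_receive_frame(avctx, frame)) >= 0) {
+ *         // ... consume frame ...
+ *         av_frame_unref(frame);
+ *     }
+ *     // AVERROR(EAGAIN): feed more input; AVERROR_EOF: fully drained
+ */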
+
+static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
+{
+ /* make sure we are noisy about decoders returning invalid cropping data */
+ if (frame->crop_left >= INT_MAX - frame->crop_right ||
+ frame->crop_top >= INT_MAX - frame->crop_bottom ||
+ (frame->crop_left + frame->crop_right) >= frame->width ||
+ (frame->crop_top + frame->crop_bottom) >= frame->height) {
+ av_log(avctx, AV_LOG_WARNING,
+ "Invalid cropping information set by a decoder: "
+ "%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER" "
+ "(frame size %dx%d). This is a bug, please report it\n",
+ frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
+ frame->width, frame->height);
+ frame->crop_left = 0;
+ frame->crop_right = 0;
+ frame->crop_top = 0;
+ frame->crop_bottom = 0;
+ return 0;
+ }
+
+ if (!avctx->apply_cropping)
+ return 0;
+
+ return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
+ AV_FRAME_CROP_UNALIGNED : 0);
+}
+
+// make sure frames returned to the caller are valid
+static int frame_validate(AVCodecContext *avctx, AVFrame *frame)
+{
+ if (!frame->buf[0] || frame->format < 0)
+ goto fail;
+
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ if (frame->width <= 0 || frame->height <= 0)
+ goto fail;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ if (!av_channel_layout_check(&frame->ch_layout) ||
+ frame->sample_rate <= 0)
+ goto fail;
+
+ break;
+ default: av_assert0(0);
+ }
+
+ return 0;
+fail:
+ av_log(avctx, AV_LOG_ERROR, "An invalid frame was output by a decoder. "
+ "This is a bug, please report it.\n");
+ return AVERROR_BUG;
+}
+
+int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret, changed;
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avci->buffer_frame->buf[0]) {
+ av_frame_move_ref(frame, avci->buffer_frame);
+ } else {
+ ret = decode_receive_frame_internal(avctx, frame);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = frame_validate(avctx, frame);
+ if (ret < 0)
+ goto fail;
+
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ ret = apply_cropping(avctx, frame);
+ if (ret < 0)
+ goto fail;
+ }
+
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
+
+ if (avctx->frame_num == 1) {
+ avci->initial_format = frame->format;
+ switch(avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ avci->initial_width = frame->width;
+ avci->initial_height = frame->height;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
+ avctx->sample_rate;
+ ret = av_channel_layout_copy(&avci->initial_ch_layout, &frame->ch_layout);
+ if (ret < 0)
+ goto fail;
+ break;
+ }
+ }
+
+ if (avctx->frame_num > 1) {
+ changed = avci->initial_format != frame->format;
+
+ switch(avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ changed |= avci->initial_width != frame->width ||
+ avci->initial_height != frame->height;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ changed |= avci->initial_sample_rate != frame->sample_rate ||
+ avci->initial_sample_rate != avctx->sample_rate ||
+ av_channel_layout_compare(&avci->initial_ch_layout, &frame->ch_layout);
+ break;
+ }
+
+ if (changed) {
+ avci->changed_frames_dropped++;
+ av_log(avctx, AV_LOG_INFO, "dropped changed frame #%"PRId64" pts %"PRId64
+ " drop count: %d \n",
+ avctx->frame_num, frame->pts,
+ avci->changed_frames_dropped);
+ ret = AVERROR_INPUT_CHANGED;
+ goto fail;
+ }
+ }
+ }
+ return 0;
+fail:
+ av_frame_unref(frame);
+ return ret;
+}
+
+static void get_subtitle_defaults(AVSubtitle *sub)
+{
+ memset(sub, 0, sizeof(*sub));
+ sub->pts = AV_NOPTS_VALUE;
+}
+
+#define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
+static int recode_subtitle(AVCodecContext *avctx, const AVPacket **outpkt,
+ const AVPacket *inpkt, AVPacket *buf_pkt)
+{
+#if CONFIG_ICONV
+ iconv_t cd = (iconv_t)-1;
+ int ret = 0;
+ char *inb, *outb;
+ size_t inl, outl;
+#endif
+
+ if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0) {
+ *outpkt = inpkt;
+ return 0;
+ }
+
+#if CONFIG_ICONV
+ inb = inpkt->data;
+ inl = inpkt->size;
+
+ if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
+ return AVERROR(ERANGE);
+ }
+
+ cd = iconv_open("UTF-8", avctx->sub_charenc);
+ av_assert0(cd != (iconv_t)-1);
+
+ ret = av_new_packet(buf_pkt, inl * UTF8_MAX_BYTES);
+ if (ret < 0)
+ goto end;
+ ret = av_packet_copy_props(buf_pkt, inpkt);
+ if (ret < 0)
+ goto end;
+ outb = buf_pkt->data;
+ outl = buf_pkt->size;
+
+ if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
+ iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
+ outl >= buf_pkt->size || inl != 0) {
+ ret = FFMIN(AVERROR(errno), -1);
+ av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
+ "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
+ goto end;
+ }
+ buf_pkt->size -= outl;
+ memset(buf_pkt->data + buf_pkt->size, 0, outl);
+ *outpkt = buf_pkt;
+
+ ret = 0;
+end:
+ if (ret < 0)
+ av_packet_unref(buf_pkt);
+ if (cd != (iconv_t)-1)
+ iconv_close(cd);
+ return ret;
+#else
+ av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
+ return AVERROR(EINVAL);
+#endif
+}
+
+static int utf8_check(const uint8_t *str)
+{
+ const uint8_t *byte;
+ uint32_t codepoint, min;
+
+ while (*str) {
+ byte = str;
+ GET_UTF8(codepoint, *(byte++), return 0;);
+ min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
+ 1 << (5 * (byte - str) - 4);
+ if (codepoint < min || codepoint >= 0x110000 ||
+ codepoint == 0xFFFE /* BOM */ ||
+ codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
+ return 0;
+ str = byte;
+ }
+ return 1;
+}
+
+int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
+ int *got_sub_ptr, const AVPacket *avpkt)
+{
+ int ret = 0;
+
+ if (!avpkt->data && avpkt->size) {
+ av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
+ return AVERROR(EINVAL);
+ }
+ if (!avctx->codec)
+ return AVERROR(EINVAL);
+ if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
+ return AVERROR(EINVAL);
+ }
+
+ *got_sub_ptr = 0;
+ get_subtitle_defaults(sub);
+
+ if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
+ AVCodecInternal *avci = avctx->internal;
+ const AVPacket *pkt;
+
+ ret = recode_subtitle(avctx, &pkt, avpkt, avci->buffer_pkt);
+ if (ret < 0)
+ return ret;
+
+ if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
+ sub->pts = av_rescale_q(avpkt->pts,
+ avctx->pkt_timebase, AV_TIME_BASE_Q);
+ ret = ffcodec(avctx->codec)->cb.decode_sub(avctx, sub, got_sub_ptr, pkt);
+ if (pkt == avci->buffer_pkt) // did we recode?
+ av_packet_unref(avci->buffer_pkt);
+ if (ret < 0) {
+ *got_sub_ptr = 0;
+ avsubtitle_free(sub);
+ return ret;
+ }
+ av_assert1(!sub->num_rects || *got_sub_ptr);
+
+ if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
+ avctx->pkt_timebase.num) {
+ AVRational ms = { 1, 1000 };
+ sub->end_display_time = av_rescale_q(avpkt->duration,
+ avctx->pkt_timebase, ms);
+ }
+
+ if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
+ sub->format = 0;
+ else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
+ sub->format = 1;
+
+ for (unsigned i = 0; i < sub->num_rects; i++) {
+ if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
+ sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid UTF-8 in decoded subtitles text; "
+ "maybe missing -sub_charenc option\n");
+ avsubtitle_free(sub);
+ *got_sub_ptr = 0;
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ if (*got_sub_ptr)
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ }
+
+ return ret;
+}
+
+enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
+ const enum AVPixelFormat *fmt)
+{
+ const AVPixFmtDescriptor *desc;
+ const AVCodecHWConfig *config;
+ int i, n;
+
+ // If a device was supplied when the codec was opened, assume that the
+ // user wants to use it.
+ if (avctx->hw_device_ctx && ffcodec(avctx->codec)->hw_configs) {
+ AVHWDeviceContext *device_ctx =
+ (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+ for (i = 0;; i++) {
+ config = &ffcodec(avctx->codec)->hw_configs[i]->public;
+ if (!config)
+ break;
+ if (!(config->methods &
+ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
+ continue;
+ if (device_ctx->type != config->device_type)
+ continue;
+ for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
+ if (config->pix_fmt == fmt[n])
+ return fmt[n];
+ }
+ }
+ }
+    // No device or other setup, so we have to choose from things which
+    // don't require any other external information.
+
+    // If the last element of the list is a software format, choose it
+    // (this should be the best software format, if any exist).
+ for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
+ desc = av_pix_fmt_desc_get(fmt[n - 1]);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
+ return fmt[n - 1];
+
+ // Finally, traverse the list in order and choose the first entry
+ // with no external dependencies (if there is no hardware configuration
+ // information available then this just picks the first entry).
+ for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
+ for (i = 0;; i++) {
+ config = avcodec_get_hw_config(avctx->codec, i);
+ if (!config)
+ break;
+ if (config->pix_fmt == fmt[n])
+ break;
+ }
+ if (!config) {
+ // No specific config available, so the decoder must be able
+ // to handle this format without any additional setup.
+ return fmt[n];
+ }
+ if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
+ // Usable with only internal setup.
+ return fmt[n];
+ }
+ }
+
+ // Nothing is usable, give up.
+ return AV_PIX_FMT_NONE;
+}
+
+int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
+ enum AVHWDeviceType dev_type)
+{
+ AVHWDeviceContext *device_ctx;
+ AVHWFramesContext *frames_ctx;
+ int ret;
+
+ if (!avctx->hwaccel)
+ return AVERROR(ENOSYS);
+
+ if (avctx->hw_frames_ctx)
+ return 0;
+ if (!avctx->hw_device_ctx) {
+ av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
+ "required for hardware accelerated decoding.\n");
+ return AVERROR(EINVAL);
+ }
+
+ device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
+ if (device_ctx->type != dev_type) {
+ av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
+ "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
+ av_hwdevice_get_type_name(device_ctx->type));
+ return AVERROR(EINVAL);
+ }
+
+ ret = avcodec_get_hw_frames_parameters(avctx,
+ avctx->hw_device_ctx,
+ avctx->hwaccel->pix_fmt,
+ &avctx->hw_frames_ctx);
+ if (ret < 0)
+ return ret;
+
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+
+
+ if (frames_ctx->initial_pool_size) {
+ // We guarantee 4 base work surfaces. The function above guarantees 1
+ // (the absolute minimum), so add the missing count.
+ frames_ctx->initial_pool_size += 3;
+ }
+
+ ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
+ if (ret < 0) {
+ av_buffer_unref(&avctx->hw_frames_ctx);
+ return ret;
+ }
+
+ return 0;
+}
+
+int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
+ AVBufferRef *device_ref,
+ enum AVPixelFormat hw_pix_fmt,
+ AVBufferRef **out_frames_ref)
+{
+ AVBufferRef *frames_ref = NULL;
+ const AVCodecHWConfigInternal *hw_config;
+ const AVHWAccel *hwa;
+ int i, ret;
+
+ for (i = 0;; i++) {
+ hw_config = ffcodec(avctx->codec)->hw_configs[i];
+ if (!hw_config)
+ return AVERROR(ENOENT);
+ if (hw_config->public.pix_fmt == hw_pix_fmt)
+ break;
+ }
+
+ hwa = hw_config->hwaccel;
+ if (!hwa || !hwa->frame_params)
+ return AVERROR(ENOENT);
+
+ frames_ref = av_hwframe_ctx_alloc(device_ref);
+ if (!frames_ref)
+ return AVERROR(ENOMEM);
+
+ ret = hwa->frame_params(avctx, frames_ref);
+ if (ret >= 0) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
+
+ if (frames_ctx->initial_pool_size) {
+ // If the user has requested that extra output surfaces be
+ // available then add them here.
+ if (avctx->extra_hw_frames > 0)
+ frames_ctx->initial_pool_size += avctx->extra_hw_frames;
+
+ // If frame threading is enabled then an extra surface per thread
+ // is also required.
+ if (avctx->active_thread_type & FF_THREAD_FRAME)
+ frames_ctx->initial_pool_size += avctx->thread_count;
+ }
+
+ *out_frames_ref = frames_ref;
+ } else {
+ av_buffer_unref(&frames_ref);
+ }
+ return ret;
+}
+
+static int hwaccel_init(AVCodecContext *avctx,
+ const AVCodecHWConfigInternal *hw_config)
+{
+ const AVHWAccel *hwaccel;
+ int err;
+
+ hwaccel = hw_config->hwaccel;
+ if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
+ avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
+ av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
+ hwaccel->name);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ if (hwaccel->priv_data_size) {
+ avctx->internal->hwaccel_priv_data =
+ av_mallocz(hwaccel->priv_data_size);
+ if (!avctx->internal->hwaccel_priv_data)
+ return AVERROR(ENOMEM);
+ }
+
+ avctx->hwaccel = hwaccel;
+ if (hwaccel->init) {
+ err = hwaccel->init(avctx);
+ if (err < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
+ "hwaccel initialisation returned error.\n",
+ av_get_pix_fmt_name(hw_config->public.pix_fmt));
+ av_freep(&avctx->internal->hwaccel_priv_data);
+ avctx->hwaccel = NULL;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void hwaccel_uninit(AVCodecContext *avctx)
+{
+ if (avctx->hwaccel && avctx->hwaccel->uninit)
+ avctx->hwaccel->uninit(avctx);
+
+ av_freep(&avctx->internal->hwaccel_priv_data);
+
+ avctx->hwaccel = NULL;
+
+ av_buffer_unref(&avctx->hw_frames_ctx);
+}
+
+int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
+{
+ const AVPixFmtDescriptor *desc;
+ enum AVPixelFormat *choices;
+ enum AVPixelFormat ret, user_choice;
+ const AVCodecHWConfigInternal *hw_config;
+ const AVCodecHWConfig *config;
+ int i, n, err;
+
+ // Find end of list.
+ for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
+ // Must contain at least one entry.
+ av_assert0(n >= 1);
+ // If a software format is available, it must be the last entry.
+ desc = av_pix_fmt_desc_get(fmt[n - 1]);
+ if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
+ // No software format is available.
+ } else {
+ avctx->sw_pix_fmt = fmt[n - 1];
+ }
+
+ choices = av_memdup(fmt, (n + 1) * sizeof(*choices));
+ if (!choices)
+ return AV_PIX_FMT_NONE;
+
+ for (;;) {
+ // Remove the previous hwaccel, if there was one.
+ hwaccel_uninit(avctx);
+
+ user_choice = avctx->get_format(avctx, choices);
+ if (user_choice == AV_PIX_FMT_NONE) {
+ // Explicitly chose nothing, give up.
+ ret = AV_PIX_FMT_NONE;
+ break;
+ }
+
+ desc = av_pix_fmt_desc_get(user_choice);
+ if (!desc) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
+ "get_format() callback.\n");
+ ret = AV_PIX_FMT_NONE;
+ break;
+ }
+ av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
+ desc->name);
+
+ for (i = 0; i < n; i++) {
+ if (choices[i] == user_choice)
+ break;
+ }
+ if (i == n) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
+ "%s not in possible list.\n", desc->name);
+ ret = AV_PIX_FMT_NONE;
+ break;
+ }
+
+ if (ffcodec(avctx->codec)->hw_configs) {
+ for (i = 0;; i++) {
+ hw_config = ffcodec(avctx->codec)->hw_configs[i];
+ if (!hw_config)
+ break;
+ if (hw_config->public.pix_fmt == user_choice)
+ break;
+ }
+ } else {
+ hw_config = NULL;
+ }
+
+ if (!hw_config) {
+ // No config available, so no extra setup required.
+ ret = user_choice;
+ break;
+ }
+ config = &hw_config->public;
+
+ if (config->methods &
+ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
+ avctx->hw_frames_ctx) {
+ const AVHWFramesContext *frames_ctx =
+ (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ if (frames_ctx->format != user_choice) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
+ "does not match the format of the provided frames "
+ "context.\n", desc->name);
+ goto try_again;
+ }
+ } else if (config->methods &
+ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
+ avctx->hw_device_ctx) {
+ const AVHWDeviceContext *device_ctx =
+ (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+ if (device_ctx->type != config->device_type) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
+ "does not match the type of the provided device "
+ "context.\n", desc->name);
+ goto try_again;
+ }
+ } else if (config->methods &
+ AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
+ // Internal-only setup, no additional configuration.
+ } else if (config->methods &
+ AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
+ // Some ad-hoc configuration we can't see and can't check.
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
+ "missing configuration.\n", desc->name);
+ goto try_again;
+ }
+ if (hw_config->hwaccel) {
+ av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
+ "initialisation.\n", desc->name);
+ err = hwaccel_init(avctx, hw_config);
+ if (err < 0)
+ goto try_again;
+ }
+ ret = user_choice;
+ break;
+
+ try_again:
+ av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
+ "get_format() without it.\n", desc->name);
+ for (i = 0; i < n; i++) {
+ if (choices[i] == user_choice)
+ break;
+ }
+ for (; i + 1 < n; i++)
+ choices[i] = choices[i + 1];
+ --n;
+ }
+
+ av_freep(&choices);
+ return ret;
+}
+
+static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
+{
+ size_t size;
+ const uint8_t *side_metadata;
+
+ AVDictionary **frame_md = &frame->metadata;
+
+ side_metadata = av_packet_get_side_data(avpkt,
+ AV_PKT_DATA_STRINGS_METADATA, &size);
+ return av_packet_unpack_dictionary(side_metadata, size, frame_md);
+}
+
+int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx,
+ AVFrame *frame, const AVPacket *pkt)
+{
+ static const struct {
+ enum AVPacketSideDataType packet;
+ enum AVFrameSideDataType frame;
+ } sd[] = {
+ { AV_PKT_DATA_REPLAYGAIN , AV_FRAME_DATA_REPLAYGAIN },
+ { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX },
+ { AV_PKT_DATA_SPHERICAL, AV_FRAME_DATA_SPHERICAL },
+ { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D },
+ { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
+ { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
+ { AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
+ { AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
+ { AV_PKT_DATA_ICC_PROFILE, AV_FRAME_DATA_ICC_PROFILE },
+ { AV_PKT_DATA_S12M_TIMECODE, AV_FRAME_DATA_S12M_TIMECODE },
+ { AV_PKT_DATA_DYNAMIC_HDR10_PLUS, AV_FRAME_DATA_DYNAMIC_HDR_PLUS },
+ };
+
+ frame->pts = pkt->pts;
+ frame->pkt_pos = pkt->pos;
+ frame->duration = pkt->duration;
+ frame->pkt_size = pkt->size;
+
+ for (int i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
+ size_t size;
+ uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
+ if (packet_sd) {
+ AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
+ sd[i].frame,
+ size);
+ if (!frame_sd)
+ return AVERROR(ENOMEM);
+
+ memcpy(frame_sd->data, packet_sd, size);
+ }
+ }
+ add_metadata_from_side_data(pkt, frame);
+
+ if (pkt->flags & AV_PKT_FLAG_DISCARD) {
+ frame->flags |= AV_FRAME_FLAG_DISCARD;
+ } else {
+ frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
+ }
+
+ if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
+ int ret = av_buffer_replace(&frame->opaque_ref, pkt->opaque_ref);
+ if (ret < 0)
+ return ret;
+ frame->opaque = pkt->opaque;
+ }
+
+ return 0;
+}
+
+int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
+{
+ const AVPacket *pkt = avctx->internal->last_pkt_props;
+
+ if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
+ int ret = ff_decode_frame_props_from_pkt(avctx, frame, pkt);
+ if (ret < 0)
+ return ret;
+ frame->pkt_size = (int)(intptr_t)pkt->opaque;
+ }
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
+ frame->reordered_opaque = avctx->reordered_opaque;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
+ frame->color_primaries = avctx->color_primaries;
+ if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
+ frame->color_trc = avctx->color_trc;
+ if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
+ frame->colorspace = avctx->colorspace;
+ if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
+ frame->color_range = avctx->color_range;
+ if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
+ frame->chroma_location = avctx->chroma_sample_location;
+
+ switch (avctx->codec->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ frame->format = avctx->pix_fmt;
+ if (!frame->sample_aspect_ratio.num)
+ frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
+
+ if (frame->width && frame->height &&
+ av_image_check_sar(frame->width, frame->height,
+ frame->sample_aspect_ratio) < 0) {
+ av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
+ frame->sample_aspect_ratio.num,
+ frame->sample_aspect_ratio.den);
+ frame->sample_aspect_ratio = (AVRational){ 0, 1 };
+ }
+
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ if (!frame->sample_rate)
+ frame->sample_rate = avctx->sample_rate;
+ if (frame->format < 0)
+ frame->format = avctx->sample_fmt;
+ if (!frame->ch_layout.nb_channels) {
+ int ret = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
+ if (ret < 0)
+ return ret;
+ }
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ frame->channels = frame->ch_layout.nb_channels;
+ frame->channel_layout = frame->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ frame->ch_layout.u.mask : 0;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ break;
+ }
+ return 0;
+}
+
+static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
+{
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ int i;
+ int num_planes = av_pix_fmt_count_planes(frame->format);
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ int flags = desc ? desc->flags : 0;
+ if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
+ num_planes = 2;
+ for (i = 0; i < num_planes; i++) {
+ av_assert0(frame->data[i]);
+ }
+        // For formats without data (e.g. hwaccel), allow unused pointers to be non-NULL.
+ for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
+ if (frame->data[i])
+ av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
+ frame->data[i] = NULL;
+ }
+ }
+}
+
+static void decode_data_free(void *opaque, uint8_t *data)
+{
+ FrameDecodeData *fdd = (FrameDecodeData*)data;
+
+ if (fdd->post_process_opaque_free)
+ fdd->post_process_opaque_free(fdd->post_process_opaque);
+
+ if (fdd->hwaccel_priv_free)
+ fdd->hwaccel_priv_free(fdd->hwaccel_priv);
+
+ av_freep(&fdd);
+}
+
+int ff_attach_decode_data(AVFrame *frame)
+{
+ AVBufferRef *fdd_buf;
+ FrameDecodeData *fdd;
+
+ av_assert1(!frame->private_ref);
+ av_buffer_unref(&frame->private_ref);
+
+ fdd = av_mallocz(sizeof(*fdd));
+ if (!fdd)
+ return AVERROR(ENOMEM);
+
+ fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
+ NULL, AV_BUFFER_FLAG_READONLY);
+ if (!fdd_buf) {
+ av_freep(&fdd);
+ return AVERROR(ENOMEM);
+ }
+
+ frame->private_ref = fdd_buf;
+
+ return 0;
+}
+
+int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
+{
+ const AVHWAccel *hwaccel = avctx->hwaccel;
+ int override_dimensions = 1;
+ int ret;
+
+ av_assert0(av_codec_is_decoder(avctx->codec));
+
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if ((unsigned)avctx->width > INT_MAX - STRIDE_ALIGN ||
+ (ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
+ av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if (frame->width <= 0 || frame->height <= 0) {
+ frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
+ frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
+ override_dimensions = 0;
+ }
+
+ if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
+ av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ /* compat layer for old-style get_buffer() implementations */
+ avctx->channels = avctx->ch_layout.nb_channels;
+ avctx->channel_layout = (avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE) ?
+ avctx->ch_layout.u.mask : 0;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (frame->nb_samples * (int64_t)avctx->ch_layout.nb_channels > avctx->max_samples) {
+ av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ }
+ ret = ff_decode_frame_props(avctx, frame);
+ if (ret < 0)
+ goto fail;
+
+ if (hwaccel) {
+ if (hwaccel->alloc_frame) {
+ ret = hwaccel->alloc_frame(avctx, frame);
+ goto end;
+ }
+ } else
+ avctx->sw_pix_fmt = avctx->pix_fmt;
+
+ ret = avctx->get_buffer2(avctx, frame, flags);
+ if (ret < 0)
+ goto fail;
+
+ validate_avframe_allocation(avctx, frame);
+
+ ret = ff_attach_decode_data(frame);
+ if (ret < 0)
+ goto fail;
+
+end:
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
+ !(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
+ frame->width = avctx->width;
+ frame->height = avctx->height;
+ }
+
+fail:
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ av_frame_unref(frame);
+ }
+
+ return ret;
+}
+
+static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
+{
+ AVFrame *tmp;
+ int ret;
+
+ av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
+
+ if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
+ av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
+ frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
+ av_frame_unref(frame);
+ }
+
+ if (!frame->data[0])
+ return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
+
+ if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame))
+ return ff_decode_frame_props(avctx, frame);
+
+ tmp = av_frame_alloc();
+ if (!tmp)
+ return AVERROR(ENOMEM);
+
+ av_frame_move_ref(tmp, frame);
+
+ ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
+ if (ret < 0) {
+ av_frame_free(&tmp);
+ return ret;
+ }
+
+ av_frame_copy(frame, tmp);
+ av_frame_free(&tmp);
+
+ return 0;
+}
+
+int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
+{
+ int ret = reget_buffer_internal(avctx, frame, flags);
+ if (ret < 0)
+ av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
+ return ret;
+}
+
+int ff_decode_preinit(AVCodecContext *avctx)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret = 0;
+
+ /* if the decoder init function was already called previously,
+ * free the already allocated subtitle_header before overwriting it */
+ av_freep(&avctx->subtitle_header);
+
+ if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) {
+ av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
+ avctx->codec->max_lowres);
+ avctx->lowres = avctx->codec->max_lowres;
+ }
+ if (avctx->sub_charenc) {
+ if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) {
+ av_log(avctx, AV_LOG_ERROR, "Character encoding is only "
+ "supported with subtitles codecs\n");
+ return AVERROR(EINVAL);
+ } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) {
+ av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, "
+ "subtitles character encoding will be ignored\n",
+ avctx->codec_descriptor->name);
+ avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING;
+ } else {
+ /* input character encoding is set for a text based subtitle
+ * codec at this point */
+ if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC)
+ avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER;
+
+ if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) {
+#if CONFIG_ICONV
+ iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc);
+ if (cd == (iconv_t)-1) {
+ ret = AVERROR(errno);
+ av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context "
+ "with input character encoding \"%s\"\n", avctx->sub_charenc);
+ return ret;
+ }
+ iconv_close(cd);
+#else
+ av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles "
+ "conversion needs a libavcodec built with iconv support "
+ "for this codec\n");
+ return AVERROR(ENOSYS);
+#endif
+ }
+ }
+ }
+
+ avctx->pts_correction_num_faulty_pts =
+ avctx->pts_correction_num_faulty_dts = 0;
+ avctx->pts_correction_last_pts =
+ avctx->pts_correction_last_dts = INT64_MIN;
+
+ if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY
+ && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO)
+ av_log(avctx, AV_LOG_WARNING,
+ "gray decoding requested but not enabled at configuration time\n");
+ if (avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) {
+ avctx->export_side_data |= AV_CODEC_EXPORT_DATA_MVS;
+ }
+
+ avci->in_pkt = av_packet_alloc();
+ avci->last_pkt_props = av_packet_alloc();
+ if (!avci->in_pkt || !avci->last_pkt_props)
+ return AVERROR(ENOMEM);
+
+ ret = decode_bsfs_init(avctx);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int ff_copy_palette(void *dst, const AVPacket *src, void *logctx)
+{
+ size_t size;
+ const void *pal = av_packet_get_side_data(src, AV_PKT_DATA_PALETTE, &size);
+
+ if (pal && size == AVPALETTE_SIZE) {
+ memcpy(dst, pal, AVPALETTE_SIZE);
+ return 1;
+ } else if (pal) {
+ av_log(logctx, AV_LOG_ERROR,
+ "Palette size %"SIZE_SPECIFIER" is wrong\n", size);
+ }
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/decode.h b/media/ffvpx/libavcodec/decode.h
new file mode 100644
index 0000000000..8430ffbd66
--- /dev/null
+++ b/media/ffvpx/libavcodec/decode.h
@@ -0,0 +1,153 @@
+/*
+ * generic decoding-related code
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DECODE_H
+#define AVCODEC_DECODE_H
+
+#include "libavutil/buffer.h"
+#include "libavutil/frame.h"
+#include "libavutil/hwcontext.h"
+
+#include "avcodec.h"
+
+/**
+ * This struct stores per-frame lavc-internal data and is attached to it via
+ * private_ref.
+ */
+typedef struct FrameDecodeData {
+ /**
+ * The callback to perform some delayed processing on the frame right
+ * before it is returned to the caller.
+ *
+ * @note This code is called at some unspecified point after the frame is
+ * returned from the decoder's decode/receive_frame call. Therefore it cannot rely
+ * on AVCodecContext being in any specific state, so it does not get to
+ * access AVCodecContext directly at all. All the state it needs must be
+ * stored in the post_process_opaque object.
+ */
+ int (*post_process)(void *logctx, AVFrame *frame);
+ void *post_process_opaque;
+ void (*post_process_opaque_free)(void *opaque);
+
+ /**
+ * Per-frame private data for hwaccels.
+ */
+ void *hwaccel_priv;
+ void (*hwaccel_priv_free)(void *priv);
+} FrameDecodeData;
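+
+/* Hedged sketch (hypothetical decoder code; names other than the fields above
+ * are illustrative): once ff_attach_decode_data() has attached a
+ * FrameDecodeData to a frame via private_ref, delayed per-frame work can be
+ * registered roughly like this:
+ *
+ *     FrameDecodeData *fdd = (FrameDecodeData *)frame->private_ref->data;
+ *     fdd->post_process             = my_post_process; // run before return to caller
+ *     fdd->post_process_opaque      = my_state;        // all needed state lives here
+ *     fdd->post_process_opaque_free = my_state_free;   // released with the frame
+ */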
+
+/**
+ * avcodec_receive_frame() implementation for decoders.
+ */
+int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame);
+
+/**
+ * Called by decoders to get the next packet for decoding.
+ *
+ * @param pkt An empty packet to be filled with data.
+ * @return 0 if a new reference has been successfully written to pkt
+ * AVERROR(EAGAIN) if no data is currently available
+ * AVERROR_EOF if the end of the stream has been reached, so no more data
+ * will be available
+ */
+int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt);
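+
+/* Hedged sketch of how a decoder with its own receive_frame callback might
+ * drive ff_decode_get_packet() (illustrative only, not a fixed pattern):
+ *
+ *     ret = ff_decode_get_packet(avctx, avctx->internal->in_pkt);
+ *     if (ret < 0 && ret != AVERROR_EOF)
+ *         return ret;               // AVERROR(EAGAIN): needs more input
+ *     // on AVERROR_EOF, keep decoding with an empty packet to drain
+ */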
+
+/**
+ * Set various frame properties from the provided packet.
+ */
+int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx,
+ AVFrame *frame, const AVPacket *pkt);
+
+/**
+ * Set various frame properties from the codec context / packet data.
+ */
+int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame);
+
+/**
+ * Make sure avctx.hw_frames_ctx is set. If it's not set, the function will
+ * try to allocate it from hw_device_ctx. If that is not possible, an error
+ * message is printed, and an error code is returned.
+ */
+int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
+ enum AVHWDeviceType dev_type);
+
+int ff_attach_decode_data(AVFrame *frame);
+
+/**
+ * Check whether the side-data of src contains a palette of
+ * size AVPALETTE_SIZE; if so, copy it to dst and return 1;
+ * else return 0.
+ * Also emit an error message upon encountering a palette
+ * with invalid size.
+ */
+int ff_copy_palette(void *dst, const AVPacket *src, void *logctx);
+
+/**
+ * Perform decoder initialization and validation.
+ * Called when opening the decoder, before the FFCodec.init() call.
+ */
+int ff_decode_preinit(AVCodecContext *avctx);
+
+/**
+ * Check that the provided frame dimensions are valid and set them on the codec
+ * context.
+ */
+int ff_set_dimensions(AVCodecContext *s, int width, int height);
+
+/**
+ * Check that the provided sample aspect ratio is valid and set it on the codec
+ * context.
+ */
+int ff_set_sar(AVCodecContext *avctx, AVRational sar);
+
+/**
+ * Select the (possibly hardware accelerated) pixel format.
+ * This is a wrapper around AVCodecContext.get_format() and should be used
+ * instead of calling get_format() directly.
+ *
+ * The list of pixel formats must contain at least one valid entry, and is
+ * terminated with AV_PIX_FMT_NONE. If it is possible to decode to software,
+ * the last entry in the list must be the most accurate software format.
+ * If it is not possible to decode to software, AVCodecContext.sw_pix_fmt
+ * must be set before calling this function.
+ */
+int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt);
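+
+/* Hedged example of a format list a decoder might pass to ff_get_format()
+ * (formats chosen for illustration; the list is AV_PIX_FMT_NONE-terminated
+ * and the last real entry is the preferred software format, as documented
+ * above):
+ *
+ *     static const enum AVPixelFormat pix_fmts[] = {
+ *         AV_PIX_FMT_VAAPI, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
+ *     };
+ *     avctx->pix_fmt = ff_get_format(avctx, pix_fmts);
+ */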
+
+/**
+ * Get a buffer for a frame. This is a wrapper around
+ * AVCodecContext.get_buffer2() and should be used instead of calling
+ * get_buffer2() directly.
+ */
+int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
+
+#define FF_REGET_BUFFER_FLAG_READONLY 1 ///< the returned buffer does not need to be writable
+/**
+ * Identical in function to ff_get_buffer(), except it reuses the existing buffer
+ * if available.
+ */
+int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
+
+/**
+ * Add or update AV_FRAME_DATA_MATRIXENCODING side data.
+ */
+int ff_side_data_update_matrix_encoding(AVFrame *frame,
+ enum AVMatrixEncoding matrix_encoding);
+
+#endif /* AVCODEC_DECODE_H */
diff --git a/media/ffvpx/libavcodec/defs.h b/media/ffvpx/libavcodec/defs.h
new file mode 100644
index 0000000000..fbe3254db2
--- /dev/null
+++ b/media/ffvpx/libavcodec/defs.h
@@ -0,0 +1,192 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DEFS_H
+#define AVCODEC_DEFS_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Misc types and constants that do not belong anywhere else.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/**
+ * @ingroup lavc_decoding
+ * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
+ * This is mainly needed because some optimized bitstream readers read
+ * 32 or 64 bit at once and could read over the end.<br>
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define AV_INPUT_BUFFER_PADDING_SIZE 64
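+
+/* Hedged usage sketch: code allocating its own input buffers is expected to
+ * over-allocate by this amount and zero the padding, e.g.:
+ *
+ *     uint8_t *buf = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
+ *     if (!buf)
+ *         return AVERROR(ENOMEM);
+ *     memcpy(buf, src, size);
+ *     memset(buf + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+ */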
+
+/**
+ * Verify checksums embedded in the bitstream (could be of either encoded or
+ * decoded data, depending on the format) and print an error message on mismatch.
+ * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the
+ * decoder/demuxer returning an error.
+ */
+#define AV_EF_CRCCHECK (1<<0)
+#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations
+#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length
+#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection
+
+#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue
+#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors
+#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliances as errors
+#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder/muxer should not do as an error
+
+#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
+#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
+#define FF_COMPLIANCE_NORMAL 0
+#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
+#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
+
+/**
+ * @ingroup lavc_decoding
+ */
+enum AVDiscard{
+ /* We leave some space between them for extensions (drop some
+ * keyframes for intra-only or drop just some bidir frames). */
+ AVDISCARD_NONE =-16, ///< discard nothing
+ AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
+ AVDISCARD_NONREF = 8, ///< discard all non reference
+ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
+ AVDISCARD_NONINTRA= 24, ///< discard all non intra frames
+ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
+ AVDISCARD_ALL = 48, ///< discard all
+};
+
+enum AVAudioServiceType {
+ AV_AUDIO_SERVICE_TYPE_MAIN = 0,
+ AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
+ AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
+ AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
+ AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
+ AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
+ AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
+ AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
+ AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
+ AV_AUDIO_SERVICE_TYPE_NB, ///< Not part of ABI
+};
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan {
+ /**
+ * id
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int id;
+
+ /**
+ * width and height in 1/16 pel
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int width;
+ int height;
+
+ /**
+ * position of the top left corner in 1/16 pel for up to 3 fields/frames
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t position[3][2];
+} AVPanScan;
+
+/**
+ * This structure describes the bitrate properties of an encoded bitstream. It
+ * roughly corresponds to a subset of the VBV parameters for MPEG-2 or the
+ * HRD parameters for H.264/HEVC.
+ */
+typedef struct AVCPBProperties {
+ /**
+ * Maximum bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t max_bitrate;
+ /**
+ * Minimum bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t min_bitrate;
+ /**
+ * Average bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t avg_bitrate;
+
+ /**
+ * The size of the buffer to which the ratecontrol is applied, in bits.
+ * Zero if unknown or unspecified.
+ */
+ int64_t buffer_size;
+
+ /**
+ * The delay between the time the packet this structure is associated with
+ * is received and the time when it should be decoded, in periods of a 27MHz
+ * clock.
+ *
+ * UINT64_MAX when unknown or unspecified.
+ */
+ uint64_t vbv_delay;
+} AVCPBProperties;
+
+/**
+ * Allocate a CPB properties structure and initialize its fields to default
+ * values.
+ *
+ * @param size if non-NULL, the size of the allocated struct will be written
+ * here. This is useful for embedding it in side data.
+ *
+ * @return the newly allocated struct or NULL on failure
+ */
+AVCPBProperties *av_cpb_properties_alloc(size_t *size);
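+
+/* Illustrative sketch (not part of the upstream header): allocate the struct,
+ * fill in what is known and keep `size` around for when the struct is later
+ * embedded as side data (the 4 Mb/s value is purely hypothetical):
+ *
+ *     size_t size;
+ *     AVCPBProperties *props = av_cpb_properties_alloc(&size);
+ *     if (props)
+ *         props->max_bitrate = 4000000;
+ */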
+
+/**
+ * This structure supplies correlation between a packet timestamp and a wall clock
+ * production time. The definition follows the Producer Reference Time ('prft')
+ * as defined in ISO/IEC 14496-12
+ */
+typedef struct AVProducerReferenceTime {
+ /**
+ * A UTC timestamp, in microseconds, since the Unix epoch (e.g., av_gettime()).
+ */
+ int64_t wallclock;
+ int flags;
+} AVProducerReferenceTime;
+
+/**
+ * Encode extradata length to a buffer. Used by xiph codecs.
+ *
+ * @param s buffer to write to; must be at least (v/255+1) bytes long
+ * @param v size of extradata in bytes
+ * @return number of bytes written to the buffer.
+ */
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
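+
+/* Illustrative sketch (not part of the upstream header): Xiph lacing stores a
+ * length as v/255 bytes of 255 followed by the remainder, so v = 3000 becomes
+ * 11 bytes of 255 plus one byte of 195:
+ *
+ *     unsigned char lace[12];                      // at least v/255 + 1 bytes
+ *     unsigned int n = av_xiphlacing(lace, 3000);  // n == 12
+ */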
+
+#endif // AVCODEC_DEFS_H
diff --git a/media/ffvpx/libavcodec/encode.c b/media/ffvpx/libavcodec/encode.c
new file mode 100644
index 0000000000..041fc7670e
--- /dev/null
+++ b/media/ffvpx/libavcodec/encode.c
@@ -0,0 +1,774 @@
+/*
+ * generic encoding-related code
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/frame.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/samplefmt.h"
+
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "encode.h"
+#include "frame_thread_encoder.h"
+#include "internal.h"
+
+int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
+{
+ if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
+ size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
+ return AVERROR(EINVAL);
+ }
+
+ av_assert0(!avpkt->data);
+
+ av_fast_padded_malloc(&avctx->internal->byte_buffer,
+ &avctx->internal->byte_buffer_size, size);
+ avpkt->data = avctx->internal->byte_buffer;
+ if (!avpkt->data) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
+ return AVERROR(ENOMEM);
+ }
+ avpkt->size = size;
+
+ return 0;
+}
+
+int avcodec_default_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int flags)
+{
+ int ret;
+
+ if (avpkt->size < 0 || avpkt->size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR(EINVAL);
+
+ if (avpkt->data || avpkt->buf) {
+ av_log(avctx, AV_LOG_ERROR, "avpkt->{data,buf} != NULL in avcodec_default_get_encode_buffer()\n");
+ return AVERROR(EINVAL);
+ }
+
+ ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %d\n", avpkt->size);
+ return ret;
+ }
+ avpkt->data = avpkt->buf->data;
+
+ return 0;
+}
+
+int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
+{
+ int ret;
+
+ if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR(EINVAL);
+
+ av_assert0(!avpkt->data && !avpkt->buf);
+
+ avpkt->size = size;
+ ret = avctx->get_encode_buffer(avctx, avpkt, flags);
+ if (ret < 0)
+ goto fail;
+
+ if (!avpkt->data || !avpkt->buf) {
+ av_log(avctx, AV_LOG_ERROR, "No buffer returned by get_encode_buffer()\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ memset(avpkt->data + avpkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+ ret = 0;
+fail:
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_encode_buffer() failed\n");
+ av_packet_unref(avpkt);
+ }
+
+ return ret;
+}
+
+static int encode_make_refcounted(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ uint8_t *data = avpkt->data;
+ int ret;
+
+ if (avpkt->buf)
+ return 0;
+
+ avpkt->data = NULL;
+ ret = ff_get_encode_buffer(avctx, avpkt, avpkt->size, 0);
+ if (ret < 0)
+ return ret;
+ memcpy(avpkt->data, data, avpkt->size);
+
+ return 0;
+}
+
+/**
+ * Pad last frame with silence.
+ */
+static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src, int out_samples)
+{
+ int ret;
+
+ frame->format = src->format;
+ frame->nb_samples = out_samples;
+ ret = av_channel_layout_copy(&frame->ch_layout, &s->ch_layout);
+ if (ret < 0)
+ goto fail;
+ ret = av_frame_get_buffer(frame, 0);
+ if (ret < 0)
+ goto fail;
+
+ ret = av_frame_copy_props(frame, src);
+ if (ret < 0)
+ goto fail;
+
+ if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
+ src->nb_samples, s->ch_layout.nb_channels,
+ s->sample_fmt)) < 0)
+ goto fail;
+ if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
+ frame->nb_samples - src->nb_samples,
+ s->ch_layout.nb_channels, s->sample_fmt)) < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ av_frame_unref(frame);
+ s->internal->last_audio_frame = 0;
+ return ret;
+}
+
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub)
+{
+ int ret;
+ if (sub->start_display_time) {
+ av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
+ return -1;
+ }
+
+ ret = ffcodec(avctx->codec)->cb.encode_sub(avctx, buf, buf_size, sub);
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ return ret;
+}
+
+int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+
+ if (avci->draining)
+ return AVERROR_EOF;
+
+ if (!avci->buffer_frame->buf[0])
+ return AVERROR(EAGAIN);
+
+ av_frame_move_ref(frame, avci->buffer_frame);
+
+ return 0;
+}
+
+int ff_encode_reordered_opaque(AVCodecContext *avctx,
+ AVPacket *pkt, const AVFrame *frame)
+{
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->reordered_opaque = frame->reordered_opaque;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
+ int ret = av_buffer_replace(&pkt->opaque_ref, frame->opaque_ref);
+ if (ret < 0)
+ return ret;
+ pkt->opaque = frame->opaque;
+ }
+
+ return 0;
+}
+
+int ff_encode_encode_cb(AVCodecContext *avctx, AVPacket *avpkt,
+ AVFrame *frame, int *got_packet)
+{
+ const FFCodec *const codec = ffcodec(avctx->codec);
+ int ret;
+
+ ret = codec->cb.encode(avctx, avpkt, frame, got_packet);
+ emms_c();
+ av_assert0(ret <= 0);
+
+ if (!ret && *got_packet) {
+ if (avpkt->data) {
+ ret = encode_make_refcounted(avctx, avpkt);
+ if (ret < 0)
+ goto unref;
+ // Data returned by encoders must always be ref-counted
+ av_assert0(avpkt->buf);
+ }
+
+ // set the timestamps for the simple no-delay case
+ // encoders with delay have to set the timestamps themselves
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) ||
+ (frame && (codec->caps_internal & FF_CODEC_CAP_EOF_FLUSH))) {
+ if (avpkt->pts == AV_NOPTS_VALUE)
+ avpkt->pts = frame->pts;
+
+ if (!avpkt->duration) {
+ if (frame->duration)
+ avpkt->duration = frame->duration;
+ else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
+ avpkt->duration = ff_samples_to_time_base(avctx,
+ frame->nb_samples);
+ }
+ }
+
+ ret = ff_encode_reordered_opaque(avctx, avpkt, frame);
+ if (ret < 0)
+ goto unref;
+ }
+
+ // dts equals pts unless there is reordering
+ // there can be no reordering if there is no encoder delay
+ if (!(avctx->codec_descriptor->props & AV_CODEC_PROP_REORDER) ||
+ !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) ||
+ (codec->caps_internal & FF_CODEC_CAP_EOF_FLUSH))
+ avpkt->dts = avpkt->pts;
+ } else {
+unref:
+ av_packet_unref(avpkt);
+ }
+
+ if (frame)
+ av_frame_unref(frame);
+
+ return ret;
+}
+
+static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ AVFrame *frame = avci->in_frame;
+ const FFCodec *const codec = ffcodec(avctx->codec);
+ int got_packet;
+ int ret;
+
+ if (avci->draining_done)
+ return AVERROR_EOF;
+
+ if (!frame->buf[0] && !avci->draining) {
+ av_frame_unref(frame);
+ ret = ff_encode_get_frame(avctx, frame);
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+ }
+
+ if (!frame->buf[0]) {
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
+ avci->frame_thread_encoder))
+ return AVERROR_EOF;
+
+ // Flushing is signaled with a NULL frame
+ frame = NULL;
+ }
+
+ got_packet = 0;
+
+ av_assert0(codec->cb_type == FF_CODEC_CB_TYPE_ENCODE);
+
+ if (CONFIG_FRAME_THREAD_ENCODER && avci->frame_thread_encoder)
+ /* This will unref frame. */
+ ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
+ else {
+ ret = ff_encode_encode_cb(avctx, avpkt, frame, &got_packet);
+ }
+
+ if (avci->draining && !got_packet)
+ avci->draining_done = 1;
+
+ return ret;
+}
+
+static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ int ret;
+
+ while (!avpkt->data && !avpkt->side_data) {
+ ret = encode_simple_internal(avctx, avpkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ if (avci->draining_done)
+ return AVERROR_EOF;
+
+ av_assert0(!avpkt->data && !avpkt->side_data);
+
+ if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
+ if ((avctx->flags & AV_CODEC_FLAG_PASS1) && avctx->stats_out)
+ avctx->stats_out[0] = '\0';
+ if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
+ return AVERROR(EINVAL);
+ }
+
+ if (ffcodec(avctx->codec)->cb_type == FF_CODEC_CB_TYPE_RECEIVE_PACKET) {
+ ret = ffcodec(avctx->codec)->cb.receive_packet(avctx, avpkt);
+ if (ret < 0)
+ av_packet_unref(avpkt);
+ else
+ // Encoders must always return ref-counted buffers.
+ // Side-data-only packets have no data and need not be ref-counted.
+ av_assert0(!avpkt->data || avpkt->buf);
+ } else
+ ret = encode_simple_receive_packet(avctx, avpkt);
+ if (ret >= 0)
+ avpkt->flags |= avci->intra_only_flag;
+
+ if (ret == AVERROR_EOF)
+ avci->draining_done = 1;
+
+ return ret;
+}
+
+#if CONFIG_LCMS2
+static int encode_generate_icc_profile(AVCodecContext *avctx, AVFrame *frame)
+{
+ enum AVColorTransferCharacteristic trc = frame->color_trc;
+ enum AVColorPrimaries prim = frame->color_primaries;
+ const FFCodec *const codec = ffcodec(avctx->codec);
+ AVCodecInternal *avci = avctx->internal;
+ cmsHPROFILE profile;
+ int ret;
+
+ /* don't generate ICC profiles if disabled or unsupported */
+ if (!(avctx->flags2 & AV_CODEC_FLAG2_ICC_PROFILES))
+ return 0;
+ if (!(codec->caps_internal & FF_CODEC_CAP_ICC_PROFILES))
+ return 0;
+
+ if (trc == AVCOL_TRC_UNSPECIFIED)
+ trc = avctx->color_trc;
+ if (prim == AVCOL_PRI_UNSPECIFIED)
+ prim = avctx->color_primaries;
+ if (trc == AVCOL_TRC_UNSPECIFIED || prim == AVCOL_PRI_UNSPECIFIED)
+ return 0; /* can't generate ICC profile with missing csp tags */
+
+ if (av_frame_get_side_data(frame, AV_FRAME_DATA_ICC_PROFILE))
+ return 0; /* don't overwrite existing ICC profile */
+
+ if (!avci->icc.avctx) {
+ ret = ff_icc_context_init(&avci->icc, avctx);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = ff_icc_profile_generate(&avci->icc, prim, trc, &profile);
+ if (ret < 0)
+ return ret;
+
+ ret = ff_icc_profile_attach(&avci->icc, profile, frame);
+ cmsCloseProfile(profile);
+ return ret;
+}
+#else /* !CONFIG_LCMS2 */
+static int encode_generate_icc_profile(av_unused AVCodecContext *c, av_unused AVFrame *f)
+{
+ return 0;
+}
+#endif
+
+static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
+{
+ AVCodecInternal *avci = avctx->internal;
+ AVFrame *dst = avci->buffer_frame;
+ int ret;
+
+ if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
+ /* extract audio service type metadata */
+ AVFrameSideData *sd = av_frame_get_side_data(src, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
+ if (sd && sd->size >= sizeof(enum AVAudioServiceType))
+ avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
+
+ /* check for valid frame size */
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
+ /* if we already got an undersized frame, that must have been the last */
+ if (avctx->internal->last_audio_frame) {
+ av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
+ return AVERROR(EINVAL);
+ }
+ if (src->nb_samples > avctx->frame_size) {
+ av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) > frame_size (%d)\n", src->nb_samples, avctx->frame_size);
+ return AVERROR(EINVAL);
+ }
+ if (src->nb_samples < avctx->frame_size) {
+ avctx->internal->last_audio_frame = 1;
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME)) {
+ int pad_samples = avci->pad_samples ? avci->pad_samples : avctx->frame_size;
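+ /* round nb_samples up to the next multiple of pad_samples */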
+ int out_samples = (src->nb_samples + pad_samples - 1) / pad_samples * pad_samples;
+
+ if (out_samples != src->nb_samples) {
+ ret = pad_last_frame(avctx, dst, src, out_samples);
+ if (ret < 0)
+ return ret;
+ goto finish;
+ }
+ }
+ }
+ }
+ }
+
+ ret = av_frame_ref(dst, src);
+ if (ret < 0)
+ return ret;
+
+finish:
+
+#if FF_API_PKT_DURATION
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (dst->pkt_duration && dst->pkt_duration != dst->duration)
+ dst->duration = dst->pkt_duration;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
+ ret = encode_generate_icc_profile(avctx, dst);
+ if (ret < 0)
+ return ret;
+ }
+
+ // unset frame duration unless AV_CODEC_FLAG_FRAME_DURATION is set,
+ // since otherwise we cannot be sure that whatever value it has is in the
+ // right timebase, so we would produce an incorrect value, which is worse
+ // than none at all
+ if (!(avctx->flags & AV_CODEC_FLAG_FRAME_DURATION))
+ dst->duration = 0;
+
+ return 0;
+}
+
+int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avci->draining)
+ return AVERROR_EOF;
+
+ if (avci->buffer_frame->buf[0])
+ return AVERROR(EAGAIN);
+
+ if (!frame) {
+ avci->draining = 1;
+ } else {
+ ret = encode_send_frame_internal(avctx, frame);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
+ ret = encode_receive_packet_internal(avctx, avci->buffer_pkt);
+ if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+ return ret;
+ }
+
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ return 0;
+}
+
+int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ av_packet_unref(avpkt);
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avci->buffer_pkt->data || avci->buffer_pkt->side_data) {
+ av_packet_move_ref(avpkt, avci->buffer_pkt);
+ } else {
+ ret = encode_receive_packet_internal(avctx, avpkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
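+
+/* Illustrative sketch (not part of the upstream file): the send/receive API
+ * above is normally driven by a loop of roughly this shape, where a NULL
+ * `frame` puts the encoder into draining mode:
+ *
+ *     int ret = avcodec_send_frame(enc_ctx, frame);
+ *     if (ret < 0)
+ *         return ret;
+ *     while (ret >= 0) {
+ *         ret = avcodec_receive_packet(enc_ctx, pkt);
+ *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ *             break;
+ *         if (ret < 0)
+ *             return ret;
+ *         // ... consume pkt ...
+ *         av_packet_unref(pkt);
+ *     }
+ */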
+
+static int encode_preinit_video(AVCodecContext *avctx)
+{
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
+ int i;
+
+ if (avctx->codec->pix_fmts) {
+ for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
+ if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
+ break;
+ if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
+ av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
+ (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
+ return AVERROR(EINVAL);
+ }
+ if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
+ avctx->color_range = AVCOL_RANGE_JPEG;
+ }
+
+ if ( avctx->bits_per_raw_sample < 0
+ || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
+ av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n",
+ avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
+ avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
+ }
+ if (avctx->width <= 0 || avctx->height <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->ticks_per_frame && avctx->time_base.num &&
+ avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
+ av_log(avctx, AV_LOG_ERROR,
+ "ticks_per_frame %d too large for the timebase %d/%d.",
+ avctx->ticks_per_frame,
+ avctx->time_base.num,
+ avctx->time_base.den);
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ if (frames_ctx->format != avctx->pix_fmt) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n");
+ return AVERROR(EINVAL);
+ }
+ if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE &&
+ avctx->sw_pix_fmt != frames_ctx->sw_format) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Mismatching AVCodecContext.sw_pix_fmt (%s) "
+ "and AVHWFramesContext.sw_format (%s)\n",
+ av_get_pix_fmt_name(avctx->sw_pix_fmt),
+ av_get_pix_fmt_name(frames_ctx->sw_format));
+ return AVERROR(EINVAL);
+ }
+ avctx->sw_pix_fmt = frames_ctx->sw_format;
+ }
+
+ return 0;
+}
+
+static int encode_preinit_audio(AVCodecContext *avctx)
+{
+ int i;
+
+ if (avctx->codec->sample_fmts) {
+ for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
+ if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
+ break;
+ if (avctx->ch_layout.nb_channels == 1 &&
+ av_get_planar_sample_fmt(avctx->sample_fmt) ==
+ av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
+ avctx->sample_fmt = avctx->codec->sample_fmts[i];
+ break;
+ }
+ }
+ if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
+ av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
+ (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
+ return AVERROR(EINVAL);
+ }
+ }
+ if (avctx->codec->supported_samplerates) {
+ for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
+ if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
+ break;
+ if (avctx->codec->supported_samplerates[i] == 0) {
+ av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
+ avctx->sample_rate);
+ return AVERROR(EINVAL);
+ }
+ }
+ if (avctx->sample_rate < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
+ avctx->sample_rate);
+ return AVERROR(EINVAL);
+ }
+ if (avctx->codec->ch_layouts) {
+ for (i = 0; avctx->codec->ch_layouts[i].nb_channels; i++) {
+ if (!av_channel_layout_compare(&avctx->ch_layout, &avctx->codec->ch_layouts[i]))
+ break;
+ }
+ if (!avctx->codec->ch_layouts[i].nb_channels) {
+ char buf[512];
+ int ret = av_channel_layout_describe(&avctx->ch_layout, buf, sizeof(buf));
+ if (ret > 0)
+ av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ if (!avctx->bits_per_raw_sample)
+ avctx->bits_per_raw_sample = 8 * av_get_bytes_per_sample(avctx->sample_fmt);
+
+ return 0;
+}
+
+int ff_encode_preinit(AVCodecContext *avctx)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret = 0;
+
+ if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE &&
+ !(avctx->codec->capabilities & AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE)) {
+ av_log(avctx, AV_LOG_ERROR, "The copy_opaque flag is set, but the "
+ "encoder does not support it.\n");
+ return AVERROR(EINVAL);
+ }
+
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO: ret = encode_preinit_video(avctx); break;
+ case AVMEDIA_TYPE_AUDIO: ret = encode_preinit_audio(avctx); break;
+ }
+ if (ret < 0)
+ return ret;
+
+ if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
+ && avctx->bit_rate>0 && avctx->bit_rate<1000) {
+ av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
+ }
+
+ if (!avctx->rc_initial_buffer_occupancy)
+ avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;
+
+ if (avctx->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY)
+ avctx->internal->intra_only_flag = AV_PKT_FLAG_KEY;
+
+ if (ffcodec(avctx->codec)->cb_type == FF_CODEC_CB_TYPE_ENCODE) {
+ avci->in_frame = av_frame_alloc();
+ if (!avci->in_frame)
+ return AVERROR(ENOMEM);
+ }
+
+ if ((avctx->flags & AV_CODEC_FLAG_RECON_FRAME)) {
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_ENCODER_RECON_FRAME)) {
+ av_log(avctx, AV_LOG_ERROR, "Reconstructed frame output requested "
+ "from an encoder not supporting it\n");
+ return AVERROR(ENOSYS);
+ }
+
+ avci->recon_frame = av_frame_alloc();
+ if (!avci->recon_frame)
+ return AVERROR(ENOMEM);
+ }
+
+ if (CONFIG_FRAME_THREAD_ENCODER) {
+ ret = ff_frame_thread_encoder_init(avctx);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ int ret;
+
+ switch (avctx->codec->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ frame->format = avctx->pix_fmt;
+ if (frame->width <= 0 || frame->height <= 0) {
+ frame->width = FFMAX(avctx->width, avctx->coded_width);
+ frame->height = FFMAX(avctx->height, avctx->coded_height);
+ }
+
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ frame->sample_rate = avctx->sample_rate;
+ frame->format = avctx->sample_fmt;
+ if (!frame->ch_layout.nb_channels) {
+ ret = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ }
+
+ ret = avcodec_default_get_buffer2(avctx, frame, 0);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ av_frame_unref(frame);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ff_encode_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+
+ if (!avci->recon_frame)
+ return AVERROR(EINVAL);
+ if (!avci->recon_frame->buf[0])
+ return avci->draining_done ? AVERROR_EOF : AVERROR(EAGAIN);
+
+ av_frame_move_ref(frame, avci->recon_frame);
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/encode.h b/media/ffvpx/libavcodec/encode.h
new file mode 100644
index 0000000000..26a3304045
--- /dev/null
+++ b/media/ffvpx/libavcodec/encode.h
@@ -0,0 +1,99 @@
+/*
+ * generic encoding-related code
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ENCODE_H
+#define AVCODEC_ENCODE_H
+
+#include "libavutil/frame.h"
+
+#include "avcodec.h"
+#include "packet.h"
+
+/**
+ * avcodec_receive_frame() implementation for encoders.
+ */
+int ff_encode_receive_frame(AVCodecContext *avctx, AVFrame *frame);
+
+/**
+ * Called by encoders to get the next frame for encoding.
+ *
+ * @param frame An empty frame to be filled with data.
+ * @return 0 if a new reference has been successfully written to frame
+ * AVERROR(EAGAIN) if no data is currently available
+ * AVERROR_EOF if end of stream has been reached, so no more data
+ * will be available
+ */
+int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame);
+
+/**
+ * Get a buffer for a packet. This is a wrapper around
+ * AVCodecContext.get_encode_buffer() and should be used instead of calling
+ * get_encode_buffer() directly.
+ */
+int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags);
+
+/**
+ * Allocate buffers for a frame. Encoder equivalent to ff_get_buffer().
+ */
+int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame);
+
+/**
+ * Check AVPacket size and allocate data.
+ *
+ * Encoders of type FF_CODEC_CB_TYPE_ENCODE can use this as a convenience to
+ * obtain a big enough buffer for the encoded bitstream.
+ *
+ * @param avctx the AVCodecContext of the encoder
+ * @param avpkt The AVPacket: on success, avpkt->data will point to a buffer
+ * of size at least `size`; the packet will not be refcounted.
+ * This packet must be initially blank.
+ * @param size an upper bound of the size of the packet to encode
+ * @return non-negative on success, a negative error code on failure
+ */
+int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size);
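+
+/* Illustrative sketch (not part of the upstream header): a typical
+ * FF_CODEC_CB_TYPE_ENCODE callback reserves a worst-case sized packet before
+ * writing the bitstream (`bound` is a hypothetical size estimate):
+ *
+ *     int ret = ff_alloc_packet(avctx, avpkt, bound);
+ *     if (ret < 0)
+ *         return ret;
+ *     // ... write at most `bound` bytes to avpkt->data ...
+ */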
+
+/**
+ * Propagate user opaque values from the frame to avctx/pkt as needed.
+ */
+int ff_encode_reordered_opaque(AVCodecContext *avctx,
+ AVPacket *pkt, const AVFrame *frame);
+
+/**
+ * Perform encoder initialization and validation.
+ * Called when opening the encoder, before the FFCodec.init() call.
+ */
+int ff_encode_preinit(AVCodecContext *avctx);
+
+int ff_encode_encode_cb(AVCodecContext *avctx, AVPacket *avpkt,
+ AVFrame *frame, int *got_packet);
+
+/**
+ * Rescale from sample rate to AVCodecContext.time_base.
+ */
+static av_always_inline int64_t ff_samples_to_time_base(const AVCodecContext *avctx,
+ int64_t samples)
+{
+ if (samples == AV_NOPTS_VALUE)
+ return AV_NOPTS_VALUE;
+ return av_rescale_q(samples, (AVRational){ 1, avctx->sample_rate },
+ avctx->time_base);
+}
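+
+/* Worked example (illustrative, not from upstream): with sample_rate = 48000
+ * and time_base = 1/90000, a 1024-sample frame rescales to
+ * 1024 * 90000 / 48000 = 1920 time_base units. */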
+
+#endif /* AVCODEC_ENCODE_H */
diff --git a/media/ffvpx/libavcodec/error_resilience.h b/media/ffvpx/libavcodec/error_resilience.h
new file mode 100644
index 0000000000..47cc8a4fc6
--- /dev/null
+++ b/media/ffvpx/libavcodec/error_resilience.h
@@ -0,0 +1,97 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ERROR_RESILIENCE_H
+#define AVCODEC_ERROR_RESILIENCE_H
+
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include "avcodec.h"
+#include "me_cmp.h"
+#include "threadframe.h"
+
+#define VP_START 1 ///< current MB is the first after a resync marker
+#define ER_AC_ERROR 2
+#define ER_DC_ERROR 4
+#define ER_MV_ERROR 8
+#define ER_AC_END 16
+#define ER_DC_END 32
+#define ER_MV_END 64
+
+#define ER_MB_ERROR (ER_AC_ERROR|ER_DC_ERROR|ER_MV_ERROR)
+#define ER_MB_END (ER_AC_END|ER_DC_END|ER_MV_END)
+
+typedef struct ERPicture {
+ AVFrame *f;
+ ThreadFrame *tf;
+
+ // it is the caller's responsibility to allocate these buffers
+ int16_t (*motion_val[2])[2];
+ int8_t *ref_index[2];
+
+ uint32_t *mb_type;
+ int field_picture;
+} ERPicture;
+
+typedef struct ERContext {
+ AVCodecContext *avctx;
+
+ me_cmp_func sad;
+ int mecc_inited;
+
+ int *mb_index2xy;
+ int mb_num;
+ int mb_width, mb_height;
+ ptrdiff_t mb_stride;
+ ptrdiff_t b8_stride;
+
+ atomic_int error_count;
+ int error_occurred;
+ uint8_t *error_status_table;
+ uint8_t *er_temp_buffer;
+ int16_t *dc_val[3];
+ uint8_t *mbskip_table;
+ uint8_t *mbintra_table;
+ int mv[2][4][2];
+
+ ERPicture cur_pic;
+ ERPicture last_pic;
+ ERPicture next_pic;
+
+ int8_t *ref_index[2];
+ int16_t (*motion_val_base[2])[2];
+
+ uint16_t pp_time;
+ uint16_t pb_time;
+ int quarter_sample;
+ int partitioned_frame;
+
+ void (*decode_mb)(void *opaque, int ref, int mv_dir, int mv_type,
+ int (*mv)[2][4][2],
+ int mb_x, int mb_y, int mb_intra, int mb_skipped);
+ void *opaque;
+} ERContext;
+
+void ff_er_frame_start(ERContext *s);
+void ff_er_frame_end(ERContext *s);
+void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy,
+ int status);
+
+#endif /* AVCODEC_ERROR_RESILIENCE_H */
diff --git a/media/ffvpx/libavcodec/faandct.c b/media/ffvpx/libavcodec/faandct.c
new file mode 100644
index 0000000000..38c392bbae
--- /dev/null
+++ b/media/ffvpx/libavcodec/faandct.c
@@ -0,0 +1,215 @@
+/*
+ * Floating point AAN DCT
+ * this implementation is based upon the IJG integer AAN DCT (see jfdctfst.c)
+ *
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2003 Roman Shaposhnik
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief
+ * Floating point AAN DCT
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "faandct.h"
+#include "libavutil/internal.h"
+#include "libavutil/libm.h"
+
+typedef float FLOAT;
+
+/* numbers generated by arbitrary precision arithmetic followed by truncation
+to 36 fractional digits (enough for a 128-bit IEEE quad, see /usr/include/math.h
+for this approach). Unfortunately, long double support is not always correct,
+e.g. ppc has issues.
+TODO: add L suffixes when ppc and toolchains sort out their stuff.
+*/
+#define B0 1.000000000000000000000000000000000000
+#define B1 0.720959822006947913789091890943021267 // (cos(pi*1/16)sqrt(2))^-1
+#define B2 0.765366864730179543456919968060797734 // (cos(pi*2/16)sqrt(2))^-1
+#define B3 0.850430094767256448766702844371412325 // (cos(pi*3/16)sqrt(2))^-1
+#define B4 1.000000000000000000000000000000000000 // (cos(pi*4/16)sqrt(2))^-1
+#define B5 1.272758580572833938461007018281767032 // (cos(pi*5/16)sqrt(2))^-1
+#define B6 1.847759065022573512256366378793576574 // (cos(pi*6/16)sqrt(2))^-1
+#define B7 3.624509785411551372409941227504289587 // (cos(pi*7/16)sqrt(2))^-1
+
+#define A1 M_SQRT1_2 // cos(pi*4/16)
+#define A2 0.54119610014619698435 // cos(pi*6/16)sqrt(2)
+#define A5 0.38268343236508977170 // cos(pi*6/16)
+#define A4 1.30656296487637652774 // cos(pi*2/16)sqrt(2)
+
+static const FLOAT postscale[64]={
+B0*B0, B0*B1, B0*B2, B0*B3, B0*B4, B0*B5, B0*B6, B0*B7,
+B1*B0, B1*B1, B1*B2, B1*B3, B1*B4, B1*B5, B1*B6, B1*B7,
+B2*B0, B2*B1, B2*B2, B2*B3, B2*B4, B2*B5, B2*B6, B2*B7,
+B3*B0, B3*B1, B3*B2, B3*B3, B3*B4, B3*B5, B3*B6, B3*B7,
+B4*B0, B4*B1, B4*B2, B4*B3, B4*B4, B4*B5, B4*B6, B4*B7,
+B5*B0, B5*B1, B5*B2, B5*B3, B5*B4, B5*B5, B5*B6, B5*B7,
+B6*B0, B6*B1, B6*B2, B6*B3, B6*B4, B6*B5, B6*B6, B6*B7,
+B7*B0, B7*B1, B7*B2, B7*B3, B7*B4, B7*B5, B7*B6, B7*B7,
+};
+
+static av_always_inline void row_fdct(FLOAT temp[64], int16_t *data)
+{
+ FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ FLOAT tmp10, tmp11, tmp12, tmp13;
+ FLOAT z2, z4, z11, z13;
+ int i;
+
+ for (i=0; i<8*8; i+=8) {
+ tmp0= data[0 + i] + data[7 + i];
+ tmp7= data[0 + i] - data[7 + i];
+ tmp1= data[1 + i] + data[6 + i];
+ tmp6= data[1 + i] - data[6 + i];
+ tmp2= data[2 + i] + data[5 + i];
+ tmp5= data[2 + i] - data[5 + i];
+ tmp3= data[3 + i] + data[4 + i];
+ tmp4= data[3 + i] - data[4 + i];
+
+ tmp10= tmp0 + tmp3;
+ tmp13= tmp0 - tmp3;
+ tmp11= tmp1 + tmp2;
+ tmp12= tmp1 - tmp2;
+
+ temp[0 + i]= tmp10 + tmp11;
+ temp[4 + i]= tmp10 - tmp11;
+
+ tmp12 += tmp13;
+ tmp12 *= A1;
+ temp[2 + i]= tmp13 + tmp12;
+ temp[6 + i]= tmp13 - tmp12;
+
+ tmp4 += tmp5;
+ tmp5 += tmp6;
+ tmp6 += tmp7;
+
+ z2= tmp4*(A2+A5) - tmp6*A5;
+ z4= tmp6*(A4-A5) + tmp4*A5;
+
+ tmp5*=A1;
+
+ z11= tmp7 + tmp5;
+ z13= tmp7 - tmp5;
+
+ temp[5 + i]= z13 + z2;
+ temp[3 + i]= z13 - z2;
+ temp[1 + i]= z11 + z4;
+ temp[7 + i]= z11 - z4;
+ }
+}
+
+void ff_faandct(int16_t *data)
+{
+ FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ FLOAT tmp10, tmp11, tmp12, tmp13;
+ FLOAT z2, z4, z11, z13;
+ FLOAT temp[64];
+ int i;
+
+ emms_c();
+
+ row_fdct(temp, data);
+
+ for (i=0; i<8; i++) {
+ tmp0= temp[8*0 + i] + temp[8*7 + i];
+ tmp7= temp[8*0 + i] - temp[8*7 + i];
+ tmp1= temp[8*1 + i] + temp[8*6 + i];
+ tmp6= temp[8*1 + i] - temp[8*6 + i];
+ tmp2= temp[8*2 + i] + temp[8*5 + i];
+ tmp5= temp[8*2 + i] - temp[8*5 + i];
+ tmp3= temp[8*3 + i] + temp[8*4 + i];
+ tmp4= temp[8*3 + i] - temp[8*4 + i];
+
+ tmp10= tmp0 + tmp3;
+ tmp13= tmp0 - tmp3;
+ tmp11= tmp1 + tmp2;
+ tmp12= tmp1 - tmp2;
+
+ data[8*0 + i]= lrintf(postscale[8*0 + i] * (tmp10 + tmp11));
+ data[8*4 + i]= lrintf(postscale[8*4 + i] * (tmp10 - tmp11));
+
+ tmp12 += tmp13;
+ tmp12 *= A1;
+ data[8*2 + i]= lrintf(postscale[8*2 + i] * (tmp13 + tmp12));
+ data[8*6 + i]= lrintf(postscale[8*6 + i] * (tmp13 - tmp12));
+
+ tmp4 += tmp5;
+ tmp5 += tmp6;
+ tmp6 += tmp7;
+
+ z2= tmp4*(A2+A5) - tmp6*A5;
+ z4= tmp6*(A4-A5) + tmp4*A5;
+
+ tmp5*=A1;
+
+ z11= tmp7 + tmp5;
+ z13= tmp7 - tmp5;
+
+ data[8*5 + i]= lrintf(postscale[8*5 + i] * (z13 + z2));
+ data[8*3 + i]= lrintf(postscale[8*3 + i] * (z13 - z2));
+ data[8*1 + i]= lrintf(postscale[8*1 + i] * (z11 + z4));
+ data[8*7 + i]= lrintf(postscale[8*7 + i] * (z11 - z4));
+ }
+}
+
+void ff_faandct248(int16_t *data)
+{
+ FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ FLOAT tmp10, tmp11, tmp12, tmp13;
+ FLOAT temp[64];
+ int i;
+
+ emms_c();
+
+ row_fdct(temp, data);
+
+ for (i=0; i<8; i++) {
+ tmp0 = temp[8*0 + i] + temp[8*1 + i];
+ tmp1 = temp[8*2 + i] + temp[8*3 + i];
+ tmp2 = temp[8*4 + i] + temp[8*5 + i];
+ tmp3 = temp[8*6 + i] + temp[8*7 + i];
+ tmp4 = temp[8*0 + i] - temp[8*1 + i];
+ tmp5 = temp[8*2 + i] - temp[8*3 + i];
+ tmp6 = temp[8*4 + i] - temp[8*5 + i];
+ tmp7 = temp[8*6 + i] - temp[8*7 + i];
+
+ tmp10 = tmp0 + tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ tmp13 = tmp0 - tmp3;
+
+ data[8*0 + i] = lrintf(postscale[8*0 + i] * (tmp10 + tmp11));
+ data[8*4 + i] = lrintf(postscale[8*4 + i] * (tmp10 - tmp11));
+
+ tmp12 += tmp13;
+ tmp12 *= A1;
+ data[8*2 + i] = lrintf(postscale[8*2 + i] * (tmp13 + tmp12));
+ data[8*6 + i] = lrintf(postscale[8*6 + i] * (tmp13 - tmp12));
+
+ tmp10 = tmp4 + tmp7;
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp5 - tmp6;
+ tmp13 = tmp4 - tmp7;
+
+ data[8*1 + i] = lrintf(postscale[8*0 + i] * (tmp10 + tmp11));
+ data[8*5 + i] = lrintf(postscale[8*4 + i] * (tmp10 - tmp11));
+
+ tmp12 += tmp13;
+ tmp12 *= A1;
+ data[8*3 + i] = lrintf(postscale[8*2 + i] * (tmp13 + tmp12));
+ data[8*7 + i] = lrintf(postscale[8*6 + i] * (tmp13 - tmp12));
+ }
+}
diff --git a/media/ffvpx/libavcodec/faandct.h b/media/ffvpx/libavcodec/faandct.h
new file mode 100644
index 0000000000..c5ef96dcf1
--- /dev/null
+++ b/media/ffvpx/libavcodec/faandct.h
@@ -0,0 +1,37 @@
+/*
+ * Floating point AAN DCT
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @brief
+ * Floating point AAN DCT
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_FAANDCT_H
+#define AVCODEC_FAANDCT_H
+
+#include <stdint.h>
+
+void ff_faandct(int16_t *data);
+void ff_faandct248(int16_t *data);
+
+#endif /* AVCODEC_FAANDCT_H */
diff --git a/media/ffvpx/libavcodec/faanidct.c b/media/ffvpx/libavcodec/faanidct.c
new file mode 100644
index 0000000000..3921f82dae
--- /dev/null
+++ b/media/ffvpx/libavcodec/faanidct.c
@@ -0,0 +1,166 @@
+/*
+ * Floating point AAN IDCT
+ * Copyright (c) 2008 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "faanidct.h"
+#include "libavutil/common.h"
+
+/* To allow switching to double. */
+typedef float FLOAT;
+
+#define B0 1.0000000000000000000000
+#define B1 1.3870398453221474618216 // cos(pi*1/16)sqrt(2)
+#define B2 1.3065629648763765278566 // cos(pi*2/16)sqrt(2)
+#define B3 1.1758756024193587169745 // cos(pi*3/16)sqrt(2)
+#define B4 1.0000000000000000000000 // cos(pi*4/16)sqrt(2)
+#define B5 0.7856949583871021812779 // cos(pi*5/16)sqrt(2)
+#define B6 0.5411961001461969843997 // cos(pi*6/16)sqrt(2)
+#define B7 0.2758993792829430123360 // cos(pi*7/16)sqrt(2)
+
+#define A4 0.70710678118654752438 // cos(pi*4/16)
+#define A2 0.92387953251128675613 // cos(pi*2/16)
+
+static const FLOAT prescale[64]={
+B0*B0/8, B0*B1/8, B0*B2/8, B0*B3/8, B0*B4/8, B0*B5/8, B0*B6/8, B0*B7/8,
+B1*B0/8, B1*B1/8, B1*B2/8, B1*B3/8, B1*B4/8, B1*B5/8, B1*B6/8, B1*B7/8,
+B2*B0/8, B2*B1/8, B2*B2/8, B2*B3/8, B2*B4/8, B2*B5/8, B2*B6/8, B2*B7/8,
+B3*B0/8, B3*B1/8, B3*B2/8, B3*B3/8, B3*B4/8, B3*B5/8, B3*B6/8, B3*B7/8,
+B4*B0/8, B4*B1/8, B4*B2/8, B4*B3/8, B4*B4/8, B4*B5/8, B4*B6/8, B4*B7/8,
+B5*B0/8, B5*B1/8, B5*B2/8, B5*B3/8, B5*B4/8, B5*B5/8, B5*B6/8, B5*B7/8,
+B6*B0/8, B6*B1/8, B6*B2/8, B6*B3/8, B6*B4/8, B6*B5/8, B6*B6/8, B6*B7/8,
+B7*B0/8, B7*B1/8, B7*B2/8, B7*B3/8, B7*B4/8, B7*B5/8, B7*B6/8, B7*B7/8,
+};
+
+static inline void p8idct(int16_t data[64], FLOAT temp[64], uint8_t *dest,
+ ptrdiff_t stride, int x, int y, int type)
+{
+ int i;
+ FLOAT s04, d04, s17, d17, s26, d26, s53, d53;
+ FLOAT os07, os16, os25, os34;
+ FLOAT od07, od16, od25, od34;
+
+ for(i=0; i<y*8; i+=y){
+ s17= temp[1*x + i] + temp[7*x + i];
+ d17= temp[1*x + i] - temp[7*x + i];
+ s53= temp[5*x + i] + temp[3*x + i];
+ d53= temp[5*x + i] - temp[3*x + i];
+
+ od07= s17 + s53;
+ od25= (s17 - s53)*(2*A4);
+
+ od34= d17*(2*(B6-A2)) - d53*(2*A2);
+ od16= d53*(2*(A2-B2)) + d17*(2*A2);
+
+ od16 -= od07;
+ od25 -= od16;
+ od34 += od25;
+
+ s26 = temp[2*x + i] + temp[6*x + i];
+ d26 = temp[2*x + i] - temp[6*x + i];
+ d26*= 2*A4;
+ d26-= s26;
+
+ s04= temp[0*x + i] + temp[4*x + i];
+ d04= temp[0*x + i] - temp[4*x + i];
+
+ os07= s04 + s26;
+ os34= s04 - s26;
+ os16= d04 + d26;
+ os25= d04 - d26;
+
+ if(type==0){
+ temp[0*x + i]= os07 + od07;
+ temp[7*x + i]= os07 - od07;
+ temp[1*x + i]= os16 + od16;
+ temp[6*x + i]= os16 - od16;
+ temp[2*x + i]= os25 + od25;
+ temp[5*x + i]= os25 - od25;
+ temp[3*x + i]= os34 - od34;
+ temp[4*x + i]= os34 + od34;
+ }else if(type==1){
+ data[0*x + i]= lrintf(os07 + od07);
+ data[7*x + i]= lrintf(os07 - od07);
+ data[1*x + i]= lrintf(os16 + od16);
+ data[6*x + i]= lrintf(os16 - od16);
+ data[2*x + i]= lrintf(os25 + od25);
+ data[5*x + i]= lrintf(os25 - od25);
+ data[3*x + i]= lrintf(os34 - od34);
+ data[4*x + i]= lrintf(os34 + od34);
+ }else if(type==2){
+ dest[0*stride + i]= av_clip_uint8(((int)dest[0*stride + i]) + lrintf(os07 + od07));
+ dest[7*stride + i]= av_clip_uint8(((int)dest[7*stride + i]) + lrintf(os07 - od07));
+ dest[1*stride + i]= av_clip_uint8(((int)dest[1*stride + i]) + lrintf(os16 + od16));
+ dest[6*stride + i]= av_clip_uint8(((int)dest[6*stride + i]) + lrintf(os16 - od16));
+ dest[2*stride + i]= av_clip_uint8(((int)dest[2*stride + i]) + lrintf(os25 + od25));
+ dest[5*stride + i]= av_clip_uint8(((int)dest[5*stride + i]) + lrintf(os25 - od25));
+ dest[3*stride + i]= av_clip_uint8(((int)dest[3*stride + i]) + lrintf(os34 - od34));
+ dest[4*stride + i]= av_clip_uint8(((int)dest[4*stride + i]) + lrintf(os34 + od34));
+ }else{
+ dest[0*stride + i]= av_clip_uint8(lrintf(os07 + od07));
+ dest[7*stride + i]= av_clip_uint8(lrintf(os07 - od07));
+ dest[1*stride + i]= av_clip_uint8(lrintf(os16 + od16));
+ dest[6*stride + i]= av_clip_uint8(lrintf(os16 - od16));
+ dest[2*stride + i]= av_clip_uint8(lrintf(os25 + od25));
+ dest[5*stride + i]= av_clip_uint8(lrintf(os25 - od25));
+ dest[3*stride + i]= av_clip_uint8(lrintf(os34 - od34));
+ dest[4*stride + i]= av_clip_uint8(lrintf(os34 + od34));
+ }
+ }
+}
+
+void ff_faanidct(int16_t block[64]){
+ FLOAT temp[64];
+ int i;
+
+ emms_c();
+
+ for(i=0; i<64; i++)
+ temp[i] = block[i] * prescale[i];
+
+ p8idct(block, temp, NULL, 0, 1, 8, 0);
+ p8idct(block, temp, NULL, 0, 8, 1, 1);
+}
+
+void ff_faanidct_add(uint8_t *dest, ptrdiff_t line_size, int16_t block[64])
+{
+ FLOAT temp[64];
+ int i;
+
+ emms_c();
+
+ for(i=0; i<64; i++)
+ temp[i] = block[i] * prescale[i];
+
+ p8idct(block, temp, NULL, 0, 1, 8, 0);
+ p8idct(NULL , temp, dest, line_size, 8, 1, 2);
+}
+
+void ff_faanidct_put(uint8_t *dest, ptrdiff_t line_size, int16_t block[64])
+{
+ FLOAT temp[64];
+ int i;
+
+ emms_c();
+
+ for(i=0; i<64; i++)
+ temp[i] = block[i] * prescale[i];
+
+ p8idct(block, temp, NULL, 0, 1, 8, 0);
+ p8idct(NULL , temp, dest, line_size, 8, 1, 3);
+}
diff --git a/media/ffvpx/libavcodec/faanidct.h b/media/ffvpx/libavcodec/faanidct.h
new file mode 100644
index 0000000000..6f4da67c1b
--- /dev/null
+++ b/media/ffvpx/libavcodec/faanidct.h
@@ -0,0 +1,32 @@
+/*
+ * Floating point AAN IDCT
+ * Copyright (c) 2008 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_FAANIDCT_H
+#define AVCODEC_FAANIDCT_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+void ff_faanidct(int16_t block[64]);
+void ff_faanidct_add(uint8_t *dest, ptrdiff_t line_size, int16_t block[64]);
+void ff_faanidct_put(uint8_t *dest, ptrdiff_t line_size, int16_t block[64]);
+
+#endif /* AVCODEC_FAANIDCT_H */
diff --git a/media/ffvpx/libavcodec/fdctdsp.c b/media/ffvpx/libavcodec/fdctdsp.c
new file mode 100644
index 0000000000..5306c9d047
--- /dev/null
+++ b/media/ffvpx/libavcodec/fdctdsp.c
@@ -0,0 +1,51 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "avcodec.h"
+#include "dct.h"
+#include "faandct.h"
+#include "fdctdsp.h"
+#include "config.h"
+
+av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
+{
+ av_unused const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;
+
+ if (avctx->bits_per_raw_sample == 10 || avctx->bits_per_raw_sample == 9) {
+ c->fdct = ff_jpeg_fdct_islow_10;
+ c->fdct248 = ff_fdct248_islow_10;
+ } else if (avctx->dct_algo == FF_DCT_FASTINT) {
+ c->fdct = ff_fdct_ifast;
+ c->fdct248 = ff_fdct_ifast248;
+#if CONFIG_FAANDCT
+ } else if (avctx->dct_algo == FF_DCT_FAAN) {
+ c->fdct = ff_faandct;
+ c->fdct248 = ff_faandct248;
+#endif /* CONFIG_FAANDCT */
+ } else {
+ c->fdct = ff_jpeg_fdct_islow_8; // slow/accurate/default
+ c->fdct248 = ff_fdct248_islow_8;
+ }
+
+#if ARCH_PPC
+ ff_fdctdsp_init_ppc(c, avctx, high_bit_depth);
+#elif ARCH_X86
+ ff_fdctdsp_init_x86(c, avctx, high_bit_depth);
+#endif
+}
diff --git a/media/ffvpx/libavcodec/fdctdsp.h b/media/ffvpx/libavcodec/fdctdsp.h
new file mode 100644
index 0000000000..3e1f683b9e
--- /dev/null
+++ b/media/ffvpx/libavcodec/fdctdsp.h
@@ -0,0 +1,37 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_FDCTDSP_H
+#define AVCODEC_FDCTDSP_H
+
+#include <stdint.h>
+
+#include "avcodec.h"
+
+typedef struct FDCTDSPContext {
+ void (*fdct)(int16_t *block /* align 16 */);
+ void (*fdct248)(int16_t *block /* align 16 */);
+} FDCTDSPContext;
+
+void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx);
+void ff_fdctdsp_init_ppc(FDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_fdctdsp_init_x86(FDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+
+#endif /* AVCODEC_FDCTDSP_H */
diff --git a/media/ffvpx/libavcodec/fdctdsp_init.c b/media/ffvpx/libavcodec/fdctdsp_init.c
new file mode 100644
index 0000000000..0cb5fd625b
--- /dev/null
+++ b/media/ffvpx/libavcodec/fdctdsp_init.c
@@ -0,0 +1,44 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/fdctdsp.h"
+#include "fdct.h"
+
+av_cold void ff_fdctdsp_init_x86(FDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth)
+{
+ int cpu_flags = av_get_cpu_flags();
+ const int dct_algo = avctx->dct_algo;
+
+ if (!high_bit_depth) {
+ if ((dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX)) {
+ if (INLINE_MMX(cpu_flags))
+ c->fdct = ff_fdct_mmx;
+
+ if (INLINE_MMXEXT(cpu_flags))
+ c->fdct = ff_fdct_mmxext;
+
+ if (INLINE_SSE2(cpu_flags))
+ c->fdct = ff_fdct_sse2;
+ }
+ }
+}
diff --git a/media/ffvpx/libavcodec/fft-internal.h b/media/ffvpx/libavcodec/fft-internal.h
new file mode 100644
index 0000000000..d89a3e38ca
--- /dev/null
+++ b/media/ffvpx/libavcodec/fft-internal.h
@@ -0,0 +1,62 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_FFT_INTERNAL_H
+#define AVCODEC_FFT_INTERNAL_H
+
+#include "libavutil/mathematics.h"
+#include "fft.h"
+
+#if FFT_FLOAT
+
+#define FIX15(v) (v)
+#define sqrthalf (float)M_SQRT1_2
+
+#define BF(x, y, a, b) do { \
+ x = a - b; \
+ y = a + b; \
+ } while (0)
+
+#define CMUL(dre, dim, are, aim, bre, bim) do { \
+ (dre) = (are) * (bre) - (aim) * (bim); \
+ (dim) = (are) * (bim) + (aim) * (bre); \
+ } while (0)
+
+#else /* FFT_FLOAT */
+
+#define CMUL(dre, dim, are, aim, bre, bim) do { \
+ int64_t accu; \
+ (accu) = (int64_t)(bre) * (are); \
+ (accu) -= (int64_t)(bim) * (aim); \
+ (dre) = (int)(((accu) + 0x40000000) >> 31); \
+ (accu) = (int64_t)(bre) * (aim); \
+ (accu) += (int64_t)(bim) * (are); \
+ (dim) = (int)(((accu) + 0x40000000) >> 31); \
+ } while (0)
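+
+/* Note (illustrative, not from upstream): the fixed-point operands are Q31
+ * fractions, so each 32x32 product is a Q62 value; adding 0x40000000 before
+ * the >> 31 rounds the result back to Q31 to nearest. */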
+
+#endif /* FFT_FLOAT */
+
+#define ff_imdct_calc_c FFT_NAME(ff_imdct_calc_c)
+#define ff_imdct_half_c FFT_NAME(ff_imdct_half_c)
+#define ff_mdct_calc_c FFT_NAME(ff_mdct_calc_c)
+
+void ff_imdct_calc_c(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_c(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_mdct_calc_c(FFTContext *s, FFTSample *output, const FFTSample *input);
+
+#endif /* AVCODEC_FFT_INTERNAL_H */
diff --git a/media/ffvpx/libavcodec/fft.h b/media/ffvpx/libavcodec/fft.h
new file mode 100644
index 0000000000..d46e5a3f0b
--- /dev/null
+++ b/media/ffvpx/libavcodec/fft.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_FFT_H
+#define AVCODEC_FFT_H
+
+#ifndef FFT_FLOAT
+#define FFT_FLOAT 1
+#endif
+
+#include <stdint.h>
+#include "config.h"
+
+#include "libavutil/attributes_internal.h"
+#include "libavutil/mem_internal.h"
+
+#if FFT_FLOAT
+
+#include "avfft.h"
+
+#define FFT_NAME(x) x
+
+typedef float FFTDouble;
+
+#else
+
+#define Q31(x) (int)((x)*2147483648.0 + 0.5)
+#define FFT_NAME(x) x ## _fixed_32
+
+typedef int32_t FFTSample;
+
+typedef struct FFTComplex {
+ FFTSample re, im;
+} FFTComplex;
+
+typedef int FFTDouble;
+typedef struct FFTContext FFTContext;
+
+#endif /* FFT_FLOAT */
+
+typedef struct FFTDComplex {
+ FFTDouble re, im;
+} FFTDComplex;
+
+/* FFT computation */
+
+enum fft_permutation_type {
+ FF_FFT_PERM_DEFAULT,
+ FF_FFT_PERM_SWAP_LSBS,
+ FF_FFT_PERM_AVX,
+};
+
+enum mdct_permutation_type {
+ FF_MDCT_PERM_NONE,
+ FF_MDCT_PERM_INTERLEAVE,
+};
+
+struct FFTContext {
+ int nbits;
+ int inverse;
+ uint16_t *revtab;
+ FFTComplex *tmp_buf;
+ int mdct_size; /* size of MDCT (i.e. number of input data * 2) */
+ int mdct_bits; /* n = 2^nbits */
+ /* pre/post rotation tables */
+ FFTSample *tcos;
+ FFTSample *tsin;
+ /**
+ * Do the permutation needed BEFORE calling fft_calc().
+ */
+ void (*fft_permute)(struct FFTContext *s, FFTComplex *z);
+ /**
+     * Do a complex FFT with the parameters defined in ff_fft_init(). The input
+     * data must be permuted first with fft_permute(). No 1.0/sqrt(n)
+     * normalization is done.
+ */
+ void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
+ void (*imdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
+ void (*imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
+ void (*mdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
+ enum fft_permutation_type fft_permutation;
+ enum mdct_permutation_type mdct_permutation;
+ uint32_t *revtab32;
+};
+
+#if CONFIG_HARDCODED_TABLES
+#define COSTABLE_CONST const
+#define ff_init_ff_cos_tabs(index)
+#else
+#define COSTABLE_CONST
+#define ff_init_ff_cos_tabs FFT_NAME(ff_init_ff_cos_tabs)
+
+/**
+ * Initialize the cosine table in ff_cos_tabs[index]
+ * @param index index in ff_cos_tabs array of the table to initialize
+ */
+void ff_init_ff_cos_tabs(int index);
+#endif
+
+#define COSTABLE(size) \
+ COSTABLE_CONST attribute_visibility_hidden DECLARE_ALIGNED(32, FFTSample, FFT_NAME(ff_cos_##size))[size/2]
+
+extern COSTABLE(16);
+extern COSTABLE(32);
+extern COSTABLE(64);
+extern COSTABLE(128);
+extern COSTABLE(256);
+extern COSTABLE(512);
+extern COSTABLE(1024);
+extern COSTABLE(2048);
+extern COSTABLE(4096);
+extern COSTABLE(8192);
+extern COSTABLE(16384);
+extern COSTABLE(32768);
+extern COSTABLE(65536);
+extern COSTABLE(131072);
+extern COSTABLE_CONST FFTSample* const FFT_NAME(ff_cos_tabs)[18];
+
+#define ff_fft_init FFT_NAME(ff_fft_init)
+#define ff_fft_end FFT_NAME(ff_fft_end)
+
+/**
+ * Set up a complex FFT.
+ * @param nbits log2 of the length of the input array
+ * @param inverse if 0 perform the forward transform, if 1 perform the inverse
+ */
+int ff_fft_init(FFTContext *s, int nbits, int inverse);
+
+void ff_fft_init_aarch64(FFTContext *s);
+void ff_fft_init_x86(FFTContext *s);
+void ff_fft_init_arm(FFTContext *s);
+void ff_fft_init_mips(FFTContext *s);
+void ff_fft_init_ppc(FFTContext *s);
+
+void ff_fft_end(FFTContext *s);
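+
+/*
+ * Illustrative usage sketch (not part of upstream FFmpeg); the buffer size,
+ * alignment and error handling below are assumptions, the call order follows
+ * the FFTContext comments above:
+ *
+ *   FFTContext fft;
+ *   DECLARE_ALIGNED(32, FFTComplex, buf)[1 << 10];
+ *   // ... fill buf with 1024 complex samples ...
+ *   if (ff_fft_init(&fft, 10, 0) >= 0) {   // forward transform, 2^10 points
+ *       fft.fft_permute(&fft, buf);        // reorder the input first
+ *       fft.fft_calc(&fft, buf);           // in-place, unnormalized FFT
+ *       ff_fft_end(&fft);
+ *   }
+ */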
+
+#define ff_mdct_init FFT_NAME(ff_mdct_init)
+#define ff_mdct_end FFT_NAME(ff_mdct_end)
+
+int ff_mdct_init(FFTContext *s, int nbits, int inverse, double scale);
+void ff_mdct_end(FFTContext *s);
+
+#endif /* AVCODEC_FFT_H */
diff --git a/media/ffvpx/libavcodec/fft_fixed_32.c b/media/ffvpx/libavcodec/fft_fixed_32.c
new file mode 100644
index 0000000000..e18dc83891
--- /dev/null
+++ b/media/ffvpx/libavcodec/fft_fixed_32.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Stanislav Ocovaj (socovaj@mips.com)
+ * Goran Cordasic (goran@mips.com)
+ * Djordje Pesut (djordje@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define FFT_FLOAT 0
+#include "fft_template.c"
diff --git a/media/ffvpx/libavcodec/fft_float.c b/media/ffvpx/libavcodec/fft_float.c
new file mode 100644
index 0000000000..a9fd01978d
--- /dev/null
+++ b/media/ffvpx/libavcodec/fft_float.c
@@ -0,0 +1,20 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define FFT_FLOAT 1
+#include "fft_template.c"
diff --git a/media/ffvpx/libavcodec/fft_init_table.c b/media/ffvpx/libavcodec/fft_init_table.c
new file mode 100644
index 0000000000..83e35ffb7c
--- /dev/null
+++ b/media/ffvpx/libavcodec/fft_init_table.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Stanislav Ocovaj (socovaj@mips.com)
+ * Goran Cordasic (goran@mips.com)
+ * Djordje Pesut (djordje@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * definitions and initialization of the FFT lookup table (LUT)
+ */
+#include "libavutil/thread.h"
+
+#include "libavcodec/fft_table.h"
+
+const int32_t ff_w_tab_sr[MAX_FFT_SIZE/(4*16)] = {
+2147483647, 2147483016, 2147481121, 2147477963, 2147473542, 2147467857, 2147460908, 2147452697,
+2147443222, 2147432484, 2147420483, 2147407218, 2147392690, 2147376899, 2147359845, 2147341527,
+2147321946, 2147301102, 2147278995, 2147255625, 2147230991, 2147205094, 2147177934, 2147149511,
+2147119825, 2147088876, 2147056664, 2147023188, 2146988450, 2146952448, 2146915184, 2146876656,
+2146836866, 2146795813, 2146753497, 2146709917, 2146665076, 2146618971, 2146571603, 2146522973,
+2146473080, 2146421924, 2146369505, 2146315824, 2146260881, 2146204674, 2146147205, 2146088474,
+2146028480, 2145967224, 2145904705, 2145840924, 2145775880, 2145709574, 2145642006, 2145573176,
+2145503083, 2145431729, 2145359112, 2145285233, 2145210092, 2145133690, 2145056025, 2144977098,
+2144896910, 2144815460, 2144732748, 2144648774, 2144563539, 2144477042, 2144389283, 2144300264,
+2144209982, 2144118439, 2144025635, 2143931570, 2143836244, 2143739656, 2143641807, 2143542697,
+2143442326, 2143340694, 2143237802, 2143133648, 2143028234, 2142921559, 2142813624, 2142704427,
+2142593971, 2142482254, 2142369276, 2142255039, 2142139541, 2142022783, 2141904764, 2141785486,
+2141664948, 2141543150, 2141420092, 2141295774, 2141170197, 2141043360, 2140915264, 2140785908,
+2140655293, 2140523418, 2140390284, 2140255892, 2140120240, 2139983329, 2139845159, 2139705730,
+2139565043, 2139423097, 2139279892, 2139135429, 2138989708, 2138842728, 2138694490, 2138544994,
+2138394240, 2138242228, 2138088958, 2137934430, 2137778644, 2137621601, 2137463301, 2137303743,
+2137142927, 2136980855, 2136817525, 2136652938, 2136487095, 2136319994, 2136151637, 2135982023,
+2135811153, 2135639026, 2135465642, 2135291003, 2135115107, 2134937956, 2134759548, 2134579885,
+2134398966, 2134216791, 2134033361, 2133848675, 2133662734, 2133475538, 2133287087, 2133097381,
+2132906420, 2132714204, 2132520734, 2132326009, 2132130030, 2131932796, 2131734309, 2131534567,
+2131333572, 2131131322, 2130927819, 2130723062, 2130517052, 2130309789, 2130101272, 2129891502,
+2129680480, 2129468204, 2129254676, 2129039895, 2128823862, 2128606576, 2128388038, 2128168248,
+2127947206, 2127724913, 2127501367, 2127276570, 2127050522, 2126823222, 2126594672, 2126364870,
+2126133817, 2125901514, 2125667960, 2125433155, 2125197100, 2124959795, 2124721240, 2124481435,
+2124240380, 2123998076, 2123754522, 2123509718, 2123263666, 2123016364, 2122767814, 2122518015,
+2122266967, 2122014670, 2121761126, 2121506333, 2121250292, 2120993003, 2120734467, 2120474683,
+2120213651, 2119951372, 2119687847, 2119423074, 2119157054, 2118889788, 2118621275, 2118351516,
+2118080511, 2117808259, 2117534762, 2117260020, 2116984031, 2116706797, 2116428319, 2116148595,
+2115867626, 2115585412, 2115301954, 2115017252, 2114731305, 2114444114, 2114155680, 2113866001,
+2113575080, 2113282914, 2112989506, 2112694855, 2112398960, 2112101824, 2111803444, 2111503822,
+2111202959, 2110900853, 2110597505, 2110292916, 2109987085, 2109680013, 2109371700, 2109062146,
+2108751352, 2108439317, 2108126041, 2107811526, 2107495770, 2107178775, 2106860540, 2106541065,
+2106220352, 2105898399, 2105575208, 2105250778, 2104925109, 2104598202, 2104270057, 2103940674,
+2103610054, 2103278196, 2102945101, 2102610768, 2102275199, 2101938393, 2101600350, 2101261071,
+2100920556, 2100578805, 2100235819, 2099891596, 2099546139, 2099199446, 2098851519, 2098502357,
+2098151960, 2097800329, 2097447464, 2097093365, 2096738032, 2096381466, 2096023667, 2095664635,
+2095304370, 2094942872, 2094580142, 2094216179, 2093850985, 2093484559, 2093116901, 2092748012,
+2092377892, 2092006541, 2091633960, 2091260147, 2090885105, 2090508833, 2090131331, 2089752599,
+2089372638, 2088991448, 2088609029, 2088225381, 2087840505, 2087454400, 2087067068, 2086678508,
+2086288720, 2085897705, 2085505463, 2085111994, 2084717298, 2084321376, 2083924228, 2083525854,
+2083126254, 2082725429, 2082323379, 2081920103, 2081515603, 2081109879, 2080702930, 2080294757,
+2079885360, 2079474740, 2079062896, 2078649830, 2078235540, 2077820028, 2077403294, 2076985338,
+2076566160, 2076145760, 2075724139, 2075301296, 2074877233, 2074451950, 2074025446, 2073597721,
+2073168777, 2072738614, 2072307231, 2071874629, 2071440808, 2071005769, 2070569511, 2070132035,
+2069693342, 2069253430, 2068812302, 2068369957, 2067926394, 2067481616, 2067035621, 2066588410,
+2066139983, 2065690341, 2065239484, 2064787411, 2064334124, 2063879623, 2063423908, 2062966978,
+2062508835, 2062049479, 2061588910, 2061127128, 2060664133, 2060199927, 2059734508, 2059267877,
+2058800036, 2058330983, 2057860719, 2057389244, 2056916560, 2056442665, 2055967560, 2055491246,
+2055013723, 2054534991, 2054055050, 2053573901, 2053091544, 2052607979, 2052123207, 2051637227,
+2051150040, 2050661647, 2050172048, 2049681242, 2049189231, 2048696014, 2048201592, 2047705965,
+2047209133, 2046711097, 2046211857, 2045711414, 2045209767, 2044706916, 2044202863, 2043697608,
+2043191150, 2042683490, 2042174628, 2041664565, 2041153301, 2040640837, 2040127172, 2039612306,
+2039096241, 2038578976, 2038060512, 2037540850, 2037019988, 2036497928, 2035974670, 2035450215,
+2034924562, 2034397712, 2033869665, 2033340422, 2032809982, 2032278347, 2031745516, 2031211490,
+2030676269, 2030139853, 2029602243, 2029063439, 2028523442, 2027982251, 2027439867, 2026896291,
+2026351522, 2025805561, 2025258408, 2024710064, 2024160529, 2023609803, 2023057887, 2022504780,
+2021950484, 2021394998, 2020838323, 2020280460, 2019721407, 2019161167, 2018599739, 2018037123,
+2017473321, 2016908331, 2016342155, 2015774793, 2015206245, 2014636511, 2014065592, 2013493489,
+2012920201, 2012345729, 2011770073, 2011193233, 2010615210, 2010036005, 2009455617, 2008874047,
+2008291295, 2007707362, 2007122248, 2006535953, 2005948478, 2005359822, 2004769987, 2004178973,
+2003586779, 2002993407, 2002398857, 2001803128, 2001206222, 2000608139, 2000008879, 1999408442,
+1998806829, 1998204040, 1997600076, 1996994937, 1996388622, 1995781134, 1995172471, 1994562635,
+1993951625, 1993339442, 1992726087, 1992111559, 1991495860, 1990878989, 1990260946, 1989641733,
+1989021350, 1988399796, 1987777073, 1987153180, 1986528118, 1985901888, 1985274489, 1984645923,
+1984016189, 1983385288, 1982753220, 1982119985, 1981485585, 1980850019, 1980213288, 1979575392,
+1978936331, 1978296106, 1977654717, 1977012165, 1976368450, 1975723572, 1975077532, 1974430331,
+1973781967, 1973132443, 1972481757, 1971829912, 1971176906, 1970522741, 1969867417, 1969210933,
+1968553292, 1967894492, 1967234535, 1966573420, 1965911148, 1965247720, 1964583136, 1963917396,
+1963250501, 1962582451, 1961913246, 1961242888, 1960571375, 1959898709, 1959224890, 1958549919,
+1957873796, 1957196520, 1956518093, 1955838516, 1955157788, 1954475909, 1953792881, 1953108703,
+1952423377, 1951736902, 1951049279, 1950360508, 1949670589, 1948979524, 1948287312, 1947593954,
+1946899451, 1946203802, 1945507008, 1944809070, 1944109987, 1943409761, 1942708392, 1942005880,
+1941302225, 1940597428, 1939891490, 1939184411, 1938476190, 1937766830, 1937056329, 1936344689,
+1935631910, 1934917992, 1934202936, 1933486742, 1932769411, 1932050943, 1931331338, 1930610597,
+1929888720, 1929165708, 1928441561, 1927716279, 1926989864, 1926262315, 1925533633, 1924803818,
+1924072871, 1923340791, 1922607581, 1921873239, 1921137767, 1920401165, 1919663432, 1918924571,
+1918184581, 1917443462, 1916701216, 1915957841, 1915213340, 1914467712, 1913720958, 1912973078,
+1912224073, 1911473942, 1910722688, 1909970309, 1909216806, 1908462181, 1907706433, 1906949562,
+1906191570, 1905432457, 1904672222, 1903910867, 1903148392, 1902384797, 1901620084, 1900854251,
+1900087301, 1899319232, 1898550047, 1897779744, 1897008325, 1896235790, 1895462140, 1894687374,
+1893911494, 1893134500, 1892356392, 1891577171, 1890796837, 1890015391, 1889232832, 1888449163,
+1887664383, 1886878492, 1886091491, 1885303381, 1884514161, 1883723833, 1882932397, 1882139853,
+1881346202, 1880551444, 1879755580, 1878958610, 1878160535, 1877361354, 1876561070, 1875759681,
+1874957189, 1874153594, 1873348897, 1872543097, 1871736196, 1870928194, 1870119091, 1869308888,
+1868497586, 1867685184, 1866871683, 1866057085, 1865241388, 1864424594, 1863606704, 1862787717,
+1861967634, 1861146456, 1860324183, 1859500816, 1858676355, 1857850800, 1857024153, 1856196413,
+1855367581, 1854537657, 1853706643, 1852874538, 1852041343, 1851207059, 1850371686, 1849535224,
+1848697674, 1847859036, 1847019312, 1846178501, 1845336604, 1844493621, 1843649553, 1842804401,
+1841958164, 1841110844, 1840262441, 1839412956, 1838562388, 1837710739, 1836858008, 1836004197,
+1835149306, 1834293336, 1833436286, 1832578158, 1831718951, 1830858668, 1829997307, 1829134869,
+1828271356, 1827406767, 1826541103, 1825674364, 1824806552, 1823937666, 1823067707, 1822196675,
+1821324572, 1820451397, 1819577151, 1818701835, 1817825449, 1816947994, 1816069469, 1815189877,
+1814309216, 1813427489, 1812544694, 1811660833, 1810775906, 1809889915, 1809002858, 1808114737,
+1807225553, 1806335305, 1805443995, 1804551623, 1803658189, 1802763694, 1801868139, 1800971523,
+1800073849, 1799175115, 1798275323, 1797374472, 1796472565, 1795569601, 1794665580, 1793760504,
+1792854372, 1791947186, 1791038946, 1790129652, 1789219305, 1788307905, 1787395453, 1786481950,
+1785567396, 1784651792, 1783735137, 1782817434, 1781898681, 1780978881, 1780058032, 1779136137,
+1778213194, 1777289206, 1776364172, 1775438094, 1774510970, 1773582803, 1772653593, 1771723340,
+1770792044, 1769859707, 1768926328, 1767991909, 1767056450, 1766119952, 1765182414, 1764243838,
+1763304224, 1762363573, 1761421885, 1760479161, 1759535401, 1758590607, 1757644777, 1756697914,
+1755750017, 1754801087, 1753851126, 1752900132, 1751948107, 1750995052, 1750040966, 1749085851,
+1748129707, 1747172535, 1746214334, 1745255107, 1744294853, 1743333573, 1742371267, 1741407936,
+1740443581, 1739478202, 1738511799, 1737544374, 1736575927, 1735606458, 1734635968, 1733664458,
+1732691928, 1731718378, 1730743810, 1729768224, 1728791620, 1727813999, 1726835361, 1725855708,
+1724875040, 1723893357, 1722910659, 1721926948, 1720942225, 1719956488, 1718969740, 1717981981,
+1716993211, 1716003431, 1715012642, 1714020844, 1713028037, 1712034223, 1711039401, 1710043573,
+1709046739, 1708048900, 1707050055, 1706050207, 1705049355, 1704047500, 1703044642, 1702040783,
+1701035922, 1700030061, 1699023199, 1698015339, 1697006479, 1695996621, 1694985765, 1693973912,
+1692961062, 1691947217, 1690932376, 1689916541, 1688899711, 1687881888, 1686863072, 1685843263,
+1684822463, 1683800672, 1682777890, 1681754118, 1680729357, 1679703608, 1678676870, 1677649144,
+1676620432, 1675590733, 1674560049, 1673528379, 1672495725, 1671462087, 1670427466, 1669391862,
+1668355276, 1667317709, 1666279161, 1665239632, 1664199124, 1663157637, 1662115172, 1661071729,
+1660027308, 1658981911, 1657935539, 1656888190, 1655839867, 1654790570, 1653740300, 1652689057,
+1651636841, 1650583654, 1649529496, 1648474367, 1647418269, 1646361202, 1645303166, 1644244162,
+1643184191, 1642123253, 1641061349, 1639998480, 1638934646, 1637869848, 1636804087, 1635737362,
+1634669676, 1633601027, 1632531418, 1631460848, 1630389319, 1629316830, 1628243383, 1627168978,
+1626093616, 1625017297, 1623940023, 1622861793, 1621782608, 1620702469, 1619621377, 1618539332,
+1617456335, 1616372386, 1615287487, 1614201637, 1613114838, 1612027089, 1610938393, 1609848749,
+1608758157, 1607666620, 1606574136, 1605480708, 1604386335, 1603291018, 1602194758, 1601097555,
+1599999411, 1598900325, 1597800299, 1596699333, 1595597428, 1594494583, 1593390801, 1592286082,
+1591180426, 1590073833, 1588966306, 1587857843, 1586748447, 1585638117, 1584526854, 1583414660,
+1582301533, 1581187476, 1580072489, 1578956572, 1577839726, 1576721952, 1575603251, 1574483623,
+1573363068, 1572241588, 1571119183, 1569995854, 1568871601, 1567746425, 1566620327, 1565493307,
+1564365367, 1563236506, 1562106725, 1560976026, 1559844408, 1558711873, 1557578421, 1556444052,
+1555308768, 1554172569, 1553035455, 1551897428, 1550758488, 1549618636, 1548477872, 1547336197,
+1546193612, 1545050118, 1543905714, 1542760402, 1541614183, 1540467057, 1539319024, 1538170087,
+1537020244, 1535869497, 1534717846, 1533565293, 1532411837, 1531257480, 1530102222, 1528946064,
+1527789007, 1526631051, 1525472197, 1524312445, 1523151797, 1521990252, 1520827813, 1519664478,
+1518500250, 1517335128, 1516169114, 1515002208, 1513834411, 1512665723, 1511496145, 1510325678,
+1509154322, 1507982079, 1506808949, 1505634932, 1504460029, 1503284242, 1502107570, 1500930014,
+1499751576, 1498572255, 1497392053, 1496210969, 1495029006, 1493846163, 1492662441, 1491477842,
+1490292364, 1489106011, 1487918781, 1486730675, 1485541696, 1484351842, 1483161115, 1481969516,
+1480777044, 1479583702, 1478389489, 1477194407, 1475998456, 1474801636, 1473603949, 1472405394,
+1471205974, 1470005688, 1468804538, 1467602523, 1466399645, 1465195904, 1463991302, 1462785838,
+1461579514, 1460372329, 1459164286, 1457955385, 1456745625, 1455535009, 1454323536, 1453111208,
+1451898025, 1450683988, 1449469098, 1448253355, 1447036760, 1445819314, 1444601017, 1443381870,
+1442161874, 1440941030, 1439719338, 1438496799, 1437273414, 1436049184, 1434824109, 1433598189,
+1432371426, 1431143821, 1429915374, 1428686085, 1427455956, 1426224988, 1424993180, 1423760534,
+1422527051, 1421292730, 1420057574, 1418821582, 1417584755, 1416347095, 1415108601, 1413869275,
+1412629117, 1411388129, 1410146309, 1408903661, 1407660183, 1406415878, 1405170745, 1403924785,
+1402678000, 1401430389, 1400181954, 1398932695, 1397682613, 1396431709, 1395179984, 1393927438,
+1392674072, 1391419886, 1390164882, 1388909060, 1387652422, 1386394966, 1385136696, 1383877610,
+1382617710, 1381356997, 1380095472, 1378833134, 1377569986, 1376306026, 1375041258, 1373775680,
+1372509294, 1371242101, 1369974101, 1368705296, 1367435685, 1366165269, 1364894050, 1363622028,
+1362349204, 1361075579, 1359801152, 1358525926, 1357249901, 1355973077, 1354695455, 1353417037,
+1352137822, 1350857812, 1349577007, 1348295409, 1347013017, 1345729833, 1344445857, 1343161090,
+1341875533, 1340589187, 1339302052, 1338014129, 1336725419, 1335435923, 1334145641, 1332854574,
+1331562723, 1330270089, 1328976672, 1327682474, 1326387494, 1325091734, 1323795195, 1322497877,
+1321199781, 1319900907, 1318601257, 1317300832, 1315999631, 1314697657, 1313394909, 1312091388,
+1310787095, 1309482032, 1308176198, 1306869594, 1305562222, 1304254082, 1302945174, 1301635500,
+1300325060, 1299013855, 1297701886, 1296389154, 1295075659, 1293761402, 1292446384, 1291130606,
+1289814068, 1288496772, 1287178717, 1285859905, 1284540337, 1283220013, 1281898935, 1280577102,
+1279254516, 1277931177, 1276607086, 1275282245, 1273956653, 1272630312, 1271303222, 1269975384,
+1268646800, 1267317469, 1265987392, 1264656571, 1263325005, 1261992697, 1260659646, 1259325853,
+1257991320, 1256656047, 1255320034, 1253983283, 1252645794, 1251307568, 1249968606, 1248628909,
+1247288478, 1245947312, 1244605414, 1243262783, 1241919421, 1240575329, 1239230506, 1237884955,
+1236538675, 1235191668, 1233843935, 1232495475, 1231146291, 1229796382, 1228445750, 1227094395,
+1225742318, 1224389521, 1223036002, 1221681765, 1220326809, 1218971135, 1217614743, 1216257636,
+1214899813, 1213541275, 1212182024, 1210822059, 1209461382, 1208099993, 1206737894, 1205375085,
+1204011567, 1202647340, 1201282407, 1199916766, 1198550419, 1197183368, 1195815612, 1194447153,
+1193077991, 1191708127, 1190337562, 1188966297, 1187594332, 1186221669, 1184848308, 1183474250,
+1182099496, 1180724046, 1179347902, 1177971064, 1176593533, 1175215310, 1173836395, 1172456790,
+1171076495, 1169695512, 1168313840, 1166931481, 1165548435, 1164164704, 1162780288, 1161395188,
+1160009405, 1158622939, 1157235792, 1155847964, 1154459456, 1153070269, 1151680403, 1150289860,
+1148898640, 1147506745, 1146114174, 1144720929, 1143327011, 1141932420, 1140537158, 1139141224,
+1137744621, 1136347348, 1134949406, 1133550797, 1132151521, 1130751579, 1129350972, 1127949701,
+1126547765, 1125145168, 1123741908, 1122337987, 1120933406, 1119528166, 1118122267, 1116715710,
+1115308496, 1113900627, 1112492101, 1111082922, 1109673089, 1108262603, 1106851465, 1105439676,
+1104027237, 1102614148, 1101200410, 1099786025, 1098370993, 1096955314, 1095538991, 1094122023,
+1092704411, 1091286156, 1089867259, 1088447722, 1087027544, 1085606726, 1084185270, 1082763176,
+1081340445, 1079917078, 1078493076, 1077068439, 1075643169, 1074217266, 1072790730, 1071363564,
+1069935768, 1068507342, 1067078288, 1065648605, 1064218296, 1062787361, 1061355801, 1059923616,
+1058490808, 1057057377, 1055623324, 1054188651, 1052753357, 1051317443, 1049880912, 1048443763,
+1047005996, 1045567615, 1044128617, 1042689006, 1041248781, 1039807944, 1038366495, 1036924436,
+1035481766, 1034038487, 1032594600, 1031150105, 1029705004, 1028259297, 1026812985, 1025366069,
+1023918550, 1022470428, 1021021705, 1019572382, 1018122458, 1016671936, 1015220816, 1013769098,
+1012316784, 1010863875, 1009410370, 1007956272, 1006501581, 1005046298, 1003590424, 1002133959,
+1000676905, 999219262, 997761031, 996302214, 994842810, 993382821, 991922248, 990461091,
+988999351, 987537030, 986074127, 984610645, 983146583, 981681943, 980216726, 978750932,
+977284562, 975817617, 974350098, 972882006, 971413342, 969944106, 968474300, 967003923,
+965532978, 964061465, 962589385, 961116739, 959643527, 958169751, 956695411, 955220508,
+953745043, 952269017, 950792431, 949315286, 947837582, 946359321, 944880503, 943401129,
+941921200, 940440717, 938959681, 937478092, 935995952, 934513261, 933030021, 931546231,
+930061894, 928577010, 927091579, 925605603, 924119082, 922632018, 921144411, 919656262,
+918167572, 916678342, 915188572, 913698265, 912207419, 910716038, 909224120, 907731667,
+906238681, 904745161, 903251110, 901756526, 900261413, 898765769, 897269597, 895772898,
+894275671, 892777918, 891279640, 889780838, 888281512, 886781663, 885281293, 883780402,
+882278992, 880777062, 879274614, 877771649, 876268167, 874764170, 873259659, 871754633,
+870249095, 868743045, 867236484, 865729413, 864221832, 862713743, 861205147, 859696043,
+858186435, 856676321, 855165703, 853654582, 852142959, 850630835, 849118210, 847605086,
+846091463, 844577343, 843062726, 841547612, 840032004, 838515901, 836999305, 835482217,
+833964638, 832446567, 830928007, 829408958, 827889422, 826369398, 824848888, 823327893,
+821806413, 820284450, 818762005, 817239078, 815715670, 814191782, 812667415, 811142571,
+809617249, 808091450, 806565177, 805038429, 803511207, 801983513, 800455346, 798926709,
+797397602, 795868026, 794337982, 792807470, 791276492, 789745049, 788213141, 786680769,
+785147934, 783614638, 782080880, 780546663, 779011986, 777476851, 775941259, 774405210,
+772868706, 771331747, 769794334, 768256469, 766718151, 765179382, 763640164, 762100496,
+760560380, 759019816, 757478806, 755937350, 754395449, 752853105, 751310318, 749767089,
+748223418, 746679308, 745134758, 743589770, 742044345, 740498483, 738952186, 737405453,
+735858287, 734310688, 732762657, 731214195, 729665303, 728115982, 726566232, 725016055,
+723465451, 721914422, 720362968, 718811090, 717258790, 715706067, 714152924, 712599360,
+711045377, 709490976, 707936158, 706380923, 704825272, 703269207, 701712728, 700155836,
+698598533, 697040818, 695482694, 693924160, 692365218, 690805869, 689246113, 687685952,
+686125387, 684564417, 683003045, 681441272, 679879097, 678316522, 676753549, 675190177,
+673626408, 672062243, 670497682, 668932727, 667367379, 665801638, 664235505, 662668981,
+661102068, 659534766, 657967075, 656398998, 654830535, 653261686, 651692453, 650122837,
+648552838, 646982457, 645411696, 643840556, 642269036, 640697139, 639124865, 637552215,
+635979190, 634405791, 632832018, 631257873, 629683357, 628108471, 626533215, 624957590,
+623381598, 621805239, 620228514, 618651424, 617073971, 615496154, 613917975, 612339436,
+610760536, 609181276, 607601658, 606021683, 604441352, 602860664, 601279623, 599698227,
+598116479, 596534378, 594951927, 593369126, 591785976, 590202477, 588618632, 587034440,
+585449903, 583865021, 582279796, 580694229, 579108320, 577522070, 575935480, 574348552,
+572761285, 571173682, 569585743, 567997469, 566408860, 564819919, 563230645, 561641039,
+560051104, 558460839, 556870245, 555279324, 553688076, 552096502, 550504604, 548912382,
+547319836, 545726969, 544133781, 542540273, 540946445, 539352300, 537757837, 536163058,
+534567963, 532972554, 531376831, 529780796, 528184449, 526587791, 524990824, 523393547,
+521795963, 520198072, 518599875, 517001373, 515402566, 513803457, 512204045, 510604332,
+509004318, 507404005, 505803394, 504202485, 502601279, 500999778, 499397982, 497795892,
+496193509, 494590835, 492987869, 491384614, 489781069, 488177236, 486573117, 484968710,
+483364019, 481759043, 480153784, 478548243, 476942419, 475336316, 473729932, 472123270,
+470516330, 468909114, 467301622, 465693854, 464085813, 462477499, 460868912, 459260055,
+457650927, 456041530, 454431865, 452821933, 451211734, 449601270, 447990541, 446379549,
+444768294, 443156777, 441545000, 439932963, 438320667, 436708113, 435095303, 433482236,
+431868915, 430255339, 428641511, 427027430, 425413098, 423798515, 422183684, 420568604,
+418953276, 417337703, 415721883, 414105819, 412489512, 410872962, 409256170, 407639137,
+406021865, 404404353, 402786604, 401168618, 399550396, 397931939, 396313247, 394694323,
+393075166, 391455778, 389836160, 388216313, 386596237, 384975934, 383355404, 381734649,
+380113669, 378492466, 376871039, 375249392, 373627523, 372005435, 370383128, 368760603,
+367137861, 365514903, 363891730, 362268343, 360644742, 359020930, 357396906, 355772673,
+354148230, 352523578, 350898719, 349273654, 347648383, 346022908, 344397230, 342771348,
+341145265, 339518981, 337892498, 336265816, 334638936, 333011859, 331384586, 329757119,
+328129457, 326501602, 324873555, 323245317, 321616889, 319988272, 318359466, 316730474,
+315101295, 313471930, 311842381, 310212649, 308582734, 306952638, 305322361, 303691904,
+302061269, 300430456, 298799466, 297168301, 295536961, 293905447, 292273760, 290641901,
+289009871, 287377671, 285745302, 284112765, 282480061, 280847190, 279214155, 277580955,
+275947592, 274314066, 272680379, 271046532, 269412525, 267778360, 266144038, 264509558,
+262874923, 261240134, 259605191, 257970095, 256334847, 254699448, 253063900, 251428203,
+249792358, 248156366, 246520228, 244883945, 243247518, 241610947, 239974235, 238337382,
+236700388, 235063255, 233425984, 231788575, 230151030, 228513350, 226875535, 225237587,
+223599506, 221961294, 220322951, 218684479, 217045878, 215407149, 213768293, 212129312,
+210490206, 208850976, 207211624, 205572149, 203932553, 202292838, 200653003, 199013051,
+197372981, 195732795, 194092495, 192452080, 190811551, 189170911, 187530159, 185889297,
+184248325, 182607245, 180966058, 179324764, 177683365, 176041861, 174400254, 172758544,
+171116733, 169474820, 167832808, 166190698, 164548489, 162906184, 161263783, 159621287,
+157978697, 156336015, 154693240, 153050374, 151407418, 149764374, 148121241, 146478021,
+144834714, 143191323, 141547847, 139904288, 138260647, 136616925, 134973122, 133329239,
+131685278, 130041240, 128397125, 126752935, 125108670, 123464332, 121819921, 120175438,
+118530885, 116886262, 115241570, 113596810, 111951983, 110307091, 108662134, 107017112,
+105372028, 103726882, 102081675, 100436408, 98791081, 97145697, 95500255, 93854758,
+ 92209205, 90563597, 88917937, 87272224, 85626460, 83980645, 82334782, 80688869,
+ 79042909, 77396903, 75750851, 74104755, 72458615, 70812432, 69166208, 67519943,
+ 65873638, 64227295, 62580914, 60934496, 59288042, 57641553, 55995030, 54348475,
+ 52701887, 51055268, 49408620, 47761942, 46115236, 44468503, 42821744, 41174960,
+ 39528151, 37881320, 36234466, 34587590, 32940695, 31293780, 29646846, 27999895,
+ 26352928, 24705945, 23058947, 21411936, 19764913, 18117878, 16470832, 14823776,
+ 13176712, 11529640, 9882561, 8235476, 6588387, 4941294, 3294197, 1647099
+};
+
+uint16_t ff_fft_offsets_lut[21845];
+
+static void fft_lut_init(uint16_t *table, int off, int size, int *index)
+{
+ if (size < 16) {
+ table[*index] = off >> 2;
+ (*index)++;
+ }
+ else {
+ fft_lut_init(table, off, size >> 1, index);
+ fft_lut_init(table, off + (size >> 1), size >> 2, index);
+ fft_lut_init(table, off + 3 * (size >> 2), size >> 2, index);
+ }
+}
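+
+/*
+ * Illustrative note (not part of upstream FFmpeg): fft_lut_init() follows the
+ * split-radix recursion (one half-size and two quarter-size sub-transforms)
+ * over a 2^17-point FFT and stores offset >> 2 for every leaf smaller than 16
+ * points. fft_calc_c() in fft_template.c rescales these entries with << 2,
+ * << 3 and << nbits to address the 4-point, 8-point and larger stages.
+ */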
+
+static void fft_lut_init_start(void)
+{
+ int n = 0;
+
+ fft_lut_init(ff_fft_offsets_lut, 0, 1 << 17, &n);
+}
+
+void ff_fft_lut_init(void)
+{
+ static AVOnce init_once = AV_ONCE_INIT;
+
+ ff_thread_once(&init_once, fft_lut_init_start);
+}
diff --git a/media/ffvpx/libavcodec/fft_table.h b/media/ffvpx/libavcodec/fft_table.h
new file mode 100644
index 0000000000..09df49f2b8
--- /dev/null
+++ b/media/ffvpx/libavcodec/fft_table.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Stanislav Ocovaj (socovaj@mips.com)
+ * Goran Cordasic (goran@mips.com)
+ * Djordje Pesut (djordje@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * definitions and tables for FFT
+ */
+#ifndef AVCODEC_FFT_TABLE_H
+#define AVCODEC_FFT_TABLE_H
+
+#include "libavcodec/fft.h"
+
+#define MAX_LOG2_NFFT 17 //!< log2 of the maximum allowed FFT size
+#define MAX_FFT_SIZE (1 << MAX_LOG2_NFFT)
+
+extern const int32_t ff_w_tab_sr[];
+extern uint16_t ff_fft_offsets_lut[];
+void ff_fft_lut_init(void);
+
+#endif /* AVCODEC_FFT_TABLE_H */
diff --git a/media/ffvpx/libavcodec/fft_template.c b/media/ffvpx/libavcodec/fft_template.c
new file mode 100644
index 0000000000..f2742a3ae8
--- /dev/null
+++ b/media/ffvpx/libavcodec/fft_template.c
@@ -0,0 +1,628 @@
+/*
+ * FFT/IFFT transforms
+ * Copyright (c) 2008 Loren Merritt
+ * Copyright (c) 2002 Fabrice Bellard
+ * Partly based on libdjbfft by D. J. Bernstein
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * FFT/IFFT transforms.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include "libavutil/mathematics.h"
+#include "libavutil/thread.h"
+#include "fft.h"
+#include "fft-internal.h"
+
+#if !FFT_FLOAT
+#include "fft_table.h"
+#else /* !FFT_FLOAT */
+
+/* cos(2*pi*x/n) for 0<=x<=n/4, followed by its reverse */
+#if !CONFIG_HARDCODED_TABLES
+COSTABLE(16);
+COSTABLE(32);
+COSTABLE(64);
+COSTABLE(128);
+COSTABLE(256);
+COSTABLE(512);
+COSTABLE(1024);
+COSTABLE(2048);
+COSTABLE(4096);
+COSTABLE(8192);
+COSTABLE(16384);
+COSTABLE(32768);
+COSTABLE(65536);
+COSTABLE(131072);
+
+static av_cold void init_ff_cos_tabs(int index)
+{
+ int i;
+ int m = 1<<index;
+ double freq = 2*M_PI/m;
+ FFTSample *tab = FFT_NAME(ff_cos_tabs)[index];
+ for(i=0; i<=m/4; i++)
+ tab[i] = FIX15(cos(i*freq));
+ for(i=1; i<m/4; i++)
+ tab[m/2-i] = tab[i];
+}
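+
+/*
+ * Illustrative note (not part of upstream FFmpeg): for index 4 the table
+ * built above is
+ *
+ *   ff_cos_16 = { cos(0),    cos(pi/8),   cos(pi/4), cos(3*pi/8),
+ *                 cos(pi/2), cos(3*pi/8), cos(pi/4), cos(pi/8) }
+ *
+ * i.e. the first quarter period followed by its mirror, so tab[m/4 + k]
+ * equals sin(2*pi*k/m) and one table provides both cos and sin twiddles.
+ */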
+
+typedef struct CosTabsInitOnce {
+ void (*func)(void);
+ AVOnce control;
+} CosTabsInitOnce;
+
+#define INIT_FF_COS_TABS_FUNC(index, size) \
+static av_cold void init_ff_cos_tabs_ ## size (void)\
+{ \
+ init_ff_cos_tabs(index); \
+}
+
+INIT_FF_COS_TABS_FUNC(4, 16)
+INIT_FF_COS_TABS_FUNC(5, 32)
+INIT_FF_COS_TABS_FUNC(6, 64)
+INIT_FF_COS_TABS_FUNC(7, 128)
+INIT_FF_COS_TABS_FUNC(8, 256)
+INIT_FF_COS_TABS_FUNC(9, 512)
+INIT_FF_COS_TABS_FUNC(10, 1024)
+INIT_FF_COS_TABS_FUNC(11, 2048)
+INIT_FF_COS_TABS_FUNC(12, 4096)
+INIT_FF_COS_TABS_FUNC(13, 8192)
+INIT_FF_COS_TABS_FUNC(14, 16384)
+INIT_FF_COS_TABS_FUNC(15, 32768)
+INIT_FF_COS_TABS_FUNC(16, 65536)
+INIT_FF_COS_TABS_FUNC(17, 131072)
+
+static CosTabsInitOnce cos_tabs_init_once[] = {
+ { NULL },
+ { NULL },
+ { NULL },
+ { NULL },
+ { init_ff_cos_tabs_16, AV_ONCE_INIT },
+ { init_ff_cos_tabs_32, AV_ONCE_INIT },
+ { init_ff_cos_tabs_64, AV_ONCE_INIT },
+ { init_ff_cos_tabs_128, AV_ONCE_INIT },
+ { init_ff_cos_tabs_256, AV_ONCE_INIT },
+ { init_ff_cos_tabs_512, AV_ONCE_INIT },
+ { init_ff_cos_tabs_1024, AV_ONCE_INIT },
+ { init_ff_cos_tabs_2048, AV_ONCE_INIT },
+ { init_ff_cos_tabs_4096, AV_ONCE_INIT },
+ { init_ff_cos_tabs_8192, AV_ONCE_INIT },
+ { init_ff_cos_tabs_16384, AV_ONCE_INIT },
+ { init_ff_cos_tabs_32768, AV_ONCE_INIT },
+ { init_ff_cos_tabs_65536, AV_ONCE_INIT },
+ { init_ff_cos_tabs_131072, AV_ONCE_INIT },
+};
+
+av_cold void ff_init_ff_cos_tabs(int index)
+{
+ ff_thread_once(&cos_tabs_init_once[index].control, cos_tabs_init_once[index].func);
+}
+#endif
+COSTABLE_CONST FFTSample * const FFT_NAME(ff_cos_tabs)[] = {
+ NULL, NULL, NULL, NULL,
+ FFT_NAME(ff_cos_16),
+ FFT_NAME(ff_cos_32),
+ FFT_NAME(ff_cos_64),
+ FFT_NAME(ff_cos_128),
+ FFT_NAME(ff_cos_256),
+ FFT_NAME(ff_cos_512),
+ FFT_NAME(ff_cos_1024),
+ FFT_NAME(ff_cos_2048),
+ FFT_NAME(ff_cos_4096),
+ FFT_NAME(ff_cos_8192),
+ FFT_NAME(ff_cos_16384),
+ FFT_NAME(ff_cos_32768),
+ FFT_NAME(ff_cos_65536),
+ FFT_NAME(ff_cos_131072),
+};
+
+#endif /* FFT_FLOAT */
+
+static void fft_permute_c(FFTContext *s, FFTComplex *z);
+static void fft_calc_c(FFTContext *s, FFTComplex *z);
+
+static int split_radix_permutation(int i, int n, int inverse)
+{
+ int m;
+ if(n <= 2) return i&1;
+ m = n >> 1;
+ if(!(i&m)) return split_radix_permutation(i, m, inverse)*2;
+ m >>= 1;
+ if(inverse == !(i&m)) return split_radix_permutation(i, m, inverse)*4 + 1;
+ else return split_radix_permutation(i, m, inverse)*4 - 1;
+}
+
+
+static const int avx_tab[] = {
+ 0, 4, 1, 5, 8, 12, 9, 13, 2, 6, 3, 7, 10, 14, 11, 15
+};
+
+static int is_second_half_of_fft32(int i, int n)
+{
+ if (n <= 32)
+ return i >= 16;
+ else if (i < n/2)
+ return is_second_half_of_fft32(i, n/2);
+ else if (i < 3*n/4)
+ return is_second_half_of_fft32(i - n/2, n/4);
+ else
+ return is_second_half_of_fft32(i - 3*n/4, n/4);
+}
+
+static av_cold void fft_perm_avx(FFTContext *s)
+{
+ int i;
+ int n = 1 << s->nbits;
+
+ for (i = 0; i < n; i += 16) {
+ int k;
+ if (is_second_half_of_fft32(i, n)) {
+ for (k = 0; k < 16; k++)
+ s->revtab[-split_radix_permutation(i + k, n, s->inverse) & (n - 1)] =
+ i + avx_tab[k];
+
+ } else {
+ for (k = 0; k < 16; k++) {
+ int j = i + k;
+ j = (j & ~7) | ((j >> 1) & 3) | ((j << 2) & 4);
+ s->revtab[-split_radix_permutation(i + k, n, s->inverse) & (n - 1)] = j;
+ }
+ }
+ }
+}
+
+av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
+{
+ int i, j, n;
+
+ s->revtab = NULL;
+ s->revtab32 = NULL;
+
+ if (nbits < 2 || nbits > 17)
+ goto fail;
+ s->nbits = nbits;
+ n = 1 << nbits;
+
+ if (nbits <= 16) {
+ s->revtab = av_malloc(n * sizeof(uint16_t));
+ if (!s->revtab)
+ goto fail;
+ } else {
+ s->revtab32 = av_malloc(n * sizeof(uint32_t));
+ if (!s->revtab32)
+ goto fail;
+ }
+ s->tmp_buf = av_malloc(n * sizeof(FFTComplex));
+ if (!s->tmp_buf)
+ goto fail;
+ s->inverse = inverse;
+ s->fft_permutation = FF_FFT_PERM_DEFAULT;
+
+ s->fft_permute = fft_permute_c;
+ s->fft_calc = fft_calc_c;
+#if CONFIG_MDCT
+ s->imdct_calc = ff_imdct_calc_c;
+ s->imdct_half = ff_imdct_half_c;
+ s->mdct_calc = ff_mdct_calc_c;
+#endif
+
+#if FFT_FLOAT
+#if ARCH_AARCH64
+ ff_fft_init_aarch64(s);
+#elif ARCH_ARM
+ ff_fft_init_arm(s);
+#elif ARCH_PPC
+ ff_fft_init_ppc(s);
+#elif ARCH_X86
+ ff_fft_init_x86(s);
+#endif
+#if HAVE_MIPSFPU
+ ff_fft_init_mips(s);
+#endif
+ for(j=4; j<=nbits; j++) {
+ ff_init_ff_cos_tabs(j);
+ }
+#else /* FFT_FLOAT */
+ ff_fft_lut_init();
+#endif
+
+
+ if (ARCH_X86 && FFT_FLOAT && s->fft_permutation == FF_FFT_PERM_AVX) {
+ fft_perm_avx(s);
+ } else {
+#define PROCESS_FFT_PERM_SWAP_LSBS(num) do {\
+ for(i = 0; i < n; i++) {\
+ int k;\
+ j = i;\
+ j = (j & ~3) | ((j >> 1) & 1) | ((j << 1) & 2);\
+ k = -split_radix_permutation(i, n, s->inverse) & (n - 1);\
+ s->revtab##num[k] = j;\
+ } \
+} while(0);
+
+#define PROCESS_FFT_PERM_DEFAULT(num) do {\
+ for(i = 0; i < n; i++) {\
+ int k;\
+ j = i;\
+ k = -split_radix_permutation(i, n, s->inverse) & (n - 1);\
+ s->revtab##num[k] = j;\
+ } \
+} while(0);
+
+#define SPLIT_RADIX_PERMUTATION(num) do { \
+ if (s->fft_permutation == FF_FFT_PERM_SWAP_LSBS) {\
+ PROCESS_FFT_PERM_SWAP_LSBS(num) \
+ } else {\
+ PROCESS_FFT_PERM_DEFAULT(num) \
+ }\
+} while(0);
+
+ if (s->revtab)
+ SPLIT_RADIX_PERMUTATION()
+ if (s->revtab32)
+ SPLIT_RADIX_PERMUTATION(32)
+
+#undef PROCESS_FFT_PERM_DEFAULT
+#undef PROCESS_FFT_PERM_SWAP_LSBS
+#undef SPLIT_RADIX_PERMUTATION
+ }
+
+ return 0;
+ fail:
+ av_freep(&s->revtab);
+ av_freep(&s->revtab32);
+ av_freep(&s->tmp_buf);
+ return -1;
+}
+
+static void fft_permute_c(FFTContext *s, FFTComplex *z)
+{
+ int j, np;
+ const uint16_t *revtab = s->revtab;
+ const uint32_t *revtab32 = s->revtab32;
+ np = 1 << s->nbits;
+ /* TODO: handle split-radix permute in a more optimal way, probably in-place */
+ if (revtab) {
+ for(j=0;j<np;j++) s->tmp_buf[revtab[j]] = z[j];
+ } else
+ for(j=0;j<np;j++) s->tmp_buf[revtab32[j]] = z[j];
+
+ memcpy(z, s->tmp_buf, np * sizeof(FFTComplex));
+}
+
+av_cold void ff_fft_end(FFTContext *s)
+{
+ av_freep(&s->revtab);
+ av_freep(&s->revtab32);
+ av_freep(&s->tmp_buf);
+}
+
+#if !FFT_FLOAT
+
+static void fft_calc_c(FFTContext *s, FFTComplex *z) {
+
+ int nbits, i, n, num_transforms, offset, step;
+ int n4, n2, n34;
+ unsigned tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ FFTComplex *tmpz;
+ const int fft_size = (1 << s->nbits);
+ int64_t accu;
+
+ num_transforms = (0x2aab >> (16 - s->nbits)) | 1;
+
+ for (n=0; n<num_transforms; n++){
+ offset = ff_fft_offsets_lut[n] << 2;
+ tmpz = z + offset;
+
+ tmp1 = tmpz[0].re + (unsigned)tmpz[1].re;
+ tmp5 = tmpz[2].re + (unsigned)tmpz[3].re;
+ tmp2 = tmpz[0].im + (unsigned)tmpz[1].im;
+ tmp6 = tmpz[2].im + (unsigned)tmpz[3].im;
+ tmp3 = tmpz[0].re - (unsigned)tmpz[1].re;
+ tmp8 = tmpz[2].im - (unsigned)tmpz[3].im;
+ tmp4 = tmpz[0].im - (unsigned)tmpz[1].im;
+ tmp7 = tmpz[2].re - (unsigned)tmpz[3].re;
+
+ tmpz[0].re = tmp1 + tmp5;
+ tmpz[2].re = tmp1 - tmp5;
+ tmpz[0].im = tmp2 + tmp6;
+ tmpz[2].im = tmp2 - tmp6;
+ tmpz[1].re = tmp3 + tmp8;
+ tmpz[3].re = tmp3 - tmp8;
+ tmpz[1].im = tmp4 - tmp7;
+ tmpz[3].im = tmp4 + tmp7;
+ }
+
+ if (fft_size < 8)
+ return;
+
+ num_transforms = (num_transforms >> 1) | 1;
+
+ for (n=0; n<num_transforms; n++){
+ offset = ff_fft_offsets_lut[n] << 3;
+ tmpz = z + offset;
+
+ tmp1 = tmpz[4].re + (unsigned)tmpz[5].re;
+ tmp3 = tmpz[6].re + (unsigned)tmpz[7].re;
+ tmp2 = tmpz[4].im + (unsigned)tmpz[5].im;
+ tmp4 = tmpz[6].im + (unsigned)tmpz[7].im;
+ tmp5 = tmp1 + tmp3;
+ tmp7 = tmp1 - tmp3;
+ tmp6 = tmp2 + tmp4;
+ tmp8 = tmp2 - tmp4;
+
+ tmp1 = tmpz[4].re - (unsigned)tmpz[5].re;
+ tmp2 = tmpz[4].im - (unsigned)tmpz[5].im;
+ tmp3 = tmpz[6].re - (unsigned)tmpz[7].re;
+ tmp4 = tmpz[6].im - (unsigned)tmpz[7].im;
+
+ tmpz[4].re = tmpz[0].re - tmp5;
+ tmpz[0].re = tmpz[0].re + tmp5;
+ tmpz[4].im = tmpz[0].im - tmp6;
+ tmpz[0].im = tmpz[0].im + tmp6;
+ tmpz[6].re = tmpz[2].re - tmp8;
+ tmpz[2].re = tmpz[2].re + tmp8;
+ tmpz[6].im = tmpz[2].im + tmp7;
+ tmpz[2].im = tmpz[2].im - tmp7;
+
+ accu = (int64_t)Q31(M_SQRT1_2)*(int)(tmp1 + tmp2);
+ tmp5 = (int32_t)((accu + 0x40000000) >> 31);
+ accu = (int64_t)Q31(M_SQRT1_2)*(int)(tmp3 - tmp4);
+ tmp7 = (int32_t)((accu + 0x40000000) >> 31);
+ accu = (int64_t)Q31(M_SQRT1_2)*(int)(tmp2 - tmp1);
+ tmp6 = (int32_t)((accu + 0x40000000) >> 31);
+ accu = (int64_t)Q31(M_SQRT1_2)*(int)(tmp3 + tmp4);
+ tmp8 = (int32_t)((accu + 0x40000000) >> 31);
+ tmp1 = tmp5 + tmp7;
+ tmp3 = tmp5 - tmp7;
+ tmp2 = tmp6 + tmp8;
+ tmp4 = tmp6 - tmp8;
+
+ tmpz[5].re = tmpz[1].re - tmp1;
+ tmpz[1].re = tmpz[1].re + tmp1;
+ tmpz[5].im = tmpz[1].im - tmp2;
+ tmpz[1].im = tmpz[1].im + tmp2;
+ tmpz[7].re = tmpz[3].re - tmp4;
+ tmpz[3].re = tmpz[3].re + tmp4;
+ tmpz[7].im = tmpz[3].im + tmp3;
+ tmpz[3].im = tmpz[3].im - tmp3;
+ }
+
+ step = 1 << ((MAX_LOG2_NFFT-4) - 4);
+ n4 = 4;
+
+ for (nbits=4; nbits<=s->nbits; nbits++){
+ n2 = 2*n4;
+ n34 = 3*n4;
+ num_transforms = (num_transforms >> 1) | 1;
+
+ for (n=0; n<num_transforms; n++){
+ const FFTSample *w_re_ptr = ff_w_tab_sr + step;
+ const FFTSample *w_im_ptr = ff_w_tab_sr + MAX_FFT_SIZE/(4*16) - step;
+ offset = ff_fft_offsets_lut[n] << nbits;
+ tmpz = z + offset;
+
+ tmp5 = tmpz[ n2].re + (unsigned)tmpz[n34].re;
+ tmp1 = tmpz[ n2].re - (unsigned)tmpz[n34].re;
+ tmp6 = tmpz[ n2].im + (unsigned)tmpz[n34].im;
+ tmp2 = tmpz[ n2].im - (unsigned)tmpz[n34].im;
+
+ tmpz[ n2].re = tmpz[ 0].re - tmp5;
+ tmpz[ 0].re = tmpz[ 0].re + tmp5;
+ tmpz[ n2].im = tmpz[ 0].im - tmp6;
+ tmpz[ 0].im = tmpz[ 0].im + tmp6;
+ tmpz[n34].re = tmpz[n4].re - tmp2;
+ tmpz[ n4].re = tmpz[n4].re + tmp2;
+ tmpz[n34].im = tmpz[n4].im + tmp1;
+ tmpz[ n4].im = tmpz[n4].im - tmp1;
+
+ for (i=1; i<n4; i++){
+ FFTSample w_re = w_re_ptr[0];
+ FFTSample w_im = w_im_ptr[0];
+ accu = (int64_t)w_re*tmpz[ n2+i].re;
+ accu += (int64_t)w_im*tmpz[ n2+i].im;
+ tmp1 = (int32_t)((accu + 0x40000000) >> 31);
+ accu = (int64_t)w_re*tmpz[ n2+i].im;
+ accu -= (int64_t)w_im*tmpz[ n2+i].re;
+ tmp2 = (int32_t)((accu + 0x40000000) >> 31);
+ accu = (int64_t)w_re*tmpz[n34+i].re;
+ accu -= (int64_t)w_im*tmpz[n34+i].im;
+ tmp3 = (int32_t)((accu + 0x40000000) >> 31);
+ accu = (int64_t)w_re*tmpz[n34+i].im;
+ accu += (int64_t)w_im*tmpz[n34+i].re;
+ tmp4 = (int32_t)((accu + 0x40000000) >> 31);
+
+ tmp5 = tmp1 + tmp3;
+ tmp1 = tmp1 - tmp3;
+ tmp6 = tmp2 + tmp4;
+ tmp2 = tmp2 - tmp4;
+
+ tmpz[ n2+i].re = tmpz[ i].re - tmp5;
+ tmpz[ i].re = tmpz[ i].re + tmp5;
+ tmpz[ n2+i].im = tmpz[ i].im - tmp6;
+ tmpz[ i].im = tmpz[ i].im + tmp6;
+ tmpz[n34+i].re = tmpz[n4+i].re - tmp2;
+ tmpz[ n4+i].re = tmpz[n4+i].re + tmp2;
+ tmpz[n34+i].im = tmpz[n4+i].im + tmp1;
+ tmpz[ n4+i].im = tmpz[n4+i].im - tmp1;
+
+ w_re_ptr += step;
+ w_im_ptr -= step;
+ }
+ }
+ step >>= 1;
+ n4 <<= 1;
+ }
+}
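+
+/*
+ * Illustrative note (not part of upstream FFmpeg): the loops above mirror the
+ * offset LUT built in fft_init_table.c -- the first pass handles every
+ * 4-point base transform, the second every 8-point group, and each further
+ * stage doubles n4 while reading twiddle factors from ff_w_tab_sr with a
+ * stride (step) that halves per stage.
+ */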
+
+#else /* !FFT_FLOAT */
+
+#define BUTTERFLIES(a0,a1,a2,a3) {\
+ BF(t3, t5, t5, t1);\
+ BF(a2.re, a0.re, a0.re, t5);\
+ BF(a3.im, a1.im, a1.im, t3);\
+ BF(t4, t6, t2, t6);\
+ BF(a3.re, a1.re, a1.re, t4);\
+ BF(a2.im, a0.im, a0.im, t6);\
+}
+
+// force loading all the inputs before storing any.
+// this is slightly slower for small data, but avoids store->load aliasing
+// for addresses separated by large powers of 2.
+#define BUTTERFLIES_BIG(a0,a1,a2,a3) {\
+ FFTSample r0=a0.re, i0=a0.im, r1=a1.re, i1=a1.im;\
+ BF(t3, t5, t5, t1);\
+ BF(a2.re, a0.re, r0, t5);\
+ BF(a3.im, a1.im, i1, t3);\
+ BF(t4, t6, t2, t6);\
+ BF(a3.re, a1.re, r1, t4);\
+ BF(a2.im, a0.im, i0, t6);\
+}
+
+#define TRANSFORM(a0,a1,a2,a3,wre,wim) {\
+ CMUL(t1, t2, a2.re, a2.im, wre, -wim);\
+ CMUL(t5, t6, a3.re, a3.im, wre, wim);\
+ BUTTERFLIES(a0,a1,a2,a3)\
+}
+
+#define TRANSFORM_ZERO(a0,a1,a2,a3) {\
+ t1 = a2.re;\
+ t2 = a2.im;\
+ t5 = a3.re;\
+ t6 = a3.im;\
+ BUTTERFLIES(a0,a1,a2,a3)\
+}
+
+/* z[0...8n-1], w[1...2n-1] */
+#define PASS(name)\
+static void name(FFTComplex *z, const FFTSample *wre, unsigned int n)\
+{\
+ FFTDouble t1, t2, t3, t4, t5, t6;\
+ int o1 = 2*n;\
+ int o2 = 4*n;\
+ int o3 = 6*n;\
+ const FFTSample *wim = wre+o1;\
+ n--;\
+\
+ TRANSFORM_ZERO(z[0],z[o1],z[o2],z[o3]);\
+ TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
+ do {\
+ z += 2;\
+ wre += 2;\
+ wim -= 2;\
+ TRANSFORM(z[0],z[o1],z[o2],z[o3],wre[0],wim[0]);\
+ TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
+ } while(--n);\
+}
+
+PASS(pass)
+#if !CONFIG_SMALL
+#undef BUTTERFLIES
+#define BUTTERFLIES BUTTERFLIES_BIG
+PASS(pass_big)
+#endif
+
+#define DECL_FFT(n,n2,n4)\
+static void fft##n(FFTComplex *z)\
+{\
+ fft##n2(z);\
+ fft##n4(z+n4*2);\
+ fft##n4(z+n4*3);\
+ pass(z,FFT_NAME(ff_cos_##n),n4/2);\
+}
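+
+/*
+ * Illustrative note (not part of upstream FFmpeg): DECL_FFT expands to the
+ * split-radix recursion -- fft##n runs one FFT of size n/2 on z[0..n/2-1] and
+ * two FFTs of size n/4 on z[n/2..3n/4-1] and z[3n/4..n-1], then pass()
+ * combines the partial results with twiddle factors from ff_cos_##n.
+ */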
+
+static void fft4(FFTComplex *z)
+{
+ FFTDouble t1, t2, t3, t4, t5, t6, t7, t8;
+
+ BF(t3, t1, z[0].re, z[1].re);
+ BF(t8, t6, z[3].re, z[2].re);
+ BF(z[2].re, z[0].re, t1, t6);
+ BF(t4, t2, z[0].im, z[1].im);
+ BF(t7, t5, z[2].im, z[3].im);
+ BF(z[3].im, z[1].im, t4, t8);
+ BF(z[3].re, z[1].re, t3, t7);
+ BF(z[2].im, z[0].im, t2, t5);
+}
+
+static void fft8(FFTComplex *z)
+{
+ FFTDouble t1, t2, t3, t4, t5, t6;
+
+ fft4(z);
+
+ BF(t1, z[5].re, z[4].re, -z[5].re);
+ BF(t2, z[5].im, z[4].im, -z[5].im);
+ BF(t5, z[7].re, z[6].re, -z[7].re);
+ BF(t6, z[7].im, z[6].im, -z[7].im);
+
+ BUTTERFLIES(z[0],z[2],z[4],z[6]);
+ TRANSFORM(z[1],z[3],z[5],z[7],sqrthalf,sqrthalf);
+}
+
+#if !CONFIG_SMALL
+static void fft16(FFTComplex *z)
+{
+ FFTDouble t1, t2, t3, t4, t5, t6;
+ FFTSample cos_16_1 = FFT_NAME(ff_cos_16)[1];
+ FFTSample cos_16_3 = FFT_NAME(ff_cos_16)[3];
+
+ fft8(z);
+ fft4(z+8);
+ fft4(z+12);
+
+ TRANSFORM_ZERO(z[0],z[4],z[8],z[12]);
+ TRANSFORM(z[2],z[6],z[10],z[14],sqrthalf,sqrthalf);
+ TRANSFORM(z[1],z[5],z[9],z[13],cos_16_1,cos_16_3);
+ TRANSFORM(z[3],z[7],z[11],z[15],cos_16_3,cos_16_1);
+}
+#else
+DECL_FFT(16,8,4)
+#endif
+DECL_FFT(32,16,8)
+DECL_FFT(64,32,16)
+DECL_FFT(128,64,32)
+DECL_FFT(256,128,64)
+DECL_FFT(512,256,128)
+#if !CONFIG_SMALL
+#define pass pass_big
+#endif
+DECL_FFT(1024,512,256)
+DECL_FFT(2048,1024,512)
+DECL_FFT(4096,2048,1024)
+DECL_FFT(8192,4096,2048)
+DECL_FFT(16384,8192,4096)
+DECL_FFT(32768,16384,8192)
+DECL_FFT(65536,32768,16384)
+DECL_FFT(131072,65536,32768)
+
+static void (* const fft_dispatch[])(FFTComplex*) = {
+ fft4, fft8, fft16, fft32, fft64, fft128, fft256, fft512, fft1024,
+ fft2048, fft4096, fft8192, fft16384, fft32768, fft65536, fft131072
+};
+
+static void fft_calc_c(FFTContext *s, FFTComplex *z)
+{
+ fft_dispatch[s->nbits-2](z);
+}
+#endif /* !FFT_FLOAT */
diff --git a/media/ffvpx/libavcodec/flac.c b/media/ffvpx/libavcodec/flac.c
new file mode 100644
index 0000000000..174b4801be
--- /dev/null
+++ b/media/ffvpx/libavcodec/flac.c
@@ -0,0 +1,225 @@
+/*
+ * FLAC common code
+ * Copyright (c) 2009 Justin Ruggles
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/crc.h"
+#include "libavutil/log.h"
+#include "bytestream.h"
+#include "get_bits.h"
+#include "flac.h"
+#include "flacdata.h"
+#include "flac_parse.h"
+
+static const int8_t sample_size_table[] = { 0, 8, 12, 0, 16, 20, 24, 32 };
+
+static const AVChannelLayout flac_channel_layouts[8] = {
+ AV_CHANNEL_LAYOUT_MONO,
+ AV_CHANNEL_LAYOUT_STEREO,
+ AV_CHANNEL_LAYOUT_SURROUND,
+ AV_CHANNEL_LAYOUT_QUAD,
+ AV_CHANNEL_LAYOUT_5POINT0,
+ AV_CHANNEL_LAYOUT_5POINT1,
+ AV_CHANNEL_LAYOUT_6POINT1,
+ AV_CHANNEL_LAYOUT_7POINT1
+};
+
+static int64_t get_utf8(GetBitContext *gb)
+{
+ int64_t val;
+ GET_UTF8(val, get_bits(gb, 8), return -1;)
+ return val;
+}
+
+int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
+ FLACFrameInfo *fi, int log_level_offset)
+{
+ int bs_code, sr_code, bps_code;
+
+ /* frame sync code */
+ if ((get_bits(gb, 15) & 0x7FFF) != 0x7FFC) {
+ av_log(avctx, AV_LOG_ERROR + log_level_offset, "invalid sync code\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* variable block size stream code */
+ fi->is_var_size = get_bits1(gb);
+
+ /* block size and sample rate codes */
+ bs_code = get_bits(gb, 4);
+ sr_code = get_bits(gb, 4);
+
+ /* channels and decorrelation */
+ fi->ch_mode = get_bits(gb, 4);
+ if (fi->ch_mode < FLAC_MAX_CHANNELS) {
+ fi->channels = fi->ch_mode + 1;
+ fi->ch_mode = FLAC_CHMODE_INDEPENDENT;
+ } else if (fi->ch_mode < FLAC_MAX_CHANNELS + FLAC_CHMODE_MID_SIDE) {
+ fi->channels = 2;
+ fi->ch_mode -= FLAC_MAX_CHANNELS - 1;
+ } else {
+ av_log(avctx, AV_LOG_ERROR + log_level_offset,
+ "invalid channel mode: %d\n", fi->ch_mode);
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* bits per sample */
+ bps_code = get_bits(gb, 3);
+ if (bps_code == 3) {
+ av_log(avctx, AV_LOG_ERROR + log_level_offset,
+ "invalid sample size code (%d)\n",
+ bps_code);
+ return AVERROR_INVALIDDATA;
+ }
+ fi->bps = sample_size_table[bps_code];
+
+ /* reserved bit */
+ if (get_bits1(gb)) {
+ av_log(avctx, AV_LOG_ERROR + log_level_offset,
+ "broken stream, invalid padding\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* sample or frame count */
+ fi->frame_or_sample_num = get_utf8(gb);
+ if (fi->frame_or_sample_num < 0) {
+ av_log(avctx, AV_LOG_ERROR + log_level_offset,
+ "sample/frame number invalid; utf8 fscked\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* blocksize */
+ if (bs_code == 0) {
+ av_log(avctx, AV_LOG_ERROR + log_level_offset,
+ "reserved blocksize code: 0\n");
+ return AVERROR_INVALIDDATA;
+ } else if (bs_code == 6) {
+ fi->blocksize = get_bits(gb, 8) + 1;
+ } else if (bs_code == 7) {
+ fi->blocksize = get_bits(gb, 16) + 1;
+ } else {
+ fi->blocksize = ff_flac_blocksize_table[bs_code];
+ }
+
+ /* sample rate */
+ if (sr_code < 12) {
+ fi->samplerate = ff_flac_sample_rate_table[sr_code];
+ } else if (sr_code == 12) {
+ fi->samplerate = get_bits(gb, 8) * 1000;
+ } else if (sr_code == 13) {
+ fi->samplerate = get_bits(gb, 16);
+ } else if (sr_code == 14) {
+ fi->samplerate = get_bits(gb, 16) * 10;
+ } else {
+ av_log(avctx, AV_LOG_ERROR + log_level_offset,
+ "illegal sample rate code %d\n",
+ sr_code);
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* header CRC-8 check */
+ skip_bits(gb, 8);
+ if (av_crc(av_crc_get_table(AV_CRC_8_ATM), 0, gb->buffer,
+ get_bits_count(gb)/8)) {
+ av_log(avctx, AV_LOG_ERROR + log_level_offset,
+ "header crc mismatch\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ return 0;
+}
+
+int ff_flac_is_extradata_valid(AVCodecContext *avctx,
+ uint8_t **streaminfo_start)
+{
+ if (!avctx->extradata || avctx->extradata_size < FLAC_STREAMINFO_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "extradata NULL or too small.\n");
+ return 0;
+ }
+ if (AV_RL32(avctx->extradata) != MKTAG('f','L','a','C')) {
+ /* extradata contains STREAMINFO only */
+ if (avctx->extradata_size != FLAC_STREAMINFO_SIZE) {
+ av_log(avctx, AV_LOG_WARNING, "extradata contains %d bytes too many.\n",
+                   avctx->extradata_size - FLAC_STREAMINFO_SIZE);
+ }
+ *streaminfo_start = avctx->extradata;
+ } else {
+ if (avctx->extradata_size < 8+FLAC_STREAMINFO_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "extradata too small.\n");
+ return 0;
+ }
+ *streaminfo_start = &avctx->extradata[8];
+ }
+ return 1;
+}
+
+void ff_flac_set_channel_layout(AVCodecContext *avctx, int channels)
+{
+ if (channels == avctx->ch_layout.nb_channels &&
+ avctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC)
+ return;
+
+ av_channel_layout_uninit(&avctx->ch_layout);
+ if (channels <= FF_ARRAY_ELEMS(flac_channel_layouts))
+ avctx->ch_layout = flac_channel_layouts[channels - 1];
+ else
+ avctx->ch_layout = (AVChannelLayout){ .order = AV_CHANNEL_ORDER_UNSPEC,
+ .nb_channels = channels };
+}
+
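+/*
+ * STREAMINFO layout, as read below: 16-bit minimum and maximum block size,
+ * 24-bit minimum and maximum frame size, 20-bit sample rate,
+ * 3-bit (channels - 1), 5-bit (bits per sample - 1), a 36-bit total sample
+ * count and a 128-bit MD5 signature of the unencoded audio.
+ */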
+int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
+ const uint8_t *buffer)
+{
+ GetBitContext gb;
+ init_get_bits(&gb, buffer, FLAC_STREAMINFO_SIZE*8);
+
+ skip_bits(&gb, 16); /* skip min blocksize */
+ s->max_blocksize = get_bits(&gb, 16);
+ if (s->max_blocksize < FLAC_MIN_BLOCKSIZE) {
+ av_log(avctx, AV_LOG_WARNING, "invalid max blocksize: %d\n",
+ s->max_blocksize);
+ s->max_blocksize = 16;
+ return AVERROR_INVALIDDATA;
+ }
+
+ skip_bits(&gb, 24); /* skip min frame size */
+ s->max_framesize = get_bits(&gb, 24);
+
+ s->samplerate = get_bits(&gb, 20);
+ s->channels = get_bits(&gb, 3) + 1;
+ s->bps = get_bits(&gb, 5) + 1;
+
+ if (s->bps < 4) {
+ av_log(avctx, AV_LOG_ERROR, "invalid bps: %d\n", s->bps);
+ s->bps = 16;
+ return AVERROR_INVALIDDATA;
+ }
+
+ avctx->sample_rate = s->samplerate;
+ avctx->bits_per_raw_sample = s->bps;
+ ff_flac_set_channel_layout(avctx, s->channels);
+
+ s->samples = get_bits64(&gb, 36);
+
+ skip_bits_long(&gb, 64); /* md5 sum */
+ skip_bits_long(&gb, 64); /* md5 sum */
+
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/flac.h b/media/ffvpx/libavcodec/flac.h
new file mode 100644
index 0000000000..00e631ed20
--- /dev/null
+++ b/media/ffvpx/libavcodec/flac.h
@@ -0,0 +1,75 @@
+/*
+ * FLAC (Free Lossless Audio Codec) common stuff
+ * Copyright (c) 2008 Justin Ruggles
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * FLAC (Free Lossless Audio Codec) common stuff
+ */
+
+#ifndef AVCODEC_FLAC_H
+#define AVCODEC_FLAC_H
+
+#include "libavutil/intreadwrite.h"
+
+#define FLAC_STREAMINFO_SIZE 34
+#define FLAC_MAX_CHANNELS 8
+#define FLAC_MIN_BLOCKSIZE 16
+#define FLAC_MAX_BLOCKSIZE 65535
+#define FLAC_MIN_FRAME_SIZE 10
+
+enum {
+ FLAC_CHMODE_INDEPENDENT = 0,
+ FLAC_CHMODE_LEFT_SIDE = 1,
+ FLAC_CHMODE_RIGHT_SIDE = 2,
+ FLAC_CHMODE_MID_SIDE = 3,
+};
+
+enum {
+ FLAC_METADATA_TYPE_STREAMINFO = 0,
+ FLAC_METADATA_TYPE_PADDING,
+ FLAC_METADATA_TYPE_APPLICATION,
+ FLAC_METADATA_TYPE_SEEKTABLE,
+ FLAC_METADATA_TYPE_VORBIS_COMMENT,
+ FLAC_METADATA_TYPE_CUESHEET,
+ FLAC_METADATA_TYPE_PICTURE,
+ FLAC_METADATA_TYPE_INVALID = 127
+};
+
+/**
+ * Parse the metadata block parameters from the header.
+ * @param[in] block_header header data, at least 4 bytes
+ * @param[out] last indicator for last metadata block
+ * @param[out] type metadata block type
+ * @param[out] size metadata block size
+ */
+static av_always_inline void flac_parse_block_header(const uint8_t *block_header,
+ int *last, int *type, int *size)
+{
+ int tmp = *block_header;
+ if (last)
+ *last = tmp & 0x80;
+ if (type)
+ *type = tmp & 0x7F;
+ if (size)
+ *size = AV_RB24(block_header + 1);
+}
+
+#endif /* AVCODEC_FLAC_H */
diff --git a/media/ffvpx/libavcodec/flac_parse.h b/media/ffvpx/libavcodec/flac_parse.h
new file mode 100644
index 0000000000..67a7320bea
--- /dev/null
+++ b/media/ffvpx/libavcodec/flac_parse.h
@@ -0,0 +1,89 @@
+/*
+ * FLAC (Free Lossless Audio Codec) decoder/parser common functions
+ * Copyright (c) 2008 Justin Ruggles
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * FLAC (Free Lossless Audio Codec) decoder/parser common functions
+ */
+
+#ifndef AVCODEC_FLAC_PARSE_H
+#define AVCODEC_FLAC_PARSE_H
+
+#include "avcodec.h"
+#include "get_bits.h"
+
+typedef struct FLACStreaminfo {
+ int samplerate; /**< sample rate */
+ int channels; /**< number of channels */
+ int bps; /**< bits-per-sample */
+ int max_blocksize; /**< maximum block size, in samples */
+ int max_framesize; /**< maximum frame size, in bytes */
+ int64_t samples; /**< total number of samples */
+} FLACStreaminfo;
+
+typedef struct FLACFrameInfo {
+ int samplerate; /**< sample rate */
+ int channels; /**< number of channels */
+ int bps; /**< bits-per-sample */
+ int blocksize; /**< block size of the frame */
+ int ch_mode; /**< channel decorrelation mode */
+ int64_t frame_or_sample_num; /**< frame number or sample number */
+ int is_var_size; /**< specifies if the stream uses variable
+ block sizes or a fixed block size;
+ also determines the meaning of
+ frame_or_sample_num */
+} FLACFrameInfo;
+
+/**
+ * Parse the Streaminfo metadata block
+ * @param[out] avctx codec context to set basic stream parameters
+ * @param[out] s where parsed information is stored
+ * @param[in] buffer pointer to start of 34-byte streaminfo data
+ *
+ * @return negative error code on failure or >= 0 on success
+ */
+int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
+ const uint8_t *buffer);
+
+/**
+ * Validate the FLAC extradata.
+ * @param[in] avctx codec context containing the extradata.
+ * @param[out] format extradata format.
+ * @param[out] streaminfo_start pointer to start of 34-byte STREAMINFO data.
+ * @return 1 if valid, 0 if not valid.
+ */
+int ff_flac_is_extradata_valid(AVCodecContext *avctx,
+ uint8_t **streaminfo_start);
+
+/**
+ * Validate and decode a frame header.
+ * @param avctx AVCodecContext to use as av_log() context
+ * @param gb GetBitContext from which to read frame header
+ * @param[out] fi frame information
+ * @param log_level_offset  log level offset; can be used to silence error messages.
+ * @return non-zero on error, 0 if ok
+ */
+int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
+ FLACFrameInfo *fi, int log_level_offset);
+
+void ff_flac_set_channel_layout(AVCodecContext *avctx, int channels);
+
+#endif /* AVCODEC_FLAC_PARSE_H */
diff --git a/media/ffvpx/libavcodec/flacdata.c b/media/ffvpx/libavcodec/flacdata.c
new file mode 100644
index 0000000000..d96e3e0966
--- /dev/null
+++ b/media/ffvpx/libavcodec/flacdata.c
@@ -0,0 +1,33 @@
+/*
+ * FLAC data
+ * Copyright (c) 2003 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "flacdata.h"
+
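+/* Sample rates selected by the 4-bit sample-rate code of the frame header;
+ * index 0 means "take the rate from STREAMINFO", codes 12-14 carry the rate
+ * inline and code 15 is invalid (see ff_flac_decode_frame_header()). */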
+const int ff_flac_sample_rate_table[16] =
+{ 0,
+ 88200, 176400, 192000,
+ 8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000,
+ 0, 0, 0, 0 };
+
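+/* Block sizes selected by the 4-bit block-size code; code 0 is reserved and
+ * codes 6 and 7 read an 8- or 16-bit (blocksize - 1) from the frame header. */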
+const int32_t ff_flac_blocksize_table[16] = {
+ 0, 192, 576<<0, 576<<1, 576<<2, 576<<3, 0, 0,
+256<<0, 256<<1, 256<<2, 256<<3, 256<<4, 256<<5, 256<<6, 256<<7
+};
diff --git a/media/ffvpx/libavcodec/flacdata.h b/media/ffvpx/libavcodec/flacdata.h
new file mode 100644
index 0000000000..ef21840777
--- /dev/null
+++ b/media/ffvpx/libavcodec/flacdata.h
@@ -0,0 +1,31 @@
+/*
+ * FLAC data header
+ * Copyright (c) 2003 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_FLACDATA_H
+#define AVCODEC_FLACDATA_H
+
+#include <stdint.h>
+
+extern const int ff_flac_sample_rate_table[16];
+
+extern const int32_t ff_flac_blocksize_table[16];
+
+#endif /* AVCODEC_FLACDATA_H */
diff --git a/media/ffvpx/libavcodec/flacdec.c b/media/ffvpx/libavcodec/flacdec.c
new file mode 100644
index 0000000000..cc778a8dff
--- /dev/null
+++ b/media/ffvpx/libavcodec/flacdec.c
@@ -0,0 +1,846 @@
+/*
+ * FLAC (Free Lossless Audio Codec) decoder
+ * Copyright (c) 2003 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * FLAC (Free Lossless Audio Codec) decoder
+ * @author Alex Beregszaszi
+ * @see http://flac.sourceforge.net/
+ *
+ * This decoder can be used in one of two ways: either raw FLAC data is fed
+ * through, starting from the initial 'fLaC' signature, or the 34-byte
+ * STREAMINFO structure is passed via avctx->extradata[_size] and the packet
+ * data starts with the 0xFFF8 frame marker.
+ */
+
+#include <limits.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/crc.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "get_bits.h"
+#include "bytestream.h"
+#include "golomb.h"
+#include "flac.h"
+#include "flacdata.h"
+#include "flacdsp.h"
+#include "flac_parse.h"
+#include "thread.h"
+#include "unary.h"
+
+
+typedef struct FLACContext {
+ AVClass *class;
+ FLACStreaminfo stream_info;
+
+ AVCodecContext *avctx; ///< parent AVCodecContext
+ GetBitContext gb; ///< GetBitContext initialized to start at the current frame
+
+ int blocksize; ///< number of samples in the current frame
+ int sample_shift; ///< shift required to make output samples 16-bit or 32-bit
+ int ch_mode; ///< channel decorrelation type in the current frame
+ int got_streaminfo; ///< indicates if the STREAMINFO has been read
+
+ int32_t *decoded[FLAC_MAX_CHANNELS]; ///< decoded samples
+ uint8_t *decoded_buffer;
+ unsigned int decoded_buffer_size;
+ int64_t *decoded_33bps; ///< decoded samples for a 33 bps subframe
+ uint8_t *decoded_buffer_33bps;
+ unsigned int decoded_buffer_size_33bps;
+ int buggy_lpc; ///< use workaround for old lavc encoded files
+
+ FLACDSPContext dsp;
+} FLACContext;
+
+static int allocate_buffers(FLACContext *s);
+
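+/* Pick the output sample format (16/32-bit, packed or planar) from the stream
+ * bps and the caller's request_sample_fmt, and record the left shift needed
+ * to scale decoded samples up to that width. */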
+static void flac_set_bps(FLACContext *s)
+{
+ enum AVSampleFormat req = s->avctx->request_sample_fmt;
+ int need32 = s->stream_info.bps > 16;
+ int want32 = av_get_bytes_per_sample(req) > 2;
+ int planar = av_sample_fmt_is_planar(req);
+
+ if (need32 || want32) {
+ if (planar)
+ s->avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
+ else
+ s->avctx->sample_fmt = AV_SAMPLE_FMT_S32;
+ s->sample_shift = 32 - s->stream_info.bps;
+ } else {
+ if (planar)
+ s->avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
+ else
+ s->avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ s->sample_shift = 16 - s->stream_info.bps;
+ }
+}
+
+static av_cold int flac_decode_init(AVCodecContext *avctx)
+{
+ uint8_t *streaminfo;
+ int ret;
+ FLACContext *s = avctx->priv_data;
+ s->avctx = avctx;
+
+ /* for now, the raw FLAC header is allowed to be passed to the decoder as
+ frame data instead of extradata. */
+ if (!avctx->extradata)
+ return 0;
+
+ if (!ff_flac_is_extradata_valid(avctx, &streaminfo))
+ return AVERROR_INVALIDDATA;
+
+ /* initialize based on the demuxer-supplied streamdata header */
+ ret = ff_flac_parse_streaminfo(avctx, &s->stream_info, streaminfo);
+ if (ret < 0)
+ return ret;
+ ret = allocate_buffers(s);
+ if (ret < 0)
+ return ret;
+ flac_set_bps(s);
+ ff_flacdsp_init(&s->dsp, avctx->sample_fmt,
+ s->stream_info.channels);
+ s->got_streaminfo = 1;
+
+ return 0;
+}
+
+static void dump_headers(AVCodecContext *avctx, FLACStreaminfo *s)
+{
+ av_log(avctx, AV_LOG_DEBUG, " Max Blocksize: %d\n", s->max_blocksize);
+ av_log(avctx, AV_LOG_DEBUG, " Max Framesize: %d\n", s->max_framesize);
+ av_log(avctx, AV_LOG_DEBUG, " Samplerate: %d\n", s->samplerate);
+ av_log(avctx, AV_LOG_DEBUG, " Channels: %d\n", s->channels);
+ av_log(avctx, AV_LOG_DEBUG, " Bits: %d\n", s->bps);
+}
+
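+/* Allocate planar 32-bit sample buffers for all channels; for 32 bps stereo
+ * an additional 64-bit buffer is allocated to hold the 33-bit side channel. */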
+static int allocate_buffers(FLACContext *s)
+{
+ int buf_size;
+ int ret;
+
+ av_assert0(s->stream_info.max_blocksize);
+
+ buf_size = av_samples_get_buffer_size(NULL, s->stream_info.channels,
+ s->stream_info.max_blocksize,
+ AV_SAMPLE_FMT_S32P, 0);
+ if (buf_size < 0)
+ return buf_size;
+
+ av_fast_malloc(&s->decoded_buffer, &s->decoded_buffer_size, buf_size);
+ if (!s->decoded_buffer)
+ return AVERROR(ENOMEM);
+
+ ret = av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
+ s->decoded_buffer,
+ s->stream_info.channels,
+ s->stream_info.max_blocksize,
+ AV_SAMPLE_FMT_S32P, 0);
+ if (ret >= 0 && s->stream_info.bps == 32 && s->stream_info.channels == 2) {
+ buf_size = av_samples_get_buffer_size(NULL, 1,
+ s->stream_info.max_blocksize,
+ AV_SAMPLE_FMT_S64P, 0);
+ if (buf_size < 0)
+ return buf_size;
+
+ av_fast_malloc(&s->decoded_buffer_33bps, &s->decoded_buffer_size_33bps, buf_size);
+ if (!s->decoded_buffer_33bps)
+ return AVERROR(ENOMEM);
+
+ ret = av_samples_fill_arrays((uint8_t **)&s->decoded_33bps, NULL,
+ s->decoded_buffer_33bps,
+ 1,
+ s->stream_info.max_blocksize,
+ AV_SAMPLE_FMT_S64P, 0);
+
+ }
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * Parse the STREAMINFO from an inline header.
+ * @param s the flac decoding context
+ * @param buf input buffer, starting with the "fLaC" marker
+ * @param buf_size buffer size
+ * @return non-zero if metadata is invalid
+ */
+static int parse_streaminfo(FLACContext *s, const uint8_t *buf, int buf_size)
+{
+ int metadata_type, metadata_size, ret;
+
+ if (buf_size < FLAC_STREAMINFO_SIZE+8) {
+ /* need more data */
+ return 0;
+ }
+ flac_parse_block_header(&buf[4], NULL, &metadata_type, &metadata_size);
+ if (metadata_type != FLAC_METADATA_TYPE_STREAMINFO ||
+ metadata_size != FLAC_STREAMINFO_SIZE) {
+ return AVERROR_INVALIDDATA;
+ }
+ ret = ff_flac_parse_streaminfo(s->avctx, &s->stream_info, &buf[8]);
+ if (ret < 0)
+ return ret;
+ ret = allocate_buffers(s);
+ if (ret < 0)
+ return ret;
+ flac_set_bps(s);
+ ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt,
+ s->stream_info.channels);
+ s->got_streaminfo = 1;
+
+ return 0;
+}
+
+/**
+ * Determine the size of an inline header.
+ * @param buf input buffer, starting with the "fLaC" marker
+ * @param buf_size buffer size
+ * @return number of bytes in the header, or a negative AVERROR code if the
+ *         buffer does not contain the complete header
+ */
+static int get_metadata_size(const uint8_t *buf, int buf_size)
+{
+ int metadata_last, metadata_size;
+ const uint8_t *buf_end = buf + buf_size;
+
+ buf += 4;
+ do {
+ if (buf_end - buf < 4)
+ return AVERROR_INVALIDDATA;
+ flac_parse_block_header(buf, &metadata_last, NULL, &metadata_size);
+ buf += 4;
+ if (buf_end - buf < metadata_size) {
+ /* need more data in order to read the complete header */
+ return AVERROR_INVALIDDATA;
+ }
+ buf += metadata_size;
+ } while (!metadata_last);
+
+ return buf_size - (buf_end - buf);
+}
+
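+/*
+ * Decode the Rice-coded residual: a 2-bit method selects 4- or 5-bit Rice
+ * parameters, a 4-bit partition order splits the block into 2^order
+ * partitions, and an all-ones parameter escapes to raw residuals whose
+ * width is given by the following 5 bits.
+ */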
+static int decode_residuals(FLACContext *s, int32_t *decoded, int pred_order)
+{
+ GetBitContext gb = s->gb;
+ int i, tmp, partition, method_type, rice_order;
+ int rice_bits, rice_esc;
+ int samples;
+
+ method_type = get_bits(&gb, 2);
+ rice_order = get_bits(&gb, 4);
+
+ samples = s->blocksize >> rice_order;
+ rice_bits = 4 + method_type;
+ rice_esc = (1 << rice_bits) - 1;
+
+ decoded += pred_order;
+ i = pred_order;
+
+ if (method_type > 1) {
+ av_log(s->avctx, AV_LOG_ERROR, "illegal residual coding method %d\n",
+ method_type);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (samples << rice_order != s->blocksize) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid rice order: %i blocksize %i\n",
+ rice_order, s->blocksize);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (pred_order > samples) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid predictor order: %i > %i\n",
+ pred_order, samples);
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (partition = 0; partition < (1 << rice_order); partition++) {
+ tmp = get_bits(&gb, rice_bits);
+ if (tmp == rice_esc) {
+ tmp = get_bits(&gb, 5);
+ for (; i < samples; i++)
+ *decoded++ = get_sbits_long(&gb, tmp);
+ } else {
+ int real_limit = (tmp > 1) ? (INT_MAX >> (tmp - 1)) + 2 : INT_MAX;
+ for (; i < samples; i++) {
+ int v = get_sr_golomb_flac(&gb, tmp, real_limit, 1);
+ if (v == 0x80000000){
+ av_log(s->avctx, AV_LOG_ERROR, "invalid residual\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ *decoded++ = v;
+ }
+ }
+ i= 0;
+ }
+
+ s->gb = gb;
+
+ return 0;
+}
+
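+/* Fixed-predictor subframe of order 0-4: the residual is integrated back into
+ * samples via the running values a, b, c, d, seeded with the last warm-up
+ * sample and its 1st to 3rd order differences. */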
+static int decode_subframe_fixed(FLACContext *s, int32_t *decoded,
+ int pred_order, int bps)
+{
+ const int blocksize = s->blocksize;
+ unsigned av_uninit(a), av_uninit(b), av_uninit(c), av_uninit(d);
+ int i;
+ int ret;
+
+ /* warm up samples */
+ for (i = 0; i < pred_order; i++) {
+ decoded[i] = get_sbits_long(&s->gb, bps);
+ }
+
+ if ((ret = decode_residuals(s, decoded, pred_order)) < 0)
+ return ret;
+
+ if (pred_order > 0)
+ a = decoded[pred_order-1];
+ if (pred_order > 1)
+ b = a - decoded[pred_order-2];
+ if (pred_order > 2)
+ c = b - decoded[pred_order-2] + decoded[pred_order-3];
+ if (pred_order > 3)
+ d = c - decoded[pred_order-2] + 2U*decoded[pred_order-3] - decoded[pred_order-4];
+
+ switch (pred_order) {
+ case 0:
+ break;
+ case 1:
+ for (i = pred_order; i < blocksize; i++)
+ decoded[i] = a += decoded[i];
+ break;
+ case 2:
+ for (i = pred_order; i < blocksize; i++)
+ decoded[i] = a += b += decoded[i];
+ break;
+ case 3:
+ for (i = pred_order; i < blocksize; i++)
+ decoded[i] = a += b += c += decoded[i];
+ break;
+ case 4:
+ for (i = pred_order; i < blocksize; i++)
+ decoded[i] = a += b += c += d += decoded[i];
+ break;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR, "illegal pred order %d\n", pred_order);
+ return AVERROR_INVALIDDATA;
+ }
+
+ return 0;
+}
+
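+/* Fixed predictors of order 0-4 evaluated directly with their binomial
+ * coefficients (1; 2,-1; 3,-3,1; 4,-6,4,-1) and 64-bit intermediates, for
+ * cases where the 32-bit running-sum form above could overflow. */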
+#define DECODER_SUBFRAME_FIXED_WIDE(residual) { \
+ const int blocksize = s->blocksize; \
+ int ret; \
+ \
+ if ((ret = decode_residuals(s, residual, pred_order)) < 0) \
+ return ret; \
+ \
+ switch (pred_order) { \
+ case 0: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = residual[i]; \
+ break; \
+ case 1: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + (int64_t)decoded[i-1];\
+ break; \
+ case 2: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + 2*(int64_t)decoded[i-1] - (int64_t)decoded[i-2]; \
+ break; \
+ case 3: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + 3*(int64_t)decoded[i-1] - 3*(int64_t)decoded[i-2] + (int64_t)decoded[i-3]; \
+ break; \
+ case 4: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + 4*(int64_t)decoded[i-1] - 6*(int64_t)decoded[i-2] + 4*(int64_t)decoded[i-3] - (int64_t)decoded[i-4]; \
+ break; \
+ default: \
+ av_log(s->avctx, AV_LOG_ERROR, "illegal pred order %d\n", pred_order); \
+ return AVERROR_INVALIDDATA; \
+ } \
+ return 0; \
+}
+
+static int decode_subframe_fixed_wide(FLACContext *s, int32_t *decoded,
+ int pred_order, int bps)
+{
+ /* warm up samples */
+ for (int i = 0; i < pred_order; i++) {
+ decoded[i] = get_sbits_long(&s->gb, bps);
+ }
+ DECODER_SUBFRAME_FIXED_WIDE(decoded);
+}
+
+
+static int decode_subframe_fixed_33bps(FLACContext *s, int64_t *decoded,
+ int32_t *residual, int pred_order)
+{
+    /* warm up samples */
+    for (int i = 0; i < pred_order; i++) {
+        decoded[i] = get_sbits64(&s->gb, 33);
+    }
+ DECODER_SUBFRAME_FIXED_WIDE(residual);
+}
+
+static void lpc_analyze_remodulate(SUINT32 *decoded, const int coeffs[32],
+ int order, int qlevel, int len, int bps)
+{
+ int i, j;
+ int ebps = 1 << (bps-1);
+ unsigned sigma = 0;
+
+ for (i = order; i < len; i++)
+ sigma |= decoded[i] + ebps;
+
+ if (sigma < 2*ebps)
+ return;
+
+ for (i = len - 1; i >= order; i--) {
+ int64_t p = 0;
+ for (j = 0; j < order; j++)
+ p += coeffs[j] * (int64_t)(int32_t)decoded[i-order+j];
+ decoded[i] -= p >> qlevel;
+ }
+ for (i = order; i < len; i++, decoded++) {
+ int32_t p = 0;
+ for (j = 0; j < order; j++)
+ p += coeffs[j] * (uint32_t)decoded[j];
+ decoded[j] += p >> qlevel;
+ }
+}
+
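+/* LPC subframe: a 4-bit (precision - 1) field for the quantized coefficients,
+ * a signed 5-bit shift (qlevel), 'pred_order' coefficients, then the Rice
+ * residual; the prediction itself runs in the lpc16/lpc32 DSP routines. */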
+static int decode_subframe_lpc(FLACContext *s, int32_t *decoded, int pred_order,
+ int bps)
+{
+ int i, ret;
+ int coeff_prec, qlevel;
+ int coeffs[32];
+
+ /* warm up samples */
+ for (i = 0; i < pred_order; i++) {
+ decoded[i] = get_sbits_long(&s->gb, bps);
+ }
+
+ coeff_prec = get_bits(&s->gb, 4) + 1;
+ if (coeff_prec == 16) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid coeff precision\n");
+ return AVERROR_INVALIDDATA;
+ }
+ qlevel = get_sbits(&s->gb, 5);
+ if (qlevel < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "qlevel %d not supported, maybe buggy stream\n",
+ qlevel);
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (i = 0; i < pred_order; i++) {
+ coeffs[pred_order - i - 1] = get_sbits(&s->gb, coeff_prec);
+ }
+
+ if ((ret = decode_residuals(s, decoded, pred_order)) < 0)
+ return ret;
+
+ if ( ( s->buggy_lpc && s->stream_info.bps <= 16)
+ || ( !s->buggy_lpc && bps <= 16
+ && bps + coeff_prec + av_log2(pred_order) <= 32)) {
+ s->dsp.lpc16(decoded, coeffs, pred_order, qlevel, s->blocksize);
+ } else {
+ s->dsp.lpc32(decoded, coeffs, pred_order, qlevel, s->blocksize);
+ if (s->stream_info.bps <= 16)
+ lpc_analyze_remodulate(decoded, coeffs, pred_order, qlevel, s->blocksize, bps);
+ }
+
+ return 0;
+}
+
+static int decode_subframe_lpc_33bps(FLACContext *s, int64_t *decoded,
+ int32_t *residual, int pred_order)
+{
+ int i, j, ret;
+ int coeff_prec, qlevel;
+ int coeffs[32];
+
+ /* warm up samples */
+ for (i = 0; i < pred_order; i++) {
+ decoded[i] = get_sbits64(&s->gb, 33);
+ }
+
+ coeff_prec = get_bits(&s->gb, 4) + 1;
+ if (coeff_prec == 16) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid coeff precision\n");
+ return AVERROR_INVALIDDATA;
+ }
+ qlevel = get_sbits(&s->gb, 5);
+ if (qlevel < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "qlevel %d not supported, maybe buggy stream\n",
+ qlevel);
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (i = 0; i < pred_order; i++) {
+ coeffs[pred_order - i - 1] = get_sbits(&s->gb, coeff_prec);
+ }
+
+ if ((ret = decode_residuals(s, residual, pred_order)) < 0)
+ return ret;
+
+ for (i = pred_order; i < s->blocksize; i++, decoded++) {
+ int64_t sum = 0;
+ for (j = 0; j < pred_order; j++)
+ sum += (int64_t)coeffs[j] * decoded[j];
+ decoded[j] = residual[i] + (sum >> qlevel);
+ }
+
+ return 0;
+}
+
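+/*
+ * Decode one subframe. The 6-bit type field selects the coding method:
+ * 0 = constant, 1 = verbatim, 8-12 = fixed predictor of order (type & 7),
+ * >= 32 = LPC of order ((type & 31) + 1). If the wasted-bits flag is set,
+ * a unary field gives the number of wasted low bits (minus one) and the
+ * decoded samples are shifted back up by that amount at the end.
+ */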
+static inline int decode_subframe(FLACContext *s, int channel)
+{
+ int32_t *decoded = s->decoded[channel];
+ int type, wasted = 0;
+ int bps = s->stream_info.bps;
+ int i, ret;
+
+ if (channel == 0) {
+ if (s->ch_mode == FLAC_CHMODE_RIGHT_SIDE)
+ bps++;
+ } else {
+ if (s->ch_mode == FLAC_CHMODE_LEFT_SIDE || s->ch_mode == FLAC_CHMODE_MID_SIDE)
+ bps++;
+ }
+
+ if (get_bits1(&s->gb)) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid subframe padding\n");
+ return AVERROR_INVALIDDATA;
+ }
+ type = get_bits(&s->gb, 6);
+
+ if (get_bits1(&s->gb)) {
+ int left = get_bits_left(&s->gb);
+ if ( left <= 0 ||
+ (left < bps && !show_bits_long(&s->gb, left)) ||
+ !show_bits_long(&s->gb, bps-1)) {
+ av_log(s->avctx, AV_LOG_ERROR,
+ "Invalid number of wasted bits > available bits (%d) - left=%d\n",
+ bps, left);
+ return AVERROR_INVALIDDATA;
+ }
+ wasted = 1 + get_unary(&s->gb, 1, get_bits_left(&s->gb));
+ bps -= wasted;
+ }
+
+//FIXME use av_log2 for types
+ if (type == 0) {
+ if (bps < 33) {
+ int32_t tmp = get_sbits_long(&s->gb, bps);
+ for (i = 0; i < s->blocksize; i++)
+ decoded[i] = tmp;
+ } else {
+ int64_t tmp = get_sbits64(&s->gb, 33);
+ for (i = 0; i < s->blocksize; i++)
+ s->decoded_33bps[i] = tmp;
+ }
+ } else if (type == 1) {
+ if (bps < 33) {
+ for (i = 0; i < s->blocksize; i++)
+ decoded[i] = get_sbits_long(&s->gb, bps);
+ } else {
+ for (i = 0; i < s->blocksize; i++)
+ s->decoded_33bps[i] = get_sbits64(&s->gb, 33);
+ }
+ } else if ((type >= 8) && (type <= 12)) {
+ int order = type & ~0x8;
+ if (bps < 33) {
+ if (bps + order <= 32) {
+ if ((ret = decode_subframe_fixed(s, decoded, order, bps)) < 0)
+ return ret;
+ } else {
+ if ((ret = decode_subframe_fixed_wide(s, decoded, order, bps)) < 0)
+ return ret;
+ }
+ } else {
+ if ((ret = decode_subframe_fixed_33bps(s, s->decoded_33bps, decoded, order)) < 0)
+ return ret;
+ }
+ } else if (type >= 32) {
+ if (bps < 33) {
+ if ((ret = decode_subframe_lpc(s, decoded, (type & ~0x20)+1, bps)) < 0)
+ return ret;
+ } else {
+ if ((ret = decode_subframe_lpc_33bps(s, s->decoded_33bps, decoded, (type & ~0x20)+1)) < 0)
+ return ret;
+ }
+ } else {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid coding type\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (wasted) {
+ if (wasted+bps == 33) {
+ int i;
+ for (i = 0; i < s->blocksize; i++)
+ s->decoded_33bps[i] = (uint64_t)decoded[i] << wasted;
+ } else if (wasted < 32) {
+ int i;
+ for (i = 0; i < s->blocksize; i++)
+ decoded[i] = (unsigned)decoded[i] << wasted;
+ }
+ }
+
+ return 0;
+}
+
+static int decode_frame(FLACContext *s)
+{
+ int i, ret;
+ GetBitContext *gb = &s->gb;
+ FLACFrameInfo fi;
+
+ if ((ret = ff_flac_decode_frame_header(s->avctx, gb, &fi, 0)) < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid frame header\n");
+ return ret;
+ }
+
+ if ( s->stream_info.channels
+ && fi.channels != s->stream_info.channels
+ && s->got_streaminfo) {
+ s->stream_info.channels = fi.channels;
+ ff_flac_set_channel_layout(s->avctx, fi.channels);
+ ret = allocate_buffers(s);
+ if (ret < 0)
+ return ret;
+ }
+ s->stream_info.channels = fi.channels;
+ ff_flac_set_channel_layout(s->avctx, fi.channels);
+ s->ch_mode = fi.ch_mode;
+
+ if (!s->stream_info.bps && !fi.bps) {
+ av_log(s->avctx, AV_LOG_ERROR, "bps not found in STREAMINFO or frame header\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (!fi.bps) {
+ fi.bps = s->stream_info.bps;
+ } else if (s->stream_info.bps && fi.bps != s->stream_info.bps) {
+ av_log(s->avctx, AV_LOG_ERROR, "switching bps mid-stream is not "
+ "supported\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (!s->stream_info.bps) {
+ s->stream_info.bps = s->avctx->bits_per_raw_sample = fi.bps;
+ flac_set_bps(s);
+ }
+
+ if (!s->stream_info.max_blocksize)
+ s->stream_info.max_blocksize = FLAC_MAX_BLOCKSIZE;
+ if (fi.blocksize > s->stream_info.max_blocksize) {
+ av_log(s->avctx, AV_LOG_ERROR, "blocksize %d > %d\n", fi.blocksize,
+ s->stream_info.max_blocksize);
+ return AVERROR_INVALIDDATA;
+ }
+ s->blocksize = fi.blocksize;
+
+ if (!s->stream_info.samplerate && !fi.samplerate) {
+ av_log(s->avctx, AV_LOG_ERROR, "sample rate not found in STREAMINFO"
+ " or frame header\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (fi.samplerate == 0)
+ fi.samplerate = s->stream_info.samplerate;
+ s->stream_info.samplerate = s->avctx->sample_rate = fi.samplerate;
+
+ if (!s->got_streaminfo) {
+ ret = allocate_buffers(s);
+ if (ret < 0)
+ return ret;
+ s->got_streaminfo = 1;
+ dump_headers(s->avctx, &s->stream_info);
+ }
+ ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt,
+ s->stream_info.channels);
+
+// dump_headers(s->avctx, &s->stream_info);
+
+ /* subframes */
+ for (i = 0; i < s->stream_info.channels; i++) {
+ if ((ret = decode_subframe(s, i)) < 0)
+ return ret;
+ }
+
+ align_get_bits(gb);
+
+ /* frame footer */
+ skip_bits(gb, 16); /* data crc */
+
+ return 0;
+}
+
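+/* Stereo decorrelation for the 32 bps case, where the side channel is 33 bits
+ * wide and therefore kept separately in decoded_33bps. */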
+static void decorrelate_33bps(int ch_mode, int32_t **decoded, int64_t *decoded_33bps, int len)
+{
+ int i;
+ if (ch_mode == FLAC_CHMODE_LEFT_SIDE ) {
+ for (i = 0; i < len; i++)
+ decoded[1][i] = decoded[0][i] - decoded_33bps[i];
+ } else if (ch_mode == FLAC_CHMODE_RIGHT_SIDE ) {
+ for (i = 0; i < len; i++)
+ decoded[0][i] = decoded[1][i] + decoded_33bps[i];
+ } else if (ch_mode == FLAC_CHMODE_MID_SIDE ) {
+ for (i = 0; i < len; i++) {
+ uint64_t a = decoded[0][i];
+ int64_t b = decoded_33bps[i];
+ a -= b >> 1;
+ decoded[0][i] = (a + b);
+ decoded[1][i] = a;
+ }
+ }
+}
+
+static int flac_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ FLACContext *s = avctx->priv_data;
+ int bytes_read = 0;
+ int ret;
+
+ *got_frame_ptr = 0;
+
+ if (buf_size > 5 && !memcmp(buf, "\177FLAC", 5)) {
+ av_log(s->avctx, AV_LOG_DEBUG, "skipping flac header packet 1\n");
+ return buf_size;
+ }
+
+ if (buf_size > 0 && (*buf & 0x7F) == FLAC_METADATA_TYPE_VORBIS_COMMENT) {
+ av_log(s->avctx, AV_LOG_DEBUG, "skipping vorbis comment\n");
+ return buf_size;
+ }
+
+ /* check that there is at least the smallest decodable amount of data.
+ this amount corresponds to the smallest valid FLAC frame possible.
+ FF F8 69 02 00 00 9A 00 00 34 */
+ if (buf_size < FLAC_MIN_FRAME_SIZE)
+ return buf_size;
+
+ /* check for inline header */
+ if (AV_RB32(buf) == MKBETAG('f','L','a','C')) {
+ if (!s->got_streaminfo && (ret = parse_streaminfo(s, buf, buf_size))) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid header\n");
+ return ret;
+ }
+ return get_metadata_size(buf, buf_size);
+ }
+
+ /* decode frame */
+ if ((ret = init_get_bits8(&s->gb, buf, buf_size)) < 0)
+ return ret;
+ if ((ret = decode_frame(s)) < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "decode_frame() failed\n");
+ return ret;
+ }
+ bytes_read = get_bits_count(&s->gb)/8;
+
+ if ((s->avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_COMPLIANT)) &&
+ av_crc(av_crc_get_table(AV_CRC_16_ANSI),
+ 0, buf, bytes_read)) {
+ av_log(s->avctx, AV_LOG_ERROR, "CRC error at PTS %"PRId64"\n", avpkt->pts);
+ if (s->avctx->err_recognition & AV_EF_EXPLODE)
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* get output buffer */
+ frame->nb_samples = s->blocksize;
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
+ return ret;
+
+ if (s->stream_info.bps == 32 && s->ch_mode > 0) {
+ decorrelate_33bps(s->ch_mode, s->decoded, s->decoded_33bps, s->blocksize);
+ s->dsp.decorrelate[0](frame->data, s->decoded, s->stream_info.channels,
+ s->blocksize, s->sample_shift);
+ } else {
+ s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded,
+ s->stream_info.channels,
+ s->blocksize, s->sample_shift);
+ }
+
+ if (bytes_read > buf_size) {
+ av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", bytes_read - buf_size);
+ return AVERROR_INVALIDDATA;
+ }
+ if (bytes_read < buf_size) {
+ av_log(s->avctx, AV_LOG_DEBUG, "underread: %d orig size: %d\n",
+ buf_size - bytes_read, buf_size);
+ }
+
+ *got_frame_ptr = 1;
+
+ return bytes_read;
+}
+
+static av_cold int flac_decode_close(AVCodecContext *avctx)
+{
+ FLACContext *s = avctx->priv_data;
+
+ av_freep(&s->decoded_buffer);
+ av_freep(&s->decoded_buffer_33bps);
+
+ return 0;
+}
+
+static const AVOption options[] = {
+{ "use_buggy_lpc", "emulate old buggy lavc behavior", offsetof(FLACContext, buggy_lpc), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM },
+{ NULL },
+};
+
+static const AVClass flac_decoder_class = {
+ .class_name = "FLAC decoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+const FFCodec ff_flac_decoder = {
+ .p.name = "flac",
+ CODEC_LONG_NAME("FLAC (Free Lossless Audio Codec)"),
+ .p.type = AVMEDIA_TYPE_AUDIO,
+ .p.id = AV_CODEC_ID_FLAC,
+ .priv_data_size = sizeof(FLACContext),
+ .init = flac_decode_init,
+ .close = flac_decode_close,
+ FF_CODEC_DECODE_CB(flac_decode_frame),
+ .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
+ AV_CODEC_CAP_DR1 |
+ AV_CODEC_CAP_FRAME_THREADS,
+ .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_NONE },
+ .p.priv_class = &flac_decoder_class,
+};
diff --git a/media/ffvpx/libavcodec/flacdsp.c b/media/ffvpx/libavcodec/flacdsp.c
new file mode 100644
index 0000000000..42e231db53
--- /dev/null
+++ b/media/ffvpx/libavcodec/flacdsp.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2012 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/internal.h"
+#include "libavutil/samplefmt.h"
+#include "flacdsp.h"
+#include "config.h"
+
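+/* flacdsp_template.c is included four times to generate the 16- and 32-bit,
+ * interleaved and planar variants of the decorrelate functions selected in
+ * ff_flacdsp_init() below. */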
+#define SAMPLE_SIZE 16
+#define PLANAR 0
+#include "flacdsp_template.c"
+
+#undef PLANAR
+#define PLANAR 1
+#include "flacdsp_template.c"
+
+#undef SAMPLE_SIZE
+#undef PLANAR
+#define SAMPLE_SIZE 32
+#define PLANAR 0
+#include "flacdsp_template.c"
+
+#undef PLANAR
+#define PLANAR 1
+#include "flacdsp_template.c"
+
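+/* LPC restoration used when the prediction fits in 32-bit arithmetic; two
+ * accumulators (s0, s1) let the outer loop emit two samples per iteration. */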
+static void flac_lpc_16_c(int32_t *decoded, const int coeffs[32],
+ int pred_order, int qlevel, int len)
+{
+ int i, j;
+
+ for (i = pred_order; i < len - 1; i += 2, decoded += 2) {
+ SUINT c = coeffs[0];
+ SUINT d = decoded[0];
+ int s0 = 0, s1 = 0;
+ for (j = 1; j < pred_order; j++) {
+ s0 += c*d;
+ d = decoded[j];
+ s1 += c*d;
+ c = coeffs[j];
+ }
+ s0 += c*d;
+ d = decoded[j] += (SUINT)(s0 >> qlevel);
+ s1 += c*d;
+ decoded[j + 1] += (SUINT)(s1 >> qlevel);
+ }
+ if (i < len) {
+ int sum = 0;
+ for (j = 0; j < pred_order; j++)
+ sum += coeffs[j] * (SUINT)decoded[j];
+ decoded[j] = decoded[j] + (unsigned)(sum >> qlevel);
+ }
+}
+
+static void flac_lpc_32_c(int32_t *decoded, const int coeffs[32],
+ int pred_order, int qlevel, int len)
+{
+ int i, j;
+
+ for (i = pred_order; i < len; i++, decoded++) {
+ int64_t sum = 0;
+ for (j = 0; j < pred_order; j++)
+ sum += (int64_t)coeffs[j] * decoded[j];
+ decoded[j] += sum >> qlevel;
+ }
+
+}
+
+av_cold void ff_flacdsp_init(FLACDSPContext *c, enum AVSampleFormat fmt, int channels)
+{
+ c->lpc16 = flac_lpc_16_c;
+ c->lpc32 = flac_lpc_32_c;
+
+ switch (fmt) {
+ case AV_SAMPLE_FMT_S32:
+ c->decorrelate[0] = flac_decorrelate_indep_c_32;
+ c->decorrelate[1] = flac_decorrelate_ls_c_32;
+ c->decorrelate[2] = flac_decorrelate_rs_c_32;
+ c->decorrelate[3] = flac_decorrelate_ms_c_32;
+ break;
+
+ case AV_SAMPLE_FMT_S32P:
+ c->decorrelate[0] = flac_decorrelate_indep_c_32p;
+ c->decorrelate[1] = flac_decorrelate_ls_c_32p;
+ c->decorrelate[2] = flac_decorrelate_rs_c_32p;
+ c->decorrelate[3] = flac_decorrelate_ms_c_32p;
+ break;
+
+ case AV_SAMPLE_FMT_S16:
+ c->decorrelate[0] = flac_decorrelate_indep_c_16;
+ c->decorrelate[1] = flac_decorrelate_ls_c_16;
+ c->decorrelate[2] = flac_decorrelate_rs_c_16;
+ c->decorrelate[3] = flac_decorrelate_ms_c_16;
+ break;
+
+ case AV_SAMPLE_FMT_S16P:
+ c->decorrelate[0] = flac_decorrelate_indep_c_16p;
+ c->decorrelate[1] = flac_decorrelate_ls_c_16p;
+ c->decorrelate[2] = flac_decorrelate_rs_c_16p;
+ c->decorrelate[3] = flac_decorrelate_ms_c_16p;
+ break;
+ }
+
+#if ARCH_ARM
+ ff_flacdsp_init_arm(c, fmt, channels);
+#elif ARCH_X86
+ ff_flacdsp_init_x86(c, fmt, channels);
+#endif
+}
diff --git a/media/ffvpx/libavcodec/flacdsp.h b/media/ffvpx/libavcodec/flacdsp.h
new file mode 100644
index 0000000000..9f8ed38b66
--- /dev/null
+++ b/media/ffvpx/libavcodec/flacdsp.h
@@ -0,0 +1,43 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_FLACDSP_H
+#define AVCODEC_FLACDSP_H
+
+#include <stdint.h>
+
+#include "libavutil/samplefmt.h"
+
+typedef struct FLACDSPContext {
+ void (*decorrelate[4])(uint8_t **out, int32_t **in, int channels,
+ int len, int shift);
+ void (*lpc16)(int32_t *samples, const int coeffs[32], int order,
+ int qlevel, int len);
+ void (*lpc32)(int32_t *samples, const int coeffs[32], int order,
+ int qlevel, int len);
+ void (*lpc16_encode)(int32_t *res, const int32_t *smp, int len, int order,
+ const int32_t coefs[32], int shift);
+ void (*lpc32_encode)(int32_t *res, const int32_t *smp, int len, int order,
+ const int32_t coefs[32], int shift);
+} FLACDSPContext;
+
+void ff_flacdsp_init(FLACDSPContext *c, enum AVSampleFormat fmt, int channels);
+void ff_flacdsp_init_arm(FLACDSPContext *c, enum AVSampleFormat fmt, int channels);
+void ff_flacdsp_init_x86(FLACDSPContext *c, enum AVSampleFormat fmt, int channels);
+
+#endif /* AVCODEC_FLACDSP_H */
diff --git a/media/ffvpx/libavcodec/flacdsp_lpc_template.c b/media/ffvpx/libavcodec/flacdsp_lpc_template.c
new file mode 100644
index 0000000000..dd847d3b32
--- /dev/null
+++ b/media/ffvpx/libavcodec/flacdsp_lpc_template.c
@@ -0,0 +1,159 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include "libavutil/common.h"
+#include "mathops.h"
+
+#undef FUNC
+#undef sum_type
+#undef MUL
+#undef CLIP
+#undef FSUF
+
+#define FUNC(n) AV_JOIN(n ## _, SAMPLE_SIZE)
+
+#if SAMPLE_SIZE == 32
+# define sum_type int64_t
+# define MUL(a, b) MUL64(a, b)
+# define CLIP(x) av_clipl_int32(x)
+#else
+# define sum_type int32_t
+# define MUL(a, b) ((a) * (b))
+# define CLIP(x) (x)
+#endif
+
+#define LPC1(x) { \
+ int c = coefs[(x)-1]; \
+ p0 += MUL(c, s); \
+ s = smp[i-(x)+1]; \
+ p1 += MUL(c, s); \
+}
+
+static av_always_inline void FUNC(lpc_encode_unrolled)(int32_t *res,
+ const int32_t *smp, int len, int order,
+ const int32_t *coefs, int shift, int big)
+{
+ int i;
+ for (i = order; i < len; i += 2) {
+ int s = smp[i-order];
+ sum_type p0 = 0, p1 = 0;
+ if (big) {
+ switch (order) {
+ case 32: LPC1(32)
+ case 31: LPC1(31)
+ case 30: LPC1(30)
+ case 29: LPC1(29)
+ case 28: LPC1(28)
+ case 27: LPC1(27)
+ case 26: LPC1(26)
+ case 25: LPC1(25)
+ case 24: LPC1(24)
+ case 23: LPC1(23)
+ case 22: LPC1(22)
+ case 21: LPC1(21)
+ case 20: LPC1(20)
+ case 19: LPC1(19)
+ case 18: LPC1(18)
+ case 17: LPC1(17)
+ case 16: LPC1(16)
+ case 15: LPC1(15)
+ case 14: LPC1(14)
+ case 13: LPC1(13)
+ case 12: LPC1(12)
+ case 11: LPC1(11)
+ case 10: LPC1(10)
+ case 9: LPC1( 9)
+ LPC1( 8)
+ LPC1( 7)
+ LPC1( 6)
+ LPC1( 5)
+ LPC1( 4)
+ LPC1( 3)
+ LPC1( 2)
+ LPC1( 1)
+ }
+ } else {
+ switch (order) {
+ case 8: LPC1( 8)
+ case 7: LPC1( 7)
+ case 6: LPC1( 6)
+ case 5: LPC1( 5)
+ case 4: LPC1( 4)
+ case 3: LPC1( 3)
+ case 2: LPC1( 2)
+ case 1: LPC1( 1)
+ }
+ }
+ res[i ] = smp[i ] - CLIP(p0 >> shift);
+ res[i+1] = smp[i+1] - CLIP(p1 >> shift);
+ }
+}
+
+static void FUNC(flac_lpc_encode_c)(int32_t *res, const int32_t *smp, int len,
+ int order, const int32_t *coefs, int shift)
+{
+ int i;
+ for (i = 0; i < order; i++)
+ res[i] = smp[i];
+#if CONFIG_SMALL
+ for (i = order; i < len; i += 2) {
+ int j;
+ int s = smp[i];
+ sum_type p0 = 0, p1 = 0;
+ for (j = 0; j < order; j++) {
+ int c = coefs[j];
+ p1 += MUL(c, s);
+ s = smp[i-j-1];
+ p0 += MUL(c, s);
+ }
+ res[i ] = smp[i ] - CLIP(p0 >> shift);
+ res[i+1] = smp[i+1] - CLIP(p1 >> shift);
+ }
+#else
+ switch (order) {
+ case 1: FUNC(lpc_encode_unrolled)(res, smp, len, 1, coefs, shift, 0); break;
+ case 2: FUNC(lpc_encode_unrolled)(res, smp, len, 2, coefs, shift, 0); break;
+ case 3: FUNC(lpc_encode_unrolled)(res, smp, len, 3, coefs, shift, 0); break;
+ case 4: FUNC(lpc_encode_unrolled)(res, smp, len, 4, coefs, shift, 0); break;
+ case 5: FUNC(lpc_encode_unrolled)(res, smp, len, 5, coefs, shift, 0); break;
+ case 6: FUNC(lpc_encode_unrolled)(res, smp, len, 6, coefs, shift, 0); break;
+ case 7: FUNC(lpc_encode_unrolled)(res, smp, len, 7, coefs, shift, 0); break;
+ case 8: FUNC(lpc_encode_unrolled)(res, smp, len, 8, coefs, shift, 0); break;
+ default: FUNC(lpc_encode_unrolled)(res, smp, len, order, coefs, shift, 1); break;
+ }
+#endif
+}
+
+/* Comment for clarity/de-obfuscation.
+ *
+ * for (int i = order; i < len; i++) {
+ * int32_t p = 0;
+ * for (int j = 0; j < order; j++) {
+ * int c = coefs[j];
+ * int s = smp[(i-1)-j];
+ * p += c*s;
+ * }
+ * res[i] = smp[i] - (p >> shift);
+ * }
+ *
+ * The CONFIG_SMALL code above simplifies to this, in the case of SAMPLE_SIZE
+ * not being equal to 32 (at the present time that means for 16-bit audio). The
+ * code above does 2 samples per iteration. Commit bfdd5bc (made all the way
+ * back in 2007) says that way is faster.
+ */
diff --git a/media/ffvpx/libavcodec/flacdsp_template.c b/media/ffvpx/libavcodec/flacdsp_template.c
new file mode 100644
index 0000000000..0a6fe59e28
--- /dev/null
+++ b/media/ffvpx/libavcodec/flacdsp_template.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2012 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include "libavutil/macros.h"
+
+#undef FUNC
+#undef FSUF
+#undef sample
+#undef sample_type
+#undef OUT
+#undef S
+
+#if SAMPLE_SIZE == 32
+# define sample_type int32_t
+#else
+# define sample_type int16_t
+#endif
+
+#if PLANAR
+# define FSUF AV_JOIN(SAMPLE_SIZE, p)
+# define sample sample_type *
+# define OUT(n) n
+# define S(s, c, i) (s[c][i])
+#else
+# define FSUF SAMPLE_SIZE
+# define sample sample_type
+# define OUT(n) n[0]
+# define S(s, c, i) (*s++)
+#endif
+
+#define FUNC(n) AV_JOIN(n ## _, FSUF)
+
+static void FUNC(flac_decorrelate_indep_c)(uint8_t **out, int32_t **in,
+ int channels, int len, int shift)
+{
+ sample *samples = (sample *) OUT(out);
+ int i, j;
+
+ for (j = 0; j < len; j++)
+ for (i = 0; i < channels; i++)
+ S(samples, i, j) = (int)((unsigned)in[i][j] << shift);
+}
+
+static void FUNC(flac_decorrelate_ls_c)(uint8_t **out, int32_t **in,
+ int channels, int len, int shift)
+{
+ sample *samples = (sample *) OUT(out);
+ int i;
+
+ for (i = 0; i < len; i++) {
+ unsigned a = in[0][i];
+ unsigned b = in[1][i];
+ S(samples, 0, i) = a << shift;
+ S(samples, 1, i) = (a - b) << shift;
+ }
+}
+
+static void FUNC(flac_decorrelate_rs_c)(uint8_t **out, int32_t **in,
+ int channels, int len, int shift)
+{
+ sample *samples = (sample *) OUT(out);
+ int i;
+
+ for (i = 0; i < len; i++) {
+ unsigned a = in[0][i];
+ unsigned b = in[1][i];
+ S(samples, 0, i) = (a + b) << shift;
+ S(samples, 1, i) = b << shift;
+ }
+}
+
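+/* Mid/side reconstruction: in[0] carries (L+R)>>1 and in[1] carries L-R, so
+ * a = mid - (side >> 1) yields R and a + side yields L.
+ * Example: L=5, R=2 gives mid=3, side=3; a = 3-1 = 2 = R, a+3 = 5 = L. */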
+static void FUNC(flac_decorrelate_ms_c)(uint8_t **out, int32_t **in,
+ int channels, int len, int shift)
+{
+ sample *samples = (sample *) OUT(out);
+ int i;
+
+ for (i = 0; i < len; i++) {
+ unsigned a = in[0][i];
+ int b = in[1][i];
+ a -= b >> 1;
+ S(samples, 0, i) = (a + b) << shift;
+ S(samples, 1, i) = a << shift;
+ }
+}
diff --git a/media/ffvpx/libavcodec/frame_thread_encoder.h b/media/ffvpx/libavcodec/frame_thread_encoder.h
new file mode 100644
index 0000000000..201cba2a8f
--- /dev/null
+++ b/media/ffvpx/libavcodec/frame_thread_encoder.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_FRAME_THREAD_ENCODER_H
+#define AVCODEC_FRAME_THREAD_ENCODER_H
+
+#include "avcodec.h"
+
+/**
+ * Initialize frame thread encoder.
+ * @note hardware encoders are not supported
+ */
+int ff_frame_thread_encoder_init(AVCodecContext *avctx);
+void ff_frame_thread_encoder_free(AVCodecContext *avctx);
+int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ AVFrame *frame, int *got_packet_ptr);
+
+#endif /* AVCODEC_FRAME_THREAD_ENCODER_H */
diff --git a/media/ffvpx/libavcodec/get_bits.h b/media/ffvpx/libavcodec/get_bits.h
new file mode 100644
index 0000000000..65dc080ddb
--- /dev/null
+++ b/media/ffvpx/libavcodec/get_bits.h
@@ -0,0 +1,685 @@
+/*
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * bitstream reader API header.
+ */
+
+#ifndef AVCODEC_GET_BITS_H
+#define AVCODEC_GET_BITS_H
+
+#include <stdint.h>
+
+#include "libavutil/common.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avassert.h"
+
+#include "defs.h"
+#include "mathops.h"
+#include "vlc.h"
+
+/*
+ * Safe bitstream reading:
+ * optionally, the get_bits API can check to ensure that we
+ * don't read past input buffer boundaries. This is protected
+ * with CONFIG_SAFE_BITSTREAM_READER at the global level, and
+ * then below that with UNCHECKED_BITSTREAM_READER at the per-
+ * decoder level. This means that decoders that check internally
+ * can "#define UNCHECKED_BITSTREAM_READER 1" to disable
+ * overread checks.
+ * Boundary checking causes a minor performance penalty so for
+ * applications that won't want/need this, it can be disabled
+ * globally using "#define CONFIG_SAFE_BITSTREAM_READER 0".
+ */
+#ifndef UNCHECKED_BITSTREAM_READER
+#define UNCHECKED_BITSTREAM_READER !CONFIG_SAFE_BITSTREAM_READER
+#endif
+
+#ifndef CACHED_BITSTREAM_READER
+#define CACHED_BITSTREAM_READER 0
+#endif
+
+#if CACHED_BITSTREAM_READER
+
+// we always want the LE implementation, to provide get_bits_le()
+#define BITSTREAM_LE
+
+#ifndef BITSTREAM_READER_LE
+# define BITSTREAM_BE
+# define BITSTREAM_DEFAULT_BE
+#endif
+
+#include "bitstream.h"
+
+#undef BITSTREAM_LE
+#undef BITSTREAM_BE
+#undef BITSTREAM_DEFAULT_BE
+
+typedef BitstreamContext GetBitContext;
+
+#define get_bits_count bits_tell
+#define get_bits_left bits_left
+#define skip_bits_long bits_skip
+#define skip_bits bits_skip
+#define get_bits bits_read_nz
+#define get_bitsz bits_read
+#define get_bits_long bits_read
+#define get_bits1 bits_read_bit
+#define get_bits64 bits_read_64
+#define get_xbits bits_read_xbits
+#define get_sbits bits_read_signed_nz
+#define get_sbits_long bits_read_signed
+#define show_bits bits_peek
+#define show_bits_long bits_peek
+#define init_get_bits bits_init
+#define init_get_bits8 bits_init8
+#define align_get_bits bits_align
+#define get_vlc2 bits_read_vlc
+
+#define init_get_bits8_le(s, buffer, byte_size) bits_init8_le((BitstreamContextLE*)s, buffer, byte_size)
+#define get_bits_le(s, n) bits_read_le((BitstreamContextLE*)s, n)
+
+#define show_bits1(s) bits_peek(s, 1)
+#define skip_bits1(s) bits_skip(s, 1)
+
+#define skip_1stop_8data_bits bits_skip_1stop_8data
+
+#else // CACHED_BITSTREAM_READER
+
+typedef struct GetBitContext {
+ const uint8_t *buffer, *buffer_end;
+ int index;
+ int size_in_bits;
+ int size_in_bits_plus8;
+} GetBitContext;
+
+static inline unsigned int get_bits(GetBitContext *s, int n);
+static inline void skip_bits(GetBitContext *s, int n);
+static inline unsigned int show_bits(GetBitContext *s, int n);
+
+/* Bitstream reader API docs:
+ * name
+ * arbitrary name which is used as prefix for the internal variables
+ *
+ * gb
+ * getbitcontext
+ *
+ * OPEN_READER(name, gb)
+ * load gb into local variables
+ *
+ * CLOSE_READER(name, gb)
+ * store local vars in gb
+ *
+ * UPDATE_CACHE(name, gb)
+ * Refill the internal cache from the bitstream.
+ * After this call at least MIN_CACHE_BITS will be available.
+ *
+ * GET_CACHE(name, gb)
+ * Will output the contents of the internal cache,
+ * next bit is MSB of 32 or 64 bits (FIXME 64 bits).
+ *
+ * SHOW_UBITS(name, gb, num)
+ * Will return the next num bits.
+ *
+ * SHOW_SBITS(name, gb, num)
+ * Will return the next num bits and do sign extension.
+ *
+ * SKIP_BITS(name, gb, num)
+ * Will skip over the next num bits.
+ * Note, this is equivalent to SKIP_CACHE; SKIP_COUNTER.
+ *
+ * SKIP_CACHE(name, gb, num)
+ * Will remove the next num bits from the cache (note SKIP_COUNTER
+ * MUST be called before UPDATE_CACHE / CLOSE_READER).
+ *
+ * SKIP_COUNTER(name, gb, num)
+ * Will increment the internal bit counter (see SKIP_CACHE & SKIP_BITS).
+ *
+ * LAST_SKIP_BITS(name, gb, num)
+ * Like SKIP_BITS, to be used if next call is UPDATE_CACHE or CLOSE_READER.
+ *
+ * BITS_LEFT(name, gb)
+ * Return the number of bits left
+ *
+ * For examples see get_bits, show_bits, skip_bits, get_vlc.
+ */
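+
+/* Typical reader sequence, exactly as used by get_bits() further down:
+ *   OPEN_READER(re, gb);
+ *   UPDATE_CACHE(re, gb);         (refill the cache)
+ *   tmp = SHOW_UBITS(re, gb, n);  (peek n bits from the cache)
+ *   LAST_SKIP_BITS(re, gb, n);    (consume them)
+ *   CLOSE_READER(re, gb);         (store the new position back into gb)
+ */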
+
+#if defined LONG_BITSTREAM_READER
+# define MIN_CACHE_BITS 32
+#else
+# define MIN_CACHE_BITS 25
+#endif
+
+#define OPEN_READER_NOSIZE(name, gb) \
+ unsigned int name ## _index = (gb)->index; \
+ unsigned int av_unused name ## _cache
+
+#if UNCHECKED_BITSTREAM_READER
+#define OPEN_READER(name, gb) OPEN_READER_NOSIZE(name, gb)
+
+#define BITS_AVAILABLE(name, gb) 1
+#else
+#define OPEN_READER(name, gb) \
+ OPEN_READER_NOSIZE(name, gb); \
+ unsigned int name ## _size_plus8 = (gb)->size_in_bits_plus8
+
+#define BITS_AVAILABLE(name, gb) name ## _index < name ## _size_plus8
+#endif
+
+#define CLOSE_READER(name, gb) (gb)->index = name ## _index
+
+# ifdef LONG_BITSTREAM_READER
+
+# define UPDATE_CACHE_LE(name, gb) name ## _cache = \
+ AV_RL64((gb)->buffer + (name ## _index >> 3)) >> (name ## _index & 7)
+
+# define UPDATE_CACHE_BE(name, gb) name ## _cache = \
+ AV_RB64((gb)->buffer + (name ## _index >> 3)) >> (32 - (name ## _index & 7))
+
+#else
+
+# define UPDATE_CACHE_LE(name, gb) name ## _cache = \
+ AV_RL32((gb)->buffer + (name ## _index >> 3)) >> (name ## _index & 7)
+
+# define UPDATE_CACHE_BE(name, gb) name ## _cache = \
+ AV_RB32((gb)->buffer + (name ## _index >> 3)) << (name ## _index & 7)
+
+#endif
+
+
+#ifdef BITSTREAM_READER_LE
+
+# define UPDATE_CACHE(name, gb) UPDATE_CACHE_LE(name, gb)
+
+# define SKIP_CACHE(name, gb, num) name ## _cache >>= (num)
+
+#else
+
+# define UPDATE_CACHE(name, gb) UPDATE_CACHE_BE(name, gb)
+
+# define SKIP_CACHE(name, gb, num) name ## _cache <<= (num)
+
+#endif
+
+#if UNCHECKED_BITSTREAM_READER
+# define SKIP_COUNTER(name, gb, num) name ## _index += (num)
+#else
+# define SKIP_COUNTER(name, gb, num) \
+ name ## _index = FFMIN(name ## _size_plus8, name ## _index + (num))
+#endif
+
+#define BITS_LEFT(name, gb) ((int)((gb)->size_in_bits - name ## _index))
+
+#define SKIP_BITS(name, gb, num) \
+ do { \
+ SKIP_CACHE(name, gb, num); \
+ SKIP_COUNTER(name, gb, num); \
+ } while (0)
+
+#define LAST_SKIP_BITS(name, gb, num) SKIP_COUNTER(name, gb, num)
+
+#define SHOW_UBITS_LE(name, gb, num) zero_extend(name ## _cache, num)
+#define SHOW_SBITS_LE(name, gb, num) sign_extend(name ## _cache, num)
+
+#define SHOW_UBITS_BE(name, gb, num) NEG_USR32(name ## _cache, num)
+#define SHOW_SBITS_BE(name, gb, num) NEG_SSR32(name ## _cache, num)
+
+#ifdef BITSTREAM_READER_LE
+# define SHOW_UBITS(name, gb, num) SHOW_UBITS_LE(name, gb, num)
+# define SHOW_SBITS(name, gb, num) SHOW_SBITS_LE(name, gb, num)
+#else
+# define SHOW_UBITS(name, gb, num) SHOW_UBITS_BE(name, gb, num)
+# define SHOW_SBITS(name, gb, num) SHOW_SBITS_BE(name, gb, num)
+#endif
+
+#define GET_CACHE(name, gb) ((uint32_t) name ## _cache)
+
+
+static inline int get_bits_count(const GetBitContext *s)
+{
+ return s->index;
+}
+
+/**
+ * Skip the specified number of bits.
+ * @param n the number of bits to skip.
+ *          For the UNCHECKED_BITSTREAM_READER this must not cause the distance
+ *          from the start to overflow int32_t; staying within the bitstream
+ *          plus padding is sufficient.
+ */
+static inline void skip_bits_long(GetBitContext *s, int n)
+{
+#if UNCHECKED_BITSTREAM_READER
+ s->index += n;
+#else
+ s->index += av_clip(n, -s->index, s->size_in_bits_plus8 - s->index);
+#endif
+}
+
+/**
+ * Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
+ * If the MSB is not set, the value is negative.
+ * @param n length in bits
+ */
+static inline int get_xbits(GetBitContext *s, int n)
+{
+ register int sign;
+ register int32_t cache;
+ OPEN_READER(re, s);
+ av_assert2(n>0 && n<=25);
+ UPDATE_CACHE(re, s);
+ cache = GET_CACHE(re, s);
+ sign = ~cache >> 31;
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+ return (NEG_USR32(sign ^ cache, n) ^ sign) - sign;
+}
+
+static inline int get_xbits_le(GetBitContext *s, int n)
+{
+ register int sign;
+ register int32_t cache;
+ OPEN_READER(re, s);
+ av_assert2(n>0 && n<=25);
+ UPDATE_CACHE_LE(re, s);
+ cache = GET_CACHE(re, s);
+ sign = sign_extend(~cache, n) >> 31;
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+ return (zero_extend(sign ^ cache, n) ^ sign) - sign;
+}
+
+static inline int get_sbits(GetBitContext *s, int n)
+{
+ register int tmp;
+ OPEN_READER(re, s);
+ av_assert2(n>0 && n<=25);
+ UPDATE_CACHE(re, s);
+ tmp = SHOW_SBITS(re, s, n);
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+ return tmp;
+}
+
+/**
+ * Read 1-25 bits.
+ */
+static inline unsigned int get_bits(GetBitContext *s, int n)
+{
+ register unsigned int tmp;
+ OPEN_READER(re, s);
+ av_assert2(n>0 && n<=25);
+ UPDATE_CACHE(re, s);
+ tmp = SHOW_UBITS(re, s, n);
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+ av_assert2(tmp < UINT64_C(1) << n);
+ return tmp;
+}
+
+/**
+ * Read 0-25 bits.
+ */
+static av_always_inline int get_bitsz(GetBitContext *s, int n)
+{
+ return n ? get_bits(s, n) : 0;
+}
+
+static inline unsigned int get_bits_le(GetBitContext *s, int n)
+{
+ register int tmp;
+ OPEN_READER(re, s);
+ av_assert2(n>0 && n<=25);
+ UPDATE_CACHE_LE(re, s);
+ tmp = SHOW_UBITS_LE(re, s, n);
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+ return tmp;
+}
+
+/**
+ * Show 1-25 bits.
+ */
+static inline unsigned int show_bits(GetBitContext *s, int n)
+{
+ register unsigned int tmp;
+ OPEN_READER_NOSIZE(re, s);
+ av_assert2(n>0 && n<=25);
+ UPDATE_CACHE(re, s);
+ tmp = SHOW_UBITS(re, s, n);
+ return tmp;
+}
+
+static inline void skip_bits(GetBitContext *s, int n)
+{
+ OPEN_READER(re, s);
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+}
+
+static inline unsigned int get_bits1(GetBitContext *s)
+{
+ unsigned int index = s->index;
+ uint8_t result = s->buffer[index >> 3];
+#ifdef BITSTREAM_READER_LE
+ result >>= index & 7;
+ result &= 1;
+#else
+ result <<= index & 7;
+ result >>= 8 - 1;
+#endif
+#if !UNCHECKED_BITSTREAM_READER
+ if (s->index < s->size_in_bits_plus8)
+#endif
+ index++;
+ s->index = index;
+
+ return result;
+}
+
+static inline unsigned int show_bits1(GetBitContext *s)
+{
+ return show_bits(s, 1);
+}
+
+static inline void skip_bits1(GetBitContext *s)
+{
+ skip_bits(s, 1);
+}
+
+/**
+ * Read 0-32 bits.
+ */
+static inline unsigned int get_bits_long(GetBitContext *s, int n)
+{
+ av_assert2(n>=0 && n<=32);
+ if (!n) {
+ return 0;
+ } else if (n <= MIN_CACHE_BITS) {
+ return get_bits(s, n);
+ } else {
+#ifdef BITSTREAM_READER_LE
+ unsigned ret = get_bits(s, 16);
+ return ret | (get_bits(s, n - 16) << 16);
+#else
+ unsigned ret = get_bits(s, 16) << (n - 16);
+ return ret | get_bits(s, n - 16);
+#endif
+ }
+}
+
+/**
+ * Read 0-64 bits.
+ */
+static inline uint64_t get_bits64(GetBitContext *s, int n)
+{
+ if (n <= 32) {
+ return get_bits_long(s, n);
+ } else {
+#ifdef BITSTREAM_READER_LE
+ uint64_t ret = get_bits_long(s, 32);
+ return ret | (uint64_t) get_bits_long(s, n - 32) << 32;
+#else
+ uint64_t ret = (uint64_t) get_bits_long(s, n - 32) << 32;
+ return ret | get_bits_long(s, 32);
+#endif
+ }
+}
+
+/**
+ * Read 0-32 bits as a signed integer.
+ */
+static inline int get_sbits_long(GetBitContext *s, int n)
+{
+ // sign_extend(x, 0) is undefined
+ if (!n)
+ return 0;
+
+ return sign_extend(get_bits_long(s, n), n);
+}
+
+/**
+ * Read 0-64 bits as a signed integer.
+ */
+static inline int64_t get_sbits64(GetBitContext *s, int n)
+{
+ // sign_extend(x, 0) is undefined
+ if (!n)
+ return 0;
+
+ return sign_extend64(get_bits64(s, n), n);
+}
+
+/**
+ * Show 0-32 bits.
+ */
+static inline unsigned int show_bits_long(GetBitContext *s, int n)
+{
+ if (n <= MIN_CACHE_BITS) {
+ return show_bits(s, n);
+ } else {
+ GetBitContext gb = *s;
+ return get_bits_long(&gb, n);
+ }
+}
+
+
+/**
+ * Initialize GetBitContext.
+ * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
+ *        larger than the actual bitstream data because some optimized bitstream
+ *        readers read 32 or 64 bits at once and could read over the end
+ * @param bit_size the size of the buffer in bits
+ * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
+ */
+static inline int init_get_bits(GetBitContext *s, const uint8_t *buffer,
+ int bit_size)
+{
+ int buffer_size;
+ int ret = 0;
+
+ if (bit_size >= INT_MAX - FFMAX(7, AV_INPUT_BUFFER_PADDING_SIZE*8) || bit_size < 0 || !buffer) {
+ bit_size = 0;
+ buffer = NULL;
+ ret = AVERROR_INVALIDDATA;
+ }
+
+ buffer_size = (bit_size + 7) >> 3;
+
+ s->buffer = buffer;
+ s->size_in_bits = bit_size;
+ s->size_in_bits_plus8 = bit_size + 8;
+ s->buffer_end = buffer + buffer_size;
+ s->index = 0;
+
+ return ret;
+}
+
+/**
+ * Initialize GetBitContext.
+ * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
+ *        larger than the actual bitstream data because some optimized bitstream
+ *        readers read 32 or 64 bits at once and could read over the end
+ * @param byte_size the size of the buffer in bytes
+ * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
+ */
+static inline int init_get_bits8(GetBitContext *s, const uint8_t *buffer,
+ int byte_size)
+{
+ if (byte_size > INT_MAX / 8 || byte_size < 0)
+ byte_size = -1;
+ return init_get_bits(s, buffer, byte_size * 8);
+}
+
+static inline int init_get_bits8_le(GetBitContext *s, const uint8_t *buffer,
+ int byte_size)
+{
+ if (byte_size > INT_MAX / 8 || byte_size < 0)
+ byte_size = -1;
+ return init_get_bits(s, buffer, byte_size * 8);
+}
+
+static inline const uint8_t *align_get_bits(GetBitContext *s)
+{
+ int n = -get_bits_count(s) & 7;
+ if (n)
+ skip_bits(s, n);
+ return s->buffer + (s->index >> 3);
+}
+
+/**
+ * If the vlc code is invalid and max_depth=1, then no bits will be removed.
+ * If the vlc code is invalid and max_depth>1, then the number of bits removed
+ * is undefined.
+ */
+#define GET_VLC(code, name, gb, table, bits, max_depth) \
+ do { \
+ int n, nb_bits; \
+ unsigned int index; \
+ \
+ index = SHOW_UBITS(name, gb, bits); \
+ code = table[index].sym; \
+ n = table[index].len; \
+ \
+ if (max_depth > 1 && n < 0) { \
+ LAST_SKIP_BITS(name, gb, bits); \
+ UPDATE_CACHE(name, gb); \
+ \
+ nb_bits = -n; \
+ \
+ index = SHOW_UBITS(name, gb, nb_bits) + code; \
+ code = table[index].sym; \
+ n = table[index].len; \
+ if (max_depth > 2 && n < 0) { \
+ LAST_SKIP_BITS(name, gb, nb_bits); \
+ UPDATE_CACHE(name, gb); \
+ \
+ nb_bits = -n; \
+ \
+ index = SHOW_UBITS(name, gb, nb_bits) + code; \
+ code = table[index].sym; \
+ n = table[index].len; \
+ } \
+ } \
+ SKIP_BITS(name, gb, n); \
+ } while (0)
+
+#define GET_RL_VLC(level, run, name, gb, table, bits, \
+ max_depth, need_update) \
+ do { \
+ int n, nb_bits; \
+ unsigned int index; \
+ \
+ index = SHOW_UBITS(name, gb, bits); \
+ level = table[index].level; \
+ n = table[index].len; \
+ \
+ if (max_depth > 1 && n < 0) { \
+ SKIP_BITS(name, gb, bits); \
+ if (need_update) { \
+ UPDATE_CACHE(name, gb); \
+ } \
+ \
+ nb_bits = -n; \
+ \
+ index = SHOW_UBITS(name, gb, nb_bits) + level; \
+ level = table[index].level; \
+ n = table[index].len; \
+ if (max_depth > 2 && n < 0) { \
+ LAST_SKIP_BITS(name, gb, nb_bits); \
+ if (need_update) { \
+ UPDATE_CACHE(name, gb); \
+ } \
+ nb_bits = -n; \
+ \
+ index = SHOW_UBITS(name, gb, nb_bits) + level; \
+ level = table[index].level; \
+ n = table[index].len; \
+ } \
+ } \
+ run = table[index].run; \
+ SKIP_BITS(name, gb, n); \
+ } while (0)
+
+/**
+ * Parse a vlc code.
+ * @param bits is the number of bits which will be read at once, must be
+ * identical to nb_bits in init_vlc()
+ * @param max_depth is the number of times bits bits must be read to completely
+ * read the longest vlc code
+ * = (max_vlc_length + bits - 1) / bits
+ * @returns the code parsed or -1 if no vlc matches
+ */
+static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table,
+ int bits, int max_depth)
+{
+ int code;
+
+ OPEN_READER(re, s);
+ UPDATE_CACHE(re, s);
+
+ GET_VLC(code, re, s, table, bits, max_depth);
+
+ CLOSE_READER(re, s);
+
+ return code;
+}
+
+static inline int decode012(GetBitContext *gb)
+{
+ int n;
+ n = get_bits1(gb);
+ if (n == 0)
+ return 0;
+ else
+ return get_bits1(gb) + 1;
+}
+
+static inline int decode210(GetBitContext *gb)
+{
+ if (get_bits1(gb))
+ return 0;
+ else
+ return 2 - get_bits1(gb);
+}
+
+static inline int get_bits_left(GetBitContext *gb)
+{
+ return gb->size_in_bits - get_bits_count(gb);
+}
+
+static inline int skip_1stop_8data_bits(GetBitContext *gb)
+{
+ if (get_bits_left(gb) <= 0)
+ return AVERROR_INVALIDDATA;
+
+ while (get_bits1(gb)) {
+ skip_bits(gb, 8);
+ if (get_bits_left(gb) <= 0)
+ return AVERROR_INVALIDDATA;
+ }
+
+ return 0;
+}
+
+#endif // CACHED_BITSTREAM_READER
+
+#endif /* AVCODEC_GET_BITS_H */
diff --git a/media/ffvpx/libavcodec/get_buffer.c b/media/ffvpx/libavcodec/get_buffer.c
new file mode 100644
index 0000000000..a04fd878de
--- /dev/null
+++ b/media/ffvpx/libavcodec/get_buffer.c
@@ -0,0 +1,304 @@
+/*
+ * The default get_buffer2() implementation
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
+#include "libavutil/frame.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/mem.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/version.h"
+
+#include "avcodec.h"
+#include "internal.h"
+
+typedef struct FramePool {
+ /**
+ * Pools for each data plane. For audio all the planes have the same size,
+ * so only pools[0] is used.
+ */
+ AVBufferPool *pools[4];
+
+ /*
+ * Pool parameters
+ */
+ int format;
+ int width, height;
+ int stride_align[AV_NUM_DATA_POINTERS];
+ int linesize[4];
+ int planes;
+ int channels;
+ int samples;
+} FramePool;
+
+static void frame_pool_free(void *opaque, uint8_t *data)
+{
+ FramePool *pool = (FramePool*)data;
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
+ av_buffer_pool_uninit(&pool->pools[i]);
+
+ av_freep(&data);
+}
+
+static AVBufferRef *frame_pool_alloc(void)
+{
+ FramePool *pool = av_mallocz(sizeof(*pool));
+ AVBufferRef *buf;
+
+ if (!pool)
+ return NULL;
+
+ buf = av_buffer_create((uint8_t*)pool, sizeof(*pool),
+ frame_pool_free, NULL, 0);
+ if (!buf) {
+ av_freep(&pool);
+ return NULL;
+ }
+
+ return buf;
+}
+
+static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
+{
+ FramePool *pool = avctx->internal->pool ?
+ (FramePool*)avctx->internal->pool->data : NULL;
+ AVBufferRef *pool_buf;
+ int i, ret, ch, planes;
+
+ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ int planar = av_sample_fmt_is_planar(frame->format);
+ ch = frame->ch_layout.nb_channels;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!ch)
+ ch = frame->channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ planes = planar ? ch : 1;
+ }
+
+ if (pool && pool->format == frame->format) {
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
+ pool->width == frame->width && pool->height == frame->height)
+ return 0;
+ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes &&
+ pool->channels == ch && frame->nb_samples == pool->samples)
+ return 0;
+ }
+
+ pool_buf = frame_pool_alloc();
+ if (!pool_buf)
+ return AVERROR(ENOMEM);
+ pool = (FramePool*)pool_buf->data;
+
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO: {
+ int linesize[4];
+ int w = frame->width;
+ int h = frame->height;
+ int unaligned;
+ ptrdiff_t linesize1[4];
+ size_t size[4];
+
+ avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
+
+ do {
+ // NOTE: do not align linesizes individually, this breaks e.g. assumptions
+ // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
+ ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
+ if (ret < 0)
+ goto fail;
+ // increase alignment of w for next try (rhs gives the lowest bit set in w)
+ w += w & ~(w - 1);
+
+ unaligned = 0;
+ for (i = 0; i < 4; i++)
+ unaligned |= linesize[i] % pool->stride_align[i];
+ } while (unaligned);
+
+ for (i = 0; i < 4; i++)
+ linesize1[i] = linesize[i];
+ ret = av_image_fill_plane_sizes(size, avctx->pix_fmt, h, linesize1);
+ if (ret < 0)
+ goto fail;
+
+ for (i = 0; i < 4; i++) {
+ pool->linesize[i] = linesize[i];
+ if (size[i]) {
+ if (size[i] > INT_MAX - (16 + STRIDE_ALIGN - 1)) {
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
+ CONFIG_MEMORY_POISONING ?
+ NULL :
+ av_buffer_allocz);
+ if (!pool->pools[i]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+ }
+ pool->format = frame->format;
+ pool->width = frame->width;
+ pool->height = frame->height;
+
+ break;
+ }
+ case AVMEDIA_TYPE_AUDIO: {
+ ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
+ frame->nb_samples, frame->format, 0);
+ if (ret < 0)
+ goto fail;
+
+ pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
+ if (!pool->pools[0]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ pool->format = frame->format;
+ pool->planes = planes;
+ pool->channels = ch;
+ pool->samples = frame->nb_samples;
+ break;
+ }
+ default: av_assert0(0);
+ }
+
+ av_buffer_unref(&avctx->internal->pool);
+ avctx->internal->pool = pool_buf;
+
+ return 0;
+fail:
+ av_buffer_unref(&pool_buf);
+ return ret;
+}
+
+static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+ FramePool *pool = (FramePool*)avctx->internal->pool->data;
+ int planes = pool->planes;
+ int i;
+
+ frame->linesize[0] = pool->linesize[0];
+
+ if (planes > AV_NUM_DATA_POINTERS) {
+ frame->extended_data = av_calloc(planes, sizeof(*frame->extended_data));
+ frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
+ frame->extended_buf = av_calloc(frame->nb_extended_buf,
+ sizeof(*frame->extended_buf));
+ if (!frame->extended_data || !frame->extended_buf) {
+ av_freep(&frame->extended_data);
+ av_freep(&frame->extended_buf);
+ return AVERROR(ENOMEM);
+ }
+ } else {
+ frame->extended_data = frame->data;
+ av_assert0(frame->nb_extended_buf == 0);
+ }
+
+ for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
+ frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
+ if (!frame->buf[i])
+ goto fail;
+ frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
+ }
+ for (i = 0; i < frame->nb_extended_buf; i++) {
+ frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
+ if (!frame->extended_buf[i])
+ goto fail;
+ frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
+ }
+
+ if (avctx->debug & FF_DEBUG_BUFFERS)
+        av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p\n", frame);
+
+ return 0;
+fail:
+ av_frame_unref(frame);
+ return AVERROR(ENOMEM);
+}
+
+static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
+{
+ FramePool *pool = (FramePool*)s->internal->pool->data;
+ int i;
+
+ if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
+ av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
+ return -1;
+ }
+
+ memset(pic->data, 0, sizeof(pic->data));
+ pic->extended_data = pic->data;
+
+ for (i = 0; i < 4 && pool->pools[i]; i++) {
+ pic->linesize[i] = pool->linesize[i];
+
+ pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
+ if (!pic->buf[i])
+ goto fail;
+
+ pic->data[i] = pic->buf[i]->data;
+ }
+ for (; i < AV_NUM_DATA_POINTERS; i++) {
+ pic->data[i] = NULL;
+ pic->linesize[i] = 0;
+ }
+
+ if (s->debug & FF_DEBUG_BUFFERS)
+ av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
+
+ return 0;
+fail:
+ av_frame_unref(pic);
+ return AVERROR(ENOMEM);
+}
+
+int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
+{
+ int ret;
+
+ if (avctx->hw_frames_ctx) {
+ ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
+ frame->width = avctx->coded_width;
+ frame->height = avctx->coded_height;
+ return ret;
+ }
+
+ if ((ret = update_frame_pool(avctx, frame)) < 0)
+ return ret;
+
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ return video_get_buffer(avctx, frame);
+ case AVMEDIA_TYPE_AUDIO:
+ return audio_get_buffer(avctx, frame);
+ default:
+ return -1;
+ }
+}
diff --git a/media/ffvpx/libavcodec/golomb.c b/media/ffvpx/libavcodec/golomb.c
new file mode 100644
index 0000000000..f9ca8149eb
--- /dev/null
+++ b/media/ffvpx/libavcodec/golomb.c
@@ -0,0 +1,173 @@
+/*
+ * exp golomb vlc stuff
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @brief
+ * exp golomb vlc stuff
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include <stdint.h>
+
+const uint8_t ff_golomb_vlc_len[512]={
+19,17,15,15,13,13,13,13,11,11,11,11,11,11,11,11,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
+};
+
+const uint8_t ff_ue_golomb_vlc_code[512]={
+32,32,32,32,32,32,32,32,31,32,32,32,32,32,32,32,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,
+ 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9,10,10,10,10,11,11,11,11,12,12,12,12,13,13,13,13,14,14,14,14,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+const int8_t ff_se_golomb_vlc_code[512]={
+ 17, 17, 17, 17, 17, 17, 17, 17, 16, 17, 17, 17, 17, 17, 17, 17, 8, -8, 9, -9, 10,-10, 11,-11, 12,-12, 13,-13, 14,-14, 15,-15,
+ 4, 4, 4, 4, -4, -4, -4, -4, 5, 5, 5, 5, -5, -5, -5, -5, 6, 6, 6, 6, -6, -6, -6, -6, 7, 7, 7, 7, -7, -7, -7, -7,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+
+const uint8_t ff_ue_golomb_len[256]={
+ 1, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,11,
+11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,13,
+13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,
+13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,17,
+};
+
+const uint8_t ff_interleaved_golomb_vlc_len[256]={
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+};
+
+const uint8_t ff_interleaved_ue_golomb_vlc_code[256]={
+ 15,16,7, 7, 17,18,8, 8, 3, 3, 3, 3, 3, 3, 3, 3,
+ 19,20,9, 9, 21,22,10,10,4, 4, 4, 4, 4, 4, 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 23,24,11,11,25,26,12,12,5, 5, 5, 5, 5, 5, 5, 5,
+ 27,28,13,13,29,30,14,14,6, 6, 6, 6, 6, 6, 6, 6,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+const int8_t ff_interleaved_se_golomb_vlc_code[256]={
+ 8, -8, 4, 4, 9, -9, -4, -4, 2, 2, 2, 2, 2, 2, 2, 2,
+ 10,-10, 5, 5, 11,-11, -5, -5, -2, -2, -2, -2, -2, -2, -2, -2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 12,-12, 6, 6, 13,-13, -6, -6, 3, 3, 3, 3, 3, 3, 3, 3,
+ 14,-14, 7, 7, 15,-15, -7, -7, -3, -3, -3, -3, -3, -3, -3, -3,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+const uint8_t ff_interleaved_dirac_golomb_vlc_code[256]={
+0, 1, 0, 0, 2, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+4, 5, 2, 2, 6, 7, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+8, 9, 4, 4, 10,11,5, 5, 2, 2, 2, 2, 2, 2, 2, 2,
+12,13,6, 6, 14,15,7, 7, 3, 3, 3, 3, 3, 3, 3, 3,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,};
diff --git a/media/ffvpx/libavcodec/golomb.h b/media/ffvpx/libavcodec/golomb.h
new file mode 100644
index 0000000000..164c2583b6
--- /dev/null
+++ b/media/ffvpx/libavcodec/golomb.h
@@ -0,0 +1,616 @@
+/*
+ * exp golomb vlc stuff
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2004 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @brief
+ * exp golomb vlc stuff
+ * @author Michael Niedermayer <michaelni@gmx.at> and Alex Beregszaszi
+ */
+
+#ifndef AVCODEC_GOLOMB_H
+#define AVCODEC_GOLOMB_H
+
+#include <stdint.h>
+
+#include "get_bits.h"
+
+#define INVALID_VLC 0x80000000
+
+extern const uint8_t ff_golomb_vlc_len[512];
+extern const uint8_t ff_ue_golomb_vlc_code[512];
+extern const int8_t ff_se_golomb_vlc_code[512];
+
+extern const uint8_t ff_interleaved_golomb_vlc_len[256];
+extern const uint8_t ff_interleaved_ue_golomb_vlc_code[256];
+extern const int8_t ff_interleaved_se_golomb_vlc_code[256];
+extern const uint8_t ff_interleaved_dirac_golomb_vlc_code[256];
+
+/**
+ * Read an unsigned Exp-Golomb code in the range 0 to 8190.
+ *
+ * @returns the read value or a negative error code.
+ */
+static inline int get_ue_golomb(GetBitContext *gb)
+{
+ unsigned int buf;
+
+#if CACHED_BITSTREAM_READER
+ buf = show_bits_long(gb, 32);
+
+ if (buf >= (1 << 27)) {
+ buf >>= 32 - 9;
+ skip_bits_long(gb, ff_golomb_vlc_len[buf]);
+
+ return ff_ue_golomb_vlc_code[buf];
+ } else {
+ int log = 2 * av_log2(buf) - 31;
+
+ skip_bits_long(gb, 32 - log);
+ if (log < 7)
+ return AVERROR_INVALIDDATA;
+ buf >>= log;
+ buf--;
+
+ return buf;
+ }
+#else
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ if (buf >= (1 << 27)) {
+ buf >>= 32 - 9;
+ LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_ue_golomb_vlc_code[buf];
+ } else {
+ int log = 2 * av_log2(buf) - 31;
+ LAST_SKIP_BITS(re, gb, 32 - log);
+ CLOSE_READER(re, gb);
+ if (log < 7)
+ return AVERROR_INVALIDDATA;
+ buf >>= log;
+ buf--;
+
+ return buf;
+ }
+#endif
+}
+
+/**
+ * Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
+ */
+static inline unsigned get_ue_golomb_long(GetBitContext *gb)
+{
+ unsigned buf, log;
+
+ buf = show_bits_long(gb, 32);
+ log = 31 - av_log2(buf);
+ skip_bits_long(gb, log);
+
+ return get_bits_long(gb, log + 1) - 1;
+}
+
+/**
+ * Read an unsigned Exp-Golomb code, constrained to a maximum of 31.
+ * If the value encountered is not in 0..31, the return value
+ * is outside the range 0..30.
+ */
+static inline int get_ue_golomb_31(GetBitContext *gb)
+{
+ unsigned int buf;
+
+#if CACHED_BITSTREAM_READER
+ buf = show_bits_long(gb, 32);
+
+ buf >>= 32 - 9;
+ skip_bits_long(gb, ff_golomb_vlc_len[buf]);
+#else
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ buf >>= 32 - 9;
+ LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+#endif
+
+ return ff_ue_golomb_vlc_code[buf];
+}
+
+static inline unsigned get_interleaved_ue_golomb(GetBitContext *gb)
+{
+ uint32_t buf;
+
+#if CACHED_BITSTREAM_READER
+ buf = show_bits_long(gb, 32);
+
+ if (buf & 0xAA800000) {
+ buf >>= 32 - 8;
+ skip_bits_long(gb, ff_interleaved_golomb_vlc_len[buf]);
+
+ return ff_interleaved_ue_golomb_vlc_code[buf];
+ } else {
+ unsigned ret = 1;
+
+ do {
+ buf >>= 32 - 8;
+ skip_bits_long(gb, FFMIN(ff_interleaved_golomb_vlc_len[buf], 8));
+
+ if (ff_interleaved_golomb_vlc_len[buf] != 9) {
+ ret <<= (ff_interleaved_golomb_vlc_len[buf] - 1) >> 1;
+ ret |= ff_interleaved_dirac_golomb_vlc_code[buf];
+ break;
+ }
+ ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
+ buf = show_bits_long(gb, 32);
+ } while (get_bits_left(gb) > 0);
+
+ return ret - 1;
+ }
+#else
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ if (buf & 0xAA800000) {
+ buf >>= 32 - 8;
+ LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_interleaved_ue_golomb_vlc_code[buf];
+ } else {
+ unsigned ret = 1;
+
+ do {
+ buf >>= 32 - 8;
+ LAST_SKIP_BITS(re, gb,
+ FFMIN(ff_interleaved_golomb_vlc_len[buf], 8));
+
+ if (ff_interleaved_golomb_vlc_len[buf] != 9) {
+ ret <<= (ff_interleaved_golomb_vlc_len[buf] - 1) >> 1;
+ ret |= ff_interleaved_dirac_golomb_vlc_code[buf];
+ break;
+ }
+ ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+ } while (ret<0x8000000U && BITS_AVAILABLE(re, gb));
+
+ CLOSE_READER(re, gb);
+ return ret - 1;
+ }
+#endif
+}
+
+/**
+ * read unsigned truncated exp golomb code.
+ */
+static inline int get_te0_golomb(GetBitContext *gb, int range)
+{
+ av_assert2(range >= 1);
+
+ if (range == 1)
+ return 0;
+ else if (range == 2)
+ return get_bits1(gb) ^ 1;
+ else
+ return get_ue_golomb(gb);
+}
+
+/**
+ * read unsigned truncated exp golomb code.
+ */
+static inline int get_te_golomb(GetBitContext *gb, int range)
+{
+ av_assert2(range >= 1);
+
+ if (range == 2)
+ return get_bits1(gb) ^ 1;
+ else
+ return get_ue_golomb(gb);
+}
+
+/**
+ * read signed exp golomb code.
+ */
+static inline int get_se_golomb(GetBitContext *gb)
+{
+ unsigned int buf;
+
+#if CACHED_BITSTREAM_READER
+ buf = show_bits_long(gb, 32);
+
+ if (buf >= (1 << 27)) {
+ buf >>= 32 - 9;
+ skip_bits_long(gb, ff_golomb_vlc_len[buf]);
+
+ return ff_se_golomb_vlc_code[buf];
+ } else {
+ int log = 2 * av_log2(buf) - 31;
+ buf >>= log;
+
+ skip_bits_long(gb, 32 - log);
+
+ if (buf & 1)
+ buf = -(buf >> 1);
+ else
+ buf = (buf >> 1);
+
+ return buf;
+ }
+#else
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ if (buf >= (1 << 27)) {
+ buf >>= 32 - 9;
+ LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_se_golomb_vlc_code[buf];
+ } else {
+ int log = av_log2(buf), sign;
+ LAST_SKIP_BITS(re, gb, 31 - log);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ buf >>= log;
+
+ LAST_SKIP_BITS(re, gb, 32 - log);
+ CLOSE_READER(re, gb);
+
+ sign = -(buf & 1);
+ buf = ((buf >> 1) ^ sign) - sign;
+
+ return buf;
+ }
+#endif
+}
+
+static inline int get_se_golomb_long(GetBitContext *gb)
+{
+ unsigned int buf = get_ue_golomb_long(gb);
+ int sign = (buf & 1) - 1;
+ return ((buf >> 1) ^ sign) + 1;
+}
+
+static inline int get_interleaved_se_golomb(GetBitContext *gb)
+{
+ unsigned int buf;
+
+#if CACHED_BITSTREAM_READER
+ buf = show_bits_long(gb, 32);
+
+ if (buf & 0xAA800000) {
+ buf >>= 32 - 8;
+ skip_bits_long(gb, ff_interleaved_golomb_vlc_len[buf]);
+
+ return ff_interleaved_se_golomb_vlc_code[buf];
+ } else {
+ int log;
+ skip_bits(gb, 8);
+ buf |= 1 | show_bits(gb, 24);
+
+ if ((buf & 0xAAAAAAAA) == 0)
+ return INVALID_VLC;
+
+ for (log = 31; (buf & 0x80000000) == 0; log--)
+ buf = (buf << 2) - ((buf << log) >> (log - 1)) + (buf >> 30);
+
+ skip_bits_long(gb, 63 - 2 * log - 8);
+
+ return (signed) (((((buf << log) >> log) - 1) ^ -(buf & 0x1)) + 1) >> 1;
+ }
+#else
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ if (buf & 0xAA800000) {
+ buf >>= 32 - 8;
+ LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_interleaved_se_golomb_vlc_code[buf];
+ } else {
+ int log;
+ LAST_SKIP_BITS(re, gb, 8);
+ UPDATE_CACHE(re, gb);
+ buf |= 1 | (GET_CACHE(re, gb) >> 8);
+
+ if ((buf & 0xAAAAAAAA) == 0)
+ return INVALID_VLC;
+
+ for (log = 31; (buf & 0x80000000) == 0; log--)
+ buf = (buf << 2) - ((buf << log) >> (log - 1)) + (buf >> 30);
+
+ LAST_SKIP_BITS(re, gb, 63 - 2 * log - 8);
+ CLOSE_READER(re, gb);
+
+ return (signed) (((((buf << log) >> log) - 1) ^ -(buf & 0x1)) + 1) >> 1;
+ }
+#endif
+}
+
+static inline int dirac_get_se_golomb(GetBitContext *gb)
+{
+ uint32_t ret = get_interleaved_ue_golomb(gb);
+
+ if (ret) {
+ int sign = -get_bits1(gb);
+ ret = (ret ^ sign) - sign;
+ }
+
+ return ret;
+}
+
+/**
+ * read unsigned golomb rice code (ffv1).
+ */
+static inline int get_ur_golomb(GetBitContext *gb, int k, int limit,
+ int esc_len)
+{
+ unsigned int buf;
+ int log;
+
+#if CACHED_BITSTREAM_READER
+ buf = show_bits_long(gb, 32);
+
+ log = av_log2(buf);
+
+ if (log > 31 - limit) {
+ buf >>= log - k;
+ buf += (30 - log) << k;
+ skip_bits_long(gb, 32 + k - log);
+
+ return buf;
+ } else {
+ skip_bits_long(gb, limit);
+ buf = get_bits_long(gb, esc_len);
+
+ return buf + limit - 1;
+ }
+#else
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ log = av_log2(buf);
+
+ if (log > 31 - limit) {
+ buf >>= log - k;
+ buf += (30U - log) << k;
+ LAST_SKIP_BITS(re, gb, 32 + k - log);
+ CLOSE_READER(re, gb);
+
+ return buf;
+ } else {
+ LAST_SKIP_BITS(re, gb, limit);
+ UPDATE_CACHE(re, gb);
+
+ buf = SHOW_UBITS(re, gb, esc_len);
+
+ LAST_SKIP_BITS(re, gb, esc_len);
+ CLOSE_READER(re, gb);
+
+ return buf + limit - 1;
+ }
+#endif
+}
+
+/**
+ * read unsigned golomb rice code (jpegls).
+ */
+static inline int get_ur_golomb_jpegls(GetBitContext *gb, int k, int limit,
+ int esc_len)
+{
+ unsigned int buf;
+ int log;
+
+#if CACHED_BITSTREAM_READER
+ buf = show_bits_long(gb, 32);
+
+ log = av_log2(buf);
+
+ if (log - k >= 1 && 32 - log < limit) {
+ buf >>= log - k;
+ buf += (30 - log) << k;
+ skip_bits_long(gb, 32 + k - log);
+
+ return buf;
+ } else {
+ int i;
+ for (i = 0;
+ i < limit && get_bits1(gb) == 0 && get_bits_left(gb) > 0;
+ i++);
+
+ if (i < limit - 1) {
+ buf = get_bits_long(gb, k);
+
+ return buf + (i << k);
+ } else if (i == limit - 1) {
+ buf = get_bits_long(gb, esc_len);
+
+ return buf + 1;
+ } else
+ return -1;
+ }
+#else
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ log = av_log2(buf);
+
+ av_assert2(k <= 31);
+
+ if (log - k >= 32 - MIN_CACHE_BITS + (MIN_CACHE_BITS == 32) &&
+ 32 - log < limit) {
+ buf >>= log - k;
+ buf += (30U - log) << k;
+ LAST_SKIP_BITS(re, gb, 32 + k - log);
+ CLOSE_READER(re, gb);
+
+ return buf;
+ } else {
+ int i;
+ for (i = 0; i + MIN_CACHE_BITS <= limit && SHOW_UBITS(re, gb, MIN_CACHE_BITS) == 0; i += MIN_CACHE_BITS) {
+ if (gb->size_in_bits <= re_index) {
+ CLOSE_READER(re, gb);
+ return -1;
+ }
+ LAST_SKIP_BITS(re, gb, MIN_CACHE_BITS);
+ UPDATE_CACHE(re, gb);
+ }
+ for (; i < limit && SHOW_UBITS(re, gb, 1) == 0; i++) {
+ SKIP_BITS(re, gb, 1);
+ }
+ LAST_SKIP_BITS(re, gb, 1);
+ UPDATE_CACHE(re, gb);
+
+ if (i < limit - 1) {
+ if (k) {
+ if (k > MIN_CACHE_BITS - 1) {
+ buf = SHOW_UBITS(re, gb, 16) << (k-16);
+ LAST_SKIP_BITS(re, gb, 16);
+ UPDATE_CACHE(re, gb);
+ buf |= SHOW_UBITS(re, gb, k-16);
+ LAST_SKIP_BITS(re, gb, k-16);
+ } else {
+ buf = SHOW_UBITS(re, gb, k);
+ LAST_SKIP_BITS(re, gb, k);
+ }
+ } else {
+ buf = 0;
+ }
+
+ buf += ((SUINT)i << k);
+ } else if (i == limit - 1) {
+ buf = SHOW_UBITS(re, gb, esc_len);
+ LAST_SKIP_BITS(re, gb, esc_len);
+
+ buf ++;
+ } else {
+ buf = -1;
+ }
+ CLOSE_READER(re, gb);
+ return buf;
+ }
+#endif
+}
+
+/**
+ * read signed golomb rice code (ffv1).
+ */
+static inline int get_sr_golomb(GetBitContext *gb, int k, int limit,
+ int esc_len)
+{
+ unsigned v = get_ur_golomb(gb, k, limit, esc_len);
+ return (v >> 1) ^ -(v & 1);
+}
+
+/**
+ * read signed golomb rice code (flac).
+ */
+static inline int get_sr_golomb_flac(GetBitContext *gb, int k, int limit,
+ int esc_len)
+{
+ unsigned v = get_ur_golomb_jpegls(gb, k, limit, esc_len);
+ return (v >> 1) ^ -(v & 1);
+}
+
+/**
+ * read unsigned golomb rice code (shorten).
+ */
+static inline unsigned int get_ur_golomb_shorten(GetBitContext *gb, int k)
+{
+ return get_ur_golomb_jpegls(gb, k, INT_MAX, 0);
+}
+
+/**
+ * read signed golomb rice code (shorten).
+ */
+static inline int get_sr_golomb_shorten(GetBitContext *gb, int k)
+{
+ int uvar = get_ur_golomb_jpegls(gb, k + 1, INT_MAX, 0);
+ return (uvar >> 1) ^ -(uvar & 1);
+}
+
+#ifdef TRACE
+
+static inline int get_ue(GetBitContext *s, const char *file, const char *func,
+ int line)
+{
+ int show = show_bits(s, 24);
+ int pos = get_bits_count(s);
+ int i = get_ue_golomb(s);
+ int len = get_bits_count(s) - pos;
+ int bits = show >> (24 - len);
+
+ av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d ue @%5d in %s %s:%d\n",
+ bits, len, i, pos, file, func, line);
+
+ return i;
+}
+
+static inline int get_se(GetBitContext *s, const char *file, const char *func,
+ int line)
+{
+ int show = show_bits(s, 24);
+ int pos = get_bits_count(s);
+ int i = get_se_golomb(s);
+ int len = get_bits_count(s) - pos;
+ int bits = show >> (24 - len);
+
+ av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d se @%5d in %s %s:%d\n",
+ bits, len, i, pos, file, func, line);
+
+ return i;
+}
+
+static inline int get_te(GetBitContext *s, int r, const char *file, const char *func,
+ int line)
+{
+ int show = show_bits(s, 24);
+ int pos = get_bits_count(s);
+ int i = get_te0_golomb(s, r);
+ int len = get_bits_count(s) - pos;
+ int bits = show >> (24 - len);
+
+ av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d te @%5d in %s %s:%d\n",
+ bits, len, i, pos, file, func, line);
+
+ return i;
+}
+
+#define get_ue_golomb(a) get_ue(a, __FILE__, __func__, __LINE__)
+#define get_se_golomb(a) get_se(a, __FILE__, __func__, __LINE__)
+#define get_te_golomb(a, r) get_te(a, r, __FILE__, __func__, __LINE__)
+#define get_te0_golomb(a, r) get_te(a, r, __FILE__, __func__, __LINE__)
+
+#endif /* TRACE */
+#endif /* AVCODEC_GOLOMB_H */
diff --git a/media/ffvpx/libavcodec/h263dsp.h b/media/ffvpx/libavcodec/h263dsp.h
new file mode 100644
index 0000000000..1abea3ca8c
--- /dev/null
+++ b/media/ffvpx/libavcodec/h263dsp.h
@@ -0,0 +1,35 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_H263DSP_H
+#define AVCODEC_H263DSP_H
+
+#include <stdint.h>
+
+extern const uint8_t ff_h263_loop_filter_strength[32];
+
+typedef struct H263DSPContext {
+ void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale);
+ void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale);
+} H263DSPContext;
+
+void ff_h263dsp_init(H263DSPContext *ctx);
+void ff_h263dsp_init_x86(H263DSPContext *ctx);
+void ff_h263dsp_init_mips(H263DSPContext *ctx);
+
+#endif /* AVCODEC_H263DSP_H */
diff --git a/media/ffvpx/libavcodec/h264chroma.h b/media/ffvpx/libavcodec/h264chroma.h
new file mode 100644
index 0000000000..b8f9c8f4fc
--- /dev/null
+++ b/media/ffvpx/libavcodec/h264chroma.h
@@ -0,0 +1,41 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_H264CHROMA_H
+#define AVCODEC_H264CHROMA_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef void (*h264_chroma_mc_func)(uint8_t *dst /*align 8*/, const uint8_t *src /*align 1*/, ptrdiff_t srcStride, int h, int x, int y);
+
+typedef struct H264ChromaContext {
+ h264_chroma_mc_func put_h264_chroma_pixels_tab[4];
+ h264_chroma_mc_func avg_h264_chroma_pixels_tab[4];
+} H264ChromaContext;
+
+void ff_h264chroma_init(H264ChromaContext *c, int bit_depth);
+
+void ff_h264chroma_init_aarch64(H264ChromaContext *c, int bit_depth);
+void ff_h264chroma_init_arm(H264ChromaContext *c, int bit_depth);
+void ff_h264chroma_init_ppc(H264ChromaContext *c, int bit_depth);
+void ff_h264chroma_init_x86(H264ChromaContext *c, int bit_depth);
+void ff_h264chroma_init_mips(H264ChromaContext *c, int bit_depth);
+void ff_h264chroma_init_loongarch(H264ChromaContext *c, int bit_depth);
+
+#endif /* AVCODEC_H264CHROMA_H */
diff --git a/media/ffvpx/libavcodec/h264dsp.h b/media/ffvpx/libavcodec/h264dsp.h
new file mode 100644
index 0000000000..e0880c4d88
--- /dev/null
+++ b/media/ffvpx/libavcodec/h264dsp.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2003-2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * H.264 DSP functions.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_H264DSP_H
+#define AVCODEC_H264DSP_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+typedef void (*h264_weight_func)(uint8_t *block, ptrdiff_t stride, int height,
+ int log2_denom, int weight, int offset);
+typedef void (*h264_biweight_func)(uint8_t *dst, uint8_t *src,
+ ptrdiff_t stride, int height, int log2_denom,
+ int weightd, int weights, int offset);
+
+/**
+ * Context for storing H.264 DSP functions
+ */
+typedef struct H264DSPContext {
+ /* weighted MC */
+ h264_weight_func weight_h264_pixels_tab[4];
+ h264_biweight_func biweight_h264_pixels_tab[4];
+
+ /* loop filter */
+ void (*h264_v_loop_filter_luma)(uint8_t *pix /*align 16*/, ptrdiff_t stride,
+ int alpha, int beta, int8_t *tc0);
+ void (*h264_h_loop_filter_luma)(uint8_t *pix /*align 4 */, ptrdiff_t stride,
+ int alpha, int beta, int8_t *tc0);
+ void (*h264_h_loop_filter_luma_mbaff)(uint8_t *pix /*align 16*/, ptrdiff_t stride,
+ int alpha, int beta, int8_t *tc0);
+ /* v/h_loop_filter_luma_intra: align 16 */
+ void (*h264_v_loop_filter_luma_intra)(uint8_t *pix, ptrdiff_t stride,
+ int alpha, int beta);
+ void (*h264_h_loop_filter_luma_intra)(uint8_t *pix, ptrdiff_t stride,
+ int alpha, int beta);
+ void (*h264_h_loop_filter_luma_mbaff_intra)(uint8_t *pix /*align 16*/,
+ ptrdiff_t stride, int alpha, int beta);
+ void (*h264_v_loop_filter_chroma)(uint8_t *pix /*align 8*/, ptrdiff_t stride,
+ int alpha, int beta, int8_t *tc0);
+ void (*h264_h_loop_filter_chroma)(uint8_t *pix /*align 4*/, ptrdiff_t stride,
+ int alpha, int beta, int8_t *tc0);
+ void (*h264_h_loop_filter_chroma_mbaff)(uint8_t *pix /*align 8*/,
+ ptrdiff_t stride, int alpha, int beta,
+ int8_t *tc0);
+ void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix /*align 8*/,
+ ptrdiff_t stride, int alpha, int beta);
+ void (*h264_h_loop_filter_chroma_intra)(uint8_t *pix /*align 8*/,
+ ptrdiff_t stride, int alpha, int beta);
+ void (*h264_h_loop_filter_chroma_mbaff_intra)(uint8_t *pix /*align 8*/,
+ ptrdiff_t stride, int alpha, int beta);
+ // h264_loop_filter_strength: simd only. the C version is inlined in h264_loopfilter.c
+ void (*h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40],
+ int8_t ref[2][40], int16_t mv[2][40][2],
+ int bidir, int edges, int step,
+ int mask_mv0, int mask_mv1, int field);
+
+ /* IDCT */
+ void (*h264_idct_add)(uint8_t *dst /*align 4*/,
+ int16_t *block /*align 16*/, int stride);
+ void (*h264_idct8_add)(uint8_t *dst /*align 8*/,
+ int16_t *block /*align 16*/, int stride);
+ void (*h264_idct_dc_add)(uint8_t *dst /*align 4*/,
+ int16_t *block /*align 16*/, int stride);
+ void (*h264_idct8_dc_add)(uint8_t *dst /*align 8*/,
+ int16_t *block /*align 16*/, int stride);
+
+ void (*h264_idct_add16)(uint8_t *dst /*align 16*/, const int *blockoffset,
+ int16_t *block /*align 16*/, int stride,
+ const uint8_t nnzc[5 * 8]);
+ void (*h264_idct8_add4)(uint8_t *dst /*align 16*/, const int *blockoffset,
+ int16_t *block /*align 16*/, int stride,
+ const uint8_t nnzc[5 * 8]);
+ void (*h264_idct_add8)(uint8_t **dst /*align 16*/, const int *blockoffset,
+ int16_t *block /*align 16*/, int stride,
+ const uint8_t nnzc[15 * 8]);
+ void (*h264_idct_add16intra)(uint8_t *dst /*align 16*/, const int *blockoffset,
+ int16_t *block /*align 16*/,
+ int stride, const uint8_t nnzc[5 * 8]);
+ void (*h264_luma_dc_dequant_idct)(int16_t *output,
+ int16_t *input /*align 16*/, int qmul);
+ void (*h264_chroma_dc_dequant_idct)(int16_t *block, int qmul);
+
+ /* bypass-transform */
+ void (*h264_add_pixels8_clear)(uint8_t *dst, int16_t *block, int stride);
+ void (*h264_add_pixels4_clear)(uint8_t *dst, int16_t *block, int stride);
+
+ /**
+ * Search buf from the start for up to size bytes. Return the index
+ * of a zero byte, or >= size if not found. Ideally, use lookahead
+ * to filter out any zero bytes that are known to not be followed by
+ * one or more further zero bytes and a one byte. Better still, filter
+ * out any bytes that form the trailing_zero_8bits syntax element too.
+ */
+ int (*startcode_find_candidate)(const uint8_t *buf, int size);
+} H264DSPContext;
+
+void ff_h264dsp_init(H264DSPContext *c, const int bit_depth,
+ const int chroma_format_idc);
+void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
+ const int chroma_format_idc);
+void ff_h264dsp_init_arm(H264DSPContext *c, const int bit_depth,
+ const int chroma_format_idc);
+void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
+ const int chroma_format_idc);
+void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
+ const int chroma_format_idc);
+void ff_h264dsp_init_mips(H264DSPContext *c, const int bit_depth,
+ const int chroma_format_idc);
+void ff_h264dsp_init_loongarch(H264DSPContext *c, const int bit_depth,
+ const int chroma_format_idc);
+
+#endif /* AVCODEC_H264DSP_H */
diff --git a/media/ffvpx/libavcodec/h264pred.c b/media/ffvpx/libavcodec/h264pred.c
new file mode 100644
index 0000000000..25f9995a0b
--- /dev/null
+++ b/media/ffvpx/libavcodec/h264pred.c
@@ -0,0 +1,602 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * H.264 / AVC / MPEG-4 part10 prediction functions.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/intreadwrite.h"
+#include "codec_id.h"
+#include "h264pred.h"
+#include "mathops.h"
+
+#define BIT_DEPTH 8
+#include "h264pred_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 9
+#include "h264pred_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 10
+#include "h264pred_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 12
+#include "h264pred_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 14
+#include "h264pred_template.c"
+#undef BIT_DEPTH
+
+static void pred4x4_127_dc_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ int stride = _stride;
+ const uint32_t a = 0x7F7F7F7FU;
+
+ AV_WN32A(src + 0 * stride, a);
+ AV_WN32A(src + 1 * stride, a);
+ AV_WN32A(src + 2 * stride, a);
+ AV_WN32A(src + 3 * stride, a);
+}
+
+static void pred4x4_129_dc_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ int stride = _stride;
+ const uint32_t a = 0x81818181U;
+
+ AV_WN32A(src + 0 * stride, a);
+ AV_WN32A(src + 1 * stride, a);
+ AV_WN32A(src + 2 * stride, a);
+ AV_WN32A(src + 3 * stride, a);
+}
+
+static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ const unsigned lt = src[-1-1*stride];
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+ uint32_t v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
+ (t0 + 2*t1 + t2 + 2) >> 2,
+ (t1 + 2*t2 + t3 + 2) >> 2,
+ (t2 + 2*t3 + t4 + 2) >> 2);
+
+ AV_WN32A(src+0*stride, v);
+ AV_WN32A(src+1*stride, v);
+ AV_WN32A(src+2*stride, v);
+ AV_WN32A(src+3*stride, v);
+}
+
+static void pred4x4_horizontal_vp8_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ const unsigned lt = src[-1-1*stride];
+ LOAD_LEFT_EDGE
+
+ AV_WN32A(src+0*stride, ((lt + 2*l0 + l1 + 2) >> 2)*0x01010101);
+ AV_WN32A(src+1*stride, ((l0 + 2*l1 + l2 + 2) >> 2)*0x01010101);
+ AV_WN32A(src+2*stride, ((l1 + 2*l2 + l3 + 2) >> 2)*0x01010101);
+ AV_WN32A(src+3*stride, ((l2 + 2*l3 + l3 + 2) >> 2)*0x01010101);
+}
+
+static void pred4x4_down_left_svq3_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ LOAD_TOP_EDGE
+ LOAD_LEFT_EDGE
+
+ src[0+0*stride]=(l1 + t1)>>1;
+ src[1+0*stride]=
+ src[0+1*stride]=(l2 + t2)>>1;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=
+ src[3+2*stride]=
+ src[2+3*stride]=
+ src[3+3*stride]=(l3 + t3)>>1;
+}
+
+static void pred4x4_down_left_rv40_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+ LOAD_LEFT_EDGE
+ LOAD_DOWN_LEFT_EDGE
+
+ src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
+ src[1+0*stride]=
+ src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + l4 + 2*l3 + 2)>>3;
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3 + l5 + 2*l4 + 2)>>3;
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l4 + l6 + 2*l5 + 2)>>3;
+ src[3+2*stride]=
+ src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3;
+ src[3+3*stride]=(t6 + t7 + 1 + l6 + l7 + 1)>>2;
+}
+
+static void pred4x4_down_left_rv40_nodown_c(uint8_t *src,
+ const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+ LOAD_LEFT_EDGE
+
+ src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
+ src[1+0*stride]=
+ src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + 3*l3 + 2)>>3;
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3*4 + 2)>>3;
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l3*4 + 2)>>3;
+ src[3+2*stride]=
+ src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3;
+ src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;
+}
+
+static void pred4x4_vertical_left_rv40(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t stride,
+ const int l0, const int l1, const int l2,
+ const int l3, const int l4)
+{
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(2*t0 + 2*t1 + l1 + 2*l2 + l3 + 4)>>3;
+ src[1+0*stride]=
+ src[0+2*stride]=(t1 + t2 + 1)>>1;
+ src[2+0*stride]=
+ src[1+2*stride]=(t2 + t3 + 1)>>1;
+ src[3+0*stride]=
+ src[2+2*stride]=(t3 + t4+ 1)>>1;
+ src[3+2*stride]=(t4 + t5+ 1)>>1;
+ src[0+1*stride]=(t0 + 2*t1 + t2 + l2 + 2*l3 + l4 + 4)>>3;
+ src[1+1*stride]=
+ src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+ src[2+1*stride]=
+ src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
+ src[3+1*stride]=
+ src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
+ src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
+}
+
+static void pred4x4_vertical_left_rv40_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ LOAD_LEFT_EDGE
+ LOAD_DOWN_LEFT_EDGE
+
+ pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l4);
+}
+
+static void pred4x4_vertical_left_rv40_nodown_c(uint8_t *src,
+ const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ LOAD_LEFT_EDGE
+
+ pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l3);
+}
+
+static void pred4x4_vertical_left_vp8_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(t0 + t1 + 1)>>1;
+ src[1+0*stride]=
+ src[0+2*stride]=(t1 + t2 + 1)>>1;
+ src[2+0*stride]=
+ src[1+2*stride]=(t2 + t3 + 1)>>1;
+ src[3+0*stride]=
+ src[2+2*stride]=(t3 + t4 + 1)>>1;
+ src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
+ src[1+1*stride]=
+ src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+ src[2+1*stride]=
+ src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
+ src[3+1*stride]=
+ src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
+ src[3+2*stride]=(t4 + 2*t5 + t6 + 2)>>2;
+ src[3+3*stride]=(t5 + 2*t6 + t7 + 2)>>2;
+}
+
+static void pred4x4_horizontal_up_rv40_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ LOAD_LEFT_EDGE
+ LOAD_DOWN_LEFT_EDGE
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
+ src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
+ src[2+0*stride]=
+ src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
+ src[3+0*stride]=
+ src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
+ src[2+1*stride]=
+ src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
+ src[3+1*stride]=
+ src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
+ src[3+2*stride]=
+ src[1+3*stride]=(l3 + 2*l4 + l5 + 2)>>2;
+ src[0+3*stride]=
+ src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2;
+ src[2+3*stride]=(l4 + l5 + 1)>>1;
+ src[3+3*stride]=(l4 + 2*l5 + l6 + 2)>>2;
+}
+
+static void pred4x4_horizontal_up_rv40_nodown_c(uint8_t *src,
+ const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ LOAD_LEFT_EDGE
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
+ src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
+ src[2+0*stride]=
+ src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
+ src[3+0*stride]=
+ src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
+ src[2+1*stride]=
+ src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
+ src[3+1*stride]=
+ src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
+ src[3+2*stride]=
+ src[1+3*stride]=l3;
+ src[0+3*stride]=
+ src[2+2*stride]=(t6 + t7 + 2*l3 + 2)>>2;
+ src[2+3*stride]=
+ src[3+3*stride]=l3;
+}
+
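+/* VP7/VP8 "True Motion" prediction: each pixel is predicted as
+ * clip(top[x] + left[y] - topleft). The offset crop table (cm) folds the
+ * "- topleft" term and the final clip into a single table lookup. */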
+static void pred4x4_tm_vp8_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t stride)
+{
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP - src[-1-stride];
+ uint8_t *top = src-stride;
+ int y;
+
+ for (y = 0; y < 4; y++) {
+ const uint8_t *cm_in = cm + src[-1];
+ src[0] = cm_in[top[0]];
+ src[1] = cm_in[top[1]];
+ src[2] = cm_in[top[2]];
+ src[3] = cm_in[top[3]];
+ src += stride;
+ }
+}
+
+static void pred16x16_plane_svq3_c(uint8_t *src, ptrdiff_t stride)
+{
+ pred16x16_plane_compat_8_c(src, stride, 1, 0);
+}
+
+static void pred16x16_plane_rv40_c(uint8_t *src, ptrdiff_t stride)
+{
+ pred16x16_plane_compat_8_c(src, stride, 0, 1);
+}
+
+static void pred16x16_tm_vp8_c(uint8_t *src, ptrdiff_t stride)
+{
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP - src[-1-stride];
+ uint8_t *top = src-stride;
+ int y;
+
+ for (y = 0; y < 16; y++) {
+ const uint8_t *cm_in = cm + src[-1];
+ src[0] = cm_in[top[0]];
+ src[1] = cm_in[top[1]];
+ src[2] = cm_in[top[2]];
+ src[3] = cm_in[top[3]];
+ src[4] = cm_in[top[4]];
+ src[5] = cm_in[top[5]];
+ src[6] = cm_in[top[6]];
+ src[7] = cm_in[top[7]];
+ src[8] = cm_in[top[8]];
+ src[9] = cm_in[top[9]];
+ src[10] = cm_in[top[10]];
+ src[11] = cm_in[top[11]];
+ src[12] = cm_in[top[12]];
+ src[13] = cm_in[top[13]];
+ src[14] = cm_in[top[14]];
+ src[15] = cm_in[top[15]];
+ src += stride;
+ }
+}
+
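+/* RV40 chroma DC modes: unlike the per-quadrant H.264 8x8 DC prediction,
+ * these compute a single DC value from the edge samples and fill the whole
+ * 8x8 block with it. */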
+static void pred8x8_left_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
+{
+ int i;
+ unsigned dc0;
+
+ dc0=0;
+ for(i=0;i<8; i++)
+ dc0+= src[-1+i*stride];
+ dc0= 0x01010101*((dc0 + 4)>>3);
+
+ for(i=0; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+}
+
+static void pred8x8_top_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
+{
+ int i;
+ unsigned dc0;
+
+ dc0=0;
+ for(i=0;i<8; i++)
+ dc0+= src[i-stride];
+ dc0= 0x01010101*((dc0 + 4)>>3);
+
+ for(i=0; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]=
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+}
+
+static void pred8x8_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
+{
+ int i;
+ unsigned dc0 = 0;
+
+ for(i=0;i<4; i++){
+ dc0+= src[-1+i*stride] + src[i-stride];
+ dc0+= src[4+i-stride];
+ dc0+= src[-1+(i+4)*stride];
+ }
+ dc0= 0x01010101*((dc0 + 8)>>4);
+
+ for(i=0; i<4; i++){
+ ((uint32_t*)(src+i*stride))[0]= dc0;
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+ for(i=4; i<8; i++){
+ ((uint32_t*)(src+i*stride))[0]= dc0;
+ ((uint32_t*)(src+i*stride))[1]= dc0;
+ }
+}
+
+static void pred8x8_tm_vp8_c(uint8_t *src, ptrdiff_t stride)
+{
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP - src[-1-stride];
+ uint8_t *top = src-stride;
+ int y;
+
+ for (y = 0; y < 8; y++) {
+ const uint8_t *cm_in = cm + src[-1];
+ src[0] = cm_in[top[0]];
+ src[1] = cm_in[top[1]];
+ src[2] = cm_in[top[2]];
+ src[3] = cm_in[top[3]];
+ src[4] = cm_in[top[4]];
+ src[5] = cm_in[top[5]];
+ src[6] = cm_in[top[6]];
+ src[7] = cm_in[top[7]];
+ src += stride;
+ }
+}
+
+/**
+ * Set the intra prediction function pointers.
+ */
+av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
+ const int bit_depth,
+ int chroma_format_idc)
+{
+#undef FUNC
+#undef FUNCC
+#define FUNC(a, depth) a ## _ ## depth
+#define FUNCC(a, depth) a ## _ ## depth ## _c
+#define FUNCD(a) a ## _c
+
+#define H264_PRED(depth) \
+ h->pred4x4[VERT_PRED ] = FUNCC(pred4x4_vertical, depth);\
+ h->pred4x4[HOR_PRED ] = FUNCC(pred4x4_horizontal, depth);\
+ h->pred4x4[DC_PRED ] = FUNCC(pred4x4_dc, depth);\
+ h->pred4x4[DIAG_DOWN_LEFT_PRED ] = FUNCC(pred4x4_down_left, depth);\
+ h->pred4x4[DIAG_DOWN_RIGHT_PRED] = FUNCC(pred4x4_down_right, depth);\
+ h->pred4x4[VERT_RIGHT_PRED ] = FUNCC(pred4x4_vertical_right, depth);\
+ h->pred4x4[HOR_DOWN_PRED ] = FUNCC(pred4x4_horizontal_down, depth);\
+ h->pred4x4[VERT_LEFT_PRED ] = FUNCC(pred4x4_vertical_left, depth);\
+ h->pred4x4[HOR_UP_PRED ] = FUNCC(pred4x4_horizontal_up, depth);\
+ h->pred4x4[LEFT_DC_PRED ] = FUNCC(pred4x4_left_dc, depth);\
+ h->pred4x4[TOP_DC_PRED ] = FUNCC(pred4x4_top_dc, depth);\
+ if (depth > 8 || codec_id != AV_CODEC_ID_VP8)\
+ h->pred4x4[DC_128_PRED ] = FUNCC(pred4x4_128_dc, depth);\
+\
+ h->pred8x8l[VERT_PRED ]= FUNCC(pred8x8l_vertical , depth);\
+ h->pred8x8l[HOR_PRED ]= FUNCC(pred8x8l_horizontal , depth);\
+ h->pred8x8l[DC_PRED ]= FUNCC(pred8x8l_dc , depth);\
+ h->pred8x8l[DIAG_DOWN_LEFT_PRED ]= FUNCC(pred8x8l_down_left , depth);\
+ h->pred8x8l[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred8x8l_down_right , depth);\
+ h->pred8x8l[VERT_RIGHT_PRED ]= FUNCC(pred8x8l_vertical_right , depth);\
+ h->pred8x8l[HOR_DOWN_PRED ]= FUNCC(pred8x8l_horizontal_down , depth);\
+ h->pred8x8l[VERT_LEFT_PRED ]= FUNCC(pred8x8l_vertical_left , depth);\
+ h->pred8x8l[HOR_UP_PRED ]= FUNCC(pred8x8l_horizontal_up , depth);\
+ h->pred8x8l[LEFT_DC_PRED ]= FUNCC(pred8x8l_left_dc , depth);\
+ h->pred8x8l[TOP_DC_PRED ]= FUNCC(pred8x8l_top_dc , depth);\
+ h->pred8x8l[DC_128_PRED ]= FUNCC(pred8x8l_128_dc , depth);\
+\
+ if (chroma_format_idc <= 1) {\
+ h->pred8x8[VERT_PRED8x8 ]= FUNCC(pred8x8_vertical , depth);\
+ h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x8_horizontal , depth);\
+ h->pred8x8[PLANE_PRED8x8] = FUNCC(pred8x8_plane, depth);\
+ } else {\
+ h->pred8x8[VERT_PRED8x8 ]= FUNCC(pred8x16_vertical , depth);\
+ h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x16_horizontal , depth);\
+ h->pred8x8[PLANE_PRED8x8] = FUNCC(pred8x16_plane, depth);\
+ }\
+ if (depth > 8 || (codec_id != AV_CODEC_ID_RV40 && \
+ codec_id != AV_CODEC_ID_VP7 && \
+ codec_id != AV_CODEC_ID_VP8)) { \
+ if (chroma_format_idc <= 1) {\
+ h->pred8x8[DC_PRED8x8 ]= FUNCC(pred8x8_dc , depth);\
+ h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc , depth);\
+ h->pred8x8[TOP_DC_PRED8x8 ]= FUNCC(pred8x8_top_dc , depth);\
+ h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8 ]= FUNC(pred8x8_mad_cow_dc_l0t, depth);\
+ h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8 ]= FUNC(pred8x8_mad_cow_dc_0lt, depth);\
+ h->pred8x8[ALZHEIMER_DC_L00_PRED8x8 ]= FUNC(pred8x8_mad_cow_dc_l00, depth);\
+ h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8 ]= FUNC(pred8x8_mad_cow_dc_0l0, depth);\
+ } else {\
+ h->pred8x8[DC_PRED8x8 ]= FUNCC(pred8x16_dc , depth);\
+ h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x16_left_dc , depth);\
+ h->pred8x8[TOP_DC_PRED8x8 ]= FUNCC(pred8x16_top_dc , depth);\
+ h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8 ]= FUNC(pred8x16_mad_cow_dc_l0t, depth);\
+ h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8 ]= FUNC(pred8x16_mad_cow_dc_0lt, depth);\
+ h->pred8x8[ALZHEIMER_DC_L00_PRED8x8 ]= FUNC(pred8x16_mad_cow_dc_l00, depth);\
+ h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8 ]= FUNC(pred8x16_mad_cow_dc_0l0, depth);\
+ }\
+ }else{\
+ h->pred8x8[DC_PRED8x8 ]= FUNCD(pred8x8_dc_rv40);\
+ h->pred8x8[LEFT_DC_PRED8x8]= FUNCD(pred8x8_left_dc_rv40);\
+ h->pred8x8[TOP_DC_PRED8x8 ]= FUNCD(pred8x8_top_dc_rv40);\
+ }\
+ if (chroma_format_idc <= 1) {\
+ h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x8_128_dc , depth);\
+ } else {\
+ h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x16_128_dc , depth);\
+ }\
+\
+ h->pred16x16[DC_PRED8x8 ]= FUNCC(pred16x16_dc , depth);\
+ h->pred16x16[VERT_PRED8x8 ]= FUNCC(pred16x16_vertical , depth);\
+ h->pred16x16[HOR_PRED8x8 ]= FUNCC(pred16x16_horizontal , depth);\
+ h->pred16x16[PLANE_PRED8x8 ]= FUNCC(pred16x16_plane , depth);\
+ h->pred16x16[LEFT_DC_PRED8x8]= FUNCC(pred16x16_left_dc , depth);\
+ h->pred16x16[TOP_DC_PRED8x8 ]= FUNCC(pred16x16_top_dc , depth);\
+ h->pred16x16[DC_128_PRED8x8 ]= FUNCC(pred16x16_128_dc , depth);\
+\
+ /* special lossless h/v prediction for H.264 */ \
+ h->pred4x4_add [VERT_PRED ]= FUNCC(pred4x4_vertical_add , depth);\
+ h->pred4x4_add [ HOR_PRED ]= FUNCC(pred4x4_horizontal_add , depth);\
+ h->pred8x8l_add [VERT_PRED ]= FUNCC(pred8x8l_vertical_add , depth);\
+ h->pred8x8l_add [ HOR_PRED ]= FUNCC(pred8x8l_horizontal_add , depth);\
+ h->pred8x8l_filter_add [VERT_PRED ]= FUNCC(pred8x8l_vertical_filter_add , depth);\
+ h->pred8x8l_filter_add [ HOR_PRED ]= FUNCC(pred8x8l_horizontal_filter_add , depth);\
+ if (chroma_format_idc <= 1) {\
+ h->pred8x8_add[VERT_PRED8x8] = FUNCC(pred8x8_vertical_add, depth);\
+ h->pred8x8_add[ HOR_PRED8x8] = FUNCC(pred8x8_horizontal_add, depth);\
+ } else {\
+ h->pred8x8_add [VERT_PRED8x8]= FUNCC(pred8x16_vertical_add , depth);\
+ h->pred8x8_add [ HOR_PRED8x8]= FUNCC(pred8x16_horizontal_add , depth);\
+ }\
+ h->pred16x16_add[VERT_PRED8x8]= FUNCC(pred16x16_vertical_add , depth);\
+ h->pred16x16_add[ HOR_PRED8x8]= FUNCC(pred16x16_horizontal_add , depth);\
+
+ switch (bit_depth) {
+ case 9:
+ H264_PRED(9)
+ break;
+ case 10:
+ H264_PRED(10)
+ break;
+ case 12:
+ H264_PRED(12)
+ break;
+ case 14:
+ H264_PRED(14)
+ break;
+ default:
+ av_assert0(bit_depth<=8);
+ H264_PRED(8)
+ switch (codec_id) {
+ case AV_CODEC_ID_SVQ3:
+ h->pred4x4[DIAG_DOWN_LEFT_PRED] = FUNCD(pred4x4_down_left_svq3);
+ h->pred16x16[PLANE_PRED8x8 ] = FUNCD(pred16x16_plane_svq3);
+ break;
+ case AV_CODEC_ID_RV40:
+ h->pred4x4[DIAG_DOWN_LEFT_PRED] = FUNCD(pred4x4_down_left_rv40);
+ h->pred4x4[VERT_LEFT_PRED ] = FUNCD(pred4x4_vertical_left_rv40);
+ h->pred4x4[HOR_UP_PRED ] = FUNCD(pred4x4_horizontal_up_rv40);
+ h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN] = FUNCD(pred4x4_down_left_rv40_nodown);
+ h->pred4x4[HOR_UP_PRED_RV40_NODOWN] = FUNCD(pred4x4_horizontal_up_rv40_nodown);
+ h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN] = FUNCD(pred4x4_vertical_left_rv40_nodown);
+ h->pred16x16[PLANE_PRED8x8 ] = FUNCD(pred16x16_plane_rv40);
+ break;
+ case AV_CODEC_ID_VP7:
+ case AV_CODEC_ID_VP8:
+ h->pred4x4[VERT_PRED ] = FUNCD(pred4x4_vertical_vp8);
+ h->pred4x4[HOR_PRED ] = FUNCD(pred4x4_horizontal_vp8);
+ h->pred4x4[VERT_LEFT_PRED ] = FUNCD(pred4x4_vertical_left_vp8);
+ h->pred4x4[TM_VP8_PRED ] = FUNCD(pred4x4_tm_vp8);
+ h->pred4x4[VERT_VP8_PRED ] = FUNCC(pred4x4_vertical, 8);
+ h->pred4x4[DC_127_PRED ] = FUNCD(pred4x4_127_dc);
+ h->pred4x4[DC_129_PRED ] = FUNCD(pred4x4_129_dc);
+ h->pred4x4[HOR_VP8_PRED ] = FUNCC(pred4x4_horizontal, 8);
+ h->pred8x8[PLANE_PRED8x8 ] = FUNCD(pred8x8_tm_vp8);
+ h->pred8x8[DC_127_PRED8x8 ] = FUNCC(pred8x8_127_dc, 8);
+ h->pred8x8[DC_129_PRED8x8 ] = FUNCC(pred8x8_129_dc, 8);
+ h->pred16x16[PLANE_PRED8x8 ] = FUNCD(pred16x16_tm_vp8);
+ h->pred16x16[DC_127_PRED8x8] = FUNCC(pred16x16_127_dc, 8);
+ h->pred16x16[DC_129_PRED8x8] = FUNCC(pred16x16_129_dc, 8);
+ break;
+ }
+ break;
+ }
+
+#if ARCH_AARCH64
+ ff_h264_pred_init_aarch64(h, codec_id, bit_depth, chroma_format_idc);
+#elif ARCH_ARM
+ ff_h264_pred_init_arm(h, codec_id, bit_depth, chroma_format_idc);
+#elif ARCH_X86
+ ff_h264_pred_init_x86(h, codec_id, bit_depth, chroma_format_idc);
+#elif ARCH_MIPS
+ ff_h264_pred_init_mips(h, codec_id, bit_depth, chroma_format_idc);
+#elif ARCH_LOONGARCH
+ ff_h264_pred_init_loongarch(h, codec_id, bit_depth, chroma_format_idc);
+#endif
+}
diff --git a/media/ffvpx/libavcodec/h264pred.h b/media/ffvpx/libavcodec/h264pred.h
new file mode 100644
index 0000000000..cb008548fc
--- /dev/null
+++ b/media/ffvpx/libavcodec/h264pred.h
@@ -0,0 +1,130 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * H.264 / AVC / MPEG-4 prediction functions.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_H264PRED_H
+#define AVCODEC_H264PRED_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/**
+ * Prediction types
+ */
+//@{
+#define VERT_PRED 0
+#define HOR_PRED 1
+#define DC_PRED 2
+#define DIAG_DOWN_LEFT_PRED 3
+#define DIAG_DOWN_RIGHT_PRED 4
+#define VERT_RIGHT_PRED 5
+#define HOR_DOWN_PRED 6
+#define VERT_LEFT_PRED 7
+#define HOR_UP_PRED 8
+
+// DC edge (not for VP8)
+#define LEFT_DC_PRED 9
+#define TOP_DC_PRED 10
+#define DC_128_PRED 11
+
+// RV40 specific
+#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN 12
+#define HOR_UP_PRED_RV40_NODOWN 13
+#define VERT_LEFT_PRED_RV40_NODOWN 14
+
+// VP8 specific
+#define TM_VP8_PRED 9 ///< "True Motion", used instead of plane
+#define VERT_VP8_PRED 10 ///< for VP8, #VERT_PRED is the average of
+ ///< (left col + 2*cur col + right col) / 4;
+ ///< this is the "unaveraged" one
+#define HOR_VP8_PRED 14 ///< unaveraged version of #HOR_PRED, see
+ ///< #VERT_VP8_PRED for details
+#define DC_127_PRED 12
+#define DC_129_PRED 13
+
+#define DC_PRED8x8 0
+#define HOR_PRED8x8 1
+#define VERT_PRED8x8 2
+#define PLANE_PRED8x8 3
+
+// DC edge
+#define LEFT_DC_PRED8x8 4
+#define TOP_DC_PRED8x8 5
+#define DC_128_PRED8x8 6
+
+// H.264/SVQ3 (8x8) specific
+#define ALZHEIMER_DC_L0T_PRED8x8 7
+#define ALZHEIMER_DC_0LT_PRED8x8 8
+#define ALZHEIMER_DC_L00_PRED8x8 9
+#define ALZHEIMER_DC_0L0_PRED8x8 10
+
+// VP8 specific
+#define DC_127_PRED8x8 7
+#define DC_129_PRED8x8 8
+//@}
+
+#define PART_NOT_AVAILABLE -2
+
+/**
+ * Context for storing H.264 prediction functions
+ */
+typedef struct H264PredContext {
+ void(*pred4x4[9 + 3 + 3])(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t stride);
+ void(*pred8x8l[9 + 3])(uint8_t *src, int topleft, int topright,
+ ptrdiff_t stride);
+ void(*pred8x8[4 + 3 + 4])(uint8_t *src, ptrdiff_t stride);
+ void(*pred16x16[4 + 3 + 2])(uint8_t *src, ptrdiff_t stride);
+
+ void(*pred4x4_add[2])(uint8_t *pix /*align 4*/,
+ int16_t *block /*align 16*/, ptrdiff_t stride);
+ void(*pred8x8l_add[2])(uint8_t *pix /*align 8*/,
+ int16_t *block /*align 16*/, ptrdiff_t stride);
+ void(*pred8x8l_filter_add[2])(uint8_t *pix /*align 8*/,
+ int16_t *block /*align 16*/, int topleft, int topright, ptrdiff_t stride);
+ void(*pred8x8_add[3])(uint8_t *pix /*align 8*/,
+ const int *block_offset,
+ int16_t *block /*align 16*/, ptrdiff_t stride);
+ void(*pred16x16_add[3])(uint8_t *pix /*align 16*/,
+ const int *block_offset,
+ int16_t *block /*align 16*/, ptrdiff_t stride);
+} H264PredContext;
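+
+/* Illustrative sketch of typical use (8-bit 4:2:0 H.264 assumed; dst,
+ * topright and stride are caller-provided):
+ *
+ *     H264PredContext pred;
+ *     ff_h264_pred_init(&pred, AV_CODEC_ID_H264, 8, 1);
+ *     pred.pred4x4[VERT_PRED](dst, topright, stride); // one 4x4 luma block
+ */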
+
+void ff_h264_pred_init(H264PredContext *h, int codec_id,
+ const int bit_depth, const int chroma_format_idc);
+void ff_h264_pred_init_aarch64(H264PredContext *h, int codec_id,
+ const int bit_depth,
+ const int chroma_format_idc);
+void ff_h264_pred_init_arm(H264PredContext *h, int codec_id,
+ const int bit_depth, const int chroma_format_idc);
+void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
+ const int bit_depth, const int chroma_format_idc);
+void ff_h264_pred_init_mips(H264PredContext *h, int codec_id,
+ const int bit_depth, const int chroma_format_idc);
+void ff_h264_pred_init_loongarch(H264PredContext *h, int codec_id,
+ const int bit_depth, const int chroma_format_idc);
+
+#endif /* AVCODEC_H264PRED_H */
diff --git a/media/ffvpx/libavcodec/h264pred_template.c b/media/ffvpx/libavcodec/h264pred_template.c
new file mode 100644
index 0000000000..b5bc942a5e
--- /dev/null
+++ b/media/ffvpx/libavcodec/h264pred_template.c
@@ -0,0 +1,1333 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003-2011 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * H.264 / AVC / MPEG-4 part10 prediction functions.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "libavutil/intreadwrite.h"
+
+#include "mathops.h"
+
+#include "bit_depth_template.c"
+
+static void FUNCC(pred4x4_vertical)(uint8_t *_src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a= AV_RN4PA(src-stride);
+
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
+}
+
+static void FUNCC(pred4x4_horizontal)(uint8_t *_src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ AV_WN4PA(src+0*stride, PIXEL_SPLAT_X4(src[-1+0*stride]));
+ AV_WN4PA(src+1*stride, PIXEL_SPLAT_X4(src[-1+1*stride]));
+ AV_WN4PA(src+2*stride, PIXEL_SPLAT_X4(src[-1+2*stride]));
+ AV_WN4PA(src+3*stride, PIXEL_SPLAT_X4(src[-1+3*stride]));
+}
+
+static void FUNCC(pred4x4_dc)(uint8_t *_src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
+ + src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;
+ const pixel4 a = PIXEL_SPLAT_X4(dc);
+
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
+}
+
+static void FUNCC(pred4x4_left_dc)(uint8_t *_src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const int dc= ( src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2;
+ const pixel4 a = PIXEL_SPLAT_X4(dc);
+
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
+}
+
+static void FUNCC(pred4x4_top_dc)(uint8_t *_src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2;
+ const pixel4 a = PIXEL_SPLAT_X4(dc);
+
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
+}
+
+static void FUNCC(pred4x4_128_dc)(uint8_t *_src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a = PIXEL_SPLAT_X4(1<<(BIT_DEPTH-1));
+
+ AV_WN4PA(src+0*stride, a);
+ AV_WN4PA(src+1*stride, a);
+ AV_WN4PA(src+2*stride, a);
+ AV_WN4PA(src+3*stride, a);
+}
+
+
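+/* Helpers that load the neighbouring reference samples used by the 4x4
+ * directional modes into locals: t0..t3 from the row above, t4..t7 from
+ * above-right, l0..l3 from the left column and l4..l7 from below-left. */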
+#define LOAD_TOP_RIGHT_EDGE\
+ const unsigned av_unused t4 = topright[0];\
+ const unsigned av_unused t5 = topright[1];\
+ const unsigned av_unused t6 = topright[2];\
+ const unsigned av_unused t7 = topright[3];\
+
+#define LOAD_DOWN_LEFT_EDGE\
+ const unsigned av_unused l4 = src[-1+4*stride];\
+ const unsigned av_unused l5 = src[-1+5*stride];\
+ const unsigned av_unused l6 = src[-1+6*stride];\
+ const unsigned av_unused l7 = src[-1+7*stride];\
+
+#define LOAD_LEFT_EDGE\
+ const unsigned av_unused l0 = src[-1+0*stride];\
+ const unsigned av_unused l1 = src[-1+1*stride];\
+ const unsigned av_unused l2 = src[-1+2*stride];\
+ const unsigned av_unused l3 = src[-1+3*stride];\
+
+#define LOAD_TOP_EDGE\
+ const unsigned av_unused t0 = src[ 0-1*stride];\
+ const unsigned av_unused t1 = src[ 1-1*stride];\
+ const unsigned av_unused t2 = src[ 2-1*stride];\
+ const unsigned av_unused t3 = src[ 3-1*stride];\
+
+static void FUNCC(pred4x4_down_right)(uint8_t *_src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const int lt= src[-1-1*stride];
+ LOAD_TOP_EDGE
+ LOAD_LEFT_EDGE
+
+ src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
+ src[0+2*stride]=
+ src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
+ src[0+1*stride]=
+ src[1+2*stride]=
+ src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
+ src[0+0*stride]=
+ src[1+1*stride]=
+ src[2+2*stride]=
+ src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
+ src[1+0*stride]=
+ src[2+1*stride]=
+ src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2;
+ src[2+0*stride]=
+ src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
+ src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+}
+
+static void FUNCC(pred4x4_down_left)(uint8_t *_src, const uint8_t *_topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ const pixel *topright = (const pixel*)_topright;
+ int stride = _stride>>(sizeof(pixel)-1);
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+// LOAD_LEFT_EDGE
+
+ src[0+0*stride]=(t0 + t2 + 2*t1 + 2)>>2;
+ src[1+0*stride]=
+ src[0+1*stride]=(t1 + t3 + 2*t2 + 2)>>2;
+ src[2+0*stride]=
+ src[1+1*stride]=
+ src[0+2*stride]=(t2 + t4 + 2*t3 + 2)>>2;
+ src[3+0*stride]=
+ src[2+1*stride]=
+ src[1+2*stride]=
+ src[0+3*stride]=(t3 + t5 + 2*t4 + 2)>>2;
+ src[3+1*stride]=
+ src[2+2*stride]=
+ src[1+3*stride]=(t4 + t6 + 2*t5 + 2)>>2;
+ src[3+2*stride]=
+ src[2+3*stride]=(t5 + t7 + 2*t6 + 2)>>2;
+ src[3+3*stride]=(t6 + 3*t7 + 2)>>2;
+}
+
+static void FUNCC(pred4x4_vertical_right)(uint8_t *_src,
+ const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const int lt= src[-1-1*stride];
+ LOAD_TOP_EDGE
+ LOAD_LEFT_EDGE
+
+ src[0+0*stride]=
+ src[1+2*stride]=(lt + t0 + 1)>>1;
+ src[1+0*stride]=
+ src[2+2*stride]=(t0 + t1 + 1)>>1;
+ src[2+0*stride]=
+ src[3+2*stride]=(t1 + t2 + 1)>>1;
+ src[3+0*stride]=(t2 + t3 + 1)>>1;
+ src[0+1*stride]=
+ src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
+ src[1+1*stride]=
+ src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2;
+ src[2+1*stride]=
+ src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2;
+ src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+ src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
+ src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
+}
+
+static void FUNCC(pred4x4_vertical_left)(uint8_t *_src,
+ const uint8_t *_topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ const pixel *topright = (const pixel*)_topright;
+ int stride = _stride>>(sizeof(pixel)-1);
+ LOAD_TOP_EDGE
+ LOAD_TOP_RIGHT_EDGE
+
+ src[0+0*stride]=(t0 + t1 + 1)>>1;
+ src[1+0*stride]=
+ src[0+2*stride]=(t1 + t2 + 1)>>1;
+ src[2+0*stride]=
+ src[1+2*stride]=(t2 + t3 + 1)>>1;
+ src[3+0*stride]=
+ src[2+2*stride]=(t3 + t4+ 1)>>1;
+ src[3+2*stride]=(t4 + t5+ 1)>>1;
+ src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
+ src[1+1*stride]=
+ src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
+ src[2+1*stride]=
+ src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
+ src[3+1*stride]=
+ src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
+ src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
+}
+
+static void FUNCC(pred4x4_horizontal_up)(uint8_t *_src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ LOAD_LEFT_EDGE
+
+ src[0+0*stride]=(l0 + l1 + 1)>>1;
+ src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2;
+ src[2+0*stride]=
+ src[0+1*stride]=(l1 + l2 + 1)>>1;
+ src[3+0*stride]=
+ src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2;
+ src[2+1*stride]=
+ src[0+2*stride]=(l2 + l3 + 1)>>1;
+ src[3+1*stride]=
+ src[1+2*stride]=(l2 + 2*l3 + l3 + 2)>>2;
+ src[3+2*stride]=
+ src[1+3*stride]=
+ src[0+3*stride]=
+ src[2+2*stride]=
+ src[2+3*stride]=
+ src[3+3*stride]=l3;
+}
+
+static void FUNCC(pred4x4_horizontal_down)(uint8_t *_src,
+ const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const int lt= src[-1-1*stride];
+ LOAD_TOP_EDGE
+ LOAD_LEFT_EDGE
+
+ src[0+0*stride]=
+ src[2+1*stride]=(lt + l0 + 1)>>1;
+ src[1+0*stride]=
+ src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
+ src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2;
+ src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2;
+ src[0+1*stride]=
+ src[2+2*stride]=(l0 + l1 + 1)>>1;
+ src[1+1*stride]=
+ src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
+ src[0+2*stride]=
+ src[2+3*stride]=(l1 + l2+ 1)>>1;
+ src[1+2*stride]=
+ src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
+ src[0+3*stride]=(l2 + l3 + 1)>>1;
+ src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
+}
+
+static void FUNCC(pred16x16_vertical)(uint8_t *_src, ptrdiff_t _stride)
+{
+ int i;
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a = AV_RN4PA(((pixel4*)(src-stride))+0);
+ const pixel4 b = AV_RN4PA(((pixel4*)(src-stride))+1);
+ const pixel4 c = AV_RN4PA(((pixel4*)(src-stride))+2);
+ const pixel4 d = AV_RN4PA(((pixel4*)(src-stride))+3);
+
+ for(i=0; i<16; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, b);
+ AV_WN4PA(((pixel4*)(src+i*stride))+2, c);
+ AV_WN4PA(((pixel4*)(src+i*stride))+3, d);
+ }
+}
+
+static void FUNCC(pred16x16_horizontal)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+
+ for(i=0; i<16; i++){
+ const pixel4 a = PIXEL_SPLAT_X4(src[-1+i*stride]);
+
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+2, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+3, a);
+ }
+}
+
+#define PREDICT_16x16_DC(v)\
+ for(i=0; i<16; i++){\
+ AV_WN4PA(src+ 0, v);\
+ AV_WN4PA(src+ 4, v);\
+ AV_WN4PA(src+ 8, v);\
+ AV_WN4PA(src+12, v);\
+ src += stride;\
+ }
+
+static void FUNCC(pred16x16_dc)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i, dc=0;
+ pixel *src = (pixel*)_src;
+ pixel4 dcsplat;
+ stride >>= sizeof(pixel)-1;
+
+ for(i=0;i<16; i++){
+ dc+= src[-1+i*stride];
+ }
+
+ for(i=0;i<16; i++){
+ dc+= src[i-stride];
+ }
+
+ dcsplat = PIXEL_SPLAT_X4((dc+16)>>5);
+ PREDICT_16x16_DC(dcsplat);
+}
+
+static void FUNCC(pred16x16_left_dc)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i, dc=0;
+ pixel *src = (pixel*)_src;
+ pixel4 dcsplat;
+ stride >>= sizeof(pixel)-1;
+
+ for(i=0;i<16; i++){
+ dc+= src[-1+i*stride];
+ }
+
+ dcsplat = PIXEL_SPLAT_X4((dc+8)>>4);
+ PREDICT_16x16_DC(dcsplat);
+}
+
+static void FUNCC(pred16x16_top_dc)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i, dc=0;
+ pixel *src = (pixel*)_src;
+ pixel4 dcsplat;
+ stride >>= sizeof(pixel)-1;
+
+ for(i=0;i<16; i++){
+ dc+= src[i-stride];
+ }
+
+ dcsplat = PIXEL_SPLAT_X4((dc+8)>>4);
+ PREDICT_16x16_DC(dcsplat);
+}
+
+#define PRED16x16_X(n, v) \
+static void FUNCC(pred16x16_##n##_dc)(uint8_t *_src, ptrdiff_t stride)\
+{\
+ int i;\
+ pixel *src = (pixel*)_src;\
+ stride >>= sizeof(pixel)-1;\
+ PREDICT_16x16_DC(PIXEL_SPLAT_X4(v));\
+}
+
+PRED16x16_X(128, (1<<(BIT_DEPTH-1))+0)
+#if BIT_DEPTH == 8
+PRED16x16_X(127, (1<<(BIT_DEPTH-1))-1)
+PRED16x16_X(129, (1<<(BIT_DEPTH-1))+1)
+#endif
+
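+/* Plane prediction core: derive horizontal and vertical gradients H and V
+ * from the top and left edges, scale them (H.264, SVQ3 and RV40 each use a
+ * slightly different scaling), then fill the block with a clipped linear
+ * ramp evaluated incrementally row by row. */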
+static inline void FUNCC(pred16x16_plane_compat)(uint8_t *_src,
+ ptrdiff_t _stride,
+ const int svq3,
+ const int rv40)
+{
+ int i, j, k;
+ int a;
+ INIT_CLIP
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel * const src0 = src +7-stride;
+ const pixel * src1 = src +8*stride-1;
+ const pixel * src2 = src1-2*stride; // == src+6*stride-1;
+ int H = src0[1] - src0[-1];
+ int V = src1[0] - src2[ 0];
+ for(k=2; k<=8; ++k) {
+ src1 += stride; src2 -= stride;
+ H += k*(src0[k] - src0[-k]);
+ V += k*(src1[0] - src2[ 0]);
+ }
+ if(svq3){
+ H = ( 5*(H/4) ) / 16;
+ V = ( 5*(V/4) ) / 16;
+
+ /* required for 100% accuracy */
+ i = H; H = V; V = i;
+ }else if(rv40){
+ H = ( H + (H>>2) ) >> 4;
+ V = ( V + (V>>2) ) >> 4;
+ }else{
+ H = ( 5*H+32 ) >> 6;
+ V = ( 5*V+32 ) >> 6;
+ }
+
+ a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
+ for(j=16; j>0; --j) {
+ int b = a;
+ a += V;
+ for(i=-16; i<0; i+=4) {
+ src[16+i] = CLIP((b ) >> 5);
+ src[17+i] = CLIP((b+ H) >> 5);
+ src[18+i] = CLIP((b+2*H) >> 5);
+ src[19+i] = CLIP((b+3*H) >> 5);
+ b += 4*H;
+ }
+ src += stride;
+ }
+}
+
+static void FUNCC(pred16x16_plane)(uint8_t *src, ptrdiff_t stride)
+{
+ FUNCC(pred16x16_plane_compat)(src, stride, 0, 0);
+}
+
+static void FUNCC(pred8x8_vertical)(uint8_t *_src, ptrdiff_t _stride)
+{
+ int i;
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a= AV_RN4PA(((pixel4*)(src-stride))+0);
+ const pixel4 b= AV_RN4PA(((pixel4*)(src-stride))+1);
+
+ for(i=0; i<8; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, b);
+ }
+}
+
+static void FUNCC(pred8x16_vertical)(uint8_t *_src, ptrdiff_t _stride)
+{
+ int i;
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a= AV_RN4PA(((pixel4*)(src-stride))+0);
+ const pixel4 b= AV_RN4PA(((pixel4*)(src-stride))+1);
+
+ for(i=0; i<16; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, b);
+ }
+}
+
+static void FUNCC(pred8x8_horizontal)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+
+ for(i=0; i<8; i++){
+ const pixel4 a = PIXEL_SPLAT_X4(src[-1+i*stride]);
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, a);
+ }
+}
+
+static void FUNCC(pred8x16_horizontal)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+ for(i=0; i<16; i++){
+ const pixel4 a = PIXEL_SPLAT_X4(src[-1+i*stride]);
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, a);
+ }
+}
+
+#define PRED8x8_X(n, v)\
+static void FUNCC(pred8x8_##n##_dc)(uint8_t *_src, ptrdiff_t stride)\
+{\
+ int i;\
+ const pixel4 a = PIXEL_SPLAT_X4(v);\
+ pixel *src = (pixel*)_src;\
+ stride >>= sizeof(pixel)-1;\
+ for(i=0; i<8; i++){\
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);\
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, a);\
+ }\
+}
+
+PRED8x8_X(128, (1<<(BIT_DEPTH-1))+0)
+#if BIT_DEPTH == 8
+PRED8x8_X(127, (1<<(BIT_DEPTH-1))-1)
+PRED8x8_X(129, (1<<(BIT_DEPTH-1))+1)
+#endif
+
+static void FUNCC(pred8x16_128_dc)(uint8_t *_src, ptrdiff_t stride)
+{
+ FUNCC(pred8x8_128_dc)(_src, stride);
+ FUNCC(pred8x8_128_dc)(_src+8*stride, stride);
+}
+
+static void FUNCC(pred8x8_left_dc)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i;
+ int dc0, dc2;
+ pixel4 dc0splat, dc2splat;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+
+ dc0=dc2=0;
+ for(i=0;i<4; i++){
+ dc0+= src[-1+i*stride];
+ dc2+= src[-1+(i+4)*stride];
+ }
+ dc0splat = PIXEL_SPLAT_X4((dc0 + 2)>>2);
+ dc2splat = PIXEL_SPLAT_X4((dc2 + 2)>>2);
+
+ for(i=0; i<4; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc0splat);
+ }
+ for(i=4; i<8; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc2splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc2splat);
+ }
+}
+
+static void FUNCC(pred8x16_left_dc)(uint8_t *_src, ptrdiff_t stride)
+{
+ FUNCC(pred8x8_left_dc)(_src, stride);
+ FUNCC(pred8x8_left_dc)(_src+8*stride, stride);
+}
+
+static void FUNCC(pred8x8_top_dc)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i;
+ int dc0, dc1;
+ pixel4 dc0splat, dc1splat;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+
+ dc0=dc1=0;
+ for(i=0;i<4; i++){
+ dc0+= src[i-stride];
+ dc1+= src[4+i-stride];
+ }
+ dc0splat = PIXEL_SPLAT_X4((dc0 + 2)>>2);
+ dc1splat = PIXEL_SPLAT_X4((dc1 + 2)>>2);
+
+ for(i=0; i<4; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc1splat);
+ }
+ for(i=4; i<8; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc1splat);
+ }
+}
+
+static void FUNCC(pred8x16_top_dc)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i;
+ int dc0, dc1;
+ pixel4 dc0splat, dc1splat;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+
+ dc0=dc1=0;
+ for(i=0;i<4; i++){
+ dc0+= src[i-stride];
+ dc1+= src[4+i-stride];
+ }
+ dc0splat = PIXEL_SPLAT_X4((dc0 + 2)>>2);
+ dc1splat = PIXEL_SPLAT_X4((dc1 + 2)>>2);
+
+ for(i=0; i<16; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc1splat);
+ }
+}
+
+static void FUNCC(pred8x8_dc)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i;
+ int dc0, dc1, dc2;
+ pixel4 dc0splat, dc1splat, dc2splat, dc3splat;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+
+ dc0=dc1=dc2=0;
+ for(i=0;i<4; i++){
+ dc0+= src[-1+i*stride] + src[i-stride];
+ dc1+= src[4+i-stride];
+ dc2+= src[-1+(i+4)*stride];
+ }
+ dc0splat = PIXEL_SPLAT_X4((dc0 + 4)>>3);
+ dc1splat = PIXEL_SPLAT_X4((dc1 + 2)>>2);
+ dc2splat = PIXEL_SPLAT_X4((dc2 + 2)>>2);
+ dc3splat = PIXEL_SPLAT_X4((dc1 + dc2 + 4)>>3);
+
+ for(i=0; i<4; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc1splat);
+ }
+ for(i=4; i<8; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc2splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc3splat);
+ }
+}
+
+static void FUNCC(pred8x16_dc)(uint8_t *_src, ptrdiff_t stride)
+{
+ int i;
+ int dc0, dc1, dc2, dc3, dc4;
+ pixel4 dc0splat, dc1splat, dc2splat, dc3splat, dc4splat, dc5splat, dc6splat, dc7splat;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+
+ dc0=dc1=dc2=dc3=dc4=0;
+ for(i=0;i<4; i++){
+ dc0+= src[-1+i*stride] + src[i-stride];
+ dc1+= src[4+i-stride];
+ dc2+= src[-1+(i+4)*stride];
+ dc3+= src[-1+(i+8)*stride];
+ dc4+= src[-1+(i+12)*stride];
+ }
+ dc0splat = PIXEL_SPLAT_X4((dc0 + 4)>>3);
+ dc1splat = PIXEL_SPLAT_X4((dc1 + 2)>>2);
+ dc2splat = PIXEL_SPLAT_X4((dc2 + 2)>>2);
+ dc3splat = PIXEL_SPLAT_X4((dc1 + dc2 + 4)>>3);
+ dc4splat = PIXEL_SPLAT_X4((dc3 + 2)>>2);
+ dc5splat = PIXEL_SPLAT_X4((dc1 + dc3 + 4)>>3);
+ dc6splat = PIXEL_SPLAT_X4((dc4 + 2)>>2);
+ dc7splat = PIXEL_SPLAT_X4((dc1 + dc4 + 4)>>3);
+
+ for(i=0; i<4; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc1splat);
+ }
+ for(i=4; i<8; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc2splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc3splat);
+ }
+ for(i=8; i<12; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc4splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc5splat);
+ }
+ for(i=12; i<16; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc6splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc7splat);
+ }
+}
+
+// the following 4 functions should not be optimized!
+static void FUNC(pred8x8_mad_cow_dc_l0t)(uint8_t *src, ptrdiff_t stride)
+{
+ FUNCC(pred8x8_top_dc)(src, stride);
+ FUNCC(pred4x4_dc)(src, NULL, stride);
+}
+
+static void FUNC(pred8x16_mad_cow_dc_l0t)(uint8_t *src, ptrdiff_t stride)
+{
+ FUNCC(pred8x16_top_dc)(src, stride);
+ FUNCC(pred4x4_dc)(src, NULL, stride);
+}
+
+static void FUNC(pred8x8_mad_cow_dc_0lt)(uint8_t *src, ptrdiff_t stride)
+{
+ FUNCC(pred8x8_dc)(src, stride);
+ FUNCC(pred4x4_top_dc)(src, NULL, stride);
+}
+
+static void FUNC(pred8x16_mad_cow_dc_0lt)(uint8_t *src, ptrdiff_t stride)
+{
+ FUNCC(pred8x16_dc)(src, stride);
+ FUNCC(pred4x4_top_dc)(src, NULL, stride);
+}
+
+static void FUNC(pred8x8_mad_cow_dc_l00)(uint8_t *src, ptrdiff_t stride)
+{
+ FUNCC(pred8x8_left_dc)(src, stride);
+ FUNCC(pred4x4_128_dc)(src + 4*stride , NULL, stride);
+ FUNCC(pred4x4_128_dc)(src + 4*stride + 4*sizeof(pixel), NULL, stride);
+}
+
+static void FUNC(pred8x16_mad_cow_dc_l00)(uint8_t *src, ptrdiff_t stride)
+{
+ FUNCC(pred8x16_left_dc)(src, stride);
+ FUNCC(pred4x4_128_dc)(src + 4*stride , NULL, stride);
+ FUNCC(pred4x4_128_dc)(src + 4*stride + 4*sizeof(pixel), NULL, stride);
+}
+
+static void FUNC(pred8x8_mad_cow_dc_0l0)(uint8_t *src, ptrdiff_t stride)
+{
+ FUNCC(pred8x8_left_dc)(src, stride);
+ FUNCC(pred4x4_128_dc)(src , NULL, stride);
+ FUNCC(pred4x4_128_dc)(src + 4*sizeof(pixel), NULL, stride);
+}
+
+static void FUNC(pred8x16_mad_cow_dc_0l0)(uint8_t *src, ptrdiff_t stride)
+{
+ FUNCC(pred8x16_left_dc)(src, stride);
+ FUNCC(pred4x4_128_dc)(src , NULL, stride);
+ FUNCC(pred4x4_128_dc)(src + 4*sizeof(pixel), NULL, stride);
+}
+
+static void FUNCC(pred8x8_plane)(uint8_t *_src, ptrdiff_t _stride)
+{
+ int j, k;
+ int a;
+ INIT_CLIP
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel * const src0 = src +3-stride;
+ const pixel * src1 = src +4*stride-1;
+ const pixel * src2 = src1-2*stride; // == src+2*stride-1;
+ int H = src0[1] - src0[-1];
+ int V = src1[0] - src2[ 0];
+ for(k=2; k<=4; ++k) {
+ src1 += stride; src2 -= stride;
+ H += k*(src0[k] - src0[-k]);
+ V += k*(src1[0] - src2[ 0]);
+ }
+ H = ( 17*H+16 ) >> 5;
+ V = ( 17*V+16 ) >> 5;
+
+ a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
+ for(j=8; j>0; --j) {
+ int b = a;
+ a += V;
+ src[0] = CLIP((b ) >> 5);
+ src[1] = CLIP((b+ H) >> 5);
+ src[2] = CLIP((b+2*H) >> 5);
+ src[3] = CLIP((b+3*H) >> 5);
+ src[4] = CLIP((b+4*H) >> 5);
+ src[5] = CLIP((b+5*H) >> 5);
+ src[6] = CLIP((b+6*H) >> 5);
+ src[7] = CLIP((b+7*H) >> 5);
+ src += stride;
+ }
+}
+
+static void FUNCC(pred8x16_plane)(uint8_t *_src, ptrdiff_t _stride)
+{
+ int j, k;
+ int a;
+ INIT_CLIP
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel * const src0 = src +3-stride;
+ const pixel * src1 = src +8*stride-1;
+ const pixel * src2 = src1-2*stride; // == src+6*stride-1;
+ int H = src0[1] - src0[-1];
+ int V = src1[0] - src2[ 0];
+
+ for (k = 2; k <= 4; ++k) {
+ src1 += stride; src2 -= stride;
+ H += k*(src0[k] - src0[-k]);
+ V += k*(src1[0] - src2[ 0]);
+ }
+ for (; k <= 8; ++k) {
+ src1 += stride; src2 -= stride;
+ V += k*(src1[0] - src2[0]);
+ }
+
+ H = (17*H+16) >> 5;
+ V = (5*V+32) >> 6;
+
+ a = 16*(src1[0] + src2[8] + 1) - 7*V - 3*H;
+ for(j=16; j>0; --j) {
+ int b = a;
+ a += V;
+ src[0] = CLIP((b ) >> 5);
+ src[1] = CLIP((b+ H) >> 5);
+ src[2] = CLIP((b+2*H) >> 5);
+ src[3] = CLIP((b+3*H) >> 5);
+ src[4] = CLIP((b+4*H) >> 5);
+ src[5] = CLIP((b+5*H) >> 5);
+ src[6] = CLIP((b+6*H) >> 5);
+ src[7] = CLIP((b+7*H) >> 5);
+ src += stride;
+ }
+}
+
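+/* The 8x8 luma modes below work on reference samples that have first been
+ * smoothed with a [1 2 1]/4 filter; the PREDICT_8x8_LOAD_* macros compute
+ * those filtered edge values (l0..l7, t0..t15 and lt). */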
+#define SRC(x,y) src[(x)+(y)*stride]
+#define PL(y) \
+ const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
+#define PREDICT_8x8_LOAD_LEFT \
+ const int l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \
+ + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
+ PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
+ const int l7 av_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2
+
+#define PT(x) \
+ const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
+#define PREDICT_8x8_LOAD_TOP \
+ const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \
+ + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
+ PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
+ const int t7 av_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
+ + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2
+
+#define PTR(x) \
+ t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
+#define PREDICT_8x8_LOAD_TOPRIGHT \
+ int t8, t9, t10, t11, t12, t13, t14, t15; \
+ if(has_topright) { \
+ PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
+ t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
+ } else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);
+
+#define PREDICT_8x8_LOAD_TOPLEFT \
+ const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2
+
+#define PREDICT_8x8_DC(v) \
+ int y; \
+ for( y = 0; y < 8; y++ ) { \
+ AV_WN4PA(((pixel4*)src)+0, v); \
+ AV_WN4PA(((pixel4*)src)+1, v); \
+ src += stride; \
+ }
+
+static void FUNCC(pred8x8l_128_dc)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+
+ PREDICT_8x8_DC(PIXEL_SPLAT_X4(1<<(BIT_DEPTH-1)));
+}
+static void FUNCC(pred8x8l_left_dc)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+
+ PREDICT_8x8_LOAD_LEFT;
+ const pixel4 dc = PIXEL_SPLAT_X4((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3);
+ PREDICT_8x8_DC(dc);
+}
+static void FUNCC(pred8x8l_top_dc)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+
+ PREDICT_8x8_LOAD_TOP;
+ const pixel4 dc = PIXEL_SPLAT_X4((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3);
+ PREDICT_8x8_DC(dc);
+}
+static void FUNCC(pred8x8l_dc)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+
+ PREDICT_8x8_LOAD_LEFT;
+ PREDICT_8x8_LOAD_TOP;
+ const pixel4 dc = PIXEL_SPLAT_X4((l0+l1+l2+l3+l4+l5+l6+l7
+ +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4);
+ PREDICT_8x8_DC(dc);
+}
+static void FUNCC(pred8x8l_horizontal)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ pixel4 a;
+
+ PREDICT_8x8_LOAD_LEFT;
+#define ROW(y) a = PIXEL_SPLAT_X4(l##y); \
+ AV_WN4PA(src+y*stride, a); \
+ AV_WN4PA(src+y*stride+4, a);
+ ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
+#undef ROW
+}
+static void FUNCC(pred8x8l_vertical)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ int y;
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ pixel4 a, b;
+
+ PREDICT_8x8_LOAD_TOP;
+ src[0] = t0;
+ src[1] = t1;
+ src[2] = t2;
+ src[3] = t3;
+ src[4] = t4;
+ src[5] = t5;
+ src[6] = t6;
+ src[7] = t7;
+ a = AV_RN4PA(((pixel4*)src)+0);
+ b = AV_RN4PA(((pixel4*)src)+1);
+ for( y = 1; y < 8; y++ ) {
+ AV_WN4PA(((pixel4*)(src+y*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+y*stride))+1, b);
+ }
+}
+static void FUNCC(pred8x8l_down_left)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ PREDICT_8x8_LOAD_TOP;
+ PREDICT_8x8_LOAD_TOPRIGHT;
+ SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
+ SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
+ SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
+ SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
+ SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
+ SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
+ SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
+ SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
+ SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
+ SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
+ SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
+ SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
+ SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
+ SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
+ SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
+}
+static void FUNCC(pred8x8l_down_right)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ PREDICT_8x8_LOAD_TOP;
+ PREDICT_8x8_LOAD_LEFT;
+ PREDICT_8x8_LOAD_TOPLEFT;
+ SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
+ SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
+ SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
+ SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
+ SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
+ SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
+ SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
+ SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
+ SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
+ SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
+ SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
+ SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
+ SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
+ SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
+ SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
+}
+static void FUNCC(pred8x8l_vertical_right)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ PREDICT_8x8_LOAD_TOP;
+ PREDICT_8x8_LOAD_LEFT;
+ PREDICT_8x8_LOAD_TOPLEFT;
+ SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
+ SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
+ SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
+ SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
+ SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
+ SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
+ SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
+ SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
+ SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
+ SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
+ SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
+ SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
+ SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
+ SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
+ SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
+ SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
+ SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
+ SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
+ SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
+ SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
+ SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
+ SRC(7,0)= (t6 + t7 + 1) >> 1;
+}
+static void FUNCC(pred8x8l_horizontal_down)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ PREDICT_8x8_LOAD_TOP;
+ PREDICT_8x8_LOAD_LEFT;
+ PREDICT_8x8_LOAD_TOPLEFT;
+ SRC(0,7)= (l6 + l7 + 1) >> 1;
+ SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
+ SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
+ SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
+ SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
+ SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
+ SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
+ SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
+ SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
+ SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
+ SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
+ SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
+ SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
+ SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
+ SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
+ SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
+ SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
+ SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
+ SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
+ SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
+ SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
+ SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
+}
+static void FUNCC(pred8x8l_vertical_left)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ PREDICT_8x8_LOAD_TOP;
+ PREDICT_8x8_LOAD_TOPRIGHT;
+ SRC(0,0)= (t0 + t1 + 1) >> 1;
+ SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
+ SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
+ SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
+ SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
+ SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
+ SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
+ SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
+ SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
+ SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
+ SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
+ SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
+ SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
+ SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
+ SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
+ SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
+ SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
+ SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
+ SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
+ SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
+ SRC(7,6)= (t10 + t11 + 1) >> 1;
+ SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
+}
+static void FUNCC(pred8x8l_horizontal_up)(uint8_t *_src, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ PREDICT_8x8_LOAD_LEFT;
+ SRC(0,0)= (l0 + l1 + 1) >> 1;
+ SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
+ SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
+ SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
+ SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
+ SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
+ SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
+ SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
+ SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
+ SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
+ SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
+ SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
+ SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
+ SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
+ SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
+ SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
+ SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
+ SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
+}
+
+static void FUNCC(pred8x8l_vertical_filter_add)(uint8_t *_src, int16_t *_block, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ int i;
+ pixel *src = (pixel*)_src;
+ const dctcoef *block = (const dctcoef*)_block;
+ pixel pix[8];
+ int stride = _stride>>(sizeof(pixel)-1);
+ PREDICT_8x8_LOAD_TOP;
+
+ pix[0] = t0;
+ pix[1] = t1;
+ pix[2] = t2;
+ pix[3] = t3;
+ pix[4] = t4;
+ pix[5] = t5;
+ pix[6] = t6;
+ pix[7] = t7;
+
+ for(i=0; i<8; i++){
+ pixel v = pix[i];
+ src[0*stride]= v += block[0];
+ src[1*stride]= v += block[8];
+ src[2*stride]= v += block[16];
+ src[3*stride]= v += block[24];
+ src[4*stride]= v += block[32];
+ src[5*stride]= v += block[40];
+ src[6*stride]= v += block[48];
+ src[7*stride]= v + block[56];
+ src++;
+ block++;
+ }
+
+ memset(_block, 0, sizeof(dctcoef) * 64);
+}
+
+static void FUNCC(pred8x8l_horizontal_filter_add)(uint8_t *_src, int16_t *_block, int has_topleft,
+ int has_topright, ptrdiff_t _stride)
+{
+ int i;
+ pixel *src = (pixel*)_src;
+ const dctcoef *block = (const dctcoef*)_block;
+ pixel pix[8];
+ int stride = _stride>>(sizeof(pixel)-1);
+ PREDICT_8x8_LOAD_LEFT;
+
+ pix[0] = l0;
+ pix[1] = l1;
+ pix[2] = l2;
+ pix[3] = l3;
+ pix[4] = l4;
+ pix[5] = l5;
+ pix[6] = l6;
+ pix[7] = l7;
+
+ for(i=0; i<8; i++){
+ pixel v = pix[i];
+ src[0]= v += block[0];
+ src[1]= v += block[1];
+ src[2]= v += block[2];
+ src[3]= v += block[3];
+ src[4]= v += block[4];
+ src[5]= v += block[5];
+ src[6]= v += block[6];
+ src[7]= v + block[7];
+ src+= stride;
+ block+= 8;
+ }
+
+ memset(_block, 0, sizeof(dctcoef) * 64);
+}
+
+#undef PREDICT_8x8_LOAD_LEFT
+#undef PREDICT_8x8_LOAD_TOP
+#undef PREDICT_8x8_LOAD_TOPLEFT
+#undef PREDICT_8x8_LOAD_TOPRIGHT
+#undef PREDICT_8x8_DC
+#undef PTR
+#undef PT
+#undef PL
+#undef SRC
+
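+/* Lossless h/v prediction helpers: predict vertically or horizontally from
+ * the block edge, add the residual coefficients from *_block, and clear the
+ * coefficient block afterwards. */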
+static void FUNCC(pred4x4_vertical_add)(uint8_t *_pix, int16_t *_block,
+ ptrdiff_t stride)
+{
+ int i;
+ pixel *pix = (pixel*)_pix;
+ const dctcoef *block = (const dctcoef*)_block;
+ stride >>= sizeof(pixel)-1;
+ pix -= stride;
+ for(i=0; i<4; i++){
+ pixel v = pix[0];
+ pix[1*stride]= v += block[0];
+ pix[2*stride]= v += block[4];
+ pix[3*stride]= v += block[8];
+ pix[4*stride]= v + block[12];
+ pix++;
+ block++;
+ }
+
+ memset(_block, 0, sizeof(dctcoef) * 16);
+}
+
+static void FUNCC(pred4x4_horizontal_add)(uint8_t *_pix, int16_t *_block,
+ ptrdiff_t stride)
+{
+ int i;
+ pixel *pix = (pixel*)_pix;
+ const dctcoef *block = (const dctcoef*)_block;
+ stride >>= sizeof(pixel)-1;
+ for(i=0; i<4; i++){
+ pixel v = pix[-1];
+ pix[0]= v += block[0];
+ pix[1]= v += block[1];
+ pix[2]= v += block[2];
+ pix[3]= v + block[3];
+ pix+= stride;
+ block+= 4;
+ }
+
+ memset(_block, 0, sizeof(dctcoef) * 16);
+}
+
+static void FUNCC(pred8x8l_vertical_add)(uint8_t *_pix, int16_t *_block,
+ ptrdiff_t stride)
+{
+ int i;
+ pixel *pix = (pixel*)_pix;
+ const dctcoef *block = (const dctcoef*)_block;
+ stride >>= sizeof(pixel)-1;
+ pix -= stride;
+ for(i=0; i<8; i++){
+ pixel v = pix[0];
+ pix[1*stride]= v += block[0];
+ pix[2*stride]= v += block[8];
+ pix[3*stride]= v += block[16];
+ pix[4*stride]= v += block[24];
+ pix[5*stride]= v += block[32];
+ pix[6*stride]= v += block[40];
+ pix[7*stride]= v += block[48];
+ pix[8*stride]= v + block[56];
+ pix++;
+ block++;
+ }
+
+ memset(_block, 0, sizeof(dctcoef) * 64);
+}
+
+static void FUNCC(pred8x8l_horizontal_add)(uint8_t *_pix, int16_t *_block,
+ ptrdiff_t stride)
+{
+ int i;
+ pixel *pix = (pixel*)_pix;
+ const dctcoef *block = (const dctcoef*)_block;
+ stride >>= sizeof(pixel)-1;
+ for(i=0; i<8; i++){
+ pixel v = pix[-1];
+ pix[0]= v += block[0];
+ pix[1]= v += block[1];
+ pix[2]= v += block[2];
+ pix[3]= v += block[3];
+ pix[4]= v += block[4];
+ pix[5]= v += block[5];
+ pix[6]= v += block[6];
+ pix[7]= v + block[7];
+ pix+= stride;
+ block+= 8;
+ }
+
+ memset(_block, 0, sizeof(dctcoef) * 64);
+}
+
+static void FUNCC(pred16x16_vertical_add)(uint8_t *pix, const int *block_offset,
+ int16_t *block,
+ ptrdiff_t stride)
+{
+ int i;
+ for(i=0; i<16; i++)
+ FUNCC(pred4x4_vertical_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride);
+}
+
+static void FUNCC(pred16x16_horizontal_add)(uint8_t *pix,
+ const int *block_offset,
+ int16_t *block,
+ ptrdiff_t stride)
+{
+ int i;
+ for(i=0; i<16; i++)
+ FUNCC(pred4x4_horizontal_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride);
+}
+
+static void FUNCC(pred8x8_vertical_add)(uint8_t *pix, const int *block_offset,
+ int16_t *block, ptrdiff_t stride)
+{
+ int i;
+ for(i=0; i<4; i++)
+ FUNCC(pred4x4_vertical_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride);
+}
+
+static void FUNCC(pred8x16_vertical_add)(uint8_t *pix, const int *block_offset,
+ int16_t *block, ptrdiff_t stride)
+{
+ int i;
+ for(i=0; i<4; i++)
+ FUNCC(pred4x4_vertical_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride);
+ for(i=4; i<8; i++)
+ FUNCC(pred4x4_vertical_add)(pix + block_offset[i+4], block + i*16*sizeof(pixel), stride);
+}
+
+static void FUNCC(pred8x8_horizontal_add)(uint8_t *pix, const int *block_offset,
+ int16_t *block,
+ ptrdiff_t stride)
+{
+ int i;
+ for(i=0; i<4; i++)
+ FUNCC(pred4x4_horizontal_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride);
+}
+
+static void FUNCC(pred8x16_horizontal_add)(uint8_t *pix,
+ const int *block_offset,
+ int16_t *block, ptrdiff_t stride)
+{
+ int i;
+ for(i=0; i<4; i++)
+ FUNCC(pred4x4_horizontal_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride);
+ for(i=4; i<8; i++)
+ FUNCC(pred4x4_horizontal_add)(pix + block_offset[i+4], block + i*16*sizeof(pixel), stride);
+}
diff --git a/media/ffvpx/libavcodec/hpeldsp.h b/media/ffvpx/libavcodec/hpeldsp.h
new file mode 100644
index 0000000000..45e81b10a5
--- /dev/null
+++ b/media/ffvpx/libavcodec/hpeldsp.h
@@ -0,0 +1,107 @@
+/*
+ * Half-pel DSP functions.
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Half-pel DSP functions.
+ */
+
+#ifndef AVCODEC_HPELDSP_H
+#define AVCODEC_HPELDSP_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+/* add and put pixel (decoding) */
+// blocksizes for hpel_pixels_func are 8x4, 8x8, 16x8 and 16x16
+// h for hpel_pixels_func is limited to {width/2, width}, but is never larger
+// than 16 and never smaller than 4
+typedef void (*op_pixels_func)(uint8_t *block /*align width (8 or 16)*/,
+ const uint8_t *pixels /*align 1*/,
+ ptrdiff_t line_size, int h);
+
+/**
+ * Half-pel DSP context.
+ */
+typedef struct HpelDSPContext {
+ /**
+ * Halfpel motion compensation with rounding (a+b+1)>>1.
+ * this is an array[4][4] of motion compensation functions for the 4
+ * horizontal blocksizes (16, 8, 4, 2) and the 4 halfpel positions<br>
+ * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
+ * @param block destination where the result is stored
+ * @param pixels source
+ * @param line_size number of bytes in a horizontal line of block
+ * @param h height
+ */
+ op_pixels_func put_pixels_tab[4][4];
+
+ /**
+ * Halfpel motion compensation with rounding (a+b+1)>>1.
+ * This is an array[4][4] of motion compensation functions for the 4
+ * horizontal blocksizes (16, 8, 4, 2) and the 4 halfpel positions<br>
+ * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
+ * @param block destination into which the result is averaged (a+b+1)>>1
+ * @param pixels source
+ * @param line_size number of bytes in a horizontal line of block
+ * @param h height
+ */
+ op_pixels_func avg_pixels_tab[4][4];
+
+ /**
+ * Halfpel motion compensation with no rounding (a+b)>>1.
+ * this is an array[4][4] of motion compensation functions for 2
+ * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
+ * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
+ * @param block destination where the result is stored
+ * @param pixels source
+ * @param line_size number of bytes in a horizontal line of block
+ * @param h height
+ * @note The size is kept at [4][4] to match the above pixel_tabs and avoid
+ * out of bounds reads in the motion estimation code.
+ */
+ op_pixels_func put_no_rnd_pixels_tab[4][4];
+
+ /**
+ * Halfpel motion compensation with no rounding (a+b)>>1.
+ * this is an array[4] of motion compensation functions for 1
+ * horizontal blocksize (16) and the 4 halfpel positions<br>
+ * *pixels_tab[0][ xhalfpel + 2*yhalfpel ]
+ * @param block destination into which the result is averaged (a+b)>>1
+ * @param pixels source
+ * @param line_size number of bytes in a horizontal line of block
+ * @param h height
+ */
+ op_pixels_func avg_no_rnd_pixels_tab[4];
+} HpelDSPContext;
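+
+/*
+ * Illustrative usage sketch (not part of the upstream header; `dst`, `ref`
+ * and `linesize` are assumed to be valid, suitably aligned pointers/strides):
+ *
+ *     HpelDSPContext h;
+ *     ff_hpeldsp_init(&h, 0);
+ *     // copy a 16x16 block at horizontal half-pel offset (xhalfpel=1, yhalfpel=0)
+ *     h.put_pixels_tab[0][1 + 2 * 0](dst, ref, linesize, 16);
+ *     // average an 8x8 block at the full-pel position into dst
+ *     h.avg_pixels_tab[1][0](dst, ref, linesize, 8);
+ */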
+
+void ff_hpeldsp_init(HpelDSPContext *c, int flags);
+
+void ff_hpeldsp_init_aarch64(HpelDSPContext *c, int flags);
+void ff_hpeldsp_init_alpha(HpelDSPContext *c, int flags);
+void ff_hpeldsp_init_arm(HpelDSPContext *c, int flags);
+void ff_hpeldsp_init_ppc(HpelDSPContext *c, int flags);
+void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags);
+void ff_hpeldsp_init_mips(HpelDSPContext *c, int flags);
+void ff_hpeldsp_init_loongarch(HpelDSPContext *c, int flags);
+
+#endif /* AVCODEC_HPELDSP_H */
diff --git a/media/ffvpx/libavcodec/hwaccel.h b/media/ffvpx/libavcodec/hwaccel.h
new file mode 100644
index 0000000000..3aaa92571c
--- /dev/null
+++ b/media/ffvpx/libavcodec/hwaccel.h
@@ -0,0 +1,84 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_HWACCEL_H
+#define AVCODEC_HWACCEL_H
+
+#include "avcodec.h"
+#include "hwaccels.h"
+
+
+#define HWACCEL_CAP_ASYNC_SAFE (1 << 0)
+
+
+typedef struct AVCodecHWConfigInternal {
+ /**
+ * This is the structure which will be returned to the user by
+ * avcodec_get_hw_config().
+ */
+ AVCodecHWConfig public;
+ /**
+ * If this configuration uses a hwaccel, a pointer to it.
+ * If not, NULL.
+ */
+ const AVHWAccel *hwaccel;
+} AVCodecHWConfigInternal;
+
+
+// These macros are used to simplify AVCodecHWConfigInternal definitions.
+
+#define HW_CONFIG_HWACCEL(device, frames, ad_hoc, format, device_type_, name) \
+ &(const AVCodecHWConfigInternal) { \
+ .public = { \
+ .pix_fmt = AV_PIX_FMT_ ## format, \
+ .methods = (device ? AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX : 0) | \
+ (frames ? AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX : 0) | \
+ (ad_hoc ? AV_CODEC_HW_CONFIG_METHOD_AD_HOC : 0), \
+ .device_type = AV_HWDEVICE_TYPE_ ## device_type_, \
+ }, \
+ .hwaccel = &name, \
+ }
+
+#define HW_CONFIG_INTERNAL(format) \
+ &(const AVCodecHWConfigInternal) { \
+ .public = { \
+ .pix_fmt = AV_PIX_FMT_ ## format, \
+ .methods = AV_CODEC_HW_CONFIG_METHOD_INTERNAL, \
+ .device_type = AV_HWDEVICE_TYPE_NONE, \
+ }, \
+ .hwaccel = NULL, \
+ }
+
+#define HWACCEL_DXVA2(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 1, DXVA2_VLD, DXVA2, ff_ ## codec ## _dxva2_hwaccel)
+#define HWACCEL_D3D11VA2(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 0, D3D11, D3D11VA, ff_ ## codec ## _d3d11va2_hwaccel)
+#define HWACCEL_NVDEC(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 0, CUDA, CUDA, ff_ ## codec ## _nvdec_hwaccel)
+#define HWACCEL_VAAPI(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 1, VAAPI, VAAPI, ff_ ## codec ## _vaapi_hwaccel)
+#define HWACCEL_VDPAU(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 1, VDPAU, VDPAU, ff_ ## codec ## _vdpau_hwaccel)
+#define HWACCEL_VIDEOTOOLBOX(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 1, VIDEOTOOLBOX, VIDEOTOOLBOX, ff_ ## codec ## _videotoolbox_hwaccel)
+#define HWACCEL_D3D11VA(codec) \
+ HW_CONFIG_HWACCEL(0, 0, 1, D3D11VA_VLD, NONE, ff_ ## codec ## _d3d11va_hwaccel)
+#define HWACCEL_XVMC(codec) \
+ HW_CONFIG_HWACCEL(0, 0, 1, XVMC, NONE, ff_ ## codec ## _xvmc_hwaccel)
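+
+/*
+ * For illustration (not part of the upstream header), HWACCEL_VAAPI(vp9)
+ * expands to a pointer to a constant compound literal equivalent to:
+ *
+ *     &(const AVCodecHWConfigInternal) {
+ *         .public = {
+ *             .pix_fmt = AV_PIX_FMT_VAAPI,
+ *             .methods = AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX |
+ *                        AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
+ *                        AV_CODEC_HW_CONFIG_METHOD_AD_HOC,
+ *             .device_type = AV_HWDEVICE_TYPE_VAAPI,
+ *         },
+ *         .hwaccel = &ff_vp9_vaapi_hwaccel,
+ *     }
+ */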
+
+#endif /* AVCODEC_HWACCEL_H */
diff --git a/media/ffvpx/libavcodec/hwaccels.h b/media/ffvpx/libavcodec/hwaccels.h
new file mode 100644
index 0000000000..aca55831f3
--- /dev/null
+++ b/media/ffvpx/libavcodec/hwaccels.h
@@ -0,0 +1,85 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_HWACCELS_H
+#define AVCODEC_HWACCELS_H
+
+#include "avcodec.h"
+
+extern const AVHWAccel ff_av1_d3d11va_hwaccel;
+extern const AVHWAccel ff_av1_d3d11va2_hwaccel;
+extern const AVHWAccel ff_av1_dxva2_hwaccel;
+extern const AVHWAccel ff_av1_nvdec_hwaccel;
+extern const AVHWAccel ff_av1_vaapi_hwaccel;
+extern const AVHWAccel ff_av1_vdpau_hwaccel;
+extern const AVHWAccel ff_h263_vaapi_hwaccel;
+extern const AVHWAccel ff_h263_videotoolbox_hwaccel;
+extern const AVHWAccel ff_h264_d3d11va_hwaccel;
+extern const AVHWAccel ff_h264_d3d11va2_hwaccel;
+extern const AVHWAccel ff_h264_dxva2_hwaccel;
+extern const AVHWAccel ff_h264_nvdec_hwaccel;
+extern const AVHWAccel ff_h264_vaapi_hwaccel;
+extern const AVHWAccel ff_h264_vdpau_hwaccel;
+extern const AVHWAccel ff_h264_videotoolbox_hwaccel;
+extern const AVHWAccel ff_hevc_d3d11va_hwaccel;
+extern const AVHWAccel ff_hevc_d3d11va2_hwaccel;
+extern const AVHWAccel ff_hevc_dxva2_hwaccel;
+extern const AVHWAccel ff_hevc_nvdec_hwaccel;
+extern const AVHWAccel ff_hevc_vaapi_hwaccel;
+extern const AVHWAccel ff_hevc_vdpau_hwaccel;
+extern const AVHWAccel ff_hevc_videotoolbox_hwaccel;
+extern const AVHWAccel ff_mjpeg_nvdec_hwaccel;
+extern const AVHWAccel ff_mjpeg_vaapi_hwaccel;
+extern const AVHWAccel ff_mpeg1_nvdec_hwaccel;
+extern const AVHWAccel ff_mpeg1_vdpau_hwaccel;
+extern const AVHWAccel ff_mpeg1_videotoolbox_hwaccel;
+extern const AVHWAccel ff_mpeg2_d3d11va_hwaccel;
+extern const AVHWAccel ff_mpeg2_d3d11va2_hwaccel;
+extern const AVHWAccel ff_mpeg2_nvdec_hwaccel;
+extern const AVHWAccel ff_mpeg2_dxva2_hwaccel;
+extern const AVHWAccel ff_mpeg2_vaapi_hwaccel;
+extern const AVHWAccel ff_mpeg2_vdpau_hwaccel;
+extern const AVHWAccel ff_mpeg2_videotoolbox_hwaccel;
+extern const AVHWAccel ff_mpeg4_nvdec_hwaccel;
+extern const AVHWAccel ff_mpeg4_vaapi_hwaccel;
+extern const AVHWAccel ff_mpeg4_vdpau_hwaccel;
+extern const AVHWAccel ff_mpeg4_videotoolbox_hwaccel;
+extern const AVHWAccel ff_prores_videotoolbox_hwaccel;
+extern const AVHWAccel ff_vc1_d3d11va_hwaccel;
+extern const AVHWAccel ff_vc1_d3d11va2_hwaccel;
+extern const AVHWAccel ff_vc1_dxva2_hwaccel;
+extern const AVHWAccel ff_vc1_nvdec_hwaccel;
+extern const AVHWAccel ff_vc1_vaapi_hwaccel;
+extern const AVHWAccel ff_vc1_vdpau_hwaccel;
+extern const AVHWAccel ff_vp8_nvdec_hwaccel;
+extern const AVHWAccel ff_vp8_vaapi_hwaccel;
+extern const AVHWAccel ff_vp9_d3d11va_hwaccel;
+extern const AVHWAccel ff_vp9_d3d11va2_hwaccel;
+extern const AVHWAccel ff_vp9_dxva2_hwaccel;
+extern const AVHWAccel ff_vp9_nvdec_hwaccel;
+extern const AVHWAccel ff_vp9_vaapi_hwaccel;
+extern const AVHWAccel ff_vp9_vdpau_hwaccel;
+extern const AVHWAccel ff_vp9_videotoolbox_hwaccel;
+extern const AVHWAccel ff_wmv3_d3d11va_hwaccel;
+extern const AVHWAccel ff_wmv3_d3d11va2_hwaccel;
+extern const AVHWAccel ff_wmv3_dxva2_hwaccel;
+extern const AVHWAccel ff_wmv3_nvdec_hwaccel;
+extern const AVHWAccel ff_wmv3_vaapi_hwaccel;
+extern const AVHWAccel ff_wmv3_vdpau_hwaccel;
+
+#endif /* AVCODEC_HWACCELS_H */
diff --git a/media/ffvpx/libavcodec/hwconfig.h b/media/ffvpx/libavcodec/hwconfig.h
new file mode 100644
index 0000000000..721424912c
--- /dev/null
+++ b/media/ffvpx/libavcodec/hwconfig.h
@@ -0,0 +1,100 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_HWCONFIG_H
+#define AVCODEC_HWCONFIG_H
+
+#include "avcodec.h"
+#include "hwaccels.h"
+
+
+#define HWACCEL_CAP_ASYNC_SAFE (1 << 0)
+
+
+typedef struct AVCodecHWConfigInternal {
+ /**
+ * This is the structure which will be returned to the user by
+ * avcodec_get_hw_config().
+ */
+ AVCodecHWConfig public;
+ /**
+ * If this configuration uses a hwaccel, a pointer to it.
+ * If not, NULL.
+ */
+ const AVHWAccel *hwaccel;
+} AVCodecHWConfigInternal;
+
+
+// These macros are used to simplify AVCodecHWConfigInternal definitions.
+
+#define HW_CONFIG_HWACCEL(device, frames, ad_hoc, format, device_type_, name) \
+ &(const AVCodecHWConfigInternal) { \
+ .public = { \
+ .pix_fmt = AV_PIX_FMT_ ## format, \
+ .methods = (device ? AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX : 0) | \
+ (frames ? AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX : 0) | \
+ (ad_hoc ? AV_CODEC_HW_CONFIG_METHOD_AD_HOC : 0), \
+ .device_type = AV_HWDEVICE_TYPE_ ## device_type_, \
+ }, \
+ .hwaccel = &name, \
+ }
+
+#define HW_CONFIG_INTERNAL(format) \
+ &(const AVCodecHWConfigInternal) { \
+ .public = { \
+ .pix_fmt = AV_PIX_FMT_ ## format, \
+ .methods = AV_CODEC_HW_CONFIG_METHOD_INTERNAL, \
+ .device_type = AV_HWDEVICE_TYPE_NONE, \
+ }, \
+ .hwaccel = NULL, \
+ }
+
+#define HWACCEL_DXVA2(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 1, DXVA2_VLD, DXVA2, ff_ ## codec ## _dxva2_hwaccel)
+#define HWACCEL_D3D11VA2(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 0, D3D11, D3D11VA, ff_ ## codec ## _d3d11va2_hwaccel)
+#define HWACCEL_NVDEC(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 0, CUDA, CUDA, ff_ ## codec ## _nvdec_hwaccel)
+#define HWACCEL_VAAPI(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 1, VAAPI, VAAPI, ff_ ## codec ## _vaapi_hwaccel)
+#define HWACCEL_VDPAU(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 1, VDPAU, VDPAU, ff_ ## codec ## _vdpau_hwaccel)
+#define HWACCEL_VIDEOTOOLBOX(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 1, VIDEOTOOLBOX, VIDEOTOOLBOX, ff_ ## codec ## _videotoolbox_hwaccel)
+#define HWACCEL_D3D11VA(codec) \
+ HW_CONFIG_HWACCEL(0, 0, 1, D3D11VA_VLD, NONE, ff_ ## codec ## _d3d11va_hwaccel)
+
+#define HW_CONFIG_ENCODER(device, frames, ad_hoc, format, device_type_) \
+ &(const AVCodecHWConfigInternal) { \
+ .public = { \
+ .pix_fmt = AV_PIX_FMT_ ## format, \
+ .methods = (device ? AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX : 0) | \
+ (frames ? AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX : 0) | \
+ (ad_hoc ? AV_CODEC_HW_CONFIG_METHOD_AD_HOC : 0), \
+ .device_type = AV_HWDEVICE_TYPE_ ## device_type_, \
+ }, \
+ .hwaccel = NULL, \
+ }
+
+#define HW_CONFIG_ENCODER_DEVICE(format, device_type_) \
+ HW_CONFIG_ENCODER(1, 0, 0, format, device_type_)
+
+#define HW_CONFIG_ENCODER_FRAMES(format, device_type_) \
+ HW_CONFIG_ENCODER(0, 1, 0, format, device_type_)
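+
+/*
+ * For illustration (not upstream code), HW_CONFIG_ENCODER_DEVICE(VAAPI, VAAPI)
+ * expands to an entry whose only method is the device context, i.e.:
+ *
+ *     &(const AVCodecHWConfigInternal) {
+ *         .public = {
+ *             .pix_fmt = AV_PIX_FMT_VAAPI,
+ *             .methods = AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX,
+ *             .device_type = AV_HWDEVICE_TYPE_VAAPI,
+ *         },
+ *         .hwaccel = NULL,
+ *     }
+ */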
+
+#endif /* AVCODEC_HWCONFIG_H */
diff --git a/media/ffvpx/libavcodec/idctdsp.c b/media/ffvpx/libavcodec/idctdsp.c
new file mode 100644
index 0000000000..7216afb094
--- /dev/null
+++ b/media/ffvpx/libavcodec/idctdsp.c
@@ -0,0 +1,315 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "config_components.h"
+#include "libavutil/attributes.h"
+#include "libavutil/common.h"
+#include "avcodec.h"
+#include "dct.h"
+#include "faanidct.h"
+#include "idctdsp.h"
+#include "simple_idct.h"
+#include "xvididct.h"
+
+av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64],
+ const uint8_t permutation[64])
+{
+ for (int i = 0; i < 64; i++) {
+ int j = src[i];
+ dst[i] = permutation[j];
+ }
+}
+
+av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation,
+ enum idct_permutation_type perm_type)
+{
+ int i;
+
+#if ARCH_X86
+ if (ff_init_scantable_permutation_x86(idct_permutation,
+ perm_type))
+ return;
+#endif
+
+ switch (perm_type) {
+ case FF_IDCT_PERM_NONE:
+ for (i = 0; i < 64; i++)
+ idct_permutation[i] = i;
+ break;
+ case FF_IDCT_PERM_LIBMPEG2:
+ for (i = 0; i < 64; i++)
+ idct_permutation[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
+ break;
+ case FF_IDCT_PERM_TRANSPOSE:
+ for (i = 0; i < 64; i++)
+ idct_permutation[i] = ((i & 7) << 3) | (i >> 3);
+ break;
+ case FF_IDCT_PERM_PARTTRANS:
+ for (i = 0; i < 64; i++)
+ idct_permutation[i] = (i & 0x24) | ((i & 3) << 3) | ((i >> 3) & 3);
+ break;
+ default:
+ av_log(NULL, AV_LOG_ERROR,
+ "Internal error, IDCT permutation not set\n");
+ }
+}
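+
+/*
+ * Worked example of the permutation formulas above (illustrative comment,
+ * not upstream): for i = 1 the FF_IDCT_PERM_LIBMPEG2 formula gives
+ * (1 & 0x38) | ((1 & 6) >> 1) | ((1 & 1) << 2) == 4, i.e. bit 0 of the column
+ * moves to bit 2 while the row bits (3..5) are untouched. FF_IDCT_PERM_TRANSPOSE
+ * simply swaps rows and columns: i = 10 (row 1, column 2) maps to 17
+ * (row 2, column 1).
+ */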
+
+void ff_put_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
+ ptrdiff_t line_size)
+{
+ int i;
+
+ /* read the pixels */
+ for (i = 0; i < 8; i++) {
+ pixels[0] = av_clip_uint8(block[0]);
+ pixels[1] = av_clip_uint8(block[1]);
+ pixels[2] = av_clip_uint8(block[2]);
+ pixels[3] = av_clip_uint8(block[3]);
+ pixels[4] = av_clip_uint8(block[4]);
+ pixels[5] = av_clip_uint8(block[5]);
+ pixels[6] = av_clip_uint8(block[6]);
+ pixels[7] = av_clip_uint8(block[7]);
+
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void put_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
+ int line_size)
+{
+ int i;
+
+ /* read the pixels */
+ for(i=0;i<4;i++) {
+ pixels[0] = av_clip_uint8(block[0]);
+ pixels[1] = av_clip_uint8(block[1]);
+ pixels[2] = av_clip_uint8(block[2]);
+ pixels[3] = av_clip_uint8(block[3]);
+
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void put_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
+ int line_size)
+{
+ int i;
+
+ /* read the pixels */
+ for(i=0;i<2;i++) {
+ pixels[0] = av_clip_uint8(block[0]);
+ pixels[1] = av_clip_uint8(block[1]);
+
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void put_signed_pixels_clamped_c(const int16_t *block,
+ uint8_t *av_restrict pixels,
+ ptrdiff_t line_size)
+{
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++) {
+ if (*block < -128)
+ *pixels = 0;
+ else if (*block > 127)
+ *pixels = 255;
+ else
+ *pixels = (uint8_t) (*block + 128);
+ block++;
+ pixels++;
+ }
+ pixels += (line_size - 8);
+ }
+}
+
+void ff_add_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
+ ptrdiff_t line_size)
+{
+ int i;
+
+ /* read the pixels */
+ for (i = 0; i < 8; i++) {
+ pixels[0] = av_clip_uint8(pixels[0] + block[0]);
+ pixels[1] = av_clip_uint8(pixels[1] + block[1]);
+ pixels[2] = av_clip_uint8(pixels[2] + block[2]);
+ pixels[3] = av_clip_uint8(pixels[3] + block[3]);
+ pixels[4] = av_clip_uint8(pixels[4] + block[4]);
+ pixels[5] = av_clip_uint8(pixels[5] + block[5]);
+ pixels[6] = av_clip_uint8(pixels[6] + block[6]);
+ pixels[7] = av_clip_uint8(pixels[7] + block[7]);
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void add_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
+ int line_size)
+{
+ int i;
+
+ /* read the pixels */
+ for(i=0;i<4;i++) {
+ pixels[0] = av_clip_uint8(pixels[0] + block[0]);
+ pixels[1] = av_clip_uint8(pixels[1] + block[1]);
+ pixels[2] = av_clip_uint8(pixels[2] + block[2]);
+ pixels[3] = av_clip_uint8(pixels[3] + block[3]);
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void add_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
+ int line_size)
+{
+ int i;
+
+ /* read the pixels */
+ for(i=0;i<2;i++) {
+ pixels[0] = av_clip_uint8(pixels[0] + block[0]);
+ pixels[1] = av_clip_uint8(pixels[1] + block[1]);
+ pixels += line_size;
+ block += 8;
+ }
+}
+
+static void ff_jref_idct4_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ ff_j_rev_dct4 (block);
+ put_pixels_clamped4_c(block, dest, line_size);
+}
+static void ff_jref_idct4_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ ff_j_rev_dct4 (block);
+ add_pixels_clamped4_c(block, dest, line_size);
+}
+
+static void ff_jref_idct2_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ ff_j_rev_dct2 (block);
+ put_pixels_clamped2_c(block, dest, line_size);
+}
+static void ff_jref_idct2_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ ff_j_rev_dct2 (block);
+ add_pixels_clamped2_c(block, dest, line_size);
+}
+
+static void ff_jref_idct1_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ dest[0] = av_clip_uint8((block[0] + 4)>>3);
+}
+static void ff_jref_idct1_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ dest[0] = av_clip_uint8(dest[0] + ((block[0] + 4)>>3));
+}
+
+av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
+{
+ av_unused const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;
+
+ if (avctx->lowres==1) {
+ c->idct_put = ff_jref_idct4_put;
+ c->idct_add = ff_jref_idct4_add;
+ c->idct = ff_j_rev_dct4;
+ c->perm_type = FF_IDCT_PERM_NONE;
+ } else if (avctx->lowres==2) {
+ c->idct_put = ff_jref_idct2_put;
+ c->idct_add = ff_jref_idct2_add;
+ c->idct = ff_j_rev_dct2;
+ c->perm_type = FF_IDCT_PERM_NONE;
+ } else if (avctx->lowres==3) {
+ c->idct_put = ff_jref_idct1_put;
+ c->idct_add = ff_jref_idct1_add;
+ c->idct = ff_j_rev_dct1;
+ c->perm_type = FF_IDCT_PERM_NONE;
+ } else {
+ if (avctx->bits_per_raw_sample == 10 || avctx->bits_per_raw_sample == 9) {
+ /* 10-bit MPEG-4 Simple Studio Profile requires a higher precision IDCT
+ However, it only uses idct_put */
+ if (c->mpeg4_studio_profile) {
+ c->idct_put = ff_simple_idct_put_int32_10bit;
+ c->idct_add = NULL;
+ c->idct = NULL;
+ } else {
+ c->idct_put = ff_simple_idct_put_int16_10bit;
+ c->idct_add = ff_simple_idct_add_int16_10bit;
+ c->idct = ff_simple_idct_int16_10bit;
+ }
+ c->perm_type = FF_IDCT_PERM_NONE;
+ } else if (avctx->bits_per_raw_sample == 12) {
+ c->idct_put = ff_simple_idct_put_int16_12bit;
+ c->idct_add = ff_simple_idct_add_int16_12bit;
+ c->idct = ff_simple_idct_int16_12bit;
+ c->perm_type = FF_IDCT_PERM_NONE;
+ } else {
+ if (avctx->idct_algo == FF_IDCT_INT) {
+ c->idct_put = ff_jref_idct_put;
+ c->idct_add = ff_jref_idct_add;
+ c->idct = ff_j_rev_dct;
+ c->perm_type = FF_IDCT_PERM_LIBMPEG2;
+#if CONFIG_FAANIDCT
+ } else if (avctx->idct_algo == FF_IDCT_FAAN) {
+ c->idct_put = ff_faanidct_put;
+ c->idct_add = ff_faanidct_add;
+ c->idct = ff_faanidct;
+ c->perm_type = FF_IDCT_PERM_NONE;
+#endif /* CONFIG_FAANIDCT */
+ } else { // accurate/default
+ c->idct_put = ff_simple_idct_put_int16_8bit;
+ c->idct_add = ff_simple_idct_add_int16_8bit;
+ c->idct = ff_simple_idct_int16_8bit;
+ c->perm_type = FF_IDCT_PERM_NONE;
+ }
+ }
+ }
+
+ c->put_pixels_clamped = ff_put_pixels_clamped_c;
+ c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
+ c->add_pixels_clamped = ff_add_pixels_clamped_c;
+
+ if (CONFIG_MPEG4_DECODER && avctx->idct_algo == FF_IDCT_XVID)
+ ff_xvid_idct_init(c, avctx);
+
+#if ARCH_AARCH64
+ ff_idctdsp_init_aarch64(c, avctx, high_bit_depth);
+#elif ARCH_ALPHA
+ ff_idctdsp_init_alpha(c, avctx, high_bit_depth);
+#elif ARCH_ARM
+ ff_idctdsp_init_arm(c, avctx, high_bit_depth);
+#elif ARCH_PPC
+ ff_idctdsp_init_ppc(c, avctx, high_bit_depth);
+#elif ARCH_RISCV
+ ff_idctdsp_init_riscv(c, avctx, high_bit_depth);
+#elif ARCH_X86
+ ff_idctdsp_init_x86(c, avctx, high_bit_depth);
+#elif ARCH_MIPS
+ ff_idctdsp_init_mips(c, avctx, high_bit_depth);
+#elif ARCH_LOONGARCH
+ ff_idctdsp_init_loongarch(c, avctx, high_bit_depth);
+#endif
+
+ ff_init_scantable_permutation(c->idct_permutation,
+ c->perm_type);
+}
diff --git a/media/ffvpx/libavcodec/idctdsp.h b/media/ffvpx/libavcodec/idctdsp.h
new file mode 100644
index 0000000000..7224463349
--- /dev/null
+++ b/media/ffvpx/libavcodec/idctdsp.h
@@ -0,0 +1,117 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_IDCTDSP_H
+#define AVCODEC_IDCTDSP_H
+
+#include <stdint.h>
+
+#include "config.h"
+
+#include "avcodec.h"
+
+enum idct_permutation_type {
+ FF_IDCT_PERM_NONE,
+ FF_IDCT_PERM_LIBMPEG2,
+ FF_IDCT_PERM_SIMPLE,
+ FF_IDCT_PERM_TRANSPOSE,
+ FF_IDCT_PERM_PARTTRANS,
+ FF_IDCT_PERM_SSE2,
+};
+
+void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64],
+ const uint8_t permutation[64]);
+void ff_init_scantable_permutation(uint8_t *idct_permutation,
+ enum idct_permutation_type perm_type);
+int ff_init_scantable_permutation_x86(uint8_t *idct_permutation,
+ enum idct_permutation_type perm_type);
+
+typedef struct IDCTDSPContext {
+ /* pixel ops : interface with DCT */
+ void (*put_pixels_clamped)(const int16_t *block /* align 16 */,
+ uint8_t *av_restrict pixels /* align 8 */,
+ ptrdiff_t line_size);
+ void (*put_signed_pixels_clamped)(const int16_t *block /* align 16 */,
+ uint8_t *av_restrict pixels /* align 8 */,
+ ptrdiff_t line_size);
+ void (*add_pixels_clamped)(const int16_t *block /* align 16 */,
+ uint8_t *av_restrict pixels /* align 8 */,
+ ptrdiff_t line_size);
+
+ void (*idct)(int16_t *block /* align 16 */);
+
+ /**
+ * block -> idct -> clip to unsigned 8 bit -> dest.
+ * (-1392, 0, 0, ...) -> idct -> (-174, -174, ...) -> put -> (0, 0, ...)
+ * @param line_size size in bytes of a horizontal line of dest
+ */
+ void (*idct_put)(uint8_t *dest /* align 8 */,
+ ptrdiff_t line_size, int16_t *block /* align 16 */);
+
+ /**
+ * block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
+ * @param line_size size in bytes of a horizontal line of dest
+ */
+ void (*idct_add)(uint8_t *dest /* align 8 */,
+ ptrdiff_t line_size, int16_t *block /* align 16 */);
+
+ /**
+ * IDCT input permutation.
+ * Several optimized IDCTs need a permuted input (relative to the
+ * normal order of the reference IDCT).
+ * This permutation must be performed before the idct_put/add.
+ * Note: normally this can be merged with the zigzag/alternate scan.<br>
+ * An example to avoid confusion:
+ * - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...)
+ * - (x -> reference DCT -> reference IDCT -> x)
+ * - (x -> reference DCT -> simple_mmx_perm = idct_permutation
+ * -> simple_idct_mmx -> x)
+ * - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant
+ * -> simple_idct_mmx -> ...)
+ */
+ uint8_t idct_permutation[64];
+ enum idct_permutation_type perm_type;
+
+ int mpeg4_studio_profile;
+} IDCTDSPContext;
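+
+/*
+ * Minimal usage sketch (illustrative only, not upstream code; `avctx`,
+ * `dest`, `linesize` and `block` are assumed to be set up by the caller):
+ *
+ *     IDCTDSPContext idsp;
+ *     uint8_t scantable[64];
+ *     ff_idctdsp_init(&idsp, avctx);
+ *     // fold the IDCT input permutation into the scan order once, e.g.
+ *     // using the zigzag table ff_zigzag_direct
+ *     ff_permute_scantable(scantable, ff_zigzag_direct, idsp.idct_permutation);
+ *     // the decoder then stores coefficient number `pos` of the scan at
+ *     // block[scantable[pos]] and finally calls:
+ *     idsp.idct_add(dest, linesize, block);
+ */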
+
+void ff_put_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
+ ptrdiff_t line_size);
+void ff_add_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
+ ptrdiff_t line_size);
+
+void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx);
+
+void ff_idctdsp_init_aarch64(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_idctdsp_init_alpha(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_idctdsp_init_arm(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_idctdsp_init_ppc(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_idctdsp_init_riscv(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_idctdsp_init_x86(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_idctdsp_init_mips(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_idctdsp_init_loongarch(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+
+#endif /* AVCODEC_IDCTDSP_H */
diff --git a/media/ffvpx/libavcodec/imgconvert.c b/media/ffvpx/libavcodec/imgconvert.c
new file mode 100644
index 0000000000..96511ac7d6
--- /dev/null
+++ b/media/ffvpx/libavcodec/imgconvert.c
@@ -0,0 +1,48 @@
+/*
+ * Misc image conversion routines
+ * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * misc image conversion routines
+ */
+
+#include "avcodec.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/pixfmt.h"
+
+enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr){
+ int i;
+
+ enum AVPixelFormat best = AV_PIX_FMT_NONE;
+ int loss;
+
+ for (i=0; pix_fmt_list[i] != AV_PIX_FMT_NONE; i++) {
+ loss = loss_ptr ? *loss_ptr : 0;
+ best = av_find_best_pix_fmt_of_2(best, pix_fmt_list[i], src_pix_fmt, has_alpha, &loss);
+ }
+
+ if (loss_ptr)
+ *loss_ptr = loss;
+ return best;
+}
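+
+/*
+ * Illustrative use (names below are assumptions for the example only): pick
+ * the best output format for a YUV420P source from a candidate list, which
+ * must be terminated by AV_PIX_FMT_NONE.
+ *
+ *     static const enum AVPixelFormat candidates[] = {
+ *         AV_PIX_FMT_YUV420P, AV_PIX_FMT_NV12, AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
+ *     };
+ *     int loss = 0;
+ *     enum AVPixelFormat best =
+ *         avcodec_find_best_pix_fmt_of_list(candidates, AV_PIX_FMT_YUV420P, 0, &loss);
+ */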
+
diff --git a/media/ffvpx/libavcodec/internal.h b/media/ffvpx/libavcodec/internal.h
new file mode 100644
index 0000000000..a283c52e01
--- /dev/null
+++ b/media/ffvpx/libavcodec/internal.h
@@ -0,0 +1,247 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Common internal API header.
+ */
+
+#ifndef AVCODEC_INTERNAL_H
+#define AVCODEC_INTERNAL_H
+
+#include <stdint.h>
+
+#include "libavutil/buffer.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/pixfmt.h"
+#include "avcodec.h"
+#include "config.h"
+
+#if CONFIG_LCMS2
+# include "fflcms2.h"
+#endif
+
+#define FF_SANE_NB_CHANNELS 512U
+
+#if HAVE_SIMD_ALIGN_64
+# define STRIDE_ALIGN 64 /* AVX-512 */
+#elif HAVE_SIMD_ALIGN_32
+# define STRIDE_ALIGN 32
+#elif HAVE_SIMD_ALIGN_16
+# define STRIDE_ALIGN 16
+#else
+# define STRIDE_ALIGN 8
+#endif
+
+typedef struct AVCodecInternal {
+ /**
+ * When using frame-threaded decoding, this field is set for the first
+ * worker thread (e.g. to decode extradata just once).
+ */
+ int is_copy;
+
+ /**
+ * An audio frame with fewer than the required number of samples has been
+ * submitted (and potentially padded with silence). Reject all subsequent frames.
+ */
+ int last_audio_frame;
+
+ /**
+ * Audio encoders can set this flag during init to indicate that they
+ * want the small last frame to be padded to a multiple of pad_samples.
+ */
+ int pad_samples;
+
+ AVBufferRef *pool;
+
+ void *thread_ctx;
+
+ /**
+ * This packet is used to hold the packet given to decoders
+ * implementing the .decode API; it is unused by the generic
+ * code for decoders implementing the .receive_frame API and
+ * may be freely used (but not freed) by them with the caveat
+ * that the packet will be unreferenced generically in
+ * avcodec_flush_buffers().
+ */
+ AVPacket *in_pkt;
+ struct AVBSFContext *bsf;
+
+ /**
+ * Properties (timestamps+side data) extracted from the last packet passed
+ * for decoding.
+ */
+ AVPacket *last_pkt_props;
+
+ /**
+ * temporary buffer used for encoders to store their bitstream
+ */
+ uint8_t *byte_buffer;
+ unsigned int byte_buffer_size;
+
+ /**
+ * This is set to AV_PKT_FLAG_KEY for encoders that encode intra-only
+ * formats (i.e. whose codec descriptor has AV_CODEC_PROP_INTRA_ONLY set).
+ * This is used to set said flag generically for said encoders.
+ */
+ int intra_only_flag;
+
+ void *frame_thread_encoder;
+
+ /**
+ * The input frame is stored here for encoders implementing the simple
+ * encode API.
+ *
+ * Not allocated in other cases.
+ */
+ AVFrame *in_frame;
+
+ /**
+ * When the AV_CODEC_FLAG_RECON_FRAME flag is used, the encoder should store
+ * here the reconstructed frame corresponding to the last returned packet.
+ *
+ * Not allocated in other cases.
+ */
+ AVFrame *recon_frame;
+
+ /**
+ * If this is set, then FFCodec->close (if existing) needs to be called
+ * for the parent AVCodecContext.
+ */
+ int needs_close;
+
+ /**
+ * Number of audio samples to skip at the start of the next decoded frame
+ */
+ int skip_samples;
+
+ /**
+ * hwaccel-specific private data
+ */
+ void *hwaccel_priv_data;
+
+ /**
+ * checks API usage: after codec draining, flush is required to resume operation
+ */
+ int draining;
+
+ /**
+ * Temporary buffers for newly received or not yet output packets/frames.
+ */
+ AVPacket *buffer_pkt;
+ AVFrame *buffer_frame;
+ int draining_done;
+
+ int showed_multi_packet_warning;
+
+ /* to prevent infinite loop on errors when draining */
+ int nb_draining_errors;
+
+ /* used when avctx flag AV_CODEC_FLAG_DROPCHANGED is set */
+ int changed_frames_dropped;
+ int initial_format;
+ int initial_width, initial_height;
+ int initial_sample_rate;
+ AVChannelLayout initial_ch_layout;
+
+#if CONFIG_LCMS2
+ FFIccContext icc; /* used to read and write embedded ICC profiles */
+#endif
+} AVCodecInternal;
+
+/**
+ * Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
+ * If there is no such matching pair then size is returned.
+ */
+int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b);
+
+unsigned int ff_toupper4(unsigned int x);
+
+void ff_color_frame(AVFrame *frame, const int color[4]);
+
+/**
+ * Maximum size in bytes of extradata.
+ * This value was chosen such that every bit of the buffer is
+ * addressable by a 32-bit signed integer as used by get_bits.
+ */
+#define FF_MAX_EXTRADATA_SIZE ((1 << 28) - AV_INPUT_BUFFER_PADDING_SIZE)
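+
+/* Illustrative check (comment added for clarity, not upstream): the limit is
+ * just under 2^28 bytes, i.e. just under 2^31 bit positions, so every bit
+ * offset in such a buffer still fits in the signed 32-bit index used by
+ * get_bits. */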
+
+/**
+ * 2^(x) for integer x
+ * @return correctly rounded float
+ */
+static av_always_inline float ff_exp2fi(int x) {
+ /* Normal range */
+ if (-126 <= x && x <= 128)
+ return av_int2float((x+127) << 23);
+ /* Too large */
+ else if (x > 128)
+ return INFINITY;
+ /* Subnormal numbers */
+ else if (x > -150)
+ return av_int2float(1 << (x+149));
+ /* Negligibly small */
+ else
+ return 0;
+}
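+
+/*
+ * Worked example (illustrative): ff_exp2fi(3) takes the normal-range branch
+ * and returns av_int2float((3 + 127) << 23), a float with biased exponent 130
+ * and zero mantissa, i.e. exactly 2^3 = 8.0f. ff_exp2fi(-140) takes the
+ * subnormal branch and returns av_int2float(1 << 9), which is exactly 2^-140.
+ */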
+
+int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx);
+
+int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec);
+
+/**
+ * Add a CPB properties side data to an encoding context.
+ */
+AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx);
+
+/**
+ * Check AVFrame for S12M timecode side data and allocate and fill TC SEI message with timecode info
+ *
+ * @param frame Raw frame to get S12M timecode side data from
+ * @param rate The frame rate
+ * @param prefix_len Number of bytes to allocate before SEI message
+ * @param data Pointer to a variable to store allocated memory
+ * Upon return the variable will hold NULL on error or if frame has no S12M timecode info.
+ * Otherwise it will point to prefix_len uninitialized bytes followed by
+ * the SEI message of *sei_size bytes
+ * @param sei_size Pointer to a variable to store generated SEI message length
+ * @return Zero on success, negative error code on failure
+ */
+int ff_alloc_timecode_sei(const AVFrame *frame, AVRational rate, size_t prefix_len,
+ void **data, size_t *sei_size);
+
+/**
+ * Get an estimated video bitrate based on frame size, frame rate and coded
+ * bits per pixel.
+ */
+int64_t ff_guess_coded_bitrate(AVCodecContext *avctx);
+
+/**
+ * Check if a value is in the list. If not, return the default value.
+ *
+ * @param ctx Context for the log message
+ * @param val_name Name of the checked value, for the log message
+ * @param array_valid_values Array of valid ints, terminated by INT_MAX
+ * @param default_value Value returned if the checked value is not in the array
+ * @return The value if it is in the array, default_value otherwise.
+ */
+int ff_int_from_list_or_default(void *ctx, const char * val_name, int val,
+ const int * array_valid_values, int default_value);
+
+#endif /* AVCODEC_INTERNAL_H */
diff --git a/media/ffvpx/libavcodec/jfdctfst.c b/media/ffvpx/libavcodec/jfdctfst.c
new file mode 100644
index 0000000000..805e05808c
--- /dev/null
+++ b/media/ffvpx/libavcodec/jfdctfst.c
@@ -0,0 +1,331 @@
+/*
+ * This file is part of the Independent JPEG Group's software.
+ *
+ * The authors make NO WARRANTY or representation, either express or implied,
+ * with respect to this software, its quality, accuracy, merchantability, or
+ * fitness for a particular purpose. This software is provided "AS IS", and
+ * you, its user, assume the entire risk as to its quality and accuracy.
+ *
+ * This software is copyright (C) 1994-1996, Thomas G. Lane.
+ * All Rights Reserved except as specified below.
+ *
+ * Permission is hereby granted to use, copy, modify, and distribute this
+ * software (or portions thereof) for any purpose, without fee, subject to
+ * these conditions:
+ * (1) If any part of the source code for this software is distributed, then
+ * this README file must be included, with this copyright and no-warranty
+ * notice unaltered; and any additions, deletions, or changes to the original
+ * files must be clearly indicated in accompanying documentation.
+ * (2) If only executable code is distributed, then the accompanying
+ * documentation must state that "this software is based in part on the work
+ * of the Independent JPEG Group".
+ * (3) Permission for use of this software is granted only if the user accepts
+ * full responsibility for any undesirable consequences; the authors accept
+ * NO LIABILITY for damages of any kind.
+ *
+ * These conditions apply to any software derived from or based on the IJG
+ * code, not just to the unmodified library. If you use our work, you ought
+ * to acknowledge us.
+ *
+ * Permission is NOT granted for the use of any IJG author's name or company
+ * name in advertising or publicity relating to this software or products
+ * derived from it. This software may be referred to only as "the Independent
+ * JPEG Group's software".
+ *
+ * We specifically permit and encourage the use of this software as the basis
+ * of commercial products, provided that all warranty or liability claims are
+ * assumed by the product vendor.
+ *
+ * This file contains a fast, not so accurate integer implementation of the
+ * forward DCT (Discrete Cosine Transform).
+ *
+ * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT
+ * on each column. Direct algorithms are also available, but they are
+ * much more complex and seem not to be any faster when reduced to code.
+ *
+ * This implementation is based on Arai, Agui, and Nakajima's algorithm for
+ * scaled DCT. Their original paper (Trans. IEICE E-71(11):1095) is in
+ * Japanese, but the algorithm is described in the Pennebaker & Mitchell
+ * JPEG textbook (see REFERENCES section in file README). The following code
+ * is based directly on figure 4-8 in P&M.
+ * While an 8-point DCT cannot be done in less than 11 multiplies, it is
+ * possible to arrange the computation so that many of the multiplies are
+ * simple scalings of the final outputs. These multiplies can then be
+ * folded into the multiplications or divisions by the JPEG quantization
+ * table entries. The AA&N method leaves only 5 multiplies and 29 adds
+ * to be done in the DCT itself.
+ * The primary disadvantage of this method is that with fixed-point math,
+ * accuracy is lost due to imprecise representation of the scaled
+ * quantization values. The smaller the quantization table entry, the less
+ * precise the scaled value, so this implementation does worse with high-
+ * quality-setting files than with low-quality ones.
+ */
+
+/**
+ * @file
+ * Independent JPEG Group's fast AAN dct.
+ */
+
+#include <stdint.h>
+#include "libavutil/attributes.h"
+#include "dct.h"
+
+#define DCTSIZE 8
+#define GLOBAL(x) x
+#define RIGHT_SHIFT(x, n) ((x) >> (n))
+
+/*
+ * This module is specialized to the case DCTSIZE = 8.
+ */
+
+#if DCTSIZE != 8
+ Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */
+#endif
+
+
+/* Scaling decisions are generally the same as in the LL&M algorithm;
+ * see jfdctint.c for more details. However, we choose to descale
+ * (right shift) multiplication products as soon as they are formed,
+ * rather than carrying additional fractional bits into subsequent additions.
+ * This compromises accuracy slightly, but it lets us save a few shifts.
+ * More importantly, 16-bit arithmetic is then adequate (for 8-bit samples)
+ * everywhere except in the multiplications proper; this saves a good deal
+ * of work on 16-bit-int machines.
+ *
+ * Again to save a few shifts, the intermediate results between pass 1 and
+ * pass 2 are not upscaled, but are represented only to integral precision.
+ *
+ * A final compromise is to represent the multiplicative constants to only
+ * 8 fractional bits, rather than 13. This saves some shifting work on some
+ * machines, and may also reduce the cost of multiplication (since there
+ * are fewer one-bits in the constants).
+ */
+
+#define CONST_BITS 8
+
+
+/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
+ * causing a lot of useless floating-point operations at run time.
+ * To get around this we use the following pre-calculated constants.
+ * If you change CONST_BITS you may want to add appropriate values.
+ * (With a reasonable C compiler, you can just rely on the FIX() macro...)
+ */
+
+#if CONST_BITS == 8
+#define FIX_0_382683433 ((int32_t) 98) /* FIX(0.382683433) */
+#define FIX_0_541196100 ((int32_t) 139) /* FIX(0.541196100) */
+#define FIX_0_707106781 ((int32_t) 181) /* FIX(0.707106781) */
+#define FIX_1_306562965 ((int32_t) 334) /* FIX(1.306562965) */
+#else
+#define FIX_0_382683433 FIX(0.382683433)
+#define FIX_0_541196100 FIX(0.541196100)
+#define FIX_0_707106781 FIX(0.707106781)
+#define FIX_1_306562965 FIX(1.306562965)
+#endif
+
+
+/* We can gain a little more speed, with a further compromise in accuracy,
+ * by omitting the addition in a descaling shift. This yields an incorrectly
+ * rounded result half the time...
+ */
+
+#ifndef USE_ACCURATE_ROUNDING
+#undef DESCALE
+#define DESCALE(x,n) RIGHT_SHIFT(x, n)
+#endif
+
+
+/* Multiply an int16_t variable by an int32_t constant, and immediately
+ * descale to yield an int16_t result.
+ */
+
+#define MULTIPLY(var,const) ((int16_t) DESCALE((var) * (const), CONST_BITS))
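+
+/* Worked example (illustrative): with CONST_BITS == 8,
+ * MULTIPLY(100, FIX_0_707106781) evaluates to (100 * 181) >> 8 == 70, while
+ * the exact product 100 * 0.707106781 is about 70.71; without
+ * USE_ACCURATE_ROUNDING the plain right shift above discards that fraction,
+ * which is the accuracy/speed compromise described earlier.
+ */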
+
+static av_always_inline void row_fdct(int16_t * data){
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1, z2, z3, z4, z5, z11, z13;
+ int16_t *dataptr;
+ int ctr;
+
+ /* Pass 1: process rows. */
+
+ dataptr = data;
+ for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
+ tmp0 = dataptr[0] + dataptr[7];
+ tmp7 = dataptr[0] - dataptr[7];
+ tmp1 = dataptr[1] + dataptr[6];
+ tmp6 = dataptr[1] - dataptr[6];
+ tmp2 = dataptr[2] + dataptr[5];
+ tmp5 = dataptr[2] - dataptr[5];
+ tmp3 = dataptr[3] + dataptr[4];
+ tmp4 = dataptr[3] - dataptr[4];
+
+ /* Even part */
+
+ tmp10 = tmp0 + tmp3; /* phase 2 */
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+
+ dataptr[0] = tmp10 + tmp11; /* phase 3 */
+ dataptr[4] = tmp10 - tmp11;
+
+ z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781); /* c4 */
+ dataptr[2] = tmp13 + z1; /* phase 5 */
+ dataptr[6] = tmp13 - z1;
+
+ /* Odd part */
+
+ tmp10 = tmp4 + tmp5; /* phase 2 */
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp6 + tmp7;
+
+ /* The rotator is modified from fig 4-8 to avoid extra negations. */
+ z5 = MULTIPLY(tmp10 - tmp12, FIX_0_382683433); /* c6 */
+ z2 = MULTIPLY(tmp10, FIX_0_541196100) + z5; /* c2-c6 */
+ z4 = MULTIPLY(tmp12, FIX_1_306562965) + z5; /* c2+c6 */
+ z3 = MULTIPLY(tmp11, FIX_0_707106781); /* c4 */
+
+ z11 = tmp7 + z3; /* phase 5 */
+ z13 = tmp7 - z3;
+
+ dataptr[5] = z13 + z2; /* phase 6 */
+ dataptr[3] = z13 - z2;
+ dataptr[1] = z11 + z4;
+ dataptr[7] = z11 - z4;
+
+ dataptr += DCTSIZE; /* advance pointer to next row */
+ }
+}
+
+/*
+ * Perform the forward DCT on one block of samples.
+ */
+
+GLOBAL(void)
+ff_fdct_ifast (int16_t * data)
+{
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1, z2, z3, z4, z5, z11, z13;
+ int16_t *dataptr;
+ int ctr;
+
+ row_fdct(data);
+
+ /* Pass 2: process columns. */
+
+ dataptr = data;
+ for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
+ tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7];
+ tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7];
+ tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6];
+ tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6];
+ tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5];
+ tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];
+ tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4];
+ tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4];
+
+ /* Even part */
+
+ tmp10 = tmp0 + tmp3; /* phase 2 */
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+
+ dataptr[DCTSIZE*0] = tmp10 + tmp11; /* phase 3 */
+ dataptr[DCTSIZE*4] = tmp10 - tmp11;
+
+ z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781); /* c4 */
+ dataptr[DCTSIZE*2] = tmp13 + z1; /* phase 5 */
+ dataptr[DCTSIZE*6] = tmp13 - z1;
+
+ /* Odd part */
+
+ tmp10 = tmp4 + tmp5; /* phase 2 */
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp6 + tmp7;
+
+ /* The rotator is modified from fig 4-8 to avoid extra negations. */
+ z5 = MULTIPLY(tmp10 - tmp12, FIX_0_382683433); /* c6 */
+ z2 = MULTIPLY(tmp10, FIX_0_541196100) + z5; /* c2-c6 */
+ z4 = MULTIPLY(tmp12, FIX_1_306562965) + z5; /* c2+c6 */
+ z3 = MULTIPLY(tmp11, FIX_0_707106781); /* c4 */
+
+ z11 = tmp7 + z3; /* phase 5 */
+ z13 = tmp7 - z3;
+
+ dataptr[DCTSIZE*5] = z13 + z2; /* phase 6 */
+ dataptr[DCTSIZE*3] = z13 - z2;
+ dataptr[DCTSIZE*1] = z11 + z4;
+ dataptr[DCTSIZE*7] = z11 - z4;
+
+ dataptr++; /* advance pointer to next column */
+ }
+}
+
+/*
+ * Perform the forward 2-4-8 DCT on one block of samples.
+ */
+
+GLOBAL(void)
+ff_fdct_ifast248 (int16_t * data)
+{
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1;
+ int16_t *dataptr;
+ int ctr;
+
+ row_fdct(data);
+
+ /* Pass 2: process columns. */
+
+ dataptr = data;
+ for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
+ tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*1];
+ tmp1 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3];
+ tmp2 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5];
+ tmp3 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7];
+ tmp4 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*1];
+ tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3];
+ tmp6 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5];
+ tmp7 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7];
+
+ /* Even part */
+
+ tmp10 = tmp0 + tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ tmp13 = tmp0 - tmp3;
+
+ dataptr[DCTSIZE*0] = tmp10 + tmp11;
+ dataptr[DCTSIZE*4] = tmp10 - tmp11;
+
+ z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781);
+ dataptr[DCTSIZE*2] = tmp13 + z1;
+ dataptr[DCTSIZE*6] = tmp13 - z1;
+
+ tmp10 = tmp4 + tmp7;
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp5 - tmp6;
+ tmp13 = tmp4 - tmp7;
+
+ dataptr[DCTSIZE*1] = tmp10 + tmp11;
+ dataptr[DCTSIZE*5] = tmp10 - tmp11;
+
+ z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781);
+ dataptr[DCTSIZE*3] = tmp13 + z1;
+ dataptr[DCTSIZE*7] = tmp13 - z1;
+
+ dataptr++; /* advance pointer to next column */
+ }
+}
+
+
+#undef GLOBAL
+#undef CONST_BITS
+#undef DESCALE
+#undef FIX_0_541196100
+#undef FIX_1_306562965
diff --git a/media/ffvpx/libavcodec/jfdctint.c b/media/ffvpx/libavcodec/jfdctint.c
new file mode 100644
index 0000000000..6a39578f88
--- /dev/null
+++ b/media/ffvpx/libavcodec/jfdctint.c
@@ -0,0 +1,25 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BIT_DEPTH 8
+#include "jfdctint_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 10
+#include "jfdctint_template.c"
+#undef BIT_DEPTH
diff --git a/media/ffvpx/libavcodec/jfdctint_template.c b/media/ffvpx/libavcodec/jfdctint_template.c
new file mode 100644
index 0000000000..67fb77b5e1
--- /dev/null
+++ b/media/ffvpx/libavcodec/jfdctint_template.c
@@ -0,0 +1,398 @@
+/*
+ * This file is part of the Independent JPEG Group's software.
+ *
+ * The authors make NO WARRANTY or representation, either express or implied,
+ * with respect to this software, its quality, accuracy, merchantability, or
+ * fitness for a particular purpose. This software is provided "AS IS", and
+ * you, its user, assume the entire risk as to its quality and accuracy.
+ *
+ * This software is copyright (C) 1991-1996, Thomas G. Lane.
+ * All Rights Reserved except as specified below.
+ *
+ * Permission is hereby granted to use, copy, modify, and distribute this
+ * software (or portions thereof) for any purpose, without fee, subject to
+ * these conditions:
+ * (1) If any part of the source code for this software is distributed, then
+ * this README file must be included, with this copyright and no-warranty
+ * notice unaltered; and any additions, deletions, or changes to the original
+ * files must be clearly indicated in accompanying documentation.
+ * (2) If only executable code is distributed, then the accompanying
+ * documentation must state that "this software is based in part on the work
+ * of the Independent JPEG Group".
+ * (3) Permission for use of this software is granted only if the user accepts
+ * full responsibility for any undesirable consequences; the authors accept
+ * NO LIABILITY for damages of any kind.
+ *
+ * These conditions apply to any software derived from or based on the IJG
+ * code, not just to the unmodified library. If you use our work, you ought
+ * to acknowledge us.
+ *
+ * Permission is NOT granted for the use of any IJG author's name or company
+ * name in advertising or publicity relating to this software or products
+ * derived from it. This software may be referred to only as "the Independent
+ * JPEG Group's software".
+ *
+ * We specifically permit and encourage the use of this software as the basis
+ * of commercial products, provided that all warranty or liability claims are
+ * assumed by the product vendor.
+ *
+ * This file contains a slow-but-accurate integer implementation of the
+ * forward DCT (Discrete Cosine Transform).
+ *
+ * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT
+ * on each column. Direct algorithms are also available, but they are
+ * much more complex and seem not to be any faster when reduced to code.
+ *
+ * This implementation is based on an algorithm described in
+ * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT
+ * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics,
+ * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991.
+ * The primary algorithm described there uses 11 multiplies and 29 adds.
+ * We use their alternate method with 12 multiplies and 32 adds.
+ * The advantage of this method is that no data path contains more than one
+ * multiplication; this allows a very simple and accurate implementation in
+ * scaled fixed-point arithmetic, with a minimal number of shifts.
+ */
+
+/**
+ * @file
+ * Independent JPEG Group's slow & accurate dct.
+ */
+
+#include "libavutil/common.h"
+#include "dct.h"
+
+#include "bit_depth_template.c"
+
+#define DCTSIZE 8
+#define BITS_IN_JSAMPLE BIT_DEPTH
+#define GLOBAL(x) x
+#define RIGHT_SHIFT(x, n) ((x) >> (n))
+#define MULTIPLY16C16(var,const) ((var)*(const))
+#define DESCALE(x,n) RIGHT_SHIFT((x) + (1 << ((n) - 1)), n)
+
+
+/*
+ * This module is specialized to the case DCTSIZE = 8.
+ */
+
+#if DCTSIZE != 8
+#error "Sorry, this code only copes with 8x8 DCTs."
+#endif
+
+
+/*
+ * The poop on this scaling stuff is as follows:
+ *
+ * Each 1-D DCT step produces outputs which are a factor of sqrt(N)
+ * larger than the true DCT outputs. The final outputs are therefore
+ * a factor of N larger than desired; since N=8 this can be cured by
+ * a simple right shift at the end of the algorithm. The advantage of
+ * this arrangement is that we save two multiplications per 1-D DCT,
+ * because the y0 and y4 outputs need not be divided by sqrt(N).
+ * In the IJG code, this factor of 8 is removed by the quantization step
+ * (in jcdctmgr.c), NOT in this module.
+ *
+ * We have to do addition and subtraction of the integer inputs, which
+ * is no problem, and multiplication by fractional constants, which is
+ * a problem to do in integer arithmetic. We multiply all the constants
+ * by CONST_SCALE and convert them to integer constants (thus retaining
+ * CONST_BITS bits of precision in the constants). After doing a
+ * multiplication we have to divide the product by CONST_SCALE, with proper
+ * rounding, to produce the correct output. This division can be done
+ * cheaply as a right shift of CONST_BITS bits. We postpone shifting
+ * as long as possible so that partial sums can be added together with
+ * full fractional precision.
+ *
+ * The outputs of the first pass are scaled up by PASS1_BITS bits so that
+ * they are represented to better-than-integral precision. These outputs
+ * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
+ * with the recommended scaling. (For 12-bit sample data, the intermediate
+ * array is int32_t anyway.)
+ *
+ * To avoid overflow of the 32-bit intermediate results in pass 2, we must
+ * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis
+ * shows that the values given below are the most effective.
+ */
+
+#undef CONST_BITS
+#undef PASS1_BITS
+#undef OUT_SHIFT
+
+#if BITS_IN_JSAMPLE == 8
+#define CONST_BITS 13
+#define PASS1_BITS 4 /* set this to 2 if 16x16 multiplies are faster */
+#define OUT_SHIFT PASS1_BITS
+#else
+#define CONST_BITS 13
+#define PASS1_BITS 1 /* lose a little precision to avoid overflow */
+#define OUT_SHIFT (PASS1_BITS + 1)
+#endif
+
+/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
+ * causing a lot of useless floating-point operations at run time.
+ * To get around this we use the following pre-calculated constants.
+ * If you change CONST_BITS you may want to add appropriate values.
+ * (With a reasonable C compiler, you can just rely on the FIX() macro...)
+ */
+
+#if CONST_BITS == 13
+#define FIX_0_298631336 ((int32_t) 2446) /* FIX(0.298631336) */
+#define FIX_0_390180644 ((int32_t) 3196) /* FIX(0.390180644) */
+#define FIX_0_541196100 ((int32_t) 4433) /* FIX(0.541196100) */
+#define FIX_0_765366865 ((int32_t) 6270) /* FIX(0.765366865) */
+#define FIX_0_899976223 ((int32_t) 7373) /* FIX(0.899976223) */
+#define FIX_1_175875602 ((int32_t) 9633) /* FIX(1.175875602) */
+#define FIX_1_501321110 ((int32_t) 12299) /* FIX(1.501321110) */
+#define FIX_1_847759065 ((int32_t) 15137) /* FIX(1.847759065) */
+#define FIX_1_961570560 ((int32_t) 16069) /* FIX(1.961570560) */
+#define FIX_2_053119869 ((int32_t) 16819) /* FIX(2.053119869) */
+#define FIX_2_562915447 ((int32_t) 20995) /* FIX(2.562915447) */
+#define FIX_3_072711026 ((int32_t) 25172) /* FIX(3.072711026) */
+#else
+#define FIX_0_298631336 FIX(0.298631336)
+#define FIX_0_390180644 FIX(0.390180644)
+#define FIX_0_541196100 FIX(0.541196100)
+#define FIX_0_765366865 FIX(0.765366865)
+#define FIX_0_899976223 FIX(0.899976223)
+#define FIX_1_175875602 FIX(1.175875602)
+#define FIX_1_501321110 FIX(1.501321110)
+#define FIX_1_847759065 FIX(1.847759065)
+#define FIX_1_961570560 FIX(1.961570560)
+#define FIX_2_053119869 FIX(2.053119869)
+#define FIX_2_562915447 FIX(2.562915447)
+#define FIX_3_072711026 FIX(3.072711026)
+#endif
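+
+/* For reference (an added sketch, not in the original source): each entry in
+ * the table above is just FIX(x) evaluated by hand, assuming the usual IJG
+ * definition FIX(x) == (int32_t) ((x) * (1 << CONST_BITS) + 0.5). With
+ * CONST_BITS = 13, (int32_t) (0.541196100 * 8192 + 0.5) = 4433, matching
+ * FIX_0_541196100 above.
+ */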
+
+
+/* Multiply an int32_t variable by an int32_t constant to yield an int32_t result.
+ * For 8-bit samples with the recommended scaling, all the variable
+ * and constant values involved are no more than 16 bits wide, so a
+ * 16x16->32 bit multiply can be used instead of a full 32x32 multiply.
+ * For 12-bit samples, a full 32-bit multiplication will be needed.
+ */
+
+#if BITS_IN_JSAMPLE == 8 && CONST_BITS<=13 && PASS1_BITS<=2
+#define MULTIPLY(var,const) MULTIPLY16C16(var,const)
+#else
+#define MULTIPLY(var,const) ((var) * (const))
+#endif
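+
+/* Added note: in this build MULTIPLY16C16() is defined above as a plain
+ * multiplication, so the #if mainly documents when a narrower 16x16->32
+ * multiply would be safe; per the earlier comment, the row-pass outputs need
+ * at most BITS_IN_JSAMPLE + PASS1_BITS + 3 bits, which the recommended
+ * scaling keeps within a 16-bit word.
+ */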
+
+
+static av_always_inline void FUNC(row_fdct)(int16_t *data)
+{
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1, z2, z3, z4, z5;
+ int16_t *dataptr;
+ int ctr;
+
+ /* Pass 1: process rows. */
+ /* Note results are scaled up by sqrt(8) compared to a true DCT; */
+ /* furthermore, we scale the results by 2**PASS1_BITS. */
+
+ dataptr = data;
+ for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
+ tmp0 = dataptr[0] + dataptr[7];
+ tmp7 = dataptr[0] - dataptr[7];
+ tmp1 = dataptr[1] + dataptr[6];
+ tmp6 = dataptr[1] - dataptr[6];
+ tmp2 = dataptr[2] + dataptr[5];
+ tmp5 = dataptr[2] - dataptr[5];
+ tmp3 = dataptr[3] + dataptr[4];
+ tmp4 = dataptr[3] - dataptr[4];
+
+ /* Even part per LL&M figure 1 --- note that published figure is faulty;
+ * rotator "sqrt(2)*c1" should be "sqrt(2)*c6".
+ */
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+
+ dataptr[0] = (int16_t) ((tmp10 + tmp11) * (1 << PASS1_BITS));
+ dataptr[4] = (int16_t) ((tmp10 - tmp11) * (1 << PASS1_BITS));
+
+ z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
+ dataptr[2] = (int16_t) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
+ CONST_BITS-PASS1_BITS);
+ dataptr[6] = (int16_t) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
+ CONST_BITS-PASS1_BITS);
+
+ /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
+ * cK represents cos(K*pi/16).
+ * i0..i3 in the paper are tmp4..tmp7 here.
+ */
+
+ z1 = tmp4 + tmp7;
+ z2 = tmp5 + tmp6;
+ z3 = tmp4 + tmp6;
+ z4 = tmp5 + tmp7;
+ z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
+
+ tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
+ tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
+ tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
+ tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
+ z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
+ z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
+ z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
+ z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
+
+ z3 += z5;
+ z4 += z5;
+
+ dataptr[7] = (int16_t) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS);
+ dataptr[5] = (int16_t) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS);
+ dataptr[3] = (int16_t) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS);
+ dataptr[1] = (int16_t) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS);
+
+ dataptr += DCTSIZE; /* advance pointer to next row */
+ }
+}
+
+/*
+ * Perform the forward DCT on one block of samples.
+ */
+
+GLOBAL(void)
+FUNC(ff_jpeg_fdct_islow)(int16_t *data)
+{
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1, z2, z3, z4, z5;
+ int16_t *dataptr;
+ int ctr;
+
+ FUNC(row_fdct)(data);
+
+ /* Pass 2: process columns.
+ * We remove the PASS1_BITS scaling, but leave the results scaled up
+ * by an overall factor of 8.
+ */
+
+ dataptr = data;
+ for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
+ tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7];
+ tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7];
+ tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6];
+ tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6];
+ tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5];
+ tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];
+ tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4];
+ tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4];
+
+ /* Even part per LL&M figure 1 --- note that published figure is faulty;
+ * rotator "sqrt(2)*c1" should be "sqrt(2)*c6".
+ */
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+
+ dataptr[DCTSIZE*0] = DESCALE(tmp10 + tmp11, OUT_SHIFT);
+ dataptr[DCTSIZE*4] = DESCALE(tmp10 - tmp11, OUT_SHIFT);
+
+ z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
+ dataptr[DCTSIZE*2] = DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
+ CONST_BITS + OUT_SHIFT);
+ dataptr[DCTSIZE*6] = DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
+ CONST_BITS + OUT_SHIFT);
+
+ /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
+ * cK represents cos(K*pi/16).
+ * i0..i3 in the paper are tmp4..tmp7 here.
+ */
+
+ z1 = tmp4 + tmp7;
+ z2 = tmp5 + tmp6;
+ z3 = tmp4 + tmp6;
+ z4 = tmp5 + tmp7;
+ z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
+
+ tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
+ tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
+ tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
+ tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
+ z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
+ z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
+ z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
+ z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
+
+ z3 += z5;
+ z4 += z5;
+
+ dataptr[DCTSIZE*7] = DESCALE(tmp4 + z1 + z3, CONST_BITS + OUT_SHIFT);
+ dataptr[DCTSIZE*5] = DESCALE(tmp5 + z2 + z4, CONST_BITS + OUT_SHIFT);
+ dataptr[DCTSIZE*3] = DESCALE(tmp6 + z2 + z3, CONST_BITS + OUT_SHIFT);
+ dataptr[DCTSIZE*1] = DESCALE(tmp7 + z1 + z4, CONST_BITS + OUT_SHIFT);
+
+ dataptr++; /* advance pointer to next column */
+ }
+}
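+
+/* Usage sketch (added, hedged): the transform works in place on a row-major
+ * 8x8 block stored as 64 contiguous int16_t values, e.g.
+ *
+ *     int16_t block[64];              // filled with input samples
+ *     FUNC(ff_jpeg_fdct_islow)(block);
+ *
+ * and, as noted in the header comment, the coefficients come back scaled by
+ * an overall factor of 8 that the caller's quantization step is expected to
+ * absorb.
+ */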
+
+/*
+ * The secret of DCT2-4-8 is really simple: you do the usual 1-D DCT on the
+ * rows and then, instead of doing the even and odd parts on the columns,
+ * you do the even part twice.
+ */
+GLOBAL(void)
+FUNC(ff_fdct248_islow)(int16_t *data)
+{
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z1;
+ int16_t *dataptr;
+ int ctr;
+
+ FUNC(row_fdct)(data);
+
+ /* Pass 2: process columns.
+ * We remove the PASS1_BITS scaling, but leave the results scaled up
+ * by an overall factor of 8.
+ */
+
+ dataptr = data;
+ for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
+ tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*1];
+ tmp1 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3];
+ tmp2 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5];
+ tmp3 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7];
+ tmp4 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*1];
+ tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3];
+ tmp6 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5];
+ tmp7 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7];
+
+ tmp10 = tmp0 + tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ tmp13 = tmp0 - tmp3;
+
+ dataptr[DCTSIZE*0] = DESCALE(tmp10 + tmp11, OUT_SHIFT);
+ dataptr[DCTSIZE*4] = DESCALE(tmp10 - tmp11, OUT_SHIFT);
+
+ z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
+ dataptr[DCTSIZE*2] = DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
+ CONST_BITS+OUT_SHIFT);
+ dataptr[DCTSIZE*6] = DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
+ CONST_BITS+OUT_SHIFT);
+
+ tmp10 = tmp4 + tmp7;
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp5 - tmp6;
+ tmp13 = tmp4 - tmp7;
+
+ dataptr[DCTSIZE*1] = DESCALE(tmp10 + tmp11, OUT_SHIFT);
+ dataptr[DCTSIZE*5] = DESCALE(tmp10 - tmp11, OUT_SHIFT);
+
+ z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
+ dataptr[DCTSIZE*3] = DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
+ CONST_BITS + OUT_SHIFT);
+ dataptr[DCTSIZE*7] = DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
+ CONST_BITS + OUT_SHIFT);
+
+ dataptr++; /* advance pointer to next column */
+ }
+}
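+
+/* Added note: as the loop above shows, the rows still receive the full
+ * 8-point DCT via row_fdct(), while the columns are first split into sums
+ * (tmp0..tmp3) and differences (tmp4..tmp7) of vertical pairs and each half
+ * then gets the 4-point even part, so output rows 0,2,4,6 come from the pair
+ * sums and rows 1,3,5,7 from the pair differences.
+ */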
diff --git a/media/ffvpx/libavcodec/jrevdct.c b/media/ffvpx/libavcodec/jrevdct.c
new file mode 100644
index 0000000000..7f1863515f
--- /dev/null
+++ b/media/ffvpx/libavcodec/jrevdct.c
@@ -0,0 +1,1172 @@
+/*
+ * This file is part of the Independent JPEG Group's software.
+ *
+ * The authors make NO WARRANTY or representation, either express or implied,
+ * with respect to this software, its quality, accuracy, merchantability, or
+ * fitness for a particular purpose. This software is provided "AS IS", and
+ * you, its user, assume the entire risk as to its quality and accuracy.
+ *
+ * This software is copyright (C) 1991, 1992, Thomas G. Lane.
+ * All Rights Reserved except as specified below.
+ *
+ * Permission is hereby granted to use, copy, modify, and distribute this
+ * software (or portions thereof) for any purpose, without fee, subject to
+ * these conditions:
+ * (1) If any part of the source code for this software is distributed, then
+ * this README file must be included, with this copyright and no-warranty
+ * notice unaltered; and any additions, deletions, or changes to the original
+ * files must be clearly indicated in accompanying documentation.
+ * (2) If only executable code is distributed, then the accompanying
+ * documentation must state that "this software is based in part on the work
+ * of the Independent JPEG Group".
+ * (3) Permission for use of this software is granted only if the user accepts
+ * full responsibility for any undesirable consequences; the authors accept
+ * NO LIABILITY for damages of any kind.
+ *
+ * These conditions apply to any software derived from or based on the IJG
+ * code, not just to the unmodified library. If you use our work, you ought
+ * to acknowledge us.
+ *
+ * Permission is NOT granted for the use of any IJG author's name or company
+ * name in advertising or publicity relating to this software or products
+ * derived from it. This software may be referred to only as "the Independent
+ * JPEG Group's software".
+ *
+ * We specifically permit and encourage the use of this software as the basis
+ * of commercial products, provided that all warranty or liability claims are
+ * assumed by the product vendor.
+ *
+ * This file contains the basic inverse-DCT transformation subroutine.
+ *
+ * This implementation is based on an algorithm described in
+ * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT
+ * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics,
+ * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991.
+ * The primary algorithm described there uses 11 multiplies and 29 adds.
+ * We use their alternate method with 12 multiplies and 32 adds.
+ * The advantage of this method is that no data path contains more than one
+ * multiplication; this allows a very simple and accurate implementation in
+ * scaled fixed-point arithmetic, with a minimal number of shifts.
+ *
+ * I've made lots of modifications to attempt to take advantage of the
+ * sparse nature of the DCT matrices we're getting. Although the logic
+ * is cumbersome, it's straightforward and the resulting code is much
+ * faster.
+ *
+ * A better way to do this would be to pass in the DCT block as a sparse
+ * matrix, perhaps with the difference cases encoded.
+ */
+
+/**
+ * @file
+ * Independent JPEG Group's LLM IDCT.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/intreadwrite.h"
+
+#include "dct.h"
+#include "idctdsp.h"
+
+#define EIGHT_BIT_SAMPLES
+
+#define DCTSIZE 8
+#define DCTSIZE2 64
+
+#define GLOBAL
+
+#define RIGHT_SHIFT(x, n) ((x) >> (n))
+
+typedef int16_t DCTBLOCK[DCTSIZE2];
+
+#define CONST_BITS 13
+
+/*
+ * This routine is specialized to the case DCTSIZE = 8.
+ */
+
+#if DCTSIZE != 8
+ Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */
+#endif
+
+
+/*
+ * A 2-D IDCT can be done by 1-D IDCT on each row followed by 1-D IDCT
+ * on each column. Direct algorithms are also available, but they are
+ * much more complex and seem not to be any faster when reduced to code.
+ *
+ * The poop on this scaling stuff is as follows:
+ *
+ * Each 1-D IDCT step produces outputs which are a factor of sqrt(N)
+ * larger than the true IDCT outputs. The final outputs are therefore
+ * a factor of N larger than desired; since N=8 this can be cured by
+ * a simple right shift at the end of the algorithm. The advantage of
+ * this arrangement is that we save two multiplications per 1-D IDCT,
+ * because the y0 and y4 inputs need not be divided by sqrt(N).
+ *
+ * We have to do addition and subtraction of the integer inputs, which
+ * is no problem, and multiplication by fractional constants, which is
+ * a problem to do in integer arithmetic. We multiply all the constants
+ * by CONST_SCALE and convert them to integer constants (thus retaining
+ * CONST_BITS bits of precision in the constants). After doing a
+ * multiplication we have to divide the product by CONST_SCALE, with proper
+ * rounding, to produce the correct output. This division can be done
+ * cheaply as a right shift of CONST_BITS bits. We postpone shifting
+ * as long as possible so that partial sums can be added together with
+ * full fractional precision.
+ *
+ * The outputs of the first pass are scaled up by PASS1_BITS bits so that
+ * they are represented to better-than-integral precision. These outputs
+ * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
+ * with the recommended scaling. (To scale up 12-bit sample data further, an
+ * intermediate int32 array would be needed.)
+ *
+ * To avoid overflow of the 32-bit intermediate results in pass 2, we must
+ * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis
+ * shows that the values given below are the most effective.
+ */
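+
+/* Worked numbers (added sketch): with EIGHT_BIT_SAMPLES, PASS1_BITS is 2, so
+ * the column pass below descales by CONST_BITS + PASS1_BITS + 3 = 18 bits,
+ * removing the 2^13 constant scale, the 2^2 pass-1 gain and the overall
+ * factor of 8 (2^3) in a single shift.
+ */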
+
+#ifdef EIGHT_BIT_SAMPLES
+#define PASS1_BITS 2
+#else
+#define PASS1_BITS 1 /* lose a little precision to avoid overflow */
+#endif
+
+#define ONE ((int32_t) 1)
+
+#define CONST_SCALE (ONE << CONST_BITS)
+
+/* Convert a positive real constant to an integer scaled by CONST_SCALE.
+ * IMPORTANT: if your compiler doesn't do this arithmetic at compile time,
+ * you will pay a significant penalty in run time. In that case, figure
+ * the correct integer constant values and insert them by hand.
+ */
+
+/* FIX() itself is no longer used; all the constants below were precomputed with it. */
+#define FIX(x) ((int32_t) ((x) * CONST_SCALE + 0.5))
+
+/* Descale and correctly round an int32_t value that's scaled by N bits.
+ * We assume RIGHT_SHIFT rounds towards minus infinity, so adding
+ * the fudge factor is correct for either sign of X.
+ */
+
+#define DESCALE(x,n) RIGHT_SHIFT((x) + (ONE << ((n)-1)), n)
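+
+/* Added example: given the rounding assumption above, DESCALE(-443300, 13)
+ * evaluates as (-443300 + 4096) >> 13 = -439204 >> 13 = -54, matching
+ * round(-443300 / 8192.0) = round(-54.11) = -54, so the fudge factor rounds
+ * correctly for negative values as well.
+ */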
+
+/* Multiply an int32_t variable by an int32_t constant to yield an int32_t result.
+ * For 8-bit samples with the recommended scaling, all the variable
+ * and constant values involved are no more than 16 bits wide, so a
+ * 16x16->32 bit multiply can be used instead of a full 32x32 multiply;
+ * this provides a useful speedup on many machines.
+ * There is no way to specify a 16x16->32 multiply in portable C, but
+ * some C compilers will do the right thing if you provide the correct
+ * combination of casts.
+ * NB: for 12-bit samples, a full 32-bit multiplication will be needed.
+ */
+
+#ifdef EIGHT_BIT_SAMPLES
+#ifdef SHORTxSHORT_32 /* may work if 'int' is 32 bits */
+#define MULTIPLY(var,const) (((int16_t) (var)) * ((int16_t) (const)))
+#endif
+#ifdef SHORTxLCONST_32 /* known to work with Microsoft C 6.0 */
+#define MULTIPLY(var,const) (((int16_t) (var)) * ((int32_t) (const)))
+#endif
+#endif
+
+#ifndef MULTIPLY /* default definition */
+#define MULTIPLY(var,const) ((var) * (const))
+#endif
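+
+/* Added note: with CONST_BITS = 13 the largest precomputed constant below is
+ * FIX_3_072711026 = 25172, which still fits in a signed 16-bit value, so a
+ * build known to promote int16_t products to 32 bits could opt into the
+ * narrower multiply, e.g. by compiling with -DSHORTxLCONST_32; otherwise the
+ * portable default above applies.
+ */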
+
+
+/*
+ * Unlike our decoder, where we approximate the FIX constants, we need exact
+ * ones here, or successive P-frames will drift too much with reference-frame
+ * coding.
+ */
+#define FIX_0_211164243 1730
+#define FIX_0_275899380 2260
+#define FIX_0_298631336 2446
+#define FIX_0_390180644 3196
+#define FIX_0_509795579 4176
+#define FIX_0_541196100 4433
+#define FIX_0_601344887 4926
+#define FIX_0_765366865 6270
+#define FIX_0_785694958 6436
+#define FIX_0_899976223 7373
+#define FIX_1_061594337 8697
+#define FIX_1_111140466 9102
+#define FIX_1_175875602 9633
+#define FIX_1_306562965 10703
+#define FIX_1_387039845 11363
+#define FIX_1_451774981 11893
+#define FIX_1_501321110 12299
+#define FIX_1_662939225 13623
+#define FIX_1_847759065 15137
+#define FIX_1_961570560 16069
+#define FIX_2_053119869 16819
+#define FIX_2_172734803 17799
+#define FIX_2_562915447 20995
+#define FIX_3_072711026 25172
+
+/*
+ * Perform the inverse DCT on one block of coefficients.
+ */
+
+void ff_j_rev_dct(DCTBLOCK data)
+{
+ int32_t tmp0, tmp1, tmp2, tmp3;
+ int32_t tmp10, tmp11, tmp12, tmp13;
+ int32_t z1, z2, z3, z4, z5;
+ int32_t d0, d1, d2, d3, d4, d5, d6, d7;
+ register int16_t *dataptr;
+ int rowctr;
+
+ /* Pass 1: process rows. */
+ /* Note results are scaled up by sqrt(8) compared to a true IDCT; */
+ /* furthermore, we scale the results by 2**PASS1_BITS. */
+
+ dataptr = data;
+
+ for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
+ /* Due to quantization, we will usually find that many of the input
+ * coefficients are zero, especially the AC terms. We can exploit this
+ * by short-circuiting the IDCT calculation for any row in which all
+ * the AC terms are zero. In that case each output is equal to the
+ * DC coefficient (with scale factor as needed).
+ * With typical images and quantization tables, half or more of the
+ * row DCT calculations can be simplified this way.
+ */
+
+ register uint8_t *idataptr = (uint8_t*)dataptr;
+
+ /* WARNING: we do the same permutation as MMX idct to simplify the
+ video core */
+ d0 = dataptr[0];
+ d2 = dataptr[1];
+ d4 = dataptr[2];
+ d6 = dataptr[3];
+ d1 = dataptr[4];
+ d3 = dataptr[5];
+ d5 = dataptr[6];
+ d7 = dataptr[7];
+
+ if ((d1 | d2 | d3 | d4 | d5 | d6 | d7) == 0) {
+ /* AC terms all zero */
+ if (d0) {
+ /* Compute a 32 bit value to assign. */
+ int16_t dcval = (int16_t) (d0 * (1 << PASS1_BITS));
+ register unsigned v = (dcval & 0xffff) | ((uint32_t)dcval << 16);
+
+ AV_WN32A(&idataptr[ 0], v);
+ AV_WN32A(&idataptr[ 4], v);
+ AV_WN32A(&idataptr[ 8], v);
+ AV_WN32A(&idataptr[12], v);
+ }
+
+ dataptr += DCTSIZE; /* advance pointer to next row */
+ continue;
+ }
+
+ /* Even part: reverse the even part of the forward DCT. */
+ /* The rotator is sqrt(2)*c(-6). */
+{
+ if (d6) {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */
+ z1 = MULTIPLY(d2 + d6, FIX_0_541196100);
+ tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065);
+ tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865);
+
+ tmp0 = (d0 + d4) * CONST_SCALE;
+ tmp1 = (d0 - d4) * CONST_SCALE;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */
+ tmp2 = MULTIPLY(-d6, FIX_1_306562965);
+ tmp3 = MULTIPLY(d6, FIX_0_541196100);
+
+ tmp0 = (d0 + d4) * CONST_SCALE;
+ tmp1 = (d0 - d4) * CONST_SCALE;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ }
+ } else {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */
+ tmp2 = MULTIPLY(d2, FIX_0_541196100);
+ tmp3 = MULTIPLY(d2, FIX_1_306562965);
+
+ tmp0 = (d0 + d4) * CONST_SCALE;
+ tmp1 = (d0 - d4) * CONST_SCALE;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */
+ tmp10 = tmp13 = (d0 + d4) * CONST_SCALE;
+ tmp11 = tmp12 = (d0 - d4) * CONST_SCALE;
+ }
+ }
+
+ /* Odd part per figure 8; the matrix is unitary and hence its
+ * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
+ */
+
+ if (d7) {
+ if (d5) {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 != 0, d7 != 0 */
+ z1 = d7 + d1;
+ z2 = d5 + d3;
+ z3 = d7 + d3;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(z3 + z4, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 != 0, d7 != 0 */
+ z2 = d5 + d3;
+ z3 = d7 + d3;
+ z5 = MULTIPLY(z3 + d5, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 = z1 + z4;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 != 0, d7 != 0 */
+ z1 = d7 + d1;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(d7 + z4, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 = z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 == 0, d5 != 0, d7 != 0 */
+ tmp0 = MULTIPLY(-d7, FIX_0_601344887);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ tmp1 = MULTIPLY(-d5, FIX_0_509795579);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+ z5 = MULTIPLY(d5 + d7, FIX_1_175875602);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z3;
+ tmp1 += z4;
+ tmp2 = z2 + z3;
+ tmp3 = z1 + z4;
+ }
+ }
+ } else {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 == 0, d7 != 0 */
+ z1 = d7 + d1;
+ z3 = d7 + d3;
+ z5 = MULTIPLY(z3 + d1, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-d3, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-d1, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 = z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 == 0, d7 != 0 */
+ z3 = d7 + d3;
+
+ tmp0 = MULTIPLY(-d7, FIX_0_601344887);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ tmp2 = MULTIPLY(d3, FIX_0_509795579);
+ z2 = MULTIPLY(-d3, FIX_2_562915447);
+ z5 = MULTIPLY(z3, FIX_1_175875602);
+ z3 = MULTIPLY(-z3, FIX_0_785694958);
+
+ tmp0 += z3;
+ tmp1 = z2 + z5;
+ tmp2 += z3;
+ tmp3 = z1 + z5;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 == 0, d7 != 0 */
+ z1 = d7 + d1;
+ z5 = MULTIPLY(z1, FIX_1_175875602);
+
+ z1 = MULTIPLY(z1, FIX_0_275899380);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ tmp0 = MULTIPLY(-d7, FIX_1_662939225);
+ z4 = MULTIPLY(-d1, FIX_0_390180644);
+ tmp3 = MULTIPLY(d1, FIX_1_111140466);
+
+ tmp0 += z1;
+ tmp1 = z4 + z5;
+ tmp2 = z3 + z5;
+ tmp3 += z1;
+ } else {
+ /* d1 == 0, d3 == 0, d5 == 0, d7 != 0 */
+ tmp0 = MULTIPLY(-d7, FIX_1_387039845);
+ tmp1 = MULTIPLY(d7, FIX_1_175875602);
+ tmp2 = MULTIPLY(-d7, FIX_0_785694958);
+ tmp3 = MULTIPLY(d7, FIX_0_275899380);
+ }
+ }
+ }
+ } else {
+ if (d5) {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 != 0, d7 == 0 */
+ z2 = d5 + d3;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(d3 + z4, FIX_1_175875602);
+
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-d1, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-d3, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 = z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 != 0, d7 == 0 */
+ z2 = d5 + d3;
+
+ z5 = MULTIPLY(z2, FIX_1_175875602);
+ tmp1 = MULTIPLY(d5, FIX_1_662939225);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+ z2 = MULTIPLY(-z2, FIX_1_387039845);
+ tmp2 = MULTIPLY(d3, FIX_1_111140466);
+ z3 = MULTIPLY(-d3, FIX_1_961570560);
+
+ tmp0 = z3 + z5;
+ tmp1 += z2;
+ tmp2 += z2;
+ tmp3 = z4 + z5;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 != 0, d7 == 0 */
+ z4 = d5 + d1;
+
+ z5 = MULTIPLY(z4, FIX_1_175875602);
+ z1 = MULTIPLY(-d1, FIX_0_899976223);
+ tmp3 = MULTIPLY(d1, FIX_0_601344887);
+ tmp1 = MULTIPLY(-d5, FIX_0_509795579);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z4 = MULTIPLY(z4, FIX_0_785694958);
+
+ tmp0 = z1 + z5;
+ tmp1 += z4;
+ tmp2 = z2 + z5;
+ tmp3 += z4;
+ } else {
+ /* d1 == 0, d3 == 0, d5 != 0, d7 == 0 */
+ tmp0 = MULTIPLY(d5, FIX_1_175875602);
+ tmp1 = MULTIPLY(d5, FIX_0_275899380);
+ tmp2 = MULTIPLY(-d5, FIX_1_387039845);
+ tmp3 = MULTIPLY(d5, FIX_0_785694958);
+ }
+ }
+ } else {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 == 0, d7 == 0 */
+ z5 = d1 + d3;
+ tmp3 = MULTIPLY(d1, FIX_0_211164243);
+ tmp2 = MULTIPLY(-d3, FIX_1_451774981);
+ z1 = MULTIPLY(d1, FIX_1_061594337);
+ z2 = MULTIPLY(-d3, FIX_2_172734803);
+ z4 = MULTIPLY(z5, FIX_0_785694958);
+ z5 = MULTIPLY(z5, FIX_1_175875602);
+
+ tmp0 = z1 - z4;
+ tmp1 = z2 + z4;
+ tmp2 += z5;
+ tmp3 += z5;
+ } else {
+ /* d1 == 0, d3 != 0, d5 == 0, d7 == 0 */
+ tmp0 = MULTIPLY(-d3, FIX_0_785694958);
+ tmp1 = MULTIPLY(-d3, FIX_1_387039845);
+ tmp2 = MULTIPLY(-d3, FIX_0_275899380);
+ tmp3 = MULTIPLY(d3, FIX_1_175875602);
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 == 0, d7 == 0 */
+ tmp0 = MULTIPLY(d1, FIX_0_275899380);
+ tmp1 = MULTIPLY(d1, FIX_0_785694958);
+ tmp2 = MULTIPLY(d1, FIX_1_175875602);
+ tmp3 = MULTIPLY(d1, FIX_1_387039845);
+ } else {
+ /* d1 == 0, d3 == 0, d5 == 0, d7 == 0 */
+ tmp0 = tmp1 = tmp2 = tmp3 = 0;
+ }
+ }
+ }
+ }
+}
+ /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
+
+ dataptr[0] = (int16_t) DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS);
+ dataptr[7] = (int16_t) DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS);
+ dataptr[1] = (int16_t) DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS);
+ dataptr[6] = (int16_t) DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS);
+ dataptr[2] = (int16_t) DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS);
+ dataptr[5] = (int16_t) DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS);
+ dataptr[3] = (int16_t) DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
+ dataptr[4] = (int16_t) DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS);
+
+ dataptr += DCTSIZE; /* advance pointer to next row */
+ }
+
+ /* Pass 2: process columns. */
+ /* Note that we must descale the results by a factor of 8 == 2**3, */
+ /* and also undo the PASS1_BITS scaling. */
+
+ dataptr = data;
+ for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
+ /* Columns of zeroes can be exploited in the same way as we did with rows.
+ * However, the row calculation has created many nonzero AC terms, so the
+ * simplification applies less often (typically 5% to 10% of the time).
+ * On machines with very fast multiplication, it's possible that the
+ * test takes more time than it's worth. In that case this section
+ * may be commented out.
+ */
+
+ d0 = dataptr[DCTSIZE*0];
+ d1 = dataptr[DCTSIZE*1];
+ d2 = dataptr[DCTSIZE*2];
+ d3 = dataptr[DCTSIZE*3];
+ d4 = dataptr[DCTSIZE*4];
+ d5 = dataptr[DCTSIZE*5];
+ d6 = dataptr[DCTSIZE*6];
+ d7 = dataptr[DCTSIZE*7];
+
+ /* Even part: reverse the even part of the forward DCT. */
+ /* The rotator is sqrt(2)*c(-6). */
+ if (d6) {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */
+ z1 = MULTIPLY(d2 + d6, FIX_0_541196100);
+ tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065);
+ tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865);
+
+ tmp0 = (d0 + d4) * CONST_SCALE;
+ tmp1 = (d0 - d4) * CONST_SCALE;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */
+ tmp2 = MULTIPLY(-d6, FIX_1_306562965);
+ tmp3 = MULTIPLY(d6, FIX_0_541196100);
+
+ tmp0 = (d0 + d4) * CONST_SCALE;
+ tmp1 = (d0 - d4) * CONST_SCALE;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ }
+ } else {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */
+ tmp2 = MULTIPLY(d2, FIX_0_541196100);
+ tmp3 = MULTIPLY(d2, FIX_1_306562965);
+
+ tmp0 = (d0 + d4) * CONST_SCALE;
+ tmp1 = (d0 - d4) * CONST_SCALE;
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */
+ tmp10 = tmp13 = (d0 + d4) * CONST_SCALE;
+ tmp11 = tmp12 = (d0 - d4) * CONST_SCALE;
+ }
+ }
+
+ /* Odd part per figure 8; the matrix is unitary and hence its
+ * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
+ */
+ if (d7) {
+ if (d5) {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 != 0, d7 != 0 */
+ z1 = d7 + d1;
+ z2 = d5 + d3;
+ z3 = d7 + d3;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(z3 + z4, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 != 0, d7 != 0 */
+ z2 = d5 + d3;
+ z3 = d7 + d3;
+ z5 = MULTIPLY(z3 + d5, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 = z1 + z4;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 != 0, d7 != 0 */
+ z1 = d7 + d1;
+ z3 = d7;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(z3 + z4, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 = z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 == 0, d5 != 0, d7 != 0 */
+ tmp0 = MULTIPLY(-d7, FIX_0_601344887);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ tmp1 = MULTIPLY(-d5, FIX_0_509795579);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+ z5 = MULTIPLY(d5 + d7, FIX_1_175875602);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z3;
+ tmp1 += z4;
+ tmp2 = z2 + z3;
+ tmp3 = z1 + z4;
+ }
+ }
+ } else {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 == 0, d7 != 0 */
+ z1 = d7 + d1;
+ z3 = d7 + d3;
+ z5 = MULTIPLY(z3 + d1, FIX_1_175875602);
+
+ tmp0 = MULTIPLY(d7, FIX_0_298631336);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-z1, FIX_0_899976223);
+ z2 = MULTIPLY(-d3, FIX_2_562915447);
+ z3 = MULTIPLY(-z3, FIX_1_961570560);
+ z4 = MULTIPLY(-d1, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 += z1 + z3;
+ tmp1 = z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 == 0, d7 != 0 */
+ z3 = d7 + d3;
+
+ tmp0 = MULTIPLY(-d7, FIX_0_601344887);
+ z1 = MULTIPLY(-d7, FIX_0_899976223);
+ tmp2 = MULTIPLY(d3, FIX_0_509795579);
+ z2 = MULTIPLY(-d3, FIX_2_562915447);
+ z5 = MULTIPLY(z3, FIX_1_175875602);
+ z3 = MULTIPLY(-z3, FIX_0_785694958);
+
+ tmp0 += z3;
+ tmp1 = z2 + z5;
+ tmp2 += z3;
+ tmp3 = z1 + z5;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 == 0, d7 != 0 */
+ z1 = d7 + d1;
+ z5 = MULTIPLY(z1, FIX_1_175875602);
+
+ z1 = MULTIPLY(z1, FIX_0_275899380);
+ z3 = MULTIPLY(-d7, FIX_1_961570560);
+ tmp0 = MULTIPLY(-d7, FIX_1_662939225);
+ z4 = MULTIPLY(-d1, FIX_0_390180644);
+ tmp3 = MULTIPLY(d1, FIX_1_111140466);
+
+ tmp0 += z1;
+ tmp1 = z4 + z5;
+ tmp2 = z3 + z5;
+ tmp3 += z1;
+ } else {
+ /* d1 == 0, d3 == 0, d5 == 0, d7 != 0 */
+ tmp0 = MULTIPLY(-d7, FIX_1_387039845);
+ tmp1 = MULTIPLY(d7, FIX_1_175875602);
+ tmp2 = MULTIPLY(-d7, FIX_0_785694958);
+ tmp3 = MULTIPLY(d7, FIX_0_275899380);
+ }
+ }
+ }
+ } else {
+ if (d5) {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 != 0, d7 == 0 */
+ z2 = d5 + d3;
+ z4 = d5 + d1;
+ z5 = MULTIPLY(d3 + z4, FIX_1_175875602);
+
+ tmp1 = MULTIPLY(d5, FIX_2_053119869);
+ tmp2 = MULTIPLY(d3, FIX_3_072711026);
+ tmp3 = MULTIPLY(d1, FIX_1_501321110);
+ z1 = MULTIPLY(-d1, FIX_0_899976223);
+ z2 = MULTIPLY(-z2, FIX_2_562915447);
+ z3 = MULTIPLY(-d3, FIX_1_961570560);
+ z4 = MULTIPLY(-z4, FIX_0_390180644);
+
+ z3 += z5;
+ z4 += z5;
+
+ tmp0 = z1 + z3;
+ tmp1 += z2 + z4;
+ tmp2 += z2 + z3;
+ tmp3 += z1 + z4;
+ } else {
+ /* d1 == 0, d3 != 0, d5 != 0, d7 == 0 */
+ z2 = d5 + d3;
+
+ z5 = MULTIPLY(z2, FIX_1_175875602);
+ tmp1 = MULTIPLY(d5, FIX_1_662939225);
+ z4 = MULTIPLY(-d5, FIX_0_390180644);
+ z2 = MULTIPLY(-z2, FIX_1_387039845);
+ tmp2 = MULTIPLY(d3, FIX_1_111140466);
+ z3 = MULTIPLY(-d3, FIX_1_961570560);
+
+ tmp0 = z3 + z5;
+ tmp1 += z2;
+ tmp2 += z2;
+ tmp3 = z4 + z5;
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 != 0, d7 == 0 */
+ z4 = d5 + d1;
+
+ z5 = MULTIPLY(z4, FIX_1_175875602);
+ z1 = MULTIPLY(-d1, FIX_0_899976223);
+ tmp3 = MULTIPLY(d1, FIX_0_601344887);
+ tmp1 = MULTIPLY(-d5, FIX_0_509795579);
+ z2 = MULTIPLY(-d5, FIX_2_562915447);
+ z4 = MULTIPLY(z4, FIX_0_785694958);
+
+ tmp0 = z1 + z5;
+ tmp1 += z4;
+ tmp2 = z2 + z5;
+ tmp3 += z4;
+ } else {
+ /* d1 == 0, d3 == 0, d5 != 0, d7 == 0 */
+ tmp0 = MULTIPLY(d5, FIX_1_175875602);
+ tmp1 = MULTIPLY(d5, FIX_0_275899380);
+ tmp2 = MULTIPLY(-d5, FIX_1_387039845);
+ tmp3 = MULTIPLY(d5, FIX_0_785694958);
+ }
+ }
+ } else {
+ if (d3) {
+ if (d1) {
+ /* d1 != 0, d3 != 0, d5 == 0, d7 == 0 */
+ z5 = d1 + d3;
+ tmp3 = MULTIPLY(d1, FIX_0_211164243);
+ tmp2 = MULTIPLY(-d3, FIX_1_451774981);
+ z1 = MULTIPLY(d1, FIX_1_061594337);
+ z2 = MULTIPLY(-d3, FIX_2_172734803);
+ z4 = MULTIPLY(z5, FIX_0_785694958);
+ z5 = MULTIPLY(z5, FIX_1_175875602);
+
+ tmp0 = z1 - z4;
+ tmp1 = z2 + z4;
+ tmp2 += z5;
+ tmp3 += z5;
+ } else {
+ /* d1 == 0, d3 != 0, d5 == 0, d7 == 0 */
+ tmp0 = MULTIPLY(-d3, FIX_0_785694958);
+ tmp1 = MULTIPLY(-d3, FIX_1_387039845);
+ tmp2 = MULTIPLY(-d3, FIX_0_275899380);
+ tmp3 = MULTIPLY(d3, FIX_1_175875602);
+ }
+ } else {
+ if (d1) {
+ /* d1 != 0, d3 == 0, d5 == 0, d7 == 0 */
+ tmp0 = MULTIPLY(d1, FIX_0_275899380);
+ tmp1 = MULTIPLY(d1, FIX_0_785694958);
+ tmp2 = MULTIPLY(d1, FIX_1_175875602);
+ tmp3 = MULTIPLY(d1, FIX_1_387039845);
+ } else {
+ /* d1 == 0, d3 == 0, d5 == 0, d7 == 0 */
+ tmp0 = tmp1 = tmp2 = tmp3 = 0;
+ }
+ }
+ }
+ }
+
+ /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
+
+ dataptr[DCTSIZE*0] = (int16_t) DESCALE(tmp10 + tmp3,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*7] = (int16_t) DESCALE(tmp10 - tmp3,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*1] = (int16_t) DESCALE(tmp11 + tmp2,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*6] = (int16_t) DESCALE(tmp11 - tmp2,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*2] = (int16_t) DESCALE(tmp12 + tmp1,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*5] = (int16_t) DESCALE(tmp12 - tmp1,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*3] = (int16_t) DESCALE(tmp13 + tmp0,
+ CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSIZE*4] = (int16_t) DESCALE(tmp13 - tmp0,
+ CONST_BITS+PASS1_BITS+3);
+
+ dataptr++; /* advance pointer to next column */
+ }
+}
+
+#undef DCTSIZE
+#define DCTSIZE 4
+#define DCTSTRIDE 8
+
+void ff_j_rev_dct4(DCTBLOCK data)
+{
+ int32_t tmp0, tmp1, tmp2, tmp3;
+ int32_t tmp10, tmp11, tmp12, tmp13;
+ int32_t z1;
+ int32_t d0, d2, d4, d6;
+ register int16_t *dataptr;
+ int rowctr;
+
+ /* Pass 1: process rows. */
+ /* Note results are scaled up by sqrt(8) compared to a true IDCT; */
+ /* furthermore, we scale the results by 2**PASS1_BITS. */
+
+ data[0] += 4;
+
+ dataptr = data;
+
+ for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
+ /* Due to quantization, we will usually find that many of the input
+ * coefficients are zero, especially the AC terms. We can exploit this
+ * by short-circuiting the IDCT calculation for any row in which all
+ * the AC terms are zero. In that case each output is equal to the
+ * DC coefficient (with scale factor as needed).
+ * With typical images and quantization tables, half or more of the
+ * row DCT calculations can be simplified this way.
+ */
+
+ register uint8_t *idataptr = (uint8_t*)dataptr;
+
+ d0 = dataptr[0];
+ d2 = dataptr[1];
+ d4 = dataptr[2];
+ d6 = dataptr[3];
+
+ if ((d2 | d4 | d6) == 0) {
+ /* AC terms all zero */
+ if (d0) {
+ /* Compute a 32 bit value to assign. */
+ int16_t dcval = (int16_t) (d0 * (1 << PASS1_BITS));
+ register unsigned v = (dcval & 0xffff) | ((uint32_t)dcval << 16);
+
+ AV_WN32A(&idataptr[0], v);
+ AV_WN32A(&idataptr[4], v);
+ }
+
+ dataptr += DCTSTRIDE; /* advance pointer to next row */
+ continue;
+ }
+
+ /* Even part: reverse the even part of the forward DCT. */
+ /* The rotator is sqrt(2)*c(-6). */
+ if (d6) {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */
+ z1 = MULTIPLY(d2 + d6, FIX_0_541196100);
+ tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065);
+ tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865);
+
+ tmp0 = (d0 + d4) * (1 << CONST_BITS);
+ tmp1 = (d0 - d4) * (1 << CONST_BITS);
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */
+ tmp2 = MULTIPLY(-d6, FIX_1_306562965);
+ tmp3 = MULTIPLY(d6, FIX_0_541196100);
+
+ tmp0 = (d0 + d4) * (1 << CONST_BITS);
+ tmp1 = (d0 - d4) * (1 << CONST_BITS);
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ }
+ } else {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */
+ tmp2 = MULTIPLY(d2, FIX_0_541196100);
+ tmp3 = MULTIPLY(d2, FIX_1_306562965);
+
+ tmp0 = (d0 + d4) * (1 << CONST_BITS);
+ tmp1 = (d0 - d4) * (1 << CONST_BITS);
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */
+ tmp10 = tmp13 = (d0 + d4) * (1 << CONST_BITS);
+ tmp11 = tmp12 = (d0 - d4) * (1 << CONST_BITS);
+ }
+ }
+
+ /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
+
+ dataptr[0] = (int16_t) DESCALE(tmp10, CONST_BITS-PASS1_BITS);
+ dataptr[1] = (int16_t) DESCALE(tmp11, CONST_BITS-PASS1_BITS);
+ dataptr[2] = (int16_t) DESCALE(tmp12, CONST_BITS-PASS1_BITS);
+ dataptr[3] = (int16_t) DESCALE(tmp13, CONST_BITS-PASS1_BITS);
+
+ dataptr += DCTSTRIDE; /* advance pointer to next row */
+ }
+
+ /* Pass 2: process columns. */
+ /* Note that we must descale the results by a factor of 8 == 2**3, */
+ /* and also undo the PASS1_BITS scaling. */
+
+ dataptr = data;
+ for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
+ /* Columns of zeroes can be exploited in the same way as we did with rows.
+ * However, the row calculation has created many nonzero AC terms, so the
+ * simplification applies less often (typically 5% to 10% of the time).
+ * On machines with very fast multiplication, it's possible that the
+ * test takes more time than it's worth. In that case this section
+ * may be commented out.
+ */
+
+ d0 = dataptr[DCTSTRIDE*0];
+ d2 = dataptr[DCTSTRIDE*1];
+ d4 = dataptr[DCTSTRIDE*2];
+ d6 = dataptr[DCTSTRIDE*3];
+
+ /* Even part: reverse the even part of the forward DCT. */
+ /* The rotator is sqrt(2)*c(-6). */
+ if (d6) {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */
+ z1 = MULTIPLY(d2 + d6, FIX_0_541196100);
+ tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065);
+ tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865);
+
+ tmp0 = (d0 + d4) * (1 << CONST_BITS);
+ tmp1 = (d0 - d4) * (1 << CONST_BITS);
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */
+ tmp2 = MULTIPLY(-d6, FIX_1_306562965);
+ tmp3 = MULTIPLY(d6, FIX_0_541196100);
+
+ tmp0 = (d0 + d4) * (1 << CONST_BITS);
+ tmp1 = (d0 - d4) * (1 << CONST_BITS);
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ }
+ } else {
+ if (d2) {
+ /* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */
+ tmp2 = MULTIPLY(d2, FIX_0_541196100);
+ tmp3 = MULTIPLY(d2, FIX_1_306562965);
+
+ tmp0 = (d0 + d4) * (1 << CONST_BITS);
+ tmp1 = (d0 - d4) * (1 << CONST_BITS);
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ } else {
+ /* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */
+ tmp10 = tmp13 = (d0 + d4) * (1 << CONST_BITS);
+ tmp11 = tmp12 = (d0 - d4) * (1 << CONST_BITS);
+ }
+ }
+
+ /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
+
+ dataptr[DCTSTRIDE*0] = tmp10 >> (CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSTRIDE*1] = tmp11 >> (CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSTRIDE*2] = tmp12 >> (CONST_BITS+PASS1_BITS+3);
+ dataptr[DCTSTRIDE*3] = tmp13 >> (CONST_BITS+PASS1_BITS+3);
+
+ dataptr++; /* advance pointer to next column */
+ }
+}
+
+void ff_j_rev_dct2(DCTBLOCK data){
+ int d00, d01, d10, d11;
+
+ data[0] += 4;
+ d00 = data[0+0*DCTSTRIDE] + data[1+0*DCTSTRIDE];
+ d01 = data[0+0*DCTSTRIDE] - data[1+0*DCTSTRIDE];
+ d10 = data[0+1*DCTSTRIDE] + data[1+1*DCTSTRIDE];
+ d11 = data[0+1*DCTSTRIDE] - data[1+1*DCTSTRIDE];
+
+ data[0+0*DCTSTRIDE]= (d00 + d10)>>3;
+ data[1+0*DCTSTRIDE]= (d01 + d11)>>3;
+ data[0+1*DCTSTRIDE]= (d00 - d10)>>3;
+ data[1+1*DCTSTRIDE]= (d01 - d11)>>3;
+}
+
+void ff_j_rev_dct1(DCTBLOCK data){
+ data[0] = (data[0] + 4)>>3;
+}
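+
+/* Added note (hedged): ff_j_rev_dct4/2/1 above transform only the top-left
+ * 4x4, 2x2 and 1x1 low-frequency corner of the block, using a stride of
+ * DCTSTRIDE = 8; the "data[0] += 4" lines bias the DC term by half of the
+ * effective divide-by-8 so the final shift rounds instead of truncating.
+ * They are presumably intended for reduced-resolution ("lowres") decoding.
+ */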
+
+#undef FIX
+#undef CONST_BITS
+
+void ff_jref_idct_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ ff_j_rev_dct(block);
+ ff_put_pixels_clamped_c(block, dest, line_size);
+}
+
+void ff_jref_idct_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ ff_j_rev_dct(block);
+ ff_add_pixels_clamped_c(block, dest, line_size);
+}
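+
+/* Added note: these two wrappers run the reference IDCT in place and then
+ * hand the block to the clamped put/add helpers, which respectively store
+ * the saturated result into dest or add it to the existing prediction there,
+ * clamping to the valid pixel range.
+ */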
diff --git a/media/ffvpx/libavcodec/libdav1d.c b/media/ffvpx/libavcodec/libdav1d.c
new file mode 100644
index 0000000000..2488a709c7
--- /dev/null
+++ b/media/ffvpx/libavcodec/libdav1d.c
@@ -0,0 +1,642 @@
+/*
+ * Copyright (c) 2018 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (c) 2018 James Almer <jamrial gmail com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <dav1d/dav1d.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/cpu.h"
+#include "libavutil/film_grain_params.h"
+#include "libavutil/mastering_display_metadata.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+
+#include "atsc_a53.h"
+#include "avcodec.h"
+#include "bytestream.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "internal.h"
+
+#define FF_DAV1D_VERSION_AT_LEAST(x,y) \
+ (DAV1D_API_VERSION_MAJOR > (x) || DAV1D_API_VERSION_MAJOR == (x) && DAV1D_API_VERSION_MINOR >= (y))
+
+typedef struct Libdav1dContext {
+ AVClass *class;
+ Dav1dContext *c;
+ AVBufferPool *pool;
+ int pool_size;
+
+ Dav1dData data;
+ int tile_threads;
+ int frame_threads;
+ int max_frame_delay;
+ int apply_grain;
+ int operating_point;
+ int all_layers;
+} Libdav1dContext;
+
+static const enum AVPixelFormat pix_fmt[][3] = {
+ [DAV1D_PIXEL_LAYOUT_I400] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12 },
+ [DAV1D_PIXEL_LAYOUT_I420] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12 },
+ [DAV1D_PIXEL_LAYOUT_I422] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12 },
+ [DAV1D_PIXEL_LAYOUT_I444] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12 },
+};
+
+static const enum AVPixelFormat pix_fmt_rgb[3] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
+};
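+
+// Added note (hedged): dav1d reports bit depth via seq_hdr->hbd (0, 1 or 2
+// for 8-, 10- and 12-bit content at the time of writing), which is what
+// indexes the second dimension of pix_fmt[] and pix_fmt_rgb[] above.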
+
+static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
+{
+ AVCodecContext *c = opaque;
+
+ av_vlog(c, AV_LOG_ERROR, fmt, vl);
+}
+
+static int libdav1d_picture_allocator(Dav1dPicture *p, void *cookie)
+{
+ Libdav1dContext *dav1d = cookie;
+ enum AVPixelFormat format = pix_fmt[p->p.layout][p->seq_hdr->hbd];
+ int ret, linesize[4], h = FFALIGN(p->p.h, 128), w = FFALIGN(p->p.w, 128);
+ uint8_t *aligned_ptr, *data[4];
+ AVBufferRef *buf;
+
+ ret = av_image_get_buffer_size(format, w, h, DAV1D_PICTURE_ALIGNMENT);
+ if (ret < 0)
+ return ret;
+
+ if (ret != dav1d->pool_size) {
+ av_buffer_pool_uninit(&dav1d->pool);
+ // Use twice the amount of required padding bytes for aligned_ptr below.
+ dav1d->pool = av_buffer_pool_init(ret + DAV1D_PICTURE_ALIGNMENT * 2, NULL);
+ if (!dav1d->pool) {
+ dav1d->pool_size = 0;
+ return AVERROR(ENOMEM);
+ }
+ dav1d->pool_size = ret;
+ }
+ buf = av_buffer_pool_get(dav1d->pool);
+ if (!buf)
+ return AVERROR(ENOMEM);
+
+ // libdav1d requires DAV1D_PICTURE_ALIGNMENT aligned buffers, which av_malloc()
+ // doesn't guarantee for example when AVX is disabled at configure time.
+ // Use the extra DAV1D_PICTURE_ALIGNMENT padding bytes in the buffer to align it
+ // if required.
+ aligned_ptr = (uint8_t *)FFALIGN((uintptr_t)buf->data, DAV1D_PICTURE_ALIGNMENT);
+ ret = av_image_fill_arrays(data, linesize, aligned_ptr, format, w, h,
+ DAV1D_PICTURE_ALIGNMENT);
+ if (ret < 0) {
+ av_buffer_unref(&buf);
+ return ret;
+ }
+
+ p->data[0] = data[0];
+ p->data[1] = data[1];
+ p->data[2] = data[2];
+ p->stride[0] = linesize[0];
+ p->stride[1] = linesize[1];
+ p->allocator_data = buf;
+
+ return 0;
+}
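+
+// Added note: rounding buf->data up to the next multiple of
+// DAV1D_PICTURE_ALIGNMENT consumes at most DAV1D_PICTURE_ALIGNMENT - 1 bytes,
+// so the pool buffers above, over-allocated by twice that alignment, always
+// leave the full image size available behind aligned_ptr.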
+
+static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
+{
+ AVBufferRef *buf = p->allocator_data;
+
+ av_buffer_unref(&buf);
+}
+
+static void libdav1d_init_params(AVCodecContext *c, const Dav1dSequenceHeader *seq)
+{
+ c->profile = seq->profile;
+ c->level = ((seq->operating_points[0].major_level - 2) << 2)
+ | seq->operating_points[0].minor_level;
+
+ switch (seq->chr) {
+ case DAV1D_CHR_VERTICAL:
+ c->chroma_sample_location = AVCHROMA_LOC_LEFT;
+ break;
+ case DAV1D_CHR_COLOCATED:
+ c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
+ break;
+ }
+ c->colorspace = (enum AVColorSpace) seq->mtrx;
+ c->color_primaries = (enum AVColorPrimaries) seq->pri;
+ c->color_trc = (enum AVColorTransferCharacteristic) seq->trc;
+ c->color_range = seq->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+
+ if (seq->layout == DAV1D_PIXEL_LAYOUT_I444 &&
+ seq->mtrx == DAV1D_MC_IDENTITY &&
+ seq->pri == DAV1D_COLOR_PRI_BT709 &&
+ seq->trc == DAV1D_TRC_SRGB)
+ c->pix_fmt = pix_fmt_rgb[seq->hbd];
+ else
+ c->pix_fmt = pix_fmt[seq->layout][seq->hbd];
+
+ if (seq->num_units_in_tick && seq->time_scale) {
+ av_reduce(&c->framerate.den, &c->framerate.num,
+ seq->num_units_in_tick, seq->time_scale, INT_MAX);
+ if (seq->equal_picture_interval)
+ c->ticks_per_frame = seq->num_ticks_per_picture;
+ }
+
+ if (seq->film_grain_present)
+ c->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
+ else
+ c->properties &= ~FF_CODEC_PROPERTY_FILM_GRAIN;
+}
+
+static av_cold int libdav1d_parse_extradata(AVCodecContext *c)
+{
+ Dav1dSequenceHeader seq;
+ size_t offset = 0;
+ int res;
+
+ if (!c->extradata || c->extradata_size <= 0)
+ return 0;
+
+ if (c->extradata[0] & 0x80) {
+ int version = c->extradata[0] & 0x7F;
+
+ if (version != 1 || c->extradata_size < 4) {
+ int explode = !!(c->err_recognition & AV_EF_EXPLODE);
+ av_log(c, explode ? AV_LOG_ERROR : AV_LOG_WARNING,
+ "Error decoding extradata\n");
+ return explode ? AVERROR_INVALIDDATA : 0;
+ }
+
+ // Do nothing if there are no configOBUs to parse
+ if (c->extradata_size == 4)
+ return 0;
+
+ offset = 4;
+ }
+
+ res = dav1d_parse_sequence_header(&seq, c->extradata + offset,
+ c->extradata_size - offset);
+ if (res < 0)
+ return 0; // Assume no seqhdr OBUs are present
+
+ libdav1d_init_params(c, &seq);
+ res = ff_set_dimensions(c, seq.max_width, seq.max_height);
+ if (res < 0)
+ return res;
+
+ return 0;
+}
+
+static av_cold int libdav1d_init(AVCodecContext *c)
+{
+ Libdav1dContext *dav1d = c->priv_data;
+ Dav1dSettings s;
+#if FF_DAV1D_VERSION_AT_LEAST(6,0)
+ int threads = c->thread_count;
+#else
+ int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2;
+#endif
+ int res;
+
+ av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version());
+
+ dav1d_default_settings(&s);
+ s.logger.cookie = c;
+ s.logger.callback = libdav1d_log_callback;
+ s.allocator.cookie = dav1d;
+ s.allocator.alloc_picture_callback = libdav1d_picture_allocator;
+ s.allocator.release_picture_callback = libdav1d_picture_release;
+ s.frame_size_limit = c->max_pixels;
+ if (dav1d->apply_grain >= 0)
+ s.apply_grain = dav1d->apply_grain;
+ else
+ s.apply_grain = !(c->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);
+
+ s.all_layers = dav1d->all_layers;
+ if (dav1d->operating_point >= 0)
+ s.operating_point = dav1d->operating_point;
+#if FF_DAV1D_VERSION_AT_LEAST(6,2)
+ s.strict_std_compliance = c->strict_std_compliance > 0;
+#endif
+
+#if FF_DAV1D_VERSION_AT_LEAST(6,0)
+ if (dav1d->frame_threads || dav1d->tile_threads)
+ s.n_threads = FFMAX(dav1d->frame_threads, dav1d->tile_threads);
+ else
+ s.n_threads = FFMIN(threads, DAV1D_MAX_THREADS);
+ if (dav1d->max_frame_delay > 0 && (c->flags & AV_CODEC_FLAG_LOW_DELAY))
+ av_log(c, AV_LOG_WARNING, "Low delay mode requested, forcing max_frame_delay 1\n");
+ s.max_frame_delay = (c->flags & AV_CODEC_FLAG_LOW_DELAY) ? 1 : dav1d->max_frame_delay;
+ av_log(c, AV_LOG_DEBUG, "Using %d threads, %d max_frame_delay\n",
+ s.n_threads, s.max_frame_delay);
+#else
+ s.n_tile_threads = dav1d->tile_threads
+ ? dav1d->tile_threads
+ : FFMIN(floor(sqrt(threads)), DAV1D_MAX_TILE_THREADS);
+ s.n_frame_threads = dav1d->frame_threads
+ ? dav1d->frame_threads
+ : FFMIN(ceil(threads / s.n_tile_threads), DAV1D_MAX_FRAME_THREADS);
+ if (dav1d->max_frame_delay > 0)
+ s.n_frame_threads = FFMIN(s.n_frame_threads, dav1d->max_frame_delay);
+ av_log(c, AV_LOG_DEBUG, "Using %d frame threads, %d tile threads\n",
+ s.n_frame_threads, s.n_tile_threads);
+#endif
+
+#if FF_DAV1D_VERSION_AT_LEAST(6,8)
+ if (c->skip_frame >= AVDISCARD_NONKEY)
+ s.decode_frame_type = DAV1D_DECODEFRAMETYPE_KEY;
+ else if (c->skip_frame >= AVDISCARD_NONINTRA)
+ s.decode_frame_type = DAV1D_DECODEFRAMETYPE_INTRA;
+ else if (c->skip_frame >= AVDISCARD_NONREF)
+ s.decode_frame_type = DAV1D_DECODEFRAMETYPE_REFERENCE;
+#endif
+
+ res = libdav1d_parse_extradata(c);
+ if (res < 0)
+ return res;
+
+ res = dav1d_open(&dav1d->c, &s);
+ if (res < 0)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static void libdav1d_flush(AVCodecContext *c)
+{
+ Libdav1dContext *dav1d = c->priv_data;
+
+ dav1d_data_unref(&dav1d->data);
+ dav1d_flush(dav1d->c);
+}
+
+typedef struct OpaqueData {
+ void *pkt_orig_opaque;
+#if FF_API_REORDERED_OPAQUE
+ int64_t reordered_opaque;
+#endif
+} OpaqueData;
+
+static void libdav1d_data_free(const uint8_t *data, void *opaque) {
+ AVBufferRef *buf = opaque;
+
+ av_buffer_unref(&buf);
+}
+
+static void libdav1d_user_data_free(const uint8_t *data, void *opaque) {
+ AVPacket *pkt = opaque;
+ av_assert0(data == opaque);
+ av_free(pkt->opaque);
+ av_packet_free(&pkt);
+}
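+
+// Added note: the ownership scheme here is that each input AVPacket is handed
+// to dav1d as Dav1dData user data (see dav1d_data_wrap_user_data() below), so
+// its timestamps and opaque field ride along through dav1d's internal frame
+// reordering and are recovered in libdav1d_receive_frame() from
+// p->m.user_data.data once the corresponding picture is output.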
+
+static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
+{
+ Libdav1dContext *dav1d = c->priv_data;
+ Dav1dData *data = &dav1d->data;
+ Dav1dPicture pic = { 0 }, *p = &pic;
+ AVPacket *pkt;
+ OpaqueData *od = NULL;
+#if FF_DAV1D_VERSION_AT_LEAST(5,1)
+ enum Dav1dEventFlags event_flags = 0;
+#endif
+ int res;
+
+ if (!data->sz) {
+ pkt = av_packet_alloc();
+
+ if (!pkt)
+ return AVERROR(ENOMEM);
+
+ res = ff_decode_get_packet(c, pkt);
+ if (res < 0 && res != AVERROR_EOF) {
+ av_packet_free(&pkt);
+ return res;
+ }
+
+ if (pkt->size) {
+ res = dav1d_data_wrap(data, pkt->data, pkt->size,
+ libdav1d_data_free, pkt->buf);
+ if (res < 0) {
+ av_packet_free(&pkt);
+ return res;
+ }
+
+ pkt->buf = NULL;
+
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (
+#if FF_API_REORDERED_OPAQUE
+ c->reordered_opaque != AV_NOPTS_VALUE ||
+#endif
+ (pkt->opaque && (c->flags & AV_CODEC_FLAG_COPY_OPAQUE))) {
+ od = av_mallocz(sizeof(*od));
+ if (!od) {
+ av_packet_free(&pkt);
+ dav1d_data_unref(data);
+ return AVERROR(ENOMEM);
+ }
+ od->pkt_orig_opaque = pkt->opaque;
+#if FF_API_REORDERED_OPAQUE
+ od->reordered_opaque = c->reordered_opaque;
+#endif
+FF_ENABLE_DEPRECATION_WARNINGS
+ }
+ pkt->opaque = od;
+
+ res = dav1d_data_wrap_user_data(data, (const uint8_t *)pkt,
+ libdav1d_user_data_free, pkt);
+ if (res < 0) {
+ av_free(pkt->opaque);
+ av_packet_free(&pkt);
+ dav1d_data_unref(data);
+ return res;
+ }
+ pkt = NULL;
+ } else {
+ av_packet_free(&pkt);
+ if (res >= 0)
+ return AVERROR(EAGAIN);
+ }
+ }
+
+ res = dav1d_send_data(dav1d->c, data);
+ if (res < 0) {
+ if (res == AVERROR(EINVAL))
+ res = AVERROR_INVALIDDATA;
+ if (res != AVERROR(EAGAIN)) {
+ dav1d_data_unref(data);
+ return res;
+ }
+ }
+
+ res = dav1d_get_picture(dav1d->c, p);
+ if (res < 0) {
+ if (res == AVERROR(EINVAL))
+ res = AVERROR_INVALIDDATA;
+ else if (res == AVERROR(EAGAIN) && c->internal->draining)
+ res = AVERROR_EOF;
+
+ return res;
+ }
+
+ av_assert0(p->data[0] && p->allocator_data);
+
+ // This requires the custom allocator above
+ frame->buf[0] = av_buffer_ref(p->allocator_data);
+ if (!frame->buf[0]) {
+ dav1d_picture_unref(p);
+ return AVERROR(ENOMEM);
+ }
+
+ frame->data[0] = p->data[0];
+ frame->data[1] = p->data[1];
+ frame->data[2] = p->data[2];
+ frame->linesize[0] = p->stride[0];
+ frame->linesize[1] = p->stride[1];
+ frame->linesize[2] = p->stride[1];
+
+#if FF_DAV1D_VERSION_AT_LEAST(5,1)
+ dav1d_get_event_flags(dav1d->c, &event_flags);
+ if (c->pix_fmt == AV_PIX_FMT_NONE ||
+ event_flags & DAV1D_EVENT_FLAG_NEW_SEQUENCE)
+#endif
+ libdav1d_init_params(c, p->seq_hdr);
+ res = ff_decode_frame_props(c, frame);
+ if (res < 0)
+ goto fail;
+
+ frame->width = p->p.w;
+ frame->height = p->p.h;
+ if (c->width != p->p.w || c->height != p->p.h) {
+ res = ff_set_dimensions(c, p->p.w, p->p.h);
+ if (res < 0)
+ goto fail;
+ }
+
+ av_reduce(&frame->sample_aspect_ratio.num,
+ &frame->sample_aspect_ratio.den,
+ frame->height * (int64_t)p->frame_hdr->render_width,
+ frame->width * (int64_t)p->frame_hdr->render_height,
+ INT_MAX);
+ ff_set_sar(c, frame->sample_aspect_ratio);
+
+ pkt = (AVPacket *)p->m.user_data.data;
+ od = pkt->opaque;
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (od && od->reordered_opaque != AV_NOPTS_VALUE)
+ frame->reordered_opaque = od->reordered_opaque;
+ else
+ frame->reordered_opaque = AV_NOPTS_VALUE;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ // restore the original user opaque value for
+ // ff_decode_frame_props_from_pkt()
+ pkt->opaque = od ? od->pkt_orig_opaque : NULL;
+ av_freep(&od);
+
+ // match timestamps and packet size
+ res = ff_decode_frame_props_from_pkt(c, frame, pkt);
+ pkt->opaque = NULL;
+ if (res < 0)
+ goto fail;
+
+ frame->pkt_dts = pkt->pts;
+ frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
+
+ switch (p->frame_hdr->frame_type) {
+ case DAV1D_FRAME_TYPE_KEY:
+ case DAV1D_FRAME_TYPE_INTRA:
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ break;
+ case DAV1D_FRAME_TYPE_INTER:
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ break;
+ case DAV1D_FRAME_TYPE_SWITCH:
+ frame->pict_type = AV_PICTURE_TYPE_SP;
+ break;
+ default:
+ res = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
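+    /* dav1d reports mastering display metadata in the AV1 fixed-point formats
+     * (primaries and white point in 0.16, max_luminance in 24.8 and
+     * min_luminance in 18.14), hence the av_make_q() denominators used below. */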
+ if (p->mastering_display) {
+ AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
+ if (!mastering) {
+ res = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ for (int i = 0; i < 3; i++) {
+ mastering->display_primaries[i][0] = av_make_q(p->mastering_display->primaries[i][0], 1 << 16);
+ mastering->display_primaries[i][1] = av_make_q(p->mastering_display->primaries[i][1], 1 << 16);
+ }
+ mastering->white_point[0] = av_make_q(p->mastering_display->white_point[0], 1 << 16);
+ mastering->white_point[1] = av_make_q(p->mastering_display->white_point[1], 1 << 16);
+
+ mastering->max_luminance = av_make_q(p->mastering_display->max_luminance, 1 << 8);
+ mastering->min_luminance = av_make_q(p->mastering_display->min_luminance, 1 << 14);
+
+ mastering->has_primaries = 1;
+ mastering->has_luminance = 1;
+ }
+ if (p->content_light) {
+ AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
+ if (!light) {
+ res = AVERROR(ENOMEM);
+ goto fail;
+ }
+ light->MaxCLL = p->content_light->max_content_light_level;
+ light->MaxFALL = p->content_light->max_frame_average_light_level;
+ }
+ if (p->itut_t35) {
+ GetByteContext gb;
+ unsigned int user_identifier;
+
+ bytestream2_init(&gb, p->itut_t35->payload, p->itut_t35->payload_size);
+ bytestream2_skip(&gb, 1); // terminal provider code
+ bytestream2_skip(&gb, 1); // terminal provider oriented code
+ user_identifier = bytestream2_get_be32(&gb);
+ switch (user_identifier) {
+ case MKBETAG('G', 'A', '9', '4'): { // closed captions
+ AVBufferRef *buf = NULL;
+
+ res = ff_parse_a53_cc(&buf, gb.buffer, bytestream2_get_bytes_left(&gb));
+ if (res < 0)
+ goto fail;
+ if (!res)
+ break;
+
+ if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_A53_CC, buf))
+ av_buffer_unref(&buf);
+
+ c->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
+ break;
+ }
+ default: // ignore unsupported identifiers
+ break;
+ }
+ }
+ if (p->frame_hdr->film_grain.present && (!dav1d->apply_grain ||
+ (c->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN))) {
+ AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(frame);
+ if (!fgp) {
+ res = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
+ fgp->seed = p->frame_hdr->film_grain.data.seed;
+ fgp->codec.aom.num_y_points = p->frame_hdr->film_grain.data.num_y_points;
+ fgp->codec.aom.chroma_scaling_from_luma = p->frame_hdr->film_grain.data.chroma_scaling_from_luma;
+ fgp->codec.aom.scaling_shift = p->frame_hdr->film_grain.data.scaling_shift;
+ fgp->codec.aom.ar_coeff_lag = p->frame_hdr->film_grain.data.ar_coeff_lag;
+ fgp->codec.aom.ar_coeff_shift = p->frame_hdr->film_grain.data.ar_coeff_shift;
+ fgp->codec.aom.grain_scale_shift = p->frame_hdr->film_grain.data.grain_scale_shift;
+ fgp->codec.aom.overlap_flag = p->frame_hdr->film_grain.data.overlap_flag;
+ fgp->codec.aom.limit_output_range = p->frame_hdr->film_grain.data.clip_to_restricted_range;
+
+ memcpy(&fgp->codec.aom.y_points, &p->frame_hdr->film_grain.data.y_points,
+ sizeof(fgp->codec.aom.y_points));
+ memcpy(&fgp->codec.aom.num_uv_points, &p->frame_hdr->film_grain.data.num_uv_points,
+ sizeof(fgp->codec.aom.num_uv_points));
+ memcpy(&fgp->codec.aom.uv_points, &p->frame_hdr->film_grain.data.uv_points,
+ sizeof(fgp->codec.aom.uv_points));
+ memcpy(&fgp->codec.aom.ar_coeffs_y, &p->frame_hdr->film_grain.data.ar_coeffs_y,
+ sizeof(fgp->codec.aom.ar_coeffs_y));
+ memcpy(&fgp->codec.aom.ar_coeffs_uv[0], &p->frame_hdr->film_grain.data.ar_coeffs_uv[0],
+ sizeof(fgp->codec.aom.ar_coeffs_uv[0]));
+ memcpy(&fgp->codec.aom.ar_coeffs_uv[1], &p->frame_hdr->film_grain.data.ar_coeffs_uv[1],
+ sizeof(fgp->codec.aom.ar_coeffs_uv[1]));
+ memcpy(&fgp->codec.aom.uv_mult, &p->frame_hdr->film_grain.data.uv_mult,
+ sizeof(fgp->codec.aom.uv_mult));
+ memcpy(&fgp->codec.aom.uv_mult_luma, &p->frame_hdr->film_grain.data.uv_luma_mult,
+ sizeof(fgp->codec.aom.uv_mult_luma));
+ memcpy(&fgp->codec.aom.uv_offset, &p->frame_hdr->film_grain.data.uv_offset,
+ sizeof(fgp->codec.aom.uv_offset));
+ }
+
+ res = 0;
+fail:
+ dav1d_picture_unref(p);
+ if (res < 0)
+ av_frame_unref(frame);
+ return res;
+}
+
+static av_cold int libdav1d_close(AVCodecContext *c)
+{
+ Libdav1dContext *dav1d = c->priv_data;
+
+ av_buffer_pool_uninit(&dav1d->pool);
+ dav1d_data_unref(&dav1d->data);
+ dav1d_close(&dav1d->c);
+
+ return 0;
+}
+
+#ifndef DAV1D_MAX_FRAME_THREADS
+#define DAV1D_MAX_FRAME_THREADS DAV1D_MAX_THREADS
+#endif
+#ifndef DAV1D_MAX_TILE_THREADS
+#define DAV1D_MAX_TILE_THREADS DAV1D_MAX_THREADS
+#endif
+#ifndef DAV1D_MAX_FRAME_DELAY
+#define DAV1D_MAX_FRAME_DELAY DAV1D_MAX_FRAME_THREADS
+#endif
+
+#define OFFSET(x) offsetof(Libdav1dContext, x)
+#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+static const AVOption libdav1d_options[] = {
+ { "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS, VD | AV_OPT_FLAG_DEPRECATED },
+ { "framethreads", "Frame threads", OFFSET(frame_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD | AV_OPT_FLAG_DEPRECATED },
+ { "max_frame_delay", "Max frame delay", OFFSET(max_frame_delay), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_DELAY, VD },
+ { "filmgrain", "Apply Film Grain", OFFSET(apply_grain), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD | AV_OPT_FLAG_DEPRECATED },
+ { "oppoint", "Select an operating point of the scalable bitstream", OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31, VD },
+ { "alllayers", "Output all spatial layers", OFFSET(all_layers), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
+ { NULL }
+};
+
+static const AVClass libdav1d_class = {
+ .class_name = "libdav1d decoder",
+ .item_name = av_default_item_name,
+ .option = libdav1d_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+const FFCodec ff_libdav1d_decoder = {
+ .p.name = "libdav1d",
+ CODEC_LONG_NAME("dav1d AV1 decoder by VideoLAN"),
+ .p.type = AVMEDIA_TYPE_VIDEO,
+ .p.id = AV_CODEC_ID_AV1,
+ .priv_data_size = sizeof(Libdav1dContext),
+ .init = libdav1d_init,
+ .close = libdav1d_close,
+ .flush = libdav1d_flush,
+ FF_CODEC_RECEIVE_FRAME_CB(libdav1d_receive_frame),
+ .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
+ .caps_internal = FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_SETS_FRAME_PROPS |
+ FF_CODEC_CAP_AUTO_THREADS,
+ .p.priv_class = &libdav1d_class,
+ .p.wrapper_name = "libdav1d",
+};
diff --git a/media/ffvpx/libavcodec/log2_tab.c b/media/ffvpx/libavcodec/log2_tab.c
new file mode 100644
index 0000000000..47a1df03b7
--- /dev/null
+++ b/media/ffvpx/libavcodec/log2_tab.c
@@ -0,0 +1 @@
+#include "libavutil/log2_tab.c"
diff --git a/media/ffvpx/libavcodec/mathops.h b/media/ffvpx/libavcodec/mathops.h
new file mode 100644
index 0000000000..a1dc323304
--- /dev/null
+++ b/media/ffvpx/libavcodec/mathops.h
@@ -0,0 +1,255 @@
+/*
+ * simple math operations
+ * Copyright (c) 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef AVCODEC_MATHOPS_H
+#define AVCODEC_MATHOPS_H
+
+#include <stdint.h>
+
+#include "libavutil/attributes_internal.h"
+#include "libavutil/common.h"
+#include "config.h"
+
+#define MAX_NEG_CROP 1024
+
+extern const uint32_t ff_inverse[257];
+extern const uint8_t ff_log2_run[41];
+extern const uint8_t ff_sqrt_tab[256];
+extern const uint8_t attribute_visibility_hidden ff_crop_tab[256 + 2 * MAX_NEG_CROP];
+extern const uint8_t ff_zigzag_direct[64];
+extern const uint8_t ff_zigzag_scan[16+1];
+
+#if ARCH_ARM
+# include "arm/mathops.h"
+#elif ARCH_AVR32
+# include "avr32/mathops.h"
+#elif ARCH_MIPS
+# include "mips/mathops.h"
+#elif ARCH_PPC
+# include "ppc/mathops.h"
+#elif ARCH_X86
+# include "x86/mathops.h"
+#endif
+
+/* generic implementation */
+
+#ifndef MUL64
+# define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
+#endif
+
+#ifndef MULL
+# define MULL(a,b,s) (MUL64(a, b) >> (s))
+#endif
+
+#ifndef MULH
+static av_always_inline int MULH(int a, int b){
+ return MUL64(a, b) >> 32;
+}
+#endif
+
+#ifndef UMULH
+static av_always_inline unsigned UMULH(unsigned a, unsigned b){
+ return ((uint64_t)(a) * (uint64_t)(b))>>32;
+}
+#endif
+
+#ifndef MAC64
+# define MAC64(d, a, b) ((d) += MUL64(a, b))
+#endif
+
+#ifndef MLS64
+# define MLS64(d, a, b) ((d) -= MUL64(a, b))
+#endif
+
+/* signed 16x16 -> 32 multiply add accumulate */
+#ifndef MAC16
+# define MAC16(rt, ra, rb) rt += (ra) * (rb)
+#endif
+
+/* signed 16x16 -> 32 multiply */
+#ifndef MUL16
+# define MUL16(ra, rb) ((ra) * (rb))
+#endif
+
+#ifndef MLS16
+# define MLS16(rt, ra, rb) ((rt) -= (ra) * (rb))
+#endif
+
+/* median of 3 */
+#ifndef mid_pred
+#define mid_pred mid_pred
+static inline av_const int mid_pred(int a, int b, int c)
+{
+ if(a>b){
+ if(c>b){
+ if(c>a) b=a;
+ else b=c;
+ }
+ }else{
+ if(b>c){
+ if(c>a) b=c;
+ else b=a;
+ }
+ }
+ return b;
+}
+#endif
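+
+/* Illustration: mid_pred(1, 5, 3) == 3 and mid_pred(-2, 7, 0) == 0, i.e. the
+ * median of the three arguments, as used for median motion-vector
+ * prediction. */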
+
+#ifndef median4
+#define median4 median4
+static inline av_const int median4(int a, int b, int c, int d)
+{
+ if (a < b) {
+ if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
+ else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
+ } else {
+ if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
+ else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
+ }
+}
+#endif
+
+#define FF_SIGNBIT(x) ((x) >> CHAR_BIT * sizeof(x) - 1)
+
+#ifndef sign_extend
+static inline av_const int sign_extend(int val, unsigned bits)
+{
+ unsigned shift = 8 * sizeof(int) - bits;
+ union { unsigned u; int s; } v = { (unsigned) val << shift };
+ return v.s >> shift;
+}
+#endif
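+
+/* Illustration: sign_extend(0xFF, 8) == -1 and sign_extend(0x7F, 8) == 127;
+ * the unsigned left shift plus the union type-pun treats bit (bits - 1) as
+ * the sign bit without relying on signed-overflow behaviour. */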
+
+#ifndef sign_extend64
+static inline av_const int64_t sign_extend64(int64_t val, unsigned bits)
+{
+ unsigned shift = 8 * sizeof(int64_t) - bits;
+ union { uint64_t u; int64_t s; } v = { (uint64_t) val << shift };
+ return v.s >> shift;
+}
+#endif
+
+#ifndef zero_extend
+static inline av_const unsigned zero_extend(unsigned val, unsigned bits)
+{
+ return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits);
+}
+#endif
+
+#ifndef COPY3_IF_LT
+#define COPY3_IF_LT(x, y, a, b, c, d)\
+if ((y) < (x)) {\
+ (x) = (y);\
+ (a) = (b);\
+ (c) = (d);\
+}
+#endif
+
+#ifndef MASK_ABS
+#define MASK_ABS(mask, level) do { \
+ mask = level >> 31; \
+ level = (level ^ mask) - mask; \
+ } while (0)
+#endif
+
+#ifndef NEG_SSR32
+# define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s)))
+#endif
+
+#ifndef NEG_USR32
+# define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s)))
+#endif
+
+#if HAVE_BIGENDIAN
+# ifndef PACK_2U8
+# define PACK_2U8(a,b) (((a) << 8) | (b))
+# endif
+# ifndef PACK_4U8
+# define PACK_4U8(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
+# endif
+# ifndef PACK_2U16
+# define PACK_2U16(a,b) (((a) << 16) | (b))
+# endif
+#else
+# ifndef PACK_2U8
+# define PACK_2U8(a,b) (((b) << 8) | (a))
+# endif
+# ifndef PACK_4U8
+# define PACK_4U8(a,b,c,d) (((d) << 24) | ((c) << 16) | ((b) << 8) | (a))
+# endif
+# ifndef PACK_2U16
+# define PACK_2U16(a,b) (((b) << 16) | (a))
+# endif
+#endif
+
+#ifndef PACK_2S8
+# define PACK_2S8(a,b) PACK_2U8((a)&255, (b)&255)
+#endif
+#ifndef PACK_4S8
+# define PACK_4S8(a,b,c,d) PACK_4U8((a)&255, (b)&255, (c)&255, (d)&255)
+#endif
+#ifndef PACK_2S16
+# define PACK_2S16(a,b) PACK_2U16((a)&0xffff, (b)&0xffff)
+#endif
+
+#ifndef FASTDIV
+# define FASTDIV(a,b) ((uint32_t)((((uint64_t)a) * ff_inverse[b]) >> 32))
+#endif /* FASTDIV */
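+
+/* FASTDIV replaces a division by b (2 <= b <= 256) with a multiplication by
+ * the precomputed reciprocal ff_inverse[b] and a shift; for example,
+ * FASTDIV(1000, 3) evaluates to (1000 * 1431655766ULL) >> 32 == 333. */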
+
+#ifndef ff_sqrt
+#define ff_sqrt ff_sqrt
+static inline av_const unsigned int ff_sqrt(unsigned int a)
+{
+ unsigned int b;
+
+ if (a < 255) return (ff_sqrt_tab[a + 1] - 1) >> 4;
+ else if (a < (1 << 12)) b = ff_sqrt_tab[a >> 4] >> 2;
+#if !CONFIG_SMALL
+ else if (a < (1 << 14)) b = ff_sqrt_tab[a >> 6] >> 1;
+ else if (a < (1 << 16)) b = ff_sqrt_tab[a >> 8] ;
+#endif
+ else {
+ int s = av_log2_16bit(a >> 16) >> 1;
+ unsigned int c = a >> (s + 2);
+ b = ff_sqrt_tab[c >> (s + 8)];
+ b = FASTDIV(c,b) + (b << s);
+ }
+
+ return b - (a < b * b);
+}
+#endif
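+
+/* Illustration: ff_sqrt(1000) == 31 (31*31 = 961 <= 1000 < 1024 = 32*32),
+ * i.e. the integer square root, derived from ff_sqrt_tab with a FASTDIV
+ * refinement step for large inputs. */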
+
+static inline av_const float ff_sqrf(float a)
+{
+ return a*a;
+}
+
+static inline int8_t ff_u8_to_s8(uint8_t a)
+{
+ union {
+ uint8_t u8;
+ int8_t s8;
+ } b;
+ b.u8 = a;
+ return b.s8;
+}
+
+#endif /* AVCODEC_MATHOPS_H */
diff --git a/media/ffvpx/libavcodec/mathtables.c b/media/ffvpx/libavcodec/mathtables.c
new file mode 100644
index 0000000000..8b0031eb00
--- /dev/null
+++ b/media/ffvpx/libavcodec/mathtables.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "mathops.h"
+
+/* a*inverse[b]>>32 == a/b for all 0<=a<=16909558 && 2<=b<=256
+ * for a>16909558, is an overestimate by less than 1 part in 1<<24 */
+const uint32_t ff_inverse[257]={
+ 0, 4294967295U,2147483648U,1431655766, 1073741824, 858993460, 715827883, 613566757,
+ 536870912, 477218589, 429496730, 390451573, 357913942, 330382100, 306783379, 286331154,
+ 268435456, 252645136, 238609295, 226050911, 214748365, 204522253, 195225787, 186737709,
+ 178956971, 171798692, 165191050, 159072863, 153391690, 148102321, 143165577, 138547333,
+ 134217728, 130150525, 126322568, 122713352, 119304648, 116080198, 113025456, 110127367,
+ 107374183, 104755300, 102261127, 99882961, 97612894, 95443718, 93368855, 91382283,
+ 89478486, 87652394, 85899346, 84215046, 82595525, 81037119, 79536432, 78090315,
+ 76695845, 75350304, 74051161, 72796056, 71582789, 70409300, 69273667, 68174085,
+ 67108864, 66076420, 65075263, 64103990, 63161284, 62245903, 61356676, 60492498,
+ 59652324, 58835169, 58040099, 57266231, 56512728, 55778797, 55063684, 54366675,
+ 53687092, 53024288, 52377650, 51746594, 51130564, 50529028, 49941481, 49367441,
+ 48806447, 48258060, 47721859, 47197443, 46684428, 46182445, 45691142, 45210183,
+ 44739243, 44278014, 43826197, 43383509, 42949673, 42524429, 42107523, 41698712,
+ 41297763, 40904451, 40518560, 40139882, 39768216, 39403370, 39045158, 38693400,
+ 38347923, 38008561, 37675152, 37347542, 37025581, 36709123, 36398028, 36092163,
+ 35791395, 35495598, 35204650, 34918434, 34636834, 34359739, 34087043, 33818641,
+ 33554432, 33294321, 33038210, 32786010, 32537632, 32292988, 32051995, 31814573,
+ 31580642, 31350127, 31122952, 30899046, 30678338, 30460761, 30246249, 30034737,
+ 29826162, 29620465, 29417585, 29217465, 29020050, 28825284, 28633116, 28443493,
+ 28256364, 28071682, 27889399, 27709467, 27531842, 27356480, 27183338, 27012373,
+ 26843546, 26676816, 26512144, 26349493, 26188825, 26030105, 25873297, 25718368,
+ 25565282, 25414008, 25264514, 25116768, 24970741, 24826401, 24683721, 24542671,
+ 24403224, 24265352, 24129030, 23994231, 23860930, 23729102, 23598722, 23469767,
+ 23342214, 23216040, 23091223, 22967740, 22845571, 22724695, 22605092, 22486740,
+ 22369622, 22253717, 22139007, 22025474, 21913099, 21801865, 21691755, 21582751,
+ 21474837, 21367997, 21262215, 21157475, 21053762, 20951060, 20849356, 20748635,
+ 20648882, 20550083, 20452226, 20355296, 20259280, 20164166, 20069941, 19976593,
+ 19884108, 19792477, 19701685, 19611723, 19522579, 19434242, 19346700, 19259944,
+ 19173962, 19088744, 19004281, 18920561, 18837576, 18755316, 18673771, 18592933,
+ 18512791, 18433337, 18354562, 18276457, 18199014, 18122225, 18046082, 17970575,
+ 17895698, 17821442, 17747799, 17674763, 17602325, 17530479, 17459217, 17388532,
+ 17318417, 17248865, 17179870, 17111424, 17043522, 16976156, 16909321, 16843010,
+ 16777216
+};
+
+const uint8_t ff_sqrt_tab[256]={
+ 0, 16, 23, 28, 32, 36, 40, 43, 46, 48, 51, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 77, 79, 80, 82, 84, 85, 87, 88, 90,
+ 91, 92, 94, 95, 96, 98, 99,100,102,103,104,105,107,108,109,110,111,112,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
+128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,144,145,146,147,148,149,150,151,151,152,153,154,155,156,156,
+157,158,159,160,160,161,162,163,164,164,165,166,167,168,168,169,170,171,171,172,173,174,174,175,176,176,177,178,179,179,180,181,
+182,182,183,184,184,185,186,186,187,188,188,189,190,190,191,192,192,193,194,194,195,196,196,197,198,198,199,200,200,201,202,202,
+203,204,204,205,205,206,207,207,208,208,209,210,210,211,212,212,213,213,214,215,215,216,216,217,218,218,219,219,220,220,221,222,
+222,223,223,224,224,225,226,226,227,227,228,228,229,230,230,231,231,232,232,233,233,234,235,235,236,236,237,237,238,238,239,239,
+240,240,241,242,242,243,243,244,244,245,245,246,246,247,247,248,248,249,249,250,250,251,251,252,252,253,253,254,254,255,255,255
+};
+
+#define times4(x) x, x, x, x
+#define times256(x) times4(times4(times4(times4(times4(x)))))
+
+const uint8_t ff_crop_tab[256 + 2 * MAX_NEG_CROP] = {
+times256(0x00),
+0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,
+0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F,
+0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28,0x29,0x2A,0x2B,0x2C,0x2D,0x2E,0x2F,
+0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x3A,0x3B,0x3C,0x3D,0x3E,0x3F,
+0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4A,0x4B,0x4C,0x4D,0x4E,0x4F,
+0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5A,0x5B,0x5C,0x5D,0x5E,0x5F,
+0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6A,0x6B,0x6C,0x6D,0x6E,0x6F,
+0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0x7B,0x7C,0x7D,0x7E,0x7F,
+0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F,
+0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F,
+0xA0,0xA1,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF,
+0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF,
+0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF,
+0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF,
+0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF,
+0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF,
+times256(0xFF)
+};
+
+const uint8_t ff_zigzag_direct[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+const uint8_t ff_zigzag_scan[16+1] = {
+ 0 + 0 * 4, 1 + 0 * 4, 0 + 1 * 4, 0 + 2 * 4,
+ 1 + 1 * 4, 2 + 0 * 4, 3 + 0 * 4, 2 + 1 * 4,
+ 1 + 2 * 4, 0 + 3 * 4, 1 + 3 * 4, 2 + 2 * 4,
+ 3 + 1 * 4, 3 + 2 * 4, 2 + 3 * 4, 3 + 3 * 4,
+};
+
+const uint8_t ff_log2_run[41] = {
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+16, 17, 18, 19, 20, 21, 22, 23,
+24,
+};
diff --git a/media/ffvpx/libavcodec/me_cmp.h b/media/ffvpx/libavcodec/me_cmp.h
new file mode 100644
index 0000000000..90ea76c891
--- /dev/null
+++ b/media/ffvpx/libavcodec/me_cmp.h
@@ -0,0 +1,96 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ME_CMP_H
+#define AVCODEC_ME_CMP_H
+
+#include <stdint.h>
+
+#include "libavutil/attributes_internal.h"
+
+#include "avcodec.h"
+
+extern const uint32_t attribute_visibility_hidden ff_square_tab[512];
+
+
+/* Minimum alignment rules ;)
+ * If you notice errors in the alignment requirements, need more alignment
+ * for some ASM code on some CPU, or need to use a function with less-aligned
+ * data, then send a mail to the ffmpeg-devel mailing list, ...
+ *
+ * !warning These alignments might not match reality (an attribute((aligned))
+ * may be missing somewhere).
+ * I (Michael) did not check them; these are just the alignments which I think
+ * could be reached easily ...
+ *
+ * !future video codecs might need functions with less strict alignment
+ */
+
+struct MpegEncContext;
+/* Motion estimation:
+ * h is limited to { width / 2, width, 2 * width },
+ * but never larger than 16 and never smaller than 2.
+ * Although currently h < 4 is not used as functions with
+ * width < 8 are neither used nor implemented. */
+typedef int (*me_cmp_func)(struct MpegEncContext *c,
+ const uint8_t *blk1 /* align width (8 or 16) */,
+ const uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
+ int h);
+
+typedef struct MECmpContext {
+ int (*sum_abs_dctelem)(const int16_t *block /* align 16 */);
+
+ me_cmp_func sad[6]; /* identical to pix_absAxA except additional void * */
+ me_cmp_func sse[6];
+ me_cmp_func hadamard8_diff[6];
+ me_cmp_func dct_sad[6];
+ me_cmp_func quant_psnr[6];
+ me_cmp_func bit[6];
+ me_cmp_func rd[6];
+ me_cmp_func vsad[6];
+ me_cmp_func vsse[6];
+ me_cmp_func nsse[6];
+ me_cmp_func w53[6];
+ me_cmp_func w97[6];
+ me_cmp_func dct_max[6];
+ me_cmp_func dct264_sad[6];
+
+ me_cmp_func me_pre_cmp[6];
+ me_cmp_func me_cmp[6];
+ me_cmp_func me_sub_cmp[6];
+ me_cmp_func mb_cmp[6];
+ me_cmp_func ildct_cmp[6]; // only width 16 used
+ me_cmp_func frame_skip_cmp[6]; // only width 8 used
+
+ me_cmp_func pix_abs[2][4];
+ me_cmp_func median_sad[6];
+} MECmpContext;
+
+void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx);
+void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx);
+void ff_me_cmp_init_alpha(MECmpContext *c, AVCodecContext *avctx);
+void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx);
+void ff_me_cmp_init_ppc(MECmpContext *c, AVCodecContext *avctx);
+void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx);
+void ff_me_cmp_init_mips(MECmpContext *c, AVCodecContext *avctx);
+
+void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type);
+
+void ff_dsputil_init_dwt(MECmpContext *c);
+
+#endif /* AVCODEC_ME_CMP_H */
diff --git a/media/ffvpx/libavcodec/motion_est.h b/media/ffvpx/libavcodec/motion_est.h
new file mode 100644
index 0000000000..f6a563b08c
--- /dev/null
+++ b/media/ffvpx/libavcodec/motion_est.h
@@ -0,0 +1,132 @@
+/*
+ * Motion estimation
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MOTION_EST_H
+#define AVCODEC_MOTION_EST_H
+
+#include <stdint.h>
+
+#include "avcodec.h"
+#include "hpeldsp.h"
+#include "qpeldsp.h"
+
+struct MpegEncContext;
+
+#if ARCH_IA64 // Limit static arrays to avoid gcc failing "short data segment overflowed"
+#define MAX_MV 1024
+#else
+#define MAX_MV 4096
+#endif
+#define MAX_DMV (2*MAX_MV)
+#define ME_MAP_SIZE 64
+
+#define FF_ME_ZERO 0
+#define FF_ME_EPZS 1
+#define FF_ME_XONE 2
+
+/**
+ * Motion estimation context.
+ */
+typedef struct MotionEstContext {
+ AVCodecContext *avctx;
+ int skip; ///< set if ME is skipped for the current MB
+ int co_located_mv[4][2]; ///< mv from last P-frame for direct mode ME
+ int direct_basis_mv[4][2];
+ uint8_t *scratchpad; /**< data area for the ME algo, so that
+ * the ME does not need to malloc/free. */
+ uint8_t *temp;
+ uint32_t *map; ///< map to avoid duplicate evaluations
+ uint32_t *score_map; ///< map to store the scores
+ unsigned map_generation;
+ int pre_penalty_factor;
+ int penalty_factor; /**< an estimate of the bits required to
+ * code a given mv value, e.g. (1,0) takes
+ * more bits than (0,0). We have to
+ * estimate whether any reduction in
+ * residual is worth the extra bits. */
+ int sub_penalty_factor;
+ int mb_penalty_factor;
+ int flags;
+ int sub_flags;
+ int mb_flags;
+ int pre_pass; ///< = 1 for the pre pass
+ int dia_size;
+ int xmin;
+ int xmax;
+ int ymin;
+ int ymax;
+ int pred_x;
+ int pred_y;
+ const uint8_t *src[4][4];
+ const uint8_t *ref[4][4];
+ int stride;
+ int uvstride;
+ /* temp variables for picture complexity calculation */
+ int64_t mc_mb_var_sum_temp;
+ int64_t mb_var_sum_temp;
+ int scene_change_score;
+
+ op_pixels_func(*hpel_put)[4];
+ op_pixels_func(*hpel_avg)[4];
+ qpel_mc_func(*qpel_put)[16];
+ qpel_mc_func(*qpel_avg)[16];
+ const uint8_t (*mv_penalty)[MAX_DMV * 2 + 1]; ///< bit amount needed to encode a MV
+ const uint8_t *current_mv_penalty;
+ int (*sub_motion_search)(struct MpegEncContext *s,
+ int *mx_ptr, int *my_ptr, int dmin,
+ int src_index, int ref_index,
+ int size, int h);
+} MotionEstContext;
+
+static inline int ff_h263_round_chroma(int x)
+{
+ //FIXME static or not?
+ static const uint8_t h263_chroma_roundtab[16] = {
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1,
+ };
+ return h263_chroma_roundtab[x & 0xf] + (x >> 3);
+}
+
+int ff_init_me(struct MpegEncContext *s);
+
+void ff_estimate_p_frame_motion(struct MpegEncContext *s, int mb_x, int mb_y);
+void ff_estimate_b_frame_motion(struct MpegEncContext *s, int mb_x, int mb_y);
+
+int ff_pre_estimate_p_frame_motion(struct MpegEncContext *s,
+ int mb_x, int mb_y);
+
+int ff_epzs_motion_search(struct MpegEncContext *s, int *mx_ptr, int *my_ptr,
+ int P[10][2], int src_index, int ref_index,
+ const int16_t (*last_mv)[2], int ref_mv_scale,
+ int size, int h);
+
+int ff_get_mb_score(struct MpegEncContext *s, int mx, int my, int src_index,
+ int ref_index, int size, int h, int add_rate);
+
+int ff_get_best_fcode(struct MpegEncContext *s,
+ const int16_t (*mv_table)[2], int type);
+
+void ff_fix_long_p_mvs(struct MpegEncContext *s, int type);
+void ff_fix_long_mvs(struct MpegEncContext *s, uint8_t *field_select_table,
+ int field_select, int16_t (*mv_table)[2], int f_code,
+ int type, int truncate);
+
+#endif /* AVCODEC_MOTION_EST_H */
diff --git a/media/ffvpx/libavcodec/moz.build b/media/ffvpx/libavcodec/moz.build
new file mode 100644
index 0000000000..61d9962a71
--- /dev/null
+++ b/media/ffvpx/libavcodec/moz.build
@@ -0,0 +1,143 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Due to duplicate file names, we compile libavcodec/x86 in its own
+# moz.build file.
+if CONFIG['FFVPX_ASFLAGS']:
+ if CONFIG['CPU_ARCH'] == 'x86' or CONFIG['CPU_ARCH'] == 'x86_64':
+ DIRS += ['x86']
+ elif CONFIG['CPU_ARCH'] == 'arm':
+ DIRS += ['arm']
+
+if CONFIG['CPU_ARCH'] == 'aarch64':
+ DIRS += ['aarch64']
+
+SharedLibrary('mozavcodec')
+SOURCES += [
+ 'allcodecs.c',
+ 'avcodec.c',
+ 'avdct.c',
+ 'avpacket.c',
+ 'bitstream.c',
+ 'bitstream_filters.c',
+ 'bsf.c',
+ 'codec_desc.c',
+ 'codec_par.c',
+ 'dct.c',
+ 'dct32_fixed.c',
+ 'dct32_float.c',
+ 'decode.c',
+ 'encode.c',
+ 'faandct.c',
+ 'faanidct.c',
+ 'fdctdsp.c',
+ 'fft_fixed_32.c',
+ 'fft_float.c',
+ 'fft_init_table.c',
+ 'flac.c',
+ 'flacdata.c',
+ 'flacdec.c',
+ 'flacdsp.c',
+ 'get_buffer.c',
+ 'idctdsp.c',
+ 'jfdctfst.c',
+ 'jfdctint.c',
+ 'jrevdct.c',
+ 'log2_tab.c',
+ 'mpegaudio.c',
+ 'mpegaudiodata.c',
+ 'mpegaudiodec_common.c',
+ 'mpegaudiodec_fixed.c',
+ 'mpegaudiodecheader.c',
+ 'mpegaudiodsp.c',
+ 'mpegaudiodsp_data.c',
+ 'mpegaudiodsp_fixed.c',
+ 'mpegaudiodsp_float.c',
+ 'mpegaudiotabs.c',
+ 'null_bsf.c',
+ 'options.c',
+ 'parser.c',
+ 'parsers.c',
+ 'profiles.c',
+ 'pthread.c',
+ 'pthread_frame.c',
+ 'pthread_slice.c',
+ 'rdft.c',
+ 'reverse.c',
+ 'simple_idct.c',
+ 'utils.c',
+ 'version.c',
+ 'vlc.c',
+ 'vorbis_parser.c',
+ 'xiph.c'
+]
+
+if not CONFIG['MOZ_FFVPX_AUDIOONLY']:
+ SOURCES += [
+ 'atsc_a53.c',
+ 'av1_frame_split_bsf.c',
+ 'av1dec.c',
+ 'avpicture.c',
+ 'cbs.c',
+ 'cbs_av1.c',
+ 'golomb.c',
+ 'h264pred.c',
+ 'imgconvert.c',
+ 'libdav1d.c',
+ 'mathtables.c',
+ 'qsv_api.c',
+ 'raw.c',
+ 'videodsp.c',
+ 'vp8.c',
+ 'vp8_parser.c',
+ 'vp8dsp.c',
+ 'vp9.c',
+ 'vp9_parser.c',
+ 'vp9_superframe_split_bsf.c',
+ 'vp9block.c',
+ 'vp9data.c',
+ 'vp9dsp.c',
+ 'vp9dsp_10bpp.c',
+ 'vp9dsp_12bpp.c',
+ 'vp9dsp_8bpp.c',
+ 'vp9lpf.c',
+ 'vp9mvs.c',
+ 'vp9prob.c',
+ 'vp9recon.c',
+ 'vpx_rac.c',
+ ]
+ USE_LIBS += [
+ 'dav1d',
+ 'media_libdav1d_asm',
+ ]
+ if CONFIG['MOZ_WAYLAND']:
+ LOCAL_INCLUDES += ['/media/mozva']
+ SOURCES += [
+ 'vaapi_av1.c',
+ 'vaapi_decode.c',
+ 'vaapi_vp8.c',
+ 'vaapi_vp9.c',
+ ]
+ USE_LIBS += [
+ 'mozva'
+ ]
+
+if CONFIG['MOZ_LIBAV_FFT']:
+ SOURCES += [
+ 'avfft.c',
+ ]
+
+SYMBOLS_FILE = 'avcodec.symbols'
+NoVisibilityFlags()
+
+USE_LIBS += [
+ 'mozavutil'
+]
+
+if CONFIG['OS_TARGET'] != 'WINNT':
+ OS_LIBS += ['m']
+
+include("../ffvpxcommon.mozbuild")
diff --git a/media/ffvpx/libavcodec/mpeg12data.h b/media/ffvpx/libavcodec/mpeg12data.h
new file mode 100644
index 0000000000..bc39655fbf
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpeg12data.h
@@ -0,0 +1,53 @@
+/*
+ * MPEG-1/2 tables
+ * copyright (c) 2000,2001 Fabrice Bellard
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * MPEG-1/2 tables.
+ */
+
+#ifndef AVCODEC_MPEG12DATA_H
+#define AVCODEC_MPEG12DATA_H
+
+#include <stdint.h>
+#include "libavutil/rational.h"
+
+extern const uint16_t ff_mpeg1_default_intra_matrix[];
+extern const uint16_t ff_mpeg1_default_non_intra_matrix[64];
+
+extern const uint16_t ff_mpeg12_vlc_dc_lum_code[12];
+extern const unsigned char ff_mpeg12_vlc_dc_lum_bits[12];
+extern const uint16_t ff_mpeg12_vlc_dc_chroma_code[12];
+extern const unsigned char ff_mpeg12_vlc_dc_chroma_bits[12];
+
+extern const uint8_t ff_mpeg12_mbAddrIncrTable[36][2];
+extern const uint8_t ff_mpeg12_mbPatTable[64][2];
+
+extern const uint8_t ff_mpeg12_mbMotionVectorTable[17][2];
+
+extern const AVRational ff_mpeg12_frame_rate_tab[];
+extern const AVRational ff_mpeg2_frame_rate_tab[];
+
+extern const float ff_mpeg1_aspect[16];
+extern const AVRational ff_mpeg2_aspect[16];
+
+#endif /* AVCODEC_MPEG12DATA_H */
diff --git a/media/ffvpx/libavcodec/mpegaudio.c b/media/ffvpx/libavcodec/mpegaudio.c
new file mode 100644
index 0000000000..cba52992ef
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudio.c
@@ -0,0 +1,50 @@
+/*
+ * MPEG Audio common code
+ * Copyright (c) 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * MPEG Audio common code.
+ */
+
+#include "mpegaudio.h"
+
+
+/* bitrate is in kb/s */
+int ff_mpa_l2_select_table(int bitrate, int nb_channels, int freq, int lsf)
+{
+ int ch_bitrate, table;
+
+ ch_bitrate = bitrate / nb_channels;
+ if (!lsf) {
+ if ((freq == 48000 && ch_bitrate >= 56) ||
+ (ch_bitrate >= 56 && ch_bitrate <= 80))
+ table = 0;
+ else if (freq != 48000 && ch_bitrate >= 96)
+ table = 1;
+ else if (freq != 32000 && ch_bitrate <= 48)
+ table = 2;
+ else
+ table = 3;
+ } else {
+ table = 4;
+ }
+ return table;
+}
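+
+/* Illustration: a 192 kb/s MPEG-1 (lsf == 0) stereo stream has a per-channel
+ * bitrate of 96 kb/s, so it selects table 0 at 48 kHz and table 1 at 44.1 or
+ * 32 kHz; all LSF (MPEG-2) streams use table 4. */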
diff --git a/media/ffvpx/libavcodec/mpegaudio.h b/media/ffvpx/libavcodec/mpegaudio.h
new file mode 100644
index 0000000000..74590a8e8b
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudio.h
@@ -0,0 +1,81 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * mpeg audio declarations for both encoder and decoder.
+ */
+
+#ifndef AVCODEC_MPEGAUDIO_H
+#define AVCODEC_MPEGAUDIO_H
+
+#ifndef USE_FLOATS
+# define USE_FLOATS 0
+#endif
+
+#include <stdint.h>
+#include "libavutil/internal.h"
+
+/* max frame size, in samples */
+#define MPA_FRAME_SIZE 1152
+
+/* max compressed frame size */
+#define MPA_MAX_CODED_FRAME_SIZE 1792
+
+#define MPA_MAX_CHANNELS 2
+
+#define SBLIMIT 32 /* number of subbands */
+
+#define MPA_STEREO 0
+#define MPA_JSTEREO 1
+#define MPA_DUAL 2
+#define MPA_MONO 3
+
+#ifndef FRAC_BITS
+#define FRAC_BITS 23 /* fractional bits for sb_samples and dct */
+#define WFRAC_BITS 16 /* fractional bits for window */
+#endif
+
+#define IMDCT_SCALAR 1.759
+
+#define FRAC_ONE (1 << FRAC_BITS)
+
+#define FIX(a) ((int)((a) * FRAC_ONE))
+
+#if USE_FLOATS
+# define INTFLOAT float
+# define SUINTFLOAT float
+typedef float MPA_INT;
+typedef float OUT_INT;
+#elif FRAC_BITS <= 15
+# define INTFLOAT int
+# define SUINTFLOAT SUINT
+typedef int16_t MPA_INT;
+typedef int16_t OUT_INT;
+#else
+# define INTFLOAT int
+# define SUINTFLOAT SUINT
+typedef int32_t MPA_INT;
+typedef int16_t OUT_INT;
+#endif
+
+int ff_mpa_l2_select_table(int bitrate, int nb_channels, int freq, int lsf);
+
+#endif /* AVCODEC_MPEGAUDIO_H */
diff --git a/media/ffvpx/libavcodec/mpegaudio_tablegen.h b/media/ffvpx/libavcodec/mpegaudio_tablegen.h
new file mode 100644
index 0000000000..bae6962ac0
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudio_tablegen.h
@@ -0,0 +1,89 @@
+/*
+ * Header file for hardcoded mpegaudiodec tables
+ *
+ * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEGAUDIO_TABLEGEN_H
+#define AVCODEC_MPEGAUDIO_TABLEGEN_H
+
+#include <stdint.h>
+#include <math.h>
+#include "libavutil/attributes.h"
+
+#if CONFIG_HARDCODED_TABLES
+#define mpegaudio_tableinit()
+#include "libavcodec/mpegaudio_tables.h"
+#else
+#if defined(BUILD_TABLES) || !USE_FLOATS
+#define FIXED_TABLE
+static uint32_t exp_table_fixed[512];
+static uint32_t expval_table_fixed[512][16];
+#endif
+
+#if defined(BUILD_TABLES) || USE_FLOATS
+#define FLOAT_TABLE
+static float exp_table_float[512];
+static float expval_table_float[512][16];
+#endif
+
+#define IMDCT_SCALAR 1.759
+
+static av_cold void mpegaudio_tableinit(void)
+{
+ int i, value, exponent;
+ static const double exp2_lut[4] = {
+ 1.00000000000000000000, /* 2 ^ (0 * 0.25) */
+ 1.18920711500272106672, /* 2 ^ (1 * 0.25) */
+ M_SQRT2 , /* 2 ^ (2 * 0.25) */
+ 1.68179283050742908606, /* 2 ^ (3 * 0.25) */
+ };
+ double pow43_lut[16];
+ double exp2_base = 2.11758236813575084767080625169910490512847900390625e-22; // 2^(-72)
+ double exp2_val;
+
+ for (i = 0; i < 16; ++i)
+ pow43_lut[i] = i * cbrt(i);
+
+ for (exponent = 0; exponent < 512; exponent++) {
+ if (exponent && (exponent & 3) == 0)
+ exp2_base *= 2;
+ exp2_val = exp2_base * exp2_lut[exponent & 3] / IMDCT_SCALAR;
+ for (value = 0; value < 16; value++) {
+ double f = pow43_lut[value] * exp2_val;
+#ifdef FIXED_TABLE
+ expval_table_fixed[exponent][value] = (f < 0xFFFFFFFF ? llrint(f) : 0xFFFFFFFF);
+#endif
+#ifdef FLOAT_TABLE
+ expval_table_float[exponent][value] = f;
+#endif
+ }
+#ifdef FIXED_TABLE
+ exp_table_fixed[exponent] = expval_table_fixed[exponent][1];
+#endif
+#ifdef FLOAT_TABLE
+ exp_table_float[exponent] = expval_table_float[exponent][1];
+#endif
+ }
+}
+#undef FLOAT_TABLE
+#undef FIXED_TABLE
+#endif /* CONFIG_HARDCODED_TABLES */
+
+#endif /* AVCODEC_MPEGAUDIO_TABLEGEN_H */
diff --git a/media/ffvpx/libavcodec/mpegaudiodata.c b/media/ffvpx/libavcodec/mpegaudiodata.c
new file mode 100644
index 0000000000..669590908f
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodata.c
@@ -0,0 +1,133 @@
+/*
+ * MPEG Audio common tables
+ * copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * mpeg audio layer common tables.
+ */
+
+#include "mpegaudiodata.h"
+
+/*******************************************************/
+/* layer 2 tables */
+
+const int ff_mpa_sblimit_table[5] = { 27 , 30 , 8, 12 , 30 };
+
+const int ff_mpa_quant_steps[17] = {
+ 3, 5, 7, 9, 15,
+ 31, 63, 127, 255, 511,
+ 1023, 2047, 4095, 8191, 16383,
+ 32767, 65535
+};
+
+/* we use a negative value if grouped */
+const int ff_mpa_quant_bits[17] = {
+ -5, -7, 3, -10, 4,
+ 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14,
+ 15, 16
+};
+
+/* Bit-allocation tables giving the quantization index: each row stores the
+   number of allocation bits followed by the (2^bits - 1) quantization indices. */
+static const unsigned char alloc_table_1[] = {
+ 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 3, 0, 1, 2, 3, 4, 5, 16,
+ 2, 0, 1, 16,
+ 2, 0, 1, 16,
+ 2, 0, 1, 16,
+ 2, 0, 1, 16,
+ 2, 0, 1, 16,
+ 2, 0, 1, 16,
+ 2, 0, 1, 16,
+};
+
+static const unsigned char alloc_table_3[] = {
+ 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+};
+
+static const unsigned char alloc_table_4[] = {
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 3, 0, 1, 3, 4, 5, 6, 7,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+ 2, 0, 1, 3,
+};
+
+const unsigned char * const ff_mpa_alloc_tables[5] =
+{ alloc_table_1, alloc_table_1, alloc_table_3, alloc_table_3, alloc_table_4, };
diff --git a/media/ffvpx/libavcodec/mpegaudiodata.h b/media/ffvpx/libavcodec/mpegaudiodata.h
new file mode 100644
index 0000000000..a4148a1ffe
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodata.h
@@ -0,0 +1,82 @@
+/*
+ * MPEG Audio common tables
+ * copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * mpeg audio layer common tables.
+ */
+
+#ifndef AVCODEC_MPEGAUDIODATA_H
+#define AVCODEC_MPEGAUDIODATA_H
+
+#include <stdint.h>
+
+#include "config.h"
+
+#include "vlc.h"
+
+#define MODE_EXT_MS_STEREO 2
+#define MODE_EXT_I_STEREO 1
+
+extern const uint16_t ff_mpa_bitrate_tab[2][3][15];
+extern const uint16_t ff_mpa_freq_tab[3];
+extern const int ff_mpa_sblimit_table[5];
+extern const int ff_mpa_quant_steps[17];
+extern const int ff_mpa_quant_bits[17];
+extern const unsigned char * const ff_mpa_alloc_tables[5];
+
+#define TABLE_4_3_SIZE ((8191 + 16)*4)
+#if CONFIG_HARDCODED_TABLES
+extern const int8_t ff_table_4_3_exp [TABLE_4_3_SIZE];
+extern const uint32_t ff_table_4_3_value[TABLE_4_3_SIZE];
+#else
+extern int8_t ff_table_4_3_exp [TABLE_4_3_SIZE];
+extern uint32_t ff_table_4_3_value[TABLE_4_3_SIZE];
+#endif
+
+/* VLCs for decoding layer 3 huffman tables */
+extern VLC ff_huff_vlc[16];
+extern VLC ff_huff_quad_vlc[2];
+
+/* layer3 scale factor size */
+extern const uint8_t ff_slen_table[2][16];
+/* number of lsf scale factors for a given size */
+extern const uint8_t ff_lsf_nsf_table[6][3][4];
+extern const uint8_t ff_mpa_huff_data[32][2];
+
+/* band size tables */
+extern const uint8_t ff_band_size_long[9][22];
+extern const uint8_t ff_band_size_short[9][13];
+/* computed from ff_band_size_long */
+extern uint16_t ff_band_index_long[9][23];
+
+extern int16_t *const ff_division_tabs[4];
+
+/* lower 2 bits: modulo 3, higher bits: shift */
+extern uint16_t ff_scale_factor_modshift[64];
+
+extern const uint8_t ff_mpa_pretab[2][22];
+
+/* Initialize tables shared between the fixed and
+ * floating point MPEG audio decoders. */
+void ff_mpegaudiodec_common_init_static(void);
+
+#endif /* AVCODEC_MPEGAUDIODATA_H */
diff --git a/media/ffvpx/libavcodec/mpegaudiodec_common.c b/media/ffvpx/libavcodec/mpegaudiodec_common.c
new file mode 100644
index 0000000000..5fcb39b325
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodec_common.c
@@ -0,0 +1,483 @@
+/*
+ * MPEG Audio decoder
+ * copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * mpeg audio layer decoder tables.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/libm.h"
+#include "libavutil/thread.h"
+
+#include "mpegaudiodata.h"
+
+#include "mpegaudiodec_common_tablegen.h"
+
+uint16_t ff_scale_factor_modshift[64];
+
+static int16_t division_tab3[1 << 6 ];
+static int16_t division_tab5[1 << 8 ];
+static int16_t division_tab9[1 << 11];
+
+int16_t *const ff_division_tabs[4] = {
+ division_tab3, division_tab5, NULL, division_tab9
+};
+
+
+/*******************************************************/
+/* layer 3 tables */
+
+const uint8_t ff_slen_table[2][16] = {
+ { 0, 0, 0, 0, 3, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4 },
+ { 0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 2, 3 },
+};
+
+const uint8_t ff_lsf_nsf_table[6][3][4] = {
+ { { 6, 5, 5, 5 }, { 9, 9, 9, 9 }, { 6, 9, 9, 9 } },
+ { { 6, 5, 7, 3 }, { 9, 9, 12, 6 }, { 6, 9, 12, 6 } },
+ { { 11, 10, 0, 0 }, { 18, 18, 0, 0 }, { 15, 18, 0, 0 } },
+ { { 7, 7, 7, 0 }, { 12, 12, 12, 0 }, { 6, 15, 12, 0 } },
+ { { 6, 6, 6, 3 }, { 12, 9, 9, 6 }, { 6, 12, 9, 6 } },
+ { { 8, 8, 5, 0 }, { 15, 12, 9, 0 }, { 6, 18, 9, 0 } },
+};
+
+/* mpegaudio layer 3 huffman tables */
+VLC ff_huff_vlc[16];
+static VLCElem huff_vlc_tables[128 + 128 + 128 + 130 + 128 + 154 + 166 + 142 +
+ 204 + 190 + 170 + 542 + 460 + 662 + 414];
+VLC ff_huff_quad_vlc[2];
+static VLCElem huff_quad_vlc_tables[64 + 16];
+
+static const uint8_t mpa_hufflens[] = {
+ /* Huffman table 1 - 4 entries */
+ 3, 3, 2, 1,
+ /* Huffman table 2 - 9 entries */
+ 6, 6, 5, 5, 5, 3, 3, 3, 1,
+ /* Huffman table 3 - 9 entries */
+ 6, 6, 5, 5, 5, 3, 2, 2, 2,
+ /* Huffman table 5 - 16 entries */
+ 8, 8, 7, 6, 7, 7, 7, 7, 6, 6, 6, 6, 3, 3, 3, 1,
+ /* Huffman table 6 - 16 entries */
+ 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 2, 3, 3,
+ /* Huffman table 7 - 36 entries */
+ 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 9, 9, 8, 9, 9, 8, 8, 7, 7,
+ 7, 8, 8, 8, 8, 7, 7, 7, 7, 6, 5, 6, 6, 4, 3, 3, 1,
+ /* Huffman table 8 - 36 entries */
+ 11, 11, 10, 9, 10, 10, 9, 9, 9, 8, 8, 9, 9, 9, 9, 8, 8, 8, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 6, 6, 6, 4, 4, 2, 3, 3, 2,
+ /* Huffman table 9 - 36 entries */
+ 9, 9, 8, 8, 9, 9, 8, 8, 8, 8, 7, 7, 7, 8, 8, 7, 7, 7, 7,
+ 6, 6, 6, 6, 5, 5, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3,
+ /* Huffman table 10 - 64 entries */
+ 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10, 10, 10, 11, 11, 10, 9, 9, 10,
+ 10, 9, 9, 10, 10, 9, 10, 10, 8, 8, 9, 9, 10, 10, 9, 9, 10, 10, 8,
+ 8, 8, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 6,
+ 6, 6, 6, 4, 3, 3, 1,
+ /* Huffman table 11 - 64 entries */
+ 10, 10, 10, 10, 10, 10, 10, 11, 11, 10, 10, 9, 9, 9, 10, 10, 10, 10, 8,
+ 8, 9, 9, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 8, 7, 8, 8, 7, 7,
+ 8, 8, 8, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 7, 7, 6, 5,
+ 4, 5, 5, 3, 3, 3, 2,
+ /* Huffman table 12 - 64 entries */
+ 10, 10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 9, 9, 8, 8, 8, 8, 8, 8,
+ 9, 9, 8, 8, 8, 8, 8, 9, 9, 7, 7, 7, 8, 8, 8, 8, 8, 8, 7,
+ 7, 7, 7, 8, 8, 7, 7, 7, 6, 6, 6, 6, 7, 7, 6, 5, 5, 5, 4,
+ 4, 5, 5, 4, 3, 3, 3,
+ /* Huffman table 13 - 256 entries */
+ 19, 19, 18, 17, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 15, 15, 16,
+ 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 15, 16, 16, 14, 14, 15,
+ 15, 15, 15, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 14, 13, 14,
+ 14, 13, 13, 14, 14, 13, 14, 14, 13, 14, 14, 13, 14, 14, 13, 13, 14, 14, 12,
+ 12, 12, 13, 13, 13, 13, 13, 13, 12, 13, 13, 12, 12, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 12, 12, 13, 13, 12, 12, 12, 12, 13, 13, 13, 13, 12,
+ 13, 13, 12, 11, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 12, 12, 11,
+ 11, 12, 12, 11, 12, 12, 12, 12, 11, 11, 12, 12, 11, 12, 12, 11, 12, 12, 11,
+ 12, 12, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 11, 11,
+ 10, 11, 11, 10, 11, 11, 11, 11, 10, 10, 11, 11, 10, 10, 11, 11, 11, 11, 11,
+ 11, 9, 9, 10, 10, 10, 10, 10, 11, 11, 9, 9, 9, 10, 10, 9, 9, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 8, 9, 9, 9, 9, 9, 9, 10, 10, 9, 9,
+ 9, 8, 8, 9, 9, 9, 9, 9, 9, 8, 7, 8, 8, 8, 8, 7, 7, 7, 7,
+ 7, 6, 6, 6, 6, 4, 4, 3, 1,
+ /* Huffman table 15 - 256 entries */
+ 13, 13, 13, 13, 12, 13, 13, 13, 13, 13, 13, 12, 13, 13, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13,
+ 13, 11, 11, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 12, 12, 11, 11, 11, 11,
+ 11, 11, 11, 11, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 11, 11, 11, 11, 11,
+ 11, 10, 11, 11, 11, 11, 11, 11, 10, 10, 11, 11, 10, 10, 10, 10, 11, 11, 10,
+ 10, 10, 10, 10, 10, 10, 11, 11, 10, 10, 10, 10, 10, 11, 11, 9, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 10, 10, 10, 10, 9, 10, 10, 9, 10,
+ 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 10, 10, 9, 9, 9,
+ 9, 9, 9, 10, 10, 9, 9, 9, 9, 9, 9, 8, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8,
+ 8, 8, 9, 9, 8, 8, 8, 8, 8, 8, 8, 9, 9, 8, 7, 8, 8, 7, 7,
+ 7, 7, 8, 8, 7, 7, 7, 7, 7, 6, 7, 7, 6, 6, 7, 7, 6, 6, 6,
+ 5, 5, 5, 5, 5, 3, 4, 4, 3,
+ /* Huffman table 16 - 256 entries */
+ 11, 11, 11, 11, 11, 11, 11, 11, 10, 11, 11, 11, 11, 10, 10, 10, 10, 10, 8,
+ 10, 10, 9, 9, 9, 9, 10, 16, 17, 17, 15, 15, 16, 16, 14, 15, 15, 14, 14,
+ 15, 15, 14, 14, 15, 15, 15, 15, 14, 15, 15, 14, 13, 8, 9, 9, 8, 8, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 13, 13, 14, 14, 14, 14, 13, 14, 14,
+ 13, 13, 13, 14, 14, 14, 14, 13, 13, 14, 14, 13, 14, 14, 12, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 13, 13, 13, 13, 13, 13, 12, 13,
+ 13, 12, 12, 13, 13, 11, 12, 12, 12, 12, 12, 12, 12, 13, 13, 11, 12, 12, 12,
+ 12, 11, 12, 12, 12, 12, 12, 12, 12, 12, 11, 12, 12, 11, 11, 11, 11, 12, 12,
+ 12, 12, 12, 12, 12, 12, 11, 12, 12, 11, 12, 12, 11, 12, 12, 11, 12, 12, 11,
+ 10, 10, 11, 11, 11, 11, 11, 11, 10, 10, 11, 11, 10, 10, 11, 11, 11, 11, 11,
+ 11, 11, 11, 10, 11, 11, 10, 10, 10, 11, 11, 10, 10, 11, 11, 10, 10, 11, 11,
+ 10, 9, 9, 10, 10, 10, 10, 10, 10, 9, 9, 9, 10, 10, 9, 10, 10, 9, 9,
+ 8, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 9, 9, 8, 8, 7, 7, 8, 8,
+ 7, 6, 6, 6, 6, 4, 4, 3, 1,
+ /* Huffman table 24 - 256 entries */
+ 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 7, 7, 8, 8, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 9, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 4, 11, 11, 11, 11, 12, 12, 11, 10, 11, 11, 10, 10, 10, 10, 11, 11, 10,
+ 10, 10, 10, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 10, 11, 11, 10, 9, 10, 10, 10, 10, 11, 11, 10, 9, 9, 10,
+ 10, 9, 10, 10, 10, 10, 9, 9, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 9, 9, 9, 10, 10, 8, 9, 9, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 8, 8, 8, 8, 8,
+ 8, 9, 9, 7, 8, 8, 7, 7, 7, 7, 7, 8, 8, 7, 7, 6, 6, 7, 7,
+ 6, 5, 5, 6, 6, 4, 4, 4, 4,
+};
+
+static const uint8_t mpa_huffsymbols[] = {
+ /* Huffman table 1 - 4 entries */
+ 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 2 - 9 entries */
+ 0x22, 0x02, 0x12, 0x21, 0x20, 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 3 - 9 entries */
+ 0x22, 0x02, 0x12, 0x21, 0x20, 0x10, 0x11, 0x01, 0x00,
+ /* Huffman table 5 - 16 entries */
+ 0x33, 0x23, 0x32, 0x31, 0x13, 0x03, 0x30, 0x22, 0x12, 0x21, 0x02, 0x20,
+ 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 6 - 16 entries */
+ 0x33, 0x03, 0x23, 0x32, 0x30, 0x13, 0x31, 0x22, 0x02, 0x12, 0x21, 0x20,
+ 0x01, 0x11, 0x10, 0x00,
+ /* Huffman table 7 - 36 entries */
+ 0x55, 0x45, 0x54, 0x53, 0x35, 0x44, 0x25, 0x52, 0x15, 0x51, 0x05, 0x34,
+ 0x50, 0x43, 0x33, 0x24, 0x42, 0x14, 0x41, 0x40, 0x04, 0x23, 0x32, 0x03,
+ 0x13, 0x31, 0x30, 0x22, 0x12, 0x21, 0x02, 0x20, 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 8 - 36 entries */
+ 0x55, 0x54, 0x45, 0x53, 0x35, 0x44, 0x25, 0x52, 0x05, 0x15, 0x51, 0x34,
+ 0x43, 0x50, 0x33, 0x24, 0x42, 0x14, 0x41, 0x04, 0x40, 0x23, 0x32, 0x13,
+ 0x31, 0x03, 0x30, 0x22, 0x02, 0x20, 0x12, 0x21, 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 9 - 36 entries */
+ 0x55, 0x45, 0x35, 0x53, 0x54, 0x05, 0x44, 0x25, 0x52, 0x15, 0x51, 0x34,
+ 0x43, 0x50, 0x04, 0x24, 0x42, 0x33, 0x40, 0x14, 0x41, 0x23, 0x32, 0x13,
+ 0x31, 0x03, 0x30, 0x22, 0x02, 0x12, 0x21, 0x20, 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 10 - 64 entries */
+ 0x77, 0x67, 0x76, 0x57, 0x75, 0x66, 0x47, 0x74, 0x56, 0x65, 0x37, 0x73,
+ 0x46, 0x55, 0x54, 0x63, 0x27, 0x72, 0x64, 0x07, 0x70, 0x62, 0x45, 0x35,
+ 0x06, 0x53, 0x44, 0x17, 0x71, 0x36, 0x26, 0x25, 0x52, 0x15, 0x51, 0x34,
+ 0x43, 0x16, 0x61, 0x60, 0x05, 0x50, 0x24, 0x42, 0x33, 0x04, 0x14, 0x41,
+ 0x40, 0x23, 0x32, 0x03, 0x13, 0x31, 0x30, 0x22, 0x12, 0x21, 0x02, 0x20,
+ 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 11 - 64 entries */
+ 0x77, 0x67, 0x76, 0x75, 0x66, 0x47, 0x74, 0x57, 0x55, 0x56, 0x65, 0x37,
+ 0x73, 0x46, 0x45, 0x54, 0x35, 0x53, 0x27, 0x72, 0x64, 0x07, 0x71, 0x17,
+ 0x70, 0x36, 0x63, 0x60, 0x44, 0x25, 0x52, 0x05, 0x15, 0x62, 0x26, 0x06,
+ 0x16, 0x61, 0x51, 0x34, 0x50, 0x43, 0x33, 0x24, 0x42, 0x14, 0x41, 0x04,
+ 0x40, 0x23, 0x32, 0x13, 0x31, 0x03, 0x30, 0x22, 0x21, 0x12, 0x02, 0x20,
+ 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 12 - 64 entries */
+ 0x77, 0x67, 0x76, 0x57, 0x75, 0x66, 0x47, 0x74, 0x65, 0x56, 0x37, 0x73,
+ 0x55, 0x27, 0x72, 0x46, 0x64, 0x17, 0x71, 0x07, 0x70, 0x36, 0x63, 0x45,
+ 0x54, 0x44, 0x06, 0x05, 0x26, 0x62, 0x61, 0x16, 0x60, 0x35, 0x53, 0x25,
+ 0x52, 0x15, 0x51, 0x34, 0x43, 0x50, 0x04, 0x24, 0x42, 0x14, 0x33, 0x41,
+ 0x23, 0x32, 0x40, 0x03, 0x30, 0x13, 0x31, 0x22, 0x12, 0x21, 0x02, 0x20,
+ 0x00, 0x11, 0x01, 0x10,
+ /* Huffman table 13 - 256 entries */
+ 0xFE, 0xFC, 0xFD, 0xED, 0xFF, 0xEF, 0xDF, 0xEE, 0xCF, 0xDE, 0xBF, 0xFB,
+ 0xCE, 0xDC, 0xAF, 0xE9, 0xEC, 0xDD, 0xFA, 0xCD, 0xBE, 0xEB, 0x9F, 0xF9,
+ 0xEA, 0xBD, 0xDB, 0x8F, 0xF8, 0xCC, 0xAE, 0x9E, 0x8E, 0x7F, 0x7E, 0xF7,
+ 0xDA, 0xAD, 0xBC, 0xCB, 0xF6, 0x6F, 0xE8, 0x5F, 0x9D, 0xD9, 0xF5, 0xE7,
+ 0xAC, 0xBB, 0x4F, 0xF4, 0xCA, 0xE6, 0xF3, 0x3F, 0x8D, 0xD8, 0x2F, 0xF2,
+ 0x6E, 0x9C, 0x0F, 0xC9, 0x5E, 0xAB, 0x7D, 0xD7, 0x4E, 0xC8, 0xD6, 0x3E,
+ 0xB9, 0x9B, 0xAA, 0x1F, 0xF1, 0xF0, 0xBA, 0xE5, 0xE4, 0x8C, 0x6D, 0xE3,
+ 0xE2, 0x2E, 0x0E, 0x1E, 0xE1, 0xE0, 0x5D, 0xD5, 0x7C, 0xC7, 0x4D, 0x8B,
+ 0xB8, 0xD4, 0x9A, 0xA9, 0x6C, 0xC6, 0x3D, 0xD3, 0x7B, 0x2D, 0xD2, 0x1D,
+ 0xB7, 0x5C, 0xC5, 0x99, 0x7A, 0xC3, 0xA7, 0x97, 0x4B, 0xD1, 0x0D, 0xD0,
+ 0x8A, 0xA8, 0x4C, 0xC4, 0x6B, 0xB6, 0x3C, 0x2C, 0xC2, 0x5B, 0xB5, 0x89,
+ 0x1C, 0xC1, 0x98, 0x0C, 0xC0, 0xB4, 0x6A, 0xA6, 0x79, 0x3B, 0xB3, 0x88,
+ 0x5A, 0x2B, 0xA5, 0x69, 0xA4, 0x78, 0x87, 0x94, 0x77, 0x76, 0xB2, 0x1B,
+ 0xB1, 0x0B, 0xB0, 0x96, 0x4A, 0x3A, 0xA3, 0x59, 0x95, 0x2A, 0xA2, 0x1A,
+ 0xA1, 0x0A, 0x68, 0xA0, 0x86, 0x49, 0x93, 0x39, 0x58, 0x85, 0x67, 0x29,
+ 0x92, 0x57, 0x75, 0x38, 0x83, 0x66, 0x47, 0x74, 0x56, 0x65, 0x73, 0x19,
+ 0x91, 0x09, 0x90, 0x48, 0x84, 0x72, 0x46, 0x64, 0x28, 0x82, 0x18, 0x37,
+ 0x27, 0x17, 0x71, 0x55, 0x07, 0x70, 0x36, 0x63, 0x45, 0x54, 0x26, 0x62,
+ 0x35, 0x81, 0x08, 0x80, 0x16, 0x61, 0x06, 0x60, 0x53, 0x44, 0x25, 0x52,
+ 0x05, 0x15, 0x51, 0x34, 0x43, 0x50, 0x24, 0x42, 0x33, 0x14, 0x41, 0x04,
+ 0x40, 0x23, 0x32, 0x13, 0x31, 0x03, 0x30, 0x22, 0x12, 0x21, 0x02, 0x20,
+ 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 15 - 256 entries */
+ 0xFF, 0xEF, 0xFE, 0xDF, 0xEE, 0xFD, 0xCF, 0xFC, 0xDE, 0xED, 0xBF, 0xFB,
+ 0xCE, 0xEC, 0xDD, 0xAF, 0xFA, 0xBE, 0xEB, 0xCD, 0xDC, 0x9F, 0xF9, 0xEA,
+ 0xBD, 0xDB, 0x8F, 0xF8, 0xCC, 0x9E, 0xE9, 0x7F, 0xF7, 0xAD, 0xDA, 0xBC,
+ 0x6F, 0xAE, 0x0F, 0xCB, 0xF6, 0x8E, 0xE8, 0x5F, 0x9D, 0xF5, 0x7E, 0xE7,
+ 0xAC, 0xCA, 0xBB, 0xD9, 0x8D, 0x4F, 0xF4, 0x3F, 0xF3, 0xD8, 0xE6, 0x2F,
+ 0xF2, 0x6E, 0xF0, 0x1F, 0xF1, 0x9C, 0xC9, 0x5E, 0xAB, 0xBA, 0xE5, 0x7D,
+ 0xD7, 0x4E, 0xE4, 0x8C, 0xC8, 0x3E, 0x6D, 0xD6, 0xE3, 0x9B, 0xB9, 0x2E,
+ 0xAA, 0xE2, 0x1E, 0xE1, 0x0E, 0xE0, 0x5D, 0xD5, 0x7C, 0xC7, 0x4D, 0x8B,
+ 0xD4, 0xB8, 0x9A, 0xA9, 0x6C, 0xC6, 0x3D, 0xD3, 0xD2, 0x2D, 0x0D, 0x1D,
+ 0x7B, 0xB7, 0xD1, 0x5C, 0xD0, 0xC5, 0x8A, 0xA8, 0x4C, 0xC4, 0x6B, 0xB6,
+ 0x99, 0x0C, 0x3C, 0xC3, 0x7A, 0xA7, 0xA6, 0xC0, 0x0B, 0xC2, 0x2C, 0x5B,
+ 0xB5, 0x1C, 0x89, 0x98, 0xC1, 0x4B, 0xB4, 0x6A, 0x3B, 0x79, 0xB3, 0x97,
+ 0x88, 0x2B, 0x5A, 0xB2, 0xA5, 0x1B, 0xB1, 0xB0, 0x69, 0x96, 0x4A, 0xA4,
+ 0x78, 0x87, 0x3A, 0xA3, 0x59, 0x95, 0x2A, 0xA2, 0x1A, 0xA1, 0x0A, 0xA0,
+ 0x68, 0x86, 0x49, 0x94, 0x39, 0x93, 0x77, 0x09, 0x58, 0x85, 0x29, 0x67,
+ 0x76, 0x92, 0x91, 0x19, 0x90, 0x48, 0x84, 0x57, 0x75, 0x38, 0x83, 0x66,
+ 0x47, 0x28, 0x82, 0x18, 0x81, 0x74, 0x08, 0x80, 0x56, 0x65, 0x37, 0x73,
+ 0x46, 0x27, 0x72, 0x64, 0x17, 0x55, 0x71, 0x07, 0x70, 0x36, 0x63, 0x45,
+ 0x54, 0x26, 0x62, 0x16, 0x06, 0x60, 0x35, 0x61, 0x53, 0x44, 0x25, 0x52,
+ 0x15, 0x51, 0x05, 0x50, 0x34, 0x43, 0x24, 0x42, 0x33, 0x41, 0x14, 0x04,
+ 0x23, 0x32, 0x40, 0x03, 0x13, 0x31, 0x30, 0x22, 0x12, 0x21, 0x02, 0x20,
+ 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 16 - 256 entries */
+ 0xEF, 0xFE, 0xDF, 0xFD, 0xCF, 0xFC, 0xBF, 0xFB, 0xAF, 0xFA, 0x9F, 0xF9,
+ 0xF8, 0x8F, 0x7F, 0xF7, 0x6F, 0xF6, 0xFF, 0x5F, 0xF5, 0x4F, 0xF4, 0xF3,
+ 0xF0, 0x3F, 0xCE, 0xEC, 0xDD, 0xDE, 0xE9, 0xEA, 0xD9, 0xEE, 0xED, 0xEB,
+ 0xBE, 0xCD, 0xDC, 0xDB, 0xAE, 0xCC, 0xAD, 0xDA, 0x7E, 0xAC, 0xCA, 0xC9,
+ 0x7D, 0x5E, 0xBD, 0xF2, 0x2F, 0x0F, 0x1F, 0xF1, 0x9E, 0xBC, 0xCB, 0x8E,
+ 0xE8, 0x9D, 0xE7, 0xBB, 0x8D, 0xD8, 0x6E, 0xE6, 0x9C, 0xAB, 0xBA, 0xE5,
+ 0xD7, 0x4E, 0xE4, 0x8C, 0xC8, 0x3E, 0x6D, 0xD6, 0x9B, 0xB9, 0xAA, 0xE1,
+ 0xD4, 0xB8, 0xA9, 0x7B, 0xB7, 0xD0, 0xE3, 0x0E, 0xE0, 0x5D, 0xD5, 0x7C,
+ 0xC7, 0x4D, 0x8B, 0x9A, 0x6C, 0xC6, 0x3D, 0x5C, 0xC5, 0x0D, 0x8A, 0xA8,
+ 0x99, 0x4C, 0xB6, 0x7A, 0x3C, 0x5B, 0x89, 0x1C, 0xC0, 0x98, 0x79, 0xE2,
+ 0x2E, 0x1E, 0xD3, 0x2D, 0xD2, 0xD1, 0x3B, 0x97, 0x88, 0x1D, 0xC4, 0x6B,
+ 0xC3, 0xA7, 0x2C, 0xC2, 0xB5, 0xC1, 0x0C, 0x4B, 0xB4, 0x6A, 0xA6, 0xB3,
+ 0x5A, 0xA5, 0x2B, 0xB2, 0x1B, 0xB1, 0x0B, 0xB0, 0x69, 0x96, 0x4A, 0xA4,
+ 0x78, 0x87, 0xA3, 0x3A, 0x59, 0x2A, 0x95, 0x68, 0xA1, 0x86, 0x77, 0x94,
+ 0x49, 0x57, 0x67, 0xA2, 0x1A, 0x0A, 0xA0, 0x39, 0x93, 0x58, 0x85, 0x29,
+ 0x92, 0x76, 0x09, 0x19, 0x91, 0x90, 0x48, 0x84, 0x75, 0x38, 0x83, 0x66,
+ 0x28, 0x82, 0x47, 0x74, 0x18, 0x81, 0x80, 0x08, 0x56, 0x37, 0x73, 0x65,
+ 0x46, 0x27, 0x72, 0x64, 0x55, 0x07, 0x17, 0x71, 0x70, 0x36, 0x63, 0x45,
+ 0x54, 0x26, 0x62, 0x16, 0x61, 0x06, 0x60, 0x53, 0x35, 0x44, 0x25, 0x52,
+ 0x51, 0x15, 0x05, 0x34, 0x43, 0x50, 0x24, 0x42, 0x33, 0x14, 0x41, 0x04,
+ 0x40, 0x23, 0x32, 0x13, 0x31, 0x03, 0x30, 0x22, 0x12, 0x21, 0x02, 0x20,
+ 0x11, 0x01, 0x10, 0x00,
+ /* Huffman table 24 - 256 entries */
+ 0xEF, 0xFE, 0xDF, 0xFD, 0xCF, 0xFC, 0xBF, 0xFB, 0xFA, 0xAF, 0x9F, 0xF9,
+ 0xF8, 0x8F, 0x7F, 0xF7, 0x6F, 0xF6, 0x5F, 0xF5, 0x4F, 0xF4, 0x3F, 0xF3,
+ 0x2F, 0xF2, 0xF1, 0x1F, 0xF0, 0x0F, 0xEE, 0xDE, 0xED, 0xCE, 0xEC, 0xDD,
+ 0xBE, 0xEB, 0xCD, 0xDC, 0xAE, 0xEA, 0xBD, 0xDB, 0xCC, 0x9E, 0xE9, 0xAD,
+ 0xDA, 0xBC, 0xCB, 0x8E, 0xE8, 0x9D, 0xD9, 0x7E, 0xE7, 0xAC, 0xFF, 0xCA,
+ 0xBB, 0x8D, 0xD8, 0x0E, 0xE0, 0x0D, 0xE6, 0x6E, 0x9C, 0xC9, 0x5E, 0xBA,
+ 0xE5, 0xAB, 0x7D, 0xD7, 0xE4, 0x8C, 0xC8, 0x4E, 0x2E, 0x3E, 0x6D, 0xD6,
+ 0xE3, 0x9B, 0xB9, 0xAA, 0xE2, 0x1E, 0xE1, 0x5D, 0xD5, 0x7C, 0xC7, 0x4D,
+ 0x8B, 0xB8, 0xD4, 0x9A, 0xA9, 0x6C, 0xC6, 0x3D, 0xD3, 0x2D, 0xD2, 0x1D,
+ 0x7B, 0xB7, 0xD1, 0x5C, 0xC5, 0x8A, 0xA8, 0x99, 0x4C, 0xC4, 0x6B, 0xB6,
+ 0xD0, 0x0C, 0x3C, 0xC3, 0x7A, 0xA7, 0x2C, 0xC2, 0x5B, 0xB5, 0x1C, 0x89,
+ 0x98, 0xC1, 0x4B, 0xC0, 0x0B, 0x3B, 0xB0, 0x0A, 0x1A, 0xB4, 0x6A, 0xA6,
+ 0x79, 0x97, 0xA0, 0x09, 0x90, 0xB3, 0x88, 0x2B, 0x5A, 0xB2, 0xA5, 0x1B,
+ 0xB1, 0x69, 0x96, 0xA4, 0x4A, 0x78, 0x87, 0x3A, 0xA3, 0x59, 0x95, 0x2A,
+ 0xA2, 0xA1, 0x68, 0x86, 0x77, 0x49, 0x94, 0x39, 0x93, 0x58, 0x85, 0x29,
+ 0x67, 0x76, 0x92, 0x19, 0x91, 0x48, 0x84, 0x57, 0x75, 0x38, 0x83, 0x66,
+ 0x28, 0x82, 0x18, 0x47, 0x74, 0x81, 0x08, 0x80, 0x56, 0x65, 0x17, 0x07,
+ 0x70, 0x73, 0x37, 0x27, 0x72, 0x46, 0x64, 0x55, 0x71, 0x36, 0x63, 0x45,
+ 0x54, 0x26, 0x62, 0x16, 0x61, 0x06, 0x60, 0x35, 0x53, 0x44, 0x25, 0x52,
+ 0x15, 0x05, 0x50, 0x51, 0x34, 0x43, 0x24, 0x42, 0x33, 0x14, 0x41, 0x04,
+ 0x40, 0x23, 0x32, 0x13, 0x31, 0x03, 0x30, 0x22, 0x12, 0x21, 0x02, 0x20,
+ 0x11, 0x01, 0x10, 0x00,
+};
+
+static const uint8_t mpa_huff_sizes_minus_one[] =
+{
+ 3, 8, 8, 15, 15, 35, 35, 35, 63, 63, 63, 255, 255, 255, 255
+};
+
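+/* indexed by table_select: the first column is the ff_huff_vlc[] table to
+   use, the second the number of linbits read for escape values (see
+   huffman_decode() in mpegaudiodec_template.c) */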
+const uint8_t ff_mpa_huff_data[32][2] = {
+{ 0, 0 },
+{ 1, 0 },
+{ 2, 0 },
+{ 3, 0 },
+{ 0, 0 },
+{ 4, 0 },
+{ 5, 0 },
+{ 6, 0 },
+{ 7, 0 },
+{ 8, 0 },
+{ 9, 0 },
+{ 10, 0 },
+{ 11, 0 },
+{ 12, 0 },
+{ 0, 0 },
+{ 13, 0 },
+{ 14, 1 },
+{ 14, 2 },
+{ 14, 3 },
+{ 14, 4 },
+{ 14, 6 },
+{ 14, 8 },
+{ 14, 10 },
+{ 14, 13 },
+{ 15, 4 },
+{ 15, 5 },
+{ 15, 6 },
+{ 15, 7 },
+{ 15, 8 },
+{ 15, 9 },
+{ 15, 11 },
+{ 15, 13 },
+};
+
+
+/* Huffman tables for quadruples */
+static const uint8_t mpa_quad_codes[2][16] = {
+ { 1, 5, 4, 5, 6, 5, 4, 4, 7, 3, 6, 0, 7, 2, 3, 1, },
+ { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, },
+};
+
+static const uint8_t mpa_quad_bits[2][16] = {
+ { 1, 4, 4, 5, 4, 6, 5, 6, 4, 5, 5, 6, 5, 6, 6, 6, },
+ { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, },
+};
+
+const uint8_t ff_band_size_long[9][22] = {
+{ 4, 4, 4, 4, 4, 4, 6, 6, 8, 8, 10,
+ 12, 16, 20, 24, 28, 34, 42, 50, 54, 76, 158, }, /* 44100 */
+{ 4, 4, 4, 4, 4, 4, 6, 6, 6, 8, 10,
+ 12, 16, 18, 22, 28, 34, 40, 46, 54, 54, 192, }, /* 48000 */
+{ 4, 4, 4, 4, 4, 4, 6, 6, 8, 10, 12,
+ 16, 20, 24, 30, 38, 46, 56, 68, 84, 102, 26, }, /* 32000 */
+{ 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16,
+ 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 22050 */
+{ 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16,
+ 18, 22, 26, 32, 38, 46, 54, 62, 70, 76, 36, }, /* 24000 */
+{ 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16,
+ 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 16000 */
+{ 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16,
+ 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 11025 */
+{ 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16,
+ 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 12000 */
+{ 12, 12, 12, 12, 12, 12, 16, 20, 24, 28, 32,
+ 40, 48, 56, 64, 76, 90, 2, 2, 2, 2, 2, }, /* 8000 */
+};
+
+const uint8_t ff_band_size_short[9][13] = {
+{ 4, 4, 4, 4, 6, 8, 10, 12, 14, 18, 22, 30, 56, }, /* 44100 */
+{ 4, 4, 4, 4, 6, 6, 10, 12, 14, 16, 20, 26, 66, }, /* 48000 */
+{ 4, 4, 4, 4, 6, 8, 12, 16, 20, 26, 34, 42, 12, }, /* 32000 */
+{ 4, 4, 4, 6, 6, 8, 10, 14, 18, 26, 32, 42, 18, }, /* 22050 */
+{ 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 32, 44, 12, }, /* 24000 */
+{ 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 30, 40, 18, }, /* 16000 */
+{ 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 30, 40, 18, }, /* 11025 */
+{ 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 30, 40, 18, }, /* 12000 */
+{ 8, 8, 8, 12, 16, 20, 24, 28, 36, 2, 2, 2, 26, }, /* 8000 */
+};
+
+uint16_t ff_band_index_long[9][23];
+
+const uint8_t ff_mpa_pretab[2][22] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 3, 2, 0 },
+};
+
+static av_cold void mpegaudiodec_common_init_static(void)
+{
+ const uint8_t *huff_sym = mpa_huffsymbols, *huff_lens = mpa_hufflens;
+ int offset;
+
+ /* scale factors table for layer 1/2 */
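+    /* each entry packs i % 3 in the two low bits and i / 3 above them;
+       the layer 1/2 unscaling code splits it back with "& 3" and ">> 2" */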
+ for (int i = 0; i < 64; i++) {
+ int shift, mod;
+ /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */
+ shift = i / 3;
+ mod = i % 3;
+ ff_scale_factor_modshift[i] = mod | (shift << 2);
+ }
+
+ /* huffman decode tables */
+ offset = 0;
+ for (int i = 0; i < 15;) {
+ uint16_t tmp_symbols[256];
+ int nb_codes_minus_one = mpa_huff_sizes_minus_one[i];
+ int j;
+
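+        /* remap each symbol: the x nibble goes to bits 5-8, the y nibble to
+           bits 0-3, and bit 4 flags "both x and y non-zero"; huffman_decode()
+           relies on this layout (see its "y & 16" test) */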
+ for (j = 0; j <= nb_codes_minus_one; j++) {
+ uint8_t high = huff_sym[j] & 0xF0, low = huff_sym[j] & 0xF;
+
+ tmp_symbols[j] = high << 1 | ((high && low) << 4) | low;
+ }
+
+ ff_huff_vlc[++i].table = huff_vlc_tables + offset;
+ ff_huff_vlc[i].table_allocated = FF_ARRAY_ELEMS(huff_vlc_tables) - offset;
+ ff_init_vlc_from_lengths(&ff_huff_vlc[i], 7, j,
+ huff_lens, 1, tmp_symbols, 2, 2,
+ 0, INIT_VLC_STATIC_OVERLONG, NULL);
+ offset += ff_huff_vlc[i].table_size;
+ huff_lens += j;
+ huff_sym += j;
+ }
+ av_assert0(offset == FF_ARRAY_ELEMS(huff_vlc_tables));
+
+ offset = 0;
+ for (int i = 0; i < 2; i++) {
+ int bits = i == 0 ? 6 : 4;
+ ff_huff_quad_vlc[i].table = huff_quad_vlc_tables + offset;
+ ff_huff_quad_vlc[i].table_allocated = 1 << bits;
+ offset += 1 << bits;
+ init_vlc(&ff_huff_quad_vlc[i], bits, 16,
+ mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1,
+ INIT_VLC_USE_NEW_STATIC);
+ }
+ av_assert0(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables));
+
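+    /* long block band start positions, accumulated in units of coefficient
+       pairs (hence the ">> 1"), matching the pair-based region sizes used by
+       the layer 3 decoder */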
+ for (int i = 0; i < 9; i++) {
+ int k = 0;
+ for (int j = 0; j < 22; j++) {
+ ff_band_index_long[i][j] = k;
+ k += ff_band_size_long[i][j] >> 1;
+ }
+ ff_band_index_long[i][22] = k;
+ }
+
+ for (int i = 0; i < 4; i++) {
+ if (ff_mpa_quant_bits[i] < 0) {
+ for (int j = 0; j < (1 << (-ff_mpa_quant_bits[i] + 1)); j++) {
+ int val1, val2, val3, steps;
+ int val = j;
+ steps = ff_mpa_quant_steps[i];
+ val1 = val % steps;
+ val /= steps;
+ val2 = val % steps;
+ val3 = val / steps;
+ ff_division_tabs[i][j] = val1 + (val2 << 4) + (val3 << 8);
+ }
+ }
+ }
+ mpegaudiodec_common_tableinit();
+}
+
+av_cold void ff_mpegaudiodec_common_init_static(void)
+{
+ static AVOnce init_static_once = AV_ONCE_INIT;
+
+ ff_thread_once(&init_static_once, mpegaudiodec_common_init_static);
+}
diff --git a/media/ffvpx/libavcodec/mpegaudiodec_common_tablegen.h b/media/ffvpx/libavcodec/mpegaudiodec_common_tablegen.h
new file mode 100644
index 0000000000..bf402c9d84
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodec_common_tablegen.h
@@ -0,0 +1,72 @@
+/*
+ * Header file for hardcoded shared mpegaudiodec tables
+ *
+ * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
+ * Copyright (c) 2020 Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEGAUDIODEC_COMMON_TABLEGEN_H
+#define AVCODEC_MPEGAUDIODEC_COMMON_TABLEGEN_H
+
+#include <stdint.h>
+
+#define TABLE_4_3_SIZE ((8191 + 16)*4)
+
+#if CONFIG_HARDCODED_TABLES
+#define mpegaudiodec_common_tableinit()
+#include "libavcodec/mpegaudiodec_common_tables.h"
+#else
+#include <math.h>
+#include "libavutil/attributes.h"
+
+int8_t ff_table_4_3_exp [TABLE_4_3_SIZE];
+uint32_t ff_table_4_3_value[TABLE_4_3_SIZE];
+
+#define FRAC_BITS 23
+#define IMDCT_SCALAR 1.759
+
+static av_cold void mpegaudiodec_common_tableinit(void)
+{
+ static const double exp2_lut[4] = {
+ 1.00000000000000000000, /* 2 ^ (0 * 0.25) */
+ 1.18920711500272106672, /* 2 ^ (1 * 0.25) */
+ M_SQRT2 , /* 2 ^ (2 * 0.25) */
+ 1.68179283050742908606, /* 2 ^ (3 * 0.25) */
+ };
+ double pow43_val = 0;
+
+ for (int i = 1; i < TABLE_4_3_SIZE; i++) {
+ double f, fm;
+ int e, m;
+ double value = i / 4;
+ if ((i & 3) == 0)
+ pow43_val = value / IMDCT_SCALAR * cbrt(value);
+ f = pow43_val * exp2_lut[i & 3];
+ fm = frexp(f, &e);
+ m = llrint(fm * (1LL << 31));
+ e += FRAC_BITS - 31 + 5 - 100;
+
+ /* normalized to FRAC_BITS */
+ ff_table_4_3_value[i] = m;
+ ff_table_4_3_exp [i] = -e;
+ }
+}
+
+#endif /* CONFIG_HARDCODED_TABLES */
+#endif /* AVCODEC_MPEGAUDIODEC_COMMON_TABLEGEN_H */
diff --git a/media/ffvpx/libavcodec/mpegaudiodec_fixed.c b/media/ffvpx/libavcodec/mpegaudiodec_fixed.c
new file mode 100644
index 0000000000..b5b6822a19
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodec_fixed.c
@@ -0,0 +1,148 @@
+/*
+ * Fixed-point MPEG audio decoder
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "config_components.h"
+#include "libavutil/samplefmt.h"
+
+#define USE_FLOATS 0
+
+#include "codec_internal.h"
+#include "mpegaudio.h"
+
+#define SHR(a,b) (((int)(a))>>(b))
+/* WARNING: only correct for positive numbers */
+#define FIXR_OLD(a) ((int)((a) * FRAC_ONE + 0.5))
+#define FIXR(a) ((int)((a) * FRAC_ONE + 0.5))
+#define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5))
+#define MULH3(x, y, s) MULH((s)*(x), y)
+#define MULLx(x, y, s) MULL((int)(x),(y),s)
+#define RENAME(a) a ## _fixed
+#define OUT_FMT AV_SAMPLE_FMT_S16
+#define OUT_FMT_P AV_SAMPLE_FMT_S16P
+
+/* Intensity stereo table. See commit b91d46614df189e7905538e7f5c4ed9c7ed0d274
+ * (float based mp1/mp2/mp3 decoders.) for how these values were created. */
+static const int32_t is_table[2][16] = {
+ { 0x000000, 0x1B0CB1, 0x2ED9EC, 0x400000, 0x512614, 0x64F34F, 0x800000 },
+ { 0x800000, 0x64F34F, 0x512614, 0x400000, 0x2ED9EC, 0x1B0CB1, 0x000000 }
+};
+
+/* Antialiasing table. See commit ce4a29c066cddfc180979ed86396812f24337985
+ * (optimize antialias) for how these values were created. */
+static const int32_t csa_table[8][4] = {
+ { 0x36E129F8, 0xDF128056, 0x15F3AA4E, 0xA831565E },
+ { 0x386E75F2, 0xE1CF24A5, 0x1A3D9A97, 0xA960AEB3 },
+ { 0x3CC6B73A, 0xEBF19FA6, 0x28B856E0, 0xAF2AE86C },
+ { 0x3EEEA054, 0xF45B88BC, 0x334A2910, 0xB56CE868 },
+ { 0x3FB6905C, 0xF9F27F18, 0x39A90F74, 0xBA3BEEBC },
+ { 0x3FF23F20, 0xFD60D1E4, 0x3D531104, 0xBD6E92C4 },
+ { 0x3FFE5932, 0xFF175EE4, 0x3F15B816, 0xBF1905B2 },
+ { 0x3FFFE34A, 0xFFC3612F, 0x3FC34479, 0xBFC37DE5 }
+};
+
+#include "mpegaudiodec_template.c"
+
+#if CONFIG_MP1_DECODER
+const FFCodec ff_mp1_decoder = {
+ .p.name = "mp1",
+ CODEC_LONG_NAME("MP1 (MPEG audio layer 1)"),
+ .p.type = AVMEDIA_TYPE_AUDIO,
+ .p.id = AV_CODEC_ID_MP1,
+ .priv_data_size = sizeof(MPADecodeContext),
+ .init = decode_init,
+ FF_CODEC_DECODE_CB(decode_frame),
+ .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
+ AV_CODEC_CAP_DR1,
+ .flush = flush,
+ .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE },
+};
+#endif
+#if CONFIG_MP2_DECODER
+const FFCodec ff_mp2_decoder = {
+ .p.name = "mp2",
+ CODEC_LONG_NAME("MP2 (MPEG audio layer 2)"),
+ .p.type = AVMEDIA_TYPE_AUDIO,
+ .p.id = AV_CODEC_ID_MP2,
+ .priv_data_size = sizeof(MPADecodeContext),
+ .init = decode_init,
+ FF_CODEC_DECODE_CB(decode_frame),
+ .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
+ AV_CODEC_CAP_DR1,
+ .flush = flush,
+ .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE },
+};
+#endif
+#if CONFIG_MP3_DECODER
+const FFCodec ff_mp3_decoder = {
+ .p.name = "mp3",
+ CODEC_LONG_NAME("MP3 (MPEG audio layer 3)"),
+ .p.type = AVMEDIA_TYPE_AUDIO,
+ .p.id = AV_CODEC_ID_MP3,
+ .priv_data_size = sizeof(MPADecodeContext),
+ .init = decode_init,
+ FF_CODEC_DECODE_CB(decode_frame),
+ .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
+ AV_CODEC_CAP_DR1,
+ .flush = flush,
+ .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE },
+};
+#endif
+#if CONFIG_MP3ADU_DECODER
+const FFCodec ff_mp3adu_decoder = {
+ .p.name = "mp3adu",
+ CODEC_LONG_NAME("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
+ .p.type = AVMEDIA_TYPE_AUDIO,
+ .p.id = AV_CODEC_ID_MP3ADU,
+ .priv_data_size = sizeof(MPADecodeContext),
+ .init = decode_init,
+ FF_CODEC_DECODE_CB(decode_frame_adu),
+ .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
+ AV_CODEC_CAP_DR1,
+ .flush = flush,
+ .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE },
+};
+#endif
+#if CONFIG_MP3ON4_DECODER
+const FFCodec ff_mp3on4_decoder = {
+ .p.name = "mp3on4",
+ CODEC_LONG_NAME("MP3onMP4"),
+ .p.type = AVMEDIA_TYPE_AUDIO,
+ .p.id = AV_CODEC_ID_MP3ON4,
+ .priv_data_size = sizeof(MP3On4DecodeContext),
+ .init = decode_init_mp3on4,
+ .close = decode_close_mp3on4,
+ FF_CODEC_DECODE_CB(decode_frame_mp3on4),
+ .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
+ AV_CODEC_CAP_DR1,
+ .flush = flush_mp3on4,
+ .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_NONE },
+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
+};
+#endif
diff --git a/media/ffvpx/libavcodec/mpegaudiodec_template.c b/media/ffvpx/libavcodec/mpegaudiodec_template.c
new file mode 100644
index 0000000000..3e4ee79be6
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodec_template.c
@@ -0,0 +1,1899 @@
+/*
+ * MPEG Audio decoder
+ * Copyright (c) 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * MPEG Audio decoder
+ */
+
+#include "config_components.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/crc.h"
+#include "libavutil/float_dsp.h"
+#include "libavutil/libm.h"
+#include "libavutil/mem_internal.h"
+#include "libavutil/thread.h"
+
+#include "avcodec.h"
+#include "decode.h"
+#include "get_bits.h"
+#include "mathops.h"
+#include "mpegaudiodsp.h"
+
+/*
+ * TODO:
+ * - test lsf / mpeg25 extensively.
+ */
+
+#include "mpegaudio.h"
+#include "mpegaudiodecheader.h"
+
+#define BACKSTEP_SIZE 512
+#define EXTRABYTES 24
+#define LAST_BUF_SIZE 2 * BACKSTEP_SIZE + EXTRABYTES
+
+/* layer 3 "granule" */
+typedef struct GranuleDef {
+ uint8_t scfsi;
+ int part2_3_length;
+ int big_values;
+ int global_gain;
+ int scalefac_compress;
+ uint8_t block_type;
+ uint8_t switch_point;
+ int table_select[3];
+ int subblock_gain[3];
+ uint8_t scalefac_scale;
+ uint8_t count1table_select;
+ int region_size[3]; /* number of huffman codes in each region */
+ int preflag;
+ int short_start, long_end; /* long/short band indexes */
+ uint8_t scale_factors[40];
+ DECLARE_ALIGNED(16, INTFLOAT, sb_hybrid)[SBLIMIT * 18]; /* 576 samples */
+} GranuleDef;
+
+typedef struct MPADecodeContext {
+ MPA_DECODE_HEADER
+ uint8_t last_buf[LAST_BUF_SIZE];
+ int last_buf_size;
+ int extrasize;
+ /* next header (used in free format parsing) */
+ uint32_t free_format_next_header;
+ GetBitContext gb;
+ GetBitContext in_gb;
+ DECLARE_ALIGNED(32, MPA_INT, synth_buf)[MPA_MAX_CHANNELS][512 * 2];
+ int synth_buf_offset[MPA_MAX_CHANNELS];
+ DECLARE_ALIGNED(32, INTFLOAT, sb_samples)[MPA_MAX_CHANNELS][36][SBLIMIT];
+ INTFLOAT mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */
+ GranuleDef granules[2][2]; /* Used in Layer 3 */
+ int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
+ int dither_state;
+ int err_recognition;
+ AVCodecContext* avctx;
+ MPADSPContext mpadsp;
+ void (*butterflies_float)(float *av_restrict v1, float *av_restrict v2, int len);
+ AVFrame *frame;
+ uint32_t crc;
+} MPADecodeContext;
+
+#define HEADER_SIZE 4
+
+#include "mpegaudiodata.h"
+
+#include "mpegaudio_tablegen.h"
+/* intensity stereo coef table */
+static INTFLOAT is_table_lsf[2][2][16];
+
+/* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */
+static int32_t scale_factor_mult[15][3];
+/* mult table for layer 2 group quantization */
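+/* (each SCALE_GEN() row scales v by 2^0, 2^(-1/3) and 2^(-2/3), one factor
+   per fractional scale factor step) */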
+
+#define SCALE_GEN(v) \
+{ FIXR_OLD(1.0 * (v)), FIXR_OLD(0.7937005259 * (v)), FIXR_OLD(0.6299605249 * (v)) }
+
+static const int32_t scale_factor_mult2[3][3] = {
+ SCALE_GEN(4.0 / 3.0), /* 3 steps */
+ SCALE_GEN(4.0 / 5.0), /* 5 steps */
+ SCALE_GEN(4.0 / 9.0), /* 9 steps */
+};
+
+/**
+ * Convert region offsets to region sizes and truncate
+ * size to big_values.
+ */
+static void region_offset2size(GranuleDef *g)
+{
+ int i, k, j = 0;
+ g->region_size[2] = 576 / 2;
+ for (i = 0; i < 3; i++) {
+ k = FFMIN(g->region_size[i], g->big_values);
+ g->region_size[i] = k - j;
+ j = k;
+ }
+}
+
+static void init_short_region(MPADecodeContext *s, GranuleDef *g)
+{
+ if (g->block_type == 2) {
+ if (s->sample_rate_index != 8)
+ g->region_size[0] = (36 / 2);
+ else
+ g->region_size[0] = (72 / 2);
+ } else {
+ if (s->sample_rate_index <= 2)
+ g->region_size[0] = (36 / 2);
+ else if (s->sample_rate_index != 8)
+ g->region_size[0] = (54 / 2);
+ else
+ g->region_size[0] = (108 / 2);
+ }
+ g->region_size[1] = (576 / 2);
+}
+
+static void init_long_region(MPADecodeContext *s, GranuleDef *g,
+ int ra1, int ra2)
+{
+ int l;
+ g->region_size[0] = ff_band_index_long[s->sample_rate_index][ra1 + 1];
+ /* should not overflow */
+ l = FFMIN(ra1 + ra2 + 2, 22);
+ g->region_size[1] = ff_band_index_long[s->sample_rate_index][ l];
+}
+
+static void compute_band_indexes(MPADecodeContext *s, GranuleDef *g)
+{
+ if (g->block_type == 2) {
+ if (g->switch_point) {
+ if(s->sample_rate_index == 8)
+ avpriv_request_sample(s->avctx, "switch point in 8khz");
+            /* in switched mode, we handle the first 36 samples as
+                long blocks.  For 8000 Hz, we handle the first 72
+                exponents as long blocks */
+ if (s->sample_rate_index <= 2)
+ g->long_end = 8;
+ else
+ g->long_end = 6;
+
+ g->short_start = 3;
+ } else {
+ g->long_end = 0;
+ g->short_start = 0;
+ }
+ } else {
+ g->short_start = 13;
+ g->long_end = 22;
+ }
+}
+
+/* layer 1 unscaling */
+/* n = number of bits of the mantissa minus 1 */
+static inline int l1_unscale(int n, int mant, int scale_factor)
+{
+ int shift, mod;
+ int64_t val;
+
+ shift = ff_scale_factor_modshift[scale_factor];
+ mod = shift & 3;
+ shift >>= 2;
+ val = MUL64((int)(mant + (-1U << n) + 1), scale_factor_mult[n-1][mod]);
+ shift += n;
+    /* NOTE: at this point, 1 <= shift <= 21 + 15 */
+ return (int)((val + (1LL << (shift - 1))) >> shift);
+}
+
+static inline int l2_unscale_group(int steps, int mant, int scale_factor)
+{
+ int shift, mod, val;
+
+ shift = ff_scale_factor_modshift[scale_factor];
+ mod = shift & 3;
+ shift >>= 2;
+
+ val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
+ /* NOTE: at this point, 0 <= shift <= 21 */
+ if (shift > 0)
+ val = (val + (1 << (shift - 1))) >> shift;
+ return val;
+}
+
+/* compute value^(4/3) * 2^(exponent/4). It is normalized to FRAC_BITS */
+static inline int l3_unscale(int value, int exponent)
+{
+ unsigned int m;
+ int e;
+
+ e = ff_table_4_3_exp [4 * value + (exponent & 3)];
+ m = ff_table_4_3_value[4 * value + (exponent & 3)];
+ e -= exponent >> 2;
+#ifdef DEBUG
+ if(e < 1)
+ av_log(NULL, AV_LOG_WARNING, "l3_unscale: e is %d\n", e);
+#endif
+ if (e > (SUINT)31)
+ return 0;
+ m = (m + ((1U << e) >> 1)) >> e;
+
+ return m;
+}
+
+static av_cold void decode_init_static(void)
+{
+ int i, j;
+
+ /* scale factor multiply for layer 1 */
+ for (i = 0; i < 15; i++) {
+ int n, norm;
+ n = i + 2;
+ norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
+ scale_factor_mult[i][0] = MULLx(norm, FIXR(1.0 * 2.0), FRAC_BITS);
+ scale_factor_mult[i][1] = MULLx(norm, FIXR(0.7937005259 * 2.0), FRAC_BITS);
+ scale_factor_mult[i][2] = MULLx(norm, FIXR(0.6299605249 * 2.0), FRAC_BITS);
+ ff_dlog(NULL, "%d: norm=%x s=%"PRIx32" %"PRIx32" %"PRIx32"\n", i,
+ (unsigned)norm,
+ scale_factor_mult[i][0],
+ scale_factor_mult[i][1],
+ scale_factor_mult[i][2]);
+ }
+
+ /* compute n ^ (4/3) and store it in mantissa/exp format */
+
+ mpegaudio_tableinit();
+
+ for (i = 0; i < 16; i++) {
+ double f;
+ int e, k;
+
+ for (j = 0; j < 2; j++) {
+ e = -(j + 1) * ((i + 1) >> 1);
+ f = exp2(e / 4.0);
+ k = i & 1;
+ is_table_lsf[j][k ^ 1][i] = FIXR(f);
+ is_table_lsf[j][k ][i] = FIXR(1.0);
+ ff_dlog(NULL, "is_table_lsf %d %d: %f %f\n",
+ i, j, (float) is_table_lsf[j][0][i],
+ (float) is_table_lsf[j][1][i]);
+ }
+ }
+ RENAME(ff_mpa_synth_init)();
+ ff_mpegaudiodec_common_init_static();
+}
+
+static av_cold int decode_init(AVCodecContext * avctx)
+{
+ static AVOnce init_static_once = AV_ONCE_INIT;
+ MPADecodeContext *s = avctx->priv_data;
+
+ s->avctx = avctx;
+
+#if USE_FLOATS
+ {
+ AVFloatDSPContext *fdsp;
+ fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!fdsp)
+ return AVERROR(ENOMEM);
+ s->butterflies_float = fdsp->butterflies_float;
+ av_free(fdsp);
+ }
+#endif
+
+ ff_mpadsp_init(&s->mpadsp);
+
+ if (avctx->request_sample_fmt == OUT_FMT &&
+ avctx->codec_id != AV_CODEC_ID_MP3ON4)
+ avctx->sample_fmt = OUT_FMT;
+ else
+ avctx->sample_fmt = OUT_FMT_P;
+ s->err_recognition = avctx->err_recognition;
+
+ if (avctx->codec_id == AV_CODEC_ID_MP3ADU)
+ s->adu_mode = 1;
+
+ ff_thread_once(&init_static_once, decode_init_static);
+
+ return 0;
+}
+
+#define C3 FIXHR(0.86602540378443864676/2)
+#define C4 FIXHR(0.70710678118654752439/2) //0.5 / cos(pi*(9)/36)
+#define C5 FIXHR(0.51763809020504152469/2) //0.5 / cos(pi*(5)/36)
+#define C6 FIXHR(1.93185165257813657349/4) //0.5 / cos(pi*(15)/36)
+
+/* 12-point IMDCT. We compute it "by hand" by factorizing the obvious
+   cases. */
+static void imdct12(INTFLOAT *out, SUINTFLOAT *in)
+{
+ SUINTFLOAT in0, in1, in2, in3, in4, in5, t1, t2;
+
+ in0 = in[0*3];
+ in1 = in[1*3] + in[0*3];
+ in2 = in[2*3] + in[1*3];
+ in3 = in[3*3] + in[2*3];
+ in4 = in[4*3] + in[3*3];
+ in5 = in[5*3] + in[4*3];
+ in5 += in3;
+ in3 += in1;
+
+ in2 = MULH3(in2, C3, 2);
+ in3 = MULH3(in3, C3, 4);
+
+ t1 = in0 - in4;
+ t2 = MULH3(in1 - in5, C4, 2);
+
+ out[ 7] =
+ out[10] = t1 + t2;
+ out[ 1] =
+ out[ 4] = t1 - t2;
+
+ in0 += SHR(in4, 1);
+ in4 = in0 + in2;
+ in5 += 2*in1;
+ in1 = MULH3(in5 + in3, C5, 1);
+ out[ 8] =
+ out[ 9] = in4 + in1;
+ out[ 2] =
+ out[ 3] = in4 - in1;
+
+ in0 -= in2;
+ in5 = MULH3(in5 - in3, C6, 2);
+ out[ 0] =
+ out[ 5] = in0 - in5;
+ out[ 6] =
+ out[11] = in0 + in5;
+}
+
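+/* when error protection is signalled, check the frame CRC: it covers the
+   last two header bytes plus the first sec_len bits of the side info that
+   follows the stored 16-bit CRC; a correct frame reduces crc_val to 0 */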
+static int handle_crc(MPADecodeContext *s, int sec_len)
+{
+ if (s->error_protection && (s->err_recognition & AV_EF_CRCCHECK)) {
+ const uint8_t *buf = s->gb.buffer - HEADER_SIZE;
+ int sec_byte_len = sec_len >> 3;
+ int sec_rem_bits = sec_len & 7;
+ const AVCRC *crc_tab = av_crc_get_table(AV_CRC_16_ANSI);
+ uint8_t tmp_buf[4];
+ uint32_t crc_val = av_crc(crc_tab, UINT16_MAX, &buf[2], 2);
+ crc_val = av_crc(crc_tab, crc_val, &buf[6], sec_byte_len);
+
+ AV_WB32(tmp_buf,
+ ((buf[6 + sec_byte_len] & (0xFF00U >> sec_rem_bits)) << 24) +
+ ((s->crc << 16) >> sec_rem_bits));
+
+ crc_val = av_crc(crc_tab, crc_val, tmp_buf, 3);
+
+ if (crc_val) {
+ av_log(s->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", crc_val);
+ if (s->err_recognition & AV_EF_EXPLODE)
+ return AVERROR_INVALIDDATA;
+ }
+ }
+ return 0;
+}
+
+/* return the number of decoded frames */
+static int mp_decode_layer1(MPADecodeContext *s)
+{
+ int bound, i, v, n, ch, j, mant;
+ uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
+ uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];
+ int ret;
+
+ ret = handle_crc(s, (s->nb_channels == 1) ? 8*16 : 8*32);
+ if (ret < 0)
+ return ret;
+
+ if (s->mode == MPA_JSTEREO)
+ bound = (s->mode_ext + 1) * 4;
+ else
+ bound = SBLIMIT;
+
+ /* allocation bits */
+ for (i = 0; i < bound; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ allocation[ch][i] = get_bits(&s->gb, 4);
+ }
+ }
+ for (i = bound; i < SBLIMIT; i++)
+ allocation[0][i] = get_bits(&s->gb, 4);
+
+ /* scale factors */
+ for (i = 0; i < bound; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ if (allocation[ch][i])
+ scale_factors[ch][i] = get_bits(&s->gb, 6);
+ }
+ }
+ for (i = bound; i < SBLIMIT; i++) {
+ if (allocation[0][i]) {
+ scale_factors[0][i] = get_bits(&s->gb, 6);
+ scale_factors[1][i] = get_bits(&s->gb, 6);
+ }
+ }
+
+ /* compute samples */
+ for (j = 0; j < 12; j++) {
+ for (i = 0; i < bound; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ n = allocation[ch][i];
+ if (n) {
+ mant = get_bits(&s->gb, n + 1);
+ v = l1_unscale(n, mant, scale_factors[ch][i]);
+ } else {
+ v = 0;
+ }
+ s->sb_samples[ch][j][i] = v;
+ }
+ }
+ for (i = bound; i < SBLIMIT; i++) {
+ n = allocation[0][i];
+ if (n) {
+ mant = get_bits(&s->gb, n + 1);
+ v = l1_unscale(n, mant, scale_factors[0][i]);
+ s->sb_samples[0][j][i] = v;
+ v = l1_unscale(n, mant, scale_factors[1][i]);
+ s->sb_samples[1][j][i] = v;
+ } else {
+ s->sb_samples[0][j][i] = 0;
+ s->sb_samples[1][j][i] = 0;
+ }
+ }
+ }
+ return 12;
+}
+
+static int mp_decode_layer2(MPADecodeContext *s)
+{
+ int sblimit; /* number of used subbands */
+ const unsigned char *alloc_table;
+ int table, bit_alloc_bits, i, j, ch, bound, v;
+ unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
+ unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
+ unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
+ int scale, qindex, bits, steps, k, l, m, b;
+ int ret;
+
+ /* select decoding table */
+ table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
+ s->sample_rate, s->lsf);
+ sblimit = ff_mpa_sblimit_table[table];
+ alloc_table = ff_mpa_alloc_tables[table];
+
+ if (s->mode == MPA_JSTEREO)
+ bound = (s->mode_ext + 1) * 4;
+ else
+ bound = sblimit;
+
+ ff_dlog(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);
+
+ /* sanity check */
+ if (bound > sblimit)
+ bound = sblimit;
+
+ /* parse bit allocation */
+ j = 0;
+ for (i = 0; i < bound; i++) {
+ bit_alloc_bits = alloc_table[j];
+ for (ch = 0; ch < s->nb_channels; ch++)
+ bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
+ j += 1 << bit_alloc_bits;
+ }
+ for (i = bound; i < sblimit; i++) {
+ bit_alloc_bits = alloc_table[j];
+ v = get_bits(&s->gb, bit_alloc_bits);
+ bit_alloc[0][i] = v;
+ bit_alloc[1][i] = v;
+ j += 1 << bit_alloc_bits;
+ }
+
+ /* scale codes */
+ for (i = 0; i < sblimit; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ if (bit_alloc[ch][i])
+ scale_code[ch][i] = get_bits(&s->gb, 2);
+ }
+ }
+
+ ret = handle_crc(s, get_bits_count(&s->gb) - 16);
+ if (ret < 0)
+ return ret;
+
+ /* scale factors */
+ for (i = 0; i < sblimit; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ if (bit_alloc[ch][i]) {
+ sf = scale_factors[ch][i];
+ switch (scale_code[ch][i]) {
+ default:
+ case 0:
+ sf[0] = get_bits(&s->gb, 6);
+ sf[1] = get_bits(&s->gb, 6);
+ sf[2] = get_bits(&s->gb, 6);
+ break;
+ case 2:
+ sf[0] = get_bits(&s->gb, 6);
+ sf[1] = sf[0];
+ sf[2] = sf[0];
+ break;
+ case 1:
+ sf[0] = get_bits(&s->gb, 6);
+ sf[2] = get_bits(&s->gb, 6);
+ sf[1] = sf[0];
+ break;
+ case 3:
+ sf[0] = get_bits(&s->gb, 6);
+ sf[2] = get_bits(&s->gb, 6);
+ sf[1] = sf[2];
+ break;
+ }
+ }
+ }
+ }
+
+ /* samples */
+ for (k = 0; k < 3; k++) {
+ for (l = 0; l < 12; l += 3) {
+ j = 0;
+ for (i = 0; i < bound; i++) {
+ bit_alloc_bits = alloc_table[j];
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ b = bit_alloc[ch][i];
+ if (b) {
+ scale = scale_factors[ch][i][k];
+ qindex = alloc_table[j+b];
+ bits = ff_mpa_quant_bits[qindex];
+ if (bits < 0) {
+ int v2;
+ /* 3 values at the same time */
+ v = get_bits(&s->gb, -bits);
+ v2 = ff_division_tabs[qindex][v];
+ steps = ff_mpa_quant_steps[qindex];
+
+ s->sb_samples[ch][k * 12 + l + 0][i] =
+ l2_unscale_group(steps, v2 & 15, scale);
+ s->sb_samples[ch][k * 12 + l + 1][i] =
+ l2_unscale_group(steps, (v2 >> 4) & 15, scale);
+ s->sb_samples[ch][k * 12 + l + 2][i] =
+ l2_unscale_group(steps, v2 >> 8 , scale);
+ } else {
+ for (m = 0; m < 3; m++) {
+ v = get_bits(&s->gb, bits);
+ v = l1_unscale(bits - 1, v, scale);
+ s->sb_samples[ch][k * 12 + l + m][i] = v;
+ }
+ }
+ } else {
+ s->sb_samples[ch][k * 12 + l + 0][i] = 0;
+ s->sb_samples[ch][k * 12 + l + 1][i] = 0;
+ s->sb_samples[ch][k * 12 + l + 2][i] = 0;
+ }
+ }
+ /* next subband in alloc table */
+ j += 1 << bit_alloc_bits;
+ }
+ /* XXX: find a way to avoid this duplication of code */
+ for (i = bound; i < sblimit; i++) {
+ bit_alloc_bits = alloc_table[j];
+ b = bit_alloc[0][i];
+ if (b) {
+ int mant, scale0, scale1;
+ scale0 = scale_factors[0][i][k];
+ scale1 = scale_factors[1][i][k];
+ qindex = alloc_table[j + b];
+ bits = ff_mpa_quant_bits[qindex];
+ if (bits < 0) {
+ /* 3 values at the same time */
+ v = get_bits(&s->gb, -bits);
+ steps = ff_mpa_quant_steps[qindex];
+ mant = v % steps;
+ v = v / steps;
+ s->sb_samples[0][k * 12 + l + 0][i] =
+ l2_unscale_group(steps, mant, scale0);
+ s->sb_samples[1][k * 12 + l + 0][i] =
+ l2_unscale_group(steps, mant, scale1);
+ mant = v % steps;
+ v = v / steps;
+ s->sb_samples[0][k * 12 + l + 1][i] =
+ l2_unscale_group(steps, mant, scale0);
+ s->sb_samples[1][k * 12 + l + 1][i] =
+ l2_unscale_group(steps, mant, scale1);
+ s->sb_samples[0][k * 12 + l + 2][i] =
+ l2_unscale_group(steps, v, scale0);
+ s->sb_samples[1][k * 12 + l + 2][i] =
+ l2_unscale_group(steps, v, scale1);
+ } else {
+ for (m = 0; m < 3; m++) {
+ mant = get_bits(&s->gb, bits);
+ s->sb_samples[0][k * 12 + l + m][i] =
+ l1_unscale(bits - 1, mant, scale0);
+ s->sb_samples[1][k * 12 + l + m][i] =
+ l1_unscale(bits - 1, mant, scale1);
+ }
+ }
+ } else {
+ s->sb_samples[0][k * 12 + l + 0][i] = 0;
+ s->sb_samples[0][k * 12 + l + 1][i] = 0;
+ s->sb_samples[0][k * 12 + l + 2][i] = 0;
+ s->sb_samples[1][k * 12 + l + 0][i] = 0;
+ s->sb_samples[1][k * 12 + l + 1][i] = 0;
+ s->sb_samples[1][k * 12 + l + 2][i] = 0;
+ }
+ /* next subband in alloc table */
+ j += 1 << bit_alloc_bits;
+ }
+ /* fill remaining samples to zero */
+ for (i = sblimit; i < SBLIMIT; i++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ s->sb_samples[ch][k * 12 + l + 0][i] = 0;
+ s->sb_samples[ch][k * 12 + l + 1][i] = 0;
+ s->sb_samples[ch][k * 12 + l + 2][i] = 0;
+ }
+ }
+ }
+ }
+ return 3 * 12;
+}
+
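+/* split one scale factor field out of sf: division by 3, 5 or 6 is done as
+   a multiply and shift (171/512 ~ 1/3, 205/1024 ~ 1/5, 171/1024 ~ 1/6),
+   which matches exact integer division over the value range used here */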
+#define SPLIT(dst,sf,n) \
+ if (n == 3) { \
+ int m = (sf * 171) >> 9; \
+ dst = sf - 3 * m; \
+ sf = m; \
+ } else if (n == 4) { \
+ dst = sf & 3; \
+ sf >>= 2; \
+ } else if (n == 5) { \
+ int m = (sf * 205) >> 10; \
+ dst = sf - 5 * m; \
+ sf = m; \
+ } else if (n == 6) { \
+ int m = (sf * 171) >> 10; \
+ dst = sf - 6 * m; \
+ sf = m; \
+ } else { \
+ dst = 0; \
+ }
+
+static av_always_inline void lsf_sf_expand(int *slen, int sf, int n1, int n2,
+ int n3)
+{
+ SPLIT(slen[3], sf, n3)
+ SPLIT(slen[2], sf, n2)
+ SPLIT(slen[1], sf, n1)
+ slen[0] = sf;
+}
+
+static void exponents_from_scale_factors(MPADecodeContext *s, GranuleDef *g,
+ int16_t *exponents)
+{
+ const uint8_t *bstab, *pretab;
+ int len, i, j, k, l, v0, shift, gain, gains[3];
+ int16_t *exp_ptr;
+
+ exp_ptr = exponents;
+ gain = g->global_gain - 210;
+ shift = g->scalefac_scale + 1;
+
+ bstab = ff_band_size_long[s->sample_rate_index];
+ pretab = ff_mpa_pretab[g->preflag];
+ for (i = 0; i < g->long_end; i++) {
+ v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
+ len = bstab[i];
+ for (j = len; j > 0; j--)
+ *exp_ptr++ = v0;
+ }
+
+ if (g->short_start < 13) {
+ bstab = ff_band_size_short[s->sample_rate_index];
+ gains[0] = gain - (g->subblock_gain[0] << 3);
+ gains[1] = gain - (g->subblock_gain[1] << 3);
+ gains[2] = gain - (g->subblock_gain[2] << 3);
+ k = g->long_end;
+ for (i = g->short_start; i < 13; i++) {
+ len = bstab[i];
+ for (l = 0; l < 3; l++) {
+ v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
+ for (j = len; j > 0; j--)
+ *exp_ptr++ = v0;
+ }
+ }
+ }
+}
+
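+/* bit reservoir handling: while s->in_gb.buffer is set, s->gb reads from the
+   backstep buffer (last_buf); once that data is exhausted, switch back to the
+   in-frame bitstream and adjust the position/end markers accordingly */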
+static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos,
+ int *end_pos2)
+{
+ if (s->in_gb.buffer && *pos >= s->gb.size_in_bits - s->extrasize * 8) {
+ s->gb = s->in_gb;
+ s->in_gb.buffer = NULL;
+ s->extrasize = 0;
+ av_assert2((get_bits_count(&s->gb) & 7) == 0);
+ skip_bits_long(&s->gb, *pos - *end_pos);
+ *end_pos2 =
+ *end_pos = *end_pos2 + get_bits_count(&s->gb) - *pos;
+ *pos = get_bits_count(&s->gb);
+ }
+}
+
+/* The following is optimized code for
+            INTFLOAT v = *src;
+            if (get_bits1(&s->gb))
+                v = -v;
+            *dst = v;
+*/
+#if USE_FLOATS
+#define READ_FLIP_SIGN(dst,src) \
+ v = AV_RN32A(src) ^ (get_bits1(&s->gb) << 31); \
+ AV_WN32A(dst, v);
+#else
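+/* v = -get_bits1(...) is either 0 or ~0, so (*src ^ v) - v negates the value
+   exactly when the bit read was 1, without a branch */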
+#define READ_FLIP_SIGN(dst,src) \
+ v = -get_bits1(&s->gb); \
+ *(dst) = (*(src) ^ v) - v;
+#endif
+
+static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
+ int16_t *exponents, int end_pos2)
+{
+ int s_index;
+ int i;
+ int last_pos, bits_left;
+ VLC *vlc;
+ int end_pos = FFMIN(end_pos2, s->gb.size_in_bits - s->extrasize * 8);
+
+ /* low frequencies (called big values) */
+ s_index = 0;
+ for (i = 0; i < 3; i++) {
+ int j, k, l, linbits;
+ j = g->region_size[i];
+ if (j == 0)
+ continue;
+ /* select vlc table */
+ k = g->table_select[i];
+ l = ff_mpa_huff_data[k][0];
+ linbits = ff_mpa_huff_data[k][1];
+ vlc = &ff_huff_vlc[l];
+
+ if (!l) {
+ memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * 2 * j);
+ s_index += 2 * j;
+ continue;
+ }
+
+ /* read huffcode and compute each couple */
+ for (; j > 0; j--) {
+ int exponent, x, y;
+ int v;
+ int pos = get_bits_count(&s->gb);
+
+ if (pos >= end_pos){
+ switch_buffer(s, &pos, &end_pos, &end_pos2);
+ if (pos >= end_pos)
+ break;
+ }
+ y = get_vlc2(&s->gb, vlc->table, 7, 3);
+
+ if (!y) {
+ g->sb_hybrid[s_index ] =
+ g->sb_hybrid[s_index + 1] = 0;
+ s_index += 2;
+ continue;
+ }
+
+ exponent= exponents[s_index];
+
+ ff_dlog(s->avctx, "region=%d n=%d y=%d exp=%d\n",
+ i, g->region_size[i] - j, y, exponent);
+ if (y & 16) {
+ x = y >> 5;
+ y = y & 0x0f;
+ if (x < 15) {
+ READ_FLIP_SIGN(g->sb_hybrid + s_index, RENAME(expval_table)[exponent] + x)
+ } else {
+ x += get_bitsz(&s->gb, linbits);
+ v = l3_unscale(x, exponent);
+ if (get_bits1(&s->gb))
+ v = -v;
+ g->sb_hybrid[s_index] = v;
+ }
+ if (y < 15) {
+ READ_FLIP_SIGN(g->sb_hybrid + s_index + 1, RENAME(expval_table)[exponent] + y)
+ } else {
+ y += get_bitsz(&s->gb, linbits);
+ v = l3_unscale(y, exponent);
+ if (get_bits1(&s->gb))
+ v = -v;
+ g->sb_hybrid[s_index + 1] = v;
+ }
+ } else {
+ x = y >> 5;
+ y = y & 0x0f;
+ x += y;
+ if (x < 15) {
+ READ_FLIP_SIGN(g->sb_hybrid + s_index + !!y, RENAME(expval_table)[exponent] + x)
+ } else {
+ x += get_bitsz(&s->gb, linbits);
+ v = l3_unscale(x, exponent);
+ if (get_bits1(&s->gb))
+ v = -v;
+ g->sb_hybrid[s_index+!!y] = v;
+ }
+ g->sb_hybrid[s_index + !y] = 0;
+ }
+ s_index += 2;
+ }
+ }
+
+ /* high frequencies */
+ vlc = &ff_huff_quad_vlc[g->count1table_select];
+ last_pos = 0;
+ while (s_index <= 572) {
+ int pos, code;
+ pos = get_bits_count(&s->gb);
+ if (pos >= end_pos) {
+ if (pos > end_pos2 && last_pos) {
+ /* some encoders generate an incorrect size for this
+ part. We must go back into the data */
+ s_index -= 4;
+ skip_bits_long(&s->gb, last_pos - pos);
+ av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
+ if(s->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT))
+ s_index=0;
+ break;
+ }
+ switch_buffer(s, &pos, &end_pos, &end_pos2);
+ if (pos >= end_pos)
+ break;
+ }
+ last_pos = pos;
+
+ code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
+ ff_dlog(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
+ g->sb_hybrid[s_index + 0] =
+ g->sb_hybrid[s_index + 1] =
+ g->sb_hybrid[s_index + 2] =
+ g->sb_hybrid[s_index + 3] = 0;
+ while (code) {
+ static const int idxtab[16] = { 3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0 };
+ int v;
+ int pos = s_index + idxtab[code];
+ code ^= 8 >> idxtab[code];
+ READ_FLIP_SIGN(g->sb_hybrid + pos, RENAME(exp_table)+exponents[pos])
+ }
+ s_index += 4;
+ }
+ /* skip extension bits */
+ bits_left = end_pos2 - get_bits_count(&s->gb);
+ if (bits_left < 0 && (s->err_recognition & (AV_EF_BUFFER|AV_EF_COMPLIANT))) {
+ av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
+ s_index=0;
+ } else if (bits_left > 0 && (s->err_recognition & (AV_EF_BUFFER|AV_EF_AGGRESSIVE))) {
+ av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
+ s_index = 0;
+ }
+ memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * (576 - s_index));
+ skip_bits_long(&s->gb, bits_left);
+
+ i = get_bits_count(&s->gb);
+ switch_buffer(s, &i, &end_pos, &end_pos2);
+
+ return 0;
+}
+
+/* Reorder short blocks from bitstream order to interleaved order. It
+   would be faster to do this during parsing, but the code would be far
+   more complicated. */
+static void reorder_block(MPADecodeContext *s, GranuleDef *g)
+{
+ int i, j, len;
+ INTFLOAT *ptr, *dst, *ptr1;
+ INTFLOAT tmp[576];
+
+ if (g->block_type != 2)
+ return;
+
+ if (g->switch_point) {
+ if (s->sample_rate_index != 8)
+ ptr = g->sb_hybrid + 36;
+ else
+ ptr = g->sb_hybrid + 72;
+ } else {
+ ptr = g->sb_hybrid;
+ }
+
+ for (i = g->short_start; i < 13; i++) {
+ len = ff_band_size_short[s->sample_rate_index][i];
+ ptr1 = ptr;
+ dst = tmp;
+ for (j = len; j > 0; j--) {
+ *dst++ = ptr[0*len];
+ *dst++ = ptr[1*len];
+ *dst++ = ptr[2*len];
+ ptr++;
+ }
+ ptr += 2 * len;
+ memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
+ }
+}
+
+#define ISQRT2 FIXR(0.70710678118654752440)
+
+static void compute_stereo(MPADecodeContext *s, GranuleDef *g0, GranuleDef *g1)
+{
+ int i, j, k, l;
+ int sf_max, sf, len, non_zero_found;
+ INTFLOAT *tab0, *tab1, v1, v2;
+ const INTFLOAT (*is_tab)[16];
+ SUINTFLOAT tmp0, tmp1;
+ int non_zero_found_short[3];
+
+ /* intensity stereo */
+ if (s->mode_ext & MODE_EXT_I_STEREO) {
+ if (!s->lsf) {
+ is_tab = is_table;
+ sf_max = 7;
+ } else {
+ is_tab = is_table_lsf[g1->scalefac_compress & 1];
+ sf_max = 16;
+ }
+
+ tab0 = g0->sb_hybrid + 576;
+ tab1 = g1->sb_hybrid + 576;
+
+ non_zero_found_short[0] = 0;
+ non_zero_found_short[1] = 0;
+ non_zero_found_short[2] = 0;
+ k = (13 - g1->short_start) * 3 + g1->long_end - 3;
+ for (i = 12; i >= g1->short_start; i--) {
+ /* for last band, use previous scale factor */
+ if (i != 11)
+ k -= 3;
+ len = ff_band_size_short[s->sample_rate_index][i];
+ for (l = 2; l >= 0; l--) {
+ tab0 -= len;
+ tab1 -= len;
+ if (!non_zero_found_short[l]) {
+                    /* test if this is a non-zero band; if so, stop doing i-stereo */
+ for (j = 0; j < len; j++) {
+ if (tab1[j] != 0) {
+ non_zero_found_short[l] = 1;
+ goto found1;
+ }
+ }
+ sf = g1->scale_factors[k + l];
+ if (sf >= sf_max)
+ goto found1;
+
+ v1 = is_tab[0][sf];
+ v2 = is_tab[1][sf];
+ for (j = 0; j < len; j++) {
+ tmp0 = tab0[j];
+ tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
+ tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
+ }
+ } else {
+found1:
+ if (s->mode_ext & MODE_EXT_MS_STEREO) {
+                    /* lower part of the spectrum: do ms stereo
+                       if enabled */
+ for (j = 0; j < len; j++) {
+ tmp0 = tab0[j];
+ tmp1 = tab1[j];
+ tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
+ tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
+ }
+ }
+ }
+ }
+ }
+
+ non_zero_found = non_zero_found_short[0] |
+ non_zero_found_short[1] |
+ non_zero_found_short[2];
+
+ for (i = g1->long_end - 1;i >= 0;i--) {
+ len = ff_band_size_long[s->sample_rate_index][i];
+ tab0 -= len;
+ tab1 -= len;
+            /* test if this is a non-zero band; if so, stop doing i-stereo */
+ if (!non_zero_found) {
+ for (j = 0; j < len; j++) {
+ if (tab1[j] != 0) {
+ non_zero_found = 1;
+ goto found2;
+ }
+ }
+ /* for last band, use previous scale factor */
+ k = (i == 21) ? 20 : i;
+ sf = g1->scale_factors[k];
+ if (sf >= sf_max)
+ goto found2;
+ v1 = is_tab[0][sf];
+ v2 = is_tab[1][sf];
+ for (j = 0; j < len; j++) {
+ tmp0 = tab0[j];
+ tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
+ tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
+ }
+ } else {
+found2:
+ if (s->mode_ext & MODE_EXT_MS_STEREO) {
+                /* lower part of the spectrum: do ms stereo
+                   if enabled */
+ for (j = 0; j < len; j++) {
+ tmp0 = tab0[j];
+ tmp1 = tab1[j];
+ tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
+ tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
+ }
+ }
+ }
+ }
+ } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
+ /* ms stereo ONLY */
+ /* NOTE: the 1/sqrt(2) normalization factor is included in the
+ global gain */
+#if USE_FLOATS
+ s->butterflies_float(g0->sb_hybrid, g1->sb_hybrid, 576);
+#else
+ tab0 = g0->sb_hybrid;
+ tab1 = g1->sb_hybrid;
+ for (i = 0; i < 576; i++) {
+ tmp0 = tab0[i];
+ tmp1 = tab1[i];
+ tab0[i] = tmp0 + tmp1;
+ tab1[i] = tmp0 - tmp1;
+ }
+#endif
+ }
+}
+
+#if USE_FLOATS
+#if HAVE_MIPSFPU
+# include "mips/compute_antialias_float.h"
+#endif /* HAVE_MIPSFPU */
+#else
+#if HAVE_MIPSDSP
+# include "mips/compute_antialias_fixed.h"
+#endif /* HAVE_MIPSDSP */
+#endif /* USE_FLOATS */
+
+#ifndef compute_antialias
+#if USE_FLOATS
+#define AA(j) do { \
+ float tmp0 = ptr[-1-j]; \
+ float tmp1 = ptr[ j]; \
+ ptr[-1-j] = tmp0 * csa_table[j][0] - tmp1 * csa_table[j][1]; \
+ ptr[ j] = tmp0 * csa_table[j][1] + tmp1 * csa_table[j][0]; \
+ } while (0)
+#else
+#define AA(j) do { \
+ SUINT tmp0 = ptr[-1-j]; \
+ SUINT tmp1 = ptr[ j]; \
+ SUINT tmp2 = MULH(tmp0 + tmp1, csa_table[j][0]); \
+ ptr[-1-j] = 4 * (tmp2 - MULH(tmp1, csa_table[j][2])); \
+ ptr[ j] = 4 * (tmp2 + MULH(tmp0, csa_table[j][3])); \
+ } while (0)
+#endif
+
+static void compute_antialias(MPADecodeContext *s, GranuleDef *g)
+{
+ INTFLOAT *ptr;
+ int n, i;
+
+ /* we antialias only "long" bands */
+ if (g->block_type == 2) {
+ if (!g->switch_point)
+ return;
+ /* XXX: check this for 8000Hz case */
+ n = 1;
+ } else {
+ n = SBLIMIT - 1;
+ }
+
+ ptr = g->sb_hybrid + 18;
+ for (i = n; i > 0; i--) {
+ AA(0);
+ AA(1);
+ AA(2);
+ AA(3);
+ AA(4);
+ AA(5);
+ AA(6);
+ AA(7);
+
+ ptr += 18;
+ }
+}
+#endif /* compute_antialias */
+
+static void compute_imdct(MPADecodeContext *s, GranuleDef *g,
+ INTFLOAT *sb_samples, INTFLOAT *mdct_buf)
+{
+ INTFLOAT *win, *out_ptr, *ptr, *buf, *ptr1;
+ INTFLOAT out2[12];
+ int i, j, mdct_long_end, sblimit;
+
+ /* find last non zero block */
+ ptr = g->sb_hybrid + 576;
+ ptr1 = g->sb_hybrid + 2 * 18;
+ while (ptr >= ptr1) {
+ int32_t *p;
+ ptr -= 6;
+ p = (int32_t*)ptr;
+ if (p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
+ break;
+ }
+ sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
+
+ if (g->block_type == 2) {
+ /* XXX: check for 8000 Hz */
+ if (g->switch_point)
+ mdct_long_end = 2;
+ else
+ mdct_long_end = 0;
+ } else {
+ mdct_long_end = sblimit;
+ }
+
+ s->mpadsp.RENAME(imdct36_blocks)(sb_samples, mdct_buf, g->sb_hybrid,
+ mdct_long_end, g->switch_point,
+ g->block_type);
+
+ buf = mdct_buf + 4*18*(mdct_long_end >> 2) + (mdct_long_end & 3);
+ ptr = g->sb_hybrid + 18 * mdct_long_end;
+
+ for (j = mdct_long_end; j < sblimit; j++) {
+ /* select frequency inversion */
+ win = RENAME(ff_mdct_win)[2 + (4 & -(j & 1))];
+ out_ptr = sb_samples + j;
+
+ for (i = 0; i < 6; i++) {
+ *out_ptr = buf[4*i];
+ out_ptr += SBLIMIT;
+ }
+ imdct12(out2, ptr + 0);
+ for (i = 0; i < 6; i++) {
+ *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*1)];
+ buf[4*(i + 6*2)] = MULH3(out2[i + 6], win[i + 6], 1);
+ out_ptr += SBLIMIT;
+ }
+ imdct12(out2, ptr + 1);
+ for (i = 0; i < 6; i++) {
+ *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*2)];
+ buf[4*(i + 6*0)] = MULH3(out2[i + 6], win[i + 6], 1);
+ out_ptr += SBLIMIT;
+ }
+ imdct12(out2, ptr + 2);
+ for (i = 0; i < 6; i++) {
+ buf[4*(i + 6*0)] = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*0)];
+ buf[4*(i + 6*1)] = MULH3(out2[i + 6], win[i + 6], 1);
+ buf[4*(i + 6*2)] = 0;
+ }
+ ptr += 18;
+ buf += (j&3) != 3 ? 1 : (4*18-3);
+ }
+ /* zero bands */
+ for (j = sblimit; j < SBLIMIT; j++) {
+ /* overlap */
+ out_ptr = sb_samples + j;
+ for (i = 0; i < 18; i++) {
+ *out_ptr = buf[4*i];
+ buf[4*i] = 0;
+ out_ptr += SBLIMIT;
+ }
+ buf += (j&3) != 3 ? 1 : (4*18-3);
+ }
+}
+
+/* main layer3 decoding function */
+static int mp_decode_layer3(MPADecodeContext *s)
+{
+ int nb_granules, main_data_begin;
+ int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
+ GranuleDef *g;
+ int16_t exponents[576]; //FIXME try INTFLOAT
+ int ret;
+
+ /* read side info */
+ if (s->lsf) {
+ ret = handle_crc(s, ((s->nb_channels == 1) ? 8*9 : 8*17));
+ main_data_begin = get_bits(&s->gb, 8);
+ skip_bits(&s->gb, s->nb_channels);
+ nb_granules = 1;
+ } else {
+ ret = handle_crc(s, ((s->nb_channels == 1) ? 8*17 : 8*32));
+ main_data_begin = get_bits(&s->gb, 9);
+ if (s->nb_channels == 2)
+ skip_bits(&s->gb, 3);
+ else
+ skip_bits(&s->gb, 5);
+ nb_granules = 2;
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */
+ s->granules[ch][1].scfsi = get_bits(&s->gb, 4);
+ }
+ }
+ if (ret < 0)
+ return ret;
+
+ for (gr = 0; gr < nb_granules; gr++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ ff_dlog(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
+ g = &s->granules[ch][gr];
+ g->part2_3_length = get_bits(&s->gb, 12);
+ g->big_values = get_bits(&s->gb, 9);
+ if (g->big_values > 288) {
+ av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ g->global_gain = get_bits(&s->gb, 8);
+ /* if MS stereo only is selected, we precompute the
+ 1/sqrt(2) renormalization factor */
+ if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
+ MODE_EXT_MS_STEREO)
+ g->global_gain -= 2;
+ if (s->lsf)
+ g->scalefac_compress = get_bits(&s->gb, 9);
+ else
+ g->scalefac_compress = get_bits(&s->gb, 4);
+ blocksplit_flag = get_bits1(&s->gb);
+ if (blocksplit_flag) {
+ g->block_type = get_bits(&s->gb, 2);
+ if (g->block_type == 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
+ return AVERROR_INVALIDDATA;
+ }
+ g->switch_point = get_bits1(&s->gb);
+ for (i = 0; i < 2; i++)
+ g->table_select[i] = get_bits(&s->gb, 5);
+ for (i = 0; i < 3; i++)
+ g->subblock_gain[i] = get_bits(&s->gb, 3);
+ init_short_region(s, g);
+ } else {
+ int region_address1, region_address2;
+ g->block_type = 0;
+ g->switch_point = 0;
+ for (i = 0; i < 3; i++)
+ g->table_select[i] = get_bits(&s->gb, 5);
+ /* compute huffman coded region sizes */
+ region_address1 = get_bits(&s->gb, 4);
+ region_address2 = get_bits(&s->gb, 3);
+ ff_dlog(s->avctx, "region1=%d region2=%d\n",
+ region_address1, region_address2);
+ init_long_region(s, g, region_address1, region_address2);
+ }
+ region_offset2size(g);
+ compute_band_indexes(s, g);
+
+ g->preflag = 0;
+ if (!s->lsf)
+ g->preflag = get_bits1(&s->gb);
+ g->scalefac_scale = get_bits1(&s->gb);
+ g->count1table_select = get_bits1(&s->gb);
+ ff_dlog(s->avctx, "block_type=%d switch_point=%d\n",
+ g->block_type, g->switch_point);
+ }
+ }
+
+ if (!s->adu_mode) {
+ int skip;
+ const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb) >> 3);
+ s->extrasize = av_clip((get_bits_left(&s->gb) >> 3) - s->extrasize, 0,
+ FFMAX(0, LAST_BUF_SIZE - s->last_buf_size));
+ av_assert1((get_bits_count(&s->gb) & 7) == 0);
+ /* now we get bits from the main_data_begin offset */
+ ff_dlog(s->avctx, "seekback:%d, lastbuf:%d\n",
+ main_data_begin, s->last_buf_size);
+
+ memcpy(s->last_buf + s->last_buf_size, ptr, s->extrasize);
+ s->in_gb = s->gb;
+ init_get_bits(&s->gb, s->last_buf, (s->last_buf_size + s->extrasize) * 8);
+ s->last_buf_size <<= 3;
+ for (gr = 0; gr < nb_granules && (s->last_buf_size >> 3) < main_data_begin; gr++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ g = &s->granules[ch][gr];
+ s->last_buf_size += g->part2_3_length;
+ memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
+ compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
+ }
+ }
+ skip = s->last_buf_size - 8 * main_data_begin;
+ if (skip >= s->gb.size_in_bits - s->extrasize * 8 && s->in_gb.buffer) {
+ skip_bits_long(&s->in_gb, skip - s->gb.size_in_bits + s->extrasize * 8);
+ s->gb = s->in_gb;
+ s->in_gb.buffer = NULL;
+ s->extrasize = 0;
+ } else {
+ skip_bits_long(&s->gb, skip);
+ }
+ } else {
+ gr = 0;
+ s->extrasize = 0;
+ }
+
+ for (; gr < nb_granules; gr++) {
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ g = &s->granules[ch][gr];
+ bits_pos = get_bits_count(&s->gb);
+
+ if (!s->lsf) {
+ uint8_t *sc;
+ int slen, slen1, slen2;
+
+ /* MPEG-1 scale factors */
+ slen1 = ff_slen_table[0][g->scalefac_compress];
+ slen2 = ff_slen_table[1][g->scalefac_compress];
+ ff_dlog(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
+ if (g->block_type == 2) {
+ n = g->switch_point ? 17 : 18;
+ j = 0;
+ if (slen1) {
+ for (i = 0; i < n; i++)
+ g->scale_factors[j++] = get_bits(&s->gb, slen1);
+ } else {
+ for (i = 0; i < n; i++)
+ g->scale_factors[j++] = 0;
+ }
+ if (slen2) {
+ for (i = 0; i < 18; i++)
+ g->scale_factors[j++] = get_bits(&s->gb, slen2);
+ for (i = 0; i < 3; i++)
+ g->scale_factors[j++] = 0;
+ } else {
+ for (i = 0; i < 21; i++)
+ g->scale_factors[j++] = 0;
+ }
+ } else {
+ sc = s->granules[ch][0].scale_factors;
+ j = 0;
+ for (k = 0; k < 4; k++) {
+ n = k == 0 ? 6 : 5;
+ if ((g->scfsi & (0x8 >> k)) == 0) {
+ slen = (k < 2) ? slen1 : slen2;
+ if (slen) {
+ for (i = 0; i < n; i++)
+ g->scale_factors[j++] = get_bits(&s->gb, slen);
+ } else {
+ for (i = 0; i < n; i++)
+ g->scale_factors[j++] = 0;
+ }
+ } else {
+ /* simply copy from last granule */
+ for (i = 0; i < n; i++) {
+ g->scale_factors[j] = sc[j];
+ j++;
+ }
+ }
+ }
+ g->scale_factors[j++] = 0;
+ }
+ } else {
+ int tindex, tindex2, slen[4], sl, sf;
+
+ /* LSF scale factors */
+ if (g->block_type == 2)
+ tindex = g->switch_point ? 2 : 1;
+ else
+ tindex = 0;
+
+ sf = g->scalefac_compress;
+ if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
+ /* intensity stereo case */
+ sf >>= 1;
+ if (sf < 180) {
+ lsf_sf_expand(slen, sf, 6, 6, 0);
+ tindex2 = 3;
+ } else if (sf < 244) {
+ lsf_sf_expand(slen, sf - 180, 4, 4, 0);
+ tindex2 = 4;
+ } else {
+ lsf_sf_expand(slen, sf - 244, 3, 0, 0);
+ tindex2 = 5;
+ }
+ } else {
+ /* normal case */
+ if (sf < 400) {
+ lsf_sf_expand(slen, sf, 5, 4, 4);
+ tindex2 = 0;
+ } else if (sf < 500) {
+ lsf_sf_expand(slen, sf - 400, 5, 4, 0);
+ tindex2 = 1;
+ } else {
+ lsf_sf_expand(slen, sf - 500, 3, 0, 0);
+ tindex2 = 2;
+ g->preflag = 1;
+ }
+ }
+
+ j = 0;
+ for (k = 0; k < 4; k++) {
+ n = ff_lsf_nsf_table[tindex2][tindex][k];
+ sl = slen[k];
+ if (sl) {
+ for (i = 0; i < n; i++)
+ g->scale_factors[j++] = get_bits(&s->gb, sl);
+ } else {
+ for (i = 0; i < n; i++)
+ g->scale_factors[j++] = 0;
+ }
+ }
+ /* XXX: should compute exact size */
+ for (; j < 40; j++)
+ g->scale_factors[j] = 0;
+ }
+
+ exponents_from_scale_factors(s, g, exponents);
+
+ /* read Huffman coded residue */
+ huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
+ } /* ch */
+
+ if (s->mode == MPA_JSTEREO)
+ compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]);
+
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ g = &s->granules[ch][gr];
+
+ reorder_block(s, g);
+ compute_antialias(s, g);
+ compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
+ }
+ } /* gr */
+ if (get_bits_count(&s->gb) < 0)
+ skip_bits_long(&s->gb, -get_bits_count(&s->gb));
+ return nb_granules * 18;
+}
+
+static int mp_decode_frame(MPADecodeContext *s, OUT_INT **samples,
+ const uint8_t *buf, int buf_size)
+{
+ int i, nb_frames, ch, ret;
+ OUT_INT *samples_ptr;
+
+ init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8);
+ if (s->error_protection)
+ s->crc = get_bits(&s->gb, 16);
+
+ switch(s->layer) {
+ case 1:
+ s->avctx->frame_size = 384;
+ nb_frames = mp_decode_layer1(s);
+ break;
+ case 2:
+ s->avctx->frame_size = 1152;
+ nb_frames = mp_decode_layer2(s);
+ break;
+ case 3:
+ s->avctx->frame_size = s->lsf ? 576 : 1152;
+ default:
+ nb_frames = mp_decode_layer3(s);
+
+ s->last_buf_size=0;
+ if (s->in_gb.buffer) {
+ align_get_bits(&s->gb);
+ i = (get_bits_left(&s->gb) >> 3) - s->extrasize;
+ if (i >= 0 && i <= BACKSTEP_SIZE) {
+ memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb) >> 3), i);
+ s->last_buf_size=i;
+ } else
+ av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i);
+ s->gb = s->in_gb;
+ s->in_gb.buffer = NULL;
+ s->extrasize = 0;
+ }
+
+ align_get_bits(&s->gb);
+ av_assert1((get_bits_count(&s->gb) & 7) == 0);
+ i = (get_bits_left(&s->gb) >> 3) - s->extrasize;
+ if (i < 0 || i > BACKSTEP_SIZE || nb_frames < 0) {
+ if (i < 0)
+ av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i);
+ i = FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
+ }
+ av_assert1(i <= buf_size - HEADER_SIZE && i >= 0);
+ memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
+ s->last_buf_size += i;
+ }
+
+ if(nb_frames < 0)
+ return nb_frames;
+
+ /* get output buffer */
+ if (!samples) {
+ av_assert0(s->frame);
+ s->frame->nb_samples = s->avctx->frame_size;
+ if ((ret = ff_get_buffer(s->avctx, s->frame, 0)) < 0)
+ return ret;
+ samples = (OUT_INT **)s->frame->extended_data;
+ }
+
+ /* apply the synthesis filter */
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ int sample_stride;
+ if (s->avctx->sample_fmt == OUT_FMT_P) {
+ samples_ptr = samples[ch];
+ sample_stride = 1;
+ } else {
+ samples_ptr = samples[0] + ch;
+ sample_stride = s->nb_channels;
+ }
+ for (i = 0; i < nb_frames; i++) {
+ RENAME(ff_mpa_synth_filter)(&s->mpadsp, s->synth_buf[ch],
+ &(s->synth_buf_offset[ch]),
+ RENAME(ff_mpa_synth_window),
+ &s->dither_state, samples_ptr,
+ sample_stride, s->sb_samples[ch][i]);
+ samples_ptr += 32 * sample_stride;
+ }
+ }
+
+ return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
+}
+
+static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ MPADecodeContext *s = avctx->priv_data;
+ uint32_t header;
+ int ret;
+
+ int skipped = 0;
+ while(buf_size && !*buf){
+ buf++;
+ buf_size--;
+ skipped++;
+ }
+
+ if (buf_size < HEADER_SIZE)
+ return AVERROR_INVALIDDATA;
+
+ header = AV_RB32(buf);
+ if (header >> 8 == AV_RB32("TAG") >> 8) {
+ av_log(avctx, AV_LOG_DEBUG, "discarding ID3 tag\n");
+ return buf_size + skipped;
+ }
+ ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Header missing\n");
+ return AVERROR_INVALIDDATA;
+ } else if (ret == 1) {
+ /* free format: prepare to compute frame size */
+ s->frame_size = -1;
+ return AVERROR_INVALIDDATA;
+ }
+ /* update codec info */
+ av_channel_layout_uninit(&avctx->ch_layout);
+ avctx->ch_layout = s->nb_channels == 1 ? (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO :
+ (AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO;
+ if (!avctx->bit_rate)
+ avctx->bit_rate = s->bit_rate;
+
+ if (s->frame_size <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
+ return AVERROR_INVALIDDATA;
+ } else if (s->frame_size < buf_size) {
+ av_log(avctx, AV_LOG_DEBUG, "incorrect frame size - multiple frames in buffer?\n");
+ buf_size= s->frame_size;
+ }
+
+ s->frame = frame;
+
+ ret = mp_decode_frame(s, NULL, buf, buf_size);
+ if (ret >= 0) {
+ s->frame->nb_samples = avctx->frame_size;
+ *got_frame_ptr = 1;
+ avctx->sample_rate = s->sample_rate;
+ //FIXME maybe move the other codec info stuff from above here too
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
+ /* Only return an error if the bad frame makes up the whole packet or
+ * the error is related to buffer management.
+ * If there is more data in the packet, just consume the bad frame
+ * instead of returning an error, which would discard the whole
+ * packet. */
+ *got_frame_ptr = 0;
+ if (buf_size == avpkt->size || ret != AVERROR_INVALIDDATA)
+ return ret;
+ }
+ s->frame_size = 0;
+ return buf_size + skipped;
+}
+
+static void mp_flush(MPADecodeContext *ctx)
+{
+ memset(ctx->synth_buf, 0, sizeof(ctx->synth_buf));
+ memset(ctx->mdct_buf, 0, sizeof(ctx->mdct_buf));
+ ctx->last_buf_size = 0;
+ ctx->dither_state = 0;
+}
+
+static void flush(AVCodecContext *avctx)
+{
+ mp_flush(avctx->priv_data);
+}
+
+#if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
+static int decode_frame_adu(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ MPADecodeContext *s = avctx->priv_data;
+ uint32_t header;
+ int len, ret;
+
+ len = buf_size;
+
+ // Discard too short frames
+ if (buf_size < HEADER_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+
+ if (len > MPA_MAX_CODED_FRAME_SIZE)
+ len = MPA_MAX_CODED_FRAME_SIZE;
+
+ // Get header and restore sync word
+ header = AV_RB32(buf) | 0xffe00000;
+
+ ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid frame header\n");
+ return ret;
+ }
+ /* update codec info */
+ avctx->sample_rate = s->sample_rate;
+ av_channel_layout_uninit(&avctx->ch_layout);
+ avctx->ch_layout = s->nb_channels == 1 ? (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO :
+ (AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO;
+ if (!avctx->bit_rate)
+ avctx->bit_rate = s->bit_rate;
+
+ s->frame_size = len;
+
+ s->frame = frame;
+
+ ret = mp_decode_frame(s, NULL, buf, buf_size);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
+ return ret;
+ }
+
+ *got_frame_ptr = 1;
+
+ return buf_size;
+}
+#endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */
+
+#if CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER
+
+/**
+ * Context for MP3On4 decoder
+ */
+typedef struct MP3On4DecodeContext {
+ int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
+ int syncword; ///< syncword patch
+ const uint8_t *coff; ///< channel offsets in output buffer
+ MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
+} MP3On4DecodeContext;
+
+#include "mpeg4audio.h"
+
+/* The next 3 arrays are indexed by the channel config number (passed via the codec extradata) */
+
+/* number of mp3 decoder instances */
+static const uint8_t mp3Frames[8] = { 0, 1, 1, 2, 3, 3, 4, 5 };
+
+/* offsets into output buffer, assume output order is FL FR C LFE BL BR SL SR */
+static const uint8_t chan_offset[8][5] = {
+ { 0 },
+ { 0 }, // C
+ { 0 }, // FLR
+ { 2, 0 }, // C FLR
+ { 2, 0, 3 }, // C FLR BS
+ { 2, 0, 3 }, // C FLR BLRS
+ { 2, 0, 4, 3 }, // C FLR BLRS LFE
+ { 2, 0, 6, 4, 3 }, // C FLR BLRS BLR LFE
+};
+
+/* mp3on4 channel layouts */
+static const int16_t chan_layout[8] = {
+ 0,
+ AV_CH_LAYOUT_MONO,
+ AV_CH_LAYOUT_STEREO,
+ AV_CH_LAYOUT_SURROUND,
+ AV_CH_LAYOUT_4POINT0,
+ AV_CH_LAYOUT_5POINT0,
+ AV_CH_LAYOUT_5POINT1,
+ AV_CH_LAYOUT_7POINT1
+};
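+
+/* Worked example (illustrative, read off the three tables above): chan_config 6
+ * (5.1) gives mp3Frames[6] = 4 mp3 frames (C, FL/FR, the surround pair, LFE),
+ * written at the output channel offsets chan_offset[6] = { 2, 0, 4, 3 }, and
+ * chan_layout[6] = AV_CH_LAYOUT_5POINT1 is reported as the channel layout. */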
+
+static av_cold int decode_close_mp3on4(AVCodecContext * avctx)
+{
+ MP3On4DecodeContext *s = avctx->priv_data;
+ int i;
+
+ for (i = 0; i < s->frames; i++)
+ av_freep(&s->mp3decctx[i]);
+
+ return 0;
+}
+
+
+static av_cold int decode_init_mp3on4(AVCodecContext * avctx)
+{
+ MP3On4DecodeContext *s = avctx->priv_data;
+ MPEG4AudioConfig cfg;
+ int i, ret;
+
+ if ((avctx->extradata_size < 2) || !avctx->extradata) {
+ av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ avpriv_mpeg4audio_get_config2(&cfg, avctx->extradata,
+ avctx->extradata_size, 1, avctx);
+ if (!cfg.chan_config || cfg.chan_config > 7) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
+ return AVERROR_INVALIDDATA;
+ }
+ s->frames = mp3Frames[cfg.chan_config];
+ s->coff = chan_offset[cfg.chan_config];
+ av_channel_layout_uninit(&avctx->ch_layout);
+ av_channel_layout_from_mask(&avctx->ch_layout, chan_layout[cfg.chan_config]);
+
+ if (cfg.sample_rate < 16000)
+ s->syncword = 0xffe00000;
+ else
+ s->syncword = 0xfff00000;
+
+    /* Init the first mp3 decoder in the standard way, so that all tables get built.
+     * We replace avctx->priv_data with the context of the first decoder so that
+     * decode_init() does not have to be changed.
+     * The other decoders are initialized here by copying data from the first context.
+     */
+ // Allocate zeroed memory for the first decoder context
+ s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext));
+ if (!s->mp3decctx[0])
+ return AVERROR(ENOMEM);
+    // Put the decoder context in place to make decode_init() happy
+ avctx->priv_data = s->mp3decctx[0];
+ ret = decode_init(avctx);
+ // Restore mp3on4 context pointer
+ avctx->priv_data = s;
+ if (ret < 0)
+ return ret;
+ s->mp3decctx[0]->adu_mode = 1; // Set adu mode
+
+    /* Create a separate codec/context for each frame (the first one is already set up).
+     * Each frame holds 1 or 2 channels; up to 5 frames are allowed.
+     */
+ for (i = 1; i < s->frames; i++) {
+ s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
+ if (!s->mp3decctx[i])
+ return AVERROR(ENOMEM);
+ s->mp3decctx[i]->adu_mode = 1;
+ s->mp3decctx[i]->avctx = avctx;
+ s->mp3decctx[i]->mpadsp = s->mp3decctx[0]->mpadsp;
+ s->mp3decctx[i]->butterflies_float = s->mp3decctx[0]->butterflies_float;
+ }
+
+ return 0;
+}
+
+
+static void flush_mp3on4(AVCodecContext *avctx)
+{
+ int i;
+ MP3On4DecodeContext *s = avctx->priv_data;
+
+ for (i = 0; i < s->frames; i++)
+ mp_flush(s->mp3decctx[i]);
+}
+
+
+static int decode_frame_mp3on4(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ MP3On4DecodeContext *s = avctx->priv_data;
+ MPADecodeContext *m;
+ int fsize, len = buf_size, out_size = 0;
+ uint32_t header;
+ OUT_INT **out_samples;
+ OUT_INT *outptr[2];
+ int fr, ch, ret;
+
+ /* get output buffer */
+ frame->nb_samples = MPA_FRAME_SIZE;
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
+ return ret;
+ out_samples = (OUT_INT **)frame->extended_data;
+
+ // Discard too short frames
+ if (buf_size < HEADER_SIZE)
+ return AVERROR_INVALIDDATA;
+
+ avctx->bit_rate = 0;
+
+ ch = 0;
+ for (fr = 0; fr < s->frames; fr++) {
+ fsize = AV_RB16(buf) >> 4;
+ fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
+ m = s->mp3decctx[fr];
+ av_assert1(m);
+
+ if (fsize < HEADER_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "Frame size smaller than header size\n");
+ return AVERROR_INVALIDDATA;
+ }
+ header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
+
+ ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)m, header);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Bad header, discard block\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (ch + m->nb_channels > avctx->ch_layout.nb_channels ||
+ s->coff[fr] + m->nb_channels > avctx->ch_layout.nb_channels) {
+ av_log(avctx, AV_LOG_ERROR, "frame channel count exceeds codec "
+ "channel count\n");
+ return AVERROR_INVALIDDATA;
+ }
+ ch += m->nb_channels;
+
+ outptr[0] = out_samples[s->coff[fr]];
+ if (m->nb_channels > 1)
+ outptr[1] = out_samples[s->coff[fr] + 1];
+
+ if ((ret = mp_decode_frame(m, outptr, buf, fsize)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "failed to decode channel %d\n", ch);
+ memset(outptr[0], 0, MPA_FRAME_SIZE*sizeof(OUT_INT));
+ if (m->nb_channels > 1)
+ memset(outptr[1], 0, MPA_FRAME_SIZE*sizeof(OUT_INT));
+ ret = m->nb_channels * MPA_FRAME_SIZE*sizeof(OUT_INT);
+ }
+
+ out_size += ret;
+ buf += fsize;
+ len -= fsize;
+
+ avctx->bit_rate += m->bit_rate;
+ }
+ if (ch != avctx->ch_layout.nb_channels) {
+ av_log(avctx, AV_LOG_ERROR, "failed to decode all channels\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* update codec info */
+ avctx->sample_rate = s->mp3decctx[0]->sample_rate;
+
+ frame->nb_samples = out_size / (avctx->ch_layout.nb_channels * sizeof(OUT_INT));
+ *got_frame_ptr = 1;
+
+ return buf_size;
+}
+#endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */
diff --git a/media/ffvpx/libavcodec/mpegaudiodecheader.c b/media/ffvpx/libavcodec/mpegaudiodecheader.c
new file mode 100644
index 0000000000..ef63befbf4
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodecheader.c
@@ -0,0 +1,152 @@
+/*
+ * MPEG Audio header decoder
+ * Copyright (c) 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * MPEG Audio header decoder.
+ */
+
+#include "libavutil/macros.h"
+
+#include "mpegaudio.h"
+#include "mpegaudiodata.h"
+#include "mpegaudiodecheader.h"
+
+
+int avpriv_mpegaudio_decode_header(MPADecodeHeader *s, uint32_t header)
+{
+ int sample_rate, frame_size, mpeg25, padding;
+ int sample_rate_index, bitrate_index;
+ int ret;
+
+ ret = ff_mpa_check_header(header);
+ if (ret < 0)
+ return ret;
+
+ if (header & (1<<20)) {
+ s->lsf = (header & (1<<19)) ? 0 : 1;
+ mpeg25 = 0;
+ } else {
+ s->lsf = 1;
+ mpeg25 = 1;
+ }
+
+ s->layer = 4 - ((header >> 17) & 3);
+ /* extract frequency */
+ sample_rate_index = (header >> 10) & 3;
+ if (sample_rate_index >= FF_ARRAY_ELEMS(ff_mpa_freq_tab))
+ sample_rate_index = 0;
+ sample_rate = ff_mpa_freq_tab[sample_rate_index] >> (s->lsf + mpeg25);
+ sample_rate_index += 3 * (s->lsf + mpeg25);
+ s->sample_rate_index = sample_rate_index;
+ s->error_protection = ((header >> 16) & 1) ^ 1;
+ s->sample_rate = sample_rate;
+
+ bitrate_index = (header >> 12) & 0xf;
+ padding = (header >> 9) & 1;
+ //extension = (header >> 8) & 1;
+ s->mode = (header >> 6) & 3;
+ s->mode_ext = (header >> 4) & 3;
+ //copyright = (header >> 3) & 1;
+ //original = (header >> 2) & 1;
+ //emphasis = header & 3;
+
+ if (s->mode == MPA_MONO)
+ s->nb_channels = 1;
+ else
+ s->nb_channels = 2;
+
+ if (bitrate_index != 0) {
+ frame_size = ff_mpa_bitrate_tab[s->lsf][s->layer - 1][bitrate_index];
+ s->bit_rate = frame_size * 1000;
+ switch(s->layer) {
+ case 1:
+ frame_size = (frame_size * 12000) / sample_rate;
+ frame_size = (frame_size + padding) * 4;
+ break;
+ case 2:
+ frame_size = (frame_size * 144000) / sample_rate;
+ frame_size += padding;
+ break;
+ default:
+ case 3:
+ frame_size = (frame_size * 144000) / (sample_rate << s->lsf);
+ frame_size += padding;
+ break;
+ }
+ s->frame_size = frame_size;
+ } else {
+ /* if no frame size computed, signal it */
+ return 1;
+ }
+
+#if defined(DEBUG)
+ ff_dlog(NULL, "layer%d, %d Hz, %d kbits/s, ",
+ s->layer, s->sample_rate, s->bit_rate);
+ if (s->nb_channels == 2) {
+ if (s->layer == 3) {
+ if (s->mode_ext & MODE_EXT_MS_STEREO)
+ ff_dlog(NULL, "ms-");
+ if (s->mode_ext & MODE_EXT_I_STEREO)
+ ff_dlog(NULL, "i-");
+ }
+ ff_dlog(NULL, "stereo");
+ } else {
+ ff_dlog(NULL, "mono");
+ }
+ ff_dlog(NULL, "\n");
+#endif
+ return 0;
+}
+
+int ff_mpa_decode_header(uint32_t head, int *sample_rate, int *channels, int *frame_size, int *bit_rate, enum AVCodecID *codec_id)
+{
+ MPADecodeHeader s1, *s = &s1;
+
+ if (avpriv_mpegaudio_decode_header(s, head) != 0) {
+ return -1;
+ }
+
+ switch(s->layer) {
+ case 1:
+ *codec_id = AV_CODEC_ID_MP1;
+ *frame_size = 384;
+ break;
+ case 2:
+ *codec_id = AV_CODEC_ID_MP2;
+ *frame_size = 1152;
+ break;
+ default:
+ case 3:
+ if (*codec_id != AV_CODEC_ID_MP3ADU)
+ *codec_id = AV_CODEC_ID_MP3;
+ if (s->lsf)
+ *frame_size = 576;
+ else
+ *frame_size = 1152;
+ break;
+ }
+
+ *sample_rate = s->sample_rate;
+ *channels = s->nb_channels;
+ *bit_rate = s->bit_rate;
+ return s->frame_size;
+}
diff --git a/media/ffvpx/libavcodec/mpegaudiodecheader.h b/media/ffvpx/libavcodec/mpegaudiodecheader.h
new file mode 100644
index 0000000000..ed5d1f3b33
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodecheader.h
@@ -0,0 +1,81 @@
+/*
+ * MPEG Audio header decoder
+ * Copyright (c) 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * MPEG Audio header decoder.
+ */
+
+#ifndef AVCODEC_MPEGAUDIODECHEADER_H
+#define AVCODEC_MPEGAUDIODECHEADER_H
+
+#include <stdint.h>
+#include "codec_id.h"
+
+#define MP3_MASK 0xFFFE0CCF
+
+#define MPA_DECODE_HEADER \
+ int frame_size; \
+ int error_protection; \
+ int layer; \
+ int sample_rate; \
+ int sample_rate_index; /* between 0 and 8 */ \
+ int bit_rate; \
+ int nb_channels; \
+ int mode; \
+ int mode_ext; \
+ int lsf;
+
+typedef struct MPADecodeHeader {
+ MPA_DECODE_HEADER
+} MPADecodeHeader;
+
+/* Header decoding. The header MUST be checked beforehand (e.g. with
+   ff_mpa_check_header()) because no consistency check is done here.
+   Returns 1 if a free-format stream is found, in which case the frame
+   size must be computed externally. */
+int avpriv_mpegaudio_decode_header(MPADecodeHeader *s, uint32_t header);
+
+/* Useful helper to get MPEG audio stream info. Returns -1 on a header
+   error, otherwise the coded frame size in bytes (a usage sketch follows
+   at the end of this header). */
+int ff_mpa_decode_header(uint32_t head, int *sample_rate,
+ int *channels, int *frame_size, int *bitrate, enum AVCodecID *codec_id);
+
+/* fast header check for resync */
+static inline int ff_mpa_check_header(uint32_t header){
+ /* header */
+ if ((header & 0xffe00000) != 0xffe00000)
+ return -1;
+ /* version check */
+ if ((header & (3<<19)) == 1<<19)
+ return -1;
+ /* layer check */
+ if ((header & (3<<17)) == 0)
+ return -1;
+ /* bit rate */
+ if ((header & (0xf<<12)) == 0xf<<12)
+ return -1;
+ /* frequency */
+ if ((header & (3<<10)) == 3<<10)
+ return -1;
+ return 0;
+}
+
+#endif /* AVCODEC_MPEGAUDIODECHEADER_H */
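
A minimal usage sketch for the two entry points declared above, assuming this
header and libavutil/intreadwrite.h (for AV_RB32()) are included and that the
caller has at least four bytes of data available; probe_mpa_header() is a
hypothetical name used only for illustration here:

    static int probe_mpa_header(const uint8_t *buf)
    {
        MPADecodeHeader h;
        uint32_t header = AV_RB32(buf);   /* 32-bit big-endian frame header */

        if (ff_mpa_check_header(header) < 0)
            return -1;                    /* no valid sync word / header bits */
        if (avpriv_mpegaudio_decode_header(&h, header) == 1)
            return 0;                     /* free format: size not derivable from the header */
        return h.frame_size;              /* coded frame size in bytes */
    }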
diff --git a/media/ffvpx/libavcodec/mpegaudiodectab.h b/media/ffvpx/libavcodec/mpegaudiodectab.h
new file mode 100644
index 0000000000..accd12b8e2
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodectab.h
@@ -0,0 +1,615 @@
+/*
+ * MPEG Audio decoder
+ * copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * mpeg audio layer decoder tables.
+ */
+
+#ifndef AVCODEC_MPEGAUDIODECTAB_H
+#define AVCODEC_MPEGAUDIODECTAB_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "mpegaudio.h"
+
+/*******************************************************/
+/* layer 3 tables */
+
+/* layer 3 huffman tables */
+typedef struct HuffTable {
+ int xsize;
+ const uint8_t *bits;
+ const uint16_t *codes;
+} HuffTable;
+
+/* layer3 scale factor size */
+static const uint8_t slen_table[2][16] = {
+ { 0, 0, 0, 0, 3, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4 },
+ { 0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 2, 3 },
+};
+
+/* number of lsf scale factors for a given size */
+static const uint8_t lsf_nsf_table[6][3][4] = {
+ { { 6, 5, 5, 5 }, { 9, 9, 9, 9 }, { 6, 9, 9, 9 } },
+ { { 6, 5, 7, 3 }, { 9, 9, 12, 6 }, { 6, 9, 12, 6 } },
+ { { 11, 10, 0, 0 }, { 18, 18, 0, 0 }, { 15, 18, 0, 0 } },
+ { { 7, 7, 7, 0 }, { 12, 12, 12, 0 }, { 6, 15, 12, 0 } },
+ { { 6, 6, 6, 3 }, { 12, 9, 9, 6 }, { 6, 12, 9, 6 } },
+ { { 8, 8, 5, 0 }, { 15, 12, 9, 0 }, { 6, 18, 9, 0 } },
+};
+
+/* mpegaudio layer 3 huffman tables */
+
+static const uint16_t mpa_huffcodes_1[4] = {
+ 0x0001, 0x0001, 0x0001, 0x0000,
+};
+
+static const uint8_t mpa_huffbits_1[4] = {
+ 1, 3, 2, 3,
+};
+
+static const uint16_t mpa_huffcodes_2[9] = {
+ 0x0001, 0x0002, 0x0001, 0x0003, 0x0001, 0x0001, 0x0003, 0x0002,
+ 0x0000,
+};
+
+static const uint8_t mpa_huffbits_2[9] = {
+ 1, 3, 6, 3, 3, 5, 5, 5,
+ 6,
+};
+
+static const uint16_t mpa_huffcodes_3[9] = {
+ 0x0003, 0x0002, 0x0001, 0x0001, 0x0001, 0x0001, 0x0003, 0x0002,
+ 0x0000,
+};
+
+static const uint8_t mpa_huffbits_3[9] = {
+ 2, 2, 6, 3, 2, 5, 5, 5,
+ 6,
+};
+
+static const uint16_t mpa_huffcodes_5[16] = {
+ 0x0001, 0x0002, 0x0006, 0x0005, 0x0003, 0x0001, 0x0004, 0x0004,
+ 0x0007, 0x0005, 0x0007, 0x0001, 0x0006, 0x0001, 0x0001, 0x0000,
+};
+
+static const uint8_t mpa_huffbits_5[16] = {
+ 1, 3, 6, 7, 3, 3, 6, 7,
+ 6, 6, 7, 8, 7, 6, 7, 8,
+};
+
+static const uint16_t mpa_huffcodes_6[16] = {
+ 0x0007, 0x0003, 0x0005, 0x0001, 0x0006, 0x0002, 0x0003, 0x0002,
+ 0x0005, 0x0004, 0x0004, 0x0001, 0x0003, 0x0003, 0x0002, 0x0000,
+};
+
+static const uint8_t mpa_huffbits_6[16] = {
+ 3, 3, 5, 7, 3, 2, 4, 5,
+ 4, 4, 5, 6, 6, 5, 6, 7,
+};
+
+static const uint16_t mpa_huffcodes_7[36] = {
+ 0x0001, 0x0002, 0x000a, 0x0013, 0x0010, 0x000a, 0x0003, 0x0003,
+ 0x0007, 0x000a, 0x0005, 0x0003, 0x000b, 0x0004, 0x000d, 0x0011,
+ 0x0008, 0x0004, 0x000c, 0x000b, 0x0012, 0x000f, 0x000b, 0x0002,
+ 0x0007, 0x0006, 0x0009, 0x000e, 0x0003, 0x0001, 0x0006, 0x0004,
+ 0x0005, 0x0003, 0x0002, 0x0000,
+};
+
+static const uint8_t mpa_huffbits_7[36] = {
+ 1, 3, 6, 8, 8, 9, 3, 4,
+ 6, 7, 7, 8, 6, 5, 7, 8,
+ 8, 9, 7, 7, 8, 9, 9, 9,
+ 7, 7, 8, 9, 9, 10, 8, 8,
+ 9, 10, 10, 10,
+};
+
+static const uint16_t mpa_huffcodes_8[36] = {
+ 0x0003, 0x0004, 0x0006, 0x0012, 0x000c, 0x0005, 0x0005, 0x0001,
+ 0x0002, 0x0010, 0x0009, 0x0003, 0x0007, 0x0003, 0x0005, 0x000e,
+ 0x0007, 0x0003, 0x0013, 0x0011, 0x000f, 0x000d, 0x000a, 0x0004,
+ 0x000d, 0x0005, 0x0008, 0x000b, 0x0005, 0x0001, 0x000c, 0x0004,
+ 0x0004, 0x0001, 0x0001, 0x0000,
+};
+
+static const uint8_t mpa_huffbits_8[36] = {
+ 2, 3, 6, 8, 8, 9, 3, 2,
+ 4, 8, 8, 8, 6, 4, 6, 8,
+ 8, 9, 8, 8, 8, 9, 9, 10,
+ 8, 7, 8, 9, 10, 10, 9, 8,
+ 9, 9, 11, 11,
+};
+
+static const uint16_t mpa_huffcodes_9[36] = {
+ 0x0007, 0x0005, 0x0009, 0x000e, 0x000f, 0x0007, 0x0006, 0x0004,
+ 0x0005, 0x0005, 0x0006, 0x0007, 0x0007, 0x0006, 0x0008, 0x0008,
+ 0x0008, 0x0005, 0x000f, 0x0006, 0x0009, 0x000a, 0x0005, 0x0001,
+ 0x000b, 0x0007, 0x0009, 0x0006, 0x0004, 0x0001, 0x000e, 0x0004,
+ 0x0006, 0x0002, 0x0006, 0x0000,
+};
+
+static const uint8_t mpa_huffbits_9[36] = {
+ 3, 3, 5, 6, 8, 9, 3, 3,
+ 4, 5, 6, 8, 4, 4, 5, 6,
+ 7, 8, 6, 5, 6, 7, 7, 8,
+ 7, 6, 7, 7, 8, 9, 8, 7,
+ 8, 8, 9, 9,
+};
+
+static const uint16_t mpa_huffcodes_10[64] = {
+ 0x0001, 0x0002, 0x000a, 0x0017, 0x0023, 0x001e, 0x000c, 0x0011,
+ 0x0003, 0x0003, 0x0008, 0x000c, 0x0012, 0x0015, 0x000c, 0x0007,
+ 0x000b, 0x0009, 0x000f, 0x0015, 0x0020, 0x0028, 0x0013, 0x0006,
+ 0x000e, 0x000d, 0x0016, 0x0022, 0x002e, 0x0017, 0x0012, 0x0007,
+ 0x0014, 0x0013, 0x0021, 0x002f, 0x001b, 0x0016, 0x0009, 0x0003,
+ 0x001f, 0x0016, 0x0029, 0x001a, 0x0015, 0x0014, 0x0005, 0x0003,
+ 0x000e, 0x000d, 0x000a, 0x000b, 0x0010, 0x0006, 0x0005, 0x0001,
+ 0x0009, 0x0008, 0x0007, 0x0008, 0x0004, 0x0004, 0x0002, 0x0000,
+};
+
+static const uint8_t mpa_huffbits_10[64] = {
+ 1, 3, 6, 8, 9, 9, 9, 10,
+ 3, 4, 6, 7, 8, 9, 8, 8,
+ 6, 6, 7, 8, 9, 10, 9, 9,
+ 7, 7, 8, 9, 10, 10, 9, 10,
+ 8, 8, 9, 10, 10, 10, 10, 10,
+ 9, 9, 10, 10, 11, 11, 10, 11,
+ 8, 8, 9, 10, 10, 10, 11, 11,
+ 9, 8, 9, 10, 10, 11, 11, 11,
+};
+
+static const uint16_t mpa_huffcodes_11[64] = {
+ 0x0003, 0x0004, 0x000a, 0x0018, 0x0022, 0x0021, 0x0015, 0x000f,
+ 0x0005, 0x0003, 0x0004, 0x000a, 0x0020, 0x0011, 0x000b, 0x000a,
+ 0x000b, 0x0007, 0x000d, 0x0012, 0x001e, 0x001f, 0x0014, 0x0005,
+ 0x0019, 0x000b, 0x0013, 0x003b, 0x001b, 0x0012, 0x000c, 0x0005,
+ 0x0023, 0x0021, 0x001f, 0x003a, 0x001e, 0x0010, 0x0007, 0x0005,
+ 0x001c, 0x001a, 0x0020, 0x0013, 0x0011, 0x000f, 0x0008, 0x000e,
+ 0x000e, 0x000c, 0x0009, 0x000d, 0x000e, 0x0009, 0x0004, 0x0001,
+ 0x000b, 0x0004, 0x0006, 0x0006, 0x0006, 0x0003, 0x0002, 0x0000,
+};
+
+static const uint8_t mpa_huffbits_11[64] = {
+ 2, 3, 5, 7, 8, 9, 8, 9,
+ 3, 3, 4, 6, 8, 8, 7, 8,
+ 5, 5, 6, 7, 8, 9, 8, 8,
+ 7, 6, 7, 9, 8, 10, 8, 9,
+ 8, 8, 8, 9, 9, 10, 9, 10,
+ 8, 8, 9, 10, 10, 11, 10, 11,
+ 8, 7, 7, 8, 9, 10, 10, 10,
+ 8, 7, 8, 9, 10, 10, 10, 10,
+};
+
+static const uint16_t mpa_huffcodes_12[64] = {
+ 0x0009, 0x0006, 0x0010, 0x0021, 0x0029, 0x0027, 0x0026, 0x001a,
+ 0x0007, 0x0005, 0x0006, 0x0009, 0x0017, 0x0010, 0x001a, 0x000b,
+ 0x0011, 0x0007, 0x000b, 0x000e, 0x0015, 0x001e, 0x000a, 0x0007,
+ 0x0011, 0x000a, 0x000f, 0x000c, 0x0012, 0x001c, 0x000e, 0x0005,
+ 0x0020, 0x000d, 0x0016, 0x0013, 0x0012, 0x0010, 0x0009, 0x0005,
+ 0x0028, 0x0011, 0x001f, 0x001d, 0x0011, 0x000d, 0x0004, 0x0002,
+ 0x001b, 0x000c, 0x000b, 0x000f, 0x000a, 0x0007, 0x0004, 0x0001,
+ 0x001b, 0x000c, 0x0008, 0x000c, 0x0006, 0x0003, 0x0001, 0x0000,
+};
+
+static const uint8_t mpa_huffbits_12[64] = {
+ 4, 3, 5, 7, 8, 9, 9, 9,
+ 3, 3, 4, 5, 7, 7, 8, 8,
+ 5, 4, 5, 6, 7, 8, 7, 8,
+ 6, 5, 6, 6, 7, 8, 8, 8,
+ 7, 6, 7, 7, 8, 8, 8, 9,
+ 8, 7, 8, 8, 8, 9, 8, 9,
+ 8, 7, 7, 8, 8, 9, 9, 10,
+ 9, 8, 8, 9, 9, 9, 9, 10,
+};
+
+static const uint16_t mpa_huffcodes_13[256] = {
+ 0x0001, 0x0005, 0x000e, 0x0015, 0x0022, 0x0033, 0x002e, 0x0047,
+ 0x002a, 0x0034, 0x0044, 0x0034, 0x0043, 0x002c, 0x002b, 0x0013,
+ 0x0003, 0x0004, 0x000c, 0x0013, 0x001f, 0x001a, 0x002c, 0x0021,
+ 0x001f, 0x0018, 0x0020, 0x0018, 0x001f, 0x0023, 0x0016, 0x000e,
+ 0x000f, 0x000d, 0x0017, 0x0024, 0x003b, 0x0031, 0x004d, 0x0041,
+ 0x001d, 0x0028, 0x001e, 0x0028, 0x001b, 0x0021, 0x002a, 0x0010,
+ 0x0016, 0x0014, 0x0025, 0x003d, 0x0038, 0x004f, 0x0049, 0x0040,
+ 0x002b, 0x004c, 0x0038, 0x0025, 0x001a, 0x001f, 0x0019, 0x000e,
+ 0x0023, 0x0010, 0x003c, 0x0039, 0x0061, 0x004b, 0x0072, 0x005b,
+ 0x0036, 0x0049, 0x0037, 0x0029, 0x0030, 0x0035, 0x0017, 0x0018,
+ 0x003a, 0x001b, 0x0032, 0x0060, 0x004c, 0x0046, 0x005d, 0x0054,
+ 0x004d, 0x003a, 0x004f, 0x001d, 0x004a, 0x0031, 0x0029, 0x0011,
+ 0x002f, 0x002d, 0x004e, 0x004a, 0x0073, 0x005e, 0x005a, 0x004f,
+ 0x0045, 0x0053, 0x0047, 0x0032, 0x003b, 0x0026, 0x0024, 0x000f,
+ 0x0048, 0x0022, 0x0038, 0x005f, 0x005c, 0x0055, 0x005b, 0x005a,
+ 0x0056, 0x0049, 0x004d, 0x0041, 0x0033, 0x002c, 0x002b, 0x002a,
+ 0x002b, 0x0014, 0x001e, 0x002c, 0x0037, 0x004e, 0x0048, 0x0057,
+ 0x004e, 0x003d, 0x002e, 0x0036, 0x0025, 0x001e, 0x0014, 0x0010,
+ 0x0035, 0x0019, 0x0029, 0x0025, 0x002c, 0x003b, 0x0036, 0x0051,
+ 0x0042, 0x004c, 0x0039, 0x0036, 0x0025, 0x0012, 0x0027, 0x000b,
+ 0x0023, 0x0021, 0x001f, 0x0039, 0x002a, 0x0052, 0x0048, 0x0050,
+ 0x002f, 0x003a, 0x0037, 0x0015, 0x0016, 0x001a, 0x0026, 0x0016,
+ 0x0035, 0x0019, 0x0017, 0x0026, 0x0046, 0x003c, 0x0033, 0x0024,
+ 0x0037, 0x001a, 0x0022, 0x0017, 0x001b, 0x000e, 0x0009, 0x0007,
+ 0x0022, 0x0020, 0x001c, 0x0027, 0x0031, 0x004b, 0x001e, 0x0034,
+ 0x0030, 0x0028, 0x0034, 0x001c, 0x0012, 0x0011, 0x0009, 0x0005,
+ 0x002d, 0x0015, 0x0022, 0x0040, 0x0038, 0x0032, 0x0031, 0x002d,
+ 0x001f, 0x0013, 0x000c, 0x000f, 0x000a, 0x0007, 0x0006, 0x0003,
+ 0x0030, 0x0017, 0x0014, 0x0027, 0x0024, 0x0023, 0x0035, 0x0015,
+ 0x0010, 0x0017, 0x000d, 0x000a, 0x0006, 0x0001, 0x0004, 0x0002,
+ 0x0010, 0x000f, 0x0011, 0x001b, 0x0019, 0x0014, 0x001d, 0x000b,
+ 0x0011, 0x000c, 0x0010, 0x0008, 0x0001, 0x0001, 0x0000, 0x0001,
+};
+
+static const uint8_t mpa_huffbits_13[256] = {
+ 1, 4, 6, 7, 8, 9, 9, 10,
+ 9, 10, 11, 11, 12, 12, 13, 13,
+ 3, 4, 6, 7, 8, 8, 9, 9,
+ 9, 9, 10, 10, 11, 12, 12, 12,
+ 6, 6, 7, 8, 9, 9, 10, 10,
+ 9, 10, 10, 11, 11, 12, 13, 13,
+ 7, 7, 8, 9, 9, 10, 10, 10,
+ 10, 11, 11, 11, 11, 12, 13, 13,
+ 8, 7, 9, 9, 10, 10, 11, 11,
+ 10, 11, 11, 12, 12, 13, 13, 14,
+ 9, 8, 9, 10, 10, 10, 11, 11,
+ 11, 11, 12, 11, 13, 13, 14, 14,
+ 9, 9, 10, 10, 11, 11, 11, 11,
+ 11, 12, 12, 12, 13, 13, 14, 14,
+ 10, 9, 10, 11, 11, 11, 12, 12,
+ 12, 12, 13, 13, 13, 14, 16, 16,
+ 9, 8, 9, 10, 10, 11, 11, 12,
+ 12, 12, 12, 13, 13, 14, 15, 15,
+ 10, 9, 10, 10, 11, 11, 11, 13,
+ 12, 13, 13, 14, 14, 14, 16, 15,
+ 10, 10, 10, 11, 11, 12, 12, 13,
+ 12, 13, 14, 13, 14, 15, 16, 17,
+ 11, 10, 10, 11, 12, 12, 12, 12,
+ 13, 13, 13, 14, 15, 15, 15, 16,
+ 11, 11, 11, 12, 12, 13, 12, 13,
+ 14, 14, 15, 15, 15, 16, 16, 16,
+ 12, 11, 12, 13, 13, 13, 14, 14,
+ 14, 14, 14, 15, 16, 15, 16, 16,
+ 13, 12, 12, 13, 13, 13, 15, 14,
+ 14, 17, 15, 15, 15, 17, 16, 16,
+ 12, 12, 13, 14, 14, 14, 15, 14,
+ 15, 15, 16, 16, 19, 18, 19, 16,
+};
+
+static const uint16_t mpa_huffcodes_15[256] = {
+ 0x0007, 0x000c, 0x0012, 0x0035, 0x002f, 0x004c, 0x007c, 0x006c,
+ 0x0059, 0x007b, 0x006c, 0x0077, 0x006b, 0x0051, 0x007a, 0x003f,
+ 0x000d, 0x0005, 0x0010, 0x001b, 0x002e, 0x0024, 0x003d, 0x0033,
+ 0x002a, 0x0046, 0x0034, 0x0053, 0x0041, 0x0029, 0x003b, 0x0024,
+ 0x0013, 0x0011, 0x000f, 0x0018, 0x0029, 0x0022, 0x003b, 0x0030,
+ 0x0028, 0x0040, 0x0032, 0x004e, 0x003e, 0x0050, 0x0038, 0x0021,
+ 0x001d, 0x001c, 0x0019, 0x002b, 0x0027, 0x003f, 0x0037, 0x005d,
+ 0x004c, 0x003b, 0x005d, 0x0048, 0x0036, 0x004b, 0x0032, 0x001d,
+ 0x0034, 0x0016, 0x002a, 0x0028, 0x0043, 0x0039, 0x005f, 0x004f,
+ 0x0048, 0x0039, 0x0059, 0x0045, 0x0031, 0x0042, 0x002e, 0x001b,
+ 0x004d, 0x0025, 0x0023, 0x0042, 0x003a, 0x0034, 0x005b, 0x004a,
+ 0x003e, 0x0030, 0x004f, 0x003f, 0x005a, 0x003e, 0x0028, 0x0026,
+ 0x007d, 0x0020, 0x003c, 0x0038, 0x0032, 0x005c, 0x004e, 0x0041,
+ 0x0037, 0x0057, 0x0047, 0x0033, 0x0049, 0x0033, 0x0046, 0x001e,
+ 0x006d, 0x0035, 0x0031, 0x005e, 0x0058, 0x004b, 0x0042, 0x007a,
+ 0x005b, 0x0049, 0x0038, 0x002a, 0x0040, 0x002c, 0x0015, 0x0019,
+ 0x005a, 0x002b, 0x0029, 0x004d, 0x0049, 0x003f, 0x0038, 0x005c,
+ 0x004d, 0x0042, 0x002f, 0x0043, 0x0030, 0x0035, 0x0024, 0x0014,
+ 0x0047, 0x0022, 0x0043, 0x003c, 0x003a, 0x0031, 0x0058, 0x004c,
+ 0x0043, 0x006a, 0x0047, 0x0036, 0x0026, 0x0027, 0x0017, 0x000f,
+ 0x006d, 0x0035, 0x0033, 0x002f, 0x005a, 0x0052, 0x003a, 0x0039,
+ 0x0030, 0x0048, 0x0039, 0x0029, 0x0017, 0x001b, 0x003e, 0x0009,
+ 0x0056, 0x002a, 0x0028, 0x0025, 0x0046, 0x0040, 0x0034, 0x002b,
+ 0x0046, 0x0037, 0x002a, 0x0019, 0x001d, 0x0012, 0x000b, 0x000b,
+ 0x0076, 0x0044, 0x001e, 0x0037, 0x0032, 0x002e, 0x004a, 0x0041,
+ 0x0031, 0x0027, 0x0018, 0x0010, 0x0016, 0x000d, 0x000e, 0x0007,
+ 0x005b, 0x002c, 0x0027, 0x0026, 0x0022, 0x003f, 0x0034, 0x002d,
+ 0x001f, 0x0034, 0x001c, 0x0013, 0x000e, 0x0008, 0x0009, 0x0003,
+ 0x007b, 0x003c, 0x003a, 0x0035, 0x002f, 0x002b, 0x0020, 0x0016,
+ 0x0025, 0x0018, 0x0011, 0x000c, 0x000f, 0x000a, 0x0002, 0x0001,
+ 0x0047, 0x0025, 0x0022, 0x001e, 0x001c, 0x0014, 0x0011, 0x001a,
+ 0x0015, 0x0010, 0x000a, 0x0006, 0x0008, 0x0006, 0x0002, 0x0000,
+};
+
+static const uint8_t mpa_huffbits_15[256] = {
+ 3, 4, 5, 7, 7, 8, 9, 9,
+ 9, 10, 10, 11, 11, 11, 12, 13,
+ 4, 3, 5, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 11, 11,
+ 5, 5, 5, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 11, 11, 11,
+ 6, 6, 6, 7, 7, 8, 8, 9,
+ 9, 9, 10, 10, 10, 11, 11, 11,
+ 7, 6, 7, 7, 8, 8, 9, 9,
+ 9, 9, 10, 10, 10, 11, 11, 11,
+ 8, 7, 7, 8, 8, 8, 9, 9,
+ 9, 9, 10, 10, 11, 11, 11, 12,
+ 9, 7, 8, 8, 8, 9, 9, 9,
+ 9, 10, 10, 10, 11, 11, 12, 12,
+ 9, 8, 8, 9, 9, 9, 9, 10,
+ 10, 10, 10, 10, 11, 11, 11, 12,
+ 9, 8, 8, 9, 9, 9, 9, 10,
+ 10, 10, 10, 11, 11, 12, 12, 12,
+ 9, 8, 9, 9, 9, 9, 10, 10,
+ 10, 11, 11, 11, 11, 12, 12, 12,
+ 10, 9, 9, 9, 10, 10, 10, 10,
+ 10, 11, 11, 11, 11, 12, 13, 12,
+ 10, 9, 9, 9, 10, 10, 10, 10,
+ 11, 11, 11, 11, 12, 12, 12, 13,
+ 11, 10, 9, 10, 10, 10, 11, 11,
+ 11, 11, 11, 11, 12, 12, 13, 13,
+ 11, 10, 10, 10, 10, 11, 11, 11,
+ 11, 12, 12, 12, 12, 12, 13, 13,
+ 12, 11, 11, 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 13, 13, 12, 13,
+ 12, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13,
+};
+
+static const uint16_t mpa_huffcodes_16[256] = {
+ 0x0001, 0x0005, 0x000e, 0x002c, 0x004a, 0x003f, 0x006e, 0x005d,
+ 0x00ac, 0x0095, 0x008a, 0x00f2, 0x00e1, 0x00c3, 0x0178, 0x0011,
+ 0x0003, 0x0004, 0x000c, 0x0014, 0x0023, 0x003e, 0x0035, 0x002f,
+ 0x0053, 0x004b, 0x0044, 0x0077, 0x00c9, 0x006b, 0x00cf, 0x0009,
+ 0x000f, 0x000d, 0x0017, 0x0026, 0x0043, 0x003a, 0x0067, 0x005a,
+ 0x00a1, 0x0048, 0x007f, 0x0075, 0x006e, 0x00d1, 0x00ce, 0x0010,
+ 0x002d, 0x0015, 0x0027, 0x0045, 0x0040, 0x0072, 0x0063, 0x0057,
+ 0x009e, 0x008c, 0x00fc, 0x00d4, 0x00c7, 0x0183, 0x016d, 0x001a,
+ 0x004b, 0x0024, 0x0044, 0x0041, 0x0073, 0x0065, 0x00b3, 0x00a4,
+ 0x009b, 0x0108, 0x00f6, 0x00e2, 0x018b, 0x017e, 0x016a, 0x0009,
+ 0x0042, 0x001e, 0x003b, 0x0038, 0x0066, 0x00b9, 0x00ad, 0x0109,
+ 0x008e, 0x00fd, 0x00e8, 0x0190, 0x0184, 0x017a, 0x01bd, 0x0010,
+ 0x006f, 0x0036, 0x0034, 0x0064, 0x00b8, 0x00b2, 0x00a0, 0x0085,
+ 0x0101, 0x00f4, 0x00e4, 0x00d9, 0x0181, 0x016e, 0x02cb, 0x000a,
+ 0x0062, 0x0030, 0x005b, 0x0058, 0x00a5, 0x009d, 0x0094, 0x0105,
+ 0x00f8, 0x0197, 0x018d, 0x0174, 0x017c, 0x0379, 0x0374, 0x0008,
+ 0x0055, 0x0054, 0x0051, 0x009f, 0x009c, 0x008f, 0x0104, 0x00f9,
+ 0x01ab, 0x0191, 0x0188, 0x017f, 0x02d7, 0x02c9, 0x02c4, 0x0007,
+ 0x009a, 0x004c, 0x0049, 0x008d, 0x0083, 0x0100, 0x00f5, 0x01aa,
+ 0x0196, 0x018a, 0x0180, 0x02df, 0x0167, 0x02c6, 0x0160, 0x000b,
+ 0x008b, 0x0081, 0x0043, 0x007d, 0x00f7, 0x00e9, 0x00e5, 0x00db,
+ 0x0189, 0x02e7, 0x02e1, 0x02d0, 0x0375, 0x0372, 0x01b7, 0x0004,
+ 0x00f3, 0x0078, 0x0076, 0x0073, 0x00e3, 0x00df, 0x018c, 0x02ea,
+ 0x02e6, 0x02e0, 0x02d1, 0x02c8, 0x02c2, 0x00df, 0x01b4, 0x0006,
+ 0x00ca, 0x00e0, 0x00de, 0x00da, 0x00d8, 0x0185, 0x0182, 0x017d,
+ 0x016c, 0x0378, 0x01bb, 0x02c3, 0x01b8, 0x01b5, 0x06c0, 0x0004,
+ 0x02eb, 0x00d3, 0x00d2, 0x00d0, 0x0172, 0x017b, 0x02de, 0x02d3,
+ 0x02ca, 0x06c7, 0x0373, 0x036d, 0x036c, 0x0d83, 0x0361, 0x0002,
+ 0x0179, 0x0171, 0x0066, 0x00bb, 0x02d6, 0x02d2, 0x0166, 0x02c7,
+ 0x02c5, 0x0362, 0x06c6, 0x0367, 0x0d82, 0x0366, 0x01b2, 0x0000,
+ 0x000c, 0x000a, 0x0007, 0x000b, 0x000a, 0x0011, 0x000b, 0x0009,
+ 0x000d, 0x000c, 0x000a, 0x0007, 0x0005, 0x0003, 0x0001, 0x0003,
+};
+
+static const uint8_t mpa_huffbits_16[256] = {
+ 1, 4, 6, 8, 9, 9, 10, 10,
+ 11, 11, 11, 12, 12, 12, 13, 9,
+ 3, 4, 6, 7, 8, 9, 9, 9,
+ 10, 10, 10, 11, 12, 11, 12, 8,
+ 6, 6, 7, 8, 9, 9, 10, 10,
+ 11, 10, 11, 11, 11, 12, 12, 9,
+ 8, 7, 8, 9, 9, 10, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 10,
+ 9, 8, 9, 9, 10, 10, 11, 11,
+ 11, 12, 12, 12, 13, 13, 13, 9,
+ 9, 8, 9, 9, 10, 11, 11, 12,
+ 11, 12, 12, 13, 13, 13, 14, 10,
+ 10, 9, 9, 10, 11, 11, 11, 11,
+ 12, 12, 12, 12, 13, 13, 14, 10,
+ 10, 9, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 13, 13, 15, 15, 10,
+ 10, 10, 10, 11, 11, 11, 12, 12,
+ 13, 13, 13, 13, 14, 14, 14, 10,
+ 11, 10, 10, 11, 11, 12, 12, 13,
+ 13, 13, 13, 14, 13, 14, 13, 11,
+ 11, 11, 10, 11, 12, 12, 12, 12,
+ 13, 14, 14, 14, 15, 15, 14, 10,
+ 12, 11, 11, 11, 12, 12, 13, 14,
+ 14, 14, 14, 14, 14, 13, 14, 11,
+ 12, 12, 12, 12, 12, 13, 13, 13,
+ 13, 15, 14, 14, 14, 14, 16, 11,
+ 14, 12, 12, 12, 13, 13, 14, 14,
+ 14, 16, 15, 15, 15, 17, 15, 11,
+ 13, 13, 11, 12, 14, 14, 13, 14,
+ 14, 15, 16, 15, 17, 15, 14, 11,
+ 9, 8, 8, 9, 9, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 8,
+};
+
+static const uint16_t mpa_huffcodes_24[256] = {
+ 0x000f, 0x000d, 0x002e, 0x0050, 0x0092, 0x0106, 0x00f8, 0x01b2,
+ 0x01aa, 0x029d, 0x028d, 0x0289, 0x026d, 0x0205, 0x0408, 0x0058,
+ 0x000e, 0x000c, 0x0015, 0x0026, 0x0047, 0x0082, 0x007a, 0x00d8,
+ 0x00d1, 0x00c6, 0x0147, 0x0159, 0x013f, 0x0129, 0x0117, 0x002a,
+ 0x002f, 0x0016, 0x0029, 0x004a, 0x0044, 0x0080, 0x0078, 0x00dd,
+ 0x00cf, 0x00c2, 0x00b6, 0x0154, 0x013b, 0x0127, 0x021d, 0x0012,
+ 0x0051, 0x0027, 0x004b, 0x0046, 0x0086, 0x007d, 0x0074, 0x00dc,
+ 0x00cc, 0x00be, 0x00b2, 0x0145, 0x0137, 0x0125, 0x010f, 0x0010,
+ 0x0093, 0x0048, 0x0045, 0x0087, 0x007f, 0x0076, 0x0070, 0x00d2,
+ 0x00c8, 0x00bc, 0x0160, 0x0143, 0x0132, 0x011d, 0x021c, 0x000e,
+ 0x0107, 0x0042, 0x0081, 0x007e, 0x0077, 0x0072, 0x00d6, 0x00ca,
+ 0x00c0, 0x00b4, 0x0155, 0x013d, 0x012d, 0x0119, 0x0106, 0x000c,
+ 0x00f9, 0x007b, 0x0079, 0x0075, 0x0071, 0x00d7, 0x00ce, 0x00c3,
+ 0x00b9, 0x015b, 0x014a, 0x0134, 0x0123, 0x0110, 0x0208, 0x000a,
+ 0x01b3, 0x0073, 0x006f, 0x006d, 0x00d3, 0x00cb, 0x00c4, 0x00bb,
+ 0x0161, 0x014c, 0x0139, 0x012a, 0x011b, 0x0213, 0x017d, 0x0011,
+ 0x01ab, 0x00d4, 0x00d0, 0x00cd, 0x00c9, 0x00c1, 0x00ba, 0x00b1,
+ 0x00a9, 0x0140, 0x012f, 0x011e, 0x010c, 0x0202, 0x0179, 0x0010,
+ 0x014f, 0x00c7, 0x00c5, 0x00bf, 0x00bd, 0x00b5, 0x00ae, 0x014d,
+ 0x0141, 0x0131, 0x0121, 0x0113, 0x0209, 0x017b, 0x0173, 0x000b,
+ 0x029c, 0x00b8, 0x00b7, 0x00b3, 0x00af, 0x0158, 0x014b, 0x013a,
+ 0x0130, 0x0122, 0x0115, 0x0212, 0x017f, 0x0175, 0x016e, 0x000a,
+ 0x028c, 0x015a, 0x00ab, 0x00a8, 0x00a4, 0x013e, 0x0135, 0x012b,
+ 0x011f, 0x0114, 0x0107, 0x0201, 0x0177, 0x0170, 0x016a, 0x0006,
+ 0x0288, 0x0142, 0x013c, 0x0138, 0x0133, 0x012e, 0x0124, 0x011c,
+ 0x010d, 0x0105, 0x0200, 0x0178, 0x0172, 0x016c, 0x0167, 0x0004,
+ 0x026c, 0x012c, 0x0128, 0x0126, 0x0120, 0x011a, 0x0111, 0x010a,
+ 0x0203, 0x017c, 0x0176, 0x0171, 0x016d, 0x0169, 0x0165, 0x0002,
+ 0x0409, 0x0118, 0x0116, 0x0112, 0x010b, 0x0108, 0x0103, 0x017e,
+ 0x017a, 0x0174, 0x016f, 0x016b, 0x0168, 0x0166, 0x0164, 0x0000,
+ 0x002b, 0x0014, 0x0013, 0x0011, 0x000f, 0x000d, 0x000b, 0x0009,
+ 0x0007, 0x0006, 0x0004, 0x0007, 0x0005, 0x0003, 0x0001, 0x0003,
+};
+
+static const uint8_t mpa_huffbits_24[256] = {
+ 4, 4, 6, 7, 8, 9, 9, 10,
+ 10, 11, 11, 11, 11, 11, 12, 9,
+ 4, 4, 5, 6, 7, 8, 8, 9,
+ 9, 9, 10, 10, 10, 10, 10, 8,
+ 6, 5, 6, 7, 7, 8, 8, 9,
+ 9, 9, 9, 10, 10, 10, 11, 7,
+ 7, 6, 7, 7, 8, 8, 8, 9,
+ 9, 9, 9, 10, 10, 10, 10, 7,
+ 8, 7, 7, 8, 8, 8, 8, 9,
+ 9, 9, 10, 10, 10, 10, 11, 7,
+ 9, 7, 8, 8, 8, 8, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 7,
+ 9, 8, 8, 8, 8, 9, 9, 9,
+ 9, 10, 10, 10, 10, 10, 11, 7,
+ 10, 8, 8, 8, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 11, 11, 8,
+ 10, 9, 9, 9, 9, 9, 9, 9,
+ 9, 10, 10, 10, 10, 11, 11, 8,
+ 10, 9, 9, 9, 9, 9, 9, 10,
+ 10, 10, 10, 10, 11, 11, 11, 8,
+ 11, 9, 9, 9, 9, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 8,
+ 11, 10, 9, 9, 9, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 8,
+ 11, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 8,
+ 11, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 8,
+ 12, 10, 10, 10, 10, 10, 10, 11,
+ 11, 11, 11, 11, 11, 11, 11, 8,
+ 8, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 8, 8, 8, 8, 4,
+};
+
+static const HuffTable mpa_huff_tables[16] = {
+{ 1, NULL, NULL },
+{ 2, mpa_huffbits_1, mpa_huffcodes_1 },
+{ 3, mpa_huffbits_2, mpa_huffcodes_2 },
+{ 3, mpa_huffbits_3, mpa_huffcodes_3 },
+{ 4, mpa_huffbits_5, mpa_huffcodes_5 },
+{ 4, mpa_huffbits_6, mpa_huffcodes_6 },
+{ 6, mpa_huffbits_7, mpa_huffcodes_7 },
+{ 6, mpa_huffbits_8, mpa_huffcodes_8 },
+{ 6, mpa_huffbits_9, mpa_huffcodes_9 },
+{ 8, mpa_huffbits_10, mpa_huffcodes_10 },
+{ 8, mpa_huffbits_11, mpa_huffcodes_11 },
+{ 8, mpa_huffbits_12, mpa_huffcodes_12 },
+{ 16, mpa_huffbits_13, mpa_huffcodes_13 },
+{ 16, mpa_huffbits_15, mpa_huffcodes_15 },
+{ 16, mpa_huffbits_16, mpa_huffcodes_16 },
+{ 16, mpa_huffbits_24, mpa_huffcodes_24 },
+};
+
+static const uint8_t mpa_huff_data[32][2] = {
+{ 0, 0 },
+{ 1, 0 },
+{ 2, 0 },
+{ 3, 0 },
+{ 0, 0 },
+{ 4, 0 },
+{ 5, 0 },
+{ 6, 0 },
+{ 7, 0 },
+{ 8, 0 },
+{ 9, 0 },
+{ 10, 0 },
+{ 11, 0 },
+{ 12, 0 },
+{ 0, 0 },
+{ 13, 0 },
+{ 14, 1 },
+{ 14, 2 },
+{ 14, 3 },
+{ 14, 4 },
+{ 14, 6 },
+{ 14, 8 },
+{ 14, 10 },
+{ 14, 13 },
+{ 15, 4 },
+{ 15, 5 },
+{ 15, 6 },
+{ 15, 7 },
+{ 15, 8 },
+{ 15, 9 },
+{ 15, 11 },
+{ 15, 13 },
+};
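+
+/* Each mpa_huff_data[table_select] entry is { index into mpa_huff_tables,
+ * linbits }. For example, entry 24 is { 15, 4 }: Huffman table
+ * mpa_huffbits_24/mpa_huffcodes_24 with 4 linbits appended to escape values,
+ * while entries 0, 4 and 14 map to the empty table (all-zero pairs). */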
+
+
+/* Huffman tables for quadruples */
+static const uint8_t mpa_quad_codes[2][16] = {
+ { 1, 5, 4, 5, 6, 5, 4, 4, 7, 3, 6, 0, 7, 2, 3, 1, },
+ { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, },
+};
+
+static const uint8_t mpa_quad_bits[2][16] = {
+ { 1, 4, 4, 5, 4, 6, 5, 6, 4, 5, 5, 6, 5, 6, 6, 6, },
+ { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, },
+};
+
+/* band size tables */
+static const uint8_t band_size_long[9][22] = {
+{ 4, 4, 4, 4, 4, 4, 6, 6, 8, 8, 10,
+ 12, 16, 20, 24, 28, 34, 42, 50, 54, 76, 158, }, /* 44100 */
+{ 4, 4, 4, 4, 4, 4, 6, 6, 6, 8, 10,
+ 12, 16, 18, 22, 28, 34, 40, 46, 54, 54, 192, }, /* 48000 */
+{ 4, 4, 4, 4, 4, 4, 6, 6, 8, 10, 12,
+ 16, 20, 24, 30, 38, 46, 56, 68, 84, 102, 26, }, /* 32000 */
+{ 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16,
+ 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 22050 */
+{ 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16,
+ 18, 22, 26, 32, 38, 46, 52, 64, 70, 76, 36, }, /* 24000 */
+{ 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16,
+ 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 16000 */
+{ 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16,
+ 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 11025 */
+{ 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16,
+ 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 12000 */
+{ 12, 12, 12, 12, 12, 12, 16, 20, 24, 28, 32,
+ 40, 48, 56, 64, 76, 90, 2, 2, 2, 2, 2, }, /* 8000 */
+};
+
+static const uint8_t band_size_short[9][13] = {
+{ 4, 4, 4, 4, 6, 8, 10, 12, 14, 18, 22, 30, 56, }, /* 44100 */
+{ 4, 4, 4, 4, 6, 6, 10, 12, 14, 16, 20, 26, 66, }, /* 48000 */
+{ 4, 4, 4, 4, 6, 8, 12, 16, 20, 26, 34, 42, 12, }, /* 32000 */
+{ 4, 4, 4, 6, 6, 8, 10, 14, 18, 26, 32, 42, 18, }, /* 22050 */
+{ 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 32, 44, 12, }, /* 24000 */
+{ 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 30, 40, 18, }, /* 16000 */
+{ 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 30, 40, 18, }, /* 11025 */
+{ 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 30, 40, 18, }, /* 12000 */
+{ 8, 8, 8, 12, 16, 20, 24, 28, 36, 2, 2, 2, 26, }, /* 8000 */
+};
+
+static const uint8_t mpa_pretab[2][22] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 3, 2, 0 },
+};
+
+/* table for alias reduction (XXX: store it as integers!) */
+static const float ci_table[8] = {
+ -0.6, -0.535, -0.33, -0.185, -0.095, -0.041, -0.0142, -0.0037,
+};
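+
+/* The layer 3 decoder derives its antialiasing butterfly coefficients from
+ * these values, roughly cs[i] = 1 / sqrt(1 + ci^2) and ca[i] = cs[i] * ci
+ * (see the csa_table initialisation in mpegaudiodec_template.c). */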
+
+#endif /* AVCODEC_MPEGAUDIODECTAB_H */
diff --git a/media/ffvpx/libavcodec/mpegaudiodsp.c b/media/ffvpx/libavcodec/mpegaudiodsp.c
new file mode 100644
index 0000000000..5a5a679d91
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodsp.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/thread.h"
+#include "mpegaudio.h"
+#include "mpegaudiodsp.h"
+#include "dct.h"
+#include "dct32.h"
+
+static AVOnce mpadsp_table_init = AV_ONCE_INIT;
+
+static av_cold void mpadsp_init_tabs(void)
+{
+ int i, j;
+ /* compute mdct windows */
+ for (i = 0; i < 36; i++) {
+ for (j = 0; j < 4; j++) {
+ double d;
+
+ if (j == 2 && i % 3 != 1)
+ continue;
+
+ d = sin(M_PI * (i + 0.5) / 36.0);
+ if (j == 1) {
+ if (i >= 30) d = 0;
+ else if (i >= 24) d = sin(M_PI * (i - 18 + 0.5) / 12.0);
+ else if (i >= 18) d = 1;
+ } else if (j == 3) {
+ if (i < 6) d = 0;
+ else if (i < 12) d = sin(M_PI * (i - 6 + 0.5) / 12.0);
+ else if (i < 18) d = 1;
+ }
+ //merge last stage of imdct into the window coefficients
+ d *= 0.5 * IMDCT_SCALAR / cos(M_PI * (2 * i + 19) / 72);
+
+ if (j == 2) {
+ ff_mdct_win_float[j][i/3] = d / (1 << 5);
+ ff_mdct_win_fixed[j][i/3] = d / (1 << 5) * (1LL << 32) + 0.5;
+ } else {
+ int idx = i < 18 ? i : i + (MDCT_BUF_SIZE/2 - 18);
+ ff_mdct_win_float[j][idx] = d / (1 << 5);
+ ff_mdct_win_fixed[j][idx] = d / (1 << 5) * (1LL << 32) + 0.5;
+ }
+ }
+ }
+
+ /* NOTE: we do frequency inversion after the MDCT by changing
+ the sign of the right window coefs */
+ for (j = 0; j < 4; j++) {
+ for (i = 0; i < MDCT_BUF_SIZE; i += 2) {
+ ff_mdct_win_float[j + 4][i ] = ff_mdct_win_float[j][i ];
+ ff_mdct_win_float[j + 4][i + 1] = -ff_mdct_win_float[j][i + 1];
+ ff_mdct_win_fixed[j + 4][i ] = ff_mdct_win_fixed[j][i ];
+ ff_mdct_win_fixed[j + 4][i + 1] = -ff_mdct_win_fixed[j][i + 1];
+ }
+ }
+
+#if ARCH_X86
+ ff_mpadsp_init_x86_tabs();
+#endif
+}
+
+av_cold void ff_mpadsp_init(MPADSPContext *s)
+{
+ DCTContext dct;
+
+ ff_dct_init(&dct, 5, DCT_II);
+ ff_thread_once(&mpadsp_table_init, &mpadsp_init_tabs);
+
+ s->apply_window_float = ff_mpadsp_apply_window_float;
+ s->apply_window_fixed = ff_mpadsp_apply_window_fixed;
+
+ s->dct32_float = dct.dct32;
+ s->dct32_fixed = ff_dct32_fixed;
+
+ s->imdct36_blocks_float = ff_imdct36_blocks_float;
+ s->imdct36_blocks_fixed = ff_imdct36_blocks_fixed;
+
+#if ARCH_AARCH64
+ ff_mpadsp_init_aarch64(s);
+#elif ARCH_ARM
+ ff_mpadsp_init_arm(s);
+#elif ARCH_PPC
+ ff_mpadsp_init_ppc(s);
+#elif ARCH_X86
+ ff_mpadsp_init_x86(s);
+#endif
+#if HAVE_MIPSFPU
+ ff_mpadsp_init_mipsfpu(s);
+#endif
+#if HAVE_MIPSDSP
+ ff_mpadsp_init_mipsdsp(s);
+#endif
+}
diff --git a/media/ffvpx/libavcodec/mpegaudiodsp.h b/media/ffvpx/libavcodec/mpegaudiodsp.h
new file mode 100644
index 0000000000..7bc635191a
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodsp.h
@@ -0,0 +1,92 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEGAUDIODSP_H
+#define AVCODEC_MPEGAUDIODSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/macros.h"
+
+typedef struct MPADSPContext {
+ void (*apply_window_float)(float *synth_buf, float *window,
+ int *dither_state, float *samples,
+ ptrdiff_t incr);
+ void (*apply_window_fixed)(int32_t *synth_buf, int32_t *window,
+ int *dither_state, int16_t *samples,
+ ptrdiff_t incr);
+ void (*dct32_float)(float *dst, const float *src);
+ void (*dct32_fixed)(int *dst, const int *src);
+
+ void (*imdct36_blocks_float)(float *out, float *buf, float *in,
+ int count, int switch_point, int block_type);
+ void (*imdct36_blocks_fixed)(int *out, int *buf, int *in,
+ int count, int switch_point, int block_type);
+} MPADSPContext;
+
+void ff_mpadsp_init(MPADSPContext *s);
+
+extern int32_t ff_mpa_synth_window_fixed[];
+extern float ff_mpa_synth_window_float[];
+
+extern const int32_t ff_mpa_enwindow[257];
+
+void ff_mpa_synth_filter_fixed(MPADSPContext *s,
+ int32_t *synth_buf_ptr, int *synth_buf_offset,
+ int32_t *window, int *dither_state,
+ int16_t *samples, ptrdiff_t incr,
+ int32_t *sb_samples);
+
+void ff_mpa_synth_filter_float(MPADSPContext *s,
+ float *synth_buf_ptr, int *synth_buf_offset,
+ float *window, int *dither_state,
+ float *samples, ptrdiff_t incr,
+ float *sb_samples);
+
+void ff_mpadsp_init_aarch64(MPADSPContext *s);
+void ff_mpadsp_init_arm(MPADSPContext *s);
+void ff_mpadsp_init_ppc(MPADSPContext *s);
+void ff_mpadsp_init_x86(MPADSPContext *s);
+void ff_mpadsp_init_x86_tabs(void);
+void ff_mpadsp_init_mipsfpu(MPADSPContext *s);
+void ff_mpadsp_init_mipsdsp(MPADSPContext *s);
+
+void ff_mpa_synth_init_float(void);
+void ff_mpa_synth_init_fixed(void);
+
+void ff_mpadsp_apply_window_float(float *synth_buf, float *window,
+ int *dither_state, float *samples,
+ ptrdiff_t incr);
+void ff_mpadsp_apply_window_fixed(int32_t *synth_buf, int32_t *window,
+ int *dither_state, int16_t *samples,
+ ptrdiff_t incr);
+
+void ff_imdct36_blocks_float(float *out, float *buf, float *in,
+ int count, int switch_point, int block_type);
+
+void ff_imdct36_blocks_fixed(int *out, int *buf, int *in,
+ int count, int switch_point, int block_type);
+
+/** For SSE implementation, MDCT_BUF_SIZE/2 should be 128-bit aligned */
+#define MDCT_BUF_SIZE FFALIGN(36, 2*4)
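+/* i.e. 36 rounded up to a multiple of 8, so MDCT_BUF_SIZE == 40 and each
+ * half-buffer of 20 4-byte elements (80 bytes) keeps 16-byte alignment. */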
+
+extern int ff_mdct_win_fixed[8][MDCT_BUF_SIZE];
+extern float ff_mdct_win_float[8][MDCT_BUF_SIZE];
+
+#endif /* AVCODEC_MPEGAUDIODSP_H */
diff --git a/media/ffvpx/libavcodec/mpegaudiodsp_data.c b/media/ffvpx/libavcodec/mpegaudiodsp_data.c
new file mode 100644
index 0000000000..4550de9b80
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodsp_data.c
@@ -0,0 +1,56 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "mpegaudiodsp.h"
+
+/* half mpeg encoding window (full precision) */
+const int32_t ff_mpa_enwindow[257] = {
+ 0, -1, -1, -1, -1, -1, -1, -2,
+ -2, -2, -2, -3, -3, -4, -4, -5,
+ -5, -6, -7, -7, -8, -9, -10, -11,
+ -13, -14, -16, -17, -19, -21, -24, -26,
+ -29, -31, -35, -38, -41, -45, -49, -53,
+ -58, -63, -68, -73, -79, -85, -91, -97,
+ -104, -111, -117, -125, -132, -139, -147, -154,
+ -161, -169, -176, -183, -190, -196, -202, -208,
+ 213, 218, 222, 225, 227, 228, 228, 227,
+ 224, 221, 215, 208, 200, 189, 177, 163,
+ 146, 127, 106, 83, 57, 29, -2, -36,
+ -72, -111, -153, -197, -244, -294, -347, -401,
+ -459, -519, -581, -645, -711, -779, -848, -919,
+ -991, -1064, -1137, -1210, -1283, -1356, -1428, -1498,
+ -1567, -1634, -1698, -1759, -1817, -1870, -1919, -1962,
+ -2001, -2032, -2057, -2075, -2085, -2087, -2080, -2063,
+ 2037, 2000, 1952, 1893, 1822, 1739, 1644, 1535,
+ 1414, 1280, 1131, 970, 794, 605, 402, 185,
+ -45, -288, -545, -814, -1095, -1388, -1692, -2006,
+ -2330, -2663, -3004, -3351, -3705, -4063, -4425, -4788,
+ -5153, -5517, -5879, -6237, -6589, -6935, -7271, -7597,
+ -7910, -8209, -8491, -8755, -8998, -9219, -9416, -9585,
+ -9727, -9838, -9916, -9959, -9966, -9935, -9863, -9750,
+ -9592, -9389, -9139, -8840, -8492, -8092, -7640, -7134,
+ 6574, 5959, 5288, 4561, 3776, 2935, 2037, 1082,
+ 70, -998, -2122, -3300, -4533, -5818, -7154, -8540,
+ -9975,-11455,-12980,-14548,-16155,-17799,-19478,-21189,
+-22929,-24694,-26482,-28289,-30112,-31947,-33791,-35640,
+-37489,-39336,-41176,-43006,-44821,-46617,-48390,-50137,
+-51853,-53534,-55178,-56778,-58333,-59838,-61289,-62684,
+-64019,-65290,-66494,-67629,-68692,-69679,-70590,-71420,
+-72169,-72835,-73415,-73908,-74313,-74630,-74856,-74992,
+ 75038,
+};
diff --git a/media/ffvpx/libavcodec/mpegaudiodsp_fixed.c b/media/ffvpx/libavcodec/mpegaudiodsp_fixed.c
new file mode 100644
index 0000000000..83c9d66095
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodsp_fixed.c
@@ -0,0 +1,20 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define USE_FLOATS 0
+#include "mpegaudiodsp_template.c"
diff --git a/media/ffvpx/libavcodec/mpegaudiodsp_float.c b/media/ffvpx/libavcodec/mpegaudiodsp_float.c
new file mode 100644
index 0000000000..c45b136089
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodsp_float.c
@@ -0,0 +1,20 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define USE_FLOATS 1
+#include "mpegaudiodsp_template.c"
diff --git a/media/ffvpx/libavcodec/mpegaudiodsp_template.c b/media/ffvpx/libavcodec/mpegaudiodsp_template.c
new file mode 100644
index 0000000000..fbbd94e486
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiodsp_template.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/mem_internal.h"
+#include "libavutil/thread.h"
+
+#include "dct32.h"
+#include "mathops.h"
+#include "mpegaudiodsp.h"
+#include "mpegaudio.h"
+
+#if USE_FLOATS
+#define RENAME(n) n##_float
+
+static inline float round_sample(float *sum)
+{
+ float sum1=*sum;
+ *sum = 0;
+ return sum1;
+}
+
+#define MACS(rt, ra, rb) rt+=(ra)*(rb)
+#define MULS(ra, rb) ((ra)*(rb))
+#define MULH3(x, y, s) ((s)*(y)*(x))
+#define MLSS(rt, ra, rb) rt-=(ra)*(rb)
+#define MULLx(x, y, s) ((y)*(x))
+#define FIXHR(x) ((float)(x))
+#define FIXR(x) ((float)(x))
+#define SHR(a,b) ((a)*(1.0f/(1<<(b))))
+
+#else
+
+#define RENAME(n) n##_fixed
+#define OUT_SHIFT (WFRAC_BITS + FRAC_BITS - 15)
+
+static inline int round_sample(int64_t *sum)
+{
+ int sum1;
+ sum1 = (int)((*sum) >> OUT_SHIFT);
+ *sum &= (1<<OUT_SHIFT)-1;
+ return av_clip_int16(sum1);
+}
+
+# define MULS(ra, rb) MUL64(ra, rb)
+# define MACS(rt, ra, rb) MAC64(rt, ra, rb)
+# define MLSS(rt, ra, rb) MLS64(rt, ra, rb)
+# define MULH3(x, y, s) MULH((s)*(x), y)
+# define MULLx(x, y, s) MULL((int)(x),(y),s)
+# define SHR(a,b) (((int)(a))>>(b))
+# define FIXR(a) ((int)((a) * FRAC_ONE + 0.5))
+# define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5))
+#endif
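
As an illustrative aside (a minimal sketch, not part of the patch): the fixed-point branch stores constants halved via FIXHR(c/2), i.e. as round(c/2 * 2^32), and MULH3(x, y, s) pre-scales x by s before keeping only the high 32 bits of the 64-bit product, so the result stays in x's Q format. The standalone program below demonstrates this with the cos(pi/6) constant used later as C3; mulh() stands in for FFmpeg's MULH macro, and all other names are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* high 32 bits of the 64-bit product, the role MULH() plays above */
    static int32_t mulh(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 32);
    }

    int main(void)
    {
        const double  c     = 0.86602540378443864676;                /* cos(pi/6), i.e. 2*C3 */
        const int32_t c_q32 = (int32_t)(c / 2 * 4294967296.0 + 0.5); /* FIXHR(c/2) */
        const int32_t x     = 123456 << 8;                           /* a sample in some Q format */

        /* MULH3(x, c_q32, 2) expands to MULH(2*x, c_q32): the *2 undoes the /2
         * folded into the stored constant, so the result keeps x's scaling.  */
        int32_t y = mulh(2 * x, c_q32);
        printf("fixed-point: %d   double reference: %.0f\n", y, x * c);
        return 0;
    }
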
+
+/** Window for MDCT. Only the elements in [0,17] and
+   [MDCT_BUF_SIZE/2, MDCT_BUF_SIZE/2 + 17] are actually used; the rest
+   exists only to preserve alignment for SIMD implementations.
+*/
+DECLARE_ALIGNED(16, INTFLOAT, RENAME(ff_mdct_win))[8][MDCT_BUF_SIZE];
+
+DECLARE_ALIGNED(16, MPA_INT, RENAME(ff_mpa_synth_window))[512+256];
+
+#define SUM8(op, sum, w, p) \
+{ \
+ op(sum, (w)[0 * 64], (p)[0 * 64]); \
+ op(sum, (w)[1 * 64], (p)[1 * 64]); \
+ op(sum, (w)[2 * 64], (p)[2 * 64]); \
+ op(sum, (w)[3 * 64], (p)[3 * 64]); \
+ op(sum, (w)[4 * 64], (p)[4 * 64]); \
+ op(sum, (w)[5 * 64], (p)[5 * 64]); \
+ op(sum, (w)[6 * 64], (p)[6 * 64]); \
+ op(sum, (w)[7 * 64], (p)[7 * 64]); \
+}
+
+#define SUM8P2(sum1, op1, sum2, op2, w1, w2, p) \
+{ \
+ INTFLOAT tmp;\
+ tmp = p[0 * 64];\
+ op1(sum1, (w1)[0 * 64], tmp);\
+ op2(sum2, (w2)[0 * 64], tmp);\
+ tmp = p[1 * 64];\
+ op1(sum1, (w1)[1 * 64], tmp);\
+ op2(sum2, (w2)[1 * 64], tmp);\
+ tmp = p[2 * 64];\
+ op1(sum1, (w1)[2 * 64], tmp);\
+ op2(sum2, (w2)[2 * 64], tmp);\
+ tmp = p[3 * 64];\
+ op1(sum1, (w1)[3 * 64], tmp);\
+ op2(sum2, (w2)[3 * 64], tmp);\
+ tmp = p[4 * 64];\
+ op1(sum1, (w1)[4 * 64], tmp);\
+ op2(sum2, (w2)[4 * 64], tmp);\
+ tmp = p[5 * 64];\
+ op1(sum1, (w1)[5 * 64], tmp);\
+ op2(sum2, (w2)[5 * 64], tmp);\
+ tmp = p[6 * 64];\
+ op1(sum1, (w1)[6 * 64], tmp);\
+ op2(sum2, (w2)[6 * 64], tmp);\
+ tmp = p[7 * 64];\
+ op1(sum1, (w1)[7 * 64], tmp);\
+ op2(sum2, (w2)[7 * 64], tmp);\
+}
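
For reference, a hypothetical plain-loop equivalent of SUM8P2(sum1, MACS, sum2, MLSS, w1, w2, p) as it instantiates in the float build (not part of the patch); it makes the intent of the macro explicit: the eight loads of p[], spaced 64 apart, are shared by both running sums.

    static void sum8p2_ref(float *sum1, float *sum2,
                           const float *w1, const float *w2, const float *p)
    {
        for (int k = 0; k < 8; k++) {
            float tmp = p[k * 64];       /* one load shared by both sums */
            *sum1 += w1[k * 64] * tmp;   /* MACS */
            *sum2 -= w2[k * 64] * tmp;   /* MLSS */
        }
    }
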
+
+void RENAME(ff_mpadsp_apply_window)(MPA_INT *synth_buf, MPA_INT *window,
+ int *dither_state, OUT_INT *samples,
+ ptrdiff_t incr)
+{
+ register const MPA_INT *w, *w2, *p;
+ int j;
+ OUT_INT *samples2;
+#if USE_FLOATS
+ float sum, sum2;
+#else
+ int64_t sum, sum2;
+#endif
+
+ /* copy to avoid wrap */
+ memcpy(synth_buf + 512, synth_buf, 32 * sizeof(*synth_buf));
+
+ samples2 = samples + 31 * incr;
+ w = window;
+ w2 = window + 31;
+
+ sum = *dither_state;
+ p = synth_buf + 16;
+ SUM8(MACS, sum, w, p);
+ p = synth_buf + 48;
+ SUM8(MLSS, sum, w + 32, p);
+ *samples = round_sample(&sum);
+ samples += incr;
+ w++;
+
+    /* we calculate two samples at the same time so that each load of
+       synth_buf is shared between them (one memory access per two samples) */
+ for(j=1;j<16;j++) {
+ sum2 = 0;
+ p = synth_buf + 16 + j;
+ SUM8P2(sum, MACS, sum2, MLSS, w, w2, p);
+ p = synth_buf + 48 - j;
+ SUM8P2(sum, MLSS, sum2, MLSS, w + 32, w2 + 32, p);
+
+ *samples = round_sample(&sum);
+ samples += incr;
+ sum += sum2;
+ *samples2 = round_sample(&sum);
+ samples2 -= incr;
+ w++;
+ w2--;
+ }
+
+ p = synth_buf + 32;
+ SUM8(MLSS, sum, w + 32, p);
+ *samples = round_sample(&sum);
+ *dither_state= sum;
+}
+
+/* 32 sub band synthesis filter. Input: 32 sub band samples, Output:
+ 32 samples. */
+void RENAME(ff_mpa_synth_filter)(MPADSPContext *s, MPA_INT *synth_buf_ptr,
+ int *synth_buf_offset,
+ MPA_INT *window, int *dither_state,
+ OUT_INT *samples, ptrdiff_t incr,
+ MPA_INT *sb_samples)
+{
+ MPA_INT *synth_buf;
+ int offset;
+
+ offset = *synth_buf_offset;
+ synth_buf = synth_buf_ptr + offset;
+
+ s->RENAME(dct32)(synth_buf, sb_samples);
+ s->RENAME(apply_window)(synth_buf, window, dither_state, samples, incr);
+
+ offset = (offset - 32) & 511;
+ *synth_buf_offset = offset;
+}
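
As a small illustration (not part of the patch) of the bookkeeping above: each call deposits 32 fresh subband outputs into the 512-entry ring buffer and steps the offset backwards by 32 modulo 512, so the 15 previous granules remain available to the 512-tap synthesis window.

    #include <stdio.h>

    int main(void)
    {
        int offset = 0;
        for (int call = 0; call < 5; call++) {
            printf("call %d writes synth_buf[%3d..%3d]\n", call, offset, offset + 31);
            offset = (offset - 32) & 511;   /* same update as ff_mpa_synth_filter() */
        }
        return 0;
    }
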
+
+static av_cold void mpa_synth_init(MPA_INT *window)
+{
+ int i, j;
+
+ /* max = 18760, max sum over all 16 coefs : 44736 */
+ for(i=0;i<257;i++) {
+ INTFLOAT v;
+ v = ff_mpa_enwindow[i];
+#if USE_FLOATS
+ v *= 1.0 / (1LL<<(16 + FRAC_BITS));
+#endif
+ window[i] = v;
+ if ((i & 63) != 0)
+ v = -v;
+ if (i != 0)
+ window[512 - i] = v;
+ }
+
+
+ // Needed for avoiding shuffles in ASM implementations
+ for(i=0; i < 8; i++)
+ for(j=0; j < 16; j++)
+ window[512+16*i+j] = window[64*i+32-j];
+
+ for(i=0; i < 8; i++)
+ for(j=0; j < 16; j++)
+ window[512+128+16*i+j] = window[64*i+48-j];
+}
+
+static av_cold void mpa_synth_window_init(void)
+{
+ mpa_synth_init(RENAME(ff_mpa_synth_window));
+}
+
+av_cold void RENAME(ff_mpa_synth_init)(void)
+{
+ static AVOnce init_static_once = AV_ONCE_INIT;
+ ff_thread_once(&init_static_once, mpa_synth_window_init);
+}
+
+/* cos(pi*i/18) */
+#define C1 FIXHR(0.98480775301220805936/2)
+#define C2 FIXHR(0.93969262078590838405/2)
+#define C3 FIXHR(0.86602540378443864676/2)
+#define C4 FIXHR(0.76604444311897803520/2)
+#define C5 FIXHR(0.64278760968653932632/2)
+#define C6 FIXHR(0.5/2)
+#define C7 FIXHR(0.34202014332566873304/2)
+#define C8 FIXHR(0.17364817766693034885/2)
+
+/* 0.5 / cos(pi*(2*i+1)/36) */
+static const INTFLOAT icos36[9] = {
+ FIXR(0.50190991877167369479),
+ FIXR(0.51763809020504152469), //0
+ FIXR(0.55168895948124587824),
+ FIXR(0.61038729438072803416),
+ FIXR(0.70710678118654752439), //1
+ FIXR(0.87172339781054900991),
+ FIXR(1.18310079157624925896),
+ FIXR(1.93185165257813657349), //2
+ FIXR(5.73685662283492756461),
+};
+
+/* 0.5 / cos(pi*(2*i+1)/36) */
+static const INTFLOAT icos36h[9] = {
+ FIXHR(0.50190991877167369479/2),
+ FIXHR(0.51763809020504152469/2), //0
+ FIXHR(0.55168895948124587824/2),
+ FIXHR(0.61038729438072803416/2),
+ FIXHR(0.70710678118654752439/2), //1
+ FIXHR(0.87172339781054900991/2),
+ FIXHR(1.18310079157624925896/4),
+ FIXHR(1.93185165257813657349/4), //2
+// FIXHR(5.73685662283492756461),
+};
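
The icos36[] values above can be regenerated from the formula in the comment; a hypothetical generator (not part of the patch) is shown below. icos36h[] stores the same constants halved, and the two largest listed entries quartered, so their FIXHR() representation stays within 32 bits.

    #include <math.h>
    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    int main(void)
    {
        /* 0.5 / cos(pi*(2*i+1)/36) for i = 0..8 */
        for (int i = 0; i < 9; i++)
            printf("FIXR(%.20f),\n", 0.5 / cos(M_PI * (2 * i + 1) / 36.0));
        return 0;
    }
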
+
+/* uses a Lee-like decomposition followed by a hand-coded 9-point DCT */
+static void imdct36(INTFLOAT *out, INTFLOAT *buf, SUINTFLOAT *in, INTFLOAT *win)
+{
+ int i, j;
+ SUINTFLOAT t0, t1, t2, t3, s0, s1, s2, s3;
+ SUINTFLOAT tmp[18], *tmp1, *in1;
+
+ for (i = 17; i >= 1; i--)
+ in[i] += in[i-1];
+ for (i = 17; i >= 3; i -= 2)
+ in[i] += in[i-2];
+
+ for (j = 0; j < 2; j++) {
+ tmp1 = tmp + j;
+ in1 = in + j;
+
+ t2 = in1[2*4] + in1[2*8] - in1[2*2];
+
+ t3 = in1[2*0] + SHR(in1[2*6],1);
+ t1 = in1[2*0] - in1[2*6];
+ tmp1[ 6] = t1 - SHR(t2,1);
+ tmp1[16] = t1 + t2;
+
+ t0 = MULH3(in1[2*2] + in1[2*4] , C2, 2);
+ t1 = MULH3(in1[2*4] - in1[2*8] , -2*C8, 1);
+ t2 = MULH3(in1[2*2] + in1[2*8] , -C4, 2);
+
+ tmp1[10] = t3 - t0 - t2;
+ tmp1[ 2] = t3 + t0 + t1;
+ tmp1[14] = t3 + t2 - t1;
+
+ tmp1[ 4] = MULH3(in1[2*5] + in1[2*7] - in1[2*1], -C3, 2);
+ t2 = MULH3(in1[2*1] + in1[2*5], C1, 2);
+ t3 = MULH3(in1[2*5] - in1[2*7], -2*C7, 1);
+ t0 = MULH3(in1[2*3], C3, 2);
+
+ t1 = MULH3(in1[2*1] + in1[2*7], -C5, 2);
+
+ tmp1[ 0] = t2 + t3 + t0;
+ tmp1[12] = t2 + t1 - t0;
+ tmp1[ 8] = t3 - t1 - t0;
+ }
+
+ i = 0;
+ for (j = 0; j < 4; j++) {
+ t0 = tmp[i];
+ t1 = tmp[i + 2];
+ s0 = t1 + t0;
+ s2 = t1 - t0;
+
+ t2 = tmp[i + 1];
+ t3 = tmp[i + 3];
+ s1 = MULH3(t3 + t2, icos36h[ j], 2);
+ s3 = MULLx(t3 - t2, icos36 [8 - j], FRAC_BITS);
+
+ t0 = s0 + s1;
+ t1 = s0 - s1;
+ out[(9 + j) * SBLIMIT] = MULH3(t1, win[ 9 + j], 1) + buf[4*(9 + j)];
+ out[(8 - j) * SBLIMIT] = MULH3(t1, win[ 8 - j], 1) + buf[4*(8 - j)];
+ buf[4 * ( 9 + j )] = MULH3(t0, win[MDCT_BUF_SIZE/2 + 9 + j], 1);
+ buf[4 * ( 8 - j )] = MULH3(t0, win[MDCT_BUF_SIZE/2 + 8 - j], 1);
+
+ t0 = s2 + s3;
+ t1 = s2 - s3;
+ out[(9 + 8 - j) * SBLIMIT] = MULH3(t1, win[ 9 + 8 - j], 1) + buf[4*(9 + 8 - j)];
+ out[ j * SBLIMIT] = MULH3(t1, win[ j], 1) + buf[4*( j)];
+ buf[4 * ( 9 + 8 - j )] = MULH3(t0, win[MDCT_BUF_SIZE/2 + 9 + 8 - j], 1);
+ buf[4 * ( j )] = MULH3(t0, win[MDCT_BUF_SIZE/2 + j], 1);
+ i += 4;
+ }
+
+ s0 = tmp[16];
+ s1 = MULH3(tmp[17], icos36h[4], 2);
+ t0 = s0 + s1;
+ t1 = s0 - s1;
+ out[(9 + 4) * SBLIMIT] = MULH3(t1, win[ 9 + 4], 1) + buf[4*(9 + 4)];
+ out[(8 - 4) * SBLIMIT] = MULH3(t1, win[ 8 - 4], 1) + buf[4*(8 - 4)];
+ buf[4 * ( 9 + 4 )] = MULH3(t0, win[MDCT_BUF_SIZE/2 + 9 + 4], 1);
+ buf[4 * ( 8 - 4 )] = MULH3(t0, win[MDCT_BUF_SIZE/2 + 8 - 4], 1);
+}
+
+void RENAME(ff_imdct36_blocks)(INTFLOAT *out, INTFLOAT *buf, INTFLOAT *in,
+ int count, int switch_point, int block_type)
+{
+ int j;
+ for (j=0 ; j < count; j++) {
+ /* apply window & overlap with previous buffer */
+
+ /* select window */
+ int win_idx = (switch_point && j < 2) ? 0 : block_type;
+ INTFLOAT *win = RENAME(ff_mdct_win)[win_idx + (4 & -(j & 1))];
+
+ imdct36(out, buf, in, win);
+
+ in += 18;
+ buf += ((j&3) != 3 ? 1 : (72-3));
+ out++;
+ }
+}
+
diff --git a/media/ffvpx/libavcodec/mpegaudiotab.h b/media/ffvpx/libavcodec/mpegaudiotab.h
new file mode 100644
index 0000000000..bb2e5de4ea
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiotab.h
@@ -0,0 +1,102 @@
+/*
+ * mpeg audio layer 2 tables. Most of them come from the mpeg audio
+ * specification.
+ *
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * mpeg audio layer 2 tables.
+ * Most of them come from the mpeg audio specification.
+ */
+
+#ifndef AVCODEC_MPEGAUDIOTAB_H
+#define AVCODEC_MPEGAUDIOTAB_H
+
+#include <stdint.h>
+#include "mpegaudio.h"
+
+static const int costab32[30] = {
+ FIX(0.54119610014619701222),
+ FIX(1.3065629648763763537),
+
+ FIX(0.50979557910415917998),
+ FIX(2.5629154477415054814),
+ FIX(0.89997622313641556513),
+ FIX(0.60134488693504528634),
+
+ FIX(0.5024192861881556782),
+ FIX(5.1011486186891552563),
+ FIX(0.78815462345125020249),
+ FIX(0.64682178335999007679),
+ FIX(0.56694403481635768927),
+ FIX(1.0606776859903470633),
+ FIX(1.7224470982383341955),
+ FIX(0.52249861493968885462),
+
+ FIX(10.19000812354803287),
+ FIX(0.674808341455005678),
+ FIX(1.1694399334328846596),
+ FIX(0.53104259108978413284),
+ FIX(2.0577810099534108446),
+ FIX(0.58293496820613388554),
+ FIX(0.83934964541552681272),
+ FIX(0.50547095989754364798),
+ FIX(3.4076084184687189804),
+ FIX(0.62250412303566482475),
+ FIX(0.97256823786196078263),
+ FIX(0.51544730992262455249),
+ FIX(1.4841646163141661852),
+ FIX(0.5531038960344445421),
+ FIX(0.74453627100229857749),
+ FIX(0.5006029982351962726),
+};
+
+static const int bitinv32[32] = {
+ 0, 16, 8, 24, 4, 20, 12, 28,
+ 2, 18, 10, 26, 6, 22, 14, 30,
+ 1, 17, 9, 25, 5, 21, 13, 29,
+ 3, 19, 11, 27, 7, 23, 15, 31
+};
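
bitinv32[] is simply the 5-bit bit-reversal permutation of 0..31; a hypothetical generator (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        for (int i = 0; i < 32; i++) {
            int r = 0;
            for (int b = 0; b < 5; b++)      /* reverse the five low bits */
                r |= ((i >> b) & 1) << (4 - b);
            printf("%2d%s", r, (i & 7) == 7 ? "\n" : ", ");
        }
        return 0;
    }
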
+
+
+/* signal-to-noise ratio of each quantization step (could be
+   computed from quant_steps[]). The values are dB multiplied by 10.
+*/
+static const unsigned short quant_snr[17] = {
+ 70, 110, 160, 208,
+ 253, 316, 378, 439,
+ 499, 559, 620, 680,
+ 740, 800, 861, 920,
+ 980
+};
+
+/* fixed psychoacoustic model. SNR values taken from the 'toolame'
+   project */
+static const float fixed_smr[SBLIMIT] = {
+ 30, 17, 16, 10, 3, 12, 8, 2.5,
+ 5, 5, 6, 6, 5, 6, 10, 6,
+ -4, -10, -21, -30, -42, -55, -68, -75,
+ -75, -75, -75, -75, -91, -107, -110, -108
+};
+
+static const unsigned char nb_scale_factors[4] = { 3, 2, 1, 2 };
+
+#endif /* AVCODEC_MPEGAUDIOTAB_H */
diff --git a/media/ffvpx/libavcodec/mpegaudiotabs.c b/media/ffvpx/libavcodec/mpegaudiotabs.c
new file mode 100644
index 0000000000..eaa380c808
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiotabs.c
@@ -0,0 +1,22 @@
+/*
+ * MPEG Audio common tables
+ * copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "mpegaudiotabs.h"
diff --git a/media/ffvpx/libavcodec/mpegaudiotabs.h b/media/ffvpx/libavcodec/mpegaudiotabs.h
new file mode 100644
index 0000000000..671b83848d
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegaudiotabs.h
@@ -0,0 +1,39 @@
+/*
+ * MPEG Audio common tables
+ * copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEGAUDIOTABS_H
+#define AVCODEC_MPEGAUDIOTABS_H
+
+#include <stdint.h>
+
+const uint16_t ff_mpa_bitrate_tab[2][3][15] = {
+ { { 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 },
+ { 0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384 },
+ { 0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 } },
+ { { 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256 },
+ { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 },
+ { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 }
+ }
+};
+
+const uint16_t ff_mpa_freq_tab[3] = { 44100, 48000, 32000 };
+
+#endif
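
As a usage sketch (not part of the patch; names outside it are illustrative): MPEG audio frame-header parsers typically index these tables with the version ('lsf' selects the MPEG-2 low-sampling-frequency row), the layer, and the bitrate / sample-rate index fields from the header; MPEG-2.5 halves the sample rate once more and is handled along the same lines by real parsers.

    #include <stdint.h>
    #include "mpegaudiotabs.h"   /* assumes compilation inside this tree */

    static int header_to_rates(int lsf, int layer, int bitrate_index,
                               int sample_rate_index,
                               int *bitrate_kbps, int *sample_rate)
    {
        if (layer < 1 || layer > 3 || bitrate_index <= 0 || bitrate_index >= 15 ||
            sample_rate_index < 0 || sample_rate_index >= 3)
            return -1;   /* free-format and reserved values not handled here */
        *bitrate_kbps = ff_mpa_bitrate_tab[!!lsf][layer - 1][bitrate_index];
        *sample_rate  = ff_mpa_freq_tab[sample_rate_index] >> (lsf ? 1 : 0);
        return 0;
    }
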
diff --git a/media/ffvpx/libavcodec/mpegpicture.h b/media/ffvpx/libavcodec/mpegpicture.h
new file mode 100644
index 0000000000..7919aa402c
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegpicture.h
@@ -0,0 +1,105 @@
+/*
+ * Mpeg video formats-related defines and utility functions
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEGPICTURE_H
+#define AVCODEC_MPEGPICTURE_H
+
+#include <stdint.h>
+
+#include "libavutil/frame.h"
+
+#include "avcodec.h"
+#include "motion_est.h"
+#include "threadframe.h"
+
+#define MPEGVIDEO_MAX_PLANES 4
+#define MAX_PICTURE_COUNT 36
+#define EDGE_WIDTH 16
+
+typedef struct ScratchpadContext {
+    uint8_t *edge_emu_buffer;     ///< temporary buffer used when MVs point to out-of-frame data
+ uint8_t *rd_scratchpad; ///< scratchpad for rate distortion mb decision
+ uint8_t *obmc_scratchpad;
+ uint8_t *b_scratchpad; ///< scratchpad used for writing into write only buffers
+} ScratchpadContext;
+
+/**
+ * Picture.
+ */
+typedef struct Picture {
+ struct AVFrame *f;
+ ThreadFrame tf;
+
+ AVBufferRef *qscale_table_buf;
+ int8_t *qscale_table;
+
+ AVBufferRef *motion_val_buf[2];
+ int16_t (*motion_val[2])[2];
+
+ AVBufferRef *mb_type_buf;
+ uint32_t *mb_type; ///< types and macros are defined in mpegutils.h
+
+ AVBufferRef *mbskip_table_buf;
+ uint8_t *mbskip_table;
+
+ AVBufferRef *ref_index_buf[2];
+ int8_t *ref_index[2];
+
+ int alloc_mb_width; ///< mb_width used to allocate tables
+ int alloc_mb_height; ///< mb_height used to allocate tables
+ int alloc_mb_stride; ///< mb_stride used to allocate tables
+
+ AVBufferRef *hwaccel_priv_buf;
+ void *hwaccel_picture_private; ///< Hardware accelerator private data
+
+ int field_picture; ///< whether or not the picture was encoded in separate fields
+
+ int b_frame_score;
+    int needs_realloc;          ///< Picture needs to be reallocated (e.g. due to a frame size change)
+
+ int reference;
+ int shared;
+
+ int display_picture_number;
+ int coded_picture_number;
+} Picture;
+
+/**
+ * Allocate a Picture.
+ * The pixels are allocated/set by calling get_buffer() if shared = 0.
+ */
+int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me,
+ ScratchpadContext *sc, int shared, int encoding,
+ int chroma_x_shift, int chroma_y_shift, int out_format,
+ int mb_stride, int mb_width, int mb_height, int b8_stride,
+ ptrdiff_t *linesize, ptrdiff_t *uvlinesize);
+
+int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
+ ScratchpadContext *sc, int linesize);
+
+int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src);
+void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *picture);
+
+void ff_mpv_picture_free(AVCodecContext *avctx, Picture *pic);
+int ff_update_picture_tables(Picture *dst, const Picture *src);
+
+int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared);
+
+#endif /* AVCODEC_MPEGPICTURE_H */
diff --git a/media/ffvpx/libavcodec/mpegutils.h b/media/ffvpx/libavcodec/mpegutils.h
new file mode 100644
index 0000000000..386110bb8c
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegutils.h
@@ -0,0 +1,142 @@
+/*
+ * Mpeg video formats-related defines and utility functions
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEGUTILS_H
+#define AVCODEC_MPEGUTILS_H
+
+#include <stdint.h>
+
+#include "libavutil/frame.h"
+
+#include "avcodec.h"
+
+/**
+ * Return value for header parsers if frame is not coded.
+ * */
+#define FRAME_SKIPPED 100
+
+/* picture type */
+#define PICT_TOP_FIELD 1
+#define PICT_BOTTOM_FIELD 2
+#define PICT_FRAME 3
+
+#define MAX_MB_BYTES (30 * 16 * 16 * 3 / 8 + 120)
+#define MAX_FCODE 7
+
+/* MB types */
+#define MB_TYPE_INTRA4x4 (1 << 0)
+#define MB_TYPE_INTRA16x16 (1 << 1) // FIXME H.264-specific
+#define MB_TYPE_INTRA_PCM (1 << 2) // FIXME H.264-specific
+#define MB_TYPE_16x16 (1 << 3)
+#define MB_TYPE_16x8 (1 << 4)
+#define MB_TYPE_8x16 (1 << 5)
+#define MB_TYPE_8x8 (1 << 6)
+#define MB_TYPE_INTERLACED (1 << 7)
+#define MB_TYPE_DIRECT2 (1 << 8) // FIXME
+#define MB_TYPE_ACPRED (1 << 9)
+#define MB_TYPE_GMC (1 << 10)
+#define MB_TYPE_SKIP (1 << 11)
+#define MB_TYPE_P0L0 (1 << 12)
+#define MB_TYPE_P1L0 (1 << 13)
+#define MB_TYPE_P0L1 (1 << 14)
+#define MB_TYPE_P1L1 (1 << 15)
+#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0)
+#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1)
+#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1)
+#define MB_TYPE_QUANT (1 << 16)
+#define MB_TYPE_CBP (1 << 17)
+
+#define MB_TYPE_INTRA MB_TYPE_INTRA4x4 // default mb_type if there is just one type
+
+#define IS_INTRA4x4(a) ((a) & MB_TYPE_INTRA4x4)
+#define IS_INTRA16x16(a) ((a) & MB_TYPE_INTRA16x16)
+#define IS_PCM(a) ((a) & MB_TYPE_INTRA_PCM)
+#define IS_INTRA(a) ((a) & 7)
+#define IS_INTER(a) ((a) & (MB_TYPE_16x16 | MB_TYPE_16x8 | \
+ MB_TYPE_8x16 | MB_TYPE_8x8))
+#define IS_SKIP(a) ((a) & MB_TYPE_SKIP)
+#define IS_INTRA_PCM(a) ((a) & MB_TYPE_INTRA_PCM)
+#define IS_INTERLACED(a) ((a) & MB_TYPE_INTERLACED)
+#define IS_DIRECT(a) ((a) & MB_TYPE_DIRECT2)
+#define IS_GMC(a) ((a) & MB_TYPE_GMC)
+#define IS_16X16(a) ((a) & MB_TYPE_16x16)
+#define IS_16X8(a) ((a) & MB_TYPE_16x8)
+#define IS_8X16(a) ((a) & MB_TYPE_8x16)
+#define IS_8X8(a) ((a) & MB_TYPE_8x8)
+#define IS_SUB_8X8(a) ((a) & MB_TYPE_16x16) // note reused
+#define IS_SUB_8X4(a) ((a) & MB_TYPE_16x8) // note reused
+#define IS_SUB_4X8(a) ((a) & MB_TYPE_8x16) // note reused
+#define IS_SUB_4X4(a) ((a) & MB_TYPE_8x8) // note reused
+#define IS_ACPRED(a) ((a) & MB_TYPE_ACPRED)
+#define IS_QUANT(a) ((a) & MB_TYPE_QUANT)
+#define IS_DIR(a, part, list) ((a) & (MB_TYPE_P0L0 << ((part) + 2 * (list))))
+
+// does this MB use list X? Note: does not work if sub-MB partitions are used
+#define USES_LIST(a, list) ((a) & ((MB_TYPE_P0L0 | MB_TYPE_P1L0) << (2 * (list))))
+
+#define HAS_CBP(a) ((a) & MB_TYPE_CBP)
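
A small hypothetical helper (not part of the patch) showing how the flag tests above compose on an mb_type word; it assumes this header is already included.

    #include <stdint.h>
    #include <stdio.h>

    static void describe_mb(uint32_t mb_type)
    {
        if (IS_INTRA(mb_type))
            printf("intra%s\n", IS_PCM(mb_type) ? " (PCM)" : "");
        else if (IS_SKIP(mb_type))
            printf("skipped\n");
        else
            printf("inter: L0=%d L1=%d 8x8=%d interlaced=%d\n",
                   !!USES_LIST(mb_type, 0), !!USES_LIST(mb_type, 1),
                   !!IS_8X8(mb_type), !!IS_INTERLACED(mb_type));
    }
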
+
+/* MB types for encoding */
+#define CANDIDATE_MB_TYPE_INTRA (1 << 0)
+#define CANDIDATE_MB_TYPE_INTER (1 << 1)
+#define CANDIDATE_MB_TYPE_INTER4V (1 << 2)
+#define CANDIDATE_MB_TYPE_SKIPPED (1 << 3)
+
+#define CANDIDATE_MB_TYPE_DIRECT (1 << 4)
+#define CANDIDATE_MB_TYPE_FORWARD (1 << 5)
+#define CANDIDATE_MB_TYPE_BACKWARD (1 << 6)
+#define CANDIDATE_MB_TYPE_BIDIR (1 << 7)
+
+#define CANDIDATE_MB_TYPE_INTER_I (1 << 8)
+#define CANDIDATE_MB_TYPE_FORWARD_I (1 << 9)
+#define CANDIDATE_MB_TYPE_BACKWARD_I (1 << 10)
+#define CANDIDATE_MB_TYPE_BIDIR_I (1 << 11)
+
+#define CANDIDATE_MB_TYPE_DIRECT0 (1 << 12)
+
+#define INPLACE_OFFSET 16
+
+enum OutputFormat {
+ FMT_MPEG1,
+ FMT_H261,
+ FMT_H263,
+ FMT_MJPEG,
+ FMT_SPEEDHQ,
+};
+
+
+/**
+ * Draw a horizontal band if supported.
+ *
+ * @param h the normal height; it will be reduced automatically if needed
+ */
+void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last,
+ int y, int h, int picture_structure, int first_field,
+ int low_delay);
+
+/**
+ * Print debugging info for the given picture.
+ */
+void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict,
+ const uint8_t *mbskip_table, const uint32_t *mbtype_table,
+ const int8_t *qscale_table, int16_t (*const motion_val[2])[2],
+ int mb_width, int mb_height, int mb_stride, int quarter_sample);
+
+#endif /* AVCODEC_MPEGUTILS_H */
diff --git a/media/ffvpx/libavcodec/mpegvideo.h b/media/ffvpx/libavcodec/mpegvideo.h
new file mode 100644
index 0000000000..55828e6102
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegvideo.h
@@ -0,0 +1,612 @@
+/*
+ * Generic DCT based hybrid video encoder
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * mpegvideo header.
+ */
+
+#ifndef AVCODEC_MPEGVIDEO_H
+#define AVCODEC_MPEGVIDEO_H
+
+#include "avcodec.h"
+#include "blockdsp.h"
+#include "error_resilience.h"
+#include "fdctdsp.h"
+#include "get_bits.h"
+#include "h264chroma.h"
+#include "h263dsp.h"
+#include "hpeldsp.h"
+#include "idctdsp.h"
+#include "me_cmp.h"
+#include "motion_est.h"
+#include "mpegpicture.h"
+#include "mpegvideoencdsp.h"
+#include "pixblockdsp.h"
+#include "put_bits.h"
+#include "ratecontrol.h"
+#include "mpegutils.h"
+#include "qpeldsp.h"
+#include "videodsp.h"
+
+#define MAX_THREADS 32
+
+#define MAX_B_FRAMES 16
+
+/**
+ * Scantable.
+ */
+typedef struct ScanTable {
+ const uint8_t *scantable;
+ uint8_t permutated[64];
+ uint8_t raster_end[64];
+} ScanTable;
+
+/**
+ * MpegEncContext.
+ */
+typedef struct MpegEncContext {
+ AVClass *class;
+
+ int y_dc_scale, c_dc_scale;
+ int ac_pred;
+ int block_last_index[12]; ///< last non zero coefficient in block
+ int h263_aic; ///< Advanced INTRA Coding (AIC)
+
+ /* scantables */
+ ScanTable inter_scantable; ///< if inter == intra then intra should be used to reduce the cache usage
+
+ /* WARNING: changes above this line require updates to hardcoded
+ * offsets used in ASM. */
+
+ ScanTable intra_scantable;
+ uint8_t permutated_intra_h_scantable[64];
+ uint8_t permutated_intra_v_scantable[64];
+
+ struct AVCodecContext *avctx;
+ /* The following pointer is intended for codecs sharing code
+ * between decoder and encoder and in need of a common context to do so. */
+ void *private_ctx;
+ /* the following parameters must be initialized before encoding */
+ int width, height;///< picture size. must be a multiple of 16
+ int gop_size;
+ int intra_only; ///< if true, only intra pictures are generated
+ int64_t bit_rate; ///< wanted bit rate
+ enum OutputFormat out_format; ///< output format
+ int h263_pred; ///< use MPEG-4/H.263 ac/dc predictions
+ int pb_frame; ///< PB-frame mode (0 = none, 1 = base, 2 = improved)
+
+/* the following codec id fields are deprecated in favor of codec_id */
+ int h263_plus; ///< H.263+ headers
+ int h263_flv; ///< use flv H.263 header
+
+ enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
+ int fixed_qscale; ///< fixed qscale if non zero
+ int encoding; ///< true if we are encoding (vs decoding)
+ int max_b_frames; ///< max number of B-frames for encoding
+ int luma_elim_threshold;
+ int chroma_elim_threshold;
+ int workaround_bugs; ///< workaround bugs in encoders which cannot be detected automatically
+ int codec_tag; ///< internal codec_tag upper case converted from avctx codec_tag
+ /* the following fields are managed internally by the encoder */
+
+ /* sequence parameters */
+ int context_initialized;
+ int input_picture_number; ///< used to set pic->display_picture_number, should not be used for/by anything else
+ int coded_picture_number; ///< used to set pic->coded_picture_number, should not be used for/by anything else
+ int picture_number; //FIXME remove, unclear definition
+ int extradata_parsed;
+ int picture_in_gop_number; ///< 0-> first pic in gop, ...
+ int mb_width, mb_height; ///< number of MBs horizontally & vertically
+ int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
+ int b8_stride; ///< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
+ int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replication)
+ int mb_num; ///< number of MBs of a picture
+ ptrdiff_t linesize; ///< line size, in bytes, may be different from width
+ ptrdiff_t uvlinesize; ///< line size, for chroma in bytes, may be different from width
+ Picture *picture; ///< main picture buffer
+ Picture **input_picture; ///< next pictures on display order for encoding
+ Picture **reordered_input_picture; ///< pointer to the next pictures in coded order for encoding
+
+ int64_t user_specified_pts; ///< last non-zero pts from AVFrame which was passed into avcodec_send_frame()
+ /**
+ * pts difference between the first and second input frame, used for
+ * calculating dts of the first frame when there's a delay */
+ int64_t dts_delta;
+ /**
+ * reordered pts to be used as dts for the next output frame when there's
+ * a delay */
+ int64_t reordered_pts;
+
+ /** bit output */
+ PutBitContext pb;
+
+ int start_mb_y; ///< start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
+ int end_mb_y; ///< end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
+ struct MpegEncContext *thread_context[MAX_THREADS];
+ int slice_context_count; ///< number of used thread_contexts
+
+ /**
+ * copy of the previous picture structure.
+ * note, linesize & data, might not match the previous picture (for field pictures)
+ */
+ Picture last_picture;
+
+ /**
+ * copy of the next picture structure.
+ * note, linesize & data, might not match the next picture (for field pictures)
+ */
+ Picture next_picture;
+
+ /**
+ * Reference to the source picture for encoding.
+ * note, linesize & data, might not match the source picture (for field pictures)
+ */
+ AVFrame *new_picture;
+
+ /**
+ * copy of the current picture structure.
+ * note, linesize & data, might not match the current picture (for field pictures)
+ */
+ Picture current_picture; ///< buffer to store the decompressed current picture
+
+ Picture *last_picture_ptr; ///< pointer to the previous picture.
+ Picture *next_picture_ptr; ///< pointer to the next picture (for bidir pred)
+ Picture *current_picture_ptr; ///< pointer to the current picture
+ int skipped_last_frame;
+ int last_dc[3]; ///< last DC values for MPEG-1
+ int16_t *dc_val_base;
+ int16_t *dc_val[3]; ///< used for MPEG-4 DC prediction, all 3 arrays must be continuous
+ const uint8_t *y_dc_scale_table; ///< qscale -> y_dc_scale table
+ const uint8_t *c_dc_scale_table; ///< qscale -> c_dc_scale table
+ const uint8_t *chroma_qscale_table; ///< qscale -> chroma_qscale (H.263)
+ uint8_t *coded_block_base;
+ uint8_t *coded_block; ///< used for coded block pattern prediction (msmpeg4v3, wmv1)
+ int16_t (*ac_val_base)[16];
+ int16_t (*ac_val[3])[16]; ///< used for MPEG-4 AC prediction, all 3 arrays must be continuous
+ int mb_skipped; ///< MUST BE SET only during DECODING
+ uint8_t *mbskip_table; /**< used to avoid copy if macroblock skipped (for black regions for example)
+ and used for B-frame encoding & decoding (contains skip table of next P-frame) */
+ uint8_t *mbintra_table; ///< used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
+ uint8_t *cbp_table; ///< used to store cbp, ac_pred for partitioned decoding
+ uint8_t *pred_dir_table; ///< used to store pred_dir for partitioned decoding
+
+ ScratchpadContext sc;
+
+ int qscale; ///< QP
+ int chroma_qscale; ///< chroma QP
+ unsigned int lambda; ///< Lagrange multiplier used in rate distortion
+ unsigned int lambda2; ///< (lambda*lambda) >> FF_LAMBDA_SHIFT
+ int *lambda_table;
+ int adaptive_quant; ///< use adaptive quantization
+ int dquant; ///< qscale difference to prev qscale
+ int pict_type; ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
+ int vbv_delay;
+    int last_pict_type; //FIXME remove
+ int last_non_b_pict_type; ///< used for MPEG-4 gmc B-frames & ratecontrol
+ int droppable;
+ int last_lambda_for[5]; ///< last lambda for a specific pict type
+ int skipdct; ///< skip dct and code zero residual
+
+ /* motion compensation */
+ int unrestricted_mv; ///< mv can point outside of the coded picture
+ int h263_long_vectors; ///< use horrible H.263v1 long vector mode
+
+ BlockDSPContext bdsp;
+ FDCTDSPContext fdsp;
+ H264ChromaContext h264chroma;
+ HpelDSPContext hdsp;
+ IDCTDSPContext idsp;
+ MECmpContext mecc;
+ MpegvideoEncDSPContext mpvencdsp;
+ PixblockDSPContext pdsp;
+ QpelDSPContext qdsp;
+ VideoDSPContext vdsp;
+ H263DSPContext h263dsp;
+ int f_code; ///< forward MV resolution
+ int b_code; ///< backward MV resolution for B-frames (MPEG-4)
+ int16_t (*p_mv_table_base)[2];
+ int16_t (*b_forw_mv_table_base)[2];
+ int16_t (*b_back_mv_table_base)[2];
+ int16_t (*b_bidir_forw_mv_table_base)[2];
+ int16_t (*b_bidir_back_mv_table_base)[2];
+ int16_t (*b_direct_mv_table_base)[2];
+ int16_t (*p_field_mv_table_base)[2];
+ int16_t (*b_field_mv_table_base)[2];
+ int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) P-frame encoding
+ int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode B-frame encoding
+ int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode B-frame encoding
+ int16_t (*b_bidir_forw_mv_table)[2]; ///< MV table (1MV per MB) bidir mode B-frame encoding
+ int16_t (*b_bidir_back_mv_table)[2]; ///< MV table (1MV per MB) bidir mode B-frame encoding
+ int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode B-frame encoding
+ int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced P-frame encoding
+ int16_t (*b_field_mv_table[2][2][2])[2];///< MV table (4MV per MB) interlaced B-frame encoding
+ uint8_t (*p_field_select_table[2]); ///< Only the first element is allocated
+ uint8_t (*b_field_select_table[2][2]); ///< Only the first element is allocated
+
+ /* The following fields are encoder-only */
+ uint16_t *mb_var; ///< Table for MB variances
+ uint16_t *mc_mb_var; ///< Table for motion compensated MB variances
+ uint8_t *mb_mean; ///< Table for MB luminance
+ int64_t mb_var_sum; ///< sum of MB variance for current frame
+ int64_t mc_mb_var_sum; ///< motion compensated MB variance for current frame
+ uint64_t encoding_error[MPEGVIDEO_MAX_PLANES];
+
+ int motion_est; ///< ME algorithm
+ int me_penalty_compensation;
+ int me_pre; ///< prepass for motion estimation
+ int mv_dir;
+#define MV_DIR_FORWARD 1
+#define MV_DIR_BACKWARD 2
+#define MV_DIRECT 4 ///< bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
+ int mv_type;
+#define MV_TYPE_16X16 0 ///< 1 vector for the whole mb
+#define MV_TYPE_8X8 1 ///< 4 vectors (H.263, MPEG-4 4MV)
+#define MV_TYPE_16X8 2 ///< 2 vectors, one per 16x8 block
+#define MV_TYPE_FIELD 3 ///< 2 vectors, one per field
+#define MV_TYPE_DMV 4 ///< 2 vectors, special mpeg2 Dual Prime Vectors
+    /** motion vectors for a macroblock
+       first coordinate  : 0 = forward, 1 = backward
+       second coordinate : depends on the MV type
+       third coordinate  : 0 = x, 1 = y
+    */
+ int mv[2][4][2];
+ int field_select[2][2];
+ int last_mv[2][2][2]; ///< last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
+ const uint8_t *fcode_tab; ///< smallest fcode needed for each MV
+ int16_t direct_scale_mv[2][64]; ///< precomputed to avoid divisions in ff_mpeg4_set_direct_mv
+
+ MotionEstContext me;
+
+ int no_rounding; /**< apply no rounding to motion compensation (MPEG-4, msmpeg4, ...)
+ for B-frames rounding mode is always 0 */
+
+ /* macroblock layer */
+ int mb_x, mb_y;
+ int mb_skip_run;
+ int mb_intra;
+ uint16_t *mb_type; ///< Table for candidate MB types for encoding (defines in mpegutils.h)
+
+ int block_index[6]; ///< index to current MB in block based arrays with edges
+ int block_wrap[6];
+ uint8_t *dest[3];
+
+ int *mb_index2xy; ///< mb_index -> mb_x + mb_y*mb_stride
+
+ /** matrix transmitted in the bitstream */
+ uint16_t intra_matrix[64];
+ uint16_t chroma_intra_matrix[64];
+ uint16_t inter_matrix[64];
+ uint16_t chroma_inter_matrix[64];
+
+ int intra_quant_bias; ///< bias for the quantizer
+ int inter_quant_bias; ///< bias for the quantizer
+ int min_qcoeff; ///< minimum encodable coefficient
+ int max_qcoeff; ///< maximum encodable coefficient
+ int ac_esc_length; ///< num of bits needed to encode the longest esc
+ uint8_t *intra_ac_vlc_length;
+ uint8_t *intra_ac_vlc_last_length;
+ uint8_t *intra_chroma_ac_vlc_length;
+ uint8_t *intra_chroma_ac_vlc_last_length;
+ uint8_t *inter_ac_vlc_length;
+ uint8_t *inter_ac_vlc_last_length;
+ uint8_t *luma_dc_vlc_length;
+
+ int coded_score[12];
+
+ /** precomputed matrix (combine qscale and DCT renorm) */
+ int (*q_intra_matrix)[64];
+ int (*q_chroma_intra_matrix)[64];
+ int (*q_inter_matrix)[64];
+    /** identical to the above, but for MMX; these are not permutated and the second 64 entries are the bias */
+ uint16_t (*q_intra_matrix16)[2][64];
+ uint16_t (*q_chroma_intra_matrix16)[2][64];
+ uint16_t (*q_inter_matrix16)[2][64];
+
+ /* noise reduction */
+ int (*dct_error_sum)[64];
+ int dct_count[2];
+ uint16_t (*dct_offset)[64];
+
+ /* bit rate control */
+ int64_t total_bits;
+ int frame_bits; ///< bits used for the current frame
+ int stuffing_bits; ///< bits used for stuffing
+ int next_lambda; ///< next lambda used for retrying to encode a frame
+ RateControlContext rc_context; ///< contains stuff only accessed in ratecontrol.c
+
+ /* statistics, used for 2-pass encoding */
+ int mv_bits;
+ int header_bits;
+ int i_tex_bits;
+ int p_tex_bits;
+ int i_count;
+ int skip_count;
+ int misc_bits; ///< cbp, mb_type
+ int last_bits; ///< temp var used for calculating the above vars
+
+ /* error concealment / resync */
+ int resync_mb_x; ///< x position of last resync marker
+ int resync_mb_y; ///< y position of last resync marker
+ GetBitContext last_resync_gb; ///< used to search for the next resync marker
+ int mb_num_left; ///< number of MBs left in this video packet (for partitioned Slices only)
+
+ /* H.263 specific */
+ int gob_index;
+ int obmc; ///< overlapped block motion compensation
+ int mb_info; ///< interval for outputting info about mb offsets as side data
+ int prev_mb_info, last_mb_info;
+ uint8_t *mb_info_ptr;
+ int mb_info_size;
+ int ehc_mode;
+
+ /* H.263+ specific */
+ int umvplus; ///< == H.263+ && unrestricted_mv
+ int h263_aic_dir; ///< AIC direction: 0 = left, 1 = top
+ int h263_slice_structured;
+ int alt_inter_vlc; ///< alternative inter vlc
+ int modified_quant;
+ int loop_filter;
+ int custom_pcf;
+
+ /* MPEG-4 specific */
+ int studio_profile;
+ int dct_precision;
+ ///< number of bits to represent the fractional part of time (encoder only)
+ int time_increment_bits;
+ int last_time_base;
+ int time_base; ///< time in seconds of last I,P,S Frame
+ int64_t time; ///< time of current frame
+ int64_t last_non_b_time;
+ uint16_t pp_time; ///< time distance between the last 2 p,s,i frames
+ uint16_t pb_time; ///< time distance between the last b and p,s,i frame
+ uint16_t pp_field_time;
+ uint16_t pb_field_time; ///< like above, just for interlaced
+ int mcsel;
+ int quant_precision;
+ int quarter_sample; ///< 1->qpel, 0->half pel ME/MC
+ int data_partitioning; ///< data partitioning flag from header
+ int partitioned_frame; ///< is current frame partitioned
+ int low_delay; ///< no reordering needed / has no B-frames
+ PutBitContext tex_pb; ///< used for data partitioned VOPs
+ PutBitContext pb2; ///< used for data partitioned VOPs
+ int mpeg_quant;
+ int padding_bug_score; ///< used to detect the VERY common padding bug in MPEG-4
+
+ /* divx specific, used to workaround (many) bugs in divx5 */
+ int divx_packed;
+ uint8_t *bitstream_buffer; //Divx 5.01 puts several frames in a single one, this is used to reorder them
+ int bitstream_buffer_size;
+ unsigned int allocated_bitstream_buffer_size;
+
+ /* RV10 specific */
+ int rv10_version; ///< RV10 version: 0 or 3
+ int rv10_first_dc_coded[3];
+
+ /* MJPEG specific */
+ struct MJpegContext *mjpeg_ctx;
+ int esc_pos;
+
+ /* MSMPEG4 specific */
+ int mv_table_index;
+ int rl_table_index;
+ int rl_chroma_table_index;
+ int dc_table_index;
+ int use_skip_mb_code;
+ int slice_height; ///< in macroblocks
+ int first_slice_line; ///< used in MPEG-4 too to handle resync markers
+ int flipflop_rounding;
+ int msmpeg4_version; ///< 0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
+ int per_mb_rl_table;
+ int esc3_level_length;
+ int esc3_run_length;
+ int inter_intra_pred;
+ int mspel;
+
+ /* decompression specific */
+ GetBitContext gb;
+
+ /* MPEG-1 specific */
+ int last_mv_dir; ///< last mv_dir, used for B-frame encoding
+ int vbv_delay_pos; ///< offset of vbv_delay in the bitstream
+
+    /* MPEG-2-specific - I wish I did not have to support this mess. */
+ int progressive_sequence;
+ int mpeg_f_code[2][2];
+
+ // picture structure defines are loaded from mpegutils.h
+ int picture_structure;
+
+ int intra_dc_precision;
+ int frame_pred_frame_dct;
+ int top_field_first;
+ int concealment_motion_vectors;
+ int q_scale_type;
+ int brd_scale;
+ int intra_vlc_format;
+ int alternate_scan;
+ int repeat_first_field;
+ int chroma_420_type;
+ int chroma_format;
+#define CHROMA_420 1
+#define CHROMA_422 2
+#define CHROMA_444 3
+    int chroma_x_shift; // depends on pix_fmt, which depends on chroma_format
+ int chroma_y_shift;
+
+ int progressive_frame;
+ int full_pel[2];
+ int interlaced_dct;
+ int first_field; ///< is 1 for the first field of a field picture 0 otherwise
+
+ /* RTP specific */
+ int rtp_mode;
+ int rtp_payload_size;
+
+ uint8_t *ptr_lastgob;
+ int16_t (*pblocks[12])[64];
+
+ int16_t (*block)[64]; ///< points to one of the following blocks
+ int16_t (*blocks)[12][64]; // for HQ mode we need to keep the best block
+ int (*decode_mb)(struct MpegEncContext *s, int16_t block[12][64]); // used by some codecs to avoid a switch()
+
+#define SLICE_OK 0
+#define SLICE_ERROR -1
+#define SLICE_END -2 ///<end marker found
+#define SLICE_NOEND -3 ///<no end marker or error found but mb count exceeded
+
+ void (*dct_unquantize_mpeg1_intra)(struct MpegEncContext *s,
+ int16_t *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_mpeg1_inter)(struct MpegEncContext *s,
+ int16_t *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_mpeg2_intra)(struct MpegEncContext *s,
+ int16_t *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_mpeg2_inter)(struct MpegEncContext *s,
+ int16_t *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_h263_intra)(struct MpegEncContext *s,
+ int16_t *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_h263_inter)(struct MpegEncContext *s,
+ int16_t *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_intra)(struct MpegEncContext *s, // unquantizer to use (MPEG-4 can use both)
+ int16_t *block/*align 16*/, int n, int qscale);
+ void (*dct_unquantize_inter)(struct MpegEncContext *s, // unquantizer to use (MPEG-4 can use both)
+ int16_t *block/*align 16*/, int n, int qscale);
+ int (*dct_quantize)(struct MpegEncContext *s, int16_t *block/*align 16*/, int n, int qscale, int *overflow);
+ int (*fast_dct_quantize)(struct MpegEncContext *s, int16_t *block/*align 16*/, int n, int qscale, int *overflow);
+ void (*denoise_dct)(struct MpegEncContext *s, int16_t *block);
+
+ int mpv_flags; ///< flags set by private options
+ int quantizer_noise_shaping;
+
+ /**
+ * ratecontrol qmin qmax limiting method
+ * 0-> clipping, 1-> use a nice continuous function to limit qscale within qmin/qmax.
+ */
+ float rc_qsquish;
+ float rc_qmod_amp;
+ int rc_qmod_freq;
+ float rc_initial_cplx;
+ float rc_buffer_aggressivity;
+ float border_masking;
+ int lmin, lmax;
+ int vbv_ignore_qmax;
+
+ char *rc_eq;
+
+ /* temp buffers for rate control */
+ float *cplx_tab, *bits_tab;
+
+ /* flag to indicate a reinitialization is required, e.g. after
+ * a frame size change */
+ int context_reinit;
+
+ ERContext er;
+
+ int error_rate;
+
+ /* temporary frames used by b_frame_strategy = 2 */
+ AVFrame *tmp_frames[MAX_B_FRAMES + 2];
+ int b_frame_strategy;
+ int b_sensitivity;
+
+ /* frame skip options for encoding */
+ int frame_skip_threshold;
+ int frame_skip_factor;
+ int frame_skip_exp;
+ int frame_skip_cmp;
+
+ int scenechange_threshold;
+ int noise_reduction;
+
+ int intra_penalty;
+} MpegEncContext;
+
+
+/**
+ * Set the given MpegEncContext to common defaults (same for encoding
+ * and decoding). The changed fields will not depend upon the prior
+ * state of the MpegEncContext.
+ */
+void ff_mpv_common_defaults(MpegEncContext *s);
+
+int ff_mpv_common_init(MpegEncContext *s);
+void ff_mpv_common_init_arm(MpegEncContext *s);
+void ff_mpv_common_init_axp(MpegEncContext *s);
+void ff_mpv_common_init_neon(MpegEncContext *s);
+void ff_mpv_common_init_ppc(MpegEncContext *s);
+void ff_mpv_common_init_x86(MpegEncContext *s);
+void ff_mpv_common_init_mips(MpegEncContext *s);
+/**
+ * Initialize an MpegEncContext's thread contexts. Presumes that
+ * slice_context_count is already set and that all the fields
+ * that are freed/reset in free_duplicate_context() are NULL.
+ */
+int ff_mpv_init_duplicate_contexts(MpegEncContext *s);
+/**
+ * Initialize and allocate MpegEncContext fields dependent on the resolution.
+ */
+int ff_mpv_init_context_frame(MpegEncContext *s);
+/**
+ * Frees and resets MpegEncContext fields depending on the resolution
+ * as well as the slice thread contexts.
+ * It is used during resolution changes to avoid a full reinitialization of the
+ * codec.
+ */
+void ff_mpv_free_context_frame(MpegEncContext *s);
+
+void ff_mpv_common_end(MpegEncContext *s);
+
+void ff_clean_intra_table_entries(MpegEncContext *s);
+
+int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src);
+void ff_set_qscale(MpegEncContext * s, int qscale);
+
+void ff_mpv_idct_init(MpegEncContext *s);
+void ff_init_scantable(const uint8_t *permutation, ScanTable *st,
+ const uint8_t *src_scantable);
+void ff_init_block_index(MpegEncContext *s);
+
+void ff_mpv_motion(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb,
+ uint8_t *dest_cr, int dir,
+ uint8_t *const *ref_picture,
+ op_pixels_func (*pix_op)[4],
+ qpel_mc_func (*qpix_op)[16]);
+
+static inline void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample,
+ int lowres, int chroma_x_shift)
+{
+ const int bytes_per_pixel = 1 + (bits_per_raw_sample > 8);
+ const int block_size = (8 * bytes_per_pixel) >> lowres;
+
+ s->block_index[0]+=2;
+ s->block_index[1]+=2;
+ s->block_index[2]+=2;
+ s->block_index[3]+=2;
+ s->block_index[4]++;
+ s->block_index[5]++;
+ s->dest[0]+= 2*block_size;
+ s->dest[1] += (2 >> chroma_x_shift) * block_size;
+ s->dest[2] += (2 >> chroma_x_shift) * block_size;
+}
+
+#endif /* AVCODEC_MPEGVIDEO_H */
diff --git a/media/ffvpx/libavcodec/mpegvideodata.h b/media/ffvpx/libavcodec/mpegvideodata.h
new file mode 100644
index 0000000000..42c9d6c293
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegvideodata.h
@@ -0,0 +1,39 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEGVIDEODATA_H
+#define AVCODEC_MPEGVIDEODATA_H
+
+#include <stdint.h>
+
+#include "libavutil/attributes_internal.h"
+
+FF_VISIBILITY_PUSH_HIDDEN
+/* encoding scans */
+extern const uint8_t ff_alternate_horizontal_scan[64];
+extern const uint8_t ff_alternate_vertical_scan[64];
+
+extern const uint8_t ff_mpeg12_dc_scale_table[4][32];
+static const uint8_t *const ff_mpeg1_dc_scale_table = ff_mpeg12_dc_scale_table[0];
+
+extern const uint8_t ff_mpeg2_non_linear_qscale[32];
+
+extern const uint8_t ff_default_chroma_qscale_table[32];
+FF_VISIBILITY_POP_HIDDEN
+
+#endif /* AVCODEC_MPEGVIDEODATA_H */
diff --git a/media/ffvpx/libavcodec/mpegvideodsp.h b/media/ffvpx/libavcodec/mpegvideodsp.h
new file mode 100644
index 0000000000..293e2548d3
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegvideodsp.h
@@ -0,0 +1,47 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEGVIDEODSP_H
+#define AVCODEC_MPEGVIDEODSP_H
+
+#include <stdint.h>
+
+void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy, int shift, int r,
+ int width, int height);
+
+typedef struct MpegVideoDSPContext {
+ /**
+ * translational global motion compensation.
+ */
+ void (*gmc1)(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */,
+ int srcStride, int h, int x16, int y16, int rounder);
+ /**
+ * global motion compensation.
+ */
+ void (*gmc)(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */,
+ int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy,
+ int shift, int r, int width, int height);
+} MpegVideoDSPContext;
+
+void ff_mpegvideodsp_init(MpegVideoDSPContext *c);
+void ff_mpegvideodsp_init_ppc(MpegVideoDSPContext *c);
+void ff_mpegvideodsp_init_x86(MpegVideoDSPContext *c);
+
+#endif /* AVCODEC_MPEGVIDEODSP_H */
diff --git a/media/ffvpx/libavcodec/mpegvideoencdsp.h b/media/ffvpx/libavcodec/mpegvideoencdsp.h
new file mode 100644
index 0000000000..95084679d9
--- /dev/null
+++ b/media/ffvpx/libavcodec/mpegvideoencdsp.h
@@ -0,0 +1,58 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MPEGVIDEOENCDSP_H
+#define AVCODEC_MPEGVIDEOENCDSP_H
+
+#include <stdint.h>
+
+#include "avcodec.h"
+
+#define BASIS_SHIFT 16
+#define RECON_SHIFT 6
+
+#define EDGE_TOP 1
+#define EDGE_BOTTOM 2
+
+typedef struct MpegvideoEncDSPContext {
+ int (*try_8x8basis)(const int16_t rem[64], const int16_t weight[64],
+ const int16_t basis[64], int scale);
+ void (*add_8x8basis)(int16_t rem[64], const int16_t basis[64], int scale);
+
+ int (*pix_sum)(const uint8_t *pix, int line_size);
+ int (*pix_norm1)(const uint8_t *pix, int line_size);
+
+ void (*shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src,
+ int src_wrap, int width, int height);
+
+ void (*draw_edges)(uint8_t *buf, int wrap, int width, int height,
+ int w, int h, int sides);
+} MpegvideoEncDSPContext;
+
+void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c,
+ AVCodecContext *avctx);
+void ff_mpegvideoencdsp_init_arm(MpegvideoEncDSPContext *c,
+ AVCodecContext *avctx);
+void ff_mpegvideoencdsp_init_ppc(MpegvideoEncDSPContext *c,
+ AVCodecContext *avctx);
+void ff_mpegvideoencdsp_init_x86(MpegvideoEncDSPContext *c,
+ AVCodecContext *avctx);
+void ff_mpegvideoencdsp_init_mips(MpegvideoEncDSPContext *c,
+ AVCodecContext *avctx);
+
+#endif /* AVCODEC_MPEGVIDEOENCDSP_H */
diff --git a/media/ffvpx/libavcodec/null_bsf.c b/media/ffvpx/libavcodec/null_bsf.c
new file mode 100644
index 0000000000..28237076fb
--- /dev/null
+++ b/media/ffvpx/libavcodec/null_bsf.c
@@ -0,0 +1,29 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Null bitstream filter -- pass the input through unchanged.
+ */
+
+#include "bsf_internal.h"
+
+const FFBitStreamFilter ff_null_bsf = {
+ .p.name = "null",
+ .filter = ff_bsf_get_packet_ref,
+};
diff --git a/media/ffvpx/libavcodec/options.c b/media/ffvpx/libavcodec/options.c
new file mode 100644
index 0000000000..a9b35ee1c3
--- /dev/null
+++ b/media/ffvpx/libavcodec/options.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Options definition for AVCodecContext.
+ */
+
+#include "config_components.h"
+
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "libavutil/avassert.h"
+#include "libavutil/internal.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include <string.h>
+
+FF_DISABLE_DEPRECATION_WARNINGS
+#include "options_table.h"
+FF_ENABLE_DEPRECATION_WARNINGS
+
+static const char* context_to_name(void* ptr) {
+ AVCodecContext *avc= ptr;
+
+ if (avc && avc->codec)
+ return avc->codec->name;
+ else
+ return "NULL";
+}
+
+static void *codec_child_next(void *obj, void *prev)
+{
+ AVCodecContext *s = obj;
+ if (!prev && s->codec && s->codec->priv_class && s->priv_data)
+ return s->priv_data;
+ return NULL;
+}
+
+static const AVClass *codec_child_class_iterate(void **iter)
+{
+ const AVCodec *c;
+ /* find next codec with priv options */
+ while ((c = av_codec_iterate(iter)))
+ if (c->priv_class)
+ return c->priv_class;
+ return NULL;
+}
+
+static AVClassCategory get_category(void *ptr)
+{
+ AVCodecContext* avctx = ptr;
+ if (avctx->codec && av_codec_is_decoder(avctx->codec))
+ return AV_CLASS_CATEGORY_DECODER;
+ else
+ return AV_CLASS_CATEGORY_ENCODER;
+}
+
+static const AVClass av_codec_context_class = {
+ .class_name = "AVCodecContext",
+ .item_name = context_to_name,
+ .option = avcodec_options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .log_level_offset_offset = offsetof(AVCodecContext, log_level_offset),
+ .child_next = codec_child_next,
+ .child_class_iterate = codec_child_class_iterate,
+ .category = AV_CLASS_CATEGORY_ENCODER,
+ .get_category = get_category,
+};
+
+static int init_context_defaults(AVCodecContext *s, const AVCodec *codec)
+{
+ const FFCodec *const codec2 = ffcodec(codec);
+ int flags=0;
+ memset(s, 0, sizeof(AVCodecContext));
+
+ s->av_class = &av_codec_context_class;
+
+ s->codec_type = codec ? codec->type : AVMEDIA_TYPE_UNKNOWN;
+ if (codec) {
+ s->codec = codec;
+ s->codec_id = codec->id;
+ }
+
+ if(s->codec_type == AVMEDIA_TYPE_AUDIO)
+ flags= AV_OPT_FLAG_AUDIO_PARAM;
+ else if(s->codec_type == AVMEDIA_TYPE_VIDEO)
+ flags= AV_OPT_FLAG_VIDEO_PARAM;
+ else if(s->codec_type == AVMEDIA_TYPE_SUBTITLE)
+ flags= AV_OPT_FLAG_SUBTITLE_PARAM;
+ av_opt_set_defaults2(s, flags, flags);
+
+ av_channel_layout_uninit(&s->ch_layout);
+
+ s->time_base = (AVRational){0,1};
+ s->framerate = (AVRational){ 0, 1 };
+ s->pkt_timebase = (AVRational){ 0, 1 };
+ s->get_buffer2 = avcodec_default_get_buffer2;
+ s->get_format = avcodec_default_get_format;
+ s->get_encode_buffer = avcodec_default_get_encode_buffer;
+ s->execute = avcodec_default_execute;
+ s->execute2 = avcodec_default_execute2;
+ s->sample_aspect_ratio = (AVRational){0,1};
+ s->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ s->pix_fmt = AV_PIX_FMT_NONE;
+ s->sw_pix_fmt = AV_PIX_FMT_NONE;
+ s->sample_fmt = AV_SAMPLE_FMT_NONE;
+
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
+ s->reordered_opaque = AV_NOPTS_VALUE;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ if(codec && codec2->priv_data_size){
+ s->priv_data = av_mallocz(codec2->priv_data_size);
+ if (!s->priv_data)
+ return AVERROR(ENOMEM);
+ if(codec->priv_class){
+ *(const AVClass**)s->priv_data = codec->priv_class;
+ av_opt_set_defaults(s->priv_data);
+ }
+ }
+ if (codec && codec2->defaults) {
+ int ret;
+ const FFCodecDefault *d = codec2->defaults;
+ while (d->key) {
+ ret = av_opt_set(s, d->key, d->value, 0);
+ av_assert0(ret >= 0);
+ d++;
+ }
+ }
+ return 0;
+}
+
+AVCodecContext *avcodec_alloc_context3(const AVCodec *codec)
+{
+ AVCodecContext *avctx= av_malloc(sizeof(AVCodecContext));
+
+ if (!avctx)
+ return NULL;
+
+ if (init_context_defaults(avctx, codec) < 0) {
+ av_free(avctx);
+ return NULL;
+ }
+
+ return avctx;
+}
+
+void avcodec_free_context(AVCodecContext **pavctx)
+{
+ AVCodecContext *avctx = *pavctx;
+
+ if (!avctx)
+ return;
+
+ avcodec_close(avctx);
+
+ av_freep(&avctx->extradata);
+ av_freep(&avctx->subtitle_header);
+ av_freep(&avctx->intra_matrix);
+ av_freep(&avctx->inter_matrix);
+ av_freep(&avctx->rc_override);
+ av_channel_layout_uninit(&avctx->ch_layout);
+
+ av_freep(pavctx);
+}
+
+const AVClass *avcodec_get_class(void)
+{
+ return &av_codec_context_class;
+}
+
+#define SROFFSET(x) offsetof(AVSubtitleRect,x)
+
+static const AVOption subtitle_rect_options[]={
+{"x", "", SROFFSET(x), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
+{"y", "", SROFFSET(y), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
+{"w", "", SROFFSET(w), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
+{"h", "", SROFFSET(h), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
+{"type", "", SROFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
+{"flags", "", SROFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, 1, 0, "flags"},
+{"forced", "", SROFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, 1, 0},
+{NULL},
+};
+
+static const AVClass av_subtitle_rect_class = {
+ .class_name = "AVSubtitleRect",
+ .item_name = NULL,
+ .option = subtitle_rect_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+const AVClass *avcodec_get_subtitle_rect_class(void)
+{
+ return &av_subtitle_rect_class;
+}
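options.c attaches avcodec_options (from options_table.h) to the AVCodecContext AVClass and points child_next/child_class_iterate at the selected codec's private class, which is what makes every table entry settable by name through the generic AVOptions API. An illustrative sketch under that assumption, not part of the patch (configure_context_demo() is a hypothetical helper):

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

int configure_context_demo(void)
{
    AVCodecContext *ctx = avcodec_alloc_context3(NULL); /* defaults only */
    int64_t bitrate = 0;
    int ret;

    if (!ctx)
        return AVERROR(ENOMEM);

    /* "b" and "threads" are entries in avcodec_options. */
    ret = av_opt_set(ctx, "b", "500000", 0);
    if (ret >= 0)
        ret = av_opt_set(ctx, "threads", "auto", 0);
    if (ret >= 0)
        ret = av_opt_get_int(ctx, "b", 0, &bitrate);  /* reads 500000 back */

    avcodec_free_context(&ctx);
    return ret;
}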
diff --git a/media/ffvpx/libavcodec/options_table.h b/media/ffvpx/libavcodec/options_table.h
new file mode 100644
index 0000000000..4fea57673a
--- /dev/null
+++ b/media/ffvpx/libavcodec/options_table.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_OPTIONS_TABLE_H
+#define AVCODEC_OPTIONS_TABLE_H
+
+#include "config_components.h"
+
+#include <float.h>
+#include <limits.h>
+#include <stdint.h>
+
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "version_major.h"
+
+#define OFFSET(x) offsetof(AVCodecContext,x)
+#define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
+//these names are too long to be readable
+#define V AV_OPT_FLAG_VIDEO_PARAM
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define S AV_OPT_FLAG_SUBTITLE_PARAM
+#define E AV_OPT_FLAG_ENCODING_PARAM
+#define D AV_OPT_FLAG_DECODING_PARAM
+#define CC AV_OPT_FLAG_CHILD_CONSTS
+
+#define AV_CODEC_DEFAULT_BITRATE 200*1000
+
+static const AVOption avcodec_options[] = {
+{"b", "set bitrate (in bits/s)", OFFSET(bit_rate), AV_OPT_TYPE_INT64, {.i64 = AV_CODEC_DEFAULT_BITRATE }, 0, INT64_MAX, A|V|E},
+{"ab", "set bitrate (in bits/s)", OFFSET(bit_rate), AV_OPT_TYPE_INT64, {.i64 = 128*1000 }, 0, INT_MAX, A|E},
+{"bt", "Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate tolerance specifies how far "
+ "ratecontrol is willing to deviate from the target average bitrate value. This is not related "
+ "to minimum/maximum bitrate. Lowering tolerance too much has an adverse effect on quality.",
+ OFFSET(bit_rate_tolerance), AV_OPT_TYPE_INT, {.i64 = AV_CODEC_DEFAULT_BITRATE*20 }, 1, INT_MAX, V|E},
+{"flags", NULL, OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT }, 0, UINT_MAX, V|A|S|E|D, "flags"},
+{"unaligned", "allow decoders to produce unaligned output", 0, AV_OPT_TYPE_CONST, { .i64 = AV_CODEC_FLAG_UNALIGNED }, INT_MIN, INT_MAX, V | D, "flags" },
+{"mv4", "use four motion vectors per macroblock (MPEG-4)", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_4MV }, INT_MIN, INT_MAX, V|E, "flags"},
+{"qpel", "use 1/4-pel motion compensation", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_QPEL }, INT_MIN, INT_MAX, V|E, "flags"},
+{"loop", "use loop filter", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_LOOP_FILTER }, INT_MIN, INT_MAX, V|E, "flags"},
+{"qscale", "use fixed qscale", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_QSCALE }, INT_MIN, INT_MAX, 0, "flags"},
+{"recon_frame", "export reconstructed frames", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_RECON_FRAME}, .unit = "flags"},
+{"copy_opaque", "propagate opaque values", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_COPY_OPAQUE}, .unit = "flags"},
+{"frame_duration", "use frame durations", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_FRAME_DURATION}, .unit = "flags"},
+{"pass1", "use internal 2-pass ratecontrol in first pass mode", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PASS1 }, INT_MIN, INT_MAX, 0, "flags"},
+{"pass2", "use internal 2-pass ratecontrol in second pass mode", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PASS2 }, INT_MIN, INT_MAX, 0, "flags"},
+{"gray", "only decode/encode grayscale", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_GRAY }, INT_MIN, INT_MAX, V|E|D, "flags"},
+{"psnr", "error[?] variables will be set during encoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PSNR }, INT_MIN, INT_MAX, V|E, "flags"},
+{"ildct", "use interlaced DCT", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_INTERLACED_DCT }, INT_MIN, INT_MAX, V|E, "flags"},
+{"low_delay", "force low delay", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_LOW_DELAY }, INT_MIN, INT_MAX, V|D|E, "flags"},
+{"global_header", "place global headers in extradata instead of every keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
+{"bitexact", "use only bitexact functions (except (I)DCT)", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_BITEXACT }, INT_MIN, INT_MAX, A|V|S|D|E, "flags"},
+{"aic", "H.263 advanced intra coding / MPEG-4 AC prediction", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_AC_PRED }, INT_MIN, INT_MAX, V|E, "flags"},
+{"ilme", "interlaced motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_INTERLACED_ME }, INT_MIN, INT_MAX, V|E, "flags"},
+{"cgop", "closed GOP", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
+{"output_corrupt", "Output even potentially corrupted frames", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_OUTPUT_CORRUPT }, INT_MIN, INT_MAX, V|D, "flags"},
+{"drop_changed", "Drop frames whose parameters differ from first decoded frame", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_DROPCHANGED }, INT_MIN, INT_MAX, A|V|D, "flags"},
+{"flags2", NULL, OFFSET(flags2), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT}, 0, UINT_MAX, V|A|E|D|S, "flags2"},
+{"fast", "allow non-spec-compliant speedup tricks", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"noout", "skip bitstream encoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"ignorecrop", "ignore cropping information from sps", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_IGNORE_CROP }, INT_MIN, INT_MAX, V|D, "flags2"},
+{"local_header", "place global headers at every keyframe instead of in extradata", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_LOCAL_HEADER }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"chunks", "Frame data might be split into multiple chunks", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_CHUNKS }, INT_MIN, INT_MAX, V|D, "flags2"},
+{"showall", "Show all frames before the first keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_SHOW_ALL }, INT_MIN, INT_MAX, V|D, "flags2"},
+{"export_mvs", "export motion vectors through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_EXPORT_MVS}, INT_MIN, INT_MAX, V|D, "flags2"},
+{"skip_manual", "do not skip samples and export skip information as frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_SKIP_MANUAL}, INT_MIN, INT_MAX, A|D, "flags2"},
+{"ass_ro_flush_noop", "do not reset ASS ReadOrder field on flush", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_RO_FLUSH_NOOP}, INT_MIN, INT_MAX, S|D, "flags2"},
+{"icc_profiles", "generate/parse embedded ICC profiles from/to colorimetry tags", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_ICC_PROFILES}, INT_MIN, INT_MAX, S|D, "flags2"},
+{"export_side_data", "Export metadata as side data", OFFSET(export_side_data), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT}, 0, UINT_MAX, A|V|S|D|E, "export_side_data"},
+{"mvs", "export motion vectors through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_MVS}, INT_MIN, INT_MAX, V|D, "export_side_data"},
+{"prft", "export Producer Reference Time through packet side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_PRFT}, INT_MIN, INT_MAX, A|V|S|E, "export_side_data"},
+{"venc_params", "export video encoding parameters through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS}, INT_MIN, INT_MAX, V|D, "export_side_data"},
+{"film_grain", "export film grain parameters through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_FILM_GRAIN}, INT_MIN, INT_MAX, V|D, "export_side_data"},
+{"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, 0, INT_MAX},
+{"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
+{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
+#if FF_API_OLD_CHANNEL_LAYOUT
+{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
+#endif
+{"cutoff", "set cutoff bandwidth", OFFSET(cutoff), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
+{"frame_size", NULL, OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|E},
+{"frame_number", NULL, OFFSET(frame_num), AV_OPT_TYPE_INT64, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
+{"delay", NULL, OFFSET(delay), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
+{"qcomp", "video quantizer scale compression (VBR). Constant of ratecontrol equation. "
+ "Recommended range for default rc_eq: 0.0-1.0",
+ OFFSET(qcompress), AV_OPT_TYPE_FLOAT, {.dbl = 0.5 }, -FLT_MAX, FLT_MAX, V|E},
+{"qblur", "video quantizer scale blur (VBR)", OFFSET(qblur), AV_OPT_TYPE_FLOAT, {.dbl = 0.5 }, -1, FLT_MAX, V|E},
+{"qmin", "minimum video quantizer scale (VBR)", OFFSET(qmin), AV_OPT_TYPE_INT, {.i64 = 2 }, -1, 69, V|E},
+{"qmax", "maximum video quantizer scale (VBR)", OFFSET(qmax), AV_OPT_TYPE_INT, {.i64 = 31 }, -1, 1024, V|E},
+{"qdiff", "maximum difference between the quantizer scales (VBR)", OFFSET(max_qdiff), AV_OPT_TYPE_INT, {.i64 = 3 }, INT_MIN, INT_MAX, V|E},
+{"bf", "set maximum number of B-frames between non-B-frames", OFFSET(max_b_frames), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, -1, INT_MAX, V|E},
+{"b_qfactor", "QP factor between P- and B-frames", OFFSET(b_quant_factor), AV_OPT_TYPE_FLOAT, {.dbl = 1.25 }, -FLT_MAX, FLT_MAX, V|E},
+{"codec_tag", NULL, OFFSET(codec_tag), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
+{"bug", "work around not autodetected encoder bugs", OFFSET(workaround_bugs), AV_OPT_TYPE_FLAGS, {.i64 = FF_BUG_AUTODETECT }, INT_MIN, INT_MAX, V|D, "bug"},
+{"autodetect", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_AUTODETECT }, INT_MIN, INT_MAX, V|D, "bug"},
+{"xvid_ilace", "Xvid interlacing bug (autodetected if FOURCC == XVIX)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_XVID_ILACE }, INT_MIN, INT_MAX, V|D, "bug"},
+{"ump4", "(autodetected if FOURCC == UMP4)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_UMP4 }, INT_MIN, INT_MAX, V|D, "bug"},
+{"no_padding", "padding bug (autodetected)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_NO_PADDING }, INT_MIN, INT_MAX, V|D, "bug"},
+{"amv", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_AMV }, INT_MIN, INT_MAX, V|D, "bug"},
+{"qpel_chroma", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_QPEL_CHROMA }, INT_MIN, INT_MAX, V|D, "bug"},
+{"std_qpel", "old standard qpel (autodetected per FOURCC/version)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_STD_QPEL }, INT_MIN, INT_MAX, V|D, "bug"},
+{"qpel_chroma2", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_QPEL_CHROMA2 }, INT_MIN, INT_MAX, V|D, "bug"},
+{"direct_blocksize", "direct-qpel-blocksize bug (autodetected per FOURCC/version)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_DIRECT_BLOCKSIZE }, INT_MIN, INT_MAX, V|D, "bug"},
+{"edge", "edge padding bug (autodetected per FOURCC/version)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_EDGE }, INT_MIN, INT_MAX, V|D, "bug"},
+{"hpel_chroma", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_HPEL_CHROMA }, INT_MIN, INT_MAX, V|D, "bug"},
+{"dc_clip", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_DC_CLIP }, INT_MIN, INT_MAX, V|D, "bug"},
+{"ms", "work around various bugs in Microsoft's broken decoders", 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_MS }, INT_MIN, INT_MAX, V|D, "bug"},
+{"trunc", "truncated frames", 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_TRUNCATED}, INT_MIN, INT_MAX, V|D, "bug"},
+{"iedge", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_IEDGE }, INT_MIN, INT_MAX, V|D, "bug"},
+{"strict", "how strictly to follow the standards", OFFSET(strict_std_compliance), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
+{"very", "strictly conform to a older more strict version of the spec or reference software", 0, AV_OPT_TYPE_CONST, {.i64 = FF_COMPLIANCE_VERY_STRICT }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
+{"strict", "strictly conform to all the things in the spec no matter what the consequences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_COMPLIANCE_STRICT }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
+{"normal", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_COMPLIANCE_NORMAL }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
+{"unofficial", "allow unofficial extensions", 0, AV_OPT_TYPE_CONST, {.i64 = FF_COMPLIANCE_UNOFFICIAL }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
+{"experimental", "allow non-standardized experimental things", 0, AV_OPT_TYPE_CONST, {.i64 = FF_COMPLIANCE_EXPERIMENTAL }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
+{"b_qoffset", "QP offset between P- and B-frames", OFFSET(b_quant_offset), AV_OPT_TYPE_FLOAT, {.dbl = 1.25 }, -FLT_MAX, FLT_MAX, V|E},
+{"err_detect", "set error detection flags", OFFSET(err_recognition), AV_OPT_TYPE_FLAGS, {.i64 = 0 }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"crccheck", "verify embedded CRCs", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CRCCHECK }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"bitstream", "detect bitstream specification deviations", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_BITSTREAM }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"buffer", "detect improper bitstream length", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_BUFFER }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"explode", "abort decoding on minor error detection", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_EXPLODE }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"ignore_err", "ignore errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_IGNORE_ERR }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"careful", "consider things that violate the spec, are fast to check and have not been seen in the wild as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT | AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"aggressive", "consider things that a sane encoder should not do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE | AV_EF_COMPLIANT | AV_EF_CAREFUL}, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"has_b_frames", NULL, OFFSET(has_b_frames), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
+{"block_align", NULL, OFFSET(block_align), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
+{"rc_override_count", NULL, OFFSET(rc_override_count), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
+{"maxrate", "maximum bitrate (in bits/s). Used for VBV together with bufsize.", OFFSET(rc_max_rate), AV_OPT_TYPE_INT64, {.i64 = DEFAULT }, 0, INT_MAX, V|A|E},
+{"minrate", "minimum bitrate (in bits/s). Most useful in setting up a CBR encode. It is of little use otherwise.",
+ OFFSET(rc_min_rate), AV_OPT_TYPE_INT64, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
+{"bufsize", "set ratecontrol buffer size (in bits)", OFFSET(rc_buffer_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|V|E},
+{"i_qfactor", "QP factor between P- and I-frames", OFFSET(i_quant_factor), AV_OPT_TYPE_FLOAT, {.dbl = -0.8 }, -FLT_MAX, FLT_MAX, V|E},
+{"i_qoffset", "QP offset between P- and I-frames", OFFSET(i_quant_offset), AV_OPT_TYPE_FLOAT, {.dbl = 0.0 }, -FLT_MAX, FLT_MAX, V|E},
+{"dct", "DCT algorithm", OFFSET(dct_algo), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, V|E, "dct"},
+{"auto", "autoselect a good one", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_AUTO }, INT_MIN, INT_MAX, V|E, "dct"},
+{"fastint", "fast integer", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_FASTINT }, INT_MIN, INT_MAX, V|E, "dct"},
+{"int", "accurate integer", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_INT }, INT_MIN, INT_MAX, V|E, "dct"},
+{"mmx", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_MMX }, INT_MIN, INT_MAX, V|E, "dct"},
+{"altivec", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_ALTIVEC }, INT_MIN, INT_MAX, V|E, "dct"},
+{"faan", "floating point AAN DCT", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_FAAN }, INT_MIN, INT_MAX, V|E, "dct"},
+{"lumi_mask", "compresses bright areas stronger than medium ones", OFFSET(lumi_masking), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, V|E},
+{"tcplx_mask", "temporal complexity masking", OFFSET(temporal_cplx_masking), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, V|E},
+{"scplx_mask", "spatial complexity masking", OFFSET(spatial_cplx_masking), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, V|E},
+{"p_mask", "inter masking", OFFSET(p_masking), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, V|E},
+{"dark_mask", "compresses dark areas stronger than medium ones", OFFSET(dark_masking), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, V|E},
+{"idct", "select IDCT implementation", OFFSET(idct_algo), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, V|E|D, "idct"},
+{"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_AUTO }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"int", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_INT }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simple", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLE }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplemmx", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEMMX }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"arm", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_ARM }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"altivec", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_ALTIVEC }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplearm", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEARM }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplearmv5te", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEARMV5TE }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplearmv6", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEARMV6 }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simpleneon", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLENEON }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"xvid", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_XVID }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"xvidmmx", "deprecated, for compatibility only", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_XVID }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"faani", "floating point AAN IDCT", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_FAAN }, INT_MIN, INT_MAX, V|D|E, "idct"},
+{"simpleauto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEAUTO }, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"slice_count", NULL, OFFSET(slice_count), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
+{"ec", "set error concealment strategy", OFFSET(error_concealment), AV_OPT_TYPE_FLAGS, {.i64 = 3 }, INT_MIN, INT_MAX, V|D, "ec"},
+{"guess_mvs", "iterative motion vector (MV) search (slow)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_EC_GUESS_MVS }, INT_MIN, INT_MAX, V|D, "ec"},
+{"deblock", "use strong deblock filter for damaged MBs", 0, AV_OPT_TYPE_CONST, {.i64 = FF_EC_DEBLOCK }, INT_MIN, INT_MAX, V|D, "ec"},
+{"favor_inter", "favor predicting from the previous frame", 0, AV_OPT_TYPE_CONST, {.i64 = FF_EC_FAVOR_INTER }, INT_MIN, INT_MAX, V|D, "ec"},
+{"bits_per_coded_sample", NULL, OFFSET(bits_per_coded_sample), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
+{"aspect", "sample aspect ratio", OFFSET(sample_aspect_ratio), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, 0, 10, V|E},
+{"sar", "sample aspect ratio", OFFSET(sample_aspect_ratio), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, 0, 10, V|E},
+{"debug", "print specific debug info", OFFSET(debug), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT }, 0, INT_MAX, V|A|S|E|D, "debug"},
+{"pict", "picture info", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_PICT_INFO }, INT_MIN, INT_MAX, V|D, "debug"},
+{"rc", "rate control", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_RC }, INT_MIN, INT_MAX, V|E, "debug"},
+{"bitstream", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_BITSTREAM }, INT_MIN, INT_MAX, V|D, "debug"},
+{"mb_type", "macroblock (MB) type", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_MB_TYPE }, INT_MIN, INT_MAX, V|D, "debug"},
+{"qp", "per-block quantization parameter (QP)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_QP }, INT_MIN, INT_MAX, V|D, "debug"},
+{"dct_coeff", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_DCT_COEFF }, INT_MIN, INT_MAX, V|D, "debug"},
+{"green_metadata", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_GREEN_MD }, INT_MIN, INT_MAX, V|D, "debug"},
+{"skip", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_SKIP }, INT_MIN, INT_MAX, V|D, "debug"},
+{"startcode", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_STARTCODE }, INT_MIN, INT_MAX, V|D, "debug"},
+{"er", "error recognition", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_ER }, INT_MIN, INT_MAX, V|D, "debug"},
+{"mmco", "memory management control operations (H.264)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_MMCO }, INT_MIN, INT_MAX, V|D, "debug"},
+{"bugs", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_BUGS }, INT_MIN, INT_MAX, V|D, "debug"},
+{"buffers", "picture buffer allocations", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_BUFFERS }, INT_MIN, INT_MAX, V|D, "debug"},
+{"thread_ops", "threading operations", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_THREADS }, INT_MIN, INT_MAX, V|A|D, "debug"},
+{"nomc", "skip motion compensation", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_NOMC }, INT_MIN, INT_MAX, V|A|D, "debug"},
+{"dia_size", "diamond type & size for motion estimation", OFFSET(dia_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"last_pred", "amount of motion predictors from the previous frame", OFFSET(last_predictor_count), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"pre_dia_size", "diamond type & size for motion estimation pre-pass", OFFSET(pre_dia_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"subq", "sub-pel motion estimation quality", OFFSET(me_subpel_quality), AV_OPT_TYPE_INT, {.i64 = 8 }, INT_MIN, INT_MAX, V|E},
+{"me_range", "limit motion vectors range (1023 for DivX player)", OFFSET(me_range), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"global_quality", NULL, OFFSET(global_quality), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
+{"slice_flags", NULL, OFFSET(slice_flags), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
+{"mbd", "macroblock decision algorithm (high quality mode)", OFFSET(mb_decision), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, 2, V|E, "mbd"},
+{"simple", "use mbcmp", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MB_DECISION_SIMPLE }, INT_MIN, INT_MAX, V|E, "mbd"},
+{"bits", "use fewest bits", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MB_DECISION_BITS }, INT_MIN, INT_MAX, V|E, "mbd"},
+{"rd", "use best rate distortion", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MB_DECISION_RD }, INT_MIN, INT_MAX, V|E, "mbd"},
+{"rc_init_occupancy", "number of bits which should be loaded into the rc buffer before decoding starts", OFFSET(rc_initial_buffer_occupancy), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"threads", "set the number of threads", OFFSET(thread_count), AV_OPT_TYPE_INT, {.i64 = 1 }, 0, INT_MAX, V|A|E|D, "threads"},
+{"auto", "autodetect a suitable number of threads to use", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, INT_MIN, INT_MAX, V|E|D, "threads"},
+{"dc", "intra_dc_precision", OFFSET(intra_dc_precision), AV_OPT_TYPE_INT, {.i64 = 0 }, -8, 16, V|E},
+{"nssew", "nsse weight", OFFSET(nsse_weight), AV_OPT_TYPE_INT, {.i64 = 8 }, INT_MIN, INT_MAX, V|E},
+{"skip_top", "number of macroblock rows at the top which are skipped", OFFSET(skip_top), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|D},
+{"skip_bottom", "number of macroblock rows at the bottom which are skipped", OFFSET(skip_bottom), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|D},
+{"profile", NULL, OFFSET(profile), AV_OPT_TYPE_INT, {.i64 = FF_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, V|A|E|CC, "avctx.profile"},
+{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "avctx.profile"},
+{"main10", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_HEVC_MAIN_10 }, INT_MIN, INT_MAX, V|E, "avctx.profile"},
+{"level", NULL, OFFSET(level), AV_OPT_TYPE_INT, {.i64 = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E|CC, "avctx.level"},
+{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "avctx.level"},
+{"lowres", "decode at 1= 1/2, 2=1/4, 3=1/8 resolutions", OFFSET(lowres), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, V|A|D},
+{"cmp", "full-pel ME compare function", OFFSET(me_cmp), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"subcmp", "sub-pel ME compare function", OFFSET(me_sub_cmp), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"mbcmp", "macroblock compare function", OFFSET(mb_cmp), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"ildctcmp", "interlaced DCT compare function", OFFSET(ildct_cmp), AV_OPT_TYPE_INT, {.i64 = FF_CMP_VSAD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"precmp", "pre motion estimation compare function", OFFSET(me_pre_cmp), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"sad", "sum of absolute differences, fast", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_SAD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"sse", "sum of squared errors", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_SSE }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"satd", "sum of absolute Hadamard transformed differences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_SATD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"dct", "sum of absolute DCT transformed differences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_DCT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"psnr", "sum of squared quantization errors (avoid, low quality)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_PSNR }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"bit", "number of bits needed for the block", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_BIT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"rd", "rate distortion optimal, slow", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_RD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"zero", "0", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_ZERO }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"vsad", "sum of absolute vertical differences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_VSAD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"vsse", "sum of squared vertical differences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_VSSE }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"nsse", "noise preserving sum of squared differences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_NSSE }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+#if CONFIG_SNOW_ENCODER
+{"w53", "5/3 wavelet, only used in snow", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_W53 }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"w97", "9/7 wavelet, only used in snow", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_W97 }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+#endif
+{"dctmax", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_DCTMAX }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"chroma", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_CHROMA }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"msad", "sum of absolute differences, median predicted", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_MEDIAN_SAD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"mblmin", "minimum macroblock Lagrange factor (VBR)", OFFSET(mb_lmin), AV_OPT_TYPE_INT, {.i64 = FF_QP2LAMBDA * 2 }, 1, FF_LAMBDA_MAX, V|E},
+{"mblmax", "maximum macroblock Lagrange factor (VBR)", OFFSET(mb_lmax), AV_OPT_TYPE_INT, {.i64 = FF_QP2LAMBDA * 31 }, 1, FF_LAMBDA_MAX, V|E},
+{"skip_loop_filter", "skip loop filtering process for the selected frames", OFFSET(skip_loop_filter), AV_OPT_TYPE_INT, {.i64 = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"skip_idct" , "skip IDCT/dequantization for the selected frames", OFFSET(skip_idct), AV_OPT_TYPE_INT, {.i64 = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"skip_frame" , "skip decoding for the selected frames", OFFSET(skip_frame), AV_OPT_TYPE_INT, {.i64 = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"none" , "discard no frame", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_NONE }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"default" , "discard useless frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"noref" , "discard all non-reference frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_NONREF }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"bidir" , "discard all bidirectional frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_BIDIR }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"nokey" , "discard all frames except keyframes", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_NONKEY }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"nointra" , "discard all frames except I frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_NONINTRA}, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"all" , "discard all frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_ALL }, INT_MIN, INT_MAX, V|D, "avdiscard"},
+{"bidir_refine", "refine the two motion vectors used in bidirectional macroblocks", OFFSET(bidir_refine), AV_OPT_TYPE_INT, {.i64 = 1 }, 0, 4, V|E},
+{"keyint_min", "minimum interval between IDR-frames", OFFSET(keyint_min), AV_OPT_TYPE_INT, {.i64 = 25 }, INT_MIN, INT_MAX, V|E},
+{"refs", "reference frames to consider for motion compensation", OFFSET(refs), AV_OPT_TYPE_INT, {.i64 = 1 }, INT_MIN, INT_MAX, V|E},
+{"trellis", "rate-distortion optimal quantization", OFFSET(trellis), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
+{"mv0_threshold", NULL, OFFSET(mv0_threshold), AV_OPT_TYPE_INT, {.i64 = 256 }, 0, INT_MAX, V|E},
+{"compression_level", NULL, OFFSET(compression_level), AV_OPT_TYPE_INT, {.i64 = FF_COMPRESSION_DEFAULT }, INT_MIN, INT_MAX, V|A|E},
+{"bits_per_raw_sample", NULL, OFFSET(bits_per_raw_sample), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
+{"ch_layout", NULL, OFFSET(ch_layout), AV_OPT_TYPE_CHLAYOUT, {.str = NULL }, 0, 0, A|E|D, "ch_layout"},
+#if FF_API_OLD_CHANNEL_LAYOUT
+{"channel_layout", NULL, OFFSET(channel_layout), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64 = DEFAULT }, 0, UINT64_MAX, A|E|D, "channel_layout"},
+{"request_channel_layout", NULL, OFFSET(request_channel_layout), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64 = DEFAULT }, 0, UINT64_MAX, A|D, "request_channel_layout"},
+#endif
+{"rc_max_vbv_use", NULL, OFFSET(rc_max_available_vbv_use), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, 0.0, FLT_MAX, V|E},
+{"rc_min_vbv_use", NULL, OFFSET(rc_min_vbv_overflow_use), AV_OPT_TYPE_FLOAT, {.dbl = 3 }, 0.0, FLT_MAX, V|E},
+{"ticks_per_frame", NULL, OFFSET(ticks_per_frame), AV_OPT_TYPE_INT, {.i64 = 1 }, 1, INT_MAX, A|V|E|D},
+{"color_primaries", "color primaries", OFFSET(color_primaries), AV_OPT_TYPE_INT, {.i64 = AVCOL_PRI_UNSPECIFIED }, 1, INT_MAX, V|E|D, "color_primaries_type"},
+{"bt709", "BT.709", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT709 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"unknown", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"bt470m", "BT.470 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT470M }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"bt470bg", "BT.470 BG", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT470BG }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"smpte170m", "SMPTE 170 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_SMPTE170M }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"smpte240m", "SMPTE 240 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_SMPTE240M }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"film", "Film", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_FILM }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"bt2020", "BT.2020", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT2020 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"smpte428", "SMPTE 428-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_SMPTE428 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"smpte428_1", "SMPTE 428-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_SMPTE428 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"smpte431", "SMPTE 431-2", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_SMPTE431 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"smpte432", "SMPTE 422-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_SMPTE432 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"jedec-p22", "JEDEC P22", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_JEDEC_P22 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"ebu3213", "EBU 3213-E", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_EBU3213 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"unspecified", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"color_trc", "color transfer characteristics", OFFSET(color_trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_UNSPECIFIED }, 1, INT_MAX, V|E|D, "color_trc_type"},
+{"bt709", "BT.709", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"unknown", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"gamma22", "BT.470 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_GAMMA22 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"gamma28", "BT.470 BG", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_GAMMA28 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"smpte170m", "SMPTE 170 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE170M }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"smpte240m", "SMPTE 240 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE240M }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"linear", "Linear", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LINEAR }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"log100", "Log", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LOG }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"log316", "Log square root", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LOG_SQRT }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"iec61966-2-4", "IEC 61966-2-4", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_IEC61966_2_4 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"bt1361e", "BT.1361", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT1361_ECG }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"iec61966-2-1", "IEC 61966-2-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_IEC61966_2_1 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"bt2020-10", "BT.2020 - 10 bit", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"bt2020-12", "BT.2020 - 12 bit", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_12 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"smpte2084", "SMPTE 2084", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE2084 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"smpte428", "SMPTE 428-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE428 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"arib-std-b67", "ARIB STD-B67", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_ARIB_STD_B67 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"unspecified", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"log", "Log", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LOG }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"log_sqrt", "Log square root", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LOG_SQRT }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"iec61966_2_4", "IEC 61966-2-4", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_IEC61966_2_4 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"bt1361", "BT.1361", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT1361_ECG }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"iec61966_2_1", "IEC 61966-2-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_IEC61966_2_1 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"bt2020_10bit", "BT.2020 - 10 bit", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"bt2020_12bit", "BT.2020 - 12 bit", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_12 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"smpte428_1", "SMPTE 428-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE428 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
+{"colorspace", "color space", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64 = AVCOL_SPC_UNSPECIFIED }, 0, INT_MAX, V|E|D, "colorspace_type"},
+{"rgb", "RGB", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_RGB }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt709", "BT.709", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT709 }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"unknown", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"fcc", "FCC", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_FCC }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt470bg", "BT.470 BG", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT470BG }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"smpte170m", "SMPTE 170 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_SMPTE170M }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"smpte240m", "SMPTE 240 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_SMPTE240M }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"ycgco", "YCGCO", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_YCGCO }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt2020nc", "BT.2020 NCL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt2020c", "BT.2020 CL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_CL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"smpte2085", "SMPTE 2085", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_SMPTE2085 }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"chroma-derived-nc", "Chroma-derived NCL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_CHROMA_DERIVED_NCL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"chroma-derived-c", "Chroma-derived CL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_CHROMA_DERIVED_CL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"ictcp", "ICtCp", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_ICTCP }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"unspecified", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"ycocg", "YCGCO", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_YCGCO }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt2020_ncl", "BT.2020 NCL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt2020_cl", "BT.2020 CL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_CL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"color_range", "color range", OFFSET(color_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, INT_MAX, V|E|D, "color_range_type"},
+{"unknown", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "color_range_type"},
+{"tv", "MPEG (219*2^(n-8))", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG }, INT_MIN, INT_MAX, V|E|D, "color_range_type"},
+{"pc", "JPEG (2^n-1)", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG }, INT_MIN, INT_MAX, V|E|D, "color_range_type"},
+{"unspecified", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "color_range_type"},
+{"mpeg", "MPEG (219*2^(n-8))", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG }, INT_MIN, INT_MAX, V|E|D, "color_range_type"},
+{"jpeg", "JPEG (2^n-1)", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG }, INT_MIN, INT_MAX, V|E|D, "color_range_type"},
+{"chroma_sample_location", "chroma sample location", OFFSET(chroma_sample_location), AV_OPT_TYPE_INT, {.i64 = AVCHROMA_LOC_UNSPECIFIED }, 0, INT_MAX, V|E|D, "chroma_sample_location_type"},
+{"unknown", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCHROMA_LOC_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "chroma_sample_location_type"},
+{"left", "Left", 0, AV_OPT_TYPE_CONST, {.i64 = AVCHROMA_LOC_LEFT }, INT_MIN, INT_MAX, V|E|D, "chroma_sample_location_type"},
+{"center", "Center", 0, AV_OPT_TYPE_CONST, {.i64 = AVCHROMA_LOC_CENTER }, INT_MIN, INT_MAX, V|E|D, "chroma_sample_location_type"},
+{"topleft", "Top-left", 0, AV_OPT_TYPE_CONST, {.i64 = AVCHROMA_LOC_TOPLEFT }, INT_MIN, INT_MAX, V|E|D, "chroma_sample_location_type"},
+{"top", "Top", 0, AV_OPT_TYPE_CONST, {.i64 = AVCHROMA_LOC_TOP }, INT_MIN, INT_MAX, V|E|D, "chroma_sample_location_type"},
+{"bottomleft", "Bottom-left", 0, AV_OPT_TYPE_CONST, {.i64 = AVCHROMA_LOC_BOTTOMLEFT }, INT_MIN, INT_MAX, V|E|D, "chroma_sample_location_type"},
+{"bottom", "Bottom", 0, AV_OPT_TYPE_CONST, {.i64 = AVCHROMA_LOC_BOTTOM }, INT_MIN, INT_MAX, V|E|D, "chroma_sample_location_type"},
+{"unspecified", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCHROMA_LOC_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "chroma_sample_location_type"},
+{"log_level_offset", "set the log level offset", OFFSET(log_level_offset), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX },
+{"slices", "set the number of slices, used in parallelized encoding", OFFSET(slices), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, V|E},
+{"thread_type", "select multithreading type", OFFSET(thread_type), AV_OPT_TYPE_FLAGS, {.i64 = FF_THREAD_SLICE|FF_THREAD_FRAME }, 0, INT_MAX, V|A|E|D, "thread_type"},
+{"slice", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_THREAD_SLICE }, INT_MIN, INT_MAX, V|E|D, "thread_type"},
+{"frame", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_THREAD_FRAME }, INT_MIN, INT_MAX, V|E|D, "thread_type"},
+{"audio_service_type", "audio service type", OFFSET(audio_service_type), AV_OPT_TYPE_INT, {.i64 = AV_AUDIO_SERVICE_TYPE_MAIN }, 0, AV_AUDIO_SERVICE_TYPE_NB-1, A|E, "audio_service_type"},
+{"ma", "Main Audio Service", 0, AV_OPT_TYPE_CONST, {.i64 = AV_AUDIO_SERVICE_TYPE_MAIN }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"ef", "Effects", 0, AV_OPT_TYPE_CONST, {.i64 = AV_AUDIO_SERVICE_TYPE_EFFECTS }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"vi", "Visually Impaired", 0, AV_OPT_TYPE_CONST, {.i64 = AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"hi", "Hearing Impaired", 0, AV_OPT_TYPE_CONST, {.i64 = AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"di", "Dialogue", 0, AV_OPT_TYPE_CONST, {.i64 = AV_AUDIO_SERVICE_TYPE_DIALOGUE }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"co", "Commentary", 0, AV_OPT_TYPE_CONST, {.i64 = AV_AUDIO_SERVICE_TYPE_COMMENTARY }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"em", "Emergency", 0, AV_OPT_TYPE_CONST, {.i64 = AV_AUDIO_SERVICE_TYPE_EMERGENCY }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"vo", "Voice Over", 0, AV_OPT_TYPE_CONST, {.i64 = AV_AUDIO_SERVICE_TYPE_VOICE_OVER }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"ka", "Karaoke", 0, AV_OPT_TYPE_CONST, {.i64 = AV_AUDIO_SERVICE_TYPE_KARAOKE }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
+{"request_sample_fmt", "sample format audio decoders should prefer", OFFSET(request_sample_fmt), AV_OPT_TYPE_SAMPLE_FMT, {.i64=AV_SAMPLE_FMT_NONE}, -1, INT_MAX, A|D, "request_sample_fmt"},
+{"pkt_timebase", NULL, OFFSET(pkt_timebase), AV_OPT_TYPE_RATIONAL, {.dbl = 0 }, 0, INT_MAX, 0},
+{"sub_charenc", "set input text subtitles character encoding", OFFSET(sub_charenc), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, S|D},
+{"sub_charenc_mode", "set input text subtitles character encoding mode", OFFSET(sub_charenc_mode), AV_OPT_TYPE_FLAGS, {.i64 = FF_SUB_CHARENC_MODE_AUTOMATIC}, -1, INT_MAX, S|D, "sub_charenc_mode"},
+{"do_nothing", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_DO_NOTHING}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
+{"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_AUTOMATIC}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
+{"pre_decoder", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_PRE_DECODER}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
+{"ignore", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_IGNORE}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
+{"apply_cropping", NULL, OFFSET(apply_cropping), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, V | D },
+{"skip_alpha", "Skip processing alpha", OFFSET(skip_alpha), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, V|D },
+{"field_order", "Field order", OFFSET(field_order), AV_OPT_TYPE_INT, {.i64 = AV_FIELD_UNKNOWN }, 0, 5, V|D|E, "field_order" },
+{"progressive", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_PROGRESSIVE }, 0, 0, V|D|E, "field_order" },
+{"tt", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_TT }, 0, 0, V|D|E, "field_order" },
+{"bb", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_BB }, 0, 0, V|D|E, "field_order" },
+{"tb", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_TB }, 0, 0, V|D|E, "field_order" },
+{"bt", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_BT }, 0, 0, V|D|E, "field_order" },
+{"dump_separator", "set information dump field separator", OFFSET(dump_separator), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, A|V|S|D|E},
+{"codec_whitelist", "List of decoders that are allowed to be used", OFFSET(codec_whitelist), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, A|V|S|D },
+{"pixel_format", "set pixel format", OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, {.i64=AV_PIX_FMT_NONE}, -1, INT_MAX, 0 },
+{"video_size", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str=NULL}, 0, INT_MAX, 0 },
+{"max_pixels", "Maximum number of pixels", OFFSET(max_pixels), AV_OPT_TYPE_INT64, {.i64 = INT_MAX }, 0, INT_MAX, A|V|S|D|E },
+{"max_samples", "Maximum number of samples", OFFSET(max_samples), AV_OPT_TYPE_INT64, {.i64 = INT_MAX }, 0, INT_MAX, A|D|E },
+{"hwaccel_flags", NULL, OFFSET(hwaccel_flags), AV_OPT_TYPE_FLAGS, {.i64 = AV_HWACCEL_FLAG_IGNORE_LEVEL }, 0, UINT_MAX, V|D, "hwaccel_flags"},
+{"ignore_level", "ignore level even if the codec level used is unknown or higher than the maximum supported level reported by the hardware driver", 0, AV_OPT_TYPE_CONST, { .i64 = AV_HWACCEL_FLAG_IGNORE_LEVEL }, INT_MIN, INT_MAX, V | D, "hwaccel_flags" },
+{"allow_high_depth", "allow to output YUV pixel formats with a different chroma sampling than 4:2:0 and/or other than 8 bits per component", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
+{"allow_profile_mismatch", "attempt to decode anyway if HW accelerated decoder's supported profiles do not exactly match the stream", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
+{"unsafe_output", "allow potentially unsafe hwaccel frame output that might require special care to process successfully", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_UNSAFE_OUTPUT }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
+{"extra_hw_frames", "Number of extra hardware frames to allocate for the user", OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, V|D },
+{"discard_damaged_percentage", "Percentage of damaged samples to discard a frame", OFFSET(discard_damaged_percentage), AV_OPT_TYPE_INT, {.i64 = 95 }, 0, 100, V|D },
+{NULL},
+};
+
+#undef A
+#undef V
+#undef S
+#undef E
+#undef D
+#undef CC
+#undef DEFAULT
+#undef OFFSET
+
+#endif /* AVCODEC_OPTIONS_TABLE_H */
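The unit strings in the table ("flags", "err_detect", "avdiscard", ...) tie the named constants to their parent option, so flag options accept '+'-separated constant names and integer options accept a constant name directly. A short sketch under those assumptions, not part of the patch (tighten_error_checking() is a hypothetical helper):

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

int tighten_error_checking(AVCodecContext *ctx)
{
    int ret;

    /* Combines AV_EF_CRCCHECK and AV_EF_EXPLODE via the err_detect unit. */
    ret = av_opt_set(ctx, "err_detect", "crccheck+explode", 0);
    if (ret < 0)
        return ret;

    /* "nokey" resolves to AVDISCARD_NONKEY via the avdiscard unit. */
    return av_opt_set(ctx, "skip_frame", "nokey", 0);
}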
diff --git a/media/ffvpx/libavcodec/packet.h b/media/ffvpx/libavcodec/packet.h
new file mode 100644
index 0000000000..f28e7e7011
--- /dev/null
+++ b/media/ffvpx/libavcodec/packet.h
@@ -0,0 +1,731 @@
+/*
+ * AVPacket public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PACKET_H
+#define AVCODEC_PACKET_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/buffer.h"
+#include "libavutil/dict.h"
+#include "libavutil/rational.h"
+#include "libavutil/version.h"
+
+#include "libavcodec/version_major.h"
+
+/**
+ * @defgroup lavc_packet AVPacket
+ *
+ * Types and functions for working with AVPacket.
+ * @{
+ */
+enum AVPacketSideDataType {
+ /**
+ * An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE
+ * bytes worth of palette. This side data signals that a new palette is
+ * present.
+ */
+ AV_PKT_DATA_PALETTE,
+
+ /**
+ * The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format
+ * that the extradata buffer was changed and the receiving side should
+ * act upon it appropriately. The new extradata is embedded in the side
+ * data buffer and should be immediately used for processing the current
+ * frame or packet.
+ */
+ AV_PKT_DATA_NEW_EXTRADATA,
+
+ /**
+ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+ * @code
+ * u32le param_flags
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+ * s32le channel_count
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+ * u64le channel_layout
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+ * s32le sample_rate
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+ * s32le width
+ * s32le height
+ * @endcode
+ */
+ AV_PKT_DATA_PARAM_CHANGE,
+
+ /**
+ * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
+ * structures with info about macroblocks relevant to splitting the
+ * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
+ * That is, it does not necessarily contain info about all macroblocks,
+ * as long as the distance between macroblocks in the info is smaller
+ * than the target payload size.
+ * Each MB info structure is 12 bytes, and is laid out as follows:
+ * @code
+ * u32le bit offset from the start of the packet
+ * u8 current quantizer at the start of the macroblock
+ * u8 GOB number
+ * u16le macroblock address within the GOB
+ * u8 horizontal MV predictor
+ * u8 vertical MV predictor
+ * u8 horizontal MV predictor for block number 3
+ * u8 vertical MV predictor for block number 3
+ * @endcode
+ */
+ AV_PKT_DATA_H263_MB_INFO,
+
+ /**
+ * This side data should be associated with an audio stream and contains
+ * ReplayGain information in form of the AVReplayGain struct.
+ */
+ AV_PKT_DATA_REPLAYGAIN,
+
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the decoded video frames for
+ * correct presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_PKT_DATA_DISPLAYMATRIX,
+
+ /**
+ * This side data should be associated with a video stream and contains
+ * Stereoscopic 3D information in form of the AVStereo3D struct.
+ */
+ AV_PKT_DATA_STEREO3D,
+
+ /**
+ * This side data should be associated with an audio stream and corresponds
+ * to enum AVAudioServiceType.
+ */
+ AV_PKT_DATA_AUDIO_SERVICE_TYPE,
+
+ /**
+ * This side data contains quality related information from the encoder.
+ * @code
+ * u32le quality factor of the compressed frame. Allowed range is between 1 (good) and FF_LAMBDA_MAX (bad).
+ * u8 picture type
+ * u8 error count
+ * u16 reserved
+ * u64le[error count] sum of squared differences between encoder input and output
+ * @endcode
+ */
+ AV_PKT_DATA_QUALITY_STATS,
+
+ /**
+ * This side data contains an integer value representing the stream index
+ * of a "fallback" track. A fallback track indicates an alternate
+ * track to use when the current track can not be decoded for some reason.
+ * e.g. no decoder available for codec.
+ */
+ AV_PKT_DATA_FALLBACK_TRACK,
+
+ /**
+ * This side data corresponds to the AVCPBProperties struct.
+ */
+ AV_PKT_DATA_CPB_PROPERTIES,
+
+ /**
+ * Recommends skipping the specified number of samples
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_PKT_DATA_SKIP_SAMPLES,
+
+ /**
+ * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that
+ * the packet may contain "dual mono" audio specific to Japanese DTV
+ * and, if so, recommends that only the selected channel be used.
+ * @code
+ * u8 selected channels (0=main/left, 1=sub/right, 2=both)
+ * @endcode
+ */
+ AV_PKT_DATA_JP_DUALMONO,
+
+ /**
+ * A list of zero-terminated key/value strings. There is no end marker for
+ * the list, so the side data size must be used to determine where it ends.
+ */
+ AV_PKT_DATA_STRINGS_METADATA,
+
+ /**
+ * Subtitle event position
+ * @code
+ * u32le x1
+ * u32le y1
+ * u32le x2
+ * u32le y2
+ * @endcode
+ */
+ AV_PKT_DATA_SUBTITLE_POSITION,
+
+ /**
+ * Data found in the BlockAdditional element of the Matroska container.
+ * There is no end marker for the data, so the side data size must be used
+ * to recognize the end. The payload is an 8-byte id (as found in
+ * BlockAddId) followed by the data.
+ */
+ AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
+
+ /**
+ * The optional first identifier line of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_IDENTIFIER,
+
+ /**
+ * The optional settings (rendering instructions) that immediately
+ * follow the timestamp specifier of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_SETTINGS,
+
+ /**
+ * A list of zero-terminated key/value strings. There is no end marker for
+ * the list, so the side data size must be used to determine where it ends.
+ * This side data includes updated metadata which appeared in the stream.
+ */
+ AV_PKT_DATA_METADATA_UPDATE,
+
+ /**
+ * MPEGTS stream ID as uint8_t; this is required to pass the stream ID
+ * information from the demuxer to the corresponding muxer.
+ */
+ AV_PKT_DATA_MPEGTS_STREAM_ID,
+
+ /**
+ * Mastering display metadata (based on SMPTE-2086:2014). This metadata
+ * should be associated with a video stream and contains data in the form
+ * of the AVMasteringDisplayMetadata struct.
+ */
+ AV_PKT_DATA_MASTERING_DISPLAY_METADATA,
+
+ /**
+ * This side data should be associated with a video stream and corresponds
+ * to the AVSphericalMapping structure.
+ */
+ AV_PKT_DATA_SPHERICAL,
+
+ /**
+ * Content light level (based on CTA-861.3). This metadata should be
+ * associated with a video stream and contains data in the form of the
+ * AVContentLightMetadata struct.
+ */
+ AV_PKT_DATA_CONTENT_LIGHT_LEVEL,
+
+ /**
+ * ATSC A53 Part 4 Closed Captions. This metadata should be associated with
+ * a video stream. A53 CC bitstream is stored as uint8_t in AVPacketSideData.data.
+ * The number of bytes of CC data is AVPacketSideData.size.
+ */
+ AV_PKT_DATA_A53_CC,
+
+ /**
+ * This side data is encryption initialization data.
+ * The format is not part of ABI, use av_encryption_init_info_* methods to
+ * access.
+ */
+ AV_PKT_DATA_ENCRYPTION_INIT_INFO,
+
+ /**
+ * This side data contains encryption info for how to decrypt the packet.
+ * The format is not part of ABI, use av_encryption_info_* methods to access.
+ */
+ AV_PKT_DATA_ENCRYPTION_INFO,
+
+ /**
+ * Active Format Description data consisting of a single byte as specified
+ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
+ */
+ AV_PKT_DATA_AFD,
+
+ /**
+ * Producer Reference Time data corresponding to the AVProducerReferenceTime struct,
+ * usually exported by some encoders (on demand through the prft flag set in the
+ * AVCodecContext export_side_data field).
+ */
+ AV_PKT_DATA_PRFT,
+
+ /**
+ * ICC profile data consisting of an opaque octet buffer following the
+ * format described by ISO 15076-1.
+ */
+ AV_PKT_DATA_ICC_PROFILE,
+
+ /**
+ * DOVI configuration
+ * ref:
+ * dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2.1.2, section 2.2
+ * dolby-vision-bitstreams-in-mpeg-2-transport-stream-multiplex-v1.2, section 3.3
+ * Tags are stored in struct AVDOVIDecoderConfigurationRecord.
+ */
+ AV_PKT_DATA_DOVI_CONF,
+
+ /**
+ * Timecode which conforms to SMPTE ST 12-1:2014. The data is an array of 4 uint32_t
+ * where the first uint32_t describes how many (1-3) of the other timecodes are used.
+ * The timecode format is described in the documentation of av_timecode_get_smpte_from_framenum()
+ * function in libavutil/timecode.h.
+ */
+ AV_PKT_DATA_S12M_TIMECODE,
+
+ /**
+ * HDR10+ dynamic metadata associated with a video frame. The metadata is in
+ * the form of the AVDynamicHDRPlus struct and contains
+ * information for color volume transform - application 4 of
+ * SMPTE 2094-40:2016 standard.
+ */
+ AV_PKT_DATA_DYNAMIC_HDR10_PLUS,
+
+ /**
+ * The number of side data types.
+ * This is not part of the public API/ABI in the sense that it may
+ * change when new side data types are added.
+ * This must stay the last enum value.
+ * If its value becomes huge, some code using it
+ * needs to be updated as it assumes it to be smaller than other limits.
+ */
+ AV_PKT_DATA_NB
+};
+
+#define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS //DEPRECATED
+
+typedef struct AVPacketSideData {
+ uint8_t *data;
+ size_t size;
+ enum AVPacketSideDataType type;
+} AVPacketSideData;
+
+/**
+ * This structure stores compressed data. It is typically exported by demuxers
+ * and then passed as input to decoders, or received as output from encoders and
+ * then passed to muxers.
+ *
+ * For video, it should typically contain one compressed frame. For audio it may
+ * contain several compressed frames. Encoders are allowed to output empty
+ * packets, with no compressed data, containing only side data
+ * (e.g. to update some stream parameters at the end of encoding).
+ *
+ * The semantics of data ownership depend on the buf field.
+ * If it is set, the packet data is dynamically allocated and remains
+ * valid until a call to av_packet_unref() reduces the
+ * reference count to 0.
+ *
+ * If the buf field is not set, av_packet_ref() will make a copy instead
+ * of increasing the reference count.
+ *
+ * The side data is always allocated with av_malloc(), copied by
+ * av_packet_ref() and freed by av_packet_unref().
+ *
+ * sizeof(AVPacket) being a part of the public ABI is deprecated. Once
+ * av_init_packet() is removed, new packets will only be allocatable
+ * with av_packet_alloc(), and new fields may be added to the end of the struct
+ * with a minor version bump.
+ *
+ * @see av_packet_alloc
+ * @see av_packet_ref
+ * @see av_packet_unref
+ */
+typedef struct AVPacket {
+ /**
+ * A reference to the reference-counted buffer where the packet data is
+ * stored.
+ * May be NULL, then the packet data is not reference-counted.
+ */
+ AVBufferRef *buf;
+ /**
+ * Presentation timestamp in AVStream->time_base units; the time at which
+ * the decompressed packet will be presented to the user.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ * pts MUST be larger than or equal to dts, as presentation cannot happen before
+ * decompression, unless one wants to view hex dumps. Some formats misuse
+ * the terms dts and pts/cts to mean something different. Such timestamps
+ * must be converted to true pts/dts before they are stored in AVPacket.
+ */
+ int64_t pts;
+ /**
+ * Decompression timestamp in AVStream->time_base units; the time at which
+ * the packet is decompressed.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ */
+ int64_t dts;
+ uint8_t *data;
+ int size;
+ int stream_index;
+ /**
+ * A combination of AV_PKT_FLAG values
+ */
+ int flags;
+ /**
+ * Additional packet data that can be provided by the container.
+ * Packet can contain several types of side information.
+ */
+ AVPacketSideData *side_data;
+ int side_data_elems;
+
+ /**
+ * Duration of this packet in AVStream->time_base units, 0 if unknown.
+ * Equals next_pts - this_pts in presentation order.
+ */
+ int64_t duration;
+
+ int64_t pos; ///< byte position in stream, -1 if unknown
+
+ /**
+ * for some private data of the user
+ */
+ void *opaque;
+
+ /**
+ * AVBufferRef for free use by the API user. FFmpeg will never check the
+ * contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when
+ * the packet is unreferenced. av_packet_copy_props() calls create a new
+ * reference with av_buffer_ref() for the target packet's opaque_ref field.
+ *
+ * This is unrelated to the opaque field, although it serves a similar
+ * purpose.
+ */
+ AVBufferRef *opaque_ref;
+
+ /**
+ * Time base of the packet's timestamps.
+ * In the future, this field may be set on packets output by encoders or
+ * demuxers, but its value will be by default ignored on input to decoders
+ * or muxers.
+ */
+ AVRational time_base;
+} AVPacket;
+
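+/*
+ * A minimal lifetime sketch for the ownership rules documented above,
+ * assuming the caller only needs a scratch packet (all names below are
+ * local to the example):
+ *
+ *     AVPacket *pkt = av_packet_alloc();      // pkt->buf is NULL, no payload yet
+ *     if (!pkt)
+ *         return AVERROR(ENOMEM);
+ *     if (av_new_packet(pkt, 1024) == 0) {    // allocates a reference-counted buf
+ *         memset(pkt->data, 0xff, pkt->size); // fill the payload; padding is zeroed
+ *         av_packet_unref(pkt);               // drop the reference, pkt stays reusable
+ *     }
+ *     av_packet_free(&pkt);                   // frees pkt itself and sets it to NULL
+ */
+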
+#if FF_API_INIT_PACKET
+attribute_deprecated
+typedef struct AVPacketList {
+ AVPacket pkt;
+ struct AVPacketList *next;
+} AVPacketList;
+#endif
+
+#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
+#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
+/**
+ * Flag is used to discard packets which are required to maintain valid
+ * decoder state but are not required for output and should be dropped
+ * after decoding.
+ **/
+#define AV_PKT_FLAG_DISCARD 0x0004
+/**
+ * The packet comes from a trusted source.
+ *
+ * Otherwise-unsafe constructs such as arbitrary pointers to data
+ * outside the packet may be followed.
+ */
+#define AV_PKT_FLAG_TRUSTED 0x0008
+/**
+ * Flag is used to indicate packets that contain frames that can
+ * be discarded by the decoder, i.e. non-reference frames.
+ */
+#define AV_PKT_FLAG_DISPOSABLE 0x0010
+
+enum AVSideDataParamChangeFlags {
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * @deprecated those are not used by any decoder
+ */
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
+#endif
+ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
+ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
+};
+
+/**
+ * Allocate an AVPacket and set its fields to default values. The resulting
+ * struct must be freed using av_packet_free().
+ *
+ * @return An AVPacket filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVPacket itself, not the data buffers. Those
+ * must be allocated through other means such as av_new_packet.
+ *
+ * @see av_new_packet
+ */
+AVPacket *av_packet_alloc(void);
+
+/**
+ * Create a new packet that references the same data as src.
+ *
+ * This is a shortcut for av_packet_alloc()+av_packet_ref().
+ *
+ * @return newly created AVPacket on success, NULL on error.
+ *
+ * @see av_packet_alloc
+ * @see av_packet_ref
+ */
+AVPacket *av_packet_clone(const AVPacket *src);
+
+/**
+ * Free the packet, if the packet is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param pkt packet to be freed. The pointer will be set to NULL.
+ * @note passing NULL is a no-op.
+ */
+void av_packet_free(AVPacket **pkt);
+
+#if FF_API_INIT_PACKET
+/**
+ * Initialize optional fields of a packet with default values.
+ *
+ * Note, this does not touch the data and size members, which have to be
+ * initialized separately.
+ *
+ * @param pkt packet
+ *
+ * @see av_packet_alloc
+ * @see av_packet_unref
+ *
+ * @deprecated This function is deprecated. Once it's removed,
+ *             sizeof(AVPacket) will not be a part of the ABI anymore.
+ */
+attribute_deprecated
+void av_init_packet(AVPacket *pkt);
+#endif
+
+/**
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
+ */
+int av_new_packet(AVPacket *pkt, int size);
+
+/**
+ * Reduce packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param size new size
+ */
+void av_shrink_packet(AVPacket *pkt, int size);
+
+/**
+ * Increase packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
+ */
+int av_grow_packet(AVPacket *pkt, int grow_by);
+
+/**
+ * Initialize a reference-counted packet from av_malloc()ed data.
+ *
+ * @param pkt packet to be initialized. This function will set the data, size,
+ * and buf fields; all others are left untouched.
+ * @param data Data allocated by av_malloc() to be used as packet data. If this
+ * function returns successfully, the data is owned by the underlying AVBuffer.
+ * The caller may not access the data through other means.
+ * @param size size of data in bytes, without the padding. I.e. the full buffer
+ * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
+
+/**
+ * Allocate new side data for a packet.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to freshly allocated data or NULL otherwise
+ */
+uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ size_t size);
+
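+/*
+ * Sketch: attaching AV_PKT_DATA_SKIP_SAMPLES to a packet, following the
+ * layout documented for that side data type above (assumes the AV_WL32
+ * helpers from libavutil/intreadwrite.h and a packet that already holds
+ * encoded audio):
+ *
+ *     uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
+ *     if (!sd)
+ *         return AVERROR(ENOMEM);
+ *     AV_WL32(sd,     480);  // samples to skip from the start of the packet
+ *     AV_WL32(sd + 4, 0);    // samples to skip from the end
+ *     sd[8] = 0;             // reason for start skip
+ *     sd[9] = 0;             // reason for end skip (0 = padding silence)
+ */
+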
+/**
+ * Wrap an existing array as packet side data.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param data the side data array. It must be allocated with the av_malloc()
+ * family of functions. The ownership of the data is transferred to
+ * pkt.
+ * @param size side information size
+ * @return a non-negative number on success, a negative AVERROR code on
+ * failure. On failure, the packet is unchanged and the data remains
+ * owned by the caller.
+ */
+int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ uint8_t *data, size_t size);
+
+/**
+ * Shrink the already allocated side data buffer
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size new side information size
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ size_t size);
+
+/**
+ * Get side information from a packet.
+ *
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size If supplied, *size will be set to the size of the side data
+ * or to zero if the desired side data is not present.
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t* av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type,
+ size_t *size);
+
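+/*
+ * Sketch: reading the same AV_PKT_DATA_SKIP_SAMPLES entry back, with the
+ * size check kept deliberately minimal (AV_RL32 is from
+ * libavutil/intreadwrite.h):
+ *
+ *     size_t sd_size;
+ *     const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES,
+ *                                                 &sd_size);
+ *     if (sd && sd_size >= 10) {
+ *         uint32_t skip_start = AV_RL32(sd);
+ *         uint32_t skip_end   = AV_RL32(sd + 4);
+ *         // trim skip_start samples at the front and skip_end at the back
+ *     }
+ */
+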
+const char *av_packet_side_data_name(enum AVPacketSideDataType type);
+
+/**
+ * Pack a dictionary for use in side_data.
+ *
+ * @param dict The dictionary to pack.
+ * @param size pointer to store the size of the returned data
+ * @return pointer to data if successful, NULL otherwise
+ */
+uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size);
+/**
+ * Unpack a dictionary from side_data.
+ *
+ * @param data data from side_data
+ * @param size size of the data
+ * @param dict the metadata storage dictionary
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_unpack_dictionary(const uint8_t *data, size_t size,
+ AVDictionary **dict);
+
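+/*
+ * Sketch: round-tripping metadata through the two helpers above (assumes
+ * libavutil/dict.h; error handling trimmed for brevity):
+ *
+ *     AVDictionary *meta = NULL;
+ *     av_dict_set(&meta, "title", "example", 0);
+ *
+ *     size_t size;
+ *     uint8_t *packed = av_packet_pack_dictionary(meta, &size);
+ *     av_dict_free(&meta);
+ *     if (!packed)
+ *         return AVERROR(ENOMEM);
+ *
+ *     AVDictionary *unpacked = NULL;
+ *     int ret = av_packet_unpack_dictionary(packed, size, &unpacked);
+ *     av_free(packed);
+ *     // on success, "title" is retrievable with av_dict_get(unpacked, ...)
+ */
+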
+/**
+ * Convenience function to free all the side data stored.
+ * All the other fields stay untouched.
+ *
+ * @param pkt packet
+ */
+void av_packet_free_side_data(AVPacket *pkt);
+
+/**
+ * Setup a new reference to the data described by a given packet
+ *
+ * If src is reference-counted, setup dst as a new reference to the
+ * buffer in src. Otherwise allocate a new buffer in dst and copy the
+ * data from src into it.
+ *
+ * All the other fields are copied from src.
+ *
+ * @see av_packet_unref
+ *
+ * @param dst Destination packet. Will be completely overwritten.
+ * @param src Source packet
+ *
+ * @return 0 on success, a negative AVERROR on error. On error, dst
+ * will be blank (as if returned by av_packet_alloc()).
+ */
+int av_packet_ref(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Wipe the packet.
+ *
+ * Unreference the buffer referenced by the packet and reset the
+ * remaining packet fields to their default values.
+ *
+ * @param pkt The packet to be unreferenced.
+ */
+void av_packet_unref(AVPacket *pkt);
+
+/**
+ * Move every field in src to dst and reset src.
+ *
+ * @see av_packet_unref
+ *
+ * @param src Source packet, will be reset
+ * @param dst Destination packet
+ */
+void av_packet_move_ref(AVPacket *dst, AVPacket *src);
+
+/**
+ * Copy only "properties" fields from src to dst.
+ *
+ * Properties for the purpose of this function are all the fields
+ * besides those related to the packet data (buf, data, size).
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success, a negative AVERROR on failure.
+ */
+int av_packet_copy_props(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Ensure the data described by a given packet is reference counted.
+ *
+ * @note This function does not ensure that the reference will be writable.
+ * Use av_packet_make_writable instead for that purpose.
+ *
+ * @see av_packet_ref
+ * @see av_packet_make_writable
+ *
+ * @param pkt packet whose data should be made reference counted.
+ *
+ * @return 0 on success, a negative AVERROR on error. On failure, the
+ * packet is unchanged.
+ */
+int av_packet_make_refcounted(AVPacket *pkt);
+
+/**
+ * Create a writable reference for the data described by a given packet,
+ * avoiding data copy if possible.
+ *
+ * @param pkt Packet whose data should be made writable.
+ *
+ * @return 0 on success, a negative AVERROR on failure. On failure, the
+ * packet is unchanged.
+ */
+int av_packet_make_writable(AVPacket *pkt);
+
+/**
+ * Convert valid timing fields (timestamps / durations) in a packet from one
+ * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
+ * ignored.
+ *
+ * @param pkt packet on which the conversion will be performed
+ * @param tb_src source timebase, in which the timing fields in pkt are
+ * expressed
+ * @param tb_dst destination timebase, to which the timing fields will be
+ * converted
+ */
+void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst);
+
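+/*
+ * Sketch: the usual remuxing step, converting packet timing from the input
+ * stream's timebase to the output stream's before writing (in_stream and
+ * out_stream are assumed AVStream pointers from libavformat):
+ *
+ *     av_packet_rescale_ts(pkt, in_stream->time_base, out_stream->time_base);
+ *     pkt->stream_index = out_stream->index;
+ *     // pkt is now ready for av_interleaved_write_frame()
+ */
+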
+/**
+ * @}
+ */
+
+#endif // AVCODEC_PACKET_H
diff --git a/media/ffvpx/libavcodec/packet_internal.h b/media/ffvpx/libavcodec/packet_internal.h
new file mode 100644
index 0000000000..92a0d4e6d5
--- /dev/null
+++ b/media/ffvpx/libavcodec/packet_internal.h
@@ -0,0 +1,73 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PACKET_INTERNAL_H
+#define AVCODEC_PACKET_INTERNAL_H
+
+#include <stdint.h>
+
+#include "packet.h"
+
+typedef struct PacketListEntry {
+ struct PacketListEntry *next;
+ AVPacket pkt;
+} PacketListEntry;
+
+typedef struct PacketList {
+ PacketListEntry *head, *tail;
+} PacketList;
+
+/**
+ * Append an AVPacket to the list.
+ *
+ * @param list A PacketList
+ * @param pkt The packet being appended. The data described in it will
+ * be made reference counted if it isn't already.
+ * @param copy A callback to copy the contents of the packet to the list.
+ *             May be null, in which case the packet's reference will be
+ *             moved to the list.
+ * @return 0 on success, negative AVERROR value on failure. On failure,
+ *         the packet and the list are unchanged.
+ */
+int avpriv_packet_list_put(PacketList *list, AVPacket *pkt,
+ int (*copy)(AVPacket *dst, const AVPacket *src),
+ int flags);
+
+/**
+ * Remove the oldest AVPacket in the list and return it.
+ *
+ * @note The pkt will be overwritten completely on success. The caller
+ * owns the packet and must unref it by itself.
+ *
+ * @param list A pointer to a PacketList struct
+ * @param pkt Pointer to an AVPacket struct
+ * @return 0 on success, and a packet is returned. AVERROR(EAGAIN) if
+ * the list was empty.
+ */
+int avpriv_packet_list_get(PacketList *list, AVPacket *pkt);
+
+/**
+ * Wipe the list and unref all the packets in it.
+ */
+void avpriv_packet_list_free(PacketList *list);
+
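+/*
+ * Rough sketch, based only on the prototypes above, of buffering packets in
+ * a PacketList (a NULL copy callback moves the packet's reference into the
+ * list):
+ *
+ *     PacketList queue = { NULL, NULL };
+ *     AVPacket out;
+ *
+ *     int ret = avpriv_packet_list_put(&queue, pkt, NULL, 0);
+ *     if (ret < 0)
+ *         return ret;                      // pkt and queue are unchanged
+ *
+ *     while (avpriv_packet_list_get(&queue, &out) >= 0) {
+ *         // consume out, then release the reference
+ *         av_packet_unref(&out);
+ *     }
+ *     avpriv_packet_list_free(&queue);
+ */
+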
+int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type);
+
+int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp);
+
+#endif // AVCODEC_PACKET_INTERNAL_H
diff --git a/media/ffvpx/libavcodec/parser.c b/media/ffvpx/libavcodec/parser.c
new file mode 100644
index 0000000000..49de7e6a57
--- /dev/null
+++ b/media/ffvpx/libavcodec/parser.c
@@ -0,0 +1,289 @@
+/*
+ * Audio and Video frame extraction
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/mem.h"
+
+#include "parser.h"
+
+AVCodecParserContext *av_parser_init(int codec_id)
+{
+ AVCodecParserContext *s = NULL;
+ const AVCodecParser *parser;
+ void *i = 0;
+ int ret;
+
+ if (codec_id == AV_CODEC_ID_NONE)
+ return NULL;
+
+ while ((parser = av_parser_iterate(&i))) {
+ if (parser->codec_ids[0] == codec_id ||
+ parser->codec_ids[1] == codec_id ||
+ parser->codec_ids[2] == codec_id ||
+ parser->codec_ids[3] == codec_id ||
+ parser->codec_ids[4] == codec_id ||
+ parser->codec_ids[5] == codec_id ||
+ parser->codec_ids[6] == codec_id)
+ goto found;
+ }
+ return NULL;
+
+found:
+ s = av_mallocz(sizeof(AVCodecParserContext));
+ if (!s)
+ goto err_out;
+ s->parser = parser;
+ s->priv_data = av_mallocz(parser->priv_data_size);
+ if (!s->priv_data)
+ goto err_out;
+ s->fetch_timestamp=1;
+ s->pict_type = AV_PICTURE_TYPE_I;
+ if (parser->parser_init) {
+ ret = parser->parser_init(s);
+ if (ret != 0)
+ goto err_out;
+ }
+ s->key_frame = -1;
+ s->dts_sync_point = INT_MIN;
+ s->dts_ref_dts_delta = INT_MIN;
+ s->pts_dts_delta = INT_MIN;
+ s->format = -1;
+
+ return s;
+
+err_out:
+ if (s)
+ av_freep(&s->priv_data);
+ av_free(s);
+ return NULL;
+}
+
+void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove, int fuzzy)
+{
+ int i;
+
+ if (!fuzzy) {
+ s->dts =
+ s->pts = AV_NOPTS_VALUE;
+ s->pos = -1;
+ s->offset = 0;
+ }
+ for (i = 0; i < AV_PARSER_PTS_NB; i++) {
+ if (s->cur_offset + off >= s->cur_frame_offset[i] &&
+ (s->frame_offset < s->cur_frame_offset[i] ||
+ (!s->frame_offset && !s->next_frame_offset)) && // first field/frame
+ // check disabled since MPEG-TS does not send complete PES packets
+ /*s->next_frame_offset + off <*/ s->cur_frame_end[i]){
+
+ if (!fuzzy || s->cur_frame_dts[i] != AV_NOPTS_VALUE) {
+ s->dts = s->cur_frame_dts[i];
+ s->pts = s->cur_frame_pts[i];
+ s->pos = s->cur_frame_pos[i];
+ s->offset = s->next_frame_offset - s->cur_frame_offset[i];
+ }
+ if (remove)
+ s->cur_frame_offset[i] = INT64_MAX;
+ if (s->cur_offset + off < s->cur_frame_end[i])
+ break;
+ }
+ }
+}
+
+int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts, int64_t pos)
+{
+ int index, i;
+ uint8_t dummy_buf[AV_INPUT_BUFFER_PADDING_SIZE];
+
+ av_assert1(avctx->codec_id != AV_CODEC_ID_NONE);
+
+ /* Parsers only work for the specified codec ids. */
+ av_assert1(avctx->codec_id == s->parser->codec_ids[0] ||
+ avctx->codec_id == s->parser->codec_ids[1] ||
+ avctx->codec_id == s->parser->codec_ids[2] ||
+ avctx->codec_id == s->parser->codec_ids[3] ||
+ avctx->codec_id == s->parser->codec_ids[4] ||
+ avctx->codec_id == s->parser->codec_ids[5] ||
+ avctx->codec_id == s->parser->codec_ids[6]);
+
+ if (!(s->flags & PARSER_FLAG_FETCHED_OFFSET)) {
+ s->next_frame_offset =
+ s->cur_offset = pos;
+ s->flags |= PARSER_FLAG_FETCHED_OFFSET;
+ }
+
+ if (buf_size == 0) {
+ /* padding is always necessary even if EOF, so we add it here */
+ memset(dummy_buf, 0, sizeof(dummy_buf));
+ buf = dummy_buf;
+ } else if (s->cur_offset + buf_size != s->cur_frame_end[s->cur_frame_start_index]) { /* skip remainder packets */
+ /* add a new packet descriptor */
+ i = (s->cur_frame_start_index + 1) & (AV_PARSER_PTS_NB - 1);
+ s->cur_frame_start_index = i;
+ s->cur_frame_offset[i] = s->cur_offset;
+ s->cur_frame_end[i] = s->cur_offset + buf_size;
+ s->cur_frame_pts[i] = pts;
+ s->cur_frame_dts[i] = dts;
+ s->cur_frame_pos[i] = pos;
+ }
+
+ if (s->fetch_timestamp) {
+ s->fetch_timestamp = 0;
+ s->last_pts = s->pts;
+ s->last_dts = s->dts;
+ s->last_pos = s->pos;
+ ff_fetch_timestamp(s, 0, 0, 0);
+ }
+ /* WARNING: the returned index can be negative */
+ index = s->parser->parser_parse(s, avctx, (const uint8_t **) poutbuf,
+ poutbuf_size, buf, buf_size);
+ av_assert0(index > -0x20000000); // The API does not allow returning AVERROR codes
+#define FILL(name) if(s->name > 0 && avctx->name <= 0) avctx->name = s->name
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ FILL(field_order);
+ }
+
+ /* update the file pointer */
+ if (*poutbuf_size) {
+ /* fill the data for the current frame */
+ s->frame_offset = s->next_frame_offset;
+
+ /* offset of the next frame */
+ s->next_frame_offset = s->cur_offset + index;
+ s->fetch_timestamp = 1;
+ } else {
+ /* Don't return a pointer to dummy_buf. */
+ *poutbuf = NULL;
+ }
+ if (index < 0)
+ index = 0;
+ s->cur_offset += index;
+ return index;
+}
+
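+/*
+ * Sketch of the public-API loop this function serves, mirroring the usual
+ * decoding examples (parser, dec_ctx and pkt are assumed to be initialized
+ * already; decode() stands in for avcodec_send_packet() plus the receive
+ * loop):
+ *
+ *     while (data_size > 0) {
+ *         int ret = av_parser_parse2(parser, dec_ctx, &pkt->data, &pkt->size,
+ *                                    data, data_size,
+ *                                    AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
+ *         if (ret < 0)
+ *             break;
+ *         data      += ret;
+ *         data_size -= ret;
+ *         if (pkt->size)
+ *             decode(dec_ctx, pkt);
+ *     }
+ */
+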
+void av_parser_close(AVCodecParserContext *s)
+{
+ if (s) {
+ if (s->parser->parser_close)
+ s->parser->parser_close(s);
+ av_freep(&s->priv_data);
+ av_free(s);
+ }
+}
+
+int ff_combine_frame(ParseContext *pc, int next,
+ const uint8_t **buf, int *buf_size)
+{
+ if (pc->overread) {
+ ff_dlog(NULL, "overread %d, state:%"PRIX32" next:%d index:%d o_index:%d\n",
+ pc->overread, pc->state, next, pc->index, pc->overread_index);
+ ff_dlog(NULL, "%X %X %X %X\n",
+ (*buf)[0], (*buf)[1], (*buf)[2], (*buf)[3]);
+ }
+
+ /* Copy overread bytes from last frame into buffer. */
+ for (; pc->overread > 0; pc->overread--)
+ pc->buffer[pc->index++] = pc->buffer[pc->overread_index++];
+
+ if (next > *buf_size)
+ return AVERROR(EINVAL);
+
+ /* flush remaining if EOF */
+ if (!*buf_size && next == END_NOT_FOUND)
+ next = 0;
+
+ pc->last_index = pc->index;
+
+ /* copy into buffer and return */
+ if (next == END_NOT_FOUND) {
+ void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size,
+ *buf_size + pc->index +
+ AV_INPUT_BUFFER_PADDING_SIZE);
+
+ if (!new_buffer) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to reallocate parser buffer to %d\n", *buf_size + pc->index + AV_INPUT_BUFFER_PADDING_SIZE);
+ pc->index = 0;
+ return AVERROR(ENOMEM);
+ }
+ pc->buffer = new_buffer;
+ memcpy(&pc->buffer[pc->index], *buf, *buf_size);
+ pc->index += *buf_size;
+ return -1;
+ }
+
+ av_assert0(next >= 0 || pc->buffer);
+
+ *buf_size =
+ pc->overread_index = pc->index + next;
+
+ /* append to buffer */
+ if (pc->index) {
+ void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size,
+ next + pc->index +
+ AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!new_buffer) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to reallocate parser buffer to %d\n", next + pc->index + AV_INPUT_BUFFER_PADDING_SIZE);
+ pc->overread_index =
+ pc->index = 0;
+ return AVERROR(ENOMEM);
+ }
+ pc->buffer = new_buffer;
+ if (next > -AV_INPUT_BUFFER_PADDING_SIZE)
+ memcpy(&pc->buffer[pc->index], *buf,
+ next + AV_INPUT_BUFFER_PADDING_SIZE);
+ pc->index = 0;
+ *buf = pc->buffer;
+ }
+
+ if (next < -8) {
+ pc->overread += -8 - next;
+ next = -8;
+ }
+ /* store overread bytes */
+ for (; next < 0; next++) {
+ pc->state = pc->state << 8 | pc->buffer[pc->last_index + next];
+ pc->state64 = pc->state64 << 8 | pc->buffer[pc->last_index + next];
+ pc->overread++;
+ }
+
+ if (pc->overread) {
+ ff_dlog(NULL, "overread %d, state:%"PRIX32" next:%d index:%d o_index:%d\n",
+ pc->overread, pc->state, next, pc->index, pc->overread_index);
+ ff_dlog(NULL, "%X %X %X %X\n",
+ (*buf)[0], (*buf)[1], (*buf)[2], (*buf)[3]);
+ }
+
+ return 0;
+}
+
+void ff_parse_close(AVCodecParserContext *s)
+{
+ ParseContext *pc = s->priv_data;
+
+ av_freep(&pc->buffer);
+}
diff --git a/media/ffvpx/libavcodec/parser.h b/media/ffvpx/libavcodec/parser.h
new file mode 100644
index 0000000000..2cee5ae4ff
--- /dev/null
+++ b/media/ffvpx/libavcodec/parser.h
@@ -0,0 +1,58 @@
+/*
+ * AVCodecParser prototypes and definitions
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PARSER_H
+#define AVCODEC_PARSER_H
+
+#include "avcodec.h"
+
+typedef struct ParseContext{
+ uint8_t *buffer;
+ int index;
+ int last_index;
+ unsigned int buffer_size;
+ uint32_t state; ///< contains the last few bytes in MSB order
+ int frame_start_found;
+ int overread; ///< the number of bytes which were irreversibly read from the next frame
+ int overread_index; ///< the index into ParseContext.buffer of the overread bytes
+ uint64_t state64; ///< contains the last 8 bytes in MSB order
+} ParseContext;
+
+#define END_NOT_FOUND (-100)
+
+/**
+ * Combine the (truncated) bitstream into a complete frame.
+ * @return -1 if no complete frame could be created,
+ * AVERROR(ENOMEM) if there was a memory allocation error
+ */
+int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size);
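+
+/*
+ * Rough sketch of how parsers typically drive ff_combine_frame() from their
+ * parser_parse callback (find_frame_end() is a hypothetical per-codec
+ * scanner returning the frame boundary or END_NOT_FOUND):
+ *
+ *     ParseContext *pc = s->priv_data;
+ *     int next = find_frame_end(pc, buf, buf_size);
+ *
+ *     if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
+ *         *poutbuf      = NULL;
+ *         *poutbuf_size = 0;
+ *         return buf_size;        // input consumed, no complete frame yet
+ *     }
+ *     *poutbuf      = buf;
+ *     *poutbuf_size = buf_size;   // one complete frame
+ *     return next;
+ */
+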
+void ff_parse_close(AVCodecParserContext *s);
+
+/**
+ * Fetch timestamps for a specific byte within the current access unit.
+ * @param off byte position within the access unit
+ * @param remove Found timestamps will be removed if set to 1, kept if set to 0.
+ * @param fuzzy Only use found value if it is more informative than what we already have
+ */
+void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove, int fuzzy);
+
+#endif /* AVCODEC_PARSER_H */
diff --git a/media/ffvpx/libavcodec/parser_list.c b/media/ffvpx/libavcodec/parser_list.c
new file mode 100644
index 0000000000..f16ecd05c5
--- /dev/null
+++ b/media/ffvpx/libavcodec/parser_list.c
@@ -0,0 +1,10 @@
+#include "config_components.h"
+
+static const AVCodecParser * const parser_list[] = {
+#if CONFIG_VP8_PARSER
+ &ff_vp8_parser,
+#endif
+#if CONFIG_VP9_PARSER
+ &ff_vp9_parser,
+#endif
+ NULL };
diff --git a/media/ffvpx/libavcodec/parsers.c b/media/ffvpx/libavcodec/parsers.c
new file mode 100644
index 0000000000..d355808018
--- /dev/null
+++ b/media/ffvpx/libavcodec/parsers.c
@@ -0,0 +1,93 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "avcodec.h"
+
+extern const AVCodecParser ff_aac_parser;
+extern const AVCodecParser ff_aac_latm_parser;
+extern const AVCodecParser ff_ac3_parser;
+extern const AVCodecParser ff_adx_parser;
+extern const AVCodecParser ff_amr_parser;
+extern const AVCodecParser ff_av1_parser;
+extern const AVCodecParser ff_avs2_parser;
+extern const AVCodecParser ff_avs3_parser;
+extern const AVCodecParser ff_bmp_parser;
+extern const AVCodecParser ff_cavsvideo_parser;
+extern const AVCodecParser ff_cook_parser;
+extern const AVCodecParser ff_cri_parser;
+extern const AVCodecParser ff_dca_parser;
+extern const AVCodecParser ff_dirac_parser;
+extern const AVCodecParser ff_dnxhd_parser;
+extern const AVCodecParser ff_dolby_e_parser;
+extern const AVCodecParser ff_dpx_parser;
+extern const AVCodecParser ff_dvaudio_parser;
+extern const AVCodecParser ff_dvbsub_parser;
+extern const AVCodecParser ff_dvdsub_parser;
+extern const AVCodecParser ff_dvd_nav_parser;
+extern const AVCodecParser ff_flac_parser;
+extern const AVCodecParser ff_ftr_parser;
+extern const AVCodecParser ff_g723_1_parser;
+extern const AVCodecParser ff_g729_parser;
+extern const AVCodecParser ff_gif_parser;
+extern const AVCodecParser ff_gsm_parser;
+extern const AVCodecParser ff_h261_parser;
+extern const AVCodecParser ff_h263_parser;
+extern const AVCodecParser ff_h264_parser;
+extern const AVCodecParser ff_hevc_parser;
+extern const AVCodecParser ff_hdr_parser;
+extern const AVCodecParser ff_ipu_parser;
+extern const AVCodecParser ff_jpeg2000_parser;
+extern const AVCodecParser ff_misc4_parser;
+extern const AVCodecParser ff_mjpeg_parser;
+extern const AVCodecParser ff_mlp_parser;
+extern const AVCodecParser ff_mpeg4video_parser;
+extern const AVCodecParser ff_mpegaudio_parser;
+extern const AVCodecParser ff_mpegvideo_parser;
+extern const AVCodecParser ff_opus_parser;
+extern const AVCodecParser ff_png_parser;
+extern const AVCodecParser ff_pnm_parser;
+extern const AVCodecParser ff_qoi_parser;
+extern const AVCodecParser ff_rv30_parser;
+extern const AVCodecParser ff_rv40_parser;
+extern const AVCodecParser ff_sbc_parser;
+extern const AVCodecParser ff_sipr_parser;
+extern const AVCodecParser ff_tak_parser;
+extern const AVCodecParser ff_vc1_parser;
+extern const AVCodecParser ff_vorbis_parser;
+extern const AVCodecParser ff_vp3_parser;
+extern const AVCodecParser ff_vp8_parser;
+extern const AVCodecParser ff_vp9_parser;
+extern const AVCodecParser ff_webp_parser;
+extern const AVCodecParser ff_xbm_parser;
+extern const AVCodecParser ff_xma_parser;
+extern const AVCodecParser ff_xwd_parser;
+
+#include "libavcodec/parser_list.c"
+
+const AVCodecParser *av_parser_iterate(void **opaque)
+{
+ uintptr_t i = (uintptr_t)*opaque;
+ const AVCodecParser *p = parser_list[i];
+
+ if (p)
+ *opaque = (void*)(i + 1);
+
+ return p;
+}
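+
+/*
+ * Sketch: enumerating the compiled-in parsers with the opaque iteration
+ * state maintained above (assumes <stdio.h> for printf):
+ *
+ *     void *it = NULL;
+ *     const AVCodecParser *p;
+ *     while ((p = av_parser_iterate(&it)))
+ *         printf("parser handles codec id %d\n", p->codec_ids[0]);
+ */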
diff --git a/media/ffvpx/libavcodec/pixblockdsp.h b/media/ffvpx/libavcodec/pixblockdsp.h
new file mode 100644
index 0000000000..9b002aa3d6
--- /dev/null
+++ b/media/ffvpx/libavcodec/pixblockdsp.h
@@ -0,0 +1,62 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PIXBLOCKDSP_H
+#define AVCODEC_PIXBLOCKDSP_H
+
+#include <stdint.h>
+
+#include "config.h"
+
+#include "avcodec.h"
+
+typedef struct PixblockDSPContext {
+ void (*get_pixels)(int16_t *av_restrict block /* align 16 */,
+ const uint8_t *pixels /* align 8 */,
+ ptrdiff_t stride);
+ void (*get_pixels_unaligned)(int16_t *av_restrict block /* align 16 */,
+ const uint8_t *pixels,
+ ptrdiff_t stride);
+ void (*diff_pixels)(int16_t *av_restrict block /* align 16 */,
+ const uint8_t *s1 /* align 8 */,
+ const uint8_t *s2 /* align 8 */,
+ ptrdiff_t stride);
+ void (*diff_pixels_unaligned)(int16_t *av_restrict block /* align 16 */,
+ const uint8_t *s1,
+ const uint8_t *s2,
+ ptrdiff_t stride);
+
+} PixblockDSPContext;
+
+void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx);
+void ff_pixblockdsp_init_aarch64(PixblockDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_pixblockdsp_init_alpha(PixblockDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_pixblockdsp_init_arm(PixblockDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_pixblockdsp_init_ppc(PixblockDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_pixblockdsp_init_riscv(PixblockDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_pixblockdsp_init_x86(PixblockDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_pixblockdsp_init_mips(PixblockDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+
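+/*
+ * Sketch: how a codec typically uses this context (an illustrative fragment;
+ * DECLARE_ALIGNED is assumed from libavutil, and src/src1/src2/stride come
+ * from the caller; alignment requirements follow the comments above):
+ *
+ *     PixblockDSPContext pdsp;
+ *     ff_pixblockdsp_init(&pdsp, avctx);
+ *
+ *     DECLARE_ALIGNED(16, int16_t, block)[64];
+ *     pdsp.get_pixels(block, src, stride);          // copy an 8x8 block
+ *     pdsp.diff_pixels(block, src1, src2, stride);  // difference of two 8x8 blocks
+ */
+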
+#endif /* AVCODEC_PIXBLOCKDSP_H */
diff --git a/media/ffvpx/libavcodec/profiles.c b/media/ffvpx/libavcodec/profiles.c
new file mode 100644
index 0000000000..2230fc5415
--- /dev/null
+++ b/media/ffvpx/libavcodec/profiles.c
@@ -0,0 +1,185 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "avcodec.h"
+#include "profiles.h"
+
+#if !CONFIG_SMALL
+
+const AVProfile ff_aac_profiles[] = {
+ { FF_PROFILE_AAC_LOW, "LC" },
+ { FF_PROFILE_AAC_HE, "HE-AAC" },
+ { FF_PROFILE_AAC_HE_V2, "HE-AACv2" },
+ { FF_PROFILE_AAC_LD, "LD" },
+ { FF_PROFILE_AAC_ELD, "ELD" },
+ { FF_PROFILE_AAC_MAIN, "Main" },
+ { FF_PROFILE_AAC_SSR, "SSR" },
+ { FF_PROFILE_AAC_LTP, "LTP" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_dca_profiles[] = {
+ { FF_PROFILE_DTS, "DTS" },
+ { FF_PROFILE_DTS_ES, "DTS-ES" },
+ { FF_PROFILE_DTS_96_24, "DTS 96/24" },
+ { FF_PROFILE_DTS_HD_HRA, "DTS-HD HRA" },
+ { FF_PROFILE_DTS_HD_MA, "DTS-HD MA" },
+ { FF_PROFILE_DTS_EXPRESS, "DTS Express" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_dnxhd_profiles[] = {
+ { FF_PROFILE_DNXHD, "DNXHD"},
+ { FF_PROFILE_DNXHR_LB, "DNXHR LB"},
+ { FF_PROFILE_DNXHR_SQ, "DNXHR SQ"},
+ { FF_PROFILE_DNXHR_HQ, "DNXHR HQ" },
+ { FF_PROFILE_DNXHR_HQX, "DNXHR HQX"},
+ { FF_PROFILE_DNXHR_444, "DNXHR 444"},
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_h264_profiles[] = {
+ { FF_PROFILE_H264_BASELINE, "Baseline" },
+ { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
+ { FF_PROFILE_H264_MAIN, "Main" },
+ { FF_PROFILE_H264_EXTENDED, "Extended" },
+ { FF_PROFILE_H264_HIGH, "High" },
+ { FF_PROFILE_H264_HIGH_10, "High 10" },
+ { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
+ { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
+ { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
+ { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
+ { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
+ { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
+ { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
+ { FF_PROFILE_H264_MULTIVIEW_HIGH, "Multiview High" },
+ { FF_PROFILE_H264_STEREO_HIGH, "Stereo High" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_vvc_profiles[] = {
+ { FF_PROFILE_VVC_MAIN_10, "Main 10" },
+ { FF_PROFILE_VVC_MAIN_10_444, "Main 10 4:4:4" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_hevc_profiles[] = {
+ { FF_PROFILE_HEVC_MAIN, "Main" },
+ { FF_PROFILE_HEVC_MAIN_10, "Main 10" },
+ { FF_PROFILE_HEVC_MAIN_STILL_PICTURE, "Main Still Picture" },
+ { FF_PROFILE_HEVC_REXT, "Rext" },
+ { FF_PROFILE_HEVC_SCC, "Scc" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_jpeg2000_profiles[] = {
+ { FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0, "JPEG 2000 codestream restriction 0" },
+ { FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1, "JPEG 2000 codestream restriction 1" },
+ { FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION, "JPEG 2000 no codestream restrictions" },
+ { FF_PROFILE_JPEG2000_DCINEMA_2K, "JPEG 2000 digital cinema 2K" },
+ { FF_PROFILE_JPEG2000_DCINEMA_4K, "JPEG 2000 digital cinema 4K" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_mpeg2_video_profiles[] = {
+ { FF_PROFILE_MPEG2_422, "4:2:2" },
+ { FF_PROFILE_MPEG2_HIGH, "High" },
+ { FF_PROFILE_MPEG2_SS, "Spatially Scalable" },
+ { FF_PROFILE_MPEG2_SNR_SCALABLE, "SNR Scalable" },
+ { FF_PROFILE_MPEG2_MAIN, "Main" },
+ { FF_PROFILE_MPEG2_SIMPLE, "Simple" },
+ { FF_PROFILE_RESERVED, "Reserved" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_mpeg4_video_profiles[] = {
+ { FF_PROFILE_MPEG4_SIMPLE, "Simple Profile" },
+ { FF_PROFILE_MPEG4_SIMPLE_SCALABLE, "Simple Scalable Profile" },
+ { FF_PROFILE_MPEG4_CORE, "Core Profile" },
+ { FF_PROFILE_MPEG4_MAIN, "Main Profile" },
+ { FF_PROFILE_MPEG4_N_BIT, "N-bit Profile" },
+ { FF_PROFILE_MPEG4_SCALABLE_TEXTURE, "Scalable Texture Profile" },
+ { FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION, "Simple Face Animation Profile" },
+ { FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE, "Basic Animated Texture Profile" },
+ { FF_PROFILE_MPEG4_HYBRID, "Hybrid Profile" },
+ { FF_PROFILE_MPEG4_ADVANCED_REAL_TIME, "Advanced Real Time Simple Profile" },
+ { FF_PROFILE_MPEG4_CORE_SCALABLE, "Core Scalable Profile" },
+ { FF_PROFILE_MPEG4_ADVANCED_CODING, "Advanced Coding Profile" },
+ { FF_PROFILE_MPEG4_ADVANCED_CORE, "Advanced Core Profile" },
+ { FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE, "Advanced Scalable Texture Profile" },
+ { FF_PROFILE_MPEG4_SIMPLE_STUDIO, "Simple Studio Profile" },
+ { FF_PROFILE_MPEG4_ADVANCED_SIMPLE, "Advanced Simple Profile" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_vc1_profiles[] = {
+ { FF_PROFILE_VC1_SIMPLE, "Simple" },
+ { FF_PROFILE_VC1_MAIN, "Main" },
+ { FF_PROFILE_VC1_COMPLEX, "Complex" },
+ { FF_PROFILE_VC1_ADVANCED, "Advanced" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_vp9_profiles[] = {
+ { FF_PROFILE_VP9_0, "Profile 0" },
+ { FF_PROFILE_VP9_1, "Profile 1" },
+ { FF_PROFILE_VP9_2, "Profile 2" },
+ { FF_PROFILE_VP9_3, "Profile 3" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_av1_profiles[] = {
+ { FF_PROFILE_AV1_MAIN, "Main" },
+ { FF_PROFILE_AV1_HIGH, "High" },
+ { FF_PROFILE_AV1_PROFESSIONAL, "Professional" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_sbc_profiles[] = {
+ { FF_PROFILE_SBC_MSBC, "mSBC" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+const AVProfile ff_prores_profiles[] = {
+ { FF_PROFILE_PRORES_PROXY, "Proxy" },
+ { FF_PROFILE_PRORES_LT, "LT" },
+ { FF_PROFILE_PRORES_STANDARD, "Standard" },
+ { FF_PROFILE_PRORES_HQ, "HQ" },
+ { FF_PROFILE_PRORES_4444, "4444" },
+ { FF_PROFILE_PRORES_XQ, "XQ" },
+ { FF_PROFILE_UNKNOWN }
+};
+
+const AVProfile ff_mjpeg_profiles[] = {
+ { FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT, "Baseline" },
+ { FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT, "Sequential" },
+ { FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT, "Progressive" },
+ { FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS, "Lossless" },
+ { FF_PROFILE_MJPEG_JPEG_LS, "JPEG LS" },
+ { FF_PROFILE_UNKNOWN }
+};
+
+const AVProfile ff_arib_caption_profiles[] = {
+ { FF_PROFILE_ARIB_PROFILE_A, "Profile A" },
+ { FF_PROFILE_ARIB_PROFILE_C, "Profile C" },
+ { FF_PROFILE_UNKNOWN }
+};
+
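+/*
+ * Sketch: these tables back the public profile-name lookups, e.g. turning a
+ * decoder's reported profile into a printable string:
+ *
+ *     const char *name = avcodec_profile_name(AV_CODEC_ID_VP9, avctx->profile);
+ *     av_log(avctx, AV_LOG_INFO, "profile: %s\n", name ? name : "unknown");
+ */
+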
+#endif /* !CONFIG_SMALL */
diff --git a/media/ffvpx/libavcodec/profiles.h b/media/ffvpx/libavcodec/profiles.h
new file mode 100644
index 0000000000..41a19aa9ad
--- /dev/null
+++ b/media/ffvpx/libavcodec/profiles.h
@@ -0,0 +1,76 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PROFILES_H
+#define AVCODEC_PROFILES_H
+
+#include "avcodec.h"
+#include "libavutil/opt.h"
+
+#define FF_AVCTX_PROFILE_OPTION(name, description, type, value) \
+ {name, description, 0, AV_OPT_TYPE_CONST, {.i64 = value }, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_## type ##_PARAM, "avctx.profile"},
+
+#define FF_AAC_PROFILE_OPTS \
+ FF_AVCTX_PROFILE_OPTION("aac_main", NULL, AUDIO, FF_PROFILE_AAC_MAIN)\
+ FF_AVCTX_PROFILE_OPTION("aac_low", NULL, AUDIO, FF_PROFILE_AAC_LOW)\
+ FF_AVCTX_PROFILE_OPTION("aac_ssr", NULL, AUDIO, FF_PROFILE_AAC_SSR)\
+ FF_AVCTX_PROFILE_OPTION("aac_ltp", NULL, AUDIO, FF_PROFILE_AAC_LTP)\
+ FF_AVCTX_PROFILE_OPTION("aac_he", NULL, AUDIO, FF_PROFILE_AAC_HE)\
+ FF_AVCTX_PROFILE_OPTION("aac_he_v2", NULL, AUDIO, FF_PROFILE_AAC_HE_V2)\
+ FF_AVCTX_PROFILE_OPTION("aac_ld", NULL, AUDIO, FF_PROFILE_AAC_LD)\
+ FF_AVCTX_PROFILE_OPTION("aac_eld", NULL, AUDIO, FF_PROFILE_AAC_ELD)\
+ FF_AVCTX_PROFILE_OPTION("mpeg2_aac_low", NULL, AUDIO, FF_PROFILE_MPEG2_AAC_LOW)\
+ FF_AVCTX_PROFILE_OPTION("mpeg2_aac_he", NULL, AUDIO, FF_PROFILE_MPEG2_AAC_HE)\
+
+#define FF_MPEG4_PROFILE_OPTS \
+ FF_AVCTX_PROFILE_OPTION("mpeg4_sp", NULL, VIDEO, FF_PROFILE_MPEG4_SIMPLE)\
+ FF_AVCTX_PROFILE_OPTION("mpeg4_core", NULL, VIDEO, FF_PROFILE_MPEG4_CORE)\
+ FF_AVCTX_PROFILE_OPTION("mpeg4_main", NULL, VIDEO, FF_PROFILE_MPEG4_MAIN)\
+ FF_AVCTX_PROFILE_OPTION("mpeg4_asp", NULL, VIDEO, FF_PROFILE_MPEG4_ADVANCED_SIMPLE)\
+
+#define FF_MPEG2_PROFILE_OPTS \
+ FF_AVCTX_PROFILE_OPTION("422", NULL, VIDEO, FF_PROFILE_MPEG2_422)\
+ FF_AVCTX_PROFILE_OPTION("high", NULL, VIDEO, FF_PROFILE_MPEG2_HIGH)\
+ FF_AVCTX_PROFILE_OPTION("ss", NULL, VIDEO, FF_PROFILE_MPEG2_SS)\
+ FF_AVCTX_PROFILE_OPTION("snr", NULL, VIDEO, FF_PROFILE_MPEG2_SNR_SCALABLE)\
+ FF_AVCTX_PROFILE_OPTION("main", NULL, VIDEO, FF_PROFILE_MPEG2_MAIN)\
+ FF_AVCTX_PROFILE_OPTION("simple", NULL, VIDEO, FF_PROFILE_MPEG2_SIMPLE)\
+
+#define FF_AV1_PROFILE_OPTS \
+ FF_AVCTX_PROFILE_OPTION("main", NULL, VIDEO, FF_PROFILE_AV1_MAIN)\
+ FF_AVCTX_PROFILE_OPTION("high", NULL, VIDEO, FF_PROFILE_AV1_HIGH)\
+ FF_AVCTX_PROFILE_OPTION("professional", NULL, VIDEO, FF_PROFILE_AV1_PROFESSIONAL)\
+
+extern const AVProfile ff_aac_profiles[];
+extern const AVProfile ff_dca_profiles[];
+extern const AVProfile ff_dnxhd_profiles[];
+extern const AVProfile ff_h264_profiles[];
+extern const AVProfile ff_hevc_profiles[];
+extern const AVProfile ff_vvc_profiles[];
+extern const AVProfile ff_jpeg2000_profiles[];
+extern const AVProfile ff_mpeg2_video_profiles[];
+extern const AVProfile ff_mpeg4_video_profiles[];
+extern const AVProfile ff_vc1_profiles[];
+extern const AVProfile ff_vp9_profiles[];
+extern const AVProfile ff_av1_profiles[];
+extern const AVProfile ff_sbc_profiles[];
+extern const AVProfile ff_prores_profiles[];
+extern const AVProfile ff_mjpeg_profiles[];
+extern const AVProfile ff_arib_caption_profiles[];
+
+#endif /* AVCODEC_PROFILES_H */
diff --git a/media/ffvpx/libavcodec/pthread.c b/media/ffvpx/libavcodec/pthread.c
new file mode 100644
index 0000000000..ca84b81391
--- /dev/null
+++ b/media/ffvpx/libavcodec/pthread.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2004 Roman Shaposhnik
+ * Copyright (c) 2008 Alexander Strange (astrange@ithinksw.com)
+ *
+ * Many thanks to Steven M. Schultz for providing clever ideas and
+ * to Michael Niedermayer <michaelni@gmx.at> for writing initial
+ * implementation.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Multithreading support functions
+ * @see doc/multithreading.txt
+ */
+
+#include "libavutil/thread.h"
+
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "pthread_internal.h"
+#include "thread.h"
+
+/**
+ * Set the threading algorithms used.
+ *
+ * Threading requires more than one thread.
+ * Frame threading requires entire frames to be passed to the codec,
+ * and introduces extra decoding delay, so it is incompatible with low_delay.
+ *
+ * @param avctx The context.
+ */
+static void validate_thread_parameters(AVCodecContext *avctx)
+{
+ int frame_threading_supported = (avctx->codec->capabilities & AV_CODEC_CAP_FRAME_THREADS)
+ && !(avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
+ && !(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS);
+ if (avctx->thread_count == 1) {
+ avctx->active_thread_type = 0;
+ } else if (frame_threading_supported && (avctx->thread_type & FF_THREAD_FRAME)) {
+ avctx->active_thread_type = FF_THREAD_FRAME;
+ } else if (avctx->codec->capabilities & AV_CODEC_CAP_SLICE_THREADS &&
+ avctx->thread_type & FF_THREAD_SLICE) {
+ avctx->active_thread_type = FF_THREAD_SLICE;
+ } else if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_AUTO_THREADS)) {
+ avctx->thread_count = 1;
+ avctx->active_thread_type = 0;
+ }
+
+ if (avctx->thread_count > MAX_AUTO_THREADS)
+ av_log(avctx, AV_LOG_WARNING,
+ "Application has requested %d threads. Using a thread count greater than %d is not recommended.\n",
+ avctx->thread_count, MAX_AUTO_THREADS);
+}
+
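+/*
+ * Sketch of the caller-side knobs this validation acts on, set before
+ * avcodec_open2() (codec is an assumed AVCodec pointer; a thread_count of 0
+ * requests automatic detection):
+ *
+ *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
+ *     avctx->thread_count = 0;
+ *     avctx->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;
+ *     int ret = avcodec_open2(avctx, codec, NULL);
+ *     if (ret < 0)
+ *         return ret;
+ *     // avctx->active_thread_type reports what was actually enabled
+ */
+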
+int ff_thread_init(AVCodecContext *avctx)
+{
+ validate_thread_parameters(avctx);
+
+ if (avctx->active_thread_type&FF_THREAD_SLICE)
+ return ff_slice_thread_init(avctx);
+ else if (avctx->active_thread_type&FF_THREAD_FRAME)
+ return ff_frame_thread_init(avctx);
+
+ return 0;
+}
+
+void ff_thread_free(AVCodecContext *avctx)
+{
+ if (avctx->active_thread_type&FF_THREAD_FRAME)
+ ff_frame_thread_free(avctx, avctx->thread_count);
+ else
+ ff_slice_thread_free(avctx);
+}
+
+av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
+{
+ unsigned cnt = *(unsigned*)((char*)obj + offsets[0]);
+ const unsigned *cur_offset = offsets;
+
+ *(unsigned*)((char*)obj + offsets[0]) = 0;
+
+ for (; *(++cur_offset) != THREAD_SENTINEL && cnt; cnt--)
+ pthread_mutex_destroy((pthread_mutex_t*)((char*)obj + *cur_offset));
+ for (; *(++cur_offset) != THREAD_SENTINEL && cnt; cnt--)
+ pthread_cond_destroy ((pthread_cond_t *)((char*)obj + *cur_offset));
+}
+
+av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
+{
+ const unsigned *cur_offset = offsets;
+ unsigned cnt = 0;
+ int err;
+
+#define PTHREAD_INIT_LOOP(type) \
+ for (; *(++cur_offset) != THREAD_SENTINEL; cnt++) { \
+ pthread_ ## type ## _t *dst = (void*)((char*)obj + *cur_offset); \
+ err = pthread_ ## type ## _init(dst, NULL); \
+ if (err) { \
+ err = AVERROR(err); \
+ goto fail; \
+ } \
+ }
+ PTHREAD_INIT_LOOP(mutex)
+ PTHREAD_INIT_LOOP(cond)
+
+fail:
+ *(unsigned*)((char*)obj + offsets[0]) = cnt;
+ return err;
+}
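+
+/*
+ * Sketch of the offsets[] convention both helpers above share (MyContext and
+ * its members are hypothetical; offsetof comes from <stddef.h> and
+ * THREAD_SENTINEL from pthread_internal.h): offsets[0] is the offset of the
+ * init counter, followed by the mutex offsets, a sentinel, the
+ * condition-variable offsets, and a final sentinel.
+ *
+ *     static const unsigned my_offsets[] = {
+ *         offsetof(MyContext, pthread_init_cnt),
+ *         offsetof(MyContext, lock),
+ *         THREAD_SENTINEL,
+ *         offsetof(MyContext, cond),
+ *         THREAD_SENTINEL,
+ *     };
+ *
+ *     // init:  ff_pthread_init(ctx, my_offsets);
+ *     // close: ff_pthread_free(ctx, my_offsets);
+ */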
diff --git a/media/ffvpx/libavcodec/pthread_frame.c b/media/ffvpx/libavcodec/pthread_frame.c
new file mode 100644
index 0000000000..d9d5afaa82
--- /dev/null
+++ b/media/ffvpx/libavcodec/pthread_frame.c
@@ -0,0 +1,977 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Frame multithreading support functions
+ * @see doc/multithreading.txt
+ */
+
+#include "config.h"
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "hwconfig.h"
+#include "internal.h"
+#include "pthread_internal.h"
+#include "thread.h"
+#include "threadframe.h"
+#include "version_major.h"
+
+#include "libavutil/avassert.h"
+#include "libavutil/buffer.h"
+#include "libavutil/common.h"
+#include "libavutil/cpu.h"
+#include "libavutil/frame.h"
+#include "libavutil/internal.h"
+#include "libavutil/log.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/thread.h"
+
+enum {
+ ///< Set when the thread is awaiting a packet.
+ STATE_INPUT_READY,
+ ///< Set before the codec has called ff_thread_finish_setup().
+ STATE_SETTING_UP,
+ /**
+ * Set when the codec calls get_buffer().
+ * State is returned to STATE_SETTING_UP afterwards.
+ */
+ STATE_GET_BUFFER,
+ /**
+ * Set when the codec calls get_format().
+ * State is returned to STATE_SETTING_UP afterwards.
+ */
+ STATE_GET_FORMAT,
+ ///< Set after the codec has called ff_thread_finish_setup().
+ STATE_SETUP_FINISHED,
+};
+
+enum {
+ UNINITIALIZED, ///< Thread has not been created, AVCodec->close mustn't be called
+ NEEDS_CLOSE, ///< FFCodec->close needs to be called
+ INITIALIZED, ///< Thread has been properly set up
+};
+
+/**
+ * Context used by codec threads and stored in their AVCodecInternal thread_ctx.
+ */
+typedef struct PerThreadContext {
+ struct FrameThreadContext *parent;
+
+ pthread_t thread;
+ int thread_init;
+ unsigned pthread_init_cnt;///< Number of successfully initialized mutexes/conditions
+ pthread_cond_t input_cond; ///< Used to wait for a new packet from the main thread.
+ pthread_cond_t progress_cond; ///< Used by child threads to wait for progress to change.
+ pthread_cond_t output_cond; ///< Used by the main thread to wait for frames to finish.
+
+ pthread_mutex_t mutex; ///< Mutex used to protect the contents of the PerThreadContext.
+ pthread_mutex_t progress_mutex; ///< Mutex used to protect frame progress values and progress_cond.
+
+ AVCodecContext *avctx; ///< Context used to decode packets passed to this thread.
+
+ AVPacket *avpkt; ///< Input packet (for decoding) or output (for encoding).
+
+ AVFrame *frame; ///< Output frame (for decoding) or input (for encoding).
+ int got_frame; ///< The output of got_picture_ptr from the last avcodec_decode_video() call.
+ int result; ///< The result of the last codec decode/encode() call.
+
+ atomic_int state;
+
+ int die; ///< Set when the thread should exit.
+
+ int hwaccel_serializing;
+ int async_serializing;
+
+ atomic_int debug_threads; ///< Set if the FF_DEBUG_THREADS option is set.
+} PerThreadContext;
+
+/**
+ * Context stored in the client AVCodecInternal thread_ctx.
+ */
+typedef struct FrameThreadContext {
+ PerThreadContext *threads; ///< The contexts for each thread.
+ PerThreadContext *prev_thread; ///< The last thread submit_packet() was called on.
+
+ unsigned pthread_init_cnt; ///< Number of successfully initialized mutexes/conditions
+ pthread_mutex_t buffer_mutex; ///< Mutex used to protect get/release_buffer().
+ /**
+ * This lock is used for ensuring threads run in serial when hwaccel
+ * is used.
+ */
+ pthread_mutex_t hwaccel_mutex;
+ pthread_mutex_t async_mutex;
+ pthread_cond_t async_cond;
+ int async_lock;
+
+ int next_decoding; ///< The next context to submit a packet to.
+ int next_finished; ///< The next context to return output from.
+
+ int delaying; /**<
+ * Set for the first N packets, where N is the number of threads.
+ * While it is set, ff_thread_en/decode_frame won't return any results.
+ */
+
+ /* hwaccel state is temporarily stored here in order to transfer its ownership
+ * to the next decoding thread without the need for extra synchronization */
+ const AVHWAccel *stash_hwaccel;
+ void *stash_hwaccel_context;
+ void *stash_hwaccel_priv;
+} FrameThreadContext;
+
+static void async_lock(FrameThreadContext *fctx)
+{
+ pthread_mutex_lock(&fctx->async_mutex);
+ while (fctx->async_lock)
+ pthread_cond_wait(&fctx->async_cond, &fctx->async_mutex);
+ fctx->async_lock = 1;
+ pthread_mutex_unlock(&fctx->async_mutex);
+}
+
+static void async_unlock(FrameThreadContext *fctx)
+{
+ pthread_mutex_lock(&fctx->async_mutex);
+ av_assert0(fctx->async_lock);
+ fctx->async_lock = 0;
+ pthread_cond_broadcast(&fctx->async_cond);
+ pthread_mutex_unlock(&fctx->async_mutex);
+}
+
+static void thread_set_name(PerThreadContext *p)
+{
+ AVCodecContext *avctx = p->avctx;
+ int idx = p - p->parent->threads;
+ char name[16];
+
+ snprintf(name, sizeof(name), "av:%.7s:df%d", avctx->codec->name, idx);
+
+ ff_thread_setname(name);
+}
+
+/**
+ * Codec worker thread.
+ *
+ * Automatically calls ff_thread_finish_setup() if the codec does
+ * not provide an update_thread_context method, or if the codec returns
+ * before calling it.
+ */
+static attribute_align_arg void *frame_worker_thread(void *arg)
+{
+ PerThreadContext *p = arg;
+ AVCodecContext *avctx = p->avctx;
+ const FFCodec *codec = ffcodec(avctx->codec);
+
+ thread_set_name(p);
+
+ pthread_mutex_lock(&p->mutex);
+ while (1) {
+ while (atomic_load(&p->state) == STATE_INPUT_READY && !p->die)
+ pthread_cond_wait(&p->input_cond, &p->mutex);
+
+ if (p->die) break;
+
+ if (!codec->update_thread_context)
+ ff_thread_finish_setup(avctx);
+
+ /* If a decoder supports hwaccel, then it must call ff_get_format().
+ * Since that call must happen before ff_thread_finish_setup(), the
+ * decoder is required to implement update_thread_context() and call
+ * ff_thread_finish_setup() manually. Therefore the above
+ * ff_thread_finish_setup() call did not happen and hwaccel_serializing
+ * cannot be true here. */
+ av_assert0(!p->hwaccel_serializing);
+
+ /* if the previous thread uses hwaccel then we take the lock to ensure
+ * the threads don't run concurrently */
+ if (avctx->hwaccel) {
+ pthread_mutex_lock(&p->parent->hwaccel_mutex);
+ p->hwaccel_serializing = 1;
+ }
+
+ av_frame_unref(p->frame);
+ p->got_frame = 0;
+ p->result = codec->cb.decode(avctx, p->frame, &p->got_frame, p->avpkt);
+
+ if ((p->result < 0 || !p->got_frame) && p->frame->buf[0])
+ ff_thread_release_buffer(avctx, p->frame);
+
+ if (atomic_load(&p->state) == STATE_SETTING_UP)
+ ff_thread_finish_setup(avctx);
+
+ if (p->hwaccel_serializing) {
+ /* wipe hwaccel state to avoid stale pointers lying around;
+ * the state was transferred to FrameThreadContext in
+ * ff_thread_finish_setup(), so nothing is leaked */
+ avctx->hwaccel = NULL;
+ avctx->hwaccel_context = NULL;
+ avctx->internal->hwaccel_priv_data = NULL;
+
+ p->hwaccel_serializing = 0;
+ pthread_mutex_unlock(&p->parent->hwaccel_mutex);
+ }
+ av_assert0(!avctx->hwaccel);
+
+ if (p->async_serializing) {
+ p->async_serializing = 0;
+
+ async_unlock(p->parent);
+ }
+
+ pthread_mutex_lock(&p->progress_mutex);
+
+ atomic_store(&p->state, STATE_INPUT_READY);
+
+ pthread_cond_broadcast(&p->progress_cond);
+ pthread_cond_signal(&p->output_cond);
+ pthread_mutex_unlock(&p->progress_mutex);
+ }
+ pthread_mutex_unlock(&p->mutex);
+
+ return NULL;
+}
+
+/**
+ * Update the next thread's AVCodecContext with values from the reference thread's context.
+ *
+ * @param dst The destination context.
+ * @param src The source context.
+ * @param for_user 0 if the destination is a codec thread, 1 if the destination is the user's thread
+ * @return 0 on success, negative error code on failure
+ */
+static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int for_user)
+{
+ const FFCodec *const codec = ffcodec(dst->codec);
+ int err = 0;
+
+ if (dst != src && (for_user || codec->update_thread_context)) {
+ dst->time_base = src->time_base;
+ dst->framerate = src->framerate;
+ dst->width = src->width;
+ dst->height = src->height;
+ dst->pix_fmt = src->pix_fmt;
+ dst->sw_pix_fmt = src->sw_pix_fmt;
+
+ dst->coded_width = src->coded_width;
+ dst->coded_height = src->coded_height;
+
+ dst->has_b_frames = src->has_b_frames;
+ dst->idct_algo = src->idct_algo;
+ dst->properties = src->properties;
+
+ dst->bits_per_coded_sample = src->bits_per_coded_sample;
+ dst->sample_aspect_ratio = src->sample_aspect_ratio;
+
+ dst->profile = src->profile;
+ dst->level = src->level;
+
+ dst->bits_per_raw_sample = src->bits_per_raw_sample;
+ dst->ticks_per_frame = src->ticks_per_frame;
+ dst->color_primaries = src->color_primaries;
+
+ dst->color_trc = src->color_trc;
+ dst->colorspace = src->colorspace;
+ dst->color_range = src->color_range;
+ dst->chroma_sample_location = src->chroma_sample_location;
+
+ dst->sample_rate = src->sample_rate;
+ dst->sample_fmt = src->sample_fmt;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ dst->channels = src->channels;
+ dst->channel_layout = src->channel_layout;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ err = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
+ if (err < 0)
+ return err;
+
+ if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
+ (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
+ av_buffer_unref(&dst->hw_frames_ctx);
+
+ if (src->hw_frames_ctx) {
+ dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
+ if (!dst->hw_frames_ctx)
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ dst->hwaccel_flags = src->hwaccel_flags;
+
+ err = av_buffer_replace(&dst->internal->pool, src->internal->pool);
+ if (err < 0)
+ return err;
+ }
+
+ if (for_user) {
+ if (codec->update_thread_context_for_user)
+ err = codec->update_thread_context_for_user(dst, src);
+ } else {
+ if (codec->update_thread_context)
+ err = codec->update_thread_context(dst, src);
+ }
+
+ return err;
+}
+
+/**
+ * Update the next thread's AVCodecContext with values set by the user.
+ *
+ * @param dst The destination context.
+ * @param src The source context.
+ * @return 0 on success, negative error code on failure
+ */
+static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
+{
+ int err;
+
+ dst->flags = src->flags;
+
+ dst->draw_horiz_band= src->draw_horiz_band;
+ dst->get_buffer2 = src->get_buffer2;
+
+ dst->opaque = src->opaque;
+ dst->debug = src->debug;
+
+ dst->slice_flags = src->slice_flags;
+ dst->flags2 = src->flags2;
+ dst->export_side_data = src->export_side_data;
+
+ dst->skip_loop_filter = src->skip_loop_filter;
+ dst->skip_idct = src->skip_idct;
+ dst->skip_frame = src->skip_frame;
+
+ dst->frame_num = src->frame_num;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ dst->frame_number = src->frame_number;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
+ dst->reordered_opaque = src->reordered_opaque;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (src->slice_count && src->slice_offset) {
+ if (dst->slice_count < src->slice_count) {
+ int err = av_reallocp_array(&dst->slice_offset, src->slice_count,
+ sizeof(*dst->slice_offset));
+ if (err < 0)
+ return err;
+ }
+ memcpy(dst->slice_offset, src->slice_offset,
+ src->slice_count * sizeof(*dst->slice_offset));
+ }
+ dst->slice_count = src->slice_count;
+
+ av_packet_unref(dst->internal->last_pkt_props);
+ err = av_packet_copy_props(dst->internal->last_pkt_props, src->internal->last_pkt_props);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
+ AVPacket *avpkt)
+{
+ FrameThreadContext *fctx = p->parent;
+ PerThreadContext *prev_thread = fctx->prev_thread;
+ const AVCodec *codec = p->avctx->codec;
+ int ret;
+
+ if (!avpkt->size && !(codec->capabilities & AV_CODEC_CAP_DELAY))
+ return 0;
+
+ pthread_mutex_lock(&p->mutex);
+
+ ret = update_context_from_user(p->avctx, user_avctx);
+ if (ret) {
+ pthread_mutex_unlock(&p->mutex);
+ return ret;
+ }
+ atomic_store_explicit(&p->debug_threads,
+ (p->avctx->debug & FF_DEBUG_THREADS) != 0,
+ memory_order_relaxed);
+
+ if (prev_thread) {
+ int err;
+ if (atomic_load(&prev_thread->state) == STATE_SETTING_UP) {
+ pthread_mutex_lock(&prev_thread->progress_mutex);
+ while (atomic_load(&prev_thread->state) == STATE_SETTING_UP)
+ pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
+ pthread_mutex_unlock(&prev_thread->progress_mutex);
+ }
+
+ err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
+ if (err) {
+ pthread_mutex_unlock(&p->mutex);
+ return err;
+ }
+ }
+
+ /* transfer the stashed hwaccel state, if any */
+ av_assert0(!p->avctx->hwaccel);
+ FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel);
+ FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context);
+ FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
+
+ av_packet_unref(p->avpkt);
+ ret = av_packet_ref(p->avpkt, avpkt);
+ if (ret < 0) {
+ pthread_mutex_unlock(&p->mutex);
+ av_log(p->avctx, AV_LOG_ERROR, "av_packet_ref() failed in submit_packet()\n");
+ return ret;
+ }
+
+ atomic_store(&p->state, STATE_SETTING_UP);
+ pthread_cond_signal(&p->input_cond);
+ pthread_mutex_unlock(&p->mutex);
+
+ fctx->prev_thread = p;
+ fctx->next_decoding++;
+
+ return 0;
+}
+
+int ff_thread_decode_frame(AVCodecContext *avctx,
+ AVFrame *picture, int *got_picture_ptr,
+ AVPacket *avpkt)
+{
+ FrameThreadContext *fctx = avctx->internal->thread_ctx;
+ int finished = fctx->next_finished;
+ PerThreadContext *p;
+ int err;
+
+ /* release the async lock, permitting blocked hwaccel threads to
+ * go forward while we are in this function */
+ async_unlock(fctx);
+
+ /*
+ * Submit a packet to the next decoding thread.
+ */
+
+ p = &fctx->threads[fctx->next_decoding];
+ err = submit_packet(p, avctx, avpkt);
+ if (err)
+ goto finish;
+
+ /*
+ * If we're still receiving the initial packets, don't return a frame.
+ */
+
+ if (fctx->next_decoding > (avctx->thread_count-1-(avctx->codec_id == AV_CODEC_ID_FFV1)))
+ fctx->delaying = 0;
+
+ if (fctx->delaying) {
+ *got_picture_ptr=0;
+ if (avpkt->size) {
+ err = avpkt->size;
+ goto finish;
+ }
+ }
+
+ /*
+ * Return the next available frame from the oldest thread.
+ * If we're at the end of the stream, then we have to skip threads that
+ * didn't output a frame/error, because we don't want to accidentally signal
+ * EOF (avpkt->size == 0 && *got_picture_ptr == 0 && err >= 0).
+ */
+
+ do {
+ p = &fctx->threads[finished++];
+
+ if (atomic_load(&p->state) != STATE_INPUT_READY) {
+ pthread_mutex_lock(&p->progress_mutex);
+ while (atomic_load_explicit(&p->state, memory_order_relaxed) != STATE_INPUT_READY)
+ pthread_cond_wait(&p->output_cond, &p->progress_mutex);
+ pthread_mutex_unlock(&p->progress_mutex);
+ }
+
+ av_frame_move_ref(picture, p->frame);
+ *got_picture_ptr = p->got_frame;
+ picture->pkt_dts = p->avpkt->dts;
+ err = p->result;
+
+ /*
+ * A later call with avpkt->size == 0 may loop over all threads,
+ * including this one, searching for a frame/error to return before being
+ * stopped by the "finished != fctx->next_finished" condition.
+ * Make sure we don't mistakenly return the same frame/error again.
+ */
+ p->got_frame = 0;
+ p->result = 0;
+
+ if (finished >= avctx->thread_count) finished = 0;
+ } while (!avpkt->size && !*got_picture_ptr && err >= 0 && finished != fctx->next_finished);
+
+ update_context_from_thread(avctx, p->avctx, 1);
+
+ if (fctx->next_decoding >= avctx->thread_count) fctx->next_decoding = 0;
+
+ fctx->next_finished = finished;
+
+ /* return the size of the consumed packet if no error occurred */
+ if (err >= 0)
+ err = avpkt->size;
+finish:
+ async_lock(fctx);
+ return err;
+}
+
+void ff_thread_report_progress(ThreadFrame *f, int n, int field)
+{
+ PerThreadContext *p;
+ atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;
+
+ if (!progress ||
+ atomic_load_explicit(&progress[field], memory_order_relaxed) >= n)
+ return;
+
+ p = f->owner[field]->internal->thread_ctx;
+
+ if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
+ av_log(f->owner[field], AV_LOG_DEBUG,
+ "%p finished %d field %d\n", progress, n, field);
+
+ pthread_mutex_lock(&p->progress_mutex);
+
+ atomic_store_explicit(&progress[field], n, memory_order_release);
+
+ pthread_cond_broadcast(&p->progress_cond);
+ pthread_mutex_unlock(&p->progress_mutex);
+}
+
+void ff_thread_await_progress(const ThreadFrame *f, int n, int field)
+{
+ PerThreadContext *p;
+ atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;
+
+ if (!progress ||
+ atomic_load_explicit(&progress[field], memory_order_acquire) >= n)
+ return;
+
+ p = f->owner[field]->internal->thread_ctx;
+
+ if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
+ av_log(f->owner[field], AV_LOG_DEBUG,
+ "thread awaiting %d field %d from %p\n", n, field, progress);
+
+ pthread_mutex_lock(&p->progress_mutex);
+ while (atomic_load_explicit(&progress[field], memory_order_relaxed) < n)
+ pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
+ pthread_mutex_unlock(&p->progress_mutex);
+}
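+
+/*
+ * Illustrative sketch (not part of the upstream patch) of how a frame-threaded
+ * decoder typically drives the two calls above; "cur" and "ref" stand for
+ * hypothetical ThreadFrame variables owned by the calling codec:
+ *
+ *     // producer: after reconstructing macroblock row mb_y of the current frame
+ *     ff_thread_report_progress(&cur, mb_y, 0);
+ *
+ *     // consumer: before reading reference pixels from row mb_y of a reference frame
+ *     ff_thread_await_progress(&ref, mb_y, 0);
+ *
+ * The third argument selects the field (0 or 1) for field-coded content.
+ */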
+
+void ff_thread_finish_setup(AVCodecContext *avctx) {
+ PerThreadContext *p = avctx->internal->thread_ctx;
+
+ if (!(avctx->active_thread_type&FF_THREAD_FRAME)) return;
+
+ if (avctx->hwaccel && !p->hwaccel_serializing) {
+ pthread_mutex_lock(&p->parent->hwaccel_mutex);
+ p->hwaccel_serializing = 1;
+ }
+
+ /* this assumes that no hwaccel calls happen before ff_thread_finish_setup() */
+ if (avctx->hwaccel &&
+ !(avctx->hwaccel->caps_internal & HWACCEL_CAP_ASYNC_SAFE)) {
+ p->async_serializing = 1;
+
+ async_lock(p->parent);
+ }
+
+ /* save hwaccel state for passing to the next thread;
+ * this is done here so that this worker thread can wipe its own hwaccel
+ * state after decoding, without requiring synchronization */
+ av_assert0(!p->parent->stash_hwaccel);
+ p->parent->stash_hwaccel = avctx->hwaccel;
+ p->parent->stash_hwaccel_context = avctx->hwaccel_context;
+ p->parent->stash_hwaccel_priv = avctx->internal->hwaccel_priv_data;
+
+ pthread_mutex_lock(&p->progress_mutex);
+ if (atomic_load(&p->state) == STATE_SETUP_FINISHED) {
+ av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
+ }
+
+ atomic_store(&p->state, STATE_SETUP_FINISHED);
+
+ pthread_cond_broadcast(&p->progress_cond);
+ pthread_mutex_unlock(&p->progress_mutex);
+}
+
+/// Waits for all threads to finish.
+static void park_frame_worker_threads(FrameThreadContext *fctx, int thread_count)
+{
+ int i;
+
+ async_unlock(fctx);
+
+ for (i = 0; i < thread_count; i++) {
+ PerThreadContext *p = &fctx->threads[i];
+
+ if (atomic_load(&p->state) != STATE_INPUT_READY) {
+ pthread_mutex_lock(&p->progress_mutex);
+ while (atomic_load(&p->state) != STATE_INPUT_READY)
+ pthread_cond_wait(&p->output_cond, &p->progress_mutex);
+ pthread_mutex_unlock(&p->progress_mutex);
+ }
+ p->got_frame = 0;
+ }
+
+ async_lock(fctx);
+}
+
+#define OFF(member) offsetof(FrameThreadContext, member)
+DEFINE_OFFSET_ARRAY(FrameThreadContext, thread_ctx, pthread_init_cnt,
+ (OFF(buffer_mutex), OFF(hwaccel_mutex), OFF(async_mutex)),
+ (OFF(async_cond)));
+#undef OFF
+
+#define OFF(member) offsetof(PerThreadContext, member)
+DEFINE_OFFSET_ARRAY(PerThreadContext, per_thread, pthread_init_cnt,
+ (OFF(progress_mutex), OFF(mutex)),
+ (OFF(input_cond), OFF(progress_cond), OFF(output_cond)));
+#undef OFF
+
+void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
+{
+ FrameThreadContext *fctx = avctx->internal->thread_ctx;
+ const FFCodec *codec = ffcodec(avctx->codec);
+ int i;
+
+ park_frame_worker_threads(fctx, thread_count);
+
+ for (i = 0; i < thread_count; i++) {
+ PerThreadContext *p = &fctx->threads[i];
+ AVCodecContext *ctx = p->avctx;
+
+ if (ctx->internal) {
+ if (p->thread_init == INITIALIZED) {
+ pthread_mutex_lock(&p->mutex);
+ p->die = 1;
+ pthread_cond_signal(&p->input_cond);
+ pthread_mutex_unlock(&p->mutex);
+
+ pthread_join(p->thread, NULL);
+ }
+ if (codec->close && p->thread_init != UNINITIALIZED)
+ codec->close(ctx);
+
+ if (ctx->priv_data) {
+ if (codec->p.priv_class)
+ av_opt_free(ctx->priv_data);
+ av_freep(&ctx->priv_data);
+ }
+
+ av_freep(&ctx->slice_offset);
+
+ av_buffer_unref(&ctx->internal->pool);
+ av_packet_free(&ctx->internal->last_pkt_props);
+ av_freep(&ctx->internal);
+ av_buffer_unref(&ctx->hw_frames_ctx);
+ }
+
+ av_frame_free(&p->frame);
+
+ ff_pthread_free(p, per_thread_offsets);
+ av_packet_free(&p->avpkt);
+
+ av_freep(&p->avctx);
+ }
+
+ av_freep(&fctx->threads);
+ ff_pthread_free(fctx, thread_ctx_offsets);
+
+ /* if we have stashed hwaccel state, move it to the user-facing context,
+ * so it will be freed in avcodec_close() */
+ av_assert0(!avctx->hwaccel);
+ FFSWAP(const AVHWAccel*, avctx->hwaccel, fctx->stash_hwaccel);
+ FFSWAP(void*, avctx->hwaccel_context, fctx->stash_hwaccel_context);
+ FFSWAP(void*, avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
+
+ av_freep(&avctx->internal->thread_ctx);
+}
+
+static av_cold int init_thread(PerThreadContext *p, int *threads_to_free,
+ FrameThreadContext *fctx, AVCodecContext *avctx,
+ const FFCodec *codec, int first)
+{
+ AVCodecContext *copy;
+ int err;
+
+ atomic_init(&p->state, STATE_INPUT_READY);
+
+ copy = av_memdup(avctx, sizeof(*avctx));
+ if (!copy)
+ return AVERROR(ENOMEM);
+ copy->priv_data = NULL;
+
+ /* From now on, this PerThreadContext will be cleaned up by
+ * ff_frame_thread_free in case of errors. */
+ (*threads_to_free)++;
+
+ p->parent = fctx;
+ p->avctx = copy;
+
+ copy->internal = av_mallocz(sizeof(*copy->internal));
+ if (!copy->internal)
+ return AVERROR(ENOMEM);
+ copy->internal->thread_ctx = p;
+
+ copy->delay = avctx->delay;
+
+ if (codec->priv_data_size) {
+ copy->priv_data = av_mallocz(codec->priv_data_size);
+ if (!copy->priv_data)
+ return AVERROR(ENOMEM);
+
+ if (codec->p.priv_class) {
+ *(const AVClass **)copy->priv_data = codec->p.priv_class;
+ err = av_opt_copy(copy->priv_data, avctx->priv_data);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ err = ff_pthread_init(p, per_thread_offsets);
+ if (err < 0)
+ return err;
+
+ if (!(p->frame = av_frame_alloc()) ||
+ !(p->avpkt = av_packet_alloc()))
+ return AVERROR(ENOMEM);
+
+ if (!first)
+ copy->internal->is_copy = 1;
+
+ copy->internal->last_pkt_props = av_packet_alloc();
+ if (!copy->internal->last_pkt_props)
+ return AVERROR(ENOMEM);
+
+ if (codec->init) {
+ err = codec->init(copy);
+ if (err < 0) {
+ if (codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP)
+ p->thread_init = NEEDS_CLOSE;
+ return err;
+ }
+ }
+ p->thread_init = NEEDS_CLOSE;
+
+ if (first)
+ update_context_from_thread(avctx, copy, 1);
+
+ atomic_init(&p->debug_threads, (copy->debug & FF_DEBUG_THREADS) != 0);
+
+ err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
+ if (err < 0)
+ return err;
+ p->thread_init = INITIALIZED;
+
+ return 0;
+}
+
+int ff_frame_thread_init(AVCodecContext *avctx)
+{
+ int thread_count = avctx->thread_count;
+ const FFCodec *codec = ffcodec(avctx->codec);
+ FrameThreadContext *fctx;
+ int err, i = 0;
+
+ if (!thread_count) {
+ int nb_cpus = av_cpu_count();
+ // use number of cores + 1 as thread count if there is more than one
+ if (nb_cpus > 1)
+ thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
+ else
+ thread_count = avctx->thread_count = 1;
+ }
+
+ if (thread_count <= 1) {
+ avctx->active_thread_type = 0;
+ return 0;
+ }
+
+ avctx->internal->thread_ctx = fctx = av_mallocz(sizeof(FrameThreadContext));
+ if (!fctx)
+ return AVERROR(ENOMEM);
+
+ err = ff_pthread_init(fctx, thread_ctx_offsets);
+ if (err < 0) {
+ ff_pthread_free(fctx, thread_ctx_offsets);
+ av_freep(&avctx->internal->thread_ctx);
+ return err;
+ }
+
+ fctx->async_lock = 1;
+ fctx->delaying = 1;
+
+ if (codec->p.type == AVMEDIA_TYPE_VIDEO)
+ avctx->delay = avctx->thread_count - 1;
+
+ fctx->threads = av_calloc(thread_count, sizeof(*fctx->threads));
+ if (!fctx->threads) {
+ err = AVERROR(ENOMEM);
+ goto error;
+ }
+
+ for (; i < thread_count; ) {
+ PerThreadContext *p = &fctx->threads[i];
+ int first = !i;
+
+ err = init_thread(p, &i, fctx, avctx, codec, first);
+ if (err < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ ff_frame_thread_free(avctx, i);
+ return err;
+}
+
+void ff_thread_flush(AVCodecContext *avctx)
+{
+ int i;
+ FrameThreadContext *fctx = avctx->internal->thread_ctx;
+
+ if (!fctx) return;
+
+ park_frame_worker_threads(fctx, avctx->thread_count);
+ if (fctx->prev_thread) {
+ if (fctx->prev_thread != &fctx->threads[0])
+ update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
+ }
+
+ fctx->next_decoding = fctx->next_finished = 0;
+ fctx->delaying = 1;
+ fctx->prev_thread = NULL;
+ for (i = 0; i < avctx->thread_count; i++) {
+ PerThreadContext *p = &fctx->threads[i];
+ // Make sure decode flush calls with size=0 won't return old frames
+ p->got_frame = 0;
+ av_frame_unref(p->frame);
+ p->result = 0;
+
+ if (ffcodec(avctx->codec)->flush)
+ ffcodec(avctx->codec)->flush(p->avctx);
+ }
+}
+
+int ff_thread_can_start_frame(AVCodecContext *avctx)
+{
+ PerThreadContext *p = avctx->internal->thread_ctx;
+
+ if ((avctx->active_thread_type&FF_THREAD_FRAME) && atomic_load(&p->state) != STATE_SETTING_UP &&
+ ffcodec(avctx->codec)->update_thread_context) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static int thread_get_buffer_internal(AVCodecContext *avctx, AVFrame *f, int flags)
+{
+ PerThreadContext *p;
+ int err;
+
+ if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+ return ff_get_buffer(avctx, f, flags);
+
+ p = avctx->internal->thread_ctx;
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (atomic_load(&p->state) != STATE_SETTING_UP &&
+ ffcodec(avctx->codec)->update_thread_context) {
+FF_ENABLE_DEPRECATION_WARNINGS
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() cannot be called after ff_thread_finish_setup()\n");
+ return -1;
+ }
+
+ pthread_mutex_lock(&p->parent->buffer_mutex);
+ err = ff_get_buffer(avctx, f, flags);
+
+ pthread_mutex_unlock(&p->parent->buffer_mutex);
+
+ return err;
+}
+
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
+{
+ int ret = thread_get_buffer_internal(avctx, f, flags);
+ if (ret < 0)
+ av_log(avctx, AV_LOG_ERROR, "thread_get_buffer() failed\n");
+ return ret;
+}
+
+int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+{
+ int ret;
+
+ f->owner[0] = f->owner[1] = avctx;
+ /* Hint: It is possible for this function to be called with codecs
+ * that don't support frame threading at all, namely in case
+ * a frame-threaded decoder shares code with codecs that are not.
+ * This currently affects non-MPEG-4 mpegvideo codecs and VP7.
+ * The following check will always be true for them. */
+ if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+ return ff_get_buffer(avctx, f->f, flags);
+
+ if (ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
+ atomic_int *progress;
+ f->progress = av_buffer_alloc(2 * sizeof(*progress));
+ if (!f->progress) {
+ return AVERROR(ENOMEM);
+ }
+ progress = (atomic_int*)f->progress->data;
+
+ atomic_init(&progress[0], -1);
+ atomic_init(&progress[1], -1);
+ }
+
+ ret = ff_thread_get_buffer(avctx, f->f, flags);
+ if (ret)
+ av_buffer_unref(&f->progress);
+ return ret;
+}
+
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
+{
+ if (!f)
+ return;
+
+ if (avctx->debug & FF_DEBUG_BUFFERS)
+ av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
+
+ av_frame_unref(f);
+}
+
+void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
+{
+ av_buffer_unref(&f->progress);
+ f->owner[0] = f->owner[1] = NULL;
+ ff_thread_release_buffer(avctx, f->f);
+}
diff --git a/media/ffvpx/libavcodec/pthread_internal.h b/media/ffvpx/libavcodec/pthread_internal.h
new file mode 100644
index 0000000000..d0b6a7a673
--- /dev/null
+++ b/media/ffvpx/libavcodec/pthread_internal.h
@@ -0,0 +1,66 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PTHREAD_INTERNAL_H
+#define AVCODEC_PTHREAD_INTERNAL_H
+
+#include "avcodec.h"
+
+/* H.264 slice threading seems to be buggy with more than 16 threads,
+ * so limit the number of threads to 16 for automatic detection. */
+#define MAX_AUTO_THREADS 16
+
+int ff_slice_thread_init(AVCodecContext *avctx);
+void ff_slice_thread_free(AVCodecContext *avctx);
+
+int ff_frame_thread_init(AVCodecContext *avctx);
+void ff_frame_thread_free(AVCodecContext *avctx, int thread_count);
+
+#define THREAD_SENTINEL 0 // This forbids putting a mutex/condition variable at the front.
+/**
+ * Initialize/destroy a list of mutexes/conditions contained in a structure.
+ * The positions of these mutexes/conditions in the structure are given by
+ * their offsets. Because it is undefined behaviour to destroy
+ * an uninitialized mutex/condition, ff_pthread_init() stores the number
+ * of successfully initialized mutexes and conditions in the object itself
+ * and ff_pthread_free() uses this number to destroy exactly the mutexes and
+ * condition variables that have been successfully initialized.
+ *
+ * @param obj The object containing the mutexes/conditions.
+ * @param[in] offsets An array of offsets. Its first member gives the offset
+ * of the variable that contains the count of successfully
+ * initialized mutexes/condition variables; said variable
+ * must be an unsigned int. Two arrays of offsets, each
+ * delimited by a THREAD_SENTINEL follow. The first
+ * contains the offsets of all the mutexes, the second
+ * contains the offsets of all the condition variables.
+ */
+int ff_pthread_init(void *obj, const unsigned offsets[]);
+void ff_pthread_free(void *obj, const unsigned offsets[]);
+
+/**
+ * Macros to help create the above lists. mutexes and conds need
+ * to be parentheses-enclosed lists of offsets in the containing structure.
+ */
+#define OFFSET_ARRAY(...) __VA_ARGS__, THREAD_SENTINEL
+#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds) \
+static const unsigned name ## _offsets[] = { offsetof(type, cnt_variable), \
+ OFFSET_ARRAY mutexes, \
+ OFFSET_ARRAY conds }
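+
+/*
+ * Illustrative sketch (not part of the upstream patch); "MyContext" and its
+ * members are hypothetical. It mirrors the way pthread_frame.c declares its
+ * offset arrays:
+ *
+ *     typedef struct MyContext {
+ *         unsigned init_cnt;              // counter of initialized mutexes/conds
+ *         pthread_mutex_t lock_a, lock_b;
+ *         pthread_cond_t  cond;
+ *     } MyContext;
+ *
+ *     #define OFF(member) offsetof(MyContext, member)
+ *     DEFINE_OFFSET_ARRAY(MyContext, my_ctx, init_cnt,
+ *                         (OFF(lock_a), OFF(lock_b)), (OFF(cond)));
+ *     #undef OFF
+ *
+ *     // later: ff_pthread_init(ctx, my_ctx_offsets);
+ *     //        ff_pthread_free(ctx, my_ctx_offsets);
+ */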
+
+#endif // AVCODEC_PTHREAD_INTERNAL_H
diff --git a/media/ffvpx/libavcodec/pthread_slice.c b/media/ffvpx/libavcodec/pthread_slice.c
new file mode 100644
index 0000000000..a4d31c6f4d
--- /dev/null
+++ b/media/ffvpx/libavcodec/pthread_slice.c
@@ -0,0 +1,260 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Slice multithreading support functions
+ * @see doc/multithreading.txt
+ */
+
+#include "config.h"
+
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "internal.h"
+#include "pthread_internal.h"
+#include "thread.h"
+
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem.h"
+#include "libavutil/thread.h"
+#include "libavutil/slicethread.h"
+
+typedef int (action_func)(AVCodecContext *c, void *arg);
+typedef int (action_func2)(AVCodecContext *c, void *arg, int jobnr, int threadnr);
+typedef int (main_func)(AVCodecContext *c);
+
+typedef struct Progress {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+} Progress;
+
+typedef struct SliceThreadContext {
+ AVSliceThread *thread;
+ action_func *func;
+ action_func2 *func2;
+ main_func *mainfunc;
+ void *args;
+ int *rets;
+ int job_size;
+
+ int *entries;
+ int entries_count;
+ int thread_count;
+ Progress *progress;
+} SliceThreadContext;
+
+static void main_function(void *priv) {
+ AVCodecContext *avctx = priv;
+ SliceThreadContext *c = avctx->internal->thread_ctx;
+ c->mainfunc(avctx);
+}
+
+static void worker_func(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads)
+{
+ AVCodecContext *avctx = priv;
+ SliceThreadContext *c = avctx->internal->thread_ctx;
+ int ret;
+
+ ret = c->func ? c->func(avctx, (char *)c->args + c->job_size * jobnr)
+ : c->func2(avctx, c->args, jobnr, threadnr);
+ if (c->rets)
+ c->rets[jobnr] = ret;
+}
+
+void ff_slice_thread_free(AVCodecContext *avctx)
+{
+ SliceThreadContext *c = avctx->internal->thread_ctx;
+ int i;
+
+ avpriv_slicethread_free(&c->thread);
+
+ for (i = 0; i < c->thread_count; i++) {
+ Progress *const progress = &c->progress[i];
+ pthread_mutex_destroy(&progress->mutex);
+ pthread_cond_destroy(&progress->cond);
+ }
+
+ av_freep(&c->entries);
+ av_freep(&c->progress);
+ av_freep(&avctx->internal->thread_ctx);
+}
+
+static int thread_execute(AVCodecContext *avctx, action_func* func, void *arg, int *ret, int job_count, int job_size)
+{
+ SliceThreadContext *c = avctx->internal->thread_ctx;
+
+ if (!(avctx->active_thread_type&FF_THREAD_SLICE) || avctx->thread_count <= 1)
+ return avcodec_default_execute(avctx, func, arg, ret, job_count, job_size);
+
+ if (job_count <= 0)
+ return 0;
+
+ c->job_size = job_size;
+ c->args = arg;
+ c->func = func;
+ c->rets = ret;
+
+ avpriv_slicethread_execute(c->thread, job_count, !!c->mainfunc);
+ return 0;
+}
+
+static int thread_execute2(AVCodecContext *avctx, action_func2* func2, void *arg, int *ret, int job_count)
+{
+ SliceThreadContext *c = avctx->internal->thread_ctx;
+ c->func2 = func2;
+ return thread_execute(avctx, NULL, arg, ret, job_count, 0);
+}
+
+int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2* func2, main_func *mainfunc, void *arg, int *ret, int job_count)
+{
+ SliceThreadContext *c = avctx->internal->thread_ctx;
+ c->func2 = func2;
+ c->mainfunc = mainfunc;
+ return thread_execute(avctx, NULL, arg, ret, job_count, 0);
+}
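+
+/*
+ * Illustrative sketch (not part of the upstream patch) of how a codec hands
+ * slice jobs to thread_execute2() through the public execute2 callback;
+ * decode_slice_job(), decode_one_slice() and slices are hypothetical:
+ *
+ *     static int decode_slice_job(AVCodecContext *avctx, void *arg,
+ *                                 int jobnr, int threadnr)
+ *     {
+ *         SliceArgs *slices = arg;
+ *         return decode_one_slice(avctx, &slices[jobnr], threadnr);
+ *     }
+ *
+ *     // dispatch nb_slices jobs and collect per-job return codes
+ *     avctx->execute2(avctx, decode_slice_job, slices, rets, nb_slices);
+ */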
+
+int ff_slice_thread_init(AVCodecContext *avctx)
+{
+ SliceThreadContext *c;
+ int thread_count = avctx->thread_count;
+ void (*mainfunc)(void *);
+
+ // We cannot do this in the encoder init, as the threads are created before the encoder's init runs
+ if (av_codec_is_encoder(avctx->codec) &&
+ avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
+ avctx->height > 2800)
+ thread_count = avctx->thread_count = 1;
+
+ if (!thread_count) {
+ int nb_cpus = av_cpu_count();
+ if (avctx->height)
+ nb_cpus = FFMIN(nb_cpus, (avctx->height+15)/16);
+ // use number of cores + 1 as thread count if there is more than one
+ if (nb_cpus > 1)
+ thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
+ else
+ thread_count = avctx->thread_count = 1;
+ }
+
+ if (thread_count <= 1) {
+ avctx->active_thread_type = 0;
+ return 0;
+ }
+
+ avctx->internal->thread_ctx = c = av_mallocz(sizeof(*c));
+ mainfunc = ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SLICE_THREAD_HAS_MF ? &main_function : NULL;
+ if (!c || (thread_count = avpriv_slicethread_create(&c->thread, avctx, worker_func, mainfunc, thread_count)) <= 1) {
+ if (c)
+ avpriv_slicethread_free(&c->thread);
+ av_freep(&avctx->internal->thread_ctx);
+ avctx->thread_count = 1;
+ avctx->active_thread_type = 0;
+ return 0;
+ }
+ avctx->thread_count = thread_count;
+
+ avctx->execute = thread_execute;
+ avctx->execute2 = thread_execute2;
+ return 0;
+}
+
+int av_cold ff_slice_thread_init_progress(AVCodecContext *avctx)
+{
+ SliceThreadContext *const p = avctx->internal->thread_ctx;
+ int err, i = 0, thread_count = avctx->thread_count;
+
+ p->progress = av_calloc(thread_count, sizeof(*p->progress));
+ if (!p->progress) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ for (; i < thread_count; i++) {
+ Progress *const progress = &p->progress[i];
+ err = pthread_mutex_init(&progress->mutex, NULL);
+ if (err) {
+ err = AVERROR(err);
+ goto fail;
+ }
+ err = pthread_cond_init (&progress->cond, NULL);
+ if (err) {
+ err = AVERROR(err);
+ pthread_mutex_destroy(&progress->mutex);
+ goto fail;
+ }
+ }
+ err = 0;
+fail:
+ p->thread_count = i;
+ return err;
+}
+
+void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
+{
+ SliceThreadContext *p = avctx->internal->thread_ctx;
+ Progress *const progress = &p->progress[thread];
+ int *entries = p->entries;
+
+ pthread_mutex_lock(&progress->mutex);
+ entries[field] += n;
+ pthread_cond_signal(&progress->cond);
+ pthread_mutex_unlock(&progress->mutex);
+}
+
+void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
+{
+ SliceThreadContext *p = avctx->internal->thread_ctx;
+ Progress *progress;
+ int *entries = p->entries;
+
+ if (!entries || !field) return;
+
+ thread = thread ? thread - 1 : p->thread_count - 1;
+ progress = &p->progress[thread];
+
+ pthread_mutex_lock(&progress->mutex);
+ while ((entries[field - 1] - entries[field]) < shift) {
+ pthread_cond_wait(&progress->cond, &progress->mutex);
+ }
+ pthread_mutex_unlock(&progress->mutex);
+}
+
+int ff_slice_thread_allocz_entries(AVCodecContext *avctx, int count)
+{
+ if (avctx->active_thread_type & FF_THREAD_SLICE) {
+ SliceThreadContext *p = avctx->internal->thread_ctx;
+
+ if (p->entries_count == count) {
+ memset(p->entries, 0, p->entries_count * sizeof(*p->entries));
+ return 0;
+ }
+ av_freep(&p->entries);
+
+ p->entries = av_calloc(count, sizeof(*p->entries));
+ if (!p->entries) {
+ p->entries_count = 0;
+ return AVERROR(ENOMEM);
+ }
+ p->entries_count = count;
+ }
+
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/put_bits.h b/media/ffvpx/libavcodec/put_bits.h
new file mode 100644
index 0000000000..4561dc131a
--- /dev/null
+++ b/media/ffvpx/libavcodec/put_bits.h
@@ -0,0 +1,428 @@
+/*
+ * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * bitstream writer API
+ */
+
+#ifndef AVCODEC_PUT_BITS_H
+#define AVCODEC_PUT_BITS_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#include "config.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+
+#if ARCH_X86_64
+// TODO: Benchmark and optionally enable on other 64-bit architectures.
+typedef uint64_t BitBuf;
+#define AV_WBBUF AV_WB64
+#define AV_WLBUF AV_WL64
+#else
+typedef uint32_t BitBuf;
+#define AV_WBBUF AV_WB32
+#define AV_WLBUF AV_WL32
+#endif
+
+static const int BUF_BITS = 8 * sizeof(BitBuf);
+
+typedef struct PutBitContext {
+ BitBuf bit_buf;
+ int bit_left;
+ uint8_t *buf, *buf_ptr, *buf_end;
+} PutBitContext;
+
+/**
+ * Initialize the PutBitContext s.
+ *
+ * @param buffer the buffer to write bits into
+ * @param buffer_size the size in bytes of buffer
+ */
+static inline void init_put_bits(PutBitContext *s, uint8_t *buffer,
+ int buffer_size)
+{
+ if (buffer_size < 0) {
+ buffer_size = 0;
+ buffer = NULL;
+ }
+
+ s->buf = buffer;
+ s->buf_end = s->buf + buffer_size;
+ s->buf_ptr = s->buf;
+ s->bit_left = BUF_BITS;
+ s->bit_buf = 0;
+}
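+
+/*
+ * Illustrative sketch (not part of the upstream patch): a typical write
+ * sequence with this API. The buffer size and values are arbitrary.
+ *
+ *     uint8_t buf[64];
+ *     PutBitContext pb;
+ *
+ *     init_put_bits(&pb, buf, sizeof(buf));
+ *     put_bits(&pb, 3, 0x5);        // write a 3-bit value (MSB-first by default)
+ *     put_bits(&pb, 13, 0x1abc);    // up to 31 bits per call
+ *     flush_put_bits(&pb);          // pad to a byte boundary and write out
+ *     int written = put_bytes_output(&pb);  // number of bytes now in buf
+ */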
+
+/**
+ * @return the total number of bits written to the bitstream.
+ */
+static inline int put_bits_count(PutBitContext *s)
+{
+ return (s->buf_ptr - s->buf) * 8 + BUF_BITS - s->bit_left;
+}
+
+/**
+ * @return the number of bytes output so far; may only be called
+ * when the PutBitContext is freshly initialized or flushed.
+ */
+static inline int put_bytes_output(const PutBitContext *s)
+{
+ av_assert2(s->bit_left == BUF_BITS);
+ return s->buf_ptr - s->buf;
+}
+
+/**
+ * @param round_up When set, the number of bits written so far will be
+ * rounded up to the next byte.
+ * @return the number of bytes output so far.
+ */
+static inline int put_bytes_count(const PutBitContext *s, int round_up)
+{
+ return s->buf_ptr - s->buf + ((BUF_BITS - s->bit_left + (round_up ? 7 : 0)) >> 3);
+}
+
+/**
+ * Rebase the bit writer onto a reallocated buffer.
+ *
+ * @param buffer the buffer to write bits into
+ * @param buffer_size the size in bytes of buffer,
+ * must be large enough to hold everything written so far
+ */
+static inline void rebase_put_bits(PutBitContext *s, uint8_t *buffer,
+ int buffer_size)
+{
+ av_assert0(8*buffer_size >= put_bits_count(s));
+
+ s->buf_end = buffer + buffer_size;
+ s->buf_ptr = buffer + (s->buf_ptr - s->buf);
+ s->buf = buffer;
+}
+
+/**
+ * @return the number of bits available in the bitstream.
+ */
+static inline int put_bits_left(PutBitContext* s)
+{
+ return (s->buf_end - s->buf_ptr) * 8 - BUF_BITS + s->bit_left;
+}
+
+/**
+ * @param round_up When set, the number of bits written will be
+ * rounded up to the next byte.
+ * @return the number of bytes left.
+ */
+static inline int put_bytes_left(const PutBitContext *s, int round_up)
+{
+ return s->buf_end - s->buf_ptr - ((BUF_BITS - s->bit_left + (round_up ? 7 : 0)) >> 3);
+}
+
+/**
+ * Pad the end of the output stream with zeros.
+ */
+static inline void flush_put_bits(PutBitContext *s)
+{
+#ifndef BITSTREAM_WRITER_LE
+ if (s->bit_left < BUF_BITS)
+ s->bit_buf <<= s->bit_left;
+#endif
+ while (s->bit_left < BUF_BITS) {
+ av_assert0(s->buf_ptr < s->buf_end);
+#ifdef BITSTREAM_WRITER_LE
+ *s->buf_ptr++ = s->bit_buf;
+ s->bit_buf >>= 8;
+#else
+ *s->buf_ptr++ = s->bit_buf >> (BUF_BITS - 8);
+ s->bit_buf <<= 8;
+#endif
+ s->bit_left += 8;
+ }
+ s->bit_left = BUF_BITS;
+ s->bit_buf = 0;
+}
+
+static inline void flush_put_bits_le(PutBitContext *s)
+{
+ while (s->bit_left < BUF_BITS) {
+ av_assert0(s->buf_ptr < s->buf_end);
+ *s->buf_ptr++ = s->bit_buf;
+ s->bit_buf >>= 8;
+ s->bit_left += 8;
+ }
+ s->bit_left = BUF_BITS;
+ s->bit_buf = 0;
+}
+
+#ifdef BITSTREAM_WRITER_LE
+#define ff_put_string ff_put_string_unsupported_here
+#define ff_copy_bits ff_copy_bits_unsupported_here
+#else
+
+/**
+ * Put the string 'string' in the bitstream.
+ *
+ * @param terminate_string 0-terminates the written string if value is 1
+ */
+void ff_put_string(PutBitContext *pb, const char *string,
+ int terminate_string);
+
+/**
+ * Copy the content of src to the bitstream.
+ *
+ * @param length the number of bits of src to copy
+ */
+void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length);
+#endif
+
+static inline void put_bits_no_assert(PutBitContext *s, int n, BitBuf value)
+{
+ BitBuf bit_buf;
+ int bit_left;
+
+ bit_buf = s->bit_buf;
+ bit_left = s->bit_left;
+
+ /* XXX: optimize */
+#ifdef BITSTREAM_WRITER_LE
+ bit_buf |= value << (BUF_BITS - bit_left);
+ if (n >= bit_left) {
+ if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
+ AV_WLBUF(s->buf_ptr, bit_buf);
+ s->buf_ptr += sizeof(BitBuf);
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
+ av_assert2(0);
+ }
+ bit_buf = value >> bit_left;
+ bit_left += BUF_BITS;
+ }
+ bit_left -= n;
+#else
+ if (n < bit_left) {
+ bit_buf = (bit_buf << n) | value;
+ bit_left -= n;
+ } else {
+ bit_buf <<= bit_left;
+ bit_buf |= value >> (n - bit_left);
+ if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
+ AV_WBBUF(s->buf_ptr, bit_buf);
+ s->buf_ptr += sizeof(BitBuf);
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
+ av_assert2(0);
+ }
+ bit_left += BUF_BITS - n;
+ bit_buf = value;
+ }
+#endif
+
+ s->bit_buf = bit_buf;
+ s->bit_left = bit_left;
+}
+
+/**
+ * Write up to 31 bits into a bitstream.
+ * Use put_bits32 to write 32 bits.
+ */
+static inline void put_bits(PutBitContext *s, int n, BitBuf value)
+{
+ av_assert2(n <= 31 && value < (1UL << n));
+ put_bits_no_assert(s, n, value);
+}
+
+static inline void put_bits_le(PutBitContext *s, int n, BitBuf value)
+{
+ BitBuf bit_buf;
+ int bit_left;
+
+ av_assert2(n <= 31 && value < (1UL << n));
+
+ bit_buf = s->bit_buf;
+ bit_left = s->bit_left;
+
+ bit_buf |= value << (BUF_BITS - bit_left);
+ if (n >= bit_left) {
+ if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
+ AV_WLBUF(s->buf_ptr, bit_buf);
+ s->buf_ptr += sizeof(BitBuf);
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
+ av_assert2(0);
+ }
+ bit_buf = value >> bit_left;
+ bit_left += BUF_BITS;
+ }
+ bit_left -= n;
+
+ s->bit_buf = bit_buf;
+ s->bit_left = bit_left;
+}
+
+static inline void put_sbits(PutBitContext *pb, int n, int32_t value)
+{
+ av_assert2(n >= 0 && n <= 31);
+
+ put_bits(pb, n, av_mod_uintp2(value, n));
+}
+
+/**
+ * Write exactly 32 bits into a bitstream.
+ */
+static void av_unused put_bits32(PutBitContext *s, uint32_t value)
+{
+ BitBuf bit_buf;
+ int bit_left;
+
+ if (BUF_BITS > 32) {
+ put_bits_no_assert(s, 32, value);
+ return;
+ }
+
+ bit_buf = s->bit_buf;
+ bit_left = s->bit_left;
+
+#ifdef BITSTREAM_WRITER_LE
+ bit_buf |= (BitBuf)value << (BUF_BITS - bit_left);
+ if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
+ AV_WLBUF(s->buf_ptr, bit_buf);
+ s->buf_ptr += sizeof(BitBuf);
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
+ av_assert2(0);
+ }
+ bit_buf = (uint64_t)value >> bit_left;
+#else
+ bit_buf = (uint64_t)bit_buf << bit_left;
+ bit_buf |= (BitBuf)value >> (BUF_BITS - bit_left);
+ if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
+ AV_WBBUF(s->buf_ptr, bit_buf);
+ s->buf_ptr += sizeof(BitBuf);
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
+ av_assert2(0);
+ }
+ bit_buf = value;
+#endif
+
+ s->bit_buf = bit_buf;
+ s->bit_left = bit_left;
+}
+
+/**
+ * Write up to 64 bits into a bitstream.
+ */
+static inline void put_bits64(PutBitContext *s, int n, uint64_t value)
+{
+ av_assert2((n == 64) || (n < 64 && value < (UINT64_C(1) << n)));
+
+ if (n < 32)
+ put_bits(s, n, value);
+ else if (n == 32)
+ put_bits32(s, value);
+ else if (n < 64) {
+ uint32_t lo = value & 0xffffffff;
+ uint32_t hi = value >> 32;
+#ifdef BITSTREAM_WRITER_LE
+ put_bits32(s, lo);
+ put_bits(s, n - 32, hi);
+#else
+ put_bits(s, n - 32, hi);
+ put_bits32(s, lo);
+#endif
+ } else {
+ uint32_t lo = value & 0xffffffff;
+ uint32_t hi = value >> 32;
+#ifdef BITSTREAM_WRITER_LE
+ put_bits32(s, lo);
+ put_bits32(s, hi);
+#else
+ put_bits32(s, hi);
+ put_bits32(s, lo);
+#endif
+
+ }
+}
+
+static inline void put_sbits63(PutBitContext *pb, int n, int64_t value)
+{
+ av_assert2(n >= 0 && n < 64);
+
+ put_bits64(pb, n, (uint64_t)(value) & (~(UINT64_MAX << n)));
+}
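+
+/*
+ * Illustrative sketch (not part of the upstream patch): writing wide fields
+ * with the 64-bit helpers; pb, pts and delta are hypothetical variables.
+ *
+ *     put_bits64(&pb, 33, pts & 0x1FFFFFFFFULL);  // e.g. a 33-bit timestamp
+ *     put_sbits63(&pb, 10, delta);                // low 10 bits of a signed value
+ */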
+
+/**
+ * Return the pointer to the byte where the bitstream writer will put
+ * the next bit.
+ */
+static inline uint8_t *put_bits_ptr(PutBitContext *s)
+{
+ return s->buf_ptr;
+}
+
+/**
+ * Skip the given number of bytes.
+ * PutBitContext must be flushed & aligned to a byte boundary before calling this.
+ */
+static inline void skip_put_bytes(PutBitContext *s, int n)
+{
+ av_assert2((put_bits_count(s) & 7) == 0);
+ av_assert2(s->bit_left == BUF_BITS);
+ av_assert0(n <= s->buf_end - s->buf_ptr);
+ s->buf_ptr += n;
+}
+
+/**
+ * Skip the given number of bits.
+ * Must only be used if the actual values in the bitstream do not matter.
+ * If n is < 0 the behavior is undefined.
+ */
+static inline void skip_put_bits(PutBitContext *s, int n)
+{
+ unsigned bits = BUF_BITS - s->bit_left + n;
+ s->buf_ptr += sizeof(BitBuf) * (bits / BUF_BITS);
+ s->bit_left = BUF_BITS - (bits & (BUF_BITS - 1));
+}
+
+/**
+ * Change the end of the buffer.
+ *
+ * @param size the new size in bytes of the buffer to write bits into
+ */
+static inline void set_put_bits_buffer_size(PutBitContext *s, int size)
+{
+ av_assert0(size <= INT_MAX/8 - BUF_BITS);
+ s->buf_end = s->buf + size;
+}
+
+/**
+ * Pad the bitstream with zeros up to the next byte boundary.
+ */
+static inline void align_put_bits(PutBitContext *s)
+{
+ put_bits(s, s->bit_left & 7, 0);
+}
+
+#undef AV_WBBUF
+#undef AV_WLBUF
+
+#endif /* AVCODEC_PUT_BITS_H */
diff --git a/media/ffvpx/libavcodec/qpeldsp.h b/media/ffvpx/libavcodec/qpeldsp.h
new file mode 100644
index 0000000000..91019eda9c
--- /dev/null
+++ b/media/ffvpx/libavcodec/qpeldsp.h
@@ -0,0 +1,83 @@
+/*
+ * quarterpel DSP functions
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * quarterpel DSP functions
+ */
+
+#ifndef AVCODEC_QPELDSP_H
+#define AVCODEC_QPELDSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+void ff_put_pixels8x8_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
+void ff_avg_pixels8x8_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
+void ff_put_pixels16x16_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
+void ff_avg_pixels16x16_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
+
+void ff_put_pixels8_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
+ int dst_stride, int src_stride1, int src_stride2,
+ int h);
+
+#define DEF_OLD_QPEL(name) \
+void ff_put_ ## name(uint8_t *dst /* align width (8 or 16) */, \
+ const uint8_t *src /* align 1 */, \
+ ptrdiff_t stride); \
+void ff_put_no_rnd_ ## name(uint8_t *dst /* align width (8 or 16) */, \
+ const uint8_t *src /* align 1 */, \
+ ptrdiff_t stride); \
+void ff_avg_ ## name(uint8_t *dst /* align width (8 or 16) */, \
+ const uint8_t *src /* align 1 */, \
+ ptrdiff_t stride);
+
+DEF_OLD_QPEL(qpel16_mc11_old_c)
+DEF_OLD_QPEL(qpel16_mc31_old_c)
+DEF_OLD_QPEL(qpel16_mc12_old_c)
+DEF_OLD_QPEL(qpel16_mc32_old_c)
+DEF_OLD_QPEL(qpel16_mc13_old_c)
+DEF_OLD_QPEL(qpel16_mc33_old_c)
+DEF_OLD_QPEL(qpel8_mc11_old_c)
+DEF_OLD_QPEL(qpel8_mc31_old_c)
+DEF_OLD_QPEL(qpel8_mc12_old_c)
+DEF_OLD_QPEL(qpel8_mc32_old_c)
+DEF_OLD_QPEL(qpel8_mc13_old_c)
+DEF_OLD_QPEL(qpel8_mc33_old_c)
+
+typedef void (*qpel_mc_func)(uint8_t *dst /* align width (8 or 16) */,
+ const uint8_t *src /* align 1 */,
+ ptrdiff_t stride);
+
+/**
+ * quarterpel DSP context
+ */
+typedef struct QpelDSPContext {
+ qpel_mc_func put_qpel_pixels_tab[2][16];
+ qpel_mc_func avg_qpel_pixels_tab[2][16];
+ qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16];
+} QpelDSPContext;
+
+void ff_qpeldsp_init(QpelDSPContext *c);
+
+void ff_qpeldsp_init_x86(QpelDSPContext *c);
+void ff_qpeldsp_init_mips(QpelDSPContext *c);
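+
+/*
+ * Illustrative sketch (not part of the upstream patch): one quarterpel
+ * motion-compensated copy. The dxy packing shown matches the mpegvideo
+ * callers; dst/src/stride/mx/my are hypothetical.
+ *
+ *     QpelDSPContext qdsp;
+ *     ff_qpeldsp_init(&qdsp);
+ *
+ *     int dxy = ((my & 3) << 2) | (mx & 3);                // fractional MV components
+ *     qdsp.put_qpel_pixels_tab[0][dxy](dst, src, stride);  // [0]=16x16, [1]=8x8
+ */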
+
+#endif /* AVCODEC_QPELDSP_H */
diff --git a/media/ffvpx/libavcodec/qsv_api.c b/media/ffvpx/libavcodec/qsv_api.c
new file mode 100644
index 0000000000..327ff7d813
--- /dev/null
+++ b/media/ffvpx/libavcodec/qsv_api.c
@@ -0,0 +1,42 @@
+/*
+ * Intel MediaSDK QSV public API functions
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include <stddef.h>
+
+#include "libavutil/mem.h"
+
+#if CONFIG_QSV
+#include "qsv.h"
+
+AVQSVContext *av_qsv_alloc_context(void)
+{
+ return av_mallocz(sizeof(AVQSVContext));
+}
+#else
+
+struct AVQSVContext *av_qsv_alloc_context(void);
+
+struct AVQSVContext *av_qsv_alloc_context(void)
+{
+ return NULL;
+}
+#endif
diff --git a/media/ffvpx/libavcodec/ratecontrol.h b/media/ffvpx/libavcodec/ratecontrol.h
new file mode 100644
index 0000000000..4de80fad90
--- /dev/null
+++ b/media/ffvpx/libavcodec/ratecontrol.h
@@ -0,0 +1,96 @@
+/*
+ * Ratecontrol
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_RATECONTROL_H
+#define AVCODEC_RATECONTROL_H
+
+/**
+ * @file
+ * ratecontrol header.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include "libavutil/eval.h"
+
+typedef struct Predictor{
+ double coeff;
+ double count;
+ double decay;
+} Predictor;
+
+typedef struct RateControlEntry{
+ int pict_type;
+ float qscale;
+ int mv_bits;
+ int i_tex_bits;
+ int p_tex_bits;
+ int misc_bits;
+ int header_bits;
+ uint64_t expected_bits;
+ int new_pict_type;
+ float new_qscale;
+ int64_t mc_mb_var_sum;
+ int64_t mb_var_sum;
+ int i_count;
+ int skip_count;
+ int f_code;
+ int b_code;
+}RateControlEntry;
+
+/**
+ * rate control context.
+ */
+typedef struct RateControlContext{
+ int num_entries; ///< number of RateControlEntries
+ RateControlEntry *entry;
+ double buffer_index; ///< amount of bits in the video/audio buffer
+ Predictor pred[5];
+ double short_term_qsum; ///< sum of recent qscales
+ double short_term_qcount; ///< count of recent qscales
+ double pass1_rc_eq_output_sum;///< sum of the output of the rc equation, this is used for normalization
+ double pass1_wanted_bits; ///< bits which should have been output by the pass1 code (including complexity init)
+ double last_qscale;
+ double last_qscale_for[5]; ///< last qscale for a specific pict type, used for max_diff & ipb factor stuff
+ int64_t last_mc_mb_var_sum;
+ int64_t last_mb_var_sum;
+ uint64_t i_cplx_sum[5];
+ uint64_t p_cplx_sum[5];
+ uint64_t mv_bits_sum[5];
+ uint64_t qscale_sum[5];
+ int frame_count[5];
+ int last_non_b_pict_type;
+
+ AVExpr * rc_eq_eval;
+}RateControlContext;
+
+struct MpegEncContext;
+
+/* rate control */
+int ff_rate_control_init(struct MpegEncContext *s);
+float ff_rate_estimate_qscale(struct MpegEncContext *s, int dry_run);
+void ff_write_pass1_stats(struct MpegEncContext *s);
+void ff_rate_control_uninit(struct MpegEncContext *s);
+int ff_vbv_update(struct MpegEncContext *s, int frame_size);
+void ff_get_2pass_fcode(struct MpegEncContext *s);
+
+#endif /* AVCODEC_RATECONTROL_H */
diff --git a/media/ffvpx/libavcodec/raw.c b/media/ffvpx/libavcodec/raw.c
new file mode 100644
index 0000000000..1e5b48d1e0
--- /dev/null
+++ b/media/ffvpx/libavcodec/raw.c
@@ -0,0 +1,370 @@
+/*
+ * Raw Video Codec
+ * Copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Raw Video Codec
+ */
+
+#include "libavutil/macros.h"
+#include "avcodec.h"
+#include "raw.h"
+
+static const PixelFormatTag raw_pix_fmt_tags[] = {
+ { AV_PIX_FMT_YUV420P, MKTAG('I', '4', '2', '0') }, /* Planar formats */
+ { AV_PIX_FMT_YUV420P, MKTAG('I', 'Y', 'U', 'V') },
+ { AV_PIX_FMT_YUV420P, MKTAG('y', 'v', '1', '2') },
+ { AV_PIX_FMT_YUV420P, MKTAG('Y', 'V', '1', '2') },
+ { AV_PIX_FMT_YUV410P, MKTAG('Y', 'U', 'V', '9') },
+ { AV_PIX_FMT_YUV410P, MKTAG('Y', 'V', 'U', '9') },
+ { AV_PIX_FMT_YUV411P, MKTAG('Y', '4', '1', 'B') },
+ { AV_PIX_FMT_YUV422P, MKTAG('Y', '4', '2', 'B') },
+ { AV_PIX_FMT_YUV422P, MKTAG('P', '4', '2', '2') },
+ { AV_PIX_FMT_YUV422P, MKTAG('Y', 'V', '1', '6') },
+ /* yuvjXXX formats are deprecated hacks specific to libav*;
+ they are identical to yuvXXX */
+ { AV_PIX_FMT_YUVJ420P, MKTAG('I', '4', '2', '0') }, /* Planar formats */
+ { AV_PIX_FMT_YUVJ420P, MKTAG('I', 'Y', 'U', 'V') },
+ { AV_PIX_FMT_YUVJ420P, MKTAG('Y', 'V', '1', '2') },
+ { AV_PIX_FMT_YUVJ422P, MKTAG('Y', '4', '2', 'B') },
+ { AV_PIX_FMT_YUVJ422P, MKTAG('P', '4', '2', '2') },
+ { AV_PIX_FMT_GRAY8, MKTAG('Y', '8', '0', '0') },
+ { AV_PIX_FMT_GRAY8, MKTAG('Y', '8', ' ', ' ') },
+
+ { AV_PIX_FMT_YUYV422, MKTAG('Y', 'U', 'Y', '2') }, /* Packed formats */
+ { AV_PIX_FMT_YUYV422, MKTAG('Y', '4', '2', '2') },
+ { AV_PIX_FMT_YUYV422, MKTAG('V', '4', '2', '2') },
+ { AV_PIX_FMT_YUYV422, MKTAG('V', 'Y', 'U', 'Y') },
+ { AV_PIX_FMT_YUYV422, MKTAG('Y', 'U', 'N', 'V') },
+ { AV_PIX_FMT_YUYV422, MKTAG('Y', 'U', 'Y', 'V') },
+ { AV_PIX_FMT_YVYU422, MKTAG('Y', 'V', 'Y', 'U') }, /* Philips */
+ { AV_PIX_FMT_UYVY422, MKTAG('U', 'Y', 'V', 'Y') },
+ { AV_PIX_FMT_UYVY422, MKTAG('H', 'D', 'Y', 'C') },
+ { AV_PIX_FMT_UYVY422, MKTAG('U', 'Y', 'N', 'V') },
+ { AV_PIX_FMT_UYVY422, MKTAG('U', 'Y', 'N', 'Y') },
+ { AV_PIX_FMT_UYVY422, MKTAG('u', 'y', 'v', '1') },
+ { AV_PIX_FMT_UYVY422, MKTAG('2', 'V', 'u', '1') },
+ { AV_PIX_FMT_UYVY422, MKTAG('A', 'V', 'R', 'n') }, /* Avid AVI Codec 1:1 */
+ { AV_PIX_FMT_UYVY422, MKTAG('A', 'V', '1', 'x') }, /* Avid 1:1x */
+ { AV_PIX_FMT_UYVY422, MKTAG('A', 'V', 'u', 'p') },
+ { AV_PIX_FMT_UYVY422, MKTAG('V', 'D', 'T', 'Z') }, /* SoftLab-NSK VideoTizer */
+ { AV_PIX_FMT_UYVY422, MKTAG('a', 'u', 'v', '2') },
+ { AV_PIX_FMT_UYVY422, MKTAG('c', 'y', 'u', 'v') }, /* CYUV is also Creative YUV */
+ { AV_PIX_FMT_UYYVYY411, MKTAG('Y', '4', '1', '1') },
+ { AV_PIX_FMT_GRAY8, MKTAG('G', 'R', 'E', 'Y') },
+ { AV_PIX_FMT_NV12, MKTAG('N', 'V', '1', '2') },
+ { AV_PIX_FMT_NV21, MKTAG('N', 'V', '2', '1') },
+ { AV_PIX_FMT_VUYA, MKTAG('A', 'Y', 'U', 'V') }, /* MS 4:4:4:4 */
+
+ /* nut */
+ { AV_PIX_FMT_RGB555LE, MKTAG('R', 'G', 'B', 15) },
+ { AV_PIX_FMT_BGR555LE, MKTAG('B', 'G', 'R', 15) },
+ { AV_PIX_FMT_RGB565LE, MKTAG('R', 'G', 'B', 16) },
+ { AV_PIX_FMT_BGR565LE, MKTAG('B', 'G', 'R', 16) },
+ { AV_PIX_FMT_RGB555BE, MKTAG(15 , 'B', 'G', 'R') },
+ { AV_PIX_FMT_BGR555BE, MKTAG(15 , 'R', 'G', 'B') },
+ { AV_PIX_FMT_RGB565BE, MKTAG(16 , 'B', 'G', 'R') },
+ { AV_PIX_FMT_BGR565BE, MKTAG(16 , 'R', 'G', 'B') },
+ { AV_PIX_FMT_RGB444LE, MKTAG('R', 'G', 'B', 12) },
+ { AV_PIX_FMT_BGR444LE, MKTAG('B', 'G', 'R', 12) },
+ { AV_PIX_FMT_RGB444BE, MKTAG(12 , 'B', 'G', 'R') },
+ { AV_PIX_FMT_BGR444BE, MKTAG(12 , 'R', 'G', 'B') },
+ { AV_PIX_FMT_RGBA64LE, MKTAG('R', 'B', 'A', 64 ) },
+ { AV_PIX_FMT_BGRA64LE, MKTAG('B', 'R', 'A', 64 ) },
+ { AV_PIX_FMT_RGBA64BE, MKTAG(64 , 'R', 'B', 'A') },
+ { AV_PIX_FMT_BGRA64BE, MKTAG(64 , 'B', 'R', 'A') },
+ { AV_PIX_FMT_RGBA, MKTAG('R', 'G', 'B', 'A') },
+ { AV_PIX_FMT_RGB0, MKTAG('R', 'G', 'B', 0 ) },
+ { AV_PIX_FMT_BGRA, MKTAG('B', 'G', 'R', 'A') },
+ { AV_PIX_FMT_BGR0, MKTAG('B', 'G', 'R', 0 ) },
+ { AV_PIX_FMT_ABGR, MKTAG('A', 'B', 'G', 'R') },
+ { AV_PIX_FMT_0BGR, MKTAG( 0 , 'B', 'G', 'R') },
+ { AV_PIX_FMT_ARGB, MKTAG('A', 'R', 'G', 'B') },
+ { AV_PIX_FMT_0RGB, MKTAG( 0 , 'R', 'G', 'B') },
+ { AV_PIX_FMT_RGB24, MKTAG('R', 'G', 'B', 24 ) },
+ { AV_PIX_FMT_BGR24, MKTAG('B', 'G', 'R', 24 ) },
+ { AV_PIX_FMT_YUV411P, MKTAG('4', '1', '1', 'P') },
+ { AV_PIX_FMT_YUV422P, MKTAG('4', '2', '2', 'P') },
+ { AV_PIX_FMT_YUVJ422P, MKTAG('4', '2', '2', 'P') },
+ { AV_PIX_FMT_YUV440P, MKTAG('4', '4', '0', 'P') },
+ { AV_PIX_FMT_YUVJ440P, MKTAG('4', '4', '0', 'P') },
+ { AV_PIX_FMT_YUV444P, MKTAG('4', '4', '4', 'P') },
+ { AV_PIX_FMT_YUVJ444P, MKTAG('4', '4', '4', 'P') },
+ { AV_PIX_FMT_MONOWHITE,MKTAG('B', '1', 'W', '0') },
+ { AV_PIX_FMT_MONOBLACK,MKTAG('B', '0', 'W', '1') },
+ { AV_PIX_FMT_BGR8, MKTAG('B', 'G', 'R', 8 ) },
+ { AV_PIX_FMT_RGB8, MKTAG('R', 'G', 'B', 8 ) },
+ { AV_PIX_FMT_BGR4, MKTAG('B', 'G', 'R', 4 ) },
+ { AV_PIX_FMT_RGB4, MKTAG('R', 'G', 'B', 4 ) },
+ { AV_PIX_FMT_RGB4_BYTE,MKTAG('B', '4', 'B', 'Y') },
+ { AV_PIX_FMT_BGR4_BYTE,MKTAG('R', '4', 'B', 'Y') },
+ { AV_PIX_FMT_RGB48LE, MKTAG('R', 'G', 'B', 48 ) },
+ { AV_PIX_FMT_RGB48BE, MKTAG( 48, 'R', 'G', 'B') },
+ { AV_PIX_FMT_BGR48LE, MKTAG('B', 'G', 'R', 48 ) },
+ { AV_PIX_FMT_BGR48BE, MKTAG( 48, 'B', 'G', 'R') },
+ { AV_PIX_FMT_GRAY9LE, MKTAG('Y', '1', 0 , 9 ) },
+ { AV_PIX_FMT_GRAY9BE, MKTAG( 9 , 0 , '1', 'Y') },
+ { AV_PIX_FMT_GRAY10LE, MKTAG('Y', '1', 0 , 10 ) },
+ { AV_PIX_FMT_GRAY10BE, MKTAG(10 , 0 , '1', 'Y') },
+ { AV_PIX_FMT_GRAY12LE, MKTAG('Y', '1', 0 , 12 ) },
+ { AV_PIX_FMT_GRAY12BE, MKTAG(12 , 0 , '1', 'Y') },
+ { AV_PIX_FMT_GRAY14LE, MKTAG('Y', '1', 0 , 14 ) },
+ { AV_PIX_FMT_GRAY14BE, MKTAG(14 , 0 , '1', 'Y') },
+ { AV_PIX_FMT_GRAY16LE, MKTAG('Y', '1', 0 , 16 ) },
+ { AV_PIX_FMT_GRAY16BE, MKTAG(16 , 0 , '1', 'Y') },
+ { AV_PIX_FMT_YUV420P9LE, MKTAG('Y', '3', 11 , 9 ) },
+ { AV_PIX_FMT_YUV420P9BE, MKTAG( 9 , 11 , '3', 'Y') },
+ { AV_PIX_FMT_YUV422P9LE, MKTAG('Y', '3', 10 , 9 ) },
+ { AV_PIX_FMT_YUV422P9BE, MKTAG( 9 , 10 , '3', 'Y') },
+ { AV_PIX_FMT_YUV444P9LE, MKTAG('Y', '3', 0 , 9 ) },
+ { AV_PIX_FMT_YUV444P9BE, MKTAG( 9 , 0 , '3', 'Y') },
+ { AV_PIX_FMT_YUV420P10LE, MKTAG('Y', '3', 11 , 10 ) },
+ { AV_PIX_FMT_YUV420P10BE, MKTAG(10 , 11 , '3', 'Y') },
+ { AV_PIX_FMT_YUV422P10LE, MKTAG('Y', '3', 10 , 10 ) },
+ { AV_PIX_FMT_YUV422P10BE, MKTAG(10 , 10 , '3', 'Y') },
+ { AV_PIX_FMT_YUV444P10LE, MKTAG('Y', '3', 0 , 10 ) },
+ { AV_PIX_FMT_YUV444P10BE, MKTAG(10 , 0 , '3', 'Y') },
+ { AV_PIX_FMT_YUV420P12LE, MKTAG('Y', '3', 11 , 12 ) },
+ { AV_PIX_FMT_YUV420P12BE, MKTAG(12 , 11 , '3', 'Y') },
+ { AV_PIX_FMT_YUV422P12LE, MKTAG('Y', '3', 10 , 12 ) },
+ { AV_PIX_FMT_YUV422P12BE, MKTAG(12 , 10 , '3', 'Y') },
+ { AV_PIX_FMT_YUV444P12LE, MKTAG('Y', '3', 0 , 12 ) },
+ { AV_PIX_FMT_YUV444P12BE, MKTAG(12 , 0 , '3', 'Y') },
+ { AV_PIX_FMT_YUV420P14LE, MKTAG('Y', '3', 11 , 14 ) },
+ { AV_PIX_FMT_YUV420P14BE, MKTAG(14 , 11 , '3', 'Y') },
+ { AV_PIX_FMT_YUV422P14LE, MKTAG('Y', '3', 10 , 14 ) },
+ { AV_PIX_FMT_YUV422P14BE, MKTAG(14 , 10 , '3', 'Y') },
+ { AV_PIX_FMT_YUV444P14LE, MKTAG('Y', '3', 0 , 14 ) },
+ { AV_PIX_FMT_YUV444P14BE, MKTAG(14 , 0 , '3', 'Y') },
+ { AV_PIX_FMT_YUV420P16LE, MKTAG('Y', '3', 11 , 16 ) },
+ { AV_PIX_FMT_YUV420P16BE, MKTAG(16 , 11 , '3', 'Y') },
+ { AV_PIX_FMT_YUV422P16LE, MKTAG('Y', '3', 10 , 16 ) },
+ { AV_PIX_FMT_YUV422P16BE, MKTAG(16 , 10 , '3', 'Y') },
+ { AV_PIX_FMT_YUV444P16LE, MKTAG('Y', '3', 0 , 16 ) },
+ { AV_PIX_FMT_YUV444P16BE, MKTAG(16 , 0 , '3', 'Y') },
+ { AV_PIX_FMT_YUVA420P, MKTAG('Y', '4', 11 , 8 ) },
+ { AV_PIX_FMT_YUVA422P, MKTAG('Y', '4', 10 , 8 ) },
+ { AV_PIX_FMT_YUVA444P, MKTAG('Y', '4', 0 , 8 ) },
+ { AV_PIX_FMT_YA8, MKTAG('Y', '2', 0 , 8 ) },
+ { AV_PIX_FMT_PAL8, MKTAG('P', 'A', 'L', 8 ) },
+
+ { AV_PIX_FMT_YUVA420P9LE, MKTAG('Y', '4', 11 , 9 ) },
+ { AV_PIX_FMT_YUVA420P9BE, MKTAG( 9 , 11 , '4', 'Y') },
+ { AV_PIX_FMT_YUVA422P9LE, MKTAG('Y', '4', 10 , 9 ) },
+ { AV_PIX_FMT_YUVA422P9BE, MKTAG( 9 , 10 , '4', 'Y') },
+ { AV_PIX_FMT_YUVA444P9LE, MKTAG('Y', '4', 0 , 9 ) },
+ { AV_PIX_FMT_YUVA444P9BE, MKTAG( 9 , 0 , '4', 'Y') },
+ { AV_PIX_FMT_YUVA420P10LE, MKTAG('Y', '4', 11 , 10 ) },
+ { AV_PIX_FMT_YUVA420P10BE, MKTAG(10 , 11 , '4', 'Y') },
+ { AV_PIX_FMT_YUVA422P10LE, MKTAG('Y', '4', 10 , 10 ) },
+ { AV_PIX_FMT_YUVA422P10BE, MKTAG(10 , 10 , '4', 'Y') },
+ { AV_PIX_FMT_YUVA444P10LE, MKTAG('Y', '4', 0 , 10 ) },
+ { AV_PIX_FMT_YUVA444P10BE, MKTAG(10 , 0 , '4', 'Y') },
+ { AV_PIX_FMT_YUVA422P12LE, MKTAG('Y', '4', 10 , 12 ) },
+ { AV_PIX_FMT_YUVA422P12BE, MKTAG(12 , 10 , '4', 'Y') },
+ { AV_PIX_FMT_YUVA444P12LE, MKTAG('Y', '4', 0 , 12 ) },
+ { AV_PIX_FMT_YUVA444P12BE, MKTAG(12 , 0 , '4', 'Y') },
+ { AV_PIX_FMT_YUVA420P16LE, MKTAG('Y', '4', 11 , 16 ) },
+ { AV_PIX_FMT_YUVA420P16BE, MKTAG(16 , 11 , '4', 'Y') },
+ { AV_PIX_FMT_YUVA422P16LE, MKTAG('Y', '4', 10 , 16 ) },
+ { AV_PIX_FMT_YUVA422P16BE, MKTAG(16 , 10 , '4', 'Y') },
+ { AV_PIX_FMT_YUVA444P16LE, MKTAG('Y', '4', 0 , 16 ) },
+ { AV_PIX_FMT_YUVA444P16BE, MKTAG(16 , 0 , '4', 'Y') },
+
+ { AV_PIX_FMT_GBRP, MKTAG('G', '3', 00 , 8 ) },
+ { AV_PIX_FMT_GBRP9LE, MKTAG('G', '3', 00 , 9 ) },
+ { AV_PIX_FMT_GBRP9BE, MKTAG( 9 , 00 , '3', 'G') },
+ { AV_PIX_FMT_GBRP10LE, MKTAG('G', '3', 00 , 10 ) },
+ { AV_PIX_FMT_GBRP10BE, MKTAG(10 , 00 , '3', 'G') },
+ { AV_PIX_FMT_GBRP12LE, MKTAG('G', '3', 00 , 12 ) },
+ { AV_PIX_FMT_GBRP12BE, MKTAG(12 , 00 , '3', 'G') },
+ { AV_PIX_FMT_GBRP14LE, MKTAG('G', '3', 00 , 14 ) },
+ { AV_PIX_FMT_GBRP14BE, MKTAG(14 , 00 , '3', 'G') },
+ { AV_PIX_FMT_GBRP16LE, MKTAG('G', '3', 00 , 16 ) },
+ { AV_PIX_FMT_GBRP16BE, MKTAG(16 , 00 , '3', 'G') },
+
+ { AV_PIX_FMT_GBRAP, MKTAG('G', '4', 00 , 8 ) },
+ { AV_PIX_FMT_GBRAP10LE, MKTAG('G', '4', 00 , 10 ) },
+ { AV_PIX_FMT_GBRAP10BE, MKTAG(10 , 00 , '4', 'G') },
+ { AV_PIX_FMT_GBRAP12LE, MKTAG('G', '4', 00 , 12 ) },
+ { AV_PIX_FMT_GBRAP12BE, MKTAG(12 , 00 , '4', 'G') },
+ { AV_PIX_FMT_GBRAP16LE, MKTAG('G', '4', 00 , 16 ) },
+ { AV_PIX_FMT_GBRAP16BE, MKTAG(16 , 00 , '4', 'G') },
+
+ { AV_PIX_FMT_XYZ12LE, MKTAG('X', 'Y', 'Z' , 36 ) },
+ { AV_PIX_FMT_XYZ12BE, MKTAG(36 , 'Z' , 'Y', 'X') },
+
+ { AV_PIX_FMT_BAYER_BGGR8, MKTAG(0xBA, 'B', 'G', 8 ) },
+ { AV_PIX_FMT_BAYER_BGGR16LE, MKTAG(0xBA, 'B', 'G', 16 ) },
+ { AV_PIX_FMT_BAYER_BGGR16BE, MKTAG(16 , 'G', 'B', 0xBA) },
+ { AV_PIX_FMT_BAYER_RGGB8, MKTAG(0xBA, 'R', 'G', 8 ) },
+ { AV_PIX_FMT_BAYER_RGGB16LE, MKTAG(0xBA, 'R', 'G', 16 ) },
+ { AV_PIX_FMT_BAYER_RGGB16BE, MKTAG(16 , 'G', 'R', 0xBA) },
+ { AV_PIX_FMT_BAYER_GBRG8, MKTAG(0xBA, 'G', 'B', 8 ) },
+ { AV_PIX_FMT_BAYER_GBRG16LE, MKTAG(0xBA, 'G', 'B', 16 ) },
+ { AV_PIX_FMT_BAYER_GBRG16BE, MKTAG(16, 'B', 'G', 0xBA) },
+ { AV_PIX_FMT_BAYER_GRBG8, MKTAG(0xBA, 'G', 'R', 8 ) },
+ { AV_PIX_FMT_BAYER_GRBG16LE, MKTAG(0xBA, 'G', 'R', 16 ) },
+ { AV_PIX_FMT_BAYER_GRBG16BE, MKTAG(16, 'R', 'G', 0xBA) },
+
+ /* quicktime */
+ { AV_PIX_FMT_YUV420P, MKTAG('R', '4', '2', '0') }, /* Radius DV YUV PAL */
+ { AV_PIX_FMT_YUV411P, MKTAG('R', '4', '1', '1') }, /* Radius DV YUV NTSC */
+ { AV_PIX_FMT_UYVY422, MKTAG('2', 'v', 'u', 'y') },
+ { AV_PIX_FMT_UYVY422, MKTAG('2', 'V', 'u', 'y') },
+ { AV_PIX_FMT_UYVY422, MKTAG('A', 'V', 'U', 'I') }, /* FIXME merge both fields */
+ { AV_PIX_FMT_UYVY422, MKTAG('b', 'x', 'y', 'v') },
+ { AV_PIX_FMT_YUYV422, MKTAG('y', 'u', 'v', '2') },
+ { AV_PIX_FMT_YUYV422, MKTAG('y', 'u', 'v', 's') },
+ { AV_PIX_FMT_YUYV422, MKTAG('D', 'V', 'O', 'O') }, /* Digital Voodoo SD 8 Bit */
+ { AV_PIX_FMT_RGB555LE,MKTAG('L', '5', '5', '5') },
+ { AV_PIX_FMT_RGB565LE,MKTAG('L', '5', '6', '5') },
+ { AV_PIX_FMT_RGB565BE,MKTAG('B', '5', '6', '5') },
+ { AV_PIX_FMT_BGR24, MKTAG('2', '4', 'B', 'G') },
+ { AV_PIX_FMT_BGR24, MKTAG('b', 'x', 'b', 'g') },
+ { AV_PIX_FMT_BGRA, MKTAG('B', 'G', 'R', 'A') },
+ { AV_PIX_FMT_RGBA, MKTAG('R', 'G', 'B', 'A') },
+ { AV_PIX_FMT_RGB24, MKTAG('b', 'x', 'r', 'g') },
+ { AV_PIX_FMT_ABGR, MKTAG('A', 'B', 'G', 'R') },
+ { AV_PIX_FMT_GRAY16BE,MKTAG('b', '1', '6', 'g') },
+ { AV_PIX_FMT_RGB48BE, MKTAG('b', '4', '8', 'r') },
+ { AV_PIX_FMT_RGBA64BE,MKTAG('b', '6', '4', 'a') },
+ { AV_PIX_FMT_BAYER_RGGB16BE, MKTAG('B', 'G', 'G', 'R') },
+
+ /* vlc */
+ { AV_PIX_FMT_YUV410P, MKTAG('I', '4', '1', '0') },
+ { AV_PIX_FMT_YUV411P, MKTAG('I', '4', '1', '1') },
+ { AV_PIX_FMT_YUV422P, MKTAG('I', '4', '2', '2') },
+ { AV_PIX_FMT_YUV440P, MKTAG('I', '4', '4', '0') },
+ { AV_PIX_FMT_YUV444P, MKTAG('I', '4', '4', '4') },
+ { AV_PIX_FMT_YUVJ420P, MKTAG('J', '4', '2', '0') },
+ { AV_PIX_FMT_YUVJ422P, MKTAG('J', '4', '2', '2') },
+ { AV_PIX_FMT_YUVJ440P, MKTAG('J', '4', '4', '0') },
+ { AV_PIX_FMT_YUVJ444P, MKTAG('J', '4', '4', '4') },
+ { AV_PIX_FMT_YUVA444P, MKTAG('Y', 'U', 'V', 'A') },
+ { AV_PIX_FMT_YUVA420P, MKTAG('I', '4', '0', 'A') },
+ { AV_PIX_FMT_YUVA422P, MKTAG('I', '4', '2', 'A') },
+ { AV_PIX_FMT_RGB8, MKTAG('R', 'G', 'B', '2') },
+ { AV_PIX_FMT_RGB555LE, MKTAG('R', 'V', '1', '5') },
+ { AV_PIX_FMT_RGB565LE, MKTAG('R', 'V', '1', '6') },
+ { AV_PIX_FMT_BGR24, MKTAG('R', 'V', '2', '4') },
+ { AV_PIX_FMT_BGR0, MKTAG('R', 'V', '3', '2') },
+ { AV_PIX_FMT_RGBA, MKTAG('A', 'V', '3', '2') },
+ { AV_PIX_FMT_YUV420P9LE, MKTAG('I', '0', '9', 'L') },
+ { AV_PIX_FMT_YUV420P9BE, MKTAG('I', '0', '9', 'B') },
+ { AV_PIX_FMT_YUV422P9LE, MKTAG('I', '2', '9', 'L') },
+ { AV_PIX_FMT_YUV422P9BE, MKTAG('I', '2', '9', 'B') },
+ { AV_PIX_FMT_YUV444P9LE, MKTAG('I', '4', '9', 'L') },
+ { AV_PIX_FMT_YUV444P9BE, MKTAG('I', '4', '9', 'B') },
+ { AV_PIX_FMT_YUV420P10LE, MKTAG('I', '0', 'A', 'L') },
+ { AV_PIX_FMT_YUV420P10BE, MKTAG('I', '0', 'A', 'B') },
+ { AV_PIX_FMT_YUV422P10LE, MKTAG('I', '2', 'A', 'L') },
+ { AV_PIX_FMT_YUV422P10BE, MKTAG('I', '2', 'A', 'B') },
+ { AV_PIX_FMT_YUV444P10LE, MKTAG('I', '4', 'A', 'L') },
+ { AV_PIX_FMT_YUV444P10BE, MKTAG('I', '4', 'A', 'B') },
+ { AV_PIX_FMT_YUV420P12LE, MKTAG('I', '0', 'C', 'L') },
+ { AV_PIX_FMT_YUV420P12BE, MKTAG('I', '0', 'C', 'B') },
+ { AV_PIX_FMT_YUV422P12LE, MKTAG('I', '2', 'C', 'L') },
+ { AV_PIX_FMT_YUV422P12BE, MKTAG('I', '2', 'C', 'B') },
+ { AV_PIX_FMT_YUV444P12LE, MKTAG('I', '4', 'C', 'L') },
+ { AV_PIX_FMT_YUV444P12BE, MKTAG('I', '4', 'C', 'B') },
+ { AV_PIX_FMT_YUV420P16LE, MKTAG('I', '0', 'F', 'L') },
+ { AV_PIX_FMT_YUV420P16BE, MKTAG('I', '0', 'F', 'B') },
+ { AV_PIX_FMT_YUV444P16LE, MKTAG('I', '4', 'F', 'L') },
+ { AV_PIX_FMT_YUV444P16BE, MKTAG('I', '4', 'F', 'B') },
+
+ /* special */
+ { AV_PIX_FMT_RGB565LE,MKTAG( 3 , 0 , 0 , 0 ) }, /* flipped RGB565LE */
+ { AV_PIX_FMT_YUV444P, MKTAG('Y', 'V', '2', '4') }, /* YUV444P, swapped UV */
+
+ { AV_PIX_FMT_NONE, 0 },
+};
+
+const struct PixelFormatTag *avpriv_get_raw_pix_fmt_tags(void)
+{
+ return raw_pix_fmt_tags;
+}
+
+unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat fmt)
+{
+ const PixelFormatTag *tags = raw_pix_fmt_tags;
+ while (tags->pix_fmt >= 0) {
+ if (tags->pix_fmt == fmt)
+ return tags->fourcc;
+ tags++;
+ }
+ return 0;
+}
+
+static const PixelFormatTag pix_fmt_bps_avi[] = {
+ { AV_PIX_FMT_PAL8, 1 },
+ { AV_PIX_FMT_PAL8, 2 },
+ { AV_PIX_FMT_PAL8, 4 },
+ { AV_PIX_FMT_PAL8, 8 },
+ { AV_PIX_FMT_RGB444LE, 12 },
+ { AV_PIX_FMT_RGB555LE, 15 },
+ { AV_PIX_FMT_RGB555LE, 16 },
+ { AV_PIX_FMT_BGR24, 24 },
+ { AV_PIX_FMT_BGRA, 32 },
+ { AV_PIX_FMT_NONE, 0 },
+};
+
+static const PixelFormatTag pix_fmt_bps_mov[] = {
+ { AV_PIX_FMT_PAL8, 1 },
+ { AV_PIX_FMT_PAL8, 2 },
+ { AV_PIX_FMT_PAL8, 4 },
+ { AV_PIX_FMT_PAL8, 8 },
+ { AV_PIX_FMT_RGB555BE, 16 },
+ { AV_PIX_FMT_RGB24, 24 },
+ { AV_PIX_FMT_ARGB, 32 },
+ { AV_PIX_FMT_PAL8, 33 },
+ { AV_PIX_FMT_NONE, 0 },
+};
+
+static enum AVPixelFormat find_pix_fmt(const PixelFormatTag *tags,
+ unsigned int fourcc)
+{
+ while (tags->pix_fmt != AV_PIX_FMT_NONE) {
+ if (tags->fourcc == fourcc)
+ return tags->pix_fmt;
+ tags++;
+ }
+ return AV_PIX_FMT_NONE;
+}
+
+enum AVPixelFormat avpriv_pix_fmt_find(enum PixelFormatTagLists list,
+ unsigned fourcc)
+{
+ const PixelFormatTag *tags;
+
+ switch (list) {
+ case PIX_FMT_LIST_RAW:
+ tags = raw_pix_fmt_tags;
+ break;
+ case PIX_FMT_LIST_AVI:
+ tags = pix_fmt_bps_avi;
+ break;
+ case PIX_FMT_LIST_MOV:
+ tags = pix_fmt_bps_mov;
+ break;
+ }
+ return find_pix_fmt(tags, fourcc);
+}
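+
+/* Illustrative round trip (the caller below is hypothetical; it only relies
+ * on the helpers defined in this file): a raw FOURCC from a container can be
+ * mapped to an AVPixelFormat and back.
+ *
+ *     enum AVPixelFormat fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_RAW,
+ *                                                  MKTAG('I', '4', '2', '0'));
+ *     // fmt == AV_PIX_FMT_YUV420P (first matching entry in raw_pix_fmt_tags)
+ *
+ *     unsigned int tag = avcodec_pix_fmt_to_codec_tag(AV_PIX_FMT_YUV420P);
+ *     // tag == MKTAG('I', '4', '2', '0'); 0 is returned for formats with no tag
+ */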
diff --git a/media/ffvpx/libavcodec/raw.h b/media/ffvpx/libavcodec/raw.h
new file mode 100644
index 0000000000..9a4ddef8fc
--- /dev/null
+++ b/media/ffvpx/libavcodec/raw.h
@@ -0,0 +1,48 @@
+/*
+ * Raw Video Codec
+ * Copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Raw Video Codec
+ */
+
+#ifndef AVCODEC_RAW_H
+#define AVCODEC_RAW_H
+
+#include "libavutil/pixfmt.h"
+
+typedef struct PixelFormatTag {
+ enum AVPixelFormat pix_fmt;
+ unsigned int fourcc;
+} PixelFormatTag;
+
+const struct PixelFormatTag *avpriv_get_raw_pix_fmt_tags(void);
+
+enum PixelFormatTagLists {
+ PIX_FMT_LIST_RAW,
+ PIX_FMT_LIST_AVI,
+ PIX_FMT_LIST_MOV,
+};
+
+enum AVPixelFormat avpriv_pix_fmt_find(enum PixelFormatTagLists list,
+ unsigned fourcc);
+
+#endif /* AVCODEC_RAW_H */
diff --git a/media/ffvpx/libavcodec/rdft.c b/media/ffvpx/libavcodec/rdft.c
new file mode 100644
index 0000000000..ac6f5d6781
--- /dev/null
+++ b/media/ffvpx/libavcodec/rdft.c
@@ -0,0 +1,120 @@
+/*
+ * (I)RDFT transforms
+ * Copyright (c) 2009 Alex Converse <alex dot converse at gmail dot com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <stdlib.h>
+#include <math.h>
+#include "libavutil/error.h"
+#include "libavutil/mathematics.h"
+#include "rdft.h"
+
+/**
+ * @file
+ * (Inverse) Real Discrete Fourier Transforms.
+ */
+
+/** Map one real FFT into two parallel real even and odd FFTs. Then interleave
+ * the two real FFTs into one complex FFT. Unmangle the results.
+ * ref: http://www.engineeringproductivitytools.com/stuff/T0001/PT10.HTM
+ */
+static void rdft_calc_c(RDFTContext *s, FFTSample *data)
+{
+ int i, i1, i2;
+ FFTComplex ev, od, odsum;
+ const int n = 1 << s->nbits;
+ const float k1 = 0.5;
+ const float k2 = 0.5 - s->inverse;
+ const FFTSample *tcos = s->tcos;
+ const FFTSample *tsin = s->tsin;
+
+ if (!s->inverse) {
+ s->fft.fft_permute(&s->fft, (FFTComplex*)data);
+ s->fft.fft_calc(&s->fft, (FFTComplex*)data);
+ }
+ /* i=0 is a special case because of packing: the DC term is real, so we
+ throw the N/2 (Nyquist) term (also real) in with it. */
+ ev.re = data[0];
+ data[0] = ev.re+data[1];
+ data[1] = ev.re-data[1];
+
+#define RDFT_UNMANGLE(sign0, sign1) \
+ for (i = 1; i < (n>>2); i++) { \
+ i1 = 2*i; \
+ i2 = n-i1; \
+ /* Separate even and odd FFTs */ \
+ ev.re = k1*(data[i1 ]+data[i2 ]); \
+ od.im = k2*(data[i2 ]-data[i1 ]); \
+ ev.im = k1*(data[i1+1]-data[i2+1]); \
+ od.re = k2*(data[i1+1]+data[i2+1]); \
+ /* Apply twiddle factors to the odd FFT and add to the even FFT */ \
+ odsum.re = od.re*tcos[i] sign0 od.im*tsin[i]; \
+ odsum.im = od.im*tcos[i] sign1 od.re*tsin[i]; \
+ data[i1 ] = ev.re + odsum.re; \
+ data[i1+1] = ev.im + odsum.im; \
+ data[i2 ] = ev.re - odsum.re; \
+ data[i2+1] = odsum.im - ev.im; \
+ }
+
+ if (s->negative_sin) {
+ RDFT_UNMANGLE(+,-)
+ } else {
+ RDFT_UNMANGLE(-,+)
+ }
+
+ data[2*i+1]=s->sign_convention*data[2*i+1];
+ if (s->inverse) {
+ data[0] *= k1;
+ data[1] *= k1;
+ s->fft.fft_permute(&s->fft, (FFTComplex*)data);
+ s->fft.fft_calc(&s->fft, (FFTComplex*)data);
+ }
+}
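+
+/* The transform is in place and packed: for a forward R2C transform of
+ * n = 1 << nbits real samples, data[0] holds the DC term and data[1] the
+ * Nyquist (n/2) term, both purely real; data[2k] and data[2k+1] hold the
+ * real and imaginary parts of bin k for 0 < k < n/2 (the sign of the
+ * imaginary part follows the transform's sign convention). A reader of the
+ * output would unpack it roughly like this (names are illustrative):
+ *
+ *     float dc      = data[0];
+ *     float nyquist = data[1];
+ *     float re_k    = data[2 * k];
+ *     float im_k    = data[2 * k + 1];    // 0 < k < n/2
+ */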
+
+av_cold int ff_rdft_init(RDFTContext *s, int nbits, enum RDFTransformType trans)
+{
+ int n = 1 << nbits;
+ int ret;
+
+ s->nbits = nbits;
+ s->inverse = trans == IDFT_C2R || trans == DFT_C2R;
+ s->sign_convention = trans == IDFT_R2C || trans == DFT_C2R ? 1 : -1;
+ s->negative_sin = trans == DFT_C2R || trans == DFT_R2C;
+
+ if (nbits < 4 || nbits > 16)
+ return AVERROR(EINVAL);
+
+ if ((ret = ff_fft_init(&s->fft, nbits-1, trans == IDFT_C2R || trans == IDFT_R2C)) < 0)
+ return ret;
+
+ ff_init_ff_cos_tabs(nbits);
+ s->tcos = ff_cos_tabs[nbits];
+ s->tsin = ff_cos_tabs[nbits] + (n >> 2);
+ s->rdft_calc = rdft_calc_c;
+
+#if ARCH_ARM
+ ff_rdft_init_arm(s);
+#endif
+
+ return 0;
+}
+
+av_cold void ff_rdft_end(RDFTContext *s)
+{
+ ff_fft_end(&s->fft);
+}
diff --git a/media/ffvpx/libavcodec/rdft.h b/media/ffvpx/libavcodec/rdft.h
new file mode 100644
index 0000000000..ffafca7f24
--- /dev/null
+++ b/media/ffvpx/libavcodec/rdft.h
@@ -0,0 +1,52 @@
+/*
+ * (I)RDFT transforms
+ * Copyright (c) 2009 Alex Converse <alex dot converse at gmail dot com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#if !defined(AVCODEC_RDFT_H) && (!defined(FFT_FLOAT) || FFT_FLOAT)
+#define AVCODEC_RDFT_H
+
+#include "config.h"
+#include "fft.h"
+
+struct RDFTContext {
+ int nbits;
+ int inverse;
+ int sign_convention;
+
+ /* pre/post rotation tables */
+ const FFTSample *tcos;
+ const FFTSample *tsin;
+ int negative_sin;
+ FFTContext fft;
+ void (*rdft_calc)(struct RDFTContext *s, FFTSample *z);
+};
+
+/**
+ * Set up a real FFT.
+ * @param nbits log2 of the length of the input array
+ * @param trans the type of transform
+ */
+int ff_rdft_init(RDFTContext *s, int nbits, enum RDFTransformType trans);
+void ff_rdft_end(RDFTContext *s);
+
+void ff_rdft_init_arm(RDFTContext *s);
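+
+/* Minimal usage sketch (hypothetical caller, error handling trimmed). The
+ * buffer holds 1 << nbits real samples and is transformed in place:
+ *
+ *     static void rdft_example(FFTSample buf[1 << 9])
+ *     {
+ *         RDFTContext rdft;
+ *         if (ff_rdft_init(&rdft, 9, DFT_R2C) < 0)   // nbits must be in [4, 16]
+ *             return;
+ *         rdft.rdft_calc(&rdft, buf);                // forward real-to-complex DFT
+ *         ff_rdft_end(&rdft);
+ *     }
+ */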
+
+
+#endif /* AVCODEC_RDFT_H */
diff --git a/media/ffvpx/libavcodec/rectangle.h b/media/ffvpx/libavcodec/rectangle.h
new file mode 100644
index 0000000000..df7c18a4e2
--- /dev/null
+++ b/media/ffvpx/libavcodec/rectangle.h
@@ -0,0 +1,124 @@
+/*
+ * rectangle filling function
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * useful rectangle filling function
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVCODEC_RECTANGLE_H
+#define AVCODEC_RECTANGLE_H
+
+#include "config.h"
+#include "libavutil/common.h"
+#include "libavutil/avassert.h"
+
+/**
+ * fill a rectangle.
+ * @param h height of the rectangle, should be a constant
+ * @param w width of the rectangle, should be a constant
+ * @param size the size of val (1, 2 or 4), should be a constant
+ */
+static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){
+ uint8_t *p= (uint8_t*)vp;
+ av_assert2(size==1 || size==2 || size==4);
+ av_assert2(w<=4);
+
+ w *= size;
+ stride *= size;
+
+ av_assert2((((long)vp)&(FFMIN(w, 8<<(HAVE_NEON|ARCH_PPC|HAVE_MMX))-1)) == 0);
+ av_assert2((stride&(w-1))==0);
+ if(w==2){
+ const uint16_t v= size==4 ? val : val*0x0101;
+ *(uint16_t*)(p + 0*stride)= v;
+ if(h==1) return;
+ *(uint16_t*)(p + 1*stride)= v;
+ if(h==2) return;
+ *(uint16_t*)(p + 2*stride)= v;
+ *(uint16_t*)(p + 3*stride)= v;
+ }else if(w==4){
+ const uint32_t v= size==4 ? val : size==2 ? val*0x00010001 : val*0x01010101;
+ *(uint32_t*)(p + 0*stride)= v;
+ if(h==1) return;
+ *(uint32_t*)(p + 1*stride)= v;
+ if(h==2) return;
+ *(uint32_t*)(p + 2*stride)= v;
+ *(uint32_t*)(p + 3*stride)= v;
+ }else if(w==8){
+ // gcc cannot optimize 64-bit math on x86_32
+#if HAVE_FAST_64BIT
+ const uint64_t v= size==2 ? val*0x0001000100010001ULL : val*0x0100000001ULL;
+ *(uint64_t*)(p + 0*stride)= v;
+ if(h==1) return;
+ *(uint64_t*)(p + 1*stride)= v;
+ if(h==2) return;
+ *(uint64_t*)(p + 2*stride)= v;
+ *(uint64_t*)(p + 3*stride)= v;
+ }else if(w==16){
+ const uint64_t v= val*0x0100000001ULL;
+ *(uint64_t*)(p + 0+0*stride)= v;
+ *(uint64_t*)(p + 8+0*stride)= v;
+ *(uint64_t*)(p + 0+1*stride)= v;
+ *(uint64_t*)(p + 8+1*stride)= v;
+ if(h==2) return;
+ *(uint64_t*)(p + 0+2*stride)= v;
+ *(uint64_t*)(p + 8+2*stride)= v;
+ *(uint64_t*)(p + 0+3*stride)= v;
+ *(uint64_t*)(p + 8+3*stride)= v;
+#else
+ const uint32_t v= size==2 ? val*0x00010001 : val;
+ *(uint32_t*)(p + 0+0*stride)= v;
+ *(uint32_t*)(p + 4+0*stride)= v;
+ if(h==1) return;
+ *(uint32_t*)(p + 0+1*stride)= v;
+ *(uint32_t*)(p + 4+1*stride)= v;
+ if(h==2) return;
+ *(uint32_t*)(p + 0+2*stride)= v;
+ *(uint32_t*)(p + 4+2*stride)= v;
+ *(uint32_t*)(p + 0+3*stride)= v;
+ *(uint32_t*)(p + 4+3*stride)= v;
+ }else if(w==16){
+ *(uint32_t*)(p + 0+0*stride)= val;
+ *(uint32_t*)(p + 4+0*stride)= val;
+ *(uint32_t*)(p + 8+0*stride)= val;
+ *(uint32_t*)(p +12+0*stride)= val;
+ *(uint32_t*)(p + 0+1*stride)= val;
+ *(uint32_t*)(p + 4+1*stride)= val;
+ *(uint32_t*)(p + 8+1*stride)= val;
+ *(uint32_t*)(p +12+1*stride)= val;
+ if(h==2) return;
+ *(uint32_t*)(p + 0+2*stride)= val;
+ *(uint32_t*)(p + 4+2*stride)= val;
+ *(uint32_t*)(p + 8+2*stride)= val;
+ *(uint32_t*)(p +12+2*stride)= val;
+ *(uint32_t*)(p + 0+3*stride)= val;
+ *(uint32_t*)(p + 4+3*stride)= val;
+ *(uint32_t*)(p + 8+3*stride)= val;
+ *(uint32_t*)(p +12+3*stride)= val;
+#endif
+ }else
+ av_assert2(0);
+ av_assert2(h==4);
+}
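+
+/* Usage sketch (hypothetical caller): fill a 4x4 block of bytes with the same
+ * value. w, h and size are compile-time constants as required above, stride is
+ * given in elements, and the buffer is assumed suitably aligned:
+ *
+ *     uint8_t block[4 * 4];
+ *     fill_rectangle(block, 4, 4, 4, 0x01, 1);   // every byte becomes 0x01
+ */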
+
+#endif /* AVCODEC_RECTANGLE_H */
diff --git a/media/ffvpx/libavcodec/reverse.c b/media/ffvpx/libavcodec/reverse.c
new file mode 100644
index 0000000000..440badaf34
--- /dev/null
+++ b/media/ffvpx/libavcodec/reverse.c
@@ -0,0 +1 @@
+#include "libavutil/reverse.c"
diff --git a/media/ffvpx/libavcodec/rl.h b/media/ffvpx/libavcodec/rl.h
new file mode 100644
index 0000000000..4380fda272
--- /dev/null
+++ b/media/ffvpx/libavcodec/rl.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2000-2002 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rl header.
+ */
+
+#ifndef AVCODEC_RL_H
+#define AVCODEC_RL_H
+
+#include <stdint.h>
+
+#include "vlc.h"
+
+/* run length table */
+#define MAX_RUN 64
+#define MAX_LEVEL 64
+
+/** RLTable. */
+typedef struct RLTable {
+ int n; ///< number of entries of table_vlc minus 1
+ int last; ///< number of values for last = 0
+ const uint16_t (*table_vlc)[2];
+ const int8_t *table_run;
+ const int8_t *table_level;
+ uint8_t *index_run[2]; ///< encoding only
+ int8_t *max_level[2]; ///< encoding & decoding
+ int8_t *max_run[2]; ///< encoding & decoding
+ RL_VLC_ELEM *rl_vlc[32]; ///< decoding only
+} RLTable;
+
+/**
+ * Initialize max_level and index_run from table_run and table_level;
+ * this is equivalent to initializing RLTable.max_level[0] and
+ * RLTable.index_run[0] with ff_rl_init().
+ */
+void ff_rl_init_level_run(uint8_t max_level[MAX_LEVEL + 1],
+ uint8_t index_run[MAX_RUN + 1],
+ const uint8_t table_run[/* n */],
+ const uint8_t table_level[/* n*/], int n);
+
+/**
+ * Initialize index_run, max_level and max_run from n, last, table_vlc,
+ * table_run and table_level.
+ * @param static_store static uint8_t array[2][2*MAX_RUN + MAX_LEVEL + 3]
+ * to hold the level and run tables.
+ * @note This function does not touch rl_vlc at all, hence there is no need
+ * to synchronize calls to ff_rl_init() and ff_rl_init_vlc() using the
+ * same RLTable.
+ */
+void ff_rl_init(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3]);
+
+/**
+ * Initialize rl_vlc from n, last, table_vlc, table_run and table_level.
+ * All rl_vlc pointers to be initialized must already point to a static
+ * buffer of `static_size` RL_VLC_ELEM elements; if a pointer is NULL,
+ * initializing further VLCs stops.
+ * @note This function does not touch what ff_rl_init() initializes at all,
+ * hence there is no need to synchronize calls to ff_rl_init() and
+ * ff_rl_init_vlc() using the same RLTable.
+ */
+void ff_rl_init_vlc(RLTable *rl, unsigned static_size);
+
+#define INIT_VLC_RL(rl, static_size)\
+{\
+ static RL_VLC_ELEM rl_vlc_table[32][static_size];\
+\
+ for (int q = 0; q < 32; q++) \
+ rl.rl_vlc[q] = rl_vlc_table[q]; \
+\
+ ff_rl_init_vlc(&rl, static_size); \
+}
+
+#define INIT_FIRST_VLC_RL(rl, static_size) \
+do { \
+ static RL_VLC_ELEM rl_vlc_table[static_size]; \
+ \
+ rl.rl_vlc[0] = rl_vlc_table; \
+ ff_rl_init_vlc(&rl, static_size); \
+} while (0)
+
+static inline int get_rl_index(const RLTable *rl, int last, int run, int level)
+{
+ int index;
+ index = rl->index_run[last][run];
+ if (index >= rl->n)
+ return rl->n;
+ if (level > rl->max_level[last][run])
+ return rl->n;
+ return index + level - 1;
+}
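+
+/* Initialization sketch. The tables and sizes below are placeholders for a
+ * codec's real run/level/VLC data; only the call sequence and the shape of
+ * the static storage follow the documentation above.
+ *
+ *     static RLTable rl_example = {
+ *         102, 58, example_table_vlc, example_table_run, example_table_level,
+ *     };
+ *
+ *     static void rl_example_init(void)
+ *     {
+ *         static uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3];
+ *         ff_rl_init(&rl_example, static_store);   // fills index_run/max_level/max_run
+ *         INIT_FIRST_VLC_RL(rl_example, 680);      // builds rl_vlc[0] from table_vlc
+ *     }
+ */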
+
+#endif /* AVCODEC_RL_H */
diff --git a/media/ffvpx/libavcodec/rnd_avg.h b/media/ffvpx/libavcodec/rnd_avg.h
new file mode 100644
index 0000000000..344775e31f
--- /dev/null
+++ b/media/ffvpx/libavcodec/rnd_avg.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
+ * Copyright (c) 2011 Oskar Arvidsson
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_RND_AVG_H
+#define AVCODEC_RND_AVG_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define BYTE_VEC32(c) ((c) * 0x01010101UL)
+#define BYTE_VEC64(c) ((c) * 0x0001000100010001UL)
+
+static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
+{
+ return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
+}
+
+static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
+{
+ return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
+}
+
+static inline uint64_t rnd_avg64(uint64_t a, uint64_t b)
+{
+ return (a | b) - (((a ^ b) & ~BYTE_VEC64(0x01)) >> 1);
+}
+
+static inline uint64_t no_rnd_avg64(uint64_t a, uint64_t b)
+{
+ return (a & b) + (((a ^ b) & ~BYTE_VEC64(0x01)) >> 1);
+}
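+
+/* These helpers average four (or eight) packed bytes at once without letting
+ * carries cross byte boundaries: masking the low bit of each byte of a ^ b
+ * before the shift keeps every lane independent. rnd_avg* rounds each byte
+ * average up, no_rnd_avg* rounds it down. Worked example on one byte lane:
+ *
+ *     rnd_avg32(0x00000001, 0x00000002)    == 0x00000002   // ceil((1 + 2) / 2)
+ *     no_rnd_avg32(0x00000001, 0x00000002) == 0x00000001   // floor((1 + 2) / 2)
+ */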
+
+#endif /* AVCODEC_RND_AVG_H */
diff --git a/media/ffvpx/libavcodec/simple_idct.c b/media/ffvpx/libavcodec/simple_idct.c
new file mode 100644
index 0000000000..eb13cff146
--- /dev/null
+++ b/media/ffvpx/libavcodec/simple_idct.c
@@ -0,0 +1,269 @@
+/*
+ * Simple IDCT
+ *
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simpleidct in C.
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "mathops.h"
+#include "simple_idct.h"
+
+#define IN_IDCT_DEPTH 16
+
+#define BIT_DEPTH 8
+#include "simple_idct_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 10
+#include "simple_idct_template.c"
+
+#define EXTRA_SHIFT 2
+#include "simple_idct_template.c"
+
+#undef EXTRA_SHIFT
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 12
+#include "simple_idct_template.c"
+#undef BIT_DEPTH
+#undef IN_IDCT_DEPTH
+
+#define IN_IDCT_DEPTH 32
+#define BIT_DEPTH 10
+#include "simple_idct_template.c"
+#undef BIT_DEPTH
+#undef IN_IDCT_DEPTH
+
+/* 2x4x8 idct */
+
+#define CN_SHIFT 12
+#define C_FIX(x) ((int)((x) * (1 << CN_SHIFT) + 0.5))
+#define C1 C_FIX(0.6532814824)
+#define C2 C_FIX(0.2705980501)
+
+/* row idct is multiplied by 16 * sqrt(2.0), col idct4 is normalized,
+ and the butterfly must be multiplied by 0.5 * sqrt(2.0) */
+#define C_SHIFT (4+1+12)
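+
+/* Worked values for the fixed-point constants above: C_FIX() converts to Q12,
+ * so C1 = (int)(0.6532814824 * 4096 + 0.5) = 2676 and
+ * C2 = (int)(0.2705980501 * 4096 + 0.5) = 1108. The two factors appear to be
+ * cos(M_PI/8)/sqrt(2) and sin(M_PI/8)/sqrt(2), the non-trivial IDCT4
+ * coefficients. */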
+
+static inline void idct4col_put(uint8_t *dest, ptrdiff_t line_size, const int16_t *col)
+{
+ int c0, c1, c2, c3, a0, a1, a2, a3;
+
+ a0 = col[8*0];
+ a1 = col[8*2];
+ a2 = col[8*4];
+ a3 = col[8*6];
+ c0 = ((a0 + a2) * (1 << CN_SHIFT - 1)) + (1 << (C_SHIFT - 1));
+ c2 = ((a0 - a2) * (1 << CN_SHIFT - 1)) + (1 << (C_SHIFT - 1));
+ c1 = a1 * C1 + a3 * C2;
+ c3 = a1 * C2 - a3 * C1;
+ dest[0] = av_clip_uint8((c0 + c1) >> C_SHIFT);
+ dest += line_size;
+ dest[0] = av_clip_uint8((c2 + c3) >> C_SHIFT);
+ dest += line_size;
+ dest[0] = av_clip_uint8((c2 - c3) >> C_SHIFT);
+ dest += line_size;
+ dest[0] = av_clip_uint8((c0 - c1) >> C_SHIFT);
+}
+
+#define BF(k) \
+{\
+ int a0, a1;\
+ a0 = ptr[k];\
+ a1 = ptr[8 + k];\
+ ptr[k] = a0 + a1;\
+ ptr[8 + k] = a0 - a1;\
+}
+
+/* only used by the DV codec. The input must be interlaced. 128 is added
+ to the pixels before clamping to avoid systematic error;
+ a (1024*sqrt(2)) offset would be needed otherwise. */
+/* XXX: I think a 1.0/sqrt(2) normalization might be needed to
+ compensate for the extra butterfly stage - I don't have the full DV
+ specification */
+void ff_simple_idct248_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ int i;
+ int16_t *ptr;
+
+ /* butterfly */
+ ptr = block;
+ for(i=0;i<4;i++) {
+ BF(0);
+ BF(1);
+ BF(2);
+ BF(3);
+ BF(4);
+ BF(5);
+ BF(6);
+ BF(7);
+ ptr += 2 * 8;
+ }
+
+ /* IDCT8 on each line */
+ for(i=0; i<8; i++) {
+ idctRowCondDC_int16_8bit(block + i*8, 0);
+ }
+
+ /* IDCT4 and store */
+ for(i=0;i<8;i++) {
+ idct4col_put(dest + i, 2 * line_size, block + i);
+ idct4col_put(dest + line_size + i, 2 * line_size, block + 8 + i);
+ }
+}
+
+/* 8x4 & 4x8 WMV2 IDCT */
+#undef CN_SHIFT
+#undef C_SHIFT
+#undef C_FIX
+#undef C1
+#undef C2
+#define CN_SHIFT 12
+#define C_FIX(x) ((int)((x) * M_SQRT2 * (1 << CN_SHIFT) + 0.5))
+#define C1 C_FIX(0.6532814824)
+#define C2 C_FIX(0.2705980501)
+#define C3 C_FIX(0.5)
+#define C_SHIFT (4+1+12)
+static inline void idct4col_add(uint8_t *dest, ptrdiff_t line_size, const int16_t *col)
+{
+ int c0, c1, c2, c3, a0, a1, a2, a3;
+
+ a0 = col[8*0];
+ a1 = col[8*1];
+ a2 = col[8*2];
+ a3 = col[8*3];
+ c0 = (a0 + a2)*C3 + (1 << (C_SHIFT - 1));
+ c2 = (a0 - a2)*C3 + (1 << (C_SHIFT - 1));
+ c1 = a1 * C1 + a3 * C2;
+ c3 = a1 * C2 - a3 * C1;
+ dest[0] = av_clip_uint8(dest[0] + ((c0 + c1) >> C_SHIFT));
+ dest += line_size;
+ dest[0] = av_clip_uint8(dest[0] + ((c2 + c3) >> C_SHIFT));
+ dest += line_size;
+ dest[0] = av_clip_uint8(dest[0] + ((c2 - c3) >> C_SHIFT));
+ dest += line_size;
+ dest[0] = av_clip_uint8(dest[0] + ((c0 - c1) >> C_SHIFT));
+}
+
+#define RN_SHIFT 15
+#define R_FIX(x) ((int)((x) * M_SQRT2 * (1 << RN_SHIFT) + 0.5))
+#define R1 R_FIX(0.6532814824)
+#define R2 R_FIX(0.2705980501)
+#define R3 R_FIX(0.5)
+#define R_SHIFT 11
+static inline void idct4row(int16_t *row)
+{
+ unsigned c0, c1, c2, c3;
+ int a0, a1, a2, a3;
+
+ a0 = row[0];
+ a1 = row[1];
+ a2 = row[2];
+ a3 = row[3];
+ c0 = (a0 + a2)*R3 + (1 << (R_SHIFT - 1));
+ c2 = (a0 - a2)*R3 + (1 << (R_SHIFT - 1));
+ c1 = a1 * R1 + a3 * R2;
+ c3 = a1 * R2 - a3 * R1;
+ row[0]= (c0 + c1) >> R_SHIFT;
+ row[1]= (c2 + c3) >> R_SHIFT;
+ row[2]= (c2 - c3) >> R_SHIFT;
+ row[3]= (c0 - c1) >> R_SHIFT;
+}
+
+void ff_simple_idct84_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ int i;
+
+ /* IDCT8 on each line */
+ for(i=0; i<4; i++) {
+ idctRowCondDC_int16_8bit(block + i*8, 0);
+ }
+
+ /* IDCT4 and store */
+ for(i=0;i<8;i++) {
+ idct4col_add(dest + i, line_size, block + i);
+ }
+}
+
+void ff_simple_idct48_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ int i;
+
+ /* IDCT4 on each line */
+ for(i=0; i<8; i++) {
+ idct4row(block + i*8);
+ }
+
+ /* IDCT8 and store */
+ for(i=0; i<4; i++){
+ idctSparseColAdd_int16_8bit(dest + i, line_size, block + i);
+ }
+}
+
+void ff_simple_idct44_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
+{
+ int i;
+
+ /* IDCT4 on each line */
+ for(i=0; i<4; i++) {
+ idct4row(block + i*8);
+ }
+
+ /* IDCT4 and store */
+ for(i=0; i<4; i++){
+ idct4col_add(dest + i, line_size, block + i);
+ }
+}
+
+void ff_prores_idct_10(int16_t *block, const int16_t *qmat)
+{
+ int i;
+
+ for (i = 0; i < 64; i++)
+ block[i] *= qmat[i];
+
+ for (i = 0; i < 8; i++)
+ idctRowCondDC_extrashift_10(block + i*8, 2);
+
+ for (i = 0; i < 8; i++) {
+ block[i] += 8192;
+ idctSparseCol_extrashift_10(block + i);
+ }
+}
+
+void ff_prores_idct_12(int16_t *block, const int16_t *qmat)
+{
+ int i;
+
+ for (i = 0; i < 64; i++)
+ block[i] *= qmat[i];
+
+ for (i = 0; i < 8; i++)
+ idctRowCondDC_int16_12bit(block + i*8, 0);
+
+ for (i = 0; i < 8; i++) {
+ block[i] += 8192;
+ idctSparseCol_int16_12bit(block + i);
+ }
+}
diff --git a/media/ffvpx/libavcodec/simple_idct.h b/media/ffvpx/libavcodec/simple_idct.h
new file mode 100644
index 0000000000..20578b3347
--- /dev/null
+++ b/media/ffvpx/libavcodec/simple_idct.h
@@ -0,0 +1,64 @@
+/*
+ * Simple IDCT
+ *
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple idct header.
+ */
+
+#ifndef AVCODEC_SIMPLE_IDCT_H
+#define AVCODEC_SIMPLE_IDCT_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+void ff_simple_idct_put_int16_8bit(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct_add_int16_8bit(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct_int16_8bit(int16_t *block);
+
+void ff_simple_idct_put_int16_10bit(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct_add_int16_10bit(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct_int16_10bit(int16_t *block);
+
+void ff_simple_idct_put_int32_10bit(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct_add_int32_10bit(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct_int32_10bit(int16_t *block);
+
+void ff_simple_idct_put_int16_12bit(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct_add_int16_12bit(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct_int16_12bit(int16_t *block);
+
+/**
+ * Special version of ff_simple_idct_int16_10bit() which does dequantization
+ * and scales by a factor of 2 more between the two IDCTs to account
+ * for the larger scale of the input coefficients.
+ */
+void ff_prores_idct_10(int16_t *block, const int16_t *qmat);
+void ff_prores_idct_12(int16_t *block, const int16_t *qmat);
+
+void ff_simple_idct248_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+
+void ff_simple_idct84_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct48_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct44_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+
+#endif /* AVCODEC_SIMPLE_IDCT_H */
diff --git a/media/ffvpx/libavcodec/simple_idct_template.c b/media/ffvpx/libavcodec/simple_idct_template.c
new file mode 100644
index 0000000000..5ddd0b45a2
--- /dev/null
+++ b/media/ffvpx/libavcodec/simple_idct_template.c
@@ -0,0 +1,371 @@
+/*
+ * Simple IDCT
+ *
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simpleidct in C.
+ */
+
+/* Based upon some commented-out C code from mpeg2dec (idct_mmx.c
+ * written by Aaron Holtzman <aholtzma@ess.engr.uvic.ca>). */
+
+#include "simple_idct.h"
+
+#include "bit_depth_template.c"
+
+#undef W1
+#undef W2
+#undef W3
+#undef W4
+#undef W5
+#undef W6
+#undef W7
+#undef ROW_SHIFT
+#undef COL_SHIFT
+#undef DC_SHIFT
+#undef MUL
+#undef MAC
+
+#if BIT_DEPTH == 8
+
+#define W1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#define W7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
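+
+/* Worked values for the formula in the comments above (same expression for
+ * every Wi with its own i):
+ * W1 = (int)(cos(1*M_PI/16) * sqrt(2) * (1 << 14) + 0.5)
+ *    = (int)(0.980785 * 1.414214 * 16384 + 0.5) = 22725, and
+ * W7 = (int)(cos(7*M_PI/16) * sqrt(2) * (1 << 14) + 0.5) = 4520,
+ * matching the table. */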
+
+#define ROW_SHIFT 11
+#define COL_SHIFT 20
+#define DC_SHIFT 3
+
+#define MUL(a, b) MUL16(a, b)
+#define MAC(a, b, c) MAC16(a, b, c)
+
+#elif BIT_DEPTH == 10 || BIT_DEPTH == 12
+
+# if BIT_DEPTH == 10
+#define W1 22725 // 90901
+#define W2 21407 // 85627
+#define W3 19265 // 77062
+#define W4 16384 // 65535
+#define W5 12873 // 51491
+#define W6 8867 // 35468
+#define W7 4520 // 18081
+
+# ifdef EXTRA_SHIFT
+#define ROW_SHIFT 13
+#define COL_SHIFT 18
+#define DC_SHIFT 1
+# elif IN_IDCT_DEPTH == 32
+#define ROW_SHIFT 13
+#define COL_SHIFT 21
+#define DC_SHIFT 2
+# else
+#define ROW_SHIFT 12
+#define COL_SHIFT 19
+#define DC_SHIFT 2
+# endif
+
+# else
+#define W1 45451
+#define W2 42813
+#define W3 38531
+#define W4 32767
+#define W5 25746
+#define W6 17734
+#define W7 9041
+
+#define ROW_SHIFT 16
+#define COL_SHIFT 17
+#define DC_SHIFT -1
+# endif
+
+#define MUL(a, b) ((int)((SUINT)(a) * (b)))
+#define MAC(a, b, c) ((a) += (SUINT)(b) * (c))
+
+#else
+
+#error "Unsupported bitdepth"
+
+#endif
+
+#ifdef EXTRA_SHIFT
+static inline void FUNC(idctRowCondDC_extrashift)(int16_t *row, int extra_shift)
+#else
+static inline void FUNC6(idctRowCondDC)(idctin *row, int extra_shift)
+#endif
+{
+ SUINT a0, a1, a2, a3, b0, b1, b2, b3;
+
+// TODO: Add DC-only support for int32_t input
+#if IN_IDCT_DEPTH == 16
+#if HAVE_FAST_64BIT
+#define ROW0_MASK (0xffffULL << 48 * HAVE_BIGENDIAN)
+ if (((AV_RN64A(row) & ~ROW0_MASK) | AV_RN64A(row+4)) == 0) {
+ uint64_t temp;
+ if (DC_SHIFT - extra_shift >= 0) {
+ temp = (row[0] * (1 << (DC_SHIFT - extra_shift))) & 0xffff;
+ } else {
+ temp = ((row[0] + (1<<(extra_shift - DC_SHIFT-1))) >> (extra_shift - DC_SHIFT)) & 0xffff;
+ }
+ temp += temp * (1 << 16);
+ temp += temp * ((uint64_t) 1 << 32);
+ AV_WN64A(row, temp);
+ AV_WN64A(row + 4, temp);
+ return;
+ }
+#else
+ if (!(AV_RN32A(row+2) |
+ AV_RN32A(row+4) |
+ AV_RN32A(row+6) |
+ row[1])) {
+ uint32_t temp;
+ if (DC_SHIFT - extra_shift >= 0) {
+ temp = (row[0] * (1 << (DC_SHIFT - extra_shift))) & 0xffff;
+ } else {
+ temp = ((row[0] + (1<<(extra_shift - DC_SHIFT-1))) >> (extra_shift - DC_SHIFT)) & 0xffff;
+ }
+ temp += temp * (1 << 16);
+ AV_WN32A(row, temp);
+ AV_WN32A(row+2, temp);
+ AV_WN32A(row+4, temp);
+ AV_WN32A(row+6, temp);
+ return;
+ }
+#endif
+#endif
+
+ a0 = ((SUINT)W4 * row[0]) + (1 << (ROW_SHIFT + extra_shift - 1));
+ a1 = a0;
+ a2 = a0;
+ a3 = a0;
+
+ a0 += (SUINT)W2 * row[2];
+ a1 += (SUINT)W6 * row[2];
+ a2 -= (SUINT)W6 * row[2];
+ a3 -= (SUINT)W2 * row[2];
+
+ b0 = MUL(W1, row[1]);
+ MAC(b0, W3, row[3]);
+ b1 = MUL(W3, row[1]);
+ MAC(b1, -W7, row[3]);
+ b2 = MUL(W5, row[1]);
+ MAC(b2, -W1, row[3]);
+ b3 = MUL(W7, row[1]);
+ MAC(b3, -W5, row[3]);
+
+#if IN_IDCT_DEPTH == 32
+ if (AV_RN64A(row + 4) | AV_RN64A(row + 6)) {
+#else
+ if (AV_RN64A(row + 4)) {
+#endif
+ a0 += (SUINT) W4*row[4] + (SUINT)W6*row[6];
+ a1 += (SUINT)- W4*row[4] - (SUINT)W2*row[6];
+ a2 += (SUINT)- W4*row[4] + (SUINT)W2*row[6];
+ a3 += (SUINT) W4*row[4] - (SUINT)W6*row[6];
+
+ MAC(b0, W5, row[5]);
+ MAC(b0, W7, row[7]);
+
+ MAC(b1, -W1, row[5]);
+ MAC(b1, -W5, row[7]);
+
+ MAC(b2, W7, row[5]);
+ MAC(b2, W3, row[7]);
+
+ MAC(b3, W3, row[5]);
+ MAC(b3, -W1, row[7]);
+ }
+
+ row[0] = (int)(a0 + b0) >> (ROW_SHIFT + extra_shift);
+ row[7] = (int)(a0 - b0) >> (ROW_SHIFT + extra_shift);
+ row[1] = (int)(a1 + b1) >> (ROW_SHIFT + extra_shift);
+ row[6] = (int)(a1 - b1) >> (ROW_SHIFT + extra_shift);
+ row[2] = (int)(a2 + b2) >> (ROW_SHIFT + extra_shift);
+ row[5] = (int)(a2 - b2) >> (ROW_SHIFT + extra_shift);
+ row[3] = (int)(a3 + b3) >> (ROW_SHIFT + extra_shift);
+ row[4] = (int)(a3 - b3) >> (ROW_SHIFT + extra_shift);
+}
+
+#define IDCT_COLS do { \
+ a0 = (SUINT)W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4)); \
+ a1 = a0; \
+ a2 = a0; \
+ a3 = a0; \
+ \
+ a0 += (SUINT) W2*col[8*2]; \
+ a1 += (SUINT) W6*col[8*2]; \
+ a2 += (SUINT)-W6*col[8*2]; \
+ a3 += (SUINT)-W2*col[8*2]; \
+ \
+ b0 = MUL(W1, col[8*1]); \
+ b1 = MUL(W3, col[8*1]); \
+ b2 = MUL(W5, col[8*1]); \
+ b3 = MUL(W7, col[8*1]); \
+ \
+ MAC(b0, W3, col[8*3]); \
+ MAC(b1, -W7, col[8*3]); \
+ MAC(b2, -W1, col[8*3]); \
+ MAC(b3, -W5, col[8*3]); \
+ \
+ if (col[8*4]) { \
+ a0 += (SUINT) W4*col[8*4]; \
+ a1 += (SUINT)-W4*col[8*4]; \
+ a2 += (SUINT)-W4*col[8*4]; \
+ a3 += (SUINT) W4*col[8*4]; \
+ } \
+ \
+ if (col[8*5]) { \
+ MAC(b0, W5, col[8*5]); \
+ MAC(b1, -W1, col[8*5]); \
+ MAC(b2, W7, col[8*5]); \
+ MAC(b3, W3, col[8*5]); \
+ } \
+ \
+ if (col[8*6]) { \
+ a0 += (SUINT) W6*col[8*6]; \
+ a1 += (SUINT)-W2*col[8*6]; \
+ a2 += (SUINT) W2*col[8*6]; \
+ a3 += (SUINT)-W6*col[8*6]; \
+ } \
+ \
+ if (col[8*7]) { \
+ MAC(b0, W7, col[8*7]); \
+ MAC(b1, -W5, col[8*7]); \
+ MAC(b2, W3, col[8*7]); \
+ MAC(b3, -W1, col[8*7]); \
+ } \
+ } while (0)
+
+#ifdef EXTRA_SHIFT
+static inline void FUNC(idctSparseCol_extrashift)(int16_t *col)
+#else
+static inline void FUNC6(idctSparseColPut)(pixel *dest, ptrdiff_t line_size,
+ idctin *col)
+{
+ SUINT a0, a1, a2, a3, b0, b1, b2, b3;
+
+ IDCT_COLS;
+
+ dest[0] = av_clip_pixel((int)(a0 + b0) >> COL_SHIFT);
+ dest += line_size;
+ dest[0] = av_clip_pixel((int)(a1 + b1) >> COL_SHIFT);
+ dest += line_size;
+ dest[0] = av_clip_pixel((int)(a2 + b2) >> COL_SHIFT);
+ dest += line_size;
+ dest[0] = av_clip_pixel((int)(a3 + b3) >> COL_SHIFT);
+ dest += line_size;
+ dest[0] = av_clip_pixel((int)(a3 - b3) >> COL_SHIFT);
+ dest += line_size;
+ dest[0] = av_clip_pixel((int)(a2 - b2) >> COL_SHIFT);
+ dest += line_size;
+ dest[0] = av_clip_pixel((int)(a1 - b1) >> COL_SHIFT);
+ dest += line_size;
+ dest[0] = av_clip_pixel((int)(a0 - b0) >> COL_SHIFT);
+}
+
+static inline void FUNC6(idctSparseColAdd)(pixel *dest, ptrdiff_t line_size,
+ idctin *col)
+{
+ unsigned a0, a1, a2, a3, b0, b1, b2, b3;
+
+ IDCT_COLS;
+
+ dest[0] = av_clip_pixel(dest[0] + ((int)(a0 + b0) >> COL_SHIFT));
+ dest += line_size;
+ dest[0] = av_clip_pixel(dest[0] + ((int)(a1 + b1) >> COL_SHIFT));
+ dest += line_size;
+ dest[0] = av_clip_pixel(dest[0] + ((int)(a2 + b2) >> COL_SHIFT));
+ dest += line_size;
+ dest[0] = av_clip_pixel(dest[0] + ((int)(a3 + b3) >> COL_SHIFT));
+ dest += line_size;
+ dest[0] = av_clip_pixel(dest[0] + ((int)(a3 - b3) >> COL_SHIFT));
+ dest += line_size;
+ dest[0] = av_clip_pixel(dest[0] + ((int)(a2 - b2) >> COL_SHIFT));
+ dest += line_size;
+ dest[0] = av_clip_pixel(dest[0] + ((int)(a1 - b1) >> COL_SHIFT));
+ dest += line_size;
+ dest[0] = av_clip_pixel(dest[0] + ((int)(a0 - b0) >> COL_SHIFT));
+}
+
+static inline void FUNC6(idctSparseCol)(idctin *col)
+#endif
+{
+ unsigned a0, a1, a2, a3, b0, b1, b2, b3;
+
+ IDCT_COLS;
+
+ col[0 ] = ((int)(a0 + b0) >> COL_SHIFT);
+ col[8 ] = ((int)(a1 + b1) >> COL_SHIFT);
+ col[16] = ((int)(a2 + b2) >> COL_SHIFT);
+ col[24] = ((int)(a3 + b3) >> COL_SHIFT);
+ col[32] = ((int)(a3 - b3) >> COL_SHIFT);
+ col[40] = ((int)(a2 - b2) >> COL_SHIFT);
+ col[48] = ((int)(a1 - b1) >> COL_SHIFT);
+ col[56] = ((int)(a0 - b0) >> COL_SHIFT);
+}
+
+#ifndef EXTRA_SHIFT
+void FUNC6(ff_simple_idct_put)(uint8_t *dest_, ptrdiff_t line_size, int16_t *block_)
+{
+ idctin *block = (idctin *)block_;
+ pixel *dest = (pixel *)dest_;
+ int i;
+
+ line_size /= sizeof(pixel);
+
+ for (i = 0; i < 8; i++)
+ FUNC6(idctRowCondDC)(block + i*8, 0);
+
+ for (i = 0; i < 8; i++)
+ FUNC6(idctSparseColPut)(dest + i, line_size, block + i);
+}
+
+#if IN_IDCT_DEPTH == 16
+void FUNC6(ff_simple_idct_add)(uint8_t *dest_, ptrdiff_t line_size, int16_t *block)
+{
+ pixel *dest = (pixel *)dest_;
+ int i;
+
+ line_size /= sizeof(pixel);
+
+ for (i = 0; i < 8; i++)
+ FUNC6(idctRowCondDC)(block + i*8, 0);
+
+ for (i = 0; i < 8; i++)
+ FUNC6(idctSparseColAdd)(dest + i, line_size, block + i);
+}
+
+void FUNC6(ff_simple_idct)(int16_t *block)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ FUNC6(idctRowCondDC)(block + i*8, 0);
+
+ for (i = 0; i < 8; i++)
+ FUNC6(idctSparseCol)(block + i);
+}
+#endif
+#endif
diff --git a/media/ffvpx/libavcodec/startcode.h b/media/ffvpx/libavcodec/startcode.h
new file mode 100644
index 0000000000..8b75832aaf
--- /dev/null
+++ b/media/ffvpx/libavcodec/startcode.h
@@ -0,0 +1,36 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Accelerated start code search function for start codes common to
+ * MPEG-1/2/4 video, VC-1, H.264 and H.265
+ */
+
+#ifndef AVCODEC_STARTCODE_H
+#define AVCODEC_STARTCODE_H
+
+#include <stdint.h>
+
+const uint8_t *avpriv_find_start_code(const uint8_t *p,
+ const uint8_t *end,
+ uint32_t *state);
+
+int ff_startcode_find_candidate_c(const uint8_t *buf, int size);
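+
+/* Scanning sketch (hypothetical caller; handle_start_code() is illustrative).
+ * avpriv_find_start_code() keeps the last four bytes seen in *state and
+ * returns a pointer just past any start code it finds, so a buffer can be
+ * walked like this:
+ *
+ *     uint32_t state = -1;                        // no start code seen yet
+ *     const uint8_t *p = buf, *end = buf + size;
+ *     while (p < end) {
+ *         p = avpriv_find_start_code(p, end, &state);
+ *         if ((state & 0xFFFFFF00) == 0x100)      // found 00 00 01 xx
+ *             handle_start_code(state & 0xFF);
+ *     }
+ */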
+
+#endif /* AVCODEC_STARTCODE_H */
diff --git a/media/ffvpx/libavcodec/thread.h b/media/ffvpx/libavcodec/thread.h
new file mode 100644
index 0000000000..88a14cfeb1
--- /dev/null
+++ b/media/ffvpx/libavcodec/thread.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2008 Alexander Strange <astrange@ithinksw.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Multithreading support functions
+ * @author Alexander Strange <astrange@ithinksw.com>
+ */
+
+#ifndef AVCODEC_THREAD_H
+#define AVCODEC_THREAD_H
+
+#include "libavutil/buffer.h"
+
+#include "avcodec.h"
+
+/**
+ * Wait for decoding threads to finish and reset internal state.
+ * Called by avcodec_flush_buffers().
+ *
+ * @param avctx The context.
+ */
+void ff_thread_flush(AVCodecContext *avctx);
+
+/**
+ * Submit a new frame to a decoding thread.
+ * Returns the next available frame in picture. *got_picture_ptr
+ * will be 0 if none is available.
+ * The return value on success is the size of the consumed packet for
+ * compatibility with FFCodec.decode. This means the decoder
+ * has to consume the full packet.
+ *
+ * Parameters are the same as FFCodec.decode.
+ */
+int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture,
+ int *got_picture_ptr, AVPacket *avpkt);
+
+/**
+ * If the codec defines update_thread_context(), call this
+ * when they are ready for the next thread to start decoding
+ * the next frame. After calling it, do not change any variables
+ * read by the update_thread_context() method, or call ff_thread_get_buffer().
+ *
+ * @param avctx The context.
+ */
+void ff_thread_finish_setup(AVCodecContext *avctx);
+
+#define ff_thread_get_format ff_get_format
+
+/**
+ * Wrapper around get_buffer() for frame-multithreaded codecs.
+ * Call this function instead of ff_get_buffer(f).
+ * Cannot be called after the codec has called ff_thread_finish_setup().
+ *
+ * @param avctx The current context.
+ * @param f The frame to write into.
+ */
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags);
+
+/**
+ * Wrapper around release_buffer() for frame-multithreaded codecs.
+ * Call this function instead of avctx->release_buffer(f).
+ * The AVFrame will be copied and the actual release_buffer() call
+ * will be performed later. The contents of data pointed to by the
+ * AVFrame should not be changed until ff_thread_get_buffer() is called
+ * on it.
+ *
+ * @param avctx The current context.
+ * @param f The picture being released.
+ */
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f);
+
+int ff_thread_init(AVCodecContext *s);
+int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx,
+ int (*action_func2)(AVCodecContext *c, void *arg, int jobnr, int threadnr),
+ int (*main_func)(AVCodecContext *c), void *arg, int *ret, int job_count);
+void ff_thread_free(AVCodecContext *s);
+int ff_slice_thread_allocz_entries(AVCodecContext *avctx, int count);
+int ff_slice_thread_init_progress(AVCodecContext *avctx);
+void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n);
+void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift);
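+
+/* Rough call-order sketch for a frame-threaded decoder, using only functions
+ * declared above (the decode callback shown is hypothetical and heavily
+ * abridged):
+ *
+ *     static int decode_frame_example(AVCodecContext *avctx, AVFrame *frame)
+ *     {
+ *         int ret = ff_thread_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
+ *         if (ret < 0)
+ *             return ret;
+ *         // ... parse headers and finish per-frame setup ...
+ *         ff_thread_finish_setup(avctx);  // the next thread may start decoding now
+ *         // ... decode; no shared context changes after this point ...
+ *         return 0;
+ *     }
+ */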
+
+#endif /* AVCODEC_THREAD_H */
diff --git a/media/ffvpx/libavcodec/threadframe.h b/media/ffvpx/libavcodec/threadframe.h
new file mode 100644
index 0000000000..d2f93c5cd0
--- /dev/null
+++ b/media/ffvpx/libavcodec/threadframe.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2022 Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_THREADFRAME_H
+#define AVCODEC_THREADFRAME_H
+
+#include "libavutil/frame.h"
+#include "avcodec.h"
+
+typedef struct ThreadFrame {
+ AVFrame *f;
+ AVCodecContext *owner[2];
+ // progress->data is an array of 2 ints holding progress for top/bottom
+ // fields
+ AVBufferRef *progress;
+} ThreadFrame;
+
+/**
+ * Notify later decoding threads when part of their reference picture is ready.
+ * Call this when some part of the picture is finished decoding.
+ * Later calls with lower values of progress have no effect.
+ *
+ * @param f The picture being decoded.
+ * @param progress Value, in arbitrary units, of how much of the picture has decoded.
+ * @param field The field being decoded, for field-picture codecs.
+ * 0 for top field or frame pictures, 1 for bottom field.
+ */
+void ff_thread_report_progress(ThreadFrame *f, int progress, int field);
+
+/**
+ * Wait for earlier decoding threads to finish reference pictures.
+ * Call this before accessing some part of a picture, with a given
+ * value for progress, and it will return after the responsible decoding
+ * thread calls ff_thread_report_progress() with the same or
+ * higher value for progress.
+ *
+ * @param f The picture being referenced.
+ * @param progress Value, in arbitrary units, to wait for.
+ * @param field The field being referenced, for field-picture codecs.
+ * 0 for top field or frame pictures, 1 for bottom field.
+ */
+void ff_thread_await_progress(const ThreadFrame *f, int progress, int field);
+
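
A minimal sketch of how a frame-threaded decoder typically pairs the two progress calls above; the row loop and the decoding body are purely illustrative, only the ff_thread_* calls reflect the declared API:

    static void decode_rows(ThreadFrame *cur, const ThreadFrame *ref, int mb_height)
    {
        for (int mb_y = 0; mb_y < mb_height; mb_y++) {
            /* block until the reference frame has decoded at least this many rows */
            ff_thread_await_progress(ref, mb_y + 1, 0);
            /* ... decode macroblock row mb_y, possibly reading from ref->f ... */
            ff_thread_report_progress(cur, mb_y, 0);
        }
        ff_thread_report_progress(cur, INT_MAX, 0); /* mark the frame fully decoded */
    }
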
+/**
+ * Wrapper around ff_get_buffer() for frame-multithreaded codecs.
+ * Call this function instead of ff_get_buffer() if you might need
+ * to wait for progress on this frame.
+ * Cannot be called after the codec has called ff_thread_finish_setup().
+ *
+ * @param avctx The current context.
+ * @param f The frame to write into.
+ * @note: It is fine to call this with codecs that do not support
+ * frame threading.
+ */
+int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
+
+/**
+ * Unref a ThreadFrame.
+ *
+ * This is basically a wrapper around av_frame_unref() and should
+ * be called instead of it.
+ *
+ * @param avctx The current context.
+ * @param f The picture being released.
+ */
+void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f);
+
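
As a rough usage sketch (error paths trimmed, surrounding decoder state assumed), a ThreadFrame is typically acquired and released like this:

    ThreadFrame tf = { .f = av_frame_alloc() };
    if (!tf.f)
        return AVERROR(ENOMEM);
    int ret = ff_thread_get_ext_buffer(avctx, &tf, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;
    /* ... decode into tf.f and report progress on tf ... */
    ff_thread_release_ext_buffer(avctx, &tf);
    av_frame_free(&tf.f);
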
+int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src);
+
+int ff_thread_can_start_frame(AVCodecContext *avctx);
+
+#endif
diff --git a/media/ffvpx/libavcodec/unary.h b/media/ffvpx/libavcodec/unary.h
new file mode 100644
index 0000000000..d57f9f70c5
--- /dev/null
+++ b/media/ffvpx/libavcodec/unary.h
@@ -0,0 +1,69 @@
+/*
+ * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_UNARY_H
+#define AVCODEC_UNARY_H
+
+#include "get_bits.h"
+
+/**
+ * Get unary code of limited length
+ * @param gb GetBitContext
+ * @param[in] stop The bitstop value (unary code of 1's or 0's)
+ * @param[in] len Maximum length
+ * @return unary 0 based code index. This is also the length in bits of the
+ * code excluding the stop bit.
+ * (in case len=1)
+ * 1 0
+ * 0 1
+ * (in case len=2)
+ * 1 0
+ * 01 1
+ * 00 2
+ * (in case len=3)
+ * 1 0
+ * 01 1
+ * 001 2
+ * 000 3
+ */
+static inline int get_unary(GetBitContext *gb, int stop, int len)
+{
+ int i;
+
+ for(i = 0; i < len && get_bits1(gb) != stop; i++);
+ return i;
+}
+
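
For example, with stop = 1 the bit string "001" decodes to 2, matching the table above (buffer contents chosen purely for illustration):

    GetBitContext gb;
    const uint8_t buf[] = { 0x20 };   /* bit pattern 0010 0000 */
    init_get_bits8(&gb, buf, sizeof(buf));
    int v = get_unary(&gb, 1, 8);     /* consumes "001" and returns 2 */
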
+/**
+ * Get unary code terminated by a 0 with a maximum length of 33
+ * @param gb GetBitContext
+ * @return Unary length/index
+ */
+static inline int get_unary_0_33(GetBitContext *gb)
+{
+ return get_unary(gb, 0, 33);
+}
+
+static inline int get_unary_0_9(GetBitContext *gb)
+{
+ return get_unary(gb, 0, 9);
+}
+
+#endif /* AVCODEC_UNARY_H */
diff --git a/media/ffvpx/libavcodec/utils.c b/media/ffvpx/libavcodec/utils.c
new file mode 100644
index 0000000000..599da21dba
--- /dev/null
+++ b/media/ffvpx/libavcodec/utils.c
@@ -0,0 +1,1170 @@
+/*
+ * utils for libavcodec
+ * Copyright (c) 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Miscellaneous utility functions for libavcodec.
+ */
+
+#include "config.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/mem.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/pixfmt.h"
+#include "avcodec.h"
+#include "codec.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "hwconfig.h"
+#include "thread.h"
+#include "threadframe.h"
+#include "internal.h"
+#include "put_bits.h"
+#include "startcode.h"
+#include <stdlib.h>
+#include <limits.h>
+
+void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
+{
+ uint8_t **p = ptr;
+ if (min_size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
+ av_freep(p);
+ *size = 0;
+ return;
+ }
+ av_fast_mallocz(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (*p)
+ memset(*p + min_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+}
+
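
A typical use is growing a reusable decode buffer so that bitstream readers can safely over-read past the end of the payload; `pkt` below is an illustrative AVPacket, not something defined in this file:

    uint8_t *buf = NULL;
    unsigned int buf_size = 0;

    av_fast_padded_malloc(&buf, &buf_size, pkt->size);
    if (!buf)
        return AVERROR(ENOMEM);
    memcpy(buf, pkt->data, pkt->size); /* the padding bytes are already zeroed */
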
+void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
+{
+ uint8_t **p = ptr;
+ if (min_size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
+ av_freep(p);
+ *size = 0;
+ return;
+ }
+ av_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (*p)
+ memset(*p, 0, min_size + AV_INPUT_BUFFER_PADDING_SIZE);
+}
+
+int av_codec_is_encoder(const AVCodec *avcodec)
+{
+ const FFCodec *const codec = ffcodec(avcodec);
+ return codec && (codec->cb_type == FF_CODEC_CB_TYPE_ENCODE ||
+ codec->cb_type == FF_CODEC_CB_TYPE_ENCODE_SUB ||
+ codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_PACKET);
+}
+
+int av_codec_is_decoder(const AVCodec *avcodec)
+{
+ const FFCodec *const codec = ffcodec(avcodec);
+ return codec && (codec->cb_type == FF_CODEC_CB_TYPE_DECODE ||
+ codec->cb_type == FF_CODEC_CB_TYPE_DECODE_SUB ||
+ codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME);
+}
+
+int ff_set_dimensions(AVCodecContext *s, int width, int height)
+{
+ int ret = av_image_check_size2(width, height, s->max_pixels, AV_PIX_FMT_NONE, 0, s);
+
+ if (ret < 0)
+ width = height = 0;
+
+ s->coded_width = width;
+ s->coded_height = height;
+ s->width = AV_CEIL_RSHIFT(width, s->lowres);
+ s->height = AV_CEIL_RSHIFT(height, s->lowres);
+
+ return ret;
+}
+
+int ff_set_sar(AVCodecContext *avctx, AVRational sar)
+{
+ int ret = av_image_check_sar(avctx->width, avctx->height, sar);
+
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %d/%d\n",
+ sar.num, sar.den);
+ avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
+ return ret;
+ } else {
+ avctx->sample_aspect_ratio = sar;
+ }
+ return 0;
+}
+
+int ff_side_data_update_matrix_encoding(AVFrame *frame,
+ enum AVMatrixEncoding matrix_encoding)
+{
+ AVFrameSideData *side_data;
+ enum AVMatrixEncoding *data;
+
+ side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_MATRIXENCODING);
+ if (!side_data)
+ side_data = av_frame_new_side_data(frame, AV_FRAME_DATA_MATRIXENCODING,
+ sizeof(enum AVMatrixEncoding));
+
+ if (!side_data)
+ return AVERROR(ENOMEM);
+
+ data = (enum AVMatrixEncoding*)side_data->data;
+ *data = matrix_encoding;
+
+ return 0;
+}
+
+void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
+ int linesize_align[AV_NUM_DATA_POINTERS])
+{
+ int i;
+ int w_align = 1;
+ int h_align = 1;
+ AVPixFmtDescriptor const *desc = av_pix_fmt_desc_get(s->pix_fmt);
+
+ if (desc) {
+ w_align = 1 << desc->log2_chroma_w;
+ h_align = 1 << desc->log2_chroma_h;
+ }
+
+ switch (s->pix_fmt) {
+ case AV_PIX_FMT_YUV420P:
+ case AV_PIX_FMT_YUYV422:
+ case AV_PIX_FMT_YVYU422:
+ case AV_PIX_FMT_UYVY422:
+ case AV_PIX_FMT_YUV422P:
+ case AV_PIX_FMT_YUV440P:
+ case AV_PIX_FMT_YUV444P:
+ case AV_PIX_FMT_GBRP:
+ case AV_PIX_FMT_GBRAP:
+ case AV_PIX_FMT_GRAY8:
+ case AV_PIX_FMT_GRAY16BE:
+ case AV_PIX_FMT_GRAY16LE:
+ case AV_PIX_FMT_YUVJ420P:
+ case AV_PIX_FMT_YUVJ422P:
+ case AV_PIX_FMT_YUVJ440P:
+ case AV_PIX_FMT_YUVJ444P:
+ case AV_PIX_FMT_YUVA420P:
+ case AV_PIX_FMT_YUVA422P:
+ case AV_PIX_FMT_YUVA444P:
+ case AV_PIX_FMT_YUV420P9LE:
+ case AV_PIX_FMT_YUV420P9BE:
+ case AV_PIX_FMT_YUV420P10LE:
+ case AV_PIX_FMT_YUV420P10BE:
+ case AV_PIX_FMT_YUV420P12LE:
+ case AV_PIX_FMT_YUV420P12BE:
+ case AV_PIX_FMT_YUV420P14LE:
+ case AV_PIX_FMT_YUV420P14BE:
+ case AV_PIX_FMT_YUV420P16LE:
+ case AV_PIX_FMT_YUV420P16BE:
+ case AV_PIX_FMT_YUVA420P9LE:
+ case AV_PIX_FMT_YUVA420P9BE:
+ case AV_PIX_FMT_YUVA420P10LE:
+ case AV_PIX_FMT_YUVA420P10BE:
+ case AV_PIX_FMT_YUVA420P16LE:
+ case AV_PIX_FMT_YUVA420P16BE:
+ case AV_PIX_FMT_YUV422P9LE:
+ case AV_PIX_FMT_YUV422P9BE:
+ case AV_PIX_FMT_YUV422P10LE:
+ case AV_PIX_FMT_YUV422P10BE:
+ case AV_PIX_FMT_YUV422P12LE:
+ case AV_PIX_FMT_YUV422P12BE:
+ case AV_PIX_FMT_YUV422P14LE:
+ case AV_PIX_FMT_YUV422P14BE:
+ case AV_PIX_FMT_YUV422P16LE:
+ case AV_PIX_FMT_YUV422P16BE:
+ case AV_PIX_FMT_YUVA422P9LE:
+ case AV_PIX_FMT_YUVA422P9BE:
+ case AV_PIX_FMT_YUVA422P10LE:
+ case AV_PIX_FMT_YUVA422P10BE:
+ case AV_PIX_FMT_YUVA422P12LE:
+ case AV_PIX_FMT_YUVA422P12BE:
+ case AV_PIX_FMT_YUVA422P16LE:
+ case AV_PIX_FMT_YUVA422P16BE:
+ case AV_PIX_FMT_YUV440P10LE:
+ case AV_PIX_FMT_YUV440P10BE:
+ case AV_PIX_FMT_YUV440P12LE:
+ case AV_PIX_FMT_YUV440P12BE:
+ case AV_PIX_FMT_YUV444P9LE:
+ case AV_PIX_FMT_YUV444P9BE:
+ case AV_PIX_FMT_YUV444P10LE:
+ case AV_PIX_FMT_YUV444P10BE:
+ case AV_PIX_FMT_YUV444P12LE:
+ case AV_PIX_FMT_YUV444P12BE:
+ case AV_PIX_FMT_YUV444P14LE:
+ case AV_PIX_FMT_YUV444P14BE:
+ case AV_PIX_FMT_YUV444P16LE:
+ case AV_PIX_FMT_YUV444P16BE:
+ case AV_PIX_FMT_YUVA444P9LE:
+ case AV_PIX_FMT_YUVA444P9BE:
+ case AV_PIX_FMT_YUVA444P10LE:
+ case AV_PIX_FMT_YUVA444P10BE:
+ case AV_PIX_FMT_YUVA444P12LE:
+ case AV_PIX_FMT_YUVA444P12BE:
+ case AV_PIX_FMT_YUVA444P16LE:
+ case AV_PIX_FMT_YUVA444P16BE:
+ case AV_PIX_FMT_GBRP9LE:
+ case AV_PIX_FMT_GBRP9BE:
+ case AV_PIX_FMT_GBRP10LE:
+ case AV_PIX_FMT_GBRP10BE:
+ case AV_PIX_FMT_GBRP12LE:
+ case AV_PIX_FMT_GBRP12BE:
+ case AV_PIX_FMT_GBRP14LE:
+ case AV_PIX_FMT_GBRP14BE:
+ case AV_PIX_FMT_GBRP16LE:
+ case AV_PIX_FMT_GBRP16BE:
+ case AV_PIX_FMT_GBRAP12LE:
+ case AV_PIX_FMT_GBRAP12BE:
+ case AV_PIX_FMT_GBRAP16LE:
+ case AV_PIX_FMT_GBRAP16BE:
+        w_align = 16; // FIXME assume 16 pixels per macroblock
+ h_align = 16 * 2; // interlaced needs 2 macroblocks height
+ if (s->codec_id == AV_CODEC_ID_BINKVIDEO)
+ w_align = 16*2;
+ break;
+ case AV_PIX_FMT_YUV411P:
+ case AV_PIX_FMT_YUVJ411P:
+ case AV_PIX_FMT_UYYVYY411:
+ w_align = 32;
+ h_align = 16 * 2;
+ break;
+ case AV_PIX_FMT_YUV410P:
+ if (s->codec_id == AV_CODEC_ID_SVQ1) {
+ w_align = 64;
+ h_align = 64;
+ }
+ break;
+ case AV_PIX_FMT_RGB555:
+ if (s->codec_id == AV_CODEC_ID_RPZA) {
+ w_align = 4;
+ h_align = 4;
+ }
+ if (s->codec_id == AV_CODEC_ID_INTERPLAY_VIDEO) {
+ w_align = 8;
+ h_align = 8;
+ }
+ break;
+ case AV_PIX_FMT_PAL8:
+ case AV_PIX_FMT_BGR8:
+ case AV_PIX_FMT_RGB8:
+ if (s->codec_id == AV_CODEC_ID_SMC ||
+ s->codec_id == AV_CODEC_ID_CINEPAK) {
+ w_align = 4;
+ h_align = 4;
+ }
+ if (s->codec_id == AV_CODEC_ID_JV ||
+ s->codec_id == AV_CODEC_ID_ARGO ||
+ s->codec_id == AV_CODEC_ID_INTERPLAY_VIDEO) {
+ w_align = 8;
+ h_align = 8;
+ }
+ if (s->codec_id == AV_CODEC_ID_MJPEG ||
+ s->codec_id == AV_CODEC_ID_MJPEGB ||
+ s->codec_id == AV_CODEC_ID_LJPEG ||
+ s->codec_id == AV_CODEC_ID_SMVJPEG ||
+ s->codec_id == AV_CODEC_ID_AMV ||
+ s->codec_id == AV_CODEC_ID_SP5X ||
+ s->codec_id == AV_CODEC_ID_JPEGLS) {
+ w_align = 8;
+ h_align = 2*8;
+ }
+ break;
+ case AV_PIX_FMT_BGR24:
+ if ((s->codec_id == AV_CODEC_ID_MSZH) ||
+ (s->codec_id == AV_CODEC_ID_ZLIB)) {
+ w_align = 4;
+ h_align = 4;
+ }
+ break;
+ case AV_PIX_FMT_RGB24:
+ if (s->codec_id == AV_CODEC_ID_CINEPAK) {
+ w_align = 4;
+ h_align = 4;
+ }
+ break;
+ case AV_PIX_FMT_BGR0:
+ if (s->codec_id == AV_CODEC_ID_ARGO) {
+ w_align = 8;
+ h_align = 8;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (s->codec_id == AV_CODEC_ID_IFF_ILBM) {
+ w_align = FFMAX(w_align, 8);
+ }
+
+ *width = FFALIGN(*width, w_align);
+ *height = FFALIGN(*height, h_align);
+ if (s->codec_id == AV_CODEC_ID_H264 || s->lowres ||
+ s->codec_id == AV_CODEC_ID_VC1 || s->codec_id == AV_CODEC_ID_WMV3 ||
+ s->codec_id == AV_CODEC_ID_VP5 || s->codec_id == AV_CODEC_ID_VP6 ||
+ s->codec_id == AV_CODEC_ID_VP6F || s->codec_id == AV_CODEC_ID_VP6A
+ ) {
+ // some of the optimized chroma MC reads one line too much
+ // which is also done in mpeg decoders with lowres > 0
+ *height += 2;
+
+        // H.264 uses edge emulation for out-of-frame motion vectors; for this
+        // it requires a temporary area large enough to hold a 21x21 block.
+        // Increasing the width ensures that the temporary area is large enough;
+        // the next rounded-up width is 32.
+ *width = FFMAX(*width, 32);
+ }
+ if (s->codec_id == AV_CODEC_ID_SVQ3) {
+ *width = FFMAX(*width, 32);
+ }
+
+ for (i = 0; i < 4; i++)
+ linesize_align[i] = STRIDE_ALIGN;
+}
+
+void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt);
+ int chroma_shift = desc->log2_chroma_w;
+ int linesize_align[AV_NUM_DATA_POINTERS];
+ int align;
+
+ avcodec_align_dimensions2(s, width, height, linesize_align);
+ align = FFMAX(linesize_align[0], linesize_align[3]);
+ linesize_align[1] <<= chroma_shift;
+ linesize_align[2] <<= chroma_shift;
+ align = FFMAX3(align, linesize_align[1], linesize_align[2]);
+ *width = FFALIGN(*width, align);
+}
+#if FF_API_AVCODEC_CHROMA_POS
+int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos)
+{
+ return av_chroma_location_enum_to_pos(xpos, ypos, pos);
+}
+
+enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos)
+{
+ return av_chroma_location_pos_to_enum(xpos, ypos);
+}
+#endif
+
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+ enum AVSampleFormat sample_fmt, const uint8_t *buf,
+ int buf_size, int align)
+{
+ int ch, planar, needed_size, ret = 0;
+
+ needed_size = av_samples_get_buffer_size(NULL, nb_channels,
+ frame->nb_samples, sample_fmt,
+ align);
+ if (buf_size < needed_size)
+ return AVERROR(EINVAL);
+
+ planar = av_sample_fmt_is_planar(sample_fmt);
+ if (planar && nb_channels > AV_NUM_DATA_POINTERS) {
+ if (!FF_ALLOCZ_TYPED_ARRAY(frame->extended_data, nb_channels))
+ return AVERROR(ENOMEM);
+ } else {
+ frame->extended_data = frame->data;
+ }
+
+ if ((ret = av_samples_fill_arrays(frame->extended_data, &frame->linesize[0],
+ (uint8_t *)(intptr_t)buf, nb_channels, frame->nb_samples,
+ sample_fmt, align)) < 0) {
+ if (frame->extended_data != frame->data)
+ av_freep(&frame->extended_data);
+ return ret;
+ }
+ if (frame->extended_data != frame->data) {
+ for (ch = 0; ch < AV_NUM_DATA_POINTERS; ch++)
+ frame->data[ch] = frame->extended_data[ch];
+ }
+
+ return ret;
+}
+
+void ff_color_frame(AVFrame *frame, const int c[4])
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ int p, y;
+
+ av_assert0(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
+
+ for (p = 0; p<desc->nb_components; p++) {
+ uint8_t *dst = frame->data[p];
+ int is_chroma = p == 1 || p == 2;
+ int bytes = is_chroma ? AV_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width;
+ int height = is_chroma ? AV_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height;
+ if (desc->comp[0].depth >= 9) {
+ ((uint16_t*)dst)[0] = c[p];
+ av_memcpy_backptr(dst + 2, 2, bytes - 2);
+ dst += frame->linesize[p];
+ for (y = 1; y < height; y++) {
+ memcpy(dst, frame->data[p], 2*bytes);
+ dst += frame->linesize[p];
+ }
+ } else {
+ for (y = 0; y < height; y++) {
+ memset(dst, c[p], bytes);
+ dst += frame->linesize[p];
+ }
+ }
+ }
+}
+
+int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec){
+ return !!(ffcodec(codec)->caps_internal & FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM);
+}
+
+const char *avcodec_get_name(enum AVCodecID id)
+{
+ const AVCodecDescriptor *cd;
+ const AVCodec *codec;
+
+ if (id == AV_CODEC_ID_NONE)
+ return "none";
+ cd = avcodec_descriptor_get(id);
+ if (cd)
+ return cd->name;
+ av_log(NULL, AV_LOG_WARNING, "Codec 0x%x is not in the full list.\n", id);
+ codec = avcodec_find_decoder(id);
+ if (codec)
+ return codec->name;
+ codec = avcodec_find_encoder(id);
+ if (codec)
+ return codec->name;
+ return "unknown_codec";
+}
+
+const char *av_get_profile_name(const AVCodec *codec, int profile)
+{
+ const AVProfile *p;
+ if (profile == FF_PROFILE_UNKNOWN || !codec->profiles)
+ return NULL;
+
+ for (p = codec->profiles; p->profile != FF_PROFILE_UNKNOWN; p++)
+ if (p->profile == profile)
+ return p->name;
+
+ return NULL;
+}
+
+const char *avcodec_profile_name(enum AVCodecID codec_id, int profile)
+{
+ const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
+ const AVProfile *p;
+
+ if (profile == FF_PROFILE_UNKNOWN || !desc || !desc->profiles)
+ return NULL;
+
+ for (p = desc->profiles; p->profile != FF_PROFILE_UNKNOWN; p++)
+ if (p->profile == profile)
+ return p->name;
+
+ return NULL;
+}
+
+int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
+{
+ switch (codec_id) {
+ case AV_CODEC_ID_DFPWM:
+ return 1;
+ case AV_CODEC_ID_8SVX_EXP:
+ case AV_CODEC_ID_8SVX_FIB:
+ case AV_CODEC_ID_ADPCM_ARGO:
+ case AV_CODEC_ID_ADPCM_CT:
+ case AV_CODEC_ID_ADPCM_IMA_ALP:
+ case AV_CODEC_ID_ADPCM_IMA_AMV:
+ case AV_CODEC_ID_ADPCM_IMA_APC:
+ case AV_CODEC_ID_ADPCM_IMA_APM:
+ case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
+ case AV_CODEC_ID_ADPCM_IMA_OKI:
+ case AV_CODEC_ID_ADPCM_IMA_WS:
+ case AV_CODEC_ID_ADPCM_IMA_SSI:
+ case AV_CODEC_ID_ADPCM_G722:
+ case AV_CODEC_ID_ADPCM_YAMAHA:
+ case AV_CODEC_ID_ADPCM_AICA:
+ return 4;
+ case AV_CODEC_ID_DSD_LSBF:
+ case AV_CODEC_ID_DSD_MSBF:
+ case AV_CODEC_ID_DSD_LSBF_PLANAR:
+ case AV_CODEC_ID_DSD_MSBF_PLANAR:
+ case AV_CODEC_ID_PCM_ALAW:
+ case AV_CODEC_ID_PCM_MULAW:
+ case AV_CODEC_ID_PCM_VIDC:
+ case AV_CODEC_ID_PCM_S8:
+ case AV_CODEC_ID_PCM_S8_PLANAR:
+ case AV_CODEC_ID_PCM_SGA:
+ case AV_CODEC_ID_PCM_U8:
+ case AV_CODEC_ID_SDX2_DPCM:
+ case AV_CODEC_ID_CBD2_DPCM:
+ case AV_CODEC_ID_DERF_DPCM:
+ case AV_CODEC_ID_WADY_DPCM:
+ return 8;
+ case AV_CODEC_ID_PCM_S16BE:
+ case AV_CODEC_ID_PCM_S16BE_PLANAR:
+ case AV_CODEC_ID_PCM_S16LE:
+ case AV_CODEC_ID_PCM_S16LE_PLANAR:
+ case AV_CODEC_ID_PCM_U16BE:
+ case AV_CODEC_ID_PCM_U16LE:
+ return 16;
+ case AV_CODEC_ID_PCM_S24DAUD:
+ case AV_CODEC_ID_PCM_S24BE:
+ case AV_CODEC_ID_PCM_S24LE:
+ case AV_CODEC_ID_PCM_S24LE_PLANAR:
+ case AV_CODEC_ID_PCM_U24BE:
+ case AV_CODEC_ID_PCM_U24LE:
+ return 24;
+ case AV_CODEC_ID_PCM_S32BE:
+ case AV_CODEC_ID_PCM_S32LE:
+ case AV_CODEC_ID_PCM_S32LE_PLANAR:
+ case AV_CODEC_ID_PCM_U32BE:
+ case AV_CODEC_ID_PCM_U32LE:
+ case AV_CODEC_ID_PCM_F32BE:
+ case AV_CODEC_ID_PCM_F32LE:
+ case AV_CODEC_ID_PCM_F24LE:
+ case AV_CODEC_ID_PCM_F16LE:
+ return 32;
+ case AV_CODEC_ID_PCM_F64BE:
+ case AV_CODEC_ID_PCM_F64LE:
+ case AV_CODEC_ID_PCM_S64BE:
+ case AV_CODEC_ID_PCM_S64LE:
+ return 64;
+ default:
+ return 0;
+ }
+}
+
+enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be)
+{
+ static const enum AVCodecID map[][2] = {
+ [AV_SAMPLE_FMT_U8 ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 },
+ [AV_SAMPLE_FMT_S16 ] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE },
+ [AV_SAMPLE_FMT_S32 ] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE },
+ [AV_SAMPLE_FMT_FLT ] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE },
+ [AV_SAMPLE_FMT_DBL ] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE },
+ [AV_SAMPLE_FMT_U8P ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 },
+ [AV_SAMPLE_FMT_S16P] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE },
+ [AV_SAMPLE_FMT_S32P] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE },
+ [AV_SAMPLE_FMT_S64P] = { AV_CODEC_ID_PCM_S64LE, AV_CODEC_ID_PCM_S64BE },
+ [AV_SAMPLE_FMT_FLTP] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE },
+ [AV_SAMPLE_FMT_DBLP] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE },
+ };
+ if (fmt < 0 || fmt >= FF_ARRAY_ELEMS(map))
+ return AV_CODEC_ID_NONE;
+ if (be < 0 || be > 1)
+ be = AV_NE(1, 0);
+ return map[fmt][be];
+}
+
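
For instance, planar sample formats map to the same packed PCM codec IDs, so per the table above:

    enum AVCodecID id_le = av_get_pcm_codec(AV_SAMPLE_FMT_FLTP, 0); /* AV_CODEC_ID_PCM_F32LE */
    enum AVCodecID id_be = av_get_pcm_codec(AV_SAMPLE_FMT_S16, 1);  /* AV_CODEC_ID_PCM_S16BE */
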
+int av_get_bits_per_sample(enum AVCodecID codec_id)
+{
+ switch (codec_id) {
+ case AV_CODEC_ID_DFPWM:
+ return 1;
+ case AV_CODEC_ID_ADPCM_SBPRO_2:
+ return 2;
+ case AV_CODEC_ID_ADPCM_SBPRO_3:
+ return 3;
+ case AV_CODEC_ID_ADPCM_SBPRO_4:
+ case AV_CODEC_ID_ADPCM_IMA_WAV:
+ case AV_CODEC_ID_ADPCM_IMA_QT:
+ case AV_CODEC_ID_ADPCM_SWF:
+ case AV_CODEC_ID_ADPCM_MS:
+ return 4;
+ default:
+ return av_get_exact_bits_per_sample(codec_id);
+ }
+}
+
+static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
+ uint32_t tag, int bits_per_coded_sample, int64_t bitrate,
+ uint8_t * extradata, int frame_size, int frame_bytes)
+{
+ int bps = av_get_exact_bits_per_sample(id);
+ int framecount = (ba > 0 && frame_bytes / ba > 0) ? frame_bytes / ba : 1;
+
+ /* codecs with an exact constant bits per sample */
+ if (bps > 0 && ch > 0 && frame_bytes > 0 && ch < 32768 && bps < 32768)
+ return (frame_bytes * 8LL) / (bps * ch);
+ bps = bits_per_coded_sample;
+
+ /* codecs with a fixed packet duration */
+ switch (id) {
+ case AV_CODEC_ID_ADPCM_ADX: return 32;
+ case AV_CODEC_ID_ADPCM_IMA_QT: return 64;
+ case AV_CODEC_ID_ADPCM_EA_XAS: return 128;
+ case AV_CODEC_ID_AMR_NB:
+ case AV_CODEC_ID_EVRC:
+ case AV_CODEC_ID_GSM:
+ case AV_CODEC_ID_QCELP:
+ case AV_CODEC_ID_RA_288: return 160;
+ case AV_CODEC_ID_AMR_WB:
+ case AV_CODEC_ID_GSM_MS: return 320;
+ case AV_CODEC_ID_MP1: return 384;
+ case AV_CODEC_ID_ATRAC1: return 512;
+ case AV_CODEC_ID_ATRAC9:
+ case AV_CODEC_ID_ATRAC3:
+ if (framecount > INT_MAX/1024)
+ return 0;
+ return 1024 * framecount;
+ case AV_CODEC_ID_ATRAC3P: return 2048;
+ case AV_CODEC_ID_MP2:
+ case AV_CODEC_ID_MUSEPACK7: return 1152;
+ case AV_CODEC_ID_AC3: return 1536;
+ case AV_CODEC_ID_FTR: return 1024;
+ }
+
+ if (sr > 0) {
+ /* calc from sample rate */
+ if (id == AV_CODEC_ID_TTA)
+ return 256 * sr / 245;
+ else if (id == AV_CODEC_ID_DST)
+ return 588 * sr / 44100;
+ else if (id == AV_CODEC_ID_BINKAUDIO_DCT) {
+ if (sr / 22050 > 22)
+ return 0;
+ return (480 << (sr / 22050));
+ }
+
+ if (id == AV_CODEC_ID_MP3)
+ return sr <= 24000 ? 576 : 1152;
+ }
+
+ if (ba > 0) {
+ /* calc from block_align */
+ if (id == AV_CODEC_ID_SIPR) {
+ switch (ba) {
+ case 20: return 160;
+ case 19: return 144;
+ case 29: return 288;
+ case 37: return 480;
+ }
+ } else if (id == AV_CODEC_ID_ILBC) {
+ switch (ba) {
+ case 38: return 160;
+ case 50: return 240;
+ }
+ }
+ }
+
+ if (frame_bytes > 0) {
+ /* calc from frame_bytes only */
+ if (id == AV_CODEC_ID_TRUESPEECH)
+ return 240 * (frame_bytes / 32);
+ if (id == AV_CODEC_ID_NELLYMOSER)
+ return 256 * (frame_bytes / 64);
+ if (id == AV_CODEC_ID_RA_144)
+ return 160 * (frame_bytes / 20);
+ if (id == AV_CODEC_ID_APTX)
+ return 4 * (frame_bytes / 4);
+ if (id == AV_CODEC_ID_APTX_HD)
+ return 4 * (frame_bytes / 6);
+
+ if (bps > 0) {
+ /* calc from frame_bytes and bits_per_coded_sample */
+ if (id == AV_CODEC_ID_ADPCM_G726 || id == AV_CODEC_ID_ADPCM_G726LE)
+ return frame_bytes * 8 / bps;
+ }
+
+ if (ch > 0 && ch < INT_MAX/16) {
+ /* calc from frame_bytes and channels */
+ switch (id) {
+ case AV_CODEC_ID_FASTAUDIO:
+ return frame_bytes / (40 * ch) * 256;
+ case AV_CODEC_ID_ADPCM_IMA_MOFLEX:
+ return (frame_bytes - 4 * ch) / (128 * ch) * 256;
+ case AV_CODEC_ID_ADPCM_AFC:
+ return frame_bytes / (9 * ch) * 16;
+ case AV_CODEC_ID_ADPCM_PSX:
+ case AV_CODEC_ID_ADPCM_DTK:
+ frame_bytes /= 16 * ch;
+ if (frame_bytes > INT_MAX / 28)
+ return 0;
+ return frame_bytes * 28;
+ case AV_CODEC_ID_ADPCM_4XM:
+ case AV_CODEC_ID_ADPCM_IMA_ACORN:
+ case AV_CODEC_ID_ADPCM_IMA_DAT4:
+ case AV_CODEC_ID_ADPCM_IMA_ISS:
+ return (frame_bytes - 4 * ch) * 2 / ch;
+ case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
+ return (frame_bytes - 4) * 2 / ch;
+ case AV_CODEC_ID_ADPCM_IMA_AMV:
+ return (frame_bytes - 8) * 2;
+ case AV_CODEC_ID_ADPCM_THP:
+ case AV_CODEC_ID_ADPCM_THP_LE:
+ if (extradata)
+ return frame_bytes * 14LL / (8 * ch);
+ break;
+ case AV_CODEC_ID_ADPCM_XA:
+ return (frame_bytes / 128) * 224 / ch;
+ case AV_CODEC_ID_INTERPLAY_DPCM:
+ return (frame_bytes - 6 - ch) / ch;
+ case AV_CODEC_ID_ROQ_DPCM:
+ return (frame_bytes - 8) / ch;
+ case AV_CODEC_ID_XAN_DPCM:
+ return (frame_bytes - 2 * ch) / ch;
+ case AV_CODEC_ID_MACE3:
+ return 3 * frame_bytes / ch;
+ case AV_CODEC_ID_MACE6:
+ return 6 * frame_bytes / ch;
+ case AV_CODEC_ID_PCM_LXF:
+ return 2 * (frame_bytes / (5 * ch));
+ case AV_CODEC_ID_IAC:
+ case AV_CODEC_ID_IMC:
+ return 4 * frame_bytes / ch;
+ }
+
+ if (tag) {
+ /* calc from frame_bytes, channels, and codec_tag */
+ if (id == AV_CODEC_ID_SOL_DPCM) {
+ if (tag == 3)
+ return frame_bytes / ch;
+ else
+ return frame_bytes * 2 / ch;
+ }
+ }
+
+ if (ba > 0) {
+ /* calc from frame_bytes, channels, and block_align */
+ int blocks = frame_bytes / ba;
+ int64_t tmp = 0;
+ switch (id) {
+ case AV_CODEC_ID_ADPCM_IMA_WAV:
+ if (bps < 2 || bps > 5)
+ return 0;
+ tmp = blocks * (1LL + (ba - 4 * ch) / (bps * ch) * 8);
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_DK3:
+ tmp = blocks * (((ba - 16LL) * 2 / 3 * 4) / ch);
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_DK4:
+ tmp = blocks * (1 + (ba - 4LL * ch) * 2 / ch);
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_RAD:
+ tmp = blocks * ((ba - 4LL * ch) * 2 / ch);
+ break;
+ case AV_CODEC_ID_ADPCM_MS:
+ tmp = blocks * (2 + (ba - 7LL * ch) * 2LL / ch);
+ break;
+ case AV_CODEC_ID_ADPCM_MTAF:
+ tmp = blocks * (ba - 16LL) * 2 / ch;
+ break;
+ case AV_CODEC_ID_ADPCM_XMD:
+ tmp = blocks * 32;
+ break;
+ }
+ if (tmp) {
+ if (tmp != (int)tmp)
+ return 0;
+ return tmp;
+ }
+ }
+
+ if (bps > 0) {
+ /* calc from frame_bytes, channels, and bits_per_coded_sample */
+ switch (id) {
+ case AV_CODEC_ID_PCM_DVD:
+ if(bps<4 || frame_bytes<3)
+ return 0;
+ return 2 * ((frame_bytes - 3) / ((bps * 2 / 8) * ch));
+ case AV_CODEC_ID_PCM_BLURAY:
+ if(bps<4 || frame_bytes<4)
+ return 0;
+ return (frame_bytes - 4) / ((FFALIGN(ch, 2) * bps) / 8);
+ case AV_CODEC_ID_S302M:
+ return 2 * (frame_bytes / ((bps + 4) / 4)) / ch;
+ }
+ }
+ }
+ }
+
+ /* Fall back on using frame_size */
+ if (frame_size > 1 && frame_bytes)
+ return frame_size;
+
+    // For WMA we currently have no other means to calculate the duration, so we
+    // do it here by assuming CBR, which is true for all known cases.
+ if (bitrate > 0 && frame_bytes > 0 && sr > 0 && ba > 1) {
+ if (id == AV_CODEC_ID_WMAV1 || id == AV_CODEC_ID_WMAV2)
+ return (frame_bytes * 8LL * sr) / bitrate;
+ }
+
+ return 0;
+}
+
+int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
+{
+ int channels = avctx->ch_layout.nb_channels;
+ int duration;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!channels)
+ channels = avctx->channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ duration = get_audio_frame_duration(avctx->codec_id, avctx->sample_rate,
+ channels, avctx->block_align,
+ avctx->codec_tag, avctx->bits_per_coded_sample,
+ avctx->bit_rate, avctx->extradata, avctx->frame_size,
+ frame_bytes);
+ return FFMAX(0, duration);
+}
+
+int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes)
+{
+ int channels = par->ch_layout.nb_channels;
+ int duration;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!channels)
+ channels = par->channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ duration = get_audio_frame_duration(par->codec_id, par->sample_rate,
+ channels, par->block_align,
+ par->codec_tag, par->bits_per_coded_sample,
+ par->bit_rate, par->extradata, par->frame_size,
+ frame_bytes);
+ return FFMAX(0, duration);
+}
+
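
As an example of the exact-bits-per-sample path: assuming `par` describes 16-bit stereo PCM (AV_CODEC_ID_PCM_S16LE), a 4096-byte packet lasts 4096 * 8 / (16 * 2) = 1024 samples:

    int nb_samples = av_get_audio_frame_duration2(par, 4096); /* 1024 under the assumptions above */
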
+#if !HAVE_THREADS
+int ff_thread_init(AVCodecContext *s)
+{
+ return -1;
+}
+
+#endif
+
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v)
+{
+ unsigned int n = 0;
+
+ while (v >= 0xff) {
+ *s++ = 0xff;
+ v -= 0xff;
+ n++;
+ }
+ *s = v;
+ n++;
+ return n;
+}
+
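
Xiph lacing encodes a value as a run of 0xff bytes followed by the remainder; for example:

    unsigned char lace[4];
    unsigned int n = av_xiphlacing(lace, 510);
    /* n == 3 and lace holds { 0xff, 0xff, 0x00 }, i.e. 255 + 255 + 0 */
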
+int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
+{
+ int i;
+ for (i = 0; i < size && !(tab[i][0] == a && tab[i][1] == b); i++) ;
+ return i;
+}
+
+const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *avcodec, int index)
+{
+ const FFCodec *const codec = ffcodec(avcodec);
+ int i;
+ if (!codec->hw_configs || index < 0)
+ return NULL;
+ for (i = 0; i <= index; i++)
+ if (!codec->hw_configs[i])
+ return NULL;
+ return &codec->hw_configs[index]->public;
+}
+
+int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
+{
+ int ret;
+
+ dst->owner[0] = src->owner[0];
+ dst->owner[1] = src->owner[1];
+
+ ret = av_frame_ref(dst->f, src->f);
+ if (ret < 0)
+ return ret;
+
+ av_assert0(!dst->progress);
+
+ if (src->progress &&
+ !(dst->progress = av_buffer_ref(src->progress))) {
+ ff_thread_release_ext_buffer(dst->owner[0], dst);
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+#if !HAVE_THREADS
+
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
+{
+ return ff_get_buffer(avctx, f, flags);
+}
+
+int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+{
+ f->owner[0] = f->owner[1] = avctx;
+ return ff_get_buffer(avctx, f->f, flags);
+}
+
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
+{
+ if (f)
+ av_frame_unref(f);
+}
+
+void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
+{
+ f->owner[0] = f->owner[1] = NULL;
+ if (f->f)
+ av_frame_unref(f->f);
+}
+
+void ff_thread_finish_setup(AVCodecContext *avctx)
+{
+}
+
+void ff_thread_report_progress(ThreadFrame *f, int progress, int field)
+{
+}
+
+void ff_thread_await_progress(const ThreadFrame *f, int progress, int field)
+{
+}
+
+int ff_thread_can_start_frame(AVCodecContext *avctx)
+{
+ return 1;
+}
+
+int ff_slice_thread_init_progress(AVCodecContext *avctx)
+{
+ return 0;
+}
+
+int ff_slice_thread_allocz_entries(AVCodecContext *avctx, int count)
+{
+ return 0;
+}
+
+void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
+{
+}
+
+void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
+{
+}
+
+#endif
+
+const uint8_t *avpriv_find_start_code(const uint8_t *av_restrict p,
+ const uint8_t *end,
+ uint32_t *av_restrict state)
+{
+ int i;
+
+ av_assert0(p <= end);
+ if (p >= end)
+ return end;
+
+ for (i = 0; i < 3; i++) {
+ uint32_t tmp = *state << 8;
+ *state = tmp + *(p++);
+ if (tmp == 0x100 || p == end)
+ return p;
+ }
+
+ while (p < end) {
+ if (p[-1] > 1 ) p += 3;
+ else if (p[-2] ) p += 2;
+ else if (p[-3]|(p[-1]-1)) p++;
+ else {
+ p++;
+ break;
+ }
+ }
+
+ p = FFMIN(p, end) - 4;
+ *state = AV_RB32(p);
+
+ return p + 4;
+}
+
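
A rough sketch of scanning a buffer for MPEG-style 00 00 01 start codes with this helper; `buf`, `size` and `handle_start_code` are hypothetical and only illustrate the calling convention:

    uint32_t state = -1;
    const uint8_t *p = buf, *end = buf + size;

    while (p < end) {
        p = avpriv_find_start_code(p, end, &state);
        if ((state & 0xffffff00) == 0x100)      /* found 00 00 01 xx */
            handle_start_code(state & 0xff, p); /* p points just past the code */
    }
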
+AVCPBProperties *av_cpb_properties_alloc(size_t *size)
+{
+ AVCPBProperties *props = av_mallocz(sizeof(AVCPBProperties));
+ if (!props)
+ return NULL;
+
+ if (size)
+ *size = sizeof(*props);
+
+ props->vbv_delay = UINT64_MAX;
+
+ return props;
+}
+
+AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx)
+{
+ AVPacketSideData *tmp;
+ AVCPBProperties *props;
+ size_t size;
+ int i;
+
+ for (i = 0; i < avctx->nb_coded_side_data; i++)
+ if (avctx->coded_side_data[i].type == AV_PKT_DATA_CPB_PROPERTIES)
+ return (AVCPBProperties *)avctx->coded_side_data[i].data;
+
+ props = av_cpb_properties_alloc(&size);
+ if (!props)
+ return NULL;
+
+ tmp = av_realloc_array(avctx->coded_side_data, avctx->nb_coded_side_data + 1, sizeof(*tmp));
+ if (!tmp) {
+ av_freep(&props);
+ return NULL;
+ }
+
+ avctx->coded_side_data = tmp;
+ avctx->nb_coded_side_data++;
+
+ avctx->coded_side_data[avctx->nb_coded_side_data - 1].type = AV_PKT_DATA_CPB_PROPERTIES;
+ avctx->coded_side_data[avctx->nb_coded_side_data - 1].data = (uint8_t*)props;
+ avctx->coded_side_data[avctx->nb_coded_side_data - 1].size = size;
+
+ return props;
+}
+
+static unsigned bcd2uint(uint8_t bcd)
+{
+ unsigned low = bcd & 0xf;
+ unsigned high = bcd >> 4;
+ if (low > 9 || high > 9)
+ return 0;
+ return low + 10*high;
+}
+
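
For example, the BCD byte 0x59 decodes to 59, while a byte containing a non-decimal nibble yields 0:

    unsigned a = bcd2uint(0x59); /* 59 */
    unsigned b = bcd2uint(0x5a); /* 0, since the low nibble 0xa is not a decimal digit */
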
+int ff_alloc_timecode_sei(const AVFrame *frame, AVRational rate, size_t prefix_len,
+ void **data, size_t *sei_size)
+{
+ AVFrameSideData *sd = NULL;
+ uint8_t *sei_data;
+ PutBitContext pb;
+ uint32_t *tc;
+ int m;
+
+ if (frame)
+ sd = av_frame_get_side_data(frame, AV_FRAME_DATA_S12M_TIMECODE);
+
+ if (!sd) {
+ *data = NULL;
+ return 0;
+ }
+ tc = (uint32_t*)sd->data;
+ m = tc[0] & 3;
+
+ *sei_size = sizeof(uint32_t) * 4;
+ *data = av_mallocz(*sei_size + prefix_len);
+ if (!*data)
+ return AVERROR(ENOMEM);
+ sei_data = (uint8_t*)*data + prefix_len;
+
+ init_put_bits(&pb, sei_data, *sei_size);
+ put_bits(&pb, 2, m); // num_clock_ts
+
+ for (int j = 1; j <= m; j++) {
+ uint32_t tcsmpte = tc[j];
+ unsigned hh = bcd2uint(tcsmpte & 0x3f); // 6-bit hours
+ unsigned mm = bcd2uint(tcsmpte>>8 & 0x7f); // 7-bit minutes
+ unsigned ss = bcd2uint(tcsmpte>>16 & 0x7f); // 7-bit seconds
+ unsigned ff = bcd2uint(tcsmpte>>24 & 0x3f); // 6-bit frames
+ unsigned drop = tcsmpte & 1<<30 && !0; // 1-bit drop if not arbitrary bit
+
+ /* Calculate frame number of HEVC by SMPTE ST 12-1:2014 Sec 12.2 if rate > 30FPS */
+ if (av_cmp_q(rate, (AVRational) {30, 1}) == 1) {
+ unsigned pc;
+ ff *= 2;
+ if (av_cmp_q(rate, (AVRational) {50, 1}) == 0)
+ pc = !!(tcsmpte & 1 << 7);
+ else
+ pc = !!(tcsmpte & 1 << 23);
+ ff = (ff + pc) & 0x7f;
+ }
+
+ put_bits(&pb, 1, 1); // clock_timestamp_flag
+ put_bits(&pb, 1, 1); // units_field_based_flag
+ put_bits(&pb, 5, 0); // counting_type
+ put_bits(&pb, 1, 1); // full_timestamp_flag
+ put_bits(&pb, 1, 0); // discontinuity_flag
+ put_bits(&pb, 1, drop);
+ put_bits(&pb, 9, ff);
+ put_bits(&pb, 6, ss);
+ put_bits(&pb, 6, mm);
+ put_bits(&pb, 5, hh);
+ put_bits(&pb, 5, 0);
+ }
+ flush_put_bits(&pb);
+
+ return 0;
+}
+
+int64_t ff_guess_coded_bitrate(AVCodecContext *avctx)
+{
+ AVRational framerate = avctx->framerate;
+ int bits_per_coded_sample = avctx->bits_per_coded_sample;
+ int64_t bitrate;
+
+ if (!(framerate.num && framerate.den))
+ framerate = av_inv_q(avctx->time_base);
+ if (!(framerate.num && framerate.den))
+ return 0;
+
+ if (!bits_per_coded_sample) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
+ bits_per_coded_sample = av_get_bits_per_pixel(desc);
+ }
+ bitrate = (int64_t)bits_per_coded_sample * avctx->width * avctx->height *
+ framerate.num / framerate.den;
+
+ return bitrate;
+}
+
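
A worked example under assumed settings: for raw YUV420P (12 bits per pixel) at 1920x1080 and 25 fps, the estimate is 12 * 1920 * 1080 * 25 = 622080000 bit/s:

    int64_t br = ff_guess_coded_bitrate(avctx); /* 622080000 with the settings described above */
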
+int ff_int_from_list_or_default(void *ctx, const char * val_name, int val,
+ const int * array_valid_values, int default_value)
+{
+ int i = 0, ref_val;
+
+ while (1) {
+ ref_val = array_valid_values[i];
+ if (ref_val == INT_MAX)
+ break;
+ if (val == ref_val)
+ return val;
+ i++;
+ }
+ /* val is not a valid value */
+ av_log(ctx, AV_LOG_DEBUG,
+           "%s %d is not supported. Setting to default value: %d\n", val_name, val, default_value);
+ return default_value;
+}
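
The array of valid values is terminated by INT_MAX; a minimal sketch with an illustrative rate list:

    static const int valid_rates[] = { 44100, 48000, INT_MAX };
    int rate = ff_int_from_list_or_default(avctx, "sample rate", 32000,
                                           valid_rates, 48000);
    /* rate == 48000 and a debug message is logged, since 32000 is not in the list */
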
diff --git a/media/ffvpx/libavcodec/vaapi.h b/media/ffvpx/libavcodec/vaapi.h
new file mode 100644
index 0000000000..2cf7da5889
--- /dev/null
+++ b/media/ffvpx/libavcodec/vaapi.h
@@ -0,0 +1,86 @@
+/*
+ * Video Acceleration API (shared data between FFmpeg and the video player)
+ * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
+ *
+ * Copyright (C) 2008-2009 Splitted-Desktop Systems
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VAAPI_H
+#define AVCODEC_VAAPI_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vaapi
+ * Public libavcodec VA API header.
+ */
+
+#include <stdint.h>
+#include "libavutil/attributes.h"
+#include "version.h"
+
+#if FF_API_STRUCT_VAAPI_CONTEXT
+
+/**
+ * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding
+ * @ingroup lavc_codec_hwaccel
+ * @{
+ */
+
+/**
+ * This structure is used to share data between the FFmpeg library and
+ * the client video application.
+ * This shall be zero-allocated and available as
+ * AVCodecContext.hwaccel_context. All user members can be set once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ *
+ * Deprecated: use AVCodecContext.hw_frames_ctx instead.
+ */
+struct attribute_deprecated vaapi_context {
+ /**
+ * Window system dependent data
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *display;
+
+ /**
+ * Configuration ID
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t config_id;
+
+ /**
+ * Context ID (video decode pipeline)
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t context_id;
+};
+
+/* @} */
+
+#endif /* FF_API_STRUCT_VAAPI_CONTEXT */
+
+#endif /* AVCODEC_VAAPI_H */
diff --git a/media/ffvpx/libavcodec/vaapi_av1.c b/media/ffvpx/libavcodec/vaapi_av1.c
new file mode 100644
index 0000000000..d0339b2705
--- /dev/null
+++ b/media/ffvpx/libavcodec/vaapi_av1.c
@@ -0,0 +1,451 @@
+/*
+ * AV1 HW decode acceleration through VA API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/frame.h"
+#include "libavutil/pixdesc.h"
+#include "hwconfig.h"
+#include "vaapi_decode.h"
+#include "internal.h"
+#include "av1dec.h"
+#include "thread.h"
+
+typedef struct VAAPIAV1FrameRef {
+ AVFrame *frame;
+ int valid;
+} VAAPIAV1FrameRef;
+
+typedef struct VAAPIAV1DecContext {
+ VAAPIDecodeContext base;
+
+ /**
+     * In the film grain case, VAAPI generates two outputs for each frame:
+     * current_frame has no film grain applied and is used as a reference
+     * for subsequent frames, while current_display_picture has film grain
+     * applied and is pushed downstream. The reference list kept here
+     * therefore holds the frames without film grain.
+ */
+ VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
+ AVFrame *tmp_frame;
+} VAAPIAV1DecContext;
+
+static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
+{
+ if (vf)
+ return ff_vaapi_get_surface_id(vf->f);
+ else
+ return VA_INVALID_SURFACE;
+}
+
+static int8_t vaapi_av1_get_bit_depth_idx(AVCodecContext *avctx)
+{
+ AV1DecContext *s = avctx->priv_data;
+ const AV1RawSequenceHeader *seq = s->raw_seq;
+ int8_t bit_depth = 8;
+
+ if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
+ bit_depth = seq->color_config.twelve_bit ? 12 : 10;
+ else if (seq->seq_profile <= 2)
+ bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
+ else {
+ av_log(avctx, AV_LOG_ERROR,
+ "Couldn't get bit depth from profile:%d.\n", seq->seq_profile);
+ return -1;
+ }
+ return bit_depth == 8 ? 0 : bit_depth == 10 ? 1 : 2;
+}
+
+static int vaapi_av1_decode_init(AVCodecContext *avctx)
+{
+ VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
+
+ ctx->tmp_frame = av_frame_alloc();
+ if (!ctx->tmp_frame) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to allocate frame.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
+ ctx->ref_tab[i].frame = av_frame_alloc();
+ if (!ctx->ref_tab[i].frame) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to allocate reference table frame %d.\n", i);
+ return AVERROR(ENOMEM);
+ }
+ ctx->ref_tab[i].valid = 0;
+ }
+
+ return ff_vaapi_decode_init(avctx);
+}
+
+static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
+{
+ VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
+
+ if (ctx->tmp_frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->tmp_frame);
+ av_frame_free(&ctx->tmp_frame);
+
+ for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
+ if (ctx->ref_tab[i].frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
+ av_frame_free(&ctx->ref_tab[i].frame);
+ }
+
+ return ff_vaapi_decode_uninit(avctx);
+}
+
+
+static int vaapi_av1_start_frame(AVCodecContext *avctx,
+ av_unused const uint8_t *buffer,
+ av_unused uint32_t size)
+{
+ AV1DecContext *s = avctx->priv_data;
+ const AV1RawSequenceHeader *seq = s->raw_seq;
+ const AV1RawFrameHeader *frame_header = s->raw_frame_header;
+ const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
+ VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
+ VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
+ VADecPictureParameterBufferAV1 pic_param;
+ int8_t bit_depth_idx;
+ int err = 0;
+ int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
+ uint8_t remap_lr_type[4] = {AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ};
+ uint8_t segmentation_feature_signed[AV1_SEG_LVL_MAX] = {1, 1, 1, 1, 1, 0, 0, 0};
+ uint8_t segmentation_feature_max[AV1_SEG_LVL_MAX] = {255, AV1_MAX_LOOP_FILTER,
+ AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER, 7 , 0 , 0 };
+
+ bit_depth_idx = vaapi_av1_get_bit_depth_idx(avctx);
+ if (bit_depth_idx < 0)
+ goto fail;
+
+ if (apply_grain) {
+ if (ctx->tmp_frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->tmp_frame);
+ err = ff_thread_get_buffer(avctx, ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
+ if (err < 0)
+ goto fail;
+ pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame);
+ } else {
+ pic->output_surface = vaapi_av1_surface_id(&s->cur_frame);
+ }
+
+ memset(&pic_param, 0, sizeof(VADecPictureParameterBufferAV1));
+ pic_param = (VADecPictureParameterBufferAV1) {
+ .profile = seq->seq_profile,
+ .order_hint_bits_minus_1 = seq->order_hint_bits_minus_1,
+ .bit_depth_idx = bit_depth_idx,
+ .matrix_coefficients = seq->color_config.matrix_coefficients,
+ .current_frame = pic->output_surface,
+ .current_display_picture = vaapi_av1_surface_id(&s->cur_frame),
+ .frame_width_minus1 = frame_header->frame_width_minus_1,
+ .frame_height_minus1 = frame_header->frame_height_minus_1,
+ .primary_ref_frame = frame_header->primary_ref_frame,
+ .order_hint = frame_header->order_hint,
+ .tile_cols = frame_header->tile_cols,
+ .tile_rows = frame_header->tile_rows,
+ .context_update_tile_id = frame_header->context_update_tile_id,
+ .superres_scale_denominator = frame_header->use_superres ?
+ frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN :
+ AV1_SUPERRES_NUM,
+ .interp_filter = frame_header->interpolation_filter,
+ .filter_level[0] = frame_header->loop_filter_level[0],
+ .filter_level[1] = frame_header->loop_filter_level[1],
+ .filter_level_u = frame_header->loop_filter_level[2],
+ .filter_level_v = frame_header->loop_filter_level[3],
+ .base_qindex = frame_header->base_q_idx,
+ .y_dc_delta_q = frame_header->delta_q_y_dc,
+ .u_dc_delta_q = frame_header->delta_q_u_dc,
+ .u_ac_delta_q = frame_header->delta_q_u_ac,
+ .v_dc_delta_q = frame_header->delta_q_v_dc,
+ .v_ac_delta_q = frame_header->delta_q_v_ac,
+ .cdef_damping_minus_3 = frame_header->cdef_damping_minus_3,
+ .cdef_bits = frame_header->cdef_bits,
+ .seq_info_fields.fields = {
+ .still_picture = seq->still_picture,
+ .use_128x128_superblock = seq->use_128x128_superblock,
+ .enable_filter_intra = seq->enable_filter_intra,
+ .enable_intra_edge_filter = seq->enable_intra_edge_filter,
+ .enable_interintra_compound = seq->enable_interintra_compound,
+ .enable_masked_compound = seq->enable_masked_compound,
+ .enable_dual_filter = seq->enable_dual_filter,
+ .enable_order_hint = seq->enable_order_hint,
+ .enable_jnt_comp = seq->enable_jnt_comp,
+ .enable_cdef = seq->enable_cdef,
+ .mono_chrome = seq->color_config.mono_chrome,
+ .color_range = seq->color_config.color_range,
+ .subsampling_x = seq->color_config.subsampling_x,
+ .subsampling_y = seq->color_config.subsampling_y,
+ .chroma_sample_position = seq->color_config.chroma_sample_position,
+ .film_grain_params_present = seq->film_grain_params_present &&
+ !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN),
+ },
+ .seg_info.segment_info_fields.bits = {
+ .enabled = frame_header->segmentation_enabled,
+ .update_map = frame_header->segmentation_update_map,
+ .temporal_update = frame_header->segmentation_temporal_update,
+ .update_data = frame_header->segmentation_update_data,
+ },
+ .film_grain_info = {
+ .film_grain_info_fields.bits = {
+ .apply_grain = apply_grain,
+ .chroma_scaling_from_luma = film_grain->chroma_scaling_from_luma,
+ .grain_scaling_minus_8 = film_grain->grain_scaling_minus_8,
+ .ar_coeff_lag = film_grain->ar_coeff_lag,
+ .ar_coeff_shift_minus_6 = film_grain->ar_coeff_shift_minus_6,
+ .grain_scale_shift = film_grain->grain_scale_shift,
+ .overlap_flag = film_grain->overlap_flag,
+ .clip_to_restricted_range = film_grain->clip_to_restricted_range,
+ },
+ .grain_seed = film_grain->grain_seed,
+ .num_y_points = film_grain->num_y_points,
+ .num_cb_points = film_grain->num_cb_points,
+ .num_cr_points = film_grain->num_cr_points,
+ .cb_mult = film_grain->cb_mult,
+ .cb_luma_mult = film_grain->cb_luma_mult,
+ .cb_offset = film_grain->cb_offset,
+ .cr_mult = film_grain->cr_mult,
+ .cr_luma_mult = film_grain->cr_luma_mult,
+ .cr_offset = film_grain->cr_offset,
+ },
+ .pic_info_fields.bits = {
+ .frame_type = frame_header->frame_type,
+ .show_frame = frame_header->show_frame,
+ .showable_frame = frame_header->showable_frame,
+ .error_resilient_mode = frame_header->error_resilient_mode,
+ .disable_cdf_update = frame_header->disable_cdf_update,
+ .allow_screen_content_tools = frame_header->allow_screen_content_tools,
+ .force_integer_mv = frame_header->force_integer_mv,
+ .allow_intrabc = frame_header->allow_intrabc,
+ .use_superres = frame_header->use_superres,
+ .allow_high_precision_mv = frame_header->allow_high_precision_mv,
+ .is_motion_mode_switchable = frame_header->is_motion_mode_switchable,
+ .use_ref_frame_mvs = frame_header->use_ref_frame_mvs,
+ .disable_frame_end_update_cdf = frame_header->disable_frame_end_update_cdf,
+ .uniform_tile_spacing_flag = frame_header->uniform_tile_spacing_flag,
+ .allow_warped_motion = frame_header->allow_warped_motion,
+ },
+ .loop_filter_info_fields.bits = {
+ .sharpness_level = frame_header->loop_filter_sharpness,
+ .mode_ref_delta_enabled = frame_header->loop_filter_delta_enabled,
+ .mode_ref_delta_update = frame_header->loop_filter_delta_update,
+ },
+ .mode_control_fields.bits = {
+ .delta_q_present_flag = frame_header->delta_q_present,
+ .log2_delta_q_res = frame_header->delta_q_res,
+ .delta_lf_present_flag = frame_header->delta_lf_present,
+ .log2_delta_lf_res = frame_header->delta_lf_res,
+ .delta_lf_multi = frame_header->delta_lf_multi,
+ .tx_mode = frame_header->tx_mode,
+ .reference_select = frame_header->reference_select,
+ .reduced_tx_set_used = frame_header->reduced_tx_set,
+ .skip_mode_present = frame_header->skip_mode_present,
+ },
+ .loop_restoration_fields.bits = {
+ .yframe_restoration_type = remap_lr_type[frame_header->lr_type[0]],
+ .cbframe_restoration_type = remap_lr_type[frame_header->lr_type[1]],
+ .crframe_restoration_type = remap_lr_type[frame_header->lr_type[2]],
+ .lr_unit_shift = frame_header->lr_unit_shift,
+ .lr_uv_shift = frame_header->lr_uv_shift,
+ },
+ .qmatrix_fields.bits = {
+ .using_qmatrix = frame_header->using_qmatrix,
+ .qm_y = frame_header->qm_y,
+ .qm_u = frame_header->qm_u,
+ .qm_v = frame_header->qm_v,
+ }
+ };
+
+ for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ if (pic_param.pic_info_fields.bits.frame_type == AV1_FRAME_KEY && frame_header->show_frame)
+ pic_param.ref_frame_map[i] = VA_INVALID_ID;
+ else
+ pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
+ ff_vaapi_get_surface_id(ctx->ref_tab[i].frame) :
+ vaapi_av1_surface_id(&s->ref[i]);
+ }
+ for (int i = 0; i < AV1_REFS_PER_FRAME; i++) {
+ pic_param.ref_frame_idx[i] = frame_header->ref_frame_idx[i];
+ }
+ for (int i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
+ pic_param.ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
+ }
+ for (int i = 0; i < 2; i++) {
+ pic_param.mode_deltas[i] = frame_header->loop_filter_mode_deltas[i];
+ }
+ for (int i = 0; i < (1 << frame_header->cdef_bits); i++) {
+ pic_param.cdef_y_strengths[i] =
+ (frame_header->cdef_y_pri_strength[i] << 2) +
+ frame_header->cdef_y_sec_strength[i];
+ pic_param.cdef_uv_strengths[i] =
+ (frame_header->cdef_uv_pri_strength[i] << 2) +
+ frame_header->cdef_uv_sec_strength[i];
+ }
+ for (int i = 0; i < frame_header->tile_cols; i++) {
+ pic_param.width_in_sbs_minus_1[i] =
+ frame_header->width_in_sbs_minus_1[i];
+ }
+ for (int i = 0; i < frame_header->tile_rows; i++) {
+ pic_param.height_in_sbs_minus_1[i] =
+ frame_header->height_in_sbs_minus_1[i];
+ }
+ for (int i = AV1_REF_FRAME_LAST; i <= AV1_REF_FRAME_ALTREF; i++) {
+ pic_param.wm[i - 1].invalid = s->cur_frame.gm_invalid[i];
+ pic_param.wm[i - 1].wmtype = s->cur_frame.gm_type[i];
+ for (int j = 0; j < 6; j++)
+ pic_param.wm[i - 1].wmmat[j] = s->cur_frame.gm_params[i][j];
+ }
+ for (int i = 0; i < AV1_MAX_SEGMENTS; i++) {
+ for (int j = 0; j < AV1_SEG_LVL_MAX; j++) {
+ pic_param.seg_info.feature_mask[i] |= (frame_header->feature_enabled[i][j] << j);
+ if (segmentation_feature_signed[j])
+ pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j],
+ -segmentation_feature_max[j], segmentation_feature_max[j]);
+ else
+ pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j],
+ 0, segmentation_feature_max[j]);
+ }
+ }
+ if (apply_grain) {
+ for (int i = 0; i < film_grain->num_y_points; i++) {
+ pic_param.film_grain_info.point_y_value[i] =
+ film_grain->point_y_value[i];
+ pic_param.film_grain_info.point_y_scaling[i] =
+ film_grain->point_y_scaling[i];
+ }
+ for (int i = 0; i < film_grain->num_cb_points; i++) {
+ pic_param.film_grain_info.point_cb_value[i] =
+ film_grain->point_cb_value[i];
+ pic_param.film_grain_info.point_cb_scaling[i] =
+ film_grain->point_cb_scaling[i];
+ }
+ for (int i = 0; i < film_grain->num_cr_points; i++) {
+ pic_param.film_grain_info.point_cr_value[i] =
+ film_grain->point_cr_value[i];
+ pic_param.film_grain_info.point_cr_scaling[i] =
+ film_grain->point_cr_scaling[i];
+ }
+ for (int i = 0; i < 24; i++) {
+ pic_param.film_grain_info.ar_coeffs_y[i] =
+ film_grain->ar_coeffs_y_plus_128[i] - 128;
+ }
+ for (int i = 0; i < 25; i++) {
+ pic_param.film_grain_info.ar_coeffs_cb[i] =
+ film_grain->ar_coeffs_cb_plus_128[i] - 128;
+ pic_param.film_grain_info.ar_coeffs_cr[i] =
+ film_grain->ar_coeffs_cr_plus_128[i] - 128;
+ }
+ }
+ err = ff_vaapi_decode_make_param_buffer(avctx, pic,
+ VAPictureParameterBufferType,
+ &pic_param, sizeof(pic_param));
+ if (err < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+}
+
+static int vaapi_av1_end_frame(AVCodecContext *avctx)
+{
+ const AV1DecContext *s = avctx->priv_data;
+ const AV1RawFrameHeader *header = s->raw_frame_header;
+ const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
+ VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
+ VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
+
+ int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
+ int ret;
+ ret = ff_vaapi_decode_issue(avctx, pic);
+ if (ret < 0)
+ return ret;
+
+ for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
+ if (header->refresh_frame_flags & (1 << i)) {
+ if (ctx->ref_tab[i].frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
+
+ if (apply_grain) {
+ ret = av_frame_ref(ctx->ref_tab[i].frame, ctx->tmp_frame);
+ if (ret < 0)
+ return ret;
+ ctx->ref_tab[i].valid = 1;
+ } else {
+ ctx->ref_tab[i].valid = 0;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int vaapi_av1_decode_slice(AVCodecContext *avctx,
+ const uint8_t *buffer,
+ uint32_t size)
+{
+ const AV1DecContext *s = avctx->priv_data;
+ VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
+ VASliceParameterBufferAV1 slice_param;
+ int err = 0;
+
+ for (int i = s->tg_start; i <= s->tg_end; i++) {
+ memset(&slice_param, 0, sizeof(VASliceParameterBufferAV1));
+
+ slice_param = (VASliceParameterBufferAV1) {
+ .slice_data_size = s->tile_group_info[i].tile_size,
+ .slice_data_offset = s->tile_group_info[i].tile_offset,
+ .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
+ .tile_row = s->tile_group_info[i].tile_row,
+ .tile_column = s->tile_group_info[i].tile_column,
+ .tg_start = s->tg_start,
+ .tg_end = s->tg_end,
+ };
+
+ err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &slice_param,
+ sizeof(VASliceParameterBufferAV1),
+ buffer,
+ size);
+ if (err) {
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+const AVHWAccel ff_av1_vaapi_hwaccel = {
+ .name = "av1_vaapi",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_AV1,
+ .pix_fmt = AV_PIX_FMT_VAAPI,
+ .start_frame = vaapi_av1_start_frame,
+ .end_frame = vaapi_av1_end_frame,
+ .decode_slice = vaapi_av1_decode_slice,
+ .frame_priv_data_size = sizeof(VAAPIDecodePicture),
+ .init = vaapi_av1_decode_init,
+ .uninit = vaapi_av1_decode_uninit,
+ .frame_params = ff_vaapi_common_frame_params,
+ .priv_data_size = sizeof(VAAPIAV1DecContext),
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
+};
diff --git a/media/ffvpx/libavcodec/vaapi_decode.c b/media/ffvpx/libavcodec/vaapi_decode.c
new file mode 100644
index 0000000000..ab8c12e364
--- /dev/null
+++ b/media/ffvpx/libavcodec/vaapi_decode.c
@@ -0,0 +1,726 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config_components.h"
+
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/pixdesc.h"
+
+#include "avcodec.h"
+#include "decode.h"
+#include "internal.h"
+#include "vaapi_decode.h"
+#include "vaapi_hevc.h"
+
+
+int ff_vaapi_decode_make_param_buffer(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic,
+ int type,
+ const void *data,
+ size_t size)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+ VABufferID buffer;
+
+ av_assert0(pic->nb_param_buffers + 1 <= MAX_PARAM_BUFFERS);
+
+ vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+ type, size, 1, (void*)data, &buffer);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create parameter "
+ "buffer (type %d): %d (%s).\n",
+ type, vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ pic->param_buffers[pic->nb_param_buffers++] = buffer;
+
+ av_log(avctx, AV_LOG_DEBUG, "Param buffer (type %d, %zu bytes) "
+ "is %#x.\n", type, size, buffer);
+ return 0;
+}
+
+
+int ff_vaapi_decode_make_slice_buffer(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic,
+ const void *params_data,
+ size_t params_size,
+ const void *slice_data,
+ size_t slice_size)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+ int index;
+
+ av_assert0(pic->nb_slices <= pic->slices_allocated);
+ if (pic->nb_slices == pic->slices_allocated) {
+ if (pic->slices_allocated > 0)
+ pic->slices_allocated *= 2;
+ else
+ pic->slices_allocated = 64;
+
+ pic->slice_buffers =
+ av_realloc_array(pic->slice_buffers,
+ pic->slices_allocated,
+ 2 * sizeof(*pic->slice_buffers));
+ if (!pic->slice_buffers)
+ return AVERROR(ENOMEM);
+ }
+ av_assert0(pic->nb_slices + 1 <= pic->slices_allocated);
+
+ index = 2 * pic->nb_slices;
+
+ vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+ VASliceParameterBufferType,
+ params_size, 1, (void*)params_data,
+ &pic->slice_buffers[index]);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create slice "
+ "parameter buffer: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "Slice %d param buffer (%zu bytes) "
+ "is %#x.\n", pic->nb_slices, params_size,
+ pic->slice_buffers[index]);
+
+ vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+ VASliceDataBufferType,
+ slice_size, 1, (void*)slice_data,
+ &pic->slice_buffers[index + 1]);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create slice "
+ "data buffer (size %zu): %d (%s).\n",
+ slice_size, vas, vaErrorStr(vas));
+ vaDestroyBuffer(ctx->hwctx->display,
+ pic->slice_buffers[index]);
+ return AVERROR(EIO);
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "Slice %d data buffer (%zu bytes) "
+ "is %#x.\n", pic->nb_slices, slice_size,
+ pic->slice_buffers[index + 1]);
+
+ ++pic->nb_slices;
+ return 0;
+}
+
+static void ff_vaapi_decode_destroy_buffers(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+ int i;
+
+ for (i = 0; i < pic->nb_param_buffers; i++) {
+ vas = vaDestroyBuffer(ctx->hwctx->display,
+ pic->param_buffers[i]);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to destroy "
+ "parameter buffer %#x: %d (%s).\n",
+ pic->param_buffers[i], vas, vaErrorStr(vas));
+ }
+ }
+
+ for (i = 0; i < 2 * pic->nb_slices; i++) {
+ vas = vaDestroyBuffer(ctx->hwctx->display,
+ pic->slice_buffers[i]);
+ if (vas != VA_STATUS_SUCCESS) {
+            av_log(avctx, AV_LOG_ERROR, "Failed to destroy "
+ "slice buffer %#x: %d (%s).\n",
+ pic->slice_buffers[i], vas, vaErrorStr(vas));
+ }
+ }
+}
+
+int ff_vaapi_decode_issue(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+ int err;
+
+ av_log(avctx, AV_LOG_DEBUG, "Decode to surface %#x.\n",
+ pic->output_surface);
+
+ vas = vaBeginPicture(ctx->hwctx->display, ctx->va_context,
+ pic->output_surface);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to begin picture decode "
+ "issue: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_with_picture;
+ }
+
+ vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
+ pic->param_buffers, pic->nb_param_buffers);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to upload decode "
+ "parameters: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_with_picture;
+ }
+
+ vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
+ pic->slice_buffers, 2 * pic->nb_slices);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to upload slices: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_with_picture;
+ }
+
+ vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to end picture decode "
+ "issue: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ if (CONFIG_VAAPI_1 || ctx->hwctx->driver_quirks &
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS)
+ goto fail;
+ else
+ goto fail_at_end;
+ }
+
+ if (CONFIG_VAAPI_1 || ctx->hwctx->driver_quirks &
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS)
+ ff_vaapi_decode_destroy_buffers(avctx, pic);
+
+ err = 0;
+ goto exit;
+
+fail_with_picture:
+ vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to end picture decode "
+ "after error: %d (%s).\n", vas, vaErrorStr(vas));
+ }
+fail:
+ ff_vaapi_decode_destroy_buffers(avctx, pic);
+fail_at_end:
+exit:
+ pic->nb_param_buffers = 0;
+ pic->nb_slices = 0;
+ pic->slices_allocated = 0;
+ av_freep(&pic->slice_buffers);
+
+ return err;
+}
+
+int ff_vaapi_decode_cancel(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic)
+{
+ ff_vaapi_decode_destroy_buffers(avctx, pic);
+
+ pic->nb_param_buffers = 0;
+ pic->nb_slices = 0;
+ pic->slices_allocated = 0;
+ av_freep(&pic->slice_buffers);
+
+ return 0;
+}
+
+static const struct {
+ uint32_t fourcc;
+ enum AVPixelFormat pix_fmt;
+} vaapi_format_map[] = {
+#define MAP(va, av) { VA_FOURCC_ ## va, AV_PIX_FMT_ ## av }
+ // 4:0:0
+ MAP(Y800, GRAY8),
+ // 4:2:0
+ MAP(NV12, NV12),
+ MAP(YV12, YUV420P),
+ MAP(IYUV, YUV420P),
+#ifdef VA_FOURCC_I420
+ MAP(I420, YUV420P),
+#endif
+ MAP(IMC3, YUV420P),
+ // 4:1:1
+ MAP(411P, YUV411P),
+ // 4:2:2
+ MAP(422H, YUV422P),
+#ifdef VA_FOURCC_YV16
+ MAP(YV16, YUV422P),
+#endif
+ MAP(YUY2, YUYV422),
+#ifdef VA_FOURCC_Y210
+ MAP(Y210, Y210),
+#endif
+#ifdef VA_FOURCC_Y212
+ MAP(Y212, Y212),
+#endif
+ // 4:4:0
+ MAP(422V, YUV440P),
+ // 4:4:4
+ MAP(444P, YUV444P),
+#ifdef VA_FOURCC_XYUV
+ MAP(XYUV, VUYX),
+#endif
+#ifdef VA_FOURCC_Y410
+ MAP(Y410, XV30),
+#endif
+#ifdef VA_FOURCC_Y412
+ MAP(Y412, XV36),
+#endif
+ // 4:2:0 10-bit
+#ifdef VA_FOURCC_P010
+ MAP(P010, P010),
+#endif
+#ifdef VA_FOURCC_P012
+ MAP(P012, P012),
+#endif
+#ifdef VA_FOURCC_I010
+ MAP(I010, YUV420P10),
+#endif
+#undef MAP
+};
+
+static int vaapi_decode_find_best_format(AVCodecContext *avctx,
+ AVHWDeviceContext *device,
+ VAConfigID config_id,
+ AVHWFramesContext *frames)
+{
+ AVVAAPIDeviceContext *hwctx = device->hwctx;
+ VAStatus vas;
+ VASurfaceAttrib *attr;
+ enum AVPixelFormat source_format, best_format, format;
+ uint32_t best_fourcc, fourcc;
+ int i, j, nb_attr;
+
+ source_format = avctx->sw_pix_fmt;
+ av_assert0(source_format != AV_PIX_FMT_NONE);
+
+ vas = vaQuerySurfaceAttributes(hwctx->display, config_id,
+ NULL, &nb_attr);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to query surface attributes: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(ENOSYS);
+ }
+
+ attr = av_malloc_array(nb_attr, sizeof(*attr));
+ if (!attr)
+ return AVERROR(ENOMEM);
+
+ vas = vaQuerySurfaceAttributes(hwctx->display, config_id,
+ attr, &nb_attr);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to query surface attributes: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ av_freep(&attr);
+ return AVERROR(ENOSYS);
+ }
+
+ best_format = AV_PIX_FMT_NONE;
+
+ for (i = 0; i < nb_attr; i++) {
+ if (attr[i].type != VASurfaceAttribPixelFormat)
+ continue;
+
+ fourcc = attr[i].value.value.i;
+ for (j = 0; j < FF_ARRAY_ELEMS(vaapi_format_map); j++) {
+ if (fourcc == vaapi_format_map[j].fourcc)
+ break;
+ }
+ if (j >= FF_ARRAY_ELEMS(vaapi_format_map)) {
+ av_log(avctx, AV_LOG_DEBUG, "Ignoring unknown format %#x.\n",
+ fourcc);
+ continue;
+ }
+ format = vaapi_format_map[j].pix_fmt;
+ av_log(avctx, AV_LOG_DEBUG, "Considering format %#x -> %s.\n",
+ fourcc, av_get_pix_fmt_name(format));
+
+ best_format = av_find_best_pix_fmt_of_2(format, best_format,
+ source_format, 0, NULL);
+ if (format == best_format)
+ best_fourcc = fourcc;
+ }
+
+ av_freep(&attr);
+
+ if (best_format == AV_PIX_FMT_NONE) {
+ av_log(avctx, AV_LOG_ERROR, "No usable formats for decoding!\n");
+ return AVERROR(EINVAL);
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "Picked %s (%#x) as best match for %s.\n",
+ av_get_pix_fmt_name(best_format), best_fourcc,
+ av_get_pix_fmt_name(source_format));
+
+ frames->sw_format = best_format;
+ if (avctx->internal->hwaccel_priv_data) {
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ AVVAAPIFramesContext *avfc = frames->hwctx;
+
+ ctx->pixel_format_attribute = (VASurfaceAttrib) {
+ .type = VASurfaceAttribPixelFormat,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypeInteger,
+ .value.value.i = best_fourcc,
+ };
+
+ avfc->attributes = &ctx->pixel_format_attribute;
+ avfc->nb_attributes = 1;
+ }
+
+ return 0;
+}
+
+static const struct {
+ enum AVCodecID codec_id;
+ int codec_profile;
+ VAProfile va_profile;
+ VAProfile (*profile_parser)(AVCodecContext *avctx);
+} vaapi_profile_map[] = {
+#define MAP(c, p, v, ...) { AV_CODEC_ID_ ## c, FF_PROFILE_ ## p, VAProfile ## v, __VA_ARGS__ }
+ MAP(MPEG2VIDEO, MPEG2_SIMPLE, MPEG2Simple ),
+ MAP(MPEG2VIDEO, MPEG2_MAIN, MPEG2Main ),
+ MAP(H263, UNKNOWN, H263Baseline),
+ MAP(MPEG4, MPEG4_SIMPLE, MPEG4Simple ),
+ MAP(MPEG4, MPEG4_ADVANCED_SIMPLE,
+ MPEG4AdvancedSimple),
+ MAP(MPEG4, MPEG4_MAIN, MPEG4Main ),
+ MAP(H264, H264_CONSTRAINED_BASELINE,
+ H264ConstrainedBaseline),
+ MAP(H264, H264_MAIN, H264Main ),
+ MAP(H264, H264_HIGH, H264High ),
+#if VA_CHECK_VERSION(0, 37, 0)
+ MAP(HEVC, HEVC_MAIN, HEVCMain ),
+ MAP(HEVC, HEVC_MAIN_10, HEVCMain10 ),
+ MAP(HEVC, HEVC_MAIN_STILL_PICTURE,
+ HEVCMain ),
+#endif
+#if VA_CHECK_VERSION(1, 2, 0) && CONFIG_HEVC_VAAPI_HWACCEL
+ MAP(HEVC, HEVC_REXT, None,
+ ff_vaapi_parse_hevc_rext_scc_profile ),
+ MAP(HEVC, HEVC_SCC, None,
+ ff_vaapi_parse_hevc_rext_scc_profile ),
+#endif
+ MAP(MJPEG, MJPEG_HUFFMAN_BASELINE_DCT,
+ JPEGBaseline),
+ MAP(WMV3, VC1_SIMPLE, VC1Simple ),
+ MAP(WMV3, VC1_MAIN, VC1Main ),
+ MAP(WMV3, VC1_COMPLEX, VC1Advanced ),
+ MAP(WMV3, VC1_ADVANCED, VC1Advanced ),
+ MAP(VC1, VC1_SIMPLE, VC1Simple ),
+ MAP(VC1, VC1_MAIN, VC1Main ),
+ MAP(VC1, VC1_COMPLEX, VC1Advanced ),
+ MAP(VC1, VC1_ADVANCED, VC1Advanced ),
+ MAP(VP8, UNKNOWN, VP8Version0_3 ),
+#if VA_CHECK_VERSION(0, 38, 0)
+ MAP(VP9, VP9_0, VP9Profile0 ),
+#endif
+#if VA_CHECK_VERSION(0, 39, 0)
+ MAP(VP9, VP9_1, VP9Profile1 ),
+ MAP(VP9, VP9_2, VP9Profile2 ),
+ MAP(VP9, VP9_3, VP9Profile3 ),
+#endif
+#if VA_CHECK_VERSION(1, 8, 0)
+ MAP(AV1, AV1_MAIN, AV1Profile0),
+ MAP(AV1, AV1_HIGH, AV1Profile1),
+#endif
+
+#undef MAP
+};
+
+/*
+ * Set *va_config and the frames_ref fields from the current codec parameters
+ * in avctx.
+ */
+static int vaapi_decode_make_config(AVCodecContext *avctx,
+ AVBufferRef *device_ref,
+ VAConfigID *va_config,
+ AVBufferRef *frames_ref)
+{
+ AVVAAPIHWConfig *hwconfig = NULL;
+ AVHWFramesConstraints *constraints = NULL;
+ VAStatus vas;
+ int err, i, j;
+ const AVCodecDescriptor *codec_desc;
+ VAProfile *profile_list = NULL, matched_va_profile, va_profile;
+ int profile_count, exact_match, matched_ff_profile, codec_profile;
+
+ AVHWDeviceContext *device = (AVHWDeviceContext*)device_ref->data;
+ AVVAAPIDeviceContext *hwctx = device->hwctx;
+
+ codec_desc = avcodec_descriptor_get(avctx->codec_id);
+ if (!codec_desc) {
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ profile_count = vaMaxNumProfiles(hwctx->display);
+ profile_list = av_malloc_array(profile_count,
+ sizeof(VAProfile));
+ if (!profile_list) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ vas = vaQueryConfigProfiles(hwctx->display,
+ profile_list, &profile_count);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to query profiles: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(ENOSYS);
+ goto fail;
+ }
+
+ matched_va_profile = VAProfileNone;
+ exact_match = 0;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_profile_map); i++) {
+ int profile_match = 0;
+ if (avctx->codec_id != vaapi_profile_map[i].codec_id)
+ continue;
+ if (avctx->profile == vaapi_profile_map[i].codec_profile ||
+ vaapi_profile_map[i].codec_profile == FF_PROFILE_UNKNOWN)
+ profile_match = 1;
+
+ va_profile = vaapi_profile_map[i].profile_parser ?
+ vaapi_profile_map[i].profile_parser(avctx) :
+ vaapi_profile_map[i].va_profile;
+ codec_profile = vaapi_profile_map[i].codec_profile;
+
+ for (j = 0; j < profile_count; j++) {
+ if (va_profile == profile_list[j]) {
+ exact_match = profile_match;
+ break;
+ }
+ }
+ if (j < profile_count) {
+ matched_va_profile = va_profile;
+ matched_ff_profile = codec_profile;
+ if (exact_match)
+ break;
+ }
+ }
+ av_freep(&profile_list);
+
+ if (matched_va_profile == VAProfileNone) {
+ av_log(avctx, AV_LOG_ERROR, "No support for codec %s "
+ "profile %d.\n", codec_desc->name, avctx->profile);
+ err = AVERROR(ENOSYS);
+ goto fail;
+ }
+ if (!exact_match) {
+ if (avctx->hwaccel_flags &
+ AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH) {
+ av_log(avctx, AV_LOG_VERBOSE, "Codec %s profile %d not "
+ "supported for hardware decode.\n",
+ codec_desc->name, avctx->profile);
+ av_log(avctx, AV_LOG_WARNING, "Using possibly-"
+ "incompatible profile %d instead.\n",
+ matched_ff_profile);
+ } else {
+ av_log(avctx, AV_LOG_VERBOSE, "Codec %s profile %d not "
+ "supported for hardware decode.\n",
+ codec_desc->name, avctx->profile);
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+ }
+
+ vas = vaCreateConfig(hwctx->display, matched_va_profile,
+ VAEntrypointVLD, NULL, 0,
+ va_config);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create decode "
+ "configuration: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ hwconfig = av_hwdevice_hwconfig_alloc(device_ref);
+ if (!hwconfig) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ hwconfig->config_id = *va_config;
+
+ constraints =
+ av_hwdevice_get_hwframe_constraints(device_ref, hwconfig);
+ if (!constraints) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if (avctx->coded_width < constraints->min_width ||
+ avctx->coded_height < constraints->min_height ||
+ avctx->coded_width > constraints->max_width ||
+ avctx->coded_height > constraints->max_height) {
+ av_log(avctx, AV_LOG_ERROR, "Hardware does not support image "
+ "size %dx%d (constraints: width %d-%d height %d-%d).\n",
+ avctx->coded_width, avctx->coded_height,
+ constraints->min_width, constraints->max_width,
+ constraints->min_height, constraints->max_height);
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+ if (!constraints->valid_sw_formats ||
+ constraints->valid_sw_formats[0] == AV_PIX_FMT_NONE) {
+ av_log(avctx, AV_LOG_ERROR, "Hardware does not offer any "
+ "usable surface formats.\n");
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if (frames_ref) {
+ AVHWFramesContext *frames = (AVHWFramesContext *)frames_ref->data;
+
+ frames->format = AV_PIX_FMT_VAAPI;
+ frames->width = avctx->coded_width;
+ frames->height = avctx->coded_height;
+
+ err = vaapi_decode_find_best_format(avctx, device,
+ *va_config, frames);
+ if (err < 0)
+ goto fail;
+
+ frames->initial_pool_size = 1;
+ // Add per-codec number of surfaces used for storing reference frames.
+ switch (avctx->codec_id) {
+ case AV_CODEC_ID_H264:
+ case AV_CODEC_ID_HEVC:
+ case AV_CODEC_ID_AV1:
+ frames->initial_pool_size += 16;
+ break;
+ case AV_CODEC_ID_VP9:
+ frames->initial_pool_size += 8;
+ break;
+ case AV_CODEC_ID_VP8:
+ frames->initial_pool_size += 3;
+ break;
+ default:
+ frames->initial_pool_size += 2;
+ }
+ }
+
+ av_hwframe_constraints_free(&constraints);
+ av_freep(&hwconfig);
+
+ return 0;
+
+fail:
+ av_hwframe_constraints_free(&constraints);
+ av_freep(&hwconfig);
+ if (*va_config != VA_INVALID_ID) {
+ vaDestroyConfig(hwctx->display, *va_config);
+ *va_config = VA_INVALID_ID;
+ }
+ av_freep(&profile_list);
+ return err;
+}
+
+int ff_vaapi_common_frame_params(AVCodecContext *avctx,
+ AVBufferRef *hw_frames_ctx)
+{
+ AVHWFramesContext *hw_frames = (AVHWFramesContext *)hw_frames_ctx->data;
+ AVHWDeviceContext *device_ctx = hw_frames->device_ctx;
+ AVVAAPIDeviceContext *hwctx;
+ VAConfigID va_config = VA_INVALID_ID;
+ int err;
+
+ if (device_ctx->type != AV_HWDEVICE_TYPE_VAAPI)
+ return AVERROR(EINVAL);
+ hwctx = device_ctx->hwctx;
+
+ err = vaapi_decode_make_config(avctx, hw_frames->device_ref, &va_config,
+ hw_frames_ctx);
+ if (err)
+ return err;
+
+ if (va_config != VA_INVALID_ID)
+ vaDestroyConfig(hwctx->display, va_config);
+
+ return 0;
+}
+
+int ff_vaapi_decode_init(AVCodecContext *avctx)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+ int err;
+
+ ctx->va_config = VA_INVALID_ID;
+ ctx->va_context = VA_INVALID_ID;
+
+ err = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_VAAPI);
+ if (err < 0)
+ goto fail;
+
+ ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ ctx->hwfc = ctx->frames->hwctx;
+ ctx->device = ctx->frames->device_ctx;
+ ctx->hwctx = ctx->device->hwctx;
+
+ err = vaapi_decode_make_config(avctx, ctx->frames->device_ref,
+ &ctx->va_config, NULL);
+ if (err)
+ goto fail;
+
+ vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
+ avctx->coded_width, avctx->coded_height,
+ VA_PROGRESSIVE,
+ ctx->hwfc->surface_ids,
+ ctx->hwfc->nb_surfaces,
+ &ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create decode "
+ "context: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "Decode context initialised: "
+ "%#x/%#x.\n", ctx->va_config, ctx->va_context);
+
+ return 0;
+
+fail:
+ ff_vaapi_decode_uninit(avctx);
+ return err;
+}
+
+int ff_vaapi_decode_uninit(AVCodecContext *avctx)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+
+ if (ctx->va_context != VA_INVALID_ID) {
+ vas = vaDestroyContext(ctx->hwctx->display, ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to destroy decode "
+ "context %#x: %d (%s).\n",
+ ctx->va_context, vas, vaErrorStr(vas));
+ }
+ }
+ if (ctx->va_config != VA_INVALID_ID) {
+ vas = vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to destroy decode "
+ "configuration %#x: %d (%s).\n",
+ ctx->va_config, vas, vaErrorStr(vas));
+ }
+ }
+
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/vaapi_decode.h b/media/ffvpx/libavcodec/vaapi_decode.h
new file mode 100644
index 0000000000..6beda14e52
--- /dev/null
+++ b/media/ffvpx/libavcodec/vaapi_decode.h
@@ -0,0 +1,91 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VAAPI_DECODE_H
+#define AVCODEC_VAAPI_DECODE_H
+
+#include <va/va.h>
+
+#include "libavutil/frame.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_vaapi.h"
+
+#include "avcodec.h"
+
+static inline VASurfaceID ff_vaapi_get_surface_id(AVFrame *pic)
+{
+ return (uintptr_t)pic->data[3];
+}
+
+enum {
+ MAX_PARAM_BUFFERS = 16,
+};
+
+typedef struct VAAPIDecodePicture {
+ VASurfaceID output_surface;
+
+ int nb_param_buffers;
+ VABufferID param_buffers[MAX_PARAM_BUFFERS];
+
+ int nb_slices;
+ VABufferID *slice_buffers;
+ int slices_allocated;
+} VAAPIDecodePicture;
+
+typedef struct VAAPIDecodeContext {
+ VAConfigID va_config;
+ VAContextID va_context;
+
+ AVHWDeviceContext *device;
+ AVVAAPIDeviceContext *hwctx;
+
+ AVHWFramesContext *frames;
+ AVVAAPIFramesContext *hwfc;
+
+ enum AVPixelFormat surface_format;
+ int surface_count;
+
+ VASurfaceAttrib pixel_format_attribute;
+} VAAPIDecodeContext;
+
+
+int ff_vaapi_decode_make_param_buffer(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic,
+ int type,
+ const void *data,
+ size_t size);
+
+int ff_vaapi_decode_make_slice_buffer(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic,
+ const void *params_data,
+ size_t params_size,
+ const void *slice_data,
+ size_t slice_size);
+
+int ff_vaapi_decode_issue(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic);
+int ff_vaapi_decode_cancel(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic);
+
+int ff_vaapi_decode_init(AVCodecContext *avctx);
+int ff_vaapi_decode_uninit(AVCodecContext *avctx);
+
+int ff_vaapi_common_frame_params(AVCodecContext *avctx,
+ AVBufferRef *hw_frames_ctx);
+
+#endif /* AVCODEC_VAAPI_DECODE_H */
diff --git a/media/ffvpx/libavcodec/vaapi_hevc.h b/media/ffvpx/libavcodec/vaapi_hevc.h
new file mode 100644
index 0000000000..449635d0d7
--- /dev/null
+++ b/media/ffvpx/libavcodec/vaapi_hevc.h
@@ -0,0 +1,27 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VAAPI_HEVC_H
+#define AVCODEC_VAAPI_HEVC_H
+
+#include <va/va.h>
+#include "avcodec.h"
+
+VAProfile ff_vaapi_parse_hevc_rext_scc_profile(AVCodecContext *avctx);
+
+#endif /* AVCODEC_VAAPI_HEVC_H */
diff --git a/media/ffvpx/libavcodec/vaapi_vp8.c b/media/ffvpx/libavcodec/vaapi_vp8.c
new file mode 100644
index 0000000000..5b18bf8f34
--- /dev/null
+++ b/media/ffvpx/libavcodec/vaapi_vp8.c
@@ -0,0 +1,237 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <va/va.h>
+#include <va/va_dec_vp8.h>
+
+#include "hwconfig.h"
+#include "vaapi_decode.h"
+#include "vp8.h"
+
+static VASurfaceID vaapi_vp8_surface_id(VP8Frame *vf)
+{
+ if (vf)
+ return ff_vaapi_get_surface_id(vf->tf.f);
+ else
+ return VA_INVALID_SURFACE;
+}
+
+static int vaapi_vp8_start_frame(AVCodecContext *avctx,
+ av_unused const uint8_t *buffer,
+ av_unused uint32_t size)
+{
+ const VP8Context *s = avctx->priv_data;
+ VAAPIDecodePicture *pic = s->framep[VP8_FRAME_CURRENT]->hwaccel_picture_private;
+ VAPictureParameterBufferVP8 pp;
+ VAProbabilityDataBufferVP8 prob;
+ VAIQMatrixBufferVP8 quant;
+ int err, i, j, k;
+
+ pic->output_surface = vaapi_vp8_surface_id(s->framep[VP8_FRAME_CURRENT]);
+
+ pp = (VAPictureParameterBufferVP8) {
+ .frame_width = avctx->width,
+ .frame_height = avctx->height,
+
+ .last_ref_frame = vaapi_vp8_surface_id(s->framep[VP8_FRAME_PREVIOUS]),
+ .golden_ref_frame = vaapi_vp8_surface_id(s->framep[VP8_FRAME_GOLDEN]),
+ .alt_ref_frame = vaapi_vp8_surface_id(s->framep[VP8_FRAME_ALTREF]),
+ .out_of_loop_frame = VA_INVALID_SURFACE,
+
+ .pic_fields.bits = {
+ .key_frame = !s->keyframe,
+ .version = s->profile,
+
+ .segmentation_enabled = s->segmentation.enabled,
+ .update_mb_segmentation_map = s->segmentation.update_map,
+ .update_segment_feature_data = s->segmentation.update_feature_data,
+
+ .filter_type = s->filter.simple,
+ .sharpness_level = s->filter.sharpness,
+
+ .loop_filter_adj_enable = s->lf_delta.enabled,
+ .mode_ref_lf_delta_update = s->lf_delta.update,
+
+ .sign_bias_golden = s->sign_bias[VP8_FRAME_GOLDEN],
+ .sign_bias_alternate = s->sign_bias[VP8_FRAME_ALTREF],
+
+ .mb_no_coeff_skip = s->mbskip_enabled,
+ .loop_filter_disable = s->filter.level == 0,
+ },
+
+ .prob_skip_false = s->prob->mbskip,
+ .prob_intra = s->prob->intra,
+ .prob_last = s->prob->last,
+ .prob_gf = s->prob->golden,
+ };
+
+ for (i = 0; i < 3; i++)
+ pp.mb_segment_tree_probs[i] = s->prob->segmentid[i];
+
+ for (i = 0; i < 4; i++) {
+ if (s->segmentation.enabled) {
+ pp.loop_filter_level[i] = s->segmentation.filter_level[i];
+ if (!s->segmentation.absolute_vals)
+ pp.loop_filter_level[i] += s->filter.level;
+ } else {
+ pp.loop_filter_level[i] = s->filter.level;
+ }
+ pp.loop_filter_level[i] = av_clip_uintp2(pp.loop_filter_level[i], 6);
+ }
+
+ for (i = 0; i < 4; i++) {
+ pp.loop_filter_deltas_ref_frame[i] = s->lf_delta.ref[i];
+ pp.loop_filter_deltas_mode[i] = s->lf_delta.mode[i + 4];
+ }
+
+ if (s->keyframe) {
+ static const uint8_t keyframe_y_mode_probs[4] = {
+ 145, 156, 163, 128
+ };
+ static const uint8_t keyframe_uv_mode_probs[3] = {
+ 142, 114, 183
+ };
+ memcpy(pp.y_mode_probs, keyframe_y_mode_probs, 4);
+ memcpy(pp.uv_mode_probs, keyframe_uv_mode_probs, 3);
+ } else {
+ for (i = 0; i < 4; i++)
+ pp.y_mode_probs[i] = s->prob->pred16x16[i];
+ for (i = 0; i < 3; i++)
+ pp.uv_mode_probs[i] = s->prob->pred8x8c[i];
+ }
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 19; j++)
+ pp.mv_probs[i][j] = s->prob->mvc[i][j];
+
+ pp.bool_coder_ctx.range = s->coder_state_at_header_end.range;
+ pp.bool_coder_ctx.value = s->coder_state_at_header_end.value;
+ pp.bool_coder_ctx.count = s->coder_state_at_header_end.bit_count;
+
+ err = ff_vaapi_decode_make_param_buffer(avctx, pic,
+ VAPictureParameterBufferType,
+ &pp, sizeof(pp));
+ if (err < 0)
+ goto fail;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++) {
+ static const int coeff_bands_inverse[8] = {
+ 0, 1, 2, 3, 5, 6, 4, 15
+ };
+ int coeff_pos = coeff_bands_inverse[j];
+
+ for (k = 0; k < 3; k++) {
+ memcpy(prob.dct_coeff_probs[i][j][k],
+ s->prob->token[i][coeff_pos][k], 11);
+ }
+ }
+ }
+
+ err = ff_vaapi_decode_make_param_buffer(avctx, pic,
+ VAProbabilityBufferType,
+ &prob, sizeof(prob));
+ if (err < 0)
+ goto fail;
+
+ for (i = 0; i < 4; i++) {
+ int base_qi = s->segmentation.base_quant[i];
+ if (!s->segmentation.absolute_vals)
+ base_qi += s->quant.yac_qi;
+
+ quant.quantization_index[i][0] = av_clip_uintp2(base_qi, 7);
+ quant.quantization_index[i][1] = av_clip_uintp2(base_qi + s->quant.ydc_delta, 7);
+ quant.quantization_index[i][2] = av_clip_uintp2(base_qi + s->quant.y2dc_delta, 7);
+ quant.quantization_index[i][3] = av_clip_uintp2(base_qi + s->quant.y2ac_delta, 7);
+ quant.quantization_index[i][4] = av_clip_uintp2(base_qi + s->quant.uvdc_delta, 7);
+ quant.quantization_index[i][5] = av_clip_uintp2(base_qi + s->quant.uvac_delta, 7);
+ }
+
+ err = ff_vaapi_decode_make_param_buffer(avctx, pic,
+ VAIQMatrixBufferType,
+ &quant, sizeof(quant));
+ if (err < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+}
+
+static int vaapi_vp8_end_frame(AVCodecContext *avctx)
+{
+ const VP8Context *s = avctx->priv_data;
+ VAAPIDecodePicture *pic = s->framep[VP8_FRAME_CURRENT]->hwaccel_picture_private;
+
+ return ff_vaapi_decode_issue(avctx, pic);
+}
+
+static int vaapi_vp8_decode_slice(AVCodecContext *avctx,
+ const uint8_t *buffer,
+ uint32_t size)
+{
+ const VP8Context *s = avctx->priv_data;
+ VAAPIDecodePicture *pic = s->framep[VP8_FRAME_CURRENT]->hwaccel_picture_private;
+ VASliceParameterBufferVP8 sp;
+ int err, i;
+
+ unsigned int header_size = 3 + 7 * s->keyframe;
+ const uint8_t *data = buffer + header_size;
+ unsigned int data_size = size - header_size;
+
+ sp = (VASliceParameterBufferVP8) {
+ .slice_data_size = data_size,
+ .slice_data_offset = 0,
+ .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
+
+ .macroblock_offset = (8 * (s->coder_state_at_header_end.input - data) -
+ s->coder_state_at_header_end.bit_count - 8),
+ .num_of_partitions = s->num_coeff_partitions + 1,
+ };
+
+ sp.partition_size[0] = s->header_partition_size - ((sp.macroblock_offset + 7) / 8);
+ for (i = 0; i < 8; i++)
+ sp.partition_size[i+1] = s->coeff_partition_size[i];
+
+ err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &sp, sizeof(sp), data, data_size);
+ if (err)
+ goto fail;
+
+ return 0;
+
+fail:
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+}
+
+const AVHWAccel ff_vp8_vaapi_hwaccel = {
+ .name = "vp8_vaapi",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VP8,
+ .pix_fmt = AV_PIX_FMT_VAAPI,
+ .start_frame = &vaapi_vp8_start_frame,
+ .end_frame = &vaapi_vp8_end_frame,
+ .decode_slice = &vaapi_vp8_decode_slice,
+ .frame_priv_data_size = sizeof(VAAPIDecodePicture),
+ .init = &ff_vaapi_decode_init,
+ .uninit = &ff_vaapi_decode_uninit,
+ .frame_params = &ff_vaapi_common_frame_params,
+ .priv_data_size = sizeof(VAAPIDecodeContext),
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
+};
diff --git a/media/ffvpx/libavcodec/vaapi_vp9.c b/media/ffvpx/libavcodec/vaapi_vp9.c
new file mode 100644
index 0000000000..776382f683
--- /dev/null
+++ b/media/ffvpx/libavcodec/vaapi_vp9.c
@@ -0,0 +1,185 @@
+/*
+ * VP9 HW decode acceleration through VA API
+ *
+ * Copyright (C) 2015 Timo Rothenpieler <timo@rothenpieler.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/pixdesc.h"
+
+#include "hwconfig.h"
+#include "vaapi_decode.h"
+#include "vp9shared.h"
+
+static VASurfaceID vaapi_vp9_surface_id(const VP9Frame *vf)
+{
+ if (vf)
+ return ff_vaapi_get_surface_id(vf->tf.f);
+ else
+ return VA_INVALID_SURFACE;
+}
+
+static int vaapi_vp9_start_frame(AVCodecContext *avctx,
+ av_unused const uint8_t *buffer,
+ av_unused uint32_t size)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+ VADecPictureParameterBufferVP9 pic_param;
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
+ int err, i;
+
+ pic->output_surface = vaapi_vp9_surface_id(&h->frames[CUR_FRAME]);
+
+ pic_param = (VADecPictureParameterBufferVP9) {
+ .frame_width = avctx->width,
+ .frame_height = avctx->height,
+
+ .pic_fields.bits = {
+ .subsampling_x = pixdesc->log2_chroma_w,
+ .subsampling_y = pixdesc->log2_chroma_h,
+ .frame_type = !h->h.keyframe,
+ .show_frame = !h->h.invisible,
+ .error_resilient_mode = h->h.errorres,
+ .intra_only = h->h.intraonly,
+ .allow_high_precision_mv = h->h.keyframe ? 0 : h->h.highprecisionmvs,
+ .mcomp_filter_type = h->h.filtermode ^ (h->h.filtermode <= 1),
+ .frame_parallel_decoding_mode = h->h.parallelmode,
+ .reset_frame_context = h->h.resetctx,
+ .refresh_frame_context = h->h.refreshctx,
+ .frame_context_idx = h->h.framectxid,
+
+ .segmentation_enabled = h->h.segmentation.enabled,
+ .segmentation_temporal_update = h->h.segmentation.temporal,
+ .segmentation_update_map = h->h.segmentation.update_map,
+
+ .last_ref_frame = h->h.refidx[0],
+ .last_ref_frame_sign_bias = h->h.signbias[0],
+ .golden_ref_frame = h->h.refidx[1],
+ .golden_ref_frame_sign_bias = h->h.signbias[1],
+ .alt_ref_frame = h->h.refidx[2],
+ .alt_ref_frame_sign_bias = h->h.signbias[2],
+ .lossless_flag = h->h.lossless,
+ },
+
+ .filter_level = h->h.filter.level,
+ .sharpness_level = h->h.filter.sharpness,
+ .log2_tile_rows = h->h.tiling.log2_tile_rows,
+ .log2_tile_columns = h->h.tiling.log2_tile_cols,
+
+ .frame_header_length_in_bytes = h->h.uncompressed_header_size,
+ .first_partition_size = h->h.compressed_header_size,
+
+ .profile = h->h.profile,
+ .bit_depth = h->h.bpp,
+ };
+
+ for (i = 0; i < 7; i++)
+ pic_param.mb_segment_tree_probs[i] = h->h.segmentation.prob[i];
+
+ if (h->h.segmentation.temporal) {
+ for (i = 0; i < 3; i++)
+ pic_param.segment_pred_probs[i] = h->h.segmentation.pred_prob[i];
+ } else {
+ memset(pic_param.segment_pred_probs, 255, sizeof(pic_param.segment_pred_probs));
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (h->refs[i].f->buf[0])
+ pic_param.reference_frames[i] = ff_vaapi_get_surface_id(h->refs[i].f);
+ else
+ pic_param.reference_frames[i] = VA_INVALID_ID;
+ }
+
+ err = ff_vaapi_decode_make_param_buffer(avctx, pic,
+ VAPictureParameterBufferType,
+ &pic_param, sizeof(pic_param));
+ if (err < 0) {
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+ }
+
+ return 0;
+}
+
+static int vaapi_vp9_end_frame(AVCodecContext *avctx)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+
+ return ff_vaapi_decode_issue(avctx, pic);
+}
+
+static int vaapi_vp9_decode_slice(AVCodecContext *avctx,
+ const uint8_t *buffer,
+ uint32_t size)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+ VASliceParameterBufferVP9 slice_param;
+ int err, i;
+
+ slice_param = (VASliceParameterBufferVP9) {
+ .slice_data_size = size,
+ .slice_data_offset = 0,
+ .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
+ };
+
+ for (i = 0; i < 8; i++) {
+ slice_param.seg_param[i] = (VASegmentParameterVP9) {
+ .segment_flags.fields = {
+ .segment_reference_enabled = h->h.segmentation.feat[i].ref_enabled,
+ .segment_reference = h->h.segmentation.feat[i].ref_val,
+ .segment_reference_skipped = h->h.segmentation.feat[i].skip_enabled,
+ },
+
+ .luma_dc_quant_scale = h->h.segmentation.feat[i].qmul[0][0],
+ .luma_ac_quant_scale = h->h.segmentation.feat[i].qmul[0][1],
+ .chroma_dc_quant_scale = h->h.segmentation.feat[i].qmul[1][0],
+ .chroma_ac_quant_scale = h->h.segmentation.feat[i].qmul[1][1],
+ };
+
+ memcpy(slice_param.seg_param[i].filter_level, h->h.segmentation.feat[i].lflvl, sizeof(slice_param.seg_param[i].filter_level));
+ }
+
+ err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
+ &slice_param, sizeof(slice_param),
+ buffer, size);
+ if (err) {
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+ }
+
+ return 0;
+}
+
+const AVHWAccel ff_vp9_vaapi_hwaccel = {
+ .name = "vp9_vaapi",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VP9,
+ .pix_fmt = AV_PIX_FMT_VAAPI,
+ .start_frame = vaapi_vp9_start_frame,
+ .end_frame = vaapi_vp9_end_frame,
+ .decode_slice = vaapi_vp9_decode_slice,
+ .frame_priv_data_size = sizeof(VAAPIDecodePicture),
+ .init = ff_vaapi_decode_init,
+ .uninit = ff_vaapi_decode_uninit,
+ .frame_params = ff_vaapi_common_frame_params,
+ .priv_data_size = sizeof(VAAPIDecodeContext),
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
+};
diff --git a/media/ffvpx/libavcodec/version.c b/media/ffvpx/libavcodec/version.c
new file mode 100644
index 0000000000..d7966b2015
--- /dev/null
+++ b/media/ffvpx/libavcodec/version.c
@@ -0,0 +1,50 @@
+/*
+ * Version functions.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/avassert.h"
+#include "avcodec.h"
+#include "codec_id.h"
+#include "version.h"
+
+#include "libavutil/ffversion.h"
+const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
+
+unsigned avcodec_version(void)
+{
+ av_assert0(AV_CODEC_ID_PCM_S8_PLANAR==65563);
+ av_assert0(AV_CODEC_ID_ADPCM_G722==69660);
+ av_assert0(AV_CODEC_ID_SRT==94216);
+ av_assert0(LIBAVCODEC_VERSION_MICRO >= 100);
+
+ return LIBAVCODEC_VERSION_INT;
+}
+
+const char *avcodec_configuration(void)
+{
+ return FFMPEG_CONFIGURATION;
+}
+
+const char *avcodec_license(void)
+{
+#define LICENSE_PREFIX "libavcodec license: "
+ return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
+}
diff --git a/media/ffvpx/libavcodec/version.h b/media/ffvpx/libavcodec/version.h
new file mode 100644
index 0000000000..7aa95fc3f1
--- /dev/null
+++ b/media/ffvpx/libavcodec/version.h
@@ -0,0 +1,45 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_H
+#define AVCODEC_VERSION_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
+#include "libavutil/version.h"
+
+#include "version_major.h"
+
+#define LIBAVCODEC_VERSION_MINOR 5
+#define LIBAVCODEC_VERSION_MICRO 100
+
+#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
+
+#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
+
+#endif /* AVCODEC_VERSION_H */
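
The macros above pack into LIBAVCODEC_VERSION_INT via AV_VERSION_INT() from libavutil/version.h, and avcodec_version() in version.c (earlier in this diff) returns the same packed value at run time. As a quick illustration of how callers outside the library typically consume them (this snippet is not part of the patch), a compile-time guard plus a run-time report looks like:

/* Illustrative usage only; not part of this import. */
#include <stdio.h>
#include <libavcodec/avcodec.h>   /* pulls in version.h and version_major.h */

int main(void)
{
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(60, 0, 0)
    /* Compile-time check against the headers this tree ships (60.5.100). */
    printf("built against lavc %d.%d.%d\n",
           LIBAVCODEC_VERSION_MAJOR,
           LIBAVCODEC_VERSION_MINOR,
           LIBAVCODEC_VERSION_MICRO);
#endif
    /* Run-time value reported by whatever libavcodec is actually linked. */
    printf("runtime lavc version: %u\n", avcodec_version());
    return 0;
}
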
diff --git a/media/ffvpx/libavcodec/version_major.h b/media/ffvpx/libavcodec/version_major.h
new file mode 100644
index 0000000000..c2f118b262
--- /dev/null
+++ b/media/ffvpx/libavcodec/version_major.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_MAJOR_H
+#define AVCODEC_VERSION_MAJOR_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
+#define LIBAVCODEC_VERSION_MAJOR 60
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @note, when bumping the major version it is recommended to manually
+ * disable each FF_API_* in its own commit instead of disabling them all
+ * at once through the bump. This improves the git bisect-ability of the change.
+ */
+
+#define FF_API_INIT_PACKET (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_IDCT_NONE (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_SVTAV1_OPTS (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AYUV_CODECID (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_VT_OUTPUT_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AVCODEC_CHROMA_POS (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_VT_HWACCEL_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AVCTX_FRAME_NUMBER (LIBAVCODEC_VERSION_MAJOR < 61)
+
+// reminder to remove CrystalHD decoders on next major bump
+#define FF_CODEC_CRYSTAL_HD (LIBAVCODEC_VERSION_MAJOR < 61)
+
+#endif /* AVCODEC_VERSION_MAJOR_H */
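
The FF_API_* guards defined above implement FFmpeg's usual deprecation pattern: deprecated public API is wrapped in an #if FF_API_... block so that it drops out automatically once LIBAVCODEC_VERSION_MAJOR is bumped. A hedged sketch of that convention, based on the upstream packet.h header rather than on code in this patch:

/* Sketch of the upstream convention; not code from this patch. */
#if FF_API_INIT_PACKET
/* Deprecated initializer, superseded by av_packet_alloc()-style usage;
 * the declaration disappears once LIBAVCODEC_VERSION_MAJOR reaches 61. */
attribute_deprecated
void av_init_packet(AVPacket *pkt);
#endif
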
diff --git a/media/ffvpx/libavcodec/videodsp.c b/media/ffvpx/libavcodec/videodsp.c
new file mode 100644
index 0000000000..bdff2e76f5
--- /dev/null
+++ b/media/ffvpx/libavcodec/videodsp.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2012 Ronald S. Bultje
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/macros.h"
+#include "videodsp.h"
+
+#define BIT_DEPTH 8
+#include "videodsp_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 16
+#include "videodsp_template.c"
+#undef BIT_DEPTH
+
+static void just_return(const uint8_t *buf, ptrdiff_t stride, int h)
+{
+}
+
+av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
+{
+ ctx->prefetch = just_return;
+ if (bpc <= 8) {
+ ctx->emulated_edge_mc = ff_emulated_edge_mc_8;
+ } else {
+ ctx->emulated_edge_mc = ff_emulated_edge_mc_16;
+ }
+
+#if ARCH_AARCH64
+ ff_videodsp_init_aarch64(ctx, bpc);
+#elif ARCH_ARM
+ ff_videodsp_init_arm(ctx, bpc);
+#elif ARCH_PPC
+ ff_videodsp_init_ppc(ctx, bpc);
+#elif ARCH_X86
+ ff_videodsp_init_x86(ctx, bpc);
+#elif ARCH_MIPS
+ ff_videodsp_init_mips(ctx, bpc);
+#elif ARCH_LOONGARCH64
+ ff_videodsp_init_loongarch(ctx, bpc);
+#endif
+}
diff --git a/media/ffvpx/libavcodec/videodsp.h b/media/ffvpx/libavcodec/videodsp.h
new file mode 100644
index 0000000000..e8960b609d
--- /dev/null
+++ b/media/ffvpx/libavcodec/videodsp.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2012 Ronald S. Bultje
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Core video DSP helper functions
+ */
+
+#ifndef AVCODEC_VIDEODSP_H
+#define AVCODEC_VIDEODSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define EMULATED_EDGE(depth) \
+void ff_emulated_edge_mc_ ## depth(uint8_t *dst, const uint8_t *src, \
+ ptrdiff_t dst_stride, ptrdiff_t src_stride, \
+ int block_w, int block_h,\
+ int src_x, int src_y, int w, int h);
+
+EMULATED_EDGE(8)
+
+typedef struct VideoDSPContext {
+ /**
+ * Copy a rectangular area of samples to a temporary buffer and replicate
+ * the border samples.
+ *
+ * @param dst destination buffer
+     * @param src source buffer
+     * @param dst_linesize number of bytes between 2 vertically adjacent
+     *                     samples in the destination buffer
+     * @param src_linesize number of bytes between 2 vertically adjacent
+     *                     samples in the source buffer
+ * @param block_w width of block
+ * @param block_h height of block
+ * @param src_x x coordinate of the top left sample of the block in the
+ * source buffer
+ * @param src_y y coordinate of the top left sample of the block in the
+ * source buffer
+ * @param w width of the source buffer
+ * @param h height of the source buffer
+ */
+ void (*emulated_edge_mc)(uint8_t *dst, const uint8_t *src,
+ ptrdiff_t dst_linesize,
+ ptrdiff_t src_linesize,
+ int block_w, int block_h,
+ int src_x, int src_y, int w, int h);
+
+ /**
+ * Prefetch memory into cache (if supported by hardware).
+ *
+ * @param buf pointer to buffer to prefetch memory from
+ * @param stride distance between two lines of buf (in bytes)
+ * @param h number of lines to prefetch
+ */
+ void (*prefetch)(const uint8_t *buf, ptrdiff_t stride, int h);
+} VideoDSPContext;
+
+void ff_videodsp_init(VideoDSPContext *ctx, int bpc);
+
+/* for internal use only (i.e. called by ff_videodsp_init()) */
+void ff_videodsp_init_aarch64(VideoDSPContext *ctx, int bpc);
+void ff_videodsp_init_arm(VideoDSPContext *ctx, int bpc);
+void ff_videodsp_init_ppc(VideoDSPContext *ctx, int bpc);
+void ff_videodsp_init_x86(VideoDSPContext *ctx, int bpc);
+void ff_videodsp_init_mips(VideoDSPContext *ctx, int bpc);
+void ff_videodsp_init_loongarch(VideoDSPContext *ctx, int bpc);
+
+#endif /* AVCODEC_VIDEODSP_H */
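
emulated_edge_mc(), documented above, copies a block whose source position may extend past the picture border into a temporary buffer, replicating the border samples; videodsp_template.c just below supplies the C fallback that ff_videodsp_init() installs before any per-architecture override. A minimal sketch of a call follows (not part of the patch; it assumes an in-tree build where the internal header is on the include path):

/* Sketch only: pad a 4x4 block that overhangs the bottom-right corner of an
 * 8x8, 8-bit frame. */
#include <stdint.h>
#include "libavcodec/videodsp.h"

static void edge_pad_example(void)
{
    uint8_t frame[8 * 8] = { 0 };   /* frame w = h = 8, linesize = 8 bytes */
    uint8_t block[4 * 4];           /* destination, linesize = 4 bytes     */
    VideoDSPContext vdsp;

    ff_videodsp_init(&vdsp, 8);     /* 8 bits per component */

    /* The source block starts at (6,6): its right two columns and bottom two
     * rows fall outside the frame and are filled by replicating the border. */
    vdsp.emulated_edge_mc(block, frame + 6 * 8 + 6,
                          4, 8,     /* dst_linesize, src_linesize */
                          4, 4,     /* block_w, block_h           */
                          6, 6,     /* src_x, src_y               */
                          8, 8);    /* frame w, h                 */
}
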
diff --git a/media/ffvpx/libavcodec/videodsp_template.c b/media/ffvpx/libavcodec/videodsp_template.c
new file mode 100644
index 0000000000..d653f4d524
--- /dev/null
+++ b/media/ffvpx/libavcodec/videodsp_template.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2002-2012 Michael Niedermayer
+ * Copyright (C) 2012 Ronald S. Bultje
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "bit_depth_template.c"
+#if BIT_DEPTH != 8
+// ff_emulated_edge_mc_8 is used by the x86 MpegVideoDSP API.
+static
+#endif
+void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
+ ptrdiff_t buf_linesize,
+ ptrdiff_t src_linesize,
+ int block_w, int block_h,
+ int src_x, int src_y, int w, int h)
+{
+ int x, y;
+ int start_y, start_x, end_y, end_x;
+
+ if (!w || !h)
+ return;
+
+ av_assert2(block_w * sizeof(pixel) <= FFABS(buf_linesize));
+
+ if (src_y >= h) {
+ src -= src_y * src_linesize;
+ src += (h - 1) * src_linesize;
+ src_y = h - 1;
+ } else if (src_y <= -block_h) {
+ src -= src_y * src_linesize;
+ src += (1 - block_h) * src_linesize;
+ src_y = 1 - block_h;
+ }
+ if (src_x >= w) {
+ // The subtracted expression has an unsigned type and must thus not be negative
+ src -= (1 + src_x - w) * sizeof(pixel);
+ src_x = w - 1;
+ } else if (src_x <= -block_w) {
+ src += (1 - block_w - src_x) * sizeof(pixel);
+ src_x = 1 - block_w;
+ }
+
+ start_y = FFMAX(0, -src_y);
+ start_x = FFMAX(0, -src_x);
+ end_y = FFMIN(block_h, h-src_y);
+ end_x = FFMIN(block_w, w-src_x);
+ av_assert2(start_y < end_y && block_h);
+ av_assert2(start_x < end_x && block_w);
+
+ w = end_x - start_x;
+ src += start_y * src_linesize + start_x * (ptrdiff_t)sizeof(pixel);
+ buf += start_x * sizeof(pixel);
+
+ // top
+ for (y = 0; y < start_y; y++) {
+ memcpy(buf, src, w * sizeof(pixel));
+ buf += buf_linesize;
+ }
+
+ // copy existing part
+ for (; y < end_y; y++) {
+ memcpy(buf, src, w * sizeof(pixel));
+ src += src_linesize;
+ buf += buf_linesize;
+ }
+
+ // bottom
+ src -= src_linesize;
+ for (; y < block_h; y++) {
+ memcpy(buf, src, w * sizeof(pixel));
+ buf += buf_linesize;
+ }
+
+ buf -= block_h * buf_linesize + start_x * (ptrdiff_t)sizeof(pixel);
+ while (block_h--) {
+ pixel *bufp = (pixel *) buf;
+
+ // left
+ for(x = 0; x < start_x; x++) {
+ bufp[x] = bufp[start_x];
+ }
+
+ // right
+ for (x = end_x; x < block_w; x++) {
+ bufp[x] = bufp[end_x - 1];
+ }
+ buf += buf_linesize;
+ }
+}
diff --git a/media/ffvpx/libavcodec/vlc.c b/media/ffvpx/libavcodec/vlc.c
new file mode 100644
index 0000000000..96f2b28c7e
--- /dev/null
+++ b/media/ffvpx/libavcodec/vlc.c
@@ -0,0 +1,378 @@
+/*
+ * API for creating VLC trees
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2010 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/error.h"
+#include "libavutil/internal.h"
+#include "libavutil/log.h"
+#include "libavutil/macros.h"
+#include "libavutil/mem.h"
+#include "libavutil/qsort.h"
+#include "libavutil/reverse.h"
+#include "vlc.h"
+
+#define GET_DATA(v, table, i, wrap, size) \
+{ \
+ const uint8_t *ptr = (const uint8_t *)table + i * wrap; \
+ switch(size) { \
+ case 1: \
+ v = *(const uint8_t *)ptr; \
+ break; \
+ case 2: \
+ v = *(const uint16_t *)ptr; \
+ break; \
+ case 4: \
+ default: \
+ av_assert1(size == 4); \
+ v = *(const uint32_t *)ptr; \
+ break; \
+ } \
+}
+
+
+static int alloc_table(VLC *vlc, int size, int use_static)
+{
+ int index = vlc->table_size;
+
+ vlc->table_size += size;
+ if (vlc->table_size > vlc->table_allocated) {
+ if (use_static)
+ abort(); // cannot do anything, init_vlc() is used with too little memory
+ vlc->table_allocated += (1 << vlc->bits);
+ vlc->table = av_realloc_f(vlc->table, vlc->table_allocated, sizeof(*vlc->table));
+ if (!vlc->table) {
+ vlc->table_allocated = 0;
+ vlc->table_size = 0;
+ return AVERROR(ENOMEM);
+ }
+ memset(vlc->table + vlc->table_allocated - (1 << vlc->bits), 0, sizeof(*vlc->table) << vlc->bits);
+ }
+ return index;
+}
+
+#define LOCALBUF_ELEMS 1500 // the maximum currently needed is 1296 by rv34
+
+static av_always_inline uint32_t bitswap_32(uint32_t x)
+{
+ return (uint32_t)ff_reverse[ x & 0xFF] << 24 |
+ (uint32_t)ff_reverse[(x >> 8) & 0xFF] << 16 |
+ (uint32_t)ff_reverse[(x >> 16) & 0xFF] << 8 |
+ (uint32_t)ff_reverse[ x >> 24];
+}
+
+typedef struct VLCcode {
+ uint8_t bits;
+ VLCBaseType symbol;
+ /** codeword, with the first bit-to-be-read in the msb
+ * (even if intended for a little-endian bitstream reader) */
+ uint32_t code;
+} VLCcode;
+
+static int vlc_common_init(VLC *vlc, int nb_bits, int nb_codes,
+ VLCcode **buf, int flags)
+{
+ vlc->bits = nb_bits;
+ vlc->table_size = 0;
+ if (flags & INIT_VLC_USE_NEW_STATIC) {
+ av_assert0(nb_codes <= LOCALBUF_ELEMS);
+ } else {
+ vlc->table = NULL;
+ vlc->table_allocated = 0;
+ }
+ if (nb_codes > LOCALBUF_ELEMS) {
+ *buf = av_malloc_array(nb_codes, sizeof(VLCcode));
+ if (!*buf)
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static int compare_vlcspec(const void *a, const void *b)
+{
+ const VLCcode *sa = a, *sb = b;
+ return (sa->code >> 1) - (sb->code >> 1);
+}
+
+/**
+ * Build VLC decoding tables suitable for use with get_vlc().
+ *
+ * @param vlc the context to be initialized
+ *
+ * @param table_nb_bits max length of vlc codes to store directly in this table
+ * (Longer codes are delegated to subtables.)
+ *
+ * @param nb_codes number of elements in codes[]
+ *
+ * @param codes descriptions of the vlc codes
+ * These must be ordered such that codes going into the same subtable are contiguous.
+ * Sorting by VLCcode.code is sufficient, though not necessary.
+ */
+static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
+ VLCcode *codes, int flags)
+{
+ int table_size, table_index;
+ VLCElem *table;
+
+ if (table_nb_bits > 30)
+ return AVERROR(EINVAL);
+ table_size = 1 << table_nb_bits;
+ table_index = alloc_table(vlc, table_size, flags & INIT_VLC_USE_NEW_STATIC);
+ ff_dlog(NULL, "new table index=%d size=%d\n", table_index, table_size);
+ if (table_index < 0)
+ return table_index;
+ table = &vlc->table[table_index];
+
+ /* first pass: map codes and compute auxiliary table sizes */
+ for (int i = 0; i < nb_codes; i++) {
+ int n = codes[i].bits;
+ uint32_t code = codes[i].code;
+ int symbol = codes[i].symbol;
+ ff_dlog(NULL, "i=%d n=%d code=0x%"PRIx32"\n", i, n, code);
+ if (n <= table_nb_bits) {
+ /* no need to add another table */
+ int j = code >> (32 - table_nb_bits);
+ int nb = 1 << (table_nb_bits - n);
+ int inc = 1;
+
+ if (flags & INIT_VLC_OUTPUT_LE) {
+ j = bitswap_32(code);
+ inc = 1 << n;
+ }
+ for (int k = 0; k < nb; k++) {
+ int bits = table[j].len;
+ int oldsym = table[j].sym;
+ ff_dlog(NULL, "%4x: code=%d n=%d\n", j, i, n);
+ if ((bits || oldsym) && (bits != n || oldsym != symbol)) {
+ av_log(NULL, AV_LOG_ERROR, "incorrect codes\n");
+ return AVERROR_INVALIDDATA;
+ }
+ table[j].len = n;
+ table[j].sym = symbol;
+ j += inc;
+ }
+ } else {
+ /* fill auxiliary table recursively */
+ uint32_t code_prefix;
+ int index, subtable_bits, j, k;
+
+ n -= table_nb_bits;
+ code_prefix = code >> (32 - table_nb_bits);
+ subtable_bits = n;
+ codes[i].bits = n;
+ codes[i].code = code << table_nb_bits;
+ for (k = i + 1; k < nb_codes; k++) {
+ n = codes[k].bits - table_nb_bits;
+ if (n <= 0)
+ break;
+ code = codes[k].code;
+ if (code >> (32 - table_nb_bits) != code_prefix)
+ break;
+ codes[k].bits = n;
+ codes[k].code = code << table_nb_bits;
+ subtable_bits = FFMAX(subtable_bits, n);
+ }
+ subtable_bits = FFMIN(subtable_bits, table_nb_bits);
+ j = (flags & INIT_VLC_OUTPUT_LE) ? bitswap_32(code_prefix) >> (32 - table_nb_bits) : code_prefix;
+ table[j].len = -subtable_bits;
+ ff_dlog(NULL, "%4x: n=%d (subtable)\n",
+ j, codes[i].bits + table_nb_bits);
+ index = build_table(vlc, subtable_bits, k-i, codes+i, flags);
+ if (index < 0)
+ return index;
+ /* note: realloc has been done, so reload tables */
+ table = &vlc->table[table_index];
+ table[j].sym = index;
+ if (table[j].sym != index) {
+ avpriv_request_sample(NULL, "strange codes");
+ return AVERROR_PATCHWELCOME;
+ }
+ i = k-1;
+ }
+ }
+
+ for (int i = 0; i < table_size; i++) {
+ if (table[i].len == 0)
+ table[i].sym = -1;
+ }
+
+ return table_index;
+}
+
+static int vlc_common_end(VLC *vlc, int nb_bits, int nb_codes, VLCcode *codes,
+ int flags, VLCcode localbuf[LOCALBUF_ELEMS])
+{
+ int ret = build_table(vlc, nb_bits, nb_codes, codes, flags);
+
+ if (flags & INIT_VLC_USE_NEW_STATIC) {
+ if (vlc->table_size != vlc->table_allocated &&
+ !(flags & (INIT_VLC_STATIC_OVERLONG & ~INIT_VLC_USE_NEW_STATIC)))
+ av_log(NULL, AV_LOG_ERROR, "needed %d had %d\n", vlc->table_size, vlc->table_allocated);
+ av_assert0(ret >= 0);
+ } else {
+ if (codes != localbuf)
+ av_free(codes);
+ if (ret < 0) {
+ av_freep(&vlc->table);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/* Build VLC decoding tables suitable for use with get_vlc().
+
+   'nb_bits' sets the decoding table size (2^nb_bits entries). The
+   bigger it is, the faster the decoding is, but it should not be too
+   big, to save memory and L1 cache. '9' is a good compromise.
+
+   'nb_codes' : number of VLC codes
+
+ 'bits' : table which gives the size (in bits) of each vlc code.
+
+   'codes' : table which gives the bit pattern of each vlc code.
+
+ 'symbols' : table which gives the values to be returned from get_vlc().
+
+   'xxx_wrap' : gives the number of bytes between two consecutive entries of
+   the 'bits' or 'codes' tables.
+
+   'xxx_size' : gives the number of bytes of each entry of the 'bits'
+   or 'codes' tables. Currently 1, 2 and 4 are supported.
+
+ 'wrap' and 'size' make it possible to use any memory configuration and types
+ (byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.
+*/
+int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
+ const void *bits, int bits_wrap, int bits_size,
+ const void *codes, int codes_wrap, int codes_size,
+ const void *symbols, int symbols_wrap, int symbols_size,
+ int flags)
+{
+ VLCcode localbuf[LOCALBUF_ELEMS], *buf = localbuf;
+ int j, ret;
+
+ ret = vlc_common_init(vlc, nb_bits, nb_codes, &buf, flags);
+ if (ret < 0)
+ return ret;
+
+ av_assert0(symbols_size <= 2 || !symbols);
+ j = 0;
+#define COPY(condition)\
+ for (int i = 0; i < nb_codes; i++) { \
+ unsigned len; \
+ GET_DATA(len, bits, i, bits_wrap, bits_size); \
+ if (!(condition)) \
+ continue; \
+ if (len > 3*nb_bits || len > 32) { \
+ av_log(NULL, AV_LOG_ERROR, "Too long VLC (%u) in init_vlc\n", len);\
+ if (buf != localbuf) \
+ av_free(buf); \
+ return AVERROR(EINVAL); \
+ } \
+ buf[j].bits = len; \
+ GET_DATA(buf[j].code, codes, i, codes_wrap, codes_size); \
+ if (buf[j].code >= (1LL<<buf[j].bits)) { \
+ av_log(NULL, AV_LOG_ERROR, "Invalid code %"PRIx32" for %d in " \
+ "init_vlc\n", buf[j].code, i); \
+ if (buf != localbuf) \
+ av_free(buf); \
+ return AVERROR(EINVAL); \
+ } \
+ if (flags & INIT_VLC_INPUT_LE) \
+ buf[j].code = bitswap_32(buf[j].code); \
+ else \
+ buf[j].code <<= 32 - buf[j].bits; \
+ if (symbols) \
+ GET_DATA(buf[j].symbol, symbols, i, symbols_wrap, symbols_size) \
+ else \
+ buf[j].symbol = i; \
+ j++; \
+ }
+ COPY(len > nb_bits);
+ // qsort is the slowest part of init_vlc, and could probably be improved or avoided
+ AV_QSORT(buf, j, struct VLCcode, compare_vlcspec);
+ COPY(len && len <= nb_bits);
+ nb_codes = j;
+
+ return vlc_common_end(vlc, nb_bits, nb_codes, buf,
+ flags, localbuf);
+}
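+
+/* Illustrative example (not part of the original sources) of a
+ * ff_init_vlc_sparse() call as documented above. The three MSB-first codes
+ * ("0", "10", "11") and their symbols are hypothetical sample data. */
+static inline int example_init_vlc_sparse(VLC *vlc)
+{
+    static const uint8_t  ex_bits[3]    = { 1, 2, 2 };
+    static const uint8_t  ex_codes[3]   = { 0, 2, 3 };
+    static const uint16_t ex_symbols[3] = { 42, 17, 23 };
+
+    /* 4-bit primary table; bits/codes entries are 1 byte, symbols 2 bytes */
+    return ff_init_vlc_sparse(vlc, 4, 3,
+                              ex_bits,    1, 1,
+                              ex_codes,   1, 1,
+                              ex_symbols, 2, 2, 0);
+}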
+
+int ff_init_vlc_from_lengths(VLC *vlc, int nb_bits, int nb_codes,
+ const int8_t *lens, int lens_wrap,
+ const void *symbols, int symbols_wrap, int symbols_size,
+ int offset, int flags, void *logctx)
+{
+ VLCcode localbuf[LOCALBUF_ELEMS], *buf = localbuf;
+ uint64_t code;
+ int ret, j, len_max = FFMIN(32, 3 * nb_bits);
+
+ ret = vlc_common_init(vlc, nb_bits, nb_codes, &buf, flags);
+ if (ret < 0)
+ return ret;
+
+ j = code = 0;
+ for (int i = 0; i < nb_codes; i++, lens += lens_wrap) {
+ int len = *lens;
+ if (len > 0) {
+ unsigned sym;
+
+ buf[j].bits = len;
+ if (symbols)
+ GET_DATA(sym, symbols, i, symbols_wrap, symbols_size)
+ else
+ sym = i;
+ buf[j].symbol = sym + offset;
+ buf[j++].code = code;
+ } else if (len < 0) {
+ len = -len;
+ } else
+ continue;
+ if (len > len_max || code & ((1U << (32 - len)) - 1)) {
+ av_log(logctx, AV_LOG_ERROR, "Invalid VLC (length %u)\n", len);
+ goto fail;
+ }
+ code += 1U << (32 - len);
+ if (code > UINT32_MAX + 1ULL) {
+ av_log(logctx, AV_LOG_ERROR, "Overdetermined VLC tree\n");
+ goto fail;
+ }
+ }
+ return vlc_common_end(vlc, nb_bits, j, buf, flags, localbuf);
+fail:
+ if (buf != localbuf)
+ av_free(buf);
+ return AVERROR_INVALIDDATA;
+}
+
+void ff_free_vlc(VLC *vlc)
+{
+ av_freep(&vlc->table);
+}
diff --git a/media/ffvpx/libavcodec/vlc.h b/media/ffvpx/libavcodec/vlc.h
new file mode 100644
index 0000000000..e63c484755
--- /dev/null
+++ b/media/ffvpx/libavcodec/vlc.h
@@ -0,0 +1,144 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VLC_H
+#define AVCODEC_VLC_H
+
+#include <stdint.h>
+
+// When changing this, be sure to also update tableprint_vlc.h accordingly.
+typedef int16_t VLCBaseType;
+
+typedef struct VLCElem {
+ VLCBaseType sym, len;
+} VLCElem;
+
+typedef struct VLC {
+ int bits;
+ VLCElem *table;
+ int table_size, table_allocated;
+} VLC;
+
+typedef struct RL_VLC_ELEM {
+ int16_t level;
+ int8_t len;
+ uint8_t run;
+} RL_VLC_ELEM;
+
+#define init_vlc(vlc, nb_bits, nb_codes, \
+ bits, bits_wrap, bits_size, \
+ codes, codes_wrap, codes_size, \
+ flags) \
+ ff_init_vlc_sparse(vlc, nb_bits, nb_codes, \
+ bits, bits_wrap, bits_size, \
+ codes, codes_wrap, codes_size, \
+ NULL, 0, 0, flags)
+
+int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
+ const void *bits, int bits_wrap, int bits_size,
+ const void *codes, int codes_wrap, int codes_size,
+ const void *symbols, int symbols_wrap, int symbols_size,
+ int flags);
+
+/**
+ * Build VLC decoding tables suitable for use with get_vlc2()
+ *
+ * This function takes lengths and symbols and calculates the codes from them.
+ * For this, the input lengths and symbols have to be ordered so that left
+ * nodes in the corresponding code tree come first.
+ *
+ * @param[in,out] vlc The VLC to be initialized; table and table_allocated
+ * must have been set when initializing a static VLC,
+ * otherwise this will be treated as uninitialized.
+ * @param[in] nb_bits The number of bits to use for the VLC table;
+ * higher values take up more memory and cache, but
+ *                      allow codes to be read with fewer reads.
+ * @param[in] nb_codes The number of provided length and (if supplied) symbol
+ * entries.
+ * @param[in] lens The lengths of the codes. Entries > 0 correspond to
+ * valid codes; entries == 0 will be skipped and entries
+ * with len < 0 indicate that the tree is incomplete and
+ * has an open end of length -len at this position.
+ * @param[in] lens_wrap Stride (in bytes) of the lengths.
+ * @param[in] symbols The symbols, i.e. what is returned from get_vlc2()
+ * when the corresponding code is encountered.
+ * May be NULL, then 0, 1, 2, 3, 4,... will be used.
+ * @param[in] symbols_wrap Stride (in bytes) of the symbols.
+ * @param[in] symbols_size Size of the symbols. 1 and 2 are supported.
+ * @param[in] offset An offset to apply to all the valid symbols.
+ * @param[in] flags A combination of the INIT_VLC_* flags; notice that
+ * INIT_VLC_INPUT_LE is pointless and ignored.
+ */
+int ff_init_vlc_from_lengths(VLC *vlc, int nb_bits, int nb_codes,
+ const int8_t *lens, int lens_wrap,
+ const void *symbols, int symbols_wrap, int symbols_size,
+ int offset, int flags, void *logctx);
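+
+/* Illustrative example (not from the original sources): building a small
+ * three-code tree from lengths only. Lengths are listed so that left nodes
+ * of the code tree come first; the data here is hypothetical. */
+static inline int example_init_vlc_from_lengths(VLC *vlc, void *logctx)
+{
+    static const int8_t ex_lens[3] = { 1, 2, 2 };
+
+    return ff_init_vlc_from_lengths(vlc, 4, 3, ex_lens, 1,
+                                    NULL, 0, 0, 0, 0, logctx);
+}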
+
+void ff_free_vlc(VLC *vlc);
+
+/* If INIT_VLC_INPUT_LE is set, the least significant bit of the codes used
+ * to initialize the VLC table is the first bit to be read. */
+#define INIT_VLC_INPUT_LE 2
+/* If set, the VLC is intended for a little-endian bitstream reader. */
+#define INIT_VLC_OUTPUT_LE 8
+#define INIT_VLC_LE (INIT_VLC_INPUT_LE | INIT_VLC_OUTPUT_LE)
+#define INIT_VLC_USE_NEW_STATIC 4
+#define INIT_VLC_STATIC_OVERLONG (1 | INIT_VLC_USE_NEW_STATIC)
+
+#define INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
+ h, i, j, flags, static_size) \
+ do { \
+ static VLCElem table[static_size]; \
+ (vlc)->table = table; \
+ (vlc)->table_allocated = static_size; \
+ ff_init_vlc_sparse(vlc, bits, a, b, c, d, e, f, g, h, i, j, \
+ flags | INIT_VLC_USE_NEW_STATIC); \
+ } while (0)
+
+#define INIT_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, h, i, j, static_size) \
+ INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
+ h, i, j, 0, static_size)
+
+#define INIT_LE_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, h, i, j, static_size) \
+ INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
+ h, i, j, INIT_VLC_LE, static_size)
+
+#define INIT_CUSTOM_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, flags, static_size) \
+ INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
+ NULL, 0, 0, flags, static_size)
+
+#define INIT_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, static_size) \
+ INIT_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, NULL, 0, 0, static_size)
+
+#define INIT_LE_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, static_size) \
+ INIT_LE_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, NULL, 0, 0, static_size)
+
+#define INIT_VLC_STATIC_FROM_LENGTHS(vlc, bits, nb_codes, lens, len_wrap, \
+ symbols, symbols_wrap, symbols_size, \
+ offset, flags, static_size) \
+ do { \
+ static VLCElem table[static_size]; \
+ (vlc)->table = table; \
+ (vlc)->table_allocated = static_size; \
+ ff_init_vlc_from_lengths(vlc, bits, nb_codes, lens, len_wrap, \
+ symbols, symbols_wrap, symbols_size, \
+ offset, flags | INIT_VLC_USE_NEW_STATIC, \
+ NULL); \
+ } while (0)
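+
+/* Illustrative example (not from the original sources) of the static-table
+ * helpers above, as they might be used once from a decoder init function.
+ * The code tables and the 16-entry table size are hypothetical. */
+static inline void example_init_static_vlc(VLC *vlc)
+{
+    static const uint8_t  ex_bits[3]  = { 1, 2, 2 };
+    static const uint16_t ex_codes[3] = { 0, 2, 3 };
+
+    /* a 4-bit primary table needs 1 << 4 = 16 VLCElem entries */
+    INIT_VLC_STATIC(vlc, 4, 3, ex_bits, 1, 1, ex_codes, 2, 2, 16);
+}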
+
+#endif /* AVCODEC_VLC_H */
diff --git a/media/ffvpx/libavcodec/vorbis_parser.c b/media/ffvpx/libavcodec/vorbis_parser.c
new file mode 100644
index 0000000000..a7d15d4ce9
--- /dev/null
+++ b/media/ffvpx/libavcodec/vorbis_parser.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2012 Justin Ruggles
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Vorbis audio parser
+ *
+ * Determines the duration for each packet.
+ */
+
+#include "config_components.h"
+
+#include "libavutil/log.h"
+
+#include "get_bits.h"
+#include "parser.h"
+#include "xiph.h"
+#include "vorbis_parser_internal.h"
+
+static const AVClass vorbis_parser_class = {
+ .class_name = "Vorbis parser",
+ .item_name = av_default_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static int parse_id_header(AVVorbisParseContext *s,
+ const uint8_t *buf, int buf_size)
+{
+ /* Id header should be 30 bytes */
+ if (buf_size < 30) {
+ av_log(s, AV_LOG_ERROR, "Id header is too short\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* make sure this is the Id header */
+ if (buf[0] != 1) {
+ av_log(s, AV_LOG_ERROR, "Wrong packet type in Id header\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* check for header signature */
+ if (memcmp(&buf[1], "vorbis", 6)) {
+ av_log(s, AV_LOG_ERROR, "Invalid packet signature in Id header\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (!(buf[29] & 0x1)) {
+ av_log(s, AV_LOG_ERROR, "Invalid framing bit in Id header\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ s->blocksize[0] = 1 << (buf[28] & 0xF);
+ s->blocksize[1] = 1 << (buf[28] >> 4);
+
+ return 0;
+}
+
+static int parse_setup_header(AVVorbisParseContext *s,
+ const uint8_t *buf, int buf_size)
+{
+ GetBitContext gb, gb0;
+ uint8_t *rev_buf;
+ int i, ret = 0;
+ int got_framing_bit, mode_count, got_mode_header, last_mode_count = 0;
+
+ /* avoid overread */
+ if (buf_size < 7) {
+ av_log(s, AV_LOG_ERROR, "Setup header is too short\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* make sure this is the Setup header */
+ if (buf[0] != 5) {
+ av_log(s, AV_LOG_ERROR, "Wrong packet type in Setup header\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* check for header signature */
+ if (memcmp(&buf[1], "vorbis", 6)) {
+ av_log(s, AV_LOG_ERROR, "Invalid packet signature in Setup header\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* reverse bytes so we can easily read backwards with get_bits() */
+ if (!(rev_buf = av_malloc(buf_size))) {
+ av_log(s, AV_LOG_ERROR, "Out of memory\n");
+ return AVERROR(ENOMEM);
+ }
+ for (i = 0; i < buf_size; i++)
+ rev_buf[i] = buf[buf_size - 1 - i];
+ init_get_bits(&gb, rev_buf, buf_size * 8);
+
+ got_framing_bit = 0;
+ while (get_bits_left(&gb) > 97) {
+ if (get_bits1(&gb)) {
+ got_framing_bit = get_bits_count(&gb);
+ break;
+ }
+ }
+ if (!got_framing_bit) {
+ av_log(s, AV_LOG_ERROR, "Invalid Setup header\n");
+ ret = AVERROR_INVALIDDATA;
+ goto bad_header;
+ }
+
+ /* Now we search backwards to find possible valid mode counts. This is not
+ * fool-proof because we could have false positive matches and read too
+ * far, but there isn't really any way to be sure without parsing through
+ * all the many variable-sized fields before the modes. This approach seems
+ * to work well in testing, and it is similar to how it is handled in
+ * liboggz. */
+ mode_count = 0;
+ got_mode_header = 0;
+ while (get_bits_left(&gb) >= 97) {
+ if (get_bits(&gb, 8) > 63 || get_bits(&gb, 16) || get_bits(&gb, 16))
+ break;
+ skip_bits(&gb, 1);
+ mode_count++;
+ if (mode_count > 64)
+ break;
+ gb0 = gb;
+ if (get_bits(&gb0, 6) + 1 == mode_count) {
+ got_mode_header = 1;
+ last_mode_count = mode_count;
+ }
+ }
+ if (!got_mode_header) {
+ av_log(s, AV_LOG_ERROR, "Invalid Setup header\n");
+ ret = AVERROR_INVALIDDATA;
+ goto bad_header;
+ }
+ /* All samples I've seen use <= 2 modes, so ask for a sample if we find
+     * more than that, as it is most likely a false positive. If we get any,
+ * we may need to approach this the long way and parse the whole Setup
+ * header, but I hope very much that it never comes to that. */
+ if (last_mode_count > 2) {
+ avpriv_request_sample(s,
+ "%d modes (either a false positive or a "
+ "sample from an unknown encoder)",
+ last_mode_count);
+ }
+ /* We're limiting the mode count to 63 so that we know that the previous
+ * block flag will be in the first packet byte. */
+ if (last_mode_count > 63) {
+ av_log(s, AV_LOG_ERROR, "Unsupported mode count: %d\n",
+ last_mode_count);
+ ret = AVERROR_INVALIDDATA;
+ goto bad_header;
+ }
+ s->mode_count = mode_count = last_mode_count;
+ /* Determine the number of bits required to code the mode and turn that
+ * into a bitmask to directly access the mode from the first frame byte. */
+ s->mode_mask = ((1 << (av_log2(mode_count - 1) + 1)) - 1) << 1;
+ /* The previous window flag is the next bit after the mode */
+ s->prev_mask = (s->mode_mask | 0x1) + 1;
+
+ init_get_bits(&gb, rev_buf, buf_size * 8);
+ skip_bits_long(&gb, got_framing_bit);
+ for (i = mode_count - 1; i >= 0; i--) {
+ skip_bits_long(&gb, 40);
+ s->mode_blocksize[i] = get_bits1(&gb);
+ }
+
+bad_header:
+ av_free(rev_buf);
+ return ret;
+}
+
+static int vorbis_parse_init(AVVorbisParseContext *s,
+ const uint8_t *extradata, int extradata_size)
+{
+ const uint8_t *header_start[3];
+ int header_len[3];
+ int ret;
+
+ s->class = &vorbis_parser_class;
+ s->extradata_parsed = 1;
+
+ if ((ret = avpriv_split_xiph_headers(extradata,
+ extradata_size, 30,
+ header_start, header_len)) < 0) {
+ av_log(s, AV_LOG_ERROR, "Extradata corrupt.\n");
+ return ret;
+ }
+
+ if ((ret = parse_id_header(s, header_start[0], header_len[0])) < 0)
+ return ret;
+
+ if ((ret = parse_setup_header(s, header_start[2], header_len[2])) < 0)
+ return ret;
+
+ s->valid_extradata = 1;
+ s->previous_blocksize = s->blocksize[s->mode_blocksize[0]];
+
+ return 0;
+}
+
+int av_vorbis_parse_frame_flags(AVVorbisParseContext *s, const uint8_t *buf,
+ int buf_size, int *flags)
+{
+ int duration = 0;
+
+ if (s->valid_extradata && buf_size > 0) {
+ int mode, current_blocksize;
+ int previous_blocksize = s->previous_blocksize;
+
+ if (buf[0] & 1) {
+            /* If the caller did not ask for flags, treat special (header) packets as invalid. */
+ if (!flags)
+ goto bad_packet;
+
+ /* Set the flag for which kind of special packet it is. */
+ if (buf[0] == 1)
+ *flags |= VORBIS_FLAG_HEADER;
+ else if (buf[0] == 3)
+ *flags |= VORBIS_FLAG_COMMENT;
+ else if (buf[0] == 5)
+ *flags |= VORBIS_FLAG_SETUP;
+ else
+ goto bad_packet;
+
+ /* Special packets have no duration. */
+ return 0;
+
+bad_packet:
+ av_log(s, AV_LOG_ERROR, "Invalid packet\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (s->mode_count == 1)
+ mode = 0;
+ else
+ mode = (buf[0] & s->mode_mask) >> 1;
+ if (mode >= s->mode_count) {
+ av_log(s, AV_LOG_ERROR, "Invalid mode in packet\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if(s->mode_blocksize[mode]){
+ int flag = !!(buf[0] & s->prev_mask);
+ previous_blocksize = s->blocksize[flag];
+ }
+ current_blocksize = s->blocksize[s->mode_blocksize[mode]];
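+        /* a packet decodes to the overlapping halves of the previous and
+         * current windows, i.e. previous_blocksize / 4 + current_blocksize / 4
+         * samples */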
+ duration = (previous_blocksize + current_blocksize) >> 2;
+ s->previous_blocksize = current_blocksize;
+ }
+
+ return duration;
+}
+
+int av_vorbis_parse_frame(AVVorbisParseContext *s, const uint8_t *buf,
+ int buf_size)
+{
+ return av_vorbis_parse_frame_flags(s, buf, buf_size, NULL);
+}
+
+void av_vorbis_parse_reset(AVVorbisParseContext *s)
+{
+ if (s->valid_extradata)
+ s->previous_blocksize = s->blocksize[0];
+}
+
+void av_vorbis_parse_free(AVVorbisParseContext **s)
+{
+ av_freep(s);
+}
+
+AVVorbisParseContext *av_vorbis_parse_init(const uint8_t *extradata,
+ int extradata_size)
+{
+ AVVorbisParseContext *s = av_mallocz(sizeof(*s));
+ int ret;
+
+ if (!s)
+ return NULL;
+
+ ret = vorbis_parse_init(s, extradata, extradata_size);
+ if (ret < 0) {
+ av_vorbis_parse_free(&s);
+ return NULL;
+ }
+
+ return s;
+}
+
+#if CONFIG_VORBIS_PARSER
+
+typedef struct VorbisParseContext {
+ AVVorbisParseContext *vp;
+} VorbisParseContext;
+
+static int vorbis_parse(AVCodecParserContext *s1, AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ VorbisParseContext *s = s1->priv_data;
+ int duration;
+
+ if (!s->vp && avctx->extradata && avctx->extradata_size) {
+ s->vp = av_vorbis_parse_init(avctx->extradata, avctx->extradata_size);
+ }
+ if (!s->vp)
+ goto end;
+
+ if ((duration = av_vorbis_parse_frame(s->vp, buf, buf_size)) >= 0)
+ s1->duration = duration;
+
+end:
+ /* always return the full packet. this parser isn't doing any splitting or
+ combining, only packet analysis */
+ *poutbuf = buf;
+ *poutbuf_size = buf_size;
+ return buf_size;
+}
+
+static void vorbis_parser_close(AVCodecParserContext *ctx)
+{
+ VorbisParseContext *s = ctx->priv_data;
+ av_vorbis_parse_free(&s->vp);
+}
+
+const AVCodecParser ff_vorbis_parser = {
+ .codec_ids = { AV_CODEC_ID_VORBIS },
+ .priv_data_size = sizeof(VorbisParseContext),
+ .parser_parse = vorbis_parse,
+ .parser_close = vorbis_parser_close,
+};
+#endif /* CONFIG_VORBIS_PARSER */
diff --git a/media/ffvpx/libavcodec/vorbis_parser.h b/media/ffvpx/libavcodec/vorbis_parser.h
new file mode 100644
index 0000000000..789932ac49
--- /dev/null
+++ b/media/ffvpx/libavcodec/vorbis_parser.h
@@ -0,0 +1,74 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * A public API for Vorbis parsing
+ *
+ * Determines the duration for each packet.
+ */
+
+#ifndef AVCODEC_VORBIS_PARSER_H
+#define AVCODEC_VORBIS_PARSER_H
+
+#include <stdint.h>
+
+typedef struct AVVorbisParseContext AVVorbisParseContext;
+
+/**
+ * Allocate and initialize the Vorbis parser using headers in the extradata.
+ */
+AVVorbisParseContext *av_vorbis_parse_init(const uint8_t *extradata,
+ int extradata_size);
+
+/**
+ * Free the parser and everything associated with it.
+ */
+void av_vorbis_parse_free(AVVorbisParseContext **s);
+
+#define VORBIS_FLAG_HEADER 0x00000001
+#define VORBIS_FLAG_COMMENT 0x00000002
+#define VORBIS_FLAG_SETUP 0x00000004
+
+/**
+ * Get the duration for a Vorbis packet.
+ *
+ * If @p flags is @c NULL,
+ * special frames are considered invalid.
+ *
+ * @param s Vorbis parser context
+ * @param buf buffer containing a Vorbis frame
+ * @param buf_size size of the buffer
+ * @param flags flags for special frames
+ */
+int av_vorbis_parse_frame_flags(AVVorbisParseContext *s, const uint8_t *buf,
+ int buf_size, int *flags);
+
+/**
+ * Get the duration for a Vorbis packet.
+ *
+ * @param s Vorbis parser context
+ * @param buf buffer containing a Vorbis frame
+ * @param buf_size size of the buffer
+ */
+int av_vorbis_parse_frame(AVVorbisParseContext *s, const uint8_t *buf,
+ int buf_size);
+
+void av_vorbis_parse_reset(AVVorbisParseContext *s);
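+
+/* Illustrative example (not from the original sources) of the public API:
+ * compute the duration of a single packet given the codec extradata. The
+ * parameter names are placeholders for the caller's data. */
+static inline int example_vorbis_packet_duration(const uint8_t *extradata,
+                                                 int extradata_size,
+                                                 const uint8_t *pkt,
+                                                 int pkt_size)
+{
+    AVVorbisParseContext *vp = av_vorbis_parse_init(extradata, extradata_size);
+    int duration;
+
+    if (!vp)
+        return -1;
+    duration = av_vorbis_parse_frame(vp, pkt, pkt_size); /* samples, or <0 */
+    av_vorbis_parse_free(&vp);
+    return duration;
+}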
+
+#endif /* AVCODEC_VORBIS_PARSER_H */
diff --git a/media/ffvpx/libavcodec/vorbis_parser_internal.h b/media/ffvpx/libavcodec/vorbis_parser_internal.h
new file mode 100644
index 0000000000..691a842385
--- /dev/null
+++ b/media/ffvpx/libavcodec/vorbis_parser_internal.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2012 Justin Ruggles
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Vorbis audio parser
+ *
+ * Determines the duration for each packet.
+ */
+
+#ifndef AVCODEC_VORBIS_PARSER_INTERNAL_H
+#define AVCODEC_VORBIS_PARSER_INTERNAL_H
+
+#include "avcodec.h"
+#include "vorbis_parser.h"
+
+struct AVVorbisParseContext {
+ const AVClass *class;
+ int extradata_parsed; ///< we have attempted to parse extradata
+ int valid_extradata; ///< extradata is valid, so we can calculate duration
+ int blocksize[2]; ///< short and long window sizes
+ int previous_blocksize; ///< previous window size
+ int mode_blocksize[64]; ///< window size mapping for each mode
+ int mode_count; ///< number of modes
+ int mode_mask; ///< bitmask used to get the mode in each packet
+ int prev_mask; ///< bitmask used to get the previous mode flag in each packet
+};
+
+#endif /* AVCODEC_VORBIS_PARSER_INTERNAL_H */
diff --git a/media/ffvpx/libavcodec/vp3dsp.h b/media/ffvpx/libavcodec/vp3dsp.h
new file mode 100644
index 0000000000..3b849ec05d
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp3dsp.h
@@ -0,0 +1,64 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP3DSP_H
+#define AVCODEC_VP3DSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef struct VP3DSPContext {
+ /**
+ * Copy 8xH pixels from source to destination buffer using a bilinear
+ * filter with no rounding (i.e. *dst = (*a + *b) >> 1).
+ *
+ * @param dst destination buffer, aligned by 8
+ * @param a first source buffer, no alignment
+ * @param b second source buffer, no alignment
+ * @param stride distance between two lines in source/dest buffers
+ * @param h height
+ */
+ void (*put_no_rnd_pixels_l2)(uint8_t *dst,
+ const uint8_t *a,
+ const uint8_t *b,
+ ptrdiff_t stride, int h);
+
+ void (*idct_put)(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+ void (*idct_add)(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+ void (*idct_dc_add)(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+ void (*v_loop_filter)(uint8_t *src, ptrdiff_t stride, int *bounding_values);
+ void (*h_loop_filter)(uint8_t *src, ptrdiff_t stride, int *bounding_values);
+ void (*v_loop_filter_unaligned)(uint8_t *src, ptrdiff_t stride, int *bounding_values);
+ void (*h_loop_filter_unaligned)(uint8_t *src, ptrdiff_t stride, int *bounding_values);
+} VP3DSPContext;
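+
+/* Illustrative scalar sketch (not from the original sources) of the
+ * put_no_rnd_pixels_l2 contract documented above: average two 8-pixel-wide
+ * sources per line, truncating rather than rounding. */
+static inline void example_put_no_rnd_pixels_l2_c(uint8_t *dst,
+                                                  const uint8_t *a,
+                                                  const uint8_t *b,
+                                                  ptrdiff_t stride, int h)
+{
+    for (int i = 0; i < h; i++) {
+        for (int j = 0; j < 8; j++)
+            dst[j] = (a[j] + b[j]) >> 1; /* no rounding */
+        dst += stride;
+        a   += stride;
+        b   += stride;
+    }
+}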
+
+void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values);
+void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values);
+
+void ff_vp3dsp_idct10_put(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vp3dsp_idct10_add(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+
+void ff_vp3dsp_init(VP3DSPContext *c, int flags);
+void ff_vp3dsp_init_arm(VP3DSPContext *c, int flags);
+void ff_vp3dsp_init_ppc(VP3DSPContext *c, int flags);
+void ff_vp3dsp_init_x86(VP3DSPContext *c, int flags);
+void ff_vp3dsp_init_mips(VP3DSPContext *c, int flags);
+
+void ff_vp3dsp_set_bounding_values(int * bound_values_array, int filter_limit);
+
+#endif /* AVCODEC_VP3DSP_H */
diff --git a/media/ffvpx/libavcodec/vp56.h b/media/ffvpx/libavcodec/vp56.h
new file mode 100644
index 0000000000..9dc0b9c7ad
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp56.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * VP5 and VP6 compatible video decoder (common features)
+ */
+
+#ifndef AVCODEC_VP56_H
+#define AVCODEC_VP56_H
+
+#include "libavutil/mem_internal.h"
+
+#include "avcodec.h"
+#include "get_bits.h"
+#include "hpeldsp.h"
+#include "h264chroma.h"
+#include "videodsp.h"
+#include "vp3dsp.h"
+#include "vp56dsp.h"
+#include "vpx_rac.h"
+
+typedef struct vp56_context VP56Context;
+
+typedef enum {
+ VP56_FRAME_NONE =-1,
+ VP56_FRAME_CURRENT = 0,
+ VP56_FRAME_PREVIOUS = 1,
+ VP56_FRAME_GOLDEN = 2,
+} VP56Frame;
+
+typedef enum {
+ VP56_MB_INTER_NOVEC_PF = 0, /**< Inter MB, no vector, from previous frame */
+ VP56_MB_INTRA = 1, /**< Intra MB */
+ VP56_MB_INTER_DELTA_PF = 2, /**< Inter MB, above/left vector + delta, from previous frame */
+ VP56_MB_INTER_V1_PF = 3, /**< Inter MB, first vector, from previous frame */
+ VP56_MB_INTER_V2_PF = 4, /**< Inter MB, second vector, from previous frame */
+ VP56_MB_INTER_NOVEC_GF = 5, /**< Inter MB, no vector, from golden frame */
+ VP56_MB_INTER_DELTA_GF = 6, /**< Inter MB, above/left vector + delta, from golden frame */
+ VP56_MB_INTER_4V = 7, /**< Inter MB, 4 vectors, from previous frame */
+ VP56_MB_INTER_V1_GF = 8, /**< Inter MB, first vector, from golden frame */
+ VP56_MB_INTER_V2_GF = 9, /**< Inter MB, second vector, from golden frame */
+} VP56mb;
+
+typedef struct VP56Tree {
+ int8_t val;
+ int8_t prob_idx;
+} VP56Tree;
+
+typedef struct VP56mv {
+ DECLARE_ALIGNED(4, int16_t, x);
+ int16_t y;
+} VP56mv;
+
+#define VP56_SIZE_CHANGE 1
+
+typedef void (*VP56ParseVectorAdjustment)(VP56Context *s,
+ VP56mv *vect);
+typedef void (*VP56Filter)(VP56Context *s, uint8_t *dst, uint8_t *src,
+ int offset1, int offset2, ptrdiff_t stride,
+ VP56mv mv, int mask, int select, int luma);
+typedef int (*VP56ParseCoeff)(VP56Context *s);
+typedef void (*VP56DefaultModelsInit)(VP56Context *s);
+typedef void (*VP56ParseVectorModels)(VP56Context *s);
+typedef int (*VP56ParseCoeffModels)(VP56Context *s);
+typedef int (*VP56ParseHeader)(VP56Context *s, const uint8_t *buf,
+ int buf_size);
+
+typedef struct VP56RefDc {
+ uint8_t not_null_dc;
+ VP56Frame ref_frame;
+ int16_t dc_coeff;
+} VP56RefDc;
+
+typedef struct VP56Macroblock {
+ uint8_t type;
+ VP56mv mv;
+} VP56Macroblock;
+
+typedef struct VP56Model {
+ uint8_t coeff_reorder[64]; /* used in vp6 only */
+ uint8_t coeff_index_to_pos[64]; /* used in vp6 only */
+ uint8_t coeff_index_to_idct_selector[64]; /* used in vp6 only */
+ uint8_t vector_sig[2]; /* delta sign */
+ uint8_t vector_dct[2]; /* delta coding types */
+ uint8_t vector_pdi[2][2]; /* predefined delta init */
+ uint8_t vector_pdv[2][7]; /* predefined delta values */
+ uint8_t vector_fdv[2][8]; /* 8 bit delta value definition */
+ uint8_t coeff_dccv[2][11]; /* DC coeff value */
+ uint8_t coeff_ract[2][3][6][11]; /* Run/AC coding type and AC coeff value */
+ uint8_t coeff_acct[2][3][3][6][5];/* vp5 only AC coding type for coding group < 3 */
+ uint8_t coeff_dcct[2][36][5]; /* DC coeff coding type */
+ uint8_t coeff_runv[2][14]; /* run value (vp6 only) */
+ uint8_t mb_type[3][10][10]; /* model for decoding MB type */
+ uint8_t mb_types_stats[3][10][2];/* contextual, next MB type stats */
+} VP56Model;
+
+struct vp56_context {
+ AVCodecContext *avctx;
+ H264ChromaContext h264chroma;
+ HpelDSPContext hdsp;
+ VideoDSPContext vdsp;
+ VP3DSPContext vp3dsp;
+ VP56DSPContext vp56dsp;
+ uint8_t idct_scantable[64];
+ AVFrame *frames[4];
+ uint8_t *edge_emu_buffer_alloc;
+ uint8_t *edge_emu_buffer;
+ VPXRangeCoder c;
+ VPXRangeCoder cc;
+ VPXRangeCoder *ccp;
+ int sub_version;
+
+ /* frame info */
+ int golden_frame;
+ int plane_width[4];
+ int plane_height[4];
+ int mb_width; /* number of horizontal MB */
+ int mb_height; /* number of vertical MB */
+ int block_offset[6];
+
+ int quantizer;
+ uint16_t dequant_dc;
+ uint16_t dequant_ac;
+
+ /* DC predictors management */
+ VP56RefDc *above_blocks;
+ VP56RefDc left_block[4];
+ int above_block_idx[6];
+    int16_t prev_dc[3][3]; /* [plane][ref_frame] */
+
+ /* blocks / macroblock */
+ VP56mb mb_type;
+ VP56Macroblock *macroblocks;
+ DECLARE_ALIGNED(16, int16_t, block_coeff)[6][64];
+ int idct_selector[6];
+
+ /* motion vectors */
+ VP56mv mv[6]; /* vectors for each block in MB */
+ VP56mv vector_candidate[2];
+ int vector_candidate_pos;
+
+ /* filtering hints */
+ int filter_header; /* used in vp6 only */
+ int deblock_filtering;
+ int filter_selection;
+ int filter_mode;
+ int max_vector_length;
+ int sample_variance_threshold;
+ DECLARE_ALIGNED(8, int, bounding_values_array)[256];
+
+ uint8_t coeff_ctx[4][64]; /* used in vp5 only */
+ uint8_t coeff_ctx_last[4]; /* used in vp5 only */
+
+ int has_alpha;
+
+ /* upside-down flipping hints */
+    int flip; /* are we flipping? */
+ int frbi; /* first row block index in MB */
+ int srbi; /* second row block index in MB */
+    ptrdiff_t stride[4]; /* stride for each plane */
+
+ const uint8_t *vp56_coord_div;
+ VP56ParseVectorAdjustment parse_vector_adjustment;
+ VP56Filter filter;
+ VP56ParseCoeff parse_coeff;
+ VP56DefaultModelsInit default_models_init;
+ VP56ParseVectorModels parse_vector_models;
+ VP56ParseCoeffModels parse_coeff_models;
+ VP56ParseHeader parse_header;
+
+ /* for "slice" parallelism between YUV and A */
+ VP56Context *alpha_context;
+
+ VP56Model *modelp;
+ VP56Model model;
+
+ /* huffman decoding */
+ int use_huffman;
+ GetBitContext gb;
+ VLC dccv_vlc[2];
+ VLC runv_vlc[2];
+ VLC ract_vlc[2][3][6];
+ unsigned int nb_null[2][2]; /* number of consecutive NULL DC/AC */
+
+ int have_undamaged_frame;
+ int discard_frame;
+};
+
+
+/**
+ * Initializes a VP56Context. Expects its caller to clean up
+ * in case of error.
+ */
+int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
+ int flip, int has_alpha);
+int ff_vp56_free_context(VP56Context *s);
+void ff_vp56_init_dequant(VP56Context *s, int quantizer);
+int ff_vp56_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, AVPacket *avpkt);
+
+
+/**
+ * vp56 specific range coder implementation
+ */
+
+static int vp56_rac_gets(VPXRangeCoder *c, int bits)
+{
+ int value = 0;
+
+ while (bits--) {
+ value = (value << 1) | vpx_rac_get(c);
+ }
+
+ return value;
+}
+
+// P(7): returns twice the 7-bit value read, or 1 if that value is 0 (never zero)
+static av_unused int vp56_rac_gets_nn(VPXRangeCoder *c, int bits)
+{
+ int v = vp56_rac_gets(c, 7) << 1;
+ return v + !v;
+}
+
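+/* Tree layout used by vp56_rac_get_tree(): a positive 'val' is the forward
+ * offset to follow when the coded bit is 1 (a 0 bit moves to the next node),
+ * while a non-positive 'val' marks a leaf whose decoded value is -val. */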
+static av_always_inline
+int vp56_rac_get_tree(VPXRangeCoder *c,
+ const VP56Tree *tree,
+ const uint8_t *probs)
+{
+ while (tree->val > 0) {
+ if (vpx_rac_get_prob_branchy(c, probs[tree->prob_idx]))
+ tree += tree->val;
+ else
+ tree++;
+ }
+ return -tree->val;
+}
+
+#endif /* AVCODEC_VP56_H */
diff --git a/media/ffvpx/libavcodec/vp56dsp.h b/media/ffvpx/libavcodec/vp56dsp.h
new file mode 100644
index 0000000000..e35e232ea3
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp56dsp.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP56DSP_H
+#define AVCODEC_VP56DSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef struct VP56DSPContext {
+ void (*edge_filter_hor)(uint8_t *yuv, ptrdiff_t stride, int t);
+ void (*edge_filter_ver)(uint8_t *yuv, ptrdiff_t stride, int t);
+
+ void (*vp6_filter_diag4)(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
+ const int16_t *h_weights,const int16_t *v_weights);
+} VP56DSPContext;
+
+void ff_vp6_filter_diag4_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
+ const int16_t *h_weights, const int16_t *v_weights);
+
+void ff_vp5dsp_init(VP56DSPContext *s);
+void ff_vp6dsp_init(VP56DSPContext *s);
+
+void ff_vp6dsp_init_arm(VP56DSPContext *s);
+void ff_vp6dsp_init_x86(VP56DSPContext *s);
+
+#endif /* AVCODEC_VP56DSP_H */
diff --git a/media/ffvpx/libavcodec/vp8.c b/media/ffvpx/libavcodec/vp8.c
new file mode 100644
index 0000000000..db2419deaf
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp8.c
@@ -0,0 +1,3014 @@
+/*
+ * VP7/VP8 compatible video decoder
+ *
+ * Copyright (C) 2010 David Conrad
+ * Copyright (C) 2010 Ronald S. Bultje
+ * Copyright (C) 2010 Fiona Glaser
+ * Copyright (C) 2012 Daniel Kang
+ * Copyright (C) 2014 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config_components.h"
+
+#include "libavutil/mem_internal.h"
+
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "hwconfig.h"
+#include "mathops.h"
+#include "thread.h"
+#include "threadframe.h"
+#include "vp8.h"
+#include "vp89_rac.h"
+#include "vp8data.h"
+#include "vpx_rac.h"
+
+#if ARCH_ARM
+# include "arm/vp8.h"
+#endif
+
+// fixme: add 1 bit to all the calls to this?
+static int vp8_rac_get_sint(VPXRangeCoder *c, int bits)
+{
+ int v;
+
+ if (!vp89_rac_get(c))
+ return 0;
+
+ v = vp89_rac_get_uint(c, bits);
+
+ if (vp89_rac_get(c))
+ v = -v;
+
+ return v;
+}
+
+static int vp8_rac_get_nn(VPXRangeCoder *c)
+{
+ int v = vp89_rac_get_uint(c, 7) << 1;
+ return v + !v;
+}
+
+// DCTextra
+static int vp8_rac_get_coeff(VPXRangeCoder *c, const uint8_t *prob)
+{
+ int v = 0;
+
+ do {
+ v = (v<<1) + vpx_rac_get_prob(c, *prob++);
+ } while (*prob);
+
+ return v;
+}
+
+static void free_buffers(VP8Context *s)
+{
+ int i;
+ if (s->thread_data)
+ for (i = 0; i < MAX_THREADS; i++) {
+#if HAVE_THREADS
+ pthread_cond_destroy(&s->thread_data[i].cond);
+ pthread_mutex_destroy(&s->thread_data[i].lock);
+#endif
+ av_freep(&s->thread_data[i].filter_strength);
+ }
+ av_freep(&s->thread_data);
+ av_freep(&s->macroblocks_base);
+ av_freep(&s->intra4x4_pred_mode_top);
+ av_freep(&s->top_nnz);
+ av_freep(&s->top_border);
+
+ s->macroblocks = NULL;
+}
+
+static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
+{
+ int ret;
+ if ((ret = ff_thread_get_ext_buffer(s->avctx, &f->tf,
+ ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
+ return ret;
+ if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height)))
+ goto fail;
+ if (s->avctx->hwaccel) {
+ const AVHWAccel *hwaccel = s->avctx->hwaccel;
+ if (hwaccel->frame_priv_data_size) {
+ f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
+ if (!f->hwaccel_priv_buf)
+ goto fail;
+ f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
+ }
+ }
+ return 0;
+
+fail:
+ av_buffer_unref(&f->seg_map);
+ ff_thread_release_ext_buffer(s->avctx, &f->tf);
+ return AVERROR(ENOMEM);
+}
+
+static void vp8_release_frame(VP8Context *s, VP8Frame *f)
+{
+ av_buffer_unref(&f->seg_map);
+ av_buffer_unref(&f->hwaccel_priv_buf);
+ f->hwaccel_picture_private = NULL;
+ ff_thread_release_ext_buffer(s->avctx, &f->tf);
+}
+
+#if CONFIG_VP8_DECODER
+static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, const VP8Frame *src)
+{
+ int ret;
+
+ vp8_release_frame(s, dst);
+
+ if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
+ return ret;
+ if (src->seg_map &&
+ !(dst->seg_map = av_buffer_ref(src->seg_map))) {
+ vp8_release_frame(s, dst);
+ return AVERROR(ENOMEM);
+ }
+ if (src->hwaccel_picture_private) {
+ dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
+ if (!dst->hwaccel_priv_buf)
+ return AVERROR(ENOMEM);
+ dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_VP8_DECODER */
+
+static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
+{
+ VP8Context *s = avctx->priv_data;
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
+ vp8_release_frame(s, &s->frames[i]);
+ memset(s->framep, 0, sizeof(s->framep));
+
+ if (free_mem)
+ free_buffers(s);
+}
+
+static void vp8_decode_flush(AVCodecContext *avctx)
+{
+ vp8_decode_flush_impl(avctx, 0);
+}
+
+static VP8Frame *vp8_find_free_buffer(VP8Context *s)
+{
+ VP8Frame *frame = NULL;
+ int i;
+
+ // find a free buffer
+ for (i = 0; i < 5; i++)
+ if (&s->frames[i] != s->framep[VP8_FRAME_CURRENT] &&
+ &s->frames[i] != s->framep[VP8_FRAME_PREVIOUS] &&
+ &s->frames[i] != s->framep[VP8_FRAME_GOLDEN] &&
+ &s->frames[i] != s->framep[VP8_FRAME_ALTREF]) {
+ frame = &s->frames[i];
+ break;
+ }
+ if (i == 5) {
+ av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
+ abort();
+ }
+ if (frame->tf.f->buf[0])
+ vp8_release_frame(s, frame);
+
+ return frame;
+}
+
+static enum AVPixelFormat get_pixel_format(VP8Context *s)
+{
+ enum AVPixelFormat pix_fmts[] = {
+#if CONFIG_VP8_VAAPI_HWACCEL
+ AV_PIX_FMT_VAAPI,
+#endif
+#if CONFIG_VP8_NVDEC_HWACCEL
+ AV_PIX_FMT_CUDA,
+#endif
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NONE,
+ };
+
+ return ff_get_format(s->avctx, pix_fmts);
+}
+
+static av_always_inline
+int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
+{
+ AVCodecContext *avctx = s->avctx;
+ int i, ret, dim_reset = 0;
+
+ if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
+ height != s->avctx->height) {
+ vp8_decode_flush_impl(s->avctx, 1);
+
+ ret = ff_set_dimensions(s->avctx, width, height);
+ if (ret < 0)
+ return ret;
+
+ dim_reset = (s->macroblocks_base != NULL);
+ }
+
+ if ((s->pix_fmt == AV_PIX_FMT_NONE || dim_reset) &&
+ !s->actually_webp && !is_vp7) {
+ s->pix_fmt = get_pixel_format(s);
+ if (s->pix_fmt < 0)
+ return AVERROR(EINVAL);
+ avctx->pix_fmt = s->pix_fmt;
+ }
+
+ s->mb_width = (s->avctx->coded_width + 15) / 16;
+ s->mb_height = (s->avctx->coded_height + 15) / 16;
+
+ s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
+ avctx->thread_count > 1;
+ if (!s->mb_layout) { // Frame threading and one thread
+ s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
+ sizeof(*s->macroblocks));
+ s->intra4x4_pred_mode_top = av_mallocz(s->mb_width * 4);
+ } else // Sliced threading
+ s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
+ sizeof(*s->macroblocks));
+ s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
+ s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
+ s->thread_data = av_mallocz(MAX_THREADS * sizeof(VP8ThreadData));
+
+ if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
+ !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) {
+ free_buffers(s);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < MAX_THREADS; i++) {
+ s->thread_data[i].filter_strength =
+ av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
+ if (!s->thread_data[i].filter_strength) {
+ free_buffers(s);
+ return AVERROR(ENOMEM);
+ }
+#if HAVE_THREADS
+ pthread_mutex_init(&s->thread_data[i].lock, NULL);
+ pthread_cond_init(&s->thread_data[i].cond, NULL);
+#endif
+ }
+
+ s->macroblocks = s->macroblocks_base + 1;
+
+ return 0;
+}
+
+static int vp7_update_dimensions(VP8Context *s, int width, int height)
+{
+ return update_dimensions(s, width, height, IS_VP7);
+}
+
+static int vp8_update_dimensions(VP8Context *s, int width, int height)
+{
+ return update_dimensions(s, width, height, IS_VP8);
+}
+
+
+static void parse_segment_info(VP8Context *s)
+{
+ VPXRangeCoder *c = &s->c;
+ int i;
+
+ s->segmentation.update_map = vp89_rac_get(c);
+ s->segmentation.update_feature_data = vp89_rac_get(c);
+
+ if (s->segmentation.update_feature_data) {
+ s->segmentation.absolute_vals = vp89_rac_get(c);
+
+ for (i = 0; i < 4; i++)
+ s->segmentation.base_quant[i] = vp8_rac_get_sint(c, 7);
+
+ for (i = 0; i < 4; i++)
+ s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
+ }
+ if (s->segmentation.update_map)
+ for (i = 0; i < 3; i++)
+ s->prob->segmentid[i] = vp89_rac_get(c) ? vp89_rac_get_uint(c, 8) : 255;
+}
+
+static void update_lf_deltas(VP8Context *s)
+{
+ VPXRangeCoder *c = &s->c;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (vp89_rac_get(c)) {
+ s->lf_delta.ref[i] = vp89_rac_get_uint(c, 6);
+
+ if (vp89_rac_get(c))
+ s->lf_delta.ref[i] = -s->lf_delta.ref[i];
+ }
+ }
+
+ for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
+ if (vp89_rac_get(c)) {
+ s->lf_delta.mode[i] = vp89_rac_get_uint(c, 6);
+
+ if (vp89_rac_get(c))
+ s->lf_delta.mode[i] = -s->lf_delta.mode[i];
+ }
+ }
+}
+
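+/* Read the coefficient partition count and the 3-byte little-endian size of
+ * every partition but the last, then initialize one range decoder per
+ * partition (the last one covers the remaining buffer). */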
+static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
+{
+ const uint8_t *sizes = buf;
+ int i;
+ int ret;
+
+ s->num_coeff_partitions = 1 << vp89_rac_get_uint(&s->c, 2);
+
+ buf += 3 * (s->num_coeff_partitions - 1);
+ buf_size -= 3 * (s->num_coeff_partitions - 1);
+ if (buf_size < 0)
+ return -1;
+
+ for (i = 0; i < s->num_coeff_partitions - 1; i++) {
+ int size = AV_RL24(sizes + 3 * i);
+ if (buf_size - size < 0)
+ return -1;
+ s->coeff_partition_size[i] = size;
+
+ ret = ff_vpx_init_range_decoder(&s->coeff_partition[i], buf, size);
+ if (ret < 0)
+ return ret;
+ buf += size;
+ buf_size -= size;
+ }
+
+ s->coeff_partition_size[i] = buf_size;
+ ff_vpx_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
+
+ return 0;
+}
+
+static void vp7_get_quants(VP8Context *s)
+{
+ VPXRangeCoder *c = &s->c;
+
+ int yac_qi = vp89_rac_get_uint(c, 7);
+ int ydc_qi = vp89_rac_get(c) ? vp89_rac_get_uint(c, 7) : yac_qi;
+ int y2dc_qi = vp89_rac_get(c) ? vp89_rac_get_uint(c, 7) : yac_qi;
+ int y2ac_qi = vp89_rac_get(c) ? vp89_rac_get_uint(c, 7) : yac_qi;
+ int uvdc_qi = vp89_rac_get(c) ? vp89_rac_get_uint(c, 7) : yac_qi;
+ int uvac_qi = vp89_rac_get(c) ? vp89_rac_get_uint(c, 7) : yac_qi;
+
+ s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
+ s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
+ s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
+ s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
+ s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
+ s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
+}
+
+static void vp8_get_quants(VP8Context *s)
+{
+ VPXRangeCoder *c = &s->c;
+ int i, base_qi;
+
+ s->quant.yac_qi = vp89_rac_get_uint(c, 7);
+ s->quant.ydc_delta = vp8_rac_get_sint(c, 4);
+ s->quant.y2dc_delta = vp8_rac_get_sint(c, 4);
+ s->quant.y2ac_delta = vp8_rac_get_sint(c, 4);
+ s->quant.uvdc_delta = vp8_rac_get_sint(c, 4);
+ s->quant.uvac_delta = vp8_rac_get_sint(c, 4);
+
+ for (i = 0; i < 4; i++) {
+ if (s->segmentation.enabled) {
+ base_qi = s->segmentation.base_quant[i];
+ if (!s->segmentation.absolute_vals)
+ base_qi += s->quant.yac_qi;
+ } else
+ base_qi = s->quant.yac_qi;
+
+ s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.ydc_delta, 7)];
+ s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
+ s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.y2dc_delta, 7)] * 2;
+ /* 101581>>16 is equivalent to 155/100 */
+ s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + s->quant.y2ac_delta, 7)] * 101581 >> 16;
+ s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.uvdc_delta, 7)];
+ s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + s->quant.uvac_delta, 7)];
+
+ s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
+ s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
+ }
+}
+
+/**
+ * Determine which buffers golden and altref should be updated with after this frame.
+ * The spec isn't clear here, so I'm going by my understanding of what libvpx does
+ *
+ * Intra frames update all 3 references
+ * Inter frames update VP8_FRAME_PREVIOUS if the update_last flag is set
+ * If the update (golden|altref) flag is set, it's updated with the current frame
+ * if update_last is set, and VP8_FRAME_PREVIOUS otherwise.
+ * If the flag is not set, the number read means:
+ * 0: no update
+ * 1: VP8_FRAME_PREVIOUS
+ * 2: update golden with altref, or update altref with golden
+ */
+static VP8FrameType ref_to_update(VP8Context *s, int update, VP8FrameType ref)
+{
+ VPXRangeCoder *c = &s->c;
+
+ if (update)
+ return VP8_FRAME_CURRENT;
+
+ switch (vp89_rac_get_uint(c, 2)) {
+ case 1:
+ return VP8_FRAME_PREVIOUS;
+ case 2:
+ return (ref == VP8_FRAME_GOLDEN) ? VP8_FRAME_ALTREF : VP8_FRAME_GOLDEN;
+ }
+ return VP8_FRAME_NONE;
+}
+
+static void vp78_reset_probability_tables(VP8Context *s)
+{
+ int i, j;
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 16; j++)
+ memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
+ sizeof(s->prob->token[i][j]));
+}
+
+static void vp78_update_probability_tables(VP8Context *s)
+{
+ VPXRangeCoder *c = &s->c;
+ int i, j, k, l, m;
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 3; k++)
+ for (l = 0; l < NUM_DCT_TOKENS-1; l++)
+ if (vpx_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
+ int prob = vp89_rac_get_uint(c, 8);
+ for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
+ s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
+ }
+}
+
+#define VP7_MVC_SIZE 17
+#define VP8_MVC_SIZE 19
+
+static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s,
+ int mvc_size)
+{
+ VPXRangeCoder *c = &s->c;
+ int i, j;
+
+ if (vp89_rac_get(c))
+ for (i = 0; i < 4; i++)
+ s->prob->pred16x16[i] = vp89_rac_get_uint(c, 8);
+ if (vp89_rac_get(c))
+ for (i = 0; i < 3; i++)
+ s->prob->pred8x8c[i] = vp89_rac_get_uint(c, 8);
+
+ // 17.2 MV probability update
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < mvc_size; j++)
+ if (vpx_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
+ s->prob->mvc[i][j] = vp8_rac_get_nn(c);
+}
+
+static void update_refs(VP8Context *s)
+{
+ VPXRangeCoder *c = &s->c;
+
+ int update_golden = vp89_rac_get(c);
+ int update_altref = vp89_rac_get(c);
+
+ s->update_golden = ref_to_update(s, update_golden, VP8_FRAME_GOLDEN);
+ s->update_altref = ref_to_update(s, update_altref, VP8_FRAME_ALTREF);
+}
+
+static void copy_chroma(AVFrame *dst, const AVFrame *src, int width, int height)
+{
+ int i, j;
+
+ for (j = 1; j < 3; j++) {
+ for (i = 0; i < height / 2; i++)
+ memcpy(dst->data[j] + i * dst->linesize[j],
+ src->data[j] + i * src->linesize[j], width / 2);
+ }
+}
+
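+/* Per-pixel luma fade used by VP7 interframes:
+ * dst = clip_uint8(y + y * beta / 256 + alpha), i.e. a brightness (alpha)
+ * and contrast (beta) adjustment of the source plane. */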
+static void fade(uint8_t *dst, ptrdiff_t dst_linesize,
+ const uint8_t *src, ptrdiff_t src_linesize,
+ int width, int height,
+ int alpha, int beta)
+{
+ int i, j;
+ for (j = 0; j < height; j++) {
+ const uint8_t *src2 = src + j * src_linesize;
+ uint8_t *dst2 = dst + j * dst_linesize;
+ for (i = 0; i < width; i++) {
+ uint8_t y = src2[i];
+ dst2[i] = av_clip_uint8(y + ((y * beta) >> 8) + alpha);
+ }
+ }
+}
+
+static int vp7_fade_frame(VP8Context *s, int alpha, int beta)
+{
+ int ret;
+
+ if (!s->keyframe && (alpha || beta)) {
+ int width = s->mb_width * 16;
+ int height = s->mb_height * 16;
+ const AVFrame *src;
+ AVFrame *dst;
+
+ if (!s->framep[VP8_FRAME_PREVIOUS] ||
+ !s->framep[VP8_FRAME_GOLDEN]) {
+ av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ src =
+ dst = s->framep[VP8_FRAME_PREVIOUS]->tf.f;
+
+ /* preserve the golden frame, write a new previous frame */
+ if (s->framep[VP8_FRAME_GOLDEN] == s->framep[VP8_FRAME_PREVIOUS]) {
+ s->framep[VP8_FRAME_PREVIOUS] = vp8_find_free_buffer(s);
+ if ((ret = vp8_alloc_frame(s, s->framep[VP8_FRAME_PREVIOUS], 1)) < 0)
+ return ret;
+
+ dst = s->framep[VP8_FRAME_PREVIOUS]->tf.f;
+
+ copy_chroma(dst, src, width, height);
+ }
+
+ fade(dst->data[0], dst->linesize[0],
+ src->data[0], src->linesize[0],
+ width, height, alpha, beta);
+ }
+
+ return 0;
+}
+
+static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
+{
+ VPXRangeCoder *c = &s->c;
+ int part1_size, hscale, vscale, i, j, ret;
+ int width = s->avctx->width;
+ int height = s->avctx->height;
+ int alpha = 0;
+ int beta = 0;
+ int fade_present = 1;
+
+ if (buf_size < 4) {
+ return AVERROR_INVALIDDATA;
+ }
+
+ s->profile = (buf[0] >> 1) & 7;
+ if (s->profile > 1) {
+ avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
+ return AVERROR_INVALIDDATA;
+ }
+
+ s->keyframe = !(buf[0] & 1);
+ s->invisible = 0;
+ part1_size = AV_RL24(buf) >> 4;
+
+ if (buf_size < 4 - s->profile + part1_size) {
+ av_log(s->avctx, AV_LOG_ERROR, "Buffer size %d is too small, needed : %d\n", buf_size, 4 - s->profile + part1_size);
+ return AVERROR_INVALIDDATA;
+ }
+
+ buf += 4 - s->profile;
+ buf_size -= 4 - s->profile;
+
+ memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
+
+ ret = ff_vpx_init_range_decoder(c, buf, part1_size);
+ if (ret < 0)
+ return ret;
+ buf += part1_size;
+ buf_size -= part1_size;
+
+ /* A. Dimension information (keyframes only) */
+ if (s->keyframe) {
+ width = vp89_rac_get_uint(c, 12);
+ height = vp89_rac_get_uint(c, 12);
+ hscale = vp89_rac_get_uint(c, 2);
+ vscale = vp89_rac_get_uint(c, 2);
+ if (hscale || vscale)
+ avpriv_request_sample(s->avctx, "Upscaling");
+
+ s->update_golden = s->update_altref = VP8_FRAME_CURRENT;
+ vp78_reset_probability_tables(s);
+ memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
+ sizeof(s->prob->pred16x16));
+ memcpy(s->prob->pred8x8c, vp8_pred8x8c_prob_inter,
+ sizeof(s->prob->pred8x8c));
+ for (i = 0; i < 2; i++)
+ memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
+ sizeof(vp7_mv_default_prob[i]));
+ memset(&s->segmentation, 0, sizeof(s->segmentation));
+ memset(&s->lf_delta, 0, sizeof(s->lf_delta));
+ memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
+ }
+
+ if (s->keyframe || s->profile > 0)
+ memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));
+
+ /* B. Decoding information for all four macroblock-level features */
+ for (i = 0; i < 4; i++) {
+ s->feature_enabled[i] = vp89_rac_get(c);
+ if (s->feature_enabled[i]) {
+ s->feature_present_prob[i] = vp89_rac_get_uint(c, 8);
+
+ for (j = 0; j < 3; j++)
+ s->feature_index_prob[i][j] =
+ vp89_rac_get(c) ? vp89_rac_get_uint(c, 8) : 255;
+
+ if (vp7_feature_value_size[s->profile][i])
+ for (j = 0; j < 4; j++)
+ s->feature_value[i][j] =
+ vp89_rac_get(c) ? vp89_rac_get_uint(c, vp7_feature_value_size[s->profile][i]) : 0;
+ }
+ }
+
+ s->segmentation.enabled = 0;
+ s->segmentation.update_map = 0;
+ s->lf_delta.enabled = 0;
+
+ s->num_coeff_partitions = 1;
+ ret = ff_vpx_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
+ if (ret < 0)
+ return ret;
+
+ if (!s->macroblocks_base || /* first frame */
+ width != s->avctx->width || height != s->avctx->height ||
+ (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
+ if ((ret = vp7_update_dimensions(s, width, height)) < 0)
+ return ret;
+ }
+
+ /* C. Dequantization indices */
+ vp7_get_quants(s);
+
+ /* D. Golden frame update flag (a Flag) for interframes only */
+ if (!s->keyframe) {
+ s->update_golden = vp89_rac_get(c) ? VP8_FRAME_CURRENT : VP8_FRAME_NONE;
+ s->sign_bias[VP8_FRAME_GOLDEN] = 0;
+ }
+
+ s->update_last = 1;
+ s->update_probabilities = 1;
+
+ if (s->profile > 0) {
+ s->update_probabilities = vp89_rac_get(c);
+ if (!s->update_probabilities)
+ s->prob[1] = s->prob[0];
+
+ if (!s->keyframe)
+ fade_present = vp89_rac_get(c);
+ }
+
+ if (vpx_rac_is_end(c))
+ return AVERROR_INVALIDDATA;
+ /* E. Fading information for previous frame */
+ if (fade_present && vp89_rac_get(c)) {
+ alpha = (int8_t) vp89_rac_get_uint(c, 8);
+ beta = (int8_t) vp89_rac_get_uint(c, 8);
+ }
+
+ /* F. Loop filter type */
+ if (!s->profile)
+ s->filter.simple = vp89_rac_get(c);
+
+ /* G. DCT coefficient ordering specification */
+ if (vp89_rac_get(c))
+ for (i = 1; i < 16; i++)
+ s->prob[0].scan[i] = ff_zigzag_scan[vp89_rac_get_uint(c, 4)];
+
+ /* H. Loop filter levels */
+ if (s->profile > 0)
+ s->filter.simple = vp89_rac_get(c);
+ s->filter.level = vp89_rac_get_uint(c, 6);
+ s->filter.sharpness = vp89_rac_get_uint(c, 3);
+
+ /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
+ vp78_update_probability_tables(s);
+
+ s->mbskip_enabled = 0;
+
+ /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
+ if (!s->keyframe) {
+ s->prob->intra = vp89_rac_get_uint(c, 8);
+ s->prob->last = vp89_rac_get_uint(c, 8);
+ vp78_update_pred16x16_pred8x8_mvc_probabilities(s, VP7_MVC_SIZE);
+ }
+
+ if (vpx_rac_is_end(c))
+ return AVERROR_INVALIDDATA;
+
+ if ((ret = vp7_fade_frame(s, alpha, beta)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
+{
+ VPXRangeCoder *c = &s->c;
+ int header_size, hscale, vscale, ret;
+ int width = s->avctx->width;
+ int height = s->avctx->height;
+
+ if (buf_size < 3) {
+ av_log(s->avctx, AV_LOG_ERROR, "Insufficent data (%d) for header\n", buf_size);
+ return AVERROR_INVALIDDATA;
+ }
+
+ s->keyframe = !(buf[0] & 1);
+ s->profile = (buf[0]>>1) & 7;
+ s->invisible = !(buf[0] & 0x10);
+ header_size = AV_RL24(buf) >> 5;
+ buf += 3;
+ buf_size -= 3;
+
+ s->header_partition_size = header_size;
+
+ if (s->profile > 3)
+ av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
+
+ if (!s->profile)
+ memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab,
+ sizeof(s->put_pixels_tab));
+ else // profiles 1-3 use bilinear; profiles 4+ are undefined, so fall back to bilinear too
+ memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab,
+ sizeof(s->put_pixels_tab));
+
+ if (header_size > buf_size - 7 * s->keyframe) {
+ av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (s->keyframe) {
+ if (AV_RL24(buf) != 0x2a019d) {
+ av_log(s->avctx, AV_LOG_ERROR,
+ "Invalid start code 0x%x\n", AV_RL24(buf));
+ return AVERROR_INVALIDDATA;
+ }
+ width = AV_RL16(buf + 3) & 0x3fff;
+ height = AV_RL16(buf + 5) & 0x3fff;
+ hscale = buf[4] >> 6;
+ vscale = buf[6] >> 6;
+ buf += 7;
+ buf_size -= 7;
+
+ if (hscale || vscale)
+ avpriv_request_sample(s->avctx, "Upscaling");
+
+ s->update_golden = s->update_altref = VP8_FRAME_CURRENT;
+ vp78_reset_probability_tables(s);
+ memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
+ sizeof(s->prob->pred16x16));
+ memcpy(s->prob->pred8x8c, vp8_pred8x8c_prob_inter,
+ sizeof(s->prob->pred8x8c));
+ memcpy(s->prob->mvc, vp8_mv_default_prob,
+ sizeof(s->prob->mvc));
+ memset(&s->segmentation, 0, sizeof(s->segmentation));
+ memset(&s->lf_delta, 0, sizeof(s->lf_delta));
+ }
+
+ ret = ff_vpx_init_range_decoder(c, buf, header_size);
+ if (ret < 0)
+ return ret;
+ buf += header_size;
+ buf_size -= header_size;
+
+ if (s->keyframe) {
+ s->colorspace = vp89_rac_get(c);
+ if (s->colorspace)
+ av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
+ s->fullrange = vp89_rac_get(c);
+ }
+
+ if ((s->segmentation.enabled = vp89_rac_get(c)))
+ parse_segment_info(s);
+ else
+ s->segmentation.update_map = 0; // FIXME: move this to some init function?
+
+ s->filter.simple = vp89_rac_get(c);
+ s->filter.level = vp89_rac_get_uint(c, 6);
+ s->filter.sharpness = vp89_rac_get_uint(c, 3);
+
+ if ((s->lf_delta.enabled = vp89_rac_get(c))) {
+ s->lf_delta.update = vp89_rac_get(c);
+ if (s->lf_delta.update)
+ update_lf_deltas(s);
+ }
+
+ if (setup_partitions(s, buf, buf_size)) {
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (!s->macroblocks_base || /* first frame */
+ width != s->avctx->width || height != s->avctx->height ||
+ (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height)
+ if ((ret = vp8_update_dimensions(s, width, height)) < 0)
+ return ret;
+
+ vp8_get_quants(s);
+
+ if (!s->keyframe) {
+ update_refs(s);
+ s->sign_bias[VP8_FRAME_GOLDEN] = vp89_rac_get(c);
+ s->sign_bias[VP8_FRAME_ALTREF] = vp89_rac_get(c);
+ }
+
+ // if we aren't saving this frame's probabilities for future frames,
+ // make a copy of the current probabilities
+ if (!(s->update_probabilities = vp89_rac_get(c)))
+ s->prob[1] = s->prob[0];
+
+ s->update_last = s->keyframe || vp89_rac_get(c);
+
+ vp78_update_probability_tables(s);
+
+ if ((s->mbskip_enabled = vp89_rac_get(c)))
+ s->prob->mbskip = vp89_rac_get_uint(c, 8);
+
+ if (!s->keyframe) {
+ s->prob->intra = vp89_rac_get_uint(c, 8);
+ s->prob->last = vp89_rac_get_uint(c, 8);
+ s->prob->golden = vp89_rac_get_uint(c, 8);
+ vp78_update_pred16x16_pred8x8_mvc_probabilities(s, VP8_MVC_SIZE);
+ }
+
+ // Record the entropy coder state here so that hwaccels can use it.
+ s->c.code_word = vpx_rac_renorm(&s->c);
+ s->coder_state_at_header_end.input = s->c.buffer - (-s->c.bits / 8);
+ s->coder_state_at_header_end.range = s->c.high;
+ s->coder_state_at_header_end.value = s->c.code_word >> 16;
+ s->coder_state_at_header_end.bit_count = -s->c.bits % 8;
+
+ return 0;
+}
+
+static av_always_inline
+void clamp_mv(const VP8mvbounds *s, VP8mv *dst, const VP8mv *src)
+{
+ dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
+ av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
+ dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
+ av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
+}
+
+/**
+ * Motion vector coding, 17.1.
+ */
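+/* Shape of the coding implemented by read_mv_component() below: magnitudes
+ * 0..7 come from the small probability tree (p[2..8]); larger magnitudes
+ * are coded bitwise, the low three bits first (p[9..11]) and then the
+ * remaining bits from the top bit (7 for VP7, 9 for VP8) down to bit 4.
+ * Bit 3 is forced to 1 when all higher bits are zero (the long form
+ * implies a magnitude of at least 8), otherwise it is coded via p[12];
+ * the sign comes last, via p[1]. */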
+static av_always_inline int read_mv_component(VPXRangeCoder *c, const uint8_t *p, int vp7)
+{
+ int bit, x = 0;
+
+ if (vpx_rac_get_prob_branchy(c, p[0])) {
+ int i;
+
+ for (i = 0; i < 3; i++)
+ x += vpx_rac_get_prob(c, p[9 + i]) << i;
+ for (i = (vp7 ? 7 : 9); i > 3; i--)
+ x += vpx_rac_get_prob(c, p[9 + i]) << i;
+ if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vpx_rac_get_prob(c, p[12]))
+ x += 8;
+ } else {
+ // small_mvtree
+ const uint8_t *ps = p + 2;
+ bit = vpx_rac_get_prob(c, *ps);
+ ps += 1 + 3 * bit;
+ x += 4 * bit;
+ bit = vpx_rac_get_prob(c, *ps);
+ ps += 1 + bit;
+ x += 2 * bit;
+ x += vpx_rac_get_prob(c, *ps);
+ }
+
+ return (x && vpx_rac_get_prob(c, p[1])) ? -x : x;
+}
+
+static int vp7_read_mv_component(VPXRangeCoder *c, const uint8_t *p)
+{
+ return read_mv_component(c, p, 1);
+}
+
+static int vp8_read_mv_component(VPXRangeCoder *c, const uint8_t *p)
+{
+ return read_mv_component(c, p, 0);
+}
+
+static av_always_inline
+const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
+{
+ if (is_vp7)
+ return vp7_submv_prob;
+
+ if (left == top)
+ return vp8_submv_prob[4 - !!left];
+ if (!top)
+ return vp8_submv_prob[2];
+ return vp8_submv_prob[1 - !!left];
+}
+
+/**
+ * Split motion vector prediction, 16.4.
+ * @returns the number of motion vectors parsed (2, 4 or 16)
+ */
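+/* A macroblock splits into 2 (16x8/8x16), 4 (8x8) or 16 (4x4) parts, see
+ * vp8_mbsplit_count[].  Each sub-MV below is coded against its left/top
+ * neighbour (which may lie in the neighbouring macroblock) as "same as
+ * left", "same as above", zero, or an explicit delta added to mb->mv. */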
+static av_always_inline
+int decode_splitmvs(const VP8Context *s, VPXRangeCoder *c, VP8Macroblock *mb,
+ int layout, int is_vp7)
+{
+ int part_idx;
+ int n, num;
+ const VP8Macroblock *top_mb;
+ const VP8Macroblock *left_mb = &mb[-1];
+ const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
+ const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
+ const VP8mv *top_mv;
+ const VP8mv *left_mv = left_mb->bmv;
+ const VP8mv *cur_mv = mb->bmv;
+
+ if (!layout) // layout is inlined, s->mb_layout is not
+ top_mb = &mb[2];
+ else
+ top_mb = &mb[-s->mb_width - 1];
+ mbsplits_top = vp8_mbsplits[top_mb->partitioning];
+ top_mv = top_mb->bmv;
+
+ if (vpx_rac_get_prob_branchy(c, vp8_mbsplit_prob[0])) {
+ if (vpx_rac_get_prob_branchy(c, vp8_mbsplit_prob[1]))
+ part_idx = VP8_SPLITMVMODE_16x8 + vpx_rac_get_prob(c, vp8_mbsplit_prob[2]);
+ else
+ part_idx = VP8_SPLITMVMODE_8x8;
+ } else {
+ part_idx = VP8_SPLITMVMODE_4x4;
+ }
+
+ num = vp8_mbsplit_count[part_idx];
+ mbsplits_cur = vp8_mbsplits[part_idx];
+ firstidx = vp8_mbfirstidx[part_idx];
+ mb->partitioning = part_idx;
+
+ for (n = 0; n < num; n++) {
+ int k = firstidx[n];
+ uint32_t left, above;
+ const uint8_t *submv_prob;
+
+ if (!(k & 3))
+ left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
+ else
+ left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
+ if (k <= 3)
+ above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
+ else
+ above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
+
+ submv_prob = get_submv_prob(left, above, is_vp7);
+
+ if (vpx_rac_get_prob_branchy(c, submv_prob[0])) {
+ if (vpx_rac_get_prob_branchy(c, submv_prob[1])) {
+ if (vpx_rac_get_prob_branchy(c, submv_prob[2])) {
+ mb->bmv[n].y = mb->mv.y +
+ read_mv_component(c, s->prob->mvc[0], is_vp7);
+ mb->bmv[n].x = mb->mv.x +
+ read_mv_component(c, s->prob->mvc[1], is_vp7);
+ } else {
+ AV_ZERO32(&mb->bmv[n]);
+ }
+ } else {
+ AV_WN32A(&mb->bmv[n], above);
+ }
+ } else {
+ AV_WN32A(&mb->bmv[n], left);
+ }
+ }
+
+ return num;
+}
+
+/**
+ * The vp7 reference decoder uses a padding macroblock column (added to the
+ * right edge of the frame) to guard against illegal macroblock offsets. The
+ * algorithm has bugs that permit offsets to straddle the padding column.
+ * This function replicates those bugs.
+ *
+ * @param[out] edge_x macroblock x address
+ * @param[out] edge_y macroblock y address
+ *
+ * @return whether the macroblock offset is legal (boolean)
+ */
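+/* Worked example of the offset arithmetic below (made-up numbers): with
+ * mb_width = 4 the virtual row width vwidth is 5 and column index 4 is the
+ * padding column.  For mb_x = 3, mb_y = 2 and an offset of (+1, 0),
+ * new = 2 * 5 + 3 + 1 = 14; since 14 % 5 == vwidth - 1 the candidate lands
+ * in the padding column and is rejected.  An offset of (-1, 0) gives
+ * new = 12, i.e. the edge macroblock (2, 2). */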
+static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
+ int xoffset, int yoffset, int boundary,
+ int *edge_x, int *edge_y)
+{
+ int vwidth = mb_width + 1;
+ int new = (mb_y + yoffset) * vwidth + mb_x + xoffset;
+ if (new < boundary || new % vwidth == vwidth - 1)
+ return 0;
+ *edge_y = new / vwidth;
+ *edge_x = new % vwidth;
+ return 1;
+}
+
+static const VP8mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
+{
+ return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
+}
+
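+/* VP7 MV decoding, roughly: scan the fixed predictor positions in
+ * vp7_mv_pred[], bucket each non-zero candidate MV into "nearest" or
+ * "near" (identical MVs share a bucket, a third distinct MV is skipped)
+ * and accumulate the predictors' scores in cnt[].  Those scores then index
+ * vp7_mode_contexts[] to pick the probabilities for the
+ * zero / nearest / near / (new or split) decision below. */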
+static av_always_inline
+void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb,
+ int mb_x, int mb_y, int layout)
+{
+ enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
+ enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
+ int idx = CNT_ZERO;
+ VP8mv near_mv[3];
+ uint8_t cnt[3] = { 0 };
+ VPXRangeCoder *c = &s->c;
+ int i;
+
+ AV_ZERO32(&near_mv[0]);
+ AV_ZERO32(&near_mv[1]);
+ AV_ZERO32(&near_mv[2]);
+
+ for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
+ const VP7MVPred * pred = &vp7_mv_pred[i];
+ int edge_x, edge_y;
+
+ if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
+ pred->yoffset, !s->profile, &edge_x, &edge_y)) {
+ const VP8Macroblock *edge = (s->mb_layout == 1)
+ ? s->macroblocks_base + 1 + edge_x +
+ (s->mb_width + 1) * (edge_y + 1)
+ : s->macroblocks + edge_x +
+ (s->mb_height - edge_y - 1) * 2;
+ uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
+ if (mv) {
+ if (AV_RN32A(&near_mv[CNT_NEAREST])) {
+ if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
+ idx = CNT_NEAREST;
+ } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
+ if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
+ continue;
+ idx = CNT_NEAR;
+ } else {
+ AV_WN32A(&near_mv[CNT_NEAR], mv);
+ idx = CNT_NEAR;
+ }
+ } else {
+ AV_WN32A(&near_mv[CNT_NEAREST], mv);
+ idx = CNT_NEAREST;
+ }
+ } else {
+ idx = CNT_ZERO;
+ }
+ } else {
+ idx = CNT_ZERO;
+ }
+ cnt[idx] += vp7_mv_pred[i].score;
+ }
+
+ mb->partitioning = VP8_SPLITMVMODE_NONE;
+
+ if (vpx_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
+ mb->mode = VP8_MVMODE_MV;
+
+ if (vpx_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
+
+ if (vpx_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
+
+ if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
+ AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
+ else
+ AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
+
+ if (vpx_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
+ mb->mode = VP8_MVMODE_SPLIT;
+ mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
+ } else {
+ mb->mv.y += vp7_read_mv_component(c, s->prob->mvc[0]);
+ mb->mv.x += vp7_read_mv_component(c, s->prob->mvc[1]);
+ mb->bmv[0] = mb->mv;
+ }
+ } else {
+ mb->mv = near_mv[CNT_NEAR];
+ mb->bmv[0] = mb->mv;
+ }
+ } else {
+ mb->mv = near_mv[CNT_NEAREST];
+ mb->bmv[0] = mb->mv;
+ }
+ } else {
+ mb->mode = VP8_MVMODE_ZERO;
+ AV_ZERO32(&mb->mv);
+ mb->bmv[0] = mb->mv;
+ }
+}
+
+static av_always_inline
+void vp8_decode_mvs(VP8Context *s, const VP8mvbounds *mv_bounds, VP8Macroblock *mb,
+ int mb_x, int mb_y, int layout)
+{
+ VP8Macroblock *mb_edge[3] = { 0 /* top */,
+ mb - 1 /* left */,
+ 0 /* top-left */ };
+ enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
+ enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
+ int idx = CNT_ZERO;
+ int cur_sign_bias = s->sign_bias[mb->ref_frame];
+ const int8_t *sign_bias = s->sign_bias;
+ VP8mv near_mv[4];
+ uint8_t cnt[4] = { 0 };
+ VPXRangeCoder *c = &s->c;
+
+ if (!layout) { // layout is inlined (s->mb_layout is not)
+ mb_edge[0] = mb + 2;
+ mb_edge[2] = mb + 1;
+ } else {
+ mb_edge[0] = mb - s->mb_width - 1;
+ mb_edge[2] = mb - s->mb_width - 2;
+ }
+
+ AV_ZERO32(&near_mv[0]);
+ AV_ZERO32(&near_mv[1]);
+ AV_ZERO32(&near_mv[2]);
+
+ /* Process MB on top, left and top-left */
+#define MV_EDGE_CHECK(n) \
+ { \
+ const VP8Macroblock *edge = mb_edge[n]; \
+ int edge_ref = edge->ref_frame; \
+ if (edge_ref != VP8_FRAME_CURRENT) { \
+ uint32_t mv = AV_RN32A(&edge->mv); \
+ if (mv) { \
+ if (cur_sign_bias != sign_bias[edge_ref]) { \
+ /* SWAR negate of the values in mv. */ \
+ mv = ~mv; \
+ mv = ((mv & 0x7fff7fff) + \
+ 0x00010001) ^ (mv & 0x80008000); \
+ } \
+ if (!n || mv != AV_RN32A(&near_mv[idx])) \
+ AV_WN32A(&near_mv[++idx], mv); \
+ cnt[idx] += 1 + (n != 2); \
+ } else \
+ cnt[CNT_ZERO] += 1 + (n != 2); \
+ } \
+ }
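+/* The SWAR negate in MV_EDGE_CHECK() flips both packed 16-bit MV
+ * components in one 32-bit operation: ~mv is the per-lane one's
+ * complement, adding 0x00010001 to the low 15 bits of each lane supplies
+ * the +1 of two's complement, and the final XOR restores the sign bits so
+ * that no carry crosses the lane boundary.  E.g. on a little-endian host a
+ * packed (x, y) = (3, -5), i.e. 0xFFFB0003, becomes 0x0005FFFD = (-3, 5). */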
+
+ MV_EDGE_CHECK(0)
+ MV_EDGE_CHECK(1)
+ MV_EDGE_CHECK(2)
+
+ mb->partitioning = VP8_SPLITMVMODE_NONE;
+ if (vpx_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
+ mb->mode = VP8_MVMODE_MV;
+
+ /* If we have three distinct MVs, merge first and last if they're the same */
+ if (cnt[CNT_SPLITMV] &&
+ AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
+ cnt[CNT_NEAREST] += 1;
+
+ /* Swap near and nearest if necessary */
+ if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
+ FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
+ FFSWAP(VP8mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
+ }
+
+ if (vpx_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
+ if (vpx_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
+ /* Choose the best mv out of 0,0 and the nearest mv */
+ clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
+ cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
+ (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
+ (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
+
+ if (vpx_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
+ mb->mode = VP8_MVMODE_SPLIT;
+ mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
+ } else {
+ mb->mv.y += vp8_read_mv_component(c, s->prob->mvc[0]);
+ mb->mv.x += vp8_read_mv_component(c, s->prob->mvc[1]);
+ mb->bmv[0] = mb->mv;
+ }
+ } else {
+ clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAR]);
+ mb->bmv[0] = mb->mv;
+ }
+ } else {
+ clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAREST]);
+ mb->bmv[0] = mb->mv;
+ }
+ } else {
+ mb->mode = VP8_MVMODE_ZERO;
+ AV_ZERO32(&mb->mv);
+ mb->bmv[0] = mb->mv;
+ }
+}
+
+static av_always_inline
+void decode_intra4x4_modes(VP8Context *s, VPXRangeCoder *c, VP8Macroblock *mb,
+ int mb_x, int keyframe, int layout)
+{
+ uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
+
+ if (layout) {
+ VP8Macroblock *mb_top = mb - s->mb_width - 1;
+ memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
+ }
+ if (keyframe) {
+ int x, y;
+ uint8_t *top;
+ uint8_t *const left = s->intra4x4_pred_mode_left;
+ if (layout)
+ top = mb->intra4x4_pred_mode_top;
+ else
+ top = s->intra4x4_pred_mode_top + 4 * mb_x;
+ for (y = 0; y < 4; y++) {
+ for (x = 0; x < 4; x++) {
+ const uint8_t *ctx;
+ ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
+ *intra4x4 = vp89_rac_get_tree(c, vp8_pred4x4_tree, ctx);
+ left[y] = top[x] = *intra4x4;
+ intra4x4++;
+ }
+ }
+ } else {
+ int i;
+ for (i = 0; i < 16; i++)
+ intra4x4[i] = vp89_rac_get_tree(c, vp8_pred4x4_tree,
+ vp8_pred4x4_prob_inter);
+ }
+}
+
+static av_always_inline
+void decode_mb_mode(VP8Context *s, const VP8mvbounds *mv_bounds,
+ VP8Macroblock *mb, int mb_x, int mb_y,
+ uint8_t *segment, const uint8_t *ref, int layout, int is_vp7)
+{
+ VPXRangeCoder *c = &s->c;
+ static const char * const vp7_feature_name[] = { "q-index",
+ "lf-delta",
+ "partial-golden-update",
+ "blit-pitch" };
+ if (is_vp7) {
+ int i;
+ *segment = 0;
+ for (i = 0; i < 4; i++) {
+ if (s->feature_enabled[i]) {
+ if (vpx_rac_get_prob_branchy(c, s->feature_present_prob[i])) {
+ int index = vp89_rac_get_tree(c, vp7_feature_index_tree,
+ s->feature_index_prob[i]);
+ av_log(s->avctx, AV_LOG_WARNING,
+ "Feature %s present in macroblock (value 0x%x)\n",
+ vp7_feature_name[i], s->feature_value[i][index]);
+ }
+ }
+ }
+ } else if (s->segmentation.update_map) {
+ int bit = vpx_rac_get_prob(c, s->prob->segmentid[0]);
+ *segment = vpx_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
+ } else if (s->segmentation.enabled)
+ *segment = ref ? *ref : *segment;
+ mb->segment = *segment;
+
+ mb->skip = s->mbskip_enabled ? vpx_rac_get_prob(c, s->prob->mbskip) : 0;
+
+ if (s->keyframe) {
+ mb->mode = vp89_rac_get_tree(c, vp8_pred16x16_tree_intra,
+ vp8_pred16x16_prob_intra);
+
+ if (mb->mode == MODE_I4x4) {
+ decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
+ } else {
+ const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
+ : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
+ if (s->mb_layout)
+ AV_WN32A(mb->intra4x4_pred_mode_top, modes);
+ else
+ AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
+ AV_WN32A(s->intra4x4_pred_mode_left, modes);
+ }
+
+ mb->chroma_pred_mode = vp89_rac_get_tree(c, vp8_pred8x8c_tree,
+ vp8_pred8x8c_prob_intra);
+ mb->ref_frame = VP8_FRAME_CURRENT;
+ } else if (vpx_rac_get_prob_branchy(c, s->prob->intra)) {
+ // inter MB, 16.2
+ if (vpx_rac_get_prob_branchy(c, s->prob->last))
+ mb->ref_frame =
+ (!is_vp7 && vpx_rac_get_prob(c, s->prob->golden)) ? VP8_FRAME_ALTREF
+ : VP8_FRAME_GOLDEN;
+ else
+ mb->ref_frame = VP8_FRAME_PREVIOUS;
+ s->ref_count[mb->ref_frame - 1]++;
+
+ // motion vectors, 16.3
+ if (is_vp7)
+ vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
+ else
+ vp8_decode_mvs(s, mv_bounds, mb, mb_x, mb_y, layout);
+ } else {
+ // intra MB, 16.1
+ mb->mode = vp89_rac_get_tree(c, vp8_pred16x16_tree_inter,
+ s->prob->pred16x16);
+
+ if (mb->mode == MODE_I4x4)
+ decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
+
+ mb->chroma_pred_mode = vp89_rac_get_tree(c, vp8_pred8x8c_tree,
+ s->prob->pred8x8c);
+ mb->ref_frame = VP8_FRAME_CURRENT;
+ mb->partitioning = VP8_SPLITMVMODE_NONE;
+ AV_ZERO32(&mb->bmv[0]);
+ }
+}
+
+/**
+ * @param r arithmetic bitstream reader context
+ * @param block destination for block coefficients
+ * @param probs probabilities to use when reading trees from the bitstream
+ * @param i initial coeff index, 0 unless a separate DC block is coded
+ * @param qmul array holding the dc/ac dequant factor at position 0/1
+ *
+ * @return 0 if no coeffs were decoded
+ * otherwise, the index of the last coeff decoded plus one
+ */
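+/* Token ranges as decoded below: DCT_EOB ends the block, DCT_0 codes a
+ * zero, DCT_1 a +/-1, the next branch covers magnitudes 2..4, DCT_CAT1 and
+ * DCT_CAT2 cover 5..6 and 7..10, and the higher categories start at
+ * 3 + (8 << cat) = 11, 19, 35 and 67, with the remaining magnitude bits
+ * read via ff_vp8_dct_cat_prob[].  The sign is read last and the value is
+ * scaled by the DC (i == 0) or AC dequant factor from qmul[]. */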
+static av_always_inline
+int decode_block_coeffs_internal(VPXRangeCoder *r, int16_t block[16],
+ uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
+ int i, const uint8_t *token_prob, const int16_t qmul[2],
+ const uint8_t scan[16], int vp7)
+{
+ VPXRangeCoder c = *r;
+ goto skip_eob;
+ do {
+ int coeff;
+restart:
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
+ break;
+
+skip_eob:
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
+ if (++i == 16)
+ break; // invalid input; blocks should end with EOB
+ token_prob = probs[i][0];
+ if (vp7)
+ goto restart;
+ goto skip_eob;
+ }
+
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
+ coeff = 1;
+ token_prob = probs[i + 1][1];
+ } else {
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
+ coeff = vpx_rac_get_prob_branchy(&c, token_prob[4]);
+ if (coeff)
+ coeff += vpx_rac_get_prob(&c, token_prob[5]);
+ coeff += 2;
+ } else {
+ // DCT_CAT*
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[6])) {
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
+ coeff = 5 + vpx_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
+ } else { // DCT_CAT2
+ coeff = 7;
+ coeff += vpx_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
+ coeff += vpx_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
+ }
+ } else { // DCT_CAT3 and up
+ int a = vpx_rac_get_prob(&c, token_prob[8]);
+ int b = vpx_rac_get_prob(&c, token_prob[9 + a]);
+ int cat = (a << 1) + b;
+ coeff = 3 + (8 << cat);
+ coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
+ }
+ }
+ token_prob = probs[i + 1][2];
+ }
+ block[scan[i]] = (vp89_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
+ } while (++i < 16);
+
+ *r = c;
+ return i;
+}
+
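+/* VP7 inter DC prediction, as implemented below: pred[0] holds the last DC
+ * value seen for this reference frame and pred[1] counts how many times in
+ * a row it has repeated.  Once the same DC has occurred more than three
+ * times, the coded DC is treated as a delta against pred[0]; a zero DC or
+ * a sign change resets the run. */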
+static av_always_inline
+int inter_predict_dc(int16_t block[16], int16_t pred[2])
+{
+ int16_t dc = block[0];
+ int ret = 0;
+
+ if (pred[1] > 3) {
+ dc += pred[0];
+ ret = 1;
+ }
+
+ if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
+ block[0] = pred[0] = dc;
+ pred[1] = 0;
+ } else {
+ if (pred[0] == dc)
+ pred[1]++;
+ block[0] = pred[0] = dc;
+ }
+
+ return ret;
+}
+
+static int vp7_decode_block_coeffs_internal(VPXRangeCoder *r,
+ int16_t block[16],
+ uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
+ int i, const uint8_t *token_prob,
+ const int16_t qmul[2],
+ const uint8_t scan[16])
+{
+ return decode_block_coeffs_internal(r, block, probs, i,
+ token_prob, qmul, scan, IS_VP7);
+}
+
+#ifndef vp8_decode_block_coeffs_internal
+static int vp8_decode_block_coeffs_internal(VPXRangeCoder *r,
+ int16_t block[16],
+ uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
+ int i, const uint8_t *token_prob,
+ const int16_t qmul[2])
+{
+ return decode_block_coeffs_internal(r, block, probs, i,
+ token_prob, qmul, ff_zigzag_scan, IS_VP8);
+}
+#endif
+
+/**
+ * @param c arithmetic bitstream reader context
+ * @param block destination for block coefficients
+ * @param probs probabilities to use when reading trees from the bitstream
+ * @param i initial coeff index, 0 unless a separate DC block is coded
+ * @param zero_nhood the initial prediction context for number of surrounding
+ * all-zero blocks (only left/top, so 0-2)
+ * @param qmul array holding the dc/ac dequant factor at position 0/1
+ * @param scan scan pattern (VP7 only)
+ *
+ * @return 0 if no coeffs were decoded
+ * otherwise, the index of the last coeff decoded plus one
+ */
+static av_always_inline
+int decode_block_coeffs(VPXRangeCoder *c, int16_t block[16],
+ uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
+ int i, int zero_nhood, const int16_t qmul[2],
+ const uint8_t scan[16], int vp7)
+{
+ const uint8_t *token_prob = probs[i][zero_nhood];
+ if (!vpx_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
+ return 0;
+ return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
+ token_prob, qmul, scan)
+ : vp8_decode_block_coeffs_internal(c, block, probs, i,
+ token_prob, qmul);
+}
+
+static av_always_inline
+void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VPXRangeCoder *c,
+ VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
+ int is_vp7)
+{
+ int i, x, y, luma_start = 0, luma_ctx = 3;
+ int nnz_pred, nnz, nnz_total = 0;
+ int segment = mb->segment;
+ int block_dc = 0;
+
+ if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
+ nnz_pred = t_nnz[8] + l_nnz[8];
+
+ // decode DC values and do hadamard
+ nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
+ nnz_pred, s->qmat[segment].luma_dc_qmul,
+ ff_zigzag_scan, is_vp7);
+ l_nnz[8] = t_nnz[8] = !!nnz;
+
+ if (is_vp7 && mb->mode > MODE_I4x4) {
+ nnz |= inter_predict_dc(td->block_dc,
+ s->inter_dc_pred[mb->ref_frame - 1]);
+ }
+
+ if (nnz) {
+ nnz_total += nnz;
+ block_dc = 1;
+ if (nnz == 1)
+ s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
+ else
+ s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
+ }
+ luma_start = 1;
+ luma_ctx = 0;
+ }
+
+ // luma blocks
+ for (y = 0; y < 4; y++)
+ for (x = 0; x < 4; x++) {
+ nnz_pred = l_nnz[y] + t_nnz[x];
+ nnz = decode_block_coeffs(c, td->block[y][x],
+ s->prob->token[luma_ctx],
+ luma_start, nnz_pred,
+ s->qmat[segment].luma_qmul,
+ s->prob[0].scan, is_vp7);
+ /* nnz+block_dc may be one more than the actual last index,
+ * but we don't care */
+ td->non_zero_count_cache[y][x] = nnz + block_dc;
+ t_nnz[x] = l_nnz[y] = !!nnz;
+ nnz_total += nnz;
+ }
+
+ // chroma blocks
+ // TODO: what to do about dimensions? 2nd dim for luma is x,
+ // but for chroma it's (y<<1)|x
+ for (i = 4; i < 6; i++)
+ for (y = 0; y < 2; y++)
+ for (x = 0; x < 2; x++) {
+ nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
+ nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
+ s->prob->token[2], 0, nnz_pred,
+ s->qmat[segment].chroma_qmul,
+ s->prob[0].scan, is_vp7);
+ td->non_zero_count_cache[i][(y << 1) + x] = nnz;
+ t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
+ nnz_total += nnz;
+ }
+
+ // If there were no coded coeffs despite the macroblock not being marked as
+ // skipped, we MUST not run the inner loop filter and should not do the IDCT.
+ // Since skip isn't used for bitstream prediction, just set it manually.
+ if (!nnz_total)
+ mb->skip = 1;
+}
+
+static av_always_inline
+void backup_mb_border(uint8_t *top_border, const uint8_t *src_y,
+ const uint8_t *src_cb, const uint8_t *src_cr,
+ ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
+{
+ AV_COPY128(top_border, src_y + 15 * linesize);
+ if (!simple) {
+ AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
+ AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
+ }
+}
+
+static av_always_inline
+void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
+ uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x,
+ int mb_y, int mb_width, int simple, int xchg)
+{
+ uint8_t *top_border_m1 = top_border - 32; // for TL prediction
+ src_y -= linesize;
+ src_cb -= uvlinesize;
+ src_cr -= uvlinesize;
+
+#define XCHG(a, b, xchg) \
+ do { \
+ if (xchg) \
+ AV_SWAP64(b, a); \
+ else \
+ AV_COPY64(b, a); \
+ } while (0)
+
+ XCHG(top_border_m1 + 8, src_y - 8, xchg);
+ XCHG(top_border, src_y, xchg);
+ XCHG(top_border + 8, src_y + 8, 1);
+ if (mb_x < mb_width - 1)
+ XCHG(top_border + 32, src_y + 16, 1);
+
+ // only copy chroma for normal loop filter
+ // or to initialize the top row to 127
+ if (!simple || !mb_y) {
+ XCHG(top_border_m1 + 16, src_cb - 8, xchg);
+ XCHG(top_border_m1 + 24, src_cr - 8, xchg);
+ XCHG(top_border + 16, src_cb, 1);
+ XCHG(top_border + 24, src_cr, 1);
+ }
+}
+
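+/* The check_*_pred*_mode helpers below remap intra prediction modes for
+ * macroblocks on the frame border, where the left and/or top neighbours do
+ * not exist: such modes fall back to DC variants filled with the implicit
+ * border values (127/129 for VP8, 128 for VP7), matching the lo/hi
+ * constants used for the 4x4 copy path in intra_predict(). */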
+static av_always_inline
+int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
+{
+ if (!mb_x)
+ return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
+ else
+ return mb_y ? mode : LEFT_DC_PRED8x8;
+}
+
+static av_always_inline
+int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
+{
+ if (!mb_x)
+ return mb_y ? VERT_PRED8x8 : (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8);
+ else
+ return mb_y ? mode : HOR_PRED8x8;
+}
+
+static av_always_inline
+int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
+{
+ switch (mode) {
+ case DC_PRED8x8:
+ return check_dc_pred8x8_mode(mode, mb_x, mb_y);
+ case VERT_PRED8x8:
+ return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
+ case HOR_PRED8x8:
+ return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
+ case PLANE_PRED8x8: /* TM */
+ return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
+ }
+ return mode;
+}
+
+static av_always_inline
+int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
+{
+ if (!mb_x) {
+ return mb_y ? VERT_VP8_PRED : (vp7 ? DC_128_PRED : DC_129_PRED);
+ } else {
+ return mb_y ? mode : HOR_VP8_PRED;
+ }
+}
+
+static av_always_inline
+int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
+ int *copy_buf, int vp7)
+{
+ switch (mode) {
+ case VERT_PRED:
+ if (!mb_x && mb_y) {
+ *copy_buf = 1;
+ return mode;
+ }
+ /* fall-through */
+ case DIAG_DOWN_LEFT_PRED:
+ case VERT_LEFT_PRED:
+ return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
+ case HOR_PRED:
+ if (!mb_y) {
+ *copy_buf = 1;
+ return mode;
+ }
+ /* fall-through */
+ case HOR_UP_PRED:
+ return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
+ case TM_VP8_PRED:
+ return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
+ case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
+ * as 16x16/8x8 DC */
+ case DIAG_DOWN_RIGHT_PRED:
+ case VERT_RIGHT_PRED:
+ case HOR_DOWN_PRED:
+ if (!mb_y || !mb_x)
+ *copy_buf = 1;
+ return mode;
+ }
+ return mode;
+}
+
+static av_always_inline
+void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *const dst[3],
+ VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
+{
+ int x, y, mode, nnz;
+ uint32_t tr;
+
+ /* For the first row we need to run xchg_mb_border to init the top edge to
+ * 127; otherwise, skip it if we aren't going to deblock. */
+ if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
+ xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
+ s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
+ s->filter.simple, 1);
+
+ if (mb->mode < MODE_I4x4) {
+ mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
+ s->hpc.pred16x16[mode](dst[0], s->linesize);
+ } else {
+ uint8_t *ptr = dst[0];
+ const uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
+ const uint8_t lo = is_vp7 ? 128 : 127;
+ const uint8_t hi = is_vp7 ? 128 : 129;
+ const uint8_t tr_top[4] = { lo, lo, lo, lo };
+
+ // all blocks on the right edge of the macroblock use the bottom edge of
+ // the top macroblock for their topright edge
+ const uint8_t *tr_right = ptr - s->linesize + 16;
+
+ // if we're on the right edge of the frame, said edge is extended
+ // from the top macroblock
+ if (mb_y && mb_x == s->mb_width - 1) {
+ tr = tr_right[-1] * 0x01010101u;
+ tr_right = (uint8_t *) &tr;
+ }
+
+ if (mb->skip)
+ AV_ZERO128(td->non_zero_count_cache);
+
+ for (y = 0; y < 4; y++) {
+ const uint8_t *topright = ptr + 4 - s->linesize;
+ for (x = 0; x < 4; x++) {
+ int copy = 0;
+ ptrdiff_t linesize = s->linesize;
+ uint8_t *dst = ptr + 4 * x;
+ LOCAL_ALIGNED(4, uint8_t, copy_dst, [5 * 8]);
+
+ if ((y == 0 || x == 3) && mb_y == 0) {
+ topright = tr_top;
+ } else if (x == 3)
+ topright = tr_right;
+
+ mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
+ mb_y + y, &copy, is_vp7);
+ if (copy) {
+ dst = copy_dst + 12;
+ linesize = 8;
+ if (!(mb_y + y)) {
+ copy_dst[3] = lo;
+ AV_WN32A(copy_dst + 4, lo * 0x01010101U);
+ } else {
+ AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
+ if (!(mb_x + x)) {
+ copy_dst[3] = hi;
+ } else {
+ copy_dst[3] = ptr[4 * x - s->linesize - 1];
+ }
+ }
+ if (!(mb_x + x)) {
+ copy_dst[11] =
+ copy_dst[19] =
+ copy_dst[27] =
+ copy_dst[35] = hi;
+ } else {
+ copy_dst[11] = ptr[4 * x - 1];
+ copy_dst[19] = ptr[4 * x + s->linesize - 1];
+ copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
+ copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
+ }
+ }
+ s->hpc.pred4x4[mode](dst, topright, linesize);
+ if (copy) {
+ AV_COPY32(ptr + 4 * x, copy_dst + 12);
+ AV_COPY32(ptr + 4 * x + s->linesize, copy_dst + 20);
+ AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
+ AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
+ }
+
+ nnz = td->non_zero_count_cache[y][x];
+ if (nnz) {
+ if (nnz == 1)
+ s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
+ td->block[y][x], s->linesize);
+ else
+ s->vp8dsp.vp8_idct_add(ptr + 4 * x,
+ td->block[y][x], s->linesize);
+ }
+ topright += 4;
+ }
+
+ ptr += 4 * s->linesize;
+ intra4x4 += 4;
+ }
+ }
+
+ mode = check_intra_pred8x8_mode_emuedge(mb->chroma_pred_mode,
+ mb_x, mb_y, is_vp7);
+ s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
+ s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
+
+ if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
+ xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
+ s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
+ s->filter.simple, 0);
+}
+
+static const uint8_t subpel_idx[3][8] = {
+ { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
+ // also function pointer index
+ { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
+ { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
+};
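+/* Reading the table above: the index is the subpel position (0..7).  A
+ * first-row value of 0 selects the plain copy function, 1 the filters that
+ * only need 1 left + 2 right extra pixels and 2 those needing 2 left + 3
+ * right (presumably the 4- and 6-tap cases of the sixtap filter set; the
+ * bilinear functions need less, so this is the conservative worst case).
+ * The second row is just the sum of the other two, i.e. the total number
+ * of extra source pixels for edge emulation. */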
+
+/**
+ * luma MC function
+ *
+ * @param s VP8 decoding context
+ * @param dst target buffer for block data at block position
+ * @param ref reference picture buffer at origin (0, 0)
+ * @param mv motion vector (relative to block position) to get pixel data from
+ * @param x_off horizontal position of block from origin (0, 0)
+ * @param y_off vertical position of block from origin (0, 0)
+ * @param block_w width of block (16, 8 or 4)
+ * @param block_h height of block (always same as block_w)
+ * @param width width of src/dst plane data
+ * @param height height of src/dst plane data
+ * @param linesize size of a single line of plane data, including padding
+ * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
+ */
+static av_always_inline
+void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
+ const ThreadFrame *ref, const VP8mv *mv,
+ int x_off, int y_off, int block_w, int block_h,
+ int width, int height, ptrdiff_t linesize,
+ vp8_mc_func mc_func[3][3])
+{
+ const uint8_t *src = ref->f->data[0];
+
+ if (AV_RN32A(mv)) {
+ ptrdiff_t src_linesize = linesize;
+
+ int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
+ int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];
+
+ x_off += mv->x >> 2;
+ y_off += mv->y >> 2;
+
+ // edge emulation
+ ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
+ src += y_off * linesize + x_off;
+ if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
+ y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
+ s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
+ src - my_idx * linesize - mx_idx,
+ EDGE_EMU_LINESIZE, linesize,
+ block_w + subpel_idx[1][mx],
+ block_h + subpel_idx[1][my],
+ x_off - mx_idx, y_off - my_idx,
+ width, height);
+ src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
+ src_linesize = EDGE_EMU_LINESIZE;
+ }
+ mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
+ } else {
+ ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
+ mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
+ linesize, block_h, 0, 0);
+ }
+}
+
+/**
+ * chroma MC function
+ *
+ * @param s VP8 decoding context
+ * @param dst1 target buffer for block data at block position (U plane)
+ * @param dst2 target buffer for block data at block position (V plane)
+ * @param ref reference picture buffer at origin (0, 0)
+ * @param mv motion vector (relative to block position) to get pixel data from
+ * @param x_off horizontal position of block from origin (0, 0)
+ * @param y_off vertical position of block from origin (0, 0)
+ * @param block_w width of block (16, 8 or 4)
+ * @param block_h height of block (always same as block_w)
+ * @param width width of src/dst plane data
+ * @param height height of src/dst plane data
+ * @param linesize size of a single line of plane data, including padding
+ * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
+ */
+static av_always_inline
+void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1,
+ uint8_t *dst2, const ThreadFrame *ref, const VP8mv *mv,
+ int x_off, int y_off, int block_w, int block_h,
+ int width, int height, ptrdiff_t linesize,
+ vp8_mc_func mc_func[3][3])
+{
+ const uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
+
+ if (AV_RN32A(mv)) {
+ int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
+ int my = mv->y & 7, my_idx = subpel_idx[0][my];
+
+ x_off += mv->x >> 3;
+ y_off += mv->y >> 3;
+
+ // edge emulation
+ src1 += y_off * linesize + x_off;
+ src2 += y_off * linesize + x_off;
+ ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
+ if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
+ y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
+ s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
+ src1 - my_idx * linesize - mx_idx,
+ EDGE_EMU_LINESIZE, linesize,
+ block_w + subpel_idx[1][mx],
+ block_h + subpel_idx[1][my],
+ x_off - mx_idx, y_off - my_idx, width, height);
+ src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
+ mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
+
+ s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
+ src2 - my_idx * linesize - mx_idx,
+ EDGE_EMU_LINESIZE, linesize,
+ block_w + subpel_idx[1][mx],
+ block_h + subpel_idx[1][my],
+ x_off - mx_idx, y_off - my_idx, width, height);
+ src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
+ mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
+ } else {
+ mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
+ mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
+ }
+ } else {
+ ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
+ mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
+ mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
+ }
+}
+
+static av_always_inline
+void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *const dst[3],
+ const ThreadFrame *ref_frame, int x_off, int y_off,
+ int bx_off, int by_off, int block_w, int block_h,
+ int width, int height, const VP8mv *mv)
+{
+ VP8mv uvmv = *mv;
+
+ /* Y */
+ vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
+ ref_frame, mv, x_off + bx_off, y_off + by_off,
+ block_w, block_h, width, height, s->linesize,
+ s->put_pixels_tab[block_w == 8]);
+
+ /* U/V */
+ if (s->profile == 3) {
+ /* this block only applies to VP8; it is safe to check only
+ * the profile, as VP7 profile <= 1 */
+ uvmv.x &= ~7;
+ uvmv.y &= ~7;
+ }
+ x_off >>= 1;
+ y_off >>= 1;
+ bx_off >>= 1;
+ by_off >>= 1;
+ width >>= 1;
+ height >>= 1;
+ block_w >>= 1;
+ block_h >>= 1;
+ vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
+ dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
+ &uvmv, x_off + bx_off, y_off + by_off,
+ block_w, block_h, width, height, s->uvlinesize,
+ s->put_pixels_tab[1 + (block_w == 4)]);
+}
+
+/* Fetch pixels for estimated mv 4 macroblocks ahead.
+ * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
+static av_always_inline
+void prefetch_motion(const VP8Context *s, const VP8Macroblock *mb,
+ int mb_x, int mb_y, int mb_xy, int ref)
+{
+ /* Don't prefetch refs that haven't been used very often this frame. */
+ if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
+ int x_off = mb_x << 4, y_off = mb_y << 4;
+ int mx = (mb->mv.x >> 2) + x_off + 8;
+ int my = (mb->mv.y >> 2) + y_off;
+ uint8_t **src = s->framep[ref]->tf.f->data;
+ int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
+ /* For threading, a ff_thread_await_progress here might be useful, but
+ * it actually slows down the decoder. Since a bad prefetch doesn't
+ * generate bad decoder output, we don't run it here. */
+ s->vdsp.prefetch(src[0] + off, s->linesize, 4);
+ off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
+ s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
+ }
+}
+
+/**
+ * Apply motion vectors to prediction buffer, chapter 18.
+ */
+static av_always_inline
+void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *const dst[3],
+ VP8Macroblock *mb, int mb_x, int mb_y)
+{
+ int x_off = mb_x << 4, y_off = mb_y << 4;
+ int width = 16 * s->mb_width, height = 16 * s->mb_height;
+ const ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
+ const VP8mv *bmv = mb->bmv;
+
+ switch (mb->partitioning) {
+ case VP8_SPLITMVMODE_NONE:
+ vp8_mc_part(s, td, dst, ref, x_off, y_off,
+ 0, 0, 16, 16, width, height, &mb->mv);
+ break;
+ case VP8_SPLITMVMODE_4x4: {
+ int x, y;
+ VP8mv uvmv;
+
+ /* Y */
+ for (y = 0; y < 4; y++) {
+ for (x = 0; x < 4; x++) {
+ vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
+ ref, &bmv[4 * y + x],
+ 4 * x + x_off, 4 * y + y_off, 4, 4,
+ width, height, s->linesize,
+ s->put_pixels_tab[2]);
+ }
+ }
+
+ /* U/V */
+ x_off >>= 1;
+ y_off >>= 1;
+ width >>= 1;
+ height >>= 1;
+ for (y = 0; y < 2; y++) {
+ for (x = 0; x < 2; x++) {
+ uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
+ mb->bmv[2 * y * 4 + 2 * x + 1].x +
+ mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
+ mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
+ uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
+ mb->bmv[2 * y * 4 + 2 * x + 1].y +
+ mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
+ mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
+ uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
+ uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
+ if (s->profile == 3) {
+ uvmv.x &= ~7;
+ uvmv.y &= ~7;
+ }
+ vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
+ dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
+ &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
+ width, height, s->uvlinesize,
+ s->put_pixels_tab[2]);
+ }
+ }
+ break;
+ }
+ case VP8_SPLITMVMODE_16x8:
+ vp8_mc_part(s, td, dst, ref, x_off, y_off,
+ 0, 0, 16, 8, width, height, &bmv[0]);
+ vp8_mc_part(s, td, dst, ref, x_off, y_off,
+ 0, 8, 16, 8, width, height, &bmv[1]);
+ break;
+ case VP8_SPLITMVMODE_8x16:
+ vp8_mc_part(s, td, dst, ref, x_off, y_off,
+ 0, 0, 8, 16, width, height, &bmv[0]);
+ vp8_mc_part(s, td, dst, ref, x_off, y_off,
+ 8, 0, 8, 16, width, height, &bmv[1]);
+ break;
+ case VP8_SPLITMVMODE_8x8:
+ vp8_mc_part(s, td, dst, ref, x_off, y_off,
+ 0, 0, 8, 8, width, height, &bmv[0]);
+ vp8_mc_part(s, td, dst, ref, x_off, y_off,
+ 8, 0, 8, 8, width, height, &bmv[1]);
+ vp8_mc_part(s, td, dst, ref, x_off, y_off,
+ 0, 8, 8, 8, width, height, &bmv[2]);
+ vp8_mc_part(s, td, dst, ref, x_off, y_off,
+ 8, 8, 8, 8, width, height, &bmv[3]);
+ break;
+ }
+}
+
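+/* idct_mb() loads the four per-block non-zero counts of a row as one
+ * 32-bit value (nnz4): if every byte is 0 or 1, only DC coefficients are
+ * present and the combined vp8_idct_dc_add4y/4uv fast path is used;
+ * otherwise each 4x4 block gets either the DC-only add or the full
+ * IDCT-and-add. */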
+static av_always_inline
+void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *const dst[3],
+ const VP8Macroblock *mb)
+{
+ int x, y, ch;
+
+ if (mb->mode != MODE_I4x4) {
+ uint8_t *y_dst = dst[0];
+ for (y = 0; y < 4; y++) {
+ uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
+ if (nnz4) {
+ if (nnz4 & ~0x01010101) {
+ for (x = 0; x < 4; x++) {
+ if ((uint8_t) nnz4 == 1)
+ s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
+ td->block[y][x],
+ s->linesize);
+ else if ((uint8_t) nnz4 > 1)
+ s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
+ td->block[y][x],
+ s->linesize);
+ nnz4 >>= 8;
+ if (!nnz4)
+ break;
+ }
+ } else {
+ s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
+ }
+ }
+ y_dst += 4 * s->linesize;
+ }
+ }
+
+ for (ch = 0; ch < 2; ch++) {
+ uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
+ if (nnz4) {
+ uint8_t *ch_dst = dst[1 + ch];
+ if (nnz4 & ~0x01010101) {
+ for (y = 0; y < 2; y++) {
+ for (x = 0; x < 2; x++) {
+ if ((uint8_t) nnz4 == 1)
+ s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
+ td->block[4 + ch][(y << 1) + x],
+ s->uvlinesize);
+ else if ((uint8_t) nnz4 > 1)
+ s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
+ td->block[4 + ch][(y << 1) + x],
+ s->uvlinesize);
+ nnz4 >>= 8;
+ if (!nnz4)
+ goto chroma_idct_end;
+ }
+ ch_dst += 4 * s->uvlinesize;
+ }
+ } else {
+ s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
+ }
+ }
+chroma_idct_end:
+ ;
+ }
+}
+
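+/* Per-macroblock loop filter strength: start from the segment's filter
+ * level (absolute, or relative to the frame level), add the per-reference
+ * and per-mode loop filter deltas when enabled, clamp to 0..63, then derive
+ * the interior limit by shrinking the level according to the sharpness
+ * setting.  E.g. filter_level = 32 with sharpness 4 gives
+ * interior_limit = FFMIN(32 >> 1, 9 - 4) = 5. */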
+static av_always_inline
+void filter_level_for_mb(const VP8Context *s, const VP8Macroblock *mb,
+ VP8FilterStrength *f, int is_vp7)
+{
+ int interior_limit, filter_level;
+
+ if (s->segmentation.enabled) {
+ filter_level = s->segmentation.filter_level[mb->segment];
+ if (!s->segmentation.absolute_vals)
+ filter_level += s->filter.level;
+ } else
+ filter_level = s->filter.level;
+
+ if (s->lf_delta.enabled) {
+ filter_level += s->lf_delta.ref[mb->ref_frame];
+ filter_level += s->lf_delta.mode[mb->mode];
+ }
+
+ filter_level = av_clip_uintp2(filter_level, 6);
+
+ interior_limit = filter_level;
+ if (s->filter.sharpness) {
+ interior_limit >>= (s->filter.sharpness + 3) >> 2;
+ interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
+ }
+ interior_limit = FFMAX(interior_limit, 1);
+
+ f->filter_level = filter_level;
+ f->inner_limit = interior_limit;
+ f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
+ mb->mode == VP8_MVMODE_SPLIT;
+}
+
+static av_always_inline
+void filter_mb(const VP8Context *s, uint8_t *const dst[3], const VP8FilterStrength *f,
+ int mb_x, int mb_y, int is_vp7)
+{
+ int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
+ int filter_level = f->filter_level;
+ int inner_limit = f->inner_limit;
+ int inner_filter = f->inner_filter;
+ ptrdiff_t linesize = s->linesize;
+ ptrdiff_t uvlinesize = s->uvlinesize;
+ static const uint8_t hev_thresh_lut[2][64] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2 }
+ };
+
+ if (!filter_level)
+ return;
+
+ if (is_vp7) {
+ bedge_lim_y = filter_level;
+ bedge_lim_uv = filter_level * 2;
+ mbedge_lim = filter_level + 2;
+ } else {
+ bedge_lim_y =
+ bedge_lim_uv = filter_level * 2 + inner_limit;
+ mbedge_lim = bedge_lim_y + 4;
+ }
+
+ hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
+
+ if (mb_x) {
+ s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
+ mbedge_lim, inner_limit, hev_thresh);
+ s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
+ mbedge_lim, inner_limit, hev_thresh);
+ }
+
+#define H_LOOP_FILTER_16Y_INNER(cond) \
+ if (cond && inner_filter) { \
+ s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
+ bedge_lim_y, inner_limit, \
+ hev_thresh); \
+ s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
+ bedge_lim_y, inner_limit, \
+ hev_thresh); \
+ s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
+ bedge_lim_y, inner_limit, \
+ hev_thresh); \
+ s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
+ uvlinesize, bedge_lim_uv, \
+ inner_limit, hev_thresh); \
+ }
+
+ H_LOOP_FILTER_16Y_INNER(!is_vp7)
+
+ if (mb_y) {
+ s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
+ mbedge_lim, inner_limit, hev_thresh);
+ s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
+ mbedge_lim, inner_limit, hev_thresh);
+ }
+
+ if (inner_filter) {
+ s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
+ linesize, bedge_lim_y,
+ inner_limit, hev_thresh);
+ s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
+ linesize, bedge_lim_y,
+ inner_limit, hev_thresh);
+ s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
+ linesize, bedge_lim_y,
+ inner_limit, hev_thresh);
+ s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
+ dst[2] + 4 * uvlinesize,
+ uvlinesize, bedge_lim_uv,
+ inner_limit, hev_thresh);
+ }
+
+ H_LOOP_FILTER_16Y_INNER(is_vp7)
+}
+
+static av_always_inline
+void filter_mb_simple(const VP8Context *s, uint8_t *dst, const VP8FilterStrength *f,
+ int mb_x, int mb_y)
+{
+ int mbedge_lim, bedge_lim;
+ int filter_level = f->filter_level;
+ int inner_limit = f->inner_limit;
+ int inner_filter = f->inner_filter;
+ ptrdiff_t linesize = s->linesize;
+
+ if (!filter_level)
+ return;
+
+ bedge_lim = 2 * filter_level + inner_limit;
+ mbedge_lim = bedge_lim + 4;
+
+ if (mb_x)
+ s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
+ if (inner_filter) {
+ s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
+ s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
+ s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
+ }
+
+ if (mb_y)
+ s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
+ if (inner_filter) {
+ s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
+ s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
+ s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
+ }
+}
+
+#define MARGIN (16 << 2)
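+/* MARGIN is one macroblock (16 pixels) in quarter-pel units, so the
+ * mv_min/mv_max bounds below let motion vectors reach at most one
+ * macroblock beyond the frame edges; ((s->mb_height - 1) << 6) is the
+ * quarter-pel offset of the last macroblock row, and the bounds slide by
+ * 64 (one macroblock) per row/column as decoding advances. */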
+static av_always_inline
+int vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
+ const VP8Frame *prev_frame, int is_vp7)
+{
+ VP8Context *s = avctx->priv_data;
+ int mb_x, mb_y;
+
+ s->mv_bounds.mv_min.y = -MARGIN;
+ s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
+ for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
+ VP8Macroblock *mb = s->macroblocks_base +
+ ((s->mb_width + 1) * (mb_y + 1) + 1);
+ int mb_xy = mb_y * s->mb_width;
+
+ AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
+
+ s->mv_bounds.mv_min.x = -MARGIN;
+ s->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
+
+ for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
+ if (vpx_rac_is_end(&s->c)) {
+ return AVERROR_INVALIDDATA;
+ }
+ if (mb_y == 0)
+ AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
+ DC_PRED * 0x01010101);
+ decode_mb_mode(s, &s->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
+ prev_frame && prev_frame->seg_map ?
+ prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
+ s->mv_bounds.mv_min.x -= 64;
+ s->mv_bounds.mv_max.x -= 64;
+ }
+ s->mv_bounds.mv_min.y -= 64;
+ s->mv_bounds.mv_max.y -= 64;
+ }
+ return 0;
+}
+
+static int vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
+ const VP8Frame *prev_frame)
+{
+ return vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
+}
+
+static int vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
+ const VP8Frame *prev_frame)
+{
+ return vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
+}
+
+#if HAVE_THREADS
+#define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
+ do { \
+ int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
+ if (atomic_load(&otd->thread_mb_pos) < tmp) { \
+ pthread_mutex_lock(&otd->lock); \
+ atomic_store(&td->wait_mb_pos, tmp); \
+ do { \
+ if (atomic_load(&otd->thread_mb_pos) >= tmp) \
+ break; \
+ pthread_cond_wait(&otd->cond, &otd->lock); \
+ } while (1); \
+ atomic_store(&td->wait_mb_pos, INT_MAX); \
+ pthread_mutex_unlock(&otd->lock); \
+ } \
+ } while (0)
+
+#define update_pos(td, mb_y, mb_x) \
+ do { \
+ int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
+ int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
+ (num_jobs > 1); \
+ int is_null = !next_td || !prev_td; \
+ int pos_check = (is_null) ? 1 : \
+ (next_td != td && pos >= atomic_load(&next_td->wait_mb_pos)) || \
+ (prev_td != td && pos >= atomic_load(&prev_td->wait_mb_pos)); \
+ atomic_store(&td->thread_mb_pos, pos); \
+ if (sliced_threading && pos_check) { \
+ pthread_mutex_lock(&td->lock); \
+ pthread_cond_broadcast(&td->cond); \
+ pthread_mutex_unlock(&td->lock); \
+ } \
+ } while (0)
+#else
+#define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
+#define update_pos(td, mb_y, mb_x) while(0)
+#endif
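+/* Sliced-threading synchronisation: each worker publishes its progress as
+ * (mb_y << 16) | mb_x in thread_mb_pos.  check_thread_pos() blocks until
+ * the other row's thread has passed the given macroblock, and update_pos()
+ * stores the new position and broadcasts the condition variable when a
+ * neighbouring row appears to be waiting on it (wait_mb_pos).  Without
+ * thread support both macros expand to a no-op. */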
+
+static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
+ int jobnr, int threadnr, int is_vp7)
+{
+ VP8Context *s = avctx->priv_data;
+ VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
+ int mb_y = atomic_load(&td->thread_mb_pos) >> 16;
+ int mb_x, mb_xy = mb_y * s->mb_width;
+ int num_jobs = s->num_jobs;
+ const VP8Frame *prev_frame = s->prev_frame;
+ VP8Frame *curframe = s->curframe;
+ VPXRangeCoder *coeff_c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
+
+ VP8Macroblock *mb;
+ uint8_t *dst[3] = {
+ curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
+ curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
+ curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
+ };
+
+ if (vpx_rac_is_end(&s->c))
+ return AVERROR_INVALIDDATA;
+
+ if (mb_y == 0)
+ prev_td = td;
+ else
+ prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
+ if (mb_y == s->mb_height - 1)
+ next_td = td;
+ else
+ next_td = &s->thread_data[(jobnr + 1) % num_jobs];
+ if (s->mb_layout == 1)
+ mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
+ else {
+ // Make sure the previous frame has read its segmentation map,
+ // if we re-use the same map.
+ if (prev_frame && s->segmentation.enabled &&
+ !s->segmentation.update_map)
+ ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
+ mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
+ memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
+ AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
+ }
+
+ if (!is_vp7 || mb_y == 0)
+ memset(td->left_nnz, 0, sizeof(td->left_nnz));
+
+ td->mv_bounds.mv_min.x = -MARGIN;
+ td->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
+
+ for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
+ if (vpx_rac_is_end(&s->c))
+ return AVERROR_INVALIDDATA;
+ // Wait for previous thread to read mb_x+2, and reach mb_y-1.
+ if (prev_td != td) {
+ if (threadnr != 0) {
+ check_thread_pos(td, prev_td,
+ mb_x + (is_vp7 ? 2 : 1),
+ mb_y - (is_vp7 ? 2 : 1));
+ } else {
+ check_thread_pos(td, prev_td,
+ mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
+ mb_y - (is_vp7 ? 2 : 1));
+ }
+ }
+
+ s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
+ s->linesize, 4);
+ s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
+ dst[2] - dst[1], 2);
+
+ if (!s->mb_layout)
+ decode_mb_mode(s, &td->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
+ prev_frame && prev_frame->seg_map ?
+ prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
+
+ prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP8_FRAME_PREVIOUS);
+
+ if (!mb->skip) {
+ if (vpx_rac_is_end(coeff_c))
+ return AVERROR_INVALIDDATA;
+ decode_mb_coeffs(s, td, coeff_c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
+ }
+
+ if (mb->mode <= MODE_I4x4)
+ intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
+ else
+ inter_predict(s, td, dst, mb, mb_x, mb_y);
+
+ prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP8_FRAME_GOLDEN);
+
+ if (!mb->skip) {
+ idct_mb(s, td, dst, mb);
+ } else {
+ AV_ZERO64(td->left_nnz);
+ AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
+
+ /* Reset DC block predictors if they would exist
+ * if the mb had coefficients */
+ if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
+ td->left_nnz[8] = 0;
+ s->top_nnz[mb_x][8] = 0;
+ }
+ }
+
+ if (s->deblock_filter)
+ filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
+
+ if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
+ if (s->filter.simple)
+ backup_mb_border(s->top_border[mb_x + 1], dst[0],
+ NULL, NULL, s->linesize, 0, 1);
+ else
+ backup_mb_border(s->top_border[mb_x + 1], dst[0],
+ dst[1], dst[2], s->linesize, s->uvlinesize, 0);
+ }
+
+ prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP8_FRAME_ALTREF);
+
+ dst[0] += 16;
+ dst[1] += 8;
+ dst[2] += 8;
+ td->mv_bounds.mv_min.x -= 64;
+ td->mv_bounds.mv_max.x -= 64;
+
+ if (mb_x == s->mb_width + 1) {
+ update_pos(td, mb_y, s->mb_width + 3);
+ } else {
+ update_pos(td, mb_y, mb_x);
+ }
+ }
+ return 0;
+}
+
+static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
+ int jobnr, int threadnr)
+{
+ return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 1);
+}
+
+static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
+ int jobnr, int threadnr)
+{
+ return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 0);
+}
+
+static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
+ int jobnr, int threadnr, int is_vp7)
+{
+ VP8Context *s = avctx->priv_data;
+ VP8ThreadData *td = &s->thread_data[threadnr];
+ int mb_x, mb_y = atomic_load(&td->thread_mb_pos) >> 16, num_jobs = s->num_jobs;
+ AVFrame *curframe = s->curframe->tf.f;
+ VP8Macroblock *mb;
+ VP8ThreadData *prev_td, *next_td;
+ uint8_t *dst[3] = {
+ curframe->data[0] + 16 * mb_y * s->linesize,
+ curframe->data[1] + 8 * mb_y * s->uvlinesize,
+ curframe->data[2] + 8 * mb_y * s->uvlinesize
+ };
+
+ if (s->mb_layout == 1)
+ mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
+ else
+ mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
+
+ if (mb_y == 0)
+ prev_td = td;
+ else
+ prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
+ if (mb_y == s->mb_height - 1)
+ next_td = td;
+ else
+ next_td = &s->thread_data[(jobnr + 1) % num_jobs];
+
+ for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
+ const VP8FilterStrength *f = &td->filter_strength[mb_x];
+ if (prev_td != td)
+ check_thread_pos(td, prev_td,
+ (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
+ if (next_td != td)
+ if (next_td != &s->thread_data[0])
+ check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
+
+ if (num_jobs == 1) {
+ if (s->filter.simple)
+ backup_mb_border(s->top_border[mb_x + 1], dst[0],
+ NULL, NULL, s->linesize, 0, 1);
+ else
+ backup_mb_border(s->top_border[mb_x + 1], dst[0],
+ dst[1], dst[2], s->linesize, s->uvlinesize, 0);
+ }
+
+ if (s->filter.simple)
+ filter_mb_simple(s, dst[0], f, mb_x, mb_y);
+ else
+ filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
+ dst[0] += 16;
+ dst[1] += 8;
+ dst[2] += 8;
+
+ update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
+ }
+}
+
+static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata,
+ int jobnr, int threadnr)
+{
+ filter_mb_row(avctx, tdata, jobnr, threadnr, 1);
+}
+
+static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
+ int jobnr, int threadnr)
+{
+ filter_mb_row(avctx, tdata, jobnr, threadnr, 0);
+}
+
+static av_always_inline
+int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
+ int threadnr, int is_vp7)
+{
+ const VP8Context *s = avctx->priv_data;
+ VP8ThreadData *td = &s->thread_data[jobnr];
+ VP8ThreadData *next_td = NULL, *prev_td = NULL;
+ VP8Frame *curframe = s->curframe;
+ int mb_y, num_jobs = s->num_jobs;
+ int ret;
+
+ td->thread_nr = threadnr;
+ td->mv_bounds.mv_min.y = -MARGIN - 64 * threadnr;
+ td->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN - 64 * threadnr;
+ for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
+ atomic_store(&td->thread_mb_pos, mb_y << 16);
+ ret = s->decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
+ if (ret < 0) {
+ update_pos(td, s->mb_height, INT_MAX & 0xFFFF);
+ return ret;
+ }
+ if (s->deblock_filter)
+ s->filter_mb_row(avctx, tdata, jobnr, threadnr);
+ update_pos(td, mb_y, INT_MAX & 0xFFFF);
+
+ td->mv_bounds.mv_min.y -= 64 * num_jobs;
+ td->mv_bounds.mv_max.y -= 64 * num_jobs;
+
+ if (avctx->active_thread_type == FF_THREAD_FRAME)
+ ff_thread_report_progress(&curframe->tf, mb_y, 0);
+ }
+
+ return 0;
+}
+
+static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
+ int jobnr, int threadnr)
+{
+ return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
+}
+
+static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
+ int jobnr, int threadnr)
+{
+ return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
+}
+
+static av_always_inline
+int vp78_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame,
+ const AVPacket *avpkt, int is_vp7)
+{
+ VP8Context *s = avctx->priv_data;
+ int ret, i, referenced, num_jobs;
+ enum AVDiscard skip_thresh;
+ VP8Frame *av_uninit(curframe), *prev_frame;
+
+ if (is_vp7)
+ ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
+ else
+ ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
+
+ if (ret < 0)
+ goto err;
+
+ if (s->actually_webp) {
+ // avctx->pix_fmt already set in caller.
+ } else if (!is_vp7 && s->pix_fmt == AV_PIX_FMT_NONE) {
+ s->pix_fmt = get_pixel_format(s);
+ if (s->pix_fmt < 0) {
+ ret = AVERROR(EINVAL);
+ goto err;
+ }
+ avctx->pix_fmt = s->pix_fmt;
+ }
+
+ prev_frame = s->framep[VP8_FRAME_CURRENT];
+
+ referenced = s->update_last || s->update_golden == VP8_FRAME_CURRENT ||
+ s->update_altref == VP8_FRAME_CURRENT;
+
+ skip_thresh = !referenced ? AVDISCARD_NONREF
+ : !s->keyframe ? AVDISCARD_NONKEY
+ : AVDISCARD_ALL;
+
+ if (avctx->skip_frame >= skip_thresh) {
+ s->invisible = 1;
+ memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
+ goto skip_decode;
+ }
+ s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
+
+ // release no longer referenced frames
+ for (i = 0; i < 5; i++)
+ if (s->frames[i].tf.f->buf[0] &&
+ &s->frames[i] != prev_frame &&
+ &s->frames[i] != s->framep[VP8_FRAME_PREVIOUS] &&
+ &s->frames[i] != s->framep[VP8_FRAME_GOLDEN] &&
+ &s->frames[i] != s->framep[VP8_FRAME_ALTREF])
+ vp8_release_frame(s, &s->frames[i]);
+
+ curframe = s->framep[VP8_FRAME_CURRENT] = vp8_find_free_buffer(s);
+
+ if (!s->colorspace)
+ avctx->colorspace = AVCOL_SPC_BT470BG;
+ if (s->fullrange)
+ avctx->color_range = AVCOL_RANGE_JPEG;
+ else
+ avctx->color_range = AVCOL_RANGE_MPEG;
+
+ /* Given that arithmetic probabilities are updated every frame, it's quite
+ * likely that the values we have on a random interframe are complete
+ * junk if we didn't start decoding on a keyframe. So just don't display
+ * anything rather than junk. */
+ if (!s->keyframe && (!s->framep[VP8_FRAME_PREVIOUS] ||
+ !s->framep[VP8_FRAME_GOLDEN] ||
+ !s->framep[VP8_FRAME_ALTREF])) {
+ av_log(avctx, AV_LOG_WARNING,
+ "Discarding interframe without a prior keyframe!\n");
+ ret = AVERROR_INVALIDDATA;
+ goto err;
+ }
+
+ curframe->tf.f->key_frame = s->keyframe;
+ curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
+ : AV_PICTURE_TYPE_P;
+ if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
+ goto err;
+
+ // check if golden and altref are swapped
+ if (s->update_altref != VP8_FRAME_NONE)
+ s->next_framep[VP8_FRAME_ALTREF] = s->framep[s->update_altref];
+ else
+ s->next_framep[VP8_FRAME_ALTREF] = s->framep[VP8_FRAME_ALTREF];
+
+ if (s->update_golden != VP8_FRAME_NONE)
+ s->next_framep[VP8_FRAME_GOLDEN] = s->framep[s->update_golden];
+ else
+ s->next_framep[VP8_FRAME_GOLDEN] = s->framep[VP8_FRAME_GOLDEN];
+
+ if (s->update_last)
+ s->next_framep[VP8_FRAME_PREVIOUS] = curframe;
+ else
+ s->next_framep[VP8_FRAME_PREVIOUS] = s->framep[VP8_FRAME_PREVIOUS];
+
+ s->next_framep[VP8_FRAME_CURRENT] = curframe;
+
+ if (ffcodec(avctx->codec)->update_thread_context)
+ ff_thread_finish_setup(avctx);
+
+ if (avctx->hwaccel) {
+ ret = avctx->hwaccel->start_frame(avctx, avpkt->data, avpkt->size);
+ if (ret < 0)
+ goto err;
+
+ ret = avctx->hwaccel->decode_slice(avctx, avpkt->data, avpkt->size);
+ if (ret < 0)
+ goto err;
+
+ ret = avctx->hwaccel->end_frame(avctx);
+ if (ret < 0)
+ goto err;
+
+ } else {
+ s->linesize = curframe->tf.f->linesize[0];
+ s->uvlinesize = curframe->tf.f->linesize[1];
+
+ memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
+ /* Zero macroblock structures for top/top-left prediction
+ * from outside the frame. */
+ if (!s->mb_layout)
+ memset(s->macroblocks + s->mb_height * 2 - 1, 0,
+ (s->mb_width + 1) * sizeof(*s->macroblocks));
+ if (!s->mb_layout && s->keyframe)
+ memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
+
+ memset(s->ref_count, 0, sizeof(s->ref_count));
+
+ if (s->mb_layout == 1) {
+ // Make sure the previous frame has read its segmentation map,
+ // if we re-use the same map.
+ if (prev_frame && s->segmentation.enabled &&
+ !s->segmentation.update_map)
+ ff_thread_await_progress(&prev_frame->tf, 1, 0);
+ if (is_vp7)
+ ret = vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
+ else
+ ret = vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
+ if (ret < 0)
+ goto err;
+ }
+
+ if (avctx->active_thread_type == FF_THREAD_FRAME)
+ num_jobs = 1;
+ else
+ num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
+ s->num_jobs = num_jobs;
+ s->curframe = curframe;
+ s->prev_frame = prev_frame;
+ s->mv_bounds.mv_min.y = -MARGIN;
+ s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
+ for (i = 0; i < MAX_THREADS; i++) {
+ VP8ThreadData *td = &s->thread_data[i];
+ atomic_init(&td->thread_mb_pos, 0);
+ atomic_init(&td->wait_mb_pos, INT_MAX);
+ }
+ if (is_vp7)
+ avctx->execute2(avctx, vp7_decode_mb_row_sliced, s->thread_data, NULL,
+ num_jobs);
+ else
+ avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL,
+ num_jobs);
+ }
+
+ ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
+ memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
+
+skip_decode:
+ // if future frames don't use the updated probabilities,
+ // reset them to the values we saved
+ if (!s->update_probabilities)
+ s->prob[0] = s->prob[1];
+
+ if (!s->invisible) {
+ if ((ret = av_frame_ref(rframe, curframe->tf.f)) < 0)
+ return ret;
+ *got_frame = 1;
+ }
+
+ return avpkt->size;
+err:
+ memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
+ return ret;
+}
+
+int ff_vp8_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, AVPacket *avpkt)
+{
+ return vp78_decode_frame(avctx, frame, got_frame, avpkt, IS_VP8);
+}
+
+#if CONFIG_VP7_DECODER
+static int vp7_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, AVPacket *avpkt)
+{
+ return vp78_decode_frame(avctx, frame, got_frame, avpkt, IS_VP7);
+}
+#endif /* CONFIG_VP7_DECODER */
+
+av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
+{
+ VP8Context *s = avctx->priv_data;
+ int i;
+
+ vp8_decode_flush_impl(avctx, 1);
+ for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
+ av_frame_free(&s->frames[i].tf.f);
+
+ return 0;
+}
+
+static av_cold int vp8_init_frames(VP8Context *s)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
+ s->frames[i].tf.f = av_frame_alloc();
+ if (!s->frames[i].tf.f)
+ return AVERROR(ENOMEM);
+ }
+ return 0;
+}
+
+static av_always_inline
+int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
+{
+ VP8Context *s = avctx->priv_data;
+ int ret;
+
+ s->avctx = avctx;
+ s->vp7 = avctx->codec->id == AV_CODEC_ID_VP7;
+ s->pix_fmt = AV_PIX_FMT_NONE;
+ avctx->pix_fmt = AV_PIX_FMT_YUV420P;
+
+ ff_videodsp_init(&s->vdsp, 8);
+
+ ff_vp78dsp_init(&s->vp8dsp);
+ if (CONFIG_VP7_DECODER && is_vp7) {
+ ff_h264_pred_init(&s->hpc, AV_CODEC_ID_VP7, 8, 1);
+ ff_vp7dsp_init(&s->vp8dsp);
+ s->decode_mb_row_no_filter = vp7_decode_mb_row_no_filter;
+ s->filter_mb_row = vp7_filter_mb_row;
+ } else if (CONFIG_VP8_DECODER && !is_vp7) {
+ ff_h264_pred_init(&s->hpc, AV_CODEC_ID_VP8, 8, 1);
+ ff_vp8dsp_init(&s->vp8dsp);
+ s->decode_mb_row_no_filter = vp8_decode_mb_row_no_filter;
+ s->filter_mb_row = vp8_filter_mb_row;
+ }
+
+ /* does not change for VP8 */
+ memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
+
+ if ((ret = vp8_init_frames(s)) < 0) {
+ ff_vp8_decode_free(avctx);
+ return ret;
+ }
+
+ return 0;
+}
+
+#if CONFIG_VP7_DECODER
+static int vp7_decode_init(AVCodecContext *avctx)
+{
+ return vp78_decode_init(avctx, IS_VP7);
+}
+#endif /* CONFIG_VP7_DECODER */
+
+av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
+{
+ return vp78_decode_init(avctx, IS_VP8);
+}
+
+#if CONFIG_VP8_DECODER
+#if HAVE_THREADS
+#define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
+
+static int vp8_decode_update_thread_context(AVCodecContext *dst,
+ const AVCodecContext *src)
+{
+ VP8Context *s = dst->priv_data, *s_src = src->priv_data;
+ int i;
+
+ if (s->macroblocks_base &&
+ (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
+ free_buffers(s);
+ s->mb_width = s_src->mb_width;
+ s->mb_height = s_src->mb_height;
+ }
+
+ s->pix_fmt = s_src->pix_fmt;
+ s->prob[0] = s_src->prob[!s_src->update_probabilities];
+ s->segmentation = s_src->segmentation;
+ s->lf_delta = s_src->lf_delta;
+ memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
+
+ for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
+ if (s_src->frames[i].tf.f->buf[0]) {
+ int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ s->framep[0] = REBASE(s_src->next_framep[0]);
+ s->framep[1] = REBASE(s_src->next_framep[1]);
+ s->framep[2] = REBASE(s_src->next_framep[2]);
+ s->framep[3] = REBASE(s_src->next_framep[3]);
+
+ return 0;
+}
+#endif /* HAVE_THREADS */
+#endif /* CONFIG_VP8_DECODER */
+
+#if CONFIG_VP7_DECODER
+const FFCodec ff_vp7_decoder = {
+ .p.name = "vp7",
+ CODEC_LONG_NAME("On2 VP7"),
+ .p.type = AVMEDIA_TYPE_VIDEO,
+ .p.id = AV_CODEC_ID_VP7,
+ .priv_data_size = sizeof(VP8Context),
+ .init = vp7_decode_init,
+ .close = ff_vp8_decode_free,
+ FF_CODEC_DECODE_CB(vp7_decode_frame),
+ .p.capabilities = AV_CODEC_CAP_DR1,
+ .flush = vp8_decode_flush,
+};
+#endif /* CONFIG_VP7_DECODER */
+
+#if CONFIG_VP8_DECODER
+const FFCodec ff_vp8_decoder = {
+ .p.name = "vp8",
+ CODEC_LONG_NAME("On2 VP8"),
+ .p.type = AVMEDIA_TYPE_VIDEO,
+ .p.id = AV_CODEC_ID_VP8,
+ .priv_data_size = sizeof(VP8Context),
+ .init = ff_vp8_decode_init,
+ .close = ff_vp8_decode_free,
+ FF_CODEC_DECODE_CB(ff_vp8_decode_frame),
+ .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
+ AV_CODEC_CAP_SLICE_THREADS,
+ .caps_internal = FF_CODEC_CAP_ALLOCATE_PROGRESS,
+ .flush = vp8_decode_flush,
+ UPDATE_THREAD_CONTEXT(vp8_decode_update_thread_context),
+ .hw_configs = (const AVCodecHWConfigInternal *const []) {
+#if CONFIG_VP8_VAAPI_HWACCEL
+ HWACCEL_VAAPI(vp8),
+#endif
+#if CONFIG_VP8_NVDEC_HWACCEL
+ HWACCEL_NVDEC(vp8),
+#endif
+ NULL
+ },
+};
+#endif /* CONFIG_VP8_DECODER */
diff --git a/media/ffvpx/libavcodec/vp8.h b/media/ffvpx/libavcodec/vp8.h
new file mode 100644
index 0000000000..6f29156b53
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp8.h
@@ -0,0 +1,361 @@
+/*
+ * VP8 compatible video decoder
+ *
+ * Copyright (C) 2010 David Conrad
+ * Copyright (C) 2010 Ronald S. Bultje
+ * Copyright (C) 2010 Fiona Glaser
+ * Copyright (C) 2012 Daniel Kang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP8_H
+#define AVCODEC_VP8_H
+
+#include <stdatomic.h>
+
+#include "libavutil/buffer.h"
+#include "libavutil/mem_internal.h"
+#include "libavutil/thread.h"
+
+#include "h264pred.h"
+#include "threadframe.h"
+#include "videodsp.h"
+#include "vp8dsp.h"
+#include "vpx_rac.h"
+
+#define VP8_MAX_QUANT 127
+
+typedef enum {
+ VP8_FRAME_NONE = -1,
+ VP8_FRAME_CURRENT = 0,
+ VP8_FRAME_PREVIOUS = 1,
+ VP8_FRAME_GOLDEN = 2,
+ VP8_FRAME_ALTREF = 3,
+} VP8FrameType;
+
+enum dct_token {
+ DCT_0,
+ DCT_1,
+ DCT_2,
+ DCT_3,
+ DCT_4,
+ DCT_CAT1,
+ DCT_CAT2,
+ DCT_CAT3,
+ DCT_CAT4,
+ DCT_CAT5,
+ DCT_CAT6,
+ DCT_EOB,
+
+ NUM_DCT_TOKENS
+};
+
+// used to signal 4x4 intra pred in luma MBs
+#define MODE_I4x4 4
+
+enum inter_mvmode {
+ VP8_MVMODE_ZERO = MODE_I4x4 + 1,
+ VP8_MVMODE_MV,
+ VP8_MVMODE_SPLIT
+};
+
+enum inter_splitmvmode {
+ VP8_SPLITMVMODE_16x8 = 0, ///< 2 16x8 blocks (vertical)
+ VP8_SPLITMVMODE_8x16, ///< 2 8x16 blocks (horizontal)
+ VP8_SPLITMVMODE_8x8, ///< 2x2 blocks of 8x8px each
+ VP8_SPLITMVMODE_4x4, ///< 4x4 blocks of 4x4px each
+ VP8_SPLITMVMODE_NONE, ///< (only used in prediction) no split MVs
+};
+
+typedef struct VP8mv {
+ DECLARE_ALIGNED(4, int16_t, x);
+ int16_t y;
+} VP8mv;
+
+typedef struct VP8FilterStrength {
+ uint8_t filter_level;
+ uint8_t inner_limit;
+ uint8_t inner_filter;
+} VP8FilterStrength;
+
+typedef struct VP8Macroblock {
+ uint8_t skip;
+ // TODO: make it possible to check for at least (i4x4 or split_mv)
+ // in one op. are others needed?
+ uint8_t mode;
+ uint8_t ref_frame;
+ uint8_t partitioning;
+ uint8_t chroma_pred_mode;
+ uint8_t segment;
+ uint8_t intra4x4_pred_mode_mb[16];
+ DECLARE_ALIGNED(4, uint8_t, intra4x4_pred_mode_top)[4];
+ VP8mv mv;
+ VP8mv bmv[16];
+} VP8Macroblock;
+
+typedef struct VP8intmv {
+ int x;
+ int y;
+} VP8intmv;
+
+typedef struct VP8mvbounds {
+ VP8intmv mv_min;
+ VP8intmv mv_max;
+} VP8mvbounds;
+
+typedef struct VP8ThreadData {
+ DECLARE_ALIGNED(16, int16_t, block)[6][4][16];
+ DECLARE_ALIGNED(16, int16_t, block_dc)[16];
+ /**
+ * This is the index plus one of the last non-zero coeff
+ * for each of the blocks in the current macroblock.
+ * So, 0 -> no coeffs
+ * 1 -> dc-only (special transform)
+ * 2+ -> full transform
+ */
+ DECLARE_ALIGNED(16, uint8_t, non_zero_count_cache)[6][4];
+ /**
+ * For coeff decode, we need to know whether the above block had non-zero
+ * coefficients. This means for each macroblock, we need data for 4 luma
+ * blocks, 2 u blocks, 2 v blocks, and the luma dc block, for a total of 9
+ * per macroblock. We keep the last row in top_nnz.
+ */
+ DECLARE_ALIGNED(8, uint8_t, left_nnz)[9];
+ int thread_nr;
+#if HAVE_THREADS
+ pthread_mutex_t lock;
+ pthread_cond_t cond;
+#endif
+ atomic_int thread_mb_pos; // (mb_y << 16) | (mb_x & 0xFFFF)
+ atomic_int wait_mb_pos; // What the current thread is waiting on.
+
+#define EDGE_EMU_LINESIZE 32
+ DECLARE_ALIGNED(16, uint8_t, edge_emu_buffer)[21 * EDGE_EMU_LINESIZE];
+ VP8FilterStrength *filter_strength;
+ VP8mvbounds mv_bounds;
+} VP8ThreadData;
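+
+/* Sliced-threading handshake, in rough terms: each worker publishes how far
+ * it has got in its current macroblock row through thread_mb_pos and records
+ * the position it is blocked on in wait_mb_pos. A worker that depends on
+ * data owned by a neighbouring row waits (on the owner's condition variable
+ * when built with threads) until that row has advanced past the required
+ * position; see check_thread_pos() and update_pos() in vp8.c. */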
+
+typedef struct VP8Frame {
+ ThreadFrame tf;
+ AVBufferRef *seg_map;
+
+ AVBufferRef *hwaccel_priv_buf;
+ void *hwaccel_picture_private;
+} VP8Frame;
+
+#define MAX_THREADS 8
+typedef struct VP8Context {
+ VP8ThreadData *thread_data;
+ AVCodecContext *avctx;
+ enum AVPixelFormat pix_fmt;
+ int actually_webp;
+
+ VP8Frame *framep[4];
+ VP8Frame *next_framep[4];
+ VP8Frame *curframe;
+ VP8Frame *prev_frame;
+
+ uint16_t mb_width; /* number of horizontal MB */
+ uint16_t mb_height; /* number of vertical MB */
+ ptrdiff_t linesize;
+ ptrdiff_t uvlinesize;
+
+ uint8_t keyframe;
+ uint8_t deblock_filter;
+ uint8_t mbskip_enabled;
+ uint8_t profile;
+ VP8mvbounds mv_bounds;
+
+ int8_t sign_bias[4]; ///< one state [0, 1] per ref frame type
+ int ref_count[3];
+
+ /**
+ * Base parameters for segmentation, i.e. per-macroblock parameters.
+ * These must be kept unchanged even if segmentation is not used for
+ * a frame, since the values persist between interframes.
+ */
+ struct {
+ uint8_t enabled;
+ uint8_t absolute_vals;
+ uint8_t update_map;
+ uint8_t update_feature_data;
+ int8_t base_quant[4];
+ int8_t filter_level[4]; ///< base loop filter level
+ } segmentation;
+
+ struct {
+ uint8_t simple;
+ uint8_t level;
+ uint8_t sharpness;
+ } filter;
+
+ VP8Macroblock *macroblocks;
+
+ uint8_t *intra4x4_pred_mode_top;
+ uint8_t intra4x4_pred_mode_left[4];
+
+ /**
+ * Macroblocks can have one of 4 different quants in a frame when
+ * segmentation is enabled.
+ * If segmentation is disabled, only the first segment's values are used.
+ */
+ struct {
+ // [0] - DC qmul [1] - AC qmul
+ int16_t luma_qmul[2];
+ int16_t luma_dc_qmul[2]; ///< luma dc-only block quant
+ int16_t chroma_qmul[2];
+ } qmat[4];
+
+ // Raw quantisation values, which may be needed by hwaccel decode.
+ struct {
+ int yac_qi;
+ int ydc_delta;
+ int y2dc_delta;
+ int y2ac_delta;
+ int uvdc_delta;
+ int uvac_delta;
+ } quant;
+
+ struct {
+ uint8_t enabled; ///< whether each mb can have a different strength based on mode/ref
+ uint8_t update;
+
+ /**
+ * filter strength adjustment for the following macroblock modes:
+ * [0-3] - i16x16 (always zero)
+ * [4] - i4x4
+ * [5] - zero mv
+ * [6] - inter modes except for zero or split mv
+ * [7] - split mv
+ * i16x16 modes never have any adjustment
+ */
+ int8_t mode[VP8_MVMODE_SPLIT + 1];
+
+ /**
+ * filter strength adjustment for macroblocks that reference:
+ * [0] - intra / VP8_FRAME_CURRENT
+ * [1] - VP8_FRAME_PREVIOUS
+ * [2] - VP8_FRAME_GOLDEN
+ * [3] - altref / VP8_FRAME_ALTREF
+ */
+ int8_t ref[4];
+ } lf_delta;
+
+ uint8_t (*top_border)[16 + 8 + 8];
+ uint8_t (*top_nnz)[9];
+
+ VPXRangeCoder c; ///< header context, includes mb modes and motion vectors
+
+ /* This contains the entropy coder state at the end of the header
+ * block, in the form specified by the standard. For use by
+ * hwaccels, so that a hardware decoder has the information to
+ * start decoding at the macroblock layer.
+ */
+ struct {
+ const uint8_t *input;
+ uint32_t range;
+ uint32_t value;
+ int bit_count;
+ } coder_state_at_header_end;
+
+ int header_partition_size;
+
+ /**
+ * These are all of the updatable probabilities for binary decisions.
+ * They are only implicitly reset on keyframes, making it quite likely
+ * for an interframe to desync if a prior frame's header was corrupt
+ * or missing outright!
+ */
+ struct {
+ uint8_t segmentid[3];
+ uint8_t mbskip;
+ uint8_t intra;
+ uint8_t last;
+ uint8_t golden;
+ uint8_t pred16x16[4];
+ uint8_t pred8x8c[3];
+ uint8_t token[4][16][3][NUM_DCT_TOKENS - 1];
+ uint8_t mvc[2][19];
+ uint8_t scan[16];
+ } prob[2];
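+ /* prob[0] is the set used while decoding the current frame; prob[1] keeps
+ * the copy saved before the per-frame updates, which vp78_decode_frame()
+ * restores into prob[0] after decoding when update_probabilities is 0. */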
+
+ VP8Macroblock *macroblocks_base;
+ int invisible;
+ int update_last; ///< update VP8_FRAME_PREVIOUS with the current one
+ int update_golden; ///< VP8_FRAME_NONE if not updated, or which frame to copy if so
+ int update_altref;
+
+ /**
+ * If this flag is not set, all the probability updates
+ * are discarded after this frame is decoded.
+ */
+ int update_probabilities;
+
+ /**
+ * All coefficients are contained in separate arith coding contexts.
+ * There can be 1, 2, 4, or 8 of these after the header context.
+ */
+ int num_coeff_partitions;
+ VPXRangeCoder coeff_partition[8];
+ int coeff_partition_size[8];
+ VideoDSPContext vdsp;
+ VP8DSPContext vp8dsp;
+ H264PredContext hpc;
+ vp8_mc_func put_pixels_tab[3][3][3];
+ VP8Frame frames[5];
+
+ uint8_t colorspace; ///< 0 is the only value allowed (meaning bt601)
+ uint8_t fullrange; ///< whether we can skip clamping in dsp functions
+
+ int num_jobs;
+ /**
+ * This describes the macroblock memory layout.
+ * 0 -> Only width+height*2+1 macroblocks allocated (frame/single thread).
+ * 1 -> Macroblocks for entire frame allocated (sliced thread).
+ */
+ int mb_layout;
+
+ int (*decode_mb_row_no_filter)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr);
+ void (*filter_mb_row)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr);
+
+ int vp7;
+
+ /**
+ * Interframe DC prediction (VP7)
+ * [0] VP8_FRAME_PREVIOUS
+ * [1] VP8_FRAME_GOLDEN
+ */
+ uint16_t inter_dc_pred[2][2];
+
+ /**
+ * Macroblock features (VP7)
+ */
+ uint8_t feature_enabled[4];
+ uint8_t feature_present_prob[4];
+ uint8_t feature_index_prob[4][3];
+ uint8_t feature_value[4][4];
+} VP8Context;
+
+int ff_vp8_decode_init(AVCodecContext *avctx);
+
+int ff_vp8_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, AVPacket *avpkt);
+
+int ff_vp8_decode_free(AVCodecContext *avctx);
+
+#endif /* AVCODEC_VP8_H */
diff --git a/media/ffvpx/libavcodec/vp89_rac.h b/media/ffvpx/libavcodec/vp89_rac.h
new file mode 100644
index 0000000000..bc0924c387
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp89_rac.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Range decoder functions common to VP8 and VP9
+ */
+
+#ifndef AVCODEC_VP89_RAC_H
+#define AVCODEC_VP89_RAC_H
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+
+#include "vpx_rac.h"
+
+// The rounding here is different from vpx_rac_get(); is vpx_rac_get() wrong?
+static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
+{
+ return vpx_rac_get_prob(c, 128);
+}
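+
+// Probabilities are on a 0..255 scale, so 128 corresponds to p = 1/2: this
+// reads a single (nearly) unbiased bit from the range coder.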
+
+static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
+{
+ int value = 0;
+
+ while (bits--) {
+ value = (value << 1) | vp89_rac_get(c);
+ }
+
+ return value;
+}
+
+// How probabilities are associated with decisions appears to differ here;
+// the new scheme fits within the old one, but this way has one fewer branch per decision.
+static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t (*tree)[2],
+ const uint8_t *probs)
+{
+ int i = 0;
+
+ do {
+ i = tree[i][vpx_rac_get_prob(c, probs[i])];
+ } while (i > 0);
+
+ return -i;
+}
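+
+/* Illustration with a made-up tree (not one of the real tables): for
+ * tree = { { -A, 1 }, { 2, 3 }, { -B, -C }, { -D, -E } }, nonnegative entries
+ * are indexes of further nodes and negative entries are negated leaf values,
+ * so the bit strings '0', '100', '101', '110' and '111' decode to A, B, C, D
+ * and E respectively. probs[] is indexed by node, i.e. probs[i] is the
+ * probability of taking the 0 branch at node i. */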
+
+#endif /* AVCODEC_VP89_RAC_H */
diff --git a/media/ffvpx/libavcodec/vp8_parser.c b/media/ffvpx/libavcodec/vp8_parser.c
new file mode 100644
index 0000000000..98b752bfb9
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp8_parser.c
@@ -0,0 +1,79 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intreadwrite.h"
+
+#include "avcodec.h"
+
+static int parse(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ unsigned int frame_type;
+ unsigned int profile;
+
+ *poutbuf = buf;
+ *poutbuf_size = buf_size;
+
+ if (buf_size < 3)
+ return buf_size;
+
+ frame_type = buf[0] & 1;
+ profile = (buf[0] >> 1) & 7;
+ if (profile > 3) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid profile %u.\n", profile);
+ return buf_size;
+ }
+
+ avctx->profile = profile;
+ s->key_frame = frame_type == 0;
+ s->pict_type = frame_type ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+ s->format = AV_PIX_FMT_YUV420P;
+ s->field_order = AV_FIELD_PROGRESSIVE;
+ s->picture_structure = AV_PICTURE_STRUCTURE_FRAME;
+
+ if (frame_type == 0) {
+ unsigned int sync_code;
+ unsigned int width, height;
+
+ if (buf_size < 10)
+ return buf_size;
+
+ sync_code = AV_RL24(buf + 3);
+ if (sync_code != 0x2a019d) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid sync code %06x.\n", sync_code);
+ return buf_size;
+ }
+
+ width = AV_RL16(buf + 6) & 0x3fff;
+ height = AV_RL16(buf + 8) & 0x3fff;
+
+ s->width = width;
+ s->height = height;
+ s->coded_width = FFALIGN(width, 16);
+ s->coded_height = FFALIGN(height, 16);
+ }
+
+ return buf_size;
+}
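+
+/* Layout relied on above (per RFC 6386): the first 3 bytes form a
+ * little-endian frame tag whose bit 0 is the frame type (0 = key frame) and
+ * bits 1-3 the profile/version; the remaining tag bits hold the show_frame
+ * flag and the size of the first partition, which this parser does not need.
+ * Key frames then carry the start code 0x9d 0x01 0x2a (read with AV_RL24 as
+ * 0x2a019d) followed by two little-endian 16-bit fields, each a 14-bit
+ * dimension plus a 2-bit upscaling factor. */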
+
+const AVCodecParser ff_vp8_parser = {
+ .codec_ids = { AV_CODEC_ID_VP8 },
+ .parser_parse = parse,
+};
diff --git a/media/ffvpx/libavcodec/vp8data.h b/media/ffvpx/libavcodec/vp8data.h
new file mode 100644
index 0000000000..1fcce134eb
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp8data.h
@@ -0,0 +1,824 @@
+/*
+ * Copyright (C) 2010 David Conrad
+ * Copyright (C) 2010 Ronald S. Bultje
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * VP8 compatible video decoder
+ */
+
+#ifndef AVCODEC_VP8DATA_H
+#define AVCODEC_VP8DATA_H
+
+#include "vp8.h"
+#include "h264pred.h"
+
+static const uint8_t vp7_pred4x4_mode[] = {
+ [DC_PRED8x8] = DC_PRED,
+ [VERT_PRED8x8] = TM_VP8_PRED,
+ [HOR_PRED8x8] = TM_VP8_PRED,
+ [PLANE_PRED8x8] = TM_VP8_PRED,
+};
+
+static const uint8_t vp8_pred4x4_mode[] = {
+ [DC_PRED8x8] = DC_PRED,
+ [VERT_PRED8x8] = VERT_PRED,
+ [HOR_PRED8x8] = HOR_PRED,
+ [PLANE_PRED8x8] = TM_VP8_PRED,
+};
+
+static const int8_t vp8_pred16x16_tree_intra[4][2] = {
+ { -MODE_I4x4, 1 }, // '0'
+ { 2, 3 },
+ { -DC_PRED8x8, -VERT_PRED8x8 }, // '100', '101'
+ { -HOR_PRED8x8, -PLANE_PRED8x8 }, // '110', '111'
+};
+
+static const int8_t vp8_pred16x16_tree_inter[4][2] = {
+ { -DC_PRED8x8, 1 }, // '0'
+ { 2, 3 },
+ { -VERT_PRED8x8, -HOR_PRED8x8 }, // '100', '101'
+ { -PLANE_PRED8x8, -MODE_I4x4 }, // '110', '111'
+};
+
+typedef struct VP7MVPred {
+ int8_t yoffset;
+ int8_t xoffset;
+ uint8_t subblock;
+ uint8_t score;
+} VP7MVPred;
+
+#define VP7_MV_PRED_COUNT 12
+static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT] = {
+ { -1, 0, 12, 8 },
+ { 0, -1, 3, 8 },
+ { -1, -1, 15, 2 },
+ { -1, 1, 12, 2 },
+ { -2, 0, 12, 2 },
+ { 0, -2, 3, 2 },
+ { -1, -2, 15, 1 },
+ { -2, -1, 15, 1 },
+ { -2, 1, 12, 1 },
+ { -1, 2, 12, 1 },
+ { -2, -2, 15, 1 },
+ { -2, 2, 12, 1 },
+};
+
+static const int vp7_mode_contexts[31][4] = {
+ { 3, 3, 1, 246 },
+ { 7, 89, 66, 239 },
+ { 10, 90, 78, 238 },
+ { 14, 118, 95, 241 },
+ { 14, 123, 106, 238 },
+ { 20, 140, 109, 240 },
+ { 13, 155, 103, 238 },
+ { 21, 158, 99, 240 },
+ { 27, 82, 108, 232 },
+ { 19, 99, 123, 217 },
+ { 45, 139, 148, 236 },
+ { 50, 117, 144, 235 },
+ { 57, 128, 164, 238 },
+ { 69, 139, 171, 239 },
+ { 74, 154, 179, 238 },
+ { 112, 165, 186, 242 },
+ { 98, 143, 185, 245 },
+ { 105, 153, 190, 250 },
+ { 124, 167, 192, 245 },
+ { 131, 186, 203, 246 },
+ { 59, 184, 222, 224 },
+ { 148, 215, 214, 213 },
+ { 137, 211, 210, 219 },
+ { 190, 227, 128, 228 },
+ { 183, 228, 128, 228 },
+ { 194, 234, 128, 228 },
+ { 202, 236, 128, 228 },
+ { 205, 240, 128, 228 },
+ { 205, 244, 128, 228 },
+ { 225, 246, 128, 228 },
+ { 233, 251, 128, 228 },
+};
+
+static const int vp8_mode_contexts[6][4] = {
+ { 7, 1, 1, 143 },
+ { 14, 18, 14, 107 },
+ { 135, 64, 57, 68 },
+ { 60, 56, 128, 65 },
+ { 159, 134, 128, 34 },
+ { 234, 188, 128, 28 },
+};
+
+static const uint8_t vp8_mbsplits[5][16] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
+ { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 },
+ { 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+};
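+
+/* vp8_mbsplits: each row assigns the 16 4x4 subblocks of a macroblock to a
+ * partition id, in the order of enum inter_splitmvmode: two 16x8 halves, two
+ * 8x16 halves, four 8x8 quadrants, sixteen 4x4 blocks. The extra all-zero
+ * row corresponds to VP8_SPLITMVMODE_NONE, so an unsplit neighbouring
+ * macroblock maps every subblock onto its single motion vector. */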
+
+static const uint8_t vp8_mbfirstidx[4][16] = {
+ { 0, 8 },
+ { 0, 2 },
+ { 0, 2, 8, 10 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
+};
+
+static const uint8_t vp8_mbsplit_count[4] = {
+ 2, 2, 4, 16
+};
+static const uint8_t vp8_mbsplit_prob[3] = {
+ 110, 111, 150
+};
+
+static const uint8_t vp7_submv_prob[3] = {
+ 180, 162, 25
+};
+
+static const uint8_t vp8_submv_prob[5][3] = {
+ { 147, 136, 18 },
+ { 106, 145, 1 },
+ { 179, 121, 1 },
+ { 223, 1, 34 },
+ { 208, 1, 1 }
+};
+
+static const uint8_t vp8_pred16x16_prob_intra[4] = {
+ 145, 156, 163, 128
+};
+static const uint8_t vp8_pred16x16_prob_inter[4] = {
+ 112, 86, 140, 37
+};
+
+static const int8_t vp8_pred4x4_tree[9][2] = {
+ { -DC_PRED, 1 }, // '0'
+ { -TM_VP8_PRED, 2 }, // '10'
+ { -VERT_PRED, 3 }, // '110'
+ { 4, 6 },
+ { -HOR_PRED, 5 }, // '11100'
+ { -DIAG_DOWN_RIGHT_PRED, -VERT_RIGHT_PRED }, // '111010', '111011'
+ { -DIAG_DOWN_LEFT_PRED, 7 }, // '11110'
+ { -VERT_LEFT_PRED, 8 }, // '111110'
+ { -HOR_DOWN_PRED, -HOR_UP_PRED }, // '1111110', '1111111'
+};
+
+static const int8_t vp8_pred8x8c_tree[3][2] = {
+ { -DC_PRED8x8, 1 }, // '0'
+ { -VERT_PRED8x8, 2 }, // '10'
+ { -HOR_PRED8x8, -PLANE_PRED8x8 }, // '110', '111'
+};
+
+static const uint8_t vp8_pred8x8c_prob_intra[3] = {
+ 142, 114, 183
+};
+static const uint8_t vp8_pred8x8c_prob_inter[3] = {
+ 162, 101, 204
+};
+static const uint8_t vp8_pred4x4_prob_inter[9] = {
+ 120, 90, 79, 133, 87, 85, 80, 111, 151
+};
+
+static const uint8_t vp8_pred4x4_prob_intra[10][10][9] = {
+ {
+ { 39, 53, 200, 87, 26, 21, 43, 232, 171 },
+ { 56, 34, 51, 104, 114, 102, 29, 93, 77 },
+ { 88, 88, 147, 150, 42, 46, 45, 196, 205 },
+ { 107, 54, 32, 26, 51, 1, 81, 43, 31 },
+ { 39, 28, 85, 171, 58, 165, 90, 98, 64 },
+ { 34, 22, 116, 206, 23, 34, 43, 166, 73 },
+ { 34, 19, 21, 102, 132, 188, 16, 76, 124 },
+ { 68, 25, 106, 22, 64, 171, 36, 225, 114 },
+ { 62, 18, 78, 95, 85, 57, 50, 48, 51 },
+ { 43, 97, 183, 117, 85, 38, 35, 179, 61 },
+ },
+ {
+ { 112, 113, 77, 85, 179, 255, 38, 120, 114 },
+ { 40, 42, 1, 196, 245, 209, 10, 25, 109 },
+ { 193, 101, 35, 159, 215, 111, 89, 46, 111 },
+ { 100, 80, 8, 43, 154, 1, 51, 26, 71 },
+ { 88, 43, 29, 140, 166, 213, 37, 43, 154 },
+ { 61, 63, 30, 155, 67, 45, 68, 1, 209 },
+ { 41, 40, 5, 102, 211, 183, 4, 1, 221 },
+ { 142, 78, 78, 16, 255, 128, 34, 197, 171 },
+ { 51, 50, 17, 168, 209, 192, 23, 25, 82 },
+ { 60, 148, 31, 172, 219, 228, 21, 18, 111 },
+ },
+ {
+ { 175, 69, 143, 80, 85, 82, 72, 155, 103 },
+ { 56, 58, 10, 171, 218, 189, 17, 13, 152 },
+ { 231, 120, 48, 89, 115, 113, 120, 152, 112 },
+ { 144, 71, 10, 38, 171, 213, 144, 34, 26 },
+ { 114, 26, 17, 163, 44, 195, 21, 10, 173 },
+ { 121, 24, 80, 195, 26, 62, 44, 64, 85 },
+ { 63, 20, 8, 114, 114, 208, 12, 9, 226 },
+ { 170, 46, 55, 19, 136, 160, 33, 206, 71 },
+ { 81, 40, 11, 96, 182, 84, 29, 16, 36 },
+ { 152, 179, 64, 126, 170, 118, 46, 70, 95 },
+ },
+ {
+ { 75, 79, 123, 47, 51, 128, 81, 171, 1 },
+ { 57, 17, 5, 71, 102, 57, 53, 41, 49 },
+ { 125, 98, 42, 88, 104, 85, 117, 175, 82 },
+ { 115, 21, 2, 10, 102, 255, 166, 23, 6 },
+ { 38, 33, 13, 121, 57, 73, 26, 1, 85 },
+ { 41, 10, 67, 138, 77, 110, 90, 47, 114 },
+ { 57, 18, 10, 102, 102, 213, 34, 20, 43 },
+ { 101, 29, 16, 10, 85, 128, 101, 196, 26 },
+ { 117, 20, 15, 36, 163, 128, 68, 1, 26 },
+ { 95, 84, 53, 89, 128, 100, 113, 101, 45 },
+ },
+ {
+ { 63, 59, 90, 180, 59, 166, 93, 73, 154 },
+ { 40, 40, 21, 116, 143, 209, 34, 39, 175 },
+ { 138, 31, 36, 171, 27, 166, 38, 44, 229 },
+ { 57, 46, 22, 24, 128, 1, 54, 17, 37 },
+ { 47, 15, 16, 183, 34, 223, 49, 45, 183 },
+ { 46, 17, 33, 183, 6, 98, 15, 32, 183 },
+ { 40, 3, 9, 115, 51, 192, 18, 6, 223 },
+ { 65, 32, 73, 115, 28, 128, 23, 128, 205 },
+ { 87, 37, 9, 115, 59, 77, 64, 21, 47 },
+ { 67, 87, 58, 169, 82, 115, 26, 59, 179 },
+ },
+ {
+ { 54, 57, 112, 184, 5, 41, 38, 166, 213 },
+ { 30, 34, 26, 133, 152, 116, 10, 32, 134 },
+ { 104, 55, 44, 218, 9, 54, 53, 130, 226 },
+ { 75, 32, 12, 51, 192, 255, 160, 43, 51 },
+ { 39, 19, 53, 221, 26, 114, 32, 73, 255 },
+ { 31, 9, 65, 234, 2, 15, 1, 118, 73 },
+ { 56, 21, 23, 111, 59, 205, 45, 37, 192 },
+ { 88, 31, 35, 67, 102, 85, 55, 186, 85 },
+ { 55, 38, 70, 124, 73, 102, 1, 34, 98 },
+ { 64, 90, 70, 205, 40, 41, 23, 26, 57 },
+ },
+ {
+ { 86, 40, 64, 135, 148, 224, 45, 183, 128 },
+ { 22, 26, 17, 131, 240, 154, 14, 1, 209 },
+ { 164, 50, 31, 137, 154, 133, 25, 35, 218 },
+ { 83, 12, 13, 54, 192, 255, 68, 47, 28 },
+ { 45, 16, 21, 91, 64, 222, 7, 1, 197 },
+ { 56, 21, 39, 155, 60, 138, 23, 102, 213 },
+ { 18, 11, 7, 63, 144, 171, 4, 4, 246 },
+ { 85, 26, 85, 85, 128, 128, 32, 146, 171 },
+ { 35, 27, 10, 146, 174, 171, 12, 26, 128 },
+ { 51, 103, 44, 131, 131, 123, 31, 6, 158 },
+ },
+ {
+ { 68, 45, 128, 34, 1, 47, 11, 245, 171 },
+ { 62, 17, 19, 70, 146, 85, 55, 62, 70 },
+ { 102, 61, 71, 37, 34, 53, 31, 243, 192 },
+ { 75, 15, 9, 9, 64, 255, 184, 119, 16 },
+ { 37, 43, 37, 154, 100, 163, 85, 160, 1 },
+ { 63, 9, 92, 136, 28, 64, 32, 201, 85 },
+ { 56, 8, 17, 132, 137, 255, 55, 116, 128 },
+ { 86, 6, 28, 5, 64, 255, 25, 248, 1 },
+ { 58, 15, 20, 82, 135, 57, 26, 121, 40 },
+ { 69, 60, 71, 38, 73, 119, 28, 222, 37 },
+ },
+ {
+ { 101, 75, 128, 139, 118, 146, 116, 128, 85 },
+ { 56, 41, 15, 176, 236, 85, 37, 9, 62 },
+ { 190, 80, 35, 99, 180, 80, 126, 54, 45 },
+ { 146, 36, 19, 30, 171, 255, 97, 27, 20 },
+ { 71, 30, 17, 119, 118, 255, 17, 18, 138 },
+ { 101, 38, 60, 138, 55, 70, 43, 26, 142 },
+ { 32, 41, 20, 117, 151, 142, 20, 21, 163 },
+ { 138, 45, 61, 62, 219, 1, 81, 188, 64 },
+ { 112, 19, 12, 61, 195, 128, 48, 4, 24 },
+ { 85, 126, 47, 87, 176, 51, 41, 20, 32 },
+ },
+ {
+ { 66, 102, 167, 99, 74, 62, 40, 234, 128 },
+ { 41, 53, 9, 178, 241, 141, 26, 8, 107 },
+ { 134, 183, 89, 137, 98, 101, 106, 165, 148 },
+ { 104, 79, 12, 27, 217, 255, 87, 17, 7 },
+ { 74, 43, 26, 146, 73, 166, 49, 23, 157 },
+ { 65, 38, 105, 160, 51, 52, 31, 115, 128 },
+ { 47, 41, 14, 110, 182, 183, 21, 17, 194 },
+ { 87, 68, 71, 44, 114, 51, 15, 186, 23 },
+ { 66, 45, 25, 102, 197, 189, 23, 18, 22 },
+ { 72, 187, 100, 130, 157, 111, 32, 75, 80 },
+ },
+};
+
+static const uint8_t vp8_coeff_band[16] = {
+ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7
+};
+
+/* Inverse of vp8_coeff_band: mappings of bands to coefficient indexes.
+ * Each list is -1-terminated. */
+static const int8_t vp8_coeff_band_indexes[8][10] = {
+ { 0, -1 },
+ { 1, -1 },
+ { 2, -1 },
+ { 3, -1 },
+ { 5, -1 },
+ { 6, -1 },
+ { 4, 7, 8, 9, 10, 11, 12, 13, 14, -1 },
+ { 15, -1 }
+};
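+
+/* The default and update tables below store token probabilities per
+ * coefficient band ([4][8][3][...]), while the runtime table in
+ * VP8Context.prob is expanded to one entry per coefficient position
+ * ([4][16][3][...]); vp8_coeff_band_indexes is used to fan a per-band value
+ * out to every position in that band, avoiding the band lookup in the inner
+ * token-decoding loop. */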
+
+static const uint8_t vp8_dct_cat1_prob[] = {
+ 159, 0
+};
+static const uint8_t vp8_dct_cat2_prob[] = {
+ 165, 145, 0
+};
+static const uint8_t vp8_dct_cat3_prob[] = {
+ 173, 148, 140, 0
+};
+static const uint8_t vp8_dct_cat4_prob[] = {
+ 176, 155, 140, 135, 0
+};
+static const uint8_t vp8_dct_cat5_prob[] = {
+ 180, 157, 141, 134, 130, 0
+};
+static const uint8_t vp8_dct_cat6_prob[] = {
+ 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0
+};
+
+// only used for cat3 and above; cat 1 and 2 are referenced directly
+const uint8_t *const ff_vp8_dct_cat_prob[] = {
+ vp8_dct_cat3_prob,
+ vp8_dct_cat4_prob,
+ vp8_dct_cat5_prob,
+ vp8_dct_cat6_prob,
+};
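+
+/* Each DCT_CATn token is followed by extra literal bits read with the
+ * vp8_dct_cat*_prob values above; since the lists are 0-terminated, their
+ * lengths give the bit counts: 1, 2, 3, 4, 5 and 11 bits for cat1..cat6,
+ * added to the base values 5, 7, 11, 19, 35 and 67 respectively. */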
+
+static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS - 1] = {
+ {
+ {
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ },
+ {
+ { 253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128 },
+ { 189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128 },
+ { 106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128 },
+ },
+ {
+ { 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128 },
+ { 181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128 },
+ { 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128 },
+ },
+ {
+ { 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128 },
+ { 184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128 },
+ { 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128 },
+ },
+ {
+ { 1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128 },
+ { 170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128 },
+ { 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128 },
+ },
+ {
+ { 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128 },
+ { 207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128 },
+ { 102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128 },
+ },
+ {
+ { 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128 },
+ { 177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128 },
+ { 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128 },
+ },
+ {
+ { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ },
+ },
+ {
+ {
+ { 198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62 },
+ { 131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1 },
+ { 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128 },
+ },
+ {
+ { 1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128 },
+ { 184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128 },
+ { 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128 },
+ },
+ {
+ { 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128 },
+ { 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128 },
+ { 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128 },
+ },
+ {
+ { 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128 },
+ { 109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128 },
+ { 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128 },
+ },
+ {
+ { 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128 },
+ { 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128 },
+ { 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128 },
+ },
+ {
+ { 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128 },
+ { 124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128 },
+ { 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128 },
+ },
+ {
+ { 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128 },
+ { 121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128 },
+ { 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128 },
+ },
+ {
+ { 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128 },
+ { 203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128 },
+ },
+ },
+ {
+ {
+ { 253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128 },
+ { 175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128 },
+ { 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128 },
+ },
+ {
+ { 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128 },
+ { 239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128 },
+ { 155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128 },
+ },
+ {
+ { 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128 },
+ { 201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128 },
+ { 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128 },
+ },
+ {
+ { 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128 },
+ { 141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128 },
+ },
+ {
+ { 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128 },
+ { 149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ },
+ {
+ { 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ },
+ {
+ { 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ },
+ {
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ },
+ },
+ {
+ {
+ { 202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255 },
+ { 126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128 },
+ { 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128 },
+ },
+ {
+ { 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128 },
+ { 166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128 },
+ { 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128 },
+ },
+ {
+ { 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128 },
+ { 124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128 },
+ { 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128 },
+ },
+ {
+ { 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128 },
+ { 149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128 },
+ { 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128 },
+ },
+ {
+ { 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128 },
+ { 123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128 },
+ { 20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128 },
+ },
+ {
+ { 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128 },
+ { 168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128 },
+ { 47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128 },
+ },
+ {
+ { 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128 },
+ { 141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128 },
+ { 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128 },
+ },
+ {
+ { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ },
+ },
+};
+
+static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS - 1] = {
+ {
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255 },
+ { 250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ },
+ {
+ {
+ { 217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255 },
+ { 234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ },
+ {
+ {
+ { 186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ },
+ {
+ {
+ { 248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255 },
+ { 248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ {
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ },
+ },
+};
+
+static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT + 1] = {
+ 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, 17,
+ 18, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 25, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 91, 93, 95, 96, 98, 100, 101, 102, 104, 106, 108, 110, 112, 114, 116, 118,
+ 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 143, 145, 148, 151, 154, 157,
+};
+
+static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT + 1] = {
+ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76,
+ 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108,
+ 110, 112, 114, 116, 119, 122, 125, 128, 131, 134, 137, 140, 143, 146, 149, 152,
+ 155, 158, 161, 164, 167, 170, 173, 177, 181, 185, 189, 193, 197, 201, 205, 209,
+ 213, 217, 221, 225, 229, 234, 239, 245, 249, 254, 259, 264, 269, 274, 279, 284,
+};
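+
+/* vp8_dc_qlookup/vp8_ac_qlookup map a quantizer index in 0..VP8_MAX_QUANT
+ * (the frame's yac_qi plus the relevant per-plane delta, clamped to that
+ * range) to the DC and AC quantizer step sizes used for dequantization. */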
+
+static const uint8_t vp8_mv_update_prob[2][19] = {
+ { 237,
+ 246,
+ 253, 253, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 250, 250, 252, /* VP8 only: */ 254, 254 },
+ { 231,
+ 243,
+ 245, 253, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 251, 251, 254, /* VP8 only: */ 254, 254 }
+};
+
+static const uint8_t vp7_mv_default_prob[2][17] = {
+ { 162,
+ 128,
+ 225, 146, 172, 147, 214, 39, 156,
+ 247, 210, 135, 68, 138, 220, 239, 246 },
+ { 164,
+ 128,
+ 204, 170, 119, 235, 140, 230, 228,
+ 244, 184, 201, 44, 173, 221, 239, 253 }
+};
+
+static const uint8_t vp8_mv_default_prob[2][19] = {
+ { 162,
+ 128,
+ 225, 146, 172, 147, 214, 39, 156,
+ 128, 129, 132, 75, 145, 178, 206, 239, 254, 254 },
+ { 164,
+ 128,
+ 204, 170, 119, 235, 140, 230, 228,
+ 128, 130, 130, 74, 148, 180, 203, 236, 254, 254 }
+};
+
+static const uint8_t vp7_feature_value_size[2][4] = {
+ { 7, 6, 0, 8 },
+ { 7, 6, 0, 5 },
+};
+
+static const int8_t vp7_feature_index_tree[4][2] =
+{
+ { 1, 2 },
+ { -0, -1 }, // '00', '01'
+ { -2, -3 }, // '10', '11'
+};
+
+static const uint16_t vp7_ydc_qlookup[] = {
+ 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38, 39, 39, 40, 41,
+ 41, 42, 43, 43, 44, 45, 45, 46, 47, 48, 48, 49, 50, 51, 52,
+ 53, 53, 54, 56, 57, 58, 59, 60, 62, 63, 65, 66, 68, 70, 72,
+ 74, 76, 79, 81, 84, 87, 90, 93, 96, 100, 104, 108, 112, 116, 121,
+ 126, 131, 136, 142, 148, 154, 160, 167, 174, 182, 189, 198, 206, 215, 224,
+ 234, 244, 254, 265, 277, 288, 301, 313, 327, 340, 355, 370, 385, 401, 417,
+ 434, 452, 470, 489, 509, 529, 550, 572,
+};
+
+static const uint16_t vp7_yac_qlookup[] = {
+ 4, 4, 5, 5, 6, 6, 7, 8, 9, 10, 11, 12, 13, 15,
+ 16, 17, 19, 20, 22, 23, 25, 26, 28, 29, 31, 32, 34, 35,
+ 37, 38, 40, 41, 42, 44, 45, 46, 48, 49, 50, 51, 53, 54,
+ 55, 56, 57, 58, 59, 61, 62, 63, 64, 65, 67, 68, 69, 70,
+ 72, 73, 75, 76, 78, 80, 82, 84, 86, 88, 91, 93, 96, 99,
+ 102, 105, 109, 112, 116, 121, 125, 130, 135, 140, 146, 152, 158, 165,
+ 172, 180, 188, 196, 205, 214, 224, 234, 245, 256, 268, 281, 294, 308,
+ 322, 337, 353, 369, 386, 404, 423, 443, 463, 484, 506, 529, 553, 578,
+ 604, 631, 659, 688, 718, 749, 781, 814, 849, 885, 922, 960, 1000, 1041,
+ 1083, 1127,
+};
+
+static const uint16_t vp7_y2dc_qlookup[] = {
+ 7, 9, 11, 13, 15, 17, 19, 21, 23, 26, 28, 30, 33, 35,
+ 37, 39, 42, 44, 46, 48, 51, 53, 55, 57, 59, 61, 63, 65,
+ 67, 69, 70, 72, 74, 75, 77, 78, 80, 81, 83, 84, 85, 87,
+ 88, 89, 90, 92, 93, 94, 95, 96, 97, 99, 100, 101, 102, 104,
+ 105, 106, 108, 109, 111, 113, 114, 116, 118, 120, 123, 125, 128, 131,
+ 134, 137, 140, 144, 148, 152, 156, 161, 166, 171, 176, 182, 188, 195,
+ 202, 209, 217, 225, 234, 243, 253, 263, 274, 285, 297, 309, 322, 336,
+ 350, 365, 381, 397, 414, 432, 450, 470, 490, 511, 533, 556, 579, 604,
+ 630, 656, 684, 713, 742, 773, 805, 838, 873, 908, 945, 983, 1022, 1063,
+ 1105, 1148,
+};
+
+static const uint16_t vp7_y2ac_qlookup[] = {
+ 7, 9, 11, 13, 16, 18, 21, 24, 26, 29, 32, 35,
+ 38, 41, 43, 46, 49, 52, 55, 58, 61, 64, 66, 69,
+ 72, 74, 77, 79, 82, 84, 86, 88, 91, 93, 95, 97,
+ 98, 100, 102, 104, 105, 107, 109, 110, 112, 113, 115, 116,
+ 117, 119, 120, 122, 123, 125, 127, 128, 130, 132, 134, 136,
+ 138, 141, 143, 146, 149, 152, 155, 158, 162, 166, 171, 175,
+ 180, 185, 191, 197, 204, 210, 218, 226, 234, 243, 252, 262,
+ 273, 284, 295, 308, 321, 335, 350, 365, 381, 398, 416, 435,
+ 455, 476, 497, 520, 544, 569, 595, 622, 650, 680, 711, 743,
+ 776, 811, 848, 885, 925, 965, 1008, 1052, 1097, 1144, 1193, 1244,
+ 1297, 1351, 1407, 1466, 1526, 1588, 1652, 1719,
+};
+
+#endif /* AVCODEC_VP8DATA_H */
diff --git a/media/ffvpx/libavcodec/vp8dsp.c b/media/ffvpx/libavcodec/vp8dsp.c
new file mode 100644
index 0000000000..7a85e9f4ca
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp8dsp.c
@@ -0,0 +1,753 @@
+/*
+ * Copyright (C) 2010 David Conrad
+ * Copyright (C) 2010 Ronald S. Bultje
+ * Copyright (C) 2014 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * VP8 compatible video decoder
+ */
+
+#include "config_components.h"
+
+#include "libavutil/common.h"
+#include "libavutil/intreadwrite.h"
+
+#include "mathops.h"
+#include "vp8dsp.h"
+
+#define MK_IDCT_DC_ADD4_C(name) \
+static void name ## _idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], \
+ ptrdiff_t stride) \
+{ \
+ name ## _idct_dc_add_c(dst + stride * 0 + 0, block[0], stride); \
+ name ## _idct_dc_add_c(dst + stride * 0 + 4, block[1], stride); \
+ name ## _idct_dc_add_c(dst + stride * 4 + 0, block[2], stride); \
+ name ## _idct_dc_add_c(dst + stride * 4 + 4, block[3], stride); \
+} \
+ \
+static void name ## _idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], \
+ ptrdiff_t stride) \
+{ \
+ name ## _idct_dc_add_c(dst + 0, block[0], stride); \
+ name ## _idct_dc_add_c(dst + 4, block[1], stride); \
+ name ## _idct_dc_add_c(dst + 8, block[2], stride); \
+ name ## _idct_dc_add_c(dst + 12, block[3], stride); \
+}
+
+#if CONFIG_VP7_DECODER
+static void vp7_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
+{
+ int i;
+ unsigned a1, b1, c1, d1;
+ int16_t tmp[16];
+
+ for (i = 0; i < 4; i++) {
+ a1 = (dc[i * 4 + 0] + dc[i * 4 + 2]) * 23170;
+ b1 = (dc[i * 4 + 0] - dc[i * 4 + 2]) * 23170;
+ c1 = dc[i * 4 + 1] * 12540 - dc[i * 4 + 3] * 30274;
+ d1 = dc[i * 4 + 1] * 30274 + dc[i * 4 + 3] * 12540;
+ tmp[i * 4 + 0] = (int)(a1 + d1) >> 14;
+ tmp[i * 4 + 3] = (int)(a1 - d1) >> 14;
+ tmp[i * 4 + 1] = (int)(b1 + c1) >> 14;
+ tmp[i * 4 + 2] = (int)(b1 - c1) >> 14;
+ }
+
+ for (i = 0; i < 4; i++) {
+ a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
+ b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
+ c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
+ d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
+ AV_ZERO64(dc + i * 4);
+ block[0][i][0] = (int)(a1 + d1 + 0x20000) >> 18;
+ block[3][i][0] = (int)(a1 - d1 + 0x20000) >> 18;
+ block[1][i][0] = (int)(b1 + c1 + 0x20000) >> 18;
+ block[2][i][0] = (int)(b1 - c1 + 0x20000) >> 18;
+ }
+}
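+
+/* The VP7 transforms in this file are fixed-point 4-point DCTs:
+ * 23170 ~= cos(pi/4) * 2^15, 30274 ~= cos(pi/8) * 2^15 and
+ * 12540 ~= sin(pi/8) * 2^15, with intermediates scaled back by >> 14 and the
+ * final outputs by >> 18 (0x20000 being the rounding term). */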
+
+static void vp7_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
+{
+ int i, val = (23170 * (23170 * dc[0] >> 14) + 0x20000) >> 18;
+ dc[0] = 0;
+
+ for (i = 0; i < 4; i++) {
+ block[i][0][0] = val;
+ block[i][1][0] = val;
+ block[i][2][0] = val;
+ block[i][3][0] = val;
+ }
+}
+
+static void vp7_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
+{
+ int i;
+ unsigned a1, b1, c1, d1;
+ int16_t tmp[16];
+
+ for (i = 0; i < 4; i++) {
+ a1 = (block[i * 4 + 0] + block[i * 4 + 2]) * 23170;
+ b1 = (block[i * 4 + 0] - block[i * 4 + 2]) * 23170;
+ c1 = block[i * 4 + 1] * 12540 - block[i * 4 + 3] * 30274;
+ d1 = block[i * 4 + 1] * 30274 + block[i * 4 + 3] * 12540;
+ AV_ZERO64(block + i * 4);
+ tmp[i * 4 + 0] = (int)(a1 + d1) >> 14;
+ tmp[i * 4 + 3] = (int)(a1 - d1) >> 14;
+ tmp[i * 4 + 1] = (int)(b1 + c1) >> 14;
+ tmp[i * 4 + 2] = (int)(b1 - c1) >> 14;
+ }
+
+ for (i = 0; i < 4; i++) {
+ a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
+ b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
+ c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
+ d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
+ dst[0 * stride + i] = av_clip_uint8(dst[0 * stride + i] +
+ ((int)(a1 + d1 + 0x20000) >> 18));
+ dst[3 * stride + i] = av_clip_uint8(dst[3 * stride + i] +
+ ((int)(a1 - d1 + 0x20000) >> 18));
+ dst[1 * stride + i] = av_clip_uint8(dst[1 * stride + i] +
+ ((int)(b1 + c1 + 0x20000) >> 18));
+ dst[2 * stride + i] = av_clip_uint8(dst[2 * stride + i] +
+ ((int)(b1 - c1 + 0x20000) >> 18));
+ }
+}
+
+static void vp7_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
+{
+ int i, dc = (23170 * (23170 * block[0] >> 14) + 0x20000) >> 18;
+ block[0] = 0;
+
+ for (i = 0; i < 4; i++) {
+ dst[0] = av_clip_uint8(dst[0] + dc);
+ dst[1] = av_clip_uint8(dst[1] + dc);
+ dst[2] = av_clip_uint8(dst[2] + dc);
+ dst[3] = av_clip_uint8(dst[3] + dc);
+ dst += stride;
+ }
+}
+
+MK_IDCT_DC_ADD4_C(vp7)
+#endif /* CONFIG_VP7_DECODER */
+
+// TODO: Maybe add dequant
+#if CONFIG_VP8_DECODER
+static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
+{
+ int i, t0, t1, t2, t3;
+
+ for (i = 0; i < 4; i++) {
+ t0 = dc[0 * 4 + i] + dc[3 * 4 + i];
+ t1 = dc[1 * 4 + i] + dc[2 * 4 + i];
+ t2 = dc[1 * 4 + i] - dc[2 * 4 + i];
+ t3 = dc[0 * 4 + i] - dc[3 * 4 + i];
+
+ dc[0 * 4 + i] = t0 + t1;
+ dc[1 * 4 + i] = t3 + t2;
+ dc[2 * 4 + i] = t0 - t1;
+ dc[3 * 4 + i] = t3 - t2;
+ }
+
+ for (i = 0; i < 4; i++) {
+ t0 = dc[i * 4 + 0] + dc[i * 4 + 3] + 3; // rounding
+ t1 = dc[i * 4 + 1] + dc[i * 4 + 2];
+ t2 = dc[i * 4 + 1] - dc[i * 4 + 2];
+ t3 = dc[i * 4 + 0] - dc[i * 4 + 3] + 3; // rounding
+ AV_ZERO64(dc + i * 4);
+
+ block[i][0][0] = (t0 + t1) >> 3;
+ block[i][1][0] = (t3 + t2) >> 3;
+ block[i][2][0] = (t0 - t1) >> 3;
+ block[i][3][0] = (t3 - t2) >> 3;
+ }
+}
+
+static void vp8_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
+{
+ int i, val = (dc[0] + 3) >> 3;
+ dc[0] = 0;
+
+ for (i = 0; i < 4; i++) {
+ block[i][0][0] = val;
+ block[i][1][0] = val;
+ block[i][2][0] = val;
+ block[i][3][0] = val;
+ }
+}
+
+#define MUL_20091(a) ((((a) * 20091) >> 16) + (a))
+#define MUL_35468(a) (((a) * 35468) >> 16)
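+/* 20091 and 35468 are sqrt(2)*cos(pi/8) - 1 and sqrt(2)*sin(pi/8) in Q16; the
+ * "+ (a)" in MUL_20091() restores the integer part, so the two macros multiply
+ * by sqrt(2)*cos(pi/8) and sqrt(2)*sin(pi/8) respectively. */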
+
+static void vp8_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
+{
+ int i, t0, t1, t2, t3;
+ int16_t tmp[16];
+
+ for (i = 0; i < 4; i++) {
+ t0 = block[0 * 4 + i] + block[2 * 4 + i];
+ t1 = block[0 * 4 + i] - block[2 * 4 + i];
+ t2 = MUL_35468(block[1 * 4 + i]) - MUL_20091(block[3 * 4 + i]);
+ t3 = MUL_20091(block[1 * 4 + i]) + MUL_35468(block[3 * 4 + i]);
+ block[0 * 4 + i] = 0;
+ block[1 * 4 + i] = 0;
+ block[2 * 4 + i] = 0;
+ block[3 * 4 + i] = 0;
+
+ tmp[i * 4 + 0] = t0 + t3;
+ tmp[i * 4 + 1] = t1 + t2;
+ tmp[i * 4 + 2] = t1 - t2;
+ tmp[i * 4 + 3] = t0 - t3;
+ }
+
+ for (i = 0; i < 4; i++) {
+ t0 = tmp[0 * 4 + i] + tmp[2 * 4 + i];
+ t1 = tmp[0 * 4 + i] - tmp[2 * 4 + i];
+ t2 = MUL_35468(tmp[1 * 4 + i]) - MUL_20091(tmp[3 * 4 + i]);
+ t3 = MUL_20091(tmp[1 * 4 + i]) + MUL_35468(tmp[3 * 4 + i]);
+
+ dst[0] = av_clip_uint8(dst[0] + ((t0 + t3 + 4) >> 3));
+ dst[1] = av_clip_uint8(dst[1] + ((t1 + t2 + 4) >> 3));
+ dst[2] = av_clip_uint8(dst[2] + ((t1 - t2 + 4) >> 3));
+ dst[3] = av_clip_uint8(dst[3] + ((t0 - t3 + 4) >> 3));
+ dst += stride;
+ }
+}
+
+static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
+{
+ int i, dc = (block[0] + 4) >> 3;
+ block[0] = 0;
+
+ for (i = 0; i < 4; i++) {
+ dst[0] = av_clip_uint8(dst[0] + dc);
+ dst[1] = av_clip_uint8(dst[1] + dc);
+ dst[2] = av_clip_uint8(dst[2] + dc);
+ dst[3] = av_clip_uint8(dst[3] + dc);
+ dst += stride;
+ }
+}
+
+MK_IDCT_DC_ADD4_C(vp8)
+#endif /* CONFIG_VP8_DECODER */
+
+// because I like only having two parameters to pass to functions...
+#define LOAD_PIXELS \
+ int av_unused p3 = p[-4 * stride]; \
+ int av_unused p2 = p[-3 * stride]; \
+ int av_unused p1 = p[-2 * stride]; \
+ int av_unused p0 = p[-1 * stride]; \
+ int av_unused q0 = p[ 0 * stride]; \
+ int av_unused q1 = p[ 1 * stride]; \
+ int av_unused q2 = p[ 2 * stride]; \
+ int av_unused q3 = p[ 3 * stride];
+
+#define clip_int8(n) (cm[(n) + 0x80] - 0x80)
+
+static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
+ int is4tap, int is_vp7)
+{
+ LOAD_PIXELS
+ int a, f1, f2;
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
+
+ a = 3 * (q0 - p0);
+
+ if (is4tap)
+ a += clip_int8(p1 - q1);
+
+ a = clip_int8(a);
+
+ // We deviate from the spec here with c(a+3) >> 3
+ // since that's what libvpx does.
+ f1 = FFMIN(a + 4, 127) >> 3;
+
+ if (is_vp7)
+ f2 = f1 - ((a & 7) == 4);
+ else
+ f2 = FFMIN(a + 3, 127) >> 3;
+
+ // Despite what the spec says, we do need to clamp here to
+ // be bitexact with libvpx.
+ p[-1 * stride] = cm[p0 + f2];
+ p[ 0 * stride] = cm[q0 - f1];
+
+ // only used for _inner on blocks without high edge variance
+ if (!is4tap) {
+ a = (f1 + 1) >> 1;
+ p[-2 * stride] = cm[p1 + a];
+ p[ 1 * stride] = cm[q1 - a];
+ }
+}
+
+static av_always_inline void vp7_filter_common(uint8_t *p, ptrdiff_t stride,
+ int is4tap)
+{
+ filter_common(p, stride, is4tap, IS_VP7);
+}
+
+static av_always_inline void vp8_filter_common(uint8_t *p, ptrdiff_t stride,
+ int is4tap)
+{
+ filter_common(p, stride, is4tap, IS_VP8);
+}
+
+static av_always_inline int vp7_simple_limit(uint8_t *p, ptrdiff_t stride,
+ int flim)
+{
+ LOAD_PIXELS
+ return FFABS(p0 - q0) <= flim;
+}
+
+static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride,
+ int flim)
+{
+ LOAD_PIXELS
+ return 2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim;
+}
+
+/**
+ * E - limit at the macroblock edge
+ * I - limit for interior difference
+ */
+#define NORMAL_LIMIT(vpn) \
+static av_always_inline int vp ## vpn ## _normal_limit(uint8_t *p, \
+ ptrdiff_t stride, \
+ int E, int I) \
+{ \
+ LOAD_PIXELS \
+ return vp ## vpn ## _simple_limit(p, stride, E) && \
+ FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I && \
+ FFABS(p1 - p0) <= I && FFABS(q3 - q2) <= I && \
+ FFABS(q2 - q1) <= I && FFABS(q1 - q0) <= I; \
+}
+
+NORMAL_LIMIT(7)
+NORMAL_LIMIT(8)
+
+// high edge variance
+static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
+{
+ LOAD_PIXELS
+ return FFABS(p1 - p0) > thresh || FFABS(q1 - q0) > thresh;
+}
+
+static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
+{
+ int a0, a1, a2, w;
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
+
+ LOAD_PIXELS
+
+ w = clip_int8(p1 - q1);
+ w = clip_int8(w + 3 * (q0 - p0));
+
+ a0 = (27 * w + 63) >> 7;
+ a1 = (18 * w + 63) >> 7;
+ a2 = (9 * w + 63) >> 7;
+
+ p[-3 * stride] = cm[p2 + a2];
+ p[-2 * stride] = cm[p1 + a1];
+ p[-1 * stride] = cm[p0 + a0];
+ p[ 0 * stride] = cm[q0 - a0];
+ p[ 1 * stride] = cm[q1 - a1];
+ p[ 2 * stride] = cm[q2 - a2];
+}
+
+#define LOOP_FILTER(vpn, dir, size, stridea, strideb, maybe_inline) \
+static maybe_inline \
+void vpn ## _ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int flim_E, int flim_I, \
+ int hev_thresh) \
+{ \
+ int i; \
+ for (i = 0; i < size; i++) \
+ if (vpn ## _normal_limit(dst + i * stridea, strideb, \
+ flim_E, flim_I)) { \
+ if (hev(dst + i * stridea, strideb, hev_thresh)) \
+ vpn ## _filter_common(dst + i * stridea, strideb, 1); \
+ else \
+ filter_mbedge(dst + i * stridea, strideb); \
+ } \
+} \
+ \
+static maybe_inline \
+void vpn ## _ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int flim_E, \
+ int flim_I, \
+ int hev_thresh) \
+{ \
+ int i; \
+ for (i = 0; i < size; i++) \
+ if (vpn ## _normal_limit(dst + i * stridea, strideb, \
+ flim_E, flim_I)) { \
+ int hv = hev(dst + i * stridea, strideb, hev_thresh); \
+ if (hv) \
+ vpn ## _filter_common(dst + i * stridea, strideb, 1); \
+ else \
+ vpn ## _filter_common(dst + i * stridea, strideb, 0); \
+ } \
+}
+
+#define UV_LOOP_FILTER(vpn, dir, stridea, strideb) \
+LOOP_FILTER(vpn, dir, 8, stridea, strideb, av_always_inline) \
+static void vpn ## _ ## dir ## _loop_filter8uv_c(uint8_t *dstU, \
+ uint8_t *dstV, \
+ ptrdiff_t stride, int fE, \
+ int fI, int hev_thresh) \
+{ \
+ vpn ## _ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh); \
+ vpn ## _ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh); \
+} \
+ \
+static void vpn ## _ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, \
+ uint8_t *dstV, \
+ ptrdiff_t stride, \
+ int fE, int fI, \
+ int hev_thresh) \
+{ \
+ vpn ## _ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, \
+ hev_thresh); \
+ vpn ## _ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, \
+ hev_thresh); \
+}
+
+#define LOOP_FILTER_SIMPLE(vpn) \
+static void vpn ## _v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, \
+ int flim) \
+{ \
+ int i; \
+ for (i = 0; i < 16; i++) \
+ if (vpn ## _simple_limit(dst + i, stride, flim)) \
+ vpn ## _filter_common(dst + i, stride, 1); \
+} \
+ \
+static void vpn ## _h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, \
+ int flim) \
+{ \
+ int i; \
+ for (i = 0; i < 16; i++) \
+ if (vpn ## _simple_limit(dst + i * stride, 1, flim)) \
+ vpn ## _filter_common(dst + i * stride, 1, 1); \
+}
+
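+/* LOOP_FILTERS() below instantiates vertical and horizontal variants of each
+ * filter by swapping stridea and strideb: the "v" variants step along a row
+ * (stridea = 1) and tap vertically (strideb = stride), while the "h" variants
+ * step down a column (stridea = stride) and tap horizontally (strideb = 1). */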
+#define LOOP_FILTERS(vpn) \
+ LOOP_FILTER(vpn, v, 16, 1, stride, ) \
+ LOOP_FILTER(vpn, h, 16, stride, 1, ) \
+ UV_LOOP_FILTER(vpn, v, 1, stride) \
+ UV_LOOP_FILTER(vpn, h, stride, 1) \
+ LOOP_FILTER_SIMPLE(vpn) \
+
+static const uint8_t subpel_filters[7][6] = {
+ { 0, 6, 123, 12, 1, 0 },
+ { 2, 11, 108, 36, 8, 1 },
+ { 0, 9, 93, 50, 6, 0 },
+ { 3, 16, 77, 77, 16, 3 },
+ { 0, 6, 50, 93, 9, 0 },
+ { 1, 8, 36, 108, 11, 2 },
+ { 0, 1, 12, 123, 6, 0 },
+};
+
+#define PUT_PIXELS(WIDTH) \
+static void put_vp8_pixels ## WIDTH ## _c(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, ptrdiff_t srcstride, \
+ int h, int x, int y) \
+{ \
+ int i; \
+ for (i = 0; i < h; i++, dst += dststride, src += srcstride) \
+ memcpy(dst, src, WIDTH); \
+}
+
+PUT_PIXELS(16)
+PUT_PIXELS(8)
+PUT_PIXELS(4)
+
+#define FILTER_6TAP(src, F, stride) \
+ cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
+ F[0] * src[x - 2 * stride] + F[3] * src[x + 1 * stride] - \
+ F[4] * src[x + 2 * stride] + F[5] * src[x + 3 * stride] + 64) >> 7]
+
+#define FILTER_4TAP(src, F, stride) \
+ cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
+ F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + 64) >> 7]
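+
+/* With the sign patterns above, each row of subpel_filters sums to 128 (e.g.
+ * 123 - 6 + 0 + 12 - 1 + 0 for the first row), matching the + 64 rounding and
+ * the >> 7 normalization; the rows with zero outer taps are the 4-tap ones. */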
+
+#define VP8_EPEL_H(SIZE, TAPS) \
+static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, \
+ ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const uint8_t *filter = subpel_filters[mx - 1]; \
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
+ int x, y; \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
+ dst += dststride; \
+ src += srcstride; \
+ } \
+}
+
+#define VP8_EPEL_V(SIZE, TAPS) \
+static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, \
+ ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const uint8_t *filter = subpel_filters[my - 1]; \
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
+ int x, y; \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
+ dst += dststride; \
+ src += srcstride; \
+ } \
+}
+
+#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
+static void \
+put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, \
+ ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, \
+ int my) \
+{ \
+ const uint8_t *filter = subpel_filters[mx - 1]; \
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
+ int x, y; \
+ uint8_t tmp_array[(2 * SIZE + VTAPS - 1) * SIZE]; \
+ uint8_t *tmp = tmp_array; \
+ src -= (2 - (VTAPS == 4)) * srcstride; \
+ \
+ for (y = 0; y < h + VTAPS - 1; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ tmp[x] = FILTER_ ## HTAPS ## TAP(src, filter, 1); \
+ tmp += SIZE; \
+ src += srcstride; \
+ } \
+ tmp = tmp_array + (2 - (VTAPS == 4)) * SIZE; \
+ filter = subpel_filters[my - 1]; \
+ \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = FILTER_ ## VTAPS ## TAP(tmp, filter, SIZE); \
+ dst += dststride; \
+ tmp += SIZE; \
+ } \
+}
+
+VP8_EPEL_H(16, 4)
+VP8_EPEL_H(8, 4)
+VP8_EPEL_H(4, 4)
+VP8_EPEL_H(16, 6)
+VP8_EPEL_H(8, 6)
+VP8_EPEL_H(4, 6)
+VP8_EPEL_V(16, 4)
+VP8_EPEL_V(8, 4)
+VP8_EPEL_V(4, 4)
+VP8_EPEL_V(16, 6)
+VP8_EPEL_V(8, 6)
+VP8_EPEL_V(4, 6)
+
+VP8_EPEL_HV(16, 4, 4)
+VP8_EPEL_HV(8, 4, 4)
+VP8_EPEL_HV(4, 4, 4)
+VP8_EPEL_HV(16, 4, 6)
+VP8_EPEL_HV(8, 4, 6)
+VP8_EPEL_HV(4, 4, 6)
+VP8_EPEL_HV(16, 6, 4)
+VP8_EPEL_HV(8, 6, 4)
+VP8_EPEL_HV(4, 6, 4)
+VP8_EPEL_HV(16, 6, 6)
+VP8_EPEL_HV(8, 6, 6)
+VP8_EPEL_HV(4, 6, 6)
+
+#define VP8_BILINEAR(SIZE) \
+static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, \
+ const uint8_t *src, ptrdiff_t sstride, \
+ int h, int mx, int my) \
+{ \
+ int a = 8 - mx, b = mx; \
+ int x, y; \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
+ dst += dstride; \
+ src += sstride; \
+ } \
+} \
+ \
+static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, \
+ const uint8_t *src, ptrdiff_t sstride, \
+ int h, int mx, int my) \
+{ \
+ int c = 8 - my, d = my; \
+ int x, y; \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3; \
+ dst += dstride; \
+ src += sstride; \
+ } \
+} \
+ \
+static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, \
+ ptrdiff_t dstride, \
+ const uint8_t *src, \
+ ptrdiff_t sstride, \
+ int h, int mx, int my) \
+{ \
+ int a = 8 - mx, b = mx; \
+ int c = 8 - my, d = my; \
+ int x, y; \
+ uint8_t tmp_array[(2 * SIZE + 1) * SIZE]; \
+ uint8_t *tmp = tmp_array; \
+ for (y = 0; y < h + 1; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
+ tmp += SIZE; \
+ src += sstride; \
+ } \
+ tmp = tmp_array; \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = (c * tmp[x] + d * tmp[x + SIZE] + 4) >> 3; \
+ dst += dstride; \
+ tmp += SIZE; \
+ } \
+}
+
+VP8_BILINEAR(16)
+VP8_BILINEAR(8)
+VP8_BILINEAR(4)
+
+#define VP78_MC_FUNC(IDX, SIZE) \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][1] = put_vp8_epel ## SIZE ## _h4v4_c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][2] = put_vp8_epel ## SIZE ## _h6v4_c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c
+
+#define VP78_BILINEAR_MC_FUNC(IDX, SIZE) \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = put_vp8_bilinear ## SIZE ## _v_c; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][1][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][1][2] = put_vp8_bilinear ## SIZE ## _hv_c; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][2][0] = put_vp8_bilinear ## SIZE ## _v_c; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = put_vp8_bilinear ## SIZE ## _hv_c
+
+av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
+{
+ VP78_MC_FUNC(0, 16);
+ VP78_MC_FUNC(1, 8);
+ VP78_MC_FUNC(2, 4);
+
+ VP78_BILINEAR_MC_FUNC(0, 16);
+ VP78_BILINEAR_MC_FUNC(1, 8);
+ VP78_BILINEAR_MC_FUNC(2, 4);
+
+#if ARCH_AARCH64
+ ff_vp78dsp_init_aarch64(dsp);
+#elif ARCH_ARM
+ ff_vp78dsp_init_arm(dsp);
+#elif ARCH_PPC
+ ff_vp78dsp_init_ppc(dsp);
+#elif ARCH_X86
+ ff_vp78dsp_init_x86(dsp);
+#endif
+}
+
+#if CONFIG_VP7_DECODER
+LOOP_FILTERS(vp7)
+
+av_cold void ff_vp7dsp_init(VP8DSPContext *dsp)
+{
+ dsp->vp8_luma_dc_wht = vp7_luma_dc_wht_c;
+ dsp->vp8_luma_dc_wht_dc = vp7_luma_dc_wht_dc_c;
+ dsp->vp8_idct_add = vp7_idct_add_c;
+ dsp->vp8_idct_dc_add = vp7_idct_dc_add_c;
+ dsp->vp8_idct_dc_add4y = vp7_idct_dc_add4y_c;
+ dsp->vp8_idct_dc_add4uv = vp7_idct_dc_add4uv_c;
+
+ dsp->vp8_v_loop_filter16y = vp7_v_loop_filter16_c;
+ dsp->vp8_h_loop_filter16y = vp7_h_loop_filter16_c;
+ dsp->vp8_v_loop_filter8uv = vp7_v_loop_filter8uv_c;
+ dsp->vp8_h_loop_filter8uv = vp7_h_loop_filter8uv_c;
+
+ dsp->vp8_v_loop_filter16y_inner = vp7_v_loop_filter16_inner_c;
+ dsp->vp8_h_loop_filter16y_inner = vp7_h_loop_filter16_inner_c;
+ dsp->vp8_v_loop_filter8uv_inner = vp7_v_loop_filter8uv_inner_c;
+ dsp->vp8_h_loop_filter8uv_inner = vp7_h_loop_filter8uv_inner_c;
+
+ dsp->vp8_v_loop_filter_simple = vp7_v_loop_filter_simple_c;
+ dsp->vp8_h_loop_filter_simple = vp7_h_loop_filter_simple_c;
+}
+#endif /* CONFIG_VP7_DECODER */
+
+#if CONFIG_VP8_DECODER
+LOOP_FILTERS(vp8)
+
+av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
+{
+ dsp->vp8_luma_dc_wht = vp8_luma_dc_wht_c;
+ dsp->vp8_luma_dc_wht_dc = vp8_luma_dc_wht_dc_c;
+ dsp->vp8_idct_add = vp8_idct_add_c;
+ dsp->vp8_idct_dc_add = vp8_idct_dc_add_c;
+ dsp->vp8_idct_dc_add4y = vp8_idct_dc_add4y_c;
+ dsp->vp8_idct_dc_add4uv = vp8_idct_dc_add4uv_c;
+
+ dsp->vp8_v_loop_filter16y = vp8_v_loop_filter16_c;
+ dsp->vp8_h_loop_filter16y = vp8_h_loop_filter16_c;
+ dsp->vp8_v_loop_filter8uv = vp8_v_loop_filter8uv_c;
+ dsp->vp8_h_loop_filter8uv = vp8_h_loop_filter8uv_c;
+
+ dsp->vp8_v_loop_filter16y_inner = vp8_v_loop_filter16_inner_c;
+ dsp->vp8_h_loop_filter16y_inner = vp8_h_loop_filter16_inner_c;
+ dsp->vp8_v_loop_filter8uv_inner = vp8_v_loop_filter8uv_inner_c;
+ dsp->vp8_h_loop_filter8uv_inner = vp8_h_loop_filter8uv_inner_c;
+
+ dsp->vp8_v_loop_filter_simple = vp8_v_loop_filter_simple_c;
+ dsp->vp8_h_loop_filter_simple = vp8_h_loop_filter_simple_c;
+
+#if ARCH_AARCH64
+ ff_vp8dsp_init_aarch64(dsp);
+#elif ARCH_ARM
+ ff_vp8dsp_init_arm(dsp);
+#elif ARCH_X86
+ ff_vp8dsp_init_x86(dsp);
+#elif ARCH_MIPS
+ ff_vp8dsp_init_mips(dsp);
+#elif ARCH_LOONGARCH
+ ff_vp8dsp_init_loongarch(dsp);
+#endif
+}
+#endif /* CONFIG_VP8_DECODER */
diff --git a/media/ffvpx/libavcodec/vp8dsp.h b/media/ffvpx/libavcodec/vp8dsp.h
new file mode 100644
index 0000000000..16b5e9c35b
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp8dsp.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2010 David Conrad
+ * Copyright (C) 2010 Ronald S. Bultje
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * VP8 compatible video decoder
+ */
+
+#ifndef AVCODEC_VP8DSP_H
+#define AVCODEC_VP8DSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef void (*vp8_mc_func)(uint8_t *dst /* align 8 */, ptrdiff_t dstStride,
+ const uint8_t *src /* align 1 */, ptrdiff_t srcStride,
+ int h, int x, int y);
+
+typedef struct VP8DSPContext {
+ void (*vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16]);
+ void (*vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16]);
+ void (*vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
+ void (*vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
+ void (*vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16],
+ ptrdiff_t stride);
+ void (*vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16],
+ ptrdiff_t stride);
+
+ // loop filter applied to edges between macroblocks
+ void (*vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride,
+ int flim_E, int flim_I, int hev_thresh);
+ void (*vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride,
+ int flim_E, int flim_I, int hev_thresh);
+ void (*vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
+ int flim_E, int flim_I, int hev_thresh);
+ void (*vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
+ int flim_E, int flim_I, int hev_thresh);
+
+ // loop filter applied to inner macroblock edges
+ void (*vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride,
+ int flim_E, int flim_I, int hev_thresh);
+ void (*vp8_h_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride,
+ int flim_E, int flim_I, int hev_thresh);
+ void (*vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV,
+ ptrdiff_t stride,
+ int flim_E, int flim_I, int hev_thresh);
+ void (*vp8_h_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV,
+ ptrdiff_t stride,
+ int flim_E, int flim_I, int hev_thresh);
+
+ void (*vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim);
+ void (*vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim);
+
+ /**
+ * first dimension: 4-log2(width)
+ * second dimension: 0 if no vertical interpolation is needed;
+ * 1 4-tap vertical interpolation filter (my & 1)
+ * 2 6-tap vertical interpolation filter (!(my & 1))
+ * third dimension: same as second dimension, for horizontal interpolation
+ * so something like put_vp8_epel_pixels_tab[4-log2(width)][2*!!my-(my&1)][2*!!mx-(mx&1)](..., mx, my)
+ */
+ vp8_mc_func put_vp8_epel_pixels_tab[3][3][3];
+ vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3];
+} VP8DSPContext;
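+/* Example of the indexing scheme documented above: an 8-wide block with
+ * mx = 1 (odd -> 4-tap) and my = 4 (even and nonzero -> 6-tap) selects
+ * put_vp8_epel_pixels_tab[4 - 3][2][1], i.e. put_vp8_epel8_h4v6_c in the
+ * C init code. */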
+
+void ff_vp7dsp_init(VP8DSPContext *c);
+
+void ff_vp78dsp_init(VP8DSPContext *c);
+void ff_vp78dsp_init_aarch64(VP8DSPContext *c);
+void ff_vp78dsp_init_arm(VP8DSPContext *c);
+void ff_vp78dsp_init_ppc(VP8DSPContext *c);
+void ff_vp78dsp_init_x86(VP8DSPContext *c);
+
+void ff_vp8dsp_init(VP8DSPContext *c);
+void ff_vp8dsp_init_aarch64(VP8DSPContext *c);
+void ff_vp8dsp_init_arm(VP8DSPContext *c);
+void ff_vp8dsp_init_x86(VP8DSPContext *c);
+void ff_vp8dsp_init_mips(VP8DSPContext *c);
+void ff_vp8dsp_init_loongarch(VP8DSPContext *c);
+
+#define IS_VP7 1
+#define IS_VP8 0
+
+#endif /* AVCODEC_VP8DSP_H */
diff --git a/media/ffvpx/libavcodec/vp9.c b/media/ffvpx/libavcodec/vp9.c
new file mode 100644
index 0000000000..7c0a246446
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9.c
@@ -0,0 +1,1914 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config_components.h"
+
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "get_bits.h"
+#include "hwconfig.h"
+#include "profiles.h"
+#include "thread.h"
+#include "threadframe.h"
+#include "pthread_internal.h"
+
+#include "videodsp.h"
+#include "vp89_rac.h"
+#include "vp9.h"
+#include "vp9data.h"
+#include "vp9dec.h"
+#include "vpx_rac.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/video_enc_params.h"
+
+#define VP9_SYNCCODE 0x498342
+
+#if HAVE_THREADS
+DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
+ (offsetof(VP9Context, progress_mutex)),
+ (offsetof(VP9Context, progress_cond)));
+
+static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
+ VP9Context *s = avctx->priv_data;
+ int i;
+
+ if (avctx->active_thread_type & FF_THREAD_SLICE) {
+ if (s->entries)
+ av_freep(&s->entries);
+
+ s->entries = av_malloc_array(n, sizeof(atomic_int));
+ if (!s->entries)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < n; i++)
+ atomic_init(&s->entries[i], 0);
+ }
+ return 0;
+}
+
+static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
+ pthread_mutex_lock(&s->progress_mutex);
+ atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
+ pthread_cond_signal(&s->progress_cond);
+ pthread_mutex_unlock(&s->progress_mutex);
+}
+
+static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
+ if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
+ return;
+
+ pthread_mutex_lock(&s->progress_mutex);
+ while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
+ pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
+ pthread_mutex_unlock(&s->progress_mutex);
+}
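+/* The release increment in vp9_report_tile_progress() pairs with the acquire
+ * load on the fast path above, so data written before the report is visible
+ * to the waiting thread; the slow path relies on the mutex for ordering. */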
+#else
+static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
+#endif
+
+static void vp9_tile_data_free(VP9TileData *td)
+{
+ av_freep(&td->b_base);
+ av_freep(&td->block_base);
+ av_freep(&td->block_structure);
+}
+
+static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
+{
+ ff_thread_release_ext_buffer(avctx, &f->tf);
+ av_buffer_unref(&f->extradata);
+ av_buffer_unref(&f->hwaccel_priv_buf);
+ f->segmentation_map = NULL;
+ f->hwaccel_picture_private = NULL;
+}
+
+static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
+{
+ VP9Context *s = avctx->priv_data;
+ int ret, sz;
+
+ ret = ff_thread_get_ext_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
+ if (ret < 0)
+ return ret;
+
+ sz = 64 * s->sb_cols * s->sb_rows;
+ if (sz != s->frame_extradata_pool_size) {
+ av_buffer_pool_uninit(&s->frame_extradata_pool);
+ s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
+ if (!s->frame_extradata_pool) {
+ s->frame_extradata_pool_size = 0;
+ goto fail;
+ }
+ s->frame_extradata_pool_size = sz;
+ }
+ f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
+ if (!f->extradata) {
+ goto fail;
+ }
+ memset(f->extradata->data, 0, f->extradata->size);
+
+ f->segmentation_map = f->extradata->data;
+ f->mv = (VP9mvrefPair *) (f->extradata->data + sz);
+
+ if (avctx->hwaccel) {
+ const AVHWAccel *hwaccel = avctx->hwaccel;
+ av_assert0(!f->hwaccel_picture_private);
+ if (hwaccel->frame_priv_data_size) {
+ f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
+ if (!f->hwaccel_priv_buf)
+ goto fail;
+ f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
+ }
+ }
+
+ return 0;
+
+fail:
+ vp9_frame_unref(avctx, f);
+ return AVERROR(ENOMEM);
+}
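+/* The extradata buffer allocated above holds sz bytes of segmentation map
+ * (one byte per 8x8 block, 64 per 64x64 superblock) followed by sz
+ * VP9mvrefPair entries, hence the sz * (1 + sizeof(VP9mvrefPair)) element
+ * size used for the pool. */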
+
+static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
+{
+ int ret;
+
+ ret = ff_thread_ref_frame(&dst->tf, &src->tf);
+ if (ret < 0)
+ return ret;
+
+ dst->extradata = av_buffer_ref(src->extradata);
+ if (!dst->extradata)
+ goto fail;
+
+ dst->segmentation_map = src->segmentation_map;
+ dst->mv = src->mv;
+ dst->uses_2pass = src->uses_2pass;
+
+ if (src->hwaccel_picture_private) {
+ dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
+ if (!dst->hwaccel_priv_buf)
+ goto fail;
+ dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
+ }
+
+ return 0;
+
+fail:
+ vp9_frame_unref(avctx, dst);
+ return AVERROR(ENOMEM);
+}
+
+static int update_size(AVCodecContext *avctx, int w, int h)
+{
+#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
+ CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
+ CONFIG_VP9_NVDEC_HWACCEL + \
+ CONFIG_VP9_VAAPI_HWACCEL + \
+ CONFIG_VP9_VDPAU_HWACCEL + \
+ CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
+ enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
+ VP9Context *s = avctx->priv_data;
+ uint8_t *p;
+ int bytesperpixel = s->bytesperpixel, ret, cols, rows;
+ int lflvl_len, i;
+
+ av_assert0(w > 0 && h > 0);
+
+ if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
+ if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
+ return ret;
+
+ switch (s->pix_fmt) {
+ case AV_PIX_FMT_YUV420P:
+ case AV_PIX_FMT_YUV420P10:
+#if CONFIG_VP9_DXVA2_HWACCEL
+ *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
+#endif
+#if CONFIG_VP9_D3D11VA_HWACCEL
+ *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
+ *fmtp++ = AV_PIX_FMT_D3D11;
+#endif
+#if CONFIG_VP9_NVDEC_HWACCEL
+ *fmtp++ = AV_PIX_FMT_CUDA;
+#endif
+#if CONFIG_VP9_VAAPI_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VAAPI;
+#endif
+#if CONFIG_VP9_VDPAU_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VDPAU;
+#endif
+#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
+#endif
+ break;
+ case AV_PIX_FMT_YUV420P12:
+#if CONFIG_VP9_NVDEC_HWACCEL
+ *fmtp++ = AV_PIX_FMT_CUDA;
+#endif
+#if CONFIG_VP9_VAAPI_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VAAPI;
+#endif
+#if CONFIG_VP9_VDPAU_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VDPAU;
+#endif
+ break;
+ case AV_PIX_FMT_YUV444P:
+ case AV_PIX_FMT_YUV444P10:
+ case AV_PIX_FMT_YUV444P12:
+#if CONFIG_VP9_VAAPI_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VAAPI;
+#endif
+ break;
+ }
+
+ *fmtp++ = s->pix_fmt;
+ *fmtp = AV_PIX_FMT_NONE;
+
+ ret = ff_thread_get_format(avctx, pix_fmts);
+ if (ret < 0)
+ return ret;
+
+ avctx->pix_fmt = ret;
+ s->gf_fmt = s->pix_fmt;
+ s->w = w;
+ s->h = h;
+ }
+
+ cols = (w + 7) >> 3;
+ rows = (h + 7) >> 3;
+
+ if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
+ return 0;
+
+ s->last_fmt = s->pix_fmt;
+ s->sb_cols = (w + 63) >> 6;
+ s->sb_rows = (h + 63) >> 6;
+ s->cols = (w + 7) >> 3;
+ s->rows = (h + 7) >> 3;
+ lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;
+
+#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
+ av_freep(&s->intra_pred_data[0]);
+ // FIXME we slightly over-allocate here for subsampled chroma, but a little
+ // bit of padding shouldn't affect performance...
+ p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
+ lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
+ if (!p)
+ return AVERROR(ENOMEM);
+ assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
+ assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
+ assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
+ assign(s->above_y_nnz_ctx, uint8_t *, 16);
+ assign(s->above_mode_ctx, uint8_t *, 16);
+ assign(s->above_mv_ctx, VP9mv(*)[2], 16);
+ assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
+ assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
+ assign(s->above_partition_ctx, uint8_t *, 8);
+ assign(s->above_skip_ctx, uint8_t *, 8);
+ assign(s->above_txfm_ctx, uint8_t *, 8);
+ assign(s->above_segpred_ctx, uint8_t *, 8);
+ assign(s->above_intra_ctx, uint8_t *, 8);
+ assign(s->above_comp_ctx, uint8_t *, 8);
+ assign(s->above_ref_ctx, uint8_t *, 8);
+ assign(s->above_filter_ctx, uint8_t *, 8);
+ assign(s->lflvl, VP9Filter *, lflvl_len);
+#undef assign
+
+ if (s->td) {
+ for (i = 0; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
+ }
+
+ if (s->s.h.bpp != s->last_bpp) {
+ ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ ff_videodsp_init(&s->vdsp, s->s.h.bpp);
+ s->last_bpp = s->s.h.bpp;
+ }
+
+ return 0;
+}
+
+static int update_block_buffers(AVCodecContext *avctx)
+{
+ int i;
+ VP9Context *s = avctx->priv_data;
+ int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
+ VP9TileData *td = &s->td[0];
+
+ if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
+ return 0;
+
+ vp9_tile_data_free(td);
+ chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
+ chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
+ if (s->s.frames[CUR_FRAME].uses_2pass) {
+ int sbs = s->sb_cols * s->sb_rows;
+
+ td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
+ td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
+ 16 * 16 + 2 * chroma_eobs) * sbs);
+ if (!td->b_base || !td->block_base)
+ return AVERROR(ENOMEM);
+ td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
+ td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
+ td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
+ td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
+ td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
+
+ if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
+ td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
+ if (!td->block_structure)
+ return AVERROR(ENOMEM);
+ }
+ } else {
+ for (i = 1; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
+
+ for (i = 0; i < s->active_tile_cols; i++) {
+ s->td[i].b_base = av_malloc(sizeof(VP9Block));
+ s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
+ 16 * 16 + 2 * chroma_eobs);
+ if (!s->td[i].b_base || !s->td[i].block_base)
+ return AVERROR(ENOMEM);
+ s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
+ s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
+ s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
+ s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
+ s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
+
+ if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
+ s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
+ if (!s->td[i].block_structure)
+ return AVERROR(ENOMEM);
+ }
+ }
+ }
+ s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
+
+ return 0;
+}
+
+// The sign bit is at the end, not the start, of a bit sequence
+static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
+{
+ int v = get_bits(gb, n);
+ return get_bits1(gb) ? -v : v;
+}
+
+static av_always_inline int inv_recenter_nonneg(int v, int m)
+{
+ if (v > 2 * m)
+ return v;
+ if (v & 1)
+ return m - ((v + 1) >> 1);
+ return m + (v >> 1);
+}
+
+// differential forward probability updates
+static int update_prob(VPXRangeCoder *c, int p)
+{
+ static const uint8_t inv_map_table[255] = {
+ 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
+ 189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
+ 131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 253,
+ };
+ int d;
+
+ /* This code performs a differential probability update. For a current
+ * probability A in the range [1, 255], the difference to any new
+ * probability lies in the range [1 - A, 255 - A]. Part of that range (in
+ * absolute value) exists on both the positive and the negative side,
+ * while the rest exists on one side only. The shared part is coded
+ * differentially, i.e. doubled, with the lowest bit carrying the sign,
+ * and the one-sided part is coded on top of that. The resulting absolute
+ * difference again lies in [0, 254], where a larger value means we are
+ * further away from the original value A, so it can be coded as a VLC,
+ * since bigger changes are increasingly unlikely. The first 20 values in
+ * inv_map_table[] provide 'cheap, rough' updates, whereas the 'fine,
+ * exact' updates sit further down the range, which adds one extra
+ * dimension to this differential update model. */
+
+ if (!vp89_rac_get(c)) {
+ d = vp89_rac_get_uint(c, 4) + 0;
+ } else if (!vp89_rac_get(c)) {
+ d = vp89_rac_get_uint(c, 4) + 16;
+ } else if (!vp89_rac_get(c)) {
+ d = vp89_rac_get_uint(c, 5) + 32;
+ } else {
+ d = vp89_rac_get_uint(c, 7);
+ if (d >= 65)
+ d = (d << 1) - 65 + vp89_rac_get(c);
+ d += 64;
+ av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
+ }
+
+ return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
+ 255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
+}
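+
+/* Informal example of the mapping above: with p = 100, a decoded d of 20
+ * (inv_map_table[20] == 1) yields inv_recenter_nonneg(1, 99) == 98 and thus a
+ * new probability of 99, while d == 21 (table value 2) yields 101 -- the low
+ * bit of the mapped value selects the sign, as described in the comment. */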
+
+static int read_colorspace_details(AVCodecContext *avctx)
+{
+ static const enum AVColorSpace colorspaces[8] = {
+ AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
+ AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
+ };
+ VP9Context *s = avctx->priv_data;
+ int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
+
+ s->bpp_index = bits;
+ s->s.h.bpp = 8 + bits * 2;
+ s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
+ avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
+ if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
+ static const enum AVPixelFormat pix_fmt_rgb[3] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12
+ };
+ s->ss_h = s->ss_v = 0;
+ avctx->color_range = AVCOL_RANGE_JPEG;
+ s->pix_fmt = pix_fmt_rgb[bits];
+ if (avctx->profile & 1) {
+ if (get_bits1(&s->gb)) {
+ av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
+ return AVERROR_INVALIDDATA;
+ }
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
+ avctx->profile);
+ return AVERROR_INVALIDDATA;
+ }
+ } else {
+ static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
+ { { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P },
+ { AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV420P } },
+ { { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
+ { AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
+ { { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
+ { AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }
+ };
+ avctx->color_range = get_bits1(&s->gb) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+ if (avctx->profile & 1) {
+ s->ss_h = get_bits1(&s->gb);
+ s->ss_v = get_bits1(&s->gb);
+ s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
+ if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
+ av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
+ avctx->profile);
+ return AVERROR_INVALIDDATA;
+ } else if (get_bits1(&s->gb)) {
+ av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
+ avctx->profile);
+ return AVERROR_INVALIDDATA;
+ }
+ } else {
+ s->ss_h = s->ss_v = 1;
+ s->pix_fmt = pix_fmt_for_ss[bits][1][1];
+ }
+ }
+
+ return 0;
+}
+
+static int decode_frame_header(AVCodecContext *avctx,
+ const uint8_t *data, int size, int *ref)
+{
+ VP9Context *s = avctx->priv_data;
+ int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
+ int last_invisible;
+ const uint8_t *data2;
+
+ /* general header */
+ if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
+ return ret;
+ }
+ if (get_bits(&s->gb, 2) != 0x2) { // frame marker
+ av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
+ return AVERROR_INVALIDDATA;
+ }
+ avctx->profile = get_bits1(&s->gb);
+ avctx->profile |= get_bits1(&s->gb) << 1;
+ if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
+ if (avctx->profile > 3) {
+ av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
+ return AVERROR_INVALIDDATA;
+ }
+ s->s.h.profile = avctx->profile;
+ if (get_bits1(&s->gb)) {
+ *ref = get_bits(&s->gb, 3);
+ return 0;
+ }
+
+ s->last_keyframe = s->s.h.keyframe;
+ s->s.h.keyframe = !get_bits1(&s->gb);
+
+ last_invisible = s->s.h.invisible;
+ s->s.h.invisible = !get_bits1(&s->gb);
+ s->s.h.errorres = get_bits1(&s->gb);
+ s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
+
+ if (s->s.h.keyframe) {
+ if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
+ av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if ((ret = read_colorspace_details(avctx)) < 0)
+ return ret;
+ // for profile 1, here follows the subsampling bits
+ s->s.h.refreshrefmask = 0xff;
+ w = get_bits(&s->gb, 16) + 1;
+ h = get_bits(&s->gb, 16) + 1;
+ if (get_bits1(&s->gb)) // display size
+ skip_bits(&s->gb, 32);
+ } else {
+ s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
+ s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
+ if (s->s.h.intraonly) {
+ if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
+ av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (avctx->profile >= 1) {
+ if ((ret = read_colorspace_details(avctx)) < 0)
+ return ret;
+ } else {
+ s->ss_h = s->ss_v = 1;
+ s->s.h.bpp = 8;
+ s->bpp_index = 0;
+ s->bytesperpixel = 1;
+ s->pix_fmt = AV_PIX_FMT_YUV420P;
+ avctx->colorspace = AVCOL_SPC_BT470BG;
+ avctx->color_range = AVCOL_RANGE_MPEG;
+ }
+ s->s.h.refreshrefmask = get_bits(&s->gb, 8);
+ w = get_bits(&s->gb, 16) + 1;
+ h = get_bits(&s->gb, 16) + 1;
+ if (get_bits1(&s->gb)) // display size
+ skip_bits(&s->gb, 32);
+ } else {
+ s->s.h.refreshrefmask = get_bits(&s->gb, 8);
+ s->s.h.refidx[0] = get_bits(&s->gb, 3);
+ s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
+ s->s.h.refidx[1] = get_bits(&s->gb, 3);
+ s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
+ s->s.h.refidx[2] = get_bits(&s->gb, 3);
+ s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
+ if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
+ !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
+ !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
+ av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (get_bits1(&s->gb)) {
+ w = s->s.refs[s->s.h.refidx[0]].f->width;
+ h = s->s.refs[s->s.h.refidx[0]].f->height;
+ } else if (get_bits1(&s->gb)) {
+ w = s->s.refs[s->s.h.refidx[1]].f->width;
+ h = s->s.refs[s->s.h.refidx[1]].f->height;
+ } else if (get_bits1(&s->gb)) {
+ w = s->s.refs[s->s.h.refidx[2]].f->width;
+ h = s->s.refs[s->s.h.refidx[2]].f->height;
+ } else {
+ w = get_bits(&s->gb, 16) + 1;
+ h = get_bits(&s->gb, 16) + 1;
+ }
+ // Note that in this code, "CUR_FRAME" is actually before we
+ // have formally allocated a frame, and thus actually represents
+ // the _last_ frame
+ s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
+ s->s.frames[CUR_FRAME].tf.f->height == h;
+ if (get_bits1(&s->gb)) // display size
+ skip_bits(&s->gb, 32);
+ s->s.h.highprecisionmvs = get_bits1(&s->gb);
+ s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
+ get_bits(&s->gb, 2);
+ s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
+ s->s.h.signbias[0] != s->s.h.signbias[2];
+ if (s->s.h.allowcompinter) {
+ if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
+ s->s.h.fixcompref = 2;
+ s->s.h.varcompref[0] = 0;
+ s->s.h.varcompref[1] = 1;
+ } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
+ s->s.h.fixcompref = 1;
+ s->s.h.varcompref[0] = 0;
+ s->s.h.varcompref[1] = 2;
+ } else {
+ s->s.h.fixcompref = 0;
+ s->s.h.varcompref[0] = 1;
+ s->s.h.varcompref[1] = 2;
+ }
+ }
+ }
+ }
+ s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
+ s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
+ s->s.h.framectxid = c = get_bits(&s->gb, 2);
+ if (s->s.h.keyframe || s->s.h.intraonly)
+ s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
+
+ /* loopfilter header data */
+ if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
+ // reset loopfilter defaults
+ s->s.h.lf_delta.ref[0] = 1;
+ s->s.h.lf_delta.ref[1] = 0;
+ s->s.h.lf_delta.ref[2] = -1;
+ s->s.h.lf_delta.ref[3] = -1;
+ s->s.h.lf_delta.mode[0] = 0;
+ s->s.h.lf_delta.mode[1] = 0;
+ memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
+ }
+ s->s.h.filter.level = get_bits(&s->gb, 6);
+ sharp = get_bits(&s->gb, 3);
+ // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
+ // the old cache values since they are still valid
+ if (s->s.h.filter.sharpness != sharp) {
+ for (i = 1; i <= 63; i++) {
+ int limit = i;
+
+ if (sharp > 0) {
+ limit >>= (sharp + 3) >> 2;
+ limit = FFMIN(limit, 9 - sharp);
+ }
+ limit = FFMAX(limit, 1);
+
+ s->filter_lut.lim_lut[i] = limit;
+ s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
+ }
+ }
+ s->s.h.filter.sharpness = sharp;
+ if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
+ if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
+ for (i = 0; i < 4; i++)
+ if (get_bits1(&s->gb))
+ s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
+ for (i = 0; i < 2; i++)
+ if (get_bits1(&s->gb))
+ s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
+ }
+ }
+
+ /* quantization header data */
+ s->s.h.yac_qi = get_bits(&s->gb, 8);
+ s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
+ s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
+ s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
+ s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
+ s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
+ if (s->s.h.lossless)
+ avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
+
+ /* segmentation header info */
+ if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
+ if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
+ for (i = 0; i < 7; i++)
+ s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
+ get_bits(&s->gb, 8) : 255;
+ if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
+ for (i = 0; i < 3; i++)
+ s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
+ get_bits(&s->gb, 8) : 255;
+ }
+
+ if (get_bits1(&s->gb)) {
+ s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
+ for (i = 0; i < 8; i++) {
+ if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
+ s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
+ if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
+ s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
+ if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
+ s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
+ s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
+ }
+ }
+ }
+
+ // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
+ for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
+ int qyac, qydc, quvac, quvdc, lflvl, sh;
+
+ if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
+ if (s->s.h.segmentation.absolute_vals)
+ qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
+ else
+ qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
+ } else {
+ qyac = s->s.h.yac_qi;
+ }
+ qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
+ quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
+ quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
+ qyac = av_clip_uintp2(qyac, 8);
+
+ s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
+ s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
+ s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
+ s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
+
+ sh = s->s.h.filter.level >= 32;
+ if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
+ if (s->s.h.segmentation.absolute_vals)
+ lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
+ else
+ lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
+ } else {
+ lflvl = s->s.h.filter.level;
+ }
+ if (s->s.h.lf_delta.enabled) {
+ s->s.h.segmentation.feat[i].lflvl[0][0] =
+ s->s.h.segmentation.feat[i].lflvl[0][1] =
+ av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
+ for (j = 1; j < 4; j++) {
+ s->s.h.segmentation.feat[i].lflvl[j][0] =
+ av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
+ s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
+ s->s.h.segmentation.feat[i].lflvl[j][1] =
+ av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
+ s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
+ }
+ } else {
+ memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
+ sizeof(s->s.h.segmentation.feat[i].lflvl));
+ }
+ }
+
+ /* tiling info */
+ if ((ret = update_size(avctx, w, h)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
+ w, h, s->pix_fmt);
+ return ret;
+ }
+ for (s->s.h.tiling.log2_tile_cols = 0;
+ s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
+ s->s.h.tiling.log2_tile_cols++) ;
+ for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
+ max = FFMAX(0, max - 1);
+ while (max > s->s.h.tiling.log2_tile_cols) {
+ if (get_bits1(&s->gb))
+ s->s.h.tiling.log2_tile_cols++;
+ else
+ break;
+ }
+ s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
+ s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
+ if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
+ int n_range_coders;
+ VPXRangeCoder *rc;
+
+ if (s->td) {
+ for (i = 0; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
+ av_freep(&s->td);
+ }
+
+ s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
+ s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
+ s->s.h.tiling.tile_cols : 1;
+ vp9_alloc_entries(avctx, s->sb_rows);
+ if (avctx->active_thread_type == FF_THREAD_SLICE) {
+ n_range_coders = 4; // max_tile_rows
+ } else {
+ n_range_coders = s->s.h.tiling.tile_cols;
+ }
+ s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
+ n_range_coders * sizeof(VPXRangeCoder));
+ if (!s->td)
+ return AVERROR(ENOMEM);
+ rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
+ for (i = 0; i < s->active_tile_cols; i++) {
+ s->td[i].s = s;
+ s->td[i].c_b = rc;
+ rc += n_range_coders;
+ }
+ }
+
+ /* check reference frames */
+ if (!s->s.h.keyframe && !s->s.h.intraonly) {
+ int valid_ref_frame = 0;
+ for (i = 0; i < 3; i++) {
+ AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
+ int refw = ref->width, refh = ref->height;
+
+ if (ref->format != avctx->pix_fmt) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Ref pixfmt (%s) did not match current frame (%s)",
+ av_get_pix_fmt_name(ref->format),
+ av_get_pix_fmt_name(avctx->pix_fmt));
+ return AVERROR_INVALIDDATA;
+ } else if (refw == w && refh == h) {
+ s->mvscale[i][0] = s->mvscale[i][1] = 0;
+ } else {
+ /* Check that at least one of the frames this frame */
+ /* references has valid dimensions */
+ if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
+ av_log(avctx, AV_LOG_WARNING,
+ "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
+ refw, refh, w, h);
+ s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
+ continue;
+ }
+ s->mvscale[i][0] = (refw << 14) / w;
+ s->mvscale[i][1] = (refh << 14) / h;
+ s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
+ s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
+ }
+ valid_ref_frame++;
+ }
+ if (!valid_ref_frame) {
+ av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
+ s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
+ s->prob_ctx[3].p = ff_vp9_default_probs;
+ memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
+ sizeof(ff_vp9_default_coef_probs));
+ memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
+ sizeof(ff_vp9_default_coef_probs));
+ memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
+ sizeof(ff_vp9_default_coef_probs));
+ memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
+ sizeof(ff_vp9_default_coef_probs));
+ } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
+ s->prob_ctx[c].p = ff_vp9_default_probs;
+ memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
+ sizeof(ff_vp9_default_coef_probs));
+ }
+
+ // next 16 bits is size of the rest of the header (arith-coded)
+ s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
+ s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
+
+ data2 = align_get_bits(&s->gb);
+ if (size2 > size - (data2 - data)) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
+ return AVERROR_INVALIDDATA;
+ }
+ ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
+ if (ret < 0)
+ return ret;
+
+ if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
+ av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (i = 0; i < s->active_tile_cols; i++) {
+ if (s->s.h.keyframe || s->s.h.intraonly) {
+ memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
+ memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
+ } else {
+ memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
+ }
+ s->td[i].nb_block_structure = 0;
+ }
+
+ /* FIXME is it faster to not copy here, but do it down in the fw updates
+ * as explicit copies if the fw update is missing (and skip the copy upon
+ * fw update)? */
+ s->prob.p = s->prob_ctx[c].p;
+
+ // txfm updates
+ if (s->s.h.lossless) {
+ s->s.h.txfmmode = TX_4X4;
+ } else {
+ s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
+ if (s->s.h.txfmmode == 3)
+ s->s.h.txfmmode += vp89_rac_get(&s->c);
+
+ if (s->s.h.txfmmode == TX_SWITCHABLE) {
+ for (i = 0; i < 2; i++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.tx16p[i][j] =
+ update_prob(&s->c, s->prob.p.tx16p[i][j]);
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 3; j++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.tx32p[i][j] =
+ update_prob(&s->c, s->prob.p.tx32p[i][j]);
+ }
+ }
+
+ // coef updates
+ for (i = 0; i < 4; i++) {
+ uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
+ if (vp89_rac_get(&s->c)) {
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 2; k++)
+ for (l = 0; l < 6; l++)
+ for (m = 0; m < 6; m++) {
+ uint8_t *p = s->prob.coef[i][j][k][l][m];
+ uint8_t *r = ref[j][k][l][m];
+ if (m >= 3 && l == 0) // dc only has 3 pt
+ break;
+ for (n = 0; n < 3; n++) {
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ p[n] = update_prob(&s->c, r[n]);
+ else
+ p[n] = r[n];
+ }
+ memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
+ }
+ } else {
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 2; k++)
+ for (l = 0; l < 6; l++)
+ for (m = 0; m < 6; m++) {
+ uint8_t *p = s->prob.coef[i][j][k][l][m];
+ uint8_t *r = ref[j][k][l][m];
+ if (m > 3 && l == 0) // dc only has 3 pt
+ break;
+ memcpy(p, r, 3);
+ memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
+ }
+ }
+ if (s->s.h.txfmmode == i)
+ break;
+ }
+
+ // mode updates
+ for (i = 0; i < 3; i++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
+ if (!s->s.h.keyframe && !s->s.h.intraonly) {
+ for (i = 0; i < 7; i++)
+ for (j = 0; j < 3; j++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_mode[i][j] =
+ update_prob(&s->c, s->prob.p.mv_mode[i][j]);
+
+ if (s->s.h.filtermode == FILTER_SWITCHABLE)
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 2; j++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.filter[i][j] =
+ update_prob(&s->c, s->prob.p.filter[i][j]);
+
+ for (i = 0; i < 4; i++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
+
+ if (s->s.h.allowcompinter) {
+ s->s.h.comppredmode = vp89_rac_get(&s->c);
+ if (s->s.h.comppredmode)
+ s->s.h.comppredmode += vp89_rac_get(&s->c);
+ if (s->s.h.comppredmode == PRED_SWITCHABLE)
+ for (i = 0; i < 5; i++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.comp[i] =
+ update_prob(&s->c, s->prob.p.comp[i]);
+ } else {
+ s->s.h.comppredmode = PRED_SINGLEREF;
+ }
+
+ if (s->s.h.comppredmode != PRED_COMPREF) {
+ for (i = 0; i < 5; i++) {
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.single_ref[i][0] =
+ update_prob(&s->c, s->prob.p.single_ref[i][0]);
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.single_ref[i][1] =
+ update_prob(&s->c, s->prob.p.single_ref[i][1]);
+ }
+ }
+
+ if (s->s.h.comppredmode != PRED_SINGLEREF) {
+ for (i = 0; i < 5; i++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.comp_ref[i] =
+ update_prob(&s->c, s->prob.p.comp_ref[i]);
+ }
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 9; j++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.y_mode[i][j] =
+ update_prob(&s->c, s->prob.p.y_mode[i][j]);
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++)
+ for (k = 0; k < 3; k++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.partition[3 - i][j][k] =
+ update_prob(&s->c,
+ s->prob.p.partition[3 - i][j][k]);
+
+ // mv fields don't use the update_prob subexp model for some reason
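+ // (each 7-bit value v is stored as the probability 2 * v + 1, keeping it odd and nonzero)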
+ for (i = 0; i < 3; i++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
+
+ for (i = 0; i < 2; i++) {
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_comp[i].sign =
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
+
+ for (j = 0; j < 10; j++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_comp[i].classes[j] =
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
+
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_comp[i].class0 =
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
+
+ for (j = 0; j < 10; j++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_comp[i].bits[j] =
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
+ }
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 3; k++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_comp[i].class0_fp[j][k] =
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
+
+ for (j = 0; j < 3; j++)
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_comp[i].fp[j] =
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
+ }
+
+ if (s->s.h.highprecisionmvs) {
+ for (i = 0; i < 2; i++) {
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_comp[i].class0_hp =
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
+
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_comp[i].hp =
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
+ }
+ }
+ }
+
+ return (data2 - data) + size2;
+}
+
+static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
+ ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
+{
+ const VP9Context *s = td->s;
+ int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
+ (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
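+ // c combines the above (bit 0) and left (bit 1) partition contexts at this block level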
+ const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
+ s->prob.p.partition[bl][c];
+ enum BlockPartition bp;
+ ptrdiff_t hbs = 4 >> bl;
+ AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
+ ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
+ int bytesperpixel = s->bytesperpixel;
+
+ if (bl == BL_8X8) {
+ bp = vp89_rac_get_tree(td->c, ff_vp9_partition_tree, p);
+ ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
+ } else if (col + hbs < s->cols) { // FIXME why not <=?
+ if (row + hbs < s->rows) { // FIXME why not <=?
+ bp = vp89_rac_get_tree(td->c, ff_vp9_partition_tree, p);
+ switch (bp) {
+ case PARTITION_NONE:
+ ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
+ break;
+ case PARTITION_H:
+ ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
+ yoff += hbs * 8 * y_stride;
+ uvoff += hbs * 8 * uv_stride >> s->ss_v;
+ ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
+ break;
+ case PARTITION_V:
+ ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
+ yoff += hbs * 8 * bytesperpixel;
+ uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
+ ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
+ break;
+ case PARTITION_SPLIT:
+ decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
+ decode_sb(td, row, col + hbs, lflvl,
+ yoff + 8 * hbs * bytesperpixel,
+ uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
+ yoff += hbs * 8 * y_stride;
+ uvoff += hbs * 8 * uv_stride >> s->ss_v;
+ decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
+ decode_sb(td, row + hbs, col + hbs, lflvl,
+ yoff + 8 * hbs * bytesperpixel,
+ uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
+ break;
+ default:
+ av_assert0(0);
+ }
+ } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
+ bp = PARTITION_SPLIT;
+ decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
+ decode_sb(td, row, col + hbs, lflvl,
+ yoff + 8 * hbs * bytesperpixel,
+ uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
+ } else {
+ bp = PARTITION_H;
+ ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
+ }
+ } else if (row + hbs < s->rows) { // FIXME why not <=?
+ if (vpx_rac_get_prob_branchy(td->c, p[2])) {
+ bp = PARTITION_SPLIT;
+ decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
+ yoff += hbs * 8 * y_stride;
+ uvoff += hbs * 8 * uv_stride >> s->ss_v;
+ decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
+ } else {
+ bp = PARTITION_V;
+ ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
+ }
+ } else {
+ bp = PARTITION_SPLIT;
+ decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
+ }
+ td->counts.partition[bl][c][bp]++;
+}
+
+static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
+ ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
+{
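+ // Second-pass variant of decode_sb(): replays the block sizes and partitions
+ // recorded during pass 1 (td->b) instead of reading them from the bitstream.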
+ const VP9Context *s = td->s;
+ VP9Block *b = td->b;
+ ptrdiff_t hbs = 4 >> bl;
+ AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
+ ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
+ int bytesperpixel = s->bytesperpixel;
+
+ if (bl == BL_8X8) {
+ av_assert2(b->bl == BL_8X8);
+ ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
+ } else if (td->b->bl == bl) {
+ ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
+ if (b->bp == PARTITION_H && row + hbs < s->rows) {
+ yoff += hbs * 8 * y_stride;
+ uvoff += hbs * 8 * uv_stride >> s->ss_v;
+ ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
+ } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
+ yoff += hbs * 8 * bytesperpixel;
+ uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
+ ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
+ }
+ } else {
+ decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
+ if (col + hbs < s->cols) { // FIXME why not <=?
+ if (row + hbs < s->rows) {
+ decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
+ uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
+ yoff += hbs * 8 * y_stride;
+ uvoff += hbs * 8 * uv_stride >> s->ss_v;
+ decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
+ decode_sb_mem(td, row + hbs, col + hbs, lflvl,
+ yoff + 8 * hbs * bytesperpixel,
+ uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
+ } else {
+ yoff += hbs * 8 * bytesperpixel;
+ uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
+ decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
+ }
+ } else if (row + hbs < s->rows) {
+ yoff += hbs * 8 * y_stride;
+ uvoff += hbs * 8 * uv_stride >> s->ss_v;
+ decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
+ }
+ }
+}
+
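+// Map tile index idx (one of 2^log2_n tiles covering n superblocks) to a
+// [start, end) range in 8x8-block units; << 3 converts superblock indices
+// to 8x8-block indices (a 64x64 superblock spans 8 such blocks).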
+static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
+{
+ int sb_start = ( idx * n) >> log2_n;
+ int sb_end = ((idx + 1) * n) >> log2_n;
+ *start = FFMIN(sb_start, n) << 3;
+ *end = FFMIN(sb_end, n) << 3;
+}
+
+static void free_buffers(VP9Context *s)
+{
+ int i;
+
+ av_freep(&s->intra_pred_data[0]);
+ for (i = 0; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
+}
+
+static av_cold int vp9_decode_free(AVCodecContext *avctx)
+{
+ VP9Context *s = avctx->priv_data;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ vp9_frame_unref(avctx, &s->s.frames[i]);
+ av_frame_free(&s->s.frames[i].tf.f);
+ }
+ av_buffer_pool_uninit(&s->frame_extradata_pool);
+ for (i = 0; i < 8; i++) {
+ ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
+ av_frame_free(&s->s.refs[i].f);
+ ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
+ av_frame_free(&s->next_refs[i].f);
+ }
+
+ free_buffers(s);
+#if HAVE_THREADS
+ av_freep(&s->entries);
+ ff_pthread_free(s, vp9_context_offsets);
+#endif
+ av_freep(&s->td);
+ return 0;
+}
+
+static int decode_tiles(AVCodecContext *avctx,
+ const uint8_t *data, int size)
+{
+ VP9Context *s = avctx->priv_data;
+ VP9TileData *td = &s->td[0];
+ int row, col, tile_row, tile_col, ret;
+ int bytesperpixel;
+ int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
+ AVFrame *f;
+ ptrdiff_t yoff, uvoff, ls_y, ls_uv;
+
+ f = s->s.frames[CUR_FRAME].tf.f;
+ ls_y = f->linesize[0];
+ ls_uv = f->linesize[1];
+ bytesperpixel = s->bytesperpixel;
+
+ yoff = uvoff = 0;
+ for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
+ set_tile_offset(&tile_row_start, &tile_row_end,
+ tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
+
+ for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
+ int64_t tile_size;
+
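+ // All tiles except the very last carry a 32-bit big-endian size prefix;
+ // the last tile uses all remaining data.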
+ if (tile_col == s->s.h.tiling.tile_cols - 1 &&
+ tile_row == s->s.h.tiling.tile_rows - 1) {
+ tile_size = size;
+ } else {
+ tile_size = AV_RB32(data);
+ data += 4;
+ size -= 4;
+ }
+ if (tile_size > size)
+ return AVERROR_INVALIDDATA;
+ ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
+ if (ret < 0)
+ return ret;
+ if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
+ return AVERROR_INVALIDDATA;
+ data += tile_size;
+ size -= tile_size;
+ }
+
+ for (row = tile_row_start; row < tile_row_end;
+ row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
+ VP9Filter *lflvl_ptr = s->lflvl;
+ ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
+
+ for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
+ set_tile_offset(&tile_col_start, &tile_col_end,
+ tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
+ td->tile_col_start = tile_col_start;
+ if (s->pass != 2) {
+ memset(td->left_partition_ctx, 0, 8);
+ memset(td->left_skip_ctx, 0, 8);
+ if (s->s.h.keyframe || s->s.h.intraonly) {
+ memset(td->left_mode_ctx, DC_PRED, 16);
+ } else {
+ memset(td->left_mode_ctx, NEARESTMV, 8);
+ }
+ memset(td->left_y_nnz_ctx, 0, 16);
+ memset(td->left_uv_nnz_ctx, 0, 32);
+ memset(td->left_segpred_ctx, 0, 8);
+
+ td->c = &td->c_b[tile_col];
+ }
+
+ for (col = tile_col_start;
+ col < tile_col_end;
+ col += 8, yoff2 += 64 * bytesperpixel,
+ uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
+ // FIXME integrate with the loopfilter code (i.e. zero after each
+ // use, similar to how the invtxfm coefficients are handled)
+ if (s->pass != 1) {
+ memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
+ }
+
+ if (s->pass == 2) {
+ decode_sb_mem(td, row, col, lflvl_ptr,
+ yoff2, uvoff2, BL_64X64);
+ } else {
+ if (vpx_rac_is_end(td->c)) {
+ return AVERROR_INVALIDDATA;
+ }
+ decode_sb(td, row, col, lflvl_ptr,
+ yoff2, uvoff2, BL_64X64);
+ }
+ }
+ }
+
+ if (s->pass == 1)
+ continue;
+
+ // backup pre-loopfilter reconstruction data for intra
+ // prediction of next row of sb64s
+ if (row + 8 < s->rows) {
+ memcpy(s->intra_pred_data[0],
+ f->data[0] + yoff + 63 * ls_y,
+ 8 * s->cols * bytesperpixel);
+ memcpy(s->intra_pred_data[1],
+ f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
+ 8 * s->cols * bytesperpixel >> s->ss_h);
+ memcpy(s->intra_pred_data[2],
+ f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
+ 8 * s->cols * bytesperpixel >> s->ss_h);
+ }
+
+ // loopfilter one row
+ if (s->s.h.filter.level) {
+ yoff2 = yoff;
+ uvoff2 = uvoff;
+ lflvl_ptr = s->lflvl;
+ for (col = 0; col < s->cols;
+ col += 8, yoff2 += 64 * bytesperpixel,
+ uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
+ ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
+ yoff2, uvoff2);
+ }
+ }
+
+ // FIXME maybe we can make this more fine-grained by running the
+ // loopfilter per-block instead of after each sbrow;
+ // that would also make the intra pred left preparation easier
+ ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
+ }
+ }
+ return 0;
+}
+
+#if HAVE_THREADS
+static av_always_inline
+int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
+ int threadnr)
+{
+ VP9Context *s = avctx->priv_data;
+ VP9TileData *td = &s->td[jobnr];
+ ptrdiff_t uvoff, yoff, ls_y, ls_uv;
+ int bytesperpixel = s->bytesperpixel, row, col, tile_row;
+ unsigned tile_cols_len;
+ int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
+ VP9Filter *lflvl_ptr_base;
+ AVFrame *f;
+
+ f = s->s.frames[CUR_FRAME].tf.f;
+ ls_y = f->linesize[0];
+ ls_uv = f->linesize[1];
+
+ set_tile_offset(&tile_col_start, &tile_col_end,
+ jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
+ td->tile_col_start = tile_col_start;
+ uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
+ yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
+ lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);
+
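+ // Each job decodes one tile column (jobnr); per-superblock-row progress is
+ // reported so loopfilter_proc() can filter rows as soon as they are complete.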
+ for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
+ set_tile_offset(&tile_row_start, &tile_row_end,
+ tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
+
+ td->c = &td->c_b[tile_row];
+ for (row = tile_row_start; row < tile_row_end;
+ row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
+ ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
+ VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);
+
+ memset(td->left_partition_ctx, 0, 8);
+ memset(td->left_skip_ctx, 0, 8);
+ if (s->s.h.keyframe || s->s.h.intraonly) {
+ memset(td->left_mode_ctx, DC_PRED, 16);
+ } else {
+ memset(td->left_mode_ctx, NEARESTMV, 8);
+ }
+ memset(td->left_y_nnz_ctx, 0, 16);
+ memset(td->left_uv_nnz_ctx, 0, 32);
+ memset(td->left_segpred_ctx, 0, 8);
+
+ for (col = tile_col_start;
+ col < tile_col_end;
+ col += 8, yoff2 += 64 * bytesperpixel,
+ uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
+ // FIXME integrate with the loopfilter code (i.e. zero after each
+ // use, similar to how the invtxfm coefficients are handled)
+ memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
+ decode_sb(td, row, col, lflvl_ptr,
+ yoff2, uvoff2, BL_64X64);
+ }
+
+ // backup pre-loopfilter reconstruction data for intra
+ // prediction of next row of sb64s
+ tile_cols_len = tile_col_end - tile_col_start;
+ if (row + 8 < s->rows) {
+ memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
+ f->data[0] + yoff + 63 * ls_y,
+ 8 * tile_cols_len * bytesperpixel);
+ memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
+ f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
+ 8 * tile_cols_len * bytesperpixel >> s->ss_h);
+ memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
+ f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
+ 8 * tile_cols_len * bytesperpixel >> s->ss_h);
+ }
+
+ vp9_report_tile_progress(s, row >> 3, 1);
+ }
+ }
+ return 0;
+}
+
+static av_always_inline
+int loopfilter_proc(AVCodecContext *avctx)
+{
+ VP9Context *s = avctx->priv_data;
+ ptrdiff_t uvoff, yoff, ls_y, ls_uv;
+ VP9Filter *lflvl_ptr;
+ int bytesperpixel = s->bytesperpixel, col, i;
+ AVFrame *f;
+
+ f = s->s.frames[CUR_FRAME].tf.f;
+ ls_y = f->linesize[0];
+ ls_uv = f->linesize[1];
+
+ for (i = 0; i < s->sb_rows; i++) {
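+ // Wait until every tile column has finished this superblock row before loopfiltering it.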
+ vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
+
+ if (s->s.h.filter.level) {
+ yoff = (ls_y * 64)*i;
+ uvoff = (ls_uv * 64 >> s->ss_v)*i;
+ lflvl_ptr = s->lflvl+s->sb_cols*i;
+ for (col = 0; col < s->cols;
+ col += 8, yoff += 64 * bytesperpixel,
+ uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
+ ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
+ yoff, uvoff);
+ }
+ }
+ }
+ return 0;
+}
+#endif
+
+static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
+{
+ AVVideoEncParams *par;
+ unsigned int tile, nb_blocks = 0;
+
+ if (s->s.h.segmentation.enabled) {
+ for (tile = 0; tile < s->active_tile_cols; tile++)
+ nb_blocks += s->td[tile].nb_block_structure;
+ }
+
+ par = av_video_enc_params_create_side_data(frame->tf.f,
+ AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
+ if (!par)
+ return AVERROR(ENOMEM);
+
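+ // delta_qp[plane][0] holds the DC delta and delta_qp[plane][1] the AC delta
+ // (plane 0 = Y, planes 1/2 = U/V).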
+ par->qp = s->s.h.yac_qi;
+ par->delta_qp[0][0] = s->s.h.ydc_qdelta;
+ par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
+ par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
+ par->delta_qp[1][1] = s->s.h.uvac_qdelta;
+ par->delta_qp[2][1] = s->s.h.uvac_qdelta;
+
+ if (nb_blocks) {
+ unsigned int block = 0;
+ unsigned int tile, block_tile;
+
+ for (tile = 0; tile < s->active_tile_cols; tile++) {
+ VP9TileData *td = &s->td[tile];
+
+ for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
+ AVVideoBlockParams *b = av_video_enc_params_block(par, block++);
+ unsigned int row = td->block_structure[block_tile].row;
+ unsigned int col = td->block_structure[block_tile].col;
+ uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
+
+ b->src_x = col * 8;
+ b->src_y = row * 8;
+ b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
+ b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
+
+ if (s->s.h.segmentation.feat[seg_id].q_enabled) {
+ b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
+ if (s->s.h.segmentation.absolute_vals)
+ b->delta_qp -= par->qp;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, AVPacket *pkt)
+{
+ const uint8_t *data = pkt->data;
+ int size = pkt->size;
+ VP9Context *s = avctx->priv_data;
+ int ret, i, j, ref;
+ int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
+ (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
+ AVFrame *f;
+
+ if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
+ return ret;
+ } else if (ret == 0) {
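+ // A return value of 0 from decode_frame_header() means "show_existing_frame":
+ // re-output reference frame 'ref' without decoding any new data.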
+ if (!s->s.refs[ref].f->buf[0]) {
+ av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
+ return AVERROR_INVALIDDATA;
+ }
+ if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
+ return ret;
+ frame->pts = pkt->pts;
+ frame->pkt_dts = pkt->dts;
+ for (i = 0; i < 8; i++) {
+ if (s->next_refs[i].f->buf[0])
+ ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
+ if (s->s.refs[i].f->buf[0] &&
+ (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
+ return ret;
+ }
+ *got_frame = 1;
+ return pkt->size;
+ }
+ data += ret;
+ size -= ret;
+
+ if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
+ if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
+ vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
+ if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
+ (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
+ return ret;
+ }
+ if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
+ vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
+ if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
+ (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
+ return ret;
+ if (s->s.frames[CUR_FRAME].tf.f->buf[0])
+ vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
+ if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
+ return ret;
+ f = s->s.frames[CUR_FRAME].tf.f;
+ f->key_frame = s->s.h.keyframe;
+ f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+
+ if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
+ (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
+ s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
+ vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
+ }
+
+ // ref frame setup
+ for (i = 0; i < 8; i++) {
+ if (s->next_refs[i].f->buf[0])
+ ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
+ if (s->s.h.refreshrefmask & (1 << i)) {
+ ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
+ } else if (s->s.refs[i].f->buf[0]) {
+ ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
+ }
+ if (ret < 0)
+ return ret;
+ }
+
+ if (avctx->hwaccel) {
+ ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
+ if (ret < 0)
+ return ret;
+ ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
+ if (ret < 0)
+ return ret;
+ ret = avctx->hwaccel->end_frame(avctx);
+ if (ret < 0)
+ return ret;
+ goto finish;
+ }
+
+ // main tile decode loop
+ memset(s->above_partition_ctx, 0, s->cols);
+ memset(s->above_skip_ctx, 0, s->cols);
+ if (s->s.h.keyframe || s->s.h.intraonly) {
+ memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
+ } else {
+ memset(s->above_mode_ctx, NEARESTMV, s->cols);
+ }
+ memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
+ memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
+ memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
+ memset(s->above_segpred_ctx, 0, s->cols);
+ s->pass = s->s.frames[CUR_FRAME].uses_2pass =
+ avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
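+ // Two-pass decoding is used with frame threading when this frame adapts the
+ // probability context: pass 1 only parses symbols so the adapted probabilities
+ // can be handed to the next frame thread early, pass 2 then reconstructs.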
+ if ((ret = update_block_buffers(avctx)) < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to allocate block buffers\n");
+ return ret;
+ }
+ if (s->s.h.refreshctx && s->s.h.parallelmode) {
+ int j, k, l, m;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 2; k++)
+ for (l = 0; l < 6; l++)
+ for (m = 0; m < 6; m++)
+ memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
+ s->prob.coef[i][j][k][l][m], 3);
+ if (s->s.h.txfmmode == i)
+ break;
+ }
+ s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
+ ff_thread_finish_setup(avctx);
+ } else if (!s->s.h.refreshctx) {
+ ff_thread_finish_setup(avctx);
+ }
+
+#if HAVE_THREADS
+ if (avctx->active_thread_type & FF_THREAD_SLICE) {
+ for (i = 0; i < s->sb_rows; i++)
+ atomic_store(&s->entries[i], 0);
+ }
+#endif
+
+ do {
+ for (i = 0; i < s->active_tile_cols; i++) {
+ s->td[i].b = s->td[i].b_base;
+ s->td[i].block = s->td[i].block_base;
+ s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
+ s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
+ s->td[i].eob = s->td[i].eob_base;
+ s->td[i].uveob[0] = s->td[i].uveob_base[0];
+ s->td[i].uveob[1] = s->td[i].uveob_base[1];
+ s->td[i].error_info = 0;
+ }
+
+#if HAVE_THREADS
+ if (avctx->active_thread_type == FF_THREAD_SLICE) {
+ int tile_row, tile_col;
+
+ av_assert1(!s->pass);
+
+ for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
+ for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
+ int64_t tile_size;
+
+ if (tile_col == s->s.h.tiling.tile_cols - 1 &&
+ tile_row == s->s.h.tiling.tile_rows - 1) {
+ tile_size = size;
+ } else {
+ tile_size = AV_RB32(data);
+ data += 4;
+ size -= 4;
+ }
+ if (tile_size > size)
+ return AVERROR_INVALIDDATA;
+ ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
+ if (ret < 0)
+ return ret;
+ if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
+ return AVERROR_INVALIDDATA;
+ data += tile_size;
+ size -= tile_size;
+ }
+ }
+
+ ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
+ } else
+#endif
+ {
+ ret = decode_tiles(avctx, data, size);
+ if (ret < 0) {
+ ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
+ return ret;
+ }
+ }
+
+ // Sum all counts fields into td[0].counts for tile threading
+ if (avctx->active_thread_type == FF_THREAD_SLICE)
+ for (i = 1; i < s->s.h.tiling.tile_cols; i++)
+ for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
+ ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
+
+ if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
+ ff_vp9_adapt_probs(s);
+ ff_thread_finish_setup(avctx);
+ }
+ } while (s->pass++ == 1);
+ ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
+
+ if (s->td->error_info < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
+ s->td->error_info = 0;
+ return AVERROR_INVALIDDATA;
+ }
+ if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
+ ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
+ if (ret < 0)
+ return ret;
+ }
+
+finish:
+ // ref frame setup
+ for (i = 0; i < 8; i++) {
+ if (s->s.refs[i].f->buf[0])
+ ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
+ if (s->next_refs[i].f->buf[0] &&
+ (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
+ return ret;
+ }
+
+ if (!s->s.h.invisible) {
+ if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
+ return ret;
+ *got_frame = 1;
+ }
+
+ return pkt->size;
+}
+
+static void vp9_decode_flush(AVCodecContext *avctx)
+{
+ VP9Context *s = avctx->priv_data;
+ int i;
+
+ for (i = 0; i < 3; i++)
+ vp9_frame_unref(avctx, &s->s.frames[i]);
+ for (i = 0; i < 8; i++)
+ ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
+}
+
+static av_cold int vp9_decode_init(AVCodecContext *avctx)
+{
+ VP9Context *s = avctx->priv_data;
+ int ret;
+
+ s->last_bpp = 0;
+ s->s.h.filter.sharpness = -1;
+
+#if HAVE_THREADS
+ if (avctx->active_thread_type & FF_THREAD_SLICE) {
+ ret = ff_pthread_init(s, vp9_context_offsets);
+ if (ret < 0)
+ return ret;
+ }
+#endif
+
+ for (int i = 0; i < 3; i++) {
+ s->s.frames[i].tf.f = av_frame_alloc();
+ if (!s->s.frames[i].tf.f)
+ return AVERROR(ENOMEM);
+ }
+ for (int i = 0; i < 8; i++) {
+ s->s.refs[i].f = av_frame_alloc();
+ s->next_refs[i].f = av_frame_alloc();
+ if (!s->s.refs[i].f || !s->next_refs[i].f)
+ return AVERROR(ENOMEM);
+ }
+ return 0;
+}
+
+#if HAVE_THREADS
+static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
+{
+ int i, ret;
+ VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
+
+ for (i = 0; i < 3; i++) {
+ if (s->s.frames[i].tf.f->buf[0])
+ vp9_frame_unref(dst, &s->s.frames[i]);
+ if (ssrc->s.frames[i].tf.f->buf[0]) {
+ if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
+ return ret;
+ }
+ }
+ for (i = 0; i < 8; i++) {
+ if (s->s.refs[i].f->buf[0])
+ ff_thread_release_ext_buffer(dst, &s->s.refs[i]);
+ if (ssrc->next_refs[i].f->buf[0]) {
+ if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
+ return ret;
+ }
+ }
+
+ s->s.h.invisible = ssrc->s.h.invisible;
+ s->s.h.keyframe = ssrc->s.h.keyframe;
+ s->s.h.intraonly = ssrc->s.h.intraonly;
+ s->ss_v = ssrc->ss_v;
+ s->ss_h = ssrc->ss_h;
+ s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
+ s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
+ s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
+ s->bytesperpixel = ssrc->bytesperpixel;
+ s->gf_fmt = ssrc->gf_fmt;
+ s->w = ssrc->w;
+ s->h = ssrc->h;
+ s->s.h.bpp = ssrc->s.h.bpp;
+ s->bpp_index = ssrc->bpp_index;
+ s->pix_fmt = ssrc->pix_fmt;
+ memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
+ memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
+ memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
+ sizeof(s->s.h.segmentation.feat));
+
+ return 0;
+}
+#endif
+
+const FFCodec ff_vp9_decoder = {
+ .p.name = "vp9",
+ CODEC_LONG_NAME("Google VP9"),
+ .p.type = AVMEDIA_TYPE_VIDEO,
+ .p.id = AV_CODEC_ID_VP9,
+ .priv_data_size = sizeof(VP9Context),
+ .init = vp9_decode_init,
+ .close = vp9_decode_free,
+ FF_CODEC_DECODE_CB(vp9_decode_frame),
+ .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
+ FF_CODEC_CAP_SLICE_THREAD_HAS_MF |
+ FF_CODEC_CAP_ALLOCATE_PROGRESS,
+ .flush = vp9_decode_flush,
+ UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
+ .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
+ .bsfs = "vp9_superframe_split",
+ .hw_configs = (const AVCodecHWConfigInternal *const []) {
+#if CONFIG_VP9_DXVA2_HWACCEL
+ HWACCEL_DXVA2(vp9),
+#endif
+#if CONFIG_VP9_D3D11VA_HWACCEL
+ HWACCEL_D3D11VA(vp9),
+#endif
+#if CONFIG_VP9_D3D11VA2_HWACCEL
+ HWACCEL_D3D11VA2(vp9),
+#endif
+#if CONFIG_VP9_NVDEC_HWACCEL
+ HWACCEL_NVDEC(vp9),
+#endif
+#if CONFIG_VP9_VAAPI_HWACCEL
+ HWACCEL_VAAPI(vp9),
+#endif
+#if CONFIG_VP9_VDPAU_HWACCEL
+ HWACCEL_VDPAU(vp9),
+#endif
+#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
+ HWACCEL_VIDEOTOOLBOX(vp9),
+#endif
+ NULL
+ },
+};
diff --git a/media/ffvpx/libavcodec/vp9.h b/media/ffvpx/libavcodec/vp9.h
new file mode 100644
index 0000000000..c8d07ad986
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9.h
@@ -0,0 +1,73 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP9_H
+#define AVCODEC_VP9_H
+
+enum TxfmMode {
+ TX_4X4,
+ TX_8X8,
+ TX_16X16,
+ TX_32X32,
+ N_TXFM_SIZES,
+ TX_SWITCHABLE = N_TXFM_SIZES,
+ N_TXFM_MODES
+};
+
+enum TxfmType {
+ DCT_DCT,
+ DCT_ADST,
+ ADST_DCT,
+ ADST_ADST,
+ N_TXFM_TYPES
+};
+
+enum IntraPredMode {
+ VERT_PRED,
+ HOR_PRED,
+ DC_PRED,
+ DIAG_DOWN_LEFT_PRED,
+ DIAG_DOWN_RIGHT_PRED,
+ VERT_RIGHT_PRED,
+ HOR_DOWN_PRED,
+ VERT_LEFT_PRED,
+ HOR_UP_PRED,
+ TM_VP8_PRED,
+ LEFT_DC_PRED,
+ TOP_DC_PRED,
+ DC_128_PRED,
+ DC_127_PRED,
+ DC_129_PRED,
+ N_INTRA_PRED_MODES
+};
+
+enum FilterMode {
+ FILTER_8TAP_SMOOTH,
+ FILTER_8TAP_REGULAR,
+ FILTER_8TAP_SHARP,
+ FILTER_BILINEAR,
+ N_FILTERS,
+ FILTER_SWITCHABLE = N_FILTERS,
+};
+
+#endif /* AVCODEC_VP9_H */
diff --git a/media/ffvpx/libavcodec/vp9_mc_template.c b/media/ffvpx/libavcodec/vp9_mc_template.c
new file mode 100644
index 0000000000..e654c0e5ed
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9_mc_template.c
@@ -0,0 +1,439 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define ROUNDED_DIV_MVx2(a, b) \
+ (VP9mv) { .x = ROUNDED_DIV(a.x + b.x, 2), .y = ROUNDED_DIV(a.y + b.y, 2) }
+#define ROUNDED_DIV_MVx4(a, b, c, d) \
+ (VP9mv) { .x = ROUNDED_DIV(a.x + b.x + c.x + d.x, 4), \
+ .y = ROUNDED_DIV(a.y + b.y + c.y + d.y, 4) }
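+// Average two or four luma sub-block motion vectors (with rounding) to derive
+// the chroma MV when the corresponding dimension is chroma-subsampled.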
+
+static void FN(inter_pred)(VP9TileData *td)
+{
+ static const uint8_t bwlog_tab[2][N_BS_SIZES] = {
+ { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4 },
+ { 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4 },
+ };
+ const VP9Context *s = td->s;
+ VP9Block *b = td->b;
+ int row = td->row, col = td->col;
+ const ThreadFrame *tref1 = &s->s.refs[s->s.h.refidx[b->ref[0]]], *tref2;
+ const AVFrame *ref1 = tref1->f, *ref2;
+ int w1 = ref1->width, h1 = ref1->height, w2, h2;
+ ptrdiff_t ls_y = td->y_stride, ls_uv = td->uv_stride;
+ int bytesperpixel = BYTES_PER_PIXEL;
+
+ if (b->comp) {
+ tref2 = &s->s.refs[s->s.h.refidx[b->ref[1]]];
+ ref2 = tref2->f;
+ w2 = ref2->width;
+ h2 = ref2->height;
+ }
+
+ // y inter pred
+ if (b->bs > BS_8x8) {
+ VP9mv uvmv;
+
+#if SCALED == 0
+ if (b->bs == BS_8x4) {
+ mc_luma_dir(td, mc[3][b->filter][0], td->dst[0], ls_y,
+ ref1->data[0], ref1->linesize[0], tref1,
+ row << 3, col << 3, &b->mv[0][0],,,,, 8, 4, w1, h1, 0);
+ mc_luma_dir(td, mc[3][b->filter][0],
+ td->dst[0] + 4 * ls_y, ls_y,
+ ref1->data[0], ref1->linesize[0], tref1,
+ (row << 3) + 4, col << 3, &b->mv[2][0],,,,, 8, 4, w1, h1, 0);
+ w1 = (w1 + s->ss_h) >> s->ss_h;
+ if (s->ss_v) {
+ h1 = (h1 + 1) >> 1;
+ uvmv = ROUNDED_DIV_MVx2(b->mv[0][0], b->mv[2][0]);
+ mc_chroma_dir(td, mc[3 + s->ss_h][b->filter][0],
+ td->dst[1], td->dst[2], ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << 2, col << (3 - s->ss_h),
+ &uvmv,,,,, 8 >> s->ss_h, 4, w1, h1, 0);
+ } else {
+ mc_chroma_dir(td, mc[3 + s->ss_h][b->filter][0],
+ td->dst[1], td->dst[2], ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << 3, col << (3 - s->ss_h),
+ &b->mv[0][0],,,,, 8 >> s->ss_h, 4, w1, h1, 0);
+ // BUG for 4:2:2 bs=8x4, libvpx uses the wrong block index
+ // to get the motion vector for the bottom 4x4 block
+ // https://code.google.com/p/webm/issues/detail?id=993
+ if (s->ss_h == 0) {
+ uvmv = b->mv[2][0];
+ } else {
+ uvmv = ROUNDED_DIV_MVx2(b->mv[0][0], b->mv[2][0]);
+ }
+ mc_chroma_dir(td, mc[3 + s->ss_h][b->filter][0],
+ td->dst[1] + 4 * ls_uv, td->dst[2] + 4 * ls_uv, ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ (row << 3) + 4, col << (3 - s->ss_h),
+ &uvmv,,,,, 8 >> s->ss_h, 4, w1, h1, 0);
+ }
+
+ if (b->comp) {
+ mc_luma_dir(td, mc[3][b->filter][1], td->dst[0], ls_y,
+ ref2->data[0], ref2->linesize[0], tref2,
+ row << 3, col << 3, &b->mv[0][1],,,,, 8, 4, w2, h2, 1);
+ mc_luma_dir(td, mc[3][b->filter][1],
+ td->dst[0] + 4 * ls_y, ls_y,
+ ref2->data[0], ref2->linesize[0], tref2,
+ (row << 3) + 4, col << 3, &b->mv[2][1],,,,, 8, 4, w2, h2, 1);
+ w2 = (w2 + s->ss_h) >> s->ss_h;
+ if (s->ss_v) {
+ h2 = (h2 + 1) >> 1;
+ uvmv = ROUNDED_DIV_MVx2(b->mv[0][1], b->mv[2][1]);
+ mc_chroma_dir(td, mc[3 + s->ss_h][b->filter][1],
+ td->dst[1], td->dst[2], ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << 2, col << (3 - s->ss_h),
+ &uvmv,,,,, 8 >> s->ss_h, 4, w2, h2, 1);
+ } else {
+ mc_chroma_dir(td, mc[3 + s->ss_h][b->filter][1],
+ td->dst[1], td->dst[2], ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << 3, col << (3 - s->ss_h),
+ &b->mv[0][1],,,,, 8 >> s->ss_h, 4, w2, h2, 1);
+ // BUG for 4:2:2 bs=8x4, libvpx uses the wrong block index
+ // to get the motion vector for the bottom 4x4 block
+ // https://code.google.com/p/webm/issues/detail?id=993
+ if (s->ss_h == 0) {
+ uvmv = b->mv[2][1];
+ } else {
+ uvmv = ROUNDED_DIV_MVx2(b->mv[0][1], b->mv[2][1]);
+ }
+ mc_chroma_dir(td, mc[3 + s->ss_h][b->filter][1],
+ td->dst[1] + 4 * ls_uv, td->dst[2] + 4 * ls_uv, ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ (row << 3) + 4, col << (3 - s->ss_h),
+ &uvmv,,,,, 8 >> s->ss_h, 4, w2, h2, 1);
+ }
+ }
+ } else if (b->bs == BS_4x8) {
+ mc_luma_dir(td, mc[4][b->filter][0], td->dst[0], ls_y,
+ ref1->data[0], ref1->linesize[0], tref1,
+ row << 3, col << 3, &b->mv[0][0],,,,, 4, 8, w1, h1, 0);
+ mc_luma_dir(td, mc[4][b->filter][0], td->dst[0] + 4 * bytesperpixel, ls_y,
+ ref1->data[0], ref1->linesize[0], tref1,
+ row << 3, (col << 3) + 4, &b->mv[1][0],,,,, 4, 8, w1, h1, 0);
+ h1 = (h1 + s->ss_v) >> s->ss_v;
+ if (s->ss_h) {
+ w1 = (w1 + 1) >> 1;
+ uvmv = ROUNDED_DIV_MVx2(b->mv[0][0], b->mv[1][0]);
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1], td->dst[2], ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << (3 - s->ss_v), col << 2,
+ &uvmv,,,,, 4, 8 >> s->ss_v, w1, h1, 0);
+ } else {
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1], td->dst[2], ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << (3 - s->ss_v), col << 3,
+ &b->mv[0][0],,,,, 4, 8 >> s->ss_v, w1, h1, 0);
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1] + 4 * bytesperpixel,
+ td->dst[2] + 4 * bytesperpixel, ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << (3 - s->ss_v), (col << 3) + 4,
+ &b->mv[1][0],,,,, 4, 8 >> s->ss_v, w1, h1, 0);
+ }
+
+ if (b->comp) {
+ mc_luma_dir(td, mc[4][b->filter][1], td->dst[0], ls_y,
+ ref2->data[0], ref2->linesize[0], tref2,
+ row << 3, col << 3, &b->mv[0][1],,,,, 4, 8, w2, h2, 1);
+ mc_luma_dir(td, mc[4][b->filter][1], td->dst[0] + 4 * bytesperpixel, ls_y,
+ ref2->data[0], ref2->linesize[0], tref2,
+ row << 3, (col << 3) + 4, &b->mv[1][1],,,,, 4, 8, w2, h2, 1);
+ h2 = (h2 + s->ss_v) >> s->ss_v;
+ if (s->ss_h) {
+ w2 = (w2 + 1) >> 1;
+ uvmv = ROUNDED_DIV_MVx2(b->mv[0][1], b->mv[1][1]);
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1], td->dst[2], ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << (3 - s->ss_v), col << 2,
+ &uvmv,,,,, 4, 8 >> s->ss_v, w2, h2, 1);
+ } else {
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1], td->dst[2], ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << (3 - s->ss_v), col << 3,
+ &b->mv[0][1],,,,, 4, 8 >> s->ss_v, w2, h2, 1);
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1] + 4 * bytesperpixel,
+ td->dst[2] + 4 * bytesperpixel, ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << (3 - s->ss_v), (col << 3) + 4,
+ &b->mv[1][1],,,,, 4, 8 >> s->ss_v, w2, h2, 1);
+ }
+ }
+ } else
+#endif
+ {
+#if SCALED == 0
+ av_assert2(b->bs == BS_4x4);
+#endif
+
+ // FIXME if two horizontally adjacent blocks have the same MV,
+ // do a w8 instead of a w4 call
+ mc_luma_dir(td, mc[4][b->filter][0], td->dst[0], ls_y,
+ ref1->data[0], ref1->linesize[0], tref1,
+ row << 3, col << 3, &b->mv[0][0],
+ 0, 0, 8, 8, 4, 4, w1, h1, 0);
+ mc_luma_dir(td, mc[4][b->filter][0], td->dst[0] + 4 * bytesperpixel, ls_y,
+ ref1->data[0], ref1->linesize[0], tref1,
+ row << 3, (col << 3) + 4, &b->mv[1][0],
+ 4, 0, 8, 8, 4, 4, w1, h1, 0);
+ mc_luma_dir(td, mc[4][b->filter][0],
+ td->dst[0] + 4 * ls_y, ls_y,
+ ref1->data[0], ref1->linesize[0], tref1,
+ (row << 3) + 4, col << 3, &b->mv[2][0],
+ 0, 4, 8, 8, 4, 4, w1, h1, 0);
+ mc_luma_dir(td, mc[4][b->filter][0],
+ td->dst[0] + 4 * ls_y + 4 * bytesperpixel, ls_y,
+ ref1->data[0], ref1->linesize[0], tref1,
+ (row << 3) + 4, (col << 3) + 4, &b->mv[3][0],
+ 4, 4, 8, 8, 4, 4, w1, h1, 0);
+ if (s->ss_v) {
+ h1 = (h1 + 1) >> 1;
+ if (s->ss_h) {
+ w1 = (w1 + 1) >> 1;
+ uvmv = ROUNDED_DIV_MVx4(b->mv[0][0], b->mv[1][0],
+ b->mv[2][0], b->mv[3][0]);
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1], td->dst[2], ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << 2, col << 2,
+ &uvmv, 0, 0, 4, 4, 4, 4, w1, h1, 0);
+ } else {
+ uvmv = ROUNDED_DIV_MVx2(b->mv[0][0], b->mv[2][0]);
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1], td->dst[2], ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << 2, col << 3,
+ &uvmv, 0, 0, 8, 4, 4, 4, w1, h1, 0);
+ uvmv = ROUNDED_DIV_MVx2(b->mv[1][0], b->mv[3][0]);
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1] + 4 * bytesperpixel,
+ td->dst[2] + 4 * bytesperpixel, ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << 2, (col << 3) + 4,
+ &uvmv, 4, 0, 8, 4, 4, 4, w1, h1, 0);
+ }
+ } else {
+ if (s->ss_h) {
+ w1 = (w1 + 1) >> 1;
+ uvmv = ROUNDED_DIV_MVx2(b->mv[0][0], b->mv[1][0]);
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1], td->dst[2], ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << 3, col << 2,
+ &uvmv, 0, 0, 4, 8, 4, 4, w1, h1, 0);
+ // BUG libvpx uses wrong block index for 4:2:2 bs=4x4
+ // bottom block
+ // https://code.google.com/p/webm/issues/detail?id=993
+ uvmv = ROUNDED_DIV_MVx2(b->mv[1][0], b->mv[2][0]);
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1] + 4 * ls_uv, td->dst[2] + 4 * ls_uv, ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ (row << 3) + 4, col << 2,
+ &uvmv, 0, 4, 4, 8, 4, 4, w1, h1, 0);
+ } else {
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1], td->dst[2], ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << 3, col << 3,
+ &b->mv[0][0], 0, 0, 8, 8, 4, 4, w1, h1, 0);
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1] + 4 * bytesperpixel,
+ td->dst[2] + 4 * bytesperpixel, ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << 3, (col << 3) + 4,
+ &b->mv[1][0], 4, 0, 8, 8, 4, 4, w1, h1, 0);
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1] + 4 * ls_uv, td->dst[2] + 4 * ls_uv, ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ (row << 3) + 4, col << 3,
+ &b->mv[2][0], 0, 4, 8, 8, 4, 4, w1, h1, 0);
+ mc_chroma_dir(td, mc[4][b->filter][0],
+ td->dst[1] + 4 * ls_uv + 4 * bytesperpixel,
+ td->dst[2] + 4 * ls_uv + 4 * bytesperpixel, ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ (row << 3) + 4, (col << 3) + 4,
+ &b->mv[3][0], 4, 4, 8, 8, 4, 4, w1, h1, 0);
+ }
+ }
+
+ if (b->comp) {
+ mc_luma_dir(td, mc[4][b->filter][1], td->dst[0], ls_y,
+ ref2->data[0], ref2->linesize[0], tref2,
+ row << 3, col << 3, &b->mv[0][1], 0, 0, 8, 8, 4, 4, w2, h2, 1);
+ mc_luma_dir(td, mc[4][b->filter][1], td->dst[0] + 4 * bytesperpixel, ls_y,
+ ref2->data[0], ref2->linesize[0], tref2,
+ row << 3, (col << 3) + 4, &b->mv[1][1], 4, 0, 8, 8, 4, 4, w2, h2, 1);
+ mc_luma_dir(td, mc[4][b->filter][1],
+ td->dst[0] + 4 * ls_y, ls_y,
+ ref2->data[0], ref2->linesize[0], tref2,
+ (row << 3) + 4, col << 3, &b->mv[2][1], 0, 4, 8, 8, 4, 4, w2, h2, 1);
+ mc_luma_dir(td, mc[4][b->filter][1],
+ td->dst[0] + 4 * ls_y + 4 * bytesperpixel, ls_y,
+ ref2->data[0], ref2->linesize[0], tref2,
+ (row << 3) + 4, (col << 3) + 4, &b->mv[3][1], 4, 4, 8, 8, 4, 4, w2, h2, 1);
+ if (s->ss_v) {
+ h2 = (h2 + 1) >> 1;
+ if (s->ss_h) {
+ w2 = (w2 + 1) >> 1;
+ uvmv = ROUNDED_DIV_MVx4(b->mv[0][1], b->mv[1][1],
+ b->mv[2][1], b->mv[3][1]);
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1], td->dst[2], ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << 2, col << 2,
+ &uvmv, 0, 0, 4, 4, 4, 4, w2, h2, 1);
+ } else {
+ uvmv = ROUNDED_DIV_MVx2(b->mv[0][1], b->mv[2][1]);
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1], td->dst[2], ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << 2, col << 3,
+ &uvmv, 0, 0, 8, 4, 4, 4, w2, h2, 1);
+ uvmv = ROUNDED_DIV_MVx2(b->mv[1][1], b->mv[3][1]);
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1] + 4 * bytesperpixel,
+ td->dst[2] + 4 * bytesperpixel, ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << 2, (col << 3) + 4,
+ &uvmv, 4, 0, 8, 4, 4, 4, w2, h2, 1);
+ }
+ } else {
+ if (s->ss_h) {
+ w2 = (w2 + 1) >> 1;
+ uvmv = ROUNDED_DIV_MVx2(b->mv[0][1], b->mv[1][1]);
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1], td->dst[2], ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << 3, col << 2,
+ &uvmv, 0, 0, 4, 8, 4, 4, w2, h2, 1);
+ // BUG libvpx uses wrong block index for 4:2:2 bs=4x4
+ // bottom block
+ // https://code.google.com/p/webm/issues/detail?id=993
+ uvmv = ROUNDED_DIV_MVx2(b->mv[1][1], b->mv[2][1]);
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1] + 4 * ls_uv, td->dst[2] + 4 * ls_uv, ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ (row << 3) + 4, col << 2,
+ &uvmv, 0, 4, 4, 8, 4, 4, w2, h2, 1);
+ } else {
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1], td->dst[2], ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << 3, col << 3,
+ &b->mv[0][1], 0, 0, 8, 8, 4, 4, w2, h2, 1);
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1] + 4 * bytesperpixel,
+ td->dst[2] + 4 * bytesperpixel, ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << 3, (col << 3) + 4,
+ &b->mv[1][1], 4, 0, 8, 8, 4, 4, w2, h2, 1);
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1] + 4 * ls_uv, td->dst[2] + 4 * ls_uv, ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ (row << 3) + 4, col << 3,
+ &b->mv[2][1], 0, 4, 8, 8, 4, 4, w2, h2, 1);
+ mc_chroma_dir(td, mc[4][b->filter][1],
+ td->dst[1] + 4 * ls_uv + 4 * bytesperpixel,
+ td->dst[2] + 4 * ls_uv + 4 * bytesperpixel, ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ (row << 3) + 4, (col << 3) + 4,
+ &b->mv[3][1], 4, 4, 8, 8, 4, 4, w2, h2, 1);
+ }
+ }
+ }
+ }
+ } else {
+ int bwl = bwlog_tab[0][b->bs];
+ int bw = ff_vp9_bwh_tab[0][b->bs][0] * 4;
+ int bh = ff_vp9_bwh_tab[0][b->bs][1] * 4;
+ int uvbw = ff_vp9_bwh_tab[s->ss_h][b->bs][0] * 4;
+ int uvbh = ff_vp9_bwh_tab[s->ss_v][b->bs][1] * 4;
+
+ mc_luma_dir(td, mc[bwl][b->filter][0], td->dst[0], ls_y,
+ ref1->data[0], ref1->linesize[0], tref1,
+ row << 3, col << 3, &b->mv[0][0], 0, 0, bw, bh, bw, bh, w1, h1, 0);
+ w1 = (w1 + s->ss_h) >> s->ss_h;
+ h1 = (h1 + s->ss_v) >> s->ss_v;
+ mc_chroma_dir(td, mc[bwl + s->ss_h][b->filter][0],
+ td->dst[1], td->dst[2], ls_uv,
+ ref1->data[1], ref1->linesize[1],
+ ref1->data[2], ref1->linesize[2], tref1,
+ row << (3 - s->ss_v), col << (3 - s->ss_h),
+ &b->mv[0][0], 0, 0, uvbw, uvbh, uvbw, uvbh, w1, h1, 0);
+
+ if (b->comp) {
+ mc_luma_dir(td, mc[bwl][b->filter][1], td->dst[0], ls_y,
+ ref2->data[0], ref2->linesize[0], tref2,
+ row << 3, col << 3, &b->mv[0][1], 0, 0, bw, bh, bw, bh, w2, h2, 1);
+ w2 = (w2 + s->ss_h) >> s->ss_h;
+ h2 = (h2 + s->ss_v) >> s->ss_v;
+ mc_chroma_dir(td, mc[bwl + s->ss_h][b->filter][1],
+ td->dst[1], td->dst[2], ls_uv,
+ ref2->data[1], ref2->linesize[1],
+ ref2->data[2], ref2->linesize[2], tref2,
+ row << (3 - s->ss_v), col << (3 - s->ss_h),
+ &b->mv[0][1], 0, 0, uvbw, uvbh, uvbw, uvbh, w2, h2, 1);
+ }
+ }
+}
diff --git a/media/ffvpx/libavcodec/vp9_parser.c b/media/ffvpx/libavcodec/vp9_parser.c
new file mode 100644
index 0000000000..ffcb93505f
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9_parser.c
@@ -0,0 +1,70 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "libavcodec/get_bits.h"
+#include "parser.h"
+
+static int parse(AVCodecParserContext *ctx,
+ AVCodecContext *avctx,
+ const uint8_t **out_data, int *out_size,
+ const uint8_t *data, int size)
+{
+ GetBitContext gb;
+ int res, profile, keyframe;
+
+ *out_data = data;
+ *out_size = size;
+
+ if (!size || (res = init_get_bits8(&gb, data, size)) < 0)
+ return size; // parsers can't return errors
+ get_bits(&gb, 2); // frame marker
+ profile = get_bits1(&gb);
+ profile |= get_bits1(&gb) << 1;
+ if (profile == 3) profile += get_bits1(&gb);
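+ // the low profile bit is coded first; profile 3 is followed by a reserved bit
+ // which, when set, yields the invalid value 4 rejected below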
+ if (profile > 3)
+ return size;
+
+ avctx->profile = profile;
+
+ if (get_bits1(&gb)) {
+ keyframe = 0;
+ } else {
+ keyframe = !get_bits1(&gb);
+ }
+
+ if (!keyframe) {
+ ctx->pict_type = AV_PICTURE_TYPE_P;
+ ctx->key_frame = 0;
+ } else {
+ ctx->pict_type = AV_PICTURE_TYPE_I;
+ ctx->key_frame = 1;
+ }
+
+ return size;
+}
+
+const AVCodecParser ff_vp9_parser = {
+ .codec_ids = { AV_CODEC_ID_VP9 },
+ .parser_parse = parse,
+};
diff --git a/media/ffvpx/libavcodec/vp9_superframe_split_bsf.c b/media/ffvpx/libavcodec/vp9_superframe_split_bsf.c
new file mode 100644
index 0000000000..cddd48119c
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9_superframe_split_bsf.c
@@ -0,0 +1,170 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * This bitstream filter splits VP9 superframes into packets containing
+ * just one frame.
+ */
+
+#include <stddef.h>
+
+#include "bsf.h"
+#include "bsf_internal.h"
+#include "bytestream.h"
+#include "get_bits.h"
+
+typedef struct VP9SFSplitContext {
+ AVPacket *buffer_pkt;
+
+ int nb_frames;
+ int next_frame;
+ size_t next_frame_offset;
+ int sizes[8];
+} VP9SFSplitContext;
+
+static int vp9_superframe_split_filter(AVBSFContext *ctx, AVPacket *out)
+{
+ VP9SFSplitContext *s = ctx->priv_data;
+ AVPacket *in;
+ int i, j, ret, marker;
+ int is_superframe = !!s->buffer_pkt->data;
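+ // If a partially consumed superframe is still buffered, keep splitting it;
+ // otherwise fetch the next input packet.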
+
+ if (!s->buffer_pkt->data) {
+ ret = ff_bsf_get_packet_ref(ctx, s->buffer_pkt);
+ if (ret < 0)
+ return ret;
+ in = s->buffer_pkt;
+
+ if (!in->size)
+ goto passthrough;
+
+ marker = in->data[in->size - 1];
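+ // Superframe index marker byte: top 3 bits are 0b110, bits 3-4 give the
+ // bytes-per-frame-size minus one, low 3 bits the frame count minus one;
+ // the same byte also precedes the index, which is checked below.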
+ if ((marker & 0xe0) == 0xc0) {
+ int length_size = 1 + ((marker >> 3) & 0x3);
+ int nb_frames = 1 + (marker & 0x7);
+ int idx_size = 2 + nb_frames * length_size;
+
+ if (in->size >= idx_size && in->data[in->size - idx_size] == marker) {
+ GetByteContext bc;
+ int64_t total_size = 0;
+
+ bytestream2_init(&bc, in->data + in->size + 1 - idx_size,
+ nb_frames * length_size);
+
+ for (i = 0; i < nb_frames; i++) {
+ int frame_size = 0;
+ for (j = 0; j < length_size; j++)
+ frame_size |= bytestream2_get_byte(&bc) << (j * 8);
+
+ total_size += frame_size;
+ if (frame_size <= 0 || total_size > in->size - idx_size) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid frame size in a superframe: %d\n", frame_size);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ s->sizes[i] = frame_size;
+ }
+ s->nb_frames = nb_frames;
+ s->next_frame = 0;
+ s->next_frame_offset = 0;
+ is_superframe = 1;
+ }
+ }
+ }
+
+ if (is_superframe) {
+ GetBitContext gb;
+ int profile, invisible = 0;
+
+ ret = av_packet_ref(out, s->buffer_pkt);
+ if (ret < 0)
+ goto fail;
+
+ out->data += s->next_frame_offset;
+ out->size = s->sizes[s->next_frame];
+
+ s->next_frame_offset += out->size;
+ s->next_frame++;
+
+ if (s->next_frame >= s->nb_frames)
+ av_packet_unref(s->buffer_pkt);
+
+ ret = init_get_bits8(&gb, out->data, out->size);
+ if (ret < 0)
+ goto fail;
+
+ get_bits(&gb, 2); // frame_marker
+ profile = get_bits1(&gb);
+ profile |= get_bits1(&gb) << 1;
+ if (profile == 3)
+ get_bits1(&gb);
+ if (!get_bits1(&gb)) {
+ get_bits1(&gb);
+ invisible = !get_bits1(&gb);
+ }
+
+ if (invisible)
+ out->pts = AV_NOPTS_VALUE;
+
+ } else {
+passthrough:
+ av_packet_move_ref(out, s->buffer_pkt);
+ }
+
+ return 0;
+fail:
+ if (ret < 0)
+ av_packet_unref(out);
+ av_packet_unref(s->buffer_pkt);
+ return ret;
+}
+
+static int vp9_superframe_split_init(AVBSFContext *ctx)
+{
+ VP9SFSplitContext *s = ctx->priv_data;
+
+ s->buffer_pkt = av_packet_alloc();
+ if (!s->buffer_pkt)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static void vp9_superframe_split_flush(AVBSFContext *ctx)
+{
+ VP9SFSplitContext *s = ctx->priv_data;
+ av_packet_unref(s->buffer_pkt);
+}
+
+static void vp9_superframe_split_uninit(AVBSFContext *ctx)
+{
+ VP9SFSplitContext *s = ctx->priv_data;
+ av_packet_free(&s->buffer_pkt);
+}
+
+const FFBitStreamFilter ff_vp9_superframe_split_bsf = {
+ .p.name = "vp9_superframe_split",
+ .p.codec_ids = (const enum AVCodecID []){ AV_CODEC_ID_VP9, AV_CODEC_ID_NONE },
+ .priv_data_size = sizeof(VP9SFSplitContext),
+ .init = vp9_superframe_split_init,
+ .flush = vp9_superframe_split_flush,
+ .close = vp9_superframe_split_uninit,
+ .filter = vp9_superframe_split_filter,
+};
diff --git a/media/ffvpx/libavcodec/vp9block.c b/media/ffvpx/libavcodec/vp9block.c
new file mode 100644
index 0000000000..5743f048cc
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9block.c
@@ -0,0 +1,1457 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+
+#include "threadframe.h"
+#include "vp89_rac.h"
+#include "vp9.h"
+#include "vp9data.h"
+#include "vp9dec.h"
+#include "vpx_rac.h"
+
+static av_always_inline void setctx_2d(uint8_t *ptr, int w, int h,
+ ptrdiff_t stride, int v)
+{
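+ // Fill a w x h block of 8-bit context entries with value v, using the widest
+ // aligned store available for each supported width (1, 2, 4 or 8 bytes).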
+ switch (w) {
+ case 1:
+ do {
+ *ptr = v;
+ ptr += stride;
+ } while (--h);
+ break;
+ case 2: {
+ int v16 = v * 0x0101;
+ do {
+ AV_WN16A(ptr, v16);
+ ptr += stride;
+ } while (--h);
+ break;
+ }
+ case 4: {
+ uint32_t v32 = v * 0x01010101;
+ do {
+ AV_WN32A(ptr, v32);
+ ptr += stride;
+ } while (--h);
+ break;
+ }
+ case 8: {
+#if HAVE_FAST_64BIT
+ uint64_t v64 = v * 0x0101010101010101ULL;
+ do {
+ AV_WN64A(ptr, v64);
+ ptr += stride;
+ } while (--h);
+#else
+ uint32_t v32 = v * 0x01010101;
+ do {
+ AV_WN32A(ptr, v32);
+ AV_WN32A(ptr + 4, v32);
+ ptr += stride;
+ } while (--h);
+#endif
+ break;
+ }
+ }
+}
+
+static void decode_mode(VP9TileData *td)
+{
+ static const uint8_t left_ctx[N_BS_SIZES] = {
+ 0x0, 0x8, 0x0, 0x8, 0xc, 0x8, 0xc, 0xe, 0xc, 0xe, 0xf, 0xe, 0xf
+ };
+ static const uint8_t above_ctx[N_BS_SIZES] = {
+ 0x0, 0x0, 0x8, 0x8, 0x8, 0xc, 0xc, 0xc, 0xe, 0xe, 0xe, 0xf, 0xf
+ };
+ static const uint8_t max_tx_for_bl_bp[N_BS_SIZES] = {
+ TX_32X32, TX_32X32, TX_32X32, TX_32X32, TX_16X16, TX_16X16,
+ TX_16X16, TX_8X8, TX_8X8, TX_8X8, TX_4X4, TX_4X4, TX_4X4
+ };
+ const VP9Context *s = td->s;
+ VP9Block *b = td->b;
+ int row = td->row, col = td->col, row7 = td->row7;
+ enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs];
+ int bw4 = ff_vp9_bwh_tab[1][b->bs][0], w4 = FFMIN(s->cols - col, bw4);
+ int bh4 = ff_vp9_bwh_tab[1][b->bs][1], h4 = FFMIN(s->rows - row, bh4), y;
+ int have_a = row > 0, have_l = col > td->tile_col_start;
+ int vref, filter_id;
+
+ if (!s->s.h.segmentation.enabled) {
+ b->seg_id = 0;
+ } else if (s->s.h.keyframe || s->s.h.intraonly) {
+ b->seg_id = !s->s.h.segmentation.update_map ? 0 :
+ vp89_rac_get_tree(td->c, ff_vp9_segmentation_tree,
+ s->s.h.segmentation.prob);
+ } else if (!s->s.h.segmentation.update_map ||
+ (s->s.h.segmentation.temporal &&
+ vpx_rac_get_prob_branchy(td->c,
+ s->s.h.segmentation.pred_prob[s->above_segpred_ctx[col] +
+ td->left_segpred_ctx[row7]]))) {
+ if (!s->s.h.errorres && s->s.frames[REF_FRAME_SEGMAP].segmentation_map) {
+ int pred = 8, x;
+ uint8_t *refsegmap = s->s.frames[REF_FRAME_SEGMAP].segmentation_map;
+
+ if (!s->s.frames[REF_FRAME_SEGMAP].uses_2pass)
+ ff_thread_await_progress(&s->s.frames[REF_FRAME_SEGMAP].tf, row >> 3, 0);
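+ // The predicted seg_id is the minimum segment ID over all 8x8 blocks this
+ // block covers in the reference segmentation map.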
+ for (y = 0; y < h4; y++) {
+ int idx_base = (y + row) * 8 * s->sb_cols + col;
+ for (x = 0; x < w4; x++)
+ pred = FFMIN(pred, refsegmap[idx_base + x]);
+ }
+ av_assert1(pred < 8);
+ b->seg_id = pred;
+ } else {
+ b->seg_id = 0;
+ }
+
+ memset(&s->above_segpred_ctx[col], 1, w4);
+ memset(&td->left_segpred_ctx[row7], 1, h4);
+ } else {
+ b->seg_id = vp89_rac_get_tree(td->c, ff_vp9_segmentation_tree,
+ s->s.h.segmentation.prob);
+
+ memset(&s->above_segpred_ctx[col], 0, w4);
+ memset(&td->left_segpred_ctx[row7], 0, h4);
+ }
+ if (s->s.h.segmentation.enabled &&
+ (s->s.h.segmentation.update_map || s->s.h.keyframe || s->s.h.intraonly)) {
+ setctx_2d(&s->s.frames[CUR_FRAME].segmentation_map[row * 8 * s->sb_cols + col],
+ bw4, bh4, 8 * s->sb_cols, b->seg_id);
+ }
+
+ b->skip = s->s.h.segmentation.enabled &&
+ s->s.h.segmentation.feat[b->seg_id].skip_enabled;
+ if (!b->skip) {
+ int c = td->left_skip_ctx[row7] + s->above_skip_ctx[col];
+ b->skip = vpx_rac_get_prob(td->c, s->prob.p.skip[c]);
+ td->counts.skip[c][b->skip]++;
+ }
+
+ if (s->s.h.keyframe || s->s.h.intraonly) {
+ b->intra = 1;
+ } else if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].ref_enabled) {
+ b->intra = !s->s.h.segmentation.feat[b->seg_id].ref_val;
+ } else {
+ int c, bit;
+
+ if (have_a && have_l) {
+ c = s->above_intra_ctx[col] + td->left_intra_ctx[row7];
+ c += (c == 2);
+ } else {
+ c = have_a ? 2 * s->above_intra_ctx[col] :
+ have_l ? 2 * td->left_intra_ctx[row7] : 0;
+ }
+ bit = vpx_rac_get_prob(td->c, s->prob.p.intra[c]);
+ td->counts.intra[c][bit]++;
+ b->intra = !bit;
+ }
+
+ if ((b->intra || !b->skip) && s->s.h.txfmmode == TX_SWITCHABLE) {
+ int c;
+ if (have_a) {
+ if (have_l) {
+ c = (s->above_skip_ctx[col] ? max_tx :
+ s->above_txfm_ctx[col]) +
+ (td->left_skip_ctx[row7] ? max_tx :
+ td->left_txfm_ctx[row7]) > max_tx;
+ } else {
+ c = s->above_skip_ctx[col] ? 1 :
+ (s->above_txfm_ctx[col] * 2 > max_tx);
+ }
+ } else if (have_l) {
+ c = td->left_skip_ctx[row7] ? 1 :
+ (td->left_txfm_ctx[row7] * 2 > max_tx);
+ } else {
+ c = 1;
+ }
+ switch (max_tx) {
+ case TX_32X32:
+ b->tx = vpx_rac_get_prob(td->c, s->prob.p.tx32p[c][0]);
+ if (b->tx) {
+ b->tx += vpx_rac_get_prob(td->c, s->prob.p.tx32p[c][1]);
+ if (b->tx == 2)
+ b->tx += vpx_rac_get_prob(td->c, s->prob.p.tx32p[c][2]);
+ }
+ td->counts.tx32p[c][b->tx]++;
+ break;
+ case TX_16X16:
+ b->tx = vpx_rac_get_prob(td->c, s->prob.p.tx16p[c][0]);
+ if (b->tx)
+ b->tx += vpx_rac_get_prob(td->c, s->prob.p.tx16p[c][1]);
+ td->counts.tx16p[c][b->tx]++;
+ break;
+ case TX_8X8:
+ b->tx = vpx_rac_get_prob(td->c, s->prob.p.tx8p[c]);
+ td->counts.tx8p[c][b->tx]++;
+ break;
+ case TX_4X4:
+ b->tx = TX_4X4;
+ break;
+ }
+ } else {
+ b->tx = FFMIN(max_tx, s->s.h.txfmmode);
+ }
+
+ if (s->s.h.keyframe || s->s.h.intraonly) {
+ uint8_t *a = &s->above_mode_ctx[col * 2];
+ uint8_t *l = &td->left_mode_ctx[(row7) << 1];
+
+ b->comp = 0;
+ if (b->bs > BS_8x8) {
+ // FIXME the memory storage intermediates here aren't really
+ // necessary; they're just there to make the code slightly
+ // simpler for now
+ b->mode[0] =
+ a[0] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_ymode_probs[a[0]][l[0]]);
+ if (b->bs != BS_8x4) {
+ b->mode[1] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_ymode_probs[a[1]][b->mode[0]]);
+ l[0] =
+ a[1] = b->mode[1];
+ } else {
+ l[0] =
+ a[1] =
+ b->mode[1] = b->mode[0];
+ }
+ if (b->bs != BS_4x8) {
+ b->mode[2] =
+ a[0] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_ymode_probs[a[0]][l[1]]);
+ if (b->bs != BS_8x4) {
+ b->mode[3] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_ymode_probs[a[1]][b->mode[2]]);
+ l[1] =
+ a[1] = b->mode[3];
+ } else {
+ l[1] =
+ a[1] =
+ b->mode[3] = b->mode[2];
+ }
+ } else {
+ b->mode[2] = b->mode[0];
+ l[1] =
+ a[1] =
+ b->mode[3] = b->mode[1];
+ }
+ } else {
+ b->mode[0] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_ymode_probs[*a][*l]);
+ b->mode[3] =
+ b->mode[2] =
+ b->mode[1] = b->mode[0];
+ // FIXME this can probably be optimized
+ memset(a, b->mode[0], ff_vp9_bwh_tab[0][b->bs][0]);
+ memset(l, b->mode[0], ff_vp9_bwh_tab[0][b->bs][1]);
+ }
+ b->uvmode = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_uvmode_probs[b->mode[3]]);
+ } else if (b->intra) {
+ b->comp = 0;
+ if (b->bs > BS_8x8) {
+ b->mode[0] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.y_mode[0]);
+ td->counts.y_mode[0][b->mode[0]]++;
+ if (b->bs != BS_8x4) {
+ b->mode[1] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.y_mode[0]);
+ td->counts.y_mode[0][b->mode[1]]++;
+ } else {
+ b->mode[1] = b->mode[0];
+ }
+ if (b->bs != BS_4x8) {
+ b->mode[2] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.y_mode[0]);
+ td->counts.y_mode[0][b->mode[2]]++;
+ if (b->bs != BS_8x4) {
+ b->mode[3] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.y_mode[0]);
+ td->counts.y_mode[0][b->mode[3]]++;
+ } else {
+ b->mode[3] = b->mode[2];
+ }
+ } else {
+ b->mode[2] = b->mode[0];
+ b->mode[3] = b->mode[1];
+ }
+ } else {
+ static const uint8_t size_group[10] = {
+ 3, 3, 3, 3, 2, 2, 2, 1, 1, 1
+ };
+ int sz = size_group[b->bs];
+
+ b->mode[0] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.y_mode[sz]);
+ b->mode[1] =
+ b->mode[2] =
+ b->mode[3] = b->mode[0];
+ td->counts.y_mode[sz][b->mode[3]]++;
+ }
+ b->uvmode = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.uv_mode[b->mode[3]]);
+ td->counts.uv_mode[b->mode[3]][b->uvmode]++;
+ } else {
+ static const uint8_t inter_mode_ctx_lut[14][14] = {
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
+ { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
+ { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
+ { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 3 },
+ { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 },
+ };
+
+ if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].ref_enabled) {
+ av_assert2(s->s.h.segmentation.feat[b->seg_id].ref_val != 0);
+ b->comp = 0;
+ b->ref[0] = s->s.h.segmentation.feat[b->seg_id].ref_val - 1;
+ } else {
+ // read comp_pred flag
+ if (s->s.h.comppredmode != PRED_SWITCHABLE) {
+ b->comp = s->s.h.comppredmode == PRED_COMPREF;
+ } else {
+ int c;
+
+ // FIXME add intra as ref=0xff (or -1) to make these easier?
+ if (have_a) {
+ if (have_l) {
+ if (s->above_comp_ctx[col] && td->left_comp_ctx[row7]) {
+ c = 4;
+ } else if (s->above_comp_ctx[col]) {
+ c = 2 + (td->left_intra_ctx[row7] ||
+ td->left_ref_ctx[row7] == s->s.h.fixcompref);
+ } else if (td->left_comp_ctx[row7]) {
+ c = 2 + (s->above_intra_ctx[col] ||
+ s->above_ref_ctx[col] == s->s.h.fixcompref);
+ } else {
+ c = (!s->above_intra_ctx[col] &&
+ s->above_ref_ctx[col] == s->s.h.fixcompref) ^
+ (!td->left_intra_ctx[row7] &&
+ td->left_ref_ctx[row & 7] == s->s.h.fixcompref);
+ }
+ } else {
+ c = s->above_comp_ctx[col] ? 3 :
+ (!s->above_intra_ctx[col] && s->above_ref_ctx[col] == s->s.h.fixcompref);
+ }
+ } else if (have_l) {
+ c = td->left_comp_ctx[row7] ? 3 :
+ (!td->left_intra_ctx[row7] && td->left_ref_ctx[row7] == s->s.h.fixcompref);
+ } else {
+ c = 1;
+ }
+ b->comp = vpx_rac_get_prob(td->c, s->prob.p.comp[c]);
+ td->counts.comp[c][b->comp]++;
+ }
+
+ // read actual references
+ // FIXME probably cache a few variables here to prevent repetitive
+ // memory accesses below
+ if (b->comp) { /* two references */
+ int fix_idx = s->s.h.signbias[s->s.h.fixcompref], var_idx = !fix_idx, c, bit;
+
+ b->ref[fix_idx] = s->s.h.fixcompref;
+ // FIXME can this codeblob be replaced by some sort of LUT?
+ if (have_a) {
+ if (have_l) {
+ if (s->above_intra_ctx[col]) {
+ if (td->left_intra_ctx[row7]) {
+ c = 2;
+ } else {
+ c = 1 + 2 * (td->left_ref_ctx[row7] != s->s.h.varcompref[1]);
+ }
+ } else if (td->left_intra_ctx[row7]) {
+ c = 1 + 2 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]);
+ } else {
+ int refl = td->left_ref_ctx[row7], refa = s->above_ref_ctx[col];
+
+ if (refl == refa && refa == s->s.h.varcompref[1]) {
+ c = 0;
+ } else if (!td->left_comp_ctx[row7] && !s->above_comp_ctx[col]) {
+ if ((refa == s->s.h.fixcompref && refl == s->s.h.varcompref[0]) ||
+ (refl == s->s.h.fixcompref && refa == s->s.h.varcompref[0])) {
+ c = 4;
+ } else {
+ c = (refa == refl) ? 3 : 1;
+ }
+ } else if (!td->left_comp_ctx[row7]) {
+ if (refa == s->s.h.varcompref[1] && refl != s->s.h.varcompref[1]) {
+ c = 1;
+ } else {
+ c = (refl == s->s.h.varcompref[1] &&
+ refa != s->s.h.varcompref[1]) ? 2 : 4;
+ }
+ } else if (!s->above_comp_ctx[col]) {
+ if (refl == s->s.h.varcompref[1] && refa != s->s.h.varcompref[1]) {
+ c = 1;
+ } else {
+ c = (refa == s->s.h.varcompref[1] &&
+ refl != s->s.h.varcompref[1]) ? 2 : 4;
+ }
+ } else {
+ c = (refl == refa) ? 4 : 2;
+ }
+ }
+ } else {
+ if (s->above_intra_ctx[col]) {
+ c = 2;
+ } else if (s->above_comp_ctx[col]) {
+ c = 4 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]);
+ } else {
+ c = 3 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]);
+ }
+ }
+ } else if (have_l) {
+ if (td->left_intra_ctx[row7]) {
+ c = 2;
+ } else if (td->left_comp_ctx[row7]) {
+ c = 4 * (td->left_ref_ctx[row7] != s->s.h.varcompref[1]);
+ } else {
+ c = 3 * (td->left_ref_ctx[row7] != s->s.h.varcompref[1]);
+ }
+ } else {
+ c = 2;
+ }
+ bit = vpx_rac_get_prob(td->c, s->prob.p.comp_ref[c]);
+ b->ref[var_idx] = s->s.h.varcompref[bit];
+ td->counts.comp_ref[c][bit]++;
+ } else /* single reference */ {
+ int bit, c;
+
+ if (have_a && !s->above_intra_ctx[col]) {
+ if (have_l && !td->left_intra_ctx[row7]) {
+ if (td->left_comp_ctx[row7]) {
+ if (s->above_comp_ctx[col]) {
+ c = 1 + (!s->s.h.fixcompref || !td->left_ref_ctx[row7] ||
+ !s->above_ref_ctx[col]);
+ } else {
+ c = (3 * !s->above_ref_ctx[col]) +
+ (!s->s.h.fixcompref || !td->left_ref_ctx[row7]);
+ }
+ } else if (s->above_comp_ctx[col]) {
+ c = (3 * !td->left_ref_ctx[row7]) +
+ (!s->s.h.fixcompref || !s->above_ref_ctx[col]);
+ } else {
+ c = 2 * !td->left_ref_ctx[row7] + 2 * !s->above_ref_ctx[col];
+ }
+ } else if (s->above_intra_ctx[col]) {
+ c = 2;
+ } else if (s->above_comp_ctx[col]) {
+ c = 1 + (!s->s.h.fixcompref || !s->above_ref_ctx[col]);
+ } else {
+ c = 4 * (!s->above_ref_ctx[col]);
+ }
+ } else if (have_l && !td->left_intra_ctx[row7]) {
+ if (td->left_intra_ctx[row7]) {
+ c = 2;
+ } else if (td->left_comp_ctx[row7]) {
+ c = 1 + (!s->s.h.fixcompref || !td->left_ref_ctx[row7]);
+ } else {
+ c = 4 * (!td->left_ref_ctx[row7]);
+ }
+ } else {
+ c = 2;
+ }
+ bit = vpx_rac_get_prob(td->c, s->prob.p.single_ref[c][0]);
+ td->counts.single_ref[c][0][bit]++;
+ if (!bit) {
+ b->ref[0] = 0;
+ } else {
+ // FIXME can this codeblob be replaced by some sort of LUT?
+ if (have_a) {
+ if (have_l) {
+ if (td->left_intra_ctx[row7]) {
+ if (s->above_intra_ctx[col]) {
+ c = 2;
+ } else if (s->above_comp_ctx[col]) {
+ c = 1 + 2 * (s->s.h.fixcompref == 1 ||
+ s->above_ref_ctx[col] == 1);
+ } else if (!s->above_ref_ctx[col]) {
+ c = 3;
+ } else {
+ c = 4 * (s->above_ref_ctx[col] == 1);
+ }
+ } else if (s->above_intra_ctx[col]) {
+ if (td->left_intra_ctx[row7]) {
+ c = 2;
+ } else if (td->left_comp_ctx[row7]) {
+ c = 1 + 2 * (s->s.h.fixcompref == 1 ||
+ td->left_ref_ctx[row7] == 1);
+ } else if (!td->left_ref_ctx[row7]) {
+ c = 3;
+ } else {
+ c = 4 * (td->left_ref_ctx[row7] == 1);
+ }
+ } else if (s->above_comp_ctx[col]) {
+ if (td->left_comp_ctx[row7]) {
+ if (td->left_ref_ctx[row7] == s->above_ref_ctx[col]) {
+ c = 3 * (s->s.h.fixcompref == 1 ||
+ td->left_ref_ctx[row7] == 1);
+ } else {
+ c = 2;
+ }
+ } else if (!td->left_ref_ctx[row7]) {
+ c = 1 + 2 * (s->s.h.fixcompref == 1 ||
+ s->above_ref_ctx[col] == 1);
+ } else {
+ c = 3 * (td->left_ref_ctx[row7] == 1) +
+ (s->s.h.fixcompref == 1 || s->above_ref_ctx[col] == 1);
+ }
+ } else if (td->left_comp_ctx[row7]) {
+ if (!s->above_ref_ctx[col]) {
+ c = 1 + 2 * (s->s.h.fixcompref == 1 ||
+ td->left_ref_ctx[row7] == 1);
+ } else {
+ c = 3 * (s->above_ref_ctx[col] == 1) +
+ (s->s.h.fixcompref == 1 || td->left_ref_ctx[row7] == 1);
+ }
+ } else if (!s->above_ref_ctx[col]) {
+ if (!td->left_ref_ctx[row7]) {
+ c = 3;
+ } else {
+ c = 4 * (td->left_ref_ctx[row7] == 1);
+ }
+ } else if (!td->left_ref_ctx[row7]) {
+ c = 4 * (s->above_ref_ctx[col] == 1);
+ } else {
+ c = 2 * (td->left_ref_ctx[row7] == 1) +
+ 2 * (s->above_ref_ctx[col] == 1);
+ }
+ } else {
+ if (s->above_intra_ctx[col] ||
+ (!s->above_comp_ctx[col] && !s->above_ref_ctx[col])) {
+ c = 2;
+ } else if (s->above_comp_ctx[col]) {
+ c = 3 * (s->s.h.fixcompref == 1 || s->above_ref_ctx[col] == 1);
+ } else {
+ c = 4 * (s->above_ref_ctx[col] == 1);
+ }
+ }
+ } else if (have_l) {
+ if (td->left_intra_ctx[row7] ||
+ (!td->left_comp_ctx[row7] && !td->left_ref_ctx[row7])) {
+ c = 2;
+ } else if (td->left_comp_ctx[row7]) {
+ c = 3 * (s->s.h.fixcompref == 1 || td->left_ref_ctx[row7] == 1);
+ } else {
+ c = 4 * (td->left_ref_ctx[row7] == 1);
+ }
+ } else {
+ c = 2;
+ }
+ bit = vpx_rac_get_prob(td->c, s->prob.p.single_ref[c][1]);
+ td->counts.single_ref[c][1][bit]++;
+ b->ref[0] = 1 + bit;
+ }
+ }
+ }
+
+ if (b->bs <= BS_8x8) {
+ if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].skip_enabled) {
+ b->mode[0] =
+ b->mode[1] =
+ b->mode[2] =
+ b->mode[3] = ZEROMV;
+ } else {
+ static const uint8_t off[10] = {
+ 3, 0, 0, 1, 0, 0, 0, 0, 0, 0
+ };
+
+ // FIXME this needs to use the LUT tables from find_ref_mvs
+ // because not all are -1,0/0,-1
+ int c = inter_mode_ctx_lut[s->above_mode_ctx[col + off[b->bs]]]
+ [td->left_mode_ctx[row7 + off[b->bs]]];
+
+ b->mode[0] = vp89_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
+ s->prob.p.mv_mode[c]);
+ b->mode[1] =
+ b->mode[2] =
+ b->mode[3] = b->mode[0];
+ td->counts.mv_mode[c][b->mode[0] - 10]++;
+ }
+ }
+
+ if (s->s.h.filtermode == FILTER_SWITCHABLE) {
+ int c;
+
+ if (have_a && s->above_mode_ctx[col] >= NEARESTMV) {
+ if (have_l && td->left_mode_ctx[row7] >= NEARESTMV) {
+ c = s->above_filter_ctx[col] == td->left_filter_ctx[row7] ?
+ td->left_filter_ctx[row7] : 3;
+ } else {
+ c = s->above_filter_ctx[col];
+ }
+ } else if (have_l && td->left_mode_ctx[row7] >= NEARESTMV) {
+ c = td->left_filter_ctx[row7];
+ } else {
+ c = 3;
+ }
+
+ filter_id = vp89_rac_get_tree(td->c, ff_vp9_filter_tree,
+ s->prob.p.filter[c]);
+ td->counts.filter[c][filter_id]++;
+ b->filter = ff_vp9_filter_lut[filter_id];
+ } else {
+ b->filter = s->s.h.filtermode;
+ }
+
+ if (b->bs > BS_8x8) {
+ int c = inter_mode_ctx_lut[s->above_mode_ctx[col]][td->left_mode_ctx[row7]];
+
+ b->mode[0] = vp89_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
+ s->prob.p.mv_mode[c]);
+ td->counts.mv_mode[c][b->mode[0] - 10]++;
+ ff_vp9_fill_mv(td, b->mv[0], b->mode[0], 0);
+
+ if (b->bs != BS_8x4) {
+ b->mode[1] = vp89_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
+ s->prob.p.mv_mode[c]);
+ td->counts.mv_mode[c][b->mode[1] - 10]++;
+ ff_vp9_fill_mv(td, b->mv[1], b->mode[1], 1);
+ } else {
+ b->mode[1] = b->mode[0];
+ AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
+ AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
+ }
+
+ if (b->bs != BS_4x8) {
+ b->mode[2] = vp89_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
+ s->prob.p.mv_mode[c]);
+ td->counts.mv_mode[c][b->mode[2] - 10]++;
+ ff_vp9_fill_mv(td, b->mv[2], b->mode[2], 2);
+
+ if (b->bs != BS_8x4) {
+ b->mode[3] = vp89_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
+ s->prob.p.mv_mode[c]);
+ td->counts.mv_mode[c][b->mode[3] - 10]++;
+ ff_vp9_fill_mv(td, b->mv[3], b->mode[3], 3);
+ } else {
+ b->mode[3] = b->mode[2];
+ AV_COPY32(&b->mv[3][0], &b->mv[2][0]);
+ AV_COPY32(&b->mv[3][1], &b->mv[2][1]);
+ }
+ } else {
+ b->mode[2] = b->mode[0];
+ AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
+ AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
+ b->mode[3] = b->mode[1];
+ AV_COPY32(&b->mv[3][0], &b->mv[1][0]);
+ AV_COPY32(&b->mv[3][1], &b->mv[1][1]);
+ }
+ } else {
+ ff_vp9_fill_mv(td, b->mv[0], b->mode[0], -1);
+ AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
+ AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
+ AV_COPY32(&b->mv[3][0], &b->mv[0][0]);
+ AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
+ AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
+ AV_COPY32(&b->mv[3][1], &b->mv[0][1]);
+ }
+
+ vref = b->ref[b->comp ? s->s.h.signbias[s->s.h.varcompref[0]] : 0];
+ }
+
+#if HAVE_FAST_64BIT
+#define SPLAT_CTX(var, val, n) \
+ switch (n) { \
+ case 1: var = val; break; \
+ case 2: AV_WN16A(&var, val * 0x0101); break; \
+ case 4: AV_WN32A(&var, val * 0x01010101); break; \
+ case 8: AV_WN64A(&var, val * 0x0101010101010101ULL); break; \
+ case 16: { \
+ uint64_t v64 = val * 0x0101010101010101ULL; \
+ AV_WN64A( &var, v64); \
+ AV_WN64A(&((uint8_t *) &var)[8], v64); \
+ break; \
+ } \
+ }
+#else
+#define SPLAT_CTX(var, val, n) \
+ switch (n) { \
+ case 1: var = val; break; \
+ case 2: AV_WN16A(&var, val * 0x0101); break; \
+ case 4: AV_WN32A(&var, val * 0x01010101); break; \
+ case 8: { \
+ uint32_t v32 = val * 0x01010101; \
+ AV_WN32A( &var, v32); \
+ AV_WN32A(&((uint8_t *) &var)[4], v32); \
+ break; \
+ } \
+ case 16: { \
+ uint32_t v32 = val * 0x01010101; \
+ AV_WN32A( &var, v32); \
+ AV_WN32A(&((uint8_t *) &var)[4], v32); \
+ AV_WN32A(&((uint8_t *) &var)[8], v32); \
+ AV_WN32A(&((uint8_t *) &var)[12], v32); \
+ break; \
+ } \
+ }
+#endif
+
+ switch (ff_vp9_bwh_tab[1][b->bs][0]) {
+#define SET_CTXS(perf, dir, off, n) \
+ do { \
+ SPLAT_CTX(perf->dir##_skip_ctx[off], b->skip, n); \
+ SPLAT_CTX(perf->dir##_txfm_ctx[off], b->tx, n); \
+ SPLAT_CTX(perf->dir##_partition_ctx[off], dir##_ctx[b->bs], n); \
+ if (!s->s.h.keyframe && !s->s.h.intraonly) { \
+ SPLAT_CTX(perf->dir##_intra_ctx[off], b->intra, n); \
+ SPLAT_CTX(perf->dir##_comp_ctx[off], b->comp, n); \
+ SPLAT_CTX(perf->dir##_mode_ctx[off], b->mode[3], n); \
+ if (!b->intra) { \
+ SPLAT_CTX(perf->dir##_ref_ctx[off], vref, n); \
+ if (s->s.h.filtermode == FILTER_SWITCHABLE) { \
+ SPLAT_CTX(perf->dir##_filter_ctx[off], filter_id, n); \
+ } \
+ } \
+ } \
+ } while (0)
+ case 1: SET_CTXS(s, above, col, 1); break;
+ case 2: SET_CTXS(s, above, col, 2); break;
+ case 4: SET_CTXS(s, above, col, 4); break;
+ case 8: SET_CTXS(s, above, col, 8); break;
+ }
+ switch (ff_vp9_bwh_tab[1][b->bs][1]) {
+ case 1: SET_CTXS(td, left, row7, 1); break;
+ case 2: SET_CTXS(td, left, row7, 2); break;
+ case 4: SET_CTXS(td, left, row7, 4); break;
+ case 8: SET_CTXS(td, left, row7, 8); break;
+ }
+#undef SPLAT_CTX
+#undef SET_CTXS
+
+ if (!s->s.h.keyframe && !s->s.h.intraonly) {
+ if (b->bs > BS_8x8) {
+ int mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);
+
+ AV_COPY32(&td->left_mv_ctx[row7 * 2 + 0][0], &b->mv[1][0]);
+ AV_COPY32(&td->left_mv_ctx[row7 * 2 + 0][1], &b->mv[1][1]);
+ AV_WN32A(&td->left_mv_ctx[row7 * 2 + 1][0], mv0);
+ AV_WN32A(&td->left_mv_ctx[row7 * 2 + 1][1], mv1);
+ AV_COPY32(&s->above_mv_ctx[col * 2 + 0][0], &b->mv[2][0]);
+ AV_COPY32(&s->above_mv_ctx[col * 2 + 0][1], &b->mv[2][1]);
+ AV_WN32A(&s->above_mv_ctx[col * 2 + 1][0], mv0);
+ AV_WN32A(&s->above_mv_ctx[col * 2 + 1][1], mv1);
+ } else {
+ int n, mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);
+
+ for (n = 0; n < w4 * 2; n++) {
+ AV_WN32A(&s->above_mv_ctx[col * 2 + n][0], mv0);
+ AV_WN32A(&s->above_mv_ctx[col * 2 + n][1], mv1);
+ }
+ for (n = 0; n < h4 * 2; n++) {
+ AV_WN32A(&td->left_mv_ctx[row7 * 2 + n][0], mv0);
+ AV_WN32A(&td->left_mv_ctx[row7 * 2 + n][1], mv1);
+ }
+ }
+ }
+
+ // FIXME kinda ugly
+ for (y = 0; y < h4; y++) {
+ int x, o = (row + y) * s->sb_cols * 8 + col;
+ VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[o];
+
+ if (b->intra) {
+ for (x = 0; x < w4; x++) {
+ mv[x].ref[0] =
+ mv[x].ref[1] = -1;
+ }
+ } else if (b->comp) {
+ for (x = 0; x < w4; x++) {
+ mv[x].ref[0] = b->ref[0];
+ mv[x].ref[1] = b->ref[1];
+ AV_COPY32(&mv[x].mv[0], &b->mv[3][0]);
+ AV_COPY32(&mv[x].mv[1], &b->mv[3][1]);
+ }
+ } else {
+ for (x = 0; x < w4; x++) {
+ mv[x].ref[0] = b->ref[0];
+ mv[x].ref[1] = -1;
+ AV_COPY32(&mv[x].mv[0], &b->mv[3][0]);
+ }
+ }
+ }
+}
+
+// FIXME merge cnt/eob arguments?
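+// Decode up to n_coeffs residual coefficients for one transform block: an
+// EOB/zero/one decision per position followed by the coefficient-category
+// tokens, updating the cnt/eob statistics along the way. Returns the number
+// of coefficients read (the EOB position).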
+static av_always_inline int
+decode_coeffs_b_generic(VPXRangeCoder *c, int16_t *coef, int n_coeffs,
+ int is_tx32x32, int is8bitsperpixel, int bpp, unsigned (*cnt)[6][3],
+ unsigned (*eob)[6][2], const uint8_t (*p)[6][11],
+ int nnz, const int16_t *scan, const int16_t (*nb)[2],
+ const int16_t *band_counts, const int16_t *qmul)
+{
+ int i = 0, band = 0, band_left = band_counts[band];
+ const uint8_t *tp = p[0][nnz];
+ uint8_t cache[1024];
+
+ do {
+ int val, rc;
+
+ val = vpx_rac_get_prob_branchy(c, tp[0]); // eob
+ eob[band][nnz][val]++;
+ if (!val)
+ break;
+
+skip_eob:
+ if (!vpx_rac_get_prob_branchy(c, tp[1])) { // zero
+ cnt[band][nnz][0]++;
+ if (!--band_left)
+ band_left = band_counts[++band];
+ cache[scan[i]] = 0;
+ nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
+ tp = p[band][nnz];
+ if (++i == n_coeffs)
+ break; // invalid input; blocks should end with EOB
+ goto skip_eob;
+ }
+
+ rc = scan[i];
+ if (!vpx_rac_get_prob_branchy(c, tp[2])) { // one
+ cnt[band][nnz][1]++;
+ val = 1;
+ cache[rc] = 1;
+ } else {
+ cnt[band][nnz][2]++;
+ if (!vpx_rac_get_prob_branchy(c, tp[3])) { // 2, 3, 4
+ if (!vpx_rac_get_prob_branchy(c, tp[4])) {
+ cache[rc] = val = 2;
+ } else {
+ val = 3 + vpx_rac_get_prob(c, tp[5]);
+ cache[rc] = 3;
+ }
+ } else if (!vpx_rac_get_prob_branchy(c, tp[6])) { // cat1/2
+ cache[rc] = 4;
+ if (!vpx_rac_get_prob_branchy(c, tp[7])) {
+ val = vpx_rac_get_prob(c, 159) + 5;
+ } else {
+ val = (vpx_rac_get_prob(c, 165) << 1) + 7;
+ val += vpx_rac_get_prob(c, 145);
+ }
+ } else { // cat 3-6
+ cache[rc] = 5;
+ if (!vpx_rac_get_prob_branchy(c, tp[8])) {
+ if (!vpx_rac_get_prob_branchy(c, tp[9])) {
+ val = 11 + (vpx_rac_get_prob(c, 173) << 2);
+ val += (vpx_rac_get_prob(c, 148) << 1);
+ val += vpx_rac_get_prob(c, 140);
+ } else {
+ val = 19 + (vpx_rac_get_prob(c, 176) << 3);
+ val += (vpx_rac_get_prob(c, 155) << 2);
+ val += (vpx_rac_get_prob(c, 140) << 1);
+ val += vpx_rac_get_prob(c, 135);
+ }
+ } else if (!vpx_rac_get_prob_branchy(c, tp[10])) {
+ val = (vpx_rac_get_prob(c, 180) << 4) + 35;
+ val += (vpx_rac_get_prob(c, 157) << 3);
+ val += (vpx_rac_get_prob(c, 141) << 2);
+ val += (vpx_rac_get_prob(c, 134) << 1);
+ val += vpx_rac_get_prob(c, 130);
+ } else {
+ val = 67;
+ if (!is8bitsperpixel) {
+ if (bpp == 12) {
+ val += vpx_rac_get_prob(c, 255) << 17;
+ val += vpx_rac_get_prob(c, 255) << 16;
+ }
+ val += (vpx_rac_get_prob(c, 255) << 15);
+ val += (vpx_rac_get_prob(c, 255) << 14);
+ }
+ val += (vpx_rac_get_prob(c, 254) << 13);
+ val += (vpx_rac_get_prob(c, 254) << 12);
+ val += (vpx_rac_get_prob(c, 254) << 11);
+ val += (vpx_rac_get_prob(c, 252) << 10);
+ val += (vpx_rac_get_prob(c, 249) << 9);
+ val += (vpx_rac_get_prob(c, 243) << 8);
+ val += (vpx_rac_get_prob(c, 230) << 7);
+ val += (vpx_rac_get_prob(c, 196) << 6);
+ val += (vpx_rac_get_prob(c, 177) << 5);
+ val += (vpx_rac_get_prob(c, 153) << 4);
+ val += (vpx_rac_get_prob(c, 140) << 3);
+ val += (vpx_rac_get_prob(c, 133) << 2);
+ val += (vpx_rac_get_prob(c, 130) << 1);
+ val += vpx_rac_get_prob(c, 129);
+ }
+ }
+ }
+#define STORE_COEF(c, i, v) do { \
+ if (is8bitsperpixel) { \
+ c[i] = v; \
+ } else { \
+ AV_WN32A(&c[i * 2], v); \
+ } \
+} while (0)
+ if (!--band_left)
+ band_left = band_counts[++band];
+ if (is_tx32x32)
+ STORE_COEF(coef, rc, (int)((vp89_rac_get(c) ? -val : val) * (unsigned)qmul[!!i]) / 2);
+ else
+ STORE_COEF(coef, rc, (vp89_rac_get(c) ? -val : val) * (unsigned)qmul[!!i]);
+ nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
+ tp = p[band][nnz];
+ } while (++i < n_coeffs);
+
+ return i;
+}
+
+static int decode_coeffs_b_8bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
+ unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
+ const uint8_t (*p)[6][11], int nnz, const int16_t *scan,
+ const int16_t (*nb)[2], const int16_t *band_counts,
+ const int16_t *qmul)
+{
+ return decode_coeffs_b_generic(td->c, coef, n_coeffs, 0, 1, 8, cnt, eob, p,
+ nnz, scan, nb, band_counts, qmul);
+}
+
+static int decode_coeffs_b32_8bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
+ unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
+ const uint8_t (*p)[6][11], int nnz, const int16_t *scan,
+ const int16_t (*nb)[2], const int16_t *band_counts,
+ const int16_t *qmul)
+{
+ return decode_coeffs_b_generic(td->c, coef, n_coeffs, 1, 1, 8, cnt, eob, p,
+ nnz, scan, nb, band_counts, qmul);
+}
+
+static int decode_coeffs_b_16bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
+ unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
+ const uint8_t (*p)[6][11], int nnz, const int16_t *scan,
+ const int16_t (*nb)[2], const int16_t *band_counts,
+ const int16_t *qmul)
+{
+ return decode_coeffs_b_generic(td->c, coef, n_coeffs, 0, 0, td->s->s.h.bpp, cnt, eob, p,
+ nnz, scan, nb, band_counts, qmul);
+}
+
+static int decode_coeffs_b32_16bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
+ unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
+ const uint8_t (*p)[6][11], int nnz, const int16_t *scan,
+ const int16_t (*nb)[2], const int16_t *band_counts,
+ const int16_t *qmul)
+{
+ return decode_coeffs_b_generic(td->c, coef, n_coeffs, 1, 0, td->s->s.h.bpp, cnt, eob, p,
+ nnz, scan, nb, band_counts, qmul);
+}
+
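+// Decode the luma and chroma coefficients for the whole block, merging and
+// splatting the above/left non-zero contexts at transform-block granularity.
+// Returns nonzero if any coefficient in the block was coded.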
+static av_always_inline int decode_coeffs(VP9TileData *td, int is8bitsperpixel)
+{
+ const VP9Context *s = td->s;
+ VP9Block *b = td->b;
+ int row = td->row, col = td->col;
+ const uint8_t (*p)[6][11] = s->prob.coef[b->tx][0 /* y */][!b->intra];
+ unsigned (*c)[6][3] = td->counts.coef[b->tx][0 /* y */][!b->intra];
+ unsigned (*e)[6][2] = td->counts.eob[b->tx][0 /* y */][!b->intra];
+ int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1;
+ int end_x = FFMIN(2 * (s->cols - col), w4);
+ int end_y = FFMIN(2 * (s->rows - row), h4);
+ int n, pl, x, y, ret;
+ const int16_t (*qmul)[2] = s->s.h.segmentation.feat[b->seg_id].qmul;
+ int tx = 4 * s->s.h.lossless + b->tx;
+ const int16_t * const *yscans = ff_vp9_scans[tx];
+ const int16_t (* const * ynbs)[2] = ff_vp9_scans_nb[tx];
+ const int16_t *uvscan = ff_vp9_scans[b->uvtx][DCT_DCT];
+ const int16_t (*uvnb)[2] = ff_vp9_scans_nb[b->uvtx][DCT_DCT];
+ uint8_t *a = &s->above_y_nnz_ctx[col * 2];
+ uint8_t *l = &td->left_y_nnz_ctx[(row & 7) << 1];
+ static const int16_t band_counts[4][8] = {
+ { 1, 2, 3, 4, 3, 16 - 13 },
+ { 1, 2, 3, 4, 11, 64 - 21 },
+ { 1, 2, 3, 4, 11, 256 - 21 },
+ { 1, 2, 3, 4, 11, 1024 - 21 },
+ };
+ const int16_t *y_band_counts = band_counts[b->tx];
+ const int16_t *uv_band_counts = band_counts[b->uvtx];
+ int bytesperpixel = is8bitsperpixel ? 1 : 2;
+ int total_coeff = 0;
+
+#define MERGE(la, end, step, rd) \
+ for (n = 0; n < end; n += step) \
+ la[n] = !!rd(&la[n])
+#define MERGE_CTX(step, rd) \
+ do { \
+ MERGE(l, end_y, step, rd); \
+ MERGE(a, end_x, step, rd); \
+ } while (0)
+
+#define DECODE_Y_COEF_LOOP(step, mode_index, v) \
+ for (n = 0, y = 0; y < end_y; y += step) { \
+ for (x = 0; x < end_x; x += step, n += step * step) { \
+ enum TxfmType txtp = ff_vp9_intra_txfm_type[b->mode[mode_index]]; \
+ ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
+ (td, td->block + 16 * n * bytesperpixel, 16 * step * step, \
+ c, e, p, a[x] + l[y], yscans[txtp], \
+ ynbs[txtp], y_band_counts, qmul[0]); \
+ a[x] = l[y] = !!ret; \
+ total_coeff |= !!ret; \
+ if (step >= 4) { \
+ AV_WN16A(&td->eob[n], ret); \
+ } else { \
+ td->eob[n] = ret; \
+ } \
+ } \
+ }
+
+#define SPLAT(la, end, step, cond) \
+ if (step == 2) { \
+ for (n = 1; n < end; n += step) \
+ la[n] = la[n - 1]; \
+ } else if (step == 4) { \
+ if (cond) { \
+ for (n = 0; n < end; n += step) \
+ AV_WN32A(&la[n], la[n] * 0x01010101); \
+ } else { \
+ for (n = 0; n < end; n += step) \
+ memset(&la[n + 1], la[n], FFMIN(end - n - 1, 3)); \
+ } \
+ } else /* step == 8 */ { \
+ if (cond) { \
+ if (HAVE_FAST_64BIT) { \
+ for (n = 0; n < end; n += step) \
+ AV_WN64A(&la[n], la[n] * 0x0101010101010101ULL); \
+ } else { \
+ for (n = 0; n < end; n += step) { \
+ uint32_t v32 = la[n] * 0x01010101; \
+ AV_WN32A(&la[n], v32); \
+ AV_WN32A(&la[n + 4], v32); \
+ } \
+ } \
+ } else { \
+ for (n = 0; n < end; n += step) \
+ memset(&la[n + 1], la[n], FFMIN(end - n - 1, 7)); \
+ } \
+ }
+#define SPLAT_CTX(step) \
+ do { \
+ SPLAT(a, end_x, step, end_x == w4); \
+ SPLAT(l, end_y, step, end_y == h4); \
+ } while (0)
+
+ /* y tokens */
+ switch (b->tx) {
+ case TX_4X4:
+ DECODE_Y_COEF_LOOP(1, b->bs > BS_8x8 ? n : 0,);
+ break;
+ case TX_8X8:
+ MERGE_CTX(2, AV_RN16A);
+ DECODE_Y_COEF_LOOP(2, 0,);
+ SPLAT_CTX(2);
+ break;
+ case TX_16X16:
+ MERGE_CTX(4, AV_RN32A);
+ DECODE_Y_COEF_LOOP(4, 0,);
+ SPLAT_CTX(4);
+ break;
+ case TX_32X32:
+ MERGE_CTX(8, AV_RN64A);
+ DECODE_Y_COEF_LOOP(8, 0, 32);
+ SPLAT_CTX(8);
+ break;
+ }
+
+#define DECODE_UV_COEF_LOOP(step, v) \
+ for (n = 0, y = 0; y < end_y; y += step) { \
+ for (x = 0; x < end_x; x += step, n += step * step) { \
+ ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
+ (td, td->uvblock[pl] + 16 * n * bytesperpixel, \
+ 16 * step * step, c, e, p, a[x] + l[y], \
+ uvscan, uvnb, uv_band_counts, qmul[1]); \
+ a[x] = l[y] = !!ret; \
+ total_coeff |= !!ret; \
+ if (step >= 4) { \
+ AV_WN16A(&td->uveob[pl][n], ret); \
+ } else { \
+ td->uveob[pl][n] = ret; \
+ } \
+ } \
+ }
+
+ p = s->prob.coef[b->uvtx][1 /* uv */][!b->intra];
+ c = td->counts.coef[b->uvtx][1 /* uv */][!b->intra];
+ e = td->counts.eob[b->uvtx][1 /* uv */][!b->intra];
+ w4 >>= s->ss_h;
+ end_x >>= s->ss_h;
+ h4 >>= s->ss_v;
+ end_y >>= s->ss_v;
+ for (pl = 0; pl < 2; pl++) {
+ a = &s->above_uv_nnz_ctx[pl][col << !s->ss_h];
+ l = &td->left_uv_nnz_ctx[pl][(row & 7) << !s->ss_v];
+ switch (b->uvtx) {
+ case TX_4X4:
+ DECODE_UV_COEF_LOOP(1,);
+ break;
+ case TX_8X8:
+ MERGE_CTX(2, AV_RN16A);
+ DECODE_UV_COEF_LOOP(2,);
+ SPLAT_CTX(2);
+ break;
+ case TX_16X16:
+ MERGE_CTX(4, AV_RN32A);
+ DECODE_UV_COEF_LOOP(4,);
+ SPLAT_CTX(4);
+ break;
+ case TX_32X32:
+ MERGE_CTX(8, AV_RN64A);
+ DECODE_UV_COEF_LOOP(8, 32);
+ SPLAT_CTX(8);
+ break;
+ }
+ }
+
+ return total_coeff;
+}
+
+static int decode_coeffs_8bpp(VP9TileData *td)
+{
+ return decode_coeffs(td, 1);
+}
+
+static int decode_coeffs_16bpp(VP9TileData *td)
+{
+ return decode_coeffs(td, 0);
+}
+
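+// Set bits in the per-row loop-filter edge masks (split by edge direction
+// and filter width) for the current block, based on its transform size,
+// position within the superblock and skip state.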
+static av_always_inline void mask_edges(uint8_t (*mask)[8][4], int ss_h, int ss_v,
+ int row_and_7, int col_and_7,
+ int w, int h, int col_end, int row_end,
+ enum TxfmMode tx, int skip_inter)
+{
+ static const unsigned wide_filter_col_mask[2] = { 0x11, 0x01 };
+ static const unsigned wide_filter_row_mask[2] = { 0x03, 0x07 };
+
+ // FIXME I'm pretty sure all loops can be replaced by a single LUT if
+ // we make VP9Filter.mask uint64_t (i.e. row/col all single variable)
+ // and make the LUT 5-indexed (bl, bp, is_uv, tx and row/col), and then
+ // use row_and_7/col_and_7 as shifts (1*col_and_7+8*row_and_7)
+
+ // the intended behaviour of the vp9 loopfilter is to work on 8-pixel
+ // edges. This means that for UV, we work on two subsampled blocks at
+ // a time, and we only use the topleft block's mode information to set
+ // things like block strength. Thus, for any block size smaller than
+ // 16x16, ignore the odd portion of the block.
+ if (tx == TX_4X4 && (ss_v | ss_h)) {
+ if (h == ss_v) {
+ if (row_and_7 & 1)
+ return;
+ if (!row_end)
+ h += 1;
+ }
+ if (w == ss_h) {
+ if (col_and_7 & 1)
+ return;
+ if (!col_end)
+ w += 1;
+ }
+ }
+
+ if (tx == TX_4X4 && !skip_inter) {
+ int t = 1 << col_and_7, m_col = (t << w) - t, y;
+ // on 32-px edges, use the 8-px wide loopfilter; else, use 4-px wide
+ int m_row_8 = m_col & wide_filter_col_mask[ss_h], m_row_4 = m_col - m_row_8;
+
+ for (y = row_and_7; y < h + row_and_7; y++) {
+ int col_mask_id = 2 - !(y & wide_filter_row_mask[ss_v]);
+
+ mask[0][y][1] |= m_row_8;
+ mask[0][y][2] |= m_row_4;
+ // for odd lines, if the odd col is not being filtered,
+ // skip odd row also:
+ // .---. <-- a
+ // | |
+ // |___| <-- b
+ // ^ ^
+ // c d
+ //
+ // if a/c are even row/col and b/d are odd, and d is skipped,
+ // e.g. right edge of size-66x66.webm, then skip b also (bug)
+ if ((ss_h & ss_v) && (col_end & 1) && (y & 1)) {
+ mask[1][y][col_mask_id] |= (t << (w - 1)) - t;
+ } else {
+ mask[1][y][col_mask_id] |= m_col;
+ }
+ if (!ss_h)
+ mask[0][y][3] |= m_col;
+ if (!ss_v) {
+ if (ss_h && (col_end & 1))
+ mask[1][y][3] |= (t << (w - 1)) - t;
+ else
+ mask[1][y][3] |= m_col;
+ }
+ }
+ } else {
+ int y, t = 1 << col_and_7, m_col = (t << w) - t;
+
+ if (!skip_inter) {
+ int mask_id = (tx == TX_8X8);
+ int l2 = tx + ss_h - 1, step1d;
+ static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
+ int m_row = m_col & masks[l2];
+
+ // at odd UV tx16/tx32 col/row loopfilter edges, force the
+ // 8-pixel-wide loopfilter to prevent going off the visible edge.
+ if (ss_h && tx > TX_8X8 && (w ^ (w - 1)) == 1) {
+ int m_row_16 = ((t << (w - 1)) - t) & masks[l2];
+ int m_row_8 = m_row - m_row_16;
+
+ for (y = row_and_7; y < h + row_and_7; y++) {
+ mask[0][y][0] |= m_row_16;
+ mask[0][y][1] |= m_row_8;
+ }
+ } else {
+ for (y = row_and_7; y < h + row_and_7; y++)
+ mask[0][y][mask_id] |= m_row;
+ }
+
+ l2 = tx + ss_v - 1;
+ step1d = 1 << l2;
+ if (ss_v && tx > TX_8X8 && (h ^ (h - 1)) == 1) {
+ for (y = row_and_7; y < h + row_and_7 - 1; y += step1d)
+ mask[1][y][0] |= m_col;
+ if (y - row_and_7 == h - 1)
+ mask[1][y][1] |= m_col;
+ } else {
+ for (y = row_and_7; y < h + row_and_7; y += step1d)
+ mask[1][y][mask_id] |= m_col;
+ }
+ } else if (tx != TX_4X4) {
+ int mask_id;
+
+ mask_id = (tx == TX_8X8) || (h == ss_v);
+ mask[1][row_and_7][mask_id] |= m_col;
+ mask_id = (tx == TX_8X8) || (w == ss_h);
+ for (y = row_and_7; y < h + row_and_7; y++)
+ mask[0][y][mask_id] |= t;
+ } else {
+ int t8 = t & wide_filter_col_mask[ss_h], t4 = t - t8;
+
+ for (y = row_and_7; y < h + row_and_7; y++) {
+ mask[0][y][2] |= t4;
+ mask[0][y][1] |= t8;
+ }
+ mask[1][row_and_7][2 - !(row_and_7 & wide_filter_row_mask[ss_v])] |= m_col;
+ }
+ }
+}
+
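+// Decode a single block at (row, col). In the first pass only modes and
+// coefficients are parsed; otherwise the block is reconstructed (through
+// temporary buffers when it overhangs the frame) and the loop-filter level
+// and edge masks are recorded.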
+void ff_vp9_decode_block(VP9TileData *td, int row, int col,
+ VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
+ enum BlockLevel bl, enum BlockPartition bp)
+{
+ const VP9Context *s = td->s;
+ VP9Block *b = td->b;
+ enum BlockSize bs = bl * 3 + bp;
+ int bytesperpixel = s->bytesperpixel;
+ int w4 = ff_vp9_bwh_tab[1][bs][0], h4 = ff_vp9_bwh_tab[1][bs][1], lvl;
+ int emu[2];
+ AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
+
+ td->row = row;
+ td->row7 = row & 7;
+ td->col = col;
+ td->col7 = col & 7;
+
+ td->min_mv.x = -(128 + col * 64);
+ td->min_mv.y = -(128 + row * 64);
+ td->max_mv.x = 128 + (s->cols - col - w4) * 64;
+ td->max_mv.y = 128 + (s->rows - row - h4) * 64;
+
+ if (s->pass < 2) {
+ b->bs = bs;
+ b->bl = bl;
+ b->bp = bp;
+ decode_mode(td);
+ b->uvtx = b->tx - ((s->ss_h && w4 * 2 == (1 << b->tx)) ||
+ (s->ss_v && h4 * 2 == (1 << b->tx)));
+
+ if (td->block_structure) {
+ td->block_structure[td->nb_block_structure].row = row;
+ td->block_structure[td->nb_block_structure].col = col;
+ td->block_structure[td->nb_block_structure].block_size_idx_x = av_log2(w4);
+ td->block_structure[td->nb_block_structure].block_size_idx_y = av_log2(h4);
+ td->nb_block_structure++;
+ }
+
+ if (!b->skip) {
+ int has_coeffs;
+
+ if (bytesperpixel == 1) {
+ has_coeffs = decode_coeffs_8bpp(td);
+ } else {
+ has_coeffs = decode_coeffs_16bpp(td);
+ }
+ if (!has_coeffs && b->bs <= BS_8x8 && !b->intra) {
+ b->skip = 1;
+ memset(&s->above_skip_ctx[col], 1, w4);
+ memset(&td->left_skip_ctx[td->row7], 1, h4);
+ }
+ } else {
+ int row7 = td->row7;
+
+#define SPLAT_ZERO_CTX(v, n) \
+ switch (n) { \
+ case 1: v = 0; break; \
+ case 2: AV_ZERO16(&v); break; \
+ case 4: AV_ZERO32(&v); break; \
+ case 8: AV_ZERO64(&v); break; \
+ case 16: AV_ZERO128(&v); break; \
+ }
+#define SPLAT_ZERO_YUV(dir, var, off, n, dir2) \
+ do { \
+ SPLAT_ZERO_CTX(dir##_y_##var[off * 2], n * 2); \
+ if (s->ss_##dir2) { \
+ SPLAT_ZERO_CTX(dir##_uv_##var[0][off], n); \
+ SPLAT_ZERO_CTX(dir##_uv_##var[1][off], n); \
+ } else { \
+ SPLAT_ZERO_CTX(dir##_uv_##var[0][off * 2], n * 2); \
+ SPLAT_ZERO_CTX(dir##_uv_##var[1][off * 2], n * 2); \
+ } \
+ } while (0)
+
+ switch (w4) {
+ case 1: SPLAT_ZERO_YUV(s->above, nnz_ctx, col, 1, h); break;
+ case 2: SPLAT_ZERO_YUV(s->above, nnz_ctx, col, 2, h); break;
+ case 4: SPLAT_ZERO_YUV(s->above, nnz_ctx, col, 4, h); break;
+ case 8: SPLAT_ZERO_YUV(s->above, nnz_ctx, col, 8, h); break;
+ }
+ switch (h4) {
+ case 1: SPLAT_ZERO_YUV(td->left, nnz_ctx, row7, 1, v); break;
+ case 2: SPLAT_ZERO_YUV(td->left, nnz_ctx, row7, 2, v); break;
+ case 4: SPLAT_ZERO_YUV(td->left, nnz_ctx, row7, 4, v); break;
+ case 8: SPLAT_ZERO_YUV(td->left, nnz_ctx, row7, 8, v); break;
+ }
+ }
+
+ if (s->pass == 1) {
+ s->td[0].b++;
+ s->td[0].block += w4 * h4 * 64 * bytesperpixel;
+ s->td[0].uvblock[0] += w4 * h4 * 64 * bytesperpixel >> (s->ss_h + s->ss_v);
+ s->td[0].uvblock[1] += w4 * h4 * 64 * bytesperpixel >> (s->ss_h + s->ss_v);
+ s->td[0].eob += 4 * w4 * h4;
+ s->td[0].uveob[0] += 4 * w4 * h4 >> (s->ss_h + s->ss_v);
+ s->td[0].uveob[1] += 4 * w4 * h4 >> (s->ss_h + s->ss_v);
+
+ return;
+ }
+ }
+
+ // use emulated overhang buffers if the stride of the target buffer can't
+ // hold the block; this makes it possible to support emu-edge and so on
+ // even if we have large block overhangs
+ emu[0] = (col + w4) * 8 * bytesperpixel > f->linesize[0] ||
+ (row + h4) > s->rows;
+ emu[1] = ((col + w4) * 8 >> s->ss_h) * bytesperpixel > f->linesize[1] ||
+ (row + h4) > s->rows;
+ if (emu[0]) {
+ td->dst[0] = td->tmp_y;
+ td->y_stride = 128;
+ } else {
+ td->dst[0] = f->data[0] + yoff;
+ td->y_stride = f->linesize[0];
+ }
+ if (emu[1]) {
+ td->dst[1] = td->tmp_uv[0];
+ td->dst[2] = td->tmp_uv[1];
+ td->uv_stride = 128;
+ } else {
+ td->dst[1] = f->data[1] + uvoff;
+ td->dst[2] = f->data[2] + uvoff;
+ td->uv_stride = f->linesize[1];
+ }
+ if (b->intra) {
+ if (s->s.h.bpp > 8) {
+ ff_vp9_intra_recon_16bpp(td, yoff, uvoff);
+ } else {
+ ff_vp9_intra_recon_8bpp(td, yoff, uvoff);
+ }
+ } else {
+ if (s->s.h.bpp > 8) {
+ ff_vp9_inter_recon_16bpp(td);
+ } else {
+ ff_vp9_inter_recon_8bpp(td);
+ }
+ }
+ if (emu[0]) {
+ int w = FFMIN(s->cols - col, w4) * 8, h = FFMIN(s->rows - row, h4) * 8, n, o = 0;
+
+ for (n = 0; o < w; n++) {
+ int bw = 64 >> n;
+
+ av_assert2(n <= 4);
+ if (w & bw) {
+ s->dsp.mc[n][0][0][0][0](f->data[0] + yoff + o * bytesperpixel, f->linesize[0],
+ td->tmp_y + o * bytesperpixel, 128, h, 0, 0);
+ o += bw;
+ }
+ }
+ }
+ if (emu[1]) {
+ int w = FFMIN(s->cols - col, w4) * 8 >> s->ss_h;
+ int h = FFMIN(s->rows - row, h4) * 8 >> s->ss_v, n, o = 0;
+
+ for (n = s->ss_h; o < w; n++) {
+ int bw = 64 >> n;
+
+ av_assert2(n <= 4);
+ if (w & bw) {
+ s->dsp.mc[n][0][0][0][0](f->data[1] + uvoff + o * bytesperpixel, f->linesize[1],
+ td->tmp_uv[0] + o * bytesperpixel, 128, h, 0, 0);
+ s->dsp.mc[n][0][0][0][0](f->data[2] + uvoff + o * bytesperpixel, f->linesize[2],
+ td->tmp_uv[1] + o * bytesperpixel, 128, h, 0, 0);
+ o += bw;
+ }
+ }
+ }
+
+ // pick filter level and find edges to apply filter to
+ if (s->s.h.filter.level &&
+ (lvl = s->s.h.segmentation.feat[b->seg_id].lflvl[b->intra ? 0 : b->ref[0] + 1]
+ [b->mode[3] != ZEROMV]) > 0) {
+ int x_end = FFMIN(s->cols - col, w4), y_end = FFMIN(s->rows - row, h4);
+ int skip_inter = !b->intra && b->skip, col7 = td->col7, row7 = td->row7;
+
+ setctx_2d(&lflvl->level[row7 * 8 + col7], w4, h4, 8, lvl);
+ mask_edges(lflvl->mask[0], 0, 0, row7, col7, x_end, y_end, 0, 0, b->tx, skip_inter);
+ if (s->ss_h || s->ss_v)
+ mask_edges(lflvl->mask[1], s->ss_h, s->ss_v, row7, col7, x_end, y_end,
+ s->cols & 1 && col + w4 >= s->cols ? s->cols & 7 : 0,
+ s->rows & 1 && row + h4 >= s->rows ? s->rows & 7 : 0,
+ b->uvtx, skip_inter);
+ }
+
+ if (s->pass == 2) {
+ s->td[0].b++;
+ s->td[0].block += w4 * h4 * 64 * bytesperpixel;
+ s->td[0].uvblock[0] += w4 * h4 * 64 * bytesperpixel >> (s->ss_v + s->ss_h);
+ s->td[0].uvblock[1] += w4 * h4 * 64 * bytesperpixel >> (s->ss_v + s->ss_h);
+ s->td[0].eob += 4 * w4 * h4;
+ s->td[0].uveob[0] += 4 * w4 * h4 >> (s->ss_v + s->ss_h);
+ s->td[0].uveob[1] += 4 * w4 * h4 >> (s->ss_v + s->ss_h);
+ }
+}
diff --git a/media/ffvpx/libavcodec/vp9data.c b/media/ffvpx/libavcodec/vp9data.c
new file mode 100644
index 0000000000..7af8a97b1e
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9data.c
@@ -0,0 +1,2247 @@
+/*
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "vp9.h"
+#include "vp9data.h"
+
+const uint8_t ff_vp9_bwh_tab[2][N_BS_SIZES][2] = {
+ {
+ { 16, 16 }, { 16, 8 }, { 8, 16 }, { 8, 8 }, { 8, 4 }, { 4, 8 },
+ { 4, 4 }, { 4, 2 }, { 2, 4 }, { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 },
+ }, {
+ { 8, 8 }, { 8, 4 }, { 4, 8 }, { 4, 4 }, { 4, 2 }, { 2, 4 },
+ { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
+ }
+};
+
+const int8_t ff_vp9_partition_tree[3][2] = {
+ { -PARTITION_NONE, 1 }, // '0'
+ { -PARTITION_H, 2 }, // '10'
+ { -PARTITION_V, -PARTITION_SPLIT }, // '110', '111'
+};
+
+const uint8_t ff_vp9_default_kf_partition_probs[4][4][3] = {
+ { /* 64x64 -> 32x32 */
+ { 174, 35, 49 } /* a/l both not split */,
+ { 68, 11, 27 } /* a split, l not split */,
+ { 57, 15, 9 } /* l split, a not split */,
+ { 12, 3, 3 } /* a/l both split */
+ }, { /* 32x32 -> 16x16 */
+ { 150, 40, 39 } /* a/l both not split */,
+ { 78, 12, 26 } /* a split, l not split */,
+ { 67, 33, 11 } /* l split, a not split */,
+ { 24, 7, 5 } /* a/l both split */,
+ }, { /* 16x16 -> 8x8 */
+ { 149, 53, 53 } /* a/l both not split */,
+ { 94, 20, 48 } /* a split, l not split */,
+ { 83, 53, 24 } /* l split, a not split */,
+ { 52, 18, 18 } /* a/l both split */,
+ }, { /* 8x8 -> 4x4 */
+ { 158, 97, 94 } /* a/l both not split */,
+ { 93, 24, 99 } /* a split, l not split */,
+ { 85, 119, 44 } /* l split, a not split */,
+ { 62, 59, 67 } /* a/l both split */,
+ },
+};
+
+const int8_t ff_vp9_segmentation_tree[7][2] = {
+ { 1, 2 },
+ { 3, 4 },
+ { 5, 6 },
+ { -0, -1 }, // '00x'
+ { -2, -3 }, // '01x'
+ { -4, -5 }, // '10x'
+ { -6, -7 }, // '11x'
+};
+
+const int8_t ff_vp9_intramode_tree[9][2] = {
+ { -DC_PRED, 1 }, // '0'
+ { -TM_VP8_PRED, 2 }, // '10'
+ { -VERT_PRED, 3 }, // '110'
+ { 4, 6 },
+ { -HOR_PRED, 5 }, // '11100'
+ { -DIAG_DOWN_RIGHT_PRED, -VERT_RIGHT_PRED }, // '11101x'
+ { -DIAG_DOWN_LEFT_PRED, 7 }, // '11110'
+ { -VERT_LEFT_PRED, 8 }, // '111110'
+ { -HOR_DOWN_PRED, -HOR_UP_PRED }, // '111111x'
+};
+
+const uint8_t ff_vp9_default_kf_ymode_probs[10][10][9] = {
+ { /* above = v */
+ { 43, 46, 168, 134, 107, 128, 69, 142, 92 } /* left = v */,
+ { 44, 29, 68, 159, 201, 177, 50, 57, 77 } /* left = h */,
+ { 63, 36, 126, 146, 123, 158, 60, 90, 96 } /* left = dc */,
+ { 58, 38, 76, 114, 97, 172, 78, 133, 92 } /* left = d45 */,
+ { 46, 41, 76, 140, 63, 184, 69, 112, 57 } /* left = d135 */,
+ { 38, 32, 85, 140, 46, 112, 54, 151, 133 } /* left = d117 */,
+ { 39, 27, 61, 131, 110, 175, 44, 75, 136 } /* left = d153 */,
+ { 47, 35, 80, 100, 74, 143, 64, 163, 74 } /* left = d63 */,
+ { 52, 30, 74, 113, 130, 175, 51, 64, 58 } /* left = d27 */,
+ { 36, 61, 116, 114, 128, 162, 80, 125, 82 } /* left = tm */
+ }, { /* above = h */
+ { 55, 44, 68, 166, 179, 192, 57, 57, 108 } /* left = v */,
+ { 42, 26, 11, 199, 241, 228, 23, 15, 85 } /* left = h */,
+ { 82, 26, 26, 171, 208, 204, 44, 32, 105 } /* left = dc */,
+ { 68, 42, 19, 131, 160, 199, 55, 52, 83 } /* left = d45 */,
+ { 58, 50, 25, 139, 115, 232, 39, 52, 118 } /* left = d135 */,
+ { 50, 35, 33, 153, 104, 162, 64, 59, 131 } /* left = d117 */,
+ { 44, 24, 16, 150, 177, 202, 33, 19, 156 } /* left = d153 */,
+ { 53, 49, 21, 110, 116, 168, 59, 80, 76 } /* left = d63 */,
+ { 55, 27, 12, 153, 203, 218, 26, 27, 49 } /* left = d27 */,
+ { 38, 72, 19, 168, 203, 212, 50, 50, 107 } /* left = tm */
+ }, { /* above = dc */
+ { 92, 45, 102, 136, 116, 180, 74, 90, 100 } /* left = v */,
+ { 73, 32, 19, 187, 222, 215, 46, 34, 100 } /* left = h */,
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91 } /* left = dc */,
+ { 91, 30, 32, 116, 121, 186, 93, 86, 94 } /* left = d45 */,
+ { 72, 35, 36, 149, 68, 206, 68, 63, 105 } /* left = d135 */,
+ { 73, 31, 28, 138, 57, 124, 55, 122, 151 } /* left = d117 */,
+ { 67, 23, 21, 140, 126, 197, 40, 37, 171 } /* left = d153 */,
+ { 74, 32, 27, 107, 86, 160, 63, 134, 102 } /* left = d63 */,
+ { 86, 27, 28, 128, 154, 212, 45, 43, 53 } /* left = d27 */,
+ { 59, 67, 44, 140, 161, 202, 78, 67, 119 } /* left = tm */
+ }, { /* above = d45 */
+ { 59, 38, 83, 112, 103, 162, 98, 136, 90 } /* left = v */,
+ { 62, 30, 23, 158, 200, 207, 59, 57, 50 } /* left = h */,
+ { 103, 26, 36, 129, 132, 201, 83, 80, 93 } /* left = dc */,
+ { 67, 30, 29, 84, 86, 191, 102, 91, 59 } /* left = d45 */,
+ { 60, 32, 33, 112, 71, 220, 64, 89, 104 } /* left = d135 */,
+ { 53, 26, 34, 130, 56, 149, 84, 120, 103 } /* left = d117 */,
+ { 53, 21, 23, 133, 109, 210, 56, 77, 172 } /* left = d153 */,
+ { 61, 29, 29, 93, 97, 165, 83, 175, 162 } /* left = d63 */,
+ { 77, 19, 29, 112, 142, 228, 55, 66, 36 } /* left = d27 */,
+ { 47, 47, 43, 114, 137, 181, 100, 99, 95 } /* left = tm */
+ }, { /* above = d135 */
+ { 53, 40, 55, 139, 69, 183, 61, 80, 110 } /* left = v */,
+ { 40, 29, 19, 161, 180, 207, 43, 24, 91 } /* left = h */,
+ { 69, 23, 29, 128, 83, 199, 46, 44, 101 } /* left = dc */,
+ { 60, 34, 19, 105, 61, 198, 53, 64, 89 } /* left = d45 */,
+ { 52, 31, 22, 158, 40, 209, 58, 62, 89 } /* left = d135 */,
+ { 44, 31, 29, 147, 46, 158, 56, 102, 198 } /* left = d117 */,
+ { 35, 19, 12, 135, 87, 209, 41, 45, 167 } /* left = d153 */,
+ { 51, 38, 25, 113, 58, 164, 70, 93, 97 } /* left = d63 */,
+ { 55, 25, 21, 118, 95, 215, 38, 39, 66 } /* left = d27 */,
+ { 47, 54, 34, 146, 108, 203, 72, 103, 151 } /* left = tm */
+ }, { /* above = d117 */
+ { 46, 27, 80, 150, 55, 124, 55, 121, 135 } /* left = v */,
+ { 36, 23, 27, 165, 149, 166, 54, 64, 118 } /* left = h */,
+ { 64, 19, 37, 156, 66, 138, 49, 95, 133 } /* left = dc */,
+ { 53, 21, 36, 131, 63, 163, 60, 109, 81 } /* left = d45 */,
+ { 40, 26, 35, 154, 40, 185, 51, 97, 123 } /* left = d135 */,
+ { 35, 19, 34, 179, 19, 97, 48, 129, 124 } /* left = d117 */,
+ { 36, 20, 26, 136, 62, 164, 33, 77, 154 } /* left = d153 */,
+ { 45, 26, 28, 129, 45, 129, 49, 147, 123 } /* left = d63 */,
+ { 45, 18, 32, 130, 90, 157, 40, 79, 91 } /* left = d27 */,
+ { 38, 44, 51, 136, 74, 162, 57, 97, 121 } /* left = tm */
+ }, { /* above = d153 */
+ { 56, 39, 58, 133, 117, 173, 48, 53, 187 } /* left = v */,
+ { 35, 21, 12, 161, 212, 207, 20, 23, 145 } /* left = h */,
+ { 75, 17, 22, 136, 138, 185, 32, 34, 166 } /* left = dc */,
+ { 56, 29, 19, 117, 109, 181, 55, 68, 112 } /* left = d45 */,
+ { 47, 29, 17, 153, 64, 220, 59, 51, 114 } /* left = d135 */,
+ { 46, 16, 24, 136, 76, 147, 41, 64, 172 } /* left = d117 */,
+ { 34, 17, 11, 108, 152, 187, 13, 15, 209 } /* left = d153 */,
+ { 55, 30, 18, 122, 79, 179, 44, 88, 116 } /* left = d63 */,
+ { 51, 24, 14, 115, 133, 209, 32, 26, 104 } /* left = d27 */,
+ { 37, 49, 25, 129, 168, 164, 41, 54, 148 } /* left = tm */
+ }, { /* above = d63 */
+ { 48, 34, 86, 101, 92, 146, 78, 179, 134 } /* left = v */,
+ { 47, 22, 24, 138, 187, 178, 68, 69, 59 } /* left = h */,
+ { 78, 23, 39, 111, 117, 170, 74, 124, 94 } /* left = dc */,
+ { 56, 25, 33, 105, 112, 187, 95, 177, 129 } /* left = d45 */,
+ { 48, 31, 27, 114, 63, 183, 82, 116, 56 } /* left = d135 */,
+ { 43, 28, 37, 121, 63, 123, 61, 192, 169 } /* left = d117 */,
+ { 42, 17, 24, 109, 97, 177, 56, 76, 122 } /* left = d153 */,
+ { 46, 23, 32, 74, 86, 150, 67, 183, 88 } /* left = d63 */,
+ { 58, 18, 28, 105, 139, 182, 70, 92, 63 } /* left = d27 */,
+ { 36, 38, 48, 92, 122, 165, 88, 137, 91 } /* left = tm */
+ }, { /* above = d27 */
+ { 62, 44, 61, 123, 105, 189, 48, 57, 64 } /* left = v */,
+ { 47, 25, 17, 175, 222, 220, 24, 30, 86 } /* left = h */,
+ { 82, 22, 32, 127, 143, 213, 39, 41, 70 } /* left = dc */,
+ { 68, 36, 17, 106, 102, 206, 59, 74, 74 } /* left = d45 */,
+ { 57, 39, 23, 151, 68, 216, 55, 63, 58 } /* left = d135 */,
+ { 49, 30, 35, 141, 70, 168, 82, 40, 115 } /* left = d117 */,
+ { 51, 25, 15, 136, 129, 202, 38, 35, 139 } /* left = d153 */,
+ { 59, 39, 19, 114, 75, 180, 77, 104, 42 } /* left = d63 */,
+ { 68, 26, 16, 111, 141, 215, 29, 28, 28 } /* left = d27 */,
+ { 40, 61, 26, 126, 152, 206, 61, 59, 93 } /* left = tm */
+ }, { /* above = tm */
+ { 44, 78, 115, 132, 119, 173, 71, 112, 93 } /* left = v */,
+ { 39, 38, 21, 184, 227, 206, 42, 32, 64 } /* left = h */,
+ { 65, 70, 60, 155, 159, 199, 61, 60, 81 } /* left = dc */,
+ { 58, 47, 36, 124, 137, 193, 80, 82, 78 } /* left = d45 */,
+ { 49, 50, 35, 144, 95, 205, 63, 78, 59 } /* left = d135 */,
+ { 41, 53, 52, 148, 71, 142, 65, 128, 51 } /* left = d117 */,
+ { 40, 36, 28, 143, 143, 202, 40, 55, 137 } /* left = d153 */,
+ { 42, 44, 44, 104, 105, 164, 64, 130, 80 } /* left = d63 */,
+ { 52, 34, 29, 129, 183, 227, 42, 35, 43 } /* left = d27 */,
+ { 43, 81, 53, 140, 169, 204, 68, 84, 72 } /* left = tm */
+ }
+};
+
+const uint8_t ff_vp9_default_kf_uvmode_probs[10][9] = {
+ { 118, 15, 123, 148, 131, 101, 44, 93, 131 } /* y = v */,
+ { 113, 12, 23, 188, 226, 142, 26, 32, 125 } /* y = h */,
+ { 144, 11, 54, 157, 195, 130, 46, 58, 108 } /* y = dc */,
+ { 120, 11, 50, 123, 163, 135, 64, 77, 103 } /* y = d45 */,
+ { 113, 9, 36, 155, 111, 157, 32, 44, 161 } /* y = d135 */,
+ { 116, 9, 55, 176, 76, 96, 37, 61, 149 } /* y = d117 */,
+ { 115, 9, 28, 141, 161, 167, 21, 25, 193 } /* y = d153 */,
+ { 116, 12, 64, 120, 140, 125, 49, 115, 121 } /* y = d63 */,
+ { 120, 12, 32, 145, 195, 142, 32, 38, 86 } /* y = d27 */,
+ { 102, 19, 66, 162, 182, 122, 35, 59, 128 } /* y = tm */
+};
+
+const int8_t ff_vp9_inter_mode_tree[3][2] = {
+ { -ZEROMV, 1 }, // '0'
+ { -NEARESTMV, 2 }, // '10'
+ { -NEARMV, -NEWMV }, // '11x'
+};
+
+const int8_t ff_vp9_filter_tree[2][2] = {
+ { -0, 1 }, // '0'
+ { -1, -2 }, // '1x'
+};
+
+const enum FilterMode ff_vp9_filter_lut[3] = {
+ FILTER_8TAP_REGULAR,
+ FILTER_8TAP_SMOOTH,
+ FILTER_8TAP_SHARP,
+};
+
+const int16_t ff_vp9_dc_qlookup[3][256] = {
+ {
+ 4, 8, 8, 9, 10, 11, 12, 12,
+ 13, 14, 15, 16, 17, 18, 19, 19,
+ 20, 21, 22, 23, 24, 25, 26, 26,
+ 27, 28, 29, 30, 31, 32, 32, 33,
+ 34, 35, 36, 37, 38, 38, 39, 40,
+ 41, 42, 43, 43, 44, 45, 46, 47,
+ 48, 48, 49, 50, 51, 52, 53, 53,
+ 54, 55, 56, 57, 57, 58, 59, 60,
+ 61, 62, 62, 63, 64, 65, 66, 66,
+ 67, 68, 69, 70, 70, 71, 72, 73,
+ 74, 74, 75, 76, 77, 78, 78, 79,
+ 80, 81, 81, 82, 83, 84, 85, 85,
+ 87, 88, 90, 92, 93, 95, 96, 98,
+ 99, 101, 102, 104, 105, 107, 108, 110,
+ 111, 113, 114, 116, 117, 118, 120, 121,
+ 123, 125, 127, 129, 131, 134, 136, 138,
+ 140, 142, 144, 146, 148, 150, 152, 154,
+ 156, 158, 161, 164, 166, 169, 172, 174,
+ 177, 180, 182, 185, 187, 190, 192, 195,
+ 199, 202, 205, 208, 211, 214, 217, 220,
+ 223, 226, 230, 233, 237, 240, 243, 247,
+ 250, 253, 257, 261, 265, 269, 272, 276,
+ 280, 284, 288, 292, 296, 300, 304, 309,
+ 313, 317, 322, 326, 330, 335, 340, 344,
+ 349, 354, 359, 364, 369, 374, 379, 384,
+ 389, 395, 400, 406, 411, 417, 423, 429,
+ 435, 441, 447, 454, 461, 467, 475, 482,
+ 489, 497, 505, 513, 522, 530, 539, 549,
+ 559, 569, 579, 590, 602, 614, 626, 640,
+ 654, 668, 684, 700, 717, 736, 755, 775,
+ 796, 819, 843, 869, 896, 925, 955, 988,
+ 1022, 1058, 1098, 1139, 1184, 1232, 1282, 1336,
+ }, {
+ 4, 9, 10, 13, 15, 17, 20, 22,
+ 25, 28, 31, 34, 37, 40, 43, 47,
+ 50, 53, 57, 60, 64, 68, 71, 75,
+ 78, 82, 86, 90, 93, 97, 101, 105,
+ 109, 113, 116, 120, 124, 128, 132, 136,
+ 140, 143, 147, 151, 155, 159, 163, 166,
+ 170, 174, 178, 182, 185, 189, 193, 197,
+ 200, 204, 208, 212, 215, 219, 223, 226,
+ 230, 233, 237, 241, 244, 248, 251, 255,
+ 259, 262, 266, 269, 273, 276, 280, 283,
+ 287, 290, 293, 297, 300, 304, 307, 310,
+ 314, 317, 321, 324, 327, 331, 334, 337,
+ 343, 350, 356, 362, 369, 375, 381, 387,
+ 394, 400, 406, 412, 418, 424, 430, 436,
+ 442, 448, 454, 460, 466, 472, 478, 484,
+ 490, 499, 507, 516, 525, 533, 542, 550,
+ 559, 567, 576, 584, 592, 601, 609, 617,
+ 625, 634, 644, 655, 666, 676, 687, 698,
+ 708, 718, 729, 739, 749, 759, 770, 782,
+ 795, 807, 819, 831, 844, 856, 868, 880,
+ 891, 906, 920, 933, 947, 961, 975, 988,
+ 1001, 1015, 1030, 1045, 1061, 1076, 1090, 1105,
+ 1120, 1137, 1153, 1170, 1186, 1202, 1218, 1236,
+ 1253, 1271, 1288, 1306, 1323, 1342, 1361, 1379,
+ 1398, 1416, 1436, 1456, 1476, 1496, 1516, 1537,
+ 1559, 1580, 1601, 1624, 1647, 1670, 1692, 1717,
+ 1741, 1766, 1791, 1817, 1844, 1871, 1900, 1929,
+ 1958, 1990, 2021, 2054, 2088, 2123, 2159, 2197,
+ 2236, 2276, 2319, 2363, 2410, 2458, 2508, 2561,
+ 2616, 2675, 2737, 2802, 2871, 2944, 3020, 3102,
+ 3188, 3280, 3375, 3478, 3586, 3702, 3823, 3953,
+ 4089, 4236, 4394, 4559, 4737, 4929, 5130, 5347,
+ }, {
+ 4, 12, 18, 25, 33, 41, 50, 60,
+ 70, 80, 91, 103, 115, 127, 140, 153,
+ 166, 180, 194, 208, 222, 237, 251, 266,
+ 281, 296, 312, 327, 343, 358, 374, 390,
+ 405, 421, 437, 453, 469, 484, 500, 516,
+ 532, 548, 564, 580, 596, 611, 627, 643,
+ 659, 674, 690, 706, 721, 737, 752, 768,
+ 783, 798, 814, 829, 844, 859, 874, 889,
+ 904, 919, 934, 949, 964, 978, 993, 1008,
+ 1022, 1037, 1051, 1065, 1080, 1094, 1108, 1122,
+ 1136, 1151, 1165, 1179, 1192, 1206, 1220, 1234,
+ 1248, 1261, 1275, 1288, 1302, 1315, 1329, 1342,
+ 1368, 1393, 1419, 1444, 1469, 1494, 1519, 1544,
+ 1569, 1594, 1618, 1643, 1668, 1692, 1717, 1741,
+ 1765, 1789, 1814, 1838, 1862, 1885, 1909, 1933,
+ 1957, 1992, 2027, 2061, 2096, 2130, 2165, 2199,
+ 2233, 2267, 2300, 2334, 2367, 2400, 2434, 2467,
+ 2499, 2532, 2575, 2618, 2661, 2704, 2746, 2788,
+ 2830, 2872, 2913, 2954, 2995, 3036, 3076, 3127,
+ 3177, 3226, 3275, 3324, 3373, 3421, 3469, 3517,
+ 3565, 3621, 3677, 3733, 3788, 3843, 3897, 3951,
+ 4005, 4058, 4119, 4181, 4241, 4301, 4361, 4420,
+ 4479, 4546, 4612, 4677, 4742, 4807, 4871, 4942,
+ 5013, 5083, 5153, 5222, 5291, 5367, 5442, 5517,
+ 5591, 5665, 5745, 5825, 5905, 5984, 6063, 6149,
+ 6234, 6319, 6404, 6495, 6587, 6678, 6769, 6867,
+ 6966, 7064, 7163, 7269, 7376, 7483, 7599, 7715,
+ 7832, 7958, 8085, 8214, 8352, 8492, 8635, 8788,
+ 8945, 9104, 9275, 9450, 9639, 9832, 10031, 10245,
+ 10465, 10702, 10946, 11210, 11482, 11776, 12081, 12409,
+ 12750, 13118, 13501, 13913, 14343, 14807, 15290, 15812,
+ 16356, 16943, 17575, 18237, 18949, 19718, 20521, 21387,
+ }
+};
+
+const int16_t ff_vp9_ac_qlookup[3][256] = {
+ {
+ 4, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118,
+ 120, 122, 124, 126, 128, 130, 132, 134,
+ 136, 138, 140, 142, 144, 146, 148, 150,
+ 152, 155, 158, 161, 164, 167, 170, 173,
+ 176, 179, 182, 185, 188, 191, 194, 197,
+ 200, 203, 207, 211, 215, 219, 223, 227,
+ 231, 235, 239, 243, 247, 251, 255, 260,
+ 265, 270, 275, 280, 285, 290, 295, 300,
+ 305, 311, 317, 323, 329, 335, 341, 347,
+ 353, 359, 366, 373, 380, 387, 394, 401,
+ 408, 416, 424, 432, 440, 448, 456, 465,
+ 474, 483, 492, 501, 510, 520, 530, 540,
+ 550, 560, 571, 582, 593, 604, 615, 627,
+ 639, 651, 663, 676, 689, 702, 715, 729,
+ 743, 757, 771, 786, 801, 816, 832, 848,
+ 864, 881, 898, 915, 933, 951, 969, 988,
+ 1007, 1026, 1046, 1066, 1087, 1108, 1129, 1151,
+ 1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343,
+ 1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567,
+ 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
+ }, {
+ 4, 9, 11, 13, 16, 18, 21, 24,
+ 27, 30, 33, 37, 40, 44, 48, 51,
+ 55, 59, 63, 67, 71, 75, 79, 83,
+ 88, 92, 96, 100, 105, 109, 114, 118,
+ 122, 127, 131, 136, 140, 145, 149, 154,
+ 158, 163, 168, 172, 177, 181, 186, 190,
+ 195, 199, 204, 208, 213, 217, 222, 226,
+ 231, 235, 240, 244, 249, 253, 258, 262,
+ 267, 271, 275, 280, 284, 289, 293, 297,
+ 302, 306, 311, 315, 319, 324, 328, 332,
+ 337, 341, 345, 349, 354, 358, 362, 367,
+ 371, 375, 379, 384, 388, 392, 396, 401,
+ 409, 417, 425, 433, 441, 449, 458, 466,
+ 474, 482, 490, 498, 506, 514, 523, 531,
+ 539, 547, 555, 563, 571, 579, 588, 596,
+ 604, 616, 628, 640, 652, 664, 676, 688,
+ 700, 713, 725, 737, 749, 761, 773, 785,
+ 797, 809, 825, 841, 857, 873, 889, 905,
+ 922, 938, 954, 970, 986, 1002, 1018, 1038,
+ 1058, 1078, 1098, 1118, 1138, 1158, 1178, 1198,
+ 1218, 1242, 1266, 1290, 1314, 1338, 1362, 1386,
+ 1411, 1435, 1463, 1491, 1519, 1547, 1575, 1603,
+ 1631, 1663, 1695, 1727, 1759, 1791, 1823, 1859,
+ 1895, 1931, 1967, 2003, 2039, 2079, 2119, 2159,
+ 2199, 2239, 2283, 2327, 2371, 2415, 2459, 2507,
+ 2555, 2603, 2651, 2703, 2755, 2807, 2859, 2915,
+ 2971, 3027, 3083, 3143, 3203, 3263, 3327, 3391,
+ 3455, 3523, 3591, 3659, 3731, 3803, 3876, 3952,
+ 4028, 4104, 4184, 4264, 4348, 4432, 4516, 4604,
+ 4692, 4784, 4876, 4972, 5068, 5168, 5268, 5372,
+ 5476, 5584, 5692, 5804, 5916, 6032, 6148, 6268,
+ 6388, 6512, 6640, 6768, 6900, 7036, 7172, 7312,
+ }, {
+ 4, 13, 19, 27, 35, 44, 54, 64,
+ 75, 87, 99, 112, 126, 139, 154, 168,
+ 183, 199, 214, 230, 247, 263, 280, 297,
+ 314, 331, 349, 366, 384, 402, 420, 438,
+ 456, 475, 493, 511, 530, 548, 567, 586,
+ 604, 623, 642, 660, 679, 698, 716, 735,
+ 753, 772, 791, 809, 828, 846, 865, 884,
+ 902, 920, 939, 957, 976, 994, 1012, 1030,
+ 1049, 1067, 1085, 1103, 1121, 1139, 1157, 1175,
+ 1193, 1211, 1229, 1246, 1264, 1282, 1299, 1317,
+ 1335, 1352, 1370, 1387, 1405, 1422, 1440, 1457,
+ 1474, 1491, 1509, 1526, 1543, 1560, 1577, 1595,
+ 1627, 1660, 1693, 1725, 1758, 1791, 1824, 1856,
+ 1889, 1922, 1954, 1987, 2020, 2052, 2085, 2118,
+ 2150, 2183, 2216, 2248, 2281, 2313, 2346, 2378,
+ 2411, 2459, 2508, 2556, 2605, 2653, 2701, 2750,
+ 2798, 2847, 2895, 2943, 2992, 3040, 3088, 3137,
+ 3185, 3234, 3298, 3362, 3426, 3491, 3555, 3619,
+ 3684, 3748, 3812, 3876, 3941, 4005, 4069, 4149,
+ 4230, 4310, 4390, 4470, 4550, 4631, 4711, 4791,
+ 4871, 4967, 5064, 5160, 5256, 5352, 5448, 5544,
+ 5641, 5737, 5849, 5961, 6073, 6185, 6297, 6410,
+ 6522, 6650, 6778, 6906, 7034, 7162, 7290, 7435,
+ 7579, 7723, 7867, 8011, 8155, 8315, 8475, 8635,
+ 8795, 8956, 9132, 9308, 9484, 9660, 9836, 10028,
+ 10220, 10412, 10604, 10812, 11020, 11228, 11437, 11661,
+ 11885, 12109, 12333, 12573, 12813, 13053, 13309, 13565,
+ 13821, 14093, 14365, 14637, 14925, 15213, 15502, 15806,
+ 16110, 16414, 16734, 17054, 17390, 17726, 18062, 18414,
+ 18766, 19134, 19502, 19886, 20270, 20670, 21070, 21486,
+ 21902, 22334, 22766, 23214, 23662, 24126, 24590, 25070,
+ 25551, 26047, 26559, 27071, 27599, 28143, 28687, 29247,
+ }
+};
+
+const enum TxfmType ff_vp9_intra_txfm_type[14] = {
+ [VERT_PRED] = ADST_DCT,
+ [HOR_PRED] = DCT_ADST,
+ [DC_PRED] = DCT_DCT,
+ [DIAG_DOWN_LEFT_PRED] = DCT_DCT,
+ [DIAG_DOWN_RIGHT_PRED] = ADST_ADST,
+ [VERT_RIGHT_PRED] = ADST_DCT,
+ [HOR_DOWN_PRED] = DCT_ADST,
+ [VERT_LEFT_PRED] = ADST_DCT,
+ [HOR_UP_PRED] = DCT_ADST,
+ [TM_VP8_PRED] = ADST_ADST,
+ [NEARESTMV] = DCT_DCT,
+ [NEARMV] = DCT_DCT,
+ [ZEROMV] = DCT_DCT,
+ [NEWMV] = DCT_DCT,
+};
+
+const int16_t ff_vp9_default_scan_4x4[16] = {
+ 0, 1, 4, 5,
+ 2, 8, 3, 6,
+ 12, 9, 7, 10,
+ 13, 11, 14, 15,
+};
+
+const int16_t ff_vp9_col_scan_4x4[16] = {
+ 0, 1, 2, 4,
+ 3, 5, 6, 8,
+ 7, 9, 10, 12,
+ 13, 11, 14, 15,
+};
+
+const int16_t ff_vp9_row_scan_4x4[16] = {
+ 0, 4, 1, 8,
+ 5, 12, 9, 2,
+ 6, 13, 3, 10,
+ 7, 14, 11, 15,
+};
+
+const int16_t ff_vp9_default_scan_8x8[64] = {
+ 0, 1, 8, 2, 9, 16, 10, 3,
+ 17, 24, 18, 11, 4, 25, 32, 19,
+ 12, 26, 5, 33, 20, 27, 40, 13,
+ 34, 6, 41, 28, 21, 35, 42, 48,
+ 14, 7, 36, 29, 43, 56, 49, 22,
+ 15, 37, 50, 44, 57, 30, 23, 51,
+ 45, 58, 38, 31, 52, 59, 39, 46,
+ 53, 60, 47, 54, 61, 55, 62, 63,
+};
+
+const int16_t ff_vp9_col_scan_8x8[64] = {
+ 0, 1, 2, 8, 3, 9, 4, 10,
+ 16, 5, 11, 17, 12, 18, 6, 24,
+ 19, 13, 25, 7, 26, 20, 32, 14,
+ 27, 21, 33, 28, 34, 15, 22, 35,
+ 40, 29, 41, 36, 23, 30, 42, 37,
+ 48, 43, 31, 44, 49, 38, 50, 56,
+ 45, 39, 51, 57, 52, 46, 58, 53,
+ 59, 47, 60, 54, 61, 55, 62, 63,
+};
+
+const int16_t ff_vp9_row_scan_8x8[64] = {
+ 0, 8, 16, 1, 9, 24, 2, 17,
+ 32, 10, 25, 3, 40, 18, 11, 33,
+ 26, 19, 4, 48, 41, 34, 12, 27,
+ 56, 20, 5, 42, 35, 13, 49, 28,
+ 6, 21, 43, 36, 14, 50, 29, 57,
+ 7, 44, 22, 37, 51, 15, 58, 30,
+ 23, 45, 52, 38, 59, 31, 46, 53,
+ 39, 60, 47, 61, 54, 62, 55, 63,
+};
+
+const int16_t ff_vp9_default_scan_16x16[256] = {
+ 0, 1, 16, 2, 17, 32, 3, 18, 33, 48, 4, 34, 19, 49, 20, 5,
+ 35, 64, 50, 36, 65, 21, 6, 51, 80, 66, 37, 22, 52, 7, 81, 67,
+ 38, 82, 53, 23, 96, 68, 8, 83, 97, 54, 39, 69, 112, 24, 98, 84,
+ 70, 55, 9, 40, 85, 99, 113, 128, 25, 114, 100, 71, 86, 56, 10, 41,
+ 115, 101, 129, 116, 72, 87, 26, 130, 144, 102, 57, 11, 42, 117, 131, 145,
+ 88, 103, 27, 73, 132, 118, 146, 58, 160, 12, 43, 133, 147, 104, 89, 119,
+ 161, 74, 148, 134, 28, 162, 59, 13, 176, 120, 149, 90, 135, 105, 163, 44,
+ 75, 177, 164, 29, 150, 121, 136, 178, 165, 14, 106, 60, 91, 151, 45, 179,
+ 192, 137, 166, 122, 76, 180, 152, 30, 61, 15, 107, 167, 181, 193, 92, 208,
+ 46, 138, 123, 153, 194, 77, 168, 182, 31, 195, 209, 183, 108, 139, 62, 154,
+ 47, 196, 93, 169, 210, 197, 224, 124, 184, 211, 78, 109, 170, 155, 63, 198,
+ 212, 185, 225, 240, 140, 94, 199, 125, 79, 213, 226, 171, 186, 156, 214, 200,
+ 110, 227, 141, 95, 241, 215, 228, 201, 126, 242, 187, 172, 157, 229, 111, 216,
+ 243, 142, 202, 230, 127, 217, 244, 173, 188, 231, 158, 203, 143, 245, 218, 232,
+ 189, 246, 159, 174, 233, 247, 219, 204, 175, 190, 248, 234, 205, 220, 249, 191,
+ 235, 221, 250, 206, 222, 251, 236, 207, 237, 223, 252, 238, 253, 239, 254, 255,
+};
+
+const int16_t ff_vp9_col_scan_16x16[256] = {
+ 0, 1, 2, 3, 16, 4, 17, 5, 18, 6, 19, 32, 20, 7, 33, 21,
+ 34, 8, 35, 22, 48, 36, 9, 49, 23, 50, 37, 10, 38, 51, 24, 64,
+ 52, 11, 65, 39, 25, 53, 66, 54, 40, 67, 12, 80, 26, 68, 55, 81,
+ 41, 69, 13, 27, 82, 56, 70, 83, 42, 14, 84, 96, 71, 28, 57, 85,
+ 97, 15, 72, 98, 43, 86, 58, 99, 29, 87, 100, 112, 73, 44, 101, 59,
+ 30, 113, 88, 114, 74, 128, 102, 45, 31, 115, 60, 103, 89, 116, 75, 129,
+ 117, 46, 104, 90, 61, 130, 118, 131, 132, 105, 76, 47, 119, 144, 91, 62,
+ 133, 106, 145, 120, 146, 134, 77, 147, 121, 92, 135, 148, 63, 107, 136, 122,
+ 93, 149, 160, 78, 150, 137, 108, 161, 162, 151, 123, 79, 138, 163, 152, 94,
+ 164, 109, 165, 153, 124, 139, 176, 166, 95, 177, 167, 110, 154, 178, 125, 179,
+ 140, 168, 155, 111, 180, 192, 181, 169, 141, 126, 182, 193, 194, 156, 183, 170,
+ 195, 127, 142, 196, 184, 208, 197, 157, 171, 143, 185, 198, 209, 199, 210, 172,
+ 158, 186, 211, 224, 212, 200, 240, 159, 213, 225, 187, 201, 173, 226, 214, 215,
+ 227, 202, 228, 188, 241, 216, 174, 229, 242, 203, 243, 217, 230, 175, 189, 244,
+ 231, 204, 218, 232, 245, 219, 246, 190, 233, 205, 191, 247, 234, 248, 220, 206,
+ 249, 235, 221, 207, 250, 236, 222, 251, 223, 237, 238, 252, 239, 253, 254, 255,
+};
+
+const int16_t ff_vp9_row_scan_16x16[256] = {
+ 0, 16, 32, 1, 48, 17, 64, 33, 2, 80, 18, 49, 96, 34, 3, 65,
+ 19, 112, 50, 81, 35, 4, 128, 66, 20, 97, 51, 82, 5, 144, 36, 67,
+ 113, 98, 21, 52, 160, 83, 129, 37, 68, 6, 114, 176, 99, 53, 22, 84,
+ 145, 38, 69, 130, 7, 115, 192, 100, 54, 23, 85, 161, 146, 131, 39, 70,
+ 208, 116, 8, 101, 177, 55, 86, 24, 162, 147, 132, 71, 224, 117, 40, 102,
+ 9, 148, 56, 87, 193, 163, 240, 133, 178, 25, 118, 72, 41, 103, 164, 10,
+ 149, 88, 134, 209, 179, 57, 119, 194, 26, 73, 165, 150, 104, 42, 135, 11,
+ 180, 120, 89, 225, 195, 58, 27, 210, 151, 181, 166, 74, 43, 105, 12, 136,
+ 90, 59, 241, 121, 28, 196, 167, 211, 152, 44, 182, 137, 75, 13, 226, 106,
+ 122, 60, 197, 91, 168, 29, 183, 153, 14, 76, 212, 138, 45, 107, 15, 198,
+ 92, 227, 169, 30, 123, 154, 61, 242, 184, 213, 139, 46, 77, 31, 108, 170,
+ 199, 185, 124, 228, 93, 155, 214, 62, 140, 243, 78, 47, 200, 109, 186, 171,
+ 201, 94, 63, 215, 229, 156, 79, 125, 141, 110, 216, 187, 172, 244, 202, 230,
+ 217, 95, 157, 126, 245, 111, 142, 231, 188, 127, 158, 218, 173, 232, 246, 233,
+ 203, 143, 247, 174, 189, 159, 219, 204, 248, 234, 249, 175, 190, 220, 205, 250,
+ 235, 191, 221, 251, 236, 206, 252, 222, 207, 237, 223, 253, 238, 254, 239, 255,
+};
+
+const int16_t ff_vp9_default_scan_32x32[1024] = {
+ 0, 1, 32, 2, 33, 64, 3, 34, 65, 4, 96, 35, 66, 5, 36, 97, 67, 128, 98, 68, 37, 6, 129, 99, 7, 160, 69, 38, 130, 100, 161, 131,
+ 39, 70, 8, 101, 162, 132, 192, 71, 40, 9, 102, 163, 133, 193, 72, 224, 103, 41, 164, 10, 194, 134, 165, 73, 104, 135, 225, 42, 195, 11, 256, 166,
+ 226, 196, 74, 105, 136, 43, 12, 167, 197, 227, 257, 75, 106, 137, 228, 44, 198, 168, 258, 288, 13, 229, 76, 107, 199, 138, 259, 169, 289, 45, 230, 260,
+ 200, 108, 14, 170, 139, 320, 290, 77, 231, 261, 46, 201, 140, 291, 109, 232, 321, 262, 171, 78, 292, 15, 322, 202, 263, 352, 172, 293, 233, 141, 323, 110,
+ 47, 203, 264, 234, 294, 353, 324, 16, 79, 204, 265, 295, 325, 173, 354, 142, 235, 384, 48, 296, 111, 266, 355, 326, 80, 17, 205, 236, 174, 356, 385, 327,
+ 143, 297, 267, 357, 386, 112, 49, 328, 298, 206, 416, 237, 358, 387, 81, 175, 18, 329, 359, 388, 299, 330, 389, 113, 417, 238, 360, 50, 207, 418, 390, 331,
+ 19, 448, 361, 82, 419, 391, 239, 51, 362, 420, 114, 449, 480, 421, 83, 363, 450, 422, 512, 451, 423, 115, 452, 481, 453, 482, 454, 544, 483, 455, 513, 484,
+ 514, 485, 515, 486, 545, 576, 487, 546, 547, 608, 577, 578, 579, 609, 610, 611, 20, 144, 268, 392, 516, 640, 21, 52, 145, 176, 269, 300, 393, 424, 517, 548,
+ 641, 672, 22, 53, 84, 146, 177, 208, 270, 301, 332, 394, 425, 456, 518, 549, 580, 642, 673, 704, 23, 54, 85, 116, 147, 178, 209, 240, 271, 302, 333, 364,
+ 395, 426, 457, 488, 519, 550, 581, 612, 643, 674, 705, 736, 55, 86, 117, 179, 210, 241, 303, 334, 365, 427, 458, 489, 551, 582, 613, 675, 706, 737, 87, 118,
+ 211, 242, 335, 366, 459, 490, 583, 614, 707, 738, 119, 243, 367, 491, 615, 739, 24, 148, 272, 396, 520, 644, 768, 25, 56, 149, 180, 273, 304, 397, 428, 521,
+ 552, 645, 676, 769, 800, 26, 57, 88, 150, 181, 212, 274, 305, 336, 398, 429, 460, 522, 553, 584, 646, 677, 708, 770, 801, 832, 27, 58, 89, 120, 151, 182,
+ 213, 244, 275, 306, 337, 368, 399, 430, 461, 492, 523, 554, 585, 616, 647, 678, 709, 740, 771, 802, 833, 864, 59, 90, 121, 183, 214, 245, 307, 338, 369, 431,
+ 462, 493, 555, 586, 617, 679, 710, 741, 803, 834, 865, 91, 122, 215, 246, 339, 370, 463, 494, 587, 618, 711, 742, 835, 866, 123, 247, 371, 495, 619, 743, 867,
+ 28, 152, 276, 400, 524, 648, 772, 896, 29, 60, 153, 184, 277, 308, 401, 432, 525, 556, 649, 680, 773, 804, 897, 928, 30, 61, 92, 154, 185, 216, 278, 309,
+ 340, 402, 433, 464, 526, 557, 588, 650, 681, 712, 774, 805, 836, 898, 929, 960, 31, 62, 93, 124, 155, 186, 217, 248, 279, 310, 341, 372, 403, 434, 465, 496,
+ 527, 558, 589, 620, 651, 682, 713, 744, 775, 806, 837, 868, 899, 930, 961, 992, 63, 94, 125, 187, 218, 249, 311, 342, 373, 435, 466, 497, 559, 590, 621, 683,
+ 714, 745, 807, 838, 869, 931, 962, 993, 95, 126, 219, 250, 343, 374, 467, 498, 591, 622, 715, 746, 839, 870, 963, 994, 127, 251, 375, 499, 623, 747, 871, 995,
+ 156, 280, 404, 528, 652, 776, 900, 157, 188, 281, 312, 405, 436, 529, 560, 653, 684, 777, 808, 901, 932, 158, 189, 220, 282, 313, 344, 406, 437, 468, 530, 561,
+ 592, 654, 685, 716, 778, 809, 840, 902, 933, 964, 159, 190, 221, 252, 283, 314, 345, 376, 407, 438, 469, 500, 531, 562, 593, 624, 655, 686, 717, 748, 779, 810,
+ 841, 872, 903, 934, 965, 996, 191, 222, 253, 315, 346, 377, 439, 470, 501, 563, 594, 625, 687, 718, 749, 811, 842, 873, 935, 966, 997, 223, 254, 347, 378, 471,
+ 502, 595, 626, 719, 750, 843, 874, 967, 998, 255, 379, 503, 627, 751, 875, 999, 284, 408, 532, 656, 780, 904, 285, 316, 409, 440, 533, 564, 657, 688, 781, 812,
+ 905, 936, 286, 317, 348, 410, 441, 472, 534, 565, 596, 658, 689, 720, 782, 813, 844, 906, 937, 968, 287, 318, 349, 380, 411, 442, 473, 504, 535, 566, 597, 628,
+ 659, 690, 721, 752, 783, 814, 845, 876, 907, 938, 969, 1000, 319, 350, 381, 443, 474, 505, 567, 598, 629, 691, 722, 753, 815, 846, 877, 939, 970, 1001, 351, 382,
+ 475, 506, 599, 630, 723, 754, 847, 878, 971, 1002, 383, 507, 631, 755, 879, 1003, 412, 536, 660, 784, 908, 413, 444, 537, 568, 661, 692, 785, 816, 909, 940, 414,
+ 445, 476, 538, 569, 600, 662, 693, 724, 786, 817, 848, 910, 941, 972, 415, 446, 477, 508, 539, 570, 601, 632, 663, 694, 725, 756, 787, 818, 849, 880, 911, 942,
+ 973, 1004, 447, 478, 509, 571, 602, 633, 695, 726, 757, 819, 850, 881, 943, 974, 1005, 479, 510, 603, 634, 727, 758, 851, 882, 975, 1006, 511, 635, 759, 883, 1007,
+ 540, 664, 788, 912, 541, 572, 665, 696, 789, 820, 913, 944, 542, 573, 604, 666, 697, 728, 790, 821, 852, 914, 945, 976, 543, 574, 605, 636, 667, 698, 729, 760,
+ 791, 822, 853, 884, 915, 946, 977, 1008, 575, 606, 637, 699, 730, 761, 823, 854, 885, 947, 978, 1009, 607, 638, 731, 762, 855, 886, 979, 1010, 639, 763, 887, 1011,
+ 668, 792, 916, 669, 700, 793, 824, 917, 948, 670, 701, 732, 794, 825, 856, 918, 949, 980, 671, 702, 733, 764, 795, 826, 857, 888, 919, 950, 981, 1012, 703, 734,
+ 765, 827, 858, 889, 951, 982, 1013, 735, 766, 859, 890, 983, 1014, 767, 891, 1015, 796, 920, 797, 828, 921, 952, 798, 829, 860, 922, 953, 984, 799, 830, 861, 892,
+ 923, 954, 985, 1016, 831, 862, 893, 955, 986, 1017, 863, 894, 987, 1018, 895, 1019, 924, 925, 956, 926, 957, 988, 927, 958, 989, 1020, 959, 990, 1021, 991, 1022, 1023,
+};
+
+const int16_t * const ff_vp9_scans[5][4] = {
+ {
+ ff_vp9_default_scan_4x4, ff_vp9_col_scan_4x4,
+ ff_vp9_row_scan_4x4, ff_vp9_default_scan_4x4
+ }, {
+ ff_vp9_default_scan_8x8, ff_vp9_col_scan_8x8,
+ ff_vp9_row_scan_8x8, ff_vp9_default_scan_8x8
+ }, {
+ ff_vp9_default_scan_16x16, ff_vp9_col_scan_16x16,
+ ff_vp9_row_scan_16x16, ff_vp9_default_scan_16x16
+ }, {
+ ff_vp9_default_scan_32x32, ff_vp9_default_scan_32x32,
+ ff_vp9_default_scan_32x32, ff_vp9_default_scan_32x32
+ }, { // lossless
+ ff_vp9_default_scan_4x4, ff_vp9_default_scan_4x4,
+ ff_vp9_default_scan_4x4, ff_vp9_default_scan_4x4
+ }
+};
+
+const int16_t ff_vp9_default_scan_4x4_nb[16][2] = {
+ { 0, 0 }, { 0, 0 }, { 4, 1 }, { 1, 1 },
+ { 4, 4 }, { 2, 2 }, { 5, 2 }, { 8, 8 },
+ { 8, 5 }, { 6, 3 }, { 9, 6 }, { 12, 9 },
+ { 10, 7 }, { 13, 10 }, { 14, 11 }, { 0, 0 },
+};
+
+const int16_t ff_vp9_col_scan_4x4_nb[16][2] = {
+ { 0, 0 }, { 1, 1 }, { 0, 0 }, { 2, 2 },
+ { 4, 4 }, { 5, 5 }, { 4, 4 }, { 6, 6 },
+ { 8, 8 }, { 9, 9 }, { 8, 8 }, { 12, 12 },
+ { 10, 10 }, { 13, 13 }, { 14, 14 }, { 0, 0 },
+};
+
+const int16_t ff_vp9_row_scan_4x4_nb[16][2] = {
+ { 0, 0 }, { 0, 0 }, { 4, 4 }, { 1, 1 },
+ { 8, 8 }, { 5, 5 }, { 1, 1 }, { 2, 2 },
+ { 9, 9 }, { 2, 2 }, { 6, 6 }, { 3, 3 },
+ { 10, 10 }, { 7, 7 }, { 11, 11 }, { 0, 0 },
+};
+
+const int16_t ff_vp9_default_scan_8x8_nb[64][2] = {
+ { 0, 0 }, { 0, 0 }, { 1, 1 }, { 8, 1 },
+ { 8, 8 }, { 9, 2 }, { 2, 2 }, { 16, 9 },
+ { 16, 16 }, { 17, 10 }, { 10, 3 }, { 3, 3 },
+ { 24, 17 }, { 24, 24 }, { 18, 11 }, { 11, 4 },
+ { 25, 18 }, { 4, 4 }, { 32, 25 }, { 19, 12 },
+ { 26, 19 }, { 32, 32 }, { 12, 5 }, { 33, 26 },
+ { 5, 5 }, { 40, 33 }, { 27, 20 }, { 20, 13 },
+ { 34, 27 }, { 41, 34 }, { 40, 40 }, { 13, 6 },
+ { 6, 6 }, { 35, 28 }, { 28, 21 }, { 42, 35 },
+ { 48, 48 }, { 48, 41 }, { 21, 14 }, { 14, 7 },
+ { 36, 29 }, { 49, 42 }, { 43, 36 }, { 56, 49 },
+ { 29, 22 }, { 22, 15 }, { 50, 43 }, { 44, 37 },
+ { 57, 50 }, { 37, 30 }, { 30, 23 }, { 51, 44 },
+ { 58, 51 }, { 38, 31 }, { 45, 38 }, { 52, 45 },
+ { 59, 52 }, { 46, 39 }, { 53, 46 }, { 60, 53 },
+ { 54, 47 }, { 61, 54 }, { 62, 55 }, { 0, 0 },
+};
+
+const int16_t ff_vp9_col_scan_8x8_nb[64][2] = {
+ { 0, 0 }, { 1, 1 }, { 0, 0 }, { 2, 2 },
+ { 8, 8 }, { 3, 3 }, { 9, 9 }, { 8, 8 },
+ { 4, 4 }, { 10, 10 }, { 16, 16 }, { 11, 11 },
+ { 17, 17 }, { 5, 5 }, { 16, 16 }, { 18, 18 },
+ { 12, 12 }, { 24, 24 }, { 6, 6 }, { 25, 25 },
+ { 19, 19 }, { 24, 24 }, { 13, 13 }, { 26, 26 },
+ { 20, 20 }, { 32, 32 }, { 27, 27 }, { 33, 33 },
+ { 14, 14 }, { 21, 21 }, { 34, 34 }, { 32, 32 },
+ { 28, 28 }, { 40, 40 }, { 35, 35 }, { 22, 22 },
+ { 29, 29 }, { 41, 41 }, { 36, 36 }, { 40, 40 },
+ { 42, 42 }, { 30, 30 }, { 43, 43 }, { 48, 48 },
+ { 37, 37 }, { 49, 49 }, { 48, 48 }, { 44, 44 },
+ { 38, 38 }, { 50, 50 }, { 56, 56 }, { 51, 51 },
+ { 45, 45 }, { 57, 57 }, { 52, 52 }, { 58, 58 },
+ { 46, 46 }, { 59, 59 }, { 53, 53 }, { 60, 60 },
+ { 54, 54 }, { 61, 61 }, { 62, 62 }, { 0, 0 },
+};
+
+const int16_t ff_vp9_row_scan_8x8_nb[64][2] = {
+ { 0, 0 }, { 8, 8 }, { 0, 0 }, { 1, 1 },
+ { 16, 16 }, { 1, 1 }, { 9, 9 }, { 24, 24 },
+ { 2, 2 }, { 17, 17 }, { 2, 2 }, { 32, 32 },
+ { 10, 10 }, { 3, 3 }, { 25, 25 }, { 18, 18 },
+ { 11, 11 }, { 3, 3 }, { 40, 40 }, { 33, 33 },
+ { 26, 26 }, { 4, 4 }, { 19, 19 }, { 48, 48 },
+ { 12, 12 }, { 4, 4 }, { 34, 34 }, { 27, 27 },
+ { 5, 5 }, { 41, 41 }, { 20, 20 }, { 5, 5 },
+ { 13, 13 }, { 35, 35 }, { 28, 28 }, { 6, 6 },
+ { 42, 42 }, { 21, 21 }, { 49, 49 }, { 6, 6 },
+ { 36, 36 }, { 14, 14 }, { 29, 29 }, { 43, 43 },
+ { 7, 7 }, { 50, 50 }, { 22, 22 }, { 15, 15 },
+ { 37, 37 }, { 44, 44 }, { 30, 30 }, { 51, 51 },
+ { 23, 23 }, { 38, 38 }, { 45, 45 }, { 31, 31 },
+ { 52, 52 }, { 39, 39 }, { 53, 53 }, { 46, 46 },
+ { 54, 54 }, { 47, 47 }, { 55, 55 }, { 0, 0 },
+};
+
+const int16_t ff_vp9_default_scan_16x16_nb[256][2] = {
+ { 0, 0 }, { 0, 0 }, { 1, 1 }, { 16, 1 },
+ { 16, 16 }, { 2, 2 }, { 17, 2 }, { 32, 17 },
+ { 32, 32 }, { 3, 3 }, { 33, 18 }, { 18, 3 },
+ { 48, 33 }, { 19, 4 }, { 4, 4 }, { 34, 19 },
+ { 48, 48 }, { 49, 34 }, { 35, 20 }, { 64, 49 },
+ { 20, 5 }, { 5, 5 }, { 50, 35 }, { 64, 64 },
+ { 65, 50 }, { 36, 21 }, { 21, 6 }, { 51, 36 },
+ { 6, 6 }, { 80, 65 }, { 66, 51 }, { 37, 22 },
+ { 81, 66 }, { 52, 37 }, { 22, 7 }, { 80, 80 },
+ { 67, 52 }, { 7, 7 }, { 82, 67 }, { 96, 81 },
+ { 53, 38 }, { 38, 23 }, { 68, 53 }, { 96, 96 },
+ { 23, 8 }, { 97, 82 }, { 83, 68 }, { 69, 54 },
+ { 54, 39 }, { 8, 8 }, { 39, 24 }, { 84, 69 },
+ { 98, 83 }, { 112, 97 }, { 112, 112 }, { 24, 9 },
+ { 113, 98 }, { 99, 84 }, { 70, 55 }, { 85, 70 },
+ { 55, 40 }, { 9, 9 }, { 40, 25 }, { 114, 99 },
+ { 100, 85 }, { 128, 113 }, { 115, 100 }, { 71, 56 },
+ { 86, 71 }, { 25, 10 }, { 129, 114 }, { 128, 128 },
+ { 101, 86 }, { 56, 41 }, { 10, 10 }, { 41, 26 },
+ { 116, 101 }, { 130, 115 }, { 144, 129 }, { 87, 72 },
+ { 102, 87 }, { 26, 11 }, { 72, 57 }, { 131, 116 },
+ { 117, 102 }, { 145, 130 }, { 57, 42 }, { 144, 144 },
+ { 11, 11 }, { 42, 27 }, { 132, 117 }, { 146, 131 },
+ { 103, 88 }, { 88, 73 }, { 118, 103 }, { 160, 145 },
+ { 73, 58 }, { 147, 132 }, { 133, 118 }, { 27, 12 },
+ { 161, 146 }, { 58, 43 }, { 12, 12 }, { 160, 160 },
+ { 119, 104 }, { 148, 133 }, { 89, 74 }, { 134, 119 },
+ { 104, 89 }, { 162, 147 }, { 43, 28 }, { 74, 59 },
+ { 176, 161 }, { 163, 148 }, { 28, 13 }, { 149, 134 },
+ { 120, 105 }, { 135, 120 }, { 177, 162 }, { 164, 149 },
+ { 13, 13 }, { 105, 90 }, { 59, 44 }, { 90, 75 },
+ { 150, 135 }, { 44, 29 }, { 178, 163 }, { 176, 176 },
+ { 136, 121 }, { 165, 150 }, { 121, 106 }, { 75, 60 },
+ { 179, 164 }, { 151, 136 }, { 29, 14 }, { 60, 45 },
+ { 14, 14 }, { 106, 91 }, { 166, 151 }, { 180, 165 },
+ { 192, 177 }, { 91, 76 }, { 192, 192 }, { 45, 30 },
+ { 137, 122 }, { 122, 107 }, { 152, 137 }, { 193, 178 },
+ { 76, 61 }, { 167, 152 }, { 181, 166 }, { 30, 15 },
+ { 194, 179 }, { 208, 193 }, { 182, 167 }, { 107, 92 },
+ { 138, 123 }, { 61, 46 }, { 153, 138 }, { 46, 31 },
+ { 195, 180 }, { 92, 77 }, { 168, 153 }, { 209, 194 },
+ { 196, 181 }, { 208, 208 }, { 123, 108 }, { 183, 168 },
+ { 210, 195 }, { 77, 62 }, { 108, 93 }, { 169, 154 },
+ { 154, 139 }, { 62, 47 }, { 197, 182 }, { 211, 196 },
+ { 184, 169 }, { 224, 209 }, { 224, 224 }, { 139, 124 },
+ { 93, 78 }, { 198, 183 }, { 124, 109 }, { 78, 63 },
+ { 212, 197 }, { 225, 210 }, { 170, 155 }, { 185, 170 },
+ { 155, 140 }, { 213, 198 }, { 199, 184 }, { 109, 94 },
+ { 226, 211 }, { 140, 125 }, { 94, 79 }, { 240, 225 },
+ { 214, 199 }, { 227, 212 }, { 200, 185 }, { 125, 110 },
+ { 241, 226 }, { 186, 171 }, { 171, 156 }, { 156, 141 },
+ { 228, 213 }, { 110, 95 }, { 215, 200 }, { 242, 227 },
+ { 141, 126 }, { 201, 186 }, { 229, 214 }, { 126, 111 },
+ { 216, 201 }, { 243, 228 }, { 172, 157 }, { 187, 172 },
+ { 230, 215 }, { 157, 142 }, { 202, 187 }, { 142, 127 },
+ { 244, 229 }, { 217, 202 }, { 231, 216 }, { 188, 173 },
+ { 245, 230 }, { 158, 143 }, { 173, 158 }, { 232, 217 },
+ { 246, 231 }, { 218, 203 }, { 203, 188 }, { 174, 159 },
+ { 189, 174 }, { 247, 232 }, { 233, 218 }, { 204, 189 },
+ { 219, 204 }, { 248, 233 }, { 190, 175 }, { 234, 219 },
+ { 220, 205 }, { 249, 234 }, { 205, 190 }, { 221, 206 },
+ { 250, 235 }, { 235, 220 }, { 206, 191 }, { 236, 221 },
+ { 222, 207 }, { 251, 236 }, { 237, 222 }, { 252, 237 },
+ { 238, 223 }, { 253, 238 }, { 254, 239 }, { 0, 0 },
+};
+
+const int16_t ff_vp9_col_scan_16x16_nb[256][2] = {
+ { 0, 0 }, { 1, 1 }, { 2, 2 }, { 0, 0 },
+ { 3, 3 }, { 16, 16 }, { 4, 4 }, { 17, 17 },
+ { 5, 5 }, { 18, 18 }, { 16, 16 }, { 19, 19 },
+ { 6, 6 }, { 32, 32 }, { 20, 20 }, { 33, 33 },
+ { 7, 7 }, { 34, 34 }, { 21, 21 }, { 32, 32 },
+ { 35, 35 }, { 8, 8 }, { 48, 48 }, { 22, 22 },
+ { 49, 49 }, { 36, 36 }, { 9, 9 }, { 37, 37 },
+ { 50, 50 }, { 23, 23 }, { 48, 48 }, { 51, 51 },
+ { 10, 10 }, { 64, 64 }, { 38, 38 }, { 24, 24 },
+ { 52, 52 }, { 65, 65 }, { 53, 53 }, { 39, 39 },
+ { 66, 66 }, { 11, 11 }, { 64, 64 }, { 25, 25 },
+ { 67, 67 }, { 54, 54 }, { 80, 80 }, { 40, 40 },
+ { 68, 68 }, { 12, 12 }, { 26, 26 }, { 81, 81 },
+ { 55, 55 }, { 69, 69 }, { 82, 82 }, { 41, 41 },
+ { 13, 13 }, { 83, 83 }, { 80, 80 }, { 70, 70 },
+ { 27, 27 }, { 56, 56 }, { 84, 84 }, { 96, 96 },
+ { 14, 14 }, { 71, 71 }, { 97, 97 }, { 42, 42 },
+ { 85, 85 }, { 57, 57 }, { 98, 98 }, { 28, 28 },
+ { 86, 86 }, { 99, 99 }, { 96, 96 }, { 72, 72 },
+ { 43, 43 }, { 100, 100 }, { 58, 58 }, { 29, 29 },
+ { 112, 112 }, { 87, 87 }, { 113, 113 }, { 73, 73 },
+ { 112, 112 }, { 101, 101 }, { 44, 44 }, { 30, 30 },
+ { 114, 114 }, { 59, 59 }, { 102, 102 }, { 88, 88 },
+ { 115, 115 }, { 74, 74 }, { 128, 128 }, { 116, 116 },
+ { 45, 45 }, { 103, 103 }, { 89, 89 }, { 60, 60 },
+ { 129, 129 }, { 117, 117 }, { 130, 130 }, { 131, 131 },
+ { 104, 104 }, { 75, 75 }, { 46, 46 }, { 118, 118 },
+ { 128, 128 }, { 90, 90 }, { 61, 61 }, { 132, 132 },
+ { 105, 105 }, { 144, 144 }, { 119, 119 }, { 145, 145 },
+ { 133, 133 }, { 76, 76 }, { 146, 146 }, { 120, 120 },
+ { 91, 91 }, { 134, 134 }, { 147, 147 }, { 62, 62 },
+ { 106, 106 }, { 135, 135 }, { 121, 121 }, { 92, 92 },
+ { 148, 148 }, { 144, 144 }, { 77, 77 }, { 149, 149 },
+ { 136, 136 }, { 107, 107 }, { 160, 160 }, { 161, 161 },
+ { 150, 150 }, { 122, 122 }, { 78, 78 }, { 137, 137 },
+ { 162, 162 }, { 151, 151 }, { 93, 93 }, { 163, 163 },
+ { 108, 108 }, { 164, 164 }, { 152, 152 }, { 123, 123 },
+ { 138, 138 }, { 160, 160 }, { 165, 165 }, { 94, 94 },
+ { 176, 176 }, { 166, 166 }, { 109, 109 }, { 153, 153 },
+ { 177, 177 }, { 124, 124 }, { 178, 178 }, { 139, 139 },
+ { 167, 167 }, { 154, 154 }, { 110, 110 }, { 179, 179 },
+ { 176, 176 }, { 180, 180 }, { 168, 168 }, { 140, 140 },
+ { 125, 125 }, { 181, 181 }, { 192, 192 }, { 193, 193 },
+ { 155, 155 }, { 182, 182 }, { 169, 169 }, { 194, 194 },
+ { 126, 126 }, { 141, 141 }, { 195, 195 }, { 183, 183 },
+ { 192, 192 }, { 196, 196 }, { 156, 156 }, { 170, 170 },
+ { 142, 142 }, { 184, 184 }, { 197, 197 }, { 208, 208 },
+ { 198, 198 }, { 209, 209 }, { 171, 171 }, { 157, 157 },
+ { 185, 185 }, { 210, 210 }, { 208, 208 }, { 211, 211 },
+ { 199, 199 }, { 224, 224 }, { 158, 158 }, { 212, 212 },
+ { 224, 224 }, { 186, 186 }, { 200, 200 }, { 172, 172 },
+ { 225, 225 }, { 213, 213 }, { 214, 214 }, { 226, 226 },
+ { 201, 201 }, { 227, 227 }, { 187, 187 }, { 240, 240 },
+ { 215, 215 }, { 173, 173 }, { 228, 228 }, { 241, 241 },
+ { 202, 202 }, { 242, 242 }, { 216, 216 }, { 229, 229 },
+ { 174, 174 }, { 188, 188 }, { 243, 243 }, { 230, 230 },
+ { 203, 203 }, { 217, 217 }, { 231, 231 }, { 244, 244 },
+ { 218, 218 }, { 245, 245 }, { 189, 189 }, { 232, 232 },
+ { 204, 204 }, { 190, 190 }, { 246, 246 }, { 233, 233 },
+ { 247, 247 }, { 219, 219 }, { 205, 205 }, { 248, 248 },
+ { 234, 234 }, { 220, 220 }, { 206, 206 }, { 249, 249 },
+ { 235, 235 }, { 221, 221 }, { 250, 250 }, { 222, 222 },
+ { 236, 236 }, { 237, 237 }, { 251, 251 }, { 238, 238 },
+ { 252, 252 }, { 253, 253 }, { 254, 254 }, { 0, 0 },
+};
+
+const int16_t ff_vp9_row_scan_16x16_nb[256][2] = {
+ { 0, 0 }, { 16, 16 }, { 0, 0 }, { 32, 32 },
+ { 1, 1 }, { 48, 48 }, { 17, 17 }, { 1, 1 },
+ { 64, 64 }, { 2, 2 }, { 33, 33 }, { 80, 80 },
+ { 18, 18 }, { 2, 2 }, { 49, 49 }, { 3, 3 },
+ { 96, 96 }, { 34, 34 }, { 65, 65 }, { 19, 19 },
+ { 3, 3 }, { 112, 112 }, { 50, 50 }, { 4, 4 },
+ { 81, 81 }, { 35, 35 }, { 66, 66 }, { 4, 4 },
+ { 128, 128 }, { 20, 20 }, { 51, 51 }, { 97, 97 },
+ { 82, 82 }, { 5, 5 }, { 36, 36 }, { 144, 144 },
+ { 67, 67 }, { 113, 113 }, { 21, 21 }, { 52, 52 },
+ { 5, 5 }, { 98, 98 }, { 160, 160 }, { 83, 83 },
+ { 37, 37 }, { 6, 6 }, { 68, 68 }, { 129, 129 },
+ { 22, 22 }, { 53, 53 }, { 114, 114 }, { 6, 6 },
+ { 99, 99 }, { 176, 176 }, { 84, 84 }, { 38, 38 },
+ { 7, 7 }, { 69, 69 }, { 145, 145 }, { 130, 130 },
+ { 115, 115 }, { 23, 23 }, { 54, 54 }, { 192, 192 },
+ { 100, 100 }, { 7, 7 }, { 85, 85 }, { 161, 161 },
+ { 39, 39 }, { 70, 70 }, { 8, 8 }, { 146, 146 },
+ { 131, 131 }, { 116, 116 }, { 55, 55 }, { 208, 208 },
+ { 101, 101 }, { 24, 24 }, { 86, 86 }, { 8, 8 },
+ { 132, 132 }, { 40, 40 }, { 71, 71 }, { 177, 177 },
+ { 147, 147 }, { 224, 224 }, { 117, 117 }, { 162, 162 },
+ { 9, 9 }, { 102, 102 }, { 56, 56 }, { 25, 25 },
+ { 87, 87 }, { 148, 148 }, { 9, 9 }, { 133, 133 },
+ { 72, 72 }, { 118, 118 }, { 193, 193 }, { 163, 163 },
+ { 41, 41 }, { 103, 103 }, { 178, 178 }, { 10, 10 },
+ { 57, 57 }, { 149, 149 }, { 134, 134 }, { 88, 88 },
+ { 26, 26 }, { 119, 119 }, { 10, 10 }, { 164, 164 },
+ { 104, 104 }, { 73, 73 }, { 209, 209 }, { 179, 179 },
+ { 42, 42 }, { 11, 11 }, { 194, 194 }, { 135, 135 },
+ { 165, 165 }, { 150, 150 }, { 58, 58 }, { 27, 27 },
+ { 89, 89 }, { 11, 11 }, { 120, 120 }, { 74, 74 },
+ { 43, 43 }, { 225, 225 }, { 105, 105 }, { 12, 12 },
+ { 180, 180 }, { 151, 151 }, { 195, 195 }, { 136, 136 },
+ { 28, 28 }, { 166, 166 }, { 121, 121 }, { 59, 59 },
+ { 12, 12 }, { 210, 210 }, { 90, 90 }, { 106, 106 },
+ { 44, 44 }, { 181, 181 }, { 75, 75 }, { 152, 152 },
+ { 13, 13 }, { 167, 167 }, { 137, 137 }, { 13, 13 },
+ { 60, 60 }, { 196, 196 }, { 122, 122 }, { 29, 29 },
+ { 91, 91 }, { 14, 14 }, { 182, 182 }, { 76, 76 },
+ { 211, 211 }, { 153, 153 }, { 14, 14 }, { 107, 107 },
+ { 138, 138 }, { 45, 45 }, { 226, 226 }, { 168, 168 },
+ { 197, 197 }, { 123, 123 }, { 30, 30 }, { 61, 61 },
+ { 15, 15 }, { 92, 92 }, { 154, 154 }, { 183, 183 },
+ { 169, 169 }, { 108, 108 }, { 212, 212 }, { 77, 77 },
+ { 139, 139 }, { 198, 198 }, { 46, 46 }, { 124, 124 },
+ { 227, 227 }, { 62, 62 }, { 31, 31 }, { 184, 184 },
+ { 93, 93 }, { 170, 170 }, { 155, 155 }, { 185, 185 },
+ { 78, 78 }, { 47, 47 }, { 199, 199 }, { 213, 213 },
+ { 140, 140 }, { 63, 63 }, { 109, 109 }, { 125, 125 },
+ { 94, 94 }, { 200, 200 }, { 171, 171 }, { 156, 156 },
+ { 228, 228 }, { 186, 186 }, { 214, 214 }, { 201, 201 },
+ { 79, 79 }, { 141, 141 }, { 110, 110 }, { 229, 229 },
+ { 95, 95 }, { 126, 126 }, { 215, 215 }, { 172, 172 },
+ { 111, 111 }, { 142, 142 }, { 202, 202 }, { 157, 157 },
+ { 216, 216 }, { 230, 230 }, { 217, 217 }, { 187, 187 },
+ { 127, 127 }, { 231, 231 }, { 158, 158 }, { 173, 173 },
+ { 143, 143 }, { 203, 203 }, { 188, 188 }, { 232, 232 },
+ { 218, 218 }, { 233, 233 }, { 159, 159 }, { 174, 174 },
+ { 204, 204 }, { 189, 189 }, { 234, 234 }, { 219, 219 },
+ { 175, 175 }, { 205, 205 }, { 235, 235 }, { 220, 220 },
+ { 190, 190 }, { 236, 236 }, { 206, 206 }, { 191, 191 },
+ { 221, 221 }, { 207, 207 }, { 237, 237 }, { 222, 222 },
+ { 238, 238 }, { 223, 223 }, { 239, 239 }, { 0, 0 },
+};
+
+const int16_t ff_vp9_default_scan_32x32_nb[1024][2] = {
+ { 0, 0 }, { 0, 0 }, { 1, 1 }, { 32, 1 },
+ { 32, 32 }, { 2, 2 }, { 33, 2 }, { 64, 33 },
+ { 3, 3 }, { 64, 64 }, { 34, 3 }, { 65, 34 },
+ { 4, 4 }, { 35, 4 }, { 96, 65 }, { 66, 35 },
+ { 96, 96 }, { 97, 66 }, { 67, 36 }, { 36, 5 },
+ { 5, 5 }, { 128, 97 }, { 98, 67 }, { 6, 6 },
+ { 128, 128 }, { 68, 37 }, { 37, 6 }, { 129, 98 },
+ { 99, 68 }, { 160, 129 }, { 130, 99 }, { 38, 7 },
+ { 69, 38 }, { 7, 7 }, { 100, 69 }, { 161, 130 },
+ { 131, 100 }, { 160, 160 }, { 70, 39 }, { 39, 8 },
+ { 8, 8 }, { 101, 70 }, { 162, 131 }, { 132, 101 },
+ { 192, 161 }, { 71, 40 }, { 192, 192 }, { 102, 71 },
+ { 40, 9 }, { 163, 132 }, { 9, 9 }, { 193, 162 },
+ { 133, 102 }, { 164, 133 }, { 72, 41 }, { 103, 72 },
+ { 134, 103 }, { 224, 193 }, { 41, 10 }, { 194, 163 },
+ { 10, 10 }, { 224, 224 }, { 165, 134 }, { 225, 194 },
+ { 195, 164 }, { 73, 42 }, { 104, 73 }, { 135, 104 },
+ { 42, 11 }, { 11, 11 }, { 166, 135 }, { 196, 165 },
+ { 226, 195 }, { 256, 225 }, { 74, 43 }, { 105, 74 },
+ { 136, 105 }, { 227, 196 }, { 43, 12 }, { 197, 166 },
+ { 167, 136 }, { 257, 226 }, { 256, 256 }, { 12, 12 },
+ { 228, 197 }, { 75, 44 }, { 106, 75 }, { 198, 167 },
+ { 137, 106 }, { 258, 227 }, { 168, 137 }, { 288, 257 },
+ { 44, 13 }, { 229, 198 }, { 259, 228 }, { 199, 168 },
+ { 107, 76 }, { 13, 13 }, { 169, 138 }, { 138, 107 },
+ { 288, 288 }, { 289, 258 }, { 76, 45 }, { 230, 199 },
+ { 260, 229 }, { 45, 14 }, { 200, 169 }, { 139, 108 },
+ { 290, 259 }, { 108, 77 }, { 231, 200 }, { 320, 289 },
+ { 261, 230 }, { 170, 139 }, { 77, 46 }, { 291, 260 },
+ { 14, 14 }, { 321, 290 }, { 201, 170 }, { 262, 231 },
+ { 320, 320 }, { 171, 140 }, { 292, 261 }, { 232, 201 },
+ { 140, 109 }, { 322, 291 }, { 109, 78 }, { 46, 15 },
+ { 202, 171 }, { 263, 232 }, { 233, 202 }, { 293, 262 },
+ { 352, 321 }, { 323, 292 }, { 15, 15 }, { 78, 47 },
+ { 203, 172 }, { 264, 233 }, { 294, 263 }, { 324, 293 },
+ { 172, 141 }, { 353, 322 }, { 141, 110 }, { 234, 203 },
+ { 352, 352 }, { 47, 16 }, { 295, 264 }, { 110, 79 },
+ { 265, 234 }, { 354, 323 }, { 325, 294 }, { 79, 48 },
+ { 16, 16 }, { 204, 173 }, { 235, 204 }, { 173, 142 },
+ { 355, 324 }, { 384, 353 }, { 326, 295 }, { 142, 111 },
+ { 296, 265 }, { 266, 235 }, { 356, 325 }, { 385, 354 },
+ { 111, 80 }, { 48, 17 }, { 327, 296 }, { 297, 266 },
+ { 205, 174 }, { 384, 384 }, { 236, 205 }, { 357, 326 },
+ { 386, 355 }, { 80, 49 }, { 174, 143 }, { 17, 17 },
+ { 328, 297 }, { 358, 327 }, { 387, 356 }, { 298, 267 },
+ { 329, 298 }, { 388, 357 }, { 112, 81 }, { 416, 385 },
+ { 237, 206 }, { 359, 328 }, { 49, 18 }, { 206, 175 },
+ { 417, 386 }, { 389, 358 }, { 330, 299 }, { 18, 18 },
+ { 416, 416 }, { 360, 329 }, { 81, 50 }, { 418, 387 },
+ { 390, 359 }, { 238, 207 }, { 50, 19 }, { 361, 330 },
+ { 419, 388 }, { 113, 82 }, { 448, 417 }, { 448, 448 },
+ { 420, 389 }, { 82, 51 }, { 362, 331 }, { 449, 418 },
+ { 421, 390 }, { 480, 480 }, { 450, 419 }, { 422, 391 },
+ { 114, 83 }, { 451, 420 }, { 480, 449 }, { 452, 421 },
+ { 481, 450 }, { 453, 422 }, { 512, 512 }, { 482, 451 },
+ { 454, 423 }, { 512, 481 }, { 483, 452 }, { 513, 482 },
+ { 484, 453 }, { 514, 483 }, { 485, 454 }, { 544, 513 },
+ { 544, 544 }, { 486, 455 }, { 545, 514 }, { 546, 515 },
+ { 576, 576 }, { 576, 545 }, { 577, 546 }, { 578, 547 },
+ { 608, 577 }, { 609, 578 }, { 610, 579 }, { 19, 19 },
+ { 143, 112 }, { 267, 236 }, { 391, 360 }, { 515, 484 },
+ { 608, 608 }, { 20, 20 }, { 51, 20 }, { 144, 113 },
+ { 175, 144 }, { 268, 237 }, { 299, 268 }, { 392, 361 },
+ { 423, 392 }, { 516, 485 }, { 547, 516 }, { 640, 609 },
+ { 640, 640 }, { 21, 21 }, { 52, 21 }, { 83, 52 },
+ { 145, 114 }, { 176, 145 }, { 207, 176 }, { 269, 238 },
+ { 300, 269 }, { 331, 300 }, { 393, 362 }, { 424, 393 },
+ { 455, 424 }, { 517, 486 }, { 548, 517 }, { 579, 548 },
+ { 641, 610 }, { 672, 641 }, { 672, 672 }, { 22, 22 },
+ { 53, 22 }, { 84, 53 }, { 115, 84 }, { 146, 115 },
+ { 177, 146 }, { 208, 177 }, { 239, 208 }, { 270, 239 },
+ { 301, 270 }, { 332, 301 }, { 363, 332 }, { 394, 363 },
+ { 425, 394 }, { 456, 425 }, { 487, 456 }, { 518, 487 },
+ { 549, 518 }, { 580, 549 }, { 611, 580 }, { 642, 611 },
+ { 673, 642 }, { 704, 673 }, { 704, 704 }, { 54, 23 },
+ { 85, 54 }, { 116, 85 }, { 178, 147 }, { 209, 178 },
+ { 240, 209 }, { 302, 271 }, { 333, 302 }, { 364, 333 },
+ { 426, 395 }, { 457, 426 }, { 488, 457 }, { 550, 519 },
+ { 581, 550 }, { 612, 581 }, { 674, 643 }, { 705, 674 },
+ { 736, 705 }, { 86, 55 }, { 117, 86 }, { 210, 179 },
+ { 241, 210 }, { 334, 303 }, { 365, 334 }, { 458, 427 },
+ { 489, 458 }, { 582, 551 }, { 613, 582 }, { 706, 675 },
+ { 737, 706 }, { 118, 87 }, { 242, 211 }, { 366, 335 },
+ { 490, 459 }, { 614, 583 }, { 738, 707 }, { 23, 23 },
+ { 147, 116 }, { 271, 240 }, { 395, 364 }, { 519, 488 },
+ { 643, 612 }, { 736, 736 }, { 24, 24 }, { 55, 24 },
+ { 148, 117 }, { 179, 148 }, { 272, 241 }, { 303, 272 },
+ { 396, 365 }, { 427, 396 }, { 520, 489 }, { 551, 520 },
+ { 644, 613 }, { 675, 644 }, { 768, 737 }, { 768, 768 },
+ { 25, 25 }, { 56, 25 }, { 87, 56 }, { 149, 118 },
+ { 180, 149 }, { 211, 180 }, { 273, 242 }, { 304, 273 },
+ { 335, 304 }, { 397, 366 }, { 428, 397 }, { 459, 428 },
+ { 521, 490 }, { 552, 521 }, { 583, 552 }, { 645, 614 },
+ { 676, 645 }, { 707, 676 }, { 769, 738 }, { 800, 769 },
+ { 800, 800 }, { 26, 26 }, { 57, 26 }, { 88, 57 },
+ { 119, 88 }, { 150, 119 }, { 181, 150 }, { 212, 181 },
+ { 243, 212 }, { 274, 243 }, { 305, 274 }, { 336, 305 },
+ { 367, 336 }, { 398, 367 }, { 429, 398 }, { 460, 429 },
+ { 491, 460 }, { 522, 491 }, { 553, 522 }, { 584, 553 },
+ { 615, 584 }, { 646, 615 }, { 677, 646 }, { 708, 677 },
+ { 739, 708 }, { 770, 739 }, { 801, 770 }, { 832, 801 },
+ { 832, 832 }, { 58, 27 }, { 89, 58 }, { 120, 89 },
+ { 182, 151 }, { 213, 182 }, { 244, 213 }, { 306, 275 },
+ { 337, 306 }, { 368, 337 }, { 430, 399 }, { 461, 430 },
+ { 492, 461 }, { 554, 523 }, { 585, 554 }, { 616, 585 },
+ { 678, 647 }, { 709, 678 }, { 740, 709 }, { 802, 771 },
+ { 833, 802 }, { 864, 833 }, { 90, 59 }, { 121, 90 },
+ { 214, 183 }, { 245, 214 }, { 338, 307 }, { 369, 338 },
+ { 462, 431 }, { 493, 462 }, { 586, 555 }, { 617, 586 },
+ { 710, 679 }, { 741, 710 }, { 834, 803 }, { 865, 834 },
+ { 122, 91 }, { 246, 215 }, { 370, 339 }, { 494, 463 },
+ { 618, 587 }, { 742, 711 }, { 866, 835 }, { 27, 27 },
+ { 151, 120 }, { 275, 244 }, { 399, 368 }, { 523, 492 },
+ { 647, 616 }, { 771, 740 }, { 864, 864 }, { 28, 28 },
+ { 59, 28 }, { 152, 121 }, { 183, 152 }, { 276, 245 },
+ { 307, 276 }, { 400, 369 }, { 431, 400 }, { 524, 493 },
+ { 555, 524 }, { 648, 617 }, { 679, 648 }, { 772, 741 },
+ { 803, 772 }, { 896, 865 }, { 896, 896 }, { 29, 29 },
+ { 60, 29 }, { 91, 60 }, { 153, 122 }, { 184, 153 },
+ { 215, 184 }, { 277, 246 }, { 308, 277 }, { 339, 308 },
+ { 401, 370 }, { 432, 401 }, { 463, 432 }, { 525, 494 },
+ { 556, 525 }, { 587, 556 }, { 649, 618 }, { 680, 649 },
+ { 711, 680 }, { 773, 742 }, { 804, 773 }, { 835, 804 },
+ { 897, 866 }, { 928, 897 }, { 928, 928 }, { 30, 30 },
+ { 61, 30 }, { 92, 61 }, { 123, 92 }, { 154, 123 },
+ { 185, 154 }, { 216, 185 }, { 247, 216 }, { 278, 247 },
+ { 309, 278 }, { 340, 309 }, { 371, 340 }, { 402, 371 },
+ { 433, 402 }, { 464, 433 }, { 495, 464 }, { 526, 495 },
+ { 557, 526 }, { 588, 557 }, { 619, 588 }, { 650, 619 },
+ { 681, 650 }, { 712, 681 }, { 743, 712 }, { 774, 743 },
+ { 805, 774 }, { 836, 805 }, { 867, 836 }, { 898, 867 },
+ { 929, 898 }, { 960, 929 }, { 960, 960 }, { 62, 31 },
+ { 93, 62 }, { 124, 93 }, { 186, 155 }, { 217, 186 },
+ { 248, 217 }, { 310, 279 }, { 341, 310 }, { 372, 341 },
+ { 434, 403 }, { 465, 434 }, { 496, 465 }, { 558, 527 },
+ { 589, 558 }, { 620, 589 }, { 682, 651 }, { 713, 682 },
+ { 744, 713 }, { 806, 775 }, { 837, 806 }, { 868, 837 },
+ { 930, 899 }, { 961, 930 }, { 992, 961 }, { 94, 63 },
+ { 125, 94 }, { 218, 187 }, { 249, 218 }, { 342, 311 },
+ { 373, 342 }, { 466, 435 }, { 497, 466 }, { 590, 559 },
+ { 621, 590 }, { 714, 683 }, { 745, 714 }, { 838, 807 },
+ { 869, 838 }, { 962, 931 }, { 993, 962 }, { 126, 95 },
+ { 250, 219 }, { 374, 343 }, { 498, 467 }, { 622, 591 },
+ { 746, 715 }, { 870, 839 }, { 994, 963 }, { 155, 124 },
+ { 279, 248 }, { 403, 372 }, { 527, 496 }, { 651, 620 },
+ { 775, 744 }, { 899, 868 }, { 156, 125 }, { 187, 156 },
+ { 280, 249 }, { 311, 280 }, { 404, 373 }, { 435, 404 },
+ { 528, 497 }, { 559, 528 }, { 652, 621 }, { 683, 652 },
+ { 776, 745 }, { 807, 776 }, { 900, 869 }, { 931, 900 },
+ { 157, 126 }, { 188, 157 }, { 219, 188 }, { 281, 250 },
+ { 312, 281 }, { 343, 312 }, { 405, 374 }, { 436, 405 },
+ { 467, 436 }, { 529, 498 }, { 560, 529 }, { 591, 560 },
+ { 653, 622 }, { 684, 653 }, { 715, 684 }, { 777, 746 },
+ { 808, 777 }, { 839, 808 }, { 901, 870 }, { 932, 901 },
+ { 963, 932 }, { 158, 127 }, { 189, 158 }, { 220, 189 },
+ { 251, 220 }, { 282, 251 }, { 313, 282 }, { 344, 313 },
+ { 375, 344 }, { 406, 375 }, { 437, 406 }, { 468, 437 },
+ { 499, 468 }, { 530, 499 }, { 561, 530 }, { 592, 561 },
+ { 623, 592 }, { 654, 623 }, { 685, 654 }, { 716, 685 },
+ { 747, 716 }, { 778, 747 }, { 809, 778 }, { 840, 809 },
+ { 871, 840 }, { 902, 871 }, { 933, 902 }, { 964, 933 },
+ { 995, 964 }, { 190, 159 }, { 221, 190 }, { 252, 221 },
+ { 314, 283 }, { 345, 314 }, { 376, 345 }, { 438, 407 },
+ { 469, 438 }, { 500, 469 }, { 562, 531 }, { 593, 562 },
+ { 624, 593 }, { 686, 655 }, { 717, 686 }, { 748, 717 },
+ { 810, 779 }, { 841, 810 }, { 872, 841 }, { 934, 903 },
+ { 965, 934 }, { 996, 965 }, { 222, 191 }, { 253, 222 },
+ { 346, 315 }, { 377, 346 }, { 470, 439 }, { 501, 470 },
+ { 594, 563 }, { 625, 594 }, { 718, 687 }, { 749, 718 },
+ { 842, 811 }, { 873, 842 }, { 966, 935 }, { 997, 966 },
+ { 254, 223 }, { 378, 347 }, { 502, 471 }, { 626, 595 },
+ { 750, 719 }, { 874, 843 }, { 998, 967 }, { 283, 252 },
+ { 407, 376 }, { 531, 500 }, { 655, 624 }, { 779, 748 },
+ { 903, 872 }, { 284, 253 }, { 315, 284 }, { 408, 377 },
+ { 439, 408 }, { 532, 501 }, { 563, 532 }, { 656, 625 },
+ { 687, 656 }, { 780, 749 }, { 811, 780 }, { 904, 873 },
+ { 935, 904 }, { 285, 254 }, { 316, 285 }, { 347, 316 },
+ { 409, 378 }, { 440, 409 }, { 471, 440 }, { 533, 502 },
+ { 564, 533 }, { 595, 564 }, { 657, 626 }, { 688, 657 },
+ { 719, 688 }, { 781, 750 }, { 812, 781 }, { 843, 812 },
+ { 905, 874 }, { 936, 905 }, { 967, 936 }, { 286, 255 },
+ { 317, 286 }, { 348, 317 }, { 379, 348 }, { 410, 379 },
+ { 441, 410 }, { 472, 441 }, { 503, 472 }, { 534, 503 },
+ { 565, 534 }, { 596, 565 }, { 627, 596 }, { 658, 627 },
+ { 689, 658 }, { 720, 689 }, { 751, 720 }, { 782, 751 },
+ { 813, 782 }, { 844, 813 }, { 875, 844 }, { 906, 875 },
+ { 937, 906 }, { 968, 937 }, { 999, 968 }, { 318, 287 },
+ { 349, 318 }, { 380, 349 }, { 442, 411 }, { 473, 442 },
+ { 504, 473 }, { 566, 535 }, { 597, 566 }, { 628, 597 },
+ { 690, 659 }, { 721, 690 }, { 752, 721 }, { 814, 783 },
+ { 845, 814 }, { 876, 845 }, { 938, 907 }, { 969, 938 },
+ { 1000, 969 }, { 350, 319 }, { 381, 350 }, { 474, 443 },
+ { 505, 474 }, { 598, 567 }, { 629, 598 }, { 722, 691 },
+ { 753, 722 }, { 846, 815 }, { 877, 846 }, { 970, 939 },
+ { 1001, 970 }, { 382, 351 }, { 506, 475 }, { 630, 599 },
+ { 754, 723 }, { 878, 847 }, { 1002, 971 }, { 411, 380 },
+ { 535, 504 }, { 659, 628 }, { 783, 752 }, { 907, 876 },
+ { 412, 381 }, { 443, 412 }, { 536, 505 }, { 567, 536 },
+ { 660, 629 }, { 691, 660 }, { 784, 753 }, { 815, 784 },
+ { 908, 877 }, { 939, 908 }, { 413, 382 }, { 444, 413 },
+ { 475, 444 }, { 537, 506 }, { 568, 537 }, { 599, 568 },
+ { 661, 630 }, { 692, 661 }, { 723, 692 }, { 785, 754 },
+ { 816, 785 }, { 847, 816 }, { 909, 878 }, { 940, 909 },
+ { 971, 940 }, { 414, 383 }, { 445, 414 }, { 476, 445 },
+ { 507, 476 }, { 538, 507 }, { 569, 538 }, { 600, 569 },
+ { 631, 600 }, { 662, 631 }, { 693, 662 }, { 724, 693 },
+ { 755, 724 }, { 786, 755 }, { 817, 786 }, { 848, 817 },
+ { 879, 848 }, { 910, 879 }, { 941, 910 }, { 972, 941 },
+ { 1003, 972 }, { 446, 415 }, { 477, 446 }, { 508, 477 },
+ { 570, 539 }, { 601, 570 }, { 632, 601 }, { 694, 663 },
+ { 725, 694 }, { 756, 725 }, { 818, 787 }, { 849, 818 },
+ { 880, 849 }, { 942, 911 }, { 973, 942 }, { 1004, 973 },
+ { 478, 447 }, { 509, 478 }, { 602, 571 }, { 633, 602 },
+ { 726, 695 }, { 757, 726 }, { 850, 819 }, { 881, 850 },
+ { 974, 943 }, { 1005, 974 }, { 510, 479 }, { 634, 603 },
+ { 758, 727 }, { 882, 851 }, { 1006, 975 }, { 539, 508 },
+ { 663, 632 }, { 787, 756 }, { 911, 880 }, { 540, 509 },
+ { 571, 540 }, { 664, 633 }, { 695, 664 }, { 788, 757 },
+ { 819, 788 }, { 912, 881 }, { 943, 912 }, { 541, 510 },
+ { 572, 541 }, { 603, 572 }, { 665, 634 }, { 696, 665 },
+ { 727, 696 }, { 789, 758 }, { 820, 789 }, { 851, 820 },
+ { 913, 882 }, { 944, 913 }, { 975, 944 }, { 542, 511 },
+ { 573, 542 }, { 604, 573 }, { 635, 604 }, { 666, 635 },
+ { 697, 666 }, { 728, 697 }, { 759, 728 }, { 790, 759 },
+ { 821, 790 }, { 852, 821 }, { 883, 852 }, { 914, 883 },
+ { 945, 914 }, { 976, 945 }, { 1007, 976 }, { 574, 543 },
+ { 605, 574 }, { 636, 605 }, { 698, 667 }, { 729, 698 },
+ { 760, 729 }, { 822, 791 }, { 853, 822 }, { 884, 853 },
+ { 946, 915 }, { 977, 946 }, { 1008, 977 }, { 606, 575 },
+ { 637, 606 }, { 730, 699 }, { 761, 730 }, { 854, 823 },
+ { 885, 854 }, { 978, 947 }, { 1009, 978 }, { 638, 607 },
+ { 762, 731 }, { 886, 855 }, { 1010, 979 }, { 667, 636 },
+ { 791, 760 }, { 915, 884 }, { 668, 637 }, { 699, 668 },
+ { 792, 761 }, { 823, 792 }, { 916, 885 }, { 947, 916 },
+ { 669, 638 }, { 700, 669 }, { 731, 700 }, { 793, 762 },
+ { 824, 793 }, { 855, 824 }, { 917, 886 }, { 948, 917 },
+ { 979, 948 }, { 670, 639 }, { 701, 670 }, { 732, 701 },
+ { 763, 732 }, { 794, 763 }, { 825, 794 }, { 856, 825 },
+ { 887, 856 }, { 918, 887 }, { 949, 918 }, { 980, 949 },
+ { 1011, 980 }, { 702, 671 }, { 733, 702 }, { 764, 733 },
+ { 826, 795 }, { 857, 826 }, { 888, 857 }, { 950, 919 },
+ { 981, 950 }, { 1012, 981 }, { 734, 703 }, { 765, 734 },
+ { 858, 827 }, { 889, 858 }, { 982, 951 }, { 1013, 982 },
+ { 766, 735 }, { 890, 859 }, { 1014, 983 }, { 795, 764 },
+ { 919, 888 }, { 796, 765 }, { 827, 796 }, { 920, 889 },
+ { 951, 920 }, { 797, 766 }, { 828, 797 }, { 859, 828 },
+ { 921, 890 }, { 952, 921 }, { 983, 952 }, { 798, 767 },
+ { 829, 798 }, { 860, 829 }, { 891, 860 }, { 922, 891 },
+ { 953, 922 }, { 984, 953 }, { 1015, 984 }, { 830, 799 },
+ { 861, 830 }, { 892, 861 }, { 954, 923 }, { 985, 954 },
+ { 1016, 985 }, { 862, 831 }, { 893, 862 }, { 986, 955 },
+ { 1017, 986 }, { 894, 863 }, { 1018, 987 }, { 923, 892 },
+ { 924, 893 }, { 955, 924 }, { 925, 894 }, { 956, 925 },
+ { 987, 956 }, { 926, 895 }, { 957, 926 }, { 988, 957 },
+ { 1019, 988 }, { 958, 927 }, { 989, 958 }, { 1020, 989 },
+ { 990, 959 }, { 1021, 990 }, { 1022, 991 }, { 0, 0 },
+};
+
+const int16_t (* const ff_vp9_scans_nb[5][4])[2] = {
+ {
+ ff_vp9_default_scan_4x4_nb, ff_vp9_col_scan_4x4_nb,
+ ff_vp9_row_scan_4x4_nb, ff_vp9_default_scan_4x4_nb
+ }, {
+ ff_vp9_default_scan_8x8_nb, ff_vp9_col_scan_8x8_nb,
+ ff_vp9_row_scan_8x8_nb, ff_vp9_default_scan_8x8_nb
+ }, {
+ ff_vp9_default_scan_16x16_nb, ff_vp9_col_scan_16x16_nb,
+ ff_vp9_row_scan_16x16_nb, ff_vp9_default_scan_16x16_nb
+ }, {
+ ff_vp9_default_scan_32x32_nb, ff_vp9_default_scan_32x32_nb,
+ ff_vp9_default_scan_32x32_nb, ff_vp9_default_scan_32x32_nb
+ }, { // lossless
+ ff_vp9_default_scan_4x4_nb, ff_vp9_default_scan_4x4_nb,
+ ff_vp9_default_scan_4x4_nb, ff_vp9_default_scan_4x4_nb
+ }
+};
+
+const uint8_t ff_vp9_model_pareto8[256][8] = {
+ { 6, 86, 128, 11, 87, 42, 91, 52 },
+ { 3, 86, 128, 6, 86, 23, 88, 29 },
+ { 6, 86, 128, 11, 87, 42, 91, 52 },
+ { 9, 86, 129, 17, 88, 61, 94, 76 },
+ { 12, 86, 129, 22, 88, 77, 97, 93 },
+ { 15, 87, 129, 28, 89, 93, 100, 110 },
+ { 17, 87, 129, 33, 90, 105, 103, 123 },
+ { 20, 88, 130, 38, 91, 118, 106, 136 },
+ { 23, 88, 130, 43, 91, 128, 108, 146 },
+ { 26, 89, 131, 48, 92, 139, 111, 156 },
+ { 28, 89, 131, 53, 93, 147, 114, 163 },
+ { 31, 90, 131, 58, 94, 156, 117, 171 },
+ { 34, 90, 131, 62, 94, 163, 119, 177 },
+ { 37, 90, 132, 66, 95, 171, 122, 184 },
+ { 39, 90, 132, 70, 96, 177, 124, 189 },
+ { 42, 91, 132, 75, 97, 183, 127, 194 },
+ { 44, 91, 132, 79, 97, 188, 129, 198 },
+ { 47, 92, 133, 83, 98, 193, 132, 202 },
+ { 49, 92, 133, 86, 99, 197, 134, 205 },
+ { 52, 93, 133, 90, 100, 201, 137, 208 },
+ { 54, 93, 133, 94, 100, 204, 139, 211 },
+ { 57, 94, 134, 98, 101, 208, 142, 214 },
+ { 59, 94, 134, 101, 102, 211, 144, 216 },
+ { 62, 94, 135, 105, 103, 214, 146, 218 },
+ { 64, 94, 135, 108, 103, 216, 148, 220 },
+ { 66, 95, 135, 111, 104, 219, 151, 222 },
+ { 68, 95, 135, 114, 105, 221, 153, 223 },
+ { 71, 96, 136, 117, 106, 224, 155, 225 },
+ { 73, 96, 136, 120, 106, 225, 157, 226 },
+ { 76, 97, 136, 123, 107, 227, 159, 228 },
+ { 78, 97, 136, 126, 108, 229, 160, 229 },
+ { 80, 98, 137, 129, 109, 231, 162, 231 },
+ { 82, 98, 137, 131, 109, 232, 164, 232 },
+ { 84, 98, 138, 134, 110, 234, 166, 233 },
+ { 86, 98, 138, 137, 111, 235, 168, 234 },
+ { 89, 99, 138, 140, 112, 236, 170, 235 },
+ { 91, 99, 138, 142, 112, 237, 171, 235 },
+ { 93, 100, 139, 145, 113, 238, 173, 236 },
+ { 95, 100, 139, 147, 114, 239, 174, 237 },
+ { 97, 101, 140, 149, 115, 240, 176, 238 },
+ { 99, 101, 140, 151, 115, 241, 177, 238 },
+ { 101, 102, 140, 154, 116, 242, 179, 239 },
+ { 103, 102, 140, 156, 117, 242, 180, 239 },
+ { 105, 103, 141, 158, 118, 243, 182, 240 },
+ { 107, 103, 141, 160, 118, 243, 183, 240 },
+ { 109, 104, 141, 162, 119, 244, 185, 241 },
+ { 111, 104, 141, 164, 119, 244, 186, 241 },
+ { 113, 104, 142, 166, 120, 245, 187, 242 },
+ { 114, 104, 142, 168, 121, 245, 188, 242 },
+ { 116, 105, 143, 170, 122, 246, 190, 243 },
+ { 118, 105, 143, 171, 122, 246, 191, 243 },
+ { 120, 106, 143, 173, 123, 247, 192, 244 },
+ { 121, 106, 143, 175, 124, 247, 193, 244 },
+ { 123, 107, 144, 177, 125, 248, 195, 244 },
+ { 125, 107, 144, 178, 125, 248, 196, 244 },
+ { 127, 108, 145, 180, 126, 249, 197, 245 },
+ { 128, 108, 145, 181, 127, 249, 198, 245 },
+ { 130, 109, 145, 183, 128, 249, 199, 245 },
+ { 132, 109, 145, 184, 128, 249, 200, 245 },
+ { 134, 110, 146, 186, 129, 250, 201, 246 },
+ { 135, 110, 146, 187, 130, 250, 202, 246 },
+ { 137, 111, 147, 189, 131, 251, 203, 246 },
+ { 138, 111, 147, 190, 131, 251, 204, 246 },
+ { 140, 112, 147, 192, 132, 251, 205, 247 },
+ { 141, 112, 147, 193, 132, 251, 206, 247 },
+ { 143, 113, 148, 194, 133, 251, 207, 247 },
+ { 144, 113, 148, 195, 134, 251, 207, 247 },
+ { 146, 114, 149, 197, 135, 252, 208, 248 },
+ { 147, 114, 149, 198, 135, 252, 209, 248 },
+ { 149, 115, 149, 199, 136, 252, 210, 248 },
+ { 150, 115, 149, 200, 137, 252, 210, 248 },
+ { 152, 115, 150, 201, 138, 252, 211, 248 },
+ { 153, 115, 150, 202, 138, 252, 212, 248 },
+ { 155, 116, 151, 204, 139, 253, 213, 249 },
+ { 156, 116, 151, 205, 139, 253, 213, 249 },
+ { 158, 117, 151, 206, 140, 253, 214, 249 },
+ { 159, 117, 151, 207, 141, 253, 215, 249 },
+ { 161, 118, 152, 208, 142, 253, 216, 249 },
+ { 162, 118, 152, 209, 142, 253, 216, 249 },
+ { 163, 119, 153, 210, 143, 253, 217, 249 },
+ { 164, 119, 153, 211, 143, 253, 217, 249 },
+ { 166, 120, 153, 212, 144, 254, 218, 250 },
+ { 167, 120, 153, 212, 145, 254, 219, 250 },
+ { 168, 121, 154, 213, 146, 254, 220, 250 },
+ { 169, 121, 154, 214, 146, 254, 220, 250 },
+ { 171, 122, 155, 215, 147, 254, 221, 250 },
+ { 172, 122, 155, 216, 147, 254, 221, 250 },
+ { 173, 123, 155, 217, 148, 254, 222, 250 },
+ { 174, 123, 155, 217, 149, 254, 222, 250 },
+ { 176, 124, 156, 218, 150, 254, 223, 250 },
+ { 177, 124, 156, 219, 150, 254, 223, 250 },
+ { 178, 125, 157, 220, 151, 254, 224, 251 },
+ { 179, 125, 157, 220, 151, 254, 224, 251 },
+ { 180, 126, 157, 221, 152, 254, 225, 251 },
+ { 181, 126, 157, 221, 152, 254, 225, 251 },
+ { 183, 127, 158, 222, 153, 254, 226, 251 },
+ { 184, 127, 158, 223, 154, 254, 226, 251 },
+ { 185, 128, 159, 224, 155, 255, 227, 251 },
+ { 186, 128, 159, 224, 155, 255, 227, 251 },
+ { 187, 129, 160, 225, 156, 255, 228, 251 },
+ { 188, 130, 160, 225, 156, 255, 228, 251 },
+ { 189, 131, 160, 226, 157, 255, 228, 251 },
+ { 190, 131, 160, 226, 158, 255, 228, 251 },
+ { 191, 132, 161, 227, 159, 255, 229, 251 },
+ { 192, 132, 161, 227, 159, 255, 229, 251 },
+ { 193, 133, 162, 228, 160, 255, 230, 252 },
+ { 194, 133, 162, 229, 160, 255, 230, 252 },
+ { 195, 134, 163, 230, 161, 255, 231, 252 },
+ { 196, 134, 163, 230, 161, 255, 231, 252 },
+ { 197, 135, 163, 231, 162, 255, 231, 252 },
+ { 198, 135, 163, 231, 162, 255, 231, 252 },
+ { 199, 136, 164, 232, 163, 255, 232, 252 },
+ { 200, 136, 164, 232, 164, 255, 232, 252 },
+ { 201, 137, 165, 233, 165, 255, 233, 252 },
+ { 201, 137, 165, 233, 165, 255, 233, 252 },
+ { 202, 138, 166, 233, 166, 255, 233, 252 },
+ { 203, 138, 166, 233, 166, 255, 233, 252 },
+ { 204, 139, 166, 234, 167, 255, 234, 252 },
+ { 205, 139, 166, 234, 167, 255, 234, 252 },
+ { 206, 140, 167, 235, 168, 255, 235, 252 },
+ { 206, 140, 167, 235, 168, 255, 235, 252 },
+ { 207, 141, 168, 236, 169, 255, 235, 252 },
+ { 208, 141, 168, 236, 170, 255, 235, 252 },
+ { 209, 142, 169, 237, 171, 255, 236, 252 },
+ { 209, 143, 169, 237, 171, 255, 236, 252 },
+ { 210, 144, 169, 237, 172, 255, 236, 252 },
+ { 211, 144, 169, 237, 172, 255, 236, 252 },
+ { 212, 145, 170, 238, 173, 255, 237, 252 },
+ { 213, 145, 170, 238, 173, 255, 237, 252 },
+ { 214, 146, 171, 239, 174, 255, 237, 253 },
+ { 214, 146, 171, 239, 174, 255, 237, 253 },
+ { 215, 147, 172, 240, 175, 255, 238, 253 },
+ { 215, 147, 172, 240, 175, 255, 238, 253 },
+ { 216, 148, 173, 240, 176, 255, 238, 253 },
+ { 217, 148, 173, 240, 176, 255, 238, 253 },
+ { 218, 149, 173, 241, 177, 255, 239, 253 },
+ { 218, 149, 173, 241, 178, 255, 239, 253 },
+ { 219, 150, 174, 241, 179, 255, 239, 253 },
+ { 219, 151, 174, 241, 179, 255, 239, 253 },
+ { 220, 152, 175, 242, 180, 255, 240, 253 },
+ { 221, 152, 175, 242, 180, 255, 240, 253 },
+ { 222, 153, 176, 242, 181, 255, 240, 253 },
+ { 222, 153, 176, 242, 181, 255, 240, 253 },
+ { 223, 154, 177, 243, 182, 255, 240, 253 },
+ { 223, 154, 177, 243, 182, 255, 240, 253 },
+ { 224, 155, 178, 244, 183, 255, 241, 253 },
+ { 224, 155, 178, 244, 183, 255, 241, 253 },
+ { 225, 156, 178, 244, 184, 255, 241, 253 },
+ { 225, 157, 178, 244, 184, 255, 241, 253 },
+ { 226, 158, 179, 244, 185, 255, 242, 253 },
+ { 227, 158, 179, 244, 185, 255, 242, 253 },
+ { 228, 159, 180, 245, 186, 255, 242, 253 },
+ { 228, 159, 180, 245, 186, 255, 242, 253 },
+ { 229, 160, 181, 245, 187, 255, 242, 253 },
+ { 229, 160, 181, 245, 187, 255, 242, 253 },
+ { 230, 161, 182, 246, 188, 255, 243, 253 },
+ { 230, 162, 182, 246, 188, 255, 243, 253 },
+ { 231, 163, 183, 246, 189, 255, 243, 253 },
+ { 231, 163, 183, 246, 189, 255, 243, 253 },
+ { 232, 164, 184, 247, 190, 255, 243, 253 },
+ { 232, 164, 184, 247, 190, 255, 243, 253 },
+ { 233, 165, 185, 247, 191, 255, 244, 253 },
+ { 233, 165, 185, 247, 191, 255, 244, 253 },
+ { 234, 166, 185, 247, 192, 255, 244, 253 },
+ { 234, 167, 185, 247, 192, 255, 244, 253 },
+ { 235, 168, 186, 248, 193, 255, 244, 253 },
+ { 235, 168, 186, 248, 193, 255, 244, 253 },
+ { 236, 169, 187, 248, 194, 255, 244, 253 },
+ { 236, 169, 187, 248, 194, 255, 244, 253 },
+ { 236, 170, 188, 248, 195, 255, 245, 253 },
+ { 236, 170, 188, 248, 195, 255, 245, 253 },
+ { 237, 171, 189, 249, 196, 255, 245, 254 },
+ { 237, 172, 189, 249, 196, 255, 245, 254 },
+ { 238, 173, 190, 249, 197, 255, 245, 254 },
+ { 238, 173, 190, 249, 197, 255, 245, 254 },
+ { 239, 174, 191, 249, 198, 255, 245, 254 },
+ { 239, 174, 191, 249, 198, 255, 245, 254 },
+ { 240, 175, 192, 249, 199, 255, 246, 254 },
+ { 240, 176, 192, 249, 199, 255, 246, 254 },
+ { 240, 177, 193, 250, 200, 255, 246, 254 },
+ { 240, 177, 193, 250, 200, 255, 246, 254 },
+ { 241, 178, 194, 250, 201, 255, 246, 254 },
+ { 241, 178, 194, 250, 201, 255, 246, 254 },
+ { 242, 179, 195, 250, 202, 255, 246, 254 },
+ { 242, 180, 195, 250, 202, 255, 246, 254 },
+ { 242, 181, 196, 250, 203, 255, 247, 254 },
+ { 242, 181, 196, 250, 203, 255, 247, 254 },
+ { 243, 182, 197, 251, 204, 255, 247, 254 },
+ { 243, 183, 197, 251, 204, 255, 247, 254 },
+ { 244, 184, 198, 251, 205, 255, 247, 254 },
+ { 244, 184, 198, 251, 205, 255, 247, 254 },
+ { 244, 185, 199, 251, 206, 255, 247, 254 },
+ { 244, 185, 199, 251, 206, 255, 247, 254 },
+ { 245, 186, 200, 251, 207, 255, 247, 254 },
+ { 245, 187, 200, 251, 207, 255, 247, 254 },
+ { 246, 188, 201, 252, 207, 255, 248, 254 },
+ { 246, 188, 201, 252, 207, 255, 248, 254 },
+ { 246, 189, 202, 252, 208, 255, 248, 254 },
+ { 246, 190, 202, 252, 208, 255, 248, 254 },
+ { 247, 191, 203, 252, 209, 255, 248, 254 },
+ { 247, 191, 203, 252, 209, 255, 248, 254 },
+ { 247, 192, 204, 252, 210, 255, 248, 254 },
+ { 247, 193, 204, 252, 210, 255, 248, 254 },
+ { 248, 194, 205, 252, 211, 255, 248, 254 },
+ { 248, 194, 205, 252, 211, 255, 248, 254 },
+ { 248, 195, 206, 252, 212, 255, 249, 254 },
+ { 248, 196, 206, 252, 212, 255, 249, 254 },
+ { 249, 197, 207, 253, 213, 255, 249, 254 },
+ { 249, 197, 207, 253, 213, 255, 249, 254 },
+ { 249, 198, 208, 253, 214, 255, 249, 254 },
+ { 249, 199, 209, 253, 214, 255, 249, 254 },
+ { 250, 200, 210, 253, 215, 255, 249, 254 },
+ { 250, 200, 210, 253, 215, 255, 249, 254 },
+ { 250, 201, 211, 253, 215, 255, 249, 254 },
+ { 250, 202, 211, 253, 215, 255, 249, 254 },
+ { 250, 203, 212, 253, 216, 255, 249, 254 },
+ { 250, 203, 212, 253, 216, 255, 249, 254 },
+ { 251, 204, 213, 253, 217, 255, 250, 254 },
+ { 251, 205, 213, 253, 217, 255, 250, 254 },
+ { 251, 206, 214, 254, 218, 255, 250, 254 },
+ { 251, 206, 215, 254, 218, 255, 250, 254 },
+ { 252, 207, 216, 254, 219, 255, 250, 254 },
+ { 252, 208, 216, 254, 219, 255, 250, 254 },
+ { 252, 209, 217, 254, 220, 255, 250, 254 },
+ { 252, 210, 217, 254, 220, 255, 250, 254 },
+ { 252, 211, 218, 254, 221, 255, 250, 254 },
+ { 252, 212, 218, 254, 221, 255, 250, 254 },
+ { 253, 213, 219, 254, 222, 255, 250, 254 },
+ { 253, 213, 220, 254, 222, 255, 250, 254 },
+ { 253, 214, 221, 254, 223, 255, 250, 254 },
+ { 253, 215, 221, 254, 223, 255, 250, 254 },
+ { 253, 216, 222, 254, 224, 255, 251, 254 },
+ { 253, 217, 223, 254, 224, 255, 251, 254 },
+ { 253, 218, 224, 254, 225, 255, 251, 254 },
+ { 253, 219, 224, 254, 225, 255, 251, 254 },
+ { 254, 220, 225, 254, 225, 255, 251, 254 },
+ { 254, 221, 226, 254, 225, 255, 251, 254 },
+ { 254, 222, 227, 255, 226, 255, 251, 254 },
+ { 254, 223, 227, 255, 226, 255, 251, 254 },
+ { 254, 224, 228, 255, 227, 255, 251, 254 },
+ { 254, 225, 229, 255, 227, 255, 251, 254 },
+ { 254, 226, 230, 255, 228, 255, 251, 254 },
+ { 254, 227, 230, 255, 229, 255, 251, 254 },
+ { 255, 228, 231, 255, 230, 255, 251, 254 },
+ { 255, 229, 232, 255, 230, 255, 251, 254 },
+ { 255, 230, 233, 255, 231, 255, 252, 254 },
+ { 255, 231, 234, 255, 231, 255, 252, 254 },
+ { 255, 232, 235, 255, 232, 255, 252, 254 },
+ { 255, 233, 236, 255, 232, 255, 252, 254 },
+ { 255, 235, 237, 255, 233, 255, 252, 254 },
+ { 255, 236, 238, 255, 234, 255, 252, 254 },
+ { 255, 238, 240, 255, 235, 255, 252, 255 },
+ { 255, 239, 241, 255, 235, 255, 252, 254 },
+ { 255, 241, 243, 255, 236, 255, 252, 254 },
+ { 255, 243, 245, 255, 237, 255, 252, 254 },
+ { 255, 246, 247, 255, 239, 255, 253, 255 },
+};
+
+const ProbContext ff_vp9_default_probs = {
+ { /* y_mode */
+ { 65, 32, 18, 144, 162, 194, 41, 51, 98 } /* bsize < 8x8 */,
+ { 132, 68, 18, 165, 217, 196, 45, 40, 78 } /* bsize < 16x16 */,
+ { 173, 80, 19, 176, 240, 193, 64, 35, 46 } /* bsize < 32x32 */,
+ { 221, 135, 38, 194, 248, 121, 96, 85, 29 } /* bsize >= 32x32 */
+ }, { /* uv_mode */
+ { 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */,
+ { 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */,
+ { 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */,
+ { 97, 5, 44, 131, 176, 139, 48, 68, 97 } /* y = d45 */,
+ { 83, 5, 42, 156, 111, 152, 26, 49, 152 } /* y = d135 */,
+ { 80, 5, 58, 178, 74, 83, 33, 62, 145 } /* y = d117 */,
+ { 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */,
+ { 77, 7, 64, 116, 132, 122, 37, 126, 120 } /* y = d63 */,
+ { 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d27 */,
+ { 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */
+ }, { /* filter */
+ { 235, 162, },
+ { 36, 255, },
+ { 34, 3, },
+ { 149, 144, },
+ }, { /* mv_mode */
+ { 2, 173, 34 }, // 0 = both zero mv
+ { 7, 145, 85 }, // 1 = one zero mv + one a predicted mv
+ { 7, 166, 63 }, // 2 = two predicted mvs
+ { 7, 94, 66 }, // 3 = one predicted/zero and one new mv
+ { 8, 64, 46 }, // 4 = two new mvs
+ { 17, 81, 31 }, // 5 = one intra neighbor + x
+ { 25, 29, 30 }, // 6 = two intra neighbors
+ }, { /* intra */
+ 9, 102, 187, 225
+ }, { /* comp */
+ 239, 183, 119, 96, 41
+ }, { /* single_ref */
+ { 33, 16 },
+ { 77, 74 },
+ { 142, 142 },
+ { 172, 170 },
+ { 238, 247 }
+ }, { /* comp_ref */
+ 50, 126, 123, 221, 226
+ }, { /* tx32p */
+ { 3, 136, 37, },
+ { 5, 52, 13, },
+ }, { /* tx16p */
+ { 20, 152, },
+ { 15, 101, },
+ }, { /* tx8p */
+ 100, 66
+ }, { /* skip */
+ 192, 128, 64
+ }, { /* mv_joint */
+ 32, 64, 96
+ }, {
+ { /* mv vertical component */
+ 128, /* sign */
+ { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 }, /* class */
+ 216, /* class0 */
+ { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 }, /* bits */
+ { /* class0_fp */
+ { 128, 128, 64 },
+ { 96, 112, 64 }
+ },
+ { 64, 96, 64 }, /* fp */
+ 160, /* class0_hp bit */
+ 128, /* hp */
+ }, { /* mv horizontal component */
+ 128, /* sign */
+ { 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 }, /* class */
+ 208, /* class0 */
+ { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 }, /* bits */
+ { /* class0_fp */
+ { 128, 128, 64 },
+ { 96, 112, 64 }
+ },
+ { 64, 96, 64 }, /* fp */
+ 160, /* class0_hp bit */
+ 128, /* hp */
+ }
+ }, { /* partition */
+ { /* 64x64 -> 32x32 */
+ { 222, 34, 30 } /* a/l both not split */,
+ { 72, 16, 44 } /* a split, l not split */,
+ { 58, 32, 12 } /* l split, a not split */,
+ { 10, 7, 6 } /* a/l both split */,
+ }, { /* 32x32 -> 16x16 */
+ { 177, 58, 59 } /* a/l both not split */,
+ { 68, 26, 63 } /* a split, l not split */,
+ { 52, 79, 25 } /* l split, a not split */,
+ { 17, 14, 12 } /* a/l both split */,
+ }, { /* 16x16 -> 8x8 */
+ { 174, 73, 87 } /* a/l both not split */,
+ { 92, 41, 83 } /* a split, l not split */,
+ { 82, 99, 50 } /* l split, a not split */,
+ { 53, 39, 39 } /* a/l both split */,
+ }, { /* 8x8 -> 4x4 */
+ { 199, 122, 141 } /* a/l both not split */,
+ { 147, 63, 159 } /* a split, l not split */,
+ { 148, 133, 118 } /* l split, a not split */,
+ { 121, 104, 114 } /* a/l both split */,
+ }
+ },
+};
+
+const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3] = {
+ { /* tx = 4x4 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 195, 29, 183 },
+ { 84, 49, 136 },
+ { 8, 42, 71 }
+ }, { /* Coeff Band 1 */
+ { 31, 107, 169 },
+ { 35, 99, 159 },
+ { 17, 82, 140 },
+ { 8, 66, 114 },
+ { 2, 44, 76 },
+ { 1, 19, 32 }
+ }, { /* Coeff Band 2 */
+ { 40, 132, 201 },
+ { 29, 114, 187 },
+ { 13, 91, 157 },
+ { 7, 75, 127 },
+ { 3, 58, 95 },
+ { 1, 28, 47 }
+ }, { /* Coeff Band 3 */
+ { 69, 142, 221 },
+ { 42, 122, 201 },
+ { 15, 91, 159 },
+ { 6, 67, 121 },
+ { 1, 42, 77 },
+ { 1, 17, 31 }
+ }, { /* Coeff Band 4 */
+ { 102, 148, 228 },
+ { 67, 117, 204 },
+ { 17, 82, 154 },
+ { 6, 59, 114 },
+ { 2, 39, 75 },
+ { 1, 15, 29 }
+ }, { /* Coeff Band 5 */
+ { 156, 57, 233 },
+ { 119, 57, 212 },
+ { 58, 48, 163 },
+ { 29, 40, 124 },
+ { 12, 30, 81 },
+ { 3, 12, 31 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 191, 107, 226 },
+ { 124, 117, 204 },
+ { 25, 99, 155 }
+ }, { /* Coeff Band 1 */
+ { 29, 148, 210 },
+ { 37, 126, 194 },
+ { 8, 93, 157 },
+ { 2, 68, 118 },
+ { 1, 39, 69 },
+ { 1, 17, 33 }
+ }, { /* Coeff Band 2 */
+ { 41, 151, 213 },
+ { 27, 123, 193 },
+ { 3, 82, 144 },
+ { 1, 58, 105 },
+ { 1, 32, 60 },
+ { 1, 13, 26 }
+ }, { /* Coeff Band 3 */
+ { 59, 159, 220 },
+ { 23, 126, 198 },
+ { 4, 88, 151 },
+ { 1, 66, 114 },
+ { 1, 38, 71 },
+ { 1, 18, 34 }
+ }, { /* Coeff Band 4 */
+ { 114, 136, 232 },
+ { 51, 114, 207 },
+ { 11, 83, 155 },
+ { 3, 56, 105 },
+ { 1, 33, 65 },
+ { 1, 17, 34 }
+ }, { /* Coeff Band 5 */
+ { 149, 65, 234 },
+ { 121, 57, 215 },
+ { 61, 49, 166 },
+ { 28, 36, 114 },
+ { 12, 25, 76 },
+ { 3, 16, 42 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 214, 49, 220 },
+ { 132, 63, 188 },
+ { 42, 65, 137 }
+ }, { /* Coeff Band 1 */
+ { 85, 137, 221 },
+ { 104, 131, 216 },
+ { 49, 111, 192 },
+ { 21, 87, 155 },
+ { 2, 49, 87 },
+ { 1, 16, 28 }
+ }, { /* Coeff Band 2 */
+ { 89, 163, 230 },
+ { 90, 137, 220 },
+ { 29, 100, 183 },
+ { 10, 70, 135 },
+ { 2, 42, 81 },
+ { 1, 17, 33 }
+ }, { /* Coeff Band 3 */
+ { 108, 167, 237 },
+ { 55, 133, 222 },
+ { 15, 97, 179 },
+ { 4, 72, 135 },
+ { 1, 45, 85 },
+ { 1, 19, 38 }
+ }, { /* Coeff Band 4 */
+ { 124, 146, 240 },
+ { 66, 124, 224 },
+ { 17, 88, 175 },
+ { 4, 58, 122 },
+ { 1, 36, 75 },
+ { 1, 18, 37 }
+ }, { /* Coeff Band 5 */
+ { 141, 79, 241 },
+ { 126, 70, 227 },
+ { 66, 58, 182 },
+ { 30, 44, 136 },
+ { 12, 34, 96 },
+ { 2, 20, 47 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 229, 99, 249 },
+ { 143, 111, 235 },
+ { 46, 109, 192 }
+ }, { /* Coeff Band 1 */
+ { 82, 158, 236 },
+ { 94, 146, 224 },
+ { 25, 117, 191 },
+ { 9, 87, 149 },
+ { 3, 56, 99 },
+ { 1, 33, 57 }
+ }, { /* Coeff Band 2 */
+ { 83, 167, 237 },
+ { 68, 145, 222 },
+ { 10, 103, 177 },
+ { 2, 72, 131 },
+ { 1, 41, 79 },
+ { 1, 20, 39 }
+ }, { /* Coeff Band 3 */
+ { 99, 167, 239 },
+ { 47, 141, 224 },
+ { 10, 104, 178 },
+ { 2, 73, 133 },
+ { 1, 44, 85 },
+ { 1, 22, 47 }
+ }, { /* Coeff Band 4 */
+ { 127, 145, 243 },
+ { 71, 129, 228 },
+ { 17, 93, 177 },
+ { 3, 61, 124 },
+ { 1, 41, 84 },
+ { 1, 21, 52 }
+ }, { /* Coeff Band 5 */
+ { 157, 78, 244 },
+ { 140, 72, 231 },
+ { 69, 58, 184 },
+ { 31, 44, 137 },
+ { 14, 38, 105 },
+ { 8, 23, 61 }
+ }
+ }
+ }
+ }, { /* tx = 8x8 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 125, 34, 187 },
+ { 52, 41, 133 },
+ { 6, 31, 56 }
+ }, { /* Coeff Band 1 */
+ { 37, 109, 153 },
+ { 51, 102, 147 },
+ { 23, 87, 128 },
+ { 8, 67, 101 },
+ { 1, 41, 63 },
+ { 1, 19, 29 }
+ }, { /* Coeff Band 2 */
+ { 31, 154, 185 },
+ { 17, 127, 175 },
+ { 6, 96, 145 },
+ { 2, 73, 114 },
+ { 1, 51, 82 },
+ { 1, 28, 45 }
+ }, { /* Coeff Band 3 */
+ { 23, 163, 200 },
+ { 10, 131, 185 },
+ { 2, 93, 148 },
+ { 1, 67, 111 },
+ { 1, 41, 69 },
+ { 1, 14, 24 }
+ }, { /* Coeff Band 4 */
+ { 29, 176, 217 },
+ { 12, 145, 201 },
+ { 3, 101, 156 },
+ { 1, 69, 111 },
+ { 1, 39, 63 },
+ { 1, 14, 23 }
+ }, { /* Coeff Band 5 */
+ { 57, 192, 233 },
+ { 25, 154, 215 },
+ { 6, 109, 167 },
+ { 3, 78, 118 },
+ { 1, 48, 69 },
+ { 1, 21, 29 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 202, 105, 245 },
+ { 108, 106, 216 },
+ { 18, 90, 144 }
+ }, { /* Coeff Band 1 */
+ { 33, 172, 219 },
+ { 64, 149, 206 },
+ { 14, 117, 177 },
+ { 5, 90, 141 },
+ { 2, 61, 95 },
+ { 1, 37, 57 }
+ }, { /* Coeff Band 2 */
+ { 33, 179, 220 },
+ { 11, 140, 198 },
+ { 1, 89, 148 },
+ { 1, 60, 104 },
+ { 1, 33, 57 },
+ { 1, 12, 21 }
+ }, { /* Coeff Band 3 */
+ { 30, 181, 221 },
+ { 8, 141, 198 },
+ { 1, 87, 145 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 20 }
+ }, { /* Coeff Band 4 */
+ { 32, 186, 224 },
+ { 7, 142, 198 },
+ { 1, 86, 143 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 22 }
+ }, { /* Coeff Band 5 */
+ { 57, 192, 227 },
+ { 20, 143, 204 },
+ { 3, 96, 154 },
+ { 1, 68, 112 },
+ { 1, 42, 69 },
+ { 1, 19, 32 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 212, 35, 215 },
+ { 113, 47, 169 },
+ { 29, 48, 105 }
+ }, { /* Coeff Band 1 */
+ { 74, 129, 203 },
+ { 106, 120, 203 },
+ { 49, 107, 178 },
+ { 19, 84, 144 },
+ { 4, 50, 84 },
+ { 1, 15, 25 }
+ }, { /* Coeff Band 2 */
+ { 71, 172, 217 },
+ { 44, 141, 209 },
+ { 15, 102, 173 },
+ { 6, 76, 133 },
+ { 2, 51, 89 },
+ { 1, 24, 42 }
+ }, { /* Coeff Band 3 */
+ { 64, 185, 231 },
+ { 31, 148, 216 },
+ { 8, 103, 175 },
+ { 3, 74, 131 },
+ { 1, 46, 81 },
+ { 1, 18, 30 }
+ }, { /* Coeff Band 4 */
+ { 65, 196, 235 },
+ { 25, 157, 221 },
+ { 5, 105, 174 },
+ { 1, 67, 120 },
+ { 1, 38, 69 },
+ { 1, 15, 30 }
+ }, { /* Coeff Band 5 */
+ { 65, 204, 238 },
+ { 30, 156, 224 },
+ { 7, 107, 177 },
+ { 2, 70, 124 },
+ { 1, 42, 73 },
+ { 1, 18, 34 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 225, 86, 251 },
+ { 144, 104, 235 },
+ { 42, 99, 181 }
+ }, { /* Coeff Band 1 */
+ { 85, 175, 239 },
+ { 112, 165, 229 },
+ { 29, 136, 200 },
+ { 12, 103, 162 },
+ { 6, 77, 123 },
+ { 2, 53, 84 }
+ }, { /* Coeff Band 2 */
+ { 75, 183, 239 },
+ { 30, 155, 221 },
+ { 3, 106, 171 },
+ { 1, 74, 128 },
+ { 1, 44, 76 },
+ { 1, 17, 28 }
+ }, { /* Coeff Band 3 */
+ { 73, 185, 240 },
+ { 27, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 75, 127 },
+ { 1, 42, 73 },
+ { 1, 17, 29 }
+ }, { /* Coeff Band 4 */
+ { 62, 190, 238 },
+ { 21, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 72, 122 },
+ { 1, 40, 71 },
+ { 1, 18, 32 }
+ }, { /* Coeff Band 5 */
+ { 61, 199, 240 },
+ { 27, 161, 226 },
+ { 4, 113, 180 },
+ { 1, 76, 129 },
+ { 1, 46, 80 },
+ { 1, 23, 41 }
+ }
+ }
+ }
+ }, { /* tx = 16x16 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 27, 153 },
+ { 5, 30, 95 },
+ { 1, 16, 30 }
+ }, { /* Coeff Band 1 */
+ { 50, 75, 127 },
+ { 57, 75, 124 },
+ { 27, 67, 108 },
+ { 10, 54, 86 },
+ { 1, 33, 52 },
+ { 1, 12, 18 }
+ }, { /* Coeff Band 2 */
+ { 43, 125, 151 },
+ { 26, 108, 148 },
+ { 7, 83, 122 },
+ { 2, 59, 89 },
+ { 1, 38, 60 },
+ { 1, 17, 27 }
+ }, { /* Coeff Band 3 */
+ { 23, 144, 163 },
+ { 13, 112, 154 },
+ { 2, 75, 117 },
+ { 1, 50, 81 },
+ { 1, 31, 51 },
+ { 1, 14, 23 }
+ }, { /* Coeff Band 4 */
+ { 18, 162, 185 },
+ { 6, 123, 171 },
+ { 1, 78, 125 },
+ { 1, 51, 86 },
+ { 1, 31, 54 },
+ { 1, 14, 23 }
+ }, { /* Coeff Band 5 */
+ { 15, 199, 227 },
+ { 3, 150, 204 },
+ { 1, 91, 146 },
+ { 1, 55, 95 },
+ { 1, 30, 53 },
+ { 1, 11, 20 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 19, 55, 240 },
+ { 19, 59, 196 },
+ { 3, 52, 105 }
+ }, { /* Coeff Band 1 */
+ { 41, 166, 207 },
+ { 104, 153, 199 },
+ { 31, 123, 181 },
+ { 14, 101, 152 },
+ { 5, 72, 106 },
+ { 1, 36, 52 }
+ }, { /* Coeff Band 2 */
+ { 35, 176, 211 },
+ { 12, 131, 190 },
+ { 2, 88, 144 },
+ { 1, 60, 101 },
+ { 1, 36, 60 },
+ { 1, 16, 28 }
+ }, { /* Coeff Band 3 */
+ { 28, 183, 213 },
+ { 8, 134, 191 },
+ { 1, 86, 142 },
+ { 1, 56, 96 },
+ { 1, 30, 53 },
+ { 1, 12, 20 }
+ }, { /* Coeff Band 4 */
+ { 20, 190, 215 },
+ { 4, 135, 192 },
+ { 1, 84, 139 },
+ { 1, 53, 91 },
+ { 1, 28, 49 },
+ { 1, 11, 20 }
+ }, { /* Coeff Band 5 */
+ { 13, 196, 216 },
+ { 2, 137, 192 },
+ { 1, 86, 143 },
+ { 1, 57, 99 },
+ { 1, 32, 56 },
+ { 1, 13, 24 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 211, 29, 217 },
+ { 96, 47, 156 },
+ { 22, 43, 87 }
+ }, { /* Coeff Band 1 */
+ { 78, 120, 193 },
+ { 111, 116, 186 },
+ { 46, 102, 164 },
+ { 15, 80, 128 },
+ { 2, 49, 76 },
+ { 1, 18, 28 }
+ }, { /* Coeff Band 2 */
+ { 71, 161, 203 },
+ { 42, 132, 192 },
+ { 10, 98, 150 },
+ { 3, 69, 109 },
+ { 1, 44, 70 },
+ { 1, 18, 29 }
+ }, { /* Coeff Band 3 */
+ { 57, 186, 211 },
+ { 30, 140, 196 },
+ { 4, 93, 146 },
+ { 1, 62, 102 },
+ { 1, 38, 65 },
+ { 1, 16, 27 }
+ }, { /* Coeff Band 4 */
+ { 47, 199, 217 },
+ { 14, 145, 196 },
+ { 1, 88, 142 },
+ { 1, 57, 98 },
+ { 1, 36, 62 },
+ { 1, 15, 26 }
+ }, { /* Coeff Band 5 */
+ { 26, 219, 229 },
+ { 5, 155, 207 },
+ { 1, 94, 151 },
+ { 1, 60, 104 },
+ { 1, 36, 62 },
+ { 1, 16, 28 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 233, 29, 248 },
+ { 146, 47, 220 },
+ { 43, 52, 140 }
+ }, { /* Coeff Band 1 */
+ { 100, 163, 232 },
+ { 179, 161, 222 },
+ { 63, 142, 204 },
+ { 37, 113, 174 },
+ { 26, 89, 137 },
+ { 18, 68, 97 }
+ }, { /* Coeff Band 2 */
+ { 85, 181, 230 },
+ { 32, 146, 209 },
+ { 7, 100, 164 },
+ { 3, 71, 121 },
+ { 1, 45, 77 },
+ { 1, 18, 30 }
+ }, { /* Coeff Band 3 */
+ { 65, 187, 230 },
+ { 20, 148, 207 },
+ { 2, 97, 159 },
+ { 1, 68, 116 },
+ { 1, 40, 70 },
+ { 1, 14, 29 }
+ }, { /* Coeff Band 4 */
+ { 40, 194, 227 },
+ { 8, 147, 204 },
+ { 1, 94, 155 },
+ { 1, 65, 112 },
+ { 1, 39, 66 },
+ { 1, 14, 26 }
+ }, { /* Coeff Band 5 */
+ { 16, 208, 228 },
+ { 3, 151, 207 },
+ { 1, 98, 160 },
+ { 1, 67, 117 },
+ { 1, 41, 74 },
+ { 1, 17, 31 }
+ }
+ }
+ }
+ }, { /* tx = 32x32 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 17, 38, 140 },
+ { 7, 34, 80 },
+ { 1, 17, 29 }
+ }, { /* Coeff Band 1 */
+ { 37, 75, 128 },
+ { 41, 76, 128 },
+ { 26, 66, 116 },
+ { 12, 52, 94 },
+ { 2, 32, 55 },
+ { 1, 10, 16 }
+ }, { /* Coeff Band 2 */
+ { 50, 127, 154 },
+ { 37, 109, 152 },
+ { 16, 82, 121 },
+ { 5, 59, 85 },
+ { 1, 35, 54 },
+ { 1, 13, 20 }
+ }, { /* Coeff Band 3 */
+ { 40, 142, 167 },
+ { 17, 110, 157 },
+ { 2, 71, 112 },
+ { 1, 44, 72 },
+ { 1, 27, 45 },
+ { 1, 11, 17 }
+ }, { /* Coeff Band 4 */
+ { 30, 175, 188 },
+ { 9, 124, 169 },
+ { 1, 74, 116 },
+ { 1, 48, 78 },
+ { 1, 30, 49 },
+ { 1, 11, 18 }
+ }, { /* Coeff Band 5 */
+ { 10, 222, 223 },
+ { 2, 150, 194 },
+ { 1, 83, 128 },
+ { 1, 48, 79 },
+ { 1, 27, 45 },
+ { 1, 11, 17 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 36, 41, 235 },
+ { 29, 36, 193 },
+ { 10, 27, 111 }
+ }, { /* Coeff Band 1 */
+ { 85, 165, 222 },
+ { 177, 162, 215 },
+ { 110, 135, 195 },
+ { 57, 113, 168 },
+ { 23, 83, 120 },
+ { 10, 49, 61 }
+ }, { /* Coeff Band 2 */
+ { 85, 190, 223 },
+ { 36, 139, 200 },
+ { 5, 90, 146 },
+ { 1, 60, 103 },
+ { 1, 38, 65 },
+ { 1, 18, 30 }
+ }, { /* Coeff Band 3 */
+ { 72, 202, 223 },
+ { 23, 141, 199 },
+ { 2, 86, 140 },
+ { 1, 56, 97 },
+ { 1, 36, 61 },
+ { 1, 16, 27 }
+ }, { /* Coeff Band 4 */
+ { 55, 218, 225 },
+ { 13, 145, 200 },
+ { 1, 86, 141 },
+ { 1, 57, 99 },
+ { 1, 35, 61 },
+ { 1, 13, 22 }
+ }, { /* Coeff Band 5 */
+ { 15, 235, 212 },
+ { 1, 132, 184 },
+ { 1, 84, 139 },
+ { 1, 57, 97 },
+ { 1, 34, 56 },
+ { 1, 14, 23 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 181, 21, 201 },
+ { 61, 37, 123 },
+ { 10, 38, 71 }
+ }, { /* Coeff Band 1 */
+ { 47, 106, 172 },
+ { 95, 104, 173 },
+ { 42, 93, 159 },
+ { 18, 77, 131 },
+ { 4, 50, 81 },
+ { 1, 17, 23 }
+ }, { /* Coeff Band 2 */
+ { 62, 147, 199 },
+ { 44, 130, 189 },
+ { 28, 102, 154 },
+ { 18, 75, 115 },
+ { 2, 44, 65 },
+ { 1, 12, 19 }
+ }, { /* Coeff Band 3 */
+ { 55, 153, 210 },
+ { 24, 130, 194 },
+ { 3, 93, 146 },
+ { 1, 61, 97 },
+ { 1, 31, 50 },
+ { 1, 10, 16 }
+ }, { /* Coeff Band 4 */
+ { 49, 186, 223 },
+ { 17, 148, 204 },
+ { 1, 96, 142 },
+ { 1, 53, 83 },
+ { 1, 26, 44 },
+ { 1, 11, 17 }
+ }, { /* Coeff Band 5 */
+ { 13, 217, 212 },
+ { 2, 136, 180 },
+ { 1, 78, 124 },
+ { 1, 50, 83 },
+ { 1, 29, 49 },
+ { 1, 14, 23 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 197, 13, 247 },
+ { 82, 17, 222 },
+ { 25, 17, 162 }
+ }, { /* Coeff Band 1 */
+ { 126, 186, 247 },
+ { 234, 191, 243 },
+ { 176, 177, 234 },
+ { 104, 158, 220 },
+ { 66, 128, 186 },
+ { 55, 90, 137 }
+ }, { /* Coeff Band 2 */
+ { 111, 197, 242 },
+ { 46, 158, 219 },
+ { 9, 104, 171 },
+ { 2, 65, 125 },
+ { 1, 44, 80 },
+ { 1, 17, 91 }
+ }, { /* Coeff Band 3 */
+ { 104, 208, 245 },
+ { 39, 168, 224 },
+ { 3, 109, 162 },
+ { 1, 79, 124 },
+ { 1, 50, 102 },
+ { 1, 43, 102 }
+ }, { /* Coeff Band 4 */
+ { 84, 220, 246 },
+ { 31, 177, 231 },
+ { 2, 115, 180 },
+ { 1, 79, 134 },
+ { 1, 55, 77 },
+ { 1, 60, 79 }
+ }, { /* Coeff Band 5 */
+ { 43, 243, 240 },
+ { 8, 180, 217 },
+ { 1, 115, 166 },
+ { 1, 84, 121 },
+ { 1, 51, 67 },
+ { 1, 16, 6 }
+ }
+ }
+ }
+ }
+};
+
+const int8_t ff_vp9_mv_joint_tree[3][2] = {
+ { -MV_JOINT_ZERO, 1 }, // '0'
+ { -MV_JOINT_H, 2 }, // '10'
+ { -MV_JOINT_V, -MV_JOINT_HV }, // '11x'
+};
+
+const int8_t ff_vp9_mv_class_tree[10][2] = {
+ { -0, 1 }, // '0'
+ { -1, 2 }, // '10'
+ { 3, 4 },
+ { -2, -3 }, // '110x'
+ { 5, 6 },
+ { -4, -5 }, // '1110x'
+ { -6, 7 }, // '11110'
+ { 8, 9 },
+ { -7, -8 }, // '111110x'
+ { -9, -10 }, // '111111x'
+};
+
+const int8_t ff_vp9_mv_fp_tree[3][2] = {
+ { -0, 1 }, // '0'
+ { -1, 2 }, // '10'
+ { -2, -3 }, // '11x'
+};
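
Editor's note: the ff_vp9_*_tree tables above (and the partition/intra-mode/filter trees declared in vp9data.h) share one encoding, which the bit-string comments ('0', '10', '11x') describe: each row is a pair of branches, a positive entry is the index of the next row, and a non-positive entry is the negated leaf symbol. A self-contained sketch of walking such a tree; read_bit() and its ctx argument are placeholders standing in for the range coder, not this decoder's API:

    #include <stdint.h>

    /* read_bit() stands in for the boolean/range decoder: it returns 0 or 1,
     * taking branch 0 with probability probs[node]/256. */
    typedef int (*read_bit_fn)(void *ctx, uint8_t prob);

    static int decode_tree(const int8_t (*tree)[2], const uint8_t *probs,
                           read_bit_fn read_bit, void *ctx)
    {
        int i = 0;

        do {
            i = tree[i][read_bit(ctx, probs[i])]; /* follow branch 0 or 1 */
        } while (i > 0);                          /* positive: interior node */

        return -i;                                /* non-positive: -symbol */
    }

For example, walking ff_vp9_mv_joint_tree with the three mv_joint probabilities yields MV_JOINT_ZERO after bit 0, MV_JOINT_H after bits 1,0, and MV_JOINT_V / MV_JOINT_HV after 1,1,x.
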
diff --git a/media/ffvpx/libavcodec/vp9data.h b/media/ffvpx/libavcodec/vp9data.h
new file mode 100644
index 0000000000..086dbdec06
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9data.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP9DATA_H
+#define AVCODEC_VP9DATA_H
+
+#include <stdint.h>
+
+#include "vp9dec.h"
+
+extern const uint8_t ff_vp9_bwh_tab[2][N_BS_SIZES][2];
+extern const int8_t ff_vp9_partition_tree[3][2];
+extern const uint8_t ff_vp9_default_kf_partition_probs[4][4][3];
+extern const int8_t ff_vp9_segmentation_tree[7][2];
+extern const int8_t ff_vp9_intramode_tree[9][2];
+extern const uint8_t ff_vp9_default_kf_ymode_probs[10][10][9];
+extern const uint8_t ff_vp9_default_kf_uvmode_probs[10][9];
+extern const int8_t ff_vp9_inter_mode_tree[3][2];
+extern const int8_t ff_vp9_filter_tree[2][2];
+extern const enum FilterMode ff_vp9_filter_lut[3];
+extern const int16_t ff_vp9_dc_qlookup[3][256];
+extern const int16_t ff_vp9_ac_qlookup[3][256];
+extern const enum TxfmType ff_vp9_intra_txfm_type[14];
+extern const int16_t ff_vp9_default_scan_4x4[16];
+extern const int16_t ff_vp9_col_scan_4x4[16];
+extern const int16_t ff_vp9_row_scan_4x4[16];
+extern const int16_t ff_vp9_default_scan_8x8[64];
+extern const int16_t ff_vp9_col_scan_8x8[64];
+extern const int16_t ff_vp9_row_scan_8x8[64];
+extern const int16_t ff_vp9_default_scan_16x16[256];
+extern const int16_t ff_vp9_col_scan_16x16[256];
+extern const int16_t ff_vp9_row_scan_16x16[256];
+extern const int16_t ff_vp9_default_scan_32x32[1024];
+extern const int16_t * const ff_vp9_scans[5][4];
+extern const int16_t ff_vp9_default_scan_4x4_nb[16][2];
+extern const int16_t ff_vp9_col_scan_4x4_nb[16][2];
+extern const int16_t ff_vp9_row_scan_4x4_nb[16][2];
+extern const int16_t ff_vp9_default_scan_8x8_nb[64][2];
+extern const int16_t ff_vp9_col_scan_8x8_nb[64][2];
+extern const int16_t ff_vp9_row_scan_8x8_nb[64][2];
+extern const int16_t ff_vp9_default_scan_16x16_nb[256][2];
+extern const int16_t ff_vp9_col_scan_16x16_nb[256][2];
+extern const int16_t ff_vp9_row_scan_16x16_nb[256][2];
+extern const int16_t ff_vp9_default_scan_32x32_nb[1024][2];
+extern const int16_t (* const ff_vp9_scans_nb[5][4])[2];
+extern const uint8_t ff_vp9_model_pareto8[256][8];
+extern const ProbContext ff_vp9_default_probs;
+extern const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3];
+extern const int8_t ff_vp9_mv_joint_tree[3][2];
+extern const int8_t ff_vp9_mv_class_tree[10][2];
+extern const int8_t ff_vp9_mv_fp_tree[3][2];
+
+#endif /* AVCODEC_VP9DATA_H */
diff --git a/media/ffvpx/libavcodec/vp9dec.h b/media/ffvpx/libavcodec/vp9dec.h
new file mode 100644
index 0000000000..de7aba0458
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9dec.h
@@ -0,0 +1,259 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP9DEC_H
+#define AVCODEC_VP9DEC_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include "libavutil/buffer.h"
+#include "libavutil/mem_internal.h"
+#include "libavutil/thread.h"
+#include "libavutil/internal.h"
+
+#include "get_bits.h"
+#include "videodsp.h"
+#include "vp9.h"
+#include "vp9dsp.h"
+#include "vp9shared.h"
+#include "vpx_rac.h"
+
+#define REF_INVALID_SCALE 0xFFFF
+
+enum MVJoint {
+ MV_JOINT_ZERO,
+ MV_JOINT_H,
+ MV_JOINT_V,
+ MV_JOINT_HV,
+};
+
+typedef struct ProbContext {
+ uint8_t y_mode[4][9];
+ uint8_t uv_mode[10][9];
+ uint8_t filter[4][2];
+ uint8_t mv_mode[7][3];
+ uint8_t intra[4];
+ uint8_t comp[5];
+ uint8_t single_ref[5][2];
+ uint8_t comp_ref[5];
+ uint8_t tx32p[2][3];
+ uint8_t tx16p[2][2];
+ uint8_t tx8p[2];
+ uint8_t skip[3];
+ uint8_t mv_joint[3];
+ struct {
+ uint8_t sign;
+ uint8_t classes[10];
+ uint8_t class0;
+ uint8_t bits[10];
+ uint8_t class0_fp[2][3];
+ uint8_t fp[3];
+ uint8_t class0_hp;
+ uint8_t hp;
+ } mv_comp[2];
+ uint8_t partition[4][4][3];
+} ProbContext;
+
+typedef struct VP9Filter {
+ uint8_t level[8 * 8];
+ uint8_t /* bit=col */ mask[2 /* 0=y, 1=uv */][2 /* 0=col, 1=row */]
+ [8 /* rows */][4 /* 0=16, 1=8, 2=4, 3=inner4 */];
+} VP9Filter;
+
+typedef struct VP9Block {
+ uint8_t seg_id, intra, comp, ref[2], mode[4], uvmode, skip;
+ enum FilterMode filter;
+ VP9mv mv[4 /* b_idx */][2 /* ref */];
+ enum BlockSize bs;
+ enum TxfmMode tx, uvtx;
+ enum BlockLevel bl;
+ enum BlockPartition bp;
+} VP9Block;
+
+typedef struct VP9TileData VP9TileData;
+
+typedef struct VP9Context {
+ VP9SharedContext s;
+ VP9TileData *td;
+
+ VP9DSPContext dsp;
+ VideoDSPContext vdsp;
+ GetBitContext gb;
+ VPXRangeCoder c;
+ int pass, active_tile_cols;
+
+#if HAVE_THREADS
+ pthread_mutex_t progress_mutex;
+ pthread_cond_t progress_cond;
+ atomic_int *entries;
+ unsigned pthread_init_cnt;
+#endif
+
+ uint8_t ss_h, ss_v;
+ uint8_t last_bpp, bpp_index, bytesperpixel;
+ uint8_t last_keyframe;
+ // sb_cols/rows, rows/cols and last_fmt are used for allocating all internal
+ // arrays, and are thus per-thread. w/h and gf_fmt are synced between threads
+ // and are therefore per-stream. pix_fmt represents the value in the header
+ // of the currently processed frame.
+ int w, h;
+ enum AVPixelFormat pix_fmt, last_fmt, gf_fmt;
+ unsigned sb_cols, sb_rows, rows, cols;
+ ThreadFrame next_refs[8];
+
+ struct {
+ uint8_t lim_lut[64];
+ uint8_t mblim_lut[64];
+ } filter_lut;
+ struct {
+ ProbContext p;
+ uint8_t coef[4][2][2][6][6][3];
+ } prob_ctx[4];
+ struct {
+ ProbContext p;
+ uint8_t coef[4][2][2][6][6][11];
+ } prob;
+
+ // contextual (above) cache
+ uint8_t *above_partition_ctx;
+ uint8_t *above_mode_ctx;
+ // FIXME maybe merge some of the below in a flags field?
+ uint8_t *above_y_nnz_ctx;
+ uint8_t *above_uv_nnz_ctx[2];
+ uint8_t *above_skip_ctx; // 1bit
+ uint8_t *above_txfm_ctx; // 2bit
+ uint8_t *above_segpred_ctx; // 1bit
+ uint8_t *above_intra_ctx; // 1bit
+ uint8_t *above_comp_ctx; // 1bit
+ uint8_t *above_ref_ctx; // 2bit
+ uint8_t *above_filter_ctx;
+ VP9mv (*above_mv_ctx)[2];
+
+ // whole-frame cache
+ uint8_t *intra_pred_data[3];
+ VP9Filter *lflvl;
+
+ // block reconstruction intermediates
+ int block_alloc_using_2pass;
+ uint16_t mvscale[3][2];
+ uint8_t mvstep[3][2];
+
+ // frame specific buffer pools
+ AVBufferPool *frame_extradata_pool;
+ int frame_extradata_pool_size;
+} VP9Context;
+
+struct VP9TileData {
+ const VP9Context *s;
+ VPXRangeCoder *c_b;
+ VPXRangeCoder *c;
+ int row, row7, col, col7;
+ uint8_t *dst[3];
+ ptrdiff_t y_stride, uv_stride;
+ VP9Block *b_base, *b;
+ unsigned tile_col_start;
+
+ struct {
+ unsigned y_mode[4][10];
+ unsigned uv_mode[10][10];
+ unsigned filter[4][3];
+ unsigned mv_mode[7][4];
+ unsigned intra[4][2];
+ unsigned comp[5][2];
+ unsigned single_ref[5][2][2];
+ unsigned comp_ref[5][2];
+ unsigned tx32p[2][4];
+ unsigned tx16p[2][3];
+ unsigned tx8p[2][2];
+ unsigned skip[3][2];
+ unsigned mv_joint[4];
+ struct {
+ unsigned sign[2];
+ unsigned classes[11];
+ unsigned class0[2];
+ unsigned bits[10][2];
+ unsigned class0_fp[2][4];
+ unsigned fp[4];
+ unsigned class0_hp[2];
+ unsigned hp[2];
+ } mv_comp[2];
+ unsigned partition[4][4][4];
+ unsigned coef[4][2][2][6][6][3];
+ unsigned eob[4][2][2][6][6][2];
+ } counts;
+
+ // whole-frame cache
+ DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer)[135 * 144 * 2];
+
+ // contextual (left) cache
+ DECLARE_ALIGNED(16, uint8_t, left_y_nnz_ctx)[16];
+ DECLARE_ALIGNED(16, uint8_t, left_mode_ctx)[16];
+ DECLARE_ALIGNED(16, VP9mv, left_mv_ctx)[16][2];
+ DECLARE_ALIGNED(16, uint8_t, left_uv_nnz_ctx)[2][16];
+ DECLARE_ALIGNED(8, uint8_t, left_partition_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_skip_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_txfm_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_segpred_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_intra_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_comp_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_ref_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_filter_ctx)[8];
+ // block reconstruction intermediates
+ DECLARE_ALIGNED(32, uint8_t, tmp_y)[64 * 64 * 2];
+ DECLARE_ALIGNED(32, uint8_t, tmp_uv)[2][64 * 64 * 2];
+ struct { int x, y; } min_mv, max_mv;
+ int16_t *block_base, *block, *uvblock_base[2], *uvblock[2];
+ uint8_t *eob_base, *uveob_base[2], *eob, *uveob[2];
+
+ // error message
+ int error_info;
+ struct {
+ unsigned int row:13;
+ unsigned int col:13;
+ unsigned int block_size_idx_x:2;
+ unsigned int block_size_idx_y:2;
+ } *block_structure;
+ unsigned int nb_block_structure;
+};
+
+void ff_vp9_fill_mv(VP9TileData *td, VP9mv *mv, int mode, int sb);
+
+void ff_vp9_adapt_probs(VP9Context *s);
+
+void ff_vp9_decode_block(VP9TileData *td, int row, int col,
+ VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
+ enum BlockLevel bl, enum BlockPartition bp);
+
+void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl,
+ int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff);
+
+void ff_vp9_intra_recon_8bpp(VP9TileData *td,
+ ptrdiff_t y_off, ptrdiff_t uv_off);
+void ff_vp9_intra_recon_16bpp(VP9TileData *td,
+ ptrdiff_t y_off, ptrdiff_t uv_off);
+void ff_vp9_inter_recon_8bpp(VP9TileData *td);
+void ff_vp9_inter_recon_16bpp(VP9TileData *td);
+
+#endif /* AVCODEC_VP9DEC_H */
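
Editor's note: the per-symbol counters in VP9TileData.counts mirror the probability fields of ProbContext; after a frame is decoded, ff_vp9_adapt_probs() folds the observed counts back into the probabilities (backward adaptation). A rough, self-contained sketch of that kind of count-driven update; the 20-sample cap and the at-most-50% blend weight are illustrative assumptions, not the constants the decoder actually uses:

    #include <stdint.h>

    /* Blend a prior probability toward the frequency observed this frame. */
    static uint8_t adapt_prob_sketch(uint8_t prev, unsigned count0, unsigned count1)
    {
        unsigned total = count0 + count1;
        int observed, weight;

        if (!total)
            return prev;                          /* symbol never coded: keep prior */

        observed = (int)((count0 * 256 + total / 2) / total);
        if (observed < 1)   observed = 1;         /* keep probability in 1..255 */
        if (observed > 255) observed = 255;

        /* weight grows with the sample count, capped here at 128/256 (assumed) */
        weight = total >= 20 ? 128 : (int)(total * 128 / 20);
        return (uint8_t)(prev + ((observed - prev) * weight + 128) / 256);
    }
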
diff --git a/media/ffvpx/libavcodec/vp9dsp.c b/media/ffvpx/libavcodec/vp9dsp.c
new file mode 100644
index 0000000000..d8ddf74d4f
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9dsp.c
@@ -0,0 +1,110 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/mem_internal.h"
+
+#include "vp9dsp.h"
+
+const DECLARE_ALIGNED(16, int16_t, ff_vp9_subpel_filters)[3][16][8] = {
+ [FILTER_8TAP_REGULAR] = {
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { 0, 1, -5, 126, 8, -3, 1, 0 },
+ { -1, 3, -10, 122, 18, -6, 2, 0 },
+ { -1, 4, -13, 118, 27, -9, 3, -1 },
+ { -1, 4, -16, 112, 37, -11, 4, -1 },
+ { -1, 5, -18, 105, 48, -14, 4, -1 },
+ { -1, 5, -19, 97, 58, -16, 5, -1 },
+ { -1, 6, -19, 88, 68, -18, 5, -1 },
+ { -1, 6, -19, 78, 78, -19, 6, -1 },
+ { -1, 5, -18, 68, 88, -19, 6, -1 },
+ { -1, 5, -16, 58, 97, -19, 5, -1 },
+ { -1, 4, -14, 48, 105, -18, 5, -1 },
+ { -1, 4, -11, 37, 112, -16, 4, -1 },
+ { -1, 3, -9, 27, 118, -13, 4, -1 },
+ { 0, 2, -6, 18, 122, -10, 3, -1 },
+ { 0, 1, -3, 8, 126, -5, 1, 0 },
+ }, [FILTER_8TAP_SHARP] = {
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { -1, 3, -7, 127, 8, -3, 1, 0 },
+ { -2, 5, -13, 125, 17, -6, 3, -1 },
+ { -3, 7, -17, 121, 27, -10, 5, -2 },
+ { -4, 9, -20, 115, 37, -13, 6, -2 },
+ { -4, 10, -23, 108, 48, -16, 8, -3 },
+ { -4, 10, -24, 100, 59, -19, 9, -3 },
+ { -4, 11, -24, 90, 70, -21, 10, -4 },
+ { -4, 11, -23, 80, 80, -23, 11, -4 },
+ { -4, 10, -21, 70, 90, -24, 11, -4 },
+ { -3, 9, -19, 59, 100, -24, 10, -4 },
+ { -3, 8, -16, 48, 108, -23, 10, -4 },
+ { -2, 6, -13, 37, 115, -20, 9, -4 },
+ { -2, 5, -10, 27, 121, -17, 7, -3 },
+ { -1, 3, -6, 17, 125, -13, 5, -2 },
+ { 0, 1, -3, 8, 127, -7, 3, -1 },
+ }, [FILTER_8TAP_SMOOTH] = {
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { -3, -1, 32, 64, 38, 1, -3, 0 },
+ { -2, -2, 29, 63, 41, 2, -3, 0 },
+ { -2, -2, 26, 63, 43, 4, -4, 0 },
+ { -2, -3, 24, 62, 46, 5, -4, 0 },
+ { -2, -3, 21, 60, 49, 7, -4, 0 },
+ { -1, -4, 18, 59, 51, 9, -4, 0 },
+ { -1, -4, 16, 57, 53, 12, -4, -1 },
+ { -1, -4, 14, 55, 55, 14, -4, -1 },
+ { -1, -4, 12, 53, 57, 16, -4, -1 },
+ { 0, -4, 9, 51, 59, 18, -4, -1 },
+ { 0, -4, 7, 49, 60, 21, -3, -2 },
+ { 0, -4, 5, 46, 62, 24, -3, -2 },
+ { 0, -4, 4, 43, 63, 26, -2, -2 },
+ { 0, -3, 2, 41, 63, 29, -2, -2 },
+ { 0, -3, 1, 38, 64, 32, -1, -3 },
+ }
+};
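
Editor's note: each row of ff_vp9_subpel_filters holds eight taps that sum to 128, with sixteen rows per filter (one per sub-pixel phase). A minimal sketch of forming one 8 bpp output sample from a row; the caller must supply three samples of left context and four of right context, and this mirrors, but is not, the decoder's own filtering code further down in vp9dsp_template.c:

    #include <stdint.h>

    /* Apply one 8-tap row to 8 bpp samples centred between src[0] and src[1];
     * src[-3..4] must be valid.  The (+64) >> 7 renormalisation matches the
     * taps summing to 128. */
    static uint8_t filter_8tap_sample(const uint8_t *src, const int16_t *taps)
    {
        int sum = 0;

        for (int i = 0; i < 8; i++)
            sum += taps[i] * src[i - 3];

        sum = (sum + 64) >> 7;
        return sum < 0 ? 0 : sum > 255 ? 255 : (uint8_t)sum;
    }
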
+
+
+av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
+{
+ if (bpp == 8) {
+ ff_vp9dsp_init_8(dsp);
+ } else if (bpp == 10) {
+ ff_vp9dsp_init_10(dsp);
+ } else {
+ av_assert0(bpp == 12);
+ ff_vp9dsp_init_12(dsp);
+ }
+
+#if ARCH_AARCH64
+ ff_vp9dsp_init_aarch64(dsp, bpp);
+#elif ARCH_ARM
+ ff_vp9dsp_init_arm(dsp, bpp);
+#elif ARCH_X86
+ ff_vp9dsp_init_x86(dsp, bpp, bitexact);
+#elif ARCH_MIPS
+ ff_vp9dsp_init_mips(dsp, bpp);
+#elif ARCH_LOONGARCH
+ ff_vp9dsp_init_loongarch(dsp, bpp);
+#endif
+}
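
Editor's note: ff_vp9dsp_init() above fills the function-pointer tables with C implementations for the requested bit depth, and the per-architecture init functions can then overwrite individual entries with optimised versions. A hedged sketch of how a caller dispatches through the context afterwards; TX_4X4 and VERT_PRED are assumed to be the enum names provided by vp9.h in this tree:

    #include "vp9dsp.h"

    /* Illustrative wrapper only: predict one 4x4 vertical intra block through
     * the DSP table.  dst/left/top follow the alignment rules documented in
     * vp9dsp.h. */
    static void predict_vert_4x4(const VP9DSPContext *dsp, uint8_t *dst,
                                 ptrdiff_t stride,
                                 const uint8_t *left, const uint8_t *top)
    {
        dsp->intra_pred[TX_4X4][VERT_PRED](dst, stride, left, top);
    }
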
diff --git a/media/ffvpx/libavcodec/vp9dsp.h b/media/ffvpx/libavcodec/vp9dsp.h
new file mode 100644
index 0000000000..be0ac0b181
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9dsp.h
@@ -0,0 +1,138 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP9DSP_H
+#define AVCODEC_VP9DSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavcodec/vp9.h"
+#include "libavutil/attributes_internal.h"
+
+typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *ref, ptrdiff_t ref_stride,
+ int h, int mx, int my);
+typedef void (*vp9_scaled_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *ref, ptrdiff_t ref_stride,
+ int h, int mx, int my, int dx, int dy);
+
+typedef struct VP9DSPContext {
+ /*
+ * dimension 1: 0=4x4, 1=8x8, 2=16x16, 3=32x32
+ * dimension 2: intra prediction modes
+ *
+ * dst/left/top is aligned by transform-size (i.e. 4, 8, 16 or 32 pixels)
+ * stride is aligned by 16 pixels
+ * top[-1] is top/left; top[4,7] is top-right for 4x4
+ */
+ // FIXME(rbultje) maybe replace left/top pointers with HAVE_TOP/
+ // HAVE_LEFT/HAVE_TOPRIGHT flags instead, and then handle it in-place?
+ // also needs to fit in with what H.264/VP8/etc do
+ void (*intra_pred[N_TXFM_SIZES][N_INTRA_PRED_MODES])(uint8_t *dst,
+ ptrdiff_t stride,
+ const uint8_t *left,
+ const uint8_t *top);
+
+ /*
+ * dimension 1: 0=4x4, 1=8x8, 2=16x16, 3=32x32, 4=lossless (3-4=dct only)
+ * dimension 2: 0=dct/dct, 1=dct/adst, 2=adst/dct, 3=adst/adst
+ *
+ * dst is aligned by transform-size (i.e. 4, 8, 16 or 32 pixels)
+ * stride is aligned by 16 pixels
+ * block is 16-byte aligned
+ * eob indicates the position (+1) of the last non-zero coefficient,
+ * in scan-order. This can be used to write faster versions, e.g. a
+ * dc-only 4x4/8x8/16x16/32x32, or a 4x4-only (eob<10) 8x8/16x16/32x32,
+ * etc.
+ */
+ // FIXME also write idct_add_block() versions for whole (inter) pred
+ // blocks, so we can do 2 4x4s at once
+ void (*itxfm_add[N_TXFM_SIZES + 1][N_TXFM_TYPES])(uint8_t *dst,
+ ptrdiff_t stride,
+ int16_t *block, int eob);
+
+ /*
+ * dimension 1: width of filter (0=4, 1=8, 2=16)
+ * dimension 2: 0=col-edge filter (h), 1=row-edge filter (v)
+ *
+ * dst/stride are aligned by 8
+ */
+ void (*loop_filter_8[3][2])(uint8_t *dst, ptrdiff_t stride,
+ int mb_lim, int lim, int hev_thr);
+
+ /*
+ * dimension 1: 0=col-edge filter (h), 1=row-edge filter (v)
+ *
+ * The width of filter is assumed to be 16; dst/stride are aligned by 16
+ */
+ void (*loop_filter_16[2])(uint8_t *dst, ptrdiff_t stride,
+ int mb_lim, int lim, int hev_thr);
+
+ /*
+ * dimension 1/2: width of filter (0=4, 1=8) for each filter half
+ * dimension 3: 0=col-edge filter (h), 1=row-edge filter (v)
+ *
+ * dst/stride are aligned by operation size
+ * this basically calls loop_filter[d1][d3][0](), followed by
+ * loop_filter[d2][d3][0]() on the next 8 pixels
+ * mb_lim/lim/hev_thr contain two values in the lowest two bytes of the
+ * integer.
+ */
+ // FIXME perhaps a mix4 that operates on 32px (for AVX2)
+ void (*loop_filter_mix2[2][2][2])(uint8_t *dst, ptrdiff_t stride,
+ int mb_lim, int lim, int hev_thr);
+
+ /*
+ * dimension 1: hsize (0: 64, 1: 32, 2: 16, 3: 8, 4: 4)
+ * dimension 2: filter type (0: smooth, 1: regular, 2: sharp, 3: bilin)
+ * dimension 3: averaging type (0: put, 1: avg)
+ * dimension 4: x subpel interpolation (0: none, 1: 8tap/bilin)
+ * dimension 5: y subpel interpolation (0: none, 1: 8tap/bilin)
+ *
+ * dst/stride are aligned by hsize
+ */
+ vp9_mc_func mc[5][N_FILTERS][2][2][2];
+
+ /*
+ * for scalable MC, first 3 dimensions identical to above, the other two
+ * don't exist since it changes per stepsize.
+ */
+ vp9_scaled_mc_func smc[5][N_FILTERS][2];
+} VP9DSPContext;
+
+extern const int16_t attribute_visibility_hidden ff_vp9_subpel_filters[3][16][8];
+
+void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact);
+
+void ff_vp9dsp_init_8(VP9DSPContext *dsp);
+void ff_vp9dsp_init_10(VP9DSPContext *dsp);
+void ff_vp9dsp_init_12(VP9DSPContext *dsp);
+
+void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int bpp);
+void ff_vp9dsp_init_arm(VP9DSPContext *dsp, int bpp);
+void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact);
+void ff_vp9dsp_init_mips(VP9DSPContext *dsp, int bpp);
+void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp);
+
+#endif /* AVCODEC_VP9DSP_H */
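
Editor's note: the eob argument documented for itxfm_add in the header above is what allows optimised kernels to take dc-only or partial-transform shortcuts. A small sketch of a caller making use of it; idct_add_block_sketch is a hypothetical helper, not part of this header:

    #include "vp9dsp.h"

    /* Hypothetical helper: skip the inverse transform entirely when the block
     * has no non-zero coefficients, otherwise let the selected kernel use eob
     * to pick its own fast path (e.g. dc-only when eob == 1). */
    static void idct_add_block_sketch(const VP9DSPContext *dsp,
                                      int tx, int txtp,
                                      uint8_t *dst, ptrdiff_t stride,
                                      int16_t *block, int eob)
    {
        if (!eob)
            return;
        dsp->itxfm_add[tx][txtp](dst, stride, block, eob);
    }
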
diff --git a/media/ffvpx/libavcodec/vp9dsp_10bpp.c b/media/ffvpx/libavcodec/vp9dsp_10bpp.c
new file mode 100644
index 0000000000..62ce182070
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9dsp_10bpp.c
@@ -0,0 +1,26 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BIT_DEPTH 10
+#define dctint int64_t
+#include "vp9dsp_template.c"
diff --git a/media/ffvpx/libavcodec/vp9dsp_12bpp.c b/media/ffvpx/libavcodec/vp9dsp_12bpp.c
new file mode 100644
index 0000000000..2f36471c5b
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9dsp_12bpp.c
@@ -0,0 +1,26 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BIT_DEPTH 12
+#define dctint int64_t
+#include "vp9dsp_template.c"
diff --git a/media/ffvpx/libavcodec/vp9dsp_8bpp.c b/media/ffvpx/libavcodec/vp9dsp_8bpp.c
new file mode 100644
index 0000000000..4b219b06b0
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9dsp_8bpp.c
@@ -0,0 +1,26 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BIT_DEPTH 8
+#define dctint int
+#include "vp9dsp_template.c"
diff --git a/media/ffvpx/libavcodec/vp9dsp_template.c b/media/ffvpx/libavcodec/vp9dsp_template.c
new file mode 100644
index 0000000000..9b11661704
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9dsp_template.c
@@ -0,0 +1,2546 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/common.h"
+#include "bit_depth_template.c"
+#include "vp9dsp.h"
+
+#if BIT_DEPTH != 12
+
+// FIXME see whether we can merge parts of this (perhaps at least 4x4 and 8x8)
+// back with h264pred.[ch]
+
+static void vert_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ pixel4 p4 = AV_RN4PA(top);
+
+ stride /= sizeof(pixel);
+ AV_WN4PA(dst + stride * 0, p4);
+ AV_WN4PA(dst + stride * 1, p4);
+ AV_WN4PA(dst + stride * 2, p4);
+ AV_WN4PA(dst + stride * 3, p4);
+}
+
+static void vert_8x8_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ pixel4 p4a = AV_RN4PA(top + 0);
+ pixel4 p4b = AV_RN4PA(top + 4);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 8; y++) {
+ AV_WN4PA(dst + 0, p4a);
+ AV_WN4PA(dst + 4, p4b);
+ dst += stride;
+ }
+}
+
+static void vert_16x16_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ pixel4 p4a = AV_RN4PA(top + 0);
+ pixel4 p4b = AV_RN4PA(top + 4);
+ pixel4 p4c = AV_RN4PA(top + 8);
+ pixel4 p4d = AV_RN4PA(top + 12);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 16; y++) {
+ AV_WN4PA(dst + 0, p4a);
+ AV_WN4PA(dst + 4, p4b);
+ AV_WN4PA(dst + 8, p4c);
+ AV_WN4PA(dst + 12, p4d);
+ dst += stride;
+ }
+}
+
+static void vert_32x32_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ pixel4 p4a = AV_RN4PA(top + 0);
+ pixel4 p4b = AV_RN4PA(top + 4);
+ pixel4 p4c = AV_RN4PA(top + 8);
+ pixel4 p4d = AV_RN4PA(top + 12);
+ pixel4 p4e = AV_RN4PA(top + 16);
+ pixel4 p4f = AV_RN4PA(top + 20);
+ pixel4 p4g = AV_RN4PA(top + 24);
+ pixel4 p4h = AV_RN4PA(top + 28);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 32; y++) {
+ AV_WN4PA(dst + 0, p4a);
+ AV_WN4PA(dst + 4, p4b);
+ AV_WN4PA(dst + 8, p4c);
+ AV_WN4PA(dst + 12, p4d);
+ AV_WN4PA(dst + 16, p4e);
+ AV_WN4PA(dst + 20, p4f);
+ AV_WN4PA(dst + 24, p4g);
+ AV_WN4PA(dst + 28, p4h);
+ dst += stride;
+ }
+}
+
+static void hor_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+
+ stride /= sizeof(pixel);
+ AV_WN4PA(dst + stride * 0, PIXEL_SPLAT_X4(left[3]));
+ AV_WN4PA(dst + stride * 1, PIXEL_SPLAT_X4(left[2]));
+ AV_WN4PA(dst + stride * 2, PIXEL_SPLAT_X4(left[1]));
+ AV_WN4PA(dst + stride * 3, PIXEL_SPLAT_X4(left[0]));
+}
+
+static void hor_8x8_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 8; y++) {
+ pixel4 p4 = PIXEL_SPLAT_X4(left[7 - y]);
+
+ AV_WN4PA(dst + 0, p4);
+ AV_WN4PA(dst + 4, p4);
+ dst += stride;
+ }
+}
+
+static void hor_16x16_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 16; y++) {
+ pixel4 p4 = PIXEL_SPLAT_X4(left[15 - y]);
+
+ AV_WN4PA(dst + 0, p4);
+ AV_WN4PA(dst + 4, p4);
+ AV_WN4PA(dst + 8, p4);
+ AV_WN4PA(dst + 12, p4);
+ dst += stride;
+ }
+}
+
+static void hor_32x32_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 32; y++) {
+ pixel4 p4 = PIXEL_SPLAT_X4(left[31 - y]);
+
+ AV_WN4PA(dst + 0, p4);
+ AV_WN4PA(dst + 4, p4);
+ AV_WN4PA(dst + 8, p4);
+ AV_WN4PA(dst + 12, p4);
+ AV_WN4PA(dst + 16, p4);
+ AV_WN4PA(dst + 20, p4);
+ AV_WN4PA(dst + 24, p4);
+ AV_WN4PA(dst + 28, p4);
+ dst += stride;
+ }
+}
+
+#endif /* BIT_DEPTH != 12 */
+
+static void tm_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ const pixel *top = (const pixel *) _top;
+ int y, tl = top[-1];
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 4; y++) {
+ int l_m_tl = left[3 - y] - tl;
+
+ dst[0] = av_clip_pixel(top[0] + l_m_tl);
+ dst[1] = av_clip_pixel(top[1] + l_m_tl);
+ dst[2] = av_clip_pixel(top[2] + l_m_tl);
+ dst[3] = av_clip_pixel(top[3] + l_m_tl);
+ dst += stride;
+ }
+}
+
+static void tm_8x8_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ const pixel *top = (const pixel *) _top;
+ int y, tl = top[-1];
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 8; y++) {
+ int l_m_tl = left[7 - y] - tl;
+
+ dst[0] = av_clip_pixel(top[0] + l_m_tl);
+ dst[1] = av_clip_pixel(top[1] + l_m_tl);
+ dst[2] = av_clip_pixel(top[2] + l_m_tl);
+ dst[3] = av_clip_pixel(top[3] + l_m_tl);
+ dst[4] = av_clip_pixel(top[4] + l_m_tl);
+ dst[5] = av_clip_pixel(top[5] + l_m_tl);
+ dst[6] = av_clip_pixel(top[6] + l_m_tl);
+ dst[7] = av_clip_pixel(top[7] + l_m_tl);
+ dst += stride;
+ }
+}
+
+static void tm_16x16_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ const pixel *top = (const pixel *) _top;
+ int y, tl = top[-1];
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 16; y++) {
+ int l_m_tl = left[15 - y] - tl;
+
+ dst[ 0] = av_clip_pixel(top[ 0] + l_m_tl);
+ dst[ 1] = av_clip_pixel(top[ 1] + l_m_tl);
+ dst[ 2] = av_clip_pixel(top[ 2] + l_m_tl);
+ dst[ 3] = av_clip_pixel(top[ 3] + l_m_tl);
+ dst[ 4] = av_clip_pixel(top[ 4] + l_m_tl);
+ dst[ 5] = av_clip_pixel(top[ 5] + l_m_tl);
+ dst[ 6] = av_clip_pixel(top[ 6] + l_m_tl);
+ dst[ 7] = av_clip_pixel(top[ 7] + l_m_tl);
+ dst[ 8] = av_clip_pixel(top[ 8] + l_m_tl);
+ dst[ 9] = av_clip_pixel(top[ 9] + l_m_tl);
+ dst[10] = av_clip_pixel(top[10] + l_m_tl);
+ dst[11] = av_clip_pixel(top[11] + l_m_tl);
+ dst[12] = av_clip_pixel(top[12] + l_m_tl);
+ dst[13] = av_clip_pixel(top[13] + l_m_tl);
+ dst[14] = av_clip_pixel(top[14] + l_m_tl);
+ dst[15] = av_clip_pixel(top[15] + l_m_tl);
+ dst += stride;
+ }
+}
+
+static void tm_32x32_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ const pixel *top = (const pixel *) _top;
+ int y, tl = top[-1];
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 32; y++) {
+ int l_m_tl = left[31 - y] - tl;
+
+ dst[ 0] = av_clip_pixel(top[ 0] + l_m_tl);
+ dst[ 1] = av_clip_pixel(top[ 1] + l_m_tl);
+ dst[ 2] = av_clip_pixel(top[ 2] + l_m_tl);
+ dst[ 3] = av_clip_pixel(top[ 3] + l_m_tl);
+ dst[ 4] = av_clip_pixel(top[ 4] + l_m_tl);
+ dst[ 5] = av_clip_pixel(top[ 5] + l_m_tl);
+ dst[ 6] = av_clip_pixel(top[ 6] + l_m_tl);
+ dst[ 7] = av_clip_pixel(top[ 7] + l_m_tl);
+ dst[ 8] = av_clip_pixel(top[ 8] + l_m_tl);
+ dst[ 9] = av_clip_pixel(top[ 9] + l_m_tl);
+ dst[10] = av_clip_pixel(top[10] + l_m_tl);
+ dst[11] = av_clip_pixel(top[11] + l_m_tl);
+ dst[12] = av_clip_pixel(top[12] + l_m_tl);
+ dst[13] = av_clip_pixel(top[13] + l_m_tl);
+ dst[14] = av_clip_pixel(top[14] + l_m_tl);
+ dst[15] = av_clip_pixel(top[15] + l_m_tl);
+ dst[16] = av_clip_pixel(top[16] + l_m_tl);
+ dst[17] = av_clip_pixel(top[17] + l_m_tl);
+ dst[18] = av_clip_pixel(top[18] + l_m_tl);
+ dst[19] = av_clip_pixel(top[19] + l_m_tl);
+ dst[20] = av_clip_pixel(top[20] + l_m_tl);
+ dst[21] = av_clip_pixel(top[21] + l_m_tl);
+ dst[22] = av_clip_pixel(top[22] + l_m_tl);
+ dst[23] = av_clip_pixel(top[23] + l_m_tl);
+ dst[24] = av_clip_pixel(top[24] + l_m_tl);
+ dst[25] = av_clip_pixel(top[25] + l_m_tl);
+ dst[26] = av_clip_pixel(top[26] + l_m_tl);
+ dst[27] = av_clip_pixel(top[27] + l_m_tl);
+ dst[28] = av_clip_pixel(top[28] + l_m_tl);
+ dst[29] = av_clip_pixel(top[29] + l_m_tl);
+ dst[30] = av_clip_pixel(top[30] + l_m_tl);
+ dst[31] = av_clip_pixel(top[31] + l_m_tl);
+ dst += stride;
+ }
+}
+
+#if BIT_DEPTH != 12
+
+static void dc_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ const pixel *top = (const pixel *) _top;
+ pixel4 dc = PIXEL_SPLAT_X4((left[0] + left[1] + left[2] + left[3] +
+ top[0] + top[1] + top[2] + top[3] + 4) >> 3);
+
+ stride /= sizeof(pixel);
+ AV_WN4PA(dst + stride * 0, dc);
+ AV_WN4PA(dst + stride * 1, dc);
+ AV_WN4PA(dst + stride * 2, dc);
+ AV_WN4PA(dst + stride * 3, dc);
+}
+
+static void dc_8x8_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ const pixel *top = (const pixel *) _top;
+ pixel4 dc = PIXEL_SPLAT_X4
+ ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] +
+ left[6] + left[7] + top[0] + top[1] + top[2] + top[3] +
+ top[4] + top[5] + top[6] + top[7] + 8) >> 4);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 8; y++) {
+ AV_WN4PA(dst + 0, dc);
+ AV_WN4PA(dst + 4, dc);
+ dst += stride;
+ }
+}
+
+static void dc_16x16_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ const pixel *top = (const pixel *) _top;
+ pixel4 dc = PIXEL_SPLAT_X4
+ ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] + left[6] +
+ left[7] + left[8] + left[9] + left[10] + left[11] + left[12] +
+ left[13] + left[14] + left[15] + top[0] + top[1] + top[2] + top[3] +
+ top[4] + top[5] + top[6] + top[7] + top[8] + top[9] + top[10] +
+ top[11] + top[12] + top[13] + top[14] + top[15] + 16) >> 5);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 16; y++) {
+ AV_WN4PA(dst + 0, dc);
+ AV_WN4PA(dst + 4, dc);
+ AV_WN4PA(dst + 8, dc);
+ AV_WN4PA(dst + 12, dc);
+ dst += stride;
+ }
+}
+
+static void dc_32x32_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ const pixel *top = (const pixel *) _top;
+ pixel4 dc = PIXEL_SPLAT_X4
+ ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] + left[6] +
+ left[7] + left[8] + left[9] + left[10] + left[11] + left[12] +
+ left[13] + left[14] + left[15] + left[16] + left[17] + left[18] +
+ left[19] + left[20] + left[21] + left[22] + left[23] + left[24] +
+ left[25] + left[26] + left[27] + left[28] + left[29] + left[30] +
+ left[31] + top[0] + top[1] + top[2] + top[3] + top[4] + top[5] +
+ top[6] + top[7] + top[8] + top[9] + top[10] + top[11] + top[12] +
+ top[13] + top[14] + top[15] + top[16] + top[17] + top[18] + top[19] +
+ top[20] + top[21] + top[22] + top[23] + top[24] + top[25] + top[26] +
+ top[27] + top[28] + top[29] + top[30] + top[31] + 32) >> 6);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 32; y++) {
+ AV_WN4PA(dst + 0, dc);
+ AV_WN4PA(dst + 4, dc);
+ AV_WN4PA(dst + 8, dc);
+ AV_WN4PA(dst + 12, dc);
+ AV_WN4PA(dst + 16, dc);
+ AV_WN4PA(dst + 20, dc);
+ AV_WN4PA(dst + 24, dc);
+ AV_WN4PA(dst + 28, dc);
+ dst += stride;
+ }
+}
+
+static void dc_left_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ pixel4 dc = PIXEL_SPLAT_X4((left[0] + left[1] + left[2] + left[3] + 2) >> 2);
+
+ stride /= sizeof(pixel);
+ AV_WN4PA(dst + stride * 0, dc);
+ AV_WN4PA(dst + stride * 1, dc);
+ AV_WN4PA(dst + stride * 2, dc);
+ AV_WN4PA(dst + stride * 3, dc);
+}
+
+static void dc_left_8x8_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ pixel4 dc = PIXEL_SPLAT_X4
+ ((left[0] + left[1] + left[2] + left[3] +
+ left[4] + left[5] + left[6] + left[7] + 4) >> 3);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 8; y++) {
+ AV_WN4PA(dst + 0, dc);
+ AV_WN4PA(dst + 4, dc);
+ dst += stride;
+ }
+}
+
+static void dc_left_16x16_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ pixel4 dc = PIXEL_SPLAT_X4
+ ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] +
+ left[6] + left[7] + left[8] + left[9] + left[10] + left[11] +
+ left[12] + left[13] + left[14] + left[15] + 8) >> 4);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 16; y++) {
+ AV_WN4PA(dst + 0, dc);
+ AV_WN4PA(dst + 4, dc);
+ AV_WN4PA(dst + 8, dc);
+ AV_WN4PA(dst + 12, dc);
+ dst += stride;
+ }
+}
+
+static void dc_left_32x32_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ pixel4 dc = PIXEL_SPLAT_X4
+ ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] +
+ left[6] + left[7] + left[8] + left[9] + left[10] + left[11] +
+ left[12] + left[13] + left[14] + left[15] + left[16] + left[17] +
+ left[18] + left[19] + left[20] + left[21] + left[22] + left[23] +
+ left[24] + left[25] + left[26] + left[27] + left[28] + left[29] +
+ left[30] + left[31] + 16) >> 5);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 32; y++) {
+ AV_WN4PA(dst + 0, dc);
+ AV_WN4PA(dst + 4, dc);
+ AV_WN4PA(dst + 8, dc);
+ AV_WN4PA(dst + 12, dc);
+ AV_WN4PA(dst + 16, dc);
+ AV_WN4PA(dst + 20, dc);
+ AV_WN4PA(dst + 24, dc);
+ AV_WN4PA(dst + 28, dc);
+ dst += stride;
+ }
+}
+
+static void dc_top_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ pixel4 dc = PIXEL_SPLAT_X4((top[0] + top[1] + top[2] + top[3] + 2) >> 2);
+
+ stride /= sizeof(pixel);
+ AV_WN4PA(dst + stride * 0, dc);
+ AV_WN4PA(dst + stride * 1, dc);
+ AV_WN4PA(dst + stride * 2, dc);
+ AV_WN4PA(dst + stride * 3, dc);
+}
+
+static void dc_top_8x8_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ pixel4 dc = PIXEL_SPLAT_X4
+ ((top[0] + top[1] + top[2] + top[3] +
+ top[4] + top[5] + top[6] + top[7] + 4) >> 3);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 8; y++) {
+ AV_WN4PA(dst + 0, dc);
+ AV_WN4PA(dst + 4, dc);
+ dst += stride;
+ }
+}
+
+static void dc_top_16x16_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ pixel4 dc = PIXEL_SPLAT_X4
+ ((top[0] + top[1] + top[2] + top[3] + top[4] + top[5] +
+ top[6] + top[7] + top[8] + top[9] + top[10] + top[11] +
+ top[12] + top[13] + top[14] + top[15] + 8) >> 4);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 16; y++) {
+ AV_WN4PA(dst + 0, dc);
+ AV_WN4PA(dst + 4, dc);
+ AV_WN4PA(dst + 8, dc);
+ AV_WN4PA(dst + 12, dc);
+ dst += stride;
+ }
+}
+
+static void dc_top_32x32_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ pixel4 dc = PIXEL_SPLAT_X4
+ ((top[0] + top[1] + top[2] + top[3] + top[4] + top[5] +
+ top[6] + top[7] + top[8] + top[9] + top[10] + top[11] +
+ top[12] + top[13] + top[14] + top[15] + top[16] + top[17] +
+ top[18] + top[19] + top[20] + top[21] + top[22] + top[23] +
+ top[24] + top[25] + top[26] + top[27] + top[28] + top[29] +
+ top[30] + top[31] + 16) >> 5);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 32; y++) {
+ AV_WN4PA(dst + 0, dc);
+ AV_WN4PA(dst + 4, dc);
+ AV_WN4PA(dst + 8, dc);
+ AV_WN4PA(dst + 12, dc);
+ AV_WN4PA(dst + 16, dc);
+ AV_WN4PA(dst + 20, dc);
+ AV_WN4PA(dst + 24, dc);
+ AV_WN4PA(dst + 28, dc);
+ dst += stride;
+ }
+}
+
+#endif /* BIT_DEPTH != 12 */
+
+static void dc_128_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
+
+ stride /= sizeof(pixel);
+ AV_WN4PA(dst + stride * 0, val);
+ AV_WN4PA(dst + stride * 1, val);
+ AV_WN4PA(dst + stride * 2, val);
+ AV_WN4PA(dst + stride * 3, val);
+}
+
+static void dc_128_8x8_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 8; y++) {
+ AV_WN4PA(dst + 0, val);
+ AV_WN4PA(dst + 4, val);
+ dst += stride;
+ }
+}
+
+static void dc_128_16x16_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 16; y++) {
+ AV_WN4PA(dst + 0, val);
+ AV_WN4PA(dst + 4, val);
+ AV_WN4PA(dst + 8, val);
+ AV_WN4PA(dst + 12, val);
+ dst += stride;
+ }
+}
+
+static void dc_128_32x32_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 32; y++) {
+ AV_WN4PA(dst + 0, val);
+ AV_WN4PA(dst + 4, val);
+ AV_WN4PA(dst + 8, val);
+ AV_WN4PA(dst + 12, val);
+ AV_WN4PA(dst + 16, val);
+ AV_WN4PA(dst + 20, val);
+ AV_WN4PA(dst + 24, val);
+ AV_WN4PA(dst + 28, val);
+ dst += stride;
+ }
+}
+
+static void dc_127_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
+
+ stride /= sizeof(pixel);
+ AV_WN4PA(dst + stride * 0, val);
+ AV_WN4PA(dst + stride * 1, val);
+ AV_WN4PA(dst + stride * 2, val);
+    AV_WN4PA(dst + stride * 3, val);
+}
+
+static void dc_127_8x8_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 8; y++) {
+ AV_WN4PA(dst + 0, val);
+ AV_WN4PA(dst + 4, val);
+ dst += stride;
+ }
+}
+
+static void dc_127_16x16_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 16; y++) {
+ AV_WN4PA(dst + 0, val);
+ AV_WN4PA(dst + 4, val);
+ AV_WN4PA(dst + 8, val);
+ AV_WN4PA(dst + 12, val);
+ dst += stride;
+ }
+}
+
+static void dc_127_32x32_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 32; y++) {
+ AV_WN4PA(dst + 0, val);
+ AV_WN4PA(dst + 4, val);
+ AV_WN4PA(dst + 8, val);
+ AV_WN4PA(dst + 12, val);
+ AV_WN4PA(dst + 16, val);
+ AV_WN4PA(dst + 20, val);
+ AV_WN4PA(dst + 24, val);
+ AV_WN4PA(dst + 28, val);
+ dst += stride;
+ }
+}
+
+static void dc_129_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
+
+ stride /= sizeof(pixel);
+ AV_WN4PA(dst + stride * 0, val);
+ AV_WN4PA(dst + stride * 1, val);
+ AV_WN4PA(dst + stride * 2, val);
+ AV_WN4PA(dst + stride * 3, val);
+}
+
+static void dc_129_8x8_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 8; y++) {
+ AV_WN4PA(dst + 0, val);
+ AV_WN4PA(dst + 4, val);
+ dst += stride;
+ }
+}
+
+static void dc_129_16x16_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 16; y++) {
+ AV_WN4PA(dst + 0, val);
+ AV_WN4PA(dst + 4, val);
+ AV_WN4PA(dst + 8, val);
+ AV_WN4PA(dst + 12, val);
+ dst += stride;
+ }
+}
+
+static void dc_129_32x32_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
+ int y;
+
+ stride /= sizeof(pixel);
+ for (y = 0; y < 32; y++) {
+ AV_WN4PA(dst + 0, val);
+ AV_WN4PA(dst + 4, val);
+ AV_WN4PA(dst + 8, val);
+ AV_WN4PA(dst + 12, val);
+ AV_WN4PA(dst + 16, val);
+ AV_WN4PA(dst + 20, val);
+ AV_WN4PA(dst + 24, val);
+ AV_WN4PA(dst + 28, val);
+ dst += stride;
+ }
+}
+
+#if BIT_DEPTH != 12
+
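+/* memset_bpc(): fill a run of pixels with one value; plain memset() for
+ * 8-bit samples, an explicit per-sample loop for 16-bit samples. */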
+#if BIT_DEPTH == 8
+#define memset_bpc memset
+#else
+static inline void memset_bpc(uint16_t *dst, int val, int len) {
+ int n;
+ for (n = 0; n < len; n++) {
+ dst[n] = val;
+ }
+}
+#endif
+
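+/* DST(x, y) addresses the destination sample at column x, row y; every
+ * function below converts stride from bytes to pixels before using it. */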
+#define DST(x, y) dst[(x) + (y) * stride]
+
+static void diag_downleft_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ int a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
+ a4 = top[4], a5 = top[5], a6 = top[6], a7 = top[7];
+
+ stride /= sizeof(pixel);
+ DST(0,0) = (a0 + a1 * 2 + a2 + 2) >> 2;
+ DST(1,0) = DST(0,1) = (a1 + a2 * 2 + a3 + 2) >> 2;
+ DST(2,0) = DST(1,1) = DST(0,2) = (a2 + a3 * 2 + a4 + 2) >> 2;
+ DST(3,0) = DST(2,1) = DST(1,2) = DST(0,3) = (a3 + a4 * 2 + a5 + 2) >> 2;
+ DST(3,1) = DST(2,2) = DST(1,3) = (a4 + a5 * 2 + a6 + 2) >> 2;
+ DST(3,2) = DST(2,3) = (a5 + a6 * 2 + a7 + 2) >> 2;
+ DST(3,3) = a7; // note: unlike VP8, the last sample stays as the unfiltered top[7]
+}
+
+#define def_diag_downleft(size) \
+static void diag_downleft_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
+ const uint8_t *left, const uint8_t *_top) \
+{ \
+ pixel *dst = (pixel *) _dst; \
+ const pixel *top = (const pixel *) _top; \
+ int i, j; \
+ pixel v[size - 1]; \
+\
+ stride /= sizeof(pixel); \
+ for (i = 0; i < size - 2; i++) \
+ v[i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
+ v[size - 2] = (top[size - 2] + top[size - 1] * 3 + 2) >> 2; \
+\
+ for (j = 0; j < size; j++) { \
+ memcpy(dst + j*stride, v + j, (size - 1 - j) * sizeof(pixel)); \
+ memset_bpc(dst + j*stride + size - 1 - j, top[size - 1], j + 1); \
+ } \
+}
+
+def_diag_downleft(8)
+def_diag_downleft(16)
+def_diag_downleft(32)
+
+static void diag_downright_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ const pixel *left = (const pixel *) _left;
+ int tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
+ l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0];
+
+ stride /= sizeof(pixel);
+ DST(0,3) = (l1 + l2 * 2 + l3 + 2) >> 2;
+ DST(0,2) = DST(1,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
+ DST(0,1) = DST(1,2) = DST(2,3) = (tl + l0 * 2 + l1 + 2) >> 2;
+ DST(0,0) = DST(1,1) = DST(2,2) = DST(3,3) = (l0 + tl * 2 + a0 + 2) >> 2;
+ DST(1,0) = DST(2,1) = DST(3,2) = (tl + a0 * 2 + a1 + 2) >> 2;
+ DST(2,0) = DST(3,1) = (a0 + a1 * 2 + a2 + 2) >> 2;
+ DST(3,0) = (a1 + a2 * 2 + a3 + 2) >> 2;
+}
+
+#define def_diag_downright(size) \
+static void diag_downright_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
+ const uint8_t *_left, const uint8_t *_top) \
+{ \
+ pixel *dst = (pixel *) _dst; \
+ const pixel *top = (const pixel *) _top; \
+ const pixel *left = (const pixel *) _left; \
+ int i, j; \
+ pixel v[size + size - 1]; \
+\
+ stride /= sizeof(pixel); \
+ for (i = 0; i < size - 2; i++) { \
+ v[i ] = (left[i] + left[i + 1] * 2 + left[i + 2] + 2) >> 2; \
+ v[size + 1 + i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
+ } \
+ v[size - 2] = (left[size - 2] + left[size - 1] * 2 + top[-1] + 2) >> 2; \
+ v[size - 1] = (left[size - 1] + top[-1] * 2 + top[ 0] + 2) >> 2; \
+ v[size ] = (top[-1] + top[0] * 2 + top[ 1] + 2) >> 2; \
+\
+ for (j = 0; j < size; j++) \
+ memcpy(dst + j*stride, v + size - 1 - j, size * sizeof(pixel)); \
+}
+
+def_diag_downright(8)
+def_diag_downright(16)
+def_diag_downright(32)
+
+static void vert_right_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ const pixel *left = (const pixel *) _left;
+ int tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
+ l0 = left[3], l1 = left[2], l2 = left[1];
+
+ stride /= sizeof(pixel);
+ DST(0,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
+ DST(0,2) = (tl + l0 * 2 + l1 + 2) >> 2;
+ DST(0,0) = DST(1,2) = (tl + a0 + 1) >> 1;
+ DST(0,1) = DST(1,3) = (l0 + tl * 2 + a0 + 2) >> 2;
+ DST(1,0) = DST(2,2) = (a0 + a1 + 1) >> 1;
+ DST(1,1) = DST(2,3) = (tl + a0 * 2 + a1 + 2) >> 2;
+ DST(2,0) = DST(3,2) = (a1 + a2 + 1) >> 1;
+ DST(2,1) = DST(3,3) = (a0 + a1 * 2 + a2 + 2) >> 2;
+ DST(3,0) = (a2 + a3 + 1) >> 1;
+ DST(3,1) = (a1 + a2 * 2 + a3 + 2) >> 2;
+}
+
+#define def_vert_right(size) \
+static void vert_right_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
+ const uint8_t *_left, const uint8_t *_top) \
+{ \
+ pixel *dst = (pixel *) _dst; \
+ const pixel *top = (const pixel *) _top; \
+ const pixel *left = (const pixel *) _left; \
+ int i, j; \
+ pixel ve[size + size/2 - 1], vo[size + size/2 - 1]; \
+\
+ stride /= sizeof(pixel); \
+ for (i = 0; i < size/2 - 2; i++) { \
+ vo[i] = (left[i*2 + 3] + left[i*2 + 2] * 2 + left[i*2 + 1] + 2) >> 2; \
+ ve[i] = (left[i*2 + 4] + left[i*2 + 3] * 2 + left[i*2 + 2] + 2) >> 2; \
+ } \
+ vo[size/2 - 2] = (left[size - 1] + left[size - 2] * 2 + left[size - 3] + 2) >> 2; \
+ ve[size/2 - 2] = (top[-1] + left[size - 1] * 2 + left[size - 2] + 2) >> 2; \
+\
+ ve[size/2 - 1] = (top[-1] + top[0] + 1) >> 1; \
+ vo[size/2 - 1] = (left[size - 1] + top[-1] * 2 + top[0] + 2) >> 2; \
+ for (i = 0; i < size - 1; i++) { \
+ ve[size/2 + i] = (top[i] + top[i + 1] + 1) >> 1; \
+ vo[size/2 + i] = (top[i - 1] + top[i] * 2 + top[i + 1] + 2) >> 2; \
+ } \
+\
+ for (j = 0; j < size / 2; j++) { \
+ memcpy(dst + j*2 *stride, ve + size/2 - 1 - j, size * sizeof(pixel)); \
+ memcpy(dst + (j*2 + 1)*stride, vo + size/2 - 1 - j, size * sizeof(pixel)); \
+ } \
+}
+
+def_vert_right(8)
+def_vert_right(16)
+def_vert_right(32)
+
+static void hor_down_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ const pixel *left = (const pixel *) _left;
+ int l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0],
+ tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2];
+
+ stride /= sizeof(pixel);
+ DST(2,0) = (tl + a0 * 2 + a1 + 2) >> 2;
+ DST(3,0) = (a0 + a1 * 2 + a2 + 2) >> 2;
+ DST(0,0) = DST(2,1) = (tl + l0 + 1) >> 1;
+ DST(1,0) = DST(3,1) = (a0 + tl * 2 + l0 + 2) >> 2;
+ DST(0,1) = DST(2,2) = (l0 + l1 + 1) >> 1;
+ DST(1,1) = DST(3,2) = (tl + l0 * 2 + l1 + 2) >> 2;
+ DST(0,2) = DST(2,3) = (l1 + l2 + 1) >> 1;
+ DST(1,2) = DST(3,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
+ DST(0,3) = (l2 + l3 + 1) >> 1;
+ DST(1,3) = (l1 + l2 * 2 + l3 + 2) >> 2;
+}
+
+#define def_hor_down(size) \
+static void hor_down_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
+ const uint8_t *_left, const uint8_t *_top) \
+{ \
+ pixel *dst = (pixel *) _dst; \
+ const pixel *top = (const pixel *) _top; \
+ const pixel *left = (const pixel *) _left; \
+ int i, j; \
+ pixel v[size * 3 - 2]; \
+\
+ stride /= sizeof(pixel); \
+ for (i = 0; i < size - 2; i++) { \
+ v[i*2 ] = (left[i + 1] + left[i + 0] + 1) >> 1; \
+ v[i*2 + 1] = (left[i + 2] + left[i + 1] * 2 + left[i + 0] + 2) >> 2; \
+ v[size*2 + i] = (top[i - 1] + top[i] * 2 + top[i + 1] + 2) >> 2; \
+ } \
+ v[size*2 - 2] = (top[-1] + left[size - 1] + 1) >> 1; \
+ v[size*2 - 4] = (left[size - 1] + left[size - 2] + 1) >> 1; \
+ v[size*2 - 1] = (top[0] + top[-1] * 2 + left[size - 1] + 2) >> 2; \
+ v[size*2 - 3] = (top[-1] + left[size - 1] * 2 + left[size - 2] + 2) >> 2; \
+\
+ for (j = 0; j < size; j++) \
+ memcpy(dst + j*stride, v + size*2 - 2 - j*2, size * sizeof(pixel)); \
+}
+
+def_hor_down(8)
+def_hor_down(16)
+def_hor_down(32)
+
+static void vert_left_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *_top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *top = (const pixel *) _top;
+ int a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
+ a4 = top[4], a5 = top[5], a6 = top[6];
+
+ stride /= sizeof(pixel);
+ DST(0,0) = (a0 + a1 + 1) >> 1;
+ DST(0,1) = (a0 + a1 * 2 + a2 + 2) >> 2;
+ DST(1,0) = DST(0,2) = (a1 + a2 + 1) >> 1;
+ DST(1,1) = DST(0,3) = (a1 + a2 * 2 + a3 + 2) >> 2;
+ DST(2,0) = DST(1,2) = (a2 + a3 + 1) >> 1;
+ DST(2,1) = DST(1,3) = (a2 + a3 * 2 + a4 + 2) >> 2;
+ DST(3,0) = DST(2,2) = (a3 + a4 + 1) >> 1;
+ DST(3,1) = DST(2,3) = (a3 + a4 * 2 + a5 + 2) >> 2;
+ DST(3,2) = (a4 + a5 + 1) >> 1;
+ DST(3,3) = (a4 + a5 * 2 + a6 + 2) >> 2;
+}
+
+#define def_vert_left(size) \
+static void vert_left_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
+ const uint8_t *left, const uint8_t *_top) \
+{ \
+ pixel *dst = (pixel *) _dst; \
+ const pixel *top = (const pixel *) _top; \
+ int i, j; \
+ pixel ve[size - 1], vo[size - 1]; \
+\
+ stride /= sizeof(pixel); \
+ for (i = 0; i < size - 2; i++) { \
+ ve[i] = (top[i] + top[i + 1] + 1) >> 1; \
+ vo[i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
+ } \
+ ve[size - 2] = (top[size - 2] + top[size - 1] + 1) >> 1; \
+ vo[size - 2] = (top[size - 2] + top[size - 1] * 3 + 2) >> 2; \
+\
+ for (j = 0; j < size / 2; j++) { \
+ memcpy(dst + j*2 * stride, ve + j, (size - j - 1) * sizeof(pixel)); \
+ memset_bpc(dst + j*2 * stride + size - j - 1, top[size - 1], j + 1); \
+ memcpy(dst + (j*2 + 1) * stride, vo + j, (size - j - 1) * sizeof(pixel)); \
+ memset_bpc(dst + (j*2 + 1) * stride + size - j - 1, top[size - 1], j + 1); \
+ } \
+}
+
+def_vert_left(8)
+def_vert_left(16)
+def_vert_left(32)
+
+static void hor_up_4x4_c(uint8_t *_dst, ptrdiff_t stride,
+ const uint8_t *_left, const uint8_t *top)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *left = (const pixel *) _left;
+ int l0 = left[0], l1 = left[1], l2 = left[2], l3 = left[3];
+
+ stride /= sizeof(pixel);
+ DST(0,0) = (l0 + l1 + 1) >> 1;
+ DST(1,0) = (l0 + l1 * 2 + l2 + 2) >> 2;
+ DST(0,1) = DST(2,0) = (l1 + l2 + 1) >> 1;
+ DST(1,1) = DST(3,0) = (l1 + l2 * 2 + l3 + 2) >> 2;
+ DST(0,2) = DST(2,1) = (l2 + l3 + 1) >> 1;
+ DST(1,2) = DST(3,1) = (l2 + l3 * 3 + 2) >> 2;
+ DST(0,3) = DST(1,3) = DST(2,2) = DST(2,3) = DST(3,2) = DST(3,3) = l3;
+}
+
+#define def_hor_up(size) \
+static void hor_up_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
+ const uint8_t *_left, const uint8_t *top) \
+{ \
+ pixel *dst = (pixel *) _dst; \
+ const pixel *left = (const pixel *) _left; \
+ int i, j; \
+ pixel v[size*2 - 2]; \
+\
+ stride /= sizeof(pixel); \
+ for (i = 0; i < size - 2; i++) { \
+ v[i*2 ] = (left[i] + left[i + 1] + 1) >> 1; \
+ v[i*2 + 1] = (left[i] + left[i + 1] * 2 + left[i + 2] + 2) >> 2; \
+ } \
+ v[size*2 - 4] = (left[size - 2] + left[size - 1] + 1) >> 1; \
+ v[size*2 - 3] = (left[size - 2] + left[size - 1] * 3 + 2) >> 2; \
+\
+ for (j = 0; j < size / 2; j++) \
+ memcpy(dst + j*stride, v + j*2, size * sizeof(pixel)); \
+ for (j = size / 2; j < size; j++) { \
+ memcpy(dst + j*stride, v + j*2, (size*2 - 2 - j*2) * sizeof(pixel)); \
+ memset_bpc(dst + j*stride + size*2 - 2 - j*2, left[size - 1], \
+ 2 + j*2 - size); \
+ } \
+}
+
+def_hor_up(8)
+def_hor_up(16)
+def_hor_up(32)
+
+#undef DST
+
+#endif /* BIT_DEPTH != 12 */
+
+#if BIT_DEPTH != 8
+void ff_vp9dsp_intrapred_init_10(VP9DSPContext *dsp);
+#endif
+#if BIT_DEPTH != 10
+static
+#endif
+av_cold void FUNC(ff_vp9dsp_intrapred_init)(VP9DSPContext *dsp)
+{
+#define init_intra_pred_bd_aware(tx, sz) \
+ dsp->intra_pred[tx][TM_VP8_PRED] = tm_##sz##_c; \
+ dsp->intra_pred[tx][DC_128_PRED] = dc_128_##sz##_c; \
+ dsp->intra_pred[tx][DC_127_PRED] = dc_127_##sz##_c; \
+ dsp->intra_pred[tx][DC_129_PRED] = dc_129_##sz##_c
+
+#if BIT_DEPTH == 12
+ ff_vp9dsp_intrapred_init_10(dsp);
+#define init_intra_pred(tx, sz) \
+ init_intra_pred_bd_aware(tx, sz)
+#else
+#define init_intra_pred(tx, sz) \
+ dsp->intra_pred[tx][VERT_PRED] = vert_##sz##_c; \
+ dsp->intra_pred[tx][HOR_PRED] = hor_##sz##_c; \
+ dsp->intra_pred[tx][DC_PRED] = dc_##sz##_c; \
+ dsp->intra_pred[tx][DIAG_DOWN_LEFT_PRED] = diag_downleft_##sz##_c; \
+ dsp->intra_pred[tx][DIAG_DOWN_RIGHT_PRED] = diag_downright_##sz##_c; \
+ dsp->intra_pred[tx][VERT_RIGHT_PRED] = vert_right_##sz##_c; \
+ dsp->intra_pred[tx][HOR_DOWN_PRED] = hor_down_##sz##_c; \
+ dsp->intra_pred[tx][VERT_LEFT_PRED] = vert_left_##sz##_c; \
+ dsp->intra_pred[tx][HOR_UP_PRED] = hor_up_##sz##_c; \
+ dsp->intra_pred[tx][LEFT_DC_PRED] = dc_left_##sz##_c; \
+ dsp->intra_pred[tx][TOP_DC_PRED] = dc_top_##sz##_c; \
+ init_intra_pred_bd_aware(tx, sz)
+#endif
+
+ init_intra_pred(TX_4X4, 4x4);
+ init_intra_pred(TX_8X8, 8x8);
+ init_intra_pred(TX_16X16, 16x16);
+ init_intra_pred(TX_32X32, 32x32);
+
+#undef init_intra_pred
+#undef init_intra_pred_bd_aware
+}
+
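+/* Build a complete 2-D inverse transform + add function from two 1-D
+ * transforms: a first pass over the columns into a transposed temporary,
+ * then a second pass whose output is rounded by 'bits', clipped and added
+ * to the destination. When has_dconly is set and eob == 1 only the DC
+ * coefficient is present, so a single scaled value is added to every pixel.
+ * The coefficient block is cleared on the way out. */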
+#define itxfm_wrapper(type_a, type_b, sz, bits, has_dconly) \
+static void type_a##_##type_b##_##sz##x##sz##_add_c(uint8_t *_dst, \
+ ptrdiff_t stride, \
+ int16_t *_block, int eob) \
+{ \
+ int i, j; \
+ pixel *dst = (pixel *) _dst; \
+ dctcoef *block = (dctcoef *) _block, tmp[sz * sz], out[sz]; \
+\
+ stride /= sizeof(pixel); \
+ if (has_dconly && eob == 1) { \
+ const int t = ((((dctint) block[0] * 11585 + (1 << 13)) >> 14) \
+ * 11585 + (1 << 13)) >> 14; \
+ block[0] = 0; \
+ for (i = 0; i < sz; i++) { \
+ for (j = 0; j < sz; j++) \
+ dst[j * stride] = av_clip_pixel(dst[j * stride] + \
+ (bits ? \
+ (int)(t + (1U << (bits - 1))) >> bits : \
+ t)); \
+ dst++; \
+ } \
+ return; \
+ } \
+\
+ for (i = 0; i < sz; i++) \
+ type_a##sz##_1d(block + i, sz, tmp + i * sz, 0); \
+ memset(block, 0, sz * sz * sizeof(*block)); \
+ for (i = 0; i < sz; i++) { \
+ type_b##sz##_1d(tmp + i, sz, out, 1); \
+ for (j = 0; j < sz; j++) \
+ dst[j * stride] = av_clip_pixel(dst[j * stride] + \
+ (bits ? \
+ (int)(out[j] + (1U << (bits - 1))) >> bits : \
+ out[j])); \
+ dst++; \
+ } \
+}
+
+#define itxfm_wrap(sz, bits) \
+itxfm_wrapper(idct, idct, sz, bits, 1) \
+itxfm_wrapper(iadst, idct, sz, bits, 0) \
+itxfm_wrapper(idct, iadst, sz, bits, 0) \
+itxfm_wrapper(iadst, iadst, sz, bits, 0)
+
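+/* IN(x) reads input coefficient x using the stride of the current pass.
+ * The 1-D transforms below use Q14 fixed-point constants, e.g.
+ * 11585 ~= 2^14 * cos(pi/4), 15137 ~= 2^14 * cos(pi/8),
+ * 6270 ~= 2^14 * sin(pi/8); results are rounded with (1 << 13) and
+ * shifted down by 14. */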
+#define IN(x) ((dctint) in[(x) * stride])
+
+static av_always_inline void idct4_1d(const dctcoef *in, ptrdiff_t stride,
+ dctcoef *out, int pass)
+{
+ dctint t0, t1, t2, t3;
+
+ t0 = ((IN(0) + IN(2)) * 11585 + (1 << 13)) >> 14;
+ t1 = ((IN(0) - IN(2)) * 11585 + (1 << 13)) >> 14;
+ t2 = (IN(1) * 6270 - IN(3) * 15137 + (1 << 13)) >> 14;
+ t3 = (IN(1) * 15137 + IN(3) * 6270 + (1 << 13)) >> 14;
+
+ out[0] = t0 + t3;
+ out[1] = t1 + t2;
+ out[2] = t1 - t2;
+ out[3] = t0 - t3;
+}
+
+static av_always_inline void iadst4_1d(const dctcoef *in, ptrdiff_t stride,
+ dctcoef *out, int pass)
+{
+ dctint t0, t1, t2, t3;
+
+ t0 = 5283 * IN(0) + 15212 * IN(2) + 9929 * IN(3);
+ t1 = 9929 * IN(0) - 5283 * IN(2) - 15212 * IN(3);
+ t2 = 13377 * (IN(0) - IN(2) + IN(3));
+ t3 = 13377 * IN(1);
+
+ out[0] = (t0 + t3 + (1 << 13)) >> 14;
+ out[1] = (t1 + t3 + (1 << 13)) >> 14;
+ out[2] = (t2 + (1 << 13)) >> 14;
+ out[3] = (t0 + t1 - t3 + (1 << 13)) >> 14;
+}
+
+itxfm_wrap(4, 4)
+
+static av_always_inline void idct8_1d(const dctcoef *in, ptrdiff_t stride,
+ dctcoef *out, int pass)
+{
+ dctint t0, t0a, t1, t1a, t2, t2a, t3, t3a, t4, t4a, t5, t5a, t6, t6a, t7, t7a;
+
+ t0a = ((IN(0) + IN(4)) * 11585 + (1 << 13)) >> 14;
+ t1a = ((IN(0) - IN(4)) * 11585 + (1 << 13)) >> 14;
+ t2a = (IN(2) * 6270 - IN(6) * 15137 + (1 << 13)) >> 14;
+ t3a = (IN(2) * 15137 + IN(6) * 6270 + (1 << 13)) >> 14;
+ t4a = (IN(1) * 3196 - IN(7) * 16069 + (1 << 13)) >> 14;
+ t5a = (IN(5) * 13623 - IN(3) * 9102 + (1 << 13)) >> 14;
+ t6a = (IN(5) * 9102 + IN(3) * 13623 + (1 << 13)) >> 14;
+ t7a = (IN(1) * 16069 + IN(7) * 3196 + (1 << 13)) >> 14;
+
+ t0 = t0a + t3a;
+ t1 = t1a + t2a;
+ t2 = t1a - t2a;
+ t3 = t0a - t3a;
+ t4 = t4a + t5a;
+ t5a = t4a - t5a;
+ t7 = t7a + t6a;
+ t6a = t7a - t6a;
+
+ t5 = ((t6a - t5a) * 11585 + (1 << 13)) >> 14;
+ t6 = ((t6a + t5a) * 11585 + (1 << 13)) >> 14;
+
+ out[0] = t0 + t7;
+ out[1] = t1 + t6;
+ out[2] = t2 + t5;
+ out[3] = t3 + t4;
+ out[4] = t3 - t4;
+ out[5] = t2 - t5;
+ out[6] = t1 - t6;
+ out[7] = t0 - t7;
+}
+
+static av_always_inline void iadst8_1d(const dctcoef *in, ptrdiff_t stride,
+ dctcoef *out, int pass)
+{
+ dctint t0, t0a, t1, t1a, t2, t2a, t3, t3a, t4, t4a, t5, t5a, t6, t6a, t7, t7a;
+
+ t0a = 16305 * IN(7) + 1606 * IN(0);
+ t1a = 1606 * IN(7) - 16305 * IN(0);
+ t2a = 14449 * IN(5) + 7723 * IN(2);
+ t3a = 7723 * IN(5) - 14449 * IN(2);
+ t4a = 10394 * IN(3) + 12665 * IN(4);
+ t5a = 12665 * IN(3) - 10394 * IN(4);
+ t6a = 4756 * IN(1) + 15679 * IN(6);
+ t7a = 15679 * IN(1) - 4756 * IN(6);
+
+ t0 = (t0a + t4a + (1 << 13)) >> 14;
+ t1 = (t1a + t5a + (1 << 13)) >> 14;
+ t2 = (t2a + t6a + (1 << 13)) >> 14;
+ t3 = (t3a + t7a + (1 << 13)) >> 14;
+ t4 = (t0a - t4a + (1 << 13)) >> 14;
+ t5 = (t1a - t5a + (1 << 13)) >> 14;
+ t6 = (t2a - t6a + (1 << 13)) >> 14;
+ t7 = (t3a - t7a + (1 << 13)) >> 14;
+
+ t4a = 15137U * t4 + 6270U * t5;
+ t5a = 6270U * t4 - 15137U * t5;
+ t6a = 15137U * t7 - 6270U * t6;
+ t7a = 6270U * t7 + 15137U * t6;
+
+ out[0] = t0 + t2;
+ out[7] = -(t1 + t3);
+ t2 = t0 - t2;
+ t3 = t1 - t3;
+
+ out[1] = -((dctint)((1U << 13) + t4a + t6a) >> 14);
+ out[6] = (dctint)((1U << 13) + t5a + t7a) >> 14;
+ t6 = (dctint)((1U << 13) + t4a - t6a) >> 14;
+ t7 = (dctint)((1U << 13) + t5a - t7a) >> 14;
+
+ out[3] = -((dctint)((t2 + t3) * 11585U + (1 << 13)) >> 14);
+ out[4] = (dctint)((t2 - t3) * 11585U + (1 << 13)) >> 14;
+ out[2] = (dctint)((t6 + t7) * 11585U + (1 << 13)) >> 14;
+ out[5] = -((dctint)((t6 - t7) * 11585U + (1 << 13)) >> 14);
+}
+
+itxfm_wrap(8, 5)
+
+static av_always_inline void idct16_1d(const dctcoef *in, ptrdiff_t stride,
+ dctcoef *out, int pass)
+{
+ dctint t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
+ dctint t0a, t1a, t2a, t3a, t4a, t5a, t6a, t7a;
+ dctint t8a, t9a, t10a, t11a, t12a, t13a, t14a, t15a;
+
+ t0a = (dctint)((IN(0) + IN(8)) * 11585U + (1 << 13)) >> 14;
+ t1a = (dctint)((IN(0) - IN(8)) * 11585U + (1 << 13)) >> 14;
+ t2a = (dctint)(IN(4) * 6270U - IN(12) * 15137U + (1 << 13)) >> 14;
+ t3a = (dctint)(IN(4) * 15137U + IN(12) * 6270U + (1 << 13)) >> 14;
+ t4a = (dctint)(IN(2) * 3196U - IN(14) * 16069U + (1 << 13)) >> 14;
+ t7a = (dctint)(IN(2) * 16069U + IN(14) * 3196U + (1 << 13)) >> 14;
+ t5a = (dctint)(IN(10) * 13623U - IN(6) * 9102U + (1 << 13)) >> 14;
+ t6a = (dctint)(IN(10) * 9102U + IN(6) * 13623U + (1 << 13)) >> 14;
+ t8a = (dctint)(IN(1) * 1606U - IN(15) * 16305U + (1 << 13)) >> 14;
+ t15a = (dctint)(IN(1) * 16305U + IN(15) * 1606U + (1 << 13)) >> 14;
+ t9a = (dctint)(IN(9) * 12665U - IN(7) * 10394U + (1 << 13)) >> 14;
+ t14a = (dctint)(IN(9) * 10394U + IN(7) * 12665U + (1 << 13)) >> 14;
+ t10a = (dctint)(IN(5) * 7723U - IN(11) * 14449U + (1 << 13)) >> 14;
+ t13a = (dctint)(IN(5) * 14449U + IN(11) * 7723U + (1 << 13)) >> 14;
+ t11a = (dctint)(IN(13) * 15679U - IN(3) * 4756U + (1 << 13)) >> 14;
+ t12a = (dctint)(IN(13) * 4756U + IN(3) * 15679U + (1 << 13)) >> 14;
+
+ t0 = t0a + t3a;
+ t1 = t1a + t2a;
+ t2 = t1a - t2a;
+ t3 = t0a - t3a;
+ t4 = t4a + t5a;
+ t5 = t4a - t5a;
+ t6 = t7a - t6a;
+ t7 = t7a + t6a;
+ t8 = t8a + t9a;
+ t9 = t8a - t9a;
+ t10 = t11a - t10a;
+ t11 = t11a + t10a;
+ t12 = t12a + t13a;
+ t13 = t12a - t13a;
+ t14 = t15a - t14a;
+ t15 = t15a + t14a;
+
+ t5a = (dctint)((t6 - t5) * 11585U + (1 << 13)) >> 14;
+ t6a = (dctint)((t6 + t5) * 11585U + (1 << 13)) >> 14;
+ t9a = (dctint)( t14 * 6270U - t9 * 15137U + (1 << 13)) >> 14;
+ t14a = (dctint)( t14 * 15137U + t9 * 6270U + (1 << 13)) >> 14;
+ t10a = (dctint)(-(t13 * 15137U + t10 * 6270U) + (1 << 13)) >> 14;
+ t13a = (dctint)( t13 * 6270U - t10 * 15137U + (1 << 13)) >> 14;
+
+ t0a = t0 + t7;
+ t1a = t1 + t6a;
+ t2a = t2 + t5a;
+ t3a = t3 + t4;
+ t4 = t3 - t4;
+ t5 = t2 - t5a;
+ t6 = t1 - t6a;
+ t7 = t0 - t7;
+ t8a = t8 + t11;
+ t9 = t9a + t10a;
+ t10 = t9a - t10a;
+ t11a = t8 - t11;
+ t12a = t15 - t12;
+ t13 = t14a - t13a;
+ t14 = t14a + t13a;
+ t15a = t15 + t12;
+
+ t10a = (dctint)((t13 - t10) * 11585U + (1 << 13)) >> 14;
+ t13a = (dctint)((t13 + t10) * 11585U + (1 << 13)) >> 14;
+ t11 = (dctint)((t12a - t11a) * 11585U + (1 << 13)) >> 14;
+ t12 = (dctint)((t12a + t11a) * 11585U + (1 << 13)) >> 14;
+
+ out[ 0] = t0a + t15a;
+ out[ 1] = t1a + t14;
+ out[ 2] = t2a + t13a;
+ out[ 3] = t3a + t12;
+ out[ 4] = t4 + t11;
+ out[ 5] = t5 + t10a;
+ out[ 6] = t6 + t9;
+ out[ 7] = t7 + t8a;
+ out[ 8] = t7 - t8a;
+ out[ 9] = t6 - t9;
+ out[10] = t5 - t10a;
+ out[11] = t4 - t11;
+ out[12] = t3a - t12;
+ out[13] = t2a - t13a;
+ out[14] = t1a - t14;
+ out[15] = t0a - t15a;
+}
+
+static av_always_inline void iadst16_1d(const dctcoef *in, ptrdiff_t stride,
+ dctcoef *out, int pass)
+{
+ dctint t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
+ dctint t0a, t1a, t2a, t3a, t4a, t5a, t6a, t7a;
+ dctint t8a, t9a, t10a, t11a, t12a, t13a, t14a, t15a;
+
+ t0 = IN(15) * 16364U + IN(0) * 804U;
+ t1 = IN(15) * 804U - IN(0) * 16364U;
+ t2 = IN(13) * 15893U + IN(2) * 3981U;
+ t3 = IN(13) * 3981U - IN(2) * 15893U;
+ t4 = IN(11) * 14811U + IN(4) * 7005U;
+ t5 = IN(11) * 7005U - IN(4) * 14811U;
+ t6 = IN(9) * 13160U + IN(6) * 9760U;
+ t7 = IN(9) * 9760U - IN(6) * 13160U;
+ t8 = IN(7) * 11003U + IN(8) * 12140U;
+ t9 = IN(7) * 12140U - IN(8) * 11003U;
+ t10 = IN(5) * 8423U + IN(10) * 14053U;
+ t11 = IN(5) * 14053U - IN(10) * 8423U;
+ t12 = IN(3) * 5520U + IN(12) * 15426U;
+ t13 = IN(3) * 15426U - IN(12) * 5520U;
+ t14 = IN(1) * 2404U + IN(14) * 16207U;
+ t15 = IN(1) * 16207U - IN(14) * 2404U;
+
+ t0a = (dctint)((1U << 13) + t0 + t8 ) >> 14;
+ t1a = (dctint)((1U << 13) + t1 + t9 ) >> 14;
+ t2a = (dctint)((1U << 13) + t2 + t10) >> 14;
+ t3a = (dctint)((1U << 13) + t3 + t11) >> 14;
+ t4a = (dctint)((1U << 13) + t4 + t12) >> 14;
+ t5a = (dctint)((1U << 13) + t5 + t13) >> 14;
+ t6a = (dctint)((1U << 13) + t6 + t14) >> 14;
+ t7a = (dctint)((1U << 13) + t7 + t15) >> 14;
+ t8a = (dctint)((1U << 13) + t0 - t8 ) >> 14;
+ t9a = (dctint)((1U << 13) + t1 - t9 ) >> 14;
+ t10a = (dctint)((1U << 13) + t2 - t10) >> 14;
+ t11a = (dctint)((1U << 13) + t3 - t11) >> 14;
+ t12a = (dctint)((1U << 13) + t4 - t12) >> 14;
+ t13a = (dctint)((1U << 13) + t5 - t13) >> 14;
+ t14a = (dctint)((1U << 13) + t6 - t14) >> 14;
+ t15a = (dctint)((1U << 13) + t7 - t15) >> 14;
+
+ t8 = t8a * 16069U + t9a * 3196U;
+ t9 = t8a * 3196U - t9a * 16069U;
+ t10 = t10a * 9102U + t11a * 13623U;
+ t11 = t10a * 13623U - t11a * 9102U;
+ t12 = t13a * 16069U - t12a * 3196U;
+ t13 = t13a * 3196U + t12a * 16069U;
+ t14 = t15a * 9102U - t14a * 13623U;
+ t15 = t15a * 13623U + t14a * 9102U;
+
+ t0 = t0a + t4a;
+ t1 = t1a + t5a;
+ t2 = t2a + t6a;
+ t3 = t3a + t7a;
+ t4 = t0a - t4a;
+ t5 = t1a - t5a;
+ t6 = t2a - t6a;
+ t7 = t3a - t7a;
+ t8a = (dctint)((1U << 13) + t8 + t12) >> 14;
+ t9a = (dctint)((1U << 13) + t9 + t13) >> 14;
+ t10a = (dctint)((1U << 13) + t10 + t14) >> 14;
+ t11a = (dctint)((1U << 13) + t11 + t15) >> 14;
+ t12a = (dctint)((1U << 13) + t8 - t12) >> 14;
+ t13a = (dctint)((1U << 13) + t9 - t13) >> 14;
+ t14a = (dctint)((1U << 13) + t10 - t14) >> 14;
+ t15a = (dctint)((1U << 13) + t11 - t15) >> 14;
+
+ t4a = t4 * 15137U + t5 * 6270U;
+ t5a = t4 * 6270U - t5 * 15137U;
+ t6a = t7 * 15137U - t6 * 6270U;
+ t7a = t7 * 6270U + t6 * 15137U;
+ t12 = t12a * 15137U + t13a * 6270U;
+ t13 = t12a * 6270U - t13a * 15137U;
+ t14 = t15a * 15137U - t14a * 6270U;
+ t15 = t15a * 6270U + t14a * 15137U;
+
+ out[ 0] = t0 + t2;
+ out[15] = -(t1 + t3);
+ t2a = t0 - t2;
+ t3a = t1 - t3;
+ out[ 3] = -((dctint)((1U << 13) + t4a + t6a) >> 14);
+ out[12] = (dctint)((1U << 13) + t5a + t7a) >> 14;
+ t6 = (dctint)((1U << 13) + t4a - t6a) >> 14;
+ t7 = (dctint)((1U << 13) + t5a - t7a) >> 14;
+ out[ 1] = -(t8a + t10a);
+ out[14] = t9a + t11a;
+ t10 = t8a - t10a;
+ t11 = t9a - t11a;
+ out[ 2] = (dctint)((1U << 13) + t12 + t14) >> 14;
+ out[13] = -((dctint)((1U << 13) + t13 + t15) >> 14);
+ t14a = (dctint)((1U << 13) + t12 - t14) >> 14;
+ t15a = (dctint)((1U << 13) + t13 - t15) >> 14;
+
+ out[ 7] = (dctint)(-(t2a + t3a) * 11585U + (1 << 13)) >> 14;
+ out[ 8] = (dctint)( (t2a - t3a) * 11585U + (1 << 13)) >> 14;
+ out[ 4] = (dctint)( (t7 + t6) * 11585U + (1 << 13)) >> 14;
+ out[11] = (dctint)( (t7 - t6) * 11585U + (1 << 13)) >> 14;
+ out[ 6] = (dctint)( (t11 + t10) * 11585U + (1 << 13)) >> 14;
+ out[ 9] = (dctint)( (t11 - t10) * 11585U + (1 << 13)) >> 14;
+ out[ 5] = (dctint)(-(t14a + t15a) * 11585U + (1 << 13)) >> 14;
+ out[10] = (dctint)( (t14a - t15a) * 11585U + (1 << 13)) >> 14;
+}
+
+itxfm_wrap(16, 6)
+
+static av_always_inline void idct32_1d(const dctcoef *in, ptrdiff_t stride,
+ dctcoef *out, int pass)
+{
+ dctint t0a = (dctint)((IN(0) + IN(16)) * 11585U + (1 << 13)) >> 14;
+ dctint t1a = (dctint)((IN(0) - IN(16)) * 11585U + (1 << 13)) >> 14;
+ dctint t2a = (dctint)(IN( 8) * 6270U - IN(24) * 15137U + (1 << 13)) >> 14;
+ dctint t3a = (dctint)(IN( 8) * 15137U + IN(24) * 6270U + (1 << 13)) >> 14;
+ dctint t4a = (dctint)(IN( 4) * 3196U - IN(28) * 16069U + (1 << 13)) >> 14;
+ dctint t7a = (dctint)(IN( 4) * 16069U + IN(28) * 3196U + (1 << 13)) >> 14;
+ dctint t5a = (dctint)(IN(20) * 13623U - IN(12) * 9102U + (1 << 13)) >> 14;
+ dctint t6a = (dctint)(IN(20) * 9102U + IN(12) * 13623U + (1 << 13)) >> 14;
+ dctint t8a = (dctint)(IN( 2) * 1606U - IN(30) * 16305U + (1 << 13)) >> 14;
+ dctint t15a = (dctint)(IN( 2) * 16305U + IN(30) * 1606U + (1 << 13)) >> 14;
+ dctint t9a = (dctint)(IN(18) * 12665U - IN(14) * 10394U + (1 << 13)) >> 14;
+ dctint t14a = (dctint)(IN(18) * 10394U + IN(14) * 12665U + (1 << 13)) >> 14;
+ dctint t10a = (dctint)(IN(10) * 7723U - IN(22) * 14449U + (1 << 13)) >> 14;
+ dctint t13a = (dctint)(IN(10) * 14449U + IN(22) * 7723U + (1 << 13)) >> 14;
+ dctint t11a = (dctint)(IN(26) * 15679U - IN( 6) * 4756U + (1 << 13)) >> 14;
+ dctint t12a = (dctint)(IN(26) * 4756U + IN( 6) * 15679U + (1 << 13)) >> 14;
+ dctint t16a = (dctint)(IN( 1) * 804U - IN(31) * 16364U + (1 << 13)) >> 14;
+ dctint t31a = (dctint)(IN( 1) * 16364U + IN(31) * 804U + (1 << 13)) >> 14;
+ dctint t17a = (dctint)(IN(17) * 12140U - IN(15) * 11003U + (1 << 13)) >> 14;
+ dctint t30a = (dctint)(IN(17) * 11003U + IN(15) * 12140U + (1 << 13)) >> 14;
+ dctint t18a = (dctint)(IN( 9) * 7005U - IN(23) * 14811U + (1 << 13)) >> 14;
+ dctint t29a = (dctint)(IN( 9) * 14811U + IN(23) * 7005U + (1 << 13)) >> 14;
+ dctint t19a = (dctint)(IN(25) * 15426U - IN( 7) * 5520U + (1 << 13)) >> 14;
+ dctint t28a = (dctint)(IN(25) * 5520U + IN( 7) * 15426U + (1 << 13)) >> 14;
+ dctint t20a = (dctint)(IN( 5) * 3981U - IN(27) * 15893U + (1 << 13)) >> 14;
+ dctint t27a = (dctint)(IN( 5) * 15893U + IN(27) * 3981U + (1 << 13)) >> 14;
+ dctint t21a = (dctint)(IN(21) * 14053U - IN(11) * 8423U + (1 << 13)) >> 14;
+ dctint t26a = (dctint)(IN(21) * 8423U + IN(11) * 14053U + (1 << 13)) >> 14;
+ dctint t22a = (dctint)(IN(13) * 9760U - IN(19) * 13160U + (1 << 13)) >> 14;
+ dctint t25a = (dctint)(IN(13) * 13160U + IN(19) * 9760U + (1 << 13)) >> 14;
+ dctint t23a = (dctint)(IN(29) * 16207U - IN( 3) * 2404U + (1 << 13)) >> 14;
+ dctint t24a = (dctint)(IN(29) * 2404U + IN( 3) * 16207U + (1 << 13)) >> 14;
+
+ dctint t0 = t0a + t3a;
+ dctint t1 = t1a + t2a;
+ dctint t2 = t1a - t2a;
+ dctint t3 = t0a - t3a;
+ dctint t4 = t4a + t5a;
+ dctint t5 = t4a - t5a;
+ dctint t6 = t7a - t6a;
+ dctint t7 = t7a + t6a;
+ dctint t8 = t8a + t9a;
+ dctint t9 = t8a - t9a;
+ dctint t10 = t11a - t10a;
+ dctint t11 = t11a + t10a;
+ dctint t12 = t12a + t13a;
+ dctint t13 = t12a - t13a;
+ dctint t14 = t15a - t14a;
+ dctint t15 = t15a + t14a;
+ dctint t16 = t16a + t17a;
+ dctint t17 = t16a - t17a;
+ dctint t18 = t19a - t18a;
+ dctint t19 = t19a + t18a;
+ dctint t20 = t20a + t21a;
+ dctint t21 = t20a - t21a;
+ dctint t22 = t23a - t22a;
+ dctint t23 = t23a + t22a;
+ dctint t24 = t24a + t25a;
+ dctint t25 = t24a - t25a;
+ dctint t26 = t27a - t26a;
+ dctint t27 = t27a + t26a;
+ dctint t28 = t28a + t29a;
+ dctint t29 = t28a - t29a;
+ dctint t30 = t31a - t30a;
+ dctint t31 = t31a + t30a;
+
+ t5a = (dctint)((t6 - t5) * 11585U + (1 << 13)) >> 14;
+ t6a = (dctint)((t6 + t5) * 11585U + (1 << 13)) >> 14;
+ t9a = (dctint)( t14 * 6270U - t9 * 15137U + (1 << 13)) >> 14;
+ t14a = (dctint)( t14 * 15137U + t9 * 6270U + (1 << 13)) >> 14;
+ t10a = (dctint)(-(t13 * 15137U + t10 * 6270U) + (1 << 13)) >> 14;
+ t13a = (dctint)( t13 * 6270U - t10 * 15137U + (1 << 13)) >> 14;
+ t17a = (dctint)( t30 * 3196U - t17 * 16069U + (1 << 13)) >> 14;
+ t30a = (dctint)( t30 * 16069U + t17 * 3196U + (1 << 13)) >> 14;
+ t18a = (dctint)(-(t29 * 16069U + t18 * 3196U) + (1 << 13)) >> 14;
+ t29a = (dctint)( t29 * 3196U - t18 * 16069U + (1 << 13)) >> 14;
+ t21a = (dctint)( t26 * 13623U - t21 * 9102U + (1 << 13)) >> 14;
+ t26a = (dctint)( t26 * 9102U + t21 * 13623U + (1 << 13)) >> 14;
+ t22a = (dctint)(-(t25 * 9102U + t22 * 13623U) + (1 << 13)) >> 14;
+ t25a = (dctint)( t25 * 13623U - t22 * 9102U + (1 << 13)) >> 14;
+
+ t0a = t0 + t7;
+ t1a = t1 + t6a;
+ t2a = t2 + t5a;
+ t3a = t3 + t4;
+ t4a = t3 - t4;
+ t5 = t2 - t5a;
+ t6 = t1 - t6a;
+ t7a = t0 - t7;
+ t8a = t8 + t11;
+ t9 = t9a + t10a;
+ t10 = t9a - t10a;
+ t11a = t8 - t11;
+ t12a = t15 - t12;
+ t13 = t14a - t13a;
+ t14 = t14a + t13a;
+ t15a = t15 + t12;
+ t16a = t16 + t19;
+ t17 = t17a + t18a;
+ t18 = t17a - t18a;
+ t19a = t16 - t19;
+ t20a = t23 - t20;
+ t21 = t22a - t21a;
+ t22 = t22a + t21a;
+ t23a = t23 + t20;
+ t24a = t24 + t27;
+ t25 = t25a + t26a;
+ t26 = t25a - t26a;
+ t27a = t24 - t27;
+ t28a = t31 - t28;
+ t29 = t30a - t29a;
+ t30 = t30a + t29a;
+ t31a = t31 + t28;
+
+ t10a = (dctint)((t13 - t10) * 11585U + (1 << 13)) >> 14;
+ t13a = (dctint)((t13 + t10) * 11585U + (1 << 13)) >> 14;
+ t11 = (dctint)((t12a - t11a) * 11585U + (1 << 13)) >> 14;
+ t12 = (dctint)((t12a + t11a) * 11585U + (1 << 13)) >> 14;
+ t18a = (dctint)( t29 * 6270U - t18 * 15137U + (1 << 13)) >> 14;
+ t29a = (dctint)( t29 * 15137U + t18 * 6270U + (1 << 13)) >> 14;
+ t19 = (dctint)( t28a * 6270U - t19a * 15137U + (1 << 13)) >> 14;
+ t28 = (dctint)( t28a * 15137U + t19a * 6270U + (1 << 13)) >> 14;
+ t20 = (dctint)(-(t27a * 15137U + t20a * 6270U) + (1 << 13)) >> 14;
+ t27 = (dctint)( t27a * 6270U - t20a * 15137U + (1 << 13)) >> 14;
+ t21a = (dctint)(-(t26 * 15137U + t21 * 6270U) + (1 << 13)) >> 14;
+ t26a = (dctint)( t26 * 6270U - t21 * 15137U + (1 << 13)) >> 14;
+
+ t0 = t0a + t15a;
+ t1 = t1a + t14;
+ t2 = t2a + t13a;
+ t3 = t3a + t12;
+ t4 = t4a + t11;
+ t5a = t5 + t10a;
+ t6a = t6 + t9;
+ t7 = t7a + t8a;
+ t8 = t7a - t8a;
+ t9a = t6 - t9;
+ t10 = t5 - t10a;
+ t11a = t4a - t11;
+ t12a = t3a - t12;
+ t13 = t2a - t13a;
+ t14a = t1a - t14;
+ t15 = t0a - t15a;
+ t16 = t16a + t23a;
+ t17a = t17 + t22;
+ t18 = t18a + t21a;
+ t19a = t19 + t20;
+ t20a = t19 - t20;
+ t21 = t18a - t21a;
+ t22a = t17 - t22;
+ t23 = t16a - t23a;
+ t24 = t31a - t24a;
+ t25a = t30 - t25;
+ t26 = t29a - t26a;
+ t27a = t28 - t27;
+ t28a = t28 + t27;
+ t29 = t29a + t26a;
+ t30a = t30 + t25;
+ t31 = t31a + t24a;
+
+ t20 = (dctint)((t27a - t20a) * 11585U + (1 << 13)) >> 14;
+ t27 = (dctint)((t27a + t20a) * 11585U + (1 << 13)) >> 14;
+ t21a = (dctint)((t26 - t21 ) * 11585U + (1 << 13)) >> 14;
+ t26a = (dctint)((t26 + t21 ) * 11585U + (1 << 13)) >> 14;
+ t22 = (dctint)((t25a - t22a) * 11585U + (1 << 13)) >> 14;
+ t25 = (dctint)((t25a + t22a) * 11585U + (1 << 13)) >> 14;
+ t23a = (dctint)((t24 - t23 ) * 11585U + (1 << 13)) >> 14;
+ t24a = (dctint)((t24 + t23 ) * 11585U + (1 << 13)) >> 14;
+
+ out[ 0] = t0 + t31;
+ out[ 1] = t1 + t30a;
+ out[ 2] = t2 + t29;
+ out[ 3] = t3 + t28a;
+ out[ 4] = t4 + t27;
+ out[ 5] = t5a + t26a;
+ out[ 6] = t6a + t25;
+ out[ 7] = t7 + t24a;
+ out[ 8] = t8 + t23a;
+ out[ 9] = t9a + t22;
+ out[10] = t10 + t21a;
+ out[11] = t11a + t20;
+ out[12] = t12a + t19a;
+ out[13] = t13 + t18;
+ out[14] = t14a + t17a;
+ out[15] = t15 + t16;
+ out[16] = t15 - t16;
+ out[17] = t14a - t17a;
+ out[18] = t13 - t18;
+ out[19] = t12a - t19a;
+ out[20] = t11a - t20;
+ out[21] = t10 - t21a;
+ out[22] = t9a - t22;
+ out[23] = t8 - t23a;
+ out[24] = t7 - t24a;
+ out[25] = t6a - t25;
+ out[26] = t5a - t26a;
+ out[27] = t4 - t27;
+ out[28] = t3 - t28a;
+ out[29] = t2 - t29;
+ out[30] = t1 - t30a;
+ out[31] = t0 - t31;
+}
+
+itxfm_wrapper(idct, idct, 32, 6, 1)
+
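+/* Inverse Walsh-Hadamard transform for lossless blocks; the first (column)
+ * pass pre-scales the input coefficients with >> 2. */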
+static av_always_inline void iwht4_1d(const dctcoef *in, ptrdiff_t stride,
+ dctcoef *out, int pass)
+{
+ int t0, t1, t2, t3, t4;
+
+ if (pass == 0) {
+ t0 = IN(0) >> 2;
+ t1 = IN(3) >> 2;
+ t2 = IN(1) >> 2;
+ t3 = IN(2) >> 2;
+ } else {
+ t0 = IN(0);
+ t1 = IN(3);
+ t2 = IN(1);
+ t3 = IN(2);
+ }
+
+ t0 += t2;
+ t3 -= t1;
+ t4 = (t0 - t3) >> 1;
+ t1 = t4 - t1;
+ t2 = t4 - t2;
+ t0 -= t1;
+ t3 += t2;
+
+ out[0] = t0;
+ out[1] = t1;
+ out[2] = t2;
+ out[3] = t3;
+}
+
+itxfm_wrapper(iwht, iwht, 4, 0, 0)
+
+#undef IN
+#undef itxfm_wrapper
+#undef itxfm_wrap
+
+static av_cold void vp9dsp_itxfm_init(VP9DSPContext *dsp)
+{
+#define init_itxfm(tx, sz) \
+ dsp->itxfm_add[tx][DCT_DCT] = idct_idct_##sz##_add_c; \
+ dsp->itxfm_add[tx][DCT_ADST] = iadst_idct_##sz##_add_c; \
+ dsp->itxfm_add[tx][ADST_DCT] = idct_iadst_##sz##_add_c; \
+ dsp->itxfm_add[tx][ADST_ADST] = iadst_iadst_##sz##_add_c
+
+#define init_idct(tx, nm) \
+ dsp->itxfm_add[tx][DCT_DCT] = \
+ dsp->itxfm_add[tx][ADST_DCT] = \
+ dsp->itxfm_add[tx][DCT_ADST] = \
+ dsp->itxfm_add[tx][ADST_ADST] = nm##_add_c
+
+ init_itxfm(TX_4X4, 4x4);
+ init_itxfm(TX_8X8, 8x8);
+ init_itxfm(TX_16X16, 16x16);
+ init_idct(TX_32X32, idct_idct_32x32);
+ init_idct(4 /* lossless */, iwht_iwht_4x4);
+
+#undef init_itxfm
+#undef init_idct
+}
+
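+/* Generic in-loop deblocking filter for one 8-pixel-long edge segment.
+ * E, I and H are the edge, interior and high-edge-variance thresholds
+ * (scaled up for higher bit depths); wd selects how wide a filter may be
+ * applied (4, 8 or 16 samples across the edge). stridea steps along the
+ * edge and strideb across it, so the same code serves horizontal and
+ * vertical edges. */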
+static av_always_inline void loop_filter(pixel *dst, int E, int I, int H,
+ ptrdiff_t stridea, ptrdiff_t strideb,
+ int wd)
+{
+ int i, F = 1 << (BIT_DEPTH - 8);
+
+ E <<= (BIT_DEPTH - 8);
+ I <<= (BIT_DEPTH - 8);
+ H <<= (BIT_DEPTH - 8);
+ for (i = 0; i < 8; i++, dst += stridea) {
+ int p7, p6, p5, p4;
+ int p3 = dst[strideb * -4], p2 = dst[strideb * -3];
+ int p1 = dst[strideb * -2], p0 = dst[strideb * -1];
+ int q0 = dst[strideb * +0], q1 = dst[strideb * +1];
+ int q2 = dst[strideb * +2], q3 = dst[strideb * +3];
+ int q4, q5, q6, q7;
+ int fm = FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I &&
+ FFABS(p1 - p0) <= I && FFABS(q1 - q0) <= I &&
+ FFABS(q2 - q1) <= I && FFABS(q3 - q2) <= I &&
+ FFABS(p0 - q0) * 2 + (FFABS(p1 - q1) >> 1) <= E;
+ int flat8out, flat8in;
+
+ if (!fm)
+ continue;
+
+ if (wd >= 16) {
+ p7 = dst[strideb * -8];
+ p6 = dst[strideb * -7];
+ p5 = dst[strideb * -6];
+ p4 = dst[strideb * -5];
+ q4 = dst[strideb * +4];
+ q5 = dst[strideb * +5];
+ q6 = dst[strideb * +6];
+ q7 = dst[strideb * +7];
+
+ flat8out = FFABS(p7 - p0) <= F && FFABS(p6 - p0) <= F &&
+ FFABS(p5 - p0) <= F && FFABS(p4 - p0) <= F &&
+ FFABS(q4 - q0) <= F && FFABS(q5 - q0) <= F &&
+ FFABS(q6 - q0) <= F && FFABS(q7 - q0) <= F;
+ }
+
+ if (wd >= 8)
+ flat8in = FFABS(p3 - p0) <= F && FFABS(p2 - p0) <= F &&
+ FFABS(p1 - p0) <= F && FFABS(q1 - q0) <= F &&
+ FFABS(q2 - q0) <= F && FFABS(q3 - q0) <= F;
+
+ if (wd >= 16 && flat8out && flat8in) {
+ dst[strideb * -7] = (p7 + p7 + p7 + p7 + p7 + p7 + p7 + p6 * 2 +
+ p5 + p4 + p3 + p2 + p1 + p0 + q0 + 8) >> 4;
+ dst[strideb * -6] = (p7 + p7 + p7 + p7 + p7 + p7 + p6 + p5 * 2 +
+ p4 + p3 + p2 + p1 + p0 + q0 + q1 + 8) >> 4;
+ dst[strideb * -5] = (p7 + p7 + p7 + p7 + p7 + p6 + p5 + p4 * 2 +
+ p3 + p2 + p1 + p0 + q0 + q1 + q2 + 8) >> 4;
+ dst[strideb * -4] = (p7 + p7 + p7 + p7 + p6 + p5 + p4 + p3 * 2 +
+ p2 + p1 + p0 + q0 + q1 + q2 + q3 + 8) >> 4;
+ dst[strideb * -3] = (p7 + p7 + p7 + p6 + p5 + p4 + p3 + p2 * 2 +
+ p1 + p0 + q0 + q1 + q2 + q3 + q4 + 8) >> 4;
+ dst[strideb * -2] = (p7 + p7 + p6 + p5 + p4 + p3 + p2 + p1 * 2 +
+ p0 + q0 + q1 + q2 + q3 + q4 + q5 + 8) >> 4;
+ dst[strideb * -1] = (p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
+ q0 + q1 + q2 + q3 + q4 + q5 + q6 + 8) >> 4;
+ dst[strideb * +0] = (p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 +
+ q1 + q2 + q3 + q4 + q5 + q6 + q7 + 8) >> 4;
+ dst[strideb * +1] = (p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 * 2 +
+ q2 + q3 + q4 + q5 + q6 + q7 + q7 + 8) >> 4;
+ dst[strideb * +2] = (p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 * 2 +
+ q3 + q4 + q5 + q6 + q7 + q7 + q7 + 8) >> 4;
+ dst[strideb * +3] = (p3 + p2 + p1 + p0 + q0 + q1 + q2 + q3 * 2 +
+ q4 + q5 + q6 + q7 + q7 + q7 + q7 + 8) >> 4;
+ dst[strideb * +4] = (p2 + p1 + p0 + q0 + q1 + q2 + q3 + q4 * 2 +
+ q5 + q6 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
+ dst[strideb * +5] = (p1 + p0 + q0 + q1 + q2 + q3 + q4 + q5 * 2 +
+ q6 + q7 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
+ dst[strideb * +6] = (p0 + q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 +
+ q7 + q7 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
+ } else if (wd >= 8 && flat8in) {
+ dst[strideb * -3] = (p3 + p3 + p3 + 2 * p2 + p1 + p0 + q0 + 4) >> 3;
+ dst[strideb * -2] = (p3 + p3 + p2 + 2 * p1 + p0 + q0 + q1 + 4) >> 3;
+ dst[strideb * -1] = (p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2 + 4) >> 3;
+ dst[strideb * +0] = (p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3 + 4) >> 3;
+ dst[strideb * +1] = (p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3 + 4) >> 3;
+ dst[strideb * +2] = (p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3 + 4) >> 3;
+ } else {
+ int hev = FFABS(p1 - p0) > H || FFABS(q1 - q0) > H;
+
+ if (hev) {
+ int f = av_clip_intp2(p1 - q1, BIT_DEPTH - 1), f1, f2;
+ f = av_clip_intp2(3 * (q0 - p0) + f, BIT_DEPTH - 1);
+
+ f1 = FFMIN(f + 4, (1 << (BIT_DEPTH - 1)) - 1) >> 3;
+ f2 = FFMIN(f + 3, (1 << (BIT_DEPTH - 1)) - 1) >> 3;
+
+ dst[strideb * -1] = av_clip_pixel(p0 + f2);
+ dst[strideb * +0] = av_clip_pixel(q0 - f1);
+ } else {
+ int f = av_clip_intp2(3 * (q0 - p0), BIT_DEPTH - 1), f1, f2;
+
+ f1 = FFMIN(f + 4, (1 << (BIT_DEPTH - 1)) - 1) >> 3;
+ f2 = FFMIN(f + 3, (1 << (BIT_DEPTH - 1)) - 1) >> 3;
+
+ dst[strideb * -1] = av_clip_pixel(p0 + f2);
+ dst[strideb * +0] = av_clip_pixel(q0 - f1);
+
+ f = (f1 + 1) >> 1;
+ dst[strideb * -2] = av_clip_pixel(p1 + f);
+ dst[strideb * +1] = av_clip_pixel(q1 - f);
+ }
+ }
+ }
+}
+
+#define lf_8_fn(dir, wd, stridea, strideb) \
+static void loop_filter_##dir##_##wd##_8_c(uint8_t *_dst, \
+ ptrdiff_t stride, \
+ int E, int I, int H) \
+{ \
+ pixel *dst = (pixel *) _dst; \
+ stride /= sizeof(pixel); \
+ loop_filter(dst, E, I, H, stridea, strideb, wd); \
+}
+
+#define lf_8_fns(wd) \
+lf_8_fn(h, wd, stride, 1) \
+lf_8_fn(v, wd, 1, stride)
+
+lf_8_fns(4)
+lf_8_fns(8)
+lf_8_fns(16)
+
+#undef lf_8_fn
+#undef lf_8_fns
+
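+/* Filtering a 16-pixel-long edge is done as two 8-pixel halves. */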
+#define lf_16_fn(dir, stridea) \
+static void loop_filter_##dir##_16_16_c(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int E, int I, int H) \
+{ \
+ loop_filter_##dir##_16_8_c(dst, stride, E, I, H); \
+ loop_filter_##dir##_16_8_c(dst + 8 * stridea, stride, E, I, H); \
+}
+
+lf_16_fn(h, stride)
+lf_16_fn(v, sizeof(pixel))
+
+#undef lf_16_fn
+
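+/* The mix2 variants filter the two 8-pixel halves of a 16-pixel edge with
+ * independent widths (wd1/wd2); the thresholds for the second half are
+ * packed into the high byte of E, I and H. */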
+#define lf_mix_fn(dir, wd1, wd2, stridea) \
+static void loop_filter_##dir##_##wd1##wd2##_16_c(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int E, int I, int H) \
+{ \
+ loop_filter_##dir##_##wd1##_8_c(dst, stride, E & 0xff, I & 0xff, H & 0xff); \
+ loop_filter_##dir##_##wd2##_8_c(dst + 8 * stridea, stride, E >> 8, I >> 8, H >> 8); \
+}
+
+#define lf_mix_fns(wd1, wd2) \
+lf_mix_fn(h, wd1, wd2, stride) \
+lf_mix_fn(v, wd1, wd2, sizeof(pixel))
+
+lf_mix_fns(4, 4)
+lf_mix_fns(4, 8)
+lf_mix_fns(8, 4)
+lf_mix_fns(8, 8)
+
+#undef lf_mix_fn
+#undef lf_mix_fns
+
+static av_cold void vp9dsp_loopfilter_init(VP9DSPContext *dsp)
+{
+ dsp->loop_filter_8[0][0] = loop_filter_h_4_8_c;
+ dsp->loop_filter_8[0][1] = loop_filter_v_4_8_c;
+ dsp->loop_filter_8[1][0] = loop_filter_h_8_8_c;
+ dsp->loop_filter_8[1][1] = loop_filter_v_8_8_c;
+ dsp->loop_filter_8[2][0] = loop_filter_h_16_8_c;
+ dsp->loop_filter_8[2][1] = loop_filter_v_16_8_c;
+
+ dsp->loop_filter_16[0] = loop_filter_h_16_16_c;
+ dsp->loop_filter_16[1] = loop_filter_v_16_16_c;
+
+ dsp->loop_filter_mix2[0][0][0] = loop_filter_h_44_16_c;
+ dsp->loop_filter_mix2[0][0][1] = loop_filter_v_44_16_c;
+ dsp->loop_filter_mix2[0][1][0] = loop_filter_h_48_16_c;
+ dsp->loop_filter_mix2[0][1][1] = loop_filter_v_48_16_c;
+ dsp->loop_filter_mix2[1][0][0] = loop_filter_h_84_16_c;
+ dsp->loop_filter_mix2[1][0][1] = loop_filter_v_84_16_c;
+ dsp->loop_filter_mix2[1][1][0] = loop_filter_h_88_16_c;
+ dsp->loop_filter_mix2[1][1][1] = loop_filter_v_88_16_c;
+}
+
+#if BIT_DEPTH != 12
+
+static av_always_inline void copy_c(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *src, ptrdiff_t src_stride,
+ int w, int h)
+{
+ do {
+ memcpy(dst, src, w * sizeof(pixel));
+
+ dst += dst_stride;
+ src += src_stride;
+ } while (--h);
+}
+
+static av_always_inline void avg_c(uint8_t *_dst, ptrdiff_t dst_stride,
+ const uint8_t *_src, ptrdiff_t src_stride,
+ int w, int h)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *src = (const pixel *) _src;
+
+ dst_stride /= sizeof(pixel);
+ src_stride /= sizeof(pixel);
+ do {
+ int x;
+
+ for (x = 0; x < w; x += 4)
+ AV_WN4PA(&dst[x], rnd_avg_pixel4(AV_RN4PA(&dst[x]), AV_RN4P(&src[x])));
+
+ dst += dst_stride;
+ src += src_stride;
+ } while (--h);
+}
+
+#define fpel_fn(type, sz) \
+static void type##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my) \
+{ \
+ type##_c(dst, dst_stride, src, src_stride, sz, h); \
+}
+
+#define copy_avg_fn(sz) \
+fpel_fn(copy, sz) \
+fpel_fn(avg, sz)
+
+copy_avg_fn(64)
+copy_avg_fn(32)
+copy_avg_fn(16)
+copy_avg_fn(8)
+copy_avg_fn(4)
+
+#undef fpel_fn
+#undef copy_avg_fn
+
+#endif /* BIT_DEPTH != 12 */
+
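+/* 8-tap sub-pel FIR filter; the VP9 filter coefficients have 7-bit
+ * precision (they sum to 128), hence the +64 rounding and >> 7. */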
+#define FILTER_8TAP(src, x, F, stride) \
+ av_clip_pixel((F[0] * src[x + -3 * stride] + \
+ F[1] * src[x + -2 * stride] + \
+ F[2] * src[x + -1 * stride] + \
+ F[3] * src[x + +0 * stride] + \
+ F[4] * src[x + +1 * stride] + \
+ F[5] * src[x + +2 * stride] + \
+ F[6] * src[x + +3 * stride] + \
+ F[7] * src[x + +4 * stride] + 64) >> 7)
+
+static av_always_inline void do_8tap_1d_c(uint8_t *_dst, ptrdiff_t dst_stride,
+ const uint8_t *_src, ptrdiff_t src_stride,
+ int w, int h, ptrdiff_t ds,
+ const int16_t *filter, int avg)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *src = (const pixel *) _src;
+
+ dst_stride /= sizeof(pixel);
+ src_stride /= sizeof(pixel);
+ do {
+ int x;
+
+ for (x = 0; x < w; x++)
+ if (avg) {
+ dst[x] = (dst[x] + FILTER_8TAP(src, x, filter, ds) + 1) >> 1;
+ } else {
+ dst[x] = FILTER_8TAP(src, x, filter, ds);
+ }
+
+ dst += dst_stride;
+ src += src_stride;
+ } while (--h);
+}
+
+#define filter_8tap_1d_fn(opn, opa, dir, ds) \
+static av_noinline void opn##_8tap_1d_##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int w, int h, const int16_t *filter) \
+{ \
+ do_8tap_1d_c(dst, dst_stride, src, src_stride, w, h, ds, filter, opa); \
+}
+
+filter_8tap_1d_fn(put, 0, v, src_stride / sizeof(pixel))
+filter_8tap_1d_fn(put, 0, h, 1)
+filter_8tap_1d_fn(avg, 1, v, src_stride / sizeof(pixel))
+filter_8tap_1d_fn(avg, 1, h, 1)
+
+#undef filter_8tap_1d_fn
+
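+/* Separable 2-D sub-pel filtering: the horizontal pass writes h + 7 rows
+ * (the extra rows feed the 8-tap vertical filter) into a 64-pixel-wide
+ * temporary buffer, which the vertical pass then filters into dst. */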
+static av_always_inline void do_8tap_2d_c(uint8_t *_dst, ptrdiff_t dst_stride,
+ const uint8_t *_src, ptrdiff_t src_stride,
+ int w, int h, const int16_t *filterx,
+ const int16_t *filtery, int avg)
+{
+ int tmp_h = h + 7;
+ pixel tmp[64 * 71], *tmp_ptr = tmp;
+ pixel *dst = (pixel *) _dst;
+ const pixel *src = (const pixel *) _src;
+
+ dst_stride /= sizeof(pixel);
+ src_stride /= sizeof(pixel);
+ src -= src_stride * 3;
+ do {
+ int x;
+
+ for (x = 0; x < w; x++)
+ tmp_ptr[x] = FILTER_8TAP(src, x, filterx, 1);
+
+ tmp_ptr += 64;
+ src += src_stride;
+ } while (--tmp_h);
+
+ tmp_ptr = tmp + 64 * 3;
+ do {
+ int x;
+
+ for (x = 0; x < w; x++)
+ if (avg) {
+ dst[x] = (dst[x] + FILTER_8TAP(tmp_ptr, x, filtery, 64) + 1) >> 1;
+ } else {
+ dst[x] = FILTER_8TAP(tmp_ptr, x, filtery, 64);
+ }
+
+ tmp_ptr += 64;
+ dst += dst_stride;
+ } while (--h);
+}
+
+#define filter_8tap_2d_fn(opn, opa) \
+static av_noinline void opn##_8tap_2d_hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int w, int h, const int16_t *filterx, \
+ const int16_t *filtery) \
+{ \
+ do_8tap_2d_c(dst, dst_stride, src, src_stride, w, h, filterx, filtery, opa); \
+}
+
+filter_8tap_2d_fn(put, 0)
+filter_8tap_2d_fn(avg, 1)
+
+#undef filter_8tap_2d_fn
+
+#define filter_fn_1d(sz, dir, dir_m, type, type_idx, avg) \
+static void avg##_8tap_##type##_##sz##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my) \
+{ \
+ avg##_8tap_1d_##dir##_c(dst, dst_stride, src, src_stride, sz, h, \
+ ff_vp9_subpel_filters[type_idx][dir_m]); \
+}
+
+#define filter_fn_2d(sz, type, type_idx, avg) \
+static void avg##_8tap_##type##_##sz##hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my) \
+{ \
+ avg##_8tap_2d_hv_c(dst, dst_stride, src, src_stride, sz, h, \
+ ff_vp9_subpel_filters[type_idx][mx], \
+ ff_vp9_subpel_filters[type_idx][my]); \
+}
+
+#if BIT_DEPTH != 12
+
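+/* Bilinear interpolation between two neighbouring samples with a 1/16-pel
+ * weight mxy, rounded with +8 and >> 4. */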
+#define FILTER_BILIN(src, x, mxy, stride) \
+ (src[x] + ((mxy * (src[x + stride] - src[x]) + 8) >> 4))
+
+static av_always_inline void do_bilin_1d_c(uint8_t *_dst, ptrdiff_t dst_stride,
+ const uint8_t *_src, ptrdiff_t src_stride,
+ int w, int h, ptrdiff_t ds, int mxy, int avg)
+{
+ pixel *dst = (pixel *) _dst;
+ const pixel *src = (const pixel *) _src;
+
+ dst_stride /= sizeof(pixel);
+ src_stride /= sizeof(pixel);
+ do {
+ int x;
+
+ for (x = 0; x < w; x++)
+ if (avg) {
+ dst[x] = (dst[x] + FILTER_BILIN(src, x, mxy, ds) + 1) >> 1;
+ } else {
+ dst[x] = FILTER_BILIN(src, x, mxy, ds);
+ }
+
+ dst += dst_stride;
+ src += src_stride;
+ } while (--h);
+}
+
+#define bilin_1d_fn(opn, opa, dir, ds) \
+static av_noinline void opn##_bilin_1d_##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int w, int h, int mxy) \
+{ \
+ do_bilin_1d_c(dst, dst_stride, src, src_stride, w, h, ds, mxy, opa); \
+}
+
+bilin_1d_fn(put, 0, v, src_stride / sizeof(pixel))
+bilin_1d_fn(put, 0, h, 1)
+bilin_1d_fn(avg, 1, v, src_stride / sizeof(pixel))
+bilin_1d_fn(avg, 1, h, 1)
+
+#undef bilin_1d_fn
+
+static av_always_inline void do_bilin_2d_c(uint8_t *_dst, ptrdiff_t dst_stride,
+ const uint8_t *_src, ptrdiff_t src_stride,
+ int w, int h, int mx, int my, int avg)
+{
+ pixel tmp[64 * 65], *tmp_ptr = tmp;
+ int tmp_h = h + 1;
+ pixel *dst = (pixel *) _dst;
+ const pixel *src = (const pixel *) _src;
+
+ dst_stride /= sizeof(pixel);
+ src_stride /= sizeof(pixel);
+ do {
+ int x;
+
+ for (x = 0; x < w; x++)
+ tmp_ptr[x] = FILTER_BILIN(src, x, mx, 1);
+
+ tmp_ptr += 64;
+ src += src_stride;
+ } while (--tmp_h);
+
+ tmp_ptr = tmp;
+ do {
+ int x;
+
+ for (x = 0; x < w; x++)
+ if (avg) {
+ dst[x] = (dst[x] + FILTER_BILIN(tmp_ptr, x, my, 64) + 1) >> 1;
+ } else {
+ dst[x] = FILTER_BILIN(tmp_ptr, x, my, 64);
+ }
+
+ tmp_ptr += 64;
+ dst += dst_stride;
+ } while (--h);
+}
+
+#define bilin_2d_fn(opn, opa) \
+static av_noinline void opn##_bilin_2d_hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int w, int h, int mx, int my) \
+{ \
+ do_bilin_2d_c(dst, dst_stride, src, src_stride, w, h, mx, my, opa); \
+}
+
+bilin_2d_fn(put, 0)
+bilin_2d_fn(avg, 1)
+
+#undef bilin_2d_fn
+
+#define bilinf_fn_1d(sz, dir, dir_m, avg) \
+static void avg##_bilin_##sz##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my) \
+{ \
+ avg##_bilin_1d_##dir##_c(dst, dst_stride, src, src_stride, sz, h, dir_m); \
+}
+
+#define bilinf_fn_2d(sz, avg) \
+static void avg##_bilin_##sz##hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my) \
+{ \
+ avg##_bilin_2d_hv_c(dst, dst_stride, src, src_stride, sz, h, mx, my); \
+}
+
+#else
+
+#define bilinf_fn_1d(a, b, c, d)
+#define bilinf_fn_2d(a, b)
+
+#endif
+
+#define filter_fn(sz, avg) \
+filter_fn_1d(sz, h, mx, regular, FILTER_8TAP_REGULAR, avg) \
+filter_fn_1d(sz, v, my, regular, FILTER_8TAP_REGULAR, avg) \
+filter_fn_2d(sz, regular, FILTER_8TAP_REGULAR, avg) \
+filter_fn_1d(sz, h, mx, smooth, FILTER_8TAP_SMOOTH, avg) \
+filter_fn_1d(sz, v, my, smooth, FILTER_8TAP_SMOOTH, avg) \
+filter_fn_2d(sz, smooth, FILTER_8TAP_SMOOTH, avg) \
+filter_fn_1d(sz, h, mx, sharp, FILTER_8TAP_SHARP, avg) \
+filter_fn_1d(sz, v, my, sharp, FILTER_8TAP_SHARP, avg) \
+filter_fn_2d(sz, sharp, FILTER_8TAP_SHARP, avg) \
+bilinf_fn_1d(sz, h, mx, avg) \
+bilinf_fn_1d(sz, v, my, avg) \
+bilinf_fn_2d(sz, avg)
+
+#define filter_fn_set(avg) \
+filter_fn(64, avg) \
+filter_fn(32, avg) \
+filter_fn(16, avg) \
+filter_fn(8, avg) \
+filter_fn(4, avg)
+
+filter_fn_set(put)
+filter_fn_set(avg)
+
+#undef filter_fn
+#undef filter_fn_set
+#undef filter_fn_1d
+#undef filter_fn_2d
+#undef bilinf_fn_1d
+#undef bilinf_fn_2d
+
+#if BIT_DEPTH != 8
+void ff_vp9dsp_mc_init_10(VP9DSPContext *dsp);
+#endif
+#if BIT_DEPTH != 10
+static
+#endif
+av_cold void FUNC(ff_vp9dsp_mc_init)(VP9DSPContext *dsp)
+{
+#if BIT_DEPTH == 12
+ ff_vp9dsp_mc_init_10(dsp);
+#else /* BIT_DEPTH == 12 */
+
+#define init_fpel(idx1, idx2, sz, type) \
+ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = type##sz##_c; \
+ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = type##sz##_c; \
+ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = type##sz##_c; \
+ dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = type##sz##_c
+
+#define init_copy_avg(idx, sz) \
+ init_fpel(idx, 0, sz, copy); \
+ init_fpel(idx, 1, sz, avg)
+
+ init_copy_avg(0, 64);
+ init_copy_avg(1, 32);
+ init_copy_avg(2, 16);
+ init_copy_avg(3, 8);
+ init_copy_avg(4, 4);
+
+#undef init_copy_avg
+#undef init_fpel
+
+#endif /* BIT_DEPTH == 12 */
+
+#define init_subpel1_bd_aware(idx1, idx2, idxh, idxv, sz, dir, type) \
+ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = type##_8tap_smooth_##sz##dir##_c; \
+ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = type##_8tap_regular_##sz##dir##_c; \
+ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = type##_8tap_sharp_##sz##dir##_c
+
+#if BIT_DEPTH == 12
+#define init_subpel1 init_subpel1_bd_aware
+#else
+#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type) \
+ init_subpel1_bd_aware(idx1, idx2, idxh, idxv, sz, dir, type); \
+ dsp->mc[idx1][FILTER_BILINEAR ][idx2][idxh][idxv] = type##_bilin_##sz##dir##_c
+#endif
+
+#define init_subpel2(idx, idxh, idxv, dir, type) \
+ init_subpel1(0, idx, idxh, idxv, 64, dir, type); \
+ init_subpel1(1, idx, idxh, idxv, 32, dir, type); \
+ init_subpel1(2, idx, idxh, idxv, 16, dir, type); \
+ init_subpel1(3, idx, idxh, idxv, 8, dir, type); \
+ init_subpel1(4, idx, idxh, idxv, 4, dir, type)
+
+#define init_subpel3(idx, type) \
+ init_subpel2(idx, 1, 1, hv, type); \
+ init_subpel2(idx, 0, 1, v, type); \
+ init_subpel2(idx, 1, 0, h, type)
+
+ init_subpel3(0, put);
+ init_subpel3(1, avg);
+
+#undef init_subpel1
+#undef init_subpel2
+#undef init_subpel3
+#undef init_subpel1_bd_aware
+}
+
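+/* Motion compensation from a scaled reference: mx/my are the initial
+ * 1/16-pel phases and dx/dy the per-output-pixel increments. The
+ * horizontal pass resamples enough rows for the vertical 8-tap support
+ * into a 64-wide temporary; the vertical pass then steps through it by
+ * dy, switching filter phase per output row. */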
+static av_always_inline void do_scaled_8tap_c(uint8_t *_dst, ptrdiff_t dst_stride,
+ const uint8_t *_src, ptrdiff_t src_stride,
+ int w, int h, int mx, int my,
+ int dx, int dy, int avg,
+ const int16_t (*filters)[8])
+{
+ int tmp_h = (((h - 1) * dy + my) >> 4) + 8;
+ pixel tmp[64 * 135], *tmp_ptr = tmp;
+ pixel *dst = (pixel *) _dst;
+ const pixel *src = (const pixel *) _src;
+
+ dst_stride /= sizeof(pixel);
+ src_stride /= sizeof(pixel);
+ src -= src_stride * 3;
+ do {
+ int x;
+ int imx = mx, ioff = 0;
+
+ for (x = 0; x < w; x++) {
+ tmp_ptr[x] = FILTER_8TAP(src, ioff, filters[imx], 1);
+ imx += dx;
+ ioff += imx >> 4;
+ imx &= 0xf;
+ }
+
+ tmp_ptr += 64;
+ src += src_stride;
+ } while (--tmp_h);
+
+ tmp_ptr = tmp + 64 * 3;
+ do {
+ int x;
+ const int16_t *filter = filters[my];
+
+ for (x = 0; x < w; x++)
+ if (avg) {
+ dst[x] = (dst[x] + FILTER_8TAP(tmp_ptr, x, filter, 64) + 1) >> 1;
+ } else {
+ dst[x] = FILTER_8TAP(tmp_ptr, x, filter, 64);
+ }
+
+ my += dy;
+ tmp_ptr += (my >> 4) * 64;
+ my &= 0xf;
+ dst += dst_stride;
+ } while (--h);
+}
+
+#define scaled_filter_8tap_fn(opn, opa) \
+static av_noinline void opn##_scaled_8tap_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int w, int h, int mx, int my, int dx, int dy, \
+ const int16_t (*filters)[8]) \
+{ \
+ do_scaled_8tap_c(dst, dst_stride, src, src_stride, w, h, mx, my, dx, dy, \
+ opa, filters); \
+}
+
+scaled_filter_8tap_fn(put, 0)
+scaled_filter_8tap_fn(avg, 1)
+
+#undef scaled_filter_8tap_fn
+
+#undef FILTER_8TAP
+
+#define scaled_filter_fn(sz, type, type_idx, avg) \
+static void avg##_scaled_##type##_##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my, int dx, int dy) \
+{ \
+ avg##_scaled_8tap_c(dst, dst_stride, src, src_stride, sz, h, mx, my, dx, dy, \
+ ff_vp9_subpel_filters[type_idx]); \
+}
+
+#if BIT_DEPTH != 12
+
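+/* Scaled-reference bilinear variant: same 1/16-pel stepping as the 8-tap
+ * version above, but only two rows/columns of filter support are needed. */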
+static av_always_inline void do_scaled_bilin_c(uint8_t *_dst, ptrdiff_t dst_stride,
+ const uint8_t *_src, ptrdiff_t src_stride,
+ int w, int h, int mx, int my,
+ int dx, int dy, int avg)
+{
+ pixel tmp[64 * 129], *tmp_ptr = tmp;
+ int tmp_h = (((h - 1) * dy + my) >> 4) + 2;
+ pixel *dst = (pixel *) _dst;
+ const pixel *src = (const pixel *) _src;
+
+ dst_stride /= sizeof(pixel);
+ src_stride /= sizeof(pixel);
+ do {
+ int x;
+ int imx = mx, ioff = 0;
+
+ for (x = 0; x < w; x++) {
+ tmp_ptr[x] = FILTER_BILIN(src, ioff, imx, 1);
+ imx += dx;
+ ioff += imx >> 4;
+ imx &= 0xf;
+ }
+
+ tmp_ptr += 64;
+ src += src_stride;
+ } while (--tmp_h);
+
+ tmp_ptr = tmp;
+ do {
+ int x;
+
+ for (x = 0; x < w; x++)
+ if (avg) {
+ dst[x] = (dst[x] + FILTER_BILIN(tmp_ptr, x, my, 64) + 1) >> 1;
+ } else {
+ dst[x] = FILTER_BILIN(tmp_ptr, x, my, 64);
+ }
+
+ my += dy;
+ tmp_ptr += (my >> 4) * 64;
+ my &= 0xf;
+ dst += dst_stride;
+ } while (--h);
+}
+
+#define scaled_bilin_fn(opn, opa) \
+static av_noinline void opn##_scaled_bilin_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int w, int h, int mx, int my, int dx, int dy) \
+{ \
+ do_scaled_bilin_c(dst, dst_stride, src, src_stride, w, h, mx, my, dx, dy, opa); \
+}
+
+scaled_bilin_fn(put, 0)
+scaled_bilin_fn(avg, 1)
+
+#undef scaled_bilin_fn
+
+#undef FILTER_BILIN
+
+#define scaled_bilinf_fn(sz, avg) \
+static void avg##_scaled_bilin_##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my, int dx, int dy) \
+{ \
+ avg##_scaled_bilin_c(dst, dst_stride, src, src_stride, sz, h, mx, my, dx, dy); \
+}
+
+#else
+
+#define scaled_bilinf_fn(a, b)
+
+#endif
+
+#define scaled_filter_fns(sz, avg) \
+scaled_filter_fn(sz, regular, FILTER_8TAP_REGULAR, avg) \
+scaled_filter_fn(sz, smooth, FILTER_8TAP_SMOOTH, avg) \
+scaled_filter_fn(sz, sharp, FILTER_8TAP_SHARP, avg) \
+scaled_bilinf_fn(sz, avg)
+
+#define scaled_filter_fn_set(avg) \
+scaled_filter_fns(64, avg) \
+scaled_filter_fns(32, avg) \
+scaled_filter_fns(16, avg) \
+scaled_filter_fns(8, avg) \
+scaled_filter_fns(4, avg)
+
+scaled_filter_fn_set(put)
+scaled_filter_fn_set(avg)
+
+#undef scaled_filter_fns
+#undef scaled_filter_fn_set
+#undef scaled_filter_fn
+#undef scaled_bilinf_fn
+
+#if BIT_DEPTH != 8
+void ff_vp9dsp_scaled_mc_init_10(VP9DSPContext *dsp);
+#endif
+#if BIT_DEPTH != 10
+static
+#endif
+av_cold void FUNC(ff_vp9dsp_scaled_mc_init)(VP9DSPContext *dsp)
+{
+#define init_scaled_bd_aware(idx1, idx2, sz, type) \
+ dsp->smc[idx1][FILTER_8TAP_SMOOTH ][idx2] = type##_scaled_smooth_##sz##_c; \
+ dsp->smc[idx1][FILTER_8TAP_REGULAR][idx2] = type##_scaled_regular_##sz##_c; \
+ dsp->smc[idx1][FILTER_8TAP_SHARP ][idx2] = type##_scaled_sharp_##sz##_c
+
+#if BIT_DEPTH == 12
+ ff_vp9dsp_scaled_mc_init_10(dsp);
+#define init_scaled(a,b,c,d) init_scaled_bd_aware(a,b,c,d)
+#else
+#define init_scaled(idx1, idx2, sz, type) \
+ init_scaled_bd_aware(idx1, idx2, sz, type); \
+ dsp->smc[idx1][FILTER_BILINEAR ][idx2] = type##_scaled_bilin_##sz##_c
+#endif
+
+#define init_scaled_put_avg(idx, sz) \
+ init_scaled(idx, 0, sz, put); \
+ init_scaled(idx, 1, sz, avg)
+
+ init_scaled_put_avg(0, 64);
+ init_scaled_put_avg(1, 32);
+ init_scaled_put_avg(2, 16);
+ init_scaled_put_avg(3, 8);
+ init_scaled_put_avg(4, 4);
+
+#undef init_scaled_put_avg
+#undef init_scaled
+#undef init_scaled_bd_aware
+}
+
+av_cold void FUNC(ff_vp9dsp_init)(VP9DSPContext *dsp)
+{
+ FUNC(ff_vp9dsp_intrapred_init)(dsp);
+ vp9dsp_itxfm_init(dsp);
+ vp9dsp_loopfilter_init(dsp);
+ FUNC(ff_vp9dsp_mc_init)(dsp);
+ FUNC(ff_vp9dsp_scaled_mc_init)(dsp);
+}
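The scaled MC loops above advance a 1/16th-pel phase (mx/my, stepped by dx/dy) across the block and blend neighbouring source pixels per tap. A minimal standalone sketch of that bilinear tap for the 8-bit case, assuming the FILTER_BILIN macro defined earlier in this template rounds with +8 before the 4-bit shift:

#include <stdint.h>

/* blend src[idx] and src[idx + stride] at a 1/16th-pel phase frac in [0, 15] */
static inline int bilin_tap(const uint8_t *src, int idx, int frac, int stride)
{
    return src[idx] + ((frac * (src[idx + stride] - src[idx]) + 8) >> 4);
}

/* e.g. frac == 8 yields the rounded midpoint of the two samples */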
diff --git a/media/ffvpx/libavcodec/vp9lpf.c b/media/ffvpx/libavcodec/vp9lpf.c
new file mode 100644
index 0000000000..414cede852
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9lpf.c
@@ -0,0 +1,202 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "vp9dec.h"
+
+static av_always_inline void filter_plane_cols(VP9Context *s, int col, int ss_h, int ss_v,
+ uint8_t *lvl, uint8_t (*mask)[4],
+ uint8_t *dst, ptrdiff_t ls)
+{
+ int y, x, bytesperpixel = s->bytesperpixel;
+
+ // filter edges between columns (e.g. block1 | block2)
+ for (y = 0; y < 8; y += 2 << ss_v, dst += 16 * ls, lvl += 16 << ss_v) {
+ uint8_t *ptr = dst, *l = lvl, *hmask1 = mask[y], *hmask2 = mask[y + 1 + ss_v];
+ unsigned hm1 = hmask1[0] | hmask1[1] | hmask1[2], hm13 = hmask1[3];
+ unsigned hm2 = hmask2[1] | hmask2[2], hm23 = hmask2[3];
+ unsigned hm = hm1 | hm2 | hm13 | hm23;
+
+ for (x = 1; hm & ~(x - 1); x <<= 1, ptr += 8 * bytesperpixel >> ss_h) {
+ if (col || x > 1) {
+ if (hm1 & x) {
+ int L = *l, H = L >> 4;
+ int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];
+
+ if (hmask1[0] & x) {
+ if (hmask2[0] & x) {
+ av_assert2(l[8 << ss_v] == L);
+ s->dsp.loop_filter_16[0](ptr, ls, E, I, H);
+ } else {
+ s->dsp.loop_filter_8[2][0](ptr, ls, E, I, H);
+ }
+ } else if (hm2 & x) {
+ L = l[8 << ss_v];
+ H |= (L >> 4) << 8;
+ E |= s->filter_lut.mblim_lut[L] << 8;
+ I |= s->filter_lut.lim_lut[L] << 8;
+ s->dsp.loop_filter_mix2[!!(hmask1[1] & x)]
+ [!!(hmask2[1] & x)]
+ [0](ptr, ls, E, I, H);
+ } else {
+ s->dsp.loop_filter_8[!!(hmask1[1] & x)]
+ [0](ptr, ls, E, I, H);
+ }
+ } else if (hm2 & x) {
+ int L = l[8 << ss_v], H = L >> 4;
+ int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];
+
+ s->dsp.loop_filter_8[!!(hmask2[1] & x)]
+ [0](ptr + 8 * ls, ls, E, I, H);
+ }
+ }
+ if (ss_h) {
+ if (x & 0xAA)
+ l += 2;
+ } else {
+ if (hm13 & x) {
+ int L = *l, H = L >> 4;
+ int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];
+
+ if (hm23 & x) {
+ L = l[8 << ss_v];
+ H |= (L >> 4) << 8;
+ E |= s->filter_lut.mblim_lut[L] << 8;
+ I |= s->filter_lut.lim_lut[L] << 8;
+ s->dsp.loop_filter_mix2[0][0][0](ptr + 4 * bytesperpixel, ls, E, I, H);
+ } else {
+ s->dsp.loop_filter_8[0][0](ptr + 4 * bytesperpixel, ls, E, I, H);
+ }
+ } else if (hm23 & x) {
+ int L = l[8 << ss_v], H = L >> 4;
+ int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];
+
+ s->dsp.loop_filter_8[0][0](ptr + 8 * ls + 4 * bytesperpixel, ls, E, I, H);
+ }
+ l++;
+ }
+ }
+ }
+}
+
+static av_always_inline void filter_plane_rows(VP9Context *s, int row, int ss_h, int ss_v,
+ uint8_t *lvl, uint8_t (*mask)[4],
+ uint8_t *dst, ptrdiff_t ls)
+{
+ int y, x, bytesperpixel = s->bytesperpixel;
+
+ // block1
+ // filter edges between rows (e.g. ------)
+ // block2
+ for (y = 0; y < 8; y++, dst += 8 * ls >> ss_v) {
+ uint8_t *ptr = dst, *l = lvl, *vmask = mask[y];
+ unsigned vm = vmask[0] | vmask[1] | vmask[2], vm3 = vmask[3];
+
+ for (x = 1; vm & ~(x - 1); x <<= (2 << ss_h), ptr += 16 * bytesperpixel, l += 2 << ss_h) {
+ if (row || y) {
+ if (vm & x) {
+ int L = *l, H = L >> 4;
+ int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];
+
+ if (vmask[0] & x) {
+ if (vmask[0] & (x << (1 + ss_h))) {
+ av_assert2(l[1 + ss_h] == L);
+ s->dsp.loop_filter_16[1](ptr, ls, E, I, H);
+ } else {
+ s->dsp.loop_filter_8[2][1](ptr, ls, E, I, H);
+ }
+ } else if (vm & (x << (1 + ss_h))) {
+ L = l[1 + ss_h];
+ H |= (L >> 4) << 8;
+ E |= s->filter_lut.mblim_lut[L] << 8;
+ I |= s->filter_lut.lim_lut[L] << 8;
+ s->dsp.loop_filter_mix2[!!(vmask[1] & x)]
+ [!!(vmask[1] & (x << (1 + ss_h)))]
+ [1](ptr, ls, E, I, H);
+ } else {
+ s->dsp.loop_filter_8[!!(vmask[1] & x)]
+ [1](ptr, ls, E, I, H);
+ }
+ } else if (vm & (x << (1 + ss_h))) {
+ int L = l[1 + ss_h], H = L >> 4;
+ int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];
+
+ s->dsp.loop_filter_8[!!(vmask[1] & (x << (1 + ss_h)))]
+ [1](ptr + 8 * bytesperpixel, ls, E, I, H);
+ }
+ }
+ if (!ss_v) {
+ if (vm3 & x) {
+ int L = *l, H = L >> 4;
+ int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];
+
+ if (vm3 & (x << (1 + ss_h))) {
+ L = l[1 + ss_h];
+ H |= (L >> 4) << 8;
+ E |= s->filter_lut.mblim_lut[L] << 8;
+ I |= s->filter_lut.lim_lut[L] << 8;
+ s->dsp.loop_filter_mix2[0][0][1](ptr + ls * 4, ls, E, I, H);
+ } else {
+ s->dsp.loop_filter_8[0][1](ptr + ls * 4, ls, E, I, H);
+ }
+ } else if (vm3 & (x << (1 + ss_h))) {
+ int L = l[1 + ss_h], H = L >> 4;
+ int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];
+
+ s->dsp.loop_filter_8[0][1](ptr + ls * 4 + 8 * bytesperpixel, ls, E, I, H);
+ }
+ }
+ }
+ if (ss_v) {
+ if (y & 1)
+ lvl += 16;
+ } else {
+ lvl += 8;
+ }
+ }
+}
+
+void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl,
+ int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
+{
+ VP9Context *s = avctx->priv_data;
+ AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
+ uint8_t *dst = f->data[0] + yoff;
+ ptrdiff_t ls_y = f->linesize[0], ls_uv = f->linesize[1];
+ uint8_t (*uv_masks)[8][4] = lflvl->mask[s->ss_h | s->ss_v];
+ int p;
+
+    /* FIXME: To what extent can we interleave the v/h loopfilter calls? E.g.
+     * if we think of them as acting on at most an 8x8 block, we can interleave
+     * each v/h pair within the single x loop, but that only works if we work
+     * on 8-pixel blocks, and we won't always do that (we want at least 16px
+     * to use SSE2 optimizations, perhaps 32 for AVX2). */
+
+ filter_plane_cols(s, col, 0, 0, lflvl->level, lflvl->mask[0][0], dst, ls_y);
+ filter_plane_rows(s, row, 0, 0, lflvl->level, lflvl->mask[0][1], dst, ls_y);
+
+ for (p = 0; p < 2; p++) {
+ dst = f->data[1 + p] + uvoff;
+ filter_plane_cols(s, col, s->ss_h, s->ss_v, lflvl->level, uv_masks[0], dst, ls_uv);
+ filter_plane_rows(s, row, s->ss_h, s->ss_v, lflvl->level, uv_masks[1], dst, ls_uv);
+ }
+}
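Both plane filters above derive three per-edge parameters from the 8-bit level L (H = L >> 4 plus the mblim/lim lookups) and, for the loop_filter_mix2 calls, pack two adjacent 8px edges into the low and high bytes of each argument. A hypothetical helper showing just that packing step (mblim_lut/lim_lut stand in for s->filter_lut):

#include <stdint.h>

static void pack_mix2_params(uint8_t L0, uint8_t L1,
                             const uint8_t *mblim_lut, const uint8_t *lim_lut,
                             int *E, int *I, int *H)
{
    *H = (L0 >> 4)     | ((L1 >> 4)     << 8); /* high-edge-variance thresholds */
    *E = mblim_lut[L0] | (mblim_lut[L1] << 8); /* outer edge limits             */
    *I = lim_lut[L0]   | (lim_lut[L1]   << 8); /* interior limits               */
}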
diff --git a/media/ffvpx/libavcodec/vp9mvs.c b/media/ffvpx/libavcodec/vp9mvs.c
new file mode 100644
index 0000000000..b93d878d6f
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9mvs.c
@@ -0,0 +1,364 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "threadframe.h"
+#include "vp89_rac.h"
+#include "vp9data.h"
+#include "vp9dec.h"
+#include "vpx_rac.h"
+
+static av_always_inline void clamp_mv(VP9mv *dst, const VP9mv *src,
+ VP9TileData *td)
+{
+ dst->x = av_clip(src->x, td->min_mv.x, td->max_mv.x);
+ dst->y = av_clip(src->y, td->min_mv.y, td->max_mv.y);
+}
+
+static void find_ref_mvs(VP9TileData *td,
+ VP9mv *pmv, int ref, int z, int idx, int sb)
+{
+ static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
+ [BS_64x64] = { { 3, -1 }, { -1, 3 }, { 4, -1 }, { -1, 4 },
+ { -1, -1 }, { 0, -1 }, { -1, 0 }, { 6, -1 } },
+ [BS_64x32] = { { 0, -1 }, { -1, 0 }, { 4, -1 }, { -1, 2 },
+ { -1, -1 }, { 0, -3 }, { -3, 0 }, { 2, -1 } },
+ [BS_32x64] = { { -1, 0 }, { 0, -1 }, { -1, 4 }, { 2, -1 },
+ { -1, -1 }, { -3, 0 }, { 0, -3 }, { -1, 2 } },
+ [BS_32x32] = { { 1, -1 }, { -1, 1 }, { 2, -1 }, { -1, 2 },
+ { -1, -1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
+ [BS_32x16] = { { 0, -1 }, { -1, 0 }, { 2, -1 }, { -1, -1 },
+ { -1, 1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
+ [BS_16x32] = { { -1, 0 }, { 0, -1 }, { -1, 2 }, { -1, -1 },
+ { 1, -1 }, { -3, 0 }, { 0, -3 }, { -3, -3 } },
+ [BS_16x16] = { { 0, -1 }, { -1, 0 }, { 1, -1 }, { -1, 1 },
+ { -1, -1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
+ [BS_16x8] = { { 0, -1 }, { -1, 0 }, { 1, -1 }, { -1, -1 },
+ { 0, -2 }, { -2, 0 }, { -2, -1 }, { -1, -2 } },
+ [BS_8x16] = { { -1, 0 }, { 0, -1 }, { -1, 1 }, { -1, -1 },
+ { -2, 0 }, { 0, -2 }, { -1, -2 }, { -2, -1 } },
+ [BS_8x8] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
+ { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
+ [BS_8x4] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
+ { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
+ [BS_4x8] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
+ { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
+ [BS_4x4] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
+ { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
+ };
+ const VP9Context *s = td->s;
+ VP9Block *b = td->b;
+ int row = td->row, col = td->col, row7 = td->row7;
+ const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
+#define INVALID_MV 0x80008000U
+ uint32_t mem = INVALID_MV, mem_sub8x8 = INVALID_MV;
+ int i;
+
+#define RETURN_DIRECT_MV(mv) \
+ do { \
+ uint32_t m = AV_RN32A(&mv); \
+ if (!idx) { \
+ AV_WN32A(pmv, m); \
+ return; \
+ } else if (mem == INVALID_MV) { \
+ mem = m; \
+ } else if (m != mem) { \
+ AV_WN32A(pmv, m); \
+ return; \
+ } \
+ } while (0)
+
+ if (sb >= 0) {
+ if (sb == 2 || sb == 1) {
+ RETURN_DIRECT_MV(b->mv[0][z]);
+ } else if (sb == 3) {
+ RETURN_DIRECT_MV(b->mv[2][z]);
+ RETURN_DIRECT_MV(b->mv[1][z]);
+ RETURN_DIRECT_MV(b->mv[0][z]);
+ }
+
+#define RETURN_MV(mv) \
+ do { \
+ if (sb > 0) { \
+ VP9mv tmp; \
+ uint32_t m; \
+ av_assert2(idx == 1); \
+ av_assert2(mem != INVALID_MV); \
+ if (mem_sub8x8 == INVALID_MV) { \
+ clamp_mv(&tmp, &mv, td); \
+ m = AV_RN32A(&tmp); \
+ if (m != mem) { \
+ AV_WN32A(pmv, m); \
+ return; \
+ } \
+ mem_sub8x8 = AV_RN32A(&mv); \
+ } else if (mem_sub8x8 != AV_RN32A(&mv)) { \
+ clamp_mv(&tmp, &mv, td); \
+ m = AV_RN32A(&tmp); \
+ if (m != mem) { \
+ AV_WN32A(pmv, m); \
+ } else { \
+ /* BUG I'm pretty sure this isn't the intention */ \
+ AV_WN32A(pmv, 0); \
+ } \
+ return; \
+ } \
+ } else { \
+ uint32_t m = AV_RN32A(&mv); \
+ if (!idx) { \
+ clamp_mv(pmv, &mv, td); \
+ return; \
+ } else if (mem == INVALID_MV) { \
+ mem = m; \
+ } else if (m != mem) { \
+ clamp_mv(pmv, &mv, td); \
+ return; \
+ } \
+ } \
+ } while (0)
+
+ if (row > 0) {
+ VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col];
+ if (mv->ref[0] == ref)
+ RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
+ else if (mv->ref[1] == ref)
+ RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
+ }
+ if (col > td->tile_col_start) {
+ VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1];
+ if (mv->ref[0] == ref)
+ RETURN_MV(td->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
+ else if (mv->ref[1] == ref)
+ RETURN_MV(td->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
+ }
+ i = 2;
+ } else {
+ i = 0;
+ }
+
+ // previously coded MVs in this neighborhood, using same reference frame
+ for (; i < 8; i++) {
+ int c = p[i][0] + col, r = p[i][1] + row;
+
+ if (c >= td->tile_col_start && c < s->cols &&
+ r >= 0 && r < s->rows) {
+ VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];
+
+ if (mv->ref[0] == ref)
+ RETURN_MV(mv->mv[0]);
+ else if (mv->ref[1] == ref)
+ RETURN_MV(mv->mv[1]);
+ }
+ }
+
+ // MV at this position in previous frame, using same reference frame
+ if (s->s.h.use_last_frame_mvs) {
+ VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];
+
+ if (!s->s.frames[REF_FRAME_MVPAIR].uses_2pass)
+ ff_thread_await_progress(&s->s.frames[REF_FRAME_MVPAIR].tf, row >> 3, 0);
+ if (mv->ref[0] == ref)
+ RETURN_MV(mv->mv[0]);
+ else if (mv->ref[1] == ref)
+ RETURN_MV(mv->mv[1]);
+ }
+
+#define RETURN_SCALE_MV(mv, scale) \
+ do { \
+ if (scale) { \
+ VP9mv mv_temp = { -mv.x, -mv.y }; \
+ RETURN_MV(mv_temp); \
+ } else { \
+ RETURN_MV(mv); \
+ } \
+ } while (0)
+
+ // previously coded MVs in this neighborhood, using different reference frame
+ for (i = 0; i < 8; i++) {
+ int c = p[i][0] + col, r = p[i][1] + row;
+
+ if (c >= td->tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
+ VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];
+
+ if (mv->ref[0] != ref && mv->ref[0] >= 0)
+ RETURN_SCALE_MV(mv->mv[0],
+ s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
+ if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
+ // BUG - libvpx has this condition regardless of whether
+ // we used the first ref MV and pre-scaling
+ AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
+ RETURN_SCALE_MV(mv->mv[1], s->s.h.signbias[mv->ref[1]] != s->s.h.signbias[ref]);
+ }
+ }
+ }
+
+ // MV at this position in previous frame, using different reference frame
+ if (s->s.h.use_last_frame_mvs) {
+ VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];
+
+ // no need to await_progress, because we already did that above
+ if (mv->ref[0] != ref && mv->ref[0] >= 0)
+ RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
+ if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
+ // BUG - libvpx has this condition regardless of whether
+ // we used the first ref MV and pre-scaling
+ AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
+ RETURN_SCALE_MV(mv->mv[1], s->s.h.signbias[mv->ref[1]] != s->s.h.signbias[ref]);
+ }
+ }
+
+ AV_ZERO32(pmv);
+ clamp_mv(pmv, pmv, td);
+#undef INVALID_MV
+#undef RETURN_MV
+#undef RETURN_SCALE_MV
+}
+
+static av_always_inline int read_mv_component(VP9TileData *td, int idx, int hp)
+{
+ const VP9Context *s = td->s;
+ int bit, sign = vpx_rac_get_prob(td->c, s->prob.p.mv_comp[idx].sign);
+ int n, c = vp89_rac_get_tree(td->c, ff_vp9_mv_class_tree,
+ s->prob.p.mv_comp[idx].classes);
+
+ td->counts.mv_comp[idx].sign[sign]++;
+ td->counts.mv_comp[idx].classes[c]++;
+ if (c) {
+ int m;
+
+ for (n = 0, m = 0; m < c; m++) {
+ bit = vpx_rac_get_prob(td->c, s->prob.p.mv_comp[idx].bits[m]);
+ n |= bit << m;
+ td->counts.mv_comp[idx].bits[m][bit]++;
+ }
+ n <<= 3;
+ bit = vp89_rac_get_tree(td->c, ff_vp9_mv_fp_tree,
+ s->prob.p.mv_comp[idx].fp);
+ n |= bit << 1;
+ td->counts.mv_comp[idx].fp[bit]++;
+ if (hp) {
+ bit = vpx_rac_get_prob(td->c, s->prob.p.mv_comp[idx].hp);
+ td->counts.mv_comp[idx].hp[bit]++;
+ n |= bit;
+ } else {
+ n |= 1;
+ // bug in libvpx - we count for bw entropy purposes even if the
+ // bit wasn't coded
+ td->counts.mv_comp[idx].hp[1]++;
+ }
+ n += 8 << c;
+ } else {
+ n = vpx_rac_get_prob(td->c, s->prob.p.mv_comp[idx].class0);
+ td->counts.mv_comp[idx].class0[n]++;
+ bit = vp89_rac_get_tree(td->c, ff_vp9_mv_fp_tree,
+ s->prob.p.mv_comp[idx].class0_fp[n]);
+ td->counts.mv_comp[idx].class0_fp[n][bit]++;
+ n = (n << 3) | (bit << 1);
+ if (hp) {
+ bit = vpx_rac_get_prob(td->c, s->prob.p.mv_comp[idx].class0_hp);
+ td->counts.mv_comp[idx].class0_hp[bit]++;
+ n |= bit;
+ } else {
+ n |= 1;
+ // bug in libvpx - we count for bw entropy purposes even if the
+ // bit wasn't coded
+ td->counts.mv_comp[idx].class0_hp[1]++;
+ }
+ }
+
+ return sign ? -(n + 1) : (n + 1);
+}
+
+void ff_vp9_fill_mv(VP9TileData *td, VP9mv *mv, int mode, int sb)
+{
+ const VP9Context *s = td->s;
+ VP9Block *b = td->b;
+
+ if (mode == ZEROMV) {
+ AV_ZERO64(mv);
+ } else {
+ int hp;
+
+ // FIXME cache this value and reuse for other subblocks
+ find_ref_mvs(td, &mv[0], b->ref[0], 0, mode == NEARMV,
+ mode == NEWMV ? -1 : sb);
+ // FIXME maybe move this code into find_ref_mvs()
+ if ((mode == NEWMV || sb == -1) &&
+ !(hp = s->s.h.highprecisionmvs &&
+ abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
+ if (mv[0].y & 1) {
+ if (mv[0].y < 0)
+ mv[0].y++;
+ else
+ mv[0].y--;
+ }
+ if (mv[0].x & 1) {
+ if (mv[0].x < 0)
+ mv[0].x++;
+ else
+ mv[0].x--;
+ }
+ }
+ if (mode == NEWMV) {
+ enum MVJoint j = vp89_rac_get_tree(td->c, ff_vp9_mv_joint_tree,
+ s->prob.p.mv_joint);
+
+ td->counts.mv_joint[j]++;
+ if (j >= MV_JOINT_V)
+ mv[0].y += read_mv_component(td, 0, hp);
+ if (j & 1)
+ mv[0].x += read_mv_component(td, 1, hp);
+ }
+
+ if (b->comp) {
+ // FIXME cache this value and reuse for other subblocks
+ find_ref_mvs(td, &mv[1], b->ref[1], 1, mode == NEARMV,
+ mode == NEWMV ? -1 : sb);
+ if ((mode == NEWMV || sb == -1) &&
+ !(hp = s->s.h.highprecisionmvs &&
+ abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
+ if (mv[1].y & 1) {
+ if (mv[1].y < 0)
+ mv[1].y++;
+ else
+ mv[1].y--;
+ }
+ if (mv[1].x & 1) {
+ if (mv[1].x < 0)
+ mv[1].x++;
+ else
+ mv[1].x--;
+ }
+ }
+ if (mode == NEWMV) {
+ enum MVJoint j = vp89_rac_get_tree(td->c, ff_vp9_mv_joint_tree,
+ s->prob.p.mv_joint);
+
+ td->counts.mv_joint[j]++;
+ if (j >= MV_JOINT_V)
+ mv[1].y += read_mv_component(td, 0, hp);
+ if (j & 1)
+ mv[1].x += read_mv_component(td, 1, hp);
+ }
+ }
+ }
+}
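When high-precision MVs are off, or a component's magnitude reaches 64 (in 1/8-pel units), ff_vp9_fill_mv() above drops the 1/8-pel bit by rounding odd components toward zero. The same rule as a standalone sketch:

#include <stdint.h>

/* round an odd 1/8-pel component toward zero so only 1/4-pel phases remain */
static inline int16_t round_mv_no_hp(int16_t v)
{
    if (v & 1)
        v += v < 0 ? 1 : -1;
    return v;
}

/* e.g. round_mv_no_hp(5) == 4 and round_mv_no_hp(-5) == -4 */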
diff --git a/media/ffvpx/libavcodec/vp9prob.c b/media/ffvpx/libavcodec/vp9prob.c
new file mode 100644
index 0000000000..69a5180770
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9prob.c
@@ -0,0 +1,272 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "vp9.h"
+#include "vp9dec.h"
+
+static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
+ int max_count, int update_factor)
+{
+ unsigned ct = ct0 + ct1, p2, p1;
+
+ if (!ct)
+ return;
+
+ update_factor = FASTDIV(update_factor * FFMIN(ct, max_count), max_count);
+ p1 = *p;
+ p2 = ((((int64_t) ct0) << 8) + (ct >> 1)) / ct;
+ p2 = av_clip(p2, 1, 255);
+
+ // (p1 * (256 - update_factor) + p2 * update_factor + 128) >> 8
+ *p = p1 + (((p2 - p1) * update_factor + 128) >> 8);
+}
+
+void ff_vp9_adapt_probs(VP9Context *s)
+{
+ int i, j, k, l, m;
+ ProbContext *p = &s->prob_ctx[s->s.h.framectxid].p;
+ int uf = (s->s.h.keyframe || s->s.h.intraonly || !s->last_keyframe) ? 112 : 128;
+
+ // coefficients
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 2; k++)
+ for (l = 0; l < 6; l++)
+ for (m = 0; m < 6; m++) {
+ uint8_t *pp = s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m];
+ unsigned *e = s->td[0].counts.eob[i][j][k][l][m];
+ unsigned *c = s->td[0].counts.coef[i][j][k][l][m];
+
+ if (l == 0 && m >= 3) // dc only has 3 pt
+ break;
+
+ adapt_prob(&pp[0], e[0], e[1], 24, uf);
+ adapt_prob(&pp[1], c[0], c[1] + c[2], 24, uf);
+ adapt_prob(&pp[2], c[1], c[2], 24, uf);
+ }
+
+ if (s->s.h.keyframe || s->s.h.intraonly) {
+ memcpy(p->skip, s->prob.p.skip, sizeof(p->skip));
+ memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p));
+ memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p));
+ memcpy(p->tx8p, s->prob.p.tx8p, sizeof(p->tx8p));
+ return;
+ }
+
+ // skip flag
+ for (i = 0; i < 3; i++)
+ adapt_prob(&p->skip[i], s->td[0].counts.skip[i][0],
+ s->td[0].counts.skip[i][1], 20, 128);
+
+ // intra/inter flag
+ for (i = 0; i < 4; i++)
+ adapt_prob(&p->intra[i], s->td[0].counts.intra[i][0],
+ s->td[0].counts.intra[i][1], 20, 128);
+
+ // comppred flag
+ if (s->s.h.comppredmode == PRED_SWITCHABLE) {
+ for (i = 0; i < 5; i++)
+ adapt_prob(&p->comp[i], s->td[0].counts.comp[i][0],
+ s->td[0].counts.comp[i][1], 20, 128);
+ }
+
+ // reference frames
+ if (s->s.h.comppredmode != PRED_SINGLEREF) {
+ for (i = 0; i < 5; i++)
+ adapt_prob(&p->comp_ref[i], s->td[0].counts.comp_ref[i][0],
+ s->td[0].counts.comp_ref[i][1], 20, 128);
+ }
+
+ if (s->s.h.comppredmode != PRED_COMPREF) {
+ for (i = 0; i < 5; i++) {
+ uint8_t *pp = p->single_ref[i];
+ unsigned (*c)[2] = s->td[0].counts.single_ref[i];
+
+ adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
+ adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
+ }
+ }
+
+ // block partitioning
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++) {
+ uint8_t *pp = p->partition[i][j];
+ unsigned *c = s->td[0].counts.partition[i][j];
+
+ adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
+ adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
+ adapt_prob(&pp[2], c[2], c[3], 20, 128);
+ }
+
+ // tx size
+ if (s->s.h.txfmmode == TX_SWITCHABLE) {
+ for (i = 0; i < 2; i++) {
+ unsigned *c16 = s->td[0].counts.tx16p[i], *c32 = s->td[0].counts.tx32p[i];
+
+ adapt_prob(&p->tx8p[i], s->td[0].counts.tx8p[i][0],
+ s->td[0].counts.tx8p[i][1], 20, 128);
+ adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
+ adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
+ adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
+ adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
+ adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
+ }
+ }
+
+ // interpolation filter
+ if (s->s.h.filtermode == FILTER_SWITCHABLE) {
+ for (i = 0; i < 4; i++) {
+ uint8_t *pp = p->filter[i];
+ unsigned *c = s->td[0].counts.filter[i];
+
+ adapt_prob(&pp[0], c[0], c[1] + c[2], 20, 128);
+ adapt_prob(&pp[1], c[1], c[2], 20, 128);
+ }
+ }
+
+ // inter modes
+ for (i = 0; i < 7; i++) {
+ uint8_t *pp = p->mv_mode[i];
+ unsigned *c = s->td[0].counts.mv_mode[i];
+
+ adapt_prob(&pp[0], c[2], c[1] + c[0] + c[3], 20, 128);
+ adapt_prob(&pp[1], c[0], c[1] + c[3], 20, 128);
+ adapt_prob(&pp[2], c[1], c[3], 20, 128);
+ }
+
+ // mv joints
+ {
+ uint8_t *pp = p->mv_joint;
+ unsigned *c = s->td[0].counts.mv_joint;
+
+ adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
+ adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
+ adapt_prob(&pp[2], c[2], c[3], 20, 128);
+ }
+
+ // mv components
+ for (i = 0; i < 2; i++) {
+ uint8_t *pp;
+ unsigned *c, (*c2)[2], sum;
+
+ adapt_prob(&p->mv_comp[i].sign, s->td[0].counts.mv_comp[i].sign[0],
+ s->td[0].counts.mv_comp[i].sign[1], 20, 128);
+
+ pp = p->mv_comp[i].classes;
+ c = s->td[0].counts.mv_comp[i].classes;
+ sum = c[1] + c[2] + c[3] + c[4] + c[5] +
+ c[6] + c[7] + c[8] + c[9] + c[10];
+ adapt_prob(&pp[0], c[0], sum, 20, 128);
+ sum -= c[1];
+ adapt_prob(&pp[1], c[1], sum, 20, 128);
+ sum -= c[2] + c[3];
+ adapt_prob(&pp[2], c[2] + c[3], sum, 20, 128);
+ adapt_prob(&pp[3], c[2], c[3], 20, 128);
+ sum -= c[4] + c[5];
+ adapt_prob(&pp[4], c[4] + c[5], sum, 20, 128);
+ adapt_prob(&pp[5], c[4], c[5], 20, 128);
+ sum -= c[6];
+ adapt_prob(&pp[6], c[6], sum, 20, 128);
+ adapt_prob(&pp[7], c[7] + c[8], c[9] + c[10], 20, 128);
+ adapt_prob(&pp[8], c[7], c[8], 20, 128);
+ adapt_prob(&pp[9], c[9], c[10], 20, 128);
+
+ adapt_prob(&p->mv_comp[i].class0, s->td[0].counts.mv_comp[i].class0[0],
+ s->td[0].counts.mv_comp[i].class0[1], 20, 128);
+ pp = p->mv_comp[i].bits;
+ c2 = s->td[0].counts.mv_comp[i].bits;
+ for (j = 0; j < 10; j++)
+ adapt_prob(&pp[j], c2[j][0], c2[j][1], 20, 128);
+
+ for (j = 0; j < 2; j++) {
+ pp = p->mv_comp[i].class0_fp[j];
+ c = s->td[0].counts.mv_comp[i].class0_fp[j];
+ adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
+ adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
+ adapt_prob(&pp[2], c[2], c[3], 20, 128);
+ }
+ pp = p->mv_comp[i].fp;
+ c = s->td[0].counts.mv_comp[i].fp;
+ adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
+ adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
+ adapt_prob(&pp[2], c[2], c[3], 20, 128);
+
+ if (s->s.h.highprecisionmvs) {
+ adapt_prob(&p->mv_comp[i].class0_hp,
+ s->td[0].counts.mv_comp[i].class0_hp[0],
+ s->td[0].counts.mv_comp[i].class0_hp[1], 20, 128);
+ adapt_prob(&p->mv_comp[i].hp, s->td[0].counts.mv_comp[i].hp[0],
+ s->td[0].counts.mv_comp[i].hp[1], 20, 128);
+ }
+ }
+
+ // y intra modes
+ for (i = 0; i < 4; i++) {
+ uint8_t *pp = p->y_mode[i];
+ unsigned *c = s->td[0].counts.y_mode[i], sum, s2;
+
+ sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
+ adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
+ sum -= c[TM_VP8_PRED];
+ adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
+ sum -= c[VERT_PRED];
+ adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
+ s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
+ sum -= s2;
+ adapt_prob(&pp[3], s2, sum, 20, 128);
+ s2 -= c[HOR_PRED];
+ adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
+ adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED],
+ 20, 128);
+ sum -= c[DIAG_DOWN_LEFT_PRED];
+ adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
+ sum -= c[VERT_LEFT_PRED];
+ adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
+ adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
+ }
+
+ // uv intra modes
+ for (i = 0; i < 10; i++) {
+ uint8_t *pp = p->uv_mode[i];
+ unsigned *c = s->td[0].counts.uv_mode[i], sum, s2;
+
+ sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
+ adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
+ sum -= c[TM_VP8_PRED];
+ adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
+ sum -= c[VERT_PRED];
+ adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
+ s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
+ sum -= s2;
+ adapt_prob(&pp[3], s2, sum, 20, 128);
+ s2 -= c[HOR_PRED];
+ adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
+ adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED],
+ 20, 128);
+ sum -= c[DIAG_DOWN_LEFT_PRED];
+ adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
+ sum -= c[VERT_LEFT_PRED];
+ adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
+ adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
+ }
+}
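Every adapt_prob() call above follows the same pattern: derive an empirical probability p2 from the two branch counts and move the stored probability toward it by update_factor/256. A self-contained toy version using plain signed arithmetic (arithmetic right shift assumed when p2 < p1; the in-place update relies on uint8_t wraparound instead, and counts here are assumed small enough not to overflow):

#include <stdio.h>

static unsigned adapt_once(unsigned p1, unsigned ct0, unsigned ct1,
                           unsigned max_count, unsigned update_factor)
{
    unsigned ct = ct0 + ct1, p2, uf;

    if (!ct)
        return p1;
    uf = update_factor * (ct < max_count ? ct : max_count) / max_count;
    p2 = ((ct0 << 8) + (ct >> 1)) / ct;            /* rounded ct0 / ct in Q8 */
    p2 = p2 < 1 ? 1 : p2 > 255 ? 255 : p2;
    return p1 + ((((int)p2 - (int)p1) * (int)uf + 128) >> 8);
}

int main(void)
{
    /* prior 128, counts 30 vs 10: empirical p2 = 192; update_factor 128
     * moves half-way there, giving 160 */
    printf("%u\n", adapt_once(128, 30, 10, 20, 128));
    return 0;
}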
diff --git a/media/ffvpx/libavcodec/vp9recon.c b/media/ffvpx/libavcodec/vp9recon.c
new file mode 100644
index 0000000000..073c04b47d
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9recon.c
@@ -0,0 +1,654 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/mem_internal.h"
+
+#include "threadframe.h"
+#include "videodsp.h"
+#include "vp9data.h"
+#include "vp9dec.h"
+
+static av_always_inline int check_intra_mode(VP9TileData *td, int mode, uint8_t **a,
+ uint8_t *dst_edge, ptrdiff_t stride_edge,
+ uint8_t *dst_inner, ptrdiff_t stride_inner,
+ uint8_t *l, int col, int x, int w,
+ int row, int y, enum TxfmMode tx,
+ int p, int ss_h, int ss_v, int bytesperpixel)
+{
+ const VP9Context *s = td->s;
+ int have_top = row > 0 || y > 0;
+ int have_left = col > td->tile_col_start || x > 0;
+ int have_right = x < w - 1;
+ int bpp = s->s.h.bpp;
+ static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = {
+ [VERT_PRED] = { { DC_127_PRED, VERT_PRED },
+ { DC_127_PRED, VERT_PRED } },
+ [HOR_PRED] = { { DC_129_PRED, DC_129_PRED },
+ { HOR_PRED, HOR_PRED } },
+ [DC_PRED] = { { DC_128_PRED, TOP_DC_PRED },
+ { LEFT_DC_PRED, DC_PRED } },
+ [DIAG_DOWN_LEFT_PRED] = { { DC_127_PRED, DIAG_DOWN_LEFT_PRED },
+ { DC_127_PRED, DIAG_DOWN_LEFT_PRED } },
+ [DIAG_DOWN_RIGHT_PRED] = { { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED },
+ { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED } },
+ [VERT_RIGHT_PRED] = { { VERT_RIGHT_PRED, VERT_RIGHT_PRED },
+ { VERT_RIGHT_PRED, VERT_RIGHT_PRED } },
+ [HOR_DOWN_PRED] = { { HOR_DOWN_PRED, HOR_DOWN_PRED },
+ { HOR_DOWN_PRED, HOR_DOWN_PRED } },
+ [VERT_LEFT_PRED] = { { DC_127_PRED, VERT_LEFT_PRED },
+ { DC_127_PRED, VERT_LEFT_PRED } },
+ [HOR_UP_PRED] = { { DC_129_PRED, DC_129_PRED },
+ { HOR_UP_PRED, HOR_UP_PRED } },
+ [TM_VP8_PRED] = { { DC_129_PRED, VERT_PRED },
+ { HOR_PRED, TM_VP8_PRED } },
+ };
+ static const struct {
+ uint8_t needs_left:1;
+ uint8_t needs_top:1;
+ uint8_t needs_topleft:1;
+ uint8_t needs_topright:1;
+ uint8_t invert_left:1;
+ } edges[N_INTRA_PRED_MODES] = {
+ [VERT_PRED] = { .needs_top = 1 },
+ [HOR_PRED] = { .needs_left = 1 },
+ [DC_PRED] = { .needs_top = 1, .needs_left = 1 },
+ [DIAG_DOWN_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
+ [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1,
+ .needs_topleft = 1 },
+ [VERT_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1,
+ .needs_topleft = 1 },
+ [HOR_DOWN_PRED] = { .needs_left = 1, .needs_top = 1,
+ .needs_topleft = 1 },
+ [VERT_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
+ [HOR_UP_PRED] = { .needs_left = 1, .invert_left = 1 },
+ [TM_VP8_PRED] = { .needs_left = 1, .needs_top = 1,
+ .needs_topleft = 1 },
+ [LEFT_DC_PRED] = { .needs_left = 1 },
+ [TOP_DC_PRED] = { .needs_top = 1 },
+ [DC_128_PRED] = { 0 },
+ [DC_127_PRED] = { 0 },
+ [DC_129_PRED] = { 0 }
+ };
+
+ av_assert2(mode >= 0 && mode < 10);
+ mode = mode_conv[mode][have_left][have_top];
+ if (edges[mode].needs_top) {
+ uint8_t *top, *topleft;
+ int n_px_need = 4 << tx, n_px_have = (((s->cols - col) << !ss_h) - x) * 4;
+ int n_px_need_tr = 0;
+
+ if (tx == TX_4X4 && edges[mode].needs_topright && have_right)
+ n_px_need_tr = 4;
+
+ // if top of sb64-row, use s->intra_pred_data[] instead of
+ // dst[-stride] for intra prediction (it contains pre- instead of
+ // post-loopfilter data)
+ if (have_top) {
+ top = !(row & 7) && !y ?
+ s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
+ y == 0 ? &dst_edge[-stride_edge] : &dst_inner[-stride_inner];
+ if (have_left)
+ topleft = !(row & 7) && !y ?
+ s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
+ y == 0 || x == 0 ? &dst_edge[-stride_edge] :
+ &dst_inner[-stride_inner];
+ }
+
+ if (have_top &&
+ (!edges[mode].needs_topleft || (have_left && top == topleft)) &&
+ (tx != TX_4X4 || !edges[mode].needs_topright || have_right) &&
+ n_px_need + n_px_need_tr <= n_px_have) {
+ *a = top;
+ } else {
+ if (have_top) {
+ if (n_px_need <= n_px_have) {
+ memcpy(*a, top, n_px_need * bytesperpixel);
+ } else {
+#define memset_bpp(c, i1, v, i2, num) do { \
+ if (bytesperpixel == 1) { \
+ memset(&(c)[(i1)], (v)[(i2)], (num)); \
+ } else { \
+ int n, val = AV_RN16A(&(v)[(i2) * 2]); \
+ for (n = 0; n < (num); n++) { \
+ AV_WN16A(&(c)[((i1) + n) * 2], val); \
+ } \
+ } \
+} while (0)
+ memcpy(*a, top, n_px_have * bytesperpixel);
+ memset_bpp(*a, n_px_have, (*a), n_px_have - 1, n_px_need - n_px_have);
+ }
+ } else {
+#define memset_val(c, val, num) do { \
+ if (bytesperpixel == 1) { \
+ memset((c), (val), (num)); \
+ } else { \
+ int n; \
+ for (n = 0; n < (num); n++) { \
+ AV_WN16A(&(c)[n * 2], (val)); \
+ } \
+ } \
+} while (0)
+ memset_val(*a, (128 << (bpp - 8)) - 1, n_px_need);
+ }
+ if (edges[mode].needs_topleft) {
+ if (have_left && have_top) {
+#define assign_bpp(c, i1, v, i2) do { \
+ if (bytesperpixel == 1) { \
+ (c)[(i1)] = (v)[(i2)]; \
+ } else { \
+ AV_COPY16(&(c)[(i1) * 2], &(v)[(i2) * 2]); \
+ } \
+} while (0)
+ assign_bpp(*a, -1, topleft, -1);
+ } else {
+#define assign_val(c, i, v) do { \
+ if (bytesperpixel == 1) { \
+ (c)[(i)] = (v); \
+ } else { \
+ AV_WN16A(&(c)[(i) * 2], (v)); \
+ } \
+} while (0)
+ assign_val((*a), -1, (128 << (bpp - 8)) + (have_top ? +1 : -1));
+ }
+ }
+ if (tx == TX_4X4 && edges[mode].needs_topright) {
+ if (have_top && have_right &&
+ n_px_need + n_px_need_tr <= n_px_have) {
+ memcpy(&(*a)[4 * bytesperpixel], &top[4 * bytesperpixel], 4 * bytesperpixel);
+ } else {
+ memset_bpp(*a, 4, *a, 3, 4);
+ }
+ }
+ }
+ }
+ if (edges[mode].needs_left) {
+ if (have_left) {
+ int n_px_need = 4 << tx, i, n_px_have = (((s->rows - row) << !ss_v) - y) * 4;
+ uint8_t *dst = x == 0 ? dst_edge : dst_inner;
+ ptrdiff_t stride = x == 0 ? stride_edge : stride_inner;
+
+ if (edges[mode].invert_left) {
+ if (n_px_need <= n_px_have) {
+ for (i = 0; i < n_px_need; i++)
+ assign_bpp(l, i, &dst[i * stride], -1);
+ } else {
+ for (i = 0; i < n_px_have; i++)
+ assign_bpp(l, i, &dst[i * stride], -1);
+ memset_bpp(l, n_px_have, l, n_px_have - 1, n_px_need - n_px_have);
+ }
+ } else {
+ if (n_px_need <= n_px_have) {
+ for (i = 0; i < n_px_need; i++)
+ assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
+ } else {
+ for (i = 0; i < n_px_have; i++)
+ assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
+ memset_bpp(l, 0, l, n_px_need - n_px_have, n_px_need - n_px_have);
+ }
+ }
+ } else {
+ memset_val(l, (128 << (bpp - 8)) + 1, 4 << tx);
+ }
+ }
+
+ return mode;
+}
+
+static av_always_inline void intra_recon(VP9TileData *td, ptrdiff_t y_off,
+ ptrdiff_t uv_off, int bytesperpixel)
+{
+ const VP9Context *s = td->s;
+ VP9Block *b = td->b;
+ int row = td->row, col = td->col;
+ int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
+ int h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
+ int end_x = FFMIN(2 * (s->cols - col), w4);
+ int end_y = FFMIN(2 * (s->rows - row), h4);
+ int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
+ int uvstep1d = 1 << b->uvtx, p;
+ uint8_t *dst = td->dst[0], *dst_r = s->s.frames[CUR_FRAME].tf.f->data[0] + y_off;
+ LOCAL_ALIGNED_32(uint8_t, a_buf, [96]);
+ LOCAL_ALIGNED_32(uint8_t, l, [64]);
+
+ for (n = 0, y = 0; y < end_y; y += step1d) {
+ uint8_t *ptr = dst, *ptr_r = dst_r;
+ for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d * bytesperpixel,
+ ptr_r += 4 * step1d * bytesperpixel, n += step) {
+ int mode = b->mode[b->bs > BS_8x8 && b->tx == TX_4X4 ?
+ y * 2 + x : 0];
+ uint8_t *a = &a_buf[32];
+ enum TxfmType txtp = ff_vp9_intra_txfm_type[mode];
+ int eob = b->skip ? 0 : b->tx > TX_8X8 ? AV_RN16A(&td->eob[n]) : td->eob[n];
+
+ mode = check_intra_mode(td, mode, &a, ptr_r,
+ s->s.frames[CUR_FRAME].tf.f->linesize[0],
+ ptr, td->y_stride, l,
+ col, x, w4, row, y, b->tx, 0, 0, 0, bytesperpixel);
+ s->dsp.intra_pred[b->tx][mode](ptr, td->y_stride, l, a);
+ if (eob)
+ s->dsp.itxfm_add[tx][txtp](ptr, td->y_stride,
+ td->block + 16 * n * bytesperpixel, eob);
+ }
+ dst_r += 4 * step1d * s->s.frames[CUR_FRAME].tf.f->linesize[0];
+ dst += 4 * step1d * td->y_stride;
+ }
+
+ // U/V
+ w4 >>= s->ss_h;
+ end_x >>= s->ss_h;
+ end_y >>= s->ss_v;
+ step = 1 << (b->uvtx * 2);
+ for (p = 0; p < 2; p++) {
+ dst = td->dst[1 + p];
+ dst_r = s->s.frames[CUR_FRAME].tf.f->data[1 + p] + uv_off;
+ for (n = 0, y = 0; y < end_y; y += uvstep1d) {
+ uint8_t *ptr = dst, *ptr_r = dst_r;
+ for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d * bytesperpixel,
+ ptr_r += 4 * uvstep1d * bytesperpixel, n += step) {
+ int mode = b->uvmode;
+ uint8_t *a = &a_buf[32];
+ int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&td->uveob[p][n]) : td->uveob[p][n];
+
+ mode = check_intra_mode(td, mode, &a, ptr_r,
+ s->s.frames[CUR_FRAME].tf.f->linesize[1],
+ ptr, td->uv_stride, l, col, x, w4, row, y,
+ b->uvtx, p + 1, s->ss_h, s->ss_v, bytesperpixel);
+ s->dsp.intra_pred[b->uvtx][mode](ptr, td->uv_stride, l, a);
+ if (eob)
+ s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, td->uv_stride,
+ td->uvblock[p] + 16 * n * bytesperpixel, eob);
+ }
+ dst_r += 4 * uvstep1d * s->s.frames[CUR_FRAME].tf.f->linesize[1];
+ dst += 4 * uvstep1d * td->uv_stride;
+ }
+ }
+}
+
+void ff_vp9_intra_recon_8bpp(VP9TileData *td, ptrdiff_t y_off, ptrdiff_t uv_off)
+{
+ intra_recon(td, y_off, uv_off, 1);
+}
+
+void ff_vp9_intra_recon_16bpp(VP9TileData *td, ptrdiff_t y_off, ptrdiff_t uv_off)
+{
+ intra_recon(td, y_off, uv_off, 2);
+}
+
+static av_always_inline void mc_luma_unscaled(VP9TileData *td, const vp9_mc_func (*mc)[2],
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *ref, ptrdiff_t ref_stride,
+ const ThreadFrame *ref_frame,
+ ptrdiff_t y, ptrdiff_t x, const VP9mv *mv,
+ int bw, int bh, int w, int h, int bytesperpixel)
+{
+ const VP9Context *s = td->s;
+ int mx = mv->x, my = mv->y, th;
+
+ y += my >> 3;
+ x += mx >> 3;
+ ref += y * ref_stride + x * bytesperpixel;
+ mx &= 7;
+ my &= 7;
+ // FIXME bilinear filter only needs 0/1 pixels, not 3/4
+ // we use +7 because the last 7 pixels of each sbrow can be changed in
+ // the longest loopfilter of the next sbrow
+ th = (y + bh + 4 * !!my + 7) >> 6;
+ ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
+    // The arm/aarch64 _hv filters read one more row than is actually
+    // needed, so switch to emulated edge one pixel sooner vertically
+ // (!!my * 5) than horizontally (!!mx * 4).
+ if (x < !!mx * 3 || y < !!my * 3 ||
+ x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
+ s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
+ ref - !!my * 3 * ref_stride - !!mx * 3 * bytesperpixel,
+ 160, ref_stride,
+ bw + !!mx * 7, bh + !!my * 7,
+ x - !!mx * 3, y - !!my * 3, w, h);
+ ref = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
+ ref_stride = 160;
+ }
+ mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
+}
+
+static av_always_inline void mc_chroma_unscaled(VP9TileData *td, const vp9_mc_func (*mc)[2],
+ uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t dst_stride,
+ const uint8_t *ref_u, ptrdiff_t src_stride_u,
+ const uint8_t *ref_v, ptrdiff_t src_stride_v,
+ const ThreadFrame *ref_frame,
+ ptrdiff_t y, ptrdiff_t x, const VP9mv *mv,
+ int bw, int bh, int w, int h, int bytesperpixel)
+{
+ const VP9Context *s = td->s;
+ int mx = mv->x * (1 << !s->ss_h), my = mv->y * (1 << !s->ss_v), th;
+
+ y += my >> 4;
+ x += mx >> 4;
+ ref_u += y * src_stride_u + x * bytesperpixel;
+ ref_v += y * src_stride_v + x * bytesperpixel;
+ mx &= 15;
+ my &= 15;
+ // FIXME bilinear filter only needs 0/1 pixels, not 3/4
+ // we use +7 because the last 7 pixels of each sbrow can be changed in
+ // the longest loopfilter of the next sbrow
+ th = (y + bh + 4 * !!my + 7) >> (6 - s->ss_v);
+ ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
+    // The arm/aarch64 _hv filters read one more row than is actually
+    // needed, so switch to emulated edge one pixel sooner vertically
+ // (!!my * 5) than horizontally (!!mx * 4).
+ if (x < !!mx * 3 || y < !!my * 3 ||
+ x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
+ s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
+ ref_u - !!my * 3 * src_stride_u - !!mx * 3 * bytesperpixel,
+ 160, src_stride_u,
+ bw + !!mx * 7, bh + !!my * 7,
+ x - !!mx * 3, y - !!my * 3, w, h);
+ ref_u = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
+ mc[!!mx][!!my](dst_u, dst_stride, ref_u, 160, bh, mx, my);
+
+ s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
+ ref_v - !!my * 3 * src_stride_v - !!mx * 3 * bytesperpixel,
+ 160, src_stride_v,
+ bw + !!mx * 7, bh + !!my * 7,
+ x - !!mx * 3, y - !!my * 3, w, h);
+ ref_v = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
+ mc[!!mx][!!my](dst_v, dst_stride, ref_v, 160, bh, mx, my);
+ } else {
+ mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my);
+ mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my);
+ }
+}
+
+#define mc_luma_dir(td, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
+ px, py, pw, ph, bw, bh, w, h, i) \
+ mc_luma_unscaled(td, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
+ mv, bw, bh, w, h, bytesperpixel)
+#define mc_chroma_dir(td, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
+ row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
+ mc_chroma_unscaled(td, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
+ row, col, mv, bw, bh, w, h, bytesperpixel)
+#define SCALED 0
+#define FN(x) x##_8bpp
+#define BYTES_PER_PIXEL 1
+#include "vp9_mc_template.c"
+#undef FN
+#undef BYTES_PER_PIXEL
+#define FN(x) x##_16bpp
+#define BYTES_PER_PIXEL 2
+#include "vp9_mc_template.c"
+#undef mc_luma_dir
+#undef mc_chroma_dir
+#undef FN
+#undef BYTES_PER_PIXEL
+#undef SCALED
+
+static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func smc,
+ const vp9_mc_func (*mc)[2],
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *ref, ptrdiff_t ref_stride,
+ const ThreadFrame *ref_frame,
+ ptrdiff_t y, ptrdiff_t x, const VP9mv *in_mv,
+ int px, int py, int pw, int ph,
+ int bw, int bh, int w, int h, int bytesperpixel,
+ const uint16_t *scale, const uint8_t *step)
+{
+ const VP9Context *s = td->s;
+ if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
+ s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
+ mc_luma_unscaled(td, mc, dst, dst_stride, ref, ref_stride, ref_frame,
+ y, x, in_mv, bw, bh, w, h, bytesperpixel);
+ } else {
+#define scale_mv(n, dim) (((int64_t)(n) * scale[dim]) >> 14)
+ int mx, my;
+ int refbw_m1, refbh_m1;
+ int th;
+ VP9mv mv;
+
+ mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
+ mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
+ // BUG libvpx seems to scale the two components separately. This introduces
+ // rounding errors but we have to reproduce them to be exactly compatible
+ // with the output from libvpx...
+ mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
+ my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);
+
+ y = my >> 4;
+ x = mx >> 4;
+ ref += y * ref_stride + x * bytesperpixel;
+ mx &= 15;
+ my &= 15;
+ refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
+ refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
+ // FIXME bilinear filter only needs 0/1 pixels, not 3/4
+ // we use +7 because the last 7 pixels of each sbrow can be changed in
+ // the longest loopfilter of the next sbrow
+ th = (y + refbh_m1 + 4 + 7) >> 6;
+ ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
+        // The arm/aarch64 _hv filters read one more row than is actually
+        // needed, so switch to emulated edge one pixel sooner vertically
+ // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
+ if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
+ s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
+ ref - 3 * ref_stride - 3 * bytesperpixel,
+ 288, ref_stride,
+ refbw_m1 + 8, refbh_m1 + 8,
+ x - 3, y - 3, w, h);
+ ref = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
+ ref_stride = 288;
+ }
+ smc(dst, dst_stride, ref, ref_stride, bh, mx, my, step[0], step[1]);
+ }
+}
+
+static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_func smc,
+ const vp9_mc_func (*mc)[2],
+ uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t dst_stride,
+ const uint8_t *ref_u, ptrdiff_t src_stride_u,
+ const uint8_t *ref_v, ptrdiff_t src_stride_v,
+ const ThreadFrame *ref_frame,
+ ptrdiff_t y, ptrdiff_t x, const VP9mv *in_mv,
+ int px, int py, int pw, int ph,
+ int bw, int bh, int w, int h, int bytesperpixel,
+ const uint16_t *scale, const uint8_t *step)
+{
+ const VP9Context *s = td->s;
+ if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
+ s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
+ mc_chroma_unscaled(td, mc, dst_u, dst_v, dst_stride, ref_u, src_stride_u,
+ ref_v, src_stride_v, ref_frame,
+ y, x, in_mv, bw, bh, w, h, bytesperpixel);
+ } else {
+ int mx, my;
+ int refbw_m1, refbh_m1;
+ int th;
+ VP9mv mv;
+
+ if (s->ss_h) {
+ // BUG https://code.google.com/p/webm/issues/detail?id=820
+ mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 16, (s->cols * 4 - x + px + 3) * 16);
+ mx = scale_mv(mv.x, 0) + (scale_mv(x * 16, 0) & ~15) + (scale_mv(x * 32, 0) & 15);
+ } else {
+ mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
+ mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
+ }
+ if (s->ss_v) {
+ // BUG https://code.google.com/p/webm/issues/detail?id=820
+ mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 16, (s->rows * 4 - y + py + 3) * 16);
+ my = scale_mv(mv.y, 1) + (scale_mv(y * 16, 1) & ~15) + (scale_mv(y * 32, 1) & 15);
+ } else {
+ mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
+ my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);
+ }
+#undef scale_mv
+ y = my >> 4;
+ x = mx >> 4;
+ ref_u += y * src_stride_u + x * bytesperpixel;
+ ref_v += y * src_stride_v + x * bytesperpixel;
+ mx &= 15;
+ my &= 15;
+ refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
+ refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
+ // FIXME bilinear filter only needs 0/1 pixels, not 3/4
+ // we use +7 because the last 7 pixels of each sbrow can be changed in
+ // the longest loopfilter of the next sbrow
+ th = (y + refbh_m1 + 4 + 7) >> (6 - s->ss_v);
+ ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
+        // The arm/aarch64 _hv filters read one more row than is actually
+        // needed, so switch to emulated edge one pixel sooner vertically
+ // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
+ if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
+ s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
+ ref_u - 3 * src_stride_u - 3 * bytesperpixel,
+ 288, src_stride_u,
+ refbw_m1 + 8, refbh_m1 + 8,
+ x - 3, y - 3, w, h);
+ ref_u = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
+ smc(dst_u, dst_stride, ref_u, 288, bh, mx, my, step[0], step[1]);
+
+ s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
+ ref_v - 3 * src_stride_v - 3 * bytesperpixel,
+ 288, src_stride_v,
+ refbw_m1 + 8, refbh_m1 + 8,
+ x - 3, y - 3, w, h);
+ ref_v = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
+ smc(dst_v, dst_stride, ref_v, 288, bh, mx, my, step[0], step[1]);
+ } else {
+ smc(dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my, step[0], step[1]);
+ smc(dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my, step[0], step[1]);
+ }
+ }
+}
+
+#define mc_luma_dir(td, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
+ px, py, pw, ph, bw, bh, w, h, i) \
+ mc_luma_scaled(td, s->dsp.s##mc, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
+ mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
+ s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
+#define mc_chroma_dir(td, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
+ row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
+ mc_chroma_scaled(td, s->dsp.s##mc, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
+ row, col, mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
+ s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
+#define SCALED 1
+#define FN(x) x##_scaled_8bpp
+#define BYTES_PER_PIXEL 1
+#include "vp9_mc_template.c"
+#undef FN
+#undef BYTES_PER_PIXEL
+#define FN(x) x##_scaled_16bpp
+#define BYTES_PER_PIXEL 2
+#include "vp9_mc_template.c"
+#undef mc_luma_dir
+#undef mc_chroma_dir
+#undef FN
+#undef BYTES_PER_PIXEL
+#undef SCALED
+
+static av_always_inline void inter_recon(VP9TileData *td, int bytesperpixel)
+{
+ const VP9Context *s = td->s;
+ VP9Block *b = td->b;
+ int row = td->row, col = td->col;
+
+ if (s->mvscale[b->ref[0]][0] == REF_INVALID_SCALE ||
+ (b->comp && s->mvscale[b->ref[1]][0] == REF_INVALID_SCALE)) {
+ if (!s->td->error_info) {
+ s->td->error_info = AVERROR_INVALIDDATA;
+ av_log(NULL, AV_LOG_ERROR, "Bitstream not supported, "
+ "reference frame has invalid dimensions\n");
+ }
+ return;
+ }
+
+ if (s->mvscale[b->ref[0]][0] || (b->comp && s->mvscale[b->ref[1]][0])) {
+ if (bytesperpixel == 1) {
+ inter_pred_scaled_8bpp(td);
+ } else {
+ inter_pred_scaled_16bpp(td);
+ }
+ } else {
+ if (bytesperpixel == 1) {
+ inter_pred_8bpp(td);
+ } else {
+ inter_pred_16bpp(td);
+ }
+ }
+
+ if (!b->skip) {
+        /* mostly copied from intra_recon() */
+
+ int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
+ int h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
+ int end_x = FFMIN(2 * (s->cols - col), w4);
+ int end_y = FFMIN(2 * (s->rows - row), h4);
+ int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
+ int uvstep1d = 1 << b->uvtx, p;
+ uint8_t *dst = td->dst[0];
+
+ // y itxfm add
+ for (n = 0, y = 0; y < end_y; y += step1d) {
+ uint8_t *ptr = dst;
+ for (x = 0; x < end_x; x += step1d,
+ ptr += 4 * step1d * bytesperpixel, n += step) {
+ int eob = b->tx > TX_8X8 ? AV_RN16A(&td->eob[n]) : td->eob[n];
+
+ if (eob)
+ s->dsp.itxfm_add[tx][DCT_DCT](ptr, td->y_stride,
+ td->block + 16 * n * bytesperpixel, eob);
+ }
+ dst += 4 * td->y_stride * step1d;
+ }
+
+ // uv itxfm add
+ end_x >>= s->ss_h;
+ end_y >>= s->ss_v;
+ step = 1 << (b->uvtx * 2);
+ for (p = 0; p < 2; p++) {
+ dst = td->dst[p + 1];
+ for (n = 0, y = 0; y < end_y; y += uvstep1d) {
+ uint8_t *ptr = dst;
+ for (x = 0; x < end_x; x += uvstep1d,
+ ptr += 4 * uvstep1d * bytesperpixel, n += step) {
+ int eob = b->uvtx > TX_8X8 ? AV_RN16A(&td->uveob[p][n]) : td->uveob[p][n];
+
+ if (eob)
+ s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, td->uv_stride,
+ td->uvblock[p] + 16 * n * bytesperpixel, eob);
+ }
+ dst += 4 * uvstep1d * td->uv_stride;
+ }
+ }
+ }
+}
+
+void ff_vp9_inter_recon_8bpp(VP9TileData *td)
+{
+ inter_recon(td, 1);
+}
+
+void ff_vp9_inter_recon_16bpp(VP9TileData *td)
+{
+ inter_recon(td, 2);
+}
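The scaled inter paths above map current-frame positions into the reference frame with a Q14 fixed-point ratio: s->mvscale is expected to hold (ref_dim << 14) / cur_dim per axis and s->mvstep the per-output-pixel step in 1/16-pel units. A minimal sketch of the coordinate mapping itself (names are illustrative):

#include <stdint.h>

/* scale_q14 = (ref_dim << 14) / cur_dim; maps a 1/16-pel current-frame
 * coordinate into the reference frame */
static inline int scale_coord_q14(int v, uint16_t scale_q14)
{
    return (int)(((int64_t)v * scale_q14) >> 14);
}

/* e.g. with a half-width reference, scale_q14 == 8192 and x maps to x / 2 */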
diff --git a/media/ffvpx/libavcodec/vp9shared.h b/media/ffvpx/libavcodec/vp9shared.h
new file mode 100644
index 0000000000..543a496df8
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp9shared.h
@@ -0,0 +1,175 @@
+/*
+ * VP9 compatible video decoder
+ *
+ * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+ * Copyright (C) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VP9SHARED_H
+#define AVCODEC_VP9SHARED_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/mem_internal.h"
+
+#include "vp9.h"
+#include "threadframe.h"
+
+enum BlockPartition {
+ PARTITION_NONE, // [ ] <-.
+ PARTITION_H, // [-] |
+ PARTITION_V, // [|] |
+ PARTITION_SPLIT, // [+] --'
+};
+
+enum InterPredMode {
+ NEARESTMV = 10,
+ NEARMV = 11,
+ ZEROMV = 12,
+ NEWMV = 13,
+};
+
+enum CompPredMode {
+ PRED_SINGLEREF,
+ PRED_COMPREF,
+ PRED_SWITCHABLE,
+};
+
+typedef struct VP9mv {
+ DECLARE_ALIGNED(4, int16_t, x);
+ int16_t y;
+} VP9mv;
+
+typedef struct VP9mvrefPair {
+ VP9mv mv[2];
+ int8_t ref[2];
+} VP9mvrefPair;
+
+typedef struct VP9Frame {
+ ThreadFrame tf;
+ AVBufferRef *extradata;
+ uint8_t *segmentation_map;
+ VP9mvrefPair *mv;
+ int uses_2pass;
+
+ AVBufferRef *hwaccel_priv_buf;
+ void *hwaccel_picture_private;
+} VP9Frame;
+
+enum BlockLevel {
+ BL_64X64,
+ BL_32X32,
+ BL_16X16,
+ BL_8X8,
+};
+
+enum BlockSize {
+ BS_64x64,
+ BS_64x32,
+ BS_32x64,
+ BS_32x32,
+ BS_32x16,
+ BS_16x32,
+ BS_16x16,
+ BS_16x8,
+ BS_8x16,
+ BS_8x8,
+ BS_8x4,
+ BS_4x8,
+ BS_4x4,
+ N_BS_SIZES,
+};
+
+typedef struct VP9BitstreamHeader {
+ // bitstream header
+ uint8_t profile;
+ uint8_t bpp;
+ uint8_t keyframe;
+ uint8_t invisible;
+ uint8_t errorres;
+ uint8_t intraonly;
+ uint8_t resetctx;
+ uint8_t refreshrefmask;
+ uint8_t highprecisionmvs;
+ enum FilterMode filtermode;
+ uint8_t allowcompinter;
+ uint8_t refreshctx;
+ uint8_t parallelmode;
+ uint8_t framectxid;
+ uint8_t use_last_frame_mvs;
+ uint8_t refidx[3];
+ uint8_t signbias[3];
+ uint8_t fixcompref;
+ uint8_t varcompref[2];
+ struct {
+ uint8_t level;
+ int8_t sharpness;
+ } filter;
+ struct {
+ uint8_t enabled;
+ uint8_t updated;
+ int8_t mode[2];
+ int8_t ref[4];
+ } lf_delta;
+ uint8_t yac_qi;
+ int8_t ydc_qdelta, uvdc_qdelta, uvac_qdelta;
+ uint8_t lossless;
+#define MAX_SEGMENT 8
+ struct {
+ uint8_t enabled;
+ uint8_t temporal;
+ uint8_t absolute_vals;
+ uint8_t update_map;
+ uint8_t prob[7];
+ uint8_t pred_prob[3];
+ struct {
+ uint8_t q_enabled;
+ uint8_t lf_enabled;
+ uint8_t ref_enabled;
+ uint8_t skip_enabled;
+ uint8_t ref_val;
+ int16_t q_val;
+ int8_t lf_val;
+ int16_t qmul[2][2];
+ uint8_t lflvl[4][2];
+ } feat[MAX_SEGMENT];
+ } segmentation;
+ enum TxfmMode txfmmode;
+ enum CompPredMode comppredmode;
+ struct {
+ unsigned log2_tile_cols, log2_tile_rows;
+ unsigned tile_cols, tile_rows;
+ } tiling;
+
+ int uncompressed_header_size;
+ int compressed_header_size;
+} VP9BitstreamHeader;
+
+typedef struct VP9SharedContext {
+ VP9BitstreamHeader h;
+
+ ThreadFrame refs[8];
+#define CUR_FRAME 0
+#define REF_FRAME_MVPAIR 1
+#define REF_FRAME_SEGMAP 2
+ VP9Frame frames[3];
+} VP9SharedContext;
+
+#endif /* AVCODEC_VP9SHARED_H */
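VP9mv aligns its first field so an (x, y) pair can be loaded, stored and compared as a single aligned 32-bit word, which is how the RETURN_MV/RETURN_DIRECT_MV macros in vp9mvs.c above treat it via AV_RN32A/AV_WN32A. A small portable sketch of that packing:

#include <stdint.h>
#include <string.h>

static inline uint32_t mv_as_u32(int16_t x, int16_t y)
{
    int16_t pair[2] = { x, y };
    uint32_t w;

    memcpy(&w, pair, sizeof(w)); /* same bytes an aligned 32-bit load would see */
    return w;
}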
diff --git a/media/ffvpx/libavcodec/vpx_rac.c b/media/ffvpx/libavcodec/vpx_rac.c
new file mode 100644
index 0000000000..cf02e9a19c
--- /dev/null
+++ b/media/ffvpx/libavcodec/vpx_rac.c
@@ -0,0 +1,53 @@
+/*
+ * VP5/6/8 decoder
+ * Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include "libavutil/error.h"
+#include "bytestream.h"
+#include "vpx_rac.h"
+
+const uint8_t ff_vpx_norm_shift[256]= {
+ 8,7,6,6,5,5,5,5,4,4,4,4,4,4,4,4,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+};
+
+int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
+{
+ c->high = 255;
+ c->bits = -16;
+ c->buffer = buf;
+ c->end = buf + buf_size;
+ c->end_reached = 0;
+ if (buf_size < 1)
+ return AVERROR_INVALIDDATA;
+ c->code_word = bytestream_get_be24(&c->buffer);
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/vpx_rac.h b/media/ffvpx/libavcodec/vpx_rac.h
new file mode 100644
index 0000000000..b158cc0754
--- /dev/null
+++ b/media/ffvpx/libavcodec/vpx_rac.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Common VP5-VP9 range decoder stuff
+ */
+
+#ifndef AVCODEC_VPX_RAC_H
+#define AVCODEC_VPX_RAC_H
+
+#include <stdint.h>
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "bytestream.h"
+
+typedef struct VPXRangeCoder {
+ int high;
+ int bits; /* stored negated (i.e. negative "bits" is a positive number of
+ bits left) in order to eliminate a negate in cache refilling */
+ const uint8_t *buffer;
+ const uint8_t *end;
+ unsigned int code_word;
+ int end_reached;
+} VPXRangeCoder;
+
+extern const uint8_t ff_vpx_norm_shift[256];
+int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size);
+
+/**
+ * returns 1 if the end of the stream has been reached, 0 otherwise.
+ */
+static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
+{
+ if (c->end <= c->buffer && c->bits >= 0)
+ c->end_reached ++;
+ return c->end_reached > 10;
+}
+
+static av_always_inline unsigned int vpx_rac_renorm(VPXRangeCoder *c)
+{
+ int shift = ff_vpx_norm_shift[c->high];
+ int bits = c->bits;
+ unsigned int code_word = c->code_word;
+
+ c->high <<= shift;
+ code_word <<= shift;
+ bits += shift;
+ if(bits >= 0 && c->buffer < c->end) {
+ code_word |= bytestream_get_be16(&c->buffer) << bits;
+ bits -= 16;
+ }
+ c->bits = bits;
+ return code_word;
+}
+
+#if ARCH_ARM
+#include "arm/vpx_arith.h"
+#elif ARCH_X86
+#include "x86/vpx_arith.h"
+#endif
+
+#ifndef vpx_rac_get_prob
+#define vpx_rac_get_prob vpx_rac_get_prob
+static av_always_inline int vpx_rac_get_prob(VPXRangeCoder *c, uint8_t prob)
+{
+ unsigned int code_word = vpx_rac_renorm(c);
+ unsigned int low = 1 + (((c->high - 1) * prob) >> 8);
+ unsigned int low_shift = low << 16;
+ int bit = code_word >= low_shift;
+
+ c->high = bit ? c->high - low : low;
+ c->code_word = bit ? code_word - low_shift : code_word;
+
+ return bit;
+}
+#endif
+
+#ifndef vpx_rac_get_prob_branchy
+// branchy variant, to be used where there's a branch based on the bit decoded
+static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
+{
+ unsigned long code_word = vpx_rac_renorm(c);
+ unsigned low = 1 + (((c->high - 1) * prob) >> 8);
+ unsigned low_shift = low << 16;
+
+ if (code_word >= low_shift) {
+ c->high -= low;
+ c->code_word = code_word - low_shift;
+ return 1;
+ }
+
+ c->high = low;
+ c->code_word = code_word;
+ return 0;
+}
+#endif
+
+static av_always_inline int vpx_rac_get(VPXRangeCoder *c)
+{
+ unsigned int code_word = vpx_rac_renorm(c);
+ /* equiprobable */
+ int low = (c->high + 1) >> 1;
+ unsigned int low_shift = low << 16;
+ int bit = code_word >= low_shift;
+ if (bit) {
+ c->high -= low;
+ code_word -= low_shift;
+ } else {
+ c->high = low;
+ }
+
+ c->code_word = code_word;
+ return bit;
+}
+
+#endif /* AVCODEC_VPX_RAC_H */
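
The header above is the complete range-decoder interface shared by the VP5-VP9 decoders. A minimal caller sketch (hypothetical, not part of the patch itself): initialize the coder over a coded buffer, then read bits either against an 8-bit probability or equiprobably, checking for buffer overrun at the end.

    #include <stdint.h>
    #include "vpx_rac.h"

    /* Hypothetical caller, for illustration only. */
    static int demo_read_bits(const uint8_t *buf, int size)
    {
        VPXRangeCoder c;
        int bit, flag;

        if (ff_vpx_init_range_decoder(&c, buf, size) < 0)
            return -1;                      /* buffer too small */

        bit  = vpx_rac_get_prob(&c, 128);   /* bit coded with an 8-bit probability */
        flag = vpx_rac_get(&c);             /* equiprobable bit */

        if (vpx_rac_is_end(&c))             /* decoder ran past the buffer */
            return -1;
        return (bit << 1) | flag;
    }
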
diff --git a/media/ffvpx/libavcodec/x86/constants.c b/media/ffvpx/libavcodec/x86/constants.c
new file mode 100644
index 0000000000..bc7f2b17b8
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/constants.c
@@ -0,0 +1,93 @@
+/*
+ * MMX/SSE/AVX constants used across x86 dsp optimizations.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/mem_internal.h"
+#include "libavutil/x86/asm.h" // for xmm_reg
+#include "constants.h"
+
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_1) = { 0x0001000100010001ULL, 0x0001000100010001ULL,
+ 0x0001000100010001ULL, 0x0001000100010001ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_2) = { 0x0002000200020002ULL, 0x0002000200020002ULL,
+ 0x0002000200020002ULL, 0x0002000200020002ULL };
+DECLARE_ASM_ALIGNED(16, const xmm_reg, ff_pw_3) = { 0x0003000300030003ULL, 0x0003000300030003ULL };
+DECLARE_ASM_ALIGNED(32, const ymm_reg, ff_pw_4) = { 0x0004000400040004ULL, 0x0004000400040004ULL,
+ 0x0004000400040004ULL, 0x0004000400040004ULL };
+DECLARE_ASM_ALIGNED(16, const xmm_reg, ff_pw_5) = { 0x0005000500050005ULL, 0x0005000500050005ULL };
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8) = { 0x0008000800080008ULL, 0x0008000800080008ULL };
+DECLARE_ASM_ALIGNED(16, const xmm_reg, ff_pw_9) = { 0x0009000900090009ULL, 0x0009000900090009ULL };
+DECLARE_ALIGNED(8, const uint64_t, ff_pw_15) = 0x000F000F000F000FULL;
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16) = { 0x0010001000100010ULL, 0x0010001000100010ULL };
+DECLARE_ASM_ALIGNED(16, const xmm_reg, ff_pw_18) = { 0x0012001200120012ULL, 0x0012001200120012ULL };
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_20) = { 0x0014001400140014ULL, 0x0014001400140014ULL };
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32) = { 0x0020002000200020ULL, 0x0020002000200020ULL };
+DECLARE_ASM_ALIGNED(8, const uint64_t, ff_pw_42) = 0x002A002A002A002AULL;
+DECLARE_ASM_ALIGNED(8, const uint64_t, ff_pw_53) = 0x0035003500350035ULL;
+DECLARE_ASM_ALIGNED(16, const xmm_reg, ff_pw_64) = { 0x0040004000400040ULL, 0x0040004000400040ULL };
+DECLARE_ASM_ALIGNED(8, const uint64_t, ff_pw_96) = 0x0060006000600060ULL;
+DECLARE_ASM_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_255) = { 0x00ff00ff00ff00ffULL, 0x00ff00ff00ff00ffULL,
+ 0x00ff00ff00ff00ffULL, 0x00ff00ff00ff00ffULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_256) = { 0x0100010001000100ULL, 0x0100010001000100ULL,
+ 0x0100010001000100ULL, 0x0100010001000100ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_512) = { 0x0200020002000200ULL, 0x0200020002000200ULL,
+ 0x0200020002000200ULL, 0x0200020002000200ULL };
+DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_1023) = { 0x03ff03ff03ff03ffULL, 0x03ff03ff03ff03ffULL,
+ 0x03ff03ff03ff03ffULL, 0x03ff03ff03ff03ffULL};
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_1024) = { 0x0400040004000400ULL, 0x0400040004000400ULL,
+ 0x0400040004000400ULL, 0x0400040004000400ULL};
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_2048) = { 0x0800080008000800ULL, 0x0800080008000800ULL,
+ 0x0800080008000800ULL, 0x0800080008000800ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_4095) = { 0x0fff0fff0fff0fffULL, 0x0fff0fff0fff0fffULL,
+ 0x0fff0fff0fff0fffULL, 0x0fff0fff0fff0fffULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_4096) = { 0x1000100010001000ULL, 0x1000100010001000ULL,
+ 0x1000100010001000ULL, 0x1000100010001000ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_8192) = { 0x2000200020002000ULL, 0x2000200020002000ULL,
+ 0x2000200020002000ULL, 0x2000200020002000ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_m1) = { 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL,
+ 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL };
+
+DECLARE_ALIGNED(32, const ymm_reg, ff_pb_0) = { 0x0000000000000000ULL, 0x0000000000000000ULL,
+ 0x0000000000000000ULL, 0x0000000000000000ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pb_1) = { 0x0101010101010101ULL, 0x0101010101010101ULL,
+ 0x0101010101010101ULL, 0x0101010101010101ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pb_2) = { 0x0202020202020202ULL, 0x0202020202020202ULL,
+ 0x0202020202020202ULL, 0x0202020202020202ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pb_3) = { 0x0303030303030303ULL, 0x0303030303030303ULL,
+ 0x0303030303030303ULL, 0x0303030303030303ULL };
+DECLARE_ALIGNED(32, const xmm_reg, ff_pb_15) = { 0x0F0F0F0F0F0F0F0FULL, 0x0F0F0F0F0F0F0F0FULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pb_80) = { 0x8080808080808080ULL, 0x8080808080808080ULL,
+ 0x8080808080808080ULL, 0x8080808080808080ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pb_FE) = { 0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL,
+ 0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL };
+DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC) = 0xFCFCFCFCFCFCFCFCULL;
+
+DECLARE_ALIGNED(16, const xmm_reg, ff_ps_neg) = { 0x8000000080000000ULL, 0x8000000080000000ULL };
+
+DECLARE_ALIGNED(32, const ymm_reg, ff_pd_1) = { 0x0000000100000001ULL, 0x0000000100000001ULL,
+ 0x0000000100000001ULL, 0x0000000100000001ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pd_16) = { 0x0000001000000010ULL, 0x0000001000000010ULL,
+ 0x0000001000000010ULL, 0x0000001000000010ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pd_32) = { 0x0000002000000020ULL, 0x0000002000000020ULL,
+ 0x0000002000000020ULL, 0x0000002000000020ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pd_8192) = { 0x0000200000002000ULL, 0x0000200000002000ULL,
+ 0x0000200000002000ULL, 0x0000200000002000ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pd_65535)= { 0x0000ffff0000ffffULL, 0x0000ffff0000ffffULL,
+ 0x0000ffff0000ffffULL, 0x0000ffff0000ffffULL };
diff --git a/media/ffvpx/libavcodec/x86/constants.h b/media/ffvpx/libavcodec/x86/constants.h
new file mode 100644
index 0000000000..85da38b7b9
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/constants.h
@@ -0,0 +1,72 @@
+/*
+ * MMX/SSE constants used across x86 dsp optimizations.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_CONSTANTS_H
+#define AVCODEC_X86_CONSTANTS_H
+
+#include <stdint.h>
+
+#include "libavutil/x86/asm.h"
+
+extern const ymm_reg ff_pw_1;
+extern const ymm_reg ff_pw_2;
+extern const xmm_reg ff_pw_3;
+extern const ymm_reg ff_pw_4;
+extern const xmm_reg ff_pw_5;
+extern const xmm_reg ff_pw_8;
+extern const xmm_reg ff_pw_9;
+extern const uint64_t ff_pw_15;
+extern const xmm_reg ff_pw_16;
+extern const xmm_reg ff_pw_18;
+extern const xmm_reg ff_pw_20;
+extern const xmm_reg ff_pw_32;
+extern const uint64_t ff_pw_42;
+extern const uint64_t ff_pw_53;
+extern const xmm_reg ff_pw_64;
+extern const uint64_t ff_pw_96;
+extern const uint64_t ff_pw_128;
+extern const ymm_reg ff_pw_255;
+extern const ymm_reg ff_pw_256;
+extern const ymm_reg ff_pw_512;
+extern const ymm_reg ff_pw_1023;
+extern const ymm_reg ff_pw_1024;
+extern const ymm_reg ff_pw_2048;
+extern const ymm_reg ff_pw_4095;
+extern const ymm_reg ff_pw_4096;
+extern const ymm_reg ff_pw_8192;
+extern const ymm_reg ff_pw_m1;
+
+extern const ymm_reg ff_pb_0;
+extern const ymm_reg ff_pb_1;
+extern const ymm_reg ff_pb_2;
+extern const ymm_reg ff_pb_3;
+extern const ymm_reg ff_pb_80;
+extern const ymm_reg ff_pb_FE;
+extern const uint64_t ff_pb_FC;
+
+extern const xmm_reg ff_ps_neg;
+
+extern const ymm_reg ff_pd_1;
+extern const ymm_reg ff_pd_16;
+extern const ymm_reg ff_pd_32;
+extern const ymm_reg ff_pd_8192;
+extern const ymm_reg ff_pd_65535;
+
+#endif /* AVCODEC_X86_CONSTANTS_H */
diff --git a/media/ffvpx/libavcodec/x86/dct32.asm b/media/ffvpx/libavcodec/x86/dct32.asm
new file mode 100644
index 0000000000..37fba51543
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/dct32.asm
@@ -0,0 +1,481 @@
+;******************************************************************************
+;* 32 point SSE-optimized DCT transform
+;* Copyright (c) 2010 Vitor Sessak
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA 32
+
+ps_p1p1m1m1: dd 0, 0, 0x80000000, 0x80000000, 0, 0, 0x80000000, 0x80000000
+
+ps_cos_vec: dd 0.500603, 0.505471, 0.515447, 0.531043
+ dd 0.553104, 0.582935, 0.622504, 0.674808
+ dd -10.190008, -3.407609, -2.057781, -1.484165
+ dd -1.169440, -0.972568, -0.839350, -0.744536
+ dd 0.502419, 0.522499, 0.566944, 0.646822
+ dd 0.788155, 1.060678, 1.722447, 5.101149
+ dd 0.509796, 0.601345, 0.899976, 2.562916
+ dd 0.509796, 0.601345, 0.899976, 2.562916
+ dd 1.000000, 1.000000, 1.306563, 0.541196
+ dd 1.000000, 1.000000, 1.306563, 0.541196
+ dd 1.000000, 0.707107, 1.000000, -0.707107
+ dd 1.000000, 0.707107, 1.000000, -0.707107
+ dd 0.707107, 0.707107, 0.707107, 0.707107
+
+%macro BUTTERFLY 4
+ subps %4, %1, %2
+ addps %2, %2, %1
+ mulps %1, %4, %3
+%endmacro
+
+%macro BUTTERFLY0 5
+%if cpuflag(sse2) && notcpuflag(avx)
+ pshufd %4, %1, %5
+ xorps %1, %2
+ addps %1, %4
+ mulps %1, %3
+%else
+ shufps %4, %1, %1, %5
+ xorps %1, %1, %2
+ addps %4, %4, %1
+ mulps %1, %4, %3
+%endif
+%endmacro
+
+%macro BUTTERFLY2 4
+ BUTTERFLY0 %1, %2, %3, %4, 0x1b
+%endmacro
+
+%macro BUTTERFLY3 4
+ BUTTERFLY0 %1, %2, %3, %4, 0xb1
+%endmacro
+
+%macro BUTTERFLY3V 5
+ movaps m%5, m%1
+ addps m%1, m%2
+ subps m%5, m%2
+ SWAP %2, %5
+ mulps m%2, [ps_cos_vec+192]
+ movaps m%5, m%3
+ addps m%3, m%4
+ subps m%4, m%5
+ mulps m%4, [ps_cos_vec+192]
+%endmacro
+
+%macro PASS6_AND_PERMUTE 0
+ mov tmpd, [outq+4]
+ movss m7, [outq+72]
+ addss m7, [outq+76]
+ movss m3, [outq+56]
+ addss m3, [outq+60]
+ addss m4, m3
+ movss m2, [outq+52]
+ addss m2, m3
+ movss m3, [outq+104]
+ addss m3, [outq+108]
+ addss m1, m3
+ addss m5, m4
+ movss [outq+ 16], m1
+ movss m1, [outq+100]
+ addss m1, m3
+ movss m3, [outq+40]
+ movss [outq+ 48], m1
+ addss m3, [outq+44]
+ movss m1, [outq+100]
+ addss m4, m3
+ addss m3, m2
+ addss m1, [outq+108]
+ movss [outq+ 40], m3
+ addss m2, [outq+36]
+ movss m3, [outq+8]
+ movss [outq+ 56], m2
+ addss m3, [outq+12]
+ movss [outq+ 32], m3
+ movss m3, [outq+80]
+ movss [outq+ 8], m5
+ movss [outq+ 80], m1
+ movss m2, [outq+52]
+ movss m5, [outq+120]
+ addss m5, [outq+124]
+ movss m1, [outq+64]
+ addss m2, [outq+60]
+ addss m0, m5
+ addss m5, [outq+116]
+ mov [outq+64], tmpd
+ addss m6, m0
+ addss m1, m6
+ mov tmpd, [outq+12]
+ mov [outq+ 96], tmpd
+ movss [outq+ 4], m1
+ movss m1, [outq+24]
+ movss [outq+ 24], m4
+ movss m4, [outq+88]
+ addss m4, [outq+92]
+ addss m3, m4
+ addss m4, [outq+84]
+ mov tmpd, [outq+108]
+ addss m1, [outq+28]
+ addss m0, m1
+ addss m1, m5
+ addss m6, m3
+ addss m3, m0
+ addss m0, m7
+ addss m5, [outq+20]
+ addss m7, m1
+ movss [outq+ 12], m6
+ mov [outq+112], tmpd
+ movss m6, [outq+28]
+ movss [outq+ 28], m0
+ movss m0, [outq+36]
+ movss [outq+ 36], m7
+ addss m1, m4
+ movss m7, [outq+116]
+ addss m0, m2
+ addss m7, [outq+124]
+ movss [outq+ 72], m0
+ movss m0, [outq+44]
+ addss m2, m0
+ movss [outq+ 44], m1
+ movss [outq+ 88], m2
+ addss m0, [outq+60]
+ mov tmpd, [outq+60]
+ mov [outq+120], tmpd
+ movss [outq+104], m0
+ addss m4, m5
+ addss m5, [outq+68]
+ movss [outq+52], m4
+ movss [outq+60], m5
+ movss m4, [outq+68]
+ movss m5, [outq+20]
+ movss [outq+ 20], m3
+ addss m5, m7
+ addss m7, m6
+ addss m4, m5
+ movss m2, [outq+84]
+ addss m2, [outq+92]
+ addss m5, m2
+ movss [outq+ 68], m4
+ addss m2, m7
+ movss m4, [outq+76]
+ movss [outq+ 84], m2
+ movss [outq+ 76], m5
+ addss m7, m4
+ addss m6, [outq+124]
+ addss m4, m6
+ addss m6, [outq+92]
+ movss [outq+100], m4
+ movss [outq+108], m6
+ movss m6, [outq+92]
+ movss [outq+92], m7
+ addss m6, [outq+124]
+ movss [outq+116], m6
+%endmacro
+
+INIT_YMM avx
+SECTION .text
+%if HAVE_AVX_EXTERNAL
+; void ff_dct32_float_avx(FFTSample *out, const FFTSample *in)
+cglobal dct32_float, 2,3,8, out, in, tmp
+ ; pass 1
+ vmovaps m4, [inq+0]
+ vinsertf128 m5, m5, [inq+96], 1
+ vinsertf128 m5, m5, [inq+112], 0
+ vshufps m5, m5, m5, 0x1b
+ BUTTERFLY m4, m5, [ps_cos_vec], m6
+
+ vmovaps m2, [inq+64]
+ vinsertf128 m6, m6, [inq+32], 1
+ vinsertf128 m6, m6, [inq+48], 0
+ vshufps m6, m6, m6, 0x1b
+ BUTTERFLY m2, m6, [ps_cos_vec+32], m0
+
+ ; pass 2
+
+ BUTTERFLY m5, m6, [ps_cos_vec+64], m0
+ BUTTERFLY m4, m2, [ps_cos_vec+64], m7
+
+
+ ; pass 3
+ vperm2f128 m3, m6, m4, 0x31
+ vperm2f128 m1, m6, m4, 0x20
+ vshufps m3, m3, m3, 0x1b
+
+ BUTTERFLY m1, m3, [ps_cos_vec+96], m6
+
+
+ vperm2f128 m4, m5, m2, 0x20
+ vperm2f128 m5, m5, m2, 0x31
+ vshufps m5, m5, m5, 0x1b
+
+ BUTTERFLY m4, m5, [ps_cos_vec+96], m6
+
+ ; pass 4
+ vmovaps m6, [ps_p1p1m1m1+0]
+ vmovaps m2, [ps_cos_vec+128]
+
+ BUTTERFLY2 m5, m6, m2, m7
+ BUTTERFLY2 m4, m6, m2, m7
+ BUTTERFLY2 m1, m6, m2, m7
+ BUTTERFLY2 m3, m6, m2, m7
+
+
+ ; pass 5
+ vshufps m6, m6, m6, 0xcc
+ vmovaps m2, [ps_cos_vec+160]
+
+ BUTTERFLY3 m5, m6, m2, m7
+ BUTTERFLY3 m4, m6, m2, m7
+ BUTTERFLY3 m1, m6, m2, m7
+ BUTTERFLY3 m3, m6, m2, m7
+
+ vperm2f128 m6, m3, m3, 0x31
+ vmovaps [outq], m3
+
+ vextractf128 [outq+64], m5, 1
+ vextractf128 [outq+32], m5, 0
+
+ vextractf128 [outq+80], m4, 1
+ vextractf128 [outq+48], m4, 0
+
+ vperm2f128 m0, m1, m1, 0x31
+ vmovaps [outq+96], m1
+
+ vzeroupper
+
+ ; pass 6, no SIMD...
+INIT_XMM
+ PASS6_AND_PERMUTE
+ RET
+%endif
+
+%if ARCH_X86_64
+%define SPILL SWAP
+%define UNSPILL SWAP
+
+%macro PASS5 0
+ nop ; FIXME code alignment
+ SWAP 5, 8
+ SWAP 4, 12
+ SWAP 6, 14
+ SWAP 7, 13
+ SWAP 0, 15
+ PERMUTE 9,10, 10,12, 11,14, 12,9, 13,11, 14,13
+ TRANSPOSE4x4PS 8, 9, 10, 11, 0
+ BUTTERFLY3V 8, 9, 10, 11, 0
+ addps m10, m11
+ TRANSPOSE4x4PS 12, 13, 14, 15, 0
+ BUTTERFLY3V 12, 13, 14, 15, 0
+ addps m14, m15
+ addps m12, m14
+ addps m14, m13
+ addps m13, m15
+%endmacro
+
+%macro PASS6 0
+ SWAP 9, 12
+ SWAP 11, 14
+ movss [outq+0x00], m8
+ pshuflw m0, m8, 0xe
+ movss [outq+0x10], m9
+ pshuflw m1, m9, 0xe
+ movss [outq+0x20], m10
+ pshuflw m2, m10, 0xe
+ movss [outq+0x30], m11
+ pshuflw m3, m11, 0xe
+ movss [outq+0x40], m12
+ pshuflw m4, m12, 0xe
+ movss [outq+0x50], m13
+ pshuflw m5, m13, 0xe
+ movss [outq+0x60], m14
+ pshuflw m6, m14, 0xe
+ movaps [outq+0x70], m15
+ pshuflw m7, m15, 0xe
+ addss m0, m1
+ addss m1, m2
+ movss [outq+0x08], m0
+ addss m2, m3
+ movss [outq+0x18], m1
+ addss m3, m4
+ movss [outq+0x28], m2
+ addss m4, m5
+ movss [outq+0x38], m3
+ addss m5, m6
+ movss [outq+0x48], m4
+ addss m6, m7
+ movss [outq+0x58], m5
+ movss [outq+0x68], m6
+ movss [outq+0x78], m7
+
+ PERMUTE 1,8, 3,9, 5,10, 7,11, 9,12, 11,13, 13,14, 8,1, 10,3, 12,5, 14,7
+ movhlps m0, m1
+ pshufd m1, m1, 3
+ SWAP 0, 2, 4, 6, 8, 10, 12, 14
+ SWAP 1, 3, 5, 7, 9, 11, 13, 15
+%rep 7
+ movhlps m0, m1
+ pshufd m1, m1, 3
+ addss m15, m1
+ SWAP 0, 2, 4, 6, 8, 10, 12, 14
+ SWAP 1, 3, 5, 7, 9, 11, 13, 15
+%endrep
+%assign i 4
+%rep 15
+ addss m0, m1
+ movss [outq+i], m0
+ SWAP 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+ %assign i i+8
+%endrep
+%endmacro
+
+%else ; ARCH_X86_32
+%macro SPILL 2 ; xmm#, mempos
+ movaps [outq+(%2-8)*16], m%1
+%endmacro
+%macro UNSPILL 2
+ movaps m%1, [outq+(%2-8)*16]
+%endmacro
+
+%define PASS6 PASS6_AND_PERMUTE
+%macro PASS5 0
+ movaps m2, [ps_cos_vec+160]
+ shufps m3, m3, 0xcc
+
+ BUTTERFLY3 m5, m3, m2, m1
+ SPILL 5, 8
+
+ UNSPILL 1, 9
+ BUTTERFLY3 m1, m3, m2, m5
+ SPILL 1, 14
+
+ BUTTERFLY3 m4, m3, m2, m5
+ SPILL 4, 12
+
+ BUTTERFLY3 m7, m3, m2, m5
+ SPILL 7, 13
+
+ UNSPILL 5, 10
+ BUTTERFLY3 m5, m3, m2, m7
+ SPILL 5, 10
+
+ UNSPILL 4, 11
+ BUTTERFLY3 m4, m3, m2, m7
+ SPILL 4, 11
+
+ BUTTERFLY3 m6, m3, m2, m7
+ SPILL 6, 9
+
+ BUTTERFLY3 m0, m3, m2, m7
+ SPILL 0, 15
+%endmacro
+%endif
+
+
+; void ff_dct32_float(FFTSample *out, const FFTSample *in)
+%macro DCT32_FUNC 0
+cglobal dct32_float, 2, 3, 16, out, in, tmp
+ ; pass 1
+
+ movaps m0, [inq+0]
+ LOAD_INV m1, [inq+112]
+ BUTTERFLY m0, m1, [ps_cos_vec], m3
+
+ movaps m7, [inq+64]
+ LOAD_INV m4, [inq+48]
+ BUTTERFLY m7, m4, [ps_cos_vec+32], m3
+
+ ; pass 2
+ movaps m2, [ps_cos_vec+64]
+ BUTTERFLY m1, m4, m2, m3
+ SPILL 1, 11
+ SPILL 4, 8
+
+ ; pass 1
+ movaps m1, [inq+16]
+ LOAD_INV m6, [inq+96]
+ BUTTERFLY m1, m6, [ps_cos_vec+16], m3
+
+ movaps m4, [inq+80]
+ LOAD_INV m5, [inq+32]
+ BUTTERFLY m4, m5, [ps_cos_vec+48], m3
+
+ ; pass 2
+ BUTTERFLY m0, m7, m2, m3
+
+ movaps m2, [ps_cos_vec+80]
+ BUTTERFLY m6, m5, m2, m3
+
+ BUTTERFLY m1, m4, m2, m3
+
+ ; pass 3
+ movaps m2, [ps_cos_vec+96]
+ shufps m1, m1, 0x1b
+ BUTTERFLY m0, m1, m2, m3
+ SPILL 0, 15
+ SPILL 1, 14
+
+ UNSPILL 0, 8
+ shufps m5, m5, 0x1b
+ BUTTERFLY m0, m5, m2, m3
+
+ UNSPILL 1, 11
+ shufps m6, m6, 0x1b
+ BUTTERFLY m1, m6, m2, m3
+ SPILL 1, 11
+
+ shufps m4, m4, 0x1b
+ BUTTERFLY m7, m4, m2, m3
+
+ ; pass 4
+ movaps m3, [ps_p1p1m1m1+0]
+ movaps m2, [ps_cos_vec+128]
+
+ BUTTERFLY2 m5, m3, m2, m1
+
+ BUTTERFLY2 m0, m3, m2, m1
+ SPILL 0, 9
+
+ BUTTERFLY2 m6, m3, m2, m1
+ SPILL 6, 10
+
+ UNSPILL 0, 11
+ BUTTERFLY2 m0, m3, m2, m1
+ SPILL 0, 11
+
+ BUTTERFLY2 m4, m3, m2, m1
+
+ BUTTERFLY2 m7, m3, m2, m1
+
+ UNSPILL 6, 14
+ BUTTERFLY2 m6, m3, m2, m1
+
+ UNSPILL 0, 15
+ BUTTERFLY2 m0, m3, m2, m1
+
+ PASS5
+ PASS6
+ RET
+%endmacro
+
+%macro LOAD_INV 2
+ pshufd %1, %2, 0x1b
+%endmacro
+
+INIT_XMM sse2
+DCT32_FUNC
diff --git a/media/ffvpx/libavcodec/x86/dct_init.c b/media/ffvpx/libavcodec/x86/dct_init.c
new file mode 100644
index 0000000000..d0e4b34dd3
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/dct_init.c
@@ -0,0 +1,36 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/dct.h"
+
+void ff_dct32_float_sse2(FFTSample *out, const FFTSample *in);
+void ff_dct32_float_avx(FFTSample *out, const FFTSample *in);
+
+av_cold void ff_dct_init_x86(DCTContext *s)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE2(cpu_flags))
+ s->dct32 = ff_dct32_float_sse2;
+ if (EXTERNAL_AVX_FAST(cpu_flags))
+ s->dct32 = ff_dct32_float_avx;
+}
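
The two kernels above are installed in order of increasing capability, so the AVX version wins when available. A hypothetical direct call to the SSE2 kernel (not part of the patch; the 32-byte alignment is an assumption on my part, chosen to satisfy the aligned loads in dct32.asm) could look like this:

    #include "libavutil/cpu.h"
    #include "libavutil/mem_internal.h"
    #include "libavutil/x86/cpu.h"
    #include "libavcodec/dct.h"

    /* prototype as declared in dct_init.c above */
    void ff_dct32_float_sse2(FFTSample *out, const FFTSample *in);

    /* Hypothetical demo, not part of the patch. */
    static void demo_dct32(void)
    {
        DECLARE_ALIGNED(32, FFTSample, in)[32]  = { 1.0f };  /* impulse input */
        DECLARE_ALIGNED(32, FFTSample, out)[32];

        if (EXTERNAL_SSE2(av_get_cpu_flags()))
            ff_dct32_float_sse2(out, in);   /* out[] receives the 32-point transform */
    }
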
diff --git a/media/ffvpx/libavcodec/x86/fdct.c b/media/ffvpx/libavcodec/x86/fdct.c
new file mode 100644
index 0000000000..f4677ff4be
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/fdct.c
@@ -0,0 +1,378 @@
+/*
+ * SIMD-optimized forward DCT
+ * The gcc porting is Copyright (c) 2001 Fabrice Bellard.
+ * cleanup/optimizations are Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * SSE2 optimization is Copyright (c) 2004 Denes Balatoni.
+ *
+ * from fdctam32.c - AP922 MMX(3D-Now) forward-DCT
+ *
+ * Intel Application Note AP-922 - fast, precise implementation of DCT
+ * http://developer.intel.com/vtune/cbts/appnotes.htm
+ *
+ * Also of inspiration:
+ * a page about fdct at http://www.geocities.com/ssavekar/dct.htm
+ * Skal's fdct at http://skal.planet-d.net/coding/dct.html
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/macros.h"
+#include "libavutil/mem_internal.h"
+#include "libavutil/x86/asm.h"
+#include "fdct.h"
+
+#if HAVE_SSE2_INLINE
+
+//////////////////////////////////////////////////////////////////////
+//
+// constants for the forward DCT
+// -----------------------------
+//
+// Be sure to check that your compiler is aligning all constants to QWORD
+// (8-byte) memory boundaries! Otherwise the unaligned memory access will
+// severely stall MMX execution.
+//
+//////////////////////////////////////////////////////////////////////
+
+#define BITS_FRW_ACC 3 //; 2 or 3 for accuracy
+#define SHIFT_FRW_COL BITS_FRW_ACC
+#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17 - 3)
+#define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1))
+//#define RND_FRW_COL (1 << (SHIFT_FRW_COL-1))
+
+#define X8(x) x,x,x,x,x,x,x,x
+
+//concatenated table, for forward DCT transformation
+DECLARE_ALIGNED(16, static const int16_t, fdct_tg_all_16)[24] = {
+ X8(13036), // tg * (2<<16) + 0.5
+ X8(27146), // tg * (2<<16) + 0.5
+ X8(-21746) // tg * (2<<16) + 0.5
+};
+
+DECLARE_ALIGNED(16, static const int16_t, ocos_4_16)[8] = {
+ X8(23170) //cos * (2<<15) + 0.5
+};
+
+DECLARE_ALIGNED(16, static const int16_t, fdct_one_corr)[8] = { X8(1) };
+
+static const struct
+{
+ DECLARE_ALIGNED(16, const int32_t, fdct_r_row_sse2)[4];
+} fdct_r_row_sse2 =
+{{
+ RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW
+}};
+//DECLARE_ALIGNED(16, static const long, fdct_r_row_sse2)[4] = {RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW};
+
+static const struct
+{
+ DECLARE_ALIGNED(16, const int16_t, tab_frw_01234567_sse2)[256];
+} tab_frw_01234567_sse2 =
+{{
+//DECLARE_ALIGNED(16, static const int16_t, tab_frw_01234567_sse2)[] = { // forward_dct coeff table
+#define TABLE_SSE2 C4, C4, C1, C3, -C6, -C2, -C1, -C5, \
+ C4, C4, C5, C7, C2, C6, C3, -C7, \
+ -C4, C4, C7, C3, C6, -C2, C7, -C5, \
+ C4, -C4, C5, -C1, C2, -C6, C3, -C1,
+// c1..c7 * cos(pi/4) * 2^15
+#define C1 22725
+#define C2 21407
+#define C3 19266
+#define C4 16384
+#define C5 12873
+#define C6 8867
+#define C7 4520
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 31521
+#define C2 29692
+#define C3 26722
+#define C4 22725
+#define C5 17855
+#define C6 12299
+#define C7 6270
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 29692
+#define C2 27969
+#define C3 25172
+#define C4 21407
+#define C5 16819
+#define C6 11585
+#define C7 5906
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 26722
+#define C2 25172
+#define C3 22654
+#define C4 19266
+#define C5 15137
+#define C6 10426
+#define C7 5315
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 22725
+#define C2 21407
+#define C3 19266
+#define C4 16384
+#define C5 12873
+#define C6 8867
+#define C7 4520
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 26722
+#define C2 25172
+#define C3 22654
+#define C4 19266
+#define C5 15137
+#define C6 10426
+#define C7 5315
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 29692
+#define C2 27969
+#define C3 25172
+#define C4 21407
+#define C5 16819
+#define C6 11585
+#define C7 5906
+TABLE_SSE2
+
+#undef C1
+#undef C2
+#undef C3
+#undef C4
+#undef C5
+#undef C6
+#undef C7
+#define C1 31521
+#define C2 29692
+#define C3 26722
+#define C4 22725
+#define C5 17855
+#define C6 12299
+#define C7 6270
+TABLE_SSE2
+}};
+
+#define S(s) AV_TOSTRING(s) //AV_STRINGIFY is too long
+
+#define FDCT_COL(cpu, mm, mov)\
+static av_always_inline void fdct_col_##cpu(const int16_t *in, int16_t *out, int offset)\
+{\
+ __asm__ volatile (\
+ #mov" 16(%0), %%"#mm"0 \n\t" \
+ #mov" 96(%0), %%"#mm"1 \n\t" \
+ #mov" %%"#mm"0, %%"#mm"2 \n\t" \
+ #mov" 32(%0), %%"#mm"3 \n\t" \
+ "paddsw %%"#mm"1, %%"#mm"0 \n\t" \
+ #mov" 80(%0), %%"#mm"4 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"0 \n\t" \
+ #mov" (%0), %%"#mm"5 \n\t" \
+ "paddsw %%"#mm"3, %%"#mm"4 \n\t" \
+ "paddsw 112(%0), %%"#mm"5 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"4 \n\t" \
+ #mov" %%"#mm"0, %%"#mm"6 \n\t" \
+ "psubsw %%"#mm"1, %%"#mm"2 \n\t" \
+ #mov" 16(%1), %%"#mm"1 \n\t" \
+ "psubsw %%"#mm"4, %%"#mm"0 \n\t" \
+ #mov" 48(%0), %%"#mm"7 \n\t" \
+ "pmulhw %%"#mm"0, %%"#mm"1 \n\t" \
+ "paddsw 64(%0), %%"#mm"7 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"5 \n\t" \
+ "paddsw %%"#mm"4, %%"#mm"6 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"7 \n\t" \
+ #mov" %%"#mm"5, %%"#mm"4 \n\t" \
+ "psubsw %%"#mm"7, %%"#mm"5 \n\t" \
+ "paddsw %%"#mm"5, %%"#mm"1 \n\t" \
+ "paddsw %%"#mm"7, %%"#mm"4 \n\t" \
+ "por (%2), %%"#mm"1 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)"+1, %%"#mm"2 \n\t" \
+ "pmulhw 16(%1), %%"#mm"5 \n\t" \
+ #mov" %%"#mm"4, %%"#mm"7 \n\t" \
+ "psubsw 80(%0), %%"#mm"3 \n\t" \
+ "psubsw %%"#mm"6, %%"#mm"4 \n\t" \
+ #mov" %%"#mm"1, 32(%3) \n\t" \
+ "paddsw %%"#mm"6, %%"#mm"7 \n\t" \
+ #mov" 48(%0), %%"#mm"1 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)"+1, %%"#mm"3 \n\t" \
+ "psubsw 64(%0), %%"#mm"1 \n\t" \
+ #mov" %%"#mm"2, %%"#mm"6 \n\t" \
+ #mov" %%"#mm"4, 64(%3) \n\t" \
+ "paddsw %%"#mm"3, %%"#mm"2 \n\t" \
+ "pmulhw (%4), %%"#mm"2 \n\t" \
+ "psubsw %%"#mm"3, %%"#mm"6 \n\t" \
+ "pmulhw (%4), %%"#mm"6 \n\t" \
+ "psubsw %%"#mm"0, %%"#mm"5 \n\t" \
+ "por (%2), %%"#mm"5 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"1 \n\t" \
+ "por (%2), %%"#mm"2 \n\t" \
+ #mov" %%"#mm"1, %%"#mm"4 \n\t" \
+ #mov" (%0), %%"#mm"3 \n\t" \
+ "paddsw %%"#mm"6, %%"#mm"1 \n\t" \
+ "psubsw 112(%0), %%"#mm"3 \n\t" \
+ "psubsw %%"#mm"6, %%"#mm"4 \n\t" \
+ #mov" (%1), %%"#mm"0 \n\t" \
+ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"3 \n\t" \
+ #mov" 32(%1), %%"#mm"6 \n\t" \
+ "pmulhw %%"#mm"1, %%"#mm"0 \n\t" \
+ #mov" %%"#mm"7, (%3) \n\t" \
+ "pmulhw %%"#mm"4, %%"#mm"6 \n\t" \
+ #mov" %%"#mm"5, 96(%3) \n\t" \
+ #mov" %%"#mm"3, %%"#mm"7 \n\t" \
+ #mov" 32(%1), %%"#mm"5 \n\t" \
+ "psubsw %%"#mm"2, %%"#mm"7 \n\t" \
+ "paddsw %%"#mm"2, %%"#mm"3 \n\t" \
+ "pmulhw %%"#mm"7, %%"#mm"5 \n\t" \
+ "paddsw %%"#mm"3, %%"#mm"0 \n\t" \
+ "paddsw %%"#mm"4, %%"#mm"6 \n\t" \
+ "pmulhw (%1), %%"#mm"3 \n\t" \
+ "por (%2), %%"#mm"0 \n\t" \
+ "paddsw %%"#mm"7, %%"#mm"5 \n\t" \
+ "psubsw %%"#mm"6, %%"#mm"7 \n\t" \
+ #mov" %%"#mm"0, 16(%3) \n\t" \
+ "paddsw %%"#mm"4, %%"#mm"5 \n\t" \
+ #mov" %%"#mm"7, 48(%3) \n\t" \
+ "psubsw %%"#mm"1, %%"#mm"3 \n\t" \
+ #mov" %%"#mm"5, 80(%3) \n\t" \
+ #mov" %%"#mm"3, 112(%3) \n\t" \
+ : \
+ : "r" (in + offset), "r" (fdct_tg_all_16), "r" (fdct_one_corr), \
+ "r" (out + offset), "r" (ocos_4_16)); \
+}
+
+FDCT_COL(sse2, xmm, movdqa)
+
+static av_always_inline void fdct_row_sse2(const int16_t *in, int16_t *out)
+{
+ __asm__ volatile(
+#define FDCT_ROW_SSE2_H1(i,t) \
+ "movq " #i "(%0), %%xmm2 \n\t" \
+ "movq " #i "+8(%0), %%xmm0 \n\t" \
+ "movdqa " #t "+32(%1), %%xmm3 \n\t" \
+ "movdqa " #t "+48(%1), %%xmm7 \n\t" \
+ "movdqa " #t "(%1), %%xmm4 \n\t" \
+ "movdqa " #t "+16(%1), %%xmm5 \n\t"
+
+#define FDCT_ROW_SSE2_H2(i,t) \
+ "movq " #i "(%0), %%xmm2 \n\t" \
+ "movq " #i "+8(%0), %%xmm0 \n\t" \
+ "movdqa " #t "+32(%1), %%xmm3 \n\t" \
+ "movdqa " #t "+48(%1), %%xmm7 \n\t"
+
+#define FDCT_ROW_SSE2(i) \
+ "movq %%xmm2, %%xmm1 \n\t" \
+ "pshuflw $27, %%xmm0, %%xmm0 \n\t" \
+ "paddsw %%xmm0, %%xmm1 \n\t" \
+ "psubsw %%xmm0, %%xmm2 \n\t" \
+ "punpckldq %%xmm2, %%xmm1 \n\t" \
+ "pshufd $78, %%xmm1, %%xmm2 \n\t" \
+ "pmaddwd %%xmm2, %%xmm3 \n\t" \
+ "pmaddwd %%xmm1, %%xmm7 \n\t" \
+ "pmaddwd %%xmm5, %%xmm2 \n\t" \
+ "pmaddwd %%xmm4, %%xmm1 \n\t" \
+ "paddd %%xmm7, %%xmm3 \n\t" \
+ "paddd %%xmm2, %%xmm1 \n\t" \
+ "paddd %%xmm6, %%xmm3 \n\t" \
+ "paddd %%xmm6, %%xmm1 \n\t" \
+ "psrad %3, %%xmm3 \n\t" \
+ "psrad %3, %%xmm1 \n\t" \
+ "packssdw %%xmm3, %%xmm1 \n\t" \
+ "movdqa %%xmm1, " #i "(%4) \n\t"
+
+ "movdqa (%2), %%xmm6 \n\t"
+ FDCT_ROW_SSE2_H1(0,0)
+ FDCT_ROW_SSE2(0)
+ FDCT_ROW_SSE2_H2(64,0)
+ FDCT_ROW_SSE2(64)
+
+ FDCT_ROW_SSE2_H1(16,64)
+ FDCT_ROW_SSE2(16)
+ FDCT_ROW_SSE2_H2(112,64)
+ FDCT_ROW_SSE2(112)
+
+ FDCT_ROW_SSE2_H1(32,128)
+ FDCT_ROW_SSE2(32)
+ FDCT_ROW_SSE2_H2(96,128)
+ FDCT_ROW_SSE2(96)
+
+ FDCT_ROW_SSE2_H1(48,192)
+ FDCT_ROW_SSE2(48)
+ FDCT_ROW_SSE2_H2(80,192)
+ FDCT_ROW_SSE2(80)
+ :
+ : "r" (in), "r" (tab_frw_01234567_sse2.tab_frw_01234567_sse2),
+ "r" (fdct_r_row_sse2.fdct_r_row_sse2), "i" (SHIFT_FRW_ROW), "r" (out)
+ XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
+ "%xmm4", "%xmm5", "%xmm6", "%xmm7")
+ );
+}
+
+void ff_fdct_sse2(int16_t *block)
+{
+ DECLARE_ALIGNED(16, int64_t, align_tmp)[16];
+ int16_t * const block1= (int16_t*)align_tmp;
+
+ fdct_col_sse2(block, block1, 0);
+ fdct_row_sse2(block1, block);
+}
+
+#endif /* HAVE_SSE2_INLINE */
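
ff_fdct_sse2() transforms an 8x8 block of int16_t in place, running the column pass and then the row pass shown above. A hypothetical caller (not part of the patch) only needs to guarantee 16-byte alignment, since the column pass loads with movdqa:

    #include "config.h"
    #include "libavutil/mem_internal.h"
    #include "fdct.h"

    /* Hypothetical demo, not part of the patch. */
    static void demo_forward_dct(void)
    {
        /* 8x8 block of spatial samples; 16-byte alignment is required. */
        DECLARE_ALIGNED(16, int16_t, block)[64] = { 0 };

        block[0] = 255;            /* arbitrary sample value */
    #if HAVE_SSE2_INLINE
        ff_fdct_sse2(block);       /* block[] now holds the DCT coefficients */
    #endif
    }
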
diff --git a/media/ffvpx/libavcodec/x86/fdct.h b/media/ffvpx/libavcodec/x86/fdct.h
new file mode 100644
index 0000000000..164d4fb30e
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/fdct.h
@@ -0,0 +1,26 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_FDCT_H
+#define AVCODEC_X86_FDCT_H
+
+#include <stdint.h>
+
+void ff_fdct_sse2(int16_t *block);
+
+#endif /* AVCODEC_X86_FDCT_H */
diff --git a/media/ffvpx/libavcodec/x86/fdctdsp_init.c b/media/ffvpx/libavcodec/x86/fdctdsp_init.c
new file mode 100644
index 0000000000..92a842433d
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/fdctdsp_init.c
@@ -0,0 +1,38 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/fdctdsp.h"
+#include "fdct.h"
+
+av_cold void ff_fdctdsp_init_x86(FDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth)
+{
+ int cpu_flags = av_get_cpu_flags();
+ const int dct_algo = avctx->dct_algo;
+
+ if (!high_bit_depth) {
+ if ((dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX)) {
+ if (INLINE_SSE2(cpu_flags))
+ c->fdct = ff_fdct_sse2;
+ }
+ }
+}
diff --git a/media/ffvpx/libavcodec/x86/fft.asm b/media/ffvpx/libavcodec/x86/fft.asm
new file mode 100644
index 0000000000..34c3fc9a0f
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/fft.asm
@@ -0,0 +1,838 @@
+;******************************************************************************
+;* FFT transform with SSE/AVX optimizations
+;* Copyright (c) 2008 Loren Merritt
+;* Copyright (c) 2011 Vitor Sessak
+;*
+;* This algorithm (though not any of the implementation details) is
+;* based on libdjbfft by D. J. Bernstein.
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+; These functions are not individually interchangeable with the C versions.
+; While C takes arrays of FFTComplex, SSE/3DNow leave intermediate results
+; in blocks as convenient to the vector size.
+; i.e. {4x real, 4x imaginary, 4x real, ...} (or 2x respectively)
+
+%include "libavutil/x86/x86util.asm"
+
+%if ARCH_X86_64
+%define pointer resq
+%else
+%define pointer resd
+%endif
+
+struc FFTContext
+ .nbits: resd 1
+ .reverse: resd 1
+ .revtab: pointer 1
+ .tmpbuf: pointer 1
+ .mdctsize: resd 1
+ .mdctbits: resd 1
+ .tcos: pointer 1
+ .tsin: pointer 1
+ .fftperm: pointer 1
+ .fftcalc: pointer 1
+ .imdctcalc:pointer 1
+ .imdcthalf:pointer 1
+endstruc
+
+SECTION_RODATA 32
+
+%define M_SQRT1_2 0.70710678118654752440
+%define M_COS_PI_1_8 0.923879532511287
+%define M_COS_PI_3_8 0.38268343236509
+
+ps_cos16_1: dd 1.0, M_COS_PI_1_8, M_SQRT1_2, M_COS_PI_3_8, 1.0, M_COS_PI_1_8, M_SQRT1_2, M_COS_PI_3_8
+ps_cos16_2: dd 0, M_COS_PI_3_8, M_SQRT1_2, M_COS_PI_1_8, 0, -M_COS_PI_3_8, -M_SQRT1_2, -M_COS_PI_1_8
+
+ps_root2: times 8 dd M_SQRT1_2
+ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2, -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
+ps_p1p1m1p1: dd 0, 0, 1<<31, 0, 0, 0, 1<<31, 0
+
+perm1: dd 0x00, 0x02, 0x03, 0x01, 0x03, 0x00, 0x02, 0x01
+perm2: dd 0x00, 0x01, 0x02, 0x03, 0x01, 0x00, 0x02, 0x03
+ps_p1p1m1p1root2: dd 1.0, 1.0, -1.0, 1.0, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2
+ps_m1m1p1m1p1m1m1m1: dd 1<<31, 1<<31, 0, 1<<31, 0, 1<<31, 1<<31, 1<<31
+ps_m1p1: dd 1<<31, 0
+
+cextern ps_neg
+
+%assign i 16
+%rep 14
+cextern cos_ %+ i
+%assign i i<<1
+%endrep
+
+%if ARCH_X86_64
+ %define pointer dq
+%else
+ %define pointer dd
+%endif
+
+%macro IF0 1+
+%endmacro
+%macro IF1 1+
+ %1
+%endmacro
+
+SECTION .text
+
+; in: %1 = {r0,i0,r2,i2,r4,i4,r6,i6}
+; %2 = {r1,i1,r3,i3,r5,i5,r7,i7}
+; %3, %4, %5 tmp
+; out: %1 = {r0,r1,r2,r3,i0,i1,i2,i3}
+; %2 = {r4,r5,r6,r7,i4,i5,i6,i7}
+%macro T8_AVX 5
+ vsubps %5, %1, %2 ; v = %1 - %2
+ vaddps %3, %1, %2 ; w = %1 + %2
+ vmulps %2, %5, [ps_p1p1m1p1root2] ; v *= vals1
+ vpermilps %2, %2, [perm1]
+ vblendps %1, %2, %3, 0x33 ; q = {w1,w2,v4,v2,w5,w6,v7,v6}
+ vshufps %5, %3, %2, 0x4e ; r = {w3,w4,v1,v3,w7,w8,v8,v5}
+ vsubps %4, %5, %1 ; s = r - q
+ vaddps %1, %5, %1 ; u = r + q
+ vpermilps %1, %1, [perm2] ; k = {u1,u2,u3,u4,u6,u5,u7,u8}
+ vshufps %5, %4, %1, 0xbb
+ vshufps %3, %4, %1, 0xee
+ vperm2f128 %3, %3, %5, 0x13
+ vxorps %4, %4, [ps_m1m1p1m1p1m1m1m1] ; s *= {1,1,-1,-1,1,-1,-1,-1}
+ vshufps %2, %1, %4, 0xdd
+ vshufps %1, %1, %4, 0x88
+ vperm2f128 %4, %2, %1, 0x02 ; v = {k1,k3,s1,s3,k2,k4,s2,s4}
+ vperm2f128 %1, %1, %2, 0x13 ; w = {k6,k8,s6,s8,k5,k7,s5,s7}
+ vsubps %5, %1, %3
+ vblendps %1, %5, %1, 0x55 ; w -= {0,s7,0,k7,0,s8,0,k8}
+ vsubps %2, %4, %1 ; %2 = v - w
+ vaddps %1, %4, %1 ; %1 = v + w
+%endmacro
+
+; In SSE mode do one fft4 transform
+; in: %1={r0,i0,r2,i2} %2={r1,i1,r3,i3}
+; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3}
+;
+; In AVX mode do two fft4 transforms
+; in: %1={r0,i0,r2,i2,r4,i4,r6,i6} %2={r1,i1,r3,i3,r5,i5,r7,i7}
+; out: %1={r0,r1,r2,r3,r4,r5,r6,r7} %2={i0,i1,i2,i3,i4,i5,i6,i7}
+%macro T4_SSE 3
+ subps %3, %1, %2 ; {t3,t4,-t8,t7}
+ addps %1, %1, %2 ; {t1,t2,t6,t5}
+ xorps %3, %3, [ps_p1p1m1p1]
+ shufps %2, %1, %3, 0xbe ; {t6,t5,t7,t8}
+ shufps %1, %1, %3, 0x44 ; {t1,t2,t3,t4}
+ subps %3, %1, %2 ; {r2,i2,r3,i3}
+ addps %1, %1, %2 ; {r0,i0,r1,i1}
+ shufps %2, %1, %3, 0xdd ; {i0,i1,i2,i3}
+ shufps %1, %1, %3, 0x88 ; {r0,r1,r2,r3}
+%endmacro
+
+; In SSE mode do one FFT8
+; in: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3} %3={r4,i4,r6,i6} %4={r5,i5,r7,i7}
+; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3} %3={r4,r5,r6,r7} %4={i4,i5,i6,i7}
+;
+; In AVX mode do two FFT8
+; in: %1={r0,i0,r2,i2,r8, i8, r10,i10} %2={r1,i1,r3,i3,r9, i9, r11,i11}
+; %3={r4,i4,r6,i6,r12,i12,r14,i14} %4={r5,i5,r7,i7,r13,i13,r15,i15}
+; out: %1={r0,r1,r2,r3,r8, r9, r10,r11} %2={i0,i1,i2,i3,i8, i9, i10,i11}
+; %3={r4,r5,r6,r7,r12,r13,r14,r15} %4={i4,i5,i6,i7,i12,i13,i14,i15}
+%macro T8_SSE 6
+ addps %6, %3, %4 ; {t1,t2,t3,t4}
+ subps %3, %3, %4 ; {r5,i5,r7,i7}
+ shufps %4, %3, %3, 0xb1 ; {i5,r5,i7,r7}
+ mulps %3, %3, [ps_root2mppm] ; {-r5,i5,r7,-i7}
+ mulps %4, %4, [ps_root2]
+ addps %3, %3, %4 ; {t8,t7,ta,t9}
+ shufps %4, %6, %3, 0x9c ; {t1,t4,t7,ta}
+ shufps %6, %6, %3, 0x36 ; {t3,t2,t9,t8}
+ subps %3, %6, %4 ; {t6,t5,tc,tb}
+ addps %6, %6, %4 ; {t1,t2,t9,ta}
+ shufps %5, %6, %3, 0x8d ; {t2,ta,t6,tc}
+ shufps %6, %6, %3, 0xd8 ; {t1,t9,t5,tb}
+ subps %3, %1, %6 ; {r4,r5,r6,r7}
+ addps %1, %1, %6 ; {r0,r1,r2,r3}
+ subps %4, %2, %5 ; {i4,i5,i6,i7}
+ addps %2, %2, %5 ; {i0,i1,i2,i3}
+%endmacro
+
+%macro INTERL 5
+%if cpuflag(avx)
+ vunpckhps %3, %2, %1
+ vunpcklps %2, %2, %1
+ vextractf128 %4(%5), %2, 0
+ vextractf128 %4 %+ H(%5), %3, 0
+ vextractf128 %4(%5 + 1), %2, 1
+ vextractf128 %4 %+ H(%5 + 1), %3, 1
+%elif cpuflag(sse)
+ mova %3, %2
+ unpcklps %2, %1
+ unpckhps %3, %1
+ mova %4(%5), %2
+ mova %4(%5+1), %3
+%endif
+%endmacro
+
+; scheduled for cpu-bound sizes
+%macro PASS_SMALL 3 ; (to load m4-m7), wre, wim
+IF%1 mova m4, Z(4)
+IF%1 mova m5, Z(5)
+ mova m0, %2 ; wre
+ mova m1, %3 ; wim
+ mulps m2, m4, m0 ; r2*wre
+IF%1 mova m6, Z2(6)
+ mulps m3, m5, m1 ; i2*wim
+IF%1 mova m7, Z2(7)
+ mulps m4, m4, m1 ; r2*wim
+ mulps m5, m5, m0 ; i2*wre
+ addps m2, m2, m3 ; r2*wre + i2*wim
+ mulps m3, m1, m7 ; i3*wim
+ subps m5, m5, m4 ; i2*wre - r2*wim
+ mulps m1, m1, m6 ; r3*wim
+ mulps m4, m0, m6 ; r3*wre
+ mulps m0, m0, m7 ; i3*wre
+ subps m4, m4, m3 ; r3*wre - i3*wim
+ mova m3, Z(0)
+ addps m0, m0, m1 ; i3*wre + r3*wim
+ subps m1, m4, m2 ; t3
+ addps m4, m4, m2 ; t5
+ subps m3, m3, m4 ; r2
+ addps m4, m4, Z(0) ; r0
+ mova m6, Z(2)
+ mova Z(4), m3
+ mova Z(0), m4
+ subps m3, m5, m0 ; t4
+ subps m4, m6, m3 ; r3
+ addps m3, m3, m6 ; r1
+ mova Z2(6), m4
+ mova Z(2), m3
+ mova m2, Z(3)
+ addps m3, m5, m0 ; t6
+ subps m2, m2, m1 ; i3
+ mova m7, Z(1)
+ addps m1, m1, Z(3) ; i1
+ mova Z2(7), m2
+ mova Z(3), m1
+ subps m4, m7, m3 ; i2
+ addps m3, m3, m7 ; i0
+ mova Z(5), m4
+ mova Z(1), m3
+%endmacro
+
+; scheduled to avoid store->load aliasing
+%macro PASS_BIG 1 ; (!interleave)
+ mova m4, Z(4) ; r2
+ mova m5, Z(5) ; i2
+ mova m0, [wq] ; wre
+ mova m1, [wq+o1q] ; wim
+ mulps m2, m4, m0 ; r2*wre
+ mova m6, Z2(6) ; r3
+ mulps m3, m5, m1 ; i2*wim
+ mova m7, Z2(7) ; i3
+ mulps m4, m4, m1 ; r2*wim
+ mulps m5, m5, m0 ; i2*wre
+ addps m2, m2, m3 ; r2*wre + i2*wim
+ mulps m3, m1, m7 ; i3*wim
+ mulps m1, m1, m6 ; r3*wim
+ subps m5, m5, m4 ; i2*wre - r2*wim
+ mulps m4, m0, m6 ; r3*wre
+ mulps m0, m0, m7 ; i3*wre
+ subps m4, m4, m3 ; r3*wre - i3*wim
+ mova m3, Z(0)
+ addps m0, m0, m1 ; i3*wre + r3*wim
+ subps m1, m4, m2 ; t3
+ addps m4, m4, m2 ; t5
+ subps m3, m3, m4 ; r2
+ addps m4, m4, Z(0) ; r0
+ mova m6, Z(2)
+ mova Z(4), m3
+ mova Z(0), m4
+ subps m3, m5, m0 ; t4
+ subps m4, m6, m3 ; r3
+ addps m3, m3, m6 ; r1
+IF%1 mova Z2(6), m4
+IF%1 mova Z(2), m3
+ mova m2, Z(3)
+ addps m5, m5, m0 ; t6
+ subps m2, m2, m1 ; i3
+ mova m7, Z(1)
+ addps m1, m1, Z(3) ; i1
+IF%1 mova Z2(7), m2
+IF%1 mova Z(3), m1
+ subps m6, m7, m5 ; i2
+ addps m5, m5, m7 ; i0
+IF%1 mova Z(5), m6
+IF%1 mova Z(1), m5
+%if %1==0
+ INTERL m1, m3, m7, Z, 2
+ INTERL m2, m4, m0, Z2, 6
+
+ mova m1, Z(0)
+ mova m2, Z(4)
+
+ INTERL m5, m1, m3, Z, 0
+ INTERL m6, m2, m7, Z, 4
+%endif
+%endmacro
+
+%define Z(x) [r0+mmsize*x]
+%define Z2(x) [r0+mmsize*x]
+%define ZH(x) [r0+mmsize*x+mmsize/2]
+
+INIT_YMM avx
+
+%if HAVE_AVX_EXTERNAL
+align 16
+fft8_avx:
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T8_AVX m0, m1, m2, m3, m4
+ mova Z(0), m0
+ mova Z(1), m1
+ ret
+
+
+align 16
+fft16_avx:
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T4_SSE m2, m3, m7
+
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T8_AVX m0, m1, m4, m5, m7
+
+ mova m4, [ps_cos16_1]
+ mova m5, [ps_cos16_2]
+ vmulps m6, m2, m4
+ vmulps m7, m3, m5
+ vaddps m7, m7, m6
+ vmulps m2, m2, m5
+ vmulps m3, m3, m4
+ vsubps m3, m3, m2
+ vblendps m2, m7, m3, 0xf0
+ vperm2f128 m3, m7, m3, 0x21
+ vaddps m4, m2, m3
+ vsubps m2, m3, m2
+ vperm2f128 m2, m2, m2, 0x01
+ vsubps m3, m1, m2
+ vaddps m1, m1, m2
+ vsubps m5, m0, m4
+ vaddps m0, m0, m4
+ vextractf128 Z(0), m0, 0
+ vextractf128 ZH(0), m1, 0
+ vextractf128 Z(1), m0, 1
+ vextractf128 ZH(1), m1, 1
+ vextractf128 Z(2), m5, 0
+ vextractf128 ZH(2), m3, 0
+ vextractf128 Z(3), m5, 1
+ vextractf128 ZH(3), m3, 1
+ ret
+
+align 16
+fft32_avx:
+ call fft16_avx
+
+ mova m0, Z(4)
+ mova m1, Z(5)
+
+ T4_SSE m0, m1, m4
+
+ mova m2, Z(6)
+ mova m3, Z(7)
+
+ T8_SSE m0, m1, m2, m3, m4, m6
+ ; m0={r0,r1,r2,r3,r8, r9, r10,r11} m1={i0,i1,i2,i3,i8, i9, i10,i11}
+ ; m2={r4,r5,r6,r7,r12,r13,r14,r15} m3={i4,i5,i6,i7,i12,i13,i14,i15}
+
+ vperm2f128 m4, m0, m2, 0x20
+ vperm2f128 m5, m1, m3, 0x20
+ vperm2f128 m6, m0, m2, 0x31
+ vperm2f128 m7, m1, m3, 0x31
+
+ PASS_SMALL 0, [cos_32], [cos_32+32]
+
+ ret
+
+fft32_interleave_avx:
+ call fft32_avx
+ mov r2d, 32
+.deint_loop:
+ mova m2, Z(0)
+ mova m3, Z(1)
+ vunpcklps m0, m2, m3
+ vunpckhps m1, m2, m3
+ vextractf128 Z(0), m0, 0
+ vextractf128 ZH(0), m1, 0
+ vextractf128 Z(1), m0, 1
+ vextractf128 ZH(1), m1, 1
+ add r0, mmsize*2
+ sub r2d, mmsize/4
+ jg .deint_loop
+ ret
+
+%endif
+
+INIT_XMM sse
+
+align 16
+fft4_avx:
+fft4_sse:
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T4_SSE m0, m1, m2
+ mova Z(0), m0
+ mova Z(1), m1
+ ret
+
+align 16
+fft8_sse:
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T4_SSE m0, m1, m2
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T8_SSE m0, m1, m2, m3, m4, m5
+ mova Z(0), m0
+ mova Z(1), m1
+ mova Z(2), m2
+ mova Z(3), m3
+ ret
+
+align 16
+fft16_sse:
+ mova m0, Z(0)
+ mova m1, Z(1)
+ T4_SSE m0, m1, m2
+ mova m2, Z(2)
+ mova m3, Z(3)
+ T8_SSE m0, m1, m2, m3, m4, m5
+ mova m4, Z(4)
+ mova m5, Z(5)
+ mova Z(0), m0
+ mova Z(1), m1
+ mova Z(2), m2
+ mova Z(3), m3
+ T4_SSE m4, m5, m6
+ mova m6, Z2(6)
+ mova m7, Z2(7)
+ T4_SSE m6, m7, m0
+ PASS_SMALL 0, [cos_16], [cos_16+16]
+ ret
+
+
+%define Z(x) [zcq + o1q*(x&6) + mmsize*(x&1)]
+%define Z2(x) [zcq + o3q + mmsize*(x&1)]
+%define ZH(x) [zcq + o1q*(x&6) + mmsize*(x&1) + mmsize/2]
+%define Z2H(x) [zcq + o3q + mmsize*(x&1) + mmsize/2]
+
+%macro DECL_PASS 2+ ; name, payload
+align 16
+%1:
+DEFINE_ARGS zc, w, n, o1, o3
+ lea o3q, [nq*3]
+ lea o1q, [nq*8]
+ shl o3q, 4
+.loop:
+ %2
+ add zcq, mmsize*2
+ add wq, mmsize
+ sub nd, mmsize/8
+ jg .loop
+ rep ret
+%endmacro
+
+%macro FFT_DISPATCH 2; clobbers 5 GPRs, 8 XMMs
+ lea r2, [dispatch_tab%1]
+ mov r2, [r2 + (%2q-2)*gprsize]
+%ifdef PIC
+ lea r3, [$$]
+ add r2, r3
+%endif
+ call r2
+%endmacro ; FFT_DISPATCH
+
+INIT_YMM avx
+
+%if HAVE_AVX_EXTERNAL
+DECL_PASS pass_avx, PASS_BIG 1
+DECL_PASS pass_interleave_avx, PASS_BIG 0
+
+cglobal fft_calc, 2,5,8
+ mov r3d, [r0 + FFTContext.nbits]
+ mov r0, r1
+ mov r1, r3
+ FFT_DISPATCH _interleave %+ SUFFIX, r1
+ RET
+
+%endif
+
+INIT_XMM sse
+
+DECL_PASS pass_sse, PASS_BIG 1
+DECL_PASS pass_interleave_sse, PASS_BIG 0
+
+INIT_XMM sse
+cglobal fft_calc, 2,5,8
+ mov r3d, [r0 + FFTContext.nbits]
+ PUSH r1
+ PUSH r3
+ mov r0, r1
+ mov r1, r3
+ FFT_DISPATCH _interleave %+ SUFFIX, r1
+ POP rcx
+ POP r4
+ cmp rcx, 3+(mmsize/16)
+ jg .end
+ mov r2, -1
+ add rcx, 3
+ shl r2, cl
+ sub r4, r2
+.loop:
+ movaps xmm0, [r4 + r2]
+ movaps xmm1, xmm0
+ unpcklps xmm0, [r4 + r2 + 16]
+ unpckhps xmm1, [r4 + r2 + 16]
+ movaps [r4 + r2], xmm0
+ movaps [r4 + r2 + 16], xmm1
+ add r2, mmsize*2
+ jl .loop
+.end:
+ RET
+
+cglobal fft_permute, 2,7,1
+ mov r4, [r0 + FFTContext.revtab]
+ mov r5, [r0 + FFTContext.tmpbuf]
+ mov ecx, [r0 + FFTContext.nbits]
+ mov r2, 1
+ shl r2, cl
+ xor r0, r0
+%if ARCH_X86_32
+ mov r1, r1m
+%endif
+.loop:
+ movaps xmm0, [r1 + 8*r0]
+ movzx r6, word [r4 + 2*r0]
+ movzx r3, word [r4 + 2*r0 + 2]
+ movlps [r5 + 8*r6], xmm0
+ movhps [r5 + 8*r3], xmm0
+ add r0, 2
+ cmp r0, r2
+ jl .loop
+ shl r2, 3
+ add r1, r2
+ add r5, r2
+ neg r2
+; nbits >= 2 (FFT4) and sizeof(FFTComplex)=8 => at least 32B
+.loopcopy:
+ movaps xmm0, [r5 + r2]
+ movaps xmm1, [r5 + r2 + 16]
+ movaps [r1 + r2], xmm0
+ movaps [r1 + r2 + 16], xmm1
+ add r2, 32
+ jl .loopcopy
+ RET
+
+INIT_XMM sse
+cglobal imdct_calc, 3,5,3
+ mov r3d, [r0 + FFTContext.mdctsize]
+ mov r4, [r0 + FFTContext.imdcthalf]
+ add r1, r3
+ PUSH r3
+ PUSH r1
+%if ARCH_X86_32
+ push r2
+ push r1
+ push r0
+%else
+ sub rsp, 8+32*WIN64 ; allocate win64 shadow space
+%endif
+ call r4
+%if ARCH_X86_32
+ add esp, 12
+%else
+ add rsp, 8+32*WIN64
+%endif
+ POP r1
+ POP r3
+ lea r0, [r1 + 2*r3]
+ mov r2, r3
+ sub r3, mmsize
+ neg r2
+ mova m2, [ps_neg]
+.loop:
+ mova m0, [r1 + r3]
+ mova m1, [r0 + r2]
+ shufps m0, m0, 0x1b
+ shufps m1, m1, 0x1b
+ xorps m0, m2
+ mova [r0 + r3], m1
+ mova [r1 + r2], m0
+ sub r3, mmsize
+ add r2, mmsize
+ jl .loop
+ RET
+
+%ifdef PIC
+%define SECTION_REL - $$
+%else
+%define SECTION_REL
+%endif
+
+%macro DECL_FFT 1-2 ; nbits, suffix
+%ifidn %0, 1
+%xdefine fullsuffix SUFFIX
+%else
+%xdefine fullsuffix %2 %+ SUFFIX
+%endif
+%xdefine list_of_fft fft4 %+ SUFFIX SECTION_REL, fft8 %+ SUFFIX SECTION_REL
+%if %1>=5
+%xdefine list_of_fft list_of_fft, fft16 %+ SUFFIX SECTION_REL
+%endif
+%if %1>=6
+%xdefine list_of_fft list_of_fft, fft32 %+ fullsuffix SECTION_REL
+%endif
+
+%assign n 1<<%1
+%rep 18-%1
+%assign n2 n/2
+%assign n4 n/4
+%xdefine list_of_fft list_of_fft, fft %+ n %+ fullsuffix SECTION_REL
+
+align 16
+fft %+ n %+ fullsuffix:
+ call fft %+ n2 %+ SUFFIX
+ add r0, n*4 - (n&(-2<<%1))
+ call fft %+ n4 %+ SUFFIX
+ add r0, n*2 - (n2&(-2<<%1))
+ call fft %+ n4 %+ SUFFIX
+ sub r0, n*6 + (n2&(-2<<%1))
+ lea r1, [cos_ %+ n]
+ mov r2d, n4/2
+ jmp pass %+ fullsuffix
+
+%assign n n*2
+%endrep
+%undef n
+
+align 8
+dispatch_tab %+ fullsuffix: pointer list_of_fft
+%endmacro ; DECL_FFT
+
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+DECL_FFT 6
+DECL_FFT 6, _interleave
+%endif
+INIT_XMM sse
+DECL_FFT 5
+DECL_FFT 5, _interleave
+
+INIT_XMM sse
+%undef mulps
+%undef addps
+%undef subps
+%undef unpcklps
+%undef unpckhps
+
+%macro PREROTATER 5 ;-2*k, 2*k, input+n4, tcos+n8, tsin+n8
+ movaps xmm0, [%3+%2*4]
+ movaps xmm1, [%3+%1*4-0x10]
+ movaps xmm2, xmm0
+ shufps xmm0, xmm1, 0x88
+ shufps xmm1, xmm2, 0x77
+ movlps xmm4, [%4+%2*2]
+ movlps xmm5, [%5+%2*2+0x0]
+ movhps xmm4, [%4+%1*2-0x8]
+ movhps xmm5, [%5+%1*2-0x8]
+ movaps xmm2, xmm0
+ movaps xmm3, xmm1
+ mulps xmm0, xmm5
+ mulps xmm1, xmm4
+ mulps xmm2, xmm4
+ mulps xmm3, xmm5
+ subps xmm1, xmm0
+ addps xmm2, xmm3
+ movaps xmm0, xmm1
+ unpcklps xmm1, xmm2
+ unpckhps xmm0, xmm2
+%endmacro
+
+%macro CMUL 6 ;j, xmm0, xmm1, 3, 4, 5
+ mulps m6, %3, [%5+%1]
+ mulps m7, %2, [%5+%1]
+ mulps %2, %2, [%6+%1]
+ mulps %3, %3, [%6+%1]
+ subps %2, %2, m6
+ addps %3, %3, m7
+%endmacro
+
+%macro POSROTATESHUF 5 ;j, k, z+n8, tcos+n8, tsin+n8
+.post:
+%if cpuflag(avx)
+ vmovaps ymm1, [%3+%1*2]
+ vmovaps ymm0, [%3+%1*2+0x20]
+ vmovaps ymm3, [%3+%2*2]
+ vmovaps ymm2, [%3+%2*2+0x20]
+
+ CMUL %1, ymm0, ymm1, %3, %4, %5
+ CMUL %2, ymm2, ymm3, %3, %4, %5
+ vshufps ymm1, ymm1, ymm1, 0x1b
+ vshufps ymm3, ymm3, ymm3, 0x1b
+ vperm2f128 ymm1, ymm1, ymm1, 0x01
+ vperm2f128 ymm3, ymm3, ymm3, 0x01
+ vunpcklps ymm6, ymm2, ymm1
+ vunpckhps ymm4, ymm2, ymm1
+ vunpcklps ymm7, ymm0, ymm3
+ vunpckhps ymm5, ymm0, ymm3
+
+ vextractf128 [%3+%1*2], ymm7, 0
+ vextractf128 [%3+%1*2+0x10], ymm5, 0
+ vextractf128 [%3+%1*2+0x20], ymm7, 1
+ vextractf128 [%3+%1*2+0x30], ymm5, 1
+
+ vextractf128 [%3+%2*2], ymm6, 0
+ vextractf128 [%3+%2*2+0x10], ymm4, 0
+ vextractf128 [%3+%2*2+0x20], ymm6, 1
+ vextractf128 [%3+%2*2+0x30], ymm4, 1
+ sub %2, 0x20
+ add %1, 0x20
+ jl .post
+%else
+ movaps xmm1, [%3+%1*2]
+ movaps xmm0, [%3+%1*2+0x10]
+ CMUL %1, xmm0, xmm1, %3, %4, %5
+ movaps xmm5, [%3+%2*2]
+ movaps xmm4, [%3+%2*2+0x10]
+ CMUL %2, xmm4, xmm5, %3, %4, %5
+ shufps xmm1, xmm1, 0x1b
+ shufps xmm5, xmm5, 0x1b
+ movaps xmm6, xmm4
+ unpckhps xmm4, xmm1
+ unpcklps xmm6, xmm1
+ movaps xmm2, xmm0
+ unpcklps xmm0, xmm5
+ unpckhps xmm2, xmm5
+ movaps [%3+%2*2], xmm6
+ movaps [%3+%2*2+0x10], xmm4
+ movaps [%3+%1*2], xmm0
+ movaps [%3+%1*2+0x10], xmm2
+ sub %2, 0x10
+ add %1, 0x10
+ jl .post
+%endif
+%endmacro
+
+%macro DECL_IMDCT 0
+cglobal imdct_half, 3,12,8; FFTContext *s, FFTSample *output, const FFTSample *input
+%if ARCH_X86_64
+%define rrevtab r7
+%define rtcos r8
+%define rtsin r9
+%else
+%define rrevtab r6
+%define rtsin r6
+%define rtcos r5
+%endif
+ mov r3d, [r0+FFTContext.mdctsize]
+ add r2, r3
+ shr r3, 1
+ mov rtcos, [r0+FFTContext.tcos]
+ mov rtsin, [r0+FFTContext.tsin]
+ add rtcos, r3
+ add rtsin, r3
+%if ARCH_X86_64 == 0
+ push rtcos
+ push rtsin
+%endif
+ shr r3, 1
+ mov rrevtab, [r0+FFTContext.revtab]
+ add rrevtab, r3
+%if ARCH_X86_64 == 0
+ push rrevtab
+%endif
+
+ sub r3, 4
+%if ARCH_X86_64
+ xor r4, r4
+ sub r4, r3
+%endif
+.pre:
+%if ARCH_X86_64 == 0
+;unspill
+ xor r4, r4
+ sub r4, r3
+ mov rtcos, [esp+8]
+ mov rtsin, [esp+4]
+%endif
+
+ PREROTATER r4, r3, r2, rtcos, rtsin
+%if ARCH_X86_64
+ movzx r5, word [rrevtab+r4-4]
+ movzx r6, word [rrevtab+r4-2]
+ movzx r10, word [rrevtab+r3]
+ movzx r11, word [rrevtab+r3+2]
+ movlps [r1+r5 *8], xmm0
+ movhps [r1+r6 *8], xmm0
+ movlps [r1+r10*8], xmm1
+ movhps [r1+r11*8], xmm1
+ add r4, 4
+%else
+ mov r6, [esp]
+ movzx r5, word [r6+r4-4]
+ movzx r4, word [r6+r4-2]
+ movlps [r1+r5*8], xmm0
+ movhps [r1+r4*8], xmm0
+ movzx r5, word [r6+r3]
+ movzx r4, word [r6+r3+2]
+ movlps [r1+r5*8], xmm1
+ movhps [r1+r4*8], xmm1
+%endif
+ sub r3, 4
+ jns .pre
+
+ mov r5, r0
+ mov r6, r1
+ mov r0, r1
+ mov r1d, [r5+FFTContext.nbits]
+
+ FFT_DISPATCH SUFFIX, r1
+
+ mov r0d, [r5+FFTContext.mdctsize]
+ add r6, r0
+ shr r0, 1
+%if ARCH_X86_64 == 0
+%define rtcos r2
+%define rtsin r3
+ mov rtcos, [esp+8]
+ mov rtsin, [esp+4]
+%endif
+ neg r0
+ mov r1, -mmsize
+ sub r1, r0
+ POSROTATESHUF r0, r1, r6, rtcos, rtsin
+%if ARCH_X86_64 == 0
+ add esp, 12
+%endif
+ RET
+%endmacro
+
+DECL_IMDCT
+
+INIT_YMM avx
+
+%if HAVE_AVX_EXTERNAL
+DECL_IMDCT
+%endif
diff --git a/media/ffvpx/libavcodec/x86/fft.h b/media/ffvpx/libavcodec/x86/fft.h
new file mode 100644
index 0000000000..37418ec1f4
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/fft.h
@@ -0,0 +1,32 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_FFT_H
+#define AVCODEC_X86_FFT_H
+
+#include "libavcodec/fft.h"
+
+void ff_fft_permute_sse(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_avx(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
+
+void ff_imdct_calc_sse(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_sse(FFTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_avx(FFTContext *s, FFTSample *output, const FFTSample *input);
+
+#endif /* AVCODEC_X86_FFT_H */
diff --git a/media/ffvpx/libavcodec/x86/fft_init.c b/media/ffvpx/libavcodec/x86/fft_init.c
new file mode 100644
index 0000000000..df79d57dc7
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/fft_init.c
@@ -0,0 +1,47 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+
+#include "fft.h"
+
+av_cold void ff_fft_init_x86(FFTContext *s)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (s->nbits > 16)
+ return;
+
+ if (EXTERNAL_SSE(cpu_flags)) {
+ s->imdct_calc = ff_imdct_calc_sse;
+ s->imdct_half = ff_imdct_half_sse;
+ s->fft_permute = ff_fft_permute_sse;
+ s->fft_calc = ff_fft_calc_sse;
+ s->fft_permutation = FF_FFT_PERM_SWAP_LSBS;
+ }
+
+ if (EXTERNAL_AVX_FAST(cpu_flags) && s->nbits >= 5) {
+ s->imdct_half = ff_imdct_half_avx;
+ s->fft_calc = ff_fft_calc_avx;
+ s->fft_permutation = FF_FFT_PERM_AVX;
+ }
+}
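These SSE/AVX entry points are not called directly by decoders; they are reached
through the FFTContext function pointers this initializer rewrites. A minimal usage
sketch, assuming the legacy FFTContext API declared in libavcodec/fft.h (ff_fft_init,
ff_fft_end and the fft_permute/fft_calc fields come from that header, not from this
file):

#include "libavcodec/fft.h"

/* Run a 1024-point forward FFT in place; fft_permute/fft_calc resolve to the
 * SSE or AVX kernels when ff_fft_init_x86() selected them. */
static void run_fft(FFTComplex *z /* 1 << nbits complex samples */)
{
    FFTContext ctx;
    if (ff_fft_init(&ctx, 10, 0) < 0)
        return;
    ctx.fft_permute(&ctx, z);  /* reorder input for the selected kernel */
    ctx.fft_calc(&ctx, z);     /* in-place transform */
    ff_fft_end(&ctx);
}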
diff --git a/media/ffvpx/libavcodec/x86/flacdsp.asm b/media/ffvpx/libavcodec/x86/flacdsp.asm
new file mode 100644
index 0000000000..44416e4dfd
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/flacdsp.asm
@@ -0,0 +1,326 @@
+;******************************************************************************
+;* FLAC DSP SIMD optimizations
+;*
+;* Copyright (C) 2014 Loren Merritt
+;* Copyright (C) 2014 James Almer
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+vector: db 0,1,4,5,8,9,12,13,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,0,1,4,5,8,9,12,13,
+
+SECTION .text
+
+%macro PMACSDQL 5
+%if cpuflag(xop)
+ pmacsdql %1, %2, %3, %1
+%else
+ pmuldq %2, %3
+ paddq %1, %2
+%endif
+%endmacro
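Per 64-bit lane, PMACSDQL is a signed 32x32->64 multiply-accumulate: one pmacsdql
instruction on XOP, pmuldq followed by paddq elsewhere. A scalar sketch of the
per-lane effect (an illustrative helper, not FFmpeg code):

#include <stdint.h>

static inline int64_t macsdql(int64_t acc, int32_t a, int32_t b)
{
    return acc + (int64_t)a * b;   /* widen, multiply, accumulate */
}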
+
+%macro LPC_32 1
+INIT_XMM %1
+cglobal flac_lpc_32, 5,6,5, decoded, coeffs, pred_order, qlevel, len, j
+ sub lend, pred_orderd
+ jle .ret
+ lea decodedq, [decodedq+pred_orderq*4-8]
+ lea coeffsq, [coeffsq+pred_orderq*4]
+ neg pred_orderq
+ movd m4, qlevelm
+ALIGN 16
+.loop_sample:
+ movd m0, [decodedq+pred_orderq*4+8]
+ add decodedq, 8
+ movd m1, [coeffsq+pred_orderq*4]
+ pxor m2, m2
+ pxor m3, m3
+ lea jq, [pred_orderq+1]
+ test jq, jq
+ jz .end_order
+.loop_order:
+ PMACSDQL m2, m0, m1, m2, m0
+ movd m0, [decodedq+jq*4]
+ PMACSDQL m3, m1, m0, m3, m1
+ movd m1, [coeffsq+jq*4]
+ inc jq
+ jl .loop_order
+.end_order:
+ PMACSDQL m2, m0, m1, m2, m0
+ psrlq m2, m4
+ movd m0, [decodedq]
+ paddd m0, m2
+ movd [decodedq], m0
+ sub lend, 2
+ jl .ret
+ PMACSDQL m3, m1, m0, m3, m1
+ psrlq m3, m4
+ movd m1, [decodedq+4]
+ paddd m1, m3
+ movd [decodedq+4], m1
+ jg .loop_sample
+.ret:
+ RET
+%endmacro
+
+%if HAVE_XOP_EXTERNAL
+LPC_32 xop
+%endif
+LPC_32 sse4
+
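The loop above is a SIMD version of the FLAC LPC restoration filter: each output
sample adds the qlevel-scaled dot product of the previous pred_order samples with the
coefficient table to the stored residual. A rough scalar equivalent (a sketch; the
authoritative C reference lives in libavcodec/flacdsp.c):

#include <stdint.h>

static void flac_lpc_32_ref(int32_t *decoded, const int coeffs[32],
                            int pred_order, int qlevel, int len)
{
    for (int i = pred_order; i < len; i++, decoded++) {
        int64_t sum = 0;
        for (int j = 0; j < pred_order; j++)
            sum += (int64_t)coeffs[j] * decoded[j];   /* window of past samples */
        decoded[pred_order] += (int32_t)(sum >> qlevel);
    }
}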
+;----------------------------------------------------------------------------------
+;void ff_flac_decorrelate_[lrm]s_16_sse2(uint8_t **out, int32_t **in, int channels,
+; int len, int shift);
+;----------------------------------------------------------------------------------
+%macro FLAC_DECORRELATE_16 3-4
+cglobal flac_decorrelate_%1_16, 2, 4, 4, out, in0, in1, len
+%ifidn %1, indep2
+ VBROADCASTI128 m2, [vector]
+%endif
+%if ARCH_X86_32
+ mov lend, lenm
+%endif
+ movd m3, r4m
+ shl lend, 2
+ mov in1q, [in0q + gprsize]
+ mov in0q, [in0q]
+ mov outq, [outq]
+ add in1q, lenq
+ add in0q, lenq
+ add outq, lenq
+ neg lenq
+
+align 16
+.loop:
+ mova m0, [in0q + lenq]
+ mova m1, [in1q + lenq]
+%ifidn %1, ms
+ psrad m2, m1, 1
+ psubd m0, m2
+%endif
+%ifnidn %1, indep2
+ p%4d m2, m0, m1
+ packssdw m%2, m%2
+ packssdw m%3, m%3
+ punpcklwd m%2, m%3
+ psllw m%2, m3
+%else
+ pslld m%2, m3
+ pslld m%3, m3
+ pshufb m%2, m%2, m2
+ pshufb m%3, m%3, m2
+ punpcklwd m%2, m%3
+%endif
+ mova [outq + lenq], m%2
+ add lenq, 16
+ jl .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+FLAC_DECORRELATE_16 ls, 0, 2, sub
+FLAC_DECORRELATE_16 rs, 2, 1, add
+FLAC_DECORRELATE_16 ms, 2, 0, add
+
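The ls/rs/ms kernels above rebuild a left/right pair from the two decoded FLAC
subframes and interleave it as 16-bit PCM. A scalar sketch of the three modes
(illustrative only; FFmpeg's templated C reference is in libavcodec/flacdsp.c, and the
asm additionally saturates the 16-bit pack with packssdw, which this omits):

#include <stdint.h>

static void decorrelate_16_ref(int16_t *out, int32_t **in, int len,
                               int shift, int mode /* 0=ls, 1=rs, 2=ms */)
{
    for (int i = 0; i < len; i++) {
        int a = in[0][i], b = in[1][i];
        int l, r;
        if (mode == 0) {            /* left/side:  a = left,  b = side  */
            l = a;      r = a - b;
        } else if (mode == 1) {     /* right/side: a = side,  b = right */
            l = a + b;  r = b;
        } else {                    /* mid/side:   a = mid,   b = side  */
            a -= b >> 1;
            l = a + b;  r = a;
        }
        out[2 * i]     = (int16_t)(l << shift);
        out[2 * i + 1] = (int16_t)(r << shift);
    }
}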
+;----------------------------------------------------------------------------------
+;void ff_flac_decorrelate_[lrm]s_32_sse2(uint8_t **out, int32_t **in, int channels,
+; int len, int shift);
+;----------------------------------------------------------------------------------
+%macro FLAC_DECORRELATE_32 5
+cglobal flac_decorrelate_%1_32, 2, 4, 4, out, in0, in1, len
+%if ARCH_X86_32
+ mov lend, lenm
+%endif
+ movd m3, r4m
+ mov in1q, [in0q + gprsize]
+ mov in0q, [in0q]
+ mov outq, [outq]
+ sub in1q, in0q
+
+align 16
+.loop:
+ mova m0, [in0q]
+ mova m1, [in0q + in1q]
+%ifidn %1, ms
+ psrad m2, m1, 1
+ psubd m0, m2
+%endif
+ p%5d m2, m0, m1
+ pslld m%2, m3
+ pslld m%3, m3
+
+ SBUTTERFLY dq, %2, %3, %4
+
+ mova [outq ], m%2
+ mova [outq + mmsize], m%3
+
+ add in0q, mmsize
+ add outq, mmsize*2
+ sub lend, mmsize/4
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+FLAC_DECORRELATE_32 ls, 0, 2, 1, sub
+FLAC_DECORRELATE_32 rs, 2, 1, 0, add
+FLAC_DECORRELATE_32 ms, 2, 0, 1, add
+
+;-----------------------------------------------------------------------------------------
+;void ff_flac_decorrelate_indep<ch>_<bps>_<opt>(uint8_t **out, int32_t **in, int channels,
+; int len, int shift);
+;-----------------------------------------------------------------------------------------
+;%1 = bps
+;%2 = channels
+;%3 = last xmm reg used
+;%4 = word/dword (shift instruction)
+%macro FLAC_DECORRELATE_INDEP 4
+%define REPCOUNT %2/(32/%1) ; 16bits = channels / 2; 32bits = channels
+cglobal flac_decorrelate_indep%2_%1, 2, %2+2, %3+1, out, in0, in1, len, in2, in3, in4, in5, in6, in7
+%if ARCH_X86_32
+%if %2 == 6
+ DEFINE_ARGS out, in0, in1, in2, in3, in4, in5
+ %define lend dword r3m
+%else
+ mov lend, lenm
+%endif
+%endif
+ movd m%3, r4m
+
+%assign %%i 1
+%rep %2-1
+ mov in %+ %%i %+ q, [in0q+%%i*gprsize]
+%assign %%i %%i+1
+%endrep
+
+ mov in0q, [in0q]
+ mov outq, [outq]
+
+%assign %%i 1
+%rep %2-1
+ sub in %+ %%i %+ q, in0q
+%assign %%i %%i+1
+%endrep
+
+align 16
+.loop:
+ mova m0, [in0q]
+
+%assign %%i 1
+%rep REPCOUNT-1
+ mova m %+ %%i, [in0q + in %+ %%i %+ q]
+%assign %%i %%i+1
+%endrep
+
+%if %1 == 32
+
+%if %2 == 8
+ TRANSPOSE8x4D 0, 1, 2, 3, 4, 5, 6, 7, 8
+%elif %2 == 6
+ SBUTTERFLY dq, 0, 1, 6
+ SBUTTERFLY dq, 2, 3, 6
+ SBUTTERFLY dq, 4, 5, 6
+
+ punpcklqdq m6, m0, m2
+ punpckhqdq m2, m4
+ shufps m4, m0, 0xe4
+ punpcklqdq m0, m1, m3
+ punpckhqdq m3, m5
+ shufps m5, m1, 0xe4
+ SWAP 0,6,1,4,5,3
+%elif %2 == 4
+ TRANSPOSE4x4D 0, 1, 2, 3, 4
+%else ; %2 == 2
+ SBUTTERFLY dq, 0, 1, 2
+%endif
+
+%else ; %1 == 16
+
+%if %2 == 8
+ packssdw m0, [in0q + in4q]
+ packssdw m1, [in0q + in5q]
+ packssdw m2, [in0q + in6q]
+ packssdw m3, [in0q + in7q]
+ TRANSPOSE2x4x4W 0, 1, 2, 3, 4
+%elif %2 == 6
+ packssdw m0, [in0q + in3q]
+ packssdw m1, [in0q + in4q]
+ packssdw m2, [in0q + in5q]
+ pshufd m3, m0, q1032
+ punpcklwd m0, m1
+ punpckhwd m1, m2
+ punpcklwd m2, m3
+
+ shufps m3, m0, m2, q2020
+ shufps m0, m1, q2031
+ shufps m2, m1, q3131
+ shufps m1, m2, m3, q3120
+ shufps m3, m0, q0220
+ shufps m0, m2, q3113
+ SWAP 2, 0, 3
+%else ; %2 == 4
+ packssdw m0, [in0q + in2q]
+ packssdw m1, [in0q + in3q]
+ SBUTTERFLY wd, 0, 1, 2
+ SBUTTERFLY dq, 0, 1, 2
+%endif
+
+%endif
+
+%assign %%i 0
+%rep REPCOUNT
+ psll%4 m %+ %%i, m%3
+%assign %%i %%i+1
+%endrep
+
+%assign %%i 0
+%rep REPCOUNT
+ mova [outq + %%i*mmsize], m %+ %%i
+%assign %%i %%i+1
+%endrep
+
+ add in0q, mmsize
+ add outq, mmsize*REPCOUNT
+ sub lend, mmsize/4
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM ssse3
+FLAC_DECORRELATE_16 indep2, 0, 1 ; reuse the stereo 16-bit macro
+FLAC_DECORRELATE_INDEP 32, 2, 3, d
+FLAC_DECORRELATE_INDEP 16, 4, 3, w
+FLAC_DECORRELATE_INDEP 32, 4, 5, d
+FLAC_DECORRELATE_INDEP 16, 6, 4, w
+FLAC_DECORRELATE_INDEP 32, 6, 7, d
+%if ARCH_X86_64
+FLAC_DECORRELATE_INDEP 16, 8, 5, w
+FLAC_DECORRELATE_INDEP 32, 8, 9, d
+%endif
+
+INIT_XMM avx
+FLAC_DECORRELATE_INDEP 32, 4, 5, d
+FLAC_DECORRELATE_INDEP 32, 6, 7, d
+%if ARCH_X86_64
+FLAC_DECORRELATE_INDEP 16, 8, 5, w
+FLAC_DECORRELATE_INDEP 32, 8, 9, d
+%endif
diff --git a/media/ffvpx/libavcodec/x86/flacdsp_init.c b/media/ffvpx/libavcodec/x86/flacdsp_init.c
new file mode 100644
index 0000000000..87daed7005
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/flacdsp_init.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2014 James Almer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavcodec/flacdsp.h"
+#include "libavutil/x86/cpu.h"
+#include "config.h"
+
+void ff_flac_lpc_32_sse4(int32_t *samples, const int coeffs[32], int order,
+ int qlevel, int len);
+void ff_flac_lpc_32_xop(int32_t *samples, const int coeffs[32], int order,
+ int qlevel, int len);
+
+#define DECORRELATE_FUNCS(fmt, opt) \
+void ff_flac_decorrelate_ls_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
+ int len, int shift); \
+void ff_flac_decorrelate_rs_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
+ int len, int shift); \
+void ff_flac_decorrelate_ms_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
+ int len, int shift)
+
+#define DECORRELATE_IFUNCS(fmt, opt) \
+void ff_flac_decorrelate_indep2_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
+ int len, int shift); \
+void ff_flac_decorrelate_indep4_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
+ int len, int shift); \
+void ff_flac_decorrelate_indep6_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
+ int len, int shift); \
+void ff_flac_decorrelate_indep8_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
+ int len, int shift)
+
+DECORRELATE_FUNCS(16, sse2);
+DECORRELATE_FUNCS(16, avx);
+DECORRELATE_FUNCS(32, sse2);
+DECORRELATE_FUNCS(32, avx);
+DECORRELATE_IFUNCS(16, ssse3);
+DECORRELATE_IFUNCS(16, avx);
+DECORRELATE_IFUNCS(32, ssse3);
+DECORRELATE_IFUNCS(32, avx);
+
+av_cold void ff_flacdsp_init_x86(FLACDSPContext *c, enum AVSampleFormat fmt, int channels)
+{
+#if HAVE_X86ASM
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ if (fmt == AV_SAMPLE_FMT_S16) {
+ c->decorrelate[1] = ff_flac_decorrelate_ls_16_sse2;
+ c->decorrelate[2] = ff_flac_decorrelate_rs_16_sse2;
+ c->decorrelate[3] = ff_flac_decorrelate_ms_16_sse2;
+ } else if (fmt == AV_SAMPLE_FMT_S32) {
+ c->decorrelate[1] = ff_flac_decorrelate_ls_32_sse2;
+ c->decorrelate[2] = ff_flac_decorrelate_rs_32_sse2;
+ c->decorrelate[3] = ff_flac_decorrelate_ms_32_sse2;
+ }
+ }
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ if (fmt == AV_SAMPLE_FMT_S16) {
+ if (channels == 2)
+ c->decorrelate[0] = ff_flac_decorrelate_indep2_16_ssse3;
+ else if (channels == 4)
+ c->decorrelate[0] = ff_flac_decorrelate_indep4_16_ssse3;
+ else if (channels == 6)
+ c->decorrelate[0] = ff_flac_decorrelate_indep6_16_ssse3;
+ else if (ARCH_X86_64 && channels == 8)
+ c->decorrelate[0] = ff_flac_decorrelate_indep8_16_ssse3;
+ } else if (fmt == AV_SAMPLE_FMT_S32) {
+ if (channels == 2)
+ c->decorrelate[0] = ff_flac_decorrelate_indep2_32_ssse3;
+ else if (channels == 4)
+ c->decorrelate[0] = ff_flac_decorrelate_indep4_32_ssse3;
+ else if (channels == 6)
+ c->decorrelate[0] = ff_flac_decorrelate_indep6_32_ssse3;
+ else if (ARCH_X86_64 && channels == 8)
+ c->decorrelate[0] = ff_flac_decorrelate_indep8_32_ssse3;
+ }
+ }
+ if (EXTERNAL_SSE4(cpu_flags)) {
+ c->lpc32 = ff_flac_lpc_32_sse4;
+ }
+ if (EXTERNAL_AVX(cpu_flags)) {
+ if (fmt == AV_SAMPLE_FMT_S16) {
+ if (ARCH_X86_64 && channels == 8)
+ c->decorrelate[0] = ff_flac_decorrelate_indep8_16_avx;
+ } else if (fmt == AV_SAMPLE_FMT_S32) {
+ if (channels == 4)
+ c->decorrelate[0] = ff_flac_decorrelate_indep4_32_avx;
+ else if (channels == 6)
+ c->decorrelate[0] = ff_flac_decorrelate_indep6_32_avx;
+ else if (ARCH_X86_64 && channels == 8)
+ c->decorrelate[0] = ff_flac_decorrelate_indep8_32_avx;
+ }
+ }
+ if (EXTERNAL_XOP(cpu_flags)) {
+ c->lpc32 = ff_flac_lpc_32_xop;
+ }
+#endif /* HAVE_X86ASM */
+}
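For orientation, the decorrelate[] slots filled here follow a fixed convention: slot 0
holds the independent-channels kernel (chosen by channel count), and slots 1-3 hold
the left/side, right/side and mid/side kernels. A small sketch of that indexing with
illustrative names (the decoder code that picks the slot from the FLAC frame header is
elsewhere in libavcodec):

enum flac_decorr_slot {
    DECORR_INDEPENDENT = 0,   /* indep2/indep4/indep6/indep8 */
    DECORR_LEFT_SIDE   = 1,
    DECORR_RIGHT_SIDE  = 2,
    DECORR_MID_SIDE    = 3,
};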
diff --git a/media/ffvpx/libavcodec/x86/h264_intrapred.asm b/media/ffvpx/libavcodec/x86/h264_intrapred.asm
new file mode 100644
index 0000000000..8a38ba2bb5
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/h264_intrapred.asm
@@ -0,0 +1,2044 @@
+;******************************************************************************
+;* H.264 intra prediction asm optimizations
+;* Copyright (c) 2010 Fiona Glaser
+;* Copyright (c) 2010 Holger Lubitz
+;* Copyright (c) 2010 Loren Merritt
+;* Copyright (c) 2010 Ronald S. Bultje
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+tm_shuf: times 8 db 0x03, 0x80
+pw_ff00: times 8 dw 0xff00
+plane_shuf: db -8, -7, -6, -5, -4, -3, -2, -1
+ db 1, 2, 3, 4, 5, 6, 7, 8
+plane8_shuf: db -4, -3, -2, -1, 0, 0, 0, 0
+ db 1, 2, 3, 4, 0, 0, 0, 0
+pw_0to7: dw 0, 1, 2, 3, 4, 5, 6, 7
+pw_1to8: dw 1, 2, 3, 4, 5, 6, 7, 8
+pw_m8tom1: dw -8, -7, -6, -5, -4, -3, -2, -1
+pw_m4to4: dw -4, -3, -2, -1, 1, 2, 3, 4
+
+SECTION .text
+
+cextern pb_1
+cextern pb_3
+cextern pw_4
+cextern pw_8
+
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_vertical_8(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_XMM sse
+cglobal pred16x16_vertical_8, 2,3
+ sub r0, r1
+ mov r2, 4
+ movaps xmm0, [r0]
+.loop:
+ movaps [r0+r1*1], xmm0
+ movaps [r0+r1*2], xmm0
+ lea r0, [r0+r1*2]
+ movaps [r0+r1*1], xmm0
+ movaps [r0+r1*2], xmm0
+ lea r0, [r0+r1*2]
+ dec r2
+ jg .loop
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_horizontal_8(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED16x16_H 0
+cglobal pred16x16_horizontal_8, 2,3
+ mov r2, 8
+%if cpuflag(ssse3)
+ mova m2, [pb_3]
+%endif
+.loop:
+ movd m0, [r0+r1*0-4]
+ movd m1, [r0+r1*1-4]
+
+%if cpuflag(ssse3)
+ pshufb m0, m2
+ pshufb m1, m2
+%else
+ punpcklbw m0, m0
+ punpcklbw m1, m1
+ SPLATW m0, m0, 3
+ SPLATW m1, m1, 3
+ mova [r0+r1*0+8], m0
+ mova [r0+r1*1+8], m1
+%endif
+
+ mova [r0+r1*0], m0
+ mova [r0+r1*1], m1
+ lea r0, [r0+r1*2]
+ dec r2
+ jg .loop
+ RET
+%endmacro
+
+INIT_MMX mmxext
+PRED16x16_H
+INIT_XMM ssse3
+PRED16x16_H
+
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_dc_8(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED16x16_DC 0
+cglobal pred16x16_dc_8, 2,7
+ mov r4, r0
+ sub r0, r1
+ pxor mm0, mm0
+ pxor mm1, mm1
+ psadbw mm0, [r0+0]
+ psadbw mm1, [r0+8]
+ dec r0
+ movzx r5d, byte [r0+r1*1]
+ paddw mm0, mm1
+ movd r6d, mm0
+ lea r0, [r0+r1*2]
+%rep 7
+ movzx r2d, byte [r0+r1*0]
+ movzx r3d, byte [r0+r1*1]
+ add r5d, r2d
+ add r6d, r3d
+ lea r0, [r0+r1*2]
+%endrep
+ movzx r2d, byte [r0+r1*0]
+ add r5d, r6d
+ lea r2d, [r2+r5+16]
+ shr r2d, 5
+%if cpuflag(ssse3)
+ pxor m1, m1
+%endif
+ SPLATB_REG m0, r2, m1
+
+ mov r3d, 4
+.loop:
+ mova [r4+r1*0], m0
+ mova [r4+r1*1], m0
+ lea r4, [r4+r1*2]
+ mova [r4+r1*0], m0
+ mova [r4+r1*1], m0
+ lea r4, [r4+r1*2]
+ dec r3d
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED16x16_DC
+INIT_XMM ssse3
+PRED16x16_DC
+
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_tm_vp8_8(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_XMM sse2
+cglobal pred16x16_tm_vp8_8, 2,6,6
+ sub r0, r1
+ pxor xmm2, xmm2
+ movdqa xmm0, [r0]
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm2
+ punpckhbw xmm1, xmm2
+ movzx r4d, byte [r0-1]
+ mov r5d, 8
+.loop:
+ movzx r2d, byte [r0+r1*1-1]
+ movzx r3d, byte [r0+r1*2-1]
+ sub r2d, r4d
+ sub r3d, r4d
+ movd xmm2, r2d
+ movd xmm4, r3d
+ pshuflw xmm2, xmm2, 0
+ pshuflw xmm4, xmm4, 0
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm4, xmm4
+ movdqa xmm3, xmm2
+ movdqa xmm5, xmm4
+ paddw xmm2, xmm0
+ paddw xmm3, xmm1
+ paddw xmm4, xmm0
+ paddw xmm5, xmm1
+ packuswb xmm2, xmm3
+ packuswb xmm4, xmm5
+ movdqa [r0+r1*1], xmm2
+ movdqa [r0+r1*2], xmm4
+ lea r0, [r0+r1*2]
+ dec r5d
+ jg .loop
+ RET
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+cglobal pred16x16_tm_vp8_8, 2, 4, 5, dst, stride, stride3, iteration
+ sub dstq, strideq
+ pmovzxbw m0, [dstq]
+ vpbroadcastb xm1, [r0-1]
+ pmovzxbw m1, xm1
+ psubw m0, m1
+ mov iterationd, 4
+ lea stride3q, [strideq*3]
+.loop:
+ vpbroadcastb xm1, [dstq+strideq*1-1]
+ vpbroadcastb xm2, [dstq+strideq*2-1]
+ vpbroadcastb xm3, [dstq+stride3q-1]
+ vpbroadcastb xm4, [dstq+strideq*4-1]
+ pmovzxbw m1, xm1
+ pmovzxbw m2, xm2
+ pmovzxbw m3, xm3
+ pmovzxbw m4, xm4
+ paddw m1, m0
+ paddw m2, m0
+ paddw m3, m0
+ paddw m4, m0
+ vpackuswb m1, m1, m2
+ vpackuswb m3, m3, m4
+ vpermq m1, m1, q3120
+ vpermq m3, m3, q3120
+ movdqa [dstq+strideq*1], xm1
+ vextracti128 [dstq+strideq*2], m1, 1
+ movdqa [dstq+stride3q*1], xm3
+ vextracti128 [dstq+strideq*4], m3, 1
+ lea dstq, [dstq+strideq*4]
+ dec iterationd
+ jg .loop
+ RET
+%endif
+
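Both TM ("TrueMotion") variants above compute the same per-pixel predictor: the top
neighbour plus the left neighbour minus the top-left corner, clipped to 8 bits. A
scalar sketch (illustrative; the generic C predictor lives in libavcodec's h264pred
code):

#include <stddef.h>
#include <stdint.h>

static void pred16x16_tm_ref(uint8_t *src, ptrdiff_t stride)
{
    const uint8_t *top = src - stride;
    int tl = top[-1];                       /* top-left corner sample */
    for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++) {
            int v = src[y * stride - 1] + top[x] - tl;
            src[y * stride + x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}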
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_plane_*_8(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro H264_PRED16x16_PLANE 1
+cglobal pred16x16_plane_%1_8, 2,9,7
+ mov r2, r1 ; +stride
+ neg r1 ; -stride
+
+ movh m0, [r0+r1 -1]
+%if cpuflag(ssse3)
+ movhps m0, [r0+r1 +8]
+ pmaddubsw m0, [plane_shuf] ; H coefficients
+%else ; sse2
+ pxor m2, m2
+ movh m1, [r0+r1 +8]
+ punpcklbw m0, m2
+ punpcklbw m1, m2
+ pmullw m0, [pw_m8tom1]
+ pmullw m1, [pw_1to8]
+ paddw m0, m1
+%endif
+ movhlps m1, m0
+ paddw m0, m1
+ PSHUFLW m1, m0, 0xE
+ paddw m0, m1
+ PSHUFLW m1, m0, 0x1
+ paddw m0, m1 ; sum of H coefficients
+
+ lea r4, [r0+r2*8-1]
+ lea r3, [r0+r2*4-1]
+ add r4, r2
+
+%if ARCH_X86_64
+%define e_reg r8
+%else
+%define e_reg r0
+%endif
+
+ movzx e_reg, byte [r3+r2*2 ]
+ movzx r5, byte [r4+r1 ]
+ sub r5, e_reg
+
+ movzx e_reg, byte [r3+r2 ]
+ movzx r6, byte [r4 ]
+ sub r6, e_reg
+ lea r5, [r5+r6*2]
+
+ movzx e_reg, byte [r3+r1 ]
+ movzx r6, byte [r4+r2*2 ]
+ sub r6, e_reg
+ lea r5, [r5+r6*4]
+
+ movzx e_reg, byte [r3 ]
+%if ARCH_X86_64
+ movzx r7, byte [r4+r2 ]
+ sub r7, e_reg
+%else
+ movzx r6, byte [r4+r2 ]
+ sub r6, e_reg
+ lea r5, [r5+r6*4]
+ sub r5, r6
+%endif
+
+ lea e_reg, [r3+r1*4]
+ lea r3, [r4+r2*4]
+
+ movzx r4, byte [e_reg+r2 ]
+ movzx r6, byte [r3 ]
+ sub r6, r4
+%if ARCH_X86_64
+ lea r6, [r7+r6*2]
+ lea r5, [r5+r6*2]
+ add r5, r6
+%else
+ lea r5, [r5+r6*4]
+ lea r5, [r5+r6*2]
+%endif
+
+ movzx r4, byte [e_reg ]
+%if ARCH_X86_64
+ movzx r7, byte [r3 +r2 ]
+ sub r7, r4
+ sub r5, r7
+%else
+ movzx r6, byte [r3 +r2 ]
+ sub r6, r4
+ lea r5, [r5+r6*8]
+ sub r5, r6
+%endif
+
+ movzx r4, byte [e_reg+r1 ]
+ movzx r6, byte [r3 +r2*2]
+ sub r6, r4
+%if ARCH_X86_64
+ add r6, r7
+%endif
+ lea r5, [r5+r6*8]
+
+ movzx r4, byte [e_reg+r2*2]
+ movzx r6, byte [r3 +r1 ]
+ sub r6, r4
+ lea r5, [r5+r6*4]
+ add r5, r6 ; sum of V coefficients
+
+%if ARCH_X86_64 == 0
+ mov r0, r0m
+%endif
+
+%ifidn %1, h264
+ lea r5, [r5*5+32]
+ sar r5, 6
+%elifidn %1, rv40
+ lea r5, [r5*5]
+ sar r5, 6
+%elifidn %1, svq3
+ test r5, r5
+ lea r6, [r5+3]
+ cmovs r5, r6
+ sar r5, 2 ; V/4
+ lea r5, [r5*5] ; 5*(V/4)
+ test r5, r5
+ lea r6, [r5+15]
+ cmovs r5, r6
+ sar r5, 4 ; (5*(V/4))/16
+%endif
+
+ movzx r4, byte [r0+r1 +15]
+ movzx r3, byte [r3+r2*2 ]
+ lea r3, [r3+r4+1]
+ shl r3, 4
+
+ movd r1d, m0
+ movsx r1d, r1w
+%ifnidn %1, svq3
+%ifidn %1, h264
+ lea r1d, [r1d*5+32]
+%else ; rv40
+ lea r1d, [r1d*5]
+%endif
+ sar r1d, 6
+%else ; svq3
+ test r1d, r1d
+ lea r4d, [r1d+3]
+ cmovs r1d, r4d
+ sar r1d, 2 ; H/4
+ lea r1d, [r1d*5] ; 5*(H/4)
+ test r1d, r1d
+ lea r4d, [r1d+15]
+ cmovs r1d, r4d
+ sar r1d, 4 ; (5*(H/4))/16
+%endif
+ movd m0, r1d
+
+ add r1d, r5d
+ add r3d, r1d
+ shl r1d, 3
+ sub r3d, r1d ; a
+
+ movd m1, r5d
+ movd m3, r3d
+ SPLATW m0, m0, 0 ; H
+ SPLATW m1, m1, 0 ; V
+ SPLATW m3, m3, 0 ; a
+%ifidn %1, svq3
+ SWAP 0, 1
+%endif
+ mova m2, m0
+ pmullw m0, [pw_0to7] ; 0*H, 1*H, ..., 7*H (words)
+ psllw m2, 3
+ paddw m0, m3 ; a + {0,1,2,3,4,5,6,7}*H
+ paddw m2, m0 ; a + {8,9,10,11,12,13,14,15}*H
+
+ mov r4, 8
+.loop:
+ mova m3, m0 ; b[0..7]
+ mova m4, m2 ; b[8..15]
+ psraw m3, 5
+ psraw m4, 5
+ packuswb m3, m4
+ mova [r0], m3
+ paddw m0, m1
+ paddw m2, m1
+
+ mova m3, m0 ; b[0..7]
+ mova m4, m2 ; b[8..15]
+ psraw m3, 5
+ psraw m4, 5
+ packuswb m3, m4
+ mova [r0+r2], m3
+ paddw m0, m1
+ paddw m2, m1
+
+ lea r0, [r0+r2*2]
+ dec r4
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+H264_PRED16x16_PLANE h264
+H264_PRED16x16_PLANE rv40
+H264_PRED16x16_PLANE svq3
+INIT_XMM ssse3
+H264_PRED16x16_PLANE h264
+H264_PRED16x16_PLANE rv40
+H264_PRED16x16_PLANE svq3
+
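The plane kernels above evaluate the H.264 Intra_16x16 plane formula row by row: two
gradient sums H and V are folded into per-pixel terms b*(x-7) and c*(y-7) around an
offset a (the rv40 and svq3 variants only change how the b/c terms are rounded and
scaled). A scalar sketch of the h264 variant, following the standard formula rather
than FFmpeg's C code verbatim:

#include <stddef.h>
#include <stdint.h>

static inline uint8_t clip8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

static void pred16x16_plane_ref(uint8_t *src, ptrdiff_t stride)
{
    const uint8_t *top  = src - stride;   /* p[x, -1]                  */
    const uint8_t *left = src - 1;        /* p[-1, y] = left[y*stride] */
    int H = 0, V = 0;
    for (int i = 1; i <= 8; i++) {
        H += i * (top[7 + i] - top[7 - i]);
        V += i * (left[(7 + i) * stride] - left[(7 - i) * stride]);
    }
    int a = 16 * (top[15] + left[15 * stride]);
    int b = (5 * H + 32) >> 6;
    int c = (5 * V + 32) >> 6;
    for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++)
            src[y * stride + x] = clip8((a + b * (x - 7) + c * (y - 7) + 16) >> 5);
}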
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_plane_8(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro H264_PRED8x8_PLANE 0
+cglobal pred8x8_plane_8, 2,9,7
+ mov r2, r1 ; +stride
+ neg r1 ; -stride
+
+ movd m0, [r0+r1 -1]
+%if cpuflag(ssse3)
+ movhps m0, [r0+r1 +4] ; this reads 4 bytes more than necessary
+ pmaddubsw m0, [plane8_shuf] ; H coefficients
+%else ; sse2
+ pxor m2, m2
+ movd m1, [r0+r1 +4]
+ punpckldq m0, m1
+ punpcklbw m0, m2
+ pmullw m0, [pw_m4to4]
+%endif
+ movhlps m1, m0
+ paddw m0, m1
+
+%if notcpuflag(ssse3)
+ PSHUFLW m1, m0, 0xE
+ paddw m0, m1
+%endif ; !ssse3
+
+ PSHUFLW m1, m0, 0x1
+ paddw m0, m1 ; sum of H coefficients
+
+ lea r4, [r0+r2*4-1]
+ lea r3, [r0 -1]
+ add r4, r2
+
+%if ARCH_X86_64
+%define e_reg r8
+%else
+%define e_reg r0
+%endif
+
+ movzx e_reg, byte [r3+r2*2 ]
+ movzx r5, byte [r4+r1 ]
+ sub r5, e_reg
+
+ movzx e_reg, byte [r3 ]
+%if ARCH_X86_64
+ movzx r7, byte [r4+r2 ]
+ sub r7, e_reg
+ sub r5, r7
+%else
+ movzx r6, byte [r4+r2 ]
+ sub r6, e_reg
+ lea r5, [r5+r6*4]
+ sub r5, r6
+%endif
+
+ movzx e_reg, byte [r3+r1 ]
+ movzx r6, byte [r4+r2*2 ]
+ sub r6, e_reg
+%if ARCH_X86_64
+ add r6, r7
+%endif
+ lea r5, [r5+r6*4]
+
+ movzx e_reg, byte [r3+r2 ]
+ movzx r6, byte [r4 ]
+ sub r6, e_reg
+ lea r6, [r5+r6*2]
+
+ lea r5, [r6*9+16]
+ lea r5, [r5+r6*8]
+ sar r5, 5
+
+%if ARCH_X86_64 == 0
+ mov r0, r0m
+%endif
+
+ movzx r3, byte [r4+r2*2 ]
+ movzx r4, byte [r0+r1 +7]
+ lea r3, [r3+r4+1]
+ shl r3, 4
+ movd r1d, m0
+ movsx r1d, r1w
+ imul r1d, 17
+ add r1d, 16
+ sar r1d, 5
+ movd m0, r1d
+ add r1d, r5d
+ sub r3d, r1d
+ add r1d, r1d
+ sub r3d, r1d ; a
+
+ movd m1, r5d
+ movd m3, r3d
+ SPLATW m0, m0, 0 ; H
+ SPLATW m1, m1, 0 ; V
+ SPLATW m3, m3, 0 ; a
+ pmullw m0, [pw_0to7] ; 0*H, 1*H, ..., 7*H (words)
+ paddw m0, m3 ; a + {0,1,2,3,4,5,6,7}*H
+
+ mov r4, 4
+ALIGN 16
+.loop:
+ mova m3, m0 ; b[0..7]
+ paddw m0, m1
+ psraw m3, 5
+ mova m4, m0 ; V+b[0..7]
+ paddw m0, m1
+ psraw m4, 5
+ packuswb m3, m4
+ movh [r0], m3
+ movhps [r0+r2], m3
+
+ lea r0, [r0+r2*2]
+ dec r4
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+H264_PRED8x8_PLANE
+INIT_XMM ssse3
+H264_PRED8x8_PLANE
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_vertical_8(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_MMX mmx
+cglobal pred8x8_vertical_8, 2,2
+ sub r0, r1
+ movq mm0, [r0]
+%rep 3
+ movq [r0+r1*1], mm0
+ movq [r0+r1*2], mm0
+ lea r0, [r0+r1*2]
+%endrep
+ movq [r0+r1*1], mm0
+ movq [r0+r1*2], mm0
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_horizontal_8(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED8x8_H 0
+cglobal pred8x8_horizontal_8, 2,3
+ mov r2, 4
+%if cpuflag(ssse3)
+ mova m2, [pb_3]
+%endif
+.loop:
+ SPLATB_LOAD m0, r0+r1*0-1, m2
+ SPLATB_LOAD m1, r0+r1*1-1, m2
+ mova [r0+r1*0], m0
+ mova [r0+r1*1], m1
+ lea r0, [r0+r1*2]
+ dec r2
+ jg .loop
+ RET
+%endmacro
+
+INIT_MMX mmxext
+PRED8x8_H
+INIT_MMX ssse3
+PRED8x8_H
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_top_dc_8_mmxext(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_MMX mmxext
+cglobal pred8x8_top_dc_8, 2,5
+ sub r0, r1
+ movq mm0, [r0]
+ pxor mm1, mm1
+ pxor mm2, mm2
+ lea r2, [r0+r1*2]
+ punpckhbw mm1, mm0
+ punpcklbw mm0, mm2
+ psadbw mm1, mm2 ; s1
+ lea r3, [r2+r1*2]
+ psadbw mm0, mm2 ; s0
+ psrlw mm1, 1
+ psrlw mm0, 1
+ pavgw mm1, mm2
+ lea r4, [r3+r1*2]
+ pavgw mm0, mm2
+ pshufw mm1, mm1, 0
+ pshufw mm0, mm0, 0 ; dc0 (w)
+ packuswb mm0, mm1 ; dc0,dc1 (b)
+ movq [r0+r1*1], mm0
+ movq [r0+r1*2], mm0
+ lea r0, [r3+r1*2]
+ movq [r2+r1*1], mm0
+ movq [r2+r1*2], mm0
+ movq [r3+r1*1], mm0
+ movq [r3+r1*2], mm0
+ movq [r0+r1*1], mm0
+ movq [r0+r1*2], mm0
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_dc_8_mmxext(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_MMX mmxext
+cglobal pred8x8_dc_8, 2,5
+ sub r0, r1
+ pxor m7, m7
+ movd m0, [r0+0]
+ movd m1, [r0+4]
+ psadbw m0, m7 ; s0
+ mov r4, r0
+ psadbw m1, m7 ; s1
+
+ movzx r2d, byte [r0+r1*1-1]
+ movzx r3d, byte [r0+r1*2-1]
+ lea r0, [r0+r1*2]
+ add r2d, r3d
+ movzx r3d, byte [r0+r1*1-1]
+ add r2d, r3d
+ movzx r3d, byte [r0+r1*2-1]
+ add r2d, r3d
+ lea r0, [r0+r1*2]
+ movd m2, r2d ; s2
+ movzx r2d, byte [r0+r1*1-1]
+ movzx r3d, byte [r0+r1*2-1]
+ lea r0, [r0+r1*2]
+ add r2d, r3d
+ movzx r3d, byte [r0+r1*1-1]
+ add r2d, r3d
+ movzx r3d, byte [r0+r1*2-1]
+ add r2d, r3d
+ movd m3, r2d ; s3
+
+ punpcklwd m0, m1
+ mov r0, r4
+ punpcklwd m2, m3
+ punpckldq m0, m2 ; s0, s1, s2, s3
+ pshufw m3, m0, 11110110b ; s2, s1, s3, s3
+ lea r2, [r0+r1*2]
+ pshufw m0, m0, 01110100b ; s0, s1, s3, s1
+ paddw m0, m3
+ lea r3, [r2+r1*2]
+ psrlw m0, 2
+ pavgw m0, m7 ; s0+s2, s1, s3, s1+s3
+ lea r4, [r3+r1*2]
+ packuswb m0, m0
+ punpcklbw m0, m0
+ movq m1, m0
+ punpcklbw m0, m0
+ punpckhbw m1, m1
+ movq [r0+r1*1], m0
+ movq [r0+r1*2], m0
+ movq [r2+r1*1], m0
+ movq [r2+r1*2], m0
+ movq [r3+r1*1], m1
+ movq [r3+r1*2], m1
+ movq [r4+r1*1], m1
+ movq [r4+r1*2], m1
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_dc_rv40_8(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_MMX mmxext
+cglobal pred8x8_dc_rv40_8, 2,7
+ mov r4, r0
+ sub r0, r1
+ pxor mm0, mm0
+ psadbw mm0, [r0]
+ dec r0
+ movzx r5d, byte [r0+r1*1]
+ movd r6d, mm0
+ lea r0, [r0+r1*2]
+%rep 3
+ movzx r2d, byte [r0+r1*0]
+ movzx r3d, byte [r0+r1*1]
+ add r5d, r2d
+ add r6d, r3d
+ lea r0, [r0+r1*2]
+%endrep
+ movzx r2d, byte [r0+r1*0]
+ add r5d, r6d
+ lea r2d, [r2+r5+8]
+ shr r2d, 4
+ movd mm0, r2d
+ punpcklbw mm0, mm0
+ pshufw mm0, mm0, 0
+ mov r3d, 4
+.loop:
+ movq [r4+r1*0], mm0
+ movq [r4+r1*1], mm0
+ lea r4, [r4+r1*2]
+ dec r3d
+ jg .loop
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_tm_vp8_8(uint8_t *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_XMM sse2
+cglobal pred8x8_tm_vp8_8, 2,6,4
+ sub r0, r1
+ pxor xmm1, xmm1
+ movq xmm0, [r0]
+ punpcklbw xmm0, xmm1
+ movzx r4d, byte [r0-1]
+ mov r5d, 4
+.loop:
+ movzx r2d, byte [r0+r1*1-1]
+ movzx r3d, byte [r0+r1*2-1]
+ sub r2d, r4d
+ sub r3d, r4d
+ movd xmm2, r2d
+ movd xmm3, r3d
+ pshuflw xmm2, xmm2, 0
+ pshuflw xmm3, xmm3, 0
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+ paddw xmm2, xmm0
+ paddw xmm3, xmm0
+ packuswb xmm2, xmm3
+ movq [r0+r1*1], xmm2
+ movhps [r0+r1*2], xmm2
+ lea r0, [r0+r1*2]
+ dec r5d
+ jg .loop
+ RET
+
+INIT_XMM ssse3
+cglobal pred8x8_tm_vp8_8, 2,3,6
+ sub r0, r1
+ movdqa xmm4, [tm_shuf]
+ pxor xmm1, xmm1
+ movq xmm0, [r0]
+ punpcklbw xmm0, xmm1
+ movd xmm5, [r0-4]
+ pshufb xmm5, xmm4
+ mov r2d, 4
+.loop:
+ movd xmm2, [r0+r1*1-4]
+ movd xmm3, [r0+r1*2-4]
+ pshufb xmm2, xmm4
+ pshufb xmm3, xmm4
+ psubw xmm2, xmm5
+ psubw xmm3, xmm5
+ paddw xmm2, xmm0
+ paddw xmm3, xmm0
+ packuswb xmm2, xmm3
+ movq [r0+r1*1], xmm2
+ movhps [r0+r1*2], xmm2
+ lea r0, [r0+r1*2]
+ dec r2d
+ jg .loop
+ RET
+
+; dest, left, right, src, tmp
+; output: %1 = (t[n-1] + t[n]*2 + t[n+1] + 2) >> 2
+%macro PRED4x4_LOWPASS 5
+ mova %5, %2
+ pavgb %2, %3
+ pxor %3, %5
+ mova %1, %4
+ pand %3, [pb_1]
+ psubusb %2, %3
+ pavgb %1, %2
+%endmacro
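The macro avoids widening to 16 bits by building the 3-tap filter from two byte
averages: pavgb gives (x + y + 1) >> 1, and subtracting (l ^ r) & 1 turns the first
average into the truncating (l + r) >> 1, so the second average lands exactly on the
target value. A scalar sketch of the identity (illustrative, not FFmpeg code):

#include <stdint.h>

static inline uint8_t lowpass_ref(uint8_t l, uint8_t c, uint8_t r)
{
    uint8_t lr = (uint8_t)(((l + r + 1) >> 1) - ((l ^ r) & 1)); /* (l + r) >> 1 */
    return (uint8_t)((c + lr + 1) >> 1);   /* == (l + 2*c + r + 2) >> 2 */
}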
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_top_dc_8(uint8_t *src, int has_topleft, int has_topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro PRED8x8L_TOP_DC 0
+cglobal pred8x8l_top_dc_8, 4,4
+ sub r0, r3
+ pxor mm7, mm7
+ movq mm0, [r0-8]
+ movq mm3, [r0]
+ movq mm1, [r0+8]
+ movq mm2, mm3
+ movq mm4, mm3
+ PALIGNR mm2, mm0, 7, mm0
+ PALIGNR mm1, mm4, 1, mm4
+ test r1d, r1d ; top_left
+ jz .fix_lt_2
+ test r2d, r2d ; top_right
+ jz .fix_tr_1
+ jmp .body
+.fix_lt_2:
+ movq mm5, mm3
+ pxor mm5, mm2
+ psllq mm5, 56
+ psrlq mm5, 56
+ pxor mm2, mm5
+ test r2d, r2d ; top_right
+ jnz .body
+.fix_tr_1:
+ movq mm5, mm3
+ pxor mm5, mm1
+ psrlq mm5, 56
+ psllq mm5, 56
+ pxor mm1, mm5
+.body:
+ PRED4x4_LOWPASS mm0, mm2, mm1, mm3, mm5
+ psadbw mm7, mm0
+ paddw mm7, [pw_4]
+ psrlw mm7, 3
+ pshufw mm7, mm7, 0
+ packuswb mm7, mm7
+%rep 3
+ movq [r0+r3*1], mm7
+ movq [r0+r3*2], mm7
+ lea r0, [r0+r3*2]
+%endrep
+ movq [r0+r3*1], mm7
+ movq [r0+r3*2], mm7
+ RET
+%endmacro
+
+INIT_MMX mmxext
+PRED8x8L_TOP_DC
+INIT_MMX ssse3
+PRED8x8L_TOP_DC
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_dc_8(uint8_t *src, int has_topleft, int has_topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED8x8L_DC 0
+cglobal pred8x8l_dc_8, 4,5
+ sub r0, r3
+ lea r4, [r0+r3*2]
+ movq mm0, [r0+r3*1-8]
+ punpckhbw mm0, [r0+r3*0-8]
+ movq mm1, [r4+r3*1-8]
+ punpckhbw mm1, [r0+r3*2-8]
+ mov r4, r0
+ punpckhwd mm1, mm0
+ lea r0, [r0+r3*4]
+ movq mm2, [r0+r3*1-8]
+ punpckhbw mm2, [r0+r3*0-8]
+ lea r0, [r0+r3*2]
+ movq mm3, [r0+r3*1-8]
+ punpckhbw mm3, [r0+r3*0-8]
+ punpckhwd mm3, mm2
+ punpckhdq mm3, mm1
+ lea r0, [r0+r3*2]
+ movq mm0, [r0+r3*0-8]
+ movq mm1, [r4]
+ mov r0, r4
+ movq mm4, mm3
+ movq mm2, mm3
+ PALIGNR mm4, mm0, 7, mm0
+ PALIGNR mm1, mm2, 1, mm2
+ test r1d, r1d
+ jnz .do_left
+.fix_lt_1:
+ movq mm5, mm3
+ pxor mm5, mm4
+ psrlq mm5, 56
+ psllq mm5, 48
+ pxor mm1, mm5
+ jmp .do_left
+.fix_lt_2:
+ movq mm5, mm3
+ pxor mm5, mm2
+ psllq mm5, 56
+ psrlq mm5, 56
+ pxor mm2, mm5
+ test r2d, r2d
+ jnz .body
+.fix_tr_1:
+ movq mm5, mm3
+ pxor mm5, mm1
+ psrlq mm5, 56
+ psllq mm5, 56
+ pxor mm1, mm5
+ jmp .body
+.do_left:
+ movq mm0, mm4
+ PRED4x4_LOWPASS mm2, mm1, mm4, mm3, mm5
+ movq mm4, mm0
+ movq mm7, mm2
+ PRED4x4_LOWPASS mm1, mm3, mm0, mm4, mm5
+ psllq mm1, 56
+ PALIGNR mm7, mm1, 7, mm3
+ movq mm0, [r0-8]
+ movq mm3, [r0]
+ movq mm1, [r0+8]
+ movq mm2, mm3
+ movq mm4, mm3
+ PALIGNR mm2, mm0, 7, mm0
+ PALIGNR mm1, mm4, 1, mm4
+ test r1d, r1d
+ jz .fix_lt_2
+ test r2d, r2d
+ jz .fix_tr_1
+.body:
+ lea r1, [r0+r3*2]
+ PRED4x4_LOWPASS mm6, mm2, mm1, mm3, mm5
+ pxor mm0, mm0
+ pxor mm1, mm1
+ lea r2, [r1+r3*2]
+ psadbw mm0, mm7
+ psadbw mm1, mm6
+ paddw mm0, [pw_8]
+ paddw mm0, mm1
+ lea r4, [r2+r3*2]
+ psrlw mm0, 4
+ pshufw mm0, mm0, 0
+ packuswb mm0, mm0
+ movq [r0+r3*1], mm0
+ movq [r0+r3*2], mm0
+ movq [r1+r3*1], mm0
+ movq [r1+r3*2], mm0
+ movq [r2+r3*1], mm0
+ movq [r2+r3*2], mm0
+ movq [r4+r3*1], mm0
+ movq [r4+r3*2], mm0
+ RET
+%endmacro
+
+INIT_MMX mmxext
+PRED8x8L_DC
+INIT_MMX ssse3
+PRED8x8L_DC
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_horizontal_8(uint8_t *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED8x8L_HORIZONTAL 0
+cglobal pred8x8l_horizontal_8, 4,4
+ sub r0, r3
+ lea r2, [r0+r3*2]
+ movq mm0, [r0+r3*1-8]
+ test r1d, r1d
+ lea r1, [r0+r3]
+ cmovnz r1, r0
+ punpckhbw mm0, [r1+r3*0-8]
+ movq mm1, [r2+r3*1-8]
+ punpckhbw mm1, [r0+r3*2-8]
+ mov r2, r0
+ punpckhwd mm1, mm0
+ lea r0, [r0+r3*4]
+ movq mm2, [r0+r3*1-8]
+ punpckhbw mm2, [r0+r3*0-8]
+ lea r0, [r0+r3*2]
+ movq mm3, [r0+r3*1-8]
+ punpckhbw mm3, [r0+r3*0-8]
+ punpckhwd mm3, mm2
+ punpckhdq mm3, mm1
+ lea r0, [r0+r3*2]
+ movq mm0, [r0+r3*0-8]
+ movq mm1, [r1+r3*0-8]
+ mov r0, r2
+ movq mm4, mm3
+ movq mm2, mm3
+ PALIGNR mm4, mm0, 7, mm0
+ PALIGNR mm1, mm2, 1, mm2
+ movq mm0, mm4
+ PRED4x4_LOWPASS mm2, mm1, mm4, mm3, mm5
+ movq mm4, mm0
+ movq mm7, mm2
+ PRED4x4_LOWPASS mm1, mm3, mm0, mm4, mm5
+ psllq mm1, 56
+ PALIGNR mm7, mm1, 7, mm3
+ movq mm3, mm7
+ lea r1, [r0+r3*2]
+ movq mm7, mm3
+ punpckhbw mm3, mm3
+ punpcklbw mm7, mm7
+ pshufw mm0, mm3, 0xff
+ pshufw mm1, mm3, 0xaa
+ lea r2, [r1+r3*2]
+ pshufw mm2, mm3, 0x55
+ pshufw mm3, mm3, 0x00
+ pshufw mm4, mm7, 0xff
+ pshufw mm5, mm7, 0xaa
+ pshufw mm6, mm7, 0x55
+ pshufw mm7, mm7, 0x00
+ movq [r0+r3*1], mm0
+ movq [r0+r3*2], mm1
+ movq [r1+r3*1], mm2
+ movq [r1+r3*2], mm3
+ movq [r2+r3*1], mm4
+ movq [r2+r3*2], mm5
+ lea r0, [r2+r3*2]
+ movq [r0+r3*1], mm6
+ movq [r0+r3*2], mm7
+ RET
+%endmacro
+
+INIT_MMX mmxext
+PRED8x8L_HORIZONTAL
+INIT_MMX ssse3
+PRED8x8L_HORIZONTAL
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_vertical_8(uint8_t *src, int has_topleft, int has_topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED8x8L_VERTICAL 0
+cglobal pred8x8l_vertical_8, 4,4
+ sub r0, r3
+ movq mm0, [r0-8]
+ movq mm3, [r0]
+ movq mm1, [r0+8]
+ movq mm2, mm3
+ movq mm4, mm3
+ PALIGNR mm2, mm0, 7, mm0
+ PALIGNR mm1, mm4, 1, mm4
+ test r1d, r1d ; top_left
+ jz .fix_lt_2
+ test r2d, r2d ; top_right
+ jz .fix_tr_1
+ jmp .body
+.fix_lt_2:
+ movq mm5, mm3
+ pxor mm5, mm2
+ psllq mm5, 56
+ psrlq mm5, 56
+ pxor mm2, mm5
+ test r2d, r2d ; top_right
+ jnz .body
+.fix_tr_1:
+ movq mm5, mm3
+ pxor mm5, mm1
+ psrlq mm5, 56
+ psllq mm5, 56
+ pxor mm1, mm5
+.body:
+ PRED4x4_LOWPASS mm0, mm2, mm1, mm3, mm5
+%rep 3
+ movq [r0+r3*1], mm0
+ movq [r0+r3*2], mm0
+ lea r0, [r0+r3*2]
+%endrep
+ movq [r0+r3*1], mm0
+ movq [r0+r3*2], mm0
+ RET
+%endmacro
+
+INIT_MMX mmxext
+PRED8x8L_VERTICAL
+INIT_MMX ssse3
+PRED8x8L_VERTICAL
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_down_left_8(uint8_t *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED8x8L_DOWN_LEFT 0
+cglobal pred8x8l_down_left_8, 4,4
+ sub r0, r3
+ movq mm0, [r0-8]
+ movq mm3, [r0]
+ movq mm1, [r0+8]
+ movq mm2, mm3
+ movq mm4, mm3
+ PALIGNR mm2, mm0, 7, mm0
+ PALIGNR mm1, mm4, 1, mm4
+ test r1d, r1d ; top_left
+ jz .fix_lt_2
+ test r2d, r2d ; top_right
+ jz .fix_tr_1
+ jmp .do_top
+.fix_lt_2:
+ movq mm5, mm3
+ pxor mm5, mm2
+ psllq mm5, 56
+ psrlq mm5, 56
+ pxor mm2, mm5
+ test r2d, r2d ; top_right
+ jnz .do_top
+.fix_tr_1:
+ movq mm5, mm3
+ pxor mm5, mm1
+ psrlq mm5, 56
+ psllq mm5, 56
+ pxor mm1, mm5
+ jmp .do_top
+.fix_tr_2:
+ punpckhbw mm3, mm3
+ pshufw mm1, mm3, 0xFF
+ jmp .do_topright
+.do_top:
+ PRED4x4_LOWPASS mm4, mm2, mm1, mm3, mm5
+ movq2dq xmm3, mm4
+ test r2d, r2d ; top_right
+ jz .fix_tr_2
+ movq mm0, [r0+8]
+ movq mm5, mm0
+ movq mm2, mm0
+ movq mm4, mm0
+ psrlq mm5, 56
+ PALIGNR mm2, mm3, 7, mm3
+ PALIGNR mm5, mm4, 1, mm4
+ PRED4x4_LOWPASS mm1, mm2, mm5, mm0, mm4
+.do_topright:
+ movq2dq xmm4, mm1
+ psrlq mm1, 56
+ movq2dq xmm5, mm1
+ lea r1, [r0+r3*2]
+ pslldq xmm4, 8
+ por xmm3, xmm4
+ movdqa xmm2, xmm3
+ psrldq xmm2, 1
+ pslldq xmm5, 15
+ por xmm2, xmm5
+ lea r2, [r1+r3*2]
+ movdqa xmm1, xmm3
+ pslldq xmm1, 1
+INIT_XMM cpuname
+ PRED4x4_LOWPASS xmm0, xmm1, xmm2, xmm3, xmm4
+ psrldq xmm0, 1
+ movq [r0+r3*1], xmm0
+ psrldq xmm0, 1
+ movq [r0+r3*2], xmm0
+ psrldq xmm0, 1
+ lea r0, [r2+r3*2]
+ movq [r1+r3*1], xmm0
+ psrldq xmm0, 1
+ movq [r1+r3*2], xmm0
+ psrldq xmm0, 1
+ movq [r2+r3*1], xmm0
+ psrldq xmm0, 1
+ movq [r2+r3*2], xmm0
+ psrldq xmm0, 1
+ movq [r0+r3*1], xmm0
+ psrldq xmm0, 1
+ movq [r0+r3*2], xmm0
+ RET
+%endmacro
+
+INIT_MMX sse2
+PRED8x8L_DOWN_LEFT
+INIT_MMX ssse3
+PRED8x8L_DOWN_LEFT
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_down_right_8(uint8_t *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED8x8L_DOWN_RIGHT 0
+cglobal pred8x8l_down_right_8, 4,5
+ sub r0, r3
+ lea r4, [r0+r3*2]
+ movq mm0, [r0+r3*1-8]
+ punpckhbw mm0, [r0+r3*0-8]
+ movq mm1, [r4+r3*1-8]
+ punpckhbw mm1, [r0+r3*2-8]
+ mov r4, r0
+ punpckhwd mm1, mm0
+ lea r0, [r0+r3*4]
+ movq mm2, [r0+r3*1-8]
+ punpckhbw mm2, [r0+r3*0-8]
+ lea r0, [r0+r3*2]
+ movq mm3, [r0+r3*1-8]
+ punpckhbw mm3, [r0+r3*0-8]
+ punpckhwd mm3, mm2
+ punpckhdq mm3, mm1
+ lea r0, [r0+r3*2]
+ movq mm0, [r0+r3*0-8]
+ movq mm1, [r4]
+ mov r0, r4
+ movq mm4, mm3
+ movq mm2, mm3
+ PALIGNR mm4, mm0, 7, mm0
+ PALIGNR mm1, mm2, 1, mm2
+ test r1d, r1d
+ jz .fix_lt_1
+ jmp .do_left
+.fix_lt_1:
+ movq mm5, mm3
+ pxor mm5, mm4
+ psrlq mm5, 56
+ psllq mm5, 48
+ pxor mm1, mm5
+ jmp .do_left
+.fix_lt_2:
+ movq mm5, mm3
+ pxor mm5, mm2
+ psllq mm5, 56
+ psrlq mm5, 56
+ pxor mm2, mm5
+ test r2d, r2d
+ jnz .do_top
+.fix_tr_1:
+ movq mm5, mm3
+ pxor mm5, mm1
+ psrlq mm5, 56
+ psllq mm5, 56
+ pxor mm1, mm5
+ jmp .do_top
+.do_left:
+ movq mm0, mm4
+ PRED4x4_LOWPASS mm2, mm1, mm4, mm3, mm5
+ movq mm4, mm0
+ movq mm7, mm2
+ movq2dq xmm3, mm2
+ PRED4x4_LOWPASS mm1, mm3, mm0, mm4, mm5
+ psllq mm1, 56
+ PALIGNR mm7, mm1, 7, mm3
+ movq2dq xmm1, mm7
+ movq mm0, [r0-8]
+ movq mm3, [r0]
+ movq mm1, [r0+8]
+ movq mm2, mm3
+ movq mm4, mm3
+ PALIGNR mm2, mm0, 7, mm0
+ PALIGNR mm1, mm4, 1, mm4
+ test r1d, r1d
+ jz .fix_lt_2
+ test r2d, r2d
+ jz .fix_tr_1
+.do_top:
+ PRED4x4_LOWPASS mm4, mm2, mm1, mm3, mm5
+ movq2dq xmm4, mm4
+ lea r1, [r0+r3*2]
+ movdqa xmm0, xmm3
+ pslldq xmm4, 8
+ por xmm3, xmm4
+ lea r2, [r1+r3*2]
+ pslldq xmm4, 1
+ por xmm1, xmm4
+ psrldq xmm0, 7
+ pslldq xmm0, 15
+ psrldq xmm0, 7
+ por xmm1, xmm0
+ lea r0, [r2+r3*2]
+ movdqa xmm2, xmm3
+ psrldq xmm2, 1
+INIT_XMM cpuname
+ PRED4x4_LOWPASS xmm0, xmm1, xmm2, xmm3, xmm4
+ movdqa xmm1, xmm0
+ psrldq xmm1, 1
+ movq [r0+r3*2], xmm0
+ movq [r0+r3*1], xmm1
+ psrldq xmm0, 2
+ psrldq xmm1, 2
+ movq [r2+r3*2], xmm0
+ movq [r2+r3*1], xmm1
+ psrldq xmm0, 2
+ psrldq xmm1, 2
+ movq [r1+r3*2], xmm0
+ movq [r1+r3*1], xmm1
+ psrldq xmm0, 2
+ psrldq xmm1, 2
+ movq [r4+r3*2], xmm0
+ movq [r4+r3*1], xmm1
+ RET
+%endmacro
+
+INIT_MMX sse2
+PRED8x8L_DOWN_RIGHT
+INIT_MMX ssse3
+PRED8x8L_DOWN_RIGHT
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_vertical_right_8(uint8_t *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED8x8L_VERTICAL_RIGHT 0
+cglobal pred8x8l_vertical_right_8, 4,5,7
+ ; manually spill XMM registers for Win64 because
+ ; the code here is initialized with INIT_MMX
+ WIN64_SPILL_XMM 7
+ sub r0, r3
+ lea r4, [r0+r3*2]
+ movq mm0, [r0+r3*1-8]
+ punpckhbw mm0, [r0+r3*0-8]
+ movq mm1, [r4+r3*1-8]
+ punpckhbw mm1, [r0+r3*2-8]
+ mov r4, r0
+ punpckhwd mm1, mm0
+ lea r0, [r0+r3*4]
+ movq mm2, [r0+r3*1-8]
+ punpckhbw mm2, [r0+r3*0-8]
+ lea r0, [r0+r3*2]
+ movq mm3, [r0+r3*1-8]
+ punpckhbw mm3, [r0+r3*0-8]
+ punpckhwd mm3, mm2
+ punpckhdq mm3, mm1
+ lea r0, [r0+r3*2]
+ movq mm0, [r0+r3*0-8]
+ movq mm1, [r4]
+ mov r0, r4
+ movq mm4, mm3
+ movq mm2, mm3
+ PALIGNR mm4, mm0, 7, mm0
+ PALIGNR mm1, mm2, 1, mm2
+ test r1d, r1d
+ jnz .do_left
+.fix_lt_1:
+ movq mm5, mm3
+ pxor mm5, mm4
+ psrlq mm5, 56
+ psllq mm5, 48
+ pxor mm1, mm5
+ jmp .do_left
+.fix_lt_2:
+ movq mm5, mm3
+ pxor mm5, mm2
+ psllq mm5, 56
+ psrlq mm5, 56
+ pxor mm2, mm5
+ test r2d, r2d
+ jnz .do_top
+.fix_tr_1:
+ movq mm5, mm3
+ pxor mm5, mm1
+ psrlq mm5, 56
+ psllq mm5, 56
+ pxor mm1, mm5
+ jmp .do_top
+.do_left:
+ movq mm0, mm4
+ PRED4x4_LOWPASS mm2, mm1, mm4, mm3, mm5
+ movq2dq xmm0, mm2
+ movq mm0, [r0-8]
+ movq mm3, [r0]
+ movq mm1, [r0+8]
+ movq mm2, mm3
+ movq mm4, mm3
+ PALIGNR mm2, mm0, 7, mm0
+ PALIGNR mm1, mm4, 1, mm4
+ test r1d, r1d
+ jz .fix_lt_2
+ test r2d, r2d
+ jz .fix_tr_1
+.do_top:
+ PRED4x4_LOWPASS mm6, mm2, mm1, mm3, mm5
+ lea r1, [r0+r3*2]
+ movq2dq xmm4, mm6
+ pslldq xmm4, 8
+ por xmm0, xmm4
+ movdqa xmm6, [pw_ff00]
+ movdqa xmm1, xmm0
+ lea r2, [r1+r3*2]
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm0
+ pslldq xmm0, 1
+ pslldq xmm1, 2
+ pavgb xmm2, xmm0
+INIT_XMM cpuname
+ PRED4x4_LOWPASS xmm4, xmm3, xmm1, xmm0, xmm5
+ pandn xmm6, xmm4
+ movdqa xmm5, xmm4
+ psrlw xmm4, 8
+ packuswb xmm6, xmm4
+ movhlps xmm4, xmm6
+ movhps [r0+r3*2], xmm5
+ movhps [r0+r3*1], xmm2
+ psrldq xmm5, 4
+ movss xmm5, xmm6
+ psrldq xmm2, 4
+ movss xmm2, xmm4
+ lea r0, [r2+r3*2]
+ psrldq xmm5, 1
+ psrldq xmm2, 1
+ movq [r0+r3*2], xmm5
+ movq [r0+r3*1], xmm2
+ psrldq xmm5, 1
+ psrldq xmm2, 1
+ movq [r2+r3*2], xmm5
+ movq [r2+r3*1], xmm2
+ psrldq xmm5, 1
+ psrldq xmm2, 1
+ movq [r1+r3*2], xmm5
+ movq [r1+r3*1], xmm2
+ RET
+%endmacro
+
+INIT_MMX sse2
+PRED8x8L_VERTICAL_RIGHT
+INIT_MMX ssse3
+PRED8x8L_VERTICAL_RIGHT
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_vertical_left_8(uint8_t *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED8x8L_VERTICAL_LEFT 0
+cglobal pred8x8l_vertical_left_8, 4,4
+ sub r0, r3
+ movq mm0, [r0-8]
+ movq mm3, [r0]
+ movq mm1, [r0+8]
+ movq mm2, mm3
+ movq mm4, mm3
+ PALIGNR mm2, mm0, 7, mm0
+ PALIGNR mm1, mm4, 1, mm4
+ test r1d, r1d
+ jz .fix_lt_2
+ test r2d, r2d
+ jz .fix_tr_1
+ jmp .do_top
+.fix_lt_2:
+ movq mm5, mm3
+ pxor mm5, mm2
+ psllq mm5, 56
+ psrlq mm5, 56
+ pxor mm2, mm5
+ test r2d, r2d
+ jnz .do_top
+.fix_tr_1:
+ movq mm5, mm3
+ pxor mm5, mm1
+ psrlq mm5, 56
+ psllq mm5, 56
+ pxor mm1, mm5
+ jmp .do_top
+.fix_tr_2:
+ punpckhbw mm3, mm3
+ pshufw mm1, mm3, 0xFF
+ jmp .do_topright
+.do_top:
+ PRED4x4_LOWPASS mm4, mm2, mm1, mm3, mm5
+ movq2dq xmm4, mm4
+ test r2d, r2d
+ jz .fix_tr_2
+ movq mm0, [r0+8]
+ movq mm5, mm0
+ movq mm2, mm0
+ movq mm4, mm0
+ psrlq mm5, 56
+ PALIGNR mm2, mm3, 7, mm3
+ PALIGNR mm5, mm4, 1, mm4
+ PRED4x4_LOWPASS mm1, mm2, mm5, mm0, mm4
+.do_topright:
+ movq2dq xmm3, mm1
+ lea r1, [r0+r3*2]
+ pslldq xmm3, 8
+ por xmm4, xmm3
+ movdqa xmm2, xmm4
+ movdqa xmm1, xmm4
+ movdqa xmm3, xmm4
+ psrldq xmm2, 1
+ pslldq xmm1, 1
+ pavgb xmm3, xmm2
+ lea r2, [r1+r3*2]
+INIT_XMM cpuname
+ PRED4x4_LOWPASS xmm0, xmm1, xmm2, xmm4, xmm5
+ psrldq xmm0, 1
+ movq [r0+r3*1], xmm3
+ movq [r0+r3*2], xmm0
+ lea r0, [r2+r3*2]
+ psrldq xmm3, 1
+ psrldq xmm0, 1
+ movq [r1+r3*1], xmm3
+ movq [r1+r3*2], xmm0
+ psrldq xmm3, 1
+ psrldq xmm0, 1
+ movq [r2+r3*1], xmm3
+ movq [r2+r3*2], xmm0
+ psrldq xmm3, 1
+ psrldq xmm0, 1
+ movq [r0+r3*1], xmm3
+ movq [r0+r3*2], xmm0
+ RET
+%endmacro
+
+INIT_MMX sse2
+PRED8x8L_VERTICAL_LEFT
+INIT_MMX ssse3
+PRED8x8L_VERTICAL_LEFT
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_horizontal_up_8(uint8_t *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED8x8L_HORIZONTAL_UP 0
+cglobal pred8x8l_horizontal_up_8, 4,4
+ sub r0, r3
+ lea r2, [r0+r3*2]
+ movq mm0, [r0+r3*1-8]
+ test r1d, r1d
+ lea r1, [r0+r3]
+ cmovnz r1, r0
+ punpckhbw mm0, [r1+r3*0-8]
+ movq mm1, [r2+r3*1-8]
+ punpckhbw mm1, [r0+r3*2-8]
+ mov r2, r0
+ punpckhwd mm1, mm0
+ lea r0, [r0+r3*4]
+ movq mm2, [r0+r3*1-8]
+ punpckhbw mm2, [r0+r3*0-8]
+ lea r0, [r0+r3*2]
+ movq mm3, [r0+r3*1-8]
+ punpckhbw mm3, [r0+r3*0-8]
+ punpckhwd mm3, mm2
+ punpckhdq mm3, mm1
+ lea r0, [r0+r3*2]
+ movq mm0, [r0+r3*0-8]
+ movq mm1, [r1+r3*0-8]
+ mov r0, r2
+ movq mm4, mm3
+ movq mm2, mm3
+ PALIGNR mm4, mm0, 7, mm0
+ PALIGNR mm1, mm2, 1, mm2
+ movq mm0, mm4
+ PRED4x4_LOWPASS mm2, mm1, mm4, mm3, mm5
+ movq mm4, mm0
+ movq mm7, mm2
+ PRED4x4_LOWPASS mm1, mm3, mm0, mm4, mm5
+ psllq mm1, 56
+ PALIGNR mm7, mm1, 7, mm3
+ lea r1, [r0+r3*2]
+ pshufw mm0, mm7, 00011011b ; l6 l7 l4 l5 l2 l3 l0 l1
+ psllq mm7, 56 ; l7 .. .. .. .. .. .. ..
+ movq mm2, mm0
+ psllw mm0, 8
+ psrlw mm2, 8
+ por mm2, mm0 ; l7 l6 l5 l4 l3 l2 l1 l0
+ movq mm3, mm2
+ movq mm4, mm2
+ movq mm5, mm2
+ psrlq mm2, 8
+ psrlq mm3, 16
+ lea r2, [r1+r3*2]
+ por mm2, mm7 ; l7 l7 l6 l5 l4 l3 l2 l1
+ punpckhbw mm7, mm7
+ por mm3, mm7 ; l7 l7 l7 l6 l5 l4 l3 l2
+ pavgb mm4, mm2
+ PRED4x4_LOWPASS mm1, mm3, mm5, mm2, mm6
+ movq mm5, mm4
+ punpcklbw mm4, mm1 ; p4 p3 p2 p1
+ punpckhbw mm5, mm1 ; p8 p7 p6 p5
+ movq mm6, mm5
+ movq mm7, mm5
+ movq mm0, mm5
+ PALIGNR mm5, mm4, 2, mm1
+ pshufw mm1, mm6, 11111001b
+ PALIGNR mm6, mm4, 4, mm2
+ pshufw mm2, mm7, 11111110b
+ PALIGNR mm7, mm4, 6, mm3
+ pshufw mm3, mm0, 11111111b
+ movq [r0+r3*1], mm4
+ movq [r0+r3*2], mm5
+ lea r0, [r2+r3*2]
+ movq [r1+r3*1], mm6
+ movq [r1+r3*2], mm7
+ movq [r2+r3*1], mm0
+ movq [r2+r3*2], mm1
+ movq [r0+r3*1], mm2
+ movq [r0+r3*2], mm3
+ RET
+%endmacro
+
+INIT_MMX mmxext
+PRED8x8L_HORIZONTAL_UP
+INIT_MMX ssse3
+PRED8x8L_HORIZONTAL_UP
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_horizontal_down_8(uint8_t *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+%macro PRED8x8L_HORIZONTAL_DOWN 0
+cglobal pred8x8l_horizontal_down_8, 4,5
+ sub r0, r3
+ lea r4, [r0+r3*2]
+ movq mm0, [r0+r3*1-8]
+ punpckhbw mm0, [r0+r3*0-8]
+ movq mm1, [r4+r3*1-8]
+ punpckhbw mm1, [r0+r3*2-8]
+ mov r4, r0
+ punpckhwd mm1, mm0
+ lea r0, [r0+r3*4]
+ movq mm2, [r0+r3*1-8]
+ punpckhbw mm2, [r0+r3*0-8]
+ lea r0, [r0+r3*2]
+ movq mm3, [r0+r3*1-8]
+ punpckhbw mm3, [r0+r3*0-8]
+ punpckhwd mm3, mm2
+ punpckhdq mm3, mm1
+ lea r0, [r0+r3*2]
+ movq mm0, [r0+r3*0-8]
+ movq mm1, [r4]
+ mov r0, r4
+ movq mm4, mm3
+ movq mm2, mm3
+ PALIGNR mm4, mm0, 7, mm0
+ PALIGNR mm1, mm2, 1, mm2
+ test r1d, r1d
+ jnz .do_left
+.fix_lt_1:
+ movq mm5, mm3
+ pxor mm5, mm4
+ psrlq mm5, 56
+ psllq mm5, 48
+ pxor mm1, mm5
+ jmp .do_left
+.fix_lt_2:
+ movq mm5, mm3
+ pxor mm5, mm2
+ psllq mm5, 56
+ psrlq mm5, 56
+ pxor mm2, mm5
+ test r2d, r2d
+ jnz .do_top
+.fix_tr_1:
+ movq mm5, mm3
+ pxor mm5, mm1
+ psrlq mm5, 56
+ psllq mm5, 56
+ pxor mm1, mm5
+ jmp .do_top
+.fix_tr_2:
+ punpckhbw mm3, mm3
+ pshufw mm1, mm3, 0xFF
+ jmp .do_topright
+.do_left:
+ movq mm0, mm4
+ PRED4x4_LOWPASS mm2, mm1, mm4, mm3, mm5
+ movq2dq xmm0, mm2
+ pslldq xmm0, 8
+ movq mm4, mm0
+ PRED4x4_LOWPASS mm1, mm3, mm0, mm4, mm5
+ movq2dq xmm2, mm1
+ pslldq xmm2, 15
+ psrldq xmm2, 8
+ por xmm0, xmm2
+ movq mm0, [r0-8]
+ movq mm3, [r0]
+ movq mm1, [r0+8]
+ movq mm2, mm3
+ movq mm4, mm3
+ PALIGNR mm2, mm0, 7, mm0
+ PALIGNR mm1, mm4, 1, mm4
+ test r1d, r1d
+ jz .fix_lt_2
+ test r2d, r2d
+ jz .fix_tr_1
+.do_top:
+ PRED4x4_LOWPASS mm4, mm2, mm1, mm3, mm5
+ movq2dq xmm1, mm4
+ test r2d, r2d
+ jz .fix_tr_2
+ movq mm0, [r0+8]
+ movq mm5, mm0
+ movq mm2, mm0
+ movq mm4, mm0
+ psrlq mm5, 56
+ PALIGNR mm2, mm3, 7, mm3
+ PALIGNR mm5, mm4, 1, mm4
+ PRED4x4_LOWPASS mm1, mm2, mm5, mm0, mm4
+.do_topright:
+ movq2dq xmm5, mm1
+ pslldq xmm5, 8
+ por xmm1, xmm5
+INIT_XMM cpuname
+ lea r2, [r4+r3*2]
+ movdqa xmm2, xmm1
+ movdqa xmm3, xmm1
+ PALIGNR xmm1, xmm0, 7, xmm4
+ PALIGNR xmm2, xmm0, 9, xmm5
+ lea r1, [r2+r3*2]
+ PALIGNR xmm3, xmm0, 8, xmm0
+ movdqa xmm4, xmm1
+ pavgb xmm4, xmm3
+ lea r0, [r1+r3*2]
+ PRED4x4_LOWPASS xmm0, xmm1, xmm2, xmm3, xmm5
+ punpcklbw xmm4, xmm0
+ movhlps xmm0, xmm4
+ movq [r0+r3*2], xmm4
+ movq [r2+r3*2], xmm0
+ psrldq xmm4, 2
+ psrldq xmm0, 2
+ movq [r0+r3*1], xmm4
+ movq [r2+r3*1], xmm0
+ psrldq xmm4, 2
+ psrldq xmm0, 2
+ movq [r1+r3*2], xmm4
+ movq [r4+r3*2], xmm0
+ psrldq xmm4, 2
+ psrldq xmm0, 2
+ movq [r1+r3*1], xmm4
+ movq [r4+r3*1], xmm0
+ RET
+%endmacro
+
+INIT_MMX sse2
+PRED8x8L_HORIZONTAL_DOWN
+INIT_MMX ssse3
+PRED8x8L_HORIZONTAL_DOWN
+
+;-------------------------------------------------------------------------------
+; void ff_pred4x4_dc_8_mmxext(uint8_t *src, const uint8_t *topright,
+; ptrdiff_t stride)
+;-------------------------------------------------------------------------------
+
+INIT_MMX mmxext
+cglobal pred4x4_dc_8, 3,5
+ pxor mm7, mm7
+ mov r4, r0
+ sub r0, r2
+ movd mm0, [r0]
+ psadbw mm0, mm7
+ movzx r1d, byte [r0+r2*1-1]
+ movd r3d, mm0
+ add r3d, r1d
+ movzx r1d, byte [r0+r2*2-1]
+ lea r0, [r0+r2*2]
+ add r3d, r1d
+ movzx r1d, byte [r0+r2*1-1]
+ add r3d, r1d
+ movzx r1d, byte [r0+r2*2-1]
+ add r3d, r1d
+ add r3d, 4
+ shr r3d, 3
+ imul r3d, 0x01010101
+ mov [r4+r2*0], r3d
+ mov [r0+r2*0], r3d
+ mov [r0+r2*1], r3d
+ mov [r0+r2*2], r3d
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred4x4_tm_vp8_8_mmxext(uint8_t *src, const uint8_t *topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_MMX mmxext
+cglobal pred4x4_tm_vp8_8, 3,6
+ sub r0, r2
+ pxor mm7, mm7
+ movd mm0, [r0]
+ punpcklbw mm0, mm7
+ movzx r4d, byte [r0-1]
+ mov r5d, 2
+.loop:
+ movzx r1d, byte [r0+r2*1-1]
+ movzx r3d, byte [r0+r2*2-1]
+ sub r1d, r4d
+ sub r3d, r4d
+ movd mm2, r1d
+ movd mm4, r3d
+ pshufw mm2, mm2, 0
+ pshufw mm4, mm4, 0
+ paddw mm2, mm0
+ paddw mm4, mm0
+ packuswb mm2, mm2
+ packuswb mm4, mm4
+ movd [r0+r2*1], mm2
+ movd [r0+r2*2], mm4
+ lea r0, [r0+r2*2]
+ dec r5d
+ jg .loop
+ RET
+
+INIT_XMM ssse3
+cglobal pred4x4_tm_vp8_8, 3,3
+ sub r0, r2
+ movq mm6, [tm_shuf]
+ pxor mm1, mm1
+ movd mm0, [r0]
+ punpcklbw mm0, mm1
+ movd mm7, [r0-4]
+ pshufb mm7, mm6
+ lea r1, [r0+r2*2]
+ movd mm2, [r0+r2*1-4]
+ movd mm3, [r0+r2*2-4]
+ movd mm4, [r1+r2*1-4]
+ movd mm5, [r1+r2*2-4]
+ pshufb mm2, mm6
+ pshufb mm3, mm6
+ pshufb mm4, mm6
+ pshufb mm5, mm6
+ psubw mm0, mm7
+ paddw mm2, mm0
+ paddw mm3, mm0
+ paddw mm4, mm0
+ paddw mm5, mm0
+ packuswb mm2, mm2
+ packuswb mm3, mm3
+ packuswb mm4, mm4
+ packuswb mm5, mm5
+ movd [r0+r2*1], mm2
+ movd [r0+r2*2], mm3
+ movd [r1+r2*1], mm4
+ movd [r1+r2*2], mm5
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred4x4_vertical_vp8_8_mmxext(uint8_t *src, const uint8_t *topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_MMX mmxext
+cglobal pred4x4_vertical_vp8_8, 3,3
+ sub r0, r2
+ movd m1, [r0-1]
+ movd m0, [r0]
+ mova m2, m0 ;t0 t1 t2 t3
+ punpckldq m0, [r1] ;t0 t1 t2 t3 t4 t5 t6 t7
+ lea r1, [r0+r2*2]
+ psrlq m0, 8 ;t1 t2 t3 t4
+ PRED4x4_LOWPASS m3, m1, m0, m2, m4
+ movd [r0+r2*1], m3
+ movd [r0+r2*2], m3
+ movd [r1+r2*1], m3
+ movd [r1+r2*2], m3
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred4x4_down_left_8_mmxext(uint8_t *src, const uint8_t *topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_MMX mmxext
+cglobal pred4x4_down_left_8, 3,3
+ sub r0, r2
+ movq m1, [r0]
+ punpckldq m1, [r1]
+ movq m2, m1
+ movq m3, m1
+ psllq m1, 8
+ pxor m2, m1
+ psrlq m2, 8
+ pxor m2, m3
+ PRED4x4_LOWPASS m0, m1, m2, m3, m4
+ lea r1, [r0+r2*2]
+ psrlq m0, 8
+ movd [r0+r2*1], m0
+ psrlq m0, 8
+ movd [r0+r2*2], m0
+ psrlq m0, 8
+ movd [r1+r2*1], m0
+ psrlq m0, 8
+ movd [r1+r2*2], m0
+ RET
+
+;------------------------------------------------------------------------------
+; void ff_pred4x4_vertical_left_8_mmxext(uint8_t *src, const uint8_t *topright,
+; ptrdiff_t stride)
+;------------------------------------------------------------------------------
+
+INIT_MMX mmxext
+cglobal pred4x4_vertical_left_8, 3,3
+ sub r0, r2
+ movq m1, [r0]
+ punpckldq m1, [r1]
+ movq m3, m1
+ movq m2, m1
+ psrlq m3, 8
+ psrlq m2, 16
+ movq m4, m3
+ pavgb m4, m1
+ PRED4x4_LOWPASS m0, m1, m2, m3, m5
+ lea r1, [r0+r2*2]
+ movh [r0+r2*1], m4
+ movh [r0+r2*2], m0
+ psrlq m4, 8
+ psrlq m0, 8
+ movh [r1+r2*1], m4
+ movh [r1+r2*2], m0
+ RET
+
+;------------------------------------------------------------------------------
+; void ff_pred4x4_horizontal_up_8_mmxext(uint8_t *src, const uint8_t *topright,
+; ptrdiff_t stride)
+;------------------------------------------------------------------------------
+
+INIT_MMX mmxext
+cglobal pred4x4_horizontal_up_8, 3,3
+ sub r0, r2
+ lea r1, [r0+r2*2]
+ movd m0, [r0+r2*1-4]
+ punpcklbw m0, [r0+r2*2-4]
+ movd m1, [r1+r2*1-4]
+ punpcklbw m1, [r1+r2*2-4]
+ punpckhwd m0, m1
+ movq m1, m0
+ punpckhbw m1, m1
+ pshufw m1, m1, 0xFF
+ punpckhdq m0, m1
+ movq m2, m0
+ movq m3, m0
+ movq m7, m0
+ psrlq m2, 16
+ psrlq m3, 8
+ pavgb m7, m3
+ PRED4x4_LOWPASS m4, m0, m2, m3, m5
+ punpcklbw m7, m4
+ movd [r0+r2*1], m7
+ psrlq m7, 16
+ movd [r0+r2*2], m7
+ psrlq m7, 16
+ movd [r1+r2*1], m7
+ movd [r1+r2*2], m1
+ RET
+
+;------------------------------------------------------------------------------
+; void ff_pred4x4_horizontal_down_8_mmxext(uint8_t *src,
+; const uint8_t *topright,
+; ptrdiff_t stride)
+;------------------------------------------------------------------------------
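+; pack the left column, the top-left pixel and three top pixels into one
+; register (t2 t1 t0 lt l0 l1 l2 l3) and derive every output row from its
+; averaged and low-pass-filtered copies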
+
+INIT_MMX mmxext
+cglobal pred4x4_horizontal_down_8, 3,3
+ sub r0, r2
+ lea r1, [r0+r2*2]
+ movh m0, [r0-4] ; lt ..
+ punpckldq m0, [r0] ; t3 t2 t1 t0 lt .. .. ..
+ psllq m0, 8 ; t2 t1 t0 lt .. .. .. ..
+ movd m1, [r1+r2*2-4] ; l3
+ punpcklbw m1, [r1+r2*1-4] ; l2 l3
+ movd m2, [r0+r2*2-4] ; l1
+ punpcklbw m2, [r0+r2*1-4] ; l0 l1
+ punpckhwd m1, m2 ; l0 l1 l2 l3
+ punpckhdq m1, m0 ; t2 t1 t0 lt l0 l1 l2 l3
+ movq m0, m1
+ movq m2, m1
+ movq m5, m1
+ psrlq m0, 16 ; .. .. t2 t1 t0 lt l0 l1
+ psrlq m2, 8 ; .. t2 t1 t0 lt l0 l1 l2
+ pavgb m5, m2
+ PRED4x4_LOWPASS m3, m1, m0, m2, m4
+ punpcklbw m5, m3
+ psrlq m3, 32
+ PALIGNR m3, m5, 6, m4
+ movh [r1+r2*2], m5
+ psrlq m5, 16
+ movh [r1+r2*1], m5
+ psrlq m5, 16
+ movh [r0+r2*2], m5
+ movh [r0+r2*1], m3
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred4x4_vertical_right_8_mmxext(uint8_t *src,
+; const uint8_t *topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_MMX mmxext
+cglobal pred4x4_vertical_right_8, 3,3
+ sub r0, r2
+ lea r1, [r0+r2*2]
+ movh m0, [r0] ; ........t3t2t1t0
+ movq m5, m0
+ PALIGNR m0, [r0-8], 7, m1 ; ......t3t2t1t0lt
+ pavgb m5, m0
+ PALIGNR m0, [r0+r2*1-8], 7, m1 ; ....t3t2t1t0ltl0
+ movq m1, m0
+ PALIGNR m0, [r0+r2*2-8], 7, m2 ; ..t3t2t1t0ltl0l1
+ movq m2, m0
+ PALIGNR m0, [r1+r2*1-8], 7, m3 ; t3t2t1t0ltl0l1l2
+ PRED4x4_LOWPASS m3, m1, m0, m2, m4
+ movq m1, m3
+ psrlq m3, 16
+ psllq m1, 48
+ movh [r0+r2*1], m5
+ movh [r0+r2*2], m3
+ PALIGNR m5, m1, 7, m2
+ psllq m1, 8
+ movh [r1+r2*1], m5
+ PALIGNR m3, m1, 7, m1
+ movh [r1+r2*2], m3
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred4x4_down_right_8_mmxext(uint8_t *src, const uint8_t *topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_MMX mmxext
+cglobal pred4x4_down_right_8, 3,3
+ sub r0, r2
+ lea r1, [r0+r2*2]
+ movq m1, [r1-8]
+ movq m2, [r0+r2*1-8]
+ punpckhbw m2, [r0-8]
+ movh m3, [r0]
+ punpckhwd m1, m2
+ PALIGNR m3, m1, 5, m1
+ movq m1, m3
+ PALIGNR m3, [r1+r2*1-8], 7, m4
+ movq m2, m3
+ PALIGNR m3, [r1+r2*2-8], 7, m4
+ PRED4x4_LOWPASS m0, m3, m1, m2, m4
+ movh [r1+r2*2], m0
+ psrlq m0, 8
+ movh [r1+r2*1], m0
+ psrlq m0, 8
+ movh [r0+r2*2], m0
+ psrlq m0, 8
+ movh [r0+r2*1], m0
+ RET
diff --git a/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm b/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm
new file mode 100644
index 0000000000..2f30807332
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm
@@ -0,0 +1,1119 @@
+;*****************************************************************************
+;* MMX/SSE2/AVX-optimized 10-bit H.264 intra prediction code
+;*****************************************************************************
+;* Copyright (C) 2005-2011 x264 project
+;*
+;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+cextern pw_1023
+%define pw_pixel_max pw_1023
+cextern pw_512
+cextern pw_16
+cextern pw_8
+cextern pw_4
+cextern pw_2
+cextern pw_1
+cextern pd_16
+
+pw_m32101234: dw -3, -2, -1, 0, 1, 2, 3, 4
+pw_m3: times 8 dw -3
+pd_17: times 4 dd 17
+
+SECTION .text
+
+; dest, left, right, src
+; output: %1 = (t[n-1] + t[n]*2 + t[n+1] + 2) >> 2
+%macro PRED4x4_LOWPASS 4
+ paddw %2, %3
+ psrlw %2, 1
+ pavgw %1, %4, %2
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void ff_pred4x4_down_right_10(pixel *src, const pixel *topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro PRED4x4_DR 0
+cglobal pred4x4_down_right_10, 3, 3
+ sub r0, r2
+ lea r1, [r0+r2*2]
+ movhps m1, [r1-8]
+ movhps m2, [r0+r2*1-8]
+ movhps m4, [r0-8]
+ punpckhwd m2, m4
+ movq m3, [r0]
+ punpckhdq m1, m2
+ PALIGNR m3, m1, 10, m1
+ movhps m4, [r1+r2*1-8]
+ PALIGNR m0, m3, m4, 14, m4
+ movhps m4, [r1+r2*2-8]
+ PALIGNR m2, m0, m4, 14, m4
+ PRED4x4_LOWPASS m0, m2, m3, m0
+ movq [r1+r2*2], m0
+ psrldq m0, 2
+ movq [r1+r2*1], m0
+ psrldq m0, 2
+ movq [r0+r2*2], m0
+ psrldq m0, 2
+ movq [r0+r2*1], m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED4x4_DR
+INIT_XMM ssse3
+PRED4x4_DR
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED4x4_DR
+%endif
+
+;------------------------------------------------------------------------------
+; void ff_pred4x4_vertical_right_10(pixel *src, const pixel *topright,
+; ptrdiff_t stride)
+;------------------------------------------------------------------------------
+%macro PRED4x4_VR 0
+cglobal pred4x4_vertical_right_10, 3, 3, 6
+ sub r0, r2
+ lea r1, [r0+r2*2]
+ movq m5, [r0] ; ........t3t2t1t0
+ movhps m1, [r0-8]
+ PALIGNR m0, m5, m1, 14, m1 ; ......t3t2t1t0lt
+ pavgw m5, m0
+ movhps m1, [r0+r2*1-8]
+ PALIGNR m0, m1, 14, m1 ; ....t3t2t1t0ltl0
+ movhps m2, [r0+r2*2-8]
+ PALIGNR m1, m0, m2, 14, m2 ; ..t3t2t1t0ltl0l1
+ movhps m3, [r1+r2*1-8]
+ PALIGNR m2, m1, m3, 14, m3 ; t3t2t1t0ltl0l1l2
+ PRED4x4_LOWPASS m1, m0, m2, m1
+ pslldq m0, m1, 12
+ psrldq m1, 4
+ movq [r0+r2*1], m5
+ movq [r0+r2*2], m1
+ PALIGNR m5, m0, 14, m2
+ pslldq m0, 2
+ movq [r1+r2*1], m5
+ PALIGNR m1, m0, 14, m0
+ movq [r1+r2*2], m1
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED4x4_VR
+INIT_XMM ssse3
+PRED4x4_VR
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED4x4_VR
+%endif
+
+;-------------------------------------------------------------------------------
+; void ff_pred4x4_horizontal_down_10(pixel *src, const pixel *topright,
+; ptrdiff_t stride)
+;-------------------------------------------------------------------------------
+%macro PRED4x4_HD 0
+cglobal pred4x4_horizontal_down_10, 3, 3
+ sub r0, r2
+ lea r1, [r0+r2*2]
+ movq m0, [r0-8] ; lt ..
+ movhps m0, [r0]
+ pslldq m0, 2 ; t2 t1 t0 lt .. .. .. ..
+ movq m1, [r1+r2*2-8] ; l3
+ movq m3, [r1+r2*1-8]
+ punpcklwd m1, m3 ; l2 l3
+ movq m2, [r0+r2*2-8] ; l1
+ movq m3, [r0+r2*1-8]
+ punpcklwd m2, m3 ; l0 l1
+ punpckhdq m1, m2 ; l0 l1 l2 l3
+ punpckhqdq m1, m0 ; t2 t1 t0 lt l0 l1 l2 l3
+ psrldq m0, m1, 4 ; .. .. t2 t1 t0 lt l0 l1
+ psrldq m3, m1, 2 ; .. t2 t1 t0 lt l0 l1 l2
+ pavgw m5, m1, m3
+ PRED4x4_LOWPASS m3, m1, m0, m3
+ punpcklwd m5, m3
+ psrldq m3, 8
+ PALIGNR m3, m5, 12, m4
+ movq [r1+r2*2], m5
+ movhps [r0+r2*2], m5
+ psrldq m5, 4
+ movq [r1+r2*1], m5
+ movq [r0+r2*1], m3
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED4x4_HD
+INIT_XMM ssse3
+PRED4x4_HD
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED4x4_HD
+%endif
+
+;-----------------------------------------------------------------------------
+; void ff_pred4x4_dc_10(pixel *src, const pixel *topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+
+INIT_MMX mmxext
+cglobal pred4x4_dc_10, 3, 3
+ sub r0, r2
+ lea r1, [r0+r2*2]
+ movq m2, [r0+r2*1-8]
+ paddw m2, [r0+r2*2-8]
+ paddw m2, [r1+r2*1-8]
+ paddw m2, [r1+r2*2-8]
+ psrlq m2, 48
+ movq m0, [r0]
+ HADDW m0, m1
+ paddw m0, [pw_4]
+ paddw m0, m2
+ psrlw m0, 3
+ SPLATW m0, m0, 0
+ movq [r0+r2*1], m0
+ movq [r0+r2*2], m0
+ movq [r1+r2*1], m0
+ movq [r1+r2*2], m0
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred4x4_down_left_10(pixel *src, const pixel *topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro PRED4x4_DL 0
+cglobal pred4x4_down_left_10, 3, 3
+ sub r0, r2
+ movq m0, [r0]
+ movhps m0, [r1]
+ psrldq m2, m0, 2
+ pslldq m3, m0, 2
+ pshufhw m2, m2, 10100100b
+ PRED4x4_LOWPASS m0, m3, m2, m0
+ lea r1, [r0+r2*2]
+ movhps [r1+r2*2], m0
+ psrldq m0, 2
+ movq [r0+r2*1], m0
+ psrldq m0, 2
+ movq [r0+r2*2], m0
+ psrldq m0, 2
+ movq [r1+r2*1], m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED4x4_DL
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED4x4_DL
+%endif
+
+;-----------------------------------------------------------------------------
+; void ff_pred4x4_vertical_left_10(pixel *src, const pixel *topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro PRED4x4_VL 0
+cglobal pred4x4_vertical_left_10, 3, 3
+ sub r0, r2
+ movu m1, [r0]
+ movhps m1, [r1]
+ psrldq m0, m1, 2
+ psrldq m2, m1, 4
+ pavgw m4, m0, m1
+ PRED4x4_LOWPASS m0, m1, m2, m0
+ lea r1, [r0+r2*2]
+ movq [r0+r2*1], m4
+ movq [r0+r2*2], m0
+ psrldq m4, 2
+ psrldq m0, 2
+ movq [r1+r2*1], m4
+ movq [r1+r2*2], m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED4x4_VL
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED4x4_VL
+%endif
+
+;-----------------------------------------------------------------------------
+; void ff_pred4x4_horizontal_up_10(pixel *src, const pixel *topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_MMX mmxext
+cglobal pred4x4_horizontal_up_10, 3, 3
+ sub r0, r2
+ lea r1, [r0+r2*2]
+ movq m0, [r0+r2*1-8]
+ punpckhwd m0, [r0+r2*2-8]
+ movq m1, [r1+r2*1-8]
+ punpckhwd m1, [r1+r2*2-8]
+ punpckhdq m0, m1
+ pshufw m1, m1, 0xFF
+ movq [r1+r2*2], m1
+ movd [r1+r2*1+4], m1
+ pshufw m2, m0, 11111001b
+ movq m1, m2
+ pavgw m2, m0
+
+ pshufw m5, m0, 11111110b
+ PRED4x4_LOWPASS m1, m0, m5, m1
+ movq m6, m2
+ punpcklwd m6, m1
+ movq [r0+r2*1], m6
+ psrlq m2, 16
+ psrlq m1, 16
+ punpcklwd m2, m1
+ movq [r0+r2*2], m2
+ psrlq m2, 32
+ movd [r1+r2*1], m2
+ RET
+
+
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_vertical_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pred8x8_vertical_10, 2, 2
+ sub r0, r1
+ mova m0, [r0]
+%rep 3
+ mova [r0+r1*1], m0
+ mova [r0+r1*2], m0
+ lea r0, [r0+r1*2]
+%endrep
+ mova [r0+r1*1], m0
+ mova [r0+r1*2], m0
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_horizontal_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pred8x8_horizontal_10, 2, 3
+ mov r2d, 4
+.loop:
+ movq m0, [r0+r1*0-8]
+ movq m1, [r0+r1*1-8]
+ pshuflw m0, m0, 0xff
+ pshuflw m1, m1, 0xff
+ punpcklqdq m0, m0
+ punpcklqdq m1, m1
+ mova [r0+r1*0], m0
+ mova [r0+r1*1], m1
+ lea r0, [r0+r1*2]
+ dec r2d
+ jg .loop
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_dc_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro MOV8 2-3
+; at 10 bits one XMM register holds a full 8-pixel row, so only %2 is stored;
+; the optional extra register argument is accepted to keep the call sites uniform
+ movdqa [%1], %2
+%endmacro
+
+%macro PRED8x8_DC 1
+cglobal pred8x8_dc_10, 2, 6
+ sub r0, r1
+ pxor m4, m4
+ movq m0, [r0+0]
+ movq m1, [r0+8]
+ punpcklwd m0, m1
+ movhlps m1, m0
+ paddw m0, m1
+ %1 m2, m0, 00001110b
+ paddw m0, m2
+
+ lea r5, [r1*3]
+ lea r4, [r0+r1*4]
+ movzx r2d, word [r0+r1*1-2]
+ movzx r3d, word [r0+r1*2-2]
+ add r2d, r3d
+ movzx r3d, word [r0+r5*1-2]
+ add r2d, r3d
+ movzx r3d, word [r4-2]
+ add r2d, r3d
+ movd m2, r2d ; s2
+
+ movzx r2d, word [r4+r1*1-2]
+ movzx r3d, word [r4+r1*2-2]
+ add r2d, r3d
+ movzx r3d, word [r4+r5*1-2]
+ add r2d, r3d
+ movzx r3d, word [r4+r1*4-2]
+ add r2d, r3d
+ movd m3, r2d ; s3
+
+ punpcklwd m2, m3
+ punpckldq m0, m2 ; s0, s1, s2, s3
+ %1 m3, m0, 11110110b ; s2, s1, s3, s3
+ %1 m0, m0, 01110100b ; s0, s1, s3, s1
+ paddw m0, m3
+ psrlw m0, 2
+ pavgw m0, m4 ; s0+s2, s1, s3, s1+s3
+ punpcklwd m0, m0
+ pshufd m3, m0, 11111010b
+ punpckldq m0, m0
+ SWAP 0,1
+ MOV8 r0+r1*1, m1, m2
+ MOV8 r0+r1*2, m1, m2
+ MOV8 r0+r5*1, m1, m2
+ MOV8 r0+r1*4, m1, m2
+ MOV8 r4+r1*1, m3, m4
+ MOV8 r4+r1*2, m3, m4
+ MOV8 r4+r5*1, m3, m4
+ MOV8 r4+r1*4, m3, m4
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED8x8_DC pshuflw
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_top_dc_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
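+; two pshuflw/pshufhw+paddw rounds reduce each 4-pixel half of the top row to
+; its sum, so the rounding add and shift below yield both quadrant DC values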
+INIT_XMM sse2
+cglobal pred8x8_top_dc_10, 2, 4
+ sub r0, r1
+ mova m0, [r0]
+ pshuflw m1, m0, 0x4e
+ pshufhw m1, m1, 0x4e
+ paddw m0, m1
+ pshuflw m1, m0, 0xb1
+ pshufhw m1, m1, 0xb1
+ paddw m0, m1
+ lea r2, [r1*3]
+ lea r3, [r0+r1*4]
+ paddw m0, [pw_2]
+ psrlw m0, 2
+ mova [r0+r1*1], m0
+ mova [r0+r1*2], m0
+ mova [r0+r2*1], m0
+ mova [r0+r1*4], m0
+ mova [r3+r1*1], m0
+ mova [r3+r1*2], m0
+ mova [r3+r2*1], m0
+ mova [r3+r1*4], m0
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8_plane_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pred8x8_plane_10, 2, 7, 7
+ sub r0, r1
+ lea r2, [r1*3]
+ lea r3, [r0+r1*4]
+ mova m2, [r0]
+ pmaddwd m2, [pw_m32101234]
+ HADDD m2, m1
+ movd m0, [r0-4]
+ psrld m0, 14
+ psubw m2, m0 ; H
+ movd m0, [r3+r1*4-4]
+ movd m1, [r0+12]
+ paddw m0, m1
+ psllw m0, 4 ; 16*(src[7*stride-1] + src[-stride+7])
+ movzx r4d, word [r3+r1*1-2] ; src[4*stride-1]
+ movzx r5d, word [r0+r2*1-2] ; src[2*stride-1]
+ sub r4d, r5d
+ movzx r6d, word [r3+r1*2-2] ; src[5*stride-1]
+ movzx r5d, word [r0+r1*2-2] ; src[1*stride-1]
+ sub r6d, r5d
+ lea r4d, [r4+r6*2]
+ movzx r5d, word [r3+r2*1-2] ; src[6*stride-1]
+ movzx r6d, word [r0+r1*1-2] ; src[0*stride-1]
+ sub r5d, r6d
+ lea r5d, [r5*3]
+ add r4d, r5d
+ movzx r6d, word [r3+r1*4-2] ; src[7*stride-1]
+ movzx r5d, word [r0+r1*0-2] ; src[ -stride-1]
+ sub r6d, r5d
+ lea r4d, [r4+r6*4]
+ movd m3, r4d ; V
+ punpckldq m2, m3
+ pmaddwd m2, [pd_17]
+ paddd m2, [pd_16]
+ psrad m2, 5 ; b, c
+
+ mova m3, [pw_pixel_max]
+ pxor m1, m1
+ SPLATW m0, m0, 1
+ SPLATW m4, m2, 2
+ SPLATW m2, m2, 0
+ pmullw m2, [pw_m32101234] ; b
+ pmullw m5, m4, [pw_m3] ; c
+ paddw m5, [pw_16]
+ mov r2d, 8
+ add r0, r1
+.loop:
+ paddsw m6, m2, m5
+ paddsw m6, m0
+ psraw m6, 5
+ CLIPW m6, m1, m3
+ mova [r0], m6
+ paddw m5, m4
+ add r0, r1
+ dec r2d
+ jg .loop
+ RET
+
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_128_dc_10(pixel *src, int has_topleft, int has_topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pred8x8l_128_dc_10, 4, 4
+ mova m0, [pw_512] ; (1<<(BIT_DEPTH-1))
+ lea r1, [r3*3]
+ lea r2, [r0+r3*4]
+ MOV8 r0+r3*0, m0, m0
+ MOV8 r0+r3*1, m0, m0
+ MOV8 r0+r3*2, m0, m0
+ MOV8 r0+r1*1, m0, m0
+ MOV8 r2+r3*0, m0, m0
+ MOV8 r2+r3*1, m0, m0
+ MOV8 r2+r3*2, m0, m0
+ MOV8 r2+r1*1, m0, m0
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_top_dc_10(pixel *src, int has_topleft, int has_topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
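+; has_topleft/has_topright are passed as the decoder's availability masks
+; (0x8000 / 0x4000 when set), so the shr/neg arithmetic below turns each flag
+; into a small byte offset that selects either the real neighbour pixel or a
+; safe duplicate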
+%macro PRED8x8L_TOP_DC 0
+cglobal pred8x8l_top_dc_10, 4, 4, 6
+ sub r0, r3
+ mova m0, [r0]
+ shr r1d, 14
+ shr r2d, 13
+ neg r1
+ pslldq m1, m0, 2
+ psrldq m2, m0, 2
+ pinsrw m1, [r0+r1], 0
+ pinsrw m2, [r0+r2+14], 7
+ lea r1, [r3*3]
+ lea r2, [r0+r3*4]
+ PRED4x4_LOWPASS m0, m2, m1, m0
+ HADDW m0, m1
+ paddw m0, [pw_4]
+ psrlw m0, 3
+ SPLATW m0, m0, 0
+ mova [r0+r3*1], m0
+ mova [r0+r3*2], m0
+ mova [r0+r1*1], m0
+ mova [r0+r3*4], m0
+ mova [r2+r3*1], m0
+ mova [r2+r3*2], m0
+ mova [r2+r1*1], m0
+ mova [r2+r3*4], m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED8x8L_TOP_DC
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED8x8L_TOP_DC
+%endif
+
+;-------------------------------------------------------------------------------
+; void ff_pred8x8l_dc_10(pixel *src, int has_topleft, int has_topright,
+; ptrdiff_t stride)
+;-------------------------------------------------------------------------------
+; TODO: see if scalar is faster
+%macro PRED8x8L_DC 0
+cglobal pred8x8l_dc_10, 4, 6, 6
+ sub r0, r3
+ lea r4, [r0+r3*4]
+ lea r5, [r3*3]
+ mova m0, [r0+r3*2-16]
+ punpckhwd m0, [r0+r3*1-16]
+ mova m1, [r4+r3*0-16]
+ punpckhwd m1, [r0+r5*1-16]
+ punpckhdq m1, m0
+ mova m2, [r4+r3*2-16]
+ punpckhwd m2, [r4+r3*1-16]
+ mova m3, [r4+r3*4-16]
+ punpckhwd m3, [r4+r5*1-16]
+ punpckhdq m3, m2
+ punpckhqdq m3, m1
+ mova m0, [r0]
+ shr r1d, 14
+ shr r2d, 13
+ neg r1
+ pslldq m1, m0, 2
+ psrldq m2, m0, 2
+ pinsrw m1, [r0+r1], 0
+ pinsrw m2, [r0+r2+14], 7
+ not r1
+ and r1, r3
+ pslldq m4, m3, 2
+ psrldq m5, m3, 2
+ pshuflw m4, m4, 11100101b
+ pinsrw m5, [r0+r1-2], 7
+ PRED4x4_LOWPASS m3, m4, m5, m3
+ PRED4x4_LOWPASS m0, m2, m1, m0
+ paddw m0, m3
+ HADDW m0, m1
+ paddw m0, [pw_8]
+ psrlw m0, 4
+ SPLATW m0, m0
+ mova [r0+r3*1], m0
+ mova [r0+r3*2], m0
+ mova [r0+r5*1], m0
+ mova [r0+r3*4], m0
+ mova [r4+r3*1], m0
+ mova [r4+r3*2], m0
+ mova [r4+r5*1], m0
+ mova [r4+r3*4], m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED8x8L_DC
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED8x8L_DC
+%endif
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_vertical_10(pixel *src, int has_topleft, int has_topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro PRED8x8L_VERTICAL 0
+cglobal pred8x8l_vertical_10, 4, 4, 6
+ sub r0, r3
+ mova m0, [r0]
+ shr r1d, 14
+ shr r2d, 13
+ neg r1
+ pslldq m1, m0, 2
+ psrldq m2, m0, 2
+ pinsrw m1, [r0+r1], 0
+ pinsrw m2, [r0+r2+14], 7
+ lea r1, [r3*3]
+ lea r2, [r0+r3*4]
+ PRED4x4_LOWPASS m0, m2, m1, m0
+ mova [r0+r3*1], m0
+ mova [r0+r3*2], m0
+ mova [r0+r1*1], m0
+ mova [r0+r3*4], m0
+ mova [r2+r3*1], m0
+ mova [r2+r3*2], m0
+ mova [r2+r1*1], m0
+ mova [r2+r3*4], m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED8x8L_VERTICAL
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED8x8L_VERTICAL
+%endif
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_horizontal_10(pixel *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro PRED8x8L_HORIZONTAL 0
+cglobal pred8x8l_horizontal_10, 4, 4, 5
+ mova m0, [r0-16]
+ shr r1d, 14
+ dec r1
+ and r1, r3
+ sub r1, r3
+ punpckhwd m0, [r0+r1-16]
+ mova m1, [r0+r3*2-16]
+ punpckhwd m1, [r0+r3*1-16]
+ lea r2, [r0+r3*4]
+ lea r1, [r3*3]
+ punpckhdq m1, m0
+ mova m2, [r2+r3*0-16]
+ punpckhwd m2, [r0+r1-16]
+ mova m3, [r2+r3*2-16]
+ punpckhwd m3, [r2+r3*1-16]
+ punpckhdq m3, m2
+ punpckhqdq m3, m1
+ PALIGNR m4, m3, [r2+r1-16], 14, m0
+ pslldq m0, m4, 2
+ pshuflw m0, m0, 11100101b
+ PRED4x4_LOWPASS m4, m3, m0, m4
+ punpckhwd m3, m4, m4
+ punpcklwd m4, m4
+ pshufd m0, m3, 0xff
+ pshufd m1, m3, 0xaa
+ pshufd m2, m3, 0x55
+ pshufd m3, m3, 0x00
+ mova [r0+r3*0], m0
+ mova [r0+r3*1], m1
+ mova [r0+r3*2], m2
+ mova [r0+r1*1], m3
+ pshufd m0, m4, 0xff
+ pshufd m1, m4, 0xaa
+ pshufd m2, m4, 0x55
+ pshufd m3, m4, 0x00
+ mova [r2+r3*0], m0
+ mova [r2+r3*1], m1
+ mova [r2+r3*2], m2
+ mova [r2+r1*1], m3
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED8x8L_HORIZONTAL
+INIT_XMM ssse3
+PRED8x8L_HORIZONTAL
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED8x8L_HORIZONTAL
+%endif
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_down_left_10(pixel *src, int has_topleft, int has_topright,
+; ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro PRED8x8L_DOWN_LEFT 0
+cglobal pred8x8l_down_left_10, 4, 4, 7
+ sub r0, r3
+ mova m3, [r0]
+ shr r1d, 14
+ neg r1
+ shr r2d, 13
+ pslldq m1, m3, 2
+ psrldq m2, m3, 2
+ pinsrw m1, [r0+r1], 0
+ pinsrw m2, [r0+r2+14], 7
+ PRED4x4_LOWPASS m6, m2, m1, m3
+ jz .fix_tr ; flags from shr r2d
+ mova m1, [r0+16]
+ psrldq m5, m1, 2
+ PALIGNR m2, m1, m3, 14, m3
+ pshufhw m5, m5, 10100100b
+ PRED4x4_LOWPASS m1, m2, m5, m1
+.do_topright:
+ lea r1, [r3*3]
+ psrldq m5, m1, 14
+ lea r2, [r0+r3*4]
+ PALIGNR m2, m1, m6, 2, m0
+ PALIGNR m3, m1, m6, 14, m0
+ PALIGNR m5, m1, 2, m0
+ pslldq m4, m6, 2
+ PRED4x4_LOWPASS m6, m4, m2, m6
+ PRED4x4_LOWPASS m1, m3, m5, m1
+ mova [r2+r3*4], m1
+ PALIGNR m1, m6, 14, m2
+ pslldq m6, 2
+ mova [r2+r1*1], m1
+ PALIGNR m1, m6, 14, m2
+ pslldq m6, 2
+ mova [r2+r3*2], m1
+ PALIGNR m1, m6, 14, m2
+ pslldq m6, 2
+ mova [r2+r3*1], m1
+ PALIGNR m1, m6, 14, m2
+ pslldq m6, 2
+ mova [r0+r3*4], m1
+ PALIGNR m1, m6, 14, m2
+ pslldq m6, 2
+ mova [r0+r1*1], m1
+ PALIGNR m1, m6, 14, m2
+ pslldq m6, 2
+ mova [r0+r3*2], m1
+ PALIGNR m1, m6, 14, m6
+ mova [r0+r3*1], m1
+ RET
+.fix_tr:
+ punpckhwd m3, m3
+ pshufd m1, m3, 0xFF
+ jmp .do_topright
+%endmacro
+
+INIT_XMM sse2
+PRED8x8L_DOWN_LEFT
+INIT_XMM ssse3
+PRED8x8L_DOWN_LEFT
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED8x8L_DOWN_LEFT
+%endif
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_down_right_10(pixel *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro PRED8x8L_DOWN_RIGHT 0
+; the H.264 standard forbids this mode when has_topleft is false,
+; so the flag is not checked here
+cglobal pred8x8l_down_right_10, 4, 5, 8
+ sub r0, r3
+ lea r4, [r0+r3*4]
+ lea r1, [r3*3]
+ mova m0, [r0+r3*1-16]
+ punpckhwd m0, [r0+r3*0-16]
+ mova m1, [r0+r1*1-16]
+ punpckhwd m1, [r0+r3*2-16]
+ punpckhdq m1, m0
+ mova m2, [r4+r3*1-16]
+ punpckhwd m2, [r4+r3*0-16]
+ mova m3, [r4+r1*1-16]
+ punpckhwd m3, [r4+r3*2-16]
+ punpckhdq m3, m2
+ punpckhqdq m3, m1
+ mova m0, [r4+r3*4-16]
+ mova m1, [r0]
+ PALIGNR m4, m3, m0, 14, m0
+ PALIGNR m1, m3, 2, m2
+ pslldq m0, m4, 2
+ pshuflw m0, m0, 11100101b
+ PRED4x4_LOWPASS m6, m1, m4, m3
+ PRED4x4_LOWPASS m4, m3, m0, m4
+ mova m3, [r0]
+ shr r2d, 13
+ pslldq m1, m3, 2
+ psrldq m2, m3, 2
+ pinsrw m1, [r0-2], 0
+ pinsrw m2, [r0+r2+14], 7
+ PRED4x4_LOWPASS m3, m2, m1, m3
+ PALIGNR m2, m3, m6, 2, m0
+ PALIGNR m5, m3, m6, 14, m0
+ psrldq m7, m3, 2
+ PRED4x4_LOWPASS m6, m4, m2, m6
+ PRED4x4_LOWPASS m3, m5, m7, m3
+ mova [r4+r3*4], m6
+ PALIGNR m3, m6, 14, m2
+ pslldq m6, 2
+ mova [r0+r3*1], m3
+ PALIGNR m3, m6, 14, m2
+ pslldq m6, 2
+ mova [r0+r3*2], m3
+ PALIGNR m3, m6, 14, m2
+ pslldq m6, 2
+ mova [r0+r1*1], m3
+ PALIGNR m3, m6, 14, m2
+ pslldq m6, 2
+ mova [r0+r3*4], m3
+ PALIGNR m3, m6, 14, m2
+ pslldq m6, 2
+ mova [r4+r3*1], m3
+ PALIGNR m3, m6, 14, m2
+ pslldq m6, 2
+ mova [r4+r3*2], m3
+ PALIGNR m3, m6, 14, m6
+ mova [r4+r1*1], m3
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED8x8L_DOWN_RIGHT
+INIT_XMM ssse3
+PRED8x8L_DOWN_RIGHT
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED8x8L_DOWN_RIGHT
+%endif
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_vertical_right_10(pixel *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro PRED8x8L_VERTICAL_RIGHT 0
+; as with pred8x8l_down_right, has_topleft is guaranteed and is not checked
+cglobal pred8x8l_vertical_right_10, 4, 5, 7
+ sub r0, r3
+ lea r4, [r0+r3*4]
+ lea r1, [r3*3]
+ mova m0, [r0+r3*1-16]
+ punpckhwd m0, [r0+r3*0-16]
+ mova m1, [r0+r1*1-16]
+ punpckhwd m1, [r0+r3*2-16]
+ punpckhdq m1, m0
+ mova m2, [r4+r3*1-16]
+ punpckhwd m2, [r4+r3*0-16]
+ mova m3, [r4+r1*1-16]
+ punpckhwd m3, [r4+r3*2-16]
+ punpckhdq m3, m2
+ punpckhqdq m3, m1
+ mova m0, [r4+r3*4-16]
+ mova m1, [r0]
+ PALIGNR m4, m3, m0, 14, m0
+ PALIGNR m1, m3, 2, m2
+ PRED4x4_LOWPASS m3, m1, m4, m3
+ mova m2, [r0]
+ shr r2d, 13
+ pslldq m1, m2, 2
+ psrldq m5, m2, 2
+ pinsrw m1, [r0-2], 0
+ pinsrw m5, [r0+r2+14], 7
+ PRED4x4_LOWPASS m2, m5, m1, m2
+ PALIGNR m6, m2, m3, 12, m1
+ PALIGNR m5, m2, m3, 14, m0
+ PRED4x4_LOWPASS m0, m6, m2, m5
+ pavgw m2, m5
+ mova [r0+r3*2], m0
+ mova [r0+r3*1], m2
+ pslldq m6, m3, 4
+ pslldq m1, m3, 2
+ PRED4x4_LOWPASS m1, m3, m6, m1
+ PALIGNR m2, m1, 14, m4
+ mova [r0+r1*1], m2
+ pslldq m1, 2
+ PALIGNR m0, m1, 14, m3
+ mova [r0+r3*4], m0
+ pslldq m1, 2
+ PALIGNR m2, m1, 14, m4
+ mova [r4+r3*1], m2
+ pslldq m1, 2
+ PALIGNR m0, m1, 14, m3
+ mova [r4+r3*2], m0
+ pslldq m1, 2
+ PALIGNR m2, m1, 14, m4
+ mova [r4+r1*1], m2
+ pslldq m1, 2
+ PALIGNR m0, m1, 14, m1
+ mova [r4+r3*4], m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED8x8L_VERTICAL_RIGHT
+INIT_XMM ssse3
+PRED8x8L_VERTICAL_RIGHT
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED8x8L_VERTICAL_RIGHT
+%endif
+
+;-----------------------------------------------------------------------------
+; void ff_pred8x8l_horizontal_up_10(pixel *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+%macro PRED8x8L_HORIZONTAL_UP 0
+cglobal pred8x8l_horizontal_up_10, 4, 4, 6
+ mova m0, [r0+r3*0-16]
+ punpckhwd m0, [r0+r3*1-16]
+ shr r1d, 14
+ dec r1
+ and r1, r3
+ sub r1, r3
+ mova m4, [r0+r1*1-16]
+ lea r1, [r3*3]
+ lea r2, [r0+r3*4]
+ mova m1, [r0+r3*2-16]
+ punpckhwd m1, [r0+r1*1-16]
+ punpckhdq m0, m1
+ mova m2, [r2+r3*0-16]
+ punpckhwd m2, [r2+r3*1-16]
+ mova m3, [r2+r3*2-16]
+ punpckhwd m3, [r2+r1*1-16]
+ punpckhdq m2, m3
+ punpckhqdq m0, m2
+ PALIGNR m1, m0, m4, 14, m4
+ psrldq m2, m0, 2
+ pshufhw m2, m2, 10100100b
+ PRED4x4_LOWPASS m0, m1, m2, m0
+ psrldq m1, m0, 2
+ psrldq m2, m0, 4
+ pshufhw m1, m1, 10100100b
+ pshufhw m2, m2, 01010100b
+ pavgw m4, m0, m1
+ PRED4x4_LOWPASS m1, m2, m0, m1
+ punpckhwd m5, m4, m1
+ punpcklwd m4, m1
+ mova [r2+r3*0], m5
+ mova [r0+r3*0], m4
+ pshufd m0, m5, 11111001b
+ pshufd m1, m5, 11111110b
+ pshufd m2, m5, 11111111b
+ mova [r2+r3*1], m0
+ mova [r2+r3*2], m1
+ mova [r2+r1*1], m2
+ PALIGNR m2, m5, m4, 4, m0
+ PALIGNR m3, m5, m4, 8, m1
+ PALIGNR m5, m5, m4, 12, m4
+ mova [r0+r3*1], m2
+ mova [r0+r3*2], m3
+ mova [r0+r1*1], m5
+ RET
+%endmacro
+
+INIT_XMM sse2
+PRED8x8L_HORIZONTAL_UP
+INIT_XMM ssse3
+PRED8x8L_HORIZONTAL_UP
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PRED8x8L_HORIZONTAL_UP
+%endif
+
+
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_vertical_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
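+; MOV16 stores one 16-pixel (32-byte) row from two XMM registers; the trailing
+; register arguments are accepted but unused at this vector width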
+%macro MOV16 3-5
+ mova [%1+ 0], %2
+ mova [%1+mmsize], %3
+%endmacro
+
+INIT_XMM sse2
+cglobal pred16x16_vertical_10, 2, 3
+ sub r0, r1
+ mov r2d, 8
+ mova m0, [r0+ 0]
+ mova m1, [r0+mmsize]
+.loop:
+ MOV16 r0+r1*1, m0, m1, m2, m3
+ MOV16 r0+r1*2, m0, m1, m2, m3
+ lea r0, [r0+r1*2]
+ dec r2d
+ jg .loop
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_horizontal_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pred16x16_horizontal_10, 2, 3
+ mov r2d, 8
+.vloop:
+ movd m0, [r0+r1*0-4]
+ movd m1, [r0+r1*1-4]
+ SPLATW m0, m0, 1
+ SPLATW m1, m1, 1
+ MOV16 r0+r1*0, m0, m0, m0, m0
+ MOV16 r0+r1*1, m1, m1, m1, m1
+ lea r0, [r0+r1*2]
+ dec r2d
+ jg .vloop
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_dc_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pred16x16_dc_10, 2, 6
+ mov r5, r0
+ sub r0, r1
+ mova m0, [r0+0]
+ paddw m0, [r0+mmsize]
+ HADDW m0, m2
+
+ lea r0, [r0+r1-2]
+ movzx r3d, word [r0]
+ movzx r4d, word [r0+r1]
+%rep 7
+ lea r0, [r0+r1*2]
+ movzx r2d, word [r0]
+ add r3d, r2d
+ movzx r2d, word [r0+r1]
+ add r4d, r2d
+%endrep
+ lea r3d, [r3+r4+16]
+
+ movd m1, r3d
+ paddw m0, m1
+ psrlw m0, 5
+ SPLATW m0, m0
+ mov r3d, 8
+.loop:
+ MOV16 r5+r1*0, m0, m0, m0, m0
+ MOV16 r5+r1*1, m0, m0, m0, m0
+ lea r5, [r5+r1*2]
+ dec r3d
+ jg .loop
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_top_dc_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pred16x16_top_dc_10, 2, 3
+ sub r0, r1
+ mova m0, [r0+0]
+ paddw m0, [r0+mmsize]
+ HADDW m0, m2
+
+ SPLATW m0, m0
+ paddw m0, [pw_8]
+ psrlw m0, 4
+ mov r2d, 8
+.loop:
+ MOV16 r0+r1*1, m0, m0, m0, m0
+ MOV16 r0+r1*2, m0, m0, m0, m0
+ lea r0, [r0+r1*2]
+ dec r2d
+ jg .loop
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_left_dc_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pred16x16_left_dc_10, 2, 6
+ mov r5, r0
+
+ sub r0, 2
+ movzx r3d, word [r0]
+ movzx r4d, word [r0+r1]
+%rep 7
+ lea r0, [r0+r1*2]
+ movzx r2d, word [r0]
+ add r3d, r2d
+ movzx r2d, word [r0+r1]
+ add r4d, r2d
+%endrep
+ lea r3d, [r3+r4+8]
+ shr r3d, 4
+
+ movd m0, r3d
+ SPLATW m0, m0
+ mov r3d, 8
+.loop:
+ MOV16 r5+r1*0, m0, m0, m0, m0
+ MOV16 r5+r1*1, m0, m0, m0, m0
+ lea r5, [r5+r1*2]
+ dec r3d
+ jg .loop
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_pred16x16_128_dc_10(pixel *src, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pred16x16_128_dc_10, 2,3
+ mova m0, [pw_512]
+ mov r2d, 8
+.loop:
+ MOV16 r0+r1*0, m0, m0, m0, m0
+ MOV16 r0+r1*1, m0, m0, m0, m0
+ lea r0, [r0+r1*2]
+ dec r2d
+ jg .loop
+ RET
diff --git a/media/ffvpx/libavcodec/x86/h264_intrapred_init.c b/media/ffvpx/libavcodec/x86/h264_intrapred_init.c
new file mode 100644
index 0000000000..ee46927a24
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/h264_intrapred_init.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/codec_id.h"
+#include "libavcodec/h264pred.h"
+
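+/* The PRED* macros below only declare prototypes; the implementations live in
+ * h264_intrapred.asm and h264_intrapred_10bit.asm. */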
+#define PRED4x4(TYPE, DEPTH, OPT) \
+void ff_pred4x4_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
+ const uint8_t *topright, \
+ ptrdiff_t stride);
+
+PRED4x4(dc, 10, mmxext)
+PRED4x4(down_left, 10, sse2)
+PRED4x4(down_left, 10, avx)
+PRED4x4(down_right, 10, sse2)
+PRED4x4(down_right, 10, ssse3)
+PRED4x4(down_right, 10, avx)
+PRED4x4(vertical_left, 10, sse2)
+PRED4x4(vertical_left, 10, avx)
+PRED4x4(vertical_right, 10, sse2)
+PRED4x4(vertical_right, 10, ssse3)
+PRED4x4(vertical_right, 10, avx)
+PRED4x4(horizontal_up, 10, mmxext)
+PRED4x4(horizontal_down, 10, sse2)
+PRED4x4(horizontal_down, 10, ssse3)
+PRED4x4(horizontal_down, 10, avx)
+
+#define PRED8x8(TYPE, DEPTH, OPT) \
+void ff_pred8x8_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
+ ptrdiff_t stride);
+
+PRED8x8(dc, 10, sse2)
+PRED8x8(top_dc, 10, sse2)
+PRED8x8(plane, 10, sse2)
+PRED8x8(vertical, 10, sse2)
+PRED8x8(horizontal, 10, sse2)
+
+#define PRED8x8L(TYPE, DEPTH, OPT)\
+void ff_pred8x8l_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
+ int has_topleft, \
+ int has_topright, \
+ ptrdiff_t stride);
+
+PRED8x8L(dc, 10, sse2)
+PRED8x8L(dc, 10, avx)
+PRED8x8L(128_dc, 10, sse2)
+PRED8x8L(top_dc, 10, sse2)
+PRED8x8L(top_dc, 10, avx)
+PRED8x8L(vertical, 10, sse2)
+PRED8x8L(vertical, 10, avx)
+PRED8x8L(horizontal, 10, sse2)
+PRED8x8L(horizontal, 10, ssse3)
+PRED8x8L(horizontal, 10, avx)
+PRED8x8L(down_left, 10, sse2)
+PRED8x8L(down_left, 10, ssse3)
+PRED8x8L(down_left, 10, avx)
+PRED8x8L(down_right, 10, sse2)
+PRED8x8L(down_right, 10, ssse3)
+PRED8x8L(down_right, 10, avx)
+PRED8x8L(vertical_right, 10, sse2)
+PRED8x8L(vertical_right, 10, ssse3)
+PRED8x8L(vertical_right, 10, avx)
+PRED8x8L(horizontal_up, 10, sse2)
+PRED8x8L(horizontal_up, 10, ssse3)
+PRED8x8L(horizontal_up, 10, avx)
+
+#define PRED16x16(TYPE, DEPTH, OPT)\
+void ff_pred16x16_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
+ ptrdiff_t stride);
+
+PRED16x16(dc, 10, sse2)
+PRED16x16(top_dc, 10, sse2)
+PRED16x16(128_dc, 10, sse2)
+PRED16x16(left_dc, 10, sse2)
+PRED16x16(vertical, 10, sse2)
+PRED16x16(horizontal, 10, sse2)
+
+/* 8-bit versions */
+PRED16x16(vertical, 8, sse)
+PRED16x16(horizontal, 8, mmxext)
+PRED16x16(horizontal, 8, ssse3)
+PRED16x16(dc, 8, sse2)
+PRED16x16(dc, 8, ssse3)
+PRED16x16(plane_h264, 8, sse2)
+PRED16x16(plane_h264, 8, ssse3)
+PRED16x16(plane_rv40, 8, sse2)
+PRED16x16(plane_rv40, 8, ssse3)
+PRED16x16(plane_svq3, 8, sse2)
+PRED16x16(plane_svq3, 8, ssse3)
+PRED16x16(tm_vp8, 8, sse2)
+PRED16x16(tm_vp8, 8, avx2)
+
+PRED8x8(top_dc, 8, mmxext)
+PRED8x8(dc_rv40, 8, mmxext)
+PRED8x8(dc, 8, mmxext)
+PRED8x8(vertical, 8, mmx)
+PRED8x8(horizontal, 8, mmxext)
+PRED8x8(horizontal, 8, ssse3)
+PRED8x8(plane, 8, sse2)
+PRED8x8(plane, 8, ssse3)
+PRED8x8(tm_vp8, 8, sse2)
+PRED8x8(tm_vp8, 8, ssse3)
+
+PRED8x8L(top_dc, 8, mmxext)
+PRED8x8L(top_dc, 8, ssse3)
+PRED8x8L(dc, 8, mmxext)
+PRED8x8L(dc, 8, ssse3)
+PRED8x8L(horizontal, 8, mmxext)
+PRED8x8L(horizontal, 8, ssse3)
+PRED8x8L(vertical, 8, mmxext)
+PRED8x8L(vertical, 8, ssse3)
+PRED8x8L(down_left, 8, sse2)
+PRED8x8L(down_left, 8, ssse3)
+PRED8x8L(down_right, 8, sse2)
+PRED8x8L(down_right, 8, ssse3)
+PRED8x8L(vertical_right, 8, sse2)
+PRED8x8L(vertical_right, 8, ssse3)
+PRED8x8L(vertical_left, 8, sse2)
+PRED8x8L(vertical_left, 8, ssse3)
+PRED8x8L(horizontal_up, 8, mmxext)
+PRED8x8L(horizontal_up, 8, ssse3)
+PRED8x8L(horizontal_down, 8, sse2)
+PRED8x8L(horizontal_down, 8, ssse3)
+
+PRED4x4(dc, 8, mmxext)
+PRED4x4(down_left, 8, mmxext)
+PRED4x4(down_right, 8, mmxext)
+PRED4x4(vertical_left, 8, mmxext)
+PRED4x4(vertical_right, 8, mmxext)
+PRED4x4(horizontal_up, 8, mmxext)
+PRED4x4(horizontal_down, 8, mmxext)
+PRED4x4(tm_vp8, 8, mmxext)
+PRED4x4(tm_vp8, 8, ssse3)
+PRED4x4(vertical_vp8, 8, mmxext)
+
+av_cold void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
+ const int bit_depth,
+ const int chroma_format_idc)
+{
+ int cpu_flags = av_get_cpu_flags();
+
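+ /* Branches for stricter CPU-flag levels run later and overwrite earlier
+ * assignments, so the fastest available implementation wins. */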
+ if (bit_depth == 8) {
+ if (EXTERNAL_MMX(cpu_flags)) {
+ if (chroma_format_idc <= 1) {
+ h->pred8x8 [VERT_PRED8x8 ] = ff_pred8x8_vertical_8_mmx;
+ }
+ }
+
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_8_mmxext;
+ if (chroma_format_idc <= 1)
+ h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_mmxext;
+ h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_8_mmxext;
+ h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_8_mmxext;
+ h->pred8x8l [HOR_PRED ] = ff_pred8x8l_horizontal_8_mmxext;
+ h->pred8x8l [VERT_PRED ] = ff_pred8x8l_vertical_8_mmxext;
+ h->pred8x8l [HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_8_mmxext;
+ h->pred4x4 [DIAG_DOWN_RIGHT_PRED ] = ff_pred4x4_down_right_8_mmxext;
+ h->pred4x4 [VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_8_mmxext;
+ h->pred4x4 [HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_8_mmxext;
+ h->pred4x4 [DC_PRED ] = ff_pred4x4_dc_8_mmxext;
+ if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8 ||
+ codec_id == AV_CODEC_ID_H264) {
+ h->pred4x4 [DIAG_DOWN_LEFT_PRED] = ff_pred4x4_down_left_8_mmxext;
+ }
+ if (codec_id == AV_CODEC_ID_SVQ3 || codec_id == AV_CODEC_ID_H264) {
+ h->pred4x4 [VERT_LEFT_PRED ] = ff_pred4x4_vertical_left_8_mmxext;
+ }
+ if (codec_id != AV_CODEC_ID_RV40) {
+ h->pred4x4 [HOR_UP_PRED ] = ff_pred4x4_horizontal_up_8_mmxext;
+ }
+ if (codec_id == AV_CODEC_ID_SVQ3 || codec_id == AV_CODEC_ID_H264) {
+ if (chroma_format_idc <= 1) {
+ h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_8_mmxext;
+ h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_8_mmxext;
+ }
+ }
+ if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {
+ h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_rv40_8_mmxext;
+ h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_mmxext;
+ h->pred4x4 [VERT_PRED ] = ff_pred4x4_vertical_vp8_8_mmxext;
+ }
+ }
+
+ if (EXTERNAL_SSE(cpu_flags)) {
+ h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_8_sse;
+ }
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_8_sse2;
+ h->pred8x8l [DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_8_sse2;
+ h->pred8x8l [DIAG_DOWN_RIGHT_PRED ] = ff_pred8x8l_down_right_8_sse2;
+ h->pred8x8l [VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_8_sse2;
+ h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_8_sse2;
+ h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_8_sse2;
+ if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {
+ h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_sse2;
+ h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_sse2;
+ } else {
+ if (chroma_format_idc <= 1)
+ h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_sse2;
+ if (codec_id == AV_CODEC_ID_SVQ3) {
+ h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_8_sse2;
+ } else if (codec_id == AV_CODEC_ID_RV40) {
+ h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_rv40_8_sse2;
+ } else {
+ h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_h264_8_sse2;
+ }
+ }
+ }
+
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_8_ssse3;
+ h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_8_ssse3;
+ if (chroma_format_idc <= 1)
+ h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_ssse3;
+ h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_8_ssse3;
+ h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_8_ssse3;
+ h->pred8x8l [HOR_PRED ] = ff_pred8x8l_horizontal_8_ssse3;
+ h->pred8x8l [VERT_PRED ] = ff_pred8x8l_vertical_8_ssse3;
+ h->pred8x8l [DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_8_ssse3;
+ h->pred8x8l [DIAG_DOWN_RIGHT_PRED ] = ff_pred8x8l_down_right_8_ssse3;
+ h->pred8x8l [VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_8_ssse3;
+ h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_8_ssse3;
+ h->pred8x8l [HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_8_ssse3;
+ h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_8_ssse3;
+ if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {
+ h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_ssse3;
+ h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_ssse3;
+ } else {
+ if (chroma_format_idc <= 1)
+ h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_ssse3;
+ if (codec_id == AV_CODEC_ID_SVQ3) {
+ h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_8_ssse3;
+ } else if (codec_id == AV_CODEC_ID_RV40) {
+ h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_rv40_8_ssse3;
+ } else {
+ h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_h264_8_ssse3;
+ }
+ }
+ }
+
+ if (EXTERNAL_AVX2(cpu_flags)) {
+ if (codec_id == AV_CODEC_ID_VP8) {
+ h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_avx2;
+ }
+ }
+ } else if (bit_depth == 10) {
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ h->pred4x4[DC_PRED ] = ff_pred4x4_dc_10_mmxext;
+ h->pred4x4[HOR_UP_PRED ] = ff_pred4x4_horizontal_up_10_mmxext;
+ }
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ h->pred4x4[DIAG_DOWN_LEFT_PRED ] = ff_pred4x4_down_left_10_sse2;
+ h->pred4x4[DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_10_sse2;
+ h->pred4x4[VERT_LEFT_PRED ] = ff_pred4x4_vertical_left_10_sse2;
+ h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_sse2;
+ h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_sse2;
+
+ if (chroma_format_idc <= 1) {
+ h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_sse2;
+ h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_10_sse2;
+ h->pred8x8[PLANE_PRED8x8 ] = ff_pred8x8_plane_10_sse2;
+ h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vertical_10_sse2;
+ h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_10_sse2;
+ }
+
+ h->pred8x8l[VERT_PRED ] = ff_pred8x8l_vertical_10_sse2;
+ h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_sse2;
+ h->pred8x8l[DC_PRED ] = ff_pred8x8l_dc_10_sse2;
+ h->pred8x8l[DC_128_PRED ] = ff_pred8x8l_128_dc_10_sse2;
+ h->pred8x8l[TOP_DC_PRED ] = ff_pred8x8l_top_dc_10_sse2;
+ h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_sse2;
+ h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_sse2;
+ h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_sse2;
+ h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_sse2;
+
+ h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_10_sse2;
+ h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_10_sse2;
+ h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_10_sse2;
+ h->pred16x16[LEFT_DC_PRED8x8 ] = ff_pred16x16_left_dc_10_sse2;
+ h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_10_sse2;
+ h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_10_sse2;
+ }
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ h->pred4x4[DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_10_ssse3;
+ h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_ssse3;
+ h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_ssse3;
+
+ h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_ssse3;
+ h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_ssse3;
+ h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_ssse3;
+ h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_ssse3;
+ h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_ssse3;
+ }
+ if (EXTERNAL_AVX(cpu_flags)) {
+ h->pred4x4[DIAG_DOWN_LEFT_PRED ] = ff_pred4x4_down_left_10_avx;
+ h->pred4x4[DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_10_avx;
+ h->pred4x4[VERT_LEFT_PRED ] = ff_pred4x4_vertical_left_10_avx;
+ h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_avx;
+ h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_avx;
+
+ h->pred8x8l[VERT_PRED ] = ff_pred8x8l_vertical_10_avx;
+ h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_avx;
+ h->pred8x8l[DC_PRED ] = ff_pred8x8l_dc_10_avx;
+ h->pred8x8l[TOP_DC_PRED ] = ff_pred8x8l_top_dc_10_avx;
+ h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_avx;
+ h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_avx;
+ h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_avx;
+ h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_avx;
+ }
+ }
+}
diff --git a/media/ffvpx/libavcodec/x86/idctdsp.asm b/media/ffvpx/libavcodec/x86/idctdsp.asm
new file mode 100644
index 0000000000..1cfdb5419d
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/idctdsp.asm
@@ -0,0 +1,112 @@
+;******************************************************************************
+;* SIMD-optimized IDCT-related routines
+;* Copyright (c) 2008 Loren Merritt
+;* Copyright (c) 2003-2013 Michael Niedermayer
+;* Copyright (c) 2013 Daniel Kang
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+cextern pb_80
+
+SECTION .text
+
+;--------------------------------------------------------------------------
+; void ff_put_signed_pixels_clamped(const int16_t *block, uint8_t *pixels,
+; ptrdiff_t line_size)
+;--------------------------------------------------------------------------
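+; packsswb clamps the signed coefficients to [-128,127]; adding pb_80 (0x80)
+; then re-centres them into the unsigned [0,255] pixel range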
+
+%macro PUT_SIGNED_PIXELS_CLAMPED_HALF 1
+ mova m1, [blockq+mmsize*0+%1]
+ mova m2, [blockq+mmsize*2+%1]
+ packsswb m1, [blockq+mmsize*1+%1]
+ packsswb m2, [blockq+mmsize*3+%1]
+ paddb m1, m0
+ paddb m2, m0
+ movq [pixelsq+lsizeq*0], m1
+ movhps [pixelsq+lsizeq*1], m1
+ movq [pixelsq+lsizeq*2], m2
+ movhps [pixelsq+lsize3q ], m2
+%endmacro
+
+INIT_XMM sse2
+cglobal put_signed_pixels_clamped, 3, 4, 3, block, pixels, lsize, lsize3
+ mova m0, [pb_80]
+ lea lsize3q, [lsizeq*3]
+ PUT_SIGNED_PIXELS_CLAMPED_HALF 0
+ lea pixelsq, [pixelsq+lsizeq*4]
+ PUT_SIGNED_PIXELS_CLAMPED_HALF 64
+ RET
+
+;--------------------------------------------------------------------------
+; void ff_put_pixels_clamped(const int16_t *block, uint8_t *pixels,
+; ptrdiff_t line_size);
+;--------------------------------------------------------------------------
+; %1 = block offset
+%macro PUT_PIXELS_CLAMPED_HALF 1
+ mova m0, [blockq+mmsize*0+%1]
+ mova m1, [blockq+mmsize*2+%1]
+ packuswb m0, [blockq+mmsize*1+%1]
+ packuswb m1, [blockq+mmsize*3+%1]
+ movq [pixelsq], m0
+ movhps [lsizeq+pixelsq], m0
+ movq [2*lsizeq+pixelsq], m1
+ movhps [lsize3q+pixelsq], m1
+%endmacro
+
+INIT_XMM sse2
+cglobal put_pixels_clamped, 3, 4, 2, block, pixels, lsize, lsize3
+ lea lsize3q, [lsizeq*3]
+ PUT_PIXELS_CLAMPED_HALF 0
+ lea pixelsq, [pixelsq+lsizeq*4]
+ PUT_PIXELS_CLAMPED_HALF 64
+ RET
+
+;--------------------------------------------------------------------------
+; void ff_add_pixels_clamped(const int16_t *block, uint8_t *pixels,
+; ptrdiff_t line_size);
+;--------------------------------------------------------------------------
+; %1 = block offset
+%macro ADD_PIXELS_CLAMPED 1
+ mova m0, [blockq+mmsize*0+%1]
+ mova m1, [blockq+mmsize*1+%1]
+ movq m2, [pixelsq]
+ movq m3, [pixelsq+lsizeq]
+ punpcklbw m2, m4
+ punpcklbw m3, m4
+ paddsw m0, m2
+ paddsw m1, m3
+ packuswb m0, m1
+ movq [pixelsq], m0
+ movhps [pixelsq+lsizeq], m0
+%endmacro
+
+INIT_XMM sse2
+cglobal add_pixels_clamped, 3, 3, 5, block, pixels, lsize
+ pxor m4, m4
+ ADD_PIXELS_CLAMPED 0
+ lea pixelsq, [pixelsq+lsizeq*2]
+ ADD_PIXELS_CLAMPED 32
+ lea pixelsq, [pixelsq+lsizeq*2]
+ ADD_PIXELS_CLAMPED 64
+ lea pixelsq, [pixelsq+lsizeq*2]
+ ADD_PIXELS_CLAMPED 96
+ RET
diff --git a/media/ffvpx/libavcodec/x86/idctdsp.h b/media/ffvpx/libavcodec/x86/idctdsp.h
new file mode 100644
index 0000000000..738e4e36e4
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/idctdsp.h
@@ -0,0 +1,33 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_IDCTDSP_H
+#define AVCODEC_X86_IDCTDSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+void ff_add_pixels_clamped_sse2(const int16_t *block, uint8_t *pixels,
+ ptrdiff_t line_size);
+void ff_put_pixels_clamped_sse2(const int16_t *block, uint8_t *pixels,
+ ptrdiff_t line_size);
+void ff_put_signed_pixels_clamped_sse2(const int16_t *block, uint8_t *pixels,
+ ptrdiff_t line_size);
+
+
+#endif /* AVCODEC_X86_IDCTDSP_H */
diff --git a/media/ffvpx/libavcodec/x86/idctdsp_init.c b/media/ffvpx/libavcodec/x86/idctdsp_init.c
new file mode 100644
index 0000000000..f28a1ad744
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/idctdsp_init.c
@@ -0,0 +1,159 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/idctdsp.h"
+#include "idctdsp.h"
+#include "simple_idct.h"
+
+/* Input permutation for the simple_idct_mmx */
+static const uint8_t simple_mmx_permutation[64] = {
+ 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
+ 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
+ 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
+ 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
+ 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
+ 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
+ 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
+ 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
+};
+
+static const uint8_t idct_sse2_row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };
+
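+/* Returns 1 when the permutation type is handled here; a return of 0 tells the
+ * caller to fall back to the generic initialisation. */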
+av_cold int ff_init_scantable_permutation_x86(uint8_t *idct_permutation,
+ enum idct_permutation_type perm_type)
+{
+ int i;
+
+ switch (perm_type) {
+ case FF_IDCT_PERM_SIMPLE:
+ for (i = 0; i < 64; i++)
+ idct_permutation[i] = simple_mmx_permutation[i];
+ return 1;
+ case FF_IDCT_PERM_SSE2:
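+ /* keep the row (i & 0x38) and interleave even/odd columns within it */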
+ for (i = 0; i < 64; i++)
+ idct_permutation[i] = (i & 0x38) | idct_sse2_row_perm[i & 7];
+ return 1;
+ }
+
+ return 0;
+}
+
+av_cold void ff_idctdsp_init_x86(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+#if ARCH_X86_32
+ if (EXTERNAL_MMX(cpu_flags)) {
+ if (!high_bit_depth &&
+ avctx->lowres == 0 &&
+ (avctx->idct_algo == FF_IDCT_AUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEMMX)) {
+ c->idct = ff_simple_idct_mmx;
+ }
+ }
+#endif
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_sse2;
+ c->put_pixels_clamped = ff_put_pixels_clamped_sse2;
+ c->add_pixels_clamped = ff_add_pixels_clamped_sse2;
+
+#if ARCH_X86_32
+ if (!high_bit_depth &&
+ avctx->lowres == 0 &&
+ (avctx->idct_algo == FF_IDCT_AUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEMMX)) {
+ c->idct_put = ff_simple_idct_put_sse2;
+ c->idct_add = ff_simple_idct_add_sse2;
+ c->perm_type = FF_IDCT_PERM_SIMPLE;
+ }
+#endif
+
+ if (ARCH_X86_64 &&
+ !high_bit_depth &&
+ avctx->lowres == 0 &&
+ (avctx->idct_algo == FF_IDCT_AUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEMMX ||
+ avctx->idct_algo == FF_IDCT_SIMPLE)) {
+ c->idct = ff_simple_idct8_sse2;
+ c->idct_put = ff_simple_idct8_put_sse2;
+ c->idct_add = ff_simple_idct8_add_sse2;
+ c->perm_type = FF_IDCT_PERM_TRANSPOSE;
+ }
+ }
+
+ if (ARCH_X86_64 && avctx->lowres == 0) {
+ if (EXTERNAL_AVX(cpu_flags) &&
+ !high_bit_depth &&
+ (avctx->idct_algo == FF_IDCT_AUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEMMX ||
+ avctx->idct_algo == FF_IDCT_SIMPLE)) {
+ c->idct = ff_simple_idct8_avx;
+ c->idct_put = ff_simple_idct8_put_avx;
+ c->idct_add = ff_simple_idct8_add_avx;
+ c->perm_type = FF_IDCT_PERM_TRANSPOSE;
+ }
+
+ if (avctx->bits_per_raw_sample == 10 &&
+ avctx->codec_id != AV_CODEC_ID_MPEG4 &&
+ (avctx->idct_algo == FF_IDCT_AUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLE)) {
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ c->idct_put = ff_simple_idct10_put_sse2;
+ c->idct_add = NULL;
+ c->idct = ff_simple_idct10_sse2;
+ c->perm_type = FF_IDCT_PERM_TRANSPOSE;
+
+ }
+ if (EXTERNAL_AVX(cpu_flags)) {
+ c->idct_put = ff_simple_idct10_put_avx;
+ c->idct_add = NULL;
+ c->idct = ff_simple_idct10_avx;
+ c->perm_type = FF_IDCT_PERM_TRANSPOSE;
+ }
+ }
+
+ if (avctx->bits_per_raw_sample == 12 &&
+ (avctx->idct_algo == FF_IDCT_AUTO ||
+ avctx->idct_algo == FF_IDCT_SIMPLEMMX)) {
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ c->idct_put = ff_simple_idct12_put_sse2;
+ c->idct_add = NULL;
+ c->idct = ff_simple_idct12_sse2;
+ c->perm_type = FF_IDCT_PERM_TRANSPOSE;
+ }
+ if (EXTERNAL_AVX(cpu_flags)) {
+ c->idct_put = ff_simple_idct12_put_avx;
+ c->idct_add = NULL;
+ c->idct = ff_simple_idct12_avx;
+ c->perm_type = FF_IDCT_PERM_TRANSPOSE;
+ }
+ }
+ }
+}
diff --git a/media/ffvpx/libavcodec/x86/imdct36.asm b/media/ffvpx/libavcodec/x86/imdct36.asm
new file mode 100644
index 0000000000..888c6bf4d6
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/imdct36.asm
@@ -0,0 +1,736 @@
+;******************************************************************************
+;* 36-point SSE-optimized IMDCT transform
+;* Copyright (c) 2011 Vitor Sessak
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+ps_mask: dd 0, ~0, ~0, ~0
+ps_mask2: dd 0, ~0, 0, ~0
+ps_mask3: dd 0, 0, 0, ~0
+ps_mask4: dd 0, ~0, 0, 0
+
+ps_val1: dd -0.5, -0.5, -0.8660254038, -0.8660254038
+ps_val2: dd 1.0, 1.0, 0.8660254038, 0.8660254038
+ps_val3: dd 0.1736481777, 0.1736481777, 0.3420201433, 0.3420201433
+ps_val4: dd -0.7660444431, -0.7660444431, 0.8660254038, 0.8660254038
+ps_val5: dd -0.9396926208, -0.9396926208, -0.9848077530, -0.9848077530
+ps_val6: dd 0.5, 0.5, -0.6427876097, -0.6427876097
+ps_val7: dd 1.0, 1.0, -0.6427876097, -0.6427876097
+
+ps_p1p1m1m1: dd 0, 0, 0x80000000, 0x80000000
+ps_p1m1p1m1: dd 0, 0x80000000, 0, 0x80000000
+
+ps_cosh: dd 1.0, 0.50190991877167369479, 1.0, 5.73685662283492756461
+ dd 1.0, 0.51763809020504152469, 1.0, 1.93185165257813657349
+ dd 1.0, 0.55168895948124587824, -1.0, -1.18310079157624925896
+ dd 1.0, 0.61038729438072803416, -1.0, -0.87172339781054900991
+ dd 1.0, 0.70710678118654752439, 0.0, 0.0
+
+ps_cosh_sse3: dd 1.0, -0.50190991877167369479, 1.0, -5.73685662283492756461
+ dd 1.0, -0.51763809020504152469, 1.0, -1.93185165257813657349
+ dd 1.0, -0.55168895948124587824, -1.0, 1.18310079157624925896
+ dd 1.0, -0.61038729438072803416, -1.0, 0.87172339781054900991
+ dd 1.0, -0.70710678118654752439, 0.0, 0.0
+
+costabs: times 4 dd 0.98480773
+ times 4 dd 0.93969262
+ times 4 dd 0.86602539
+ times 4 dd -0.76604444
+ times 4 dd -0.64278764
+ times 4 dd 0.50000000
+ times 4 dd -0.50000000
+ times 4 dd -0.34202015
+ times 4 dd -0.17364818
+ times 4 dd 0.50190992
+ times 4 dd 0.51763808
+ times 4 dd 0.55168896
+ times 4 dd 0.61038726
+ times 4 dd 0.70710677
+ times 4 dd 0.87172341
+ times 4 dd 1.18310082
+ times 4 dd 1.93185163
+ times 4 dd 5.73685646
+
+%define SBLIMIT 32
+SECTION .text
+
+%macro PSHUFD 3
+%if cpuflag(sse2) && notcpuflag(avx)
+ pshufd %1, %2, %3
+%else
+ shufps %1, %2, %2, %3
+%endif
+%endmacro
+
+; input %2={x1,x2,x3,x4}, %3={y1,y2,y3,y4}
+; output %1={x3,x4,y1,y2}
+%macro BUILDINVHIGHLOW 3
+%if cpuflag(avx)
+ shufps %1, %2, %3, 0x4e
+%else
+ movlhps %1, %3
+ movhlps %1, %2
+%endif
+%endmacro
+
+; input %2={x1,x2,x3,x4}, %3={y1,y2,y3,y4}
+; output %1={x4,y1,y2,y3}
+%macro ROTLEFT 3
+%if cpuflag(ssse3)
+ palignr %1, %3, %2, 12
+%else
+ BUILDINVHIGHLOW %1, %2, %3
+ shufps %1, %1, %3, 0x99
+%endif
+%endmacro
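+; Worked example for the two helpers above (illustrative values only):
+; with %2 = {1,2,3,4} and %3 = {5,6,7,8}, BUILDINVHIGHLOW yields {3,4,5,6}
+; and ROTLEFT yields {4,5,6,7}, per the input/output comments.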
+
+%macro INVERTHL 2
+%if cpuflag(sse2)
+ PSHUFD %1, %2, 0x4e
+%else
+ movhlps %1, %2
+ movlhps %1, %2
+%endif
+%endmacro
+
+%macro BUTTERF 3
+ INVERTHL %2, %1
+ xorps %1, [ps_p1p1m1m1]
+ addps %1, %2
+%if cpuflag(sse3)
+ mulps %1, %1, [ps_cosh_sse3 + %3]
+ PSHUFD %2, %1, 0xb1
+ addsubps %1, %1, %2
+%else
+ mulps %1, [ps_cosh + %3]
+ PSHUFD %2, %1, 0xb1
+ xorps %1, [ps_p1m1p1m1]
+ addps %1, %2
+%endif
+%endmacro
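+; Sketch of the equivalence between the two BUTTERF paths above (inferred
+; from the constants): both produce {a0+a1, a0-a1, a2+a3, a2-a3} from the
+; scaled vector; the SSE3 path folds the sign flip into ps_cosh_sse3 (its
+; odd lanes are negated relative to ps_cosh) and lets addsubps do the
+; alternating subtract/add instead of the xorps + addps pair.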
+
+%macro BUTTERF2 3
+%if cpuflag(sse3)
+ mulps %1, %1, [ps_cosh_sse3 + %3]
+ PSHUFD %2, %1, 0xe1
+ addsubps %1, %1, %2
+%else
+ mulps %1, [ps_cosh + %3]
+ PSHUFD %2, %1, 0xe1
+ xorps %1, [ps_p1m1p1m1]
+ addps %1, %2
+%endif
+%endmacro
+
+%macro STORE 4
+%if cpuflag(sse4)
+ movss [%3 ], %1
+ extractps dword [%3 + %4], %1, 1
+ extractps dword [%3 + 2*%4], %1, 2
+ extractps dword [%3 + 3*%4], %1, 3
+%else
+ movhlps %2, %1
+ movss [%3 ], %1
+ movss [%3 + 2*%4], %2
+ shufps %1, %1, 0xb1
+ movss [%3 + %4], %1
+ movhlps %2, %1
+ movss [%3 + 3*%4], %2
+%endif
+%endmacro
+
+%macro LOAD 4
+ movlps %1, [%3 ]
+ movhps %1, [%3 + %4]
+ movlps %2, [%3 + 2*%4]
+ movhps %2, [%3 + 3*%4]
+ shufps %1, %2, 0x88
+%endmacro
+
+%macro LOADA64 2
+%if cpuflag(avx)
+ movu %1, [%2]
+%else
+ movlps %1, [%2]
+ movhps %1, [%2 + 8]
+%endif
+%endmacro
+
+%macro DEFINE_IMDCT 0
+cglobal imdct36_float, 4,4,9, out, buf, in, win
+
+ ; for(i=17;i>=1;i--) in[i] += in[i-1];
+ LOADA64 m0, inq
+ LOADA64 m1, inq + 16
+
+ ROTLEFT m5, m0, m1
+
+ PSHUFD m6, m0, 0x93
+ andps m6, m6, [ps_mask]
+ addps m0, m0, m6
+
+ LOADA64 m2, inq + 32
+
+ ROTLEFT m7, m1, m2
+
+ addps m1, m1, m5
+ LOADA64 m3, inq + 48
+
+ ROTLEFT m5, m2, m3
+
+ xorps m4, m4, m4
+ movlps m4, [inq+64]
+ BUILDINVHIGHLOW m6, m3, m4
+ shufps m6, m6, m4, 0xa9
+
+ addps m4, m4, m6
+ addps m2, m2, m7
+ addps m3, m3, m5
+
+ ; for(i=17;i>=3;i-=2) in[i] += in[i-2];
+ movlhps m5, m5, m0
+ andps m5, m5, [ps_mask3]
+
+ BUILDINVHIGHLOW m7, m0, m1
+ andps m7, m7, [ps_mask2]
+
+ addps m0, m0, m5
+
+ BUILDINVHIGHLOW m6, m1, m2
+ andps m6, m6, [ps_mask2]
+
+ addps m1, m1, m7
+
+ BUILDINVHIGHLOW m7, m2, m3
+ andps m7, m7, [ps_mask2]
+
+ addps m2, m2, m6
+
+ movhlps m6, m6, m3
+ andps m6, m6, [ps_mask4]
+
+ addps m3, m3, m7
+ addps m4, m4, m6
+
+ ; Populate tmp[]
+ movlhps m6, m1, m5 ; zero out high values
+ subps m6, m6, m4
+
+ subps m5, m0, m3
+
+%if ARCH_X86_64
+ SWAP m5, m8
+%endif
+
+ mulps m7, m2, [ps_val1]
+
+%if ARCH_X86_64
+ mulps m5, m8, [ps_val2]
+%else
+ mulps m5, m5, [ps_val2]
+%endif
+ addps m7, m7, m5
+
+ mulps m5, m6, [ps_val1]
+ subps m7, m7, m5
+
+%if ARCH_X86_64
+ SWAP m5, m8
+%else
+ subps m5, m0, m3
+%endif
+
+ subps m5, m5, m6
+ addps m5, m5, m2
+
+ shufps m6, m4, m3, 0xe4
+ subps m6, m6, m2
+ mulps m6, m6, [ps_val3]
+
+ addps m4, m4, m1
+ mulps m4, m4, [ps_val4]
+
+ shufps m1, m1, m0, 0xe4
+ addps m1, m1, m2
+ mulps m1, m1, [ps_val5]
+
+ mulps m3, m3, [ps_val6]
+ mulps m0, m0, [ps_val7]
+ addps m0, m0, m3
+
+ xorps m2, m1, [ps_p1p1m1m1]
+ subps m2, m2, m4
+ addps m2, m2, m0
+
+ addps m3, m4, m0
+ subps m3, m3, m6
+ xorps m3, m3, [ps_p1p1m1m1]
+
+ shufps m0, m0, m4, 0xe4
+ subps m0, m0, m1
+ addps m0, m0, m6
+
+ BUILDINVHIGHLOW m4, m2, m3
+ shufps m3, m3, m2, 0x4e
+
+ ; we have tmp = {SwAPLH(m0), SwAPLH(m7), m3, m4, m5}
+
+ BUTTERF m0, m1, 0
+ BUTTERF m7, m2, 16
+ BUTTERF m3, m6, 32
+ BUTTERF m4, m1, 48
+ BUTTERF2 m5, m1, 64
+
+ ; permutes:
+ ; m0 0 1 2 3 => 2 6 10 14 m1
+ ; m7 4 5 6 7 => 3 7 11 15 m2
+ ; m3 8 9 10 11 => 17 13 9 5 m3
+ ; m4 12 13 14 15 => 16 12 8 4 m5
+ ; m5 16 17 xx xx => 0 1 xx xx m0
+
+ unpckhps m1, m0, m7
+ unpckhps m6, m3, m4
+ movhlps m2, m6, m1
+ movlhps m1, m1, m6
+
+ unpcklps m5, m5, m4
+ unpcklps m3, m3, m7
+ movhlps m4, m3, m5
+ movlhps m5, m5, m3
+ SWAP m4, m3
+ ; permutation done
+
+ PSHUFD m6, m2, 0xb1
+ movss m4, [bufq + 4*68]
+ movss m7, [bufq + 4*64]
+ unpcklps m7, m7, m4
+ mulps m6, m6, [winq + 16*4]
+ addps m6, m6, m7
+ movss [outq + 64*SBLIMIT], m6
+ shufps m6, m6, m6, 0xb1
+ movss [outq + 68*SBLIMIT], m6
+
+ mulps m6, m3, [winq + 4*4]
+ LOAD m4, m7, bufq + 4*16, 16
+ addps m6, m6, m4
+ STORE m6, m7, outq + 16*SBLIMIT, 4*SBLIMIT
+
+ shufps m4, m0, m3, 0xb5
+ mulps m4, m4, [winq + 8*4]
+ LOAD m7, m6, bufq + 4*32, 16
+ addps m4, m4, m7
+ STORE m4, m6, outq + 32*SBLIMIT, 4*SBLIMIT
+
+ shufps m3, m3, m2, 0xb1
+ mulps m3, m3, [winq + 12*4]
+ LOAD m7, m6, bufq + 4*48, 16
+ addps m3, m3, m7
+ STORE m3, m7, outq + 48*SBLIMIT, 4*SBLIMIT
+
+ mulps m2, m2, [winq]
+ LOAD m6, m7, bufq, 16
+ addps m2, m2, m6
+ STORE m2, m7, outq, 4*SBLIMIT
+
+ mulps m4, m1, [winq + 20*4]
+ STORE m4, m7, bufq, 16
+
+ mulps m3, m5, [winq + 24*4]
+ STORE m3, m7, bufq + 4*16, 16
+
+ shufps m0, m0, m5, 0xb0
+ mulps m0, m0, [winq + 28*4]
+ STORE m0, m7, bufq + 4*32, 16
+
+ shufps m5, m5, m1, 0xb1
+ mulps m5, m5, [winq + 32*4]
+ STORE m5, m7, bufq + 4*48, 16
+
+ shufps m1, m1, m1, 0xb1
+ mulps m1, m1, [winq + 36*4]
+ movss [bufq + 4*64], m1
+ shufps m1, m1, 0xb1
+ movss [bufq + 4*68], m1
+ RET
+%endmacro
+
+INIT_XMM sse2
+DEFINE_IMDCT
+
+INIT_XMM sse3
+DEFINE_IMDCT
+
+INIT_XMM ssse3
+DEFINE_IMDCT
+
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+DEFINE_IMDCT
+%endif
+
+INIT_XMM sse
+
+%if ARCH_X86_64
+%define SPILL SWAP
+%define UNSPILL SWAP
+%define SPILLED(x) m %+ x
+%else
+%define SPILLED(x) [tmpq+(x-8)*16 + 32*4]
+%macro SPILL 2 ; xmm#, mempos
+ movaps SPILLED(%2), m%1
+%endmacro
+%macro UNSPILL 2
+ movaps m%1, SPILLED(%2)
+%endmacro
+%endif
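+; Summary of the spill strategy above (inferred from the definitions): on
+; x86-64 there are enough XMM registers, so SPILL/UNSPILL are just register
+; swaps into m8-m15; on 32-bit x86 the same values are kept in the tmp
+; buffer instead, which is why SPILLED(x) expands to a tmpq-relative slot.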
+
+%macro DEFINE_FOUR_IMDCT 0
+cglobal four_imdct36_float, 5,5,16, out, buf, in, win, tmp
+ movlps m0, [inq+64]
+ movhps m0, [inq+64 + 72]
+ movlps m3, [inq+64 + 2*72]
+ movhps m3, [inq+64 + 3*72]
+
+ shufps m5, m0, m3, 0xdd
+ shufps m0, m0, m3, 0x88
+
+ mova m1, [inq+48]
+ movu m6, [inq+48 + 72]
+ mova m7, [inq+48 + 2*72]
+ movu m3, [inq+48 + 3*72]
+
+ TRANSPOSE4x4PS 1, 6, 7, 3, 4
+
+ addps m4, m6, m7
+ mova [tmpq+4*28], m4
+
+ addps m7, m3
+ addps m6, m1
+ addps m3, m0
+ addps m0, m5
+ addps m0, m7
+ addps m7, m6
+ mova [tmpq+4*12], m7
+ SPILL 3, 12
+
+ mova m4, [inq+32]
+ movu m5, [inq+32 + 72]
+ mova m2, [inq+32 + 2*72]
+ movu m7, [inq+32 + 3*72]
+
+ TRANSPOSE4x4PS 4, 5, 2, 7, 3
+
+ addps m1, m7
+ SPILL 1, 11
+
+ addps m3, m5, m2
+ SPILL 3, 13
+
+ addps m7, m2
+ addps m5, m4
+ addps m6, m7
+ mova [tmpq], m6
+ addps m7, m5
+ mova [tmpq+4*16], m7
+
+ mova m2, [inq+16]
+ movu m7, [inq+16 + 72]
+ mova m1, [inq+16 + 2*72]
+ movu m6, [inq+16 + 3*72]
+
+ TRANSPOSE4x4PS 2, 7, 1, 6, 3
+
+ addps m4, m6
+ addps m6, m1
+ addps m1, m7
+ addps m7, m2
+ addps m5, m6
+ SPILL 5, 15
+ addps m6, m7
+ mulps m6, [costabs + 16*2]
+ mova [tmpq+4*8], m6
+ SPILL 1, 10
+ SPILL 0, 14
+
+ mova m1, [inq]
+ movu m6, [inq + 72]
+ mova m3, [inq + 2*72]
+ movu m5, [inq + 3*72]
+
+ TRANSPOSE4x4PS 1, 6, 3, 5, 0
+
+ addps m2, m5
+ addps m5, m3
+ addps m7, m5
+ addps m3, m6
+ addps m6, m1
+ SPILL 7, 8
+ addps m5, m6
+ SPILL 6, 9
+ addps m6, m4, SPILLED(12)
+ subps m6, m2
+ UNSPILL 7, 11
+ SPILL 5, 11
+ subps m5, m1, m7
+ mulps m7, [costabs + 16*5]
+ addps m7, m1
+ mulps m0, m6, [costabs + 16*6]
+ addps m0, m5
+ mova [tmpq+4*24], m0
+ addps m6, m5
+ mova [tmpq+4*4], m6
+ addps m6, m4, m2
+ mulps m6, [costabs + 16*1]
+ subps m4, SPILLED(12)
+ mulps m4, [costabs + 16*8]
+ addps m2, SPILLED(12)
+ mulps m2, [costabs + 16*3]
+ subps m5, m7, m6
+ subps m5, m2
+ addps m6, m7
+ addps m6, m4
+ addps m7, m2
+ subps m7, m4
+ mova [tmpq+4*20], m7
+ mova m2, [tmpq+4*28]
+ mova [tmpq+4*28], m5
+ UNSPILL 7, 13
+ subps m5, m7, m2
+ mulps m5, [costabs + 16*7]
+ UNSPILL 1, 10
+ mulps m1, [costabs + 16*2]
+ addps m4, m3, m2
+ mulps m4, [costabs + 16*4]
+ addps m2, m7
+ addps m7, m3
+ mulps m7, [costabs]
+ subps m3, m2
+ mulps m3, [costabs + 16*2]
+ addps m2, m7, m5
+ addps m2, m1
+ SPILL 2, 10
+ addps m7, m4
+ subps m7, m1
+ SPILL 7, 12
+ subps m5, m4
+ subps m5, m1
+ UNSPILL 0, 14
+ SPILL 5, 13
+ addps m1, m0, SPILLED(15)
+ subps m1, SPILLED(8)
+ mova m4, [costabs + 16*5]
+ mulps m4, [tmpq]
+ UNSPILL 2, 9
+ addps m4, m2
+ subps m2, [tmpq]
+ mulps m5, m1, [costabs + 16*6]
+ addps m5, m2
+ SPILL 5, 9
+ addps m2, m1
+ SPILL 2, 14
+ UNSPILL 5, 15
+ subps m7, m5, m0
+ addps m5, SPILLED(8)
+ mulps m5, [costabs + 16*1]
+ mulps m7, [costabs + 16*8]
+ addps m0, SPILLED(8)
+ mulps m0, [costabs + 16*3]
+ subps m2, m4, m5
+ subps m2, m0
+ SPILL 2, 15
+ addps m5, m4
+ addps m5, m7
+ addps m4, m0
+ subps m4, m7
+ SPILL 4, 8
+ mova m7, [tmpq+4*16]
+ mova m2, [tmpq+4*12]
+ addps m0, m7, m2
+ subps m0, SPILLED(11)
+ mulps m0, [costabs + 16*2]
+ addps m4, m7, SPILLED(11)
+ mulps m4, [costabs]
+ subps m7, m2
+ mulps m7, [costabs + 16*7]
+ addps m2, SPILLED(11)
+ mulps m2, [costabs + 16*4]
+ addps m1, m7, [tmpq+4*8]
+ addps m1, m4
+ addps m4, m2
+ subps m4, [tmpq+4*8]
+ SPILL 4, 11
+ subps m7, m2
+ subps m7, [tmpq+4*8]
+ addps m4, m6, SPILLED(10)
+ subps m6, SPILLED(10)
+ addps m2, m5, m1
+ mulps m2, [costabs + 16*9]
+ subps m5, m1
+ mulps m5, [costabs + 16*17]
+ subps m1, m4, m2
+ addps m4, m2
+ mulps m2, m1, [winq+4*36]
+ addps m2, [bufq+4*36]
+ mova [outq+1152], m2
+ mulps m1, [winq+4*32]
+ addps m1, [bufq+4*32]
+ mova [outq+1024], m1
+ mulps m1, m4, [winq+4*116]
+ mova [bufq+4*36], m1
+ mulps m4, [winq+4*112]
+ mova [bufq+4*32], m4
+ addps m2, m6, m5
+ subps m6, m5
+ mulps m1, m6, [winq+4*68]
+ addps m1, [bufq+4*68]
+ mova [outq+2176], m1
+ mulps m6, [winq]
+ addps m6, [bufq]
+ mova [outq], m6
+ mulps m1, m2, [winq+4*148]
+ mova [bufq+4*68], m1
+ mulps m2, [winq+4*80]
+ mova [bufq], m2
+ addps m5, m3, [tmpq+4*24]
+ mova m2, [tmpq+4*24]
+ subps m2, m3
+ mova m1, SPILLED(9)
+ subps m1, m0
+ mulps m1, [costabs + 16*10]
+ addps m0, SPILLED(9)
+ mulps m0, [costabs + 16*16]
+ addps m6, m5, m1
+ subps m5, m1
+ mulps m3, m5, [winq+4*40]
+ addps m3, [bufq+4*40]
+ mova [outq+1280], m3
+ mulps m5, [winq+4*28]
+ addps m5, [bufq+4*28]
+ mova [outq+896], m5
+ mulps m1, m6, [winq+4*120]
+ mova [bufq+4*40], m1
+ mulps m6, [winq+4*108]
+ mova [bufq+4*28], m6
+ addps m1, m2, m0
+ subps m2, m0
+ mulps m5, m2, [winq+4*64]
+ addps m5, [bufq+4*64]
+ mova [outq+2048], m5
+ mulps m2, [winq+4*4]
+ addps m2, [bufq+4*4]
+ mova [outq+128], m2
+ mulps m0, m1, [winq+4*144]
+ mova [bufq+4*64], m0
+ mulps m1, [winq+4*84]
+ mova [bufq+4*4], m1
+ mova m1, [tmpq+4*28]
+ mova m5, m1
+ addps m1, SPILLED(13)
+ subps m5, SPILLED(13)
+ UNSPILL 3, 15
+ addps m2, m7, m3
+ mulps m2, [costabs + 16*11]
+ subps m3, m7
+ mulps m3, [costabs + 16*15]
+ addps m0, m2, m1
+ subps m1, m2
+ SWAP m0, m2
+ mulps m6, m1, [winq+4*44]
+ addps m6, [bufq+4*44]
+ mova [outq+1408], m6
+ mulps m1, [winq+4*24]
+ addps m1, [bufq+4*24]
+ mova [outq+768], m1
+ mulps m0, m2, [winq+4*124]
+ mova [bufq+4*44], m0
+ mulps m2, [winq+4*104]
+ mova [bufq+4*24], m2
+ addps m0, m5, m3
+ subps m5, m3
+ mulps m1, m5, [winq+4*60]
+ addps m1, [bufq+4*60]
+ mova [outq+1920], m1
+ mulps m5, [winq+4*8]
+ addps m5, [bufq+4*8]
+ mova [outq+256], m5
+ mulps m1, m0, [winq+4*140]
+ mova [bufq+4*60], m1
+ mulps m0, [winq+4*88]
+ mova [bufq+4*8], m0
+ mova m1, [tmpq+4*20]
+ addps m1, SPILLED(12)
+ mova m2, [tmpq+4*20]
+ subps m2, SPILLED(12)
+ UNSPILL 7, 8
+ subps m0, m7, SPILLED(11)
+ addps m7, SPILLED(11)
+ mulps m4, m7, [costabs + 16*12]
+ mulps m0, [costabs + 16*14]
+ addps m5, m1, m4
+ subps m1, m4
+ mulps m7, m1, [winq+4*48]
+ addps m7, [bufq+4*48]
+ mova [outq+1536], m7
+ mulps m1, [winq+4*20]
+ addps m1, [bufq+4*20]
+ mova [outq+640], m1
+ mulps m1, m5, [winq+4*128]
+ mova [bufq+4*48], m1
+ mulps m5, [winq+4*100]
+ mova [bufq+4*20], m5
+ addps m6, m2, m0
+ subps m2, m0
+ mulps m1, m2, [winq+4*56]
+ addps m1, [bufq+4*56]
+ mova [outq+1792], m1
+ mulps m2, [winq+4*12]
+ addps m2, [bufq+4*12]
+ mova [outq+384], m2
+ mulps m0, m6, [winq+4*136]
+ mova [bufq+4*56], m0
+ mulps m6, [winq+4*92]
+ mova [bufq+4*12], m6
+ UNSPILL 0, 14
+ mulps m0, [costabs + 16*13]
+ mova m3, [tmpq+4*4]
+ addps m2, m0, m3
+ subps m3, m0
+ mulps m0, m3, [winq+4*52]
+ addps m0, [bufq+4*52]
+ mova [outq+1664], m0
+ mulps m3, [winq+4*16]
+ addps m3, [bufq+4*16]
+ mova [outq+512], m3
+ mulps m0, m2, [winq+4*132]
+ mova [bufq+4*52], m0
+ mulps m2, [winq+4*96]
+ mova [bufq+4*16], m2
+ RET
+%endmacro
+
+INIT_XMM sse
+DEFINE_FOUR_IMDCT
+
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+DEFINE_FOUR_IMDCT
+%endif
diff --git a/media/ffvpx/libavcodec/x86/mathops.h b/media/ffvpx/libavcodec/x86/mathops.h
new file mode 100644
index 0000000000..ca7e2dffc1
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/mathops.h
@@ -0,0 +1,153 @@
+/*
+ * simple math operations
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_MATHOPS_H
+#define AVCODEC_X86_MATHOPS_H
+
+#include "config.h"
+
+#include "libavutil/common.h"
+#include "libavutil/x86/asm.h"
+
+#if HAVE_INLINE_ASM
+
+#if ARCH_X86_32
+
+#define MULL MULL
+static av_always_inline av_const int MULL(int a, int b, unsigned shift)
+{
+ int rt, dummy;
+ if (__builtin_constant_p(shift))
+ __asm__ (
+ "imull %3 \n\t"
+ "shrdl %4, %%edx, %%eax \n\t"
+ :"=a"(rt), "=d"(dummy)
+ :"a"(a), "rm"(b), "i"(shift & 0x1F)
+ );
+ else
+ __asm__ (
+ "imull %3 \n\t"
+ "shrdl %4, %%edx, %%eax \n\t"
+ :"=a"(rt), "=d"(dummy)
+ :"a"(a), "rm"(b), "c"((uint8_t)shift)
+ );
+ return rt;
+}
+
+#define MULH MULH
+static av_always_inline av_const int MULH(int a, int b)
+{
+ int rt, dummy;
+ __asm__ (
+ "imull %3"
+ :"=d"(rt), "=a"(dummy)
+ :"a"(a), "rm"(b)
+ );
+ return rt;
+}
+
+#define MUL64 MUL64
+static av_always_inline av_const int64_t MUL64(int a, int b)
+{
+ int64_t rt;
+ __asm__ (
+ "imull %2"
+ :"=A"(rt)
+ :"a"(a), "rm"(b)
+ );
+ return rt;
+}
+
+#endif /* ARCH_X86_32 */
+
+#if HAVE_I686
+/* median of 3 */
+#define mid_pred mid_pred
+static inline av_const int mid_pred(int a, int b, int c)
+{
+ int i=b;
+ __asm__ (
+ "cmp %2, %1 \n\t"
+ "cmovg %1, %0 \n\t"
+ "cmovg %2, %1 \n\t"
+ "cmp %3, %1 \n\t"
+ "cmovl %3, %1 \n\t"
+ "cmp %1, %0 \n\t"
+ "cmovg %1, %0 \n\t"
+ :"+&r"(i), "+&r"(a)
+ :"r"(b), "r"(c)
+ );
+ return i;
+}
+
+#if HAVE_6REGS
+#define COPY3_IF_LT(x, y, a, b, c, d)\
+__asm__ volatile(\
+ "cmpl %0, %3 \n\t"\
+ "cmovl %3, %0 \n\t"\
+ "cmovl %4, %1 \n\t"\
+ "cmovl %5, %2 \n\t"\
+ : "+&r" (x), "+&r" (a), "+r" (c)\
+ : "r" (y), "r" (b), "r" (d)\
+);
+#endif /* HAVE_6REGS */
+
+#endif /* HAVE_I686 */
+
+#define MASK_ABS(mask, level) \
+ __asm__ ("cdq \n\t" \
+ "xorl %1, %0 \n\t" \
+ "subl %1, %0 \n\t" \
+ : "+a"(level), "=&d"(mask))
+
+// avoid +32 for shift optimization (gcc should do that ...)
+#define NEG_SSR32 NEG_SSR32
+static inline int32_t NEG_SSR32( int32_t a, int8_t s){
+ if (__builtin_constant_p(s))
+ __asm__ ("sarl %1, %0\n\t"
+ : "+r" (a)
+ : "i" (-s & 0x1F)
+ );
+ else
+ __asm__ ("sarl %1, %0\n\t"
+ : "+r" (a)
+ : "c" ((uint8_t)(-s))
+ );
+ return a;
+}
+
+#define NEG_USR32 NEG_USR32
+static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
+ if (__builtin_constant_p(s))
+ __asm__ ("shrl %1, %0\n\t"
+ : "+r" (a)
+ : "i" (-s & 0x1F)
+ );
+ else
+ __asm__ ("shrl %1, %0\n\t"
+ : "+r" (a)
+ : "c" ((uint8_t)(-s))
+ );
+ return a;
+}
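+
+/* Why the negated count works (a sketch): x86 32-bit shifts mask the count
+ * to 5 bits, so for 1 <= s <= 32 a count of (uint8_t)(-s) behaves like
+ * 32 - s.  For example, NEG_USR32(a, 8) shifts right by -8 & 0x1F == 24,
+ * i.e. it returns the top 8 bits of a. */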
+
+#endif /* HAVE_INLINE_ASM */
+#endif /* AVCODEC_X86_MATHOPS_H */
diff --git a/media/ffvpx/libavcodec/x86/moz.build b/media/ffvpx/libavcodec/x86/moz.build
new file mode 100644
index 0000000000..693218099a
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/moz.build
@@ -0,0 +1,55 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ 'constants.c',
+ 'dct32.asm',
+ 'dct_init.c',
+ 'fdct.c',
+ 'fdctdsp_init.c',
+ 'flacdsp.asm',
+ 'flacdsp_init.c',
+ 'h264_intrapred.asm',
+ 'h264_intrapred_10bit.asm',
+ 'h264_intrapred_init.c',
+ 'idctdsp.asm',
+ 'idctdsp_init.c',
+ 'imdct36.asm',
+ 'mpegaudiodsp.c',
+ 'videodsp.asm',
+ 'videodsp_init.c',
+ 'vp8dsp.asm',
+ 'vp8dsp_init.c',
+ 'vp8dsp_loopfilter.asm',
+ 'vp9dsp_init.c',
+ 'vp9dsp_init_10bpp.c',
+ 'vp9dsp_init_12bpp.c',
+ 'vp9dsp_init_16bpp.c',
+ 'vp9intrapred.asm',
+ 'vp9intrapred_16bpp.asm',
+ 'vp9itxfm.asm',
+ 'vp9itxfm_16bpp.asm',
+ 'vp9lpf.asm',
+ 'vp9lpf_16bpp.asm',
+ 'vp9mc.asm',
+ 'vp9mc_16bpp.asm',
+]
+
+if CONFIG['CPU_ARCH'] == 'x86':
+ SOURCES += [ 'simple_idct.asm' ]
+
+if CONFIG['CPU_ARCH'] == 'x86_64':
+ SOURCES += [ 'simple_idct10.asm' ]
+
+if CONFIG['MOZ_LIBAV_FFT']:
+ SOURCES += [
+ 'fft.asm',
+ 'fft_init.c',
+ ]
+
+FINAL_LIBRARY = 'mozavcodec'
+
+include('/media/ffvpx/ffvpxcommon.mozbuild')
diff --git a/media/ffvpx/libavcodec/x86/mpegaudiodsp.c b/media/ffvpx/libavcodec/x86/mpegaudiodsp.c
new file mode 100644
index 0000000000..6586fe0726
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/mpegaudiodsp.c
@@ -0,0 +1,284 @@
+/*
+ * SIMD-optimized MP3 decoding functions
+ * Copyright (c) 2010 Vitor Sessak
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem_internal.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/mpegaudiodsp.h"
+
+#define DECL(CPU)\
+static void imdct36_blocks_ ## CPU(float *out, float *buf, float *in, int count, int switch_point, int block_type);\
+void ff_imdct36_float_ ## CPU(float *out, float *buf, float *in, float *win);
+
+#if HAVE_X86ASM
+DECL(sse2)
+DECL(sse3)
+DECL(ssse3)
+DECL(avx)
+#endif /* HAVE_X86ASM */
+
+void ff_four_imdct36_float_sse(float *out, float *buf, float *in, float *win,
+ float *tmpbuf);
+void ff_four_imdct36_float_avx(float *out, float *buf, float *in, float *win,
+ float *tmpbuf);
+
+DECLARE_ALIGNED(16, static float, mdct_win_sse)[2][4][4*40];
+
+#if HAVE_6REGS && HAVE_SSE_INLINE
+
+#define MACS(rt, ra, rb) rt+=(ra)*(rb)
+#define MLSS(rt, ra, rb) rt-=(ra)*(rb)
+
+#define SUM8(op, sum, w, p) \
+{ \
+ op(sum, (w)[0 * 64], (p)[0 * 64]); \
+ op(sum, (w)[1 * 64], (p)[1 * 64]); \
+ op(sum, (w)[2 * 64], (p)[2 * 64]); \
+ op(sum, (w)[3 * 64], (p)[3 * 64]); \
+ op(sum, (w)[4 * 64], (p)[4 * 64]); \
+ op(sum, (w)[5 * 64], (p)[5 * 64]); \
+ op(sum, (w)[6 * 64], (p)[6 * 64]); \
+ op(sum, (w)[7 * 64], (p)[7 * 64]); \
+}
+
+static void apply_window(const float *buf, const float *win1,
+ const float *win2, float *sum1, float *sum2, int len)
+{
+ x86_reg count = - 4*len;
+ const float *win1a = win1+len;
+ const float *win2a = win2+len;
+ const float *bufa = buf+len;
+ float *sum1a = sum1+len;
+ float *sum2a = sum2+len;
+
+
+#define MULT(a, b) \
+ "movaps " #a "(%1,%0), %%xmm1 \n\t" \
+ "movaps " #a "(%3,%0), %%xmm2 \n\t" \
+ "mulps %%xmm2, %%xmm1 \n\t" \
+ "subps %%xmm1, %%xmm0 \n\t" \
+ "mulps " #b "(%2,%0), %%xmm2 \n\t" \
+ "subps %%xmm2, %%xmm4 \n\t" \
+
+ __asm__ volatile(
+ "1: \n\t"
+ "xorps %%xmm0, %%xmm0 \n\t"
+ "xorps %%xmm4, %%xmm4 \n\t"
+
+ MULT( 0, 0)
+ MULT( 256, 64)
+ MULT( 512, 128)
+ MULT( 768, 192)
+ MULT(1024, 256)
+ MULT(1280, 320)
+ MULT(1536, 384)
+ MULT(1792, 448)
+
+ "movaps %%xmm0, (%4,%0) \n\t"
+ "movaps %%xmm4, (%5,%0) \n\t"
+ "add $16, %0 \n\t"
+ "jl 1b \n\t"
+ :"+&r"(count)
+ :"r"(win1a), "r"(win2a), "r"(bufa), "r"(sum1a), "r"(sum2a)
+ );
+
+#undef MULT
+}
+
+static void apply_window_mp3(float *in, float *win, int *unused, float *out,
+ ptrdiff_t incr)
+{
+ LOCAL_ALIGNED_16(float, suma, [17]);
+ LOCAL_ALIGNED_16(float, sumb, [17]);
+ LOCAL_ALIGNED_16(float, sumc, [17]);
+ LOCAL_ALIGNED_16(float, sumd, [17]);
+
+ float sum;
+
+ /* copy to avoid wrap */
+ __asm__ volatile(
+ "movaps 0(%0), %%xmm0 \n\t" \
+ "movaps 16(%0), %%xmm1 \n\t" \
+ "movaps 32(%0), %%xmm2 \n\t" \
+ "movaps 48(%0), %%xmm3 \n\t" \
+ "movaps %%xmm0, 0(%1) \n\t" \
+ "movaps %%xmm1, 16(%1) \n\t" \
+ "movaps %%xmm2, 32(%1) \n\t" \
+ "movaps %%xmm3, 48(%1) \n\t" \
+ "movaps 64(%0), %%xmm0 \n\t" \
+ "movaps 80(%0), %%xmm1 \n\t" \
+ "movaps 96(%0), %%xmm2 \n\t" \
+ "movaps 112(%0), %%xmm3 \n\t" \
+ "movaps %%xmm0, 64(%1) \n\t" \
+ "movaps %%xmm1, 80(%1) \n\t" \
+ "movaps %%xmm2, 96(%1) \n\t" \
+ "movaps %%xmm3, 112(%1) \n\t"
+ ::"r"(in), "r"(in+512)
+ :"memory"
+ );
+
+ apply_window(in + 16, win , win + 512, suma, sumc, 16);
+ apply_window(in + 32, win + 48, win + 640, sumb, sumd, 16);
+
+ SUM8(MACS, suma[0], win + 32, in + 48);
+
+ sumc[ 0] = 0;
+ sumb[16] = 0;
+ sumd[16] = 0;
+
+#define SUMS(suma, sumb, sumc, sumd, out1, out2) \
+ "movups " #sumd "(%4), %%xmm0 \n\t" \
+ "shufps $0x1b, %%xmm0, %%xmm0 \n\t" \
+ "subps " #suma "(%1), %%xmm0 \n\t" \
+ "movaps %%xmm0," #out1 "(%0) \n\t" \
+\
+ "movups " #sumc "(%3), %%xmm0 \n\t" \
+ "shufps $0x1b, %%xmm0, %%xmm0 \n\t" \
+ "addps " #sumb "(%2), %%xmm0 \n\t" \
+ "movaps %%xmm0," #out2 "(%0) \n\t"
+
+ if (incr == 1) {
+ __asm__ volatile(
+ SUMS( 0, 48, 4, 52, 0, 112)
+ SUMS(16, 32, 20, 36, 16, 96)
+ SUMS(32, 16, 36, 20, 32, 80)
+ SUMS(48, 0, 52, 4, 48, 64)
+
+ :"+&r"(out)
+ :"r"(&suma[0]), "r"(&sumb[0]), "r"(&sumc[0]), "r"(&sumd[0])
+ :"memory"
+ );
+ out += 16*incr;
+ } else {
+ int j;
+ float *out2 = out + 32 * incr;
+ out[0 ] = -suma[ 0];
+ out += incr;
+ out2 -= incr;
+ for(j=1;j<16;j++) {
+ *out = -suma[ j] + sumd[16-j];
+ *out2 = sumb[16-j] + sumc[ j];
+ out += incr;
+ out2 -= incr;
+ }
+ }
+
+ sum = 0;
+ SUM8(MLSS, sum, win + 16 + 32, in + 32);
+ *out = sum;
+}
+
+#endif /* HAVE_6REGS && HAVE_SSE_INLINE */
+
+#if HAVE_X86ASM
+#define DECL_IMDCT_BLOCKS(CPU1, CPU2) \
+static void imdct36_blocks_ ## CPU1(float *out, float *buf, float *in, \
+ int count, int switch_point, int block_type) \
+{ \
+ int align_end = count - (count & 3); \
+ int j; \
+ for (j = 0; j < align_end; j+= 4) { \
+ LOCAL_ALIGNED_16(float, tmpbuf, [1024]); \
+ float *win = mdct_win_sse[switch_point && j < 4][block_type]; \
+ /* apply window & overlap with previous buffer */ \
+ \
+ /* select window */ \
+ ff_four_imdct36_float_ ## CPU2(out, buf, in, win, tmpbuf); \
+ in += 4*18; \
+ buf += 4*18; \
+ out += 4; \
+ } \
+ for (; j < count; j++) { \
+ /* apply window & overlap with previous buffer */ \
+ \
+ /* select window */ \
+ int win_idx = (switch_point && j < 2) ? 0 : block_type; \
+ float *win = ff_mdct_win_float[win_idx + (4 & -(j & 1))]; \
+ \
+ ff_imdct36_float_ ## CPU1(out, buf, in, win); \
+ \
+ in += 18; \
+ buf++; \
+ out++; \
+ } \
+}
+
+#if HAVE_SSE
+DECL_IMDCT_BLOCKS(sse2,sse)
+DECL_IMDCT_BLOCKS(sse3,sse)
+DECL_IMDCT_BLOCKS(ssse3,sse)
+#endif
+#if HAVE_AVX_EXTERNAL
+DECL_IMDCT_BLOCKS(avx,avx)
+#endif
+#endif /* HAVE_X86ASM */
+
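+/* A note on the table built below (a sketch inferred from the scalar loop
+ * above): the four-at-a-time IMDCT consumes one window value per lane, so
+ * each entry of ff_mdct_win_float[j] is interleaved with its [j + 4]
+ * counterpart, mirroring the per-block selection win_idx + (4 & -(j & 1))
+ * in the one-block fallback; the [1] half covers the switch_point case,
+ * where the first two blocks take their window from index 0/4 instead of
+ * block_type. */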
+av_cold void ff_mpadsp_init_x86_tabs(void)
+{
+ int i, j;
+ for (j = 0; j < 4; j++) {
+ for (i = 0; i < 40; i ++) {
+ mdct_win_sse[0][j][4*i ] = ff_mdct_win_float[j ][i];
+ mdct_win_sse[0][j][4*i + 1] = ff_mdct_win_float[j + 4][i];
+ mdct_win_sse[0][j][4*i + 2] = ff_mdct_win_float[j ][i];
+ mdct_win_sse[0][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
+ mdct_win_sse[1][j][4*i ] = ff_mdct_win_float[0 ][i];
+ mdct_win_sse[1][j][4*i + 1] = ff_mdct_win_float[4 ][i];
+ mdct_win_sse[1][j][4*i + 2] = ff_mdct_win_float[j ][i];
+ mdct_win_sse[1][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
+ }
+ }
+}
+
+av_cold void ff_mpadsp_init_x86(MPADSPContext *s)
+{
+ av_unused int cpu_flags = av_get_cpu_flags();
+
+#if HAVE_6REGS && HAVE_SSE_INLINE
+ if (INLINE_SSE(cpu_flags)) {
+ s->apply_window_float = apply_window_mp3;
+ }
+#endif /* HAVE_6REGS && HAVE_SSE_INLINE */
+
+#if HAVE_X86ASM
+#if HAVE_SSE
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ s->imdct36_blocks_float = imdct36_blocks_sse2;
+ }
+ if (EXTERNAL_SSE3(cpu_flags)) {
+ s->imdct36_blocks_float = imdct36_blocks_sse3;
+ }
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ s->imdct36_blocks_float = imdct36_blocks_ssse3;
+ }
+#endif
+#if HAVE_AVX_EXTERNAL
+ if (EXTERNAL_AVX(cpu_flags)) {
+ s->imdct36_blocks_float = imdct36_blocks_avx;
+ }
+#endif
+#endif /* HAVE_X86ASM */
+}
diff --git a/media/ffvpx/libavcodec/x86/simple_idct.asm b/media/ffvpx/libavcodec/x86/simple_idct.asm
new file mode 100644
index 0000000000..982b2f0bbb
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/simple_idct.asm
@@ -0,0 +1,871 @@
+;
+; Simple IDCT MMX
+;
+; Copyright (c) 2001, 2002 Michael Niedermayer <michaelni@gmx.at>
+;
+; Conversion from gcc syntax to x264asm syntax with minimal modifications
+; by James Darnley <jdarnley@obe.tv>.
+;
+; This file is part of FFmpeg.
+;
+; FFmpeg is free software; you can redistribute it and/or
+; modify it under the terms of the GNU Lesser General Public
+; License as published by the Free Software Foundation; either
+; version 2.1 of the License, or (at your option) any later version.
+;
+; FFmpeg is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+; Lesser General Public License for more details.
+;
+; You should have received a copy of the GNU Lesser General Public
+; License along with FFmpeg; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+%if ARCH_X86_32
+cextern pb_80
+
+wm1010: dw 0, 0xffff, 0, 0xffff
+d40000: dd 4 << 16, 0
+
+; 23170.475006
+; 22725.260826
+; 21406.727617
+; 19265.545870
+; 16384.000000
+; 12872.826198
+; 8866.956905
+; 4520.335430
+
+%define C0 23170 ; cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+%define C1 22725 ; cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+%define C2 21407 ; cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+%define C3 19266 ; cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+%define C4 16383 ; cos(i*M_PI/16)*sqrt(2)*(1<<14) - 0.5
+%define C5 12873 ; cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+%define C6 8867 ; cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+%define C7 4520 ; cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
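+
+; A worked example of the rounding noted above (illustrative): C1 =
+; round(cos(1*M_PI/16)*sqrt(2)*(1<<14)) = round(0.98078528 * 1.41421356
+; * 16384) = round(22725.26) = 22725, matching the 22725.260826 listed
+; above.  C4 is the exception: the exact value is 16384.0, and the "- 0.5"
+; rounding stores it as 16383.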
+
+%define ROW_SHIFT 11
+%define COL_SHIFT 20 ; 6
+
+coeffs:
+ dw 1 << (ROW_SHIFT - 1), 0
+ dw 1 << (ROW_SHIFT - 1), 0
+ dw 1 << (ROW_SHIFT - 1), 1
+ dw 1 << (ROW_SHIFT - 1), 0
+
+ dw C4, C4, C4, C4
+ dw C4, -C4, C4, -C4
+
+ dw C2, C6, C2, C6
+ dw C6, -C2, C6, -C2
+
+ dw C1, C3, C1, C3
+ dw C5, C7, C5, C7
+
+ dw C3, -C7, C3, -C7
+ dw -C1, -C5, -C1, -C5
+
+ dw C5, -C1, C5, -C1
+ dw C7, C3, C7, C3
+
+ dw C7, -C5, C7, -C5
+ dw C3, -C1, C3, -C1
+
+SECTION .text
+
+%macro DC_COND_IDCT 7
+ movq mm0, [blockq + %1] ; R4 R0 r4 r0
+ movq mm1, [blockq + %2] ; R6 R2 r6 r2
+ movq mm2, [blockq + %3] ; R3 R1 r3 r1
+ movq mm3, [blockq + %4] ; R7 R5 r7 r5
+ movq mm4, [wm1010]
+ pand mm4, mm0
+ por mm4, mm1
+ por mm4, mm2
+ por mm4, mm3
+ packssdw mm4, mm4
+ movd t0d, mm4
+ or t0d, t0d
+ jz %%1
+ movq mm4, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm4, mm0 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm0, mm5 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm5, [coeffs + 32] ; C6 C2 C6 C2
+ pmaddwd mm5, mm1 ; C6R6+C2R2 C6r6+C2r2
+ movq mm6, [coeffs + 40] ; -C2 C6 -C2 C6
+ pmaddwd mm1, mm6 ; -C2R6+C6R2 -C2r6+C6r2
+ movq mm7, [coeffs + 48] ; C3 C1 C3 C1
+ pmaddwd mm7, mm2 ; C3R3+C1R1 C3r3+C1r1
+ paddd mm4, [coeffs + 8]
+ movq mm6, mm4 ; C4R4+C4R0 C4r4+C4r0
+ paddd mm4, mm5 ; A0 a0
+ psubd mm6, mm5 ; A3 a3
+ movq mm5, [coeffs + 56] ; C7 C5 C7 C5
+ pmaddwd mm5, mm3 ; C7R7+C5R5 C7r7+C5r5
+ paddd mm0, [coeffs + 8]
+ paddd mm1, mm0 ; A1 a1
+ paddd mm0, mm0
+ psubd mm0, mm1 ; A2 a2
+ pmaddwd mm2, [coeffs + 64] ; -C7R3+C3R1 -C7r3+C3r1
+ paddd mm7, mm5 ; B0 b0
+ movq mm5, [coeffs + 72] ; -C5 -C1 -C5 -C1
+ pmaddwd mm5, mm3 ; -C5R7-C1R5 -C5r7-C1r5
+ paddd mm7, mm4 ; A0+B0 a0+b0
+ paddd mm4, mm4 ; 2A0 2a0
+ psubd mm4, mm7 ; A0-B0 a0-b0
+ paddd mm5, mm2 ; B1 b1
+ psrad mm7, %7
+ psrad mm4, %7
+ movq mm2, mm1 ; A1 a1
+ paddd mm1, mm5 ; A1+B1 a1+b1
+ psubd mm2, mm5 ; A1-B1 a1-b1
+ psrad mm1, %7
+ psrad mm2, %7
+ packssdw mm7, mm1 ; A1+B1 a1+b1 A0+B0 a0+b0
+ packssdw mm2, mm4 ; A0-B0 a0-b0 A1-B1 a1-b1
+ movq [%5], mm7
+ movq mm1, [blockq + %3] ; R3 R1 r3 r1
+ movq mm4, [coeffs + 80] ; -C1 C5 -C1 C5
+ movq [24 + %5], mm2
+ pmaddwd mm4, mm1 ; -C1R3+C5R1 -C1r3+C5r1
+ movq mm7, [coeffs + 88] ; C3 C7 C3 C7
+ pmaddwd mm1, [coeffs + 96] ; -C5R3+C7R1 -C5r3+C7r1
+ pmaddwd mm7, mm3 ; C3R7+C7R5 C3r7+C7r5
+ movq mm2, mm0 ; A2 a2
+ pmaddwd mm3, [coeffs + 104] ; -C1R7+C3R5 -C1r7+C3r5
+ paddd mm4, mm7 ; B2 b2
+ paddd mm2, mm4 ; A2+B2 a2+b2
+ psubd mm0, mm4 ; a2-B2 a2-b2
+ psrad mm2, %7
+ psrad mm0, %7
+ movq mm4, mm6 ; A3 a3
+ paddd mm3, mm1 ; B3 b3
+ paddd mm6, mm3 ; A3+B3 a3+b3
+ psubd mm4, mm3 ; a3-B3 a3-b3
+ psrad mm6, %7
+ packssdw mm2, mm6 ; A3+B3 a3+b3 A2+B2 a2+b2
+ movq [8 + %5], mm2
+ psrad mm4, %7
+ packssdw mm4, mm0 ; A2-B2 a2-b2 A3-B3 a3-b3
+ movq [16 + %5], mm4
+ jmp %%2
+%%1:
+ pslld mm0, 16
+ paddd mm0, [d40000]
+ psrad mm0, 13
+ packssdw mm0, mm0
+ movq [%5], mm0
+ movq [8 + %5], mm0
+ movq [16 + %5], mm0
+ movq [24 + %5], mm0
+%%2:
+%endmacro
+
+%macro Z_COND_IDCT 8
+ movq mm0, [blockq + %1] ; R4 R0 r4 r0
+ movq mm1, [blockq + %2] ; R6 R2 r6 r2
+ movq mm2, [blockq + %3] ; R3 R1 r3 r1
+ movq mm3, [blockq + %4] ; R7 R5 r7 r5
+ movq mm4, mm0
+ por mm4, mm1
+ por mm4, mm2
+ por mm4, mm3
+ packssdw mm4, mm4
+ movd t0d, mm4
+ or t0d, t0d
+ jz %8
+ movq mm4, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm4, mm0 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm0, mm5 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm5, [coeffs + 32] ; C6 C2 C6 C2
+ pmaddwd mm5, mm1 ; C6R6+C2R2 C6r6+C2r2
+ movq mm6, [coeffs + 40] ; -C2 C6 -C2 C6
+ pmaddwd mm1, mm6 ; -C2R6+C6R2 -C2r6+C6r2
+ movq mm7, [coeffs + 48] ; C3 C1 C3 C1
+ pmaddwd mm7, mm2 ; C3R3+C1R1 C3r3+C1r1
+ paddd mm4, [coeffs]
+ movq mm6, mm4 ; C4R4+C4R0 C4r4+C4r0
+ paddd mm4, mm5 ; A0 a0
+ psubd mm6, mm5 ; A3 a3
+ movq mm5, [coeffs + 56] ; C7 C5 C7 C5
+ pmaddwd mm5, mm3 ; C7R7+C5R5 C7r7+C5r5
+ paddd mm0, [coeffs]
+ paddd mm1, mm0 ; A1 a1
+ paddd mm0, mm0
+ psubd mm0, mm1 ; A2 a2
+ pmaddwd mm2, [coeffs + 64] ; -C7R3+C3R1 -C7r3+C3r1
+ paddd mm7, mm5 ; B0 b0
+ movq mm5, [coeffs + 72] ; -C5 -C1 -C5 -C1
+ pmaddwd mm5, mm3 ; -C5R7-C1R5 -C5r7-C1r5
+ paddd mm7, mm4 ; A0+B0 a0+b0
+ paddd mm4, mm4 ; 2A0 2a0
+ psubd mm4, mm7 ; A0-B0 a0-b0
+ paddd mm5, mm2 ; B1 b1
+ psrad mm7, %7
+ psrad mm4, %7
+ movq mm2, mm1 ; A1 a1
+ paddd mm1, mm5 ; A1+B1 a1+b1
+ psubd mm2, mm5 ; A1-B1 a1-b1
+ psrad mm1, %7
+ psrad mm2, %7
+ packssdw mm7, mm1 ; A1+B1 a1+b1 A0+B0 a0+b0
+ packssdw mm2, mm4 ; A0-B0 a0-b0 A1-B1 a1-b1
+ movq [%5], mm7
+ movq mm1, [blockq + %3] ; R3 R1 r3 r1
+ movq mm4, [coeffs + 80] ; -C1 C5 -C1 C5
+ movq [24 + %5], mm2
+ pmaddwd mm4, mm1 ; -C1R3+C5R1 -C1r3+C5r1
+ movq mm7, [coeffs + 88] ; C3 C7 C3 C7
+ pmaddwd mm1, [coeffs + 96] ; -C5R3+C7R1 -C5r3+C7r1
+ pmaddwd mm7, mm3 ; C3R7+C7R5 C3r7+C7r5
+ movq mm2, mm0 ; A2 a2
+ pmaddwd mm3, [coeffs + 104] ; -C1R7+C3R5 -C1r7+C3r5
+ paddd mm4, mm7 ; B2 b2
+ paddd mm2, mm4 ; A2+B2 a2+b2
+ psubd mm0, mm4 ; a2-B2 a2-b2
+ psrad mm2, %7
+ psrad mm0, %7
+ movq mm4, mm6 ; A3 a3
+ paddd mm3, mm1 ; B3 b3
+ paddd mm6, mm3 ; A3+B3 a3+b3
+ psubd mm4, mm3 ; a3-B3 a3-b3
+ psrad mm6, %7
+ packssdw mm2, mm6 ; A3+B3 a3+b3 A2+B2 a2+b2
+ movq [8 + %5], mm2
+ psrad mm4, %7
+ packssdw mm4, mm0 ; A2-B2 a2-b2 A3-B3 a3-b3
+ movq [16 + %5], mm4
+%endmacro
+
+%macro IDCT1 6
+ movq mm0, %1 ; R4 R0 r4 r0
+ movq mm1, %2 ; R6 R2 r6 r2
+ movq mm2, %3 ; R3 R1 r3 r1
+ movq mm3, %4 ; R7 R5 r7 r5
+ movq mm4, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm4, mm0 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm0, mm5 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm5, [coeffs + 32] ; C6 C2 C6 C2
+ pmaddwd mm5, mm1 ; C6R6+C2R2 C6r6+C2r2
+ movq mm6, [coeffs + 40] ; -C2 C6 -C2 C6
+ pmaddwd mm1, mm6 ; -C2R6+C6R2 -C2r6+C6r2
+ movq mm6, mm4 ; C4R4+C4R0 C4r4+C4r0
+ movq mm7, [coeffs + 48] ; C3 C1 C3 C1
+ pmaddwd mm7, mm2 ; C3R3+C1R1 C3r3+C1r1
+ paddd mm4, mm5 ; A0 a0
+ psubd mm6, mm5 ; A3 a3
+ movq mm5, mm0 ; -C4R4+C4R0 -C4r4+C4r0
+ paddd mm0, mm1 ; A1 a1
+ psubd mm5, mm1 ; A2 a2
+ movq mm1, [coeffs + 56] ; C7 C5 C7 C5
+ pmaddwd mm1, mm3 ; C7R7+C5R5 C7r7+C5r5
+ pmaddwd mm2, [coeffs + 64] ; -C7R3+C3R1 -C7r3+C3r1
+ paddd mm7, mm1 ; B0 b0
+ movq mm1, [coeffs + 72] ; -C5 -C1 -C5 -C1
+ pmaddwd mm1, mm3 ; -C5R7-C1R5 -C5r7-C1r5
+ paddd mm7, mm4 ; A0+B0 a0+b0
+ paddd mm4, mm4 ; 2A0 2a0
+ psubd mm4, mm7 ; A0-B0 a0-b0
+ paddd mm1, mm2 ; B1 b1
+ psrad mm7, %6
+ psrad mm4, %6
+ movq mm2, mm0 ; A1 a1
+ paddd mm0, mm1 ; A1+B1 a1+b1
+ psubd mm2, mm1 ; A1-B1 a1-b1
+ psrad mm0, %6
+ psrad mm2, %6
+ packssdw mm7, mm7 ; A0+B0 a0+b0
+ movd [%5], mm7
+ packssdw mm0, mm0 ; A1+B1 a1+b1
+ movd [16 + %5], mm0
+ packssdw mm2, mm2 ; A1-B1 a1-b1
+ movd [96 + %5], mm2
+ packssdw mm4, mm4 ; A0-B0 a0-b0
+ movd [112 + %5], mm4
+ movq mm0, %3 ; R3 R1 r3 r1
+ movq mm4, [coeffs + 80] ; -C1 C5 -C1 C5
+ pmaddwd mm4, mm0 ; -C1R3+C5R1 -C1r3+C5r1
+ movq mm7, [coeffs + 88] ; C3 C7 C3 C7
+ pmaddwd mm0, [coeffs + 96] ; -C5R3+C7R1 -C5r3+C7r1
+ pmaddwd mm7, mm3 ; C3R7+C7R5 C3r7+C7r5
+ movq mm2, mm5 ; A2 a2
+ pmaddwd mm3, [coeffs + 104] ; -C1R7+C3R5 -C1r7+C3r5
+ paddd mm4, mm7 ; B2 b2
+ paddd mm2, mm4 ; A2+B2 a2+b2
+ psubd mm5, mm4 ; a2-B2 a2-b2
+ psrad mm2, %6
+ psrad mm5, %6
+ movq mm4, mm6 ; A3 a3
+ paddd mm3, mm0 ; B3 b3
+ paddd mm6, mm3 ; A3+B3 a3+b3
+ psubd mm4, mm3 ; a3-B3 a3-b3
+ psrad mm6, %6
+ psrad mm4, %6
+ packssdw mm2, mm2 ; A2+B2 a2+b2
+ packssdw mm6, mm6 ; A3+B3 a3+b3
+ movd [32 + %5], mm2
+ packssdw mm4, mm4 ; A3-B3 a3-b3
+ packssdw mm5, mm5 ; A2-B2 a2-b2
+ movd [48 + %5], mm6
+ movd [64 + %5], mm4
+ movd [80 + %5], mm5
+%endmacro
+
+%macro IDCT2 6
+ movq mm0, %1 ; R4 R0 r4 r0
+ movq mm1, %2 ; R6 R2 r6 r2
+ movq mm3, %4 ; R7 R5 r7 r5
+ movq mm4, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm4, mm0 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm0, mm5 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm5, [coeffs + 32] ; C6 C2 C6 C2
+ pmaddwd mm5, mm1 ; C6R6+C2R2 C6r6+C2r2
+ movq mm6, [coeffs + 40] ; -C2 C6 -C2 C6
+ pmaddwd mm1, mm6 ; -C2R6+C6R2 -C2r6+C6r2
+ movq mm6, mm4 ; C4R4+C4R0 C4r4+C4r0
+ paddd mm4, mm5 ; A0 a0
+ psubd mm6, mm5 ; A3 a3
+ movq mm5, mm0 ; -C4R4+C4R0 -C4r4+C4r0
+ paddd mm0, mm1 ; A1 a1
+ psubd mm5, mm1 ; A2 a2
+ movq mm1, [coeffs + 56] ; C7 C5 C7 C5
+ pmaddwd mm1, mm3 ; C7R7+C5R5 C7r7+C5r5
+ movq mm7, [coeffs + 72] ; -C5 -C1 -C5 -C1
+ pmaddwd mm7, mm3 ; -C5R7-C1R5 -C5r7-C1r5
+ paddd mm1, mm4 ; A0+B0 a0+b0
+ paddd mm4, mm4 ; 2A0 2a0
+ psubd mm4, mm1 ; A0-B0 a0-b0
+ psrad mm1, %6
+ psrad mm4, %6
+ movq mm2, mm0 ; A1 a1
+ paddd mm0, mm7 ; A1+B1 a1+b1
+ psubd mm2, mm7 ; A1-B1 a1-b1
+ psrad mm0, %6
+ psrad mm2, %6
+ packssdw mm1, mm1 ; A0+B0 a0+b0
+ movd [%5], mm1
+ packssdw mm0, mm0 ; A1+B1 a1+b1
+ movd [16 + %5], mm0
+ packssdw mm2, mm2 ; A1-B1 a1-b1
+ movd [96 + %5], mm2
+ packssdw mm4, mm4 ; A0-B0 a0-b0
+ movd [112 + %5], mm4
+ movq mm1, [coeffs + 88] ; C3 C7 C3 C7
+ pmaddwd mm1, mm3 ; C3R7+C7R5 C3r7+C7r5
+ movq mm2, mm5 ; A2 a2
+ pmaddwd mm3, [coeffs + 104] ; -C1R7+C3R5 -C1r7+C3r5
+ paddd mm2, mm1 ; A2+B2 a2+b2
+ psubd mm5, mm1 ; a2-B2 a2-b2
+ psrad mm2, %6
+ psrad mm5, %6
+ movq mm1, mm6 ; A3 a3
+ paddd mm6, mm3 ; A3+B3 a3+b3
+ psubd mm1, mm3 ; a3-B3 a3-b3
+ psrad mm6, %6
+ psrad mm1, %6
+ packssdw mm2, mm2 ; A2+B2 a2+b2
+ packssdw mm6, mm6 ; A3+B3 a3+b3
+ movd [32 + %5], mm2
+ packssdw mm1, mm1 ; A3-B3 a3-b3
+ packssdw mm5, mm5 ; A2-B2 a2-b2
+ movd [48 + %5], mm6
+ movd [64 + %5], mm1
+ movd [80 + %5], mm5
+%endmacro
+
+%macro IDCT3 6
+ movq mm0, %1 ; R4 R0 r4 r0
+ movq mm3, %4 ; R7 R5 r7 r5
+ movq mm4, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm4, mm0 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm0, mm5 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm6, mm4 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, mm0 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm1, [coeffs + 56] ; C7 C5 C7 C5
+ pmaddwd mm1, mm3 ; C7R7+C5R5 C7r7+C5r5
+ movq mm7, [coeffs + 72] ; -C5 -C1 -C5 -C1
+ pmaddwd mm7, mm3 ; -C5R7-C1R5 -C5r7-C1r5
+ paddd mm1, mm4 ; A0+B0 a0+b0
+ paddd mm4, mm4 ; 2A0 2a0
+ psubd mm4, mm1 ; A0-B0 a0-b0
+ psrad mm1, %6
+ psrad mm4, %6
+ movq mm2, mm0 ; A1 a1
+ paddd mm0, mm7 ; A1+B1 a1+b1
+ psubd mm2, mm7 ; A1-B1 a1-b1
+ psrad mm0, %6
+ psrad mm2, %6
+ packssdw mm1, mm1 ; A0+B0 a0+b0
+ movd [%5], mm1
+ packssdw mm0, mm0 ; A1+B1 a1+b1
+ movd [16 + %5], mm0
+ packssdw mm2, mm2 ; A1-B1 a1-b1
+ movd [96 + %5], mm2
+ packssdw mm4, mm4 ; A0-B0 a0-b0
+ movd [112 + %5], mm4
+ movq mm1, [coeffs + 88] ; C3 C7 C3 C7
+ pmaddwd mm1, mm3 ; C3R7+C7R5 C3r7+C7r5
+ movq mm2, mm5 ; A2 a2
+ pmaddwd mm3, [coeffs + 104] ; -C1R7+C3R5 -C1r7+C3r5
+ paddd mm2, mm1 ; A2+B2 a2+b2
+ psubd mm5, mm1 ; a2-B2 a2-b2
+ psrad mm2, %6
+ psrad mm5, %6
+ movq mm1, mm6 ; A3 a3
+ paddd mm6, mm3 ; A3+B3 a3+b3
+ psubd mm1, mm3 ; a3-B3 a3-b3
+ psrad mm6, %6
+ psrad mm1, %6
+ packssdw mm2, mm2 ; A2+B2 a2+b2
+ packssdw mm6, mm6 ; A3+B3 a3+b3
+ movd [32 + %5], mm2
+ packssdw mm1, mm1 ; A3-B3 a3-b3
+ packssdw mm5, mm5 ; A2-B2 a2-b2
+ movd [48 + %5], mm6
+ movd [64 + %5], mm1
+ movd [80 + %5], mm5
+%endmacro
+
+%macro IDCT4 6
+ movq mm0, %1 ; R4 R0 r4 r0
+ movq mm2, %3 ; R3 R1 r3 r1
+ movq mm3, %4 ; R7 R5 r7 r5
+ movq mm4, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm4, mm0 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm0, mm5 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm6, mm4 ; C4R4+C4R0 C4r4+C4r0
+ movq mm7, [coeffs + 48] ; C3 C1 C3 C1
+ pmaddwd mm7, mm2 ; C3R3+C1R1 C3r3+C1r1
+ movq mm5, mm0 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm1, [coeffs + 56] ; C7 C5 C7 C5
+ pmaddwd mm1, mm3 ; C7R7+C5R5 C7r7+C5r5
+ pmaddwd mm2, [coeffs + 64] ; -C7R3+C3R1 -C7r3+C3r1
+ paddd mm7, mm1 ; B0 b0
+ movq mm1, [coeffs + 72] ; -C5 -C1 -C5 -C1
+ pmaddwd mm1, mm3 ; -C5R7-C1R5 -C5r7-C1r5
+ paddd mm7, mm4 ; A0+B0 a0+b0
+ paddd mm4, mm4 ; 2A0 2a0
+ psubd mm4, mm7 ; A0-B0 a0-b0
+ paddd mm1, mm2 ; B1 b1
+ psrad mm7, %6
+ psrad mm4, %6
+ movq mm2, mm0 ; A1 a1
+ paddd mm0, mm1 ; A1+B1 a1+b1
+ psubd mm2, mm1 ; A1-B1 a1-b1
+ psrad mm0, %6
+ psrad mm2, %6
+ packssdw mm7, mm7 ; A0+B0 a0+b0
+ movd [%5], mm7
+ packssdw mm0, mm0 ; A1+B1 a1+b1
+ movd [16 + %5], mm0
+ packssdw mm2, mm2 ; A1-B1 a1-b1
+ movd [96 + %5], mm2
+ packssdw mm4, mm4 ; A0-B0 a0-b0
+ movd [112 + %5], mm4
+ movq mm0, %3 ; R3 R1 r3 r1
+ movq mm4, [coeffs + 80] ; -C1 C5 -C1 C5
+ pmaddwd mm4, mm0 ; -C1R3+C5R1 -C1r3+C5r1
+ movq mm7, [coeffs + 88] ; C3 C7 C3 C7
+ pmaddwd mm0, [coeffs + 96] ; -C5R3+C7R1 -C5r3+C7r1
+ pmaddwd mm7, mm3 ; C3R7+C7R5 C3r7+C7r5
+ movq mm2, mm5 ; A2 a2
+ pmaddwd mm3, [coeffs + 104] ; -C1R7+C3R5 -C1r7+C3r5
+ paddd mm4, mm7 ; B2 b2
+ paddd mm2, mm4 ; A2+B2 a2+b2
+ psubd mm5, mm4 ; a2-B2 a2-b2
+ psrad mm2, %6
+ psrad mm5, %6
+ movq mm4, mm6 ; A3 a3
+ paddd mm3, mm0 ; B3 b3
+ paddd mm6, mm3 ; A3+B3 a3+b3
+ psubd mm4, mm3 ; a3-B3 a3-b3
+ psrad mm6, %6
+ psrad mm4, %6
+ packssdw mm2, mm2 ; A2+B2 a2+b2
+ packssdw mm6, mm6 ; A3+B3 a3+b3
+ movd [32 + %5], mm2
+ packssdw mm4, mm4 ; A3-B3 a3-b3
+ packssdw mm5, mm5 ; A2-B2 a2-b2
+ movd [48 + %5], mm6
+ movd [64 + %5], mm4
+ movd [80 + %5], mm5
+%endmacro
+
+%macro IDCT5 6
+ movq mm0, %1 ; R4 R0 r4 r0
+ movq mm2, %3 ; R3 R1 r3 r1
+ movq mm4, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm4, mm0 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm0, mm5 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm6, mm4 ; C4R4+C4R0 C4r4+C4r0
+ movq mm7, [coeffs + 48] ; C3 C1 C3 C1
+ pmaddwd mm7, mm2 ; C3R3+C1R1 C3r3+C1r1
+ movq mm5, mm0 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm3, [coeffs + 64]
+ pmaddwd mm3, mm2 ; -C7R3+C3R1 -C7r3+C3r1
+ paddd mm7, mm4 ; A0+B0 a0+b0
+ paddd mm4, mm4 ; 2A0 2a0
+ psubd mm4, mm7 ; A0-B0 a0-b0
+ psrad mm7, %6
+ psrad mm4, %6
+ movq mm1, mm0 ; A1 a1
+ paddd mm0, mm3 ; A1+B1 a1+b1
+ psubd mm1, mm3 ; A1-B1 a1-b1
+ psrad mm0, %6
+ psrad mm1, %6
+ packssdw mm7, mm7 ; A0+B0 a0+b0
+ movd [%5], mm7
+ packssdw mm0, mm0 ; A1+B1 a1+b1
+ movd [16 + %5], mm0
+ packssdw mm1, mm1 ; A1-B1 a1-b1
+ movd [96 + %5], mm1
+ packssdw mm4, mm4 ; A0-B0 a0-b0
+ movd [112 + %5], mm4
+ movq mm4, [coeffs + 80] ; -C1 C5 -C1 C5
+ pmaddwd mm4, mm2 ; -C1R3+C5R1 -C1r3+C5r1
+ pmaddwd mm2, [coeffs + 96] ; -C5R3+C7R1 -C5r3+C7r1
+ movq mm1, mm5 ; A2 a2
+ paddd mm1, mm4 ; A2+B2 a2+b2
+ psubd mm5, mm4 ; a2-B2 a2-b2
+ psrad mm1, %6
+ psrad mm5, %6
+ movq mm4, mm6 ; A3 a3
+ paddd mm6, mm2 ; A3+B3 a3+b3
+ psubd mm4, mm2 ; a3-B3 a3-b3
+ psrad mm6, %6
+ psrad mm4, %6
+ packssdw mm1, mm1 ; A2+B2 a2+b2
+ packssdw mm6, mm6 ; A3+B3 a3+b3
+ movd [32 + %5], mm1
+ packssdw mm4, mm4 ; A3-B3 a3-b3
+ packssdw mm5, mm5 ; A2-B2 a2-b2
+ movd [48 + %5], mm6
+ movd [64 + %5], mm4
+ movd [80 + %5], mm5
+%endmacro
+
+%macro IDCT6 6
+ movq mm0, [%1] ; R4 R0 r4 r0
+ movq mm1, [%2] ; R6 R2 r6 r2
+ movq mm4, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm4, mm0 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm0, mm5 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm5, [coeffs + 32] ; C6 C2 C6 C2
+ pmaddwd mm5, mm1 ; C6R6+C2R2 C6r6+C2r2
+ movq mm6, [coeffs + 40] ; -C2 C6 -C2 C6
+ pmaddwd mm1, mm6 ; -C2R6+C6R2 -C2r6+C6r2
+ movq mm6, mm4 ; C4R4+C4R0 C4r4+C4r0
+ paddd mm4, mm5 ; A0 a0
+ psubd mm6, mm5 ; A3 a3
+ movq mm5, mm0 ; -C4R4+C4R0 -C4r4+C4r0
+ paddd mm0, mm1 ; A1 a1
+ psubd mm5, mm1 ; A2 a2
+ movq mm2, [8 + %1] ; R4 R0 r4 r0
+ movq mm3, [8 + %2] ; R6 R2 r6 r2
+ movq mm1, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm1, mm2 ; C4R4+C4R0 C4r4+C4r0
+ movq mm7, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm2, mm7 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm7, [coeffs + 32] ; C6 C2 C6 C2
+ pmaddwd mm7, mm3 ; C6R6+C2R2 C6r6+C2r2
+ pmaddwd mm3, [coeffs + 40] ; -C2R6+C6R2 -C2r6+C6r2
+ paddd mm7, mm1 ; A0 a0
+ paddd mm1, mm1 ; 2C0 2c0
+ psubd mm1, mm7 ; A3 a3
+ paddd mm3, mm2 ; A1 a1
+ paddd mm2, mm2 ; 2C1 2c1
+ psubd mm2, mm3 ; A2 a2
+ psrad mm4, %6
+ psrad mm7, %6
+ psrad mm3, %6
+ packssdw mm4, mm7 ; A0 a0
+ movq [%5], mm4
+ psrad mm0, %6
+ packssdw mm0, mm3 ; A1 a1
+ movq [16 + %5], mm0
+ movq [96 + %5], mm0
+ movq [112 + %5], mm4
+ psrad mm5, %6
+ psrad mm6, %6
+ psrad mm2, %6
+ packssdw mm5, mm2 ; A2-B2 a2-b2
+ movq [32 + %5], mm5
+ psrad mm1, %6
+ packssdw mm6, mm1 ; A3+B3 a3+b3
+ movq [48 + %5], mm6
+ movq [64 + %5], mm6
+ movq [80 + %5], mm5
+%endmacro
+
+%macro IDCT7 6
+ movq mm0, %1 ; R4 R0 r4 r0
+ movq mm1, %2 ; R6 R2 r6 r2
+ movq mm2, %3 ; R3 R1 r3 r1
+ movq mm4, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm4, mm0 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm0, mm5 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm5, [coeffs + 32] ; C6 C2 C6 C2
+ pmaddwd mm5, mm1 ; C6R6+C2R2 C6r6+C2r2
+ movq mm6, [coeffs + 40] ; -C2 C6 -C2 C6
+ pmaddwd mm1, mm6 ; -C2R6+C6R2 -C2r6+C6r2
+ movq mm6, mm4 ; C4R4+C4R0 C4r4+C4r0
+ movq mm7, [coeffs + 48] ; C3 C1 C3 C1
+ pmaddwd mm7, mm2 ; C3R3+C1R1 C3r3+C1r1
+ paddd mm4, mm5 ; A0 a0
+ psubd mm6, mm5 ; A3 a3
+ movq mm5, mm0 ; -C4R4+C4R0 -C4r4+C4r0
+ paddd mm0, mm1 ; A1 a1
+ psubd mm5, mm1 ; A2 a2
+ movq mm1, [coeffs + 64]
+ pmaddwd mm1, mm2 ; -C7R3+C3R1 -C7r3+C3r1
+ paddd mm7, mm4 ; A0+B0 a0+b0
+ paddd mm4, mm4 ; 2A0 2a0
+ psubd mm4, mm7 ; A0-B0 a0-b0
+ psrad mm7, %6
+ psrad mm4, %6
+ movq mm3, mm0 ; A1 a1
+ paddd mm0, mm1 ; A1+B1 a1+b1
+ psubd mm3, mm1 ; A1-B1 a1-b1
+ psrad mm0, %6
+ psrad mm3, %6
+ packssdw mm7, mm7 ; A0+B0 a0+b0
+ movd [%5], mm7
+ packssdw mm0, mm0 ; A1+B1 a1+b1
+ movd [16 + %5], mm0
+ packssdw mm3, mm3 ; A1-B1 a1-b1
+ movd [96 + %5], mm3
+ packssdw mm4, mm4 ; A0-B0 a0-b0
+ movd [112 + %5], mm4
+ movq mm4, [coeffs + 80] ; -C1 C5 -C1 C5
+ pmaddwd mm4, mm2 ; -C1R3+C5R1 -C1r3+C5r1
+ pmaddwd mm2, [coeffs + 96] ; -C5R3+C7R1 -C5r3+C7r1
+ movq mm3, mm5 ; A2 a2
+ paddd mm3, mm4 ; A2+B2 a2+b2
+ psubd mm5, mm4 ; a2-B2 a2-b2
+ psrad mm3, %6
+ psrad mm5, %6
+ movq mm4, mm6 ; A3 a3
+ paddd mm6, mm2 ; A3+B3 a3+b3
+ psubd mm4, mm2 ; a3-B3 a3-b3
+ psrad mm6, %6
+ packssdw mm3, mm3 ; A2+B2 a2+b2
+ movd [32 + %5], mm3
+ psrad mm4, %6
+ packssdw mm6, mm6 ; A3+B3 a3+b3
+ movd [48 + %5], mm6
+ packssdw mm4, mm4 ; A3-B3 a3-b3
+ packssdw mm5, mm5 ; A2-B2 a2-b2
+ movd [64 + %5], mm4
+ movd [80 + %5], mm5
+%endmacro
+
+%macro IDCT8 6
+ movq mm0, [%1] ; R4 R0 r4 r0
+ movq mm4, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm4, mm0 ; C4R4+C4R0 C4r4+C4r0
+ movq mm5, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm0, mm5 ; -C4R4+C4R0 -C4r4+C4r0
+ psrad mm4, %6
+ psrad mm0, %6
+ movq mm2, [8 + %1] ; R4 R0 r4 r0
+ movq mm1, [coeffs + 16] ; C4 C4 C4 C4
+ pmaddwd mm1, mm2 ; C4R4+C4R0 C4r4+C4r0
+ movq mm7, [coeffs + 24] ; -C4 C4 -C4 C4
+ pmaddwd mm2, mm7 ; -C4R4+C4R0 -C4r4+C4r0
+ movq mm7, [coeffs + 32] ; C6 C2 C6 C2
+ psrad mm1, %6
+ packssdw mm4, mm1 ; A0 a0
+ movq [%5], mm4
+ psrad mm2, %6
+ packssdw mm0, mm2 ; A1 a1
+ movq [16 + %5], mm0
+ movq [96 + %5], mm0
+ movq [112 + %5], mm4
+ movq [32 + %5], mm0
+ movq [48 + %5], mm4
+ movq [64 + %5], mm4
+ movq [80 + %5], mm0
+%endmacro
+
+%macro IDCT 0
+ DC_COND_IDCT 0, 8, 16, 24, rsp + 0, null, 11
+ Z_COND_IDCT 32, 40, 48, 56, rsp + 32, null, 11, %%4
+ Z_COND_IDCT 64, 72, 80, 88, rsp + 64, null, 11, %%2
+ Z_COND_IDCT 96, 104, 112, 120, rsp + 96, null, 11, %%1
+
+ IDCT1 [rsp + 0], [rsp + 64], [rsp + 32], [rsp + 96], blockq + 0, 20
+ IDCT1 [rsp + 8], [rsp + 72], [rsp + 40], [rsp + 104], blockq + 4, 20
+ IDCT1 [rsp + 16], [rsp + 80], [rsp + 48], [rsp + 112], blockq + 8, 20
+ IDCT1 [rsp + 24], [rsp + 88], [rsp + 56], [rsp + 120], blockq + 12, 20
+ jmp %%9
+
+ ALIGN 16
+ %%4:
+ Z_COND_IDCT 64, 72, 80, 88, rsp + 64, null, 11, %%6
+ Z_COND_IDCT 96, 104, 112, 120, rsp + 96, null, 11, %%5
+
+ IDCT2 [rsp + 0], [rsp + 64], [rsp + 32], [rsp + 96], blockq + 0, 20
+ IDCT2 [rsp + 8], [rsp + 72], [rsp + 40], [rsp + 104], blockq + 4, 20
+ IDCT2 [rsp + 16], [rsp + 80], [rsp + 48], [rsp + 112], blockq + 8, 20
+ IDCT2 [rsp + 24], [rsp + 88], [rsp + 56], [rsp + 120], blockq + 12, 20
+ jmp %%9
+
+ ALIGN 16
+ %%6:
+ Z_COND_IDCT 96, 104, 112, 120, rsp + 96, null, 11, %%7
+
+ IDCT3 [rsp + 0], [rsp + 64], [rsp + 32], [rsp + 96], blockq + 0, 20
+ IDCT3 [rsp + 8], [rsp + 72], [rsp + 40], [rsp + 104], blockq + 4, 20
+ IDCT3 [rsp + 16], [rsp + 80], [rsp + 48], [rsp + 112], blockq + 8, 20
+ IDCT3 [rsp + 24], [rsp + 88], [rsp + 56], [rsp + 120], blockq + 12, 20
+ jmp %%9
+
+ ALIGN 16
+ %%2:
+ Z_COND_IDCT 96, 104, 112, 120, rsp + 96, null, 11, %%3
+
+ IDCT4 [rsp + 0], [rsp + 64], [rsp + 32], [rsp + 96], blockq + 0, 20
+ IDCT4 [rsp + 8], [rsp + 72], [rsp + 40], [rsp + 104], blockq + 4, 20
+ IDCT4 [rsp + 16], [rsp + 80], [rsp + 48], [rsp + 112], blockq + 8, 20
+ IDCT4 [rsp + 24], [rsp + 88], [rsp + 56], [rsp + 120], blockq + 12, 20
+ jmp %%9
+
+ ALIGN 16
+ %%3:
+
+ IDCT5 [rsp + 0], [rsp + 64], [rsp + 32], [rsp + 96], blockq + 0, 20
+ IDCT5 [rsp + 8], [rsp + 72], [rsp + 40], [rsp + 104], blockq + 4, 20
+ IDCT5 [rsp + 16], [rsp + 80], [rsp + 48], [rsp + 112], blockq + 8, 20
+ IDCT5 [rsp + 24], [rsp + 88], [rsp + 56], [rsp + 120], blockq + 12, 20
+ jmp %%9
+
+ ALIGN 16
+ %%5:
+
+ IDCT6 rsp + 0, rsp + 64, rsp + 32, rsp + 96, blockq + 0, 20
+ IDCT6 rsp + 16, rsp + 80, rsp + 48, rsp + 112, blockq + 8, 20
+ jmp %%9
+
+ ALIGN 16
+ %%1:
+
+ IDCT7 [rsp + 0], [rsp + 64], [rsp + 32], [rsp + 96], blockq + 0, 20
+ IDCT7 [rsp + 8], [rsp + 72], [rsp + 40], [rsp + 104], blockq + 4, 20
+ IDCT7 [rsp + 16], [rsp + 80], [rsp + 48], [rsp + 112], blockq + 8, 20
+ IDCT7 [rsp + 24], [rsp + 88], [rsp + 56], [rsp + 120], blockq + 12, 20
+ jmp %%9
+
+ ALIGN 16
+ %%7:
+
+ IDCT8 rsp + 0, rsp + 64, rsp + 32, rsp + 96, blockq + 0, 20
+ IDCT8 rsp + 16, rsp + 80, rsp + 48, rsp + 112, blockq + 8, 20
+
+ %%9:
+%endmacro
+
+%macro PUT_PIXELS_CLAMPED_HALF 1
+ mova m0, [blockq+mmsize*0+%1]
+ mova m1, [blockq+mmsize*2+%1]
+%if mmsize == 8
+ mova m2, [blockq+mmsize*4+%1]
+ mova m3, [blockq+mmsize*6+%1]
+%endif
+ packuswb m0, [blockq+mmsize*1+%1]
+ packuswb m1, [blockq+mmsize*3+%1]
+%if mmsize == 8
+ packuswb m2, [blockq+mmsize*5+%1]
+ packuswb m3, [blockq+mmsize*7+%1]
+ movq [pixelsq], m0
+ movq [lsizeq+pixelsq], m1
+ movq [2*lsizeq+pixelsq], m2
+ movq [lsize3q+pixelsq], m3
+%else
+ movq [pixelsq], m0
+ movhps [lsizeq+pixelsq], m0
+ movq [2*lsizeq+pixelsq], m1
+ movhps [lsize3q+pixelsq], m1
+%endif
+%endmacro
+
+%macro ADD_PIXELS_CLAMPED 1
+ mova m0, [blockq+mmsize*0+%1]
+ mova m1, [blockq+mmsize*1+%1]
+%if mmsize == 8
+ mova m5, [blockq+mmsize*2+%1]
+ mova m6, [blockq+mmsize*3+%1]
+%endif
+ movq m2, [pixelsq]
+ movq m3, [pixelsq+lsizeq]
+%if mmsize == 8
+ mova m7, m2
+ punpcklbw m2, m4
+ punpckhbw m7, m4
+ paddsw m0, m2
+ paddsw m1, m7
+ mova m7, m3
+ punpcklbw m3, m4
+ punpckhbw m7, m4
+ paddsw m5, m3
+ paddsw m6, m7
+%else
+ punpcklbw m2, m4
+ punpcklbw m3, m4
+ paddsw m0, m2
+ paddsw m1, m3
+%endif
+ packuswb m0, m1
+%if mmsize == 8
+ packuswb m5, m6
+ movq [pixelsq], m0
+ movq [pixelsq+lsizeq], m5
+%else
+ movq [pixelsq], m0
+ movhps [pixelsq+lsizeq], m0
+%endif
+%endmacro
+
+INIT_MMX mmx
+
+cglobal simple_idct, 1, 2, 8, 128, block, t0
+ IDCT
+RET
+
+INIT_XMM sse2
+
+cglobal simple_idct_put, 3, 5, 8, 128, pixels, lsize, block, lsize3, t0
+ IDCT
+ lea lsize3q, [lsizeq*3]
+ PUT_PIXELS_CLAMPED_HALF 0
+ lea pixelsq, [pixelsq+lsizeq*4]
+ PUT_PIXELS_CLAMPED_HALF 64
+RET
+
+cglobal simple_idct_add, 3, 4, 8, 128, pixels, lsize, block, t0
+ IDCT
+ pxor m4, m4
+ ADD_PIXELS_CLAMPED 0
+ lea pixelsq, [pixelsq+lsizeq*2]
+ ADD_PIXELS_CLAMPED 32
+ lea pixelsq, [pixelsq+lsizeq*2]
+ ADD_PIXELS_CLAMPED 64
+ lea pixelsq, [pixelsq+lsizeq*2]
+ ADD_PIXELS_CLAMPED 96
+RET
+%endif
diff --git a/media/ffvpx/libavcodec/x86/simple_idct.h b/media/ffvpx/libavcodec/x86/simple_idct.h
new file mode 100644
index 0000000000..9b64cfe9bc
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/simple_idct.h
@@ -0,0 +1,53 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_SIMPLE_IDCT_H
+#define AVCODEC_X86_SIMPLE_IDCT_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+void ff_simple_idct_mmx(int16_t *block);
+void ff_simple_idct_add_mmx(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct_put_mmx(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+
+void ff_simple_idct_add_sse2(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct_put_sse2(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+
+void ff_simple_idct8_sse2(int16_t *block);
+void ff_simple_idct8_avx(int16_t *block);
+
+void ff_simple_idct8_put_sse2(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct8_put_avx(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+
+void ff_simple_idct8_add_sse2(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct8_add_avx(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+
+void ff_simple_idct10_sse2(int16_t *block);
+void ff_simple_idct10_avx(int16_t *block);
+
+void ff_simple_idct10_put_sse2(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct10_put_avx(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+
+void ff_simple_idct12_sse2(int16_t *block);
+void ff_simple_idct12_avx(int16_t *block);
+
+void ff_simple_idct12_put_sse2(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+void ff_simple_idct12_put_avx(uint8_t *dest, ptrdiff_t line_size, int16_t *block);
+
+#endif /* AVCODEC_X86_SIMPLE_IDCT_H */
diff --git a/media/ffvpx/libavcodec/x86/simple_idct10.asm b/media/ffvpx/libavcodec/x86/simple_idct10.asm
new file mode 100644
index 0000000000..069bb61378
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/simple_idct10.asm
@@ -0,0 +1,205 @@
+;******************************************************************************
+;* x86-SIMD-optimized IDCT for prores
+;* this is identical to "simple" IDCT written by Michael Niedermayer
+;* except for the clip range
+;*
+;* Copyright (c) 2011 Ronald S. Bultje <rsbultje@gmail.com>
+;* Copyright (c) 2015 Christophe Gisquet
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+%if ARCH_X86_64
+
+SECTION_RODATA
+
+cextern pw_2
+cextern pw_16
+cextern pw_32
+cextern pw_1023
+cextern pw_4095
+pd_round_11: times 4 dd 1<<(11-1)
+pd_round_12: times 4 dd 1<<(12-1)
+pd_round_15: times 4 dd 1<<(15-1)
+pd_round_19: times 4 dd 1<<(19-1)
+pd_round_20: times 4 dd 1<<(20-1)
+
+%macro CONST_DEC 3
+const %1
+times 4 dw %2, %3
+%endmacro
+
+%define W1sh2 22725 ; W1 = 90901 = 22725<<2 + 1
+%define W2sh2 21407 ; W2 = 85627 = 21407<<2 - 1
+%define W3sh2 19265 ; W3 = 77062 = 19265<<2 + 2
+%define W4sh2 16384 ; W4 = 65535 = 16384<<2 - 1
+%define W3sh2_lo 19266
+%define W4sh2_lo 16383
+%define W5sh2 12873 ; W5 = 51491 = 12873<<2 - 1
+%define W6sh2 8867 ; W6 = 35468 = 8867<<2
+%define W7sh2 4520 ; W7 = 18081 = 4520<<2 + 1
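+
+; An illustrative reading of the constants above: WNsh2 holds
+; cos(N*pi/16)*sqrt(2) with 14 fractional bits, while the full-precision WN
+; in the comments carries two extra bits, e.g.
+; W1 = 22725<<2 + 1 = 90901 ~= cos(pi/16)*sqrt(2)*(1<<16).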
+
+CONST_DEC w4_plus_w2_hi, W4sh2, +W2sh2
+CONST_DEC w4_min_w2_hi, W4sh2, -W2sh2
+CONST_DEC w4_plus_w6_hi, W4sh2, +W6sh2
+CONST_DEC w4_min_w6_hi, W4sh2, -W6sh2
+CONST_DEC w1_plus_w3_hi, W1sh2, +W3sh2
+CONST_DEC w3_min_w1_hi, W3sh2, -W1sh2
+CONST_DEC w7_plus_w3_hi, W7sh2, +W3sh2
+CONST_DEC w3_min_w7_hi, W3sh2, -W7sh2
+CONST_DEC w1_plus_w5, W1sh2, +W5sh2
+CONST_DEC w5_min_w1, W5sh2, -W1sh2
+CONST_DEC w5_plus_w7, W5sh2, +W7sh2
+CONST_DEC w7_min_w5, W7sh2, -W5sh2
+CONST_DEC w4_plus_w2_lo, W4sh2_lo, +W2sh2
+CONST_DEC w4_min_w2_lo, W4sh2_lo, -W2sh2
+CONST_DEC w4_plus_w6_lo, W4sh2_lo, +W6sh2
+CONST_DEC w4_min_w6_lo, W4sh2_lo, -W6sh2
+CONST_DEC w1_plus_w3_lo, W1sh2, +W3sh2_lo
+CONST_DEC w3_min_w1_lo, W3sh2_lo, -W1sh2
+CONST_DEC w7_plus_w3_lo, W7sh2, +W3sh2_lo
+CONST_DEC w3_min_w7_lo, W3sh2_lo, -W7sh2
+
+%include "libavcodec/x86/simple_idct10_template.asm"
+
+SECTION .text
+
+%macro STORE_HI_LO 12
+ movq %1, %9
+ movq %3, %10
+ movq %5, %11
+ movq %7, %12
+ movhps %2, %9
+ movhps %4, %10
+ movhps %6, %11
+ movhps %8, %12
+%endmacro
+
+%macro LOAD_ZXBW_8 16
+ pmovzxbw %1, %9
+ pmovzxbw %2, %10
+ pmovzxbw %3, %11
+ pmovzxbw %4, %12
+ pmovzxbw %5, %13
+ pmovzxbw %6, %14
+ pmovzxbw %7, %15
+ pmovzxbw %8, %16
+%endmacro
+
+%macro LOAD_ZXBW_4 9
+ movh %1, %5
+ movh %2, %6
+ movh %3, %7
+ movh %4, %8
+ punpcklbw %1, %9
+ punpcklbw %2, %9
+ punpcklbw %3, %9
+ punpcklbw %4, %9
+%endmacro
+
+%define PASS4ROWS(base, stride, stride3) \
+ [base], [base + stride], [base + 2*stride], [base + stride3]
+
+%macro idct_fn 0
+
+define_constants _lo
+
+cglobal simple_idct8, 1, 1, 16, 32, block
+ IDCT_FN "", 11, pw_32, 20, "store"
+RET
+
+cglobal simple_idct8_put, 3, 4, 16, 32, pixels, lsize, block
+ IDCT_FN "", 11, pw_32, 20
+ lea r3, [3*lsizeq]
+ lea r2, [pixelsq + r3]
+ packuswb m8, m0
+ packuswb m1, m2
+ packuswb m4, m11
+ packuswb m9, m10
+ STORE_HI_LO PASS8ROWS(pixelsq, r2, lsizeq, r3), m8, m1, m4, m9
+RET
+
+cglobal simple_idct8_add, 3, 4, 16, 32, pixels, lsize, block
+ IDCT_FN "", 11, pw_32, 20
+ lea r2, [3*lsizeq]
+ %if cpuflag(sse4)
+ lea r3, [pixelsq + r2]
+ LOAD_ZXBW_8 m3, m5, m6, m7, m12, m13, m14, m15, PASS8ROWS(pixelsq, r3, lsizeq, r2)
+ paddsw m8, m3
+ paddsw m0, m5
+ paddsw m1, m6
+ paddsw m2, m7
+ paddsw m4, m12
+ paddsw m11, m13
+ paddsw m9, m14
+ paddsw m10, m15
+ %else
+ pxor m12, m12
+ LOAD_ZXBW_4 m3, m5, m6, m7, PASS4ROWS(pixelsq, lsizeq, r2), m12
+ paddsw m8, m3
+ paddsw m0, m5
+ paddsw m1, m6
+ paddsw m2, m7
+ lea r3, [pixelsq + 4*lsizeq]
+ LOAD_ZXBW_4 m3, m5, m6, m7, PASS4ROWS(r3, lsizeq, r2), m12
+ paddsw m4, m3
+ paddsw m11, m5
+ paddsw m9, m6
+ paddsw m10, m7
+ lea r3, [pixelsq + r2]
+ %endif
+ packuswb m8, m0
+ packuswb m1, m2
+ packuswb m4, m11
+ packuswb m9, m10
+ STORE_HI_LO PASS8ROWS(pixelsq, r3, lsizeq, r2), m8, m1, m4, m9
+RET
+
+define_constants _hi
+
+cglobal simple_idct10, 1, 1, 16, block
+ IDCT_FN "", 12, "", 19, "store"
+ RET
+
+cglobal simple_idct10_put, 3, 3, 16, pixels, lsize, block
+ IDCT_FN "", 12, "", 19, "put", 0, pw_1023
+ RET
+
+cglobal simple_idct12, 1, 1, 16, block
+ ; coeffs are already 15bits, adding the offset would cause
+ ; overflow in the input
+ IDCT_FN "", 15, pw_2, 16, "store"
+ RET
+
+cglobal simple_idct12_put, 3, 3, 16, pixels, lsize, block
+ ; range isn't known, so the C simple_idct range is used
+ ; Also, using a bias on input overflows, so use the bias
+ ; on output of the first butterfly instead
+ IDCT_FN "", 15, pw_2, 16, "put", 0, pw_4095
+ RET
+%endmacro
+
+INIT_XMM sse2
+idct_fn
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+idct_fn
+%endif
+
+%endif
diff --git a/media/ffvpx/libavcodec/x86/simple_idct10_template.asm b/media/ffvpx/libavcodec/x86/simple_idct10_template.asm
new file mode 100644
index 0000000000..0d04a9818a
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/simple_idct10_template.asm
@@ -0,0 +1,369 @@
+;******************************************************************************
+;* x86-SIMD-optimized IDCT for prores
+;* this is identical to "simple" IDCT written by Michael Niedermayer
+;* except for the clip range
+;*
+;* Copyright (c) 2011 Ronald S. Bultje <rsbultje@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+; add SECTION_RODATA and proper include before including this file!
+
+%if ARCH_X86_64
+
+%macro define_constants 1
+ %undef w4_plus_w2
+ %undef w4_min_w2
+ %undef w4_plus_w6
+ %undef w4_min_w6
+ %undef w1_plus_w3
+ %undef w3_min_w1
+ %undef w7_plus_w3
+ %undef w3_min_w7
+ %define w4_plus_w2 w4_plus_w2%1
+ %define w4_min_w2 w4_min_w2%1
+ %define w4_plus_w6 w4_plus_w6%1
+ %define w4_min_w6 w4_min_w6%1
+ %define w1_plus_w3 w1_plus_w3%1
+ %define w3_min_w1 w3_min_w1%1
+ %define w7_plus_w3 w7_plus_w3%1
+ %define w3_min_w7 w3_min_w7%1
+%endmacro
+
+; interleave data while maintaining source
+; %1=type, %2=dstlo, %3=dsthi, %4=src, %5=interleave
+%macro SBUTTERFLY3 5
+ punpckl%1 m%2, m%4, m%5
+ punpckh%1 m%3, m%4, m%5
+%endmacro
+
+; %1/%2=src1/dst1, %3/%4=dst2, %5/%6=src2, %7=shift
+; action: %3/%4 = %1/%2 - %5/%6; %1/%2 += %5/%6
+; %1/%2/%3/%4 >>= %7; dword -> word (in %1/%3)
+%macro SUMSUB_SHPK 7
+ psubd %3, %1, %5 ; { a0 - b0 }[0-3]
+ psubd %4, %2, %6 ; { a0 - b0 }[4-7]
+ paddd %1, %5 ; { a0 + b0 }[0-3]
+ paddd %2, %6 ; { a0 + b0 }[4-7]
+ psrad %1, %7
+ psrad %2, %7
+ psrad %3, %7
+ psrad %4, %7
+ packssdw %1, %2 ; row[0]
+ packssdw %3, %4 ; row[7]
+%endmacro
+
+; %1 = initial bias ("" if nop)
+; %2 = number of bits to shift at the end
+; %3 = qmat (for prores)
+%macro IDCT_1D 2-3
+ ; a0 = (W4 * row[0]) + (1 << (15 - 1));
+ ; a1 = a0;
+ ; a2 = a0;
+ ; a3 = a0;
+ ; a0 += W2 * row[2];
+ ; a1 += W6 * row[2];
+ ; a2 -= W6 * row[2];
+ ; a3 -= W2 * row[2];
+%ifstr %1
+ mova m15, [pd_round_ %+ %2]
+%else
+ paddw m10, [%1]
+%endif
+ SBUTTERFLY3 wd, 0, 1, 10, 8 ; { row[0], row[2] }[0-3]/[4-7]
+ pmaddwd m2, m0, [w4_plus_w6]
+ pmaddwd m3, m1, [w4_plus_w6]
+ pmaddwd m4, m0, [w4_min_w6]
+ pmaddwd m5, m1, [w4_min_w6]
+ pmaddwd m6, m0, [w4_min_w2]
+ pmaddwd m7, m1, [w4_min_w2]
+ pmaddwd m0, [w4_plus_w2]
+ pmaddwd m1, [w4_plus_w2]
+%ifstr %1
+ ; Adding 1<<(%2-1) for >=15 bits values
+ paddd m2, m15
+ paddd m3, m15
+ paddd m4, m15
+ paddd m5, m15
+ paddd m6, m15
+ paddd m7, m15
+ paddd m0, m15
+ paddd m1, m15
+%endif
+
+ ; a0: -1*row[0]-1*row[2]
+ ; a1: -1*row[0]
+ ; a2: -1*row[0]
+ ; a3: -1*row[0]+1*row[2]
+
+ ; a0 += W4*row[4] + W6*row[6]; i.e. -1*row[4]
+ ; a1 -= W4*row[4] + W2*row[6]; i.e. -1*row[4]-1*row[6]
+ ; a2 -= W4*row[4] - W2*row[6]; i.e. -1*row[4]+1*row[6]
+ ; a3 += W4*row[4] - W6*row[6]; i.e. -1*row[4]
+ SBUTTERFLY3 wd, 8, 9, 13, 12 ; { row[4], row[6] }[0-3]/[4-7]
+ pmaddwd m10, m8, [w4_plus_w6]
+ pmaddwd m11, m9, [w4_plus_w6]
+ paddd m0, m10 ; a0[0-3]
+ paddd m1, m11 ; a0[4-7]
+ pmaddwd m10, m8, [w4_min_w6]
+ pmaddwd m11, m9, [w4_min_w6]
+ paddd m6, m10 ; a3[0-3]
+ paddd m7, m11 ; a3[4-7]
+ pmaddwd m10, m8, [w4_min_w2]
+ pmaddwd m11, m9, [w4_min_w2]
+ pmaddwd m8, [w4_plus_w2]
+ pmaddwd m9, [w4_plus_w2]
+ psubd m4, m10 ; a2[0-3] intermediate
+ psubd m5, m11 ; a2[4-7] intermediate
+ psubd m2, m8 ; a1[0-3] intermediate
+ psubd m3, m9 ; a1[4-7] intermediate
+
+ ; load/store
+ mova [blockq+ 0], m0
+ mova [blockq+ 32], m2
+ mova [blockq+ 64], m4
+ mova [blockq+ 96], m6
+ mova m10,[blockq+ 16] ; { row[1] }[0-7]
+ mova m8, [blockq+ 48] ; { row[3] }[0-7]
+ mova m13,[blockq+ 80] ; { row[5] }[0-7]
+ mova m14,[blockq+112] ; { row[7] }[0-7]
+ mova [blockq+ 16], m1
+ mova [blockq+ 48], m3
+ mova [blockq+ 80], m5
+ mova [blockq+112], m7
+%if %0 == 3
+ pmullw m10,[%3+ 16]
+ pmullw m8, [%3+ 48]
+ pmullw m13,[%3+ 80]
+ pmullw m14,[%3+112]
+%endif
+
+ ; b0 = MUL(W1, row[1]);
+ ; MAC(b0, W3, row[3]);
+ ; b1 = MUL(W3, row[1]);
+ ; MAC(b1, -W7, row[3]);
+ ; b2 = MUL(W5, row[1]);
+ ; MAC(b2, -W1, row[3]);
+ ; b3 = MUL(W7, row[1]);
+ ; MAC(b3, -W5, row[3]);
+ SBUTTERFLY3 wd, 0, 1, 10, 8 ; { row[1], row[3] }[0-3]/[4-7]
+ pmaddwd m2, m0, [w3_min_w7]
+ pmaddwd m3, m1, [w3_min_w7]
+ pmaddwd m4, m0, [w5_min_w1]
+ pmaddwd m5, m1, [w5_min_w1]
+ pmaddwd m6, m0, [w7_min_w5]
+ pmaddwd m7, m1, [w7_min_w5]
+ pmaddwd m0, [w1_plus_w3]
+ pmaddwd m1, [w1_plus_w3]
+
+ ; b0: +1*row[1]+2*row[3]
+ ; b1: +2*row[1]-1*row[3]
+ ; b2: -1*row[1]-1*row[3]
+ ; b3: +1*row[1]+1*row[3]
+
+ ; MAC(b0, W5, row[5]);
+ ; MAC(b0, W7, row[7]);
+ ; MAC(b1, -W1, row[5]);
+ ; MAC(b1, -W5, row[7]);
+ ; MAC(b2, W7, row[5]);
+ ; MAC(b2, W3, row[7]);
+ ; MAC(b3, W3, row[5]);
+ ; MAC(b3, -W1, row[7]);
+ SBUTTERFLY3 wd, 8, 9, 13, 14 ; { row[5], row[7] }[0-3]/[4-7]
+
+ ; b0: -1*row[5]+1*row[7]
+ ; b1: -1*row[5]+1*row[7]
+ ; b2: +1*row[5]+2*row[7]
+ ; b3: +2*row[5]-1*row[7]
+
+ pmaddwd m10, m8, [w1_plus_w5]
+ pmaddwd m11, m9, [w1_plus_w5]
+ pmaddwd m12, m8, [w5_plus_w7]
+ pmaddwd m13, m9, [w5_plus_w7]
+ psubd m2, m10 ; b1[0-3]
+ psubd m3, m11 ; b1[4-7]
+ paddd m0, m12 ; b0[0-3]
+ paddd m1, m13 ; b0[4-7]
+ pmaddwd m12, m8, [w7_plus_w3]
+ pmaddwd m13, m9, [w7_plus_w3]
+ pmaddwd m8, [w3_min_w1]
+ pmaddwd m9, [w3_min_w1]
+ paddd m4, m12 ; b2[0-3]
+ paddd m5, m13 ; b2[4-7]
+ paddd m6, m8 ; b3[0-3]
+ paddd m7, m9 ; b3[4-7]
+
+ ; row[0] = (a0 + b0) >> 15;
+ ; row[7] = (a0 - b0) >> 15;
+ ; row[1] = (a1 + b1) >> 15;
+ ; row[6] = (a1 - b1) >> 15;
+ ; row[2] = (a2 + b2) >> 15;
+ ; row[5] = (a2 - b2) >> 15;
+ ; row[3] = (a3 + b3) >> 15;
+ ; row[4] = (a3 - b3) >> 15;
+ mova m8, [blockq+ 0] ; a0[0-3]
+ mova m9, [blockq+16] ; a0[4-7]
+ SUMSUB_SHPK m8, m9, m10, m11, m0, m1, %2
+ mova m0, [blockq+32] ; a1[0-3]
+ mova m1, [blockq+48] ; a1[4-7]
+ SUMSUB_SHPK m0, m1, m9, m11, m2, m3, %2
+ mova m1, [blockq+64] ; a2[0-3]
+ mova m2, [blockq+80] ; a2[4-7]
+ SUMSUB_SHPK m1, m2, m11, m3, m4, m5, %2
+ mova m2, [blockq+96] ; a3[0-3]
+ mova m3, [blockq+112] ; a3[4-7]
+ SUMSUB_SHPK m2, m3, m4, m5, m6, m7, %2
+%endmacro
+
+; void ff_prores_idct_put_10_<opt>(uint8_t *pixels, ptrdiff_t stride,
+; int16_t *block, const int16_t *qmat);
+
+; %1 = row bias macro ("" if none)
+; %2 = row shift
+; %3 = column bias macro ("" if none)
+; %4 = column shift
+; %5 = final action (nothing, "store", "put", "add")
+; %6 = min pixel value
+; %7 = max pixel value
+; %8 = qmat (for prores)
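+;
+; For example, the simple_idct8 entry point in simple_idct10.asm invokes
+; IDCT_FN "", 11, pw_32, 20, "store": no explicit row bias (the pd_round_11
+; rounding constant is loaded instead), rows shifted by 11, columns biased by
+; pw_32 and shifted by 20, and the result is written back into the block.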
+
+%macro IDCT_FN 4-8
+ ; for (i = 0; i < 8; i++)
+ ; idctRowCondDC(block + i*8);
+ mova m10,[blockq+ 0] ; { row[0] }[0-7]
+ mova m8, [blockq+32] ; { row[2] }[0-7]
+ mova m13,[blockq+64] ; { row[4] }[0-7]
+ mova m12,[blockq+96] ; { row[6] }[0-7]
+
+%if %0 == 8
+ pmullw m10,[%8+ 0]
+ pmullw m8, [%8+32]
+ pmullw m13,[%8+64]
+ pmullw m12,[%8+96]
+
+ IDCT_1D %1, %2, %8
+%elif %2 == 11
+ ; This copies the DC-only shortcut. When there is only a DC coefficient the
+ ; C shifts the value and splats it to all coeffs rather than multiplying and
+ ; doing the full IDCT. This causes a difference on 8-bit because the
+ ; coefficient is 16383 rather than 16384 (which you can get with shifting).
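+ ; [rsp] receives a per-lane mask marking where any coefficient outside the
+ ; DC row is nonzero, and [rsp+16] the DC-only result (dc << 3); after the
+ ; first IDCT_1D pass the two are blended so that DC-only lanes take the
+ ; shortcut value and the rest keep the computed result.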
+ por m1, m8, m13
+ por m1, m12
+ por m1, [blockq+ 16] ; { row[1] }[0-7]
+ por m1, [blockq+ 48] ; { row[3] }[0-7]
+ por m1, [blockq+ 80] ; { row[5] }[0-7]
+ por m1, [blockq+112] ; { row[7] }[0-7]
+ pxor m2, m2
+ pcmpeqw m1, m2
+ psllw m2, m10, 3
+ pand m2, m1
+ pcmpeqb m3, m3
+ pxor m1, m3
+ mova [rsp], m1
+ mova [rsp+16], m2
+
+ IDCT_1D %1, %2
+
+ mova m5, [rsp]
+ mova m6, [rsp+16]
+ pand m8, m5
+ por m8, m6
+ pand m0, m5
+ por m0, m6
+ pand m1, m5
+ por m1, m6
+ pand m2, m5
+ por m2, m6
+ pand m4, m5
+ por m4, m6
+ pand m11, m5
+ por m11, m6
+ pand m9, m5
+ por m9, m6
+ pand m10, m5
+ por m10, m6
+%else
+ IDCT_1D %1, %2
+%endif
+
+ ; transpose for second part of IDCT
+ TRANSPOSE8x8W 8, 0, 1, 2, 4, 11, 9, 10, 3
+ mova [blockq+ 16], m0
+ mova [blockq+ 48], m2
+ mova [blockq+ 80], m11
+ mova [blockq+112], m10
+ SWAP 8, 10
+ SWAP 1, 8
+ SWAP 4, 13
+ SWAP 9, 12
+
+ ; for (i = 0; i < 8; i++)
+ ; idctSparseColAdd(dest + i, line_size, block + i);
+ IDCT_1D %3, %4
+
+ ; clip/store
+%if %0 >= 5
+%ifidn %5,"store"
+ ; No clamping, means pure idct
+ mova [blockq+ 0], m8
+ mova [blockq+ 16], m0
+ mova [blockq+ 32], m1
+ mova [blockq+ 48], m2
+ mova [blockq+ 64], m4
+ mova [blockq+ 80], m11
+ mova [blockq+ 96], m9
+ mova [blockq+112], m10
+%elifidn %5,"put"
+%ifidn %6, 0
+ pxor m3, m3
+%else
+ mova m3, [%6]
+%endif ; ifidn %6, 0
+ mova m5, [%7]
+ pmaxsw m8, m3
+ pmaxsw m0, m3
+ pmaxsw m1, m3
+ pmaxsw m2, m3
+ pmaxsw m4, m3
+ pmaxsw m11, m3
+ pmaxsw m9, m3
+ pmaxsw m10, m3
+ pminsw m8, m5
+ pminsw m0, m5
+ pminsw m1, m5
+ pminsw m2, m5
+ pminsw m4, m5
+ pminsw m11, m5
+ pminsw m9, m5
+ pminsw m10, m5
+
+ lea r2, [r1*3]
+ mova [r0 ], m8
+ mova [r0+r1 ], m0
+ mova [r0+r1*2], m1
+ mova [r0+r2 ], m2
+ lea r0, [r0+r1*4]
+ mova [r0 ], m4
+ mova [r0+r1 ], m11
+ mova [r0+r1*2], m9
+ mova [r0+r2 ], m10
+%endif ; %5 action
+%endif; if %0 >= 5
+%endmacro
+
+%endif
diff --git a/media/ffvpx/libavcodec/x86/videodsp.asm b/media/ffvpx/libavcodec/x86/videodsp.asm
new file mode 100644
index 0000000000..3cc07878d3
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/videodsp.asm
@@ -0,0 +1,436 @@
+;******************************************************************************
+;* Core video DSP functions
+;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+; slow vertical extension loop function. Works with variable-width, and
+; does per-line reading/writing of source data
+
+%macro V_COPY_ROW 2 ; type (top/body/bottom), h
+.%1_y_loop: ; do {
+ mov wq, r7mp ; initialize w (r7mp = wmp)
+.%1_x_loop: ; do {
+ movu m0, [srcq+wq] ; m0 = read($mmsize)
+ movu [dstq+wq], m0 ; write(m0, $mmsize)
+ add wq, mmsize ; w -= $mmsize
+ cmp wq, -mmsize ; } while (w > $mmsize);
+ jl .%1_x_loop
+ movu m0, [srcq-mmsize] ; m0 = read($mmsize)
+ movu [dstq-mmsize], m0 ; write(m0, $mmsize)
+%ifidn %1, body ; if ($type == body) {
+ add srcq, src_strideq ; src += src_stride
+%endif ; }
+ add dstq, dst_strideq ; dst += dst_stride
+ dec %2 ; } while (--$h);
+ jnz .%1_y_loop
+%endmacro
+
+; .----. <- zero
+; | | <- top is copied from first line in body of source
+; |----| <- start_y
+; | | <- body is copied verbatim (line-by-line) from source
+; |----| <- end_y
+; | | <- bottom is copied from last line in body of source
+; '----' <- bh
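+; The width loop below walks a negated byte counter upward and finishes each
+; row with one unaligned store of the last mmsize bytes, which may overlap the
+; previous store but writes identical data.  "top" repeats the first source
+; row (src is not advanced), "body" copies line by line, and "bottom" repeats
+; the last body row after src has been stepped back by one stride.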
+INIT_XMM sse
+%if ARCH_X86_64
+cglobal emu_edge_vvar, 7, 8, 1, dst, dst_stride, src, src_stride, \
+ start_y, end_y, bh, w
+%else ; x86-32
+cglobal emu_edge_vvar, 1, 6, 1, dst, src, start_y, end_y, bh, w
+%define src_strideq r3mp
+%define dst_strideq r1mp
+ mov srcq, r2mp
+ mov start_yq, r4mp
+ mov end_yq, r5mp
+ mov bhq, r6mp
+%endif
+ sub bhq, end_yq ; bh -= end_y
+ sub end_yq, start_yq ; end_y -= start_y
+ add srcq, r7mp ; (r7mp = wmp)
+ add dstq, r7mp ; (r7mp = wmp)
+ neg r7mp ; (r7mp = wmp)
+ test start_yq, start_yq ; if (start_q) {
+ jz .body
+ V_COPY_ROW top, start_yq ; v_copy_row(top, start_yq)
+.body: ; }
+ V_COPY_ROW body, end_yq ; v_copy_row(body, end_yq)
+ test bhq, bhq ; if (bh) {
+ jz .end
+ sub srcq, src_strideq ; src -= src_stride
+ V_COPY_ROW bottom, bhq ; v_copy_row(bottom, bh)
+.end: ; }
+ RET
+
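+; emu_edge_hvar fills n_words 16-bit words (2*n_words bytes) per row at dst
+; with copies of the byte at dst[start_x], repeated for h rows.  AVX2
+; broadcasts the byte directly; the SSE2 path multiplies it by 0x01010101 to
+; fill a dword and splats that across the register with pshufd.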
+%macro hvar_fn 0
+cglobal emu_edge_hvar, 5, 6, 1, dst, dst_stride, start_x, n_words, h, w
+ lea dstq, [dstq+n_wordsq*2]
+ neg n_wordsq
+ lea start_xq, [start_xq+n_wordsq*2]
+.y_loop: ; do {
+%if cpuflag(avx2)
+ vpbroadcastb m0, [dstq+start_xq]
+ mov wq, n_wordsq ; initialize w
+%else
+ movzx wd, byte [dstq+start_xq] ; w = read(1)
+ imul wd, 0x01010101 ; w *= 0x01010101
+ movd m0, wd
+ mov wq, n_wordsq ; initialize w
+ pshufd m0, m0, q0000 ; splat
+%endif ; avx2
+.x_loop: ; do {
+ movu [dstq+wq*2], m0 ; write($reg, $mmsize)
+ add wq, mmsize/2 ; w -= $mmsize/2
+ cmp wq, -(mmsize/2) ; } while (w > $mmsize/2)
+ jl .x_loop
+ movu [dstq-mmsize], m0 ; write($reg, $mmsize)
+ add dstq, dst_strideq ; dst += dst_stride
+ dec hq ; } while (h--)
+ jnz .y_loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+hvar_fn
+
+%if HAVE_AVX2_EXTERNAL
+INIT_XMM avx2
+hvar_fn
+%endif
+
+; macro to read/write a horizontal number of pixels (%2) to/from registers
+; on sse, - fills xmm0-15 for consecutive sets of 16 pixels
+; - if (%2 & 8) fills 8 bytes into xmm$next
+; - if (%2 & 4) fills 4 bytes into xmm$next
+; - if (%2 & 3) fills 1, 2 or 4 bytes in eax
+; on mmx, - fills mm0-7 for consecutive sets of 8 pixels
+; - if (%2 & 4) fills 4 bytes into mm$next
+; - if (%2 & 3) fills 1, 2 or 4 bytes in eax
+; writing data out is in the same way
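+; Example (SSE, %2 == 22): the %rep issues one movu for bytes 0..15 into xmm0,
+; then a movq loads bytes 14..21 into mm0; the two-byte overlap is harmless
+; because WRITE_NUM_BYTES stores them back to the same offsets.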
+%macro READ_NUM_BYTES 2
+%assign %%off 0 ; offset in source buffer
+%assign %%mmx_idx 0 ; mmx register index
+%assign %%xmm_idx 0 ; xmm register index
+
+%rep %2/mmsize
+%if mmsize == 16
+ movu xmm %+ %%xmm_idx, [srcq+%%off]
+%assign %%xmm_idx %%xmm_idx+1
+%else ; mmx
+ movu mm %+ %%mmx_idx, [srcq+%%off]
+%assign %%mmx_idx %%mmx_idx+1
+%endif
+%assign %%off %%off+mmsize
+%endrep ; %2/mmsize
+
+%if mmsize == 16
+%if (%2-%%off) >= 8
+%if %2 > 16 && (%2-%%off) > 8
+ movu xmm %+ %%xmm_idx, [srcq+%2-16]
+%assign %%xmm_idx %%xmm_idx+1
+%assign %%off %2
+%else
+ movq mm %+ %%mmx_idx, [srcq+%%off]
+%assign %%mmx_idx %%mmx_idx+1
+%assign %%off %%off+8
+%endif
+%endif ; (%2-%%off) >= 8
+%endif
+
+%if (%2-%%off) >= 4
+%if %2 > 8 && (%2-%%off) > 4
+ movq mm %+ %%mmx_idx, [srcq+%2-8]
+%assign %%off %2
+%else
+ movd mm %+ %%mmx_idx, [srcq+%%off]
+%assign %%off %%off+4
+%endif
+%assign %%mmx_idx %%mmx_idx+1
+%endif ; (%2-%%off) >= 4
+
+%if (%2-%%off) >= 1
+%if %2 >= 4
+ movd mm %+ %%mmx_idx, [srcq+%2-4]
+%elif (%2-%%off) == 1
+ mov valb, [srcq+%2-1]
+%elif (%2-%%off) == 2
+ mov valw, [srcq+%2-2]
+%else
+ mov valb, [srcq+%2-1]
+ ror vald, 16
+ mov valw, [srcq+%2-3]
+%endif
+%endif ; (%2-%%off) >= 1
+%endmacro ; READ_NUM_BYTES
+
+%macro WRITE_NUM_BYTES 2
+%assign %%off 0 ; offset in destination buffer
+%assign %%mmx_idx 0 ; mmx register index
+%assign %%xmm_idx 0 ; xmm register index
+
+%rep %2/mmsize
+%if mmsize == 16
+ movu [dstq+%%off], xmm %+ %%xmm_idx
+%assign %%xmm_idx %%xmm_idx+1
+%else ; mmx
+ movu [dstq+%%off], mm %+ %%mmx_idx
+%assign %%mmx_idx %%mmx_idx+1
+%endif
+%assign %%off %%off+mmsize
+%endrep ; %2/mmsize
+
+%if mmsize == 16
+%if (%2-%%off) >= 8
+%if %2 > 16 && (%2-%%off) > 8
+ movu [dstq+%2-16], xmm %+ %%xmm_idx
+%assign %%xmm_idx %%xmm_idx+1
+%assign %%off %2
+%else
+ movq [dstq+%%off], mm %+ %%mmx_idx
+%assign %%mmx_idx %%mmx_idx+1
+%assign %%off %%off+8
+%endif
+%endif ; (%2-%%off) >= 8
+%endif
+
+%if (%2-%%off) >= 4
+%if %2 > 8 && (%2-%%off) > 4
+ movq [dstq+%2-8], mm %+ %%mmx_idx
+%assign %%off %2
+%else
+ movd [dstq+%%off], mm %+ %%mmx_idx
+%assign %%off %%off+4
+%endif
+%assign %%mmx_idx %%mmx_idx+1
+%endif ; (%2-%%off) >= 4
+
+%if (%2-%%off) >= 1
+%if %2 >= 4
+ movd [dstq+%2-4], mm %+ %%mmx_idx
+%elif (%2-%%off) == 1
+ mov [dstq+%2-1], valb
+%elif (%2-%%off) == 2
+ mov [dstq+%2-2], valw
+%else
+ mov [dstq+%2-3], valw
+ ror vald, 16
+ mov [dstq+%2-1], valb
+%ifnidn %1, body
+ ror vald, 16
+%endif
+%endif
+%endif ; (%2-%%off) >= 1
+%endmacro ; WRITE_NUM_BYTES
+
+; vertical top/bottom extend and body copy fast loops
+; these are function pointers to set-width line copy functions, i.e.
+; they read a fixed number of pixels into set registers, and write
+; those out into the destination buffer
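+; Each emu_edge_vfix<N> generated below handles one fixed line width N
+; (1..22 bytes): for the top and bottom extensions the N source bytes are read
+; once and only the stores repeat per row, while the body loop reads and
+; writes every line.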
+%macro VERTICAL_EXTEND 2
+%assign %%n %1
+%rep 1+%2-%1
+%if %%n <= 3
+%if ARCH_X86_64
+cglobal emu_edge_vfix %+ %%n, 6, 8, 0, dst, dst_stride, src, src_stride, \
+ start_y, end_y, val, bh
+ mov bhq, r6mp ; r6mp = bhmp
+%else ; x86-32
+cglobal emu_edge_vfix %+ %%n, 0, 6, 0, val, dst, src, start_y, end_y, bh
+ mov dstq, r0mp
+ mov srcq, r2mp
+ mov start_yq, r4mp
+ mov end_yq, r5mp
+ mov bhq, r6mp
+%define dst_strideq r1mp
+%define src_strideq r3mp
+%endif ; x86-64/32
+%else
+%if ARCH_X86_64
+cglobal emu_edge_vfix %+ %%n, 7, 7, 1, dst, dst_stride, src, src_stride, \
+ start_y, end_y, bh
+%else ; x86-32
+cglobal emu_edge_vfix %+ %%n, 1, 5, 1, dst, src, start_y, end_y, bh
+ mov srcq, r2mp
+ mov start_yq, r4mp
+ mov end_yq, r5mp
+ mov bhq, r6mp
+%define dst_strideq r1mp
+%define src_strideq r3mp
+%endif ; x86-64/32
+%endif
+ ; FIXME move this to c wrapper?
+ sub bhq, end_yq ; bh -= end_y
+ sub end_yq, start_yq ; end_y -= start_y
+
+ ; extend pixels above body
+ test start_yq, start_yq ; if (start_y) {
+ jz .body_loop
+ READ_NUM_BYTES top, %%n ; $variable_regs = read($n)
+.top_loop: ; do {
+ WRITE_NUM_BYTES top, %%n ; write($variable_regs, $n)
+ add dstq, dst_strideq ; dst += linesize
+ dec start_yq ; } while (--start_y)
+ jnz .top_loop ; }
+
+ ; copy body pixels
+.body_loop: ; do {
+ READ_NUM_BYTES body, %%n ; $variable_regs = read($n)
+ WRITE_NUM_BYTES body, %%n ; write($variable_regs, $n)
+ add dstq, dst_strideq ; dst += dst_stride
+ add srcq, src_strideq ; src += src_stride
+ dec end_yq ; } while (--end_y)
+ jnz .body_loop
+
+ ; copy bottom pixels
+ test bhq, bhq ; if (block_h) {
+ jz .end
+ sub srcq, src_strideq ; src -= linesize
+ READ_NUM_BYTES bottom, %%n ; $variable_regs = read($n)
+.bottom_loop: ; do {
+ WRITE_NUM_BYTES bottom, %%n ; write($variable_regs, $n)
+ add dstq, dst_strideq ; dst += linesize
+ dec bhq ; } while (--bh)
+ jnz .bottom_loop ; }
+
+.end:
+ RET
+%assign %%n %%n+1
+%endrep ; 1+%2-%1
+%endmacro ; VERTICAL_EXTEND
+
+INIT_MMX mmx
+VERTICAL_EXTEND 1, 15
+
+INIT_XMM sse
+VERTICAL_EXTEND 16, 22
+
+; left/right (horizontal) fast extend functions
+; these are essentially identical to the vertical extend ones above,
+; just left/right separated because the number of pixels to extend is
+; obviously not the same on both sides.
+
+%macro READ_V_PIXEL 2
+%if cpuflag(avx2)
+ vpbroadcastb m0, %2
+%else
+ movzx vald, byte %2
+ imul vald, 0x01010101
+%if %1 >= 8
+ movd m0, vald
+%if mmsize == 16
+ pshufd m0, m0, q0000
+%else
+ punpckldq m0, m0
+%endif ; mmsize == 16
+%endif ; %1 >= 8
+%endif ; avx2
+%endmacro ; READ_V_PIXEL
+
+%macro WRITE_V_PIXEL 2
+%assign %%off 0
+
+%if %1 >= 8
+
+%rep %1/mmsize
+ movu [%2+%%off], m0
+%assign %%off %%off+mmsize
+%endrep ; %1/mmsize
+
+%if mmsize == 16
+%if %1-%%off >= 8
+%if %1 > 16 && %1-%%off > 8
+ movu [%2+%1-16], m0
+%assign %%off %1
+%else
+ movq [%2+%%off], m0
+%assign %%off %%off+8
+%endif
+%endif ; %1-%%off >= 8
+%endif ; mmsize == 16
+
+%if %1-%%off >= 4
+%if %1 > 8 && %1-%%off > 4
+ movq [%2+%1-8], m0
+%assign %%off %1
+%else
+ movd [%2+%%off], m0
+%assign %%off %%off+4
+%endif
+%endif ; %1-%%off >= 4
+
+%else ; %1 < 8
+
+%rep %1/4
+ mov [%2+%%off], vald
+%assign %%off %%off+4
+%endrep ; %1/4
+
+%endif ; %1 >=/< 8
+
+%if %1-%%off == 2
+%if cpuflag(avx2)
+ movd [%2+%%off-2], m0
+%else
+ mov [%2+%%off], valw
+%endif ; avx2
+%endif ; %1-%%off == 2
+%endmacro ; WRITE_V_PIXEL
+
+%macro H_EXTEND 2
+%assign %%n %1
+%rep 1+(%2-%1)/2
+%if cpuflag(avx2)
+cglobal emu_edge_hfix %+ %%n, 4, 4, 1, dst, dst_stride, start_x, bh
+%else
+cglobal emu_edge_hfix %+ %%n, 4, 5, 1, dst, dst_stride, start_x, bh, val
+%endif
+.loop_y: ; do {
+ READ_V_PIXEL %%n, [dstq+start_xq] ; $variable_regs = read($n)
+ WRITE_V_PIXEL %%n, dstq ; write($variable_regs, $n)
+ add dstq, dst_strideq ; dst += dst_stride
+ dec bhq ; } while (--bh)
+ jnz .loop_y
+ RET
+%assign %%n %%n+2
+%endrep ; 1+(%2-%1)/2
+%endmacro ; H_EXTEND
+
+INIT_MMX mmx
+H_EXTEND 2, 14
+
+INIT_XMM sse2
+H_EXTEND 16, 22
+
+%if HAVE_AVX2_EXTERNAL
+INIT_XMM avx2
+H_EXTEND 8, 22
+%endif
+
+INIT_MMX mmxext
+cglobal prefetch, 3, 3, 0, buf, stride, h
+.loop:
+ prefetcht0 [bufq]
+ add bufq, strideq
+ dec hd
+ jg .loop
+ RET
diff --git a/media/ffvpx/libavcodec/x86/videodsp_init.c b/media/ffvpx/libavcodec/x86/videodsp_init.c
new file mode 100644
index 0000000000..ae9db95624
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/videodsp_init.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2002-2012 Michael Niedermayer
+ * Copyright (C) 2012 Ronald S. Bultje
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/videodsp.h"
+
+#if HAVE_X86ASM
+typedef void emu_edge_vfix_func(uint8_t *dst, x86_reg dst_stride,
+ const uint8_t *src, x86_reg src_stride,
+ x86_reg start_y, x86_reg end_y, x86_reg bh);
+typedef void emu_edge_vvar_func(uint8_t *dst, x86_reg dst_stride,
+ const uint8_t *src, x86_reg src_stride,
+ x86_reg start_y, x86_reg end_y, x86_reg bh,
+ x86_reg w);
+
+extern emu_edge_vfix_func ff_emu_edge_vfix1_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix2_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix3_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix4_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix5_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix6_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix7_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix8_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix9_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix10_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix11_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix12_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix13_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix14_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix15_mmx;
+extern emu_edge_vfix_func ff_emu_edge_vfix16_sse;
+extern emu_edge_vfix_func ff_emu_edge_vfix17_sse;
+extern emu_edge_vfix_func ff_emu_edge_vfix18_sse;
+extern emu_edge_vfix_func ff_emu_edge_vfix19_sse;
+extern emu_edge_vfix_func ff_emu_edge_vfix20_sse;
+extern emu_edge_vfix_func ff_emu_edge_vfix21_sse;
+extern emu_edge_vfix_func ff_emu_edge_vfix22_sse;
+static emu_edge_vfix_func * const vfixtbl_sse[22] = {
+ ff_emu_edge_vfix1_mmx, ff_emu_edge_vfix2_mmx, ff_emu_edge_vfix3_mmx,
+ ff_emu_edge_vfix4_mmx, ff_emu_edge_vfix5_mmx, ff_emu_edge_vfix6_mmx,
+ ff_emu_edge_vfix7_mmx, ff_emu_edge_vfix8_mmx, ff_emu_edge_vfix9_mmx,
+ ff_emu_edge_vfix10_mmx, ff_emu_edge_vfix11_mmx, ff_emu_edge_vfix12_mmx,
+ ff_emu_edge_vfix13_mmx, ff_emu_edge_vfix14_mmx, ff_emu_edge_vfix15_mmx,
+ ff_emu_edge_vfix16_sse, ff_emu_edge_vfix17_sse, ff_emu_edge_vfix18_sse,
+ ff_emu_edge_vfix19_sse, ff_emu_edge_vfix20_sse, ff_emu_edge_vfix21_sse,
+ ff_emu_edge_vfix22_sse
+};
+extern emu_edge_vvar_func ff_emu_edge_vvar_sse;
+
+typedef void emu_edge_hfix_func(uint8_t *dst, x86_reg dst_stride,
+ x86_reg start_x, x86_reg bh);
+typedef void emu_edge_hvar_func(uint8_t *dst, x86_reg dst_stride,
+ x86_reg start_x, x86_reg n_words, x86_reg bh);
+
+extern emu_edge_hfix_func ff_emu_edge_hfix2_mmx;
+extern emu_edge_hfix_func ff_emu_edge_hfix4_mmx;
+extern emu_edge_hfix_func ff_emu_edge_hfix6_mmx;
+extern emu_edge_hfix_func ff_emu_edge_hfix8_mmx;
+extern emu_edge_hfix_func ff_emu_edge_hfix10_mmx;
+extern emu_edge_hfix_func ff_emu_edge_hfix12_mmx;
+extern emu_edge_hfix_func ff_emu_edge_hfix14_mmx;
+extern emu_edge_hfix_func ff_emu_edge_hfix16_sse2;
+extern emu_edge_hfix_func ff_emu_edge_hfix18_sse2;
+extern emu_edge_hfix_func ff_emu_edge_hfix20_sse2;
+extern emu_edge_hfix_func ff_emu_edge_hfix22_sse2;
+static emu_edge_hfix_func * const hfixtbl_sse2[11] = {
+ ff_emu_edge_hfix2_mmx, ff_emu_edge_hfix4_mmx, ff_emu_edge_hfix6_mmx,
+ ff_emu_edge_hfix8_mmx, ff_emu_edge_hfix10_mmx, ff_emu_edge_hfix12_mmx,
+ ff_emu_edge_hfix14_mmx, ff_emu_edge_hfix16_sse2, ff_emu_edge_hfix18_sse2,
+ ff_emu_edge_hfix20_sse2, ff_emu_edge_hfix22_sse2
+};
+extern emu_edge_hvar_func ff_emu_edge_hvar_sse2;
+#if HAVE_AVX2_EXTERNAL
+extern emu_edge_hfix_func ff_emu_edge_hfix8_avx2;
+extern emu_edge_hfix_func ff_emu_edge_hfix10_avx2;
+extern emu_edge_hfix_func ff_emu_edge_hfix12_avx2;
+extern emu_edge_hfix_func ff_emu_edge_hfix14_avx2;
+extern emu_edge_hfix_func ff_emu_edge_hfix16_avx2;
+extern emu_edge_hfix_func ff_emu_edge_hfix18_avx2;
+extern emu_edge_hfix_func ff_emu_edge_hfix20_avx2;
+extern emu_edge_hfix_func ff_emu_edge_hfix22_avx2;
+static emu_edge_hfix_func * const hfixtbl_avx2[11] = {
+ ff_emu_edge_hfix2_mmx, ff_emu_edge_hfix4_mmx, ff_emu_edge_hfix6_mmx,
+ ff_emu_edge_hfix8_avx2, ff_emu_edge_hfix10_avx2, ff_emu_edge_hfix12_avx2,
+ ff_emu_edge_hfix14_avx2, ff_emu_edge_hfix16_avx2, ff_emu_edge_hfix18_avx2,
+ ff_emu_edge_hfix20_avx2, ff_emu_edge_hfix22_avx2
+};
+extern emu_edge_hvar_func ff_emu_edge_hvar_avx2;
+#endif
+
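+/*
+ * Shared helper behind the SSE2/AVX2 entry points below, mirroring the
+ * generic C implementation of VideoDSPContext.emulated_edge_mc: roughly, row
+ * clip(y, start_y, end_y - 1) of the source block is copied into row y of dst
+ * for columns start_x..end_x-1, then the missing columns on the left/right
+ * are filled by replicating the nearest copied pixel.  Fill widths up to 22
+ * pixels dispatch to the fixed-width asm helpers through the vfix/hfix
+ * tables; anything wider uses the variable-width versions.  The hfix helpers
+ * exist for even widths only, so odd widths are rounded up, which merely
+ * rewrites one pixel that is already correct.
+ */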
+static av_always_inline void emulated_edge_mc(uint8_t *dst, const uint8_t *src,
+ ptrdiff_t dst_stride,
+ ptrdiff_t src_stride,
+ x86_reg block_w, x86_reg block_h,
+ x86_reg src_x, x86_reg src_y,
+ x86_reg w, x86_reg h,
+ emu_edge_vfix_func * const *vfix_tbl,
+ emu_edge_vvar_func *v_extend_var,
+ emu_edge_hfix_func * const *hfix_tbl,
+ emu_edge_hvar_func *h_extend_var)
+{
+ x86_reg start_y, start_x, end_y, end_x, src_y_add = 0, p;
+
+ if (!w || !h)
+ return;
+
+ av_assert2(block_w <= FFABS(dst_stride));
+
+ if (src_y >= h) {
+ src -= src_y*src_stride;
+ src_y_add = h - 1;
+ src_y = h - 1;
+ } else if (src_y <= -block_h) {
+ src -= src_y*src_stride;
+ src_y_add = 1 - block_h;
+ src_y = 1 - block_h;
+ }
+ if (src_x >= w) {
+ src += w - 1 - src_x;
+ src_x = w - 1;
+ } else if (src_x <= -block_w) {
+ src += 1 - block_w - src_x;
+ src_x = 1 - block_w;
+ }
+
+ start_y = FFMAX(0, -src_y);
+ start_x = FFMAX(0, -src_x);
+ end_y = FFMIN(block_h, h-src_y);
+ end_x = FFMIN(block_w, w-src_x);
+ av_assert2(start_x < end_x && block_w > 0);
+ av_assert2(start_y < end_y && block_h > 0);
+
+ // fill in the to-be-copied part plus all above/below
+ src += (src_y_add + start_y) * src_stride + start_x;
+ w = end_x - start_x;
+ if (w <= 22) {
+ vfix_tbl[w - 1](dst + start_x, dst_stride, src, src_stride,
+ start_y, end_y, block_h);
+ } else {
+ v_extend_var(dst + start_x, dst_stride, src, src_stride,
+ start_y, end_y, block_h, w);
+ }
+
+ // fill left
+ if (start_x) {
+ if (start_x <= 22) {
+ hfix_tbl[(start_x - 1) >> 1](dst, dst_stride, start_x, block_h);
+ } else {
+ h_extend_var(dst, dst_stride,
+ start_x, (start_x + 1) >> 1, block_h);
+ }
+ }
+
+ // fill right
+ p = block_w - end_x;
+ if (p) {
+ if (p <= 22) {
+ hfix_tbl[(p - 1) >> 1](dst + end_x - (p & 1), dst_stride,
+ -!(p & 1), block_h);
+ } else {
+ h_extend_var(dst + end_x - (p & 1), dst_stride,
+ -!(p & 1), (p + 1) >> 1, block_h);
+ }
+ }
+}
+
+static av_noinline void emulated_edge_mc_sse2(uint8_t *buf, const uint8_t *src,
+ ptrdiff_t buf_stride,
+ ptrdiff_t src_stride,
+ int block_w, int block_h,
+ int src_x, int src_y, int w,
+ int h)
+{
+ emulated_edge_mc(buf, src, buf_stride, src_stride, block_w, block_h,
+ src_x, src_y, w, h, vfixtbl_sse, &ff_emu_edge_vvar_sse,
+ hfixtbl_sse2, &ff_emu_edge_hvar_sse2);
+}
+
+#if HAVE_AVX2_EXTERNAL
+static av_noinline void emulated_edge_mc_avx2(uint8_t *buf, const uint8_t *src,
+ ptrdiff_t buf_stride,
+ ptrdiff_t src_stride,
+ int block_w, int block_h,
+ int src_x, int src_y, int w,
+ int h)
+{
+ emulated_edge_mc(buf, src, buf_stride, src_stride, block_w, block_h,
+ src_x, src_y, w, h, vfixtbl_sse, &ff_emu_edge_vvar_sse,
+ hfixtbl_avx2, &ff_emu_edge_hvar_avx2);
+}
+#endif /* HAVE_AVX2_EXTERNAL */
+#endif /* HAVE_X86ASM */
+
+void ff_prefetch_mmxext(const uint8_t *buf, ptrdiff_t stride, int h);
+
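+/* The SIMD emulated_edge_mc replacements only handle 8 bits per component;
+ * for bpc > 8 the generic C version selected by ff_videodsp_init() stays in
+ * place. */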
+av_cold void ff_videodsp_init_x86(VideoDSPContext *ctx, int bpc)
+{
+#if HAVE_X86ASM
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ ctx->prefetch = ff_prefetch_mmxext;
+ }
+ if (EXTERNAL_SSE2(cpu_flags) && bpc <= 8) {
+ ctx->emulated_edge_mc = emulated_edge_mc_sse2;
+ }
+#if HAVE_AVX2_EXTERNAL
+ if (EXTERNAL_AVX2(cpu_flags) && bpc <= 8) {
+ ctx->emulated_edge_mc = emulated_edge_mc_avx2;
+ }
+#endif
+#endif /* HAVE_X86ASM */
+}
diff --git a/media/ffvpx/libavcodec/x86/vp56_arith.h b/media/ffvpx/libavcodec/x86/vp56_arith.h
new file mode 100644
index 0000000000..9f7639980c
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp56_arith.h
@@ -0,0 +1,53 @@
+/**
+ * VP5 and VP6 compatible video decoder (arith decoder)
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ * Copyright (C) 2010 Eli Friedman
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_VP56_ARITH_H
+#define AVCODEC_X86_VP56_ARITH_H
+
+#if HAVE_INLINE_ASM && HAVE_FAST_CMOV && HAVE_6REGS
+#include "libavutil/attributes.h"
+
+#define vp56_rac_get_prob vp56_rac_get_prob
+static av_always_inline int vp56_rac_get_prob(VP56RangeCoder *c, uint8_t prob)
+{
+ unsigned int code_word = vp56_rac_renorm(c);
+ unsigned int low = 1 + (((c->high - 1) * prob) >> 8);
+ unsigned int low_shift = low << 16;
+ int bit = 0;
+ c->code_word = code_word;
+
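+    /* Branchless form of:
+     *   bit = code_word >= (low << 16);
+     *   c->high      = bit ? c->high - low           : low;
+     *   c->code_word = bit ? code_word - (low << 16) : code_word;
+     * Both subtractions are performed, setae captures the absence of a borrow
+     * as the decoded bit, and cmovb swaps in the other branch's values when
+     * the subtraction did borrow. */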
+ __asm__(
+ "subl %4, %1 \n\t"
+ "subl %3, %2 \n\t"
+ "setae %b0 \n\t"
+ "cmovb %4, %1 \n\t"
+ "cmovb %5, %2 \n\t"
+ : "+q"(bit), "+&r"(c->high), "+&r"(c->code_word)
+ : "r"(low_shift), "r"(low), "r"(code_word)
+ );
+
+ return bit;
+}
+#endif
+
+#endif /* AVCODEC_X86_VP56_ARITH_H */
diff --git a/media/ffvpx/libavcodec/x86/vp8dsp.asm b/media/ffvpx/libavcodec/x86/vp8dsp.asm
new file mode 100644
index 0000000000..6ac5a7721b
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp8dsp.asm
@@ -0,0 +1,1116 @@
+;******************************************************************************
+;* VP8 MMXEXT optimizations
+;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
+;* Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+fourtap_filter_hw_m: times 4 dw -6, 123
+ times 4 dw 12, -1
+ times 4 dw -9, 93
+ times 4 dw 50, -6
+ times 4 dw -6, 50
+ times 4 dw 93, -9
+ times 4 dw -1, 12
+ times 4 dw 123, -6
+
+sixtap_filter_hw_m: times 4 dw 2, -11
+ times 4 dw 108, 36
+ times 4 dw -8, 1
+ times 4 dw 3, -16
+ times 4 dw 77, 77
+ times 4 dw -16, 3
+ times 4 dw 1, -8
+ times 4 dw 36, 108
+ times 4 dw -11, 2
+
+fourtap_filter_hb_m: times 8 db -6, 123
+ times 8 db 12, -1
+ times 8 db -9, 93
+ times 8 db 50, -6
+ times 8 db -6, 50
+ times 8 db 93, -9
+ times 8 db -1, 12
+ times 8 db 123, -6
+
+sixtap_filter_hb_m: times 8 db 2, 1
+ times 8 db -11, 108
+ times 8 db 36, -8
+ times 8 db 3, 3
+ times 8 db -16, 77
+ times 8 db 77, -16
+ times 8 db 1, 2
+ times 8 db -8, 36
+ times 8 db 108, -11
+
+fourtap_filter_v_m: times 8 dw -6
+ times 8 dw 123
+ times 8 dw 12
+ times 8 dw -1
+ times 8 dw -9
+ times 8 dw 93
+ times 8 dw 50
+ times 8 dw -6
+ times 8 dw -6
+ times 8 dw 50
+ times 8 dw 93
+ times 8 dw -9
+ times 8 dw -1
+ times 8 dw 12
+ times 8 dw 123
+ times 8 dw -6
+
+sixtap_filter_v_m: times 8 dw 2
+ times 8 dw -11
+ times 8 dw 108
+ times 8 dw 36
+ times 8 dw -8
+ times 8 dw 1
+ times 8 dw 3
+ times 8 dw -16
+ times 8 dw 77
+ times 8 dw 77
+ times 8 dw -16
+ times 8 dw 3
+ times 8 dw 1
+ times 8 dw -8
+ times 8 dw 36
+ times 8 dw 108
+ times 8 dw -11
+ times 8 dw 2
+
+bilinear_filter_vw_m: times 8 dw 1
+ times 8 dw 2
+ times 8 dw 3
+ times 8 dw 4
+ times 8 dw 5
+ times 8 dw 6
+ times 8 dw 7
+
+bilinear_filter_vb_m: times 8 db 7, 1
+ times 8 db 6, 2
+ times 8 db 5, 3
+ times 8 db 4, 4
+ times 8 db 3, 5
+ times 8 db 2, 6
+ times 8 db 1, 7
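+; bilinear weights for the eighth-pel position k: pixel pairs are weighted
+; (8-k, k).  The _vw table stores only the word weight k (the complementary
+; 8-k entry is reached by negating the table offset), while _vb stores the
+; interleaved byte pairs (8-k, k) ready for pmaddubsw.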
+
+%ifdef PIC
+%define fourtap_filter_hw picregq
+%define sixtap_filter_hw picregq
+%define fourtap_filter_hb picregq
+%define sixtap_filter_hb picregq
+%define fourtap_filter_v picregq
+%define sixtap_filter_v picregq
+%define bilinear_filter_vw picregq
+%define bilinear_filter_vb picregq
+%define npicregs 1
+%else
+%define fourtap_filter_hw fourtap_filter_hw_m
+%define sixtap_filter_hw sixtap_filter_hw_m
+%define fourtap_filter_hb fourtap_filter_hb_m
+%define sixtap_filter_hb sixtap_filter_hb_m
+%define fourtap_filter_v fourtap_filter_v_m
+%define sixtap_filter_v sixtap_filter_v_m
+%define bilinear_filter_vw bilinear_filter_vw_m
+%define bilinear_filter_vb bilinear_filter_vb_m
+%define npicregs 0
+%endif
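+; With PIC, the filter tables cannot be addressed as absolute constants, so
+; each function reserves one extra GPR (npicregs) and loads the table base
+; with lea; without PIC the *_m labels are referenced directly.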
+
+filter_h2_shuf: db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
+filter_h4_shuf: db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
+
+filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
+filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
+filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
+
+pw_20091: times 4 dw 20091
+pw_17734: times 4 dw 17734
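+; VP8 IDCT rotation constants: 20091 ~= (sqrt(2)*cos(pi/8) - 1) * 65536 and
+; 35468 ~= sqrt(2)*sin(pi/8) * 65536.  Only half of the latter (17734) is
+; stored because 35468 does not fit a signed 16-bit pmulhw operand; the
+; missing factor of two is restored with paddw in VP8_MULTIPLY_SUMSUB.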
+
+cextern pw_3
+cextern pw_4
+cextern pw_64
+cextern pw_256
+
+SECTION .text
+
+;-------------------------------------------------------------------------------
+; subpel MC functions:
+;
+; void ff_put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, ptrdiff_t deststride,
+; const uint8_t *src, ptrdiff_t srcstride,
+; int height, int mx, int my);
+;-------------------------------------------------------------------------------
+
+%macro FILTER_SSSE3 1
+cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, height, mx, picreg
+ lea mxd, [mxq*3]
+ mova m3, [filter_h6_shuf2]
+ mova m4, [filter_h6_shuf3]
+%ifdef PIC
+ lea picregq, [sixtap_filter_hb_m]
+%endif
+ mova m5, [sixtap_filter_hb+mxq*8-48] ; set up 6tap filter in bytes
+ mova m6, [sixtap_filter_hb+mxq*8-32]
+ mova m7, [sixtap_filter_hb+mxq*8-16]
+
+.nextrow:
+ movu m0, [srcq-2]
+ mova m1, m0
+ mova m2, m0
+%if mmsize == 8
+; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
+; shuffle with a memory operand
+ punpcklbw m0, [srcq+3]
+%else
+ pshufb m0, [filter_h6_shuf1]
+%endif
+ pshufb m1, m3
+ pshufb m2, m4
+ pmaddubsw m0, m5
+ pmaddubsw m1, m6
+ pmaddubsw m2, m7
+ paddsw m0, m1
+ paddsw m0, m2
+ pmulhrsw m0, [pw_256]
+ packuswb m0, m0
+ movh [dstq], m0 ; store
+
+ ; go to next line
+ add dstq, dststrideq
+ add srcq, srcstrideq
+ dec heightd ; next row
+ jg .nextrow
+ RET
+
+cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
+ shl mxd, 4
+ mova m2, [pw_256]
+ mova m3, [filter_h2_shuf]
+ mova m4, [filter_h4_shuf]
+%ifdef PIC
+ lea picregq, [fourtap_filter_hb_m]
+%endif
+ mova m5, [fourtap_filter_hb+mxq-16] ; set up 4tap filter in bytes
+ mova m6, [fourtap_filter_hb+mxq]
+
+.nextrow:
+ movu m0, [srcq-1]
+ mova m1, m0
+ pshufb m0, m3
+ pshufb m1, m4
+ pmaddubsw m0, m5
+ pmaddubsw m1, m6
+ paddsw m0, m1
+ pmulhrsw m0, m2
+ packuswb m0, m0
+ movh [dstq], m0 ; store
+
+ ; go to next line
+ add dstq, dststrideq
+ add srcq, srcstrideq
+ dec heightd ; next row
+ jg .nextrow
+ RET
+
+cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
+ shl myd, 4
+%ifdef PIC
+ lea picregq, [fourtap_filter_hb_m]
+%endif
+ mova m5, [fourtap_filter_hb+myq-16]
+ mova m6, [fourtap_filter_hb+myq]
+ mova m7, [pw_256]
+
+ ; read 3 lines
+ sub srcq, srcstrideq
+ movh m0, [srcq]
+ movh m1, [srcq+ srcstrideq]
+ movh m2, [srcq+2*srcstrideq]
+ add srcq, srcstrideq
+
+.nextrow:
+ movh m3, [srcq+2*srcstrideq] ; read new row
+ mova m4, m0
+ mova m0, m1
+ punpcklbw m4, m1
+ mova m1, m2
+ punpcklbw m2, m3
+ pmaddubsw m4, m5
+ pmaddubsw m2, m6
+ paddsw m4, m2
+ mova m2, m3
+ pmulhrsw m4, m7
+ packuswb m4, m4
+ movh [dstq], m4
+
+ ; go to next line
+ add dstq, dststrideq
+ add srcq, srcstrideq
+ dec heightd ; next row
+ jg .nextrow
+ RET
+
+cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
+ lea myd, [myq*3]
+%ifdef PIC
+ lea picregq, [sixtap_filter_hb_m]
+%endif
+ lea myq, [sixtap_filter_hb+myq*8]
+
+ ; read 5 lines
+ sub srcq, srcstrideq
+ sub srcq, srcstrideq
+ movh m0, [srcq]
+ movh m1, [srcq+srcstrideq]
+ movh m2, [srcq+srcstrideq*2]
+ lea srcq, [srcq+srcstrideq*2]
+ add srcq, srcstrideq
+ movh m3, [srcq]
+ movh m4, [srcq+srcstrideq]
+
+.nextrow:
+ movh m5, [srcq+2*srcstrideq] ; read new row
+ mova m6, m0
+ punpcklbw m6, m5
+ mova m0, m1
+ punpcklbw m1, m2
+ mova m7, m3
+ punpcklbw m7, m4
+ pmaddubsw m6, [myq-48]
+ pmaddubsw m1, [myq-32]
+ pmaddubsw m7, [myq-16]
+ paddsw m6, m1
+ paddsw m6, m7
+ mova m1, m2
+ mova m2, m3
+ pmulhrsw m6, [pw_256]
+ mova m3, m4
+ packuswb m6, m6
+ mova m4, m5
+ movh [dstq], m6
+
+ ; go to next line
+ add dstq, dststrideq
+ add srcq, srcstrideq
+ dec heightd ; next row
+ jg .nextrow
+ RET
+%endmacro
+
+INIT_MMX ssse3
+FILTER_SSSE3 4
+INIT_XMM ssse3
+FILTER_SSSE3 8
+
+; 4x4 block, H-only 4-tap filter
+INIT_MMX mmxext
+cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
+ shl mxd, 4
+%ifdef PIC
+ lea picregq, [fourtap_filter_hw_m]
+%endif
+ movq mm4, [fourtap_filter_hw+mxq-16] ; set up 4tap filter in words
+ movq mm5, [fourtap_filter_hw+mxq]
+ movq mm7, [pw_64]
+ pxor mm6, mm6
+
+.nextrow:
+ movq mm1, [srcq-1] ; (ABCDEFGH) load 8 horizontal pixels
+
+ ; first set of 2 pixels
+ movq mm2, mm1 ; byte ABCD..
+ punpcklbw mm1, mm6 ; byte->word ABCD
+ pshufw mm0, mm2, 9 ; byte CDEF..
+ punpcklbw mm0, mm6 ; byte->word CDEF
+ pshufw mm3, mm1, 0x94 ; word ABBC
+ pshufw mm1, mm0, 0x94 ; word CDDE
+ pmaddwd mm3, mm4 ; multiply 2px with F0/F1
+ movq mm0, mm1 ; backup for second set of pixels
+ pmaddwd mm1, mm5 ; multiply 2px with F2/F3
+ paddd mm3, mm1 ; finish 1st 2px
+
+ ; second set of 2 pixels, use backup of above
+ punpckhbw mm2, mm6 ; byte->word EFGH
+ pmaddwd mm0, mm4 ; multiply backed up 2px with F0/F1
+ pshufw mm1, mm2, 0x94 ; word EFFG
+ pmaddwd mm1, mm5 ; multiply 2px with F2/F3
+ paddd mm0, mm1 ; finish 2nd 2px
+
+ ; merge two sets of 2 pixels into one set of 4, round/clip/store
+ packssdw mm3, mm0 ; merge dword->word (4px)
+ paddsw mm3, mm7 ; rounding
+ psraw mm3, 7
+ packuswb mm3, mm6 ; clip and word->bytes
+ movd [dstq], mm3 ; store
+
+ ; go to next line
+ add dstq, dststrideq
+ add srcq, srcstrideq
+ dec heightd ; next row
+ jg .nextrow
+ RET
+
+; 4x4 block, H-only 6-tap filter
+INIT_MMX mmxext
+cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
+ lea mxd, [mxq*3]
+%ifdef PIC
+ lea picregq, [sixtap_filter_hw_m]
+%endif
+ movq mm4, [sixtap_filter_hw+mxq*8-48] ; set up 4tap filter in words
+ movq mm5, [sixtap_filter_hw+mxq*8-32]
+ movq mm6, [sixtap_filter_hw+mxq*8-16]
+ movq mm7, [pw_64]
+ pxor mm3, mm3
+
+.nextrow:
+ movq mm1, [srcq-2] ; (ABCDEFGH) load 8 horizontal pixels
+
+ ; first set of 2 pixels
+ movq mm2, mm1 ; byte ABCD..
+ punpcklbw mm1, mm3 ; byte->word ABCD
+ pshufw mm0, mm2, 0x9 ; byte CDEF..
+ punpckhbw mm2, mm3 ; byte->word EFGH
+ punpcklbw mm0, mm3 ; byte->word CDEF
+ pshufw mm1, mm1, 0x94 ; word ABBC
+ pshufw mm2, mm2, 0x94 ; word EFFG
+ pmaddwd mm1, mm4 ; multiply 2px with F0/F1
+ pshufw mm3, mm0, 0x94 ; word CDDE
+ movq mm0, mm3 ; backup for second set of pixels
+ pmaddwd mm3, mm5 ; multiply 2px with F2/F3
+ paddd mm1, mm3 ; add to 1st 2px cache
+ movq mm3, mm2 ; backup for second set of pixels
+ pmaddwd mm2, mm6 ; multiply 2px with F4/F5
+ paddd mm1, mm2 ; finish 1st 2px
+
+ ; second set of 2 pixels, use backup of above
+ movd mm2, [srcq+3] ; byte FGHI (prevent overreads)
+ pmaddwd mm0, mm4 ; multiply 1st backed up 2px with F0/F1
+ pmaddwd mm3, mm5 ; multiply 2nd backed up 2px with F2/F3
+ paddd mm0, mm3 ; add to 2nd 2px cache
+ pxor mm3, mm3
+ punpcklbw mm2, mm3 ; byte->word FGHI
+ pshufw mm2, mm2, 0xE9 ; word GHHI
+ pmaddwd mm2, mm6 ; multiply 2px with F4/F5
+ paddd mm0, mm2 ; finish 2nd 2px
+
+ ; merge two sets of 2 pixels into one set of 4, round/clip/store
+ packssdw mm1, mm0 ; merge dword->word (4px)
+ paddsw mm1, mm7 ; rounding
+ psraw mm1, 7
+ packuswb mm1, mm3 ; clip and word->bytes
+ movd [dstq], mm1 ; store
+
+ ; go to next line
+ add dstq, dststrideq
+ add srcq, srcstrideq
+ dec heightd ; next row
+ jg .nextrow
+ RET
+
+INIT_XMM sse2
+cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
+ shl mxd, 5
+%ifdef PIC
+ lea picregq, [fourtap_filter_v_m]
+%endif
+ lea mxq, [fourtap_filter_v+mxq-32]
+ pxor m7, m7
+ mova m4, [pw_64]
+ mova m5, [mxq+ 0]
+ mova m6, [mxq+16]
+%ifdef m8
+ mova m8, [mxq+32]
+ mova m9, [mxq+48]
+%endif
+.nextrow:
+ movq m0, [srcq-1]
+ movq m1, [srcq-0]
+ movq m2, [srcq+1]
+ movq m3, [srcq+2]
+ punpcklbw m0, m7
+ punpcklbw m1, m7
+ punpcklbw m2, m7
+ punpcklbw m3, m7
+ pmullw m0, m5
+ pmullw m1, m6
+%ifdef m8
+ pmullw m2, m8
+ pmullw m3, m9
+%else
+ pmullw m2, [mxq+32]
+ pmullw m3, [mxq+48]
+%endif
+ paddsw m0, m1
+ paddsw m2, m3
+ paddsw m0, m2
+ paddsw m0, m4
+ psraw m0, 7
+ packuswb m0, m7
+ movh [dstq], m0 ; store
+
+ ; go to next line
+ add dstq, dststrideq
+ add srcq, srcstrideq
+ dec heightd ; next row
+ jg .nextrow
+ RET
+
+INIT_XMM sse2
+cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
+ lea mxd, [mxq*3]
+ shl mxd, 4
+%ifdef PIC
+ lea picregq, [sixtap_filter_v_m]
+%endif
+ lea mxq, [sixtap_filter_v+mxq-96]
+ pxor m7, m7
+ mova m6, [pw_64]
+%ifdef m8
+ mova m8, [mxq+ 0]
+ mova m9, [mxq+16]
+ mova m10, [mxq+32]
+ mova m11, [mxq+48]
+ mova m12, [mxq+64]
+ mova m13, [mxq+80]
+%endif
+.nextrow:
+ movq m0, [srcq-2]
+ movq m1, [srcq-1]
+ movq m2, [srcq-0]
+ movq m3, [srcq+1]
+ movq m4, [srcq+2]
+ movq m5, [srcq+3]
+ punpcklbw m0, m7
+ punpcklbw m1, m7
+ punpcklbw m2, m7
+ punpcklbw m3, m7
+ punpcklbw m4, m7
+ punpcklbw m5, m7
+%ifdef m8
+ pmullw m0, m8
+ pmullw m1, m9
+ pmullw m2, m10
+ pmullw m3, m11
+ pmullw m4, m12
+ pmullw m5, m13
+%else
+ pmullw m0, [mxq+ 0]
+ pmullw m1, [mxq+16]
+ pmullw m2, [mxq+32]
+ pmullw m3, [mxq+48]
+ pmullw m4, [mxq+64]
+ pmullw m5, [mxq+80]
+%endif
+ paddsw m1, m4
+ paddsw m0, m5
+ paddsw m1, m2
+ paddsw m0, m3
+ paddsw m0, m1
+ paddsw m0, m6
+ psraw m0, 7
+ packuswb m0, m7
+ movh [dstq], m0 ; store
+
+ ; go to next line
+ add dstq, dststrideq
+ add srcq, srcstrideq
+ dec heightd ; next row
+ jg .nextrow
+ RET
+
+%macro FILTER_V 1
+; 4x4 block, V-only 4-tap filter
+cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
+ shl myd, 5
+%ifdef PIC
+ lea picregq, [fourtap_filter_v_m]
+%endif
+ lea myq, [fourtap_filter_v+myq-32]
+ mova m6, [pw_64]
+ pxor m7, m7
+ mova m5, [myq+48]
+
+ ; read 3 lines
+ sub srcq, srcstrideq
+ movh m0, [srcq]
+ movh m1, [srcq+ srcstrideq]
+ movh m2, [srcq+2*srcstrideq]
+ add srcq, srcstrideq
+ punpcklbw m0, m7
+ punpcklbw m1, m7
+ punpcklbw m2, m7
+
+.nextrow:
+ ; first calculate negative taps (to prevent losing positive overflows)
+ movh m4, [srcq+2*srcstrideq] ; read new row
+ punpcklbw m4, m7
+ mova m3, m4
+ pmullw m0, [myq+0]
+ pmullw m4, m5
+ paddsw m4, m0
+
+ ; then calculate positive taps
+ mova m0, m1
+ pmullw m1, [myq+16]
+ paddsw m4, m1
+ mova m1, m2
+ pmullw m2, [myq+32]
+ paddsw m4, m2
+ mova m2, m3
+
+ ; round/clip/store
+ paddsw m4, m6
+ psraw m4, 7
+ packuswb m4, m7
+ movh [dstq], m4
+
+ ; go to next line
+ add dstq, dststrideq
+ add srcq, srcstrideq
+ dec heightd ; next row
+ jg .nextrow
+ RET
+
+
+; 4x4 block, V-only 6-tap filter
+cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
+ shl myd, 4
+ lea myq, [myq*3]
+%ifdef PIC
+ lea picregq, [sixtap_filter_v_m]
+%endif
+ lea myq, [sixtap_filter_v+myq-96]
+ pxor m7, m7
+
+ ; read 5 lines
+ sub srcq, srcstrideq
+ sub srcq, srcstrideq
+ movh m0, [srcq]
+ movh m1, [srcq+srcstrideq]
+ movh m2, [srcq+srcstrideq*2]
+ lea srcq, [srcq+srcstrideq*2]
+ add srcq, srcstrideq
+ movh m3, [srcq]
+ movh m4, [srcq+srcstrideq]
+ punpcklbw m0, m7
+ punpcklbw m1, m7
+ punpcklbw m2, m7
+ punpcklbw m3, m7
+ punpcklbw m4, m7
+
+.nextrow:
+ ; first calculate negative taps (to prevent losing positive overflows)
+ mova m5, m1
+ pmullw m5, [myq+16]
+ mova m6, m4
+ pmullw m6, [myq+64]
+ paddsw m6, m5
+
+ ; then calculate positive taps
+ movh m5, [srcq+2*srcstrideq] ; read new row
+ punpcklbw m5, m7
+ pmullw m0, [myq+0]
+ paddsw m6, m0
+ mova m0, m1
+ mova m1, m2
+ pmullw m2, [myq+32]
+ paddsw m6, m2
+ mova m2, m3
+ pmullw m3, [myq+48]
+ paddsw m6, m3
+ mova m3, m4
+ mova m4, m5
+ pmullw m5, [myq+80]
+ paddsw m6, m5
+
+ ; round/clip/store
+ paddsw m6, [pw_64]
+ psraw m6, 7
+ packuswb m6, m7
+ movh [dstq], m6
+
+ ; go to next line
+ add dstq, dststrideq
+ add srcq, srcstrideq
+ dec heightd ; next row
+ jg .nextrow
+ RET
+%endmacro
+
+INIT_MMX mmxext
+FILTER_V 4
+INIT_XMM sse2
+FILTER_V 8
+
+%macro FILTER_BILINEAR 1
+%if cpuflag(ssse3)
+cglobal put_vp8_bilinear%1_v, 7, 7, 5, dst, dststride, src, srcstride, height, picreg, my
+ shl myd, 4
+%ifdef PIC
+ lea picregq, [bilinear_filter_vb_m]
+%endif
+ pxor m4, m4
+ mova m3, [bilinear_filter_vb+myq-16]
+.nextrow:
+ movh m0, [srcq+srcstrideq*0]
+ movh m1, [srcq+srcstrideq*1]
+ movh m2, [srcq+srcstrideq*2]
+ punpcklbw m0, m1
+ punpcklbw m1, m2
+ pmaddubsw m0, m3
+ pmaddubsw m1, m3
+ psraw m0, 2
+ psraw m1, 2
+ pavgw m0, m4
+ pavgw m1, m4
+%if mmsize==8
+ packuswb m0, m0
+ packuswb m1, m1
+ movh [dstq+dststrideq*0], m0
+ movh [dstq+dststrideq*1], m1
+%else
+ packuswb m0, m1
+ movh [dstq+dststrideq*0], m0
+ movhps [dstq+dststrideq*1], m0
+%endif
+%else ; cpuflag(ssse3)
+cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, picreg, my
+ shl myd, 4
+%ifdef PIC
+ lea picregq, [bilinear_filter_vw_m]
+%endif
+ pxor m6, m6
+ mova m5, [bilinear_filter_vw+myq-1*16]
+ neg myq
+ mova m4, [bilinear_filter_vw+myq+7*16]
+.nextrow:
+ movh m0, [srcq+srcstrideq*0]
+ movh m1, [srcq+srcstrideq*1]
+ movh m3, [srcq+srcstrideq*2]
+ punpcklbw m0, m6
+ punpcklbw m1, m6
+ punpcklbw m3, m6
+ mova m2, m1
+ pmullw m0, m4
+ pmullw m1, m5
+ pmullw m2, m4
+ pmullw m3, m5
+ paddsw m0, m1
+ paddsw m2, m3
+ psraw m0, 2
+ psraw m2, 2
+ pavgw m0, m6
+ pavgw m2, m6
+%if mmsize == 8
+ packuswb m0, m0
+ packuswb m2, m2
+ movh [dstq+dststrideq*0], m0
+ movh [dstq+dststrideq*1], m2
+%else
+ packuswb m0, m2
+ movh [dstq+dststrideq*0], m0
+ movhps [dstq+dststrideq*1], m0
+%endif
+%endif ; cpuflag(ssse3)
+
+ lea dstq, [dstq+dststrideq*2]
+ lea srcq, [srcq+srcstrideq*2]
+ sub heightd, 2
+ jg .nextrow
+ RET
+
+%if cpuflag(ssse3)
+cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
+ shl mxd, 4
+%ifdef PIC
+ lea picregq, [bilinear_filter_vb_m]
+%endif
+ pxor m4, m4
+ mova m2, [filter_h2_shuf]
+ mova m3, [bilinear_filter_vb+mxq-16]
+.nextrow:
+ movu m0, [srcq+srcstrideq*0]
+ movu m1, [srcq+srcstrideq*1]
+ pshufb m0, m2
+ pshufb m1, m2
+ pmaddubsw m0, m3
+ pmaddubsw m1, m3
+ psraw m0, 2
+ psraw m1, 2
+ pavgw m0, m4
+ pavgw m1, m4
+%if mmsize==8
+ packuswb m0, m0
+ packuswb m1, m1
+ movh [dstq+dststrideq*0], m0
+ movh [dstq+dststrideq*1], m1
+%else
+ packuswb m0, m1
+ movh [dstq+dststrideq*0], m0
+ movhps [dstq+dststrideq*1], m0
+%endif
+%else ; cpuflag(ssse3)
+cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
+ shl mxd, 4
+%ifdef PIC
+ lea picregq, [bilinear_filter_vw_m]
+%endif
+ pxor m6, m6
+ mova m5, [bilinear_filter_vw+mxq-1*16]
+ neg mxq
+ mova m4, [bilinear_filter_vw+mxq+7*16]
+.nextrow:
+ movh m0, [srcq+srcstrideq*0+0]
+ movh m1, [srcq+srcstrideq*0+1]
+ movh m2, [srcq+srcstrideq*1+0]
+ movh m3, [srcq+srcstrideq*1+1]
+ punpcklbw m0, m6
+ punpcklbw m1, m6
+ punpcklbw m2, m6
+ punpcklbw m3, m6
+ pmullw m0, m4
+ pmullw m1, m5
+ pmullw m2, m4
+ pmullw m3, m5
+ paddsw m0, m1
+ paddsw m2, m3
+ psraw m0, 2
+ psraw m2, 2
+ pavgw m0, m6
+ pavgw m2, m6
+%if mmsize == 8
+ packuswb m0, m0
+ packuswb m2, m2
+ movh [dstq+dststrideq*0], m0
+ movh [dstq+dststrideq*1], m2
+%else
+ packuswb m0, m2
+ movh [dstq+dststrideq*0], m0
+ movhps [dstq+dststrideq*1], m0
+%endif
+%endif ; cpuflag(ssse3)
+
+ lea dstq, [dstq+dststrideq*2]
+ lea srcq, [srcq+srcstrideq*2]
+ sub heightd, 2
+ jg .nextrow
+ RET
+%endmacro
+
+INIT_MMX mmxext
+FILTER_BILINEAR 4
+INIT_XMM sse2
+FILTER_BILINEAR 8
+INIT_MMX ssse3
+FILTER_BILINEAR 4
+INIT_XMM ssse3
+FILTER_BILINEAR 8
+
+INIT_MMX mmx
+cglobal put_vp8_pixels8, 5, 5, 0, dst, dststride, src, srcstride, height
+.nextrow:
+ movq mm0, [srcq+srcstrideq*0]
+ movq mm1, [srcq+srcstrideq*1]
+ lea srcq, [srcq+srcstrideq*2]
+ movq [dstq+dststrideq*0], mm0
+ movq [dstq+dststrideq*1], mm1
+ lea dstq, [dstq+dststrideq*2]
+ sub heightd, 2
+ jg .nextrow
+ RET
+
+INIT_XMM sse
+cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
+.nextrow:
+ movups xmm0, [srcq+srcstrideq*0]
+ movups xmm1, [srcq+srcstrideq*1]
+ lea srcq, [srcq+srcstrideq*2]
+ movaps [dstq+dststrideq*0], xmm0
+ movaps [dstq+dststrideq*1], xmm1
+ lea dstq, [dstq+dststrideq*2]
+ sub heightd, 2
+ jg .nextrow
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_vp8_idct_dc_add_<opt>(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
+;-----------------------------------------------------------------------------
+
+%macro ADD_DC 4
+ %4 m2, [dst1q+%3]
+ %4 m3, [dst1q+strideq+%3]
+ %4 m4, [dst2q+%3]
+ %4 m5, [dst2q+strideq+%3]
+ paddusb m2, %1
+ paddusb m3, %1
+ paddusb m4, %1
+ paddusb m5, %1
+ psubusb m2, %2
+ psubusb m3, %2
+ psubusb m4, %2
+ psubusb m5, %2
+ %4 [dst1q+%3], m2
+ %4 [dst1q+strideq+%3], m3
+ %4 [dst2q+%3], m4
+ %4 [dst2q+strideq+%3], m5
+%endmacro
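+; ADD_DC applies a signed DC offset to 4 rows of pixels using only unsigned
+; saturating ops: %1 holds max(dc, 0) and %2 holds max(-dc, 0) splatted to
+; bytes (at most one of the two is nonzero), so the paddusb/psubusb pair adds
+; the signed value with clamping to [0,255].  %3 is a byte offset into the
+; rows, %4 the load/store instruction to use.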
+
+%macro VP8_IDCT_DC_ADD 0
+cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride
+ ; load data
+ movd m0, [blockq]
+ pxor m1, m1
+
+ ; calculate DC
+ paddw m0, [pw_4]
+ movd [blockq], m1
+ DEFINE_ARGS dst1, dst2, stride
+ lea dst2q, [dst1q+strideq*2]
+ movd m2, [dst1q]
+ movd m3, [dst1q+strideq]
+ movd m4, [dst2q]
+ movd m5, [dst2q+strideq]
+ psraw m0, 3
+ pshuflw m0, m0, 0
+ punpcklqdq m0, m0
+ punpckldq m2, m3
+ punpckldq m4, m5
+ punpcklbw m2, m1
+ punpcklbw m4, m1
+ paddw m2, m0
+ paddw m4, m0
+ packuswb m2, m4
+ movd [dst1q], m2
+%if cpuflag(sse4)
+ pextrd [dst1q+strideq], m2, 1
+ pextrd [dst2q], m2, 2
+ pextrd [dst2q+strideq], m2, 3
+%else
+ psrldq m2, 4
+ movd [dst1q+strideq], m2
+ psrldq m2, 4
+ movd [dst2q], m2
+ psrldq m2, 4
+ movd [dst2q+strideq], m2
+%endif
+ RET
+%endmacro
+
+INIT_XMM sse2
+VP8_IDCT_DC_ADD
+INIT_XMM sse4
+VP8_IDCT_DC_ADD
+
+;-----------------------------------------------------------------------------
+; void ff_vp8_idct_dc_add4y_<opt>(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);
+;-----------------------------------------------------------------------------
+
+INIT_XMM sse2
+cglobal vp8_idct_dc_add4y, 3, 3, 6, dst, block, stride
+ ; load data
+ movd m0, [blockq+32*0] ; A
+ movd m1, [blockq+32*2] ; C
+ punpcklwd m0, [blockq+32*1] ; A B
+ punpcklwd m1, [blockq+32*3] ; C D
+ punpckldq m0, m1 ; A B C D
+ pxor m1, m1
+
+ ; calculate DC
+ paddw m0, [pw_4]
+ movd [blockq+32*0], m1
+ movd [blockq+32*1], m1
+ movd [blockq+32*2], m1
+ movd [blockq+32*3], m1
+ psraw m0, 3
+ psubw m1, m0
+ packuswb m0, m0
+ packuswb m1, m1
+ punpcklbw m0, m0
+ punpcklbw m1, m1
+ punpcklbw m0, m0
+ punpcklbw m1, m1
+
+ ; add DC
+ DEFINE_ARGS dst1, dst2, stride
+ lea dst2q, [dst1q+strideq*2]
+ ADD_DC m0, m1, 0, mova
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_vp8_idct_dc_add4uv_<opt>(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);
+;-----------------------------------------------------------------------------
+
+INIT_MMX mmx
+cglobal vp8_idct_dc_add4uv, 3, 3, 0, dst, block, stride
+ ; load data
+ movd m0, [blockq+32*0] ; A
+ movd m1, [blockq+32*2] ; C
+ punpcklwd m0, [blockq+32*1] ; A B
+ punpcklwd m1, [blockq+32*3] ; C D
+ punpckldq m0, m1 ; A B C D
+ pxor m6, m6
+
+ ; calculate DC
+ paddw m0, [pw_4]
+ movd [blockq+32*0], m6
+ movd [blockq+32*1], m6
+ movd [blockq+32*2], m6
+ movd [blockq+32*3], m6
+ psraw m0, 3
+ psubw m6, m0
+ packuswb m0, m0
+ packuswb m6, m6
+ punpcklbw m0, m0 ; AABBCCDD
+ punpcklbw m6, m6 ; AABBCCDD
+ movq m1, m0
+ movq m7, m6
+ punpcklbw m0, m0 ; AAAABBBB
+ punpckhbw m1, m1 ; CCCCDDDD
+ punpcklbw m6, m6 ; AAAABBBB
+ punpckhbw m7, m7 ; CCCCDDDD
+
+ ; add DC
+ DEFINE_ARGS dst1, dst2, stride
+ lea dst2q, [dst1q+strideq*2]
+ ADD_DC m0, m6, 0, mova
+ lea dst1q, [dst1q+strideq*4]
+ lea dst2q, [dst2q+strideq*4]
+ ADD_DC m1, m7, 0, mova
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_vp8_idct_add_<opt>(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
+;-----------------------------------------------------------------------------
+
+; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
+; this macro assumes that m6/m7 have words for 20091/17734 loaded
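+; 20091 and 35468 are the VP8 reference decoder's cospi8sqrt2minus1 and
+; sinpi8sqrt2 constants, i.e. sqrt(2)*cos(pi/8)-1 and sqrt(2)*sin(pi/8) in
+; Q16; since 35468 does not fit a signed 16-bit pmulhw multiplier, the table
+; stores 17734 = 35468/2 and the macro doubles the input first, so that
+; (2*x*17734)>>16 == (x*35468)>>16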
+%macro VP8_MULTIPLY_SUMSUB 4
+ mova %3, %1
+ mova %4, %2
+ pmulhw %3, m6 ;20091(1)
+ pmulhw %4, m6 ;20091(2)
+ paddw %3, %1
+ paddw %4, %2
+ paddw %1, %1
+ paddw %2, %2
+ pmulhw %1, m7 ;35468(1)
+ pmulhw %2, m7 ;35468(2)
+ psubw %1, %4
+ paddw %2, %3
+%endmacro
+
+; calculate x0=%1+%3; x1=%1-%3
+; x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
+; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
+; %5/%6 are temporary registers
+; we assume m6/m7 have constant words 20091/17734 loaded in them
+%macro VP8_IDCT_TRANSFORM4x4_1D 6
+ SUMSUB_BA w, %3, %1, %5 ;t0, t1
+ VP8_MULTIPLY_SUMSUB m%2, m%4, m%5, m%6 ;t2, t3
+ SUMSUB_BA w, %4, %3, %5 ;tmp0, tmp3
+ SUMSUB_BA w, %2, %1, %5 ;tmp1, tmp2
+ SWAP %4, %1
+ SWAP %4, %3
+%endmacro
+
+INIT_MMX sse
+cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
+ ; load block data
+ movq m0, [blockq+ 0]
+ movq m1, [blockq+ 8]
+ movq m2, [blockq+16]
+ movq m3, [blockq+24]
+ movq m6, [pw_20091]
+ movq m7, [pw_17734]
+ xorps xmm0, xmm0
+ movaps [blockq+ 0], xmm0
+ movaps [blockq+16], xmm0
+
+ ; actual IDCT
+ VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+ paddw m0, [pw_4]
+ VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+
+ ; store
+ pxor m4, m4
+ DEFINE_ARGS dst1, dst2, stride
+ lea dst2q, [dst1q+2*strideq]
+ STORE_DIFFx2 m0, m1, m6, m7, m4, 3, dst1q, strideq
+ STORE_DIFFx2 m2, m3, m6, m7, m4, 3, dst2q, strideq
+
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_vp8_luma_dc_wht(int16_t block[4][4][16], int16_t dc[16])
+;-----------------------------------------------------------------------------
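+; the inverse WHT transforms the 16 luma DC coefficients in dc[] and scatters
+; each result into coefficient 0 of the corresponding 4x4 block; the blocks
+; are 16 int16_t (32 bytes) apart, which is why SCATTER_WHT below writes at
+; byte offsets 2*16*n, and dc[] itself is cleared by the two 16-byte stores
+; in the function body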
+
+%macro SCATTER_WHT 3
+ movd dc1d, m%1
+ movd dc2d, m%2
+ mov [blockq+2*16*(0+%3)], dc1w
+ mov [blockq+2*16*(1+%3)], dc2w
+ shr dc1d, 16
+ shr dc2d, 16
+ psrlq m%1, 32
+ psrlq m%2, 32
+ mov [blockq+2*16*(4+%3)], dc1w
+ mov [blockq+2*16*(5+%3)], dc2w
+ movd dc1d, m%1
+ movd dc2d, m%2
+ mov [blockq+2*16*(8+%3)], dc1w
+ mov [blockq+2*16*(9+%3)], dc2w
+ shr dc1d, 16
+ shr dc2d, 16
+ mov [blockq+2*16*(12+%3)], dc1w
+ mov [blockq+2*16*(13+%3)], dc2w
+%endmacro
+
+%macro HADAMARD4_1D 4
+ SUMSUB_BADC w, %2, %1, %4, %3
+ SUMSUB_BADC w, %4, %2, %3, %1
+ SWAP %1, %4, %3
+%endmacro
+
+INIT_MMX sse
+cglobal vp8_luma_dc_wht, 2, 3, 0, block, dc1, dc2
+ movq m0, [dc1q]
+ movq m1, [dc1q+8]
+ movq m2, [dc1q+16]
+ movq m3, [dc1q+24]
+ xorps xmm0, xmm0
+ movaps [dc1q+ 0], xmm0
+ movaps [dc1q+16], xmm0
+ HADAMARD4_1D 0, 1, 2, 3
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+ paddw m0, [pw_3]
+ HADAMARD4_1D 0, 1, 2, 3
+ psraw m0, 3
+ psraw m1, 3
+ psraw m2, 3
+ psraw m3, 3
+ SCATTER_WHT 0, 1, 0
+ SCATTER_WHT 2, 3, 2
+ RET
diff --git a/media/ffvpx/libavcodec/x86/vp8dsp_init.c b/media/ffvpx/libavcodec/x86/vp8dsp_init.c
new file mode 100644
index 0000000000..bd20da1fc9
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp8dsp_init.c
@@ -0,0 +1,383 @@
+/*
+ * VP8 DSP functions x86-optimized
+ * Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
+ * Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem_internal.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/vp8dsp.h"
+
+#if HAVE_X86ASM
+
+/*
+ * MC functions
+ */
+void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel4_h6_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel4_v4_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel4_v6_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+
+void ff_put_vp8_epel8_h4_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel8_h6_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel8_v4_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel8_v6_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+
+void ff_put_vp8_epel4_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel4_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel4_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel4_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel8_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel8_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel8_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_epel8_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+
+void ff_put_vp8_bilinear4_h_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_bilinear8_h_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_bilinear4_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_bilinear8_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+
+void ff_put_vp8_bilinear4_v_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_bilinear8_v_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_bilinear4_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_bilinear8_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+
+
+void ff_put_vp8_pixels8_mmx (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+void ff_put_vp8_pixels16_sse(uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride,
+ int height, int mx, int my);
+
+#define TAP_W16(OPT, FILTERTYPE, TAPTYPE) \
+static void ff_put_vp8_ ## FILTERTYPE ## 16_ ## TAPTYPE ## _ ## OPT( \
+ uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, \
+ ptrdiff_t srcstride, int height, int mx, int my) \
+{ \
+ ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
+ dst, dststride, src, srcstride, height, mx, my); \
+ ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
+ dst + 8, dststride, src + 8, srcstride, height, mx, my); \
+}
+#define TAP_W8(OPT, FILTERTYPE, TAPTYPE) \
+static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
+ uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, \
+ ptrdiff_t srcstride, int height, int mx, int my) \
+{ \
+ ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
+ dst, dststride, src, srcstride, height, mx, my); \
+ ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
+ dst + 4, dststride, src + 4, srcstride, height, mx, my); \
+}
+
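+/* The TAP_W* wrappers synthesize a wider MC function from two calls to the
+ * next-narrower asm kernel on the left and right halves of the block; only
+ * the 16-wide wrappers are instantiated here, the 8- and 4-wide cases being
+ * covered by dedicated asm declared above. */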
+TAP_W16(sse2, epel, h6)
+TAP_W16(sse2, epel, v6)
+TAP_W16(sse2, bilinear, h)
+TAP_W16(sse2, bilinear, v)
+
+TAP_W16(ssse3, epel, h6)
+TAP_W16(ssse3, epel, v6)
+TAP_W16(ssse3, bilinear, h)
+TAP_W16(ssse3, bilinear, v)
+
+#define HVTAP(OPT, ALIGN, TAPNUMX, TAPNUMY, SIZE, MAXHEIGHT) \
+static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT( \
+ uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, \
+ ptrdiff_t srcstride, int height, int mx, int my) \
+{ \
+ LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + TAPNUMY - 1)]); \
+ uint8_t *tmpptr = tmp + SIZE * (TAPNUMY / 2 - 1); \
+ src -= srcstride * (TAPNUMY / 2 - 1); \
+ ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## _ ## OPT( \
+ tmp, SIZE, src, srcstride, height + TAPNUMY - 1, mx, my); \
+ ff_put_vp8_epel ## SIZE ## _v ## TAPNUMY ## _ ## OPT( \
+ dst, dststride, tmpptr, SIZE, height, mx, my); \
+}
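+
+/* HVTAP builds the combined h+v subpel filters as two passes: the horizontal
+ * kernel filters height + TAPNUMY - 1 source rows (starting TAPNUMY/2 - 1
+ * rows above the destination's first source row) into an aligned temporary
+ * buffer, from which the vertical kernel then reads at a fixed stride of
+ * SIZE. */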
+
+#define HVTAPMMX(x, y) \
+HVTAP(mmxext, 8, x, y, 4, 8)
+
+HVTAPMMX(4, 4)
+HVTAPMMX(4, 6)
+HVTAPMMX(6, 4)
+HVTAPMMX(6, 6)
+
+#define HVTAPSSE2(x, y, w) \
+HVTAP(sse2, 16, x, y, w, 16) \
+HVTAP(ssse3, 16, x, y, w, 16)
+
+HVTAPSSE2(4, 4, 8)
+HVTAPSSE2(4, 6, 8)
+HVTAPSSE2(6, 4, 8)
+HVTAPSSE2(6, 6, 8)
+HVTAPSSE2(6, 6, 16)
+
+HVTAP(ssse3, 16, 4, 4, 4, 8)
+HVTAP(ssse3, 16, 4, 6, 4, 8)
+HVTAP(ssse3, 16, 6, 4, 4, 8)
+HVTAP(ssse3, 16, 6, 6, 4, 8)
+
+#define HVBILIN(OPT, ALIGN, SIZE, MAXHEIGHT) \
+static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
+ uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, \
+ ptrdiff_t srcstride, int height, int mx, int my) \
+{ \
+ LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + 2)]); \
+ ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT( \
+ tmp, SIZE, src, srcstride, height + 1, mx, my); \
+ ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT( \
+ dst, dststride, tmp, SIZE, height, mx, my); \
+}
+
+HVBILIN(mmxext, 8, 4, 8)
+HVBILIN(sse2, 8, 8, 16)
+HVBILIN(sse2, 8, 16, 16)
+HVBILIN(ssse3, 8, 4, 8)
+HVBILIN(ssse3, 8, 8, 16)
+HVBILIN(ssse3, 8, 16, 16)
+
+void ff_vp8_idct_dc_add_sse2(uint8_t *dst, int16_t block[16],
+ ptrdiff_t stride);
+void ff_vp8_idct_dc_add_sse4(uint8_t *dst, int16_t block[16],
+ ptrdiff_t stride);
+void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, int16_t block[4][16],
+ ptrdiff_t stride);
+void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, int16_t block[4][16],
+ ptrdiff_t stride);
+void ff_vp8_luma_dc_wht_sse(int16_t block[4][4][16], int16_t dc[16]);
+void ff_vp8_idct_add_sse(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
+
+#define DECLARE_LOOP_FILTER(NAME) \
+void ff_vp8_v_loop_filter_simple_ ## NAME(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int flim); \
+void ff_vp8_h_loop_filter_simple_ ## NAME(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int flim); \
+void ff_vp8_v_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
+ ptrdiff_t stride, \
+ int e, int i, int hvt); \
+void ff_vp8_h_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
+ ptrdiff_t stride, \
+ int e, int i, int hvt); \
+void ff_vp8_v_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
+ uint8_t *dstV, \
+ ptrdiff_t s, \
+ int e, int i, int hvt); \
+void ff_vp8_h_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
+ uint8_t *dstV, \
+ ptrdiff_t s, \
+ int e, int i, int hvt); \
+void ff_vp8_v_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int e, int i, int hvt); \
+void ff_vp8_h_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int e, int i, int hvt); \
+void ff_vp8_v_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
+ uint8_t *dstV, \
+ ptrdiff_t s, \
+ int e, int i, int hvt); \
+void ff_vp8_h_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
+ uint8_t *dstV, \
+ ptrdiff_t s, \
+ int e, int i, int hvt);
+
+DECLARE_LOOP_FILTER(sse2)
+DECLARE_LOOP_FILTER(ssse3)
+DECLARE_LOOP_FILTER(sse4)
+
+#endif /* HAVE_X86ASM */
+
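+/* put_vp8_epel_pixels_tab is indexed as [4 - log2(width)][v taps][h taps],
+ * with 0 meaning no filter (copy), 1 the 4-tap and 2 the 6-tap filter; the
+ * bilinear table has the same shape, but since bilinear prediction does not
+ * distinguish 4- and 6-tap cases, both nonzero indexes point at the same
+ * function. */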
+#define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \
+ c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_ ## OPT; \
+ c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_ ## OPT; \
+ c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT
+
+#define VP8_MC_FUNC(IDX, SIZE, OPT) \
+ c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_ ## OPT; \
+ c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_ ## OPT; \
+ c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \
+ c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \
+ c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \
+ VP8_LUMA_MC_FUNC(IDX, SIZE, OPT)
+
+#define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \
+ c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
+ c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
+ c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
+ c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
+ c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
+ c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
+ c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
+ c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT
+
+
+av_cold void ff_vp78dsp_init_x86(VP8DSPContext *c)
+{
+#if HAVE_X86ASM
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_MMX(cpu_flags)) {
+ c->put_vp8_epel_pixels_tab[1][0][0] =
+ c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
+ }
+
+ /* note that 4-tap width=16 functions are missing because w=16
+ * is only used for luma, and luma is always a copy or sixtap. */
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ VP8_MC_FUNC(2, 4, mmxext);
+ VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
+ }
+
+ if (EXTERNAL_SSE(cpu_flags)) {
+ c->put_vp8_epel_pixels_tab[0][0][0] =
+ c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
+ }
+
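+ /* EXTERNAL_SSE2_SLOW also matches CPUs that set AV_CPU_FLAG_SSE2SLOW;
+ * the wide MC functions are presumably still worthwhile there, while
+ * code gated on the stricter EXTERNAL_SSE2 is skipped on such CPUs. */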
+ if (EXTERNAL_SSE2_SLOW(cpu_flags)) {
+ VP8_LUMA_MC_FUNC(0, 16, sse2);
+ VP8_MC_FUNC(1, 8, sse2);
+ VP8_BILINEAR_MC_FUNC(0, 16, sse2);
+ VP8_BILINEAR_MC_FUNC(1, 8, sse2);
+ }
+
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ VP8_LUMA_MC_FUNC(0, 16, ssse3);
+ VP8_MC_FUNC(1, 8, ssse3);
+ VP8_MC_FUNC(2, 4, ssse3);
+ VP8_BILINEAR_MC_FUNC(0, 16, ssse3);
+ VP8_BILINEAR_MC_FUNC(1, 8, ssse3);
+ VP8_BILINEAR_MC_FUNC(2, 4, ssse3);
+ }
+#endif /* HAVE_X86ASM */
+}
+
+av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
+{
+#if HAVE_X86ASM
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_MMX(cpu_flags)) {
+ c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
+ }
+
+ if (EXTERNAL_SSE(cpu_flags)) {
+ c->vp8_idct_add = ff_vp8_idct_add_sse;
+ c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
+ }
+
+ if (EXTERNAL_SSE2_SLOW(cpu_flags)) {
+ c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2;
+
+ c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2;
+ c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2;
+
+ c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2;
+ c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2;
+ }
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse2;
+ c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2;
+
+ c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2;
+
+ c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2;
+ c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2;
+
+ c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2;
+ c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2;
+ }
+
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3;
+ c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3;
+
+ c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3;
+ c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3;
+ c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3;
+ c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3;
+
+ c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3;
+ c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3;
+ c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3;
+ c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3;
+ }
+
+ if (EXTERNAL_SSE4(cpu_flags)) {
+ c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4;
+
+ c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4;
+ c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse4;
+ c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse4;
+ }
+#endif /* HAVE_X86ASM */
+}
diff --git a/media/ffvpx/libavcodec/x86/vp8dsp_loopfilter.asm b/media/ffvpx/libavcodec/x86/vp8dsp_loopfilter.asm
new file mode 100644
index 0000000000..ef397efd3e
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp8dsp_loopfilter.asm
@@ -0,0 +1,1234 @@
+;******************************************************************************
+;* VP8 MMXEXT optimizations
+;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
+;* Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pw_27: times 8 dw 27
+pw_63: times 8 dw 63
+
+pb_4: times 16 db 4
+pb_F8: times 16 db 0xF8
+pb_FE: times 16 db 0xFE
+pb_27_63: times 8 db 27, 63
+pb_18_63: times 8 db 18, 63
+pb_9_63: times 8 db 9, 63
+
+cextern pb_1
+cextern pb_3
+cextern pw_9
+cextern pw_18
+cextern pb_80
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; void ff_vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, ptrdiff_t stride, int flim);
+;-----------------------------------------------------------------------------
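+; the simple filter touches only p1/p0/q0/q1: the edge is filtered when
+; 2*|p0-q0| + |p1-q1|/2 <= flim; then a = clamp(p1-q1) + 3*(q0-p0),
+; q0 -= clamp(a+4)>>3 and p0 += clamp(a+3)>>3, with p1/q1 left unchanged,
+; which the code below implements with saturating byte arithmetic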
+
+; macro called with 7 mm register indexes and 5 regular registers as arguments
+; the first 11 are the 7 mm registers to fill plus the two row pointers and
+; the -stride/+stride registers; the fifth regular register is scratch space
+; used to reach the bottom 8 rows, and is set to the second regular register
+; + 8*stride at the end
+%macro READ_16x4_INTERLEAVED 12
+ ; transpose 16 (A-P) rows of 4 pixels each
+ lea %12, [r0+8*r2]
+
+ ; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
+ movd m%1, [%8+%10*4] ; A0-3
+ movd m%3, [%12+%10*4] ; I0-3
+ movd m%2, [%8+%10*2] ; C0-3
+ movd m%4, [%12+%10*2] ; K0-3
+ movd m%6, [%8+%10] ; D0-3
+ movd m%5, [%12+%10] ; L0-3
+ movd m%7, [%12] ; M0-3
+ add %12, %11
+ punpcklbw m%1, m%3 ; A/I
+ movd m%3, [%8] ; E0-3
+ punpcklbw m%2, m%4 ; C/K
+ punpcklbw m%6, m%5 ; D/L
+ punpcklbw m%3, m%7 ; E/M
+ punpcklbw m%2, m%6 ; C/D/K/L interleaved
+
+ ; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
+ movd m%5, [%9+%10*4] ; B0-3
+ movd m%4, [%12+%10*4] ; J0-3
+ movd m%7, [%9] ; F0-3
+ movd m%6, [%12] ; N0-3
+ punpcklbw m%5, m%4 ; B/J
+ punpcklbw m%7, m%6 ; F/N
+ punpcklbw m%1, m%5 ; A/B/I/J interleaved
+ punpcklbw m%3, m%7 ; E/F/M/N interleaved
+ movd m%4, [%9+%11] ; G0-3
+ movd m%6, [%12+%11] ; O0-3
+ movd m%5, [%9+%11*2] ; H0-3
+ movd m%7, [%12+%11*2] ; P0-3
+ punpcklbw m%4, m%6 ; G/O
+ punpcklbw m%5, m%7 ; H/P
+ punpcklbw m%4, m%5 ; G/H/O/P interleaved
+%endmacro
+
+; write 4 xmm registers of 4 dwords each
+; the 4 mm register indexes are followed by 5 regular registers, which contain
+; buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
+; we add 1*stride to the third regular register in the process
+; the 10th argument is 16 if it's a Y filter (i.e. all regular registers cover the
+; same memory region), or 8 if they cover two separate buffers (third one points to
+; a different memory region than the first two), allowing for more optimal code for
+; the 16-width case
+%macro WRITE_4x4D 10
+ ; write out (4 dwords per register), start with dwords zero
+ movd [%5+%8*4], m%1
+ movd [%5], m%2
+ movd [%7+%8*4], m%3
+ movd [%7], m%4
+
+ ; store dwords 1
+ psrldq m%1, 4
+ psrldq m%2, 4
+ psrldq m%3, 4
+ psrldq m%4, 4
+ movd [%6+%8*4], m%1
+ movd [%6], m%2
+%if %10 == 16
+ movd [%6+%9*4], m%3
+%endif
+ movd [%7+%9], m%4
+
+ ; write dwords 2
+ psrldq m%1, 4
+ psrldq m%2, 4
+%if %10 == 8
+ movd [%5+%8*2], m%1
+ movd %5d, m%3
+%endif
+ psrldq m%3, 4
+ psrldq m%4, 4
+%if %10 == 16
+ movd [%5+%8*2], m%1
+%endif
+ movd [%6+%9], m%2
+ movd [%7+%8*2], m%3
+ movd [%7+%9*2], m%4
+ add %7, %9
+
+ ; store dwords 3
+ psrldq m%1, 4
+ psrldq m%2, 4
+ psrldq m%3, 4
+ psrldq m%4, 4
+%if %10 == 8
+ mov [%7+%8*4], %5d
+ movd [%6+%8*2], m%1
+%else
+ movd [%5+%8], m%1
+%endif
+ movd [%6+%9*2], m%2
+ movd [%7+%8*2], m%3
+ movd [%7+%9*2], m%4
+%endmacro
+
+%macro WRITE_8W 5
+%if cpuflag(sse4)
+ pextrw [%3+%4*4], %1, 0
+ pextrw [%2+%4*4], %1, 1
+ pextrw [%3+%4*2], %1, 2
+ pextrw [%3+%4 ], %1, 3
+ pextrw [%3 ], %1, 4
+ pextrw [%2 ], %1, 5
+ pextrw [%2+%5 ], %1, 6
+ pextrw [%2+%5*2], %1, 7
+%else
+ movd %2d, %1
+ psrldq %1, 4
+ mov [%3+%4*4], %2w
+ shr %2, 16
+ add %3, %5
+ mov [%3+%4*4], %2w
+
+ movd %2d, %1
+ psrldq %1, 4
+ add %3, %4
+ mov [%3+%4*2], %2w
+ shr %2, 16
+ mov [%3+%4 ], %2w
+
+ movd %2d, %1
+ psrldq %1, 4
+ mov [%3 ], %2w
+ shr %2, 16
+ mov [%3+%5 ], %2w
+
+ movd %2d, %1
+ add %3, %5
+ mov [%3+%5 ], %2w
+ shr %2, 16
+ mov [%3+%5*2], %2w
+%endif
+%endmacro
+
+%macro SIMPLE_LOOPFILTER 2
+cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
+%if cpuflag(ssse3)
+ pxor m0, m0
+%endif
+ SPLATB_REG m7, flim, m0 ; splat "flim" into register
+
+ ; set up indexes to address 4 rows
+ DEFINE_ARGS dst1, mstride, stride, dst3, dst2
+ mov strideq, mstrideq
+ neg mstrideq
+%ifidn %1, h
+ lea dst1q, [dst1q+4*strideq-2]
+%endif
+
+%ifidn %1, v
+ ; read 4 half/full rows of pixels
+ mova m0, [dst1q+mstrideq*2] ; p1
+ mova m1, [dst1q+mstrideq] ; p0
+ mova m2, [dst1q] ; q0
+ mova m3, [dst1q+ strideq] ; q1
+%else ; h
+ lea dst2q, [dst1q+ strideq]
+
+ READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq, dst3q
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+%endif
+
+ ; simple_limit
+ mova m5, m2 ; m5=backup of q0
+ mova m6, m1 ; m6=backup of p0
+ psubusb m1, m2 ; p0-q0
+ psubusb m2, m6 ; q0-p0
+ por m1, m2 ; FFABS(p0-q0)
+ paddusb m1, m1 ; m1=FFABS(p0-q0)*2
+
+ mova m4, m3
+ mova m2, m0
+ psubusb m3, m0 ; q1-p1
+ psubusb m0, m4 ; p1-q1
+ por m3, m0 ; FFABS(p1-q1)
+ mova m0, [pb_80]
+ pxor m2, m0
+ pxor m4, m0
+ psubsb m2, m4 ; m2=p1-q1 (signed) backup for below
+ pand m3, [pb_FE]
+ psrlq m3, 1 ; m3=FFABS(p1-q1)/2, this can be used signed
+ paddusb m3, m1
+ psubusb m3, m7
+ pxor m1, m1
+ pcmpeqb m3, m1 ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)
+
+ ; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)
+ mova m4, m5
+ pxor m5, m0
+ pxor m0, m6
+ psubsb m5, m0 ; q0-p0 (signed)
+ paddsb m2, m5
+ paddsb m2, m5
+ paddsb m2, m5 ; a=(p1-q1) + 3*(q0-p0)
+ pand m2, m3 ; apply filter mask (m3)
+
+ mova m3, [pb_F8]
+ mova m1, m2
+ paddsb m2, [pb_4] ; f1<<3=a+4
+ paddsb m1, [pb_3] ; f2<<3=a+3
+ pand m2, m3
+ pand m1, m3 ; cache f2<<3
+
+ pxor m0, m0
+ pxor m3, m3
+ pcmpgtb m0, m2 ; which values are <0?
+ psubb m3, m2 ; -f1<<3
+ psrlq m2, 3 ; +f1
+ psrlq m3, 3 ; -f1
+ pand m3, m0
+ pandn m0, m2
+ psubusb m4, m0
+ paddusb m4, m3 ; q0-f1
+
+ pxor m0, m0
+ pxor m3, m3
+ pcmpgtb m0, m1 ; which values are <0?
+ psubb m3, m1 ; -f2<<3
+ psrlq m1, 3 ; +f2
+ psrlq m3, 3 ; -f2
+ pand m3, m0
+ pandn m0, m1
+ paddusb m6, m0
+ psubusb m6, m3 ; p0+f2
+
+ ; store
+%ifidn %1, v
+ mova [dst1q], m4
+ mova [dst1q+mstrideq], m6
+%else ; h
+ inc dst1q
+ SBUTTERFLY bw, 6, 4, 0
+
+%if cpuflag(sse4)
+ inc dst2q
+%endif
+ WRITE_8W m6, dst2q, dst1q, mstrideq, strideq
+ lea dst2q, [dst3q+mstrideq+1]
+%if cpuflag(sse4)
+ inc dst3q
+%endif
+ WRITE_8W m4, dst3q, dst2q, mstrideq, strideq
+%endif
+
+ RET
+%endmacro
+
+INIT_XMM sse2
+SIMPLE_LOOPFILTER v, 3
+SIMPLE_LOOPFILTER h, 5
+INIT_XMM ssse3
+SIMPLE_LOOPFILTER v, 3
+SIMPLE_LOOPFILTER h, 5
+INIT_XMM sse4
+SIMPLE_LOOPFILTER h, 5
+
+;-----------------------------------------------------------------------------
+; void ff_vp8_h/v_loop_filter<size>_inner_<opt>(uint8_t *dst, [uint8_t *v,] ptrdiff_t stride,
+; int flimE, int flimI, int hev_thr);
+;-----------------------------------------------------------------------------
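+; the inner (subblock-edge) filter applies when all of |p3-p2|, |p2-p1|,
+; |p1-p0|, |q1-q0|, |q2-q1|, |q3-q2| are <= I and 2*|p0-q0| + |p1-q1|/2 is
+; <= E; hev (high edge variance) is set when |p1-p0| or |q1-q0| exceeds
+; hev_thr, in which case only p0/q0 are adjusted, otherwise p1/q1 also get
+; the rounded +/-a adjustment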
+
+%macro INNER_LOOPFILTER 2
+%define stack_size 0
+%ifndef m8 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
+%ifidn %1, v ; [3]=hev() result
+%define stack_size mmsize * -4
+%else ; h ; extra storage space for transposes
+%define stack_size mmsize * -5
+%endif
+%endif
+
+%if %2 == 8 ; chroma
+cglobal vp8_%1_loop_filter8uv_inner, 6, 6, 13, stack_size, dst, dst8, stride, flimE, flimI, hevthr
+%else ; luma
+cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, flimI, hevthr
+%endif
+
+%if cpuflag(ssse3)
+ pxor m7, m7
+%endif
+
+%ifndef m8
+ ; splat function arguments
+ SPLATB_REG m0, flimEq, m7 ; E
+ SPLATB_REG m1, flimIq, m7 ; I
+ SPLATB_REG m2, hevthrq, m7 ; hev_thresh
+
+%define m_flimE [rsp]
+%define m_flimI [rsp+mmsize]
+%define m_hevthr [rsp+mmsize*2]
+%define m_maskres [rsp+mmsize*3]
+%define m_p0backup [rsp+mmsize*3]
+%define m_q0backup [rsp+mmsize*4]
+
+ mova m_flimE, m0
+ mova m_flimI, m1
+ mova m_hevthr, m2
+%else
+%define m_flimE m9
+%define m_flimI m10
+%define m_hevthr m11
+%define m_maskres m12
+%define m_p0backup m12
+%define m_q0backup m8
+
+ ; splat function arguments
+ SPLATB_REG m_flimE, flimEq, m7 ; E
+ SPLATB_REG m_flimI, flimIq, m7 ; I
+ SPLATB_REG m_hevthr, hevthrq, m7 ; hev_thresh
+%endif
+
+%if %2 == 8 ; chroma
+ DEFINE_ARGS dst1, dst8, mstride, stride, dst2
+%else
+ DEFINE_ARGS dst1, mstride, stride, dst2, dst8
+%endif
+ mov strideq, mstrideq
+ neg mstrideq
+%ifidn %1, h
+ lea dst1q, [dst1q+strideq*4-4]
+%if %2 == 8 ; chroma
+ lea dst8q, [dst8q+strideq*4-4]
+%endif
+%endif
+
+ ; read
+ lea dst2q, [dst1q+strideq]
+%ifidn %1, v
+%if %2 == 8 && mmsize == 16
+%define movrow movh
+%else
+%define movrow mova
+%endif
+ movrow m0, [dst1q+mstrideq*4] ; p3
+ movrow m1, [dst2q+mstrideq*4] ; p2
+ movrow m2, [dst1q+mstrideq*2] ; p1
+ movrow m5, [dst2q] ; q1
+ movrow m6, [dst2q+ strideq*1] ; q2
+ movrow m7, [dst2q+ strideq*2] ; q3
+%if mmsize == 16 && %2 == 8
+ movhps m0, [dst8q+mstrideq*4]
+ movhps m2, [dst8q+mstrideq*2]
+ add dst8q, strideq
+ movhps m1, [dst8q+mstrideq*4]
+ movhps m5, [dst8q]
+ movhps m6, [dst8q+ strideq ]
+ movhps m7, [dst8q+ strideq*2]
+ add dst8q, mstrideq
+%endif
+%else ; h
+%if %2 == 16
+ lea dst8q, [dst1q+ strideq*8]
+%endif
+
+ ; read 16 rows of 8px each, interleave
+ movh m0, [dst1q+mstrideq*4]
+ movh m1, [dst8q+mstrideq*4]
+ movh m2, [dst1q+mstrideq*2]
+ movh m5, [dst8q+mstrideq*2]
+ movh m3, [dst1q+mstrideq ]
+ movh m6, [dst8q+mstrideq ]
+ movh m4, [dst1q]
+ movh m7, [dst8q]
+ punpcklbw m0, m1 ; A/I
+ punpcklbw m2, m5 ; C/K
+ punpcklbw m3, m6 ; D/L
+ punpcklbw m4, m7 ; E/M
+
+ add dst8q, strideq
+ movh m1, [dst2q+mstrideq*4]
+ movh m6, [dst8q+mstrideq*4]
+ movh m5, [dst2q]
+ movh m7, [dst8q]
+ punpcklbw m1, m6 ; B/J
+ punpcklbw m5, m7 ; F/N
+ movh m6, [dst2q+ strideq ]
+ movh m7, [dst8q+ strideq ]
+ punpcklbw m6, m7 ; G/O
+
+ ; 8x16 transpose
+ TRANSPOSE4x4B 0, 1, 2, 3, 7
+%ifdef m8
+ SWAP 1, 8
+%else
+ mova m_q0backup, m1
+%endif
+ movh m7, [dst2q+ strideq*2]
+ movh m1, [dst8q+ strideq*2]
+ punpcklbw m7, m1 ; H/P
+ TRANSPOSE4x4B 4, 5, 6, 7, 1
+ SBUTTERFLY dq, 0, 4, 1 ; p3/p2
+ SBUTTERFLY dq, 2, 6, 1 ; q0/q1
+ SBUTTERFLY dq, 3, 7, 1 ; q2/q3
+%ifdef m8
+ SWAP 1, 8
+ SWAP 2, 8
+%else
+ mova m1, m_q0backup
+ mova m_q0backup, m2 ; store q0
+%endif
+ SBUTTERFLY dq, 1, 5, 2 ; p1/p0
+%ifdef m12
+ SWAP 5, 12
+%else
+ mova m_p0backup, m5 ; store p0
+%endif
+ SWAP 1, 4
+ SWAP 2, 4
+ SWAP 6, 3
+ SWAP 5, 3
+%endif
+
+ ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
+ mova m4, m1
+ SWAP 4, 1
+ psubusb m4, m0 ; p2-p3
+ psubusb m0, m1 ; p3-p2
+ por m0, m4 ; abs(p3-p2)
+
+ mova m4, m2
+ SWAP 4, 2
+ psubusb m4, m1 ; p1-p2
+ psubusb m1, m2 ; p2-p1
+ por m1, m4 ; abs(p2-p1)
+
+ mova m4, m6
+ SWAP 4, 6
+ psubusb m4, m7 ; q2-q3
+ psubusb m7, m6 ; q3-q2
+ por m7, m4 ; abs(q3-q2)
+
+ mova m4, m5
+ SWAP 4, 5
+ psubusb m4, m6 ; q1-q2
+ psubusb m6, m5 ; q2-q1
+ por m6, m4 ; abs(q2-q1)
+
+ pmaxub m0, m1
+ pmaxub m6, m7
+ pmaxub m0, m6
+
+ ; normal_limit and high_edge_variance for p1-p0, q1-q0
+ SWAP 7, 3 ; now m7 is zero
+%ifidn %1, v
+ movrow m3, [dst1q+mstrideq ] ; p0
+%if mmsize == 16 && %2 == 8
+ movhps m3, [dst8q+mstrideq ]
+%endif
+%elifdef m12
+ SWAP 3, 12
+%else
+ mova m3, m_p0backup
+%endif
+
+ mova m1, m2
+ SWAP 1, 2
+ mova m6, m3
+ SWAP 3, 6
+ psubusb m1, m3 ; p1-p0
+ psubusb m6, m2 ; p0-p1
+ por m1, m6 ; abs(p1-p0)
+ pmaxub m0, m1 ; max_I
+ SWAP 1, 4 ; max_hev_thresh
+
+ SWAP 6, 4 ; now m6 is I
+%ifidn %1, v
+ movrow m4, [dst1q] ; q0
+%if mmsize == 16 && %2 == 8
+ movhps m4, [dst8q]
+%endif
+%elifdef m8
+ SWAP 4, 8
+%else
+ mova m4, m_q0backup
+%endif
+ mova m1, m4
+ SWAP 1, 4
+ mova m7, m5
+ SWAP 7, 5
+ psubusb m1, m5 ; q0-q1
+ psubusb m7, m4 ; q1-q0
+ por m1, m7 ; abs(q1-q0)
+ pxor m7, m7
+ pmaxub m0, m1
+ pmaxub m6, m1
+ psubusb m0, m_flimI
+ psubusb m6, m_hevthr
+ pcmpeqb m0, m7 ; max(abs(..)) <= I
+ pcmpeqb m6, m7 ; !(max(abs..) > thresh)
+%ifdef m12
+ SWAP 6, 12
+%else
+ mova m_maskres, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
+%endif
+
+ ; simple_limit
+ mova m1, m3
+ SWAP 1, 3
+ mova m6, m4 ; keep copies of p0/q0 around for later use
+ SWAP 6, 4
+ psubusb m1, m4 ; p0-q0
+ psubusb m6, m3 ; q0-p0
+ por m1, m6 ; abs(q0-p0)
+ paddusb m1, m1 ; m1=2*abs(q0-p0)
+
+ mova m7, m2
+ SWAP 7, 2
+ mova m6, m5
+ SWAP 6, 5
+ psubusb m7, m5 ; p1-q1
+ psubusb m6, m2 ; q1-p1
+ por m7, m6 ; abs(q1-p1)
+ pxor m6, m6
+ pand m7, [pb_FE]
+ psrlq m7, 1 ; abs(q1-p1)/2
+ paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
+ psubusb m7, m_flimE
+ pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
+ pand m0, m7 ; normal_limit result
+
+ ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
+%ifdef m8 ; x86-64 && sse2
+ mova m8, [pb_80]
+%define m_pb_80 m8
+%else ; x86-32 or mmx/mmxext
+%define m_pb_80 [pb_80]
+%endif
+ mova m1, m4
+ mova m7, m3
+ pxor m1, m_pb_80
+ pxor m7, m_pb_80
+ psubsb m1, m7 ; (signed) q0-p0
+ mova m6, m2
+ mova m7, m5
+ pxor m6, m_pb_80
+ pxor m7, m_pb_80
+ psubsb m6, m7 ; (signed) p1-q1
+ mova m7, m_maskres
+ pandn m7, m6
+ paddsb m7, m1
+ paddsb m7, m1
+ paddsb m7, m1 ; 3*(q0-p0)+is4tap?(p1-q1)
+
+ pand m7, m0
+ mova m1, [pb_F8]
+ mova m6, m7
+ paddsb m7, [pb_3]
+ paddsb m6, [pb_4]
+ pand m7, m1
+ pand m6, m1
+
+ pxor m1, m1
+ pxor m0, m0
+ pcmpgtb m1, m7
+ psubb m0, m7
+ psrlq m7, 3 ; +f2
+ psrlq m0, 3 ; -f2
+ pand m0, m1
+ pandn m1, m7
+ psubusb m3, m0
+ paddusb m3, m1 ; p0+f2
+
+ pxor m1, m1
+ pxor m0, m0
+ pcmpgtb m0, m6
+ psubb m1, m6
+ psrlq m6, 3 ; +f1
+ psrlq m1, 3 ; -f1
+ pand m1, m0
+ pandn m0, m6
+ psubusb m4, m0
+ paddusb m4, m1 ; q0-f1
+
+%ifdef m12
+ SWAP 6, 12
+%else
+ mova m6, m_maskres
+%endif
+ pxor m7, m7
+ pand m0, m6
+ pand m1, m6
+ psubusb m1, [pb_1]
+ pavgb m0, m7 ; a
+ pavgb m1, m7 ; -a
+ psubusb m5, m0
+ psubusb m2, m1
+ paddusb m5, m1 ; q1-a
+ paddusb m2, m0 ; p1+a
+
+ ; store
+%ifidn %1, v
+ movrow [dst1q+mstrideq*2], m2
+ movrow [dst1q+mstrideq ], m3
+ movrow [dst1q], m4
+ movrow [dst1q+ strideq ], m5
+%if mmsize == 16 && %2 == 8
+ movhps [dst8q+mstrideq*2], m2
+ movhps [dst8q+mstrideq ], m3
+ movhps [dst8q], m4
+ movhps [dst8q+ strideq ], m5
+%endif
+%else ; h
+ add dst1q, 2
+ add dst2q, 2
+
+ ; 4x8/16 transpose
+ TRANSPOSE4x4B 2, 3, 4, 5, 6
+
+ lea dst8q, [dst8q+mstrideq +2]
+ WRITE_4x4D 2, 3, 4, 5, dst1q, dst2q, dst8q, mstrideq, strideq, %2
+%endif
+
+ RET
+%endmacro
+
+INIT_XMM sse2
+INNER_LOOPFILTER v, 16
+INNER_LOOPFILTER h, 16
+INNER_LOOPFILTER v, 8
+INNER_LOOPFILTER h, 8
+
+INIT_XMM ssse3
+INNER_LOOPFILTER v, 16
+INNER_LOOPFILTER h, 16
+INNER_LOOPFILTER v, 8
+INNER_LOOPFILTER h, 8
+
+;-----------------------------------------------------------------------------
+; void ff_vp8_h/v_loop_filter<size>_mbedge_<opt>(uint8_t *dst, [uint8_t *v,] ptrdiff_t stride,
+; int flimE, int flimI, int hev_thr);
+;-----------------------------------------------------------------------------
+
+%macro MBEDGE_LOOPFILTER 2
+%define stack_size 0
+%ifndef m8 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
+%if mmsize == 16 ; [3]=hev() result
+ ; [4]=filter tmp result
+ ; [5]/[6] = p2/q2 backup
+ ; [7]=lim_res sign result
+%define stack_size mmsize * -7
+%else ; 8 ; extra storage space for transposes
+%define stack_size mmsize * -8
+%endif
+%endif
+
+%if %2 == 8 ; chroma
+cglobal vp8_%1_loop_filter8uv_mbedge, 6, 6, 15, stack_size, dst1, dst8, stride, flimE, flimI, hevthr
+%else ; luma
+cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE, flimI, hevthr
+%endif
+
+%if cpuflag(ssse3)
+ pxor m7, m7
+%endif
+
+%ifndef m8
+ ; splat function arguments
+ SPLATB_REG m0, flimEq, m7 ; E
+ SPLATB_REG m1, flimIq, m7 ; I
+ SPLATB_REG m2, hevthrq, m7 ; hev_thresh
+
+%define m_flimE [rsp]
+%define m_flimI [rsp+mmsize]
+%define m_hevthr [rsp+mmsize*2]
+%define m_maskres [rsp+mmsize*3]
+%define m_limres [rsp+mmsize*4]
+%define m_p0backup [rsp+mmsize*3]
+%define m_q0backup [rsp+mmsize*4]
+%define m_p2backup [rsp+mmsize*5]
+%define m_q2backup [rsp+mmsize*6]
+%if mmsize == 16
+%define m_limsign [rsp]
+%else
+%define m_limsign [rsp+mmsize*7]
+%endif
+
+ mova m_flimE, m0
+ mova m_flimI, m1
+ mova m_hevthr, m2
+%else ; sse2 on x86-64
+%define m_flimE m9
+%define m_flimI m10
+%define m_hevthr m11
+%define m_maskres m12
+%define m_limres m8
+%define m_p0backup m12
+%define m_q0backup m8
+%define m_p2backup m13
+%define m_q2backup m14
+%define m_limsign m9
+
+ ; splat function arguments
+ SPLATB_REG m_flimE, flimEq, m7 ; E
+ SPLATB_REG m_flimI, flimIq, m7 ; I
+ SPLATB_REG m_hevthr, hevthrq, m7 ; hev_thresh
+%endif
+
+%if %2 == 8 ; chroma
+ DEFINE_ARGS dst1, dst8, mstride, stride, dst2
+%else
+ DEFINE_ARGS dst1, mstride, stride, dst2, dst8
+%endif
+ mov strideq, mstrideq
+ neg mstrideq
+%ifidn %1, h
+ lea dst1q, [dst1q+strideq*4-4]
+%if %2 == 8 ; chroma
+ lea dst8q, [dst8q+strideq*4-4]
+%endif
+%endif
+
+ ; read
+ lea dst2q, [dst1q+ strideq ]
+%ifidn %1, v
+%if %2 == 8 && mmsize == 16
+%define movrow movh
+%else
+%define movrow mova
+%endif
+ movrow m0, [dst1q+mstrideq*4] ; p3
+ movrow m1, [dst2q+mstrideq*4] ; p2
+ movrow m2, [dst1q+mstrideq*2] ; p1
+ movrow m5, [dst2q] ; q1
+ movrow m6, [dst2q+ strideq ] ; q2
+ movrow m7, [dst2q+ strideq*2] ; q3
+%if mmsize == 16 && %2 == 8
+ movhps m0, [dst8q+mstrideq*4]
+ movhps m2, [dst8q+mstrideq*2]
+ add dst8q, strideq
+ movhps m1, [dst8q+mstrideq*4]
+ movhps m5, [dst8q]
+ movhps m6, [dst8q+ strideq ]
+ movhps m7, [dst8q+ strideq*2]
+ add dst8q, mstrideq
+%endif
+%else ; h
+%if %2 == 16
+ lea dst8q, [dst1q+ strideq*8 ]
+%endif
+
+ ; read 16 rows of 8px each, interleave
+ movh m0, [dst1q+mstrideq*4]
+ movh m1, [dst8q+mstrideq*4]
+ movh m2, [dst1q+mstrideq*2]
+ movh m5, [dst8q+mstrideq*2]
+ movh m3, [dst1q+mstrideq ]
+ movh m6, [dst8q+mstrideq ]
+ movh m4, [dst1q]
+ movh m7, [dst8q]
+ punpcklbw m0, m1 ; A/I
+ punpcklbw m2, m5 ; C/K
+ punpcklbw m3, m6 ; D/L
+ punpcklbw m4, m7 ; E/M
+
+ add dst8q, strideq
+ movh m1, [dst2q+mstrideq*4]
+ movh m6, [dst8q+mstrideq*4]
+ movh m5, [dst2q]
+ movh m7, [dst8q]
+ punpcklbw m1, m6 ; B/J
+ punpcklbw m5, m7 ; F/N
+ movh m6, [dst2q+ strideq ]
+ movh m7, [dst8q+ strideq ]
+ punpcklbw m6, m7 ; G/O
+
+ ; 8x16 transpose
+ TRANSPOSE4x4B 0, 1, 2, 3, 7
+%ifdef m8
+ SWAP 1, 8
+%else
+ mova m_q0backup, m1
+%endif
+ movh m7, [dst2q+ strideq*2]
+ movh m1, [dst8q+ strideq*2]
+ punpcklbw m7, m1 ; H/P
+ TRANSPOSE4x4B 4, 5, 6, 7, 1
+ SBUTTERFLY dq, 0, 4, 1 ; p3/p2
+ SBUTTERFLY dq, 2, 6, 1 ; q0/q1
+ SBUTTERFLY dq, 3, 7, 1 ; q2/q3
+%ifdef m8
+ SWAP 1, 8
+ SWAP 2, 8
+%else
+ mova m1, m_q0backup
+ mova m_q0backup, m2 ; store q0
+%endif
+ SBUTTERFLY dq, 1, 5, 2 ; p1/p0
+%ifdef m12
+ SWAP 5, 12
+%else
+ mova m_p0backup, m5 ; store p0
+%endif
+ SWAP 1, 4
+ SWAP 2, 4
+ SWAP 6, 3
+ SWAP 5, 3
+%endif
+
+ ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
+ mova m4, m1
+ SWAP 4, 1
+ psubusb m4, m0 ; p2-p3
+ psubusb m0, m1 ; p3-p2
+ por m0, m4 ; abs(p3-p2)
+
+ mova m4, m2
+ SWAP 4, 2
+ psubusb m4, m1 ; p1-p2
+ mova m_p2backup, m1
+ psubusb m1, m2 ; p2-p1
+ por m1, m4 ; abs(p2-p1)
+
+ mova m4, m6
+ SWAP 4, 6
+ psubusb m4, m7 ; q2-q3
+ psubusb m7, m6 ; q3-q2
+ por m7, m4 ; abs(q3-q2)
+
+ mova m4, m5
+ SWAP 4, 5
+ psubusb m4, m6 ; q1-q2
+ mova m_q2backup, m6
+ psubusb m6, m5 ; q2-q1
+ por m6, m4 ; abs(q2-q1)
+
+ pmaxub m0, m1
+ pmaxub m6, m7
+ pmaxub m0, m6
+
+ ; normal_limit and high_edge_variance for p1-p0, q1-q0
+ SWAP 7, 3 ; now m7 is zero
+%ifidn %1, v
+ movrow m3, [dst1q+mstrideq ] ; p0
+%if mmsize == 16 && %2 == 8
+ movhps m3, [dst8q+mstrideq ]
+%endif
+%elifdef m12
+ SWAP 3, 12
+%else
+ mova m3, m_p0backup
+%endif
+
+ mova m1, m2
+ SWAP 1, 2
+ mova m6, m3
+ SWAP 3, 6
+ psubusb m1, m3 ; p1-p0
+ psubusb m6, m2 ; p0-p1
+ por m1, m6 ; abs(p1-p0)
+ pmaxub m0, m1 ; max_I
+ SWAP 1, 4 ; max_hev_thresh
+
+ SWAP 6, 4 ; now m6 is I
+%ifidn %1, v
+ movrow m4, [dst1q] ; q0
+%if mmsize == 16 && %2 == 8
+ movhps m4, [dst8q]
+%endif
+%elifdef m8
+ SWAP 4, 8
+%else
+ mova m4, m_q0backup
+%endif
+ mova m1, m4
+ SWAP 1, 4
+ mova m7, m5
+ SWAP 7, 5
+ psubusb m1, m5 ; q0-q1
+ psubusb m7, m4 ; q1-q0
+ por m1, m7 ; abs(q1-q0)
+ pxor m7, m7
+ pmaxub m0, m1
+ pmaxub m6, m1
+ psubusb m0, m_flimI
+ psubusb m6, m_hevthr
+ pcmpeqb m0, m7 ; max(abs(..)) <= I
+ pcmpeqb m6, m7 ; !(max(abs..) > thresh)
+%ifdef m12
+ SWAP 6, 12
+%else
+ mova m_maskres, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
+%endif
+
+ ; simple_limit
+ mova m1, m3
+ SWAP 1, 3
+ mova m6, m4 ; keep copies of p0/q0 around for later use
+ SWAP 6, 4
+ psubusb m1, m4 ; p0-q0
+ psubusb m6, m3 ; q0-p0
+ por m1, m6 ; abs(q0-p0)
+ paddusb m1, m1 ; m1=2*abs(q0-p0)
+
+ mova m7, m2
+ SWAP 7, 2
+ mova m6, m5
+ SWAP 6, 5
+ psubusb m7, m5 ; p1-q1
+ psubusb m6, m2 ; q1-p1
+ por m7, m6 ; abs(q1-p1)
+ pxor m6, m6
+ pand m7, [pb_FE]
+ psrlq m7, 1 ; abs(q1-p1)/2
+ paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
+ psubusb m7, m_flimE
+ pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
+ pand m0, m7 ; normal_limit result
+
+ ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
+%ifdef m8 ; x86-64 && sse2
+ mova m8, [pb_80]
+%define m_pb_80 m8
+%else ; x86-32 or mmx/mmxext
+%define m_pb_80 [pb_80]
+%endif
+ mova m1, m4
+ mova m7, m3
+ pxor m1, m_pb_80
+ pxor m7, m_pb_80
+ psubsb m1, m7 ; (signed) q0-p0
+ mova m6, m2
+ mova m7, m5
+ pxor m6, m_pb_80
+ pxor m7, m_pb_80
+ psubsb m6, m7 ; (signed) p1-q1
+ mova m7, m_maskres
+ paddsb m6, m1
+ paddsb m6, m1
+ paddsb m6, m1
+ pand m6, m0
+%ifdef m8
+ mova m_limres, m6 ; 3*(q0-p0)+(p1-q1) masked for filter_mbedge
+ pand m_limres, m7
+%else
+ mova m0, m6
+ pand m0, m7
+ mova m_limres, m0
+%endif
+ pandn m7, m6 ; 3*(q0-p0)+(p1-q1) masked for filter_common
+
+ mova m1, [pb_F8]
+ mova m6, m7
+ paddsb m7, [pb_3]
+ paddsb m6, [pb_4]
+ pand m7, m1
+ pand m6, m1
+
+ pxor m1, m1
+ pxor m0, m0
+ pcmpgtb m1, m7
+ psubb m0, m7
+ psrlq m7, 3 ; +f2
+ psrlq m0, 3 ; -f2
+ pand m0, m1
+ pandn m1, m7
+ psubusb m3, m0
+ paddusb m3, m1 ; p0+f2
+
+ pxor m1, m1
+ pxor m0, m0
+ pcmpgtb m0, m6
+ psubb m1, m6
+ psrlq m6, 3 ; +f1
+ psrlq m1, 3 ; -f1
+ pand m1, m0
+ pandn m0, m6
+ psubusb m4, m0
+ paddusb m4, m1 ; q0-f1
+
+ ; filter_mbedge (m2-m5 = p1-q1; lim_res carries w)
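+ ; w is clamp(3*(q0-p0) + clamp(p1-q1)); the mb-edge filter applies
+ ; a0 = (27*w + 63) >> 7 to p0/q0, a1 = (18*w + 63) >> 7 to p1/q1 and
+ ; a2 = (9*w + 63) >> 7 to p2/q2. The ssse3 path interleaves the bytes of
+ ; w with the constant 1 and multiplies with pmaddubsw against the 27/63,
+ ; 18/63 and 9/63 byte pairs, folding the multiply and the +63 rounding
+ ; into one op; the sse2 path uses pmullw by 27/18/9 plus paddw with pw_63.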
+%if cpuflag(ssse3)
+ mova m7, [pb_1]
+%else
+ mova m7, [pw_63]
+%endif
+%ifdef m8
+ SWAP 1, 8
+%else
+ mova m1, m_limres
+%endif
+ pxor m0, m0
+ mova m6, m1
+ pcmpgtb m0, m1 ; which are negative
+%if cpuflag(ssse3)
+ punpcklbw m6, m7 ; interleave with "1" for rounding
+ punpckhbw m1, m7
+%else
+ punpcklbw m6, m0 ; signed byte->word
+ punpckhbw m1, m0
+%endif
+ mova m_limsign, m0
+%if cpuflag(ssse3)
+ mova m7, [pb_27_63]
+%ifndef m8
+ mova m_limres, m1
+%endif
+%ifdef m10
+ SWAP 0, 10 ; don't lose lim_sign copy
+%endif
+ mova m0, m7
+ pmaddubsw m7, m6
+ SWAP 6, 7
+ pmaddubsw m0, m1
+ SWAP 1, 0
+%ifdef m10
+ SWAP 0, 10
+%else
+ mova m0, m_limsign
+%endif
+%else
+ mova m_maskres, m6 ; backup for later in filter
+ mova m_limres, m1
+ pmullw m6, [pw_27]
+ pmullw m1, [pw_27]
+ paddw m6, m7
+ paddw m1, m7
+%endif
+ psraw m6, 7
+ psraw m1, 7
+ packsswb m6, m1 ; a0
+ pxor m1, m1
+ psubb m1, m6
+ pand m1, m0 ; -a0
+ pandn m0, m6 ; +a0
+%if cpuflag(ssse3)
+ mova m6, [pb_18_63] ; pipelining
+%endif
+ psubusb m3, m1
+ paddusb m4, m1
+ paddusb m3, m0 ; p0+a0
+ psubusb m4, m0 ; q0-a0
+
+%if cpuflag(ssse3)
+ SWAP 6, 7
+%ifdef m10
+ SWAP 1, 10
+%else
+ mova m1, m_limres
+%endif
+ mova m0, m7
+ pmaddubsw m7, m6
+ SWAP 6, 7
+ pmaddubsw m0, m1
+ SWAP 1, 0
+%ifdef m10
+ SWAP 0, 10
+%endif
+ mova m0, m_limsign
+%else
+ mova m6, m_maskres
+ mova m1, m_limres
+ pmullw m6, [pw_18]
+ pmullw m1, [pw_18]
+ paddw m6, m7
+ paddw m1, m7
+%endif
+ mova m0, m_limsign
+ psraw m6, 7
+ psraw m1, 7
+ packsswb m6, m1 ; a1
+ pxor m1, m1
+ psubb m1, m6
+ pand m1, m0 ; -a1
+ pandn m0, m6 ; +a1
+%if cpuflag(ssse3)
+ mova m6, [pb_9_63]
+%endif
+ psubusb m2, m1
+ paddusb m5, m1
+ paddusb m2, m0 ; p1+a1
+ psubusb m5, m0 ; q1-a1
+
+%if cpuflag(ssse3)
+ SWAP 6, 7
+%ifdef m10
+ SWAP 1, 10
+%else
+ mova m1, m_limres
+%endif
+ mova m0, m7
+ pmaddubsw m7, m6
+ SWAP 6, 7
+ pmaddubsw m0, m1
+ SWAP 1, 0
+%else
+%ifdef m8
+ SWAP 6, 12
+ SWAP 1, 8
+%else
+ mova m6, m_maskres
+ mova m1, m_limres
+%endif
+ pmullw m6, [pw_9]
+ pmullw m1, [pw_9]
+ paddw m6, m7
+ paddw m1, m7
+%endif
+%ifdef m9
+ SWAP 7, 9
+%else
+ mova m7, m_limsign
+%endif
+ psraw m6, 7
+ psraw m1, 7
+ packsswb m6, m1 ; a2
+ pxor m0, m0
+ psubb m0, m6
+ pand m0, m7 ; -a2
+ pandn m7, m6 ; +a2
+%ifdef m8
+ SWAP 1, 13
+ SWAP 6, 14
+%else
+ mova m1, m_p2backup
+ mova m6, m_q2backup
+%endif
+ psubusb m1, m0
+ paddusb m6, m0
+ paddusb m1, m7 ; p2+a2
+ psubusb m6, m7 ; q2-a2
+
+ ; store
+%ifidn %1, v
+ movrow [dst2q+mstrideq*4], m1
+ movrow [dst1q+mstrideq*2], m2
+ movrow [dst1q+mstrideq ], m3
+ movrow [dst1q], m4
+ movrow [dst2q], m5
+ movrow [dst2q+ strideq ], m6
+%if mmsize == 16 && %2 == 8
+ add dst8q, mstrideq
+ movhps [dst8q+mstrideq*2], m1
+ movhps [dst8q+mstrideq ], m2
+ movhps [dst8q], m3
+ add dst8q, strideq
+ movhps [dst8q], m4
+ movhps [dst8q+ strideq ], m5
+ movhps [dst8q+ strideq*2], m6
+%endif
+%else ; h
+ inc dst1q
+ inc dst2q
+
+ ; 4x8/16 transpose
+ TRANSPOSE4x4B 1, 2, 3, 4, 0
+ SBUTTERFLY bw, 5, 6, 0
+
+ lea dst8q, [dst8q+mstrideq+1]
+ WRITE_4x4D 1, 2, 3, 4, dst1q, dst2q, dst8q, mstrideq, strideq, %2
+ lea dst1q, [dst2q+mstrideq+4]
+ lea dst8q, [dst8q+mstrideq+4]
+%if cpuflag(sse4)
+ add dst2q, 4
+%endif
+ WRITE_8W m5, dst2q, dst1q, mstrideq, strideq
+%if cpuflag(sse4)
+ lea dst2q, [dst8q+ strideq ]
+%endif
+ WRITE_8W m6, dst2q, dst8q, mstrideq, strideq
+%endif
+
+ RET
+%endmacro
+
+INIT_XMM sse2
+MBEDGE_LOOPFILTER v, 16
+MBEDGE_LOOPFILTER h, 16
+MBEDGE_LOOPFILTER v, 8
+MBEDGE_LOOPFILTER h, 8
+
+INIT_XMM ssse3
+MBEDGE_LOOPFILTER v, 16
+MBEDGE_LOOPFILTER h, 16
+MBEDGE_LOOPFILTER v, 8
+MBEDGE_LOOPFILTER h, 8
+
+INIT_XMM sse4
+MBEDGE_LOOPFILTER h, 16
+MBEDGE_LOOPFILTER h, 8
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init.c b/media/ffvpx/libavcodec/x86/vp9dsp_init.c
new file mode 100644
index 0000000000..8d11dbc348
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init.c
@@ -0,0 +1,415 @@
+/*
+ * VP9 SIMD optimizations
+ *
+ * Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/vp9dsp.h"
+#include "libavcodec/x86/vp9dsp_init.h"
+
+#if HAVE_X86ASM
+
+decl_fpel_func(put, 4, , mmx);
+decl_fpel_func(put, 8, , mmx);
+decl_fpel_func(put, 16, , sse);
+decl_fpel_func(put, 32, , sse);
+decl_fpel_func(put, 64, , sse);
+decl_fpel_func(avg, 4, _8, mmxext);
+decl_fpel_func(avg, 8, _8, mmxext);
+decl_fpel_func(avg, 16, _8, sse2);
+decl_fpel_func(avg, 32, _8, sse2);
+decl_fpel_func(avg, 64, _8, sse2);
+decl_fpel_func(put, 32, , avx);
+decl_fpel_func(put, 64, , avx);
+decl_fpel_func(avg, 32, _8, avx2);
+decl_fpel_func(avg, 64, _8, avx2);
+
+decl_mc_funcs(4, mmxext, int16_t, 8, 8);
+decl_mc_funcs(8, sse2, int16_t, 8, 8);
+decl_mc_funcs(4, ssse3, int8_t, 32, 8);
+decl_mc_funcs(8, ssse3, int8_t, 32, 8);
+#if ARCH_X86_64
+decl_mc_funcs(16, ssse3, int8_t, 32, 8);
+decl_mc_funcs(32, avx2, int8_t, 32, 8);
+#endif
+
+mc_rep_funcs(16, 8, 8, sse2, int16_t, 8, 8)
+#if ARCH_X86_32
+mc_rep_funcs(16, 8, 8, ssse3, int8_t, 32, 8)
+#endif
+mc_rep_funcs(32, 16, 16, sse2, int16_t, 8, 8)
+mc_rep_funcs(32, 16, 16, ssse3, int8_t, 32, 8)
+mc_rep_funcs(64, 32, 32, sse2, int16_t, 8, 8)
+mc_rep_funcs(64, 32, 32, ssse3, int8_t, 32, 8)
+#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+mc_rep_funcs(64, 32, 32, avx2, int8_t, 32, 8)
+#endif
+
+extern const int8_t ff_filters_ssse3[3][15][4][32];
+extern const int16_t ff_filters_sse2[3][15][8][8];
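+
+/* The two coefficient tables mirror the two code paths declared above: the
+ * sse2 8-tap kernels take 16-bit filter coefficients (rows of 8 int16_t),
+ * while the ssse3/avx2 kernels take interleaved 8-bit coefficients (rows of
+ * 32 int8_t) laid out for pmaddubsw, hence the int16_t/8 vs int8_t/32
+ * parameters in the decl_mc_funcs/mc_rep_funcs lines. */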
+
+filters_8tap_2d_fn2(put, 16, 8, 1, mmxext, sse2, sse2)
+filters_8tap_2d_fn2(avg, 16, 8, 1, mmxext, sse2, sse2)
+filters_8tap_2d_fn2(put, 16, 8, 1, ssse3, ssse3, ssse3)
+filters_8tap_2d_fn2(avg, 16, 8, 1, ssse3, ssse3, ssse3)
+#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+filters_8tap_2d_fn(put, 64, 32, 8, 1, avx2, ssse3)
+filters_8tap_2d_fn(put, 32, 32, 8, 1, avx2, ssse3)
+filters_8tap_2d_fn(avg, 64, 32, 8, 1, avx2, ssse3)
+filters_8tap_2d_fn(avg, 32, 32, 8, 1, avx2, ssse3)
+#endif
+
+filters_8tap_1d_fn3(put, 8, mmxext, sse2, sse2)
+filters_8tap_1d_fn3(avg, 8, mmxext, sse2, sse2)
+filters_8tap_1d_fn3(put, 8, ssse3, ssse3, ssse3)
+filters_8tap_1d_fn3(avg, 8, ssse3, ssse3, ssse3)
+#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+filters_8tap_1d_fn2(put, 64, 8, avx2, ssse3)
+filters_8tap_1d_fn2(put, 32, 8, avx2, ssse3)
+filters_8tap_1d_fn2(avg, 64, 8, avx2, ssse3)
+filters_8tap_1d_fn2(avg, 32, 8, avx2, ssse3)
+#endif
+
+#define itxfm_func(typea, typeb, size, opt) \
+void ff_vp9_##typea##_##typeb##_##size##x##size##_add_##opt(uint8_t *dst, ptrdiff_t stride, \
+ int16_t *block, int eob)
+#define itxfm_funcs(size, opt) \
+itxfm_func(idct, idct, size, opt); \
+itxfm_func(iadst, idct, size, opt); \
+itxfm_func(idct, iadst, size, opt); \
+itxfm_func(iadst, iadst, size, opt)
+
+itxfm_func(idct, idct, 4, mmxext);
+itxfm_func(idct, iadst, 4, sse2);
+itxfm_func(iadst, idct, 4, sse2);
+itxfm_func(iadst, iadst, 4, sse2);
+itxfm_funcs(4, ssse3);
+itxfm_funcs(8, sse2);
+itxfm_funcs(8, ssse3);
+itxfm_funcs(8, avx);
+itxfm_funcs(16, sse2);
+itxfm_funcs(16, ssse3);
+itxfm_funcs(16, avx);
+itxfm_func(idct, idct, 32, sse2);
+itxfm_func(idct, idct, 32, ssse3);
+itxfm_func(idct, idct, 32, avx);
+itxfm_func(iwht, iwht, 4, mmx);
+itxfm_funcs(16, avx2);
+itxfm_func(idct, idct, 32, avx2);
+
+#undef itxfm_func
+#undef itxfm_funcs
+
+#define lpf_funcs(size1, size2, opt) \
+void ff_vp9_loop_filter_v_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stride, \
+ int E, int I, int H); \
+void ff_vp9_loop_filter_h_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stride, \
+ int E, int I, int H)
+
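+/* In the size1 position, 16 is the full 16-pixel wide filter, while 44, 48,
+ * 84 and 88 are the mixed cases wired to loop_filter_mix2[][][] below: the
+ * first digit is the filter width used on the first 8-pixel edge and the
+ * second digit the width used on the second. */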
+lpf_funcs(4, 8, mmxext);
+lpf_funcs(8, 8, mmxext);
+lpf_funcs(16, 16, sse2);
+lpf_funcs(16, 16, ssse3);
+lpf_funcs(16, 16, avx);
+lpf_funcs(44, 16, sse2);
+lpf_funcs(44, 16, ssse3);
+lpf_funcs(44, 16, avx);
+lpf_funcs(84, 16, sse2);
+lpf_funcs(84, 16, ssse3);
+lpf_funcs(84, 16, avx);
+lpf_funcs(48, 16, sse2);
+lpf_funcs(48, 16, ssse3);
+lpf_funcs(48, 16, avx);
+lpf_funcs(88, 16, sse2);
+lpf_funcs(88, 16, ssse3);
+lpf_funcs(88, 16, avx);
+
+#undef lpf_funcs
+
+#define ipred_func(size, type, opt) \
+void ff_vp9_ipred_##type##_##size##x##size##_##opt(uint8_t *dst, ptrdiff_t stride, \
+ const uint8_t *l, const uint8_t *a)
+
+ipred_func(8, v, mmx);
+
+#define ipred_dc_funcs(size, opt) \
+ipred_func(size, dc, opt); \
+ipred_func(size, dc_left, opt); \
+ipred_func(size, dc_top, opt)
+
+ipred_dc_funcs(4, mmxext);
+ipred_dc_funcs(8, mmxext);
+
+#define ipred_dir_tm_funcs(size, opt) \
+ipred_func(size, tm, opt); \
+ipred_func(size, dl, opt); \
+ipred_func(size, dr, opt); \
+ipred_func(size, hd, opt); \
+ipred_func(size, hu, opt); \
+ipred_func(size, vl, opt); \
+ipred_func(size, vr, opt)
+
+ipred_dir_tm_funcs(4, mmxext);
+
+ipred_func(16, v, sse);
+ipred_func(32, v, sse);
+
+ipred_dc_funcs(16, sse2);
+ipred_dc_funcs(32, sse2);
+
+#define ipred_dir_tm_h_funcs(size, opt) \
+ipred_dir_tm_funcs(size, opt); \
+ipred_func(size, h, opt)
+
+ipred_dir_tm_h_funcs(8, sse2);
+ipred_dir_tm_h_funcs(16, sse2);
+ipred_dir_tm_h_funcs(32, sse2);
+
+ipred_func(4, h, sse2);
+
+#define ipred_all_funcs(size, opt) \
+ipred_dc_funcs(size, opt); \
+ipred_dir_tm_h_funcs(size, opt)
+
+// FIXME hd/vl_4x4_ssse3 does not exist
+ipred_all_funcs(4, ssse3);
+ipred_all_funcs(8, ssse3);
+ipred_all_funcs(16, ssse3);
+ipred_all_funcs(32, ssse3);
+
+ipred_dir_tm_h_funcs(8, avx);
+ipred_dir_tm_h_funcs(16, avx);
+ipred_dir_tm_h_funcs(32, avx);
+
+ipred_func(32, v, avx);
+
+ipred_dc_funcs(32, avx2);
+ipred_func(32, h, avx2);
+ipred_func(32, tm, avx2);
+
+#undef ipred_func
+#undef ipred_dir_tm_h_funcs
+#undef ipred_dir_tm_funcs
+#undef ipred_dc_funcs
+
+#endif /* HAVE_X86ASM */
+
+av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact)
+{
+#if HAVE_X86ASM
+ int cpu_flags;
+
+ if (bpp == 10) {
+ ff_vp9dsp_init_10bpp_x86(dsp, bitexact);
+ return;
+ } else if (bpp == 12) {
+ ff_vp9dsp_init_12bpp_x86(dsp, bitexact);
+ return;
+ }
+
+ cpu_flags = av_get_cpu_flags();
+
+#define init_lpf(opt) do { \
+ dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \
+ dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \
+ dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \
+ dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \
+ dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \
+ dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \
+ dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \
+ dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \
+ dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \
+ dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \
+} while (0)
+
+#define init_ipred(sz, opt, t, e) \
+ dsp->intra_pred[TX_##sz##X##sz][e##_PRED] = ff_vp9_ipred_##t##_##sz##x##sz##_##opt
+
+#define ff_vp9_ipred_hd_4x4_ssse3 ff_vp9_ipred_hd_4x4_mmxext
+#define ff_vp9_ipred_vl_4x4_ssse3 ff_vp9_ipred_vl_4x4_mmxext
+#define init_dir_tm_ipred(sz, opt) do { \
+ init_ipred(sz, opt, dl, DIAG_DOWN_LEFT); \
+ init_ipred(sz, opt, dr, DIAG_DOWN_RIGHT); \
+ init_ipred(sz, opt, hd, HOR_DOWN); \
+ init_ipred(sz, opt, vl, VERT_LEFT); \
+ init_ipred(sz, opt, hu, HOR_UP); \
+ init_ipred(sz, opt, tm, TM_VP8); \
+ init_ipred(sz, opt, vr, VERT_RIGHT); \
+} while (0)
+#define init_dir_tm_h_ipred(sz, opt) do { \
+ init_dir_tm_ipred(sz, opt); \
+ init_ipred(sz, opt, h, HOR); \
+} while (0)
+#define init_dc_ipred(sz, opt) do { \
+ init_ipred(sz, opt, dc, DC); \
+ init_ipred(sz, opt, dc_left, LEFT_DC); \
+ init_ipred(sz, opt, dc_top, TOP_DC); \
+} while (0)
+#define init_all_ipred(sz, opt) do { \
+ init_dc_ipred(sz, opt); \
+ init_dir_tm_h_ipred(sz, opt); \
+} while (0)
+
+ if (EXTERNAL_MMX(cpu_flags)) {
+ init_fpel_func(4, 0, 4, put, , mmx);
+ init_fpel_func(3, 0, 8, put, , mmx);
+ if (!bitexact) {
+ dsp->itxfm_add[4 /* lossless */][DCT_DCT] =
+ dsp->itxfm_add[4 /* lossless */][ADST_DCT] =
+ dsp->itxfm_add[4 /* lossless */][DCT_ADST] =
+ dsp->itxfm_add[4 /* lossless */][ADST_ADST] = ff_vp9_iwht_iwht_4x4_add_mmx;
+ }
+ init_ipred(8, mmx, v, VERT);
+ }
+
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ dsp->loop_filter_8[0][0] = ff_vp9_loop_filter_h_4_8_mmxext;
+ dsp->loop_filter_8[0][1] = ff_vp9_loop_filter_v_4_8_mmxext;
+ dsp->loop_filter_8[1][0] = ff_vp9_loop_filter_h_8_8_mmxext;
+ dsp->loop_filter_8[1][1] = ff_vp9_loop_filter_v_8_8_mmxext;
+ init_subpel2(4, 0, 4, put, 8, mmxext);
+ init_subpel2(4, 1, 4, avg, 8, mmxext);
+ init_fpel_func(4, 1, 4, avg, _8, mmxext);
+ init_fpel_func(3, 1, 8, avg, _8, mmxext);
+ dsp->itxfm_add[TX_4X4][DCT_DCT] = ff_vp9_idct_idct_4x4_add_mmxext;
+ init_dc_ipred(4, mmxext);
+ init_dc_ipred(8, mmxext);
+ init_dir_tm_ipred(4, mmxext);
+ }
+
+ if (EXTERNAL_SSE(cpu_flags)) {
+ init_fpel_func(2, 0, 16, put, , sse);
+ init_fpel_func(1, 0, 32, put, , sse);
+ init_fpel_func(0, 0, 64, put, , sse);
+ init_ipred(16, sse, v, VERT);
+ init_ipred(32, sse, v, VERT);
+ }
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ init_subpel3_8to64(0, put, 8, sse2);
+ init_subpel3_8to64(1, avg, 8, sse2);
+ init_fpel_func(2, 1, 16, avg, _8, sse2);
+ init_fpel_func(1, 1, 32, avg, _8, sse2);
+ init_fpel_func(0, 1, 64, avg, _8, sse2);
+ init_lpf(sse2);
+ dsp->itxfm_add[TX_4X4][ADST_DCT] = ff_vp9_idct_iadst_4x4_add_sse2;
+ dsp->itxfm_add[TX_4X4][DCT_ADST] = ff_vp9_iadst_idct_4x4_add_sse2;
+ dsp->itxfm_add[TX_4X4][ADST_ADST] = ff_vp9_iadst_iadst_4x4_add_sse2;
+ dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_sse2;
+ dsp->itxfm_add[TX_8X8][ADST_DCT] = ff_vp9_idct_iadst_8x8_add_sse2;
+ dsp->itxfm_add[TX_8X8][DCT_ADST] = ff_vp9_iadst_idct_8x8_add_sse2;
+ dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_sse2;
+ dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_sse2;
+ dsp->itxfm_add[TX_16X16][ADST_DCT] = ff_vp9_idct_iadst_16x16_add_sse2;
+ dsp->itxfm_add[TX_16X16][DCT_ADST] = ff_vp9_iadst_idct_16x16_add_sse2;
+ dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_sse2;
+ dsp->itxfm_add[TX_32X32][ADST_ADST] =
+ dsp->itxfm_add[TX_32X32][ADST_DCT] =
+ dsp->itxfm_add[TX_32X32][DCT_ADST] =
+ dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_sse2;
+ init_dc_ipred(16, sse2);
+ init_dc_ipred(32, sse2);
+ init_dir_tm_h_ipred(8, sse2);
+ init_dir_tm_h_ipred(16, sse2);
+ init_dir_tm_h_ipred(32, sse2);
+ init_ipred(4, sse2, h, HOR);
+ }
+
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ init_subpel3(0, put, 8, ssse3);
+ init_subpel3(1, avg, 8, ssse3);
+ dsp->itxfm_add[TX_4X4][DCT_DCT] = ff_vp9_idct_idct_4x4_add_ssse3;
+ dsp->itxfm_add[TX_4X4][ADST_DCT] = ff_vp9_idct_iadst_4x4_add_ssse3;
+ dsp->itxfm_add[TX_4X4][DCT_ADST] = ff_vp9_iadst_idct_4x4_add_ssse3;
+ dsp->itxfm_add[TX_4X4][ADST_ADST] = ff_vp9_iadst_iadst_4x4_add_ssse3;
+ dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_ssse3;
+ dsp->itxfm_add[TX_8X8][ADST_DCT] = ff_vp9_idct_iadst_8x8_add_ssse3;
+ dsp->itxfm_add[TX_8X8][DCT_ADST] = ff_vp9_iadst_idct_8x8_add_ssse3;
+ dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_ssse3;
+ dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_ssse3;
+ dsp->itxfm_add[TX_16X16][ADST_DCT] = ff_vp9_idct_iadst_16x16_add_ssse3;
+ dsp->itxfm_add[TX_16X16][DCT_ADST] = ff_vp9_iadst_idct_16x16_add_ssse3;
+ dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_ssse3;
+ dsp->itxfm_add[TX_32X32][ADST_ADST] =
+ dsp->itxfm_add[TX_32X32][ADST_DCT] =
+ dsp->itxfm_add[TX_32X32][DCT_ADST] =
+ dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_ssse3;
+ init_lpf(ssse3);
+ init_all_ipred(4, ssse3);
+ init_all_ipred(8, ssse3);
+ init_all_ipred(16, ssse3);
+ init_all_ipred(32, ssse3);
+ }
+
+ if (EXTERNAL_AVX(cpu_flags)) {
+ dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_avx;
+ dsp->itxfm_add[TX_8X8][ADST_DCT] = ff_vp9_idct_iadst_8x8_add_avx;
+ dsp->itxfm_add[TX_8X8][DCT_ADST] = ff_vp9_iadst_idct_8x8_add_avx;
+ dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_avx;
+ dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_avx;
+ dsp->itxfm_add[TX_16X16][ADST_DCT] = ff_vp9_idct_iadst_16x16_add_avx;
+ dsp->itxfm_add[TX_16X16][DCT_ADST] = ff_vp9_iadst_idct_16x16_add_avx;
+ dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_avx;
+ dsp->itxfm_add[TX_32X32][ADST_ADST] =
+ dsp->itxfm_add[TX_32X32][ADST_DCT] =
+ dsp->itxfm_add[TX_32X32][DCT_ADST] =
+ dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_avx;
+ init_lpf(avx);
+ init_dir_tm_h_ipred(8, avx);
+ init_dir_tm_h_ipred(16, avx);
+ init_dir_tm_h_ipred(32, avx);
+ }
+ if (EXTERNAL_AVX_FAST(cpu_flags)) {
+ init_fpel_func(1, 0, 32, put, , avx);
+ init_fpel_func(0, 0, 64, put, , avx);
+ init_ipred(32, avx, v, VERT);
+ }
+
+ if (EXTERNAL_AVX2_FAST(cpu_flags)) {
+ init_fpel_func(1, 1, 32, avg, _8, avx2);
+ init_fpel_func(0, 1, 64, avg, _8, avx2);
+ if (ARCH_X86_64) {
+#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+ dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_avx2;
+ dsp->itxfm_add[TX_16X16][ADST_DCT] = ff_vp9_idct_iadst_16x16_add_avx2;
+ dsp->itxfm_add[TX_16X16][DCT_ADST] = ff_vp9_iadst_idct_16x16_add_avx2;
+ dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_avx2;
+ dsp->itxfm_add[TX_32X32][ADST_ADST] =
+ dsp->itxfm_add[TX_32X32][ADST_DCT] =
+ dsp->itxfm_add[TX_32X32][DCT_ADST] =
+ dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_avx2;
+ init_subpel3_32_64(0, put, 8, avx2);
+ init_subpel3_32_64(1, avg, 8, avx2);
+#endif
+ }
+ init_dc_ipred(32, avx2);
+ init_ipred(32, avx2, h, HOR);
+ init_ipred(32, avx2, tm, TM_VP8);
+ }
+
+#undef init_fpel
+#undef init_subpel1
+#undef init_subpel2
+#undef init_subpel3
+
+#endif /* HAVE_X86ASM */
+}
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init.h b/media/ffvpx/libavcodec/x86/vp9dsp_init.h
new file mode 100644
index 0000000000..fc1e0557fa
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init.h
@@ -0,0 +1,192 @@
+/*
+ * VP9 SIMD optimizations
+ *
+ * Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_VP9DSP_INIT_H
+#define AVCODEC_X86_VP9DSP_INIT_H
+
+#include "libavutil/attributes.h"
+#include "libavutil/mem_internal.h"
+
+#include "libavcodec/vp9dsp.h"
+
+// hack to force-expand BPC: pasting through this helper makes a bpp macro argument
+// (e.g. BPC) expand to its numeric value before ## builds the final symbol name
+#define cat(a, bpp, b) a##bpp##b
+
+#define decl_fpel_func(avg, sz, bpp, opt) \
+void ff_vp9_##avg##sz##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my)
+
+#define decl_mc_func(avg, sz, dir, opt, type, f_sz, bpp) \
+void ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, const type (*filter)[f_sz])
+
+#define decl_mc_funcs(sz, opt, type, fsz, bpp) \
+decl_mc_func(put, sz, h, opt, type, fsz, bpp); \
+decl_mc_func(avg, sz, h, opt, type, fsz, bpp); \
+decl_mc_func(put, sz, v, opt, type, fsz, bpp); \
+decl_mc_func(avg, sz, v, opt, type, fsz, bpp)
+
+#define decl_ipred_fn(type, sz, bpp, opt) \
+void ff_vp9_ipred_##type##_##sz##x##sz##_##bpp##_##opt(uint8_t *dst, \
+ ptrdiff_t stride, \
+ const uint8_t *l, \
+ const uint8_t *a)
+
+#define decl_ipred_fns(type, bpp, opt4, opt8_16_32) \
+decl_ipred_fn(type, 4, bpp, opt4); \
+decl_ipred_fn(type, 8, bpp, opt8_16_32); \
+decl_ipred_fn(type, 16, bpp, opt8_16_32); \
+decl_ipred_fn(type, 32, bpp, opt8_16_32)
+
+#define decl_itxfm_func(typea, typeb, size, bpp, opt) \
+void cat(ff_vp9_##typea##_##typeb##_##size##x##size##_add_, bpp, _##opt)(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int16_t *block, \
+ int eob)
+
+#define decl_itxfm_funcs(size, bpp, opt) \
+decl_itxfm_func(idct, idct, size, bpp, opt); \
+decl_itxfm_func(iadst, idct, size, bpp, opt); \
+decl_itxfm_func(idct, iadst, size, bpp, opt); \
+decl_itxfm_func(iadst, iadst, size, bpp, opt)
+
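+// Build an sz-wide put/avg from two calls to the hsz-wide version, hszb bytes apart.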
+#define mc_rep_func(avg, sz, hsz, hszb, dir, opt, type, f_sz, bpp) \
+static av_always_inline void \
+ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, const type (*filter)[f_sz]) \
+{ \
+ ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##bpp##_##opt(dst, dst_stride, src, \
+ src_stride, h, filter); \
+ ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##bpp##_##opt(dst + hszb, dst_stride, src + hszb, \
+ src_stride, h, filter); \
+}
+
+#define mc_rep_funcs(sz, hsz, hszb, opt, type, fsz, bpp) \
+mc_rep_func(put, sz, hsz, hszb, h, opt, type, fsz, bpp) \
+mc_rep_func(avg, sz, hsz, hszb, h, opt, type, fsz, bpp) \
+mc_rep_func(put, sz, hsz, hszb, v, opt, type, fsz, bpp) \
+mc_rep_func(avg, sz, hsz, hszb, v, opt, type, fsz, bpp)
+
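+// 1-D subpel wrappers: bind a filter type (regular/sharp/smooth) to its row of the
+// filter table and pick the subpel phase from mx (h) or my (v).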
+#define filter_8tap_1d_fn(op, sz, f, f_opt, fname, dir, dvar, bpp, opt) \
+static void op##_8tap_##fname##_##sz##dir##_##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my) \
+{ \
+ ff_vp9_##op##_8tap_1d_##dir##_##sz##_##bpp##_##opt(dst, dst_stride, src, src_stride, \
+ h, ff_filters_##f_opt[f][dvar - 1]); \
+}
+
+#define filters_8tap_1d_fn(op, sz, dir, dvar, bpp, opt, f_opt) \
+filter_8tap_1d_fn(op, sz, FILTER_8TAP_REGULAR, f_opt, regular, dir, dvar, bpp, opt) \
+filter_8tap_1d_fn(op, sz, FILTER_8TAP_SHARP, f_opt, sharp, dir, dvar, bpp, opt) \
+filter_8tap_1d_fn(op, sz, FILTER_8TAP_SMOOTH, f_opt, smooth, dir, dvar, bpp, opt)
+
+#define filters_8tap_1d_fn2(op, sz, bpp, opt, f_opt) \
+filters_8tap_1d_fn(op, sz, h, mx, bpp, opt, f_opt) \
+filters_8tap_1d_fn(op, sz, v, my, bpp, opt, f_opt)
+
+#define filters_8tap_1d_fn3(op, bpp, opt4, opt8, f_opt) \
+filters_8tap_1d_fn2(op, 64, bpp, opt8, f_opt) \
+filters_8tap_1d_fn2(op, 32, bpp, opt8, f_opt) \
+filters_8tap_1d_fn2(op, 16, bpp, opt8, f_opt) \
+filters_8tap_1d_fn2(op, 8, bpp, opt8, f_opt) \
+filters_8tap_1d_fn2(op, 4, bpp, opt4, f_opt)
+
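+// 2-D subpel: horizontal 8-tap pass into a 71x64 scratch buffer (h + 7 rows to feed the
+// vertical taps), then the vertical 8-tap pass reads back starting at scratch row 3.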
+#define filter_8tap_2d_fn(op, sz, f, f_opt, fname, align, bpp, bytes, opt) \
+static void op##_8tap_##fname##_##sz##hv_##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *src, ptrdiff_t src_stride, \
+ int h, int mx, int my) \
+{ \
+ LOCAL_ALIGNED_##align(uint8_t, temp, [71 * 64 * bytes]); \
+ ff_vp9_put_8tap_1d_h_##sz##_##bpp##_##opt(temp, 64 * bytes, src - 3 * src_stride, \
+ src_stride, h + 7, \
+ ff_filters_##f_opt[f][mx - 1]); \
+ ff_vp9_##op##_8tap_1d_v_##sz##_##bpp##_##opt(dst, dst_stride, temp + 3 * bytes * 64, \
+ 64 * bytes, h, \
+ ff_filters_##f_opt[f][my - 1]); \
+}
+
+#define filters_8tap_2d_fn(op, sz, align, bpp, bytes, opt, f_opt) \
+filter_8tap_2d_fn(op, sz, FILTER_8TAP_REGULAR, f_opt, regular, align, bpp, bytes, opt) \
+filter_8tap_2d_fn(op, sz, FILTER_8TAP_SHARP, f_opt, sharp, align, bpp, bytes, opt) \
+filter_8tap_2d_fn(op, sz, FILTER_8TAP_SMOOTH, f_opt, smooth, align, bpp, bytes, opt)
+
+#define filters_8tap_2d_fn2(op, align, bpp, bytes, opt4, opt8, f_opt) \
+filters_8tap_2d_fn(op, 64, align, bpp, bytes, opt8, f_opt) \
+filters_8tap_2d_fn(op, 32, align, bpp, bytes, opt8, f_opt) \
+filters_8tap_2d_fn(op, 16, align, bpp, bytes, opt8, f_opt) \
+filters_8tap_2d_fn(op, 8, align, bpp, bytes, opt8, f_opt) \
+filters_8tap_2d_fn(op, 4, align, bpp, bytes, opt4, f_opt)
+
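+// dsp->mc[idx1][filter][idx2][idxh][idxv]: idx1 is the block-size index (0 = 64x64 down
+// to 4 = 4x4), idx2 selects put (0) or avg (1). Fullpel copy/avg ignores the filter, so
+// one function fills all four filter slots.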
+#define init_fpel_func(idx1, idx2, sz, type, bpp, opt) \
+ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
+ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
+ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = \
+ dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = ff_vp9_##type##sz##bpp##_##opt
+
+#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type, bpp, opt) \
+ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = \
+ type##_8tap_smooth_##sz##dir##_##bpp##_##opt; \
+ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = \
+ type##_8tap_regular_##sz##dir##_##bpp##_##opt; \
+ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = \
+ type##_8tap_sharp_##sz##dir##_##bpp##_##opt
+
+#define init_subpel2(idx1, idx2, sz, type, bpp, opt) \
+ init_subpel1(idx1, idx2, 1, 1, sz, hv, type, bpp, opt); \
+ init_subpel1(idx1, idx2, 0, 1, sz, v, type, bpp, opt); \
+ init_subpel1(idx1, idx2, 1, 0, sz, h, type, bpp, opt)
+
+#define init_subpel3_32_64(idx, type, bpp, opt) \
+ init_subpel2(0, idx, 64, type, bpp, opt); \
+ init_subpel2(1, idx, 32, type, bpp, opt)
+
+#define init_subpel3_8to64(idx, type, bpp, opt) \
+ init_subpel3_32_64(idx, type, bpp, opt); \
+ init_subpel2(2, idx, 16, type, bpp, opt); \
+ init_subpel2(3, idx, 8, type, bpp, opt)
+
+#define init_subpel3(idx, type, bpp, opt) \
+ init_subpel3_8to64(idx, type, bpp, opt); \
+ init_subpel2(4, idx, 4, type, bpp, opt)
+
+#define init_ipred_func(type, enum, sz, bpp, opt) \
+ dsp->intra_pred[TX_##sz##X##sz][enum##_PRED] = \
+ cat(ff_vp9_ipred_##type##_##sz##x##sz##_, bpp, _##opt)
+
+#define init_8_16_32_ipred_funcs(type, enum, bpp, opt) \
+ init_ipred_func(type, enum, 8, bpp, opt); \
+ init_ipred_func(type, enum, 16, bpp, opt); \
+ init_ipred_func(type, enum, 32, bpp, opt)
+
+#define init_ipred_funcs(type, enum, bpp, opt) \
+ init_ipred_func(type, enum, 4, bpp, opt); \
+ init_8_16_32_ipred_funcs(type, enum, bpp, opt)
+
+void ff_vp9dsp_init_10bpp_x86(VP9DSPContext *dsp, int bitexact);
+void ff_vp9dsp_init_12bpp_x86(VP9DSPContext *dsp, int bitexact);
+void ff_vp9dsp_init_16bpp_x86(VP9DSPContext *dsp);
+
+#endif /* AVCODEC_X86_VP9DSP_INIT_H */
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init_10bpp.c b/media/ffvpx/libavcodec/x86/vp9dsp_init_10bpp.c
new file mode 100644
index 0000000000..2694c06cb2
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init_10bpp.c
@@ -0,0 +1,25 @@
+/*
+ * VP9 SIMD optimizations
+ *
+ * Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BPC 10
+#define INIT_FUNC ff_vp9dsp_init_10bpp_x86
+#include "vp9dsp_init_16bpp_template.c"
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init_12bpp.c b/media/ffvpx/libavcodec/x86/vp9dsp_init_12bpp.c
new file mode 100644
index 0000000000..5da3bc1840
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init_12bpp.c
@@ -0,0 +1,25 @@
+/*
+ * VP9 SIMD optimizations
+ *
+ * Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BPC 12
+#define INIT_FUNC ff_vp9dsp_init_12bpp_x86
+#include "vp9dsp_init_16bpp_template.c"
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp.c b/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp.c
new file mode 100644
index 0000000000..e5afea1512
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp.c
@@ -0,0 +1,152 @@
+/*
+ * VP9 SIMD optimizations
+ *
+ * Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/vp9dsp.h"
+#include "libavcodec/x86/vp9dsp_init.h"
+
+#if HAVE_X86ASM
+
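+// Sizes below are in bytes, not pixels: a 64-pixel row at 16 bpp is 128 bytes, so the
+// bit-depth-agnostic byte-copy "put" functions are reused one size up, while "avg"
+// needs dedicated 16-bit versions.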
+decl_fpel_func(put, 8, , mmx);
+decl_fpel_func(avg, 8, _16, mmxext);
+decl_fpel_func(put, 16, , sse);
+decl_fpel_func(put, 32, , sse);
+decl_fpel_func(put, 64, , sse);
+decl_fpel_func(put, 128, , sse);
+decl_fpel_func(avg, 16, _16, sse2);
+decl_fpel_func(avg, 32, _16, sse2);
+decl_fpel_func(avg, 64, _16, sse2);
+decl_fpel_func(avg, 128, _16, sse2);
+decl_fpel_func(put, 32, , avx);
+decl_fpel_func(put, 64, , avx);
+decl_fpel_func(put, 128, , avx);
+decl_fpel_func(avg, 32, _16, avx2);
+decl_fpel_func(avg, 64, _16, avx2);
+decl_fpel_func(avg, 128, _16, avx2);
+
+decl_ipred_fns(v, 16, mmx, sse);
+decl_ipred_fns(h, 16, mmxext, sse2);
+decl_ipred_fns(dc, 16, mmxext, sse2);
+decl_ipred_fns(dc_top, 16, mmxext, sse2);
+decl_ipred_fns(dc_left, 16, mmxext, sse2);
+decl_ipred_fn(dl, 16, 16, avx2);
+decl_ipred_fn(dl, 32, 16, avx2);
+decl_ipred_fn(dr, 16, 16, avx2);
+decl_ipred_fn(dr, 32, 16, avx2);
+decl_ipred_fn(vl, 16, 16, avx2);
+decl_ipred_fn(hd, 16, 16, avx2);
+
+#define decl_ipred_dir_funcs(type) \
+decl_ipred_fns(type, 16, sse2, sse2); \
+decl_ipred_fns(type, 16, ssse3, ssse3); \
+decl_ipred_fns(type, 16, avx, avx)
+
+decl_ipred_dir_funcs(dl);
+decl_ipred_dir_funcs(dr);
+decl_ipred_dir_funcs(vl);
+decl_ipred_dir_funcs(vr);
+decl_ipred_dir_funcs(hu);
+decl_ipred_dir_funcs(hd);
+#endif /* HAVE_X86ASM */
+
+av_cold void ff_vp9dsp_init_16bpp_x86(VP9DSPContext *dsp)
+{
+#if HAVE_X86ASM
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_MMX(cpu_flags)) {
+ init_fpel_func(4, 0, 8, put, , mmx);
+ init_ipred_func(v, VERT, 4, 16, mmx);
+ }
+
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ init_fpel_func(4, 1, 8, avg, _16, mmxext);
+ init_ipred_func(h, HOR, 4, 16, mmxext);
+ init_ipred_func(dc, DC, 4, 16, mmxext);
+ init_ipred_func(dc_top, TOP_DC, 4, 16, mmxext);
+ init_ipred_func(dc_left, LEFT_DC, 4, 16, mmxext);
+ }
+
+ if (EXTERNAL_SSE(cpu_flags)) {
+ init_fpel_func(3, 0, 16, put, , sse);
+ init_fpel_func(2, 0, 32, put, , sse);
+ init_fpel_func(1, 0, 64, put, , sse);
+ init_fpel_func(0, 0, 128, put, , sse);
+ init_8_16_32_ipred_funcs(v, VERT, 16, sse);
+ }
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ init_fpel_func(3, 1, 16, avg, _16, sse2);
+ init_fpel_func(2, 1, 32, avg, _16, sse2);
+ init_fpel_func(1, 1, 64, avg, _16, sse2);
+ init_fpel_func(0, 1, 128, avg, _16, sse2);
+ init_8_16_32_ipred_funcs(h, HOR, 16, sse2);
+ init_8_16_32_ipred_funcs(dc, DC, 16, sse2);
+ init_8_16_32_ipred_funcs(dc_top, TOP_DC, 16, sse2);
+ init_8_16_32_ipred_funcs(dc_left, LEFT_DC, 16, sse2);
+ init_ipred_funcs(dl, DIAG_DOWN_LEFT, 16, sse2);
+ init_ipred_funcs(dr, DIAG_DOWN_RIGHT, 16, sse2);
+ init_ipred_funcs(vl, VERT_LEFT, 16, sse2);
+ init_ipred_funcs(vr, VERT_RIGHT, 16, sse2);
+ init_ipred_funcs(hu, HOR_UP, 16, sse2);
+ init_ipred_funcs(hd, HOR_DOWN, 16, sse2);
+ }
+
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ init_ipred_funcs(dl, DIAG_DOWN_LEFT, 16, ssse3);
+ init_ipred_funcs(dr, DIAG_DOWN_RIGHT, 16, ssse3);
+ init_ipred_funcs(vl, VERT_LEFT, 16, ssse3);
+ init_ipred_funcs(vr, VERT_RIGHT, 16, ssse3);
+ init_ipred_funcs(hu, HOR_UP, 16, ssse3);
+ init_ipred_funcs(hd, HOR_DOWN, 16, ssse3);
+ }
+
+ if (EXTERNAL_AVX_FAST(cpu_flags)) {
+ init_fpel_func(2, 0, 32, put, , avx);
+ init_fpel_func(1, 0, 64, put, , avx);
+ init_fpel_func(0, 0, 128, put, , avx);
+ init_ipred_funcs(dl, DIAG_DOWN_LEFT, 16, avx);
+ init_ipred_funcs(dr, DIAG_DOWN_RIGHT, 16, avx);
+ init_ipred_funcs(vl, VERT_LEFT, 16, avx);
+ init_ipred_funcs(vr, VERT_RIGHT, 16, avx);
+ init_ipred_funcs(hu, HOR_UP, 16, avx);
+ init_ipred_funcs(hd, HOR_DOWN, 16, avx);
+ }
+
+ if (EXTERNAL_AVX2_FAST(cpu_flags)) {
+ init_fpel_func(2, 1, 32, avg, _16, avx2);
+ init_fpel_func(1, 1, 64, avg, _16, avx2);
+ init_fpel_func(0, 1, 128, avg, _16, avx2);
+ init_ipred_func(dl, DIAG_DOWN_LEFT, 16, 16, avx2);
+ init_ipred_func(dl, DIAG_DOWN_LEFT, 32, 16, avx2);
+ init_ipred_func(dr, DIAG_DOWN_RIGHT, 16, 16, avx2);
+ init_ipred_func(vl, VERT_LEFT, 16, 16, avx2);
+ init_ipred_func(hd, HOR_DOWN, 16, 16, avx2);
+#if ARCH_X86_64
+ init_ipred_func(dr, DIAG_DOWN_RIGHT, 32, 16, avx2);
+#endif
+ }
+
+#endif /* HAVE_X86ASM */
+}
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp_template.c b/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp_template.c
new file mode 100644
index 0000000000..f93ea2468e
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp_template.c
@@ -0,0 +1,239 @@
+/*
+ * VP9 SIMD optimizations
+ *
+ * Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/vp9dsp.h"
+#include "libavcodec/x86/vp9dsp_init.h"
+
+#if HAVE_X86ASM
+
+extern const int16_t ff_filters_16bpp[3][15][4][16];
+
+decl_mc_funcs(4, sse2, int16_t, 16, BPC);
+decl_mc_funcs(8, sse2, int16_t, 16, BPC);
+decl_mc_funcs(16, avx2, int16_t, 16, BPC);
+
+mc_rep_funcs(16, 8, 16, sse2, int16_t, 16, BPC)
+mc_rep_funcs(32, 16, 32, sse2, int16_t, 16, BPC)
+mc_rep_funcs(64, 32, 64, sse2, int16_t, 16, BPC)
+#if HAVE_AVX2_EXTERNAL
+mc_rep_funcs(32, 16, 32, avx2, int16_t, 16, BPC)
+mc_rep_funcs(64, 32, 64, avx2, int16_t, 16, BPC)
+#endif
+
+filters_8tap_2d_fn2(put, 16, BPC, 2, sse2, sse2, 16bpp)
+filters_8tap_2d_fn2(avg, 16, BPC, 2, sse2, sse2, 16bpp)
+#if HAVE_AVX2_EXTERNAL
+filters_8tap_2d_fn(put, 64, 32, BPC, 2, avx2, 16bpp)
+filters_8tap_2d_fn(avg, 64, 32, BPC, 2, avx2, 16bpp)
+filters_8tap_2d_fn(put, 32, 32, BPC, 2, avx2, 16bpp)
+filters_8tap_2d_fn(avg, 32, 32, BPC, 2, avx2, 16bpp)
+filters_8tap_2d_fn(put, 16, 32, BPC, 2, avx2, 16bpp)
+filters_8tap_2d_fn(avg, 16, 32, BPC, 2, avx2, 16bpp)
+#endif
+
+filters_8tap_1d_fn3(put, BPC, sse2, sse2, 16bpp)
+filters_8tap_1d_fn3(avg, BPC, sse2, sse2, 16bpp)
+#if HAVE_AVX2_EXTERNAL
+filters_8tap_1d_fn2(put, 64, BPC, avx2, 16bpp)
+filters_8tap_1d_fn2(avg, 64, BPC, avx2, 16bpp)
+filters_8tap_1d_fn2(put, 32, BPC, avx2, 16bpp)
+filters_8tap_1d_fn2(avg, 32, BPC, avx2, 16bpp)
+filters_8tap_1d_fn2(put, 16, BPC, avx2, 16bpp)
+filters_8tap_1d_fn2(avg, 16, BPC, avx2, 16bpp)
+#endif
+
+#define decl_lpf_func(dir, wd, bpp, opt) \
+void ff_vp9_loop_filter_##dir##_##wd##_##bpp##_##opt(uint8_t *dst, ptrdiff_t stride, \
+ int E, int I, int H)
+
+#define decl_lpf_funcs(dir, wd, bpp) \
+decl_lpf_func(dir, wd, bpp, sse2); \
+decl_lpf_func(dir, wd, bpp, ssse3); \
+decl_lpf_func(dir, wd, bpp, avx)
+
+#define decl_lpf_funcs_wd(dir) \
+decl_lpf_funcs(dir, 4, BPC); \
+decl_lpf_funcs(dir, 8, BPC); \
+decl_lpf_funcs(dir, 16, BPC)
+
+decl_lpf_funcs_wd(h);
+decl_lpf_funcs_wd(v);
+
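+// The 16-wide loop filter is run as two halves: the second call is 8 rows further down
+// for the h variant, 16 bytes (8 high-bitdepth pixels) across for the v variant.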
+#define lpf_16_wrapper(dir, off, bpp, opt) \
+static void loop_filter_##dir##_16_##bpp##_##opt(uint8_t *dst, ptrdiff_t stride, \
+ int E, int I, int H) \
+{ \
+ ff_vp9_loop_filter_##dir##_16_##bpp##_##opt(dst, stride, E, I, H); \
+ ff_vp9_loop_filter_##dir##_16_##bpp##_##opt(dst + off, stride, E, I, H); \
+}
+
+#define lpf_16_wrappers(bpp, opt) \
+lpf_16_wrapper(h, 8 * stride, bpp, opt) \
+lpf_16_wrapper(v, 16, bpp, opt)
+
+lpf_16_wrappers(BPC, sse2)
+lpf_16_wrappers(BPC, ssse3)
+lpf_16_wrappers(BPC, avx)
+
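+// mix2 filters two adjacent edges of (possibly) different widths wd1/wd2: E, I and H
+// each pack two 8-bit thresholds, low byte for the first edge, high byte for the second.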
+#define lpf_mix2_wrapper(dir, off, wd1, wd2, bpp, opt) \
+static void loop_filter_##dir##_##wd1##wd2##_##bpp##_##opt(uint8_t *dst, ptrdiff_t stride, \
+ int E, int I, int H) \
+{ \
+ ff_vp9_loop_filter_##dir##_##wd1##_##bpp##_##opt(dst, stride, \
+ E & 0xff, I & 0xff, H & 0xff); \
+ ff_vp9_loop_filter_##dir##_##wd2##_##bpp##_##opt(dst + off, stride, \
+ E >> 8, I >> 8, H >> 8); \
+}
+
+#define lpf_mix2_wrappers(wd1, wd2, bpp, opt) \
+lpf_mix2_wrapper(h, 8 * stride, wd1, wd2, bpp, opt) \
+lpf_mix2_wrapper(v, 16, wd1, wd2, bpp, opt)
+
+#define lpf_mix2_wrappers_set(bpp, opt) \
+lpf_mix2_wrappers(4, 4, bpp, opt) \
+lpf_mix2_wrappers(4, 8, bpp, opt) \
+lpf_mix2_wrappers(8, 4, bpp, opt) \
+lpf_mix2_wrappers(8, 8, bpp, opt)
+
+lpf_mix2_wrappers_set(BPC, sse2)
+lpf_mix2_wrappers_set(BPC, ssse3)
+lpf_mix2_wrappers_set(BPC, avx)
+
+decl_ipred_fns(tm, BPC, mmxext, sse2);
+
+decl_itxfm_func(iwht, iwht, 4, BPC, mmxext);
+#if BPC == 10
+decl_itxfm_func(idct, idct, 4, BPC, mmxext);
+decl_itxfm_funcs(4, BPC, ssse3);
+#else
+decl_itxfm_func(idct, idct, 4, BPC, sse2);
+#endif
+decl_itxfm_func(idct, iadst, 4, BPC, sse2);
+decl_itxfm_func(iadst, idct, 4, BPC, sse2);
+decl_itxfm_func(iadst, iadst, 4, BPC, sse2);
+decl_itxfm_funcs(8, BPC, sse2);
+decl_itxfm_funcs(16, BPC, sse2);
+decl_itxfm_func(idct, idct, 32, BPC, sse2);
+#endif /* HAVE_X86ASM */
+
+av_cold void INIT_FUNC(VP9DSPContext *dsp, int bitexact)
+{
+#if HAVE_X86ASM
+ int cpu_flags = av_get_cpu_flags();
+
+#define init_lpf_8_func(idx1, idx2, dir, wd, bpp, opt) \
+ dsp->loop_filter_8[idx1][idx2] = ff_vp9_loop_filter_##dir##_##wd##_##bpp##_##opt
+#define init_lpf_16_func(idx, dir, bpp, opt) \
+ dsp->loop_filter_16[idx] = loop_filter_##dir##_16_##bpp##_##opt
+#define init_lpf_mix2_func(idx1, idx2, idx3, dir, wd1, wd2, bpp, opt) \
+ dsp->loop_filter_mix2[idx1][idx2][idx3] = loop_filter_##dir##_##wd1##wd2##_##bpp##_##opt
+
+#define init_lpf_funcs(bpp, opt) \
+ init_lpf_8_func(0, 0, h, 4, bpp, opt); \
+ init_lpf_8_func(0, 1, v, 4, bpp, opt); \
+ init_lpf_8_func(1, 0, h, 8, bpp, opt); \
+ init_lpf_8_func(1, 1, v, 8, bpp, opt); \
+ init_lpf_8_func(2, 0, h, 16, bpp, opt); \
+ init_lpf_8_func(2, 1, v, 16, bpp, opt); \
+ init_lpf_16_func(0, h, bpp, opt); \
+ init_lpf_16_func(1, v, bpp, opt); \
+ init_lpf_mix2_func(0, 0, 0, h, 4, 4, bpp, opt); \
+ init_lpf_mix2_func(0, 1, 0, h, 4, 8, bpp, opt); \
+ init_lpf_mix2_func(1, 0, 0, h, 8, 4, bpp, opt); \
+ init_lpf_mix2_func(1, 1, 0, h, 8, 8, bpp, opt); \
+ init_lpf_mix2_func(0, 0, 1, v, 4, 4, bpp, opt); \
+ init_lpf_mix2_func(0, 1, 1, v, 4, 8, bpp, opt); \
+ init_lpf_mix2_func(1, 0, 1, v, 8, 4, bpp, opt); \
+ init_lpf_mix2_func(1, 1, 1, v, 8, 8, bpp, opt)
+
+#define init_itx_func(idxa, idxb, typea, typeb, size, bpp, opt) \
+ dsp->itxfm_add[idxa][idxb] = \
+ cat(ff_vp9_##typea##_##typeb##_##size##x##size##_add_, bpp, _##opt);
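+// init_itx_func_one points all four transform-type slots of one size at a single
+// function (used for lossless WHT and for 32x32, which only has DCT_DCT).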
+#define init_itx_func_one(idx, typea, typeb, size, bpp, opt) \
+ init_itx_func(idx, DCT_DCT, typea, typeb, size, bpp, opt); \
+ init_itx_func(idx, ADST_DCT, typea, typeb, size, bpp, opt); \
+ init_itx_func(idx, DCT_ADST, typea, typeb, size, bpp, opt); \
+ init_itx_func(idx, ADST_ADST, typea, typeb, size, bpp, opt)
+#define init_itx_funcs(idx, size, bpp, opt) \
+ init_itx_func(idx, DCT_DCT, idct, idct, size, bpp, opt); \
+ init_itx_func(idx, ADST_DCT, idct, iadst, size, bpp, opt); \
+ init_itx_func(idx, DCT_ADST, iadst, idct, size, bpp, opt); \
+ init_itx_func(idx, ADST_ADST, iadst, iadst, size, bpp, opt);
+
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ init_ipred_func(tm, TM_VP8, 4, BPC, mmxext);
+ if (!bitexact) {
+ init_itx_func_one(4 /* lossless */, iwht, iwht, 4, BPC, mmxext);
+#if BPC == 10
+ init_itx_func(TX_4X4, DCT_DCT, idct, idct, 4, 10, mmxext);
+#endif
+ }
+ }
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ init_subpel3(0, put, BPC, sse2);
+ init_subpel3(1, avg, BPC, sse2);
+ init_lpf_funcs(BPC, sse2);
+ init_8_16_32_ipred_funcs(tm, TM_VP8, BPC, sse2);
+#if BPC == 10
+ if (!bitexact) {
+ init_itx_func(TX_4X4, ADST_DCT, idct, iadst, 4, 10, sse2);
+ init_itx_func(TX_4X4, DCT_ADST, iadst, idct, 4, 10, sse2);
+ init_itx_func(TX_4X4, ADST_ADST, iadst, iadst, 4, 10, sse2);
+ }
+#else
+ init_itx_funcs(TX_4X4, 4, 12, sse2);
+#endif
+ init_itx_funcs(TX_8X8, 8, BPC, sse2);
+ init_itx_funcs(TX_16X16, 16, BPC, sse2);
+ init_itx_func_one(TX_32X32, idct, idct, 32, BPC, sse2);
+ }
+
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ init_lpf_funcs(BPC, ssse3);
+#if BPC == 10
+ if (!bitexact) {
+ init_itx_funcs(TX_4X4, 4, BPC, ssse3);
+ }
+#endif
+ }
+
+ if (EXTERNAL_AVX(cpu_flags)) {
+ init_lpf_funcs(BPC, avx);
+ }
+
+ if (EXTERNAL_AVX2_FAST(cpu_flags)) {
+#if HAVE_AVX2_EXTERNAL
+ init_subpel3_32_64(0, put, BPC, avx2);
+ init_subpel3_32_64(1, avg, BPC, avx2);
+ init_subpel2(2, 0, 16, put, BPC, avx2);
+ init_subpel2(2, 1, 16, avg, BPC, avx2);
+#endif
+ }
+
+#endif /* HAVE_X86ASM */
+
+ ff_vp9dsp_init_16bpp_x86(dsp);
+}
diff --git a/media/ffvpx/libavcodec/x86/vp9intrapred.asm b/media/ffvpx/libavcodec/x86/vp9intrapred.asm
new file mode 100644
index 0000000000..31f7d449fd
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9intrapred.asm
@@ -0,0 +1,2044 @@
+;******************************************************************************
+;* VP9 Intra prediction SIMD optimizations
+;*
+;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
+;*
+;* Parts based on:
+;* H.264 intra prediction asm optimizations
+;* Copyright (c) 2010 Fiona Glaser
+;* Copyright (c) 2010 Holger Lubitz
+;* Copyright (c) 2010 Loren Merritt
+;* Copyright (c) 2010 Ronald S. Bultje
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA 32
+
+pw_m256: times 16 dw -256
+pw_m255: times 16 dw -255
+pw_4096: times 8 dw 4096
+
+pb_4x3_4x2_4x1_4x0: times 4 db 3
+ times 4 db 2
+ times 4 db 1
+ times 4 db 0
+pb_8x1_8x0: times 8 db 1
+ times 8 db 0
+pb_8x3_8x2: times 8 db 3
+ times 8 db 2
+pb_0to5_2x7: db 0, 1, 2, 3, 4, 5, 7, 7
+ times 8 db -1
+pb_0to6_9x7: db 0, 1, 2, 3, 4, 5, 6
+ times 9 db 7
+pb_1to6_10x7: db 1, 2, 3, 4, 5, 6
+ times 10 db 7
+pb_2to6_3x7:
+pb_2to6_11x7: db 2, 3, 4, 5, 6
+ times 11 db 7
+pb_1toE_2xF: db 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15
+pb_2toE_3xF: db 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15
+pb_13456_3xm1: db 1, 3, 4, 5, 6
+ times 3 db -1
+pb_6012_4xm1: db 6, 0, 1, 2
+ times 4 db -1
+pb_6xm1_246_8toE: times 6 db -1
+ db 2, 4, 6, 8, 9, 10, 11, 12, 13, 14
+pb_6xm1_BDF_0to6: times 6 db -1
+ db 11, 13, 15, 0, 1, 2, 3, 4, 5, 6
+pb_02468ACE_13579BDF: db 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15
+
+pb_15x0_1xm1: times 15 db 0
+ db -1
+pb_0to2_5x3: db 0, 1, 2
+ times 5 db 3
+pb_6xm1_2x0: times 6 db -1
+ times 2 db 0
+pb_6x0_2xm1: times 6 db 0
+ times 2 db -1
+
+cextern pb_1
+cextern pb_2
+cextern pb_3
+cextern pb_15
+cextern pw_2
+cextern pw_4
+cextern pw_8
+cextern pw_16
+cextern pw_32
+cextern pw_255
+cextern pw_512
+cextern pw_1024
+cextern pw_2048
+cextern pw_8192
+
+SECTION .text
+
+; dc_NxN(uint8_t *dst, ptrdiff_t stride, const uint8_t *l, const uint8_t *a)
+
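+; DC: sum left+top with psadbw against zero; the ssse3 path rounds and scales with one
+; pmulhrsw (pw_4096 == (x + 4) >> 3 for the 8-sample 4x4 case) and broadcasts, the
+; mmxext path adds the bias and shifts explicitly.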
+%macro DC_4to8_FUNCS 0
+cglobal vp9_ipred_dc_4x4, 4, 4, 0, dst, stride, l, a
+ movd m0, [lq]
+ punpckldq m0, [aq]
+ pxor m1, m1
+ psadbw m0, m1
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_4096]
+ pshufb m0, m1
+%else
+ paddw m0, [pw_4]
+ psraw m0, 3
+ punpcklbw m0, m0
+ pshufw m0, m0, q0000
+%endif
+ movd [dstq+strideq*0], m0
+ movd [dstq+strideq*1], m0
+ lea dstq, [dstq+strideq*2]
+ movd [dstq+strideq*0], m0
+ movd [dstq+strideq*1], m0
+ RET
+
+cglobal vp9_ipred_dc_8x8, 4, 4, 0, dst, stride, l, a
+ movq m0, [lq]
+ movq m1, [aq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pxor m2, m2
+ psadbw m0, m2
+ psadbw m1, m2
+ paddw m0, m1
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_2048]
+ pshufb m0, m2
+%else
+ paddw m0, [pw_8]
+ psraw m0, 4
+ punpcklbw m0, m0
+ pshufw m0, m0, q0000
+%endif
+ movq [dstq+strideq*0], m0
+ movq [dstq+strideq*1], m0
+ movq [dstq+strideq*2], m0
+ movq [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ movq [dstq+strideq*0], m0
+ movq [dstq+strideq*1], m0
+ movq [dstq+strideq*2], m0
+ movq [dstq+stride3q ], m0
+ RET
+%endmacro
+
+INIT_MMX mmxext
+DC_4to8_FUNCS
+INIT_MMX ssse3
+DC_4to8_FUNCS
+
+%macro DC_16to32_FUNCS 0
+cglobal vp9_ipred_dc_16x16, 4, 4, 3, dst, stride, l, a
+ mova m0, [lq]
+ mova m1, [aq]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ pxor m2, m2
+ psadbw m0, m2
+ psadbw m1, m2
+ paddw m0, m1
+ movhlps m1, m0
+ paddw m0, m1
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_1024]
+ pshufb m0, m2
+%else
+ paddw m0, [pw_16]
+ psraw m0, 5
+ punpcklbw m0, m0
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+%endif
+ mov cntd, 4
+.loop:
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_dc_32x32, 4, 4, 5, dst, stride, l, a
+ mova m0, [lq]
+ mova m1, [lq+16]
+ mova m2, [aq]
+ mova m3, [aq+16]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ pxor m4, m4
+ psadbw m0, m4
+ psadbw m1, m4
+ psadbw m2, m4
+ psadbw m3, m4
+ paddw m0, m1
+ paddw m2, m3
+ paddw m0, m2
+ movhlps m1, m0
+ paddw m0, m1
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_512]
+ pshufb m0, m4
+%else
+ paddw m0, [pw_32]
+ psraw m0, 6
+ punpcklbw m0, m0
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+%endif
+ mov cntd, 8
+.loop:
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m0
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m0
+ mova [dstq+strideq*2+ 0], m0
+ mova [dstq+strideq*2+16], m0
+ mova [dstq+stride3q + 0], m0
+ mova [dstq+stride3q +16], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+DC_16to32_FUNCS
+INIT_XMM ssse3
+DC_16to32_FUNCS
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+cglobal vp9_ipred_dc_32x32, 4, 4, 3, dst, stride, l, a
+ mova m0, [lq]
+ mova m1, [aq]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ pxor m2, m2
+ psadbw m0, m2
+ psadbw m1, m2
+ paddw m0, m1
+ vextracti128 xm1, m0, 1
+ paddw xm0, xm1
+ movhlps xm1, xm0
+ paddw xm0, xm1
+ pmulhrsw xm0, [pw_512]
+ vpbroadcastb m0, xm0
+ mov cntd, 4
+.loop:
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+%endif
+
+; dc_top/left_NxN(uint8_t *dst, ptrdiff_t stride, const uint8_t *l, const uint8_t *a)
+
+%macro DC_1D_4to8_FUNCS 2 ; dir (top or left), arg (a or l)
+cglobal vp9_ipred_dc_%1_4x4, 4, 4, 0, dst, stride, l, a
+ movd m0, [%2q]
+ pxor m1, m1
+ psadbw m0, m1
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_8192]
+ pshufb m0, m1
+%else
+ paddw m0, [pw_2]
+ psraw m0, 2
+ punpcklbw m0, m0
+ pshufw m0, m0, q0000
+%endif
+ movd [dstq+strideq*0], m0
+ movd [dstq+strideq*1], m0
+ lea dstq, [dstq+strideq*2]
+ movd [dstq+strideq*0], m0
+ movd [dstq+strideq*1], m0
+ RET
+
+cglobal vp9_ipred_dc_%1_8x8, 4, 4, 0, dst, stride, l, a
+ movq m0, [%2q]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pxor m1, m1
+ psadbw m0, m1
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_4096]
+ pshufb m0, m1
+%else
+ paddw m0, [pw_4]
+ psraw m0, 3
+ punpcklbw m0, m0
+ pshufw m0, m0, q0000
+%endif
+ movq [dstq+strideq*0], m0
+ movq [dstq+strideq*1], m0
+ movq [dstq+strideq*2], m0
+ movq [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ movq [dstq+strideq*0], m0
+ movq [dstq+strideq*1], m0
+ movq [dstq+strideq*2], m0
+ movq [dstq+stride3q ], m0
+ RET
+%endmacro
+
+INIT_MMX mmxext
+DC_1D_4to8_FUNCS top, a
+DC_1D_4to8_FUNCS left, l
+INIT_MMX ssse3
+DC_1D_4to8_FUNCS top, a
+DC_1D_4to8_FUNCS left, l
+
+%macro DC_1D_16to32_FUNCS 2; dir (top or left), arg (a or l)
+cglobal vp9_ipred_dc_%1_16x16, 4, 4, 3, dst, stride, l, a
+ mova m0, [%2q]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ pxor m2, m2
+ psadbw m0, m2
+ movhlps m1, m0
+ paddw m0, m1
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_2048]
+ pshufb m0, m2
+%else
+ paddw m0, [pw_8]
+ psraw m0, 4
+ punpcklbw m0, m0
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+%endif
+ mov cntd, 4
+.loop:
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_dc_%1_32x32, 4, 4, 3, dst, stride, l, a
+ mova m0, [%2q]
+ mova m1, [%2q+16]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ pxor m2, m2
+ psadbw m0, m2
+ psadbw m1, m2
+ paddw m0, m1
+ movhlps m1, m0
+ paddw m0, m1
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_1024]
+ pshufb m0, m2
+%else
+ paddw m0, [pw_16]
+ psraw m0, 5
+ punpcklbw m0, m0
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+%endif
+ mov cntd, 8
+.loop:
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m0
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m0
+ mova [dstq+strideq*2+ 0], m0
+ mova [dstq+strideq*2+16], m0
+ mova [dstq+stride3q + 0], m0
+ mova [dstq+stride3q +16], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+DC_1D_16to32_FUNCS top, a
+DC_1D_16to32_FUNCS left, l
+INIT_XMM ssse3
+DC_1D_16to32_FUNCS top, a
+DC_1D_16to32_FUNCS left, l
+
+%macro DC_1D_AVX2_FUNCS 2 ; dir (top or left), arg (a or l)
+%if HAVE_AVX2_EXTERNAL
+cglobal vp9_ipred_dc_%1_32x32, 4, 4, 3, dst, stride, l, a
+ mova m0, [%2q]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ pxor m2, m2
+ psadbw m0, m2
+ vextracti128 xm1, m0, 1
+ paddw xm0, xm1
+ movhlps xm1, xm0
+ paddw xm0, xm1
+ pmulhrsw xm0, [pw_1024]
+ vpbroadcastb m0, xm0
+ mov cntd, 4
+.loop:
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+%endif
+%endmacro
+
+INIT_YMM avx2
+DC_1D_AVX2_FUNCS top, a
+DC_1D_AVX2_FUNCS left, l
+
+; v
+
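+; V: the row above the block is copied unchanged into every output row.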
+INIT_MMX mmx
+cglobal vp9_ipred_v_8x8, 4, 4, 0, dst, stride, l, a
+ movq m0, [aq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ movq [dstq+strideq*0], m0
+ movq [dstq+strideq*1], m0
+ movq [dstq+strideq*2], m0
+ movq [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ movq [dstq+strideq*0], m0
+ movq [dstq+strideq*1], m0
+ movq [dstq+strideq*2], m0
+ movq [dstq+stride3q ], m0
+ RET
+
+INIT_XMM sse
+cglobal vp9_ipred_v_16x16, 4, 4, 1, dst, stride, l, a
+ mova m0, [aq]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ mov cntd, 4
+.loop:
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+
+INIT_XMM sse
+cglobal vp9_ipred_v_32x32, 4, 4, 2, dst, stride, l, a
+ mova m0, [aq]
+ mova m1, [aq+16]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ mov cntd, 8
+.loop:
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m1
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m1
+ mova [dstq+strideq*2+ 0], m0
+ mova [dstq+strideq*2+16], m1
+ mova [dstq+stride3q + 0], m0
+ mova [dstq+stride3q +16], m1
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+
+INIT_YMM avx
+cglobal vp9_ipred_v_32x32, 4, 4, 1, dst, stride, l, a
+ mova m0, [aq]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ mov cntd, 4
+.loop:
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+
+; h
+
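+; H: each output row is one left pixel broadcast across the row; l[] is consumed from
+; its last byte (top row) down to its first (bottom row).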
+%macro H_XMM_FUNCS 2
+%if notcpuflag(avx)
+cglobal vp9_ipred_h_4x4, 3, 4, 1, dst, stride, l, stride3
+ movd m0, [lq]
+%if cpuflag(ssse3)
+ pshufb m0, [pb_4x3_4x2_4x1_4x0]
+%else
+ punpcklbw m0, m0
+ pshuflw m0, m0, q0123
+ punpcklwd m0, m0
+%endif
+ lea stride3q, [strideq*3]
+ movd [dstq+strideq*0], m0
+ psrldq m0, 4
+ movd [dstq+strideq*1], m0
+ psrldq m0, 4
+ movd [dstq+strideq*2], m0
+ psrldq m0, 4
+ movd [dstq+stride3q ], m0
+ RET
+%endif
+
+cglobal vp9_ipred_h_8x8, 3, 5, %1, dst, stride, l, stride3, cnt
+%if cpuflag(ssse3)
+ mova m2, [pb_8x1_8x0]
+ mova m3, [pb_8x3_8x2]
+%endif
+ lea stride3q, [strideq*3]
+ mov cntq, 1
+.loop:
+ movd m0, [lq+cntq*4]
+%if cpuflag(ssse3)
+ pshufb m1, m0, m3
+ pshufb m0, m2
+%else
+ punpcklbw m0, m0
+ punpcklwd m0, m0
+ pshufd m1, m0, q2233
+ pshufd m0, m0, q0011
+%endif
+ movq [dstq+strideq*0], m1
+ movhps [dstq+strideq*1], m1
+ movq [dstq+strideq*2], m0
+ movhps [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntq
+ jge .loop
+ RET
+
+cglobal vp9_ipred_h_16x16, 3, 5, %2, dst, stride, l, stride3, cnt
+%if cpuflag(ssse3)
+ mova m5, [pb_1]
+ mova m6, [pb_2]
+ mova m7, [pb_3]
+ pxor m4, m4
+%endif
+ lea stride3q, [strideq*3]
+ mov cntq, 3
+.loop:
+ movd m3, [lq+cntq*4]
+%if cpuflag(ssse3)
+ pshufb m0, m3, m7
+ pshufb m1, m3, m6
+%else
+ punpcklbw m3, m3
+ punpcklwd m3, m3
+ pshufd m0, m3, q3333
+ pshufd m1, m3, q2222
+%endif
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m1
+%if cpuflag(ssse3)
+ pshufb m2, m3, m5
+ pshufb m3, m4
+%else
+ pshufd m2, m3, q1111
+ pshufd m3, m3, q0000
+%endif
+ mova [dstq+strideq*2], m2
+ mova [dstq+stride3q ], m3
+ lea dstq, [dstq+strideq*4]
+ dec cntq
+ jge .loop
+ RET
+
+cglobal vp9_ipred_h_32x32, 3, 5, %2, dst, stride, l, stride3, cnt
+%if cpuflag(ssse3)
+ mova m5, [pb_1]
+ mova m6, [pb_2]
+ mova m7, [pb_3]
+ pxor m4, m4
+%endif
+ lea stride3q, [strideq*3]
+ mov cntq, 7
+.loop:
+ movd m3, [lq+cntq*4]
+%if cpuflag(ssse3)
+ pshufb m0, m3, m7
+ pshufb m1, m3, m6
+%else
+ punpcklbw m3, m3
+ punpcklwd m3, m3
+ pshufd m0, m3, q3333
+ pshufd m1, m3, q2222
+%endif
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m0
+ mova [dstq+strideq*1+ 0], m1
+ mova [dstq+strideq*1+16], m1
+%if cpuflag(ssse3)
+ pshufb m2, m3, m5
+ pshufb m3, m4
+%else
+ pshufd m2, m3, q1111
+ pshufd m3, m3, q0000
+%endif
+ mova [dstq+strideq*2+ 0], m2
+ mova [dstq+strideq*2+16], m2
+ mova [dstq+stride3q + 0], m3
+ mova [dstq+stride3q +16], m3
+ lea dstq, [dstq+strideq*4]
+ dec cntq
+ jge .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+H_XMM_FUNCS 2, 4
+INIT_XMM ssse3
+H_XMM_FUNCS 4, 8
+INIT_XMM avx
+H_XMM_FUNCS 4, 8
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+cglobal vp9_ipred_h_32x32, 3, 5, 8, dst, stride, l, stride3, cnt
+ mova m5, [pb_1]
+ mova m6, [pb_2]
+ mova m7, [pb_3]
+ pxor m4, m4
+ lea stride3q, [strideq*3]
+ mov cntq, 7
+.loop:
+ movd xm3, [lq+cntq*4]
+ vinserti128 m3, m3, xm3, 1
+ pshufb m0, m3, m7
+ pshufb m1, m3, m6
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m1
+ pshufb m2, m3, m5
+ pshufb m3, m4
+ mova [dstq+strideq*2], m2
+ mova [dstq+stride3q ], m3
+ lea dstq, [dstq+strideq*4]
+ dec cntq
+ jge .loop
+ RET
+%endif
+
+; tm
+
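+; TM: pred = clip(left + above - topleft). above - topleft is computed once, then each
+; row adds its left pixel and packuswb clamps the result to 8 bits.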
+%macro TM_MMX_FUNCS 0
+cglobal vp9_ipred_tm_4x4, 4, 4, 0, dst, stride, l, a
+ pxor m1, m1
+ movd m0, [aq]
+ pinsrw m2, [aq-1], 0
+ punpcklbw m0, m1
+ DEFINE_ARGS dst, stride, l, cnt
+%if cpuflag(ssse3)
+ mova m3, [pw_m256]
+ mova m1, [pw_m255]
+ pshufb m2, m3
+%else
+ punpcklbw m2, m1
+ pshufw m2, m2, q0000
+%endif
+ psubw m0, m2
+ mov cntq, 1
+.loop:
+ pinsrw m2, [lq+cntq*2], 0
+%if cpuflag(ssse3)
+ pshufb m4, m2, m1
+ pshufb m2, m3
+%else
+ punpcklbw m2, m1
+ pshufw m4, m2, q1111
+ pshufw m2, m2, q0000
+%endif
+ paddw m4, m0
+ paddw m2, m0
+ packuswb m4, m4
+ packuswb m2, m2
+ movd [dstq+strideq*0], m4
+ movd [dstq+strideq*1], m2
+ lea dstq, [dstq+strideq*2]
+ dec cntq
+ jge .loop
+ RET
+%endmacro
+
+INIT_MMX mmxext
+TM_MMX_FUNCS
+INIT_MMX ssse3
+TM_MMX_FUNCS
+
+%macro TM_XMM_FUNCS 0
+cglobal vp9_ipred_tm_8x8, 4, 4, 5, dst, stride, l, a
+ pxor m1, m1
+ movh m0, [aq]
+ pinsrw m2, [aq-1], 0
+ punpcklbw m0, m1
+ DEFINE_ARGS dst, stride, l, cnt
+%if cpuflag(ssse3)
+ mova m3, [pw_m256]
+ mova m1, [pw_m255]
+ pshufb m2, m3
+%else
+ punpcklbw m2, m1
+ punpcklwd m2, m2
+ pshufd m2, m2, q0000
+%endif
+ psubw m0, m2
+ mov cntq, 3
+.loop:
+ pinsrw m2, [lq+cntq*2], 0
+%if cpuflag(ssse3)
+ pshufb m4, m2, m1
+ pshufb m2, m3
+%else
+ punpcklbw m2, m1
+ punpcklwd m2, m2
+ pshufd m4, m2, q1111
+ pshufd m2, m2, q0000
+%endif
+ paddw m4, m0
+ paddw m2, m0
+ packuswb m4, m2
+ movh [dstq+strideq*0], m4
+ movhps [dstq+strideq*1], m4
+ lea dstq, [dstq+strideq*2]
+ dec cntq
+ jge .loop
+ RET
+
+cglobal vp9_ipred_tm_16x16, 4, 4, 8, dst, stride, l, a
+ pxor m3, m3
+ mova m0, [aq]
+ pinsrw m2, [aq-1], 0
+ punpckhbw m1, m0, m3
+ punpcklbw m0, m3
+ DEFINE_ARGS dst, stride, l, cnt
+%if cpuflag(ssse3)
+ mova m4, [pw_m256]
+ mova m3, [pw_m255]
+ pshufb m2, m4
+%else
+ punpcklbw m2, m3
+ punpcklwd m2, m2
+ pshufd m2, m2, q0000
+%endif
+ psubw m1, m2
+ psubw m0, m2
+ mov cntq, 7
+.loop:
+ pinsrw m7, [lq+cntq*2], 0
+%if cpuflag(ssse3)
+ pshufb m5, m7, m3
+ pshufb m7, m4
+%else
+ punpcklbw m7, m3
+ punpcklwd m7, m7
+ pshufd m5, m7, q1111
+ pshufd m7, m7, q0000
+%endif
+ paddw m2, m5, m0
+ paddw m5, m1
+ paddw m6, m7, m0
+ paddw m7, m1
+ packuswb m2, m5
+ packuswb m6, m7
+ mova [dstq+strideq*0], m2
+ mova [dstq+strideq*1], m6
+ lea dstq, [dstq+strideq*2]
+ dec cntq
+ jge .loop
+ RET
+
+%if ARCH_X86_64
+%define mem 0
+%else
+%define mem 64
+%endif
+cglobal vp9_ipred_tm_32x32, 4, 4, 14, mem, dst, stride, l, a
+ pxor m5, m5
+ pinsrw m4, [aq-1], 0
+ mova m0, [aq]
+ mova m2, [aq+16]
+ DEFINE_ARGS dst, stride, l, cnt
+%if cpuflag(ssse3)
+%if ARCH_X86_64
+ mova m12, [pw_m256]
+ mova m13, [pw_m255]
+%define pw_m256_reg m12
+%define pw_m255_reg m13
+%else
+%define pw_m256_reg [pw_m256]
+%define pw_m255_reg [pw_m255]
+%endif
+ pshufb m4, pw_m256_reg
+%else
+ punpcklbw m4, m5
+ punpcklwd m4, m4
+ pshufd m4, m4, q0000
+%endif
+ punpckhbw m1, m0, m5
+ punpckhbw m3, m2, m5
+ punpcklbw m0, m5
+ punpcklbw m2, m5
+ psubw m1, m4
+ psubw m0, m4
+ psubw m3, m4
+ psubw m2, m4
+%if ARCH_X86_64
+ SWAP 0, 8
+ SWAP 1, 9
+ SWAP 2, 10
+ SWAP 3, 11
+%else
+ mova [rsp+0*16], m0
+ mova [rsp+1*16], m1
+ mova [rsp+2*16], m2
+ mova [rsp+3*16], m3
+%endif
+ mov cntq, 15
+.loop:
+ pinsrw m3, [lq+cntq*2], 0
+%if cpuflag(ssse3)
+ pshufb m7, m3, pw_m255_reg
+ pshufb m3, pw_m256_reg
+%else
+ pxor m7, m7
+ punpcklbw m3, m7
+ punpcklwd m3, m3
+ pshufd m7, m3, q1111
+ pshufd m3, m3, q0000
+%endif
+%if ARCH_X86_64
+ paddw m4, m7, m8
+ paddw m5, m7, m9
+ paddw m6, m7, m10
+ paddw m7, m11
+ paddw m0, m3, m8
+ paddw m1, m3, m9
+ paddw m2, m3, m10
+ paddw m3, m11
+%else
+ paddw m4, m7, [rsp+0*16]
+ paddw m5, m7, [rsp+1*16]
+ paddw m6, m7, [rsp+2*16]
+ paddw m7, [rsp+3*16]
+ paddw m0, m3, [rsp+0*16]
+ paddw m1, m3, [rsp+1*16]
+ paddw m2, m3, [rsp+2*16]
+ paddw m3, [rsp+3*16]
+%endif
+ packuswb m4, m5
+ packuswb m6, m7
+ packuswb m0, m1
+ packuswb m2, m3
+ mova [dstq+strideq*0+ 0], m4
+ mova [dstq+strideq*0+16], m6
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m2
+ lea dstq, [dstq+strideq*2]
+ dec cntq
+ jge .loop
+ RET
+%undef pw_m256_reg
+%undef pw_m255_reg
+%undef mem
+%endmacro
+
+INIT_XMM sse2
+TM_XMM_FUNCS
+INIT_XMM ssse3
+TM_XMM_FUNCS
+INIT_XMM avx
+TM_XMM_FUNCS
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+cglobal vp9_ipred_tm_32x32, 4, 4, 8, dst, stride, l, a
+ pxor m3, m3
+ pinsrw xm2, [aq-1], 0
+ vinserti128 m2, m2, xm2, 1
+ mova m0, [aq]
+ DEFINE_ARGS dst, stride, l, cnt
+ mova m4, [pw_m256]
+ mova m5, [pw_m255]
+ pshufb m2, m4
+ punpckhbw m1, m0, m3
+ punpcklbw m0, m3
+ psubw m1, m2
+ psubw m0, m2
+ mov cntq, 15
+.loop:
+ pinsrw xm7, [lq+cntq*2], 0
+ vinserti128 m7, m7, xm7, 1
+ pshufb m3, m7, m5
+ pshufb m7, m4
+ paddw m2, m3, m0
+ paddw m3, m1
+ paddw m6, m7, m0
+ paddw m7, m1
+ packuswb m2, m3
+ packuswb m6, m7
+ mova [dstq+strideq*0], m2
+ mova [dstq+strideq*1], m6
+ lea dstq, [dstq+strideq*2]
+ dec cntq
+ jge .loop
+ RET
+%endif
+
+; dl
+
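+; 3-tap edge filter (left + 2*center + right + 2) >> 2; the pxor/pand/psubusb step
+; cancels the extra rounding the two pavgb's would otherwise introduce.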
+%macro LOWPASS 4 ; left [dst], center, right, tmp
+ pxor m%4, m%1, m%3
+ pand m%4, [pb_1]
+ pavgb m%1, m%3
+ psubusb m%1, m%4
+ pavgb m%1, m%2
+%endmacro
+
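+; DL: low-pass the above row (edge pixel replicated past the end), then each successive
+; output row reads the filtered row shifted one pixel further along (down-left diagonal).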
+%macro DL_MMX_FUNCS 0
+cglobal vp9_ipred_dl_4x4, 4, 4, 0, dst, stride, l, a
+ movq m1, [aq]
+%if cpuflag(ssse3)
+ pshufb m0, m1, [pb_0to5_2x7]
+ pshufb m2, m1, [pb_2to6_3x7]
+%else
+ punpckhbw m3, m1, m1 ; 44556677
+ pand m0, m1, [pb_6xm1_2x0] ; 012345__
+ pand m3, [pb_6x0_2xm1] ; ______77
+ psrlq m2, m1, 16 ; 234567__
+ por m0, m3 ; 01234577
+ por m2, m3 ; 23456777
+%endif
+ psrlq m1, 8
+ LOWPASS 0, 1, 2, 3
+
+ pshufw m1, m0, q3321
+ movd [dstq+strideq*0], m0
+ movd [dstq+strideq*2], m1
+ psrlq m0, 8
+ psrlq m1, 8
+ add dstq, strideq
+ movd [dstq+strideq*0], m0
+ movd [dstq+strideq*2], m1
+ RET
+%endmacro
+
+INIT_MMX mmxext
+DL_MMX_FUNCS
+INIT_MMX ssse3
+DL_MMX_FUNCS
+
+%macro DL_XMM_FUNCS 0
+cglobal vp9_ipred_dl_8x8, 4, 4, 4, dst, stride, stride5, a
+ movq m0, [aq]
+ lea stride5q, [strideq*5]
+%if cpuflag(ssse3)
+ pshufb m1, m0, [pb_1to6_10x7]
+%else
+ punpcklbw m1, m0, m0 ; 0011223344556677
+ punpckhwd m1, m1 ; 4x4,4x5,4x6,4x7
+%endif
+ shufps m0, m1, q3310
+%if notcpuflag(ssse3)
+ psrldq m1, m0, 1
+ shufps m1, m0, q3210
+%endif
+ psrldq m2, m1, 1
+ LOWPASS 0, 1, 2, 3
+
+ pshufd m1, m0, q3321
+ movq [dstq+strideq*0], m0
+ movq [dstq+strideq*4], m1
+ psrldq m0, 1
+ psrldq m1, 1
+ movq [dstq+strideq*1], m0
+ movq [dstq+stride5q ], m1
+ lea dstq, [dstq+strideq*2]
+ psrldq m0, 1
+ psrldq m1, 1
+ movq [dstq+strideq*0], m0
+ movq [dstq+strideq*4], m1
+ psrldq m0, 1
+ psrldq m1, 1
+ movq [dstq+strideq*1], m0
+ movq [dstq+stride5q ], m1
+ RET
+
+cglobal vp9_ipred_dl_16x16, 4, 4, 6, dst, stride, l, a
+ mova m0, [aq]
+%if cpuflag(ssse3)
+ mova m5, [pb_1toE_2xF]
+ pshufb m1, m0, m5
+ pshufb m2, m1, m5
+ pshufb m4, m0, [pb_15]
+%else
+ pand m5, m0, [pb_15x0_1xm1] ; _______________F
+ psrldq m1, m0, 1 ; 123456789ABCDEF_
+ por m1, m5 ; 123456789ABCDEFF
+ psrldq m2, m1, 1 ; 23456789ABCDEFF_
+ por m2, m5 ; 23456789ABCDEFFF
+ pshufhw m4, m1, q3333 ; xxxxxxxxFFFFFFFF
+%endif
+ LOWPASS 0, 1, 2, 3
+ DEFINE_ARGS dst, stride, cnt, stride9
+ lea stride9q, [strideq+strideq*8]
+ mov cntd, 4
+
+.loop:
+ movhlps m4, m0
+ mova [dstq+strideq*0], m0
+%if cpuflag(ssse3)
+ pshufb m0, m5
+%else
+ psrldq m0, 1
+ por m0, m5
+%endif
+ mova [dstq+strideq*8], m4
+ movhlps m4, m0
+ mova [dstq+strideq*1], m0
+%if cpuflag(ssse3)
+ pshufb m0, m5
+%else
+ psrldq m0, 1
+ por m0, m5
+%endif
+ mova [dstq+stride9q ], m4
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_dl_32x32, 4, 5, 8, dst, stride, cnt, a, dst16
+ mova m0, [aq]
+ mova m1, [aq+16]
+ PALIGNR m2, m1, m0, 1, m4
+ PALIGNR m3, m1, m0, 2, m4
+ LOWPASS 0, 2, 3, 4
+%if cpuflag(ssse3)
+ mova m5, [pb_1toE_2xF]
+ pshufb m2, m1, m5
+ pshufb m3, m2, m5
+ pshufb m6, m1, [pb_15]
+ mova m7, m6
+%else
+ pand m5, m1, [pb_15x0_1xm1] ; _______________F
+ psrldq m2, m1, 1 ; 123456789ABCDEF_
+ por m2, m5 ; 123456789ABCDEFF
+ psrldq m3, m2, 1 ; 23456789ABCDEFF_
+ por m3, m5 ; 23456789ABCDEFFF
+ pshufhw m7, m2, q3333 ; xxxxxxxxFFFFFFFF
+ pshufd m6, m7, q3333
+%endif
+ LOWPASS 1, 2, 3, 4
+ lea dst16q, [dstq +strideq*8]
+ mov cntd, 8
+ lea dst16q, [dst16q+strideq*8]
+.loop:
+ movhlps m7, m1
+ mova [dstq +strideq*0+ 0], m0
+ mova [dstq +strideq*0+16], m1
+ movhps [dstq+strideq*8+ 0], m0
+ movq [dstq +strideq*8+ 8], m1
+ mova [dstq +strideq*8+16], m7
+ mova [dst16q+strideq*0+ 0], m1
+ mova [dst16q+strideq*0+16], m6
+ mova [dst16q+strideq*8+ 0], m7
+ mova [dst16q+strideq*8+16], m6
+%if cpuflag(avx)
+ vpalignr m0, m1, m0, 1
+ pshufb m1, m5
+%elif cpuflag(ssse3)
+ palignr m2, m1, m0, 1
+ pshufb m1, m5
+ mova m0, m2
+%else
+ mova m4, m1
+ psrldq m0, 1
+ pslldq m4, 15
+ psrldq m1, 1
+ por m0, m4
+ por m1, m5
+%endif
+ add dstq, strideq
+ add dst16q, strideq
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+DL_XMM_FUNCS
+INIT_XMM ssse3
+DL_XMM_FUNCS
+INIT_XMM avx
+DL_XMM_FUNCS
+
+; dr
+
+%macro DR_MMX_FUNCS 0
+cglobal vp9_ipred_dr_4x4, 4, 4, 0, dst, stride, l, a
+ movd m0, [lq]
+ punpckldq m0, [aq-1]
+ movd m1, [aq+3]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ PALIGNR m1, m0, 1, m3
+ psrlq m2, m1, 8
+ LOWPASS 0, 1, 2, 3
+
+ movd [dstq+stride3q ], m0
+ psrlq m0, 8
+ movd [dstq+strideq*2], m0
+ psrlq m0, 8
+ movd [dstq+strideq*1], m0
+ psrlq m0, 8
+ movd [dstq+strideq*0], m0
+ RET
+%endmacro
+
+INIT_MMX mmxext
+DR_MMX_FUNCS
+INIT_MMX ssse3
+DR_MMX_FUNCS
+
+%macro DR_XMM_FUNCS 0
+cglobal vp9_ipred_dr_8x8, 4, 4, 4, dst, stride, l, a
+ movq m1, [lq]
+ movhps m1, [aq-1]
+ movd m2, [aq+7]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pslldq m0, m1, 1
+ PALIGNR m2, m1, 1, m3
+ LOWPASS 0, 1, 2, 3
+
+ movhps [dstq+strideq*0], m0
+ pslldq m0, 1
+ movhps [dstq+strideq*1], m0
+ pslldq m0, 1
+ movhps [dstq+strideq*2], m0
+ pslldq m0, 1
+ movhps [dstq+stride3q ], m0
+ pslldq m0, 1
+ lea dstq, [dstq+strideq*4]
+ movhps [dstq+strideq*0], m0
+ pslldq m0, 1
+ movhps [dstq+strideq*1], m0
+ pslldq m0, 1
+ movhps [dstq+strideq*2], m0
+ pslldq m0, 1
+ movhps [dstq+stride3q ], m0
+ RET
+
+cglobal vp9_ipred_dr_16x16, 4, 4, 6, dst, stride, l, a
+ mova m1, [lq]
+ movu m2, [aq-1]
+ movd m4, [aq+15]
+ DEFINE_ARGS dst, stride, stride9, cnt
+ lea stride9q, [strideq *3]
+ mov cntd, 4
+ lea stride9q, [stride9q*3]
+ PALIGNR m4, m2, 1, m5
+ PALIGNR m3, m2, m1, 15, m5
+ LOWPASS 3, 2, 4, 5
+ pslldq m0, m1, 1
+ PALIGNR m2, m1, 1, m4
+ LOWPASS 0, 1, 2, 4
+
+.loop:
+ mova [dstq+strideq*0 ], m3
+ movhps [dstq+strideq*8+0], m0
+ movq [dstq+strideq*8+8], m3
+ PALIGNR m3, m0, 15, m1
+ pslldq m0, 1
+ mova [dstq+strideq*1 ], m3
+ movhps [dstq+stride9q +0], m0
+ movq [dstq+stride9q +8], m3
+ PALIGNR m3, m0, 15, m1
+ pslldq m0, 1
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_dr_32x32, 4, 4, 8, dst, stride, l, a
+ mova m1, [lq]
+ mova m2, [lq+16]
+ movu m3, [aq-1]
+ movu m4, [aq+15]
+ movd m5, [aq+31]
+ DEFINE_ARGS dst, stride, stride8, cnt
+ lea stride8q, [strideq*8]
+ PALIGNR m5, m4, 1, m7
+ PALIGNR m6, m4, m3, 15, m7
+ LOWPASS 5, 4, 6, 7
+ PALIGNR m4, m3, 1, m7
+ PALIGNR m6, m3, m2, 15, m7
+ LOWPASS 4, 3, 6, 7
+ PALIGNR m3, m2, 1, m7
+ PALIGNR m6, m2, m1, 15, m7
+ LOWPASS 3, 2, 6, 7
+ PALIGNR m2, m1, 1, m6
+ pslldq m0, m1, 1
+ LOWPASS 2, 1, 0, 6
+ mov cntd, 16
+
+ ; out=m2/m3/m4/m5
+.loop:
+ mova [dstq+stride8q*0+ 0], m4
+ mova [dstq+stride8q*0+16], m5
+ mova [dstq+stride8q*2+ 0], m3
+ mova [dstq+stride8q*2+16], m4
+ PALIGNR m5, m4, 15, m6
+ PALIGNR m4, m3, 15, m6
+ PALIGNR m3, m2, 15, m6
+ pslldq m2, 1
+ add dstq, strideq
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+DR_XMM_FUNCS
+INIT_XMM ssse3
+DR_XMM_FUNCS
+INIT_XMM avx
+DR_XMM_FUNCS
+
+; vl
+
+INIT_MMX mmxext
+cglobal vp9_ipred_vl_4x4, 4, 4, 0, dst, stride, l, a
+ movq m0, [aq]
+ psrlq m1, m0, 8
+ psrlq m2, m1, 8
+ LOWPASS 2, 1, 0, 3
+ pavgb m1, m0
+ movd [dstq+strideq*0], m1
+ movd [dstq+strideq*1], m2
+ lea dstq, [dstq+strideq*2]
+ psrlq m1, 8
+ psrlq m2, 8
+ movd [dstq+strideq*0], m1
+ movd [dstq+strideq*1], m2
+ RET
+
+%macro VL_XMM_FUNCS 0
+cglobal vp9_ipred_vl_8x8, 4, 4, 4, dst, stride, l, a
+ movq m0, [aq]
+%if cpuflag(ssse3)
+ pshufb m0, [pb_0to6_9x7]
+%else
+ punpcklbw m1, m0, m0
+ punpckhwd m1, m1
+ shufps m0, m1, q3310
+%endif
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ psrldq m1, m0, 1
+ psrldq m2, m0, 2
+ LOWPASS 2, 1, 0, 3
+ pavgb m1, m0
+
+ movq [dstq+strideq*0], m1
+ movq [dstq+strideq*1], m2
+ psrldq m1, 1
+ psrldq m2, 1
+ movq [dstq+strideq*2], m1
+ movq [dstq+stride3q ], m2
+ lea dstq, [dstq+strideq*4]
+ psrldq m1, 1
+ psrldq m2, 1
+ movq [dstq+strideq*0], m1
+ movq [dstq+strideq*1], m2
+ psrldq m1, 1
+ psrldq m2, 1
+ movq [dstq+strideq*2], m1
+ movq [dstq+stride3q ], m2
+ RET
+
+cglobal vp9_ipred_vl_16x16, 4, 4, 5, dst, stride, l, a
+ mova m0, [aq]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+%if cpuflag(ssse3)
+ mova m4, [pb_1toE_2xF]
+ pshufb m1, m0, m4
+ pshufb m2, m1, m4
+%else
+ pand m4, m0, [pb_15x0_1xm1] ; _______________F
+ psrldq m1, m0, 1 ; 123456789ABCDEF_
+ por m1, m4 ; 123456789ABCDEFF
+ psrldq m2, m1, 1 ; 23456789ABCDEFF_
+ por m2, m4 ; 23456789ABCDEFFF
+%endif
+ LOWPASS 2, 1, 0, 3
+ pavgb m1, m0
+ mov cntd, 4
+.loop:
+ mova [dstq+strideq*0], m1
+ mova [dstq+strideq*1], m2
+%if cpuflag(ssse3)
+ pshufb m1, m4
+ pshufb m2, m4
+%else
+ psrldq m1, 1
+ psrldq m2, 1
+ por m1, m4
+ por m2, m4
+%endif
+ mova [dstq+strideq*2], m1
+ mova [dstq+stride3q ], m2
+%if cpuflag(ssse3)
+ pshufb m1, m4
+ pshufb m2, m4
+%else
+ psrldq m1, 1
+ psrldq m2, 1
+ por m1, m4
+ por m2, m4
+%endif
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_vl_32x32, 4, 4, 7, dst, stride, l, a
+ mova m0, [aq]
+ mova m5, [aq+16]
+ DEFINE_ARGS dst, stride, dst16, cnt
+ PALIGNR m2, m5, m0, 1, m4
+ PALIGNR m3, m5, m0, 2, m4
+ lea dst16q, [dstq +strideq*8]
+ LOWPASS 3, 2, 0, 6
+ pavgb m2, m0
+%if cpuflag(ssse3)
+ mova m4, [pb_1toE_2xF]
+ pshufb m0, m5, m4
+ pshufb m1, m0, m4
+%else
+ pand m4, m5, [pb_15x0_1xm1] ; _______________F
+ psrldq m0, m5, 1 ; 123456789ABCDEF_
+ por m0, m4 ; 123456789ABCDEFF
+ psrldq m1, m0, 1 ; 23456789ABCDEFF_
+ por m1, m4 ; 23456789ABCDEFFF
+%endif
+ lea dst16q, [dst16q+strideq*8]
+ LOWPASS 1, 0, 5, 6
+ pavgb m0, m5
+%if cpuflag(ssse3)
+ pshufb m5, [pb_15]
+%else
+ punpckhbw m5, m4, m4
+ pshufhw m5, m5, q3333
+ punpckhqdq m5, m5
+%endif
+ mov cntd, 8
+
+.loop:
+%macro %%write 3
+ mova [dstq+stride%1+ 0], %2
+ mova [dstq+stride%1+16], %3
+ movhps [dst16q+stride%1 ], %2
+ movu [dst16q+stride%1+ 8], %3
+ movq [dst16q+stride%1+24], m5
+%if cpuflag(avx)
+ palignr %2, %3, %2, 1
+ pshufb %3, m4
+%elif cpuflag(ssse3)
+ palignr m6, %3, %2, 1
+ pshufb %3, m4
+ mova %2, m6
+%else
+ pslldq m6, %3, 15
+ psrldq %3, 1
+ psrldq %2, 1
+ por %3, m4
+ por %2, m6
+%endif
+%endmacro
+
+ %%write q*0, m2, m0
+ %%write q*1, m3, m1
+ lea dstq, [dstq +strideq*2]
+ lea dst16q, [dst16q+strideq*2]
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+VL_XMM_FUNCS
+INIT_XMM ssse3
+VL_XMM_FUNCS
+INIT_XMM avx
+VL_XMM_FUNCS
+
+; vr
+
+%macro VR_MMX_FUNCS 0
+cglobal vp9_ipred_vr_4x4, 4, 4, 0, dst, stride, l, a
+ movq m1, [aq-1]
+ punpckldq m2, [lq]
+ movd m0, [aq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pavgb m0, m1
+ PALIGNR m1, m2, 5, m3
+ psrlq m2, m1, 8
+ psllq m3, m1, 8
+ LOWPASS 2, 1, 3, 4
+
+ ; ABCD <- for the following predictor:
+ ; EFGH
+ ; IABC | m0 contains ABCDxxxx
+ ; JEFG | m2 contains xJIEFGHx
+
+%if cpuflag(ssse3)
+ punpckldq m0, m2
+ pshufb m2, [pb_13456_3xm1]
+ movd [dstq+strideq*0], m0
+ pshufb m0, [pb_6012_4xm1]
+ movd [dstq+stride3q ], m2
+ psrlq m2, 8
+ movd [dstq+strideq*2], m0
+ movd [dstq+strideq*1], m2
+%else
+ psllq m1, m2, 40
+ psrlq m2, 24
+ movd [dstq+strideq*0], m0
+ movd [dstq+strideq*1], m2
+ PALIGNR m0, m1, 7, m3
+ psllq m1, 8
+ PALIGNR m2, m1, 7, m3
+ movd [dstq+strideq*2], m0
+ movd [dstq+stride3q ], m2
+%endif
+ RET
+%endmacro
+
+INIT_MMX mmxext
+VR_MMX_FUNCS
+INIT_MMX ssse3
+VR_MMX_FUNCS
+
+%macro VR_XMM_FUNCS 1 ; n_xmm_regs for 16x16
+cglobal vp9_ipred_vr_8x8, 4, 4, 5, dst, stride, l, a
+ movu m1, [aq-1]
+ movhps m2, [lq]
+ movq m0, [aq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pavgb m0, m1
+ PALIGNR m1, m2, 9, m3
+ pslldq m2, m1, 1
+ pslldq m3, m1, 2
+ LOWPASS 1, 2, 3, 4
+
+ ; ABCDEFGH <- for the following predictor:
+ ; IJKLMNOP
+ ; QABCDEFG | m0 contains ABCDEFGHxxxxxxxx
+ ; RIJKLMNO | m1 contains xxVUTSRQIJKLMNOP
+ ; SQABCDEF
+ ; TRIJKLMN
+ ; USQABCDE
+ ; VTRIJKLM
+
+%if cpuflag(ssse3)
+ punpcklqdq m0, m1 ; ABCDEFGHxxVUTSRQ
+%endif
+ movq [dstq+strideq*0], m0
+ movhps [dstq+strideq*1], m1
+%if cpuflag(ssse3)
+ pshufb m0, [pb_6xm1_BDF_0to6] ; xxxxxxUSQABCDEFG
+ pshufb m1, [pb_6xm1_246_8toE] ; xxxxxxVTRIJKLMNO
+%else
+ psrlw m2, m1, 8 ; x_U_S_Q_xxxxxxxx
+ pand m3, m1, [pw_255] ; x_V_T_R_xxxxxxxx
+ packuswb m3, m2 ; xVTRxxxxxUSQxxxx
+ pslldq m3, 4 ; xxxxxVTRxxxxxUSQ
+ PALIGNR m0, m3, 7, m4 ; xxxxxxUSQABCDEFG
+ psrldq m1, 8
+ pslldq m3, 8
+ PALIGNR m1, m3, 7, m4 ; xxxxxxVTRIJKLMNO
+%endif
+ movhps [dstq+strideq*2], m0
+ movhps [dstq+stride3q ], m1
+ lea dstq, [dstq+strideq*4]
+ pslldq m0, 1
+ pslldq m1, 1
+ movhps [dstq+strideq*0], m0
+ movhps [dstq+strideq*1], m1
+ pslldq m0, 1
+ pslldq m1, 1
+ movhps [dstq+strideq*2], m0
+ movhps [dstq+stride3q ], m1
+ RET
+
+cglobal vp9_ipred_vr_16x16, 4, 4, %1, dst, stride, l, a
+ mova m0, [aq]
+ movu m1, [aq-1]
+ mova m2, [lq]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ PALIGNR m3, m1, m2, 15, m6
+ LOWPASS 3, 1, 0, 4
+ pavgb m0, m1
+ PALIGNR m1, m2, 1, m6
+ pslldq m4, m2, 1
+ LOWPASS 1, 2, 4, 5
+%if cpuflag(ssse3)
+ pshufb m1, [pb_02468ACE_13579BDF]
+%else
+ psrlw m5, m1, 8
+ pand m1, [pw_255]
+ packuswb m1, m5
+%endif
+ mov cntd, 4
+
+.loop:
+ movlhps m2, m1
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m3
+ PALIGNR m4, m0, m1, 15, m6
+ PALIGNR m5, m3, m2, 15, m6
+ mova [dstq+strideq*2], m4
+ mova [dstq+stride3q ], m5
+ lea dstq, [dstq+strideq*4]
+ PALIGNR m0, m1, 14, m6
+ PALIGNR m3, m2, 14, m6
+ pslldq m1, 2
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_vr_32x32, 4, 4, 9, dst, stride, l, a
+ mova m0, [aq]
+ mova m2, [aq+16]
+ movu m1, [aq-1]
+ PALIGNR m3, m2, m0, 15, m6
+ PALIGNR m4, m2, m0, 14, m6
+ LOWPASS 4, 3, 2, 5
+ pavgb m3, m2
+ mova m2, [lq+16]
+ PALIGNR m5, m1, m2, 15, m6
+ LOWPASS 5, 1, 0, 6
+ pavgb m0, m1
+ mova m6, [lq]
+%if ARCH_X86_64
+ SWAP 0, 8
+%else
+ mova [dstq], m0
+%endif
+ PALIGNR m1, m2, 1, m0
+ PALIGNR m7, m2, m6, 15, m0
+ LOWPASS 1, 2, 7, 0
+ PALIGNR m2, m6, 1, m0
+ pslldq m7, m6, 1
+ LOWPASS 2, 6, 7, 0
+%if cpuflag(ssse3)
+ pshufb m1, [pb_02468ACE_13579BDF]
+ pshufb m2, [pb_02468ACE_13579BDF]
+%else
+ psrlw m0, m1, 8
+ psrlw m6, m2, 8
+ pand m1, [pw_255]
+ pand m2, [pw_255]
+ packuswb m1, m0
+ packuswb m2, m6
+%endif
+ DEFINE_ARGS dst, stride, dst16, cnt
+ lea dst16q, [dstq +strideq*8]
+ lea dst16q, [dst16q+strideq*8]
+ SBUTTERFLY qdq, 2, 1, 6
+%if ARCH_X86_64
+ SWAP 0, 8
+%else
+ mova m0, [dstq]
+%endif
+ mov cntd, 8
+
+.loop:
+ ; even lines (0, 2, 4, ...): m1 | m0, m3
+ ; odd lines (1, 3, 5, ...): m2 | m5, m4
+%macro %%write 4
+ mova [dstq+stride%1+ 0], %3
+ mova [dstq+stride%1+16], %4
+ movhps [dst16q+stride%1 ], %2
+ movu [dst16q+stride%1+ 8], %3
+ movq [dst16q+stride%1+24], %4
+ PALIGNR %4, %3, 15, m6
+ PALIGNR %3, %2, 15, m6
+ pslldq %2, 1
+%endmacro
+
+ %%write q*0, m1, m0, m3
+ %%write q*1, m2, m5, m4
+ lea dstq, [dstq +strideq*2]
+ lea dst16q, [dst16q+strideq*2]
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+VR_XMM_FUNCS 7
+INIT_XMM ssse3
+VR_XMM_FUNCS 6
+INIT_XMM avx
+VR_XMM_FUNCS 6
+
+; hd
+
+INIT_MMX mmxext
+cglobal vp9_ipred_hd_4x4, 4, 4, 0, dst, stride, l, a
+ movd m0, [lq]
+ punpckldq m0, [aq-1]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ psrlq m1, m0, 8
+ psrlq m2, m1, 8
+ LOWPASS 2, 1, 0, 3
+ pavgb m1, m0
+
+ ; DHIJ <- for the following predictor:
+ ; CGDH
+ ; BFCG | m1 contains ABCDxxxx
+ ; AEBF | m2 contains EFGHIJxx
+
+ punpcklbw m1, m2
+ punpckhdq m0, m1, m2
+
+ ; m1 contains AEBFCGDH
+ ; m0 contains CGDHIJxx
+
+ movd [dstq+stride3q ], m1
+ movd [dstq+strideq*1], m0
+ psrlq m1, 16
+ psrlq m0, 16
+ movd [dstq+strideq*2], m1
+ movd [dstq+strideq*0], m0
+ RET
+
+%macro HD_XMM_FUNCS 0
+cglobal vp9_ipred_hd_8x8, 4, 4, 5, dst, stride, l, a
+ movq m0, [lq]
+ movhps m0, [aq-1]
+ DEFINE_ARGS dst, stride, stride3, dst4
+ lea stride3q, [strideq*3]
+ lea dst4q, [dstq+strideq*4]
+ psrldq m1, m0, 1
+ psrldq m2, m1, 1
+ LOWPASS 2, 1, 0, 3
+ pavgb m1, m0
+
+ ; HPQRSTUV <- for the following predictor
+ ; GOHPQRST
+ ; FNGOHPQR | m1 contains ABCDEFGHxxxxxxxx
+ ; EMFNGOHP | m2 contains IJKLMNOPQRSTUVxx
+ ; DLEMFNGO
+ ; CKDLEMFN
+ ; BJCKDLEM
+ ; AIBJCKDL
+
+ punpcklbw m1, m2
+ movhlps m2, m2
+
+ ; m1 contains AIBJCKDLEMFNGOHP
+ ; m2 contains QRSTUVxxxxxxxxxx
+
+ movhps [dstq +stride3q ], m1
+ movq [dst4q+stride3q ], m1
+ PALIGNR m3, m2, m1, 2, m4
+ movhps [dstq +strideq*2], m3
+ movq [dst4q+strideq*2], m3
+ PALIGNR m3, m2, m1, 4, m4
+ movhps [dstq +strideq*1], m3
+ movq [dst4q+strideq*1], m3
+ PALIGNR m2, m1, 6, m4
+ movhps [dstq +strideq*0], m2
+ movq [dst4q+strideq*0], m2
+ RET
+
+cglobal vp9_ipred_hd_16x16, 4, 6, 7, dst, stride, l, a
+ mova m0, [lq]
+ movu m3, [aq-1]
+ DEFINE_ARGS dst, stride, stride4, dst4, dst8, dst12
+ lea stride4q, [strideq*4]
+ lea dst4q, [dstq +stride4q]
+ lea dst8q, [dst4q+stride4q]
+ lea dst12q, [dst8q+stride4q]
+ psrldq m4, m3, 1
+ psrldq m5, m3, 2
+ LOWPASS 5, 4, 3, 6
+ PALIGNR m1, m3, m0, 1, m6
+ PALIGNR m2, m3, m0, 2, m6
+ LOWPASS 2, 1, 0, 6
+ pavgb m1, m0
+ SBUTTERFLY bw, 1, 2, 6
+
+ ; I PROBABLY INVERTED L0 and L16 here

+ ; m1, m2, m5
+.loop:
+ sub stride4q, strideq
+ movhps [dstq +stride4q +0], m2
+ movq [dstq +stride4q +8], m5
+ mova [dst4q+stride4q ], m2
+ movhps [dst8q+stride4q +0], m1
+ movq [dst8q+stride4q +8], m2
+ mova [dst12q+stride4q ], m1
+%if cpuflag(avx)
+ palignr m1, m2, m1, 2
+ palignr m2, m5, m2, 2
+%elif cpuflag(ssse3)
+ palignr m3, m2, m1, 2
+ palignr m0, m5, m2, 2
+ mova m1, m3
+ mova m2, m0
+%else
+ ; slightly modified version of PALIGNR
+ mova m6, m2
+ mova m4, m5
+ pslldq m6, 14
+ pslldq m4, 14
+ psrldq m1, 2
+ psrldq m2, 2
+ por m1, m6
+ por m2, m4
+%endif
+ psrldq m5, 2
+ jg .loop
+ RET
+
+cglobal vp9_ipred_hd_32x32, 4, 6, 8, dst, stride, l, a
+ mova m0, [lq]
+ mova m1, [lq+16]
+ movu m2, [aq-1]
+ movu m3, [aq+15]
+ DEFINE_ARGS dst, stride, stride8, dst8, dst16, dst24
+ lea stride8q, [strideq*8]
+ lea dst8q, [dstq +stride8q]
+ lea dst16q, [dst8q +stride8q]
+ lea dst24q, [dst16q+stride8q]
+ psrldq m4, m3, 1
+ psrldq m5, m3, 2
+ LOWPASS 5, 4, 3, 6
+ PALIGNR m4, m3, m2, 2, m6
+ PALIGNR m3, m2, 1, m6
+ LOWPASS 4, 3, 2, 6
+ PALIGNR m3, m2, m1, 2, m6
+ PALIGNR m2, m1, 1, m6
+ LOWPASS 3, 2, 1, 6
+ pavgb m2, m1
+ PALIGNR m6, m1, m0, 1, m7
+ PALIGNR m1, m0, 2, m7
+ LOWPASS 1, 6, 0, 7
+ pavgb m0, m6
+ SBUTTERFLY bw, 2, 3, 6
+ SBUTTERFLY bw, 0, 1, 6
+
+ ; m0, m1, m2, m3, m4, m5
+.loop:
+ sub stride8q, strideq
+ mova [dstq +stride8q+ 0], m3
+ mova [dstq +stride8q+16], m4
+ mova [dst8q +stride8q+ 0], m2
+ mova [dst8q +stride8q+16], m3
+ mova [dst16q+stride8q+ 0], m1
+ mova [dst16q+stride8q+16], m2
+ mova [dst24q+stride8q+ 0], m0
+ mova [dst24q+stride8q+16], m1
+%if cpuflag(avx)
+ palignr m0, m1, m0, 2
+ palignr m1, m2, m1, 2
+ palignr m2, m3, m2, 2
+ palignr m3, m4, m3, 2
+ palignr m4, m5, m4, 2
+ psrldq m5, 2
+%elif cpuflag(ssse3)
+ psrldq m6, m5, 2
+ palignr m5, m4, 2
+ palignr m4, m3, 2
+ palignr m3, m2, 2
+ palignr m2, m1, 2
+ palignr m1, m0, 2
+ mova m0, m1
+ mova m1, m2
+ mova m2, m3
+ mova m3, m4
+ mova m4, m5
+ mova m5, m6
+%else
+ ; sort of a half-integrated version of PALIGNR
+ pslldq m7, m4, 14
+ pslldq m6, m5, 14
+ psrldq m4, 2
+ psrldq m5, 2
+ por m4, m6
+ pslldq m6, m3, 14
+ psrldq m3, 2
+ por m3, m7
+ pslldq m7, m2, 14
+ psrldq m2, 2
+ por m2, m6
+ pslldq m6, m1, 14
+ psrldq m1, 2
+ por m1, m7
+ psrldq m0, 2
+ por m0, m6
+%endif
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+HD_XMM_FUNCS
+INIT_XMM ssse3
+HD_XMM_FUNCS
+INIT_XMM avx
+HD_XMM_FUNCS
+
+%macro HU_MMX_FUNCS 0
+cglobal vp9_ipred_hu_4x4, 3, 3, 0, dst, stride, l
+ movd m0, [lq]
+%if cpuflag(ssse3)
+ pshufb m0, [pb_0to2_5x3]
+%else
+ punpcklbw m1, m0, m0 ; 00112233
+ pshufw m1, m1, q3333 ; 33333333
+ punpckldq m0, m1 ; 01233333
+%endif
+ psrlq m1, m0, 8
+ psrlq m2, m1, 8
+ LOWPASS 2, 1, 0, 3
+ pavgb m1, m0
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ SBUTTERFLY bw, 1, 2, 0
+ PALIGNR m2, m1, 2, m0
+ movd [dstq+strideq*0], m1
+ movd [dstq+strideq*1], m2
+ punpckhdq m1, m1
+ punpckhdq m2, m2
+ movd [dstq+strideq*2], m1
+ movd [dstq+stride3q ], m2
+ RET
+%endmacro
+
+INIT_MMX mmxext
+HU_MMX_FUNCS
+INIT_MMX ssse3
+HU_MMX_FUNCS
+
+%macro HU_XMM_FUNCS 1 ; n_xmm_regs in hu_32x32
+cglobal vp9_ipred_hu_8x8, 3, 4, 4, dst, stride, l
+ movq m0, [lq]
+%if cpuflag(ssse3)
+ pshufb m0, [pb_0to6_9x7]
+%else
+ punpcklbw m1, m0, m0 ; 0011223344556677
+ punpckhwd m1, m1 ; 4444555566667777
+ shufps m0, m1, q3310 ; 0123456777777777
+%endif
+ psrldq m1, m0, 1
+ psrldq m2, m1, 1
+ LOWPASS 2, 1, 0, 3
+ pavgb m1, m0
+ DEFINE_ARGS dst, stride, stride3, dst4
+ lea stride3q, [strideq*3]
+ lea dst4q, [dstq+strideq*4]
+ SBUTTERFLY bw, 1, 2, 0
+ movq [dstq +strideq*0], m1
+ movhps [dst4q+strideq*0], m1
+ PALIGNR m0, m2, m1, 2, m3
+ movq [dstq +strideq*1], m0
+ movhps [dst4q+strideq*1], m0
+ PALIGNR m0, m2, m1, 4, m3
+ movq [dstq +strideq*2], m0
+ movhps [dst4q+strideq*2], m0
+ PALIGNR m2, m1, 6, m3
+ movq [dstq +stride3q ], m2
+ movhps [dst4q+stride3q ], m2
+ RET
+
+cglobal vp9_ipred_hu_16x16, 3, 4, 5, dst, stride, l
+ mova m0, [lq]
+%if cpuflag(ssse3)
+ mova m3, [pb_2toE_3xF]
+ pshufb m1, m0, [pb_1toE_2xF]
+ pshufb m2, m0, m3
+%else
+ pand m3, m0, [pb_15x0_1xm1]
+ psrldq m1, m0, 1
+ por m1, m3
+ punpckhbw m3, m3
+ psrldq m2, m0, 2
+ por m2, m3
+%endif
+ LOWPASS 2, 1, 0, 4
+ pavgb m1, m0
+ DEFINE_ARGS dst, stride, stride9, cnt
+ lea stride9q, [strideq*8+strideq]
+ mov cntd, 4
+ SBUTTERFLY bw, 1, 2, 0
+
+.loop:
+ mova [dstq+strideq*0], m1
+ mova [dstq+strideq*8], m2
+ PALIGNR m0, m2, m1, 2, m4
+%if cpuflag(ssse3)
+ pshufb m2, m3
+%else
+ psrldq m2, 2
+ por m2, m3
+%endif
+ mova [dstq+strideq*1], m0
+ mova [dstq+stride9q ], m2
+ PALIGNR m1, m2, m0, 2, m4
+%if cpuflag(ssse3)
+ pshufb m2, m3
+%else
+ psrldq m2, 2
+ por m2, m3
+%endif
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_hu_32x32, 3, 7, %1, dst, stride, l
+ mova m1, [lq]
+ mova m0, [lq+16]
+ PALIGNR m2, m0, m1, 1, m5
+ PALIGNR m3, m0, m1, 2, m5
+ LOWPASS 3, 2, 1, 5
+ pavgb m2, m1
+%if cpuflag(ssse3)
+ mova m4, [pb_2toE_3xF]
+ pshufb m5, m0, [pb_1toE_2xF]
+ pshufb m1, m0, m4
+%else
+ pand m4, m0, [pb_15x0_1xm1]
+ psrldq m5, m0, 1
+ por m5, m4
+ punpckhbw m4, m4
+ psrldq m1, m0, 2
+ por m1, m4
+%endif
+ LOWPASS 1, 5, 0, 6
+ pavgb m0, m5
+ DEFINE_ARGS dst, stride, cnt, stride0, dst8, dst16, dst24
+ mov cntd, 8
+ xor stride0q, stride0q
+ lea dst8q, [dstq +strideq*8]
+ lea dst16q, [dst8q +strideq*8]
+ lea dst24q, [dst16q+strideq*8]
+ SBUTTERFLY bw, 0, 1, 5
+ SBUTTERFLY bw, 2, 3, 5
+%if cpuflag(ssse3)
+ pshufb m6, m1, [pb_15]
+%else
+ pshufhw m6, m4, q3333
+ punpckhqdq m6, m6
+%endif
+
+.loop:
+ mova [dstq +stride0q+ 0], m2
+ mova [dstq +stride0q+16], m3
+ mova [dst8q +stride0q+ 0], m3
+ mova [dst8q +stride0q+16], m0
+ mova [dst16q+stride0q+ 0], m0
+ mova [dst16q+stride0q+16], m1
+ mova [dst24q+stride0q+ 0], m1
+ mova [dst24q+stride0q+16], m6
+%if cpuflag(avx)
+ palignr m2, m3, m2, 2
+ palignr m3, m0, m3, 2
+ palignr m0, m1, m0, 2
+ pshufb m1, m4
+%elif cpuflag(ssse3)
+ pshufb m5, m1, m4
+ palignr m1, m0, 2
+ palignr m0, m3, 2
+ palignr m3, m2, 2
+ mova m2, m3
+ mova m3, m0
+ mova m0, m1
+ mova m1, m5
+%else
+ ; half-integrated version of PALIGNR
+ pslldq m5, m1, 14
+ pslldq m7, m0, 14
+ psrldq m1, 2
+ psrldq m0, 2
+ por m1, m4
+ por m0, m5
+ pslldq m5, m3, 14
+ psrldq m3, 2
+ por m3, m7
+ psrldq m2, 2
+ por m2, m5
+%endif
+ add stride0q, strideq
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+HU_XMM_FUNCS 8
+INIT_XMM ssse3
+HU_XMM_FUNCS 7
+INIT_XMM avx
+HU_XMM_FUNCS 7
+
+; FIXME 127, 128, 129 ?
diff --git a/media/ffvpx/libavcodec/x86/vp9intrapred_16bpp.asm b/media/ffvpx/libavcodec/x86/vp9intrapred_16bpp.asm
new file mode 100644
index 0000000000..808056a809
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9intrapred_16bpp.asm
@@ -0,0 +1,2497 @@
+;******************************************************************************
+;* VP9 Intra prediction SIMD optimizations
+;*
+;* Copyright (c) 2015 Ronald S. Bultje <rsbultje gmail com>
+;* Copyright (c) 2015 Henrik Gramner <henrik gramner com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA 32
+
+pd_2: times 8 dd 2
+pd_4: times 8 dd 4
+pd_8: times 8 dd 8
+
+pb_2to15_14_15: db 2,3,4,5,6,7,8,9,10,11,12,13,14,15,14,15
+pb_4_5_8to13_8x0: db 4,5,8,9,10,11,12,13,0,0,0,0,0,0,0,0
+pb_0to7_67x4: db 0,1,2,3,4,5,6,7,6,7,6,7,6,7,6,7
+
+cextern pw_1
+cextern pw_1023
+cextern pw_4095
+cextern pd_16
+cextern pd_32
+cextern pd_65535
+
+; FIXME most top-only functions (ddl, vl, v, dc_top) can be modified to take
+; only 3 registers on x86-32, which would make it one cycle faster, but that
+; would make the code quite a bit uglier...
+
+SECTION .text
+
+%macro SCRATCH 3-4
+%if ARCH_X86_64
+ SWAP %1, %2
+%if %0 == 4
+%define reg_%4 m%2
+%endif
+%else
+ mova [%3], m%1
+%if %0 == 4
+%define reg_%4 [%3]
+%endif
+%endif
+%endmacro
+
+%macro UNSCRATCH 3-4
+%if ARCH_X86_64
+ SWAP %1, %2
+%else
+ mova m%1, [%3]
+%endif
+%if %0 == 4
+%undef reg_%4
+%endif
+%endmacro
+
+%macro PRELOAD 2-3
+%if ARCH_X86_64
+ mova m%1, [%2]
+%if %0 == 3
+%define reg_%3 m%1
+%endif
+%elif %0 == 3
+%define reg_%3 [%2]
+%endif
+%endmacro
+
+INIT_MMX mmx
+cglobal vp9_ipred_v_4x4_16, 2, 4, 1, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ RET
+
+INIT_XMM sse
+cglobal vp9_ipred_v_8x8_16, 2, 4, 1, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ RET
+
+INIT_XMM sse
+cglobal vp9_ipred_v_16x16_16, 2, 4, 2, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq]
+ mova m1, [aq+mmsize]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ mov cntd, 4
+.loop:
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m1
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m1
+ mova [dstq+strideq*2+ 0], m0
+ mova [dstq+strideq*2+16], m1
+ mova [dstq+stride3q + 0], m0
+ mova [dstq+stride3q +16], m1
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+
+INIT_XMM sse
+cglobal vp9_ipred_v_32x32_16, 2, 4, 4, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq+mmsize*0]
+ mova m1, [aq+mmsize*1]
+ mova m2, [aq+mmsize*2]
+ mova m3, [aq+mmsize*3]
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 16
+.loop:
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m1
+ mova [dstq+strideq*0+32], m2
+ mova [dstq+strideq*0+48], m3
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m1
+ mova [dstq+strideq*1+32], m2
+ mova [dstq+strideq*1+48], m3
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jg .loop
+ RET
+
+INIT_MMX mmxext
+cglobal vp9_ipred_h_4x4_16, 3, 3, 4, dst, stride, l, a
+ mova m3, [lq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pshufw m0, m3, q3333
+ pshufw m1, m3, q2222
+ pshufw m2, m3, q1111
+ pshufw m3, m3, q0000
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m1
+ mova [dstq+strideq*2], m2
+ mova [dstq+stride3q ], m3
+ RET
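
As the shuffles above show, the left-edge buffer is passed bottom-to-top: row 0 of the block takes the last word of l[]. A scalar sketch of the 4x4 horizontal fill under that layout, with an illustrative helper name and stride counted in pixels:

    #include <stddef.h>
    #include <stdint.h>

    /* 'left' is stored bottom-to-top, as in the asm above. */
    static void h_4x4(uint16_t *dst, ptrdiff_t stride, const uint16_t *left)
    {
        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++)
                dst[y * stride + x] = left[3 - y];
    }
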
+
+INIT_XMM sse2
+cglobal vp9_ipred_h_8x8_16, 3, 3, 4, dst, stride, l, a
+ mova m2, [lq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ punpckhwd m3, m2, m2
+ pshufd m0, m3, q3333
+ pshufd m1, m3, q2222
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m1
+ pshufd m0, m3, q1111
+ pshufd m1, m3, q0000
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m1
+ lea dstq, [dstq+strideq*4]
+ punpcklwd m2, m2
+ pshufd m0, m2, q3333
+ pshufd m1, m2, q2222
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m1
+ pshufd m0, m2, q1111
+ pshufd m1, m2, q0000
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m1
+ RET
+
+INIT_XMM sse2
+cglobal vp9_ipred_h_16x16_16, 3, 5, 4, dst, stride, l, stride3, cnt
+ mov cntd, 3
+ lea stride3q, [strideq*3]
+.loop:
+ movh m3, [lq+cntq*8]
+ punpcklwd m3, m3
+ pshufd m0, m3, q3333
+ pshufd m1, m3, q2222
+ pshufd m2, m3, q1111
+ pshufd m3, m3, q0000
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m0
+ mova [dstq+strideq*1+ 0], m1
+ mova [dstq+strideq*1+16], m1
+ mova [dstq+strideq*2+ 0], m2
+ mova [dstq+strideq*2+16], m2
+ mova [dstq+stride3q + 0], m3
+ mova [dstq+stride3q +16], m3
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jge .loop
+ RET
+
+INIT_XMM sse2
+cglobal vp9_ipred_h_32x32_16, 3, 5, 4, dst, stride, l, stride3, cnt
+ mov cntd, 7
+ lea stride3q, [strideq*3]
+.loop:
+ movh m3, [lq+cntq*8]
+ punpcklwd m3, m3
+ pshufd m0, m3, q3333
+ pshufd m1, m3, q2222
+ pshufd m2, m3, q1111
+ pshufd m3, m3, q0000
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m0
+ mova [dstq+strideq*0+32], m0
+ mova [dstq+strideq*0+48], m0
+ mova [dstq+strideq*1+ 0], m1
+ mova [dstq+strideq*1+16], m1
+ mova [dstq+strideq*1+32], m1
+ mova [dstq+strideq*1+48], m1
+ mova [dstq+strideq*2+ 0], m2
+ mova [dstq+strideq*2+16], m2
+ mova [dstq+strideq*2+32], m2
+ mova [dstq+strideq*2+48], m2
+ mova [dstq+stride3q + 0], m3
+ mova [dstq+stride3q +16], m3
+ mova [dstq+stride3q +32], m3
+ mova [dstq+stride3q +48], m3
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jge .loop
+ RET
+
+INIT_MMX mmxext
+cglobal vp9_ipred_dc_4x4_16, 4, 4, 2, dst, stride, l, a
+ mova m0, [lq]
+ paddw m0, [aq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pmaddwd m0, [pw_1]
+ pshufw m1, m0, q3232
+ paddd m0, [pd_4]
+ paddd m0, m1
+ psrad m0, 3
+ pshufw m0, m0, q0000
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ RET
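
For reference, the 4x4 DC routine above sums the four left and four above samples, adds the rounding term from pd_4 and shifts right by 3 before broadcasting. A scalar sketch of that computation; the helper name is illustrative and stride is counted in pixels:

    #include <stddef.h>
    #include <stdint.h>

    static void dc_4x4(uint16_t *dst, ptrdiff_t stride,
                       const uint16_t *left, const uint16_t *above)
    {
        int sum = 4;                               /* rounding term (pd_4) */
        for (int i = 0; i < 4; i++)
            sum += left[i] + above[i];
        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++)
                dst[y * stride + x] = sum >> 3;    /* psrad m0, 3 */
    }
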
+
+INIT_XMM sse2
+cglobal vp9_ipred_dc_8x8_16, 4, 4, 2, dst, stride, l, a
+ mova m0, [lq]
+ paddw m0, [aq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pmaddwd m0, [pw_1]
+ pshufd m1, m0, q3232
+ paddd m0, m1
+ pshufd m1, m0, q1111
+ paddd m0, [pd_8]
+ paddd m0, m1
+ psrad m0, 4
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ RET
+
+INIT_XMM sse2
+cglobal vp9_ipred_dc_16x16_16, 4, 4, 2, dst, stride, l, a
+ mova m0, [lq]
+ paddw m0, [lq+mmsize]
+ paddw m0, [aq]
+ paddw m0, [aq+mmsize]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ mov cntd, 4
+ pmaddwd m0, [pw_1]
+ pshufd m1, m0, q3232
+ paddd m0, m1
+ pshufd m1, m0, q1111
+ paddd m0, [pd_16]
+ paddd m0, m1
+ psrad m0, 5
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+.loop:
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m0
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m0
+ mova [dstq+strideq*2+ 0], m0
+ mova [dstq+strideq*2+16], m0
+ mova [dstq+stride3q + 0], m0
+ mova [dstq+stride3q +16], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+
+INIT_XMM sse2
+cglobal vp9_ipred_dc_32x32_16, 4, 4, 2, dst, stride, l, a
+ mova m0, [lq+mmsize*0]
+ paddw m0, [lq+mmsize*1]
+ paddw m0, [lq+mmsize*2]
+ paddw m0, [lq+mmsize*3]
+ paddw m0, [aq+mmsize*0]
+ paddw m0, [aq+mmsize*1]
+ paddw m0, [aq+mmsize*2]
+ paddw m0, [aq+mmsize*3]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ mov cntd, 16
+ pmaddwd m0, [pw_1]
+ pshufd m1, m0, q3232
+ paddd m0, m1
+ pshufd m1, m0, q1111
+ paddd m0, [pd_32]
+ paddd m0, m1
+ psrad m0, 6
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+.loop:
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m0
+ mova [dstq+strideq*0+32], m0
+ mova [dstq+strideq*0+48], m0
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m0
+ mova [dstq+strideq*1+32], m0
+ mova [dstq+strideq*1+48], m0
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jg .loop
+ RET
+
+%macro DC_1D_FNS 2
+INIT_MMX mmxext
+cglobal vp9_ipred_dc_%1_4x4_16, 4, 4, 2, dst, stride, l, a
+ mova m0, [%2]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pmaddwd m0, [pw_1]
+ pshufw m1, m0, q3232
+ paddd m0, [pd_2]
+ paddd m0, m1
+ psrad m0, 2
+ pshufw m0, m0, q0000
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ RET
+
+INIT_XMM sse2
+cglobal vp9_ipred_dc_%1_8x8_16, 4, 4, 2, dst, stride, l, a
+ mova m0, [%2]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pmaddwd m0, [pw_1]
+ pshufd m1, m0, q3232
+ paddd m0, m1
+ pshufd m1, m0, q1111
+ paddd m0, [pd_4]
+ paddd m0, m1
+ psrad m0, 3
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ RET
+
+INIT_XMM sse2
+cglobal vp9_ipred_dc_%1_16x16_16, 4, 4, 2, dst, stride, l, a
+ mova m0, [%2]
+ paddw m0, [%2+mmsize]
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ mov cntd, 4
+ pmaddwd m0, [pw_1]
+ pshufd m1, m0, q3232
+ paddd m0, m1
+ pshufd m1, m0, q1111
+ paddd m0, [pd_8]
+ paddd m0, m1
+ psrad m0, 4
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+.loop:
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m0
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m0
+ mova [dstq+strideq*2+ 0], m0
+ mova [dstq+strideq*2+16], m0
+ mova [dstq+stride3q + 0], m0
+ mova [dstq+stride3q +16], m0
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+
+INIT_XMM sse2
+cglobal vp9_ipred_dc_%1_32x32_16, 4, 4, 2, dst, stride, l, a
+ mova m0, [%2+mmsize*0]
+ paddw m0, [%2+mmsize*1]
+ paddw m0, [%2+mmsize*2]
+ paddw m0, [%2+mmsize*3]
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 16
+ pmaddwd m0, [pw_1]
+ pshufd m1, m0, q3232
+ paddd m0, m1
+ pshufd m1, m0, q1111
+ paddd m0, [pd_16]
+ paddd m0, m1
+ psrad m0, 5
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+.loop:
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m0
+ mova [dstq+strideq*0+32], m0
+ mova [dstq+strideq*0+48], m0
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m0
+ mova [dstq+strideq*1+32], m0
+ mova [dstq+strideq*1+48], m0
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+DC_1D_FNS top, aq
+DC_1D_FNS left, lq
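
The two instantiations above generate the dc_top and dc_left variants, which average a single edge. For the 4x4 case the broadcast value reduces to the sketch below (illustrative helper, either edge can be passed):

    #include <stdint.h>

    static int dc_1d_4x4(const uint16_t *edge)
    {
        return (edge[0] + edge[1] + edge[2] + edge[3] + 2) >> 2;  /* pd_2, psrad 2 */
    }
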
+
+INIT_MMX mmxext
+cglobal vp9_ipred_tm_4x4_10, 4, 4, 6, dst, stride, l, a
+ mova m5, [pw_1023]
+.body:
+ mova m4, [aq]
+ mova m3, [lq]
+ movd m0, [aq-4]
+ pshufw m0, m0, q1111
+ psubw m4, m0
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ pshufw m0, m3, q3333
+ pshufw m1, m3, q2222
+ pshufw m2, m3, q1111
+ pshufw m3, m3, q0000
+ paddw m0, m4
+ paddw m1, m4
+ paddw m2, m4
+ paddw m3, m4
+ pxor m4, m4
+ pmaxsw m0, m4
+ pmaxsw m1, m4
+ pmaxsw m2, m4
+ pmaxsw m3, m4
+ pminsw m0, m5
+ pminsw m1, m5
+ pminsw m2, m5
+ pminsw m3, m5
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m1
+ mova [dstq+strideq*2], m2
+ mova [dstq+stride3q ], m3
+ RET
+
+cglobal vp9_ipred_tm_4x4_12, 4, 4, 6, dst, stride, l, a
+ mova m5, [pw_4095]
+ jmp mangle(private_prefix %+ _ %+ vp9_ipred_tm_4x4_10 %+ SUFFIX).body
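
The TM (true-motion) routines above compute clip(above[x] + left[y] - topleft), with the clamp taken from pw_1023 or pw_4095. A scalar sketch of the 10-bit 4x4 case; the helper name is illustrative, stride is counted in pixels, above[-1] is assumed addressable as the top-left sample, and left[] is indexed bottom-to-top as elsewhere in this file:

    #include <stddef.h>
    #include <stdint.h>

    static void tm_4x4_10(uint16_t *dst, ptrdiff_t stride,
                          const uint16_t *left, const uint16_t *above)
    {
        const int max = 1023;          /* pw_1023; the _12 entry uses 4095 */
        int tl = above[-1];            /* top-left word loaded from [aq-4] */
        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++) {
                int v = above[x] + left[3 - y] - tl;
                dst[y * stride + x] = v < 0 ? 0 : v > max ? max : v;
            }
    }
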
+
+INIT_XMM sse2
+cglobal vp9_ipred_tm_8x8_10, 4, 5, 7, dst, stride, l, a
+ mova m4, [pw_1023]
+.body:
+ pxor m6, m6
+ mova m5, [aq]
+ movd m0, [aq-4]
+ pshuflw m0, m0, q1111
+ punpcklqdq m0, m0
+ psubw m5, m0
+ DEFINE_ARGS dst, stride, l, stride3, cnt
+ lea stride3q, [strideq*3]
+ mov cntd, 1
+.loop:
+ movh m3, [lq+cntq*8]
+ punpcklwd m3, m3
+ pshufd m0, m3, q3333
+ pshufd m1, m3, q2222
+ pshufd m2, m3, q1111
+ pshufd m3, m3, q0000
+ paddw m0, m5
+ paddw m1, m5
+ paddw m2, m5
+ paddw m3, m5
+ pmaxsw m0, m6
+ pmaxsw m1, m6
+ pmaxsw m2, m6
+ pmaxsw m3, m6
+ pminsw m0, m4
+ pminsw m1, m4
+ pminsw m2, m4
+ pminsw m3, m4
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m1
+ mova [dstq+strideq*2], m2
+ mova [dstq+stride3q ], m3
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jge .loop
+ RET
+
+cglobal vp9_ipred_tm_8x8_12, 4, 5, 7, dst, stride, l, a
+ mova m4, [pw_4095]
+ jmp mangle(private_prefix %+ _ %+ vp9_ipred_tm_8x8_10 %+ SUFFIX).body
+
+INIT_XMM sse2
+cglobal vp9_ipred_tm_16x16_10, 4, 4, 8, dst, stride, l, a
+ mova m7, [pw_1023]
+.body:
+ pxor m6, m6
+ mova m4, [aq]
+ mova m5, [aq+mmsize]
+ movd m0, [aq-4]
+ pshuflw m0, m0, q1111
+ punpcklqdq m0, m0
+ psubw m4, m0
+ psubw m5, m0
+ DEFINE_ARGS dst, stride, l, cnt
+ mov cntd, 7
+.loop:
+ movd m3, [lq+cntq*4]
+ punpcklwd m3, m3
+ pshufd m2, m3, q1111
+ pshufd m3, m3, q0000
+ paddw m0, m2, m4
+ paddw m2, m5
+ paddw m1, m3, m4
+ paddw m3, m5
+ pmaxsw m0, m6
+ pmaxsw m2, m6
+ pmaxsw m1, m6
+ pmaxsw m3, m6
+ pminsw m0, m7
+ pminsw m2, m7
+ pminsw m1, m7
+ pminsw m3, m7
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m2
+ mova [dstq+strideq*1+ 0], m1
+ mova [dstq+strideq*1+16], m3
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jge .loop
+ RET
+
+cglobal vp9_ipred_tm_16x16_12, 4, 4, 8, dst, stride, l, a
+ mova m7, [pw_4095]
+ jmp mangle(private_prefix %+ _ %+ vp9_ipred_tm_16x16_10 %+ SUFFIX).body
+
+INIT_XMM sse2
+cglobal vp9_ipred_tm_32x32_10, 4, 4, 10, 32 * -ARCH_X86_32, dst, stride, l, a
+ mova m0, [pw_1023]
+.body:
+ pxor m1, m1
+%if ARCH_X86_64
+ SWAP 0, 8
+ SWAP 1, 9
+%define reg_min m9
+%define reg_max m8
+%else
+ mova [rsp+ 0], m0
+ mova [rsp+16], m1
+%define reg_min [rsp+16]
+%define reg_max [rsp+ 0]
+%endif
+
+ mova m4, [aq+mmsize*0]
+ mova m5, [aq+mmsize*1]
+ mova m6, [aq+mmsize*2]
+ mova m7, [aq+mmsize*3]
+ movd m0, [aq-4]
+ pshuflw m0, m0, q1111
+ punpcklqdq m0, m0
+ psubw m4, m0
+ psubw m5, m0
+ psubw m6, m0
+ psubw m7, m0
+ DEFINE_ARGS dst, stride, l, cnt
+ mov cntd, 31
+.loop:
+ pinsrw m3, [lq+cntq*2], 0
+ punpcklwd m3, m3
+ pshufd m3, m3, q0000
+ paddw m0, m3, m4
+ paddw m1, m3, m5
+ paddw m2, m3, m6
+ paddw m3, m7
+ pmaxsw m0, reg_min
+ pmaxsw m1, reg_min
+ pmaxsw m2, reg_min
+ pmaxsw m3, reg_min
+ pminsw m0, reg_max
+ pminsw m1, reg_max
+ pminsw m2, reg_max
+ pminsw m3, reg_max
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m1
+ mova [dstq+strideq*0+32], m2
+ mova [dstq+strideq*0+48], m3
+ add dstq, strideq
+ dec cntd
+ jge .loop
+ RET
+
+cglobal vp9_ipred_tm_32x32_12, 4, 4, 10, 32 * -ARCH_X86_32, dst, stride, l, a
+ mova m0, [pw_4095]
+ jmp mangle(private_prefix %+ _ %+ vp9_ipred_tm_32x32_10 %+ SUFFIX).body
+
+; Directional intra prediction functions
+;
+; in the functions below, 'abcdefgh' refers to above data (sometimes simply
+; abbreviated as a[N-M]). 'stuvwxyz' refers to left data (sometimes simply
+; abbreviated as l[N-M]). * is top-left data. ABCDEFG or A[N-M] is filtered
+; above data, STUVWXYZ or L[N-M] is filtered left data, and # is filtered
+; top-left data.
+
+; left=(left+2*center+right+2)>>2
+%macro LOWPASS 3 ; left [dst], center, right
+ paddw m%1, m%3
+ psraw m%1, 1
+ pavgw m%1, m%2
+%endmacro
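
The macro matches the formula in the comment exactly: pavgw rounds up, so (((l + r) >> 1) + c + 1) >> 1 equals (l + 2*c + r + 2) >> 2 for the non-negative 10/12-bit samples used here. A scalar reference sketch, not part of the patch:

    static int lowpass(int left, int center, int right)
    {
        return (left + 2 * center + right + 2) >> 2;
    }
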
+
+; abcdefgh (src) -> bcdefghh (dst)
+; dst/src can be the same register
+%macro SHIFT_RIGHT 2-3 [pb_2to15_14_15] ; dst, src, [ssse3_shift_reg]
+%if cpuflag(ssse3)
+ pshufb %1, %2, %3 ; abcdefgh -> bcdefghh
+%else
+ psrldq %1, %2, 2 ; abcdefgh -> bcdefgh.
+ pshufhw %1, %1, q2210 ; bcdefgh. -> bcdefghh
+%endif
+%endmacro
+
+; abcdefgh (src) -> bcdefghh (dst1) and cdefghhh (dst2)
+%macro SHIFT_RIGHTx2 3-4 [pb_2to15_14_15] ; dst1, dst2, src, [ssse3_shift_reg]
+%if cpuflag(ssse3)
+ pshufb %1, %3, %4 ; abcdefgh -> bcdefghh
+ pshufb %2, %1, %4 ; bcdefghh -> cdefghhh
+%else
+ psrldq %1, %3, 2 ; abcdefgh -> bcdefgh.
+ psrldq %2, %3, 4 ; abcdefgh -> cdefgh..
+ pshufhw %1, %1, q2210 ; bcdefgh. -> bcdefghh
+ pshufhw %2, %2, q1110 ; cdefgh.. -> cdefghhh
+%endif
+%endmacro
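
Both macros implement the edge replication described in their comments. A scalar model of one SHIFT_RIGHT step, assuming eight 16-bit samples per register:

    #include <stdint.h>

    /* Drop the first sample and replicate the last one: abcdefgh -> bcdefghh. */
    static void shift_right(uint16_t dst[8], const uint16_t src[8])
    {
        for (int i = 0; i < 7; i++)
            dst[i] = src[i + 1];
        dst[7] = src[7];
    }
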
+
+%macro DL_FUNCS 0
+cglobal vp9_ipred_dl_4x4_16, 2, 4, 3, dst, stride, l, a
+ movifnidn aq, amp
+ movu m1, [aq] ; abcdefgh
+ pshufhw m0, m1, q3310 ; abcdefhh
+ SHIFT_RIGHT m1, m1 ; bcdefghh
+ psrldq m2, m1, 2 ; cdefghh.
+ LOWPASS 0, 1, 2 ; BCDEFGh.
+ pshufd m1, m0, q3321 ; DEFGh...
+ movh [dstq+strideq*0], m0
+ movh [dstq+strideq*2], m1
+ add dstq, strideq
+ psrldq m0, 2 ; CDEFGh..
+ psrldq m1, 2 ; EFGh....
+ movh [dstq+strideq*0], m0
+ movh [dstq+strideq*2], m1
+ RET
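
The 4x4 down-left routine above filters the eight above samples and reads one value per output diagonal, reusing the last sample in the bottom-right corner. A scalar sketch under those assumptions; the helper name is illustrative and stride is counted in pixels:

    #include <stddef.h>
    #include <stdint.h>

    static void dl_4x4(uint16_t *dst, ptrdiff_t stride, const uint16_t *above)
    {
        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++) {
                int i = x + y;
                dst[y * stride + x] = i + 2 < 8
                    ? (above[i] + 2 * above[i + 1] + above[i + 2] + 2) >> 2
                    : above[7];    /* bottom-right corner keeps the last sample */
            }
    }
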
+
+cglobal vp9_ipred_dl_8x8_16, 2, 4, 5, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq] ; abcdefgh
+%if cpuflag(ssse3)
+ mova m4, [pb_2to15_14_15]
+%endif
+ SHIFT_RIGHTx2 m1, m2, m0, m4 ; bcdefghh/cdefghhh
+ LOWPASS 0, 1, 2 ; BCDEFGHh
+ shufps m1, m0, m2, q3332 ; FGHhhhhh
+ shufps m3, m0, m1, q2121 ; DEFGHhhh
+ DEFINE_ARGS dst, stride, stride5
+ lea stride5q, [strideq*5]
+
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*4], m1
+ SHIFT_RIGHT m0, m0, m4 ; CDEFGHhh
+ pshuflw m1, m1, q3321 ; GHhhhhhh
+ pshufd m2, m0, q3321 ; EFGHhhhh
+ mova [dstq+strideq*1], m0
+ mova [dstq+stride5q ], m1
+ lea dstq, [dstq+strideq*2]
+ pshuflw m1, m1, q3321 ; Hhhhhhhh
+ mova [dstq+strideq*0], m3
+ mova [dstq+strideq*4], m1
+ pshuflw m1, m1, q3321 ; hhhhhhhh
+ mova [dstq+strideq*1], m2
+ mova [dstq+stride5q ], m1
+ RET
+
+cglobal vp9_ipred_dl_16x16_16, 2, 4, 5, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq] ; abcdefgh
+ mova m3, [aq+mmsize] ; ijklmnop
+ PALIGNR m1, m3, m0, 2, m4 ; bcdefghi
+ PALIGNR m2, m3, m0, 4, m4 ; cdefghij
+ LOWPASS 0, 1, 2 ; BCDEFGHI
+%if cpuflag(ssse3)
+ mova m4, [pb_2to15_14_15]
+%endif
+ SHIFT_RIGHTx2 m2, m1, m3, m4 ; jklmnopp/klmnoppp
+ LOWPASS 1, 2, 3 ; JKLMNOPp
+ pshufd m2, m2, q3333 ; pppppppp
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 8
+
+.loop:
+ mova [dstq+strideq*0+ 0], m0
+ mova [dstq+strideq*0+16], m1
+ mova [dstq+strideq*8+ 0], m1
+ mova [dstq+strideq*8+16], m2
+ add dstq, strideq
+%if cpuflag(avx)
+ vpalignr m0, m1, m0, 2
+%else
+ PALIGNR m3, m1, m0, 2, m4
+ mova m0, m3
+%endif
+ SHIFT_RIGHT m1, m1, m4
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_dl_32x32_16, 2, 5, 7, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq+mmsize*0] ; abcdefgh
+ mova m1, [aq+mmsize*1] ; ijklmnop
+ mova m2, [aq+mmsize*2] ; qrstuvwx
+ mova m3, [aq+mmsize*3] ; yz012345
+ PALIGNR m4, m1, m0, 2, m6
+ PALIGNR m5, m1, m0, 4, m6
+ LOWPASS 0, 4, 5 ; BCDEFGHI
+ PALIGNR m4, m2, m1, 2, m6
+ PALIGNR m5, m2, m1, 4, m6
+ LOWPASS 1, 4, 5 ; JKLMNOPQ
+ PALIGNR m4, m3, m2, 2, m6
+ PALIGNR m5, m3, m2, 4, m6
+ LOWPASS 2, 4, 5 ; RSTUVWXY
+%if cpuflag(ssse3)
+ mova m6, [pb_2to15_14_15]
+%endif
+ SHIFT_RIGHTx2 m4, m5, m3, m6
+ LOWPASS 3, 4, 5 ; Z0123455
+ pshufd m4, m4, q3333 ; 55555555
+ DEFINE_ARGS dst, stride, stride8, stride24, cnt
+ mov cntd, 8
+ lea stride8q, [strideq*8]
+ lea stride24q, [stride8q*3]
+
+.loop:
+ mova [dstq+stride8q*0+ 0], m0
+ mova [dstq+stride8q*0+16], m1
+ mova [dstq+stride8q*0+32], m2
+ mova [dstq+stride8q*0+48], m3
+ mova [dstq+stride8q*1+ 0], m1
+ mova [dstq+stride8q*1+16], m2
+ mova [dstq+stride8q*1+32], m3
+ mova [dstq+stride8q*1+48], m4
+ mova [dstq+stride8q*2+ 0], m2
+ mova [dstq+stride8q*2+16], m3
+ mova [dstq+stride8q*2+32], m4
+ mova [dstq+stride8q*2+48], m4
+ mova [dstq+stride24q + 0], m3
+ mova [dstq+stride24q +16], m4
+ mova [dstq+stride24q +32], m4
+ mova [dstq+stride24q +48], m4
+ add dstq, strideq
+%if cpuflag(avx)
+ vpalignr m0, m1, m0, 2
+ vpalignr m1, m2, m1, 2
+ vpalignr m2, m3, m2, 2
+%else
+ PALIGNR m5, m1, m0, 2, m6
+ mova m0, m5
+ PALIGNR m5, m2, m1, 2, m6
+ mova m1, m5
+ PALIGNR m5, m3, m2, 2, m6
+ mova m2, m5
+%endif
+ SHIFT_RIGHT m3, m3, m6
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+DL_FUNCS
+INIT_XMM ssse3
+DL_FUNCS
+INIT_XMM avx
+DL_FUNCS
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+cglobal vp9_ipred_dl_16x16_16, 2, 4, 5, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq] ; abcdefghijklmnop
+ vpbroadcastw xm1, [aq+30] ; pppppppp
+ vperm2i128 m2, m0, m1, q0201 ; ijklmnoppppppppp
+ vpalignr m3, m2, m0, 2 ; bcdefghijklmnopp
+ vpalignr m4, m2, m0, 4 ; cdefghijklmnoppp
+ LOWPASS 0, 3, 4 ; BCDEFGHIJKLMNOPp
+ vperm2i128 m2, m0, m1, q0201 ; JKLMNOPppppppppp
+ DEFINE_ARGS dst, stride, stride3, cnt
+ mov cntd, 2
+ lea stride3q, [strideq*3]
+
+.loop:
+ mova [dstq+strideq*0], m0
+ vpalignr m3, m2, m0, 2
+ vpalignr m4, m2, m0, 4
+ mova [dstq+strideq*1], m3
+ mova [dstq+strideq*2], m4
+ vpalignr m3, m2, m0, 6
+ vpalignr m4, m2, m0, 8
+ mova [dstq+stride3q ], m3
+ lea dstq, [dstq+strideq*4]
+ mova [dstq+strideq*0], m4
+ vpalignr m3, m2, m0, 10
+ vpalignr m4, m2, m0, 12
+ mova [dstq+strideq*1], m3
+ mova [dstq+strideq*2], m4
+ vpalignr m3, m2, m0, 14
+ mova [dstq+stride3q ], m3
+ lea dstq, [dstq+strideq*4]
+ mova m0, m2
+ vperm2i128 m2, m2, m2, q0101 ; pppppppppppppppp
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_dl_32x32_16, 2, 6, 7, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq+mmsize*0+ 0] ; abcdefghijklmnop
+ mova m1, [aq+mmsize*1+ 0] ; qrstuvwxyz012345
+ vpbroadcastw xm4, [aq+mmsize*1+30] ; 55555555
+ vperm2i128 m5, m0, m1, q0201 ; ijklmnopqrstuvwx
+ vpalignr m2, m5, m0, 2 ; bcdefghijklmnopq
+ vpalignr m3, m5, m0, 4 ; cdefghijklmnopqr
+ LOWPASS 0, 2, 3 ; BCDEFGHIJKLMNOPQ
+ vperm2i128 m5, m1, m4, q0201 ; yz01234555555555
+ vpalignr m2, m5, m1, 2 ; rstuvwxyz0123455
+ vpalignr m3, m5, m1, 4 ; stuvwxyz01234555
+ LOWPASS 1, 2, 3 ; RSTUVWXYZ......5
+ vperm2i128 m2, m1, m4, q0201 ; Z......555555555
+ vperm2i128 m5, m0, m1, q0201 ; JKLMNOPQRSTUVWXY
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ mov cntd, 4
+
+.loop:
+ mova [dstq+strideq*0 + 0], m0
+ mova [dstq+strideq*0 +32], m1
+ vpalignr m3, m5, m0, 2
+ vpalignr m4, m2, m1, 2
+ mova [dstq+strideq*1 + 0], m3
+ mova [dstq+strideq*1 +32], m4
+ vpalignr m3, m5, m0, 4
+ vpalignr m4, m2, m1, 4
+ mova [dstq+strideq*2 + 0], m3
+ mova [dstq+strideq*2 +32], m4
+ vpalignr m3, m5, m0, 6
+ vpalignr m4, m2, m1, 6
+ mova [dstq+stride3q*1+ 0], m3
+ mova [dstq+stride3q*1+32], m4
+ lea dstq, [dstq+strideq*4]
+ vpalignr m3, m5, m0, 8
+ vpalignr m4, m2, m1, 8
+ mova [dstq+strideq*0 + 0], m3
+ mova [dstq+strideq*0 +32], m4
+ vpalignr m3, m5, m0, 10
+ vpalignr m4, m2, m1, 10
+ mova [dstq+strideq*1 + 0], m3
+ mova [dstq+strideq*1 +32], m4
+ vpalignr m3, m5, m0, 12
+ vpalignr m4, m2, m1, 12
+ mova [dstq+strideq*2+ 0], m3
+ mova [dstq+strideq*2+32], m4
+ vpalignr m3, m5, m0, 14
+ vpalignr m4, m2, m1, 14
+ mova [dstq+stride3q+ 0], m3
+ mova [dstq+stride3q+ 32], m4
+ vpalignr m3, m5, m0, 16
+ vpalignr m4, m2, m1, 16
+ vperm2i128 m5, m3, m4, q0201
+ vperm2i128 m2, m4, m4, q0101
+ mova m0, m3
+ mova m1, m4
+ lea dstq, [dstq+strideq*4]
+ dec cntd
+ jg .loop
+ RET
+%endif
+
+%macro DR_FUNCS 1 ; stack_mem_for_32x32_32bit_function
+cglobal vp9_ipred_dr_4x4_16, 4, 4, 3, dst, stride, l, a
+ movh m0, [lq] ; wxyz....
+ movhps m0, [aq-2] ; wxyz*abc
+ movd m1, [aq+6] ; d.......
+ PALIGNR m1, m0, 2, m2 ; xyz*abcd
+ psrldq m2, m1, 2 ; yz*abcd.
+ LOWPASS 0, 1, 2 ; XYZ#ABC.
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+
+ movh [dstq+stride3q ], m0
+ psrldq m0, 2 ; YZ#ABC..
+ movh [dstq+strideq*2], m0
+ psrldq m0, 2 ; Z#ABC...
+ movh [dstq+strideq*1], m0
+ psrldq m0, 2 ; #ABC....
+ movh [dstq+strideq*0], m0
+ RET
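
The 4x4 down-right routine above concatenates left (bottom-to-top), top-left and above into one 9-sample edge, filters it, and copies one filtered value per output diagonal. A scalar sketch of that construction; the helper name is illustrative, stride is counted in pixels, and above[-1] is assumed addressable as the top-left sample:

    #include <stddef.h>
    #include <stdint.h>

    static void dr_4x4(uint16_t *dst, ptrdiff_t stride,
                       const uint16_t *left, const uint16_t *above)
    {
        uint16_t e[9], f[7];
        for (int i = 0; i < 4; i++)
            e[i] = left[i];                       /* w x y z (bottom-to-top) */
        e[4] = above[-1];                         /* top-left '*' */
        for (int i = 0; i < 4; i++)
            e[5 + i] = above[i];                  /* a b c d */
        for (int i = 0; i < 7; i++)               /* X Y Z # A B C */
            f[i] = (e[i] + 2 * e[i + 1] + e[i + 2] + 2) >> 2;
        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++)
                dst[y * stride + x] = f[3 - y + x];
    }
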
+
+cglobal vp9_ipred_dr_8x8_16, 4, 4, 5, dst, stride, l, a
+ mova m0, [lq] ; stuvwxyz
+ movu m1, [aq-2] ; *abcdefg
+ mova m2, [aq] ; abcdefgh
+ psrldq m3, m2, 2 ; bcdefgh.
+ LOWPASS 3, 2, 1 ; ABCDEFG.
+ PALIGNR m1, m0, 2, m4 ; tuvwxyz*
+ PALIGNR m2, m1, 2, m4 ; uvwxyz*a
+ LOWPASS 2, 1, 0 ; TUVWXYZ#
+ DEFINE_ARGS dst, stride, dst4, stride3
+ lea stride3q, [strideq*3]
+ lea dst4q, [dstq+strideq*4]
+
+ movhps [dstq +stride3q +0], m2
+ movh [dstq+ stride3q +8], m3
+ mova [dst4q+stride3q +0], m2
+ PALIGNR m1, m3, m2, 2, m0
+ psrldq m3, 2
+ movhps [dstq +strideq*2+0], m1
+ movh [dstq+ strideq*2+8], m3
+ mova [dst4q+strideq*2+0], m1
+ PALIGNR m2, m3, m1, 2, m0
+ psrldq m3, 2
+ movhps [dstq +strideq*1+0], m2
+ movh [dstq+ strideq*1+8], m3
+ mova [dst4q+strideq*1+0], m2
+ PALIGNR m1, m3, m2, 2, m0
+ psrldq m3, 2
+ movhps [dstq +strideq*0+0], m1
+ movh [dstq+ strideq*0+8], m3
+ mova [dst4q+strideq*0+0], m1
+ RET
+
+cglobal vp9_ipred_dr_16x16_16, 4, 4, 7, dst, stride, l, a
+ mova m0, [lq] ; klmnopqr
+ mova m1, [lq+mmsize] ; stuvwxyz
+ movu m2, [aq-2] ; *abcdefg
+ movu m3, [aq+mmsize-2] ; hijklmno
+ mova m4, [aq] ; abcdefgh
+ mova m5, [aq+mmsize] ; ijklmnop
+ psrldq m6, m5, 2 ; jklmnop.
+ LOWPASS 6, 5, 3 ; IJKLMNO.
+ PALIGNR m5, m4, 2, m3 ; bcdefghi
+ LOWPASS 5, 4, 2 ; ABCDEFGH
+ PALIGNR m2, m1, 2, m3 ; tuvwxyz*
+ PALIGNR m4, m2, 2, m3 ; uvwxyz*a
+ LOWPASS 4, 2, 1 ; TUVWXYZ#
+ PALIGNR m1, m0, 2, m3 ; lmnopqrs
+ PALIGNR m2, m1, 2, m3 ; mnopqrst
+ LOWPASS 2, 1, 0 ; LMNOPQRS
+ DEFINE_ARGS dst, stride, dst8, cnt
+ lea dst8q, [dstq+strideq*8]
+ mov cntd, 8
+
+.loop:
+ sub dst8q, strideq
+ mova [dst8q+strideq*0+ 0], m4
+ mova [dst8q+strideq*0+16], m5
+ mova [dst8q+strideq*8+ 0], m2
+ mova [dst8q+strideq*8+16], m4
+%if cpuflag(avx)
+ vpalignr m2, m4, m2, 2
+ vpalignr m4, m5, m4, 2
+ vpalignr m5, m6, m5, 2
+%else
+ PALIGNR m0, m4, m2, 2, m1
+ mova m2, m0
+ PALIGNR m0, m5, m4, 2, m1
+ mova m4, m0
+ PALIGNR m0, m6, m5, 2, m1
+ mova m5, m0
+%endif
+ psrldq m6, 2
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_dr_32x32_16, 4, 5, 10 + notcpuflag(ssse3), \
+ %1 * ARCH_X86_32 * -mmsize, dst, stride, l, a
+ mova m0, [aq+mmsize*3] ; a[24-31]
+ movu m1, [aq+mmsize*3-2] ; a[23-30]
+ psrldq m2, m0, 2 ; a[25-31].
+ LOWPASS 2, 0, 1 ; A[24-30].
+ mova m1, [aq+mmsize*2] ; a[16-23]
+ movu m3, [aq+mmsize*2-2] ; a[15-22]
+ PALIGNR m0, m1, 2, m4 ; a[17-24]
+ LOWPASS 0, 1, 3 ; A[16-23]
+ mova m3, [aq+mmsize*1] ; a[8-15]
+ movu m4, [aq+mmsize*1-2] ; a[7-14]
+ PALIGNR m1, m3, 2, m5 ; a[9-16]
+ LOWPASS 1, 3, 4 ; A[8-15]
+ mova m4, [aq+mmsize*0] ; a[0-7]
+ movu m5, [aq+mmsize*0-2] ; *a[0-6]
+ PALIGNR m3, m4, 2, m6 ; a[1-8]
+ LOWPASS 3, 4, 5 ; A[0-7]
+ SCRATCH 1, 8, rsp+0*mmsize
+ SCRATCH 3, 9, rsp+1*mmsize
+%if notcpuflag(ssse3)
+ SCRATCH 0, 10, rsp+2*mmsize
+%endif
+ mova m6, [lq+mmsize*3] ; l[24-31]
+ PALIGNR m5, m6, 2, m0 ; l[25-31]*
+ PALIGNR m4, m5, 2, m0 ; l[26-31]*a
+ LOWPASS 4, 5, 6 ; L[25-31]#
+ mova m7, [lq+mmsize*2] ; l[16-23]
+ PALIGNR m6, m7, 2, m0 ; l[17-24]
+ PALIGNR m5, m6, 2, m0 ; l[18-25]
+ LOWPASS 5, 6, 7 ; L[17-24]
+ mova m1, [lq+mmsize*1] ; l[8-15]
+ PALIGNR m7, m1, 2, m0 ; l[9-16]
+ PALIGNR m6, m7, 2, m0 ; l[10-17]
+ LOWPASS 6, 7, 1 ; L[9-16]
+ mova m3, [lq+mmsize*0] ; l[0-7]
+ PALIGNR m1, m3, 2, m0 ; l[1-8]
+ PALIGNR m7, m1, 2, m0 ; l[2-9]
+ LOWPASS 7, 1, 3 ; L[1-8]
+%if cpuflag(ssse3)
+%if cpuflag(avx)
+ UNSCRATCH 1, 8, rsp+0*mmsize
+%endif
+ UNSCRATCH 3, 9, rsp+1*mmsize
+%else
+ UNSCRATCH 0, 10, rsp+2*mmsize
+%endif
+ DEFINE_ARGS dst8, stride, stride8, stride24, cnt
+ lea stride8q, [strideq*8]
+ lea stride24q, [stride8q*3]
+ lea dst8q, [dst8q+strideq*8]
+ mov cntd, 8
+
+.loop:
+ sub dst8q, strideq
+%if notcpuflag(avx)
+ UNSCRATCH 1, 8, rsp+0*mmsize
+%if notcpuflag(ssse3)
+ UNSCRATCH 3, 9, rsp+1*mmsize
+%endif
+%endif
+ mova [dst8q+stride8q*0+ 0], m4
+ mova [dst8q+stride8q*0+16], m3
+ mova [dst8q+stride8q*0+32], m1
+ mova [dst8q+stride8q*0+48], m0
+ mova [dst8q+stride8q*1+ 0], m5
+ mova [dst8q+stride8q*1+16], m4
+ mova [dst8q+stride8q*1+32], m3
+ mova [dst8q+stride8q*1+48], m1
+ mova [dst8q+stride8q*2+ 0], m6
+ mova [dst8q+stride8q*2+16], m5
+ mova [dst8q+stride8q*2+32], m4
+ mova [dst8q+stride8q*2+48], m3
+ mova [dst8q+stride24q + 0], m7
+ mova [dst8q+stride24q +16], m6
+ mova [dst8q+stride24q +32], m5
+ mova [dst8q+stride24q +48], m4
+%if cpuflag(avx)
+ vpalignr m7, m6, m7, 2
+ vpalignr m6, m5, m6, 2
+ vpalignr m5, m4, m5, 2
+ vpalignr m4, m3, m4, 2
+ vpalignr m3, m1, m3, 2
+ vpalignr m1, m0, m1, 2
+ vpalignr m0, m2, m0, 2
+%else
+ SCRATCH 2, 8, rsp+0*mmsize
+%if notcpuflag(ssse3)
+ SCRATCH 0, 9, rsp+1*mmsize
+%endif
+ PALIGNR m2, m6, m7, 2, m0
+ mova m7, m2
+ PALIGNR m2, m5, m6, 2, m0
+ mova m6, m2
+ PALIGNR m2, m4, m5, 2, m0
+ mova m5, m2
+ PALIGNR m2, m3, m4, 2, m0
+ mova m4, m2
+ PALIGNR m2, m1, m3, 2, m0
+ mova m3, m2
+%if notcpuflag(ssse3)
+ UNSCRATCH 0, 9, rsp+1*mmsize
+ SCRATCH 3, 9, rsp+1*mmsize
+%endif
+ PALIGNR m2, m0, m1, 2, m3
+ mova m1, m2
+ UNSCRATCH 2, 8, rsp+0*mmsize
+ SCRATCH 1, 8, rsp+0*mmsize
+ PALIGNR m1, m2, m0, 2, m3
+ mova m0, m1
+%endif
+ psrldq m2, 2
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+DR_FUNCS 3
+INIT_XMM ssse3
+DR_FUNCS 2
+INIT_XMM avx
+DR_FUNCS 2
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+cglobal vp9_ipred_dr_16x16_16, 4, 5, 6, dst, stride, l, a
+ mova m0, [lq] ; klmnopqrstuvwxyz
+ movu m1, [aq-2] ; *abcdefghijklmno
+ mova m2, [aq] ; abcdefghijklmnop
+ vperm2i128 m4, m2, m2, q2001 ; ijklmnop........
+ vpalignr m5, m4, m2, 2 ; bcdefghijklmnop.
+ vperm2i128 m3, m0, m1, q0201 ; stuvwxyz*abcdefg
+ LOWPASS 1, 2, 5 ; ABCDEFGHIJKLMNO.
+ vpalignr m4, m3, m0, 2 ; lmnopqrstuvwxyz*
+ vpalignr m5, m3, m0, 4 ; mnopqrstuvwxyz*a
+ LOWPASS 0, 4, 5 ; LMNOPQRSTUVWXYZ#
+ vperm2i128 m5, m0, m1, q0201 ; TUVWXYZ#ABCDEFGH
+ DEFINE_ARGS dst, stride, stride3, stride5, dst3
+ lea dst3q, [dstq+strideq*4]
+ lea stride3q, [strideq*3]
+ lea stride5q, [stride3q+strideq*2]
+
+ vpalignr m3, m5, m0, 2
+ vpalignr m4, m1, m5, 2
+ mova [dst3q+stride5q*2], m3 ; 14
+ mova [ dstq+stride3q*2], m4 ; 6
+ vpalignr m3, m5, m0, 4
+ vpalignr m4, m1, m5, 4
+ sub dst3q, strideq
+ mova [dst3q+stride5q*2], m3 ; 13
+ mova [dst3q+strideq*2 ], m4 ; 5
+ mova [dst3q+stride3q*4], m0 ; 15
+ vpalignr m3, m5, m0, 6
+ vpalignr m4, m1, m5, 6
+ mova [dstq+stride3q*4], m3 ; 12
+ mova [dst3q+strideq*1], m4 ; 4
+ vpalignr m3, m5, m0, 8
+ vpalignr m4, m1, m5, 8
+ mova [dst3q+strideq*8], m3 ; 11
+ mova [dst3q+strideq*0], m4 ; 3
+ vpalignr m3, m5, m0, 10
+ vpalignr m4, m1, m5, 10
+ mova [dstq+stride5q*2], m3 ; 10
+ mova [dstq+strideq*2 ], m4 ; 2
+ vpalignr m3, m5, m0, 12
+ vpalignr m4, m1, m5, 12
+ mova [dst3q+stride3q*2], m3 ; 9
+ mova [dstq+strideq*1 ], m4 ; 1
+ vpalignr m3, m5, m0, 14
+ vpalignr m4, m1, m5, 14
+ mova [dstq+strideq*8], m3 ; 8
+ mova [dstq+strideq*0], m4 ; 0
+ mova [dst3q+strideq*4], m5 ; 7
+ RET
+
+cglobal vp9_ipred_vl_16x16_16, 4, 5, 7, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq] ; abcdefghijklmnop
+ vpbroadcastw xm1, [aq+30] ; pppppppp
+ vperm2i128 m2, m0, m1, q0201 ; ijklmnoppppppppp
+ vpalignr m3, m2, m0, 2 ; bcdefghijklmnopp
+ vperm2i128 m4, m3, m1, q0201 ; jklmnopppppppppp
+ vpalignr m5, m2, m0, 4 ; cdefghijklmnoppp
+ vperm2i128 m6, m5, m1, q0201 ; klmnoppppppppppp
+ LOWPASS 5, 3, 0 ; BCDEFGHIJKLMNOPP
+ LOWPASS 6, 4, 2 ; JKLMNOPPPPPPPPPP
+ pavgw m3, m0 ; abcdefghijklmnop
+ pavgw m4, m2 ; ijklmnoppppppppp
+ DEFINE_ARGS dst, stride, stride3, stride5, dst4
+ lea dst4q, [dstq+strideq*4]
+ lea stride3q, [strideq*3]
+ lea stride5q, [stride3q+strideq*2]
+
+ mova [dstq+strideq*0], m3 ; 0 abcdefghijklmnop
+ mova [dstq+strideq*1], m5 ; 1 BCDEFGHIJKLMNOPP
+ vpalignr m0, m4, m3, 2
+ vpalignr m1, m6, m5, 2
+ mova [dstq+strideq*2 ], m0 ; 2 bcdefghijklmnopp
+ mova [dstq+stride3q*1], m1 ; 3 CDEFGHIJKLMNOPPP
+ vpalignr m0, m4, m3, 4
+ vpalignr m1, m6, m5, 4
+ mova [dst4q+strideq*0], m0 ; 4 cdefghijklmnoppp
+ mova [dstq+stride5q*1], m1 ; 5 DEFGHIJKLMNOPPPP
+ vpalignr m0, m4, m3, 6
+ vpalignr m1, m6, m5, 6
+ mova [ dstq+stride3q*2], m0 ; 6 defghijklmnopppp
+ mova [dst4q+stride3q*1], m1 ; 7 EFGHIJKLMNOPPPPP
+ vpalignr m0, m4, m3, 8
+ vpalignr m1, m6, m5, 8
+ mova [ dstq+strideq*8], m0 ; 8 efghijklmnoppppp
+ mova [dst4q+stride5q*1], m1 ; 9 FGHIJKLMNOPPPPPP
+ vpalignr m0, m4, m3, 10
+ mova [dstq+stride5q*2], m0 ; 10 fghijklmnopppppp
+ vpalignr m0, m4, m3, 12
+ mova [dst4q+strideq*8], m0 ; 12 ghijklmnoppppppp
+ vpalignr m0, m4, m3, 14
+ mova [dst4q+stride5q*2], m0 ; 14 hijklmnopppppppp
+ sub dst4q, strideq
+ vpalignr m1, m6, m5, 10
+ mova [dst4q+strideq*8], m1 ; 11 GHIJKLMNOPPPPPPP
+ vpalignr m1, m6, m5, 12
+ mova [dst4q+stride5q*2], m1 ; 13 HIJKLMNOPPPPPPPP
+ vpalignr m1, m6, m5, 14
+ mova [dst4q+stride3q*4], m1 ; 15 IJKLMNOPPPPPPPPP
+ RET
+
+cglobal vp9_ipred_hd_16x16_16, 4, 5, 7, dst, stride, l, a
+ movu m0, [aq-2] ; *abcdefghijklmno
+ mova m1, [lq] ; klmnopqrstuvwxyz
+ vperm2i128 m2, m1, m0, q0201 ; stuvwxyz*abcdefg
+ vpalignr m3, m2, m1, 2 ; lmnopqrstuvwxyz*
+ vpalignr m4, m2, m1, 4 ; mnopqrstuvwxyz*a
+ LOWPASS 4, 3, 1 ; LMNOPQRSTUVWXYZ#
+ pavgw m3, m1 ; klmnopqrstuvwxyz
+ mova m1, [aq] ; abcdefghijklmnop
+ movu m2, [aq+2] ; bcdefghijklmnop.
+ LOWPASS 2, 1, 0 ; ABCDEFGHIJKLMNO.
+ vpunpcklwd m0, m3, m4 ; kLlMmNnOsTtUuVvW
+ vpunpckhwd m1, m3, m4 ; oPpQqRrSwXxYyZz#
+ vperm2i128 m3, m1, m0, q0002 ; kLlMmNnOoPpQqRrS
+ vperm2i128 m4, m0, m1, q0301 ; sTtUuVvWwXxYyZz#
+ vperm2i128 m0, m4, m2, q0201 ; wXxYyZz#ABCDEFGH
+ vperm2i128 m1, m3, m4, q0201 ; oPpQqRrSsTtUuVvW
+ DEFINE_ARGS dst, stride, stride3, stride5, dst5
+ lea stride3q, [strideq*3]
+ lea stride5q, [stride3q+strideq*2]
+ lea dst5q, [dstq+stride5q]
+
+ mova [dst5q+stride5q*2], m3 ; 15 kLlMmNnOoPpQqRrS
+ mova [dst5q+stride3q*2], m1 ; 11 oPpQqRrSsTtUuVvW
+ mova [dst5q+strideq*2], m4 ; 7 sTtUuVvWwXxYyZz#
+ mova [dstq+stride3q*1], m0 ; 3 wXxYyZz#ABCDEFGH
+ vpalignr m5, m4, m1, 4
+ mova [dstq+stride5q*2], m5 ; 10 pQqRrSsTtUuVvWwX
+ vpalignr m5, m0, m4, 4
+ vpalignr m6, m2, m0, 4
+ mova [dstq+stride3q*2], m5 ; 6 tUuVvWwXxYyZz#AB
+ mova [dstq+strideq*2], m6 ; 2 xYyZz#ABCDEFGHIJ
+ vpalignr m5, m4, m1, 8
+ mova [dst5q+strideq*4], m5 ; 9 qRrSsTtUuVvWwXxY
+ vpalignr m5, m0, m4, 8
+ vpalignr m6, m2, m0, 8
+ mova [dstq+stride5q*1], m5 ; 5 uVvWwXxYyZz#ABCD
+ mova [dstq+strideq*1], m6 ; 1 yZz#ABCDEFGHIJKL
+ vpalignr m5, m1, m3, 12
+ vpalignr m6, m4, m1, 12
+ mova [dstq+stride3q*4], m5 ; 12 nOoPpQqRrSsTtUuV
+ mova [dst5q+stride3q], m6 ; 8 rSsTtUuVvWwXxYyZ
+ vpalignr m5, m0, m4, 12
+ vpalignr m6, m2, m0, 12
+ mova [dstq+strideq*4], m5 ; 4 nOoPpQqRrSsTtUuV
+ mova [dstq+strideq*0], m6 ; 0 z#ABCDEFGHIJKLMN
+ sub dst5q, strideq
+ vpalignr m5, m1, m3, 4
+ mova [dst5q+stride5q*2], m5 ; 14 lMmNnOoPpQqRrSsT
+ sub dst5q, strideq
+ vpalignr m5, m1, m3, 8
+ mova [dst5q+stride5q*2], m5 ; 13 mNnOoPpQqRrSsTtU
+ RET
+
+%if ARCH_X86_64
+cglobal vp9_ipred_dr_32x32_16, 4, 7, 10, dst, stride, l, a
+ mova m0, [lq+mmsize*0+0] ; l[0-15]
+ mova m1, [lq+mmsize*1+0] ; l[16-31]
+ movu m2, [aq+mmsize*0-2] ; *abcdefghijklmno
+ mova m3, [aq+mmsize*0+0] ; abcdefghijklmnop
+ mova m4, [aq+mmsize*1+0] ; qrstuvwxyz012345
+ vperm2i128 m5, m0, m1, q0201 ; lmnopqrstuvwxyz0
+ vpalignr m6, m5, m0, 2 ; mnopqrstuvwxyz01
+ vpalignr m7, m5, m0, 4 ; nopqrstuvwxyz012
+ LOWPASS 0, 6, 7 ; L[0-15]
+ vperm2i128 m7, m1, m2, q0201 ; stuvwxyz*abcdefg
+ vpalignr m5, m7, m1, 2 ; lmnopqrstuvwxyz*
+ vpalignr m6, m7, m1, 4 ; mnopqrstuvwxyz*a
+ LOWPASS 1, 5, 6 ; L[16-31]#
+ vperm2i128 m5, m3, m4, q0201 ; ijklmnopqrstuvwx
+ vpalignr m6, m5, m3, 2 ; bcdefghijklmnopq
+ LOWPASS 2, 3, 6 ; A[0-15]
+ movu m3, [aq+mmsize*1-2] ; pqrstuvwxyz01234
+ vperm2i128 m6, m4, m4, q2001 ; yz012345........
+ vpalignr m7, m6, m4, 2 ; rstuvwxyz012345.
+ LOWPASS 3, 4, 7 ; A[16-31].
+ vperm2i128 m4, m1, m2, q0201 ; TUVWXYZ#ABCDEFGH
+ vperm2i128 m5, m0, m1, q0201 ; L[7-15]L[16-23]
+ vperm2i128 m8, m2, m3, q0201 ; IJKLMNOPQRSTUVWX
+ DEFINE_ARGS dst8, stride, stride3, stride7, stride5, dst24, cnt
+ lea stride3q, [strideq*3]
+ lea stride5q, [stride3q+strideq*2]
+ lea stride7q, [strideq*4+stride3q]
+ lea dst24q, [dst8q+stride3q*8]
+ lea dst8q, [dst8q+strideq*8]
+ mov cntd, 2
+
+.loop:
+ mova [dst24q+stride7q+0 ], m0 ; 31 23 15 7
+ mova [dst24q+stride7q+32], m1
+ mova [dst8q+stride7q+0], m1
+ mova [dst8q+stride7q+32], m2
+ vpalignr m6, m4, m1, 2
+ vpalignr m7, m5, m0, 2
+ vpalignr m9, m8, m2, 2
+ mova [dst24q+stride3q*2+0], m7 ; 30 22 14 6
+ mova [dst24q+stride3q*2+32], m6
+ mova [dst8q+stride3q*2+0], m6
+ mova [dst8q+stride3q*2+32], m9
+ vpalignr m6, m4, m1, 4
+ vpalignr m7, m5, m0, 4
+ vpalignr m9, m8, m2, 4
+ mova [dst24q+stride5q+0], m7 ; 29 21 13 5
+ mova [dst24q+stride5q+32], m6
+ mova [dst8q+stride5q+0], m6
+ mova [dst8q+stride5q+32], m9
+ vpalignr m6, m4, m1, 6
+ vpalignr m7, m5, m0, 6
+ vpalignr m9, m8, m2, 6
+ mova [dst24q+strideq*4+0 ], m7 ; 28 20 12 4
+ mova [dst24q+strideq*4+32], m6
+ mova [dst8q+strideq*4+0], m6
+ mova [dst8q+strideq*4+32], m9
+ vpalignr m6, m4, m1, 8
+ vpalignr m7, m5, m0, 8
+ vpalignr m9, m8, m2, 8
+ mova [dst24q+stride3q+0 ], m7 ; 27 19 11 3
+ mova [dst24q+stride3q+32], m6
+ mova [dst8q+stride3q+0], m6
+ mova [dst8q+stride3q+32], m9
+ vpalignr m6, m4, m1, 10
+ vpalignr m7, m5, m0, 10
+ vpalignr m9, m8, m2, 10
+ mova [dst24q+strideq*2+0 ], m7 ; 26 18 10 2
+ mova [dst24q+strideq*2+32], m6
+ mova [dst8q+strideq*2+0], m6
+ mova [dst8q+strideq*2+32], m9
+ vpalignr m6, m4, m1, 12
+ vpalignr m7, m5, m0, 12
+ vpalignr m9, m8, m2, 12
+ mova [dst24q+strideq+0 ], m7 ; 25 17 9 1
+ mova [dst24q+strideq+32], m6
+ mova [dst8q+strideq+0], m6
+ mova [dst8q+strideq+32], m9
+ vpalignr m6, m4, m1, 14
+ vpalignr m7, m5, m0, 14
+ vpalignr m9, m8, m2, 14
+ mova [dst24q+strideq*0+0 ], m7 ; 24 16 8 0
+ mova [dst24q+strideq*0+32], m6
+ mova [dst8q+strideq*0+0], m6
+ mova [dst8q+strideq*0+32], m9
+ mova m0, m5
+ mova m5, m1
+ mova m1, m4
+ mova m4, m2
+ mova m2, m8
+ mova m8, m3
+ sub dst24q, stride7q
+ sub dst24q, strideq
+ sub dst8q, stride7q
+ sub dst8q, strideq
+ dec cntd
+ jg .loop
+ RET
+%endif
+%endif
+
+%macro VL_FUNCS 1 ; stack_mem_for_32x32_32bit_function
+cglobal vp9_ipred_vl_4x4_16, 2, 4, 3, dst, stride, l, a
+ movifnidn aq, amp
+ movu m0, [aq] ; abcdefgh
+ psrldq m1, m0, 2 ; bcdefgh.
+ psrldq m2, m0, 4 ; cdefgh..
+ LOWPASS 2, 1, 0 ; BCDEFGH.
+ pavgw m1, m0 ; ABCDEFG.
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+
+ movh [dstq+strideq*0], m1
+ movh [dstq+strideq*1], m2
+ psrldq m1, 2
+ psrldq m2, 2
+ movh [dstq+strideq*2], m1
+ movh [dstq+stride3q ], m2
+ RET
+
+cglobal vp9_ipred_vl_8x8_16, 2, 4, 4, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq] ; abcdefgh
+%if cpuflag(ssse3)
+ mova m3, [pb_2to15_14_15]
+%endif
+ SHIFT_RIGHTx2 m1, m2, m0, m3 ; bcdefghh/cdefghhh
+ LOWPASS 2, 1, 0 ; BCDEFGHh
+ pavgw m1, m0 ; ABCDEFGh
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+
+ mova [dstq+strideq*0], m1
+ mova [dstq+strideq*1], m2
+ SHIFT_RIGHT m1, m1, m3
+ SHIFT_RIGHT m2, m2, m3
+ mova [dstq+strideq*2], m1
+ mova [dstq+stride3q ], m2
+ lea dstq, [dstq+strideq*4]
+ SHIFT_RIGHT m1, m1, m3
+ SHIFT_RIGHT m2, m2, m3
+ mova [dstq+strideq*0], m1
+ mova [dstq+strideq*1], m2
+ SHIFT_RIGHT m1, m1, m3
+ SHIFT_RIGHT m2, m2, m3
+ mova [dstq+strideq*2], m1
+ mova [dstq+stride3q ], m2
+ RET
+
+cglobal vp9_ipred_vl_16x16_16, 2, 4, 6, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq]
+ mova m1, [aq+mmsize]
+ PALIGNR m2, m1, m0, 2, m3
+ PALIGNR m3, m1, m0, 4, m4
+ LOWPASS 3, 2, 0
+ pavgw m2, m0
+%if cpuflag(ssse3)
+ mova m4, [pb_2to15_14_15]
+%endif
+ SHIFT_RIGHTx2 m5, m0, m1, m4
+ LOWPASS 0, 5, 1
+ pavgw m1, m5
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 8
+
+.loop:
+ mova [dstq+strideq*0+ 0], m2
+ mova [dstq+strideq*0+16], m1
+ mova [dstq+strideq*1+ 0], m3
+ mova [dstq+strideq*1+16], m0
+ lea dstq, [dstq+strideq*2]
+%if cpuflag(avx)
+ vpalignr m2, m1, m2, 2
+ vpalignr m3, m0, m3, 2
+%else
+ PALIGNR m5, m1, m2, 2, m4
+ mova m2, m5
+ PALIGNR m5, m0, m3, 2, m4
+ mova m3, m5
+%endif
+ SHIFT_RIGHT m1, m1, m4
+ SHIFT_RIGHT m0, m0, m4
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_vl_32x32_16, 2, 5, 11, %1 * mmsize * ARCH_X86_32, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq+mmsize*0]
+ mova m1, [aq+mmsize*1]
+ mova m2, [aq+mmsize*2]
+ PALIGNR m6, m1, m0, 2, m5
+ PALIGNR m7, m1, m0, 4, m5
+ LOWPASS 7, 6, 0
+ pavgw m6, m0
+ SCRATCH 6, 8, rsp+0*mmsize
+ PALIGNR m4, m2, m1, 2, m0
+ PALIGNR m5, m2, m1, 4, m0
+ LOWPASS 5, 4, 1
+ pavgw m4, m1
+ mova m0, [aq+mmsize*3]
+ PALIGNR m1, m0, m2, 2, m6
+ PALIGNR m3, m0, m2, 4, m6
+ LOWPASS 3, 1, 2
+ pavgw m2, m1
+%if cpuflag(ssse3)
+ PRELOAD 10, pb_2to15_14_15, shuf
+%endif
+ SHIFT_RIGHTx2 m6, m1, m0, reg_shuf
+ LOWPASS 1, 6, 0
+ pavgw m0, m6
+%if ARCH_X86_64
+ pshufd m9, m6, q3333
+%endif
+%if cpuflag(avx)
+ UNSCRATCH 6, 8, rsp+0*mmsize
+%endif
+ DEFINE_ARGS dst, stride, cnt, stride16, stride17
+ mov stride16q, strideq
+ mov cntd, 8
+ shl stride16q, 4
+ lea stride17q, [stride16q+strideq]
+
+ ; FIXME m8 is unused for avx, so we could save one register here for win64
+.loop:
+%if notcpuflag(avx)
+ UNSCRATCH 6, 8, rsp+0*mmsize
+%endif
+ mova [dstq+strideq*0+ 0], m6
+ mova [dstq+strideq*0+16], m4
+ mova [dstq+strideq*0+32], m2
+ mova [dstq+strideq*0+48], m0
+ mova [dstq+strideq*1+ 0], m7
+ mova [dstq+strideq*1+16], m5
+ mova [dstq+strideq*1+32], m3
+ mova [dstq+strideq*1+48], m1
+ mova [dstq+stride16q+ 0], m4
+ mova [dstq+stride16q+16], m2
+ mova [dstq+stride16q+32], m0
+%if ARCH_X86_64
+ mova [dstq+stride16q+48], m9
+%endif
+ mova [dstq+stride17q+ 0], m5
+ mova [dstq+stride17q+16], m3
+ mova [dstq+stride17q+32], m1
+%if ARCH_X86_64
+ mova [dstq+stride17q+48], m9
+%endif
+ lea dstq, [dstq+strideq*2]
+%if cpuflag(avx)
+ vpalignr m6, m4, m6, 2
+ vpalignr m4, m2, m4, 2
+ vpalignr m2, m0, m2, 2
+ vpalignr m7, m5, m7, 2
+ vpalignr m5, m3, m5, 2
+ vpalignr m3, m1, m3, 2
+%else
+ SCRATCH 3, 8, rsp+0*mmsize
+%if notcpuflag(ssse3)
+ SCRATCH 1, 10, rsp+1*mmsize
+%endif
+ PALIGNR m3, m4, m6, 2, m1
+ mova m6, m3
+ PALIGNR m3, m2, m4, 2, m1
+ mova m4, m3
+ PALIGNR m3, m0, m2, 2, m1
+ mova m2, m3
+ PALIGNR m3, m5, m7, 2, m1
+ mova m7, m3
+ UNSCRATCH 3, 8, rsp+0*mmsize
+ SCRATCH 6, 8, rsp+0*mmsize
+%if notcpuflag(ssse3)
+ UNSCRATCH 1, 10, rsp+1*mmsize
+ SCRATCH 7, 10, rsp+1*mmsize
+%endif
+ PALIGNR m6, m3, m5, 2, m7
+ mova m5, m6
+ PALIGNR m6, m1, m3, 2, m7
+ mova m3, m6
+%if notcpuflag(ssse3)
+ UNSCRATCH 7, 10, rsp+1*mmsize
+%endif
+%endif
+ SHIFT_RIGHT m1, m1, reg_shuf
+ SHIFT_RIGHT m0, m0, reg_shuf
+ dec cntd
+ jg .loop
+
+%if ARCH_X86_32
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+%assign %%n 0
+%rep 4
+ mova [dstq+strideq*0+48], m0
+ mova [dstq+strideq*1+48], m0
+ mova [dstq+strideq*2+48], m0
+ mova [dstq+stride3q +48], m0
+%if %%n < 3
+ lea dstq, [dstq+strideq*4]
+%endif
+%assign %%n (%%n+1)
+%endrep
+%endif
+ RET
+%endmacro
+
+INIT_XMM sse2
+VL_FUNCS 2
+INIT_XMM ssse3
+VL_FUNCS 1
+INIT_XMM avx
+VL_FUNCS 1
+
+%macro VR_FUNCS 0
+cglobal vp9_ipred_vr_4x4_16, 4, 4, 3, dst, stride, l, a
+ movu m0, [aq-2]
+ movhps m1, [lq]
+ PALIGNR m0, m1, 10, m2 ; xyz*abcd
+ pslldq m1, m0, 2 ; .xyz*abc
+ pslldq m2, m0, 4 ; ..xyz*ab
+ LOWPASS 2, 1, 0 ; ..YZ#ABC
+ pavgw m1, m0 ; ....#ABC
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+
+ movhps [dstq+strideq*0], m1
+ movhps [dstq+strideq*1], m2
+ shufps m0, m2, m1, q3210
+%if cpuflag(ssse3)
+ pshufb m2, [pb_4_5_8to13_8x0]
+%else
+ pshuflw m2, m2, q2222
+ psrldq m2, 6
+%endif
+ psrldq m0, 6
+ movh [dstq+strideq*2], m0
+ movh [dstq+stride3q ], m2
+ RET
+
+cglobal vp9_ipred_vr_8x8_16, 4, 4, 5, dst, stride, l, a
+ movu m1, [aq-2] ; *abcdefg
+ movu m2, [lq] ; stuvwxyz
+ mova m0, [aq] ; abcdefgh
+ PALIGNR m3, m1, m2, 14, m4 ; z*abcdef
+ LOWPASS 3, 1, 0
+ pavgw m0, m1
+ PALIGNR m1, m2, 2, m4 ; tuvwxyz*
+ pslldq m4, m2, 2 ; .stuvwxy
+ LOWPASS 4, 2, 1
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m3
+ PALIGNR m0, m4, 14, m1
+ pslldq m4, 2
+ PALIGNR m3, m4, 14, m1
+ pslldq m4, 2
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m3
+ lea dstq, [dstq+strideq*4]
+ PALIGNR m0, m4, 14, m1
+ pslldq m4, 2
+ PALIGNR m3, m4, 14, m1
+ pslldq m4, 2
+ mova [dstq+strideq*0], m0
+ mova [dstq+strideq*1], m3
+ PALIGNR m0, m4, 14, m1
+ pslldq m4, 2
+ PALIGNR m3, m4, 14, m4
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m3
+ RET
+
+cglobal vp9_ipred_vr_16x16_16, 4, 4, 8, dst, stride, l, a
+ movu m1, [aq-2] ; *abcdefg
+ movu m2, [aq+mmsize-2] ; hijklmno
+ mova m3, [aq] ; abcdefgh
+ mova m4, [aq+mmsize] ; ijklmnop
+ mova m5, [lq+mmsize] ; stuvwxyz
+ PALIGNR m0, m1, m5, 14, m6 ; z*abcdef
+ movu m6, [aq+mmsize-4] ; ghijklmn
+ LOWPASS 6, 2, 4
+ pavgw m2, m4
+ LOWPASS 0, 1, 3
+ pavgw m3, m1
+ PALIGNR m1, m5, 2, m7 ; tuvwxyz*
+ movu m7, [lq+mmsize-2] ; rstuvwxy
+ LOWPASS 1, 5, 7
+ movu m5, [lq+2] ; lmnopqrs
+ pslldq m4, m5, 2 ; .lmnopqr
+ pslldq m7, m5, 4 ; ..lmnopq
+ LOWPASS 5, 4, 7
+ psrld m4, m1, 16
+ psrld m7, m5, 16
+ pand m1, [pd_65535]
+ pand m5, [pd_65535]
+ packssdw m7, m4
+ packssdw m5, m1
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 8
+
+.loop:
+ mova [dstq+strideq*0+ 0], m3
+ mova [dstq+strideq*0+16], m2
+ mova [dstq+strideq*1+ 0], m0
+ mova [dstq+strideq*1+16], m6
+ lea dstq, [dstq+strideq*2]
+ PALIGNR m2, m3, 14, m4
+ PALIGNR m3, m7, 14, m4
+ pslldq m7, 2
+ PALIGNR m6, m0, 14, m4
+ PALIGNR m0, m5, 14, m4
+ pslldq m5, 2
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_vr_32x32_16, 4, 5, 14, 6 * mmsize * ARCH_X86_32, dst, stride, l, a
+ movu m0, [aq+mmsize*0-2] ; *a[0-6]
+ movu m1, [aq+mmsize*1-2] ; a[7-14]
+ movu m2, [aq+mmsize*2-2] ; a[15-22]
+ movu m3, [aq+mmsize*3-2] ; a[23-30]
+ mova m4, [aq+mmsize*3+0] ; a[24-31]
+ movu m5, [aq+mmsize*3-4] ; a[22-29]
+ LOWPASS 5, 3, 4 ; A[23-30]
+ SCRATCH 5, 8, rsp+0*mmsize
+ pavgw m3, m4
+ mova m4, [aq+mmsize*2+0] ; a[16-23]
+ movu m6, [aq+mmsize*2-4] ; a[14-21]
+ LOWPASS 6, 2, 4 ; A[15-22]
+ SCRATCH 6, 9, rsp+1*mmsize
+ pavgw m2, m4
+ mova m4, [aq+mmsize*1+0] ; a[8-15]
+ movu m7, [aq+mmsize*1-4] ; a[6-13]
+ LOWPASS 7, 1, 4 ; A[7-14]
+ SCRATCH 7, 10, rsp+2*mmsize
+ pavgw m1, m4
+ mova m4, [aq+mmsize*0+0] ; a[0-7]
+ mova m5, [lq+mmsize*3+0] ; l[24-31]
+ PALIGNR m6, m0, m5, 14, m7 ; l[31]*a[0-5]
+ LOWPASS 6, 0, 4 ; #A[0-6]
+ SCRATCH 6, 11, rsp+3*mmsize
+ pavgw m4, m0
+ PALIGNR m0, m5, 2, m7 ; l[25-31]*
+ movu m7, [lq+mmsize*3-2] ; l[23-30]
+ LOWPASS 0, 5, 7 ; L[24-31]
+ movu m5, [lq+mmsize*2-2] ; l[15-22]
+ mova m7, [lq+mmsize*2+0] ; l[16-23]
+ movu m6, [lq+mmsize*2+2] ; l[17-24]
+ LOWPASS 5, 7, 6 ; L[16-23]
+ psrld m7, m0, 16
+ psrld m6, m5, 16
+ pand m0, [pd_65535]
+ pand m5, [pd_65535]
+ packssdw m6, m7
+ packssdw m5, m0
+ SCRATCH 5, 12, rsp+4*mmsize
+ SCRATCH 6, 13, rsp+5*mmsize
+ movu m6, [lq+mmsize*1-2] ; l[7-14]
+ mova m0, [lq+mmsize*1+0] ; l[8-15]
+ movu m5, [lq+mmsize*1+2] ; l[9-16]
+ LOWPASS 6, 0, 5 ; L[8-15]
+ movu m0, [lq+mmsize*0+2] ; l[1-8]
+ pslldq m5, m0, 2 ; .l[1-7]
+ pslldq m7, m0, 4 ; ..l[1-6]
+ LOWPASS 0, 5, 7
+ psrld m5, m6, 16
+ psrld m7, m0, 16
+ pand m6, [pd_65535]
+ pand m0, [pd_65535]
+ packssdw m7, m5
+ packssdw m0, m6
+ UNSCRATCH 6, 13, rsp+5*mmsize
+ DEFINE_ARGS dst, stride, stride16, cnt, stride17
+ mov stride16q, strideq
+ mov cntd, 8
+ shl stride16q, 4
+%if ARCH_X86_64
+ lea stride17q, [stride16q+strideq]
+%endif
+
+.loop:
+ mova [dstq+strideq*0+ 0], m4
+ mova [dstq+strideq*0+16], m1
+ mova [dstq+strideq*0+32], m2
+ mova [dstq+strideq*0+48], m3
+%if ARCH_X86_64
+ mova [dstq+strideq*1+ 0], m11
+ mova [dstq+strideq*1+16], m10
+ mova [dstq+strideq*1+32], m9
+ mova [dstq+strideq*1+48], m8
+%endif
+ mova [dstq+stride16q+ 0], m6
+ mova [dstq+stride16q+16], m4
+ mova [dstq+stride16q+32], m1
+ mova [dstq+stride16q+48], m2
+%if ARCH_X86_64
+ mova [dstq+stride17q+ 0], m12
+ mova [dstq+stride17q+16], m11
+ mova [dstq+stride17q+32], m10
+ mova [dstq+stride17q+48], m9
+%endif
+ lea dstq, [dstq+strideq*2]
+ PALIGNR m3, m2, 14, m5
+ PALIGNR m2, m1, 14, m5
+ PALIGNR m1, m4, 14, m5
+ PALIGNR m4, m6, 14, m5
+ PALIGNR m6, m7, 14, m5
+ pslldq m7, 2
+%if ARCH_X86_64
+ PALIGNR m8, m9, 14, m5
+ PALIGNR m9, m10, 14, m5
+ PALIGNR m10, m11, 14, m5
+ PALIGNR m11, m12, 14, m5
+ PALIGNR m12, m0, 14, m5
+ pslldq m0, 2
+%endif
+ dec cntd
+ jg .loop
+
+%if ARCH_X86_32
+ UNSCRATCH 5, 12, rsp+4*mmsize
+ UNSCRATCH 4, 11, rsp+3*mmsize
+ UNSCRATCH 3, 10, rsp+2*mmsize
+ UNSCRATCH 2, 9, rsp+1*mmsize
+ UNSCRATCH 1, 8, rsp+0*mmsize
+ mov dstq, dstm
+ mov cntd, 8
+ add dstq, strideq
+.loop2:
+ mova [dstq+strideq*0+ 0], m4
+ mova [dstq+strideq*0+16], m3
+ mova [dstq+strideq*0+32], m2
+ mova [dstq+strideq*0+48], m1
+ mova [dstq+stride16q+ 0], m5
+ mova [dstq+stride16q+16], m4
+ mova [dstq+stride16q+32], m3
+ mova [dstq+stride16q+48], m2
+ lea dstq, [dstq+strideq*2]
+ PALIGNR m1, m2, 14, m6
+ PALIGNR m2, m3, 14, m6
+ PALIGNR m3, m4, 14, m6
+ PALIGNR m4, m5, 14, m6
+ PALIGNR m5, m0, 14, m6
+ pslldq m0, 2
+ dec cntd
+ jg .loop2
+%endif
+ RET
+%endmacro
+
+INIT_XMM sse2
+VR_FUNCS
+INIT_XMM ssse3
+VR_FUNCS
+INIT_XMM avx
+VR_FUNCS
+
+%macro HU_FUNCS 1 ; stack_mem_for_32x32_32bit_function
+cglobal vp9_ipred_hu_4x4_16, 3, 3, 3, dst, stride, l, a
+ movh m0, [lq] ; abcd
+%if cpuflag(ssse3)
+ pshufb m0, [pb_0to7_67x4] ; abcddddd
+%else
+ punpcklqdq m0, m0
+ pshufhw m0, m0, q3333 ; abcddddd
+%endif
+ psrldq m1, m0, 2 ; bcddddd.
+ psrldq m2, m0, 4 ; cddddd..
+ LOWPASS 2, 1, 0 ; BCDddd..
+ pavgw m1, m0 ; abcddddd
+ SBUTTERFLY wd, 1, 2, 0 ; aBbCcDdd, dddddddd
+ PALIGNR m2, m1, 4, m0 ; bCcDdddd
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+
+ movh [dstq+strideq*0], m1 ; aBbC
+ movh [dstq+strideq*1], m2 ; bCcD
+ movhps [dstq+strideq*2], m1 ; cDdd
+ movhps [dstq+stride3q ], m2 ; dddd
+ RET
+
+cglobal vp9_ipred_hu_8x8_16, 3, 3, 4, dst, stride, l, a
+ mova m0, [lq]
+%if cpuflag(ssse3)
+ mova m3, [pb_2to15_14_15]
+%endif
+ SHIFT_RIGHTx2 m1, m2, m0, m3
+ LOWPASS 2, 1, 0
+ pavgw m1, m0
+ SBUTTERFLY wd, 1, 2, 0
+ shufps m0, m1, m2, q1032
+ pshufd m3, m2, q3332
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+
+ mova [dstq+strideq *0], m1
+ mova [dstq+strideq *2], m0
+ mova [dstq+strideq *4], m2
+ mova [dstq+stride3q*2], m3
+ add dstq, strideq
+%if cpuflag(avx)
+ vpalignr m1, m2, m1, 4
+%else
+ PALIGNR m0, m2, m1, 4, m3
+ mova m1, m0
+%endif
+ pshufd m2, m2, q3321
+ shufps m0, m1, m2, q1032
+ pshufd m3, m2, q3332
+ mova [dstq+strideq *0], m1
+ mova [dstq+strideq *2], m0
+ mova [dstq+strideq *4], m2
+ mova [dstq+stride3q*2], m3
+ RET
+
+cglobal vp9_ipred_hu_16x16_16, 3, 4, 6 + notcpuflag(ssse3), dst, stride, l, a
+ mova m0, [lq]
+ mova m3, [lq+mmsize]
+ movu m1, [lq+2]
+ movu m2, [lq+4]
+ LOWPASS 2, 1, 0
+ pavgw m1, m0
+ SBUTTERFLY wd, 1, 2, 0
+%if cpuflag(ssse3)
+ mova m5, [pb_2to15_14_15]
+%endif
+ SHIFT_RIGHTx2 m0, m4, m3, m5
+ LOWPASS 4, 0, 3
+ pavgw m3, m0
+ SBUTTERFLY wd, 3, 4, 5
+ pshufd m0, m0, q3333
+ DEFINE_ARGS dst, stride, stride3, cnt
+ lea stride3q, [strideq*3]
+ mov cntd, 4
+
+.loop:
+ mova [dstq+strideq *0+ 0], m1
+ mova [dstq+strideq *0+16], m2
+ mova [dstq+strideq *4+ 0], m2
+ mova [dstq+strideq *4+16], m3
+ mova [dstq+strideq *8+ 0], m3
+ mova [dstq+strideq *8+16], m4
+ mova [dstq+stride3q*4+ 0], m4
+ mova [dstq+stride3q*4+16], m0
+ add dstq, strideq
+%if cpuflag(avx)
+ vpalignr m1, m2, m1, 4
+ vpalignr m2, m3, m2, 4
+ vpalignr m3, m4, m3, 4
+ vpalignr m4, m0, m4, 4
+%else
+ PALIGNR m5, m2, m1, 4, m6
+ mova m1, m5
+ PALIGNR m5, m3, m2, 4, m6
+ mova m2, m5
+ PALIGNR m5, m4, m3, 4, m6
+ mova m3, m5
+ PALIGNR m5, m0, m4, 4, m6
+ mova m4, m5
+%endif
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_hu_32x32_16, 3, 7, 10 + notcpuflag(ssse3), \
+ %1 * -mmsize * ARCH_X86_32, dst, stride, l, a
+ mova m2, [lq+mmsize*0+0]
+ movu m1, [lq+mmsize*0+2]
+ movu m0, [lq+mmsize*0+4]
+ LOWPASS 0, 1, 2
+ pavgw m1, m2
+ SBUTTERFLY wd, 1, 0, 2
+ SCRATCH 1, 8, rsp+0*mmsize
+ mova m4, [lq+mmsize*1+0]
+ movu m3, [lq+mmsize*1+2]
+ movu m2, [lq+mmsize*1+4]
+ LOWPASS 2, 3, 4
+ pavgw m3, m4
+ SBUTTERFLY wd, 3, 2, 4
+ mova m6, [lq+mmsize*2+0]
+ movu m5, [lq+mmsize*2+2]
+ movu m4, [lq+mmsize*2+4]
+ LOWPASS 4, 5, 6
+ pavgw m5, m6
+ SBUTTERFLY wd, 5, 4, 6
+ mova m7, [lq+mmsize*3+0]
+ SCRATCH 0, 9, rsp+1*mmsize
+%if cpuflag(ssse3)
+ mova m0, [pb_2to15_14_15]
+%endif
+ SHIFT_RIGHTx2 m1, m6, m7, m0
+ LOWPASS 6, 1, 7
+ pavgw m7, m1
+ SBUTTERFLY wd, 7, 6, 0
+ pshufd m1, m1, q3333
+ UNSCRATCH 0, 9, rsp+1*mmsize
+ DEFINE_ARGS dst, stride, cnt, stride3, stride4, stride20, stride28
+ lea stride3q, [strideq*3]
+ lea stride4q, [strideq*4]
+ lea stride28q, [stride4q*8]
+ lea stride20q, [stride4q*5]
+ sub stride28q, stride4q
+ mov cntd, 4
+
+.loop:
+%if ARCH_X86_64
+ SWAP 1, 8
+%else
+ mova [rsp+1*mmsize], m1
+ mova m1, [rsp+0*mmsize]
+%endif
+ mova [dstq+strideq *0+ 0], m1
+ mova [dstq+strideq *0+16], m0
+ mova [dstq+strideq *0+32], m3
+ mova [dstq+strideq *0+48], m2
+ mova [dstq+stride4q*1+ 0], m0
+ mova [dstq+stride4q*1+16], m3
+ mova [dstq+stride4q*1+32], m2
+ mova [dstq+stride4q*1+48], m5
+ mova [dstq+stride4q*2+ 0], m3
+ mova [dstq+stride4q*2+16], m2
+ mova [dstq+stride4q*2+32], m5
+ mova [dstq+stride4q*2+48], m4
+%if cpuflag(avx)
+ vpalignr m1, m0, m1, 4
+ vpalignr m0, m3, m0, 4
+ vpalignr m3, m2, m3, 4
+%else
+ SCRATCH 6, 9, rsp+2*mmsize
+%if notcpuflag(ssse3)
+ SCRATCH 7, 10, rsp+3*mmsize
+%endif
+ PALIGNR m6, m0, m1, 4, m7
+ mova m1, m6
+ PALIGNR m6, m3, m0, 4, m7
+ mova m0, m6
+ PALIGNR m6, m2, m3, 4, m7
+ mova m3, m6
+ UNSCRATCH 6, 9, rsp+2*mmsize
+ SCRATCH 0, 9, rsp+2*mmsize
+%if notcpuflag(ssse3)
+ UNSCRATCH 7, 10, rsp+3*mmsize
+ SCRATCH 3, 10, rsp+3*mmsize
+%endif
+%endif
+%if ARCH_X86_64
+ SWAP 1, 8
+%else
+ mova [rsp+0*mmsize], m1
+ mova m1, [rsp+1*mmsize]
+%endif
+ mova [dstq+stride3q*4+ 0], m2
+ mova [dstq+stride3q*4+16], m5
+ mova [dstq+stride3q*4+32], m4
+ mova [dstq+stride3q*4+48], m7
+ mova [dstq+stride4q*4+ 0], m5
+ mova [dstq+stride4q*4+16], m4
+ mova [dstq+stride4q*4+32], m7
+ mova [dstq+stride4q*4+48], m6
+ mova [dstq+stride20q + 0], m4
+ mova [dstq+stride20q +16], m7
+ mova [dstq+stride20q +32], m6
+ mova [dstq+stride20q +48], m1
+ mova [dstq+stride3q*8+ 0], m7
+ mova [dstq+stride3q*8+16], m6
+ mova [dstq+stride3q*8+32], m1
+ mova [dstq+stride3q*8+48], m1
+ mova [dstq+stride28q + 0], m6
+ mova [dstq+stride28q +16], m1
+ mova [dstq+stride28q +32], m1
+ mova [dstq+stride28q +48], m1
+%if cpuflag(avx)
+ vpalignr m2, m5, m2, 4
+ vpalignr m5, m4, m5, 4
+ vpalignr m4, m7, m4, 4
+ vpalignr m7, m6, m7, 4
+ vpalignr m6, m1, m6, 4
+%else
+ PALIGNR m0, m5, m2, 4, m3
+ mova m2, m0
+ PALIGNR m0, m4, m5, 4, m3
+ mova m5, m0
+ PALIGNR m0, m7, m4, 4, m3
+ mova m4, m0
+ PALIGNR m0, m6, m7, 4, m3
+ mova m7, m0
+ PALIGNR m0, m1, m6, 4, m3
+ mova m6, m0
+ UNSCRATCH 0, 9, rsp+2*mmsize
+%if notcpuflag(ssse3)
+ UNSCRATCH 3, 10, rsp+3*mmsize
+%endif
+%endif
+ add dstq, strideq
+ dec cntd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+HU_FUNCS 4
+INIT_XMM ssse3
+HU_FUNCS 3
+INIT_XMM avx
+HU_FUNCS 2
+
+%macro HD_FUNCS 0
+cglobal vp9_ipred_hd_4x4_16, 4, 4, 4, dst, stride, l, a
+ movh m0, [lq]
+ movhps m0, [aq-2]
+ psrldq m1, m0, 2
+ psrldq m2, m0, 4
+ LOWPASS 2, 1, 0
+ pavgw m1, m0
+ punpcklwd m1, m2
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+
+ movh [dstq+stride3q ], m1
+ movhps [dstq+strideq*1], m1
+ movhlps m2, m2
+ PALIGNR m2, m1, 4, m0
+ movh [dstq+strideq*2], m2
+ movhps [dstq+strideq*0], m2
+ RET
+
+cglobal vp9_ipred_hd_8x8_16, 4, 4, 5, dst, stride, l, a
+ mova m0, [lq]
+ movu m1, [aq-2]
+ PALIGNR m2, m1, m0, 2, m3
+ PALIGNR m3, m1, m0, 4, m4
+ LOWPASS 3, 2, 0
+ pavgw m2, m0
+ SBUTTERFLY wd, 2, 3, 0
+ psrldq m0, m1, 2
+ psrldq m4, m1, 4
+ LOWPASS 1, 0, 4
+ DEFINE_ARGS dst8, mstride, cnt
+ lea dst8q, [dst8q+mstrideq*8]
+ neg mstrideq
+ mov cntd, 4
+
+.loop:
+ add dst8q, mstrideq
+ mova [dst8q+mstrideq*0], m2
+ mova [dst8q+mstrideq*4], m3
+%if cpuflag(avx)
+ vpalignr m2, m3, m2, 4
+ vpalignr m3, m1, m3, 4
+%else
+ PALIGNR m0, m3, m2, 4, m4
+ mova m2, m0
+ PALIGNR m0, m1, m3, 4, m4
+ mova m3, m0
+%endif
+ psrldq m1, 4
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_hd_16x16_16, 4, 4, 8, dst, stride, l, a
+ mova m2, [lq]
+ movu m1, [lq+2]
+ movu m0, [lq+4]
+ LOWPASS 0, 1, 2
+ pavgw m1, m2
+ mova m4, [lq+mmsize]
+ movu m5, [aq-2]
+ PALIGNR m3, m5, m4, 2, m6
+ PALIGNR m2, m5, m4, 4, m6
+ LOWPASS 2, 3, 4
+ pavgw m3, m4
+ SBUTTERFLY wd, 1, 0, 4
+ SBUTTERFLY wd, 3, 2, 4
+ mova m6, [aq]
+ movu m4, [aq+2]
+ LOWPASS 4, 6, 5
+ movu m5, [aq+mmsize-2]
+ psrldq m6, m5, 2
+ psrldq m7, m5, 4
+ LOWPASS 5, 6, 7
+ DEFINE_ARGS dst, mstride, mstride3, cnt
+ lea dstq, [dstq+mstrideq*8]
+ lea dstq, [dstq+mstrideq*8]
+ neg mstrideq
+ lea mstride3q, [mstrideq*3]
+ mov cntd, 4
+
+.loop:
+ add dstq, mstrideq
+ mova [dstq+mstride3q*4+ 0], m2
+ mova [dstq+mstride3q*4+16], m4
+ mova [dstq+mstrideq *8+ 0], m3
+ mova [dstq+mstrideq *8+16], m2
+ mova [dstq+mstrideq *4+ 0], m0
+ mova [dstq+mstrideq *4+16], m3
+ mova [dstq+mstrideq *0+ 0], m1
+ mova [dstq+mstrideq *0+16], m0
+%if cpuflag(avx)
+ vpalignr m1, m0, m1, 4
+ vpalignr m0, m3, m0, 4
+ vpalignr m3, m2, m3, 4
+ vpalignr m2, m4, m2, 4
+ vpalignr m4, m5, m4, 4
+%else
+ PALIGNR m6, m0, m1, 4, m7
+ mova m1, m6
+ PALIGNR m6, m3, m0, 4, m7
+ mova m0, m6
+ PALIGNR m6, m2, m3, 4, m7
+ mova m3, m6
+ PALIGNR m6, m4, m2, 4, m7
+ mova m2, m6
+ PALIGNR m6, m5, m4, 4, m7
+ mova m4, m6
+%endif
+ psrldq m5, 4
+ dec cntd
+ jg .loop
+ RET
+
+cglobal vp9_ipred_hd_32x32_16, 4, 4 + 3 * ARCH_X86_64, 14, \
+ 10 * -mmsize * ARCH_X86_32, dst, stride, l, a
+ mova m2, [lq+mmsize*0+0]
+ movu m1, [lq+mmsize*0+2]
+ movu m0, [lq+mmsize*0+4]
+ LOWPASS 0, 1, 2
+ pavgw m1, m2
+ SBUTTERFLY wd, 1, 0, 2
+ mova m4, [lq+mmsize*1+0]
+ movu m3, [lq+mmsize*1+2]
+ movu m2, [lq+mmsize*1+4]
+ LOWPASS 2, 3, 4
+ pavgw m3, m4
+ SBUTTERFLY wd, 3, 2, 4
+ SCRATCH 0, 8, rsp+0*mmsize
+ SCRATCH 1, 9, rsp+1*mmsize
+ SCRATCH 2, 10, rsp+2*mmsize
+ SCRATCH 3, 11, rsp+3*mmsize
+ mova m6, [lq+mmsize*2+0]
+ movu m5, [lq+mmsize*2+2]
+ movu m4, [lq+mmsize*2+4]
+ LOWPASS 4, 5, 6
+ pavgw m5, m6
+ SBUTTERFLY wd, 5, 4, 6
+ mova m0, [lq+mmsize*3+0]
+ movu m1, [aq+mmsize*0-2]
+ PALIGNR m7, m1, m0, 2, m2
+ PALIGNR m6, m1, m0, 4, m2
+ LOWPASS 6, 7, 0
+ pavgw m7, m0
+ SBUTTERFLY wd, 7, 6, 0
+ mova m2, [aq+mmsize*0+0]
+ movu m0, [aq+mmsize*0+2]
+ LOWPASS 0, 2, 1
+ movu m1, [aq+mmsize*1-2]
+ mova m2, [aq+mmsize*1+0]
+ movu m3, [aq+mmsize*1+2]
+ LOWPASS 1, 2, 3
+ SCRATCH 6, 12, rsp+6*mmsize
+ SCRATCH 7, 13, rsp+7*mmsize
+ movu m2, [aq+mmsize*2-2]
+ mova m3, [aq+mmsize*2+0]
+ movu m6, [aq+mmsize*2+2]
+ LOWPASS 2, 3, 6
+ movu m3, [aq+mmsize*3-2]
+ psrldq m6, m3, 2
+ psrldq m7, m3, 4
+ LOWPASS 3, 6, 7
+ UNSCRATCH 6, 12, rsp+6*mmsize
+ UNSCRATCH 7, 13, rsp+7*mmsize
+%if ARCH_X86_32
+ mova [rsp+4*mmsize], m4
+ mova [rsp+5*mmsize], m5
+ ; we already backed up m6/m7 earlier on x86-32 in SCRATCH, so we don't need
+ ; to do it again here
+%endif
+ DEFINE_ARGS dst, stride, cnt, stride3, stride4, stride20, stride28
+ mov cntd, 4
+ lea stride3q, [strideq*3]
+%if ARCH_X86_64
+ lea stride4q, [strideq*4]
+ lea stride28q, [stride4q*8]
+ lea stride20q, [stride4q*5]
+ sub stride28q, stride4q
+%endif
+ add dstq, stride3q
+
+ ; x86-32 doesn't have enough registers, so on that platform we split the
+ ; loop in two; otherwise most of the loop would be spent (un)scratching
+.loop:
+%if ARCH_X86_64
+ mova [dstq+stride28q + 0], m9
+ mova [dstq+stride28q +16], m8
+ mova [dstq+stride28q +32], m11
+ mova [dstq+stride28q +48], m10
+ mova [dstq+stride3q*8+ 0], m8
+ mova [dstq+stride3q*8+16], m11
+ mova [dstq+stride3q*8+32], m10
+ mova [dstq+stride3q*8+48], m5
+ mova [dstq+stride20q + 0], m11
+ mova [dstq+stride20q +16], m10
+ mova [dstq+stride20q +32], m5
+ mova [dstq+stride20q +48], m4
+ mova [dstq+stride4q*4+ 0], m10
+ mova [dstq+stride4q*4+16], m5
+ mova [dstq+stride4q*4+32], m4
+ mova [dstq+stride4q*4+48], m7
+%endif
+ mova [dstq+stride3q*4+ 0], m5
+ mova [dstq+stride3q*4+16], m4
+ mova [dstq+stride3q*4+32], m7
+ mova [dstq+stride3q*4+48], m6
+ mova [dstq+strideq* 8+ 0], m4
+ mova [dstq+strideq* 8+16], m7
+ mova [dstq+strideq* 8+32], m6
+ mova [dstq+strideq* 8+48], m0
+ mova [dstq+strideq* 4+ 0], m7
+ mova [dstq+strideq* 4+16], m6
+ mova [dstq+strideq* 4+32], m0
+ mova [dstq+strideq* 4+48], m1
+ mova [dstq+strideq* 0+ 0], m6
+ mova [dstq+strideq* 0+16], m0
+ mova [dstq+strideq* 0+32], m1
+ mova [dstq+strideq* 0+48], m2
+ sub dstq, strideq
+%if cpuflag(avx)
+%if ARCH_X86_64
+ vpalignr m9, m8, m9, 4
+ vpalignr m8, m11, m8, 4
+ vpalignr m11, m10, m11, 4
+ vpalignr m10, m5, m10, 4
+%endif
+ vpalignr m5, m4, m5, 4
+ vpalignr m4, m7, m4, 4
+ vpalignr m7, m6, m7, 4
+ vpalignr m6, m0, m6, 4
+ vpalignr m0, m1, m0, 4
+ vpalignr m1, m2, m1, 4
+ vpalignr m2, m3, m2, 4
+%else
+%if ARCH_X86_64
+ PALIGNR m12, m8, m9, 4, m13
+ mova m9, m12
+ PALIGNR m12, m11, m8, 4, m13
+ mova m8, m12
+ PALIGNR m12, m10, m11, 4, m13
+ mova m11, m12
+ PALIGNR m12, m5, m10, 4, m13
+ mova m10, m12
+%endif
+ SCRATCH 3, 12, rsp+8*mmsize, sh
+%if notcpuflag(ssse3)
+ SCRATCH 2, 13, rsp+9*mmsize
+%endif
+ PALIGNR m3, m4, m5, 4, m2
+ mova m5, m3
+ PALIGNR m3, m7, m4, 4, m2
+ mova m4, m3
+ PALIGNR m3, m6, m7, 4, m2
+ mova m7, m3
+ PALIGNR m3, m0, m6, 4, m2
+ mova m6, m3
+ PALIGNR m3, m1, m0, 4, m2
+ mova m0, m3
+%if notcpuflag(ssse3)
+ UNSCRATCH 2, 13, rsp+9*mmsize
+ SCRATCH 0, 13, rsp+9*mmsize
+%endif
+ PALIGNR m3, m2, m1, 4, m0
+ mova m1, m3
+ PALIGNR m3, reg_sh, m2, 4, m0
+ mova m2, m3
+%if notcpuflag(ssse3)
+ UNSCRATCH 0, 13, rsp+9*mmsize
+%endif
+ UNSCRATCH 3, 12, rsp+8*mmsize, sh
+%endif
+ psrldq m3, 4
+ dec cntd
+ jg .loop
+
+%if ARCH_X86_32
+ UNSCRATCH 0, 8, rsp+0*mmsize
+ UNSCRATCH 1, 9, rsp+1*mmsize
+ UNSCRATCH 2, 10, rsp+2*mmsize
+ UNSCRATCH 3, 11, rsp+3*mmsize
+ mova m4, [rsp+4*mmsize]
+ mova m5, [rsp+5*mmsize]
+ mova m6, [rsp+6*mmsize]
+ mova m7, [rsp+7*mmsize]
+ DEFINE_ARGS dst, stride, stride5, stride3
+ lea stride5q, [strideq*5]
+ lea dstq, [dstq+stride5q*4]
+ DEFINE_ARGS dst, stride, cnt, stride3
+ mov cntd, 4
+.loop_2:
+ mova [dstq+stride3q*4+ 0], m1
+ mova [dstq+stride3q*4+16], m0
+ mova [dstq+stride3q*4+32], m3
+ mova [dstq+stride3q*4+48], m2
+ mova [dstq+strideq* 8+ 0], m0
+ mova [dstq+strideq* 8+16], m3
+ mova [dstq+strideq* 8+32], m2
+ mova [dstq+strideq* 8+48], m5
+ mova [dstq+strideq* 4+ 0], m3
+ mova [dstq+strideq* 4+16], m2
+ mova [dstq+strideq* 4+32], m5
+ mova [dstq+strideq* 4+48], m4
+ mova [dstq+strideq* 0+ 0], m2
+ mova [dstq+strideq* 0+16], m5
+ mova [dstq+strideq* 0+32], m4
+ mova [dstq+strideq* 0+48], m7
+ sub dstq, strideq
+%if cpuflag(avx)
+ vpalignr m1, m0, m1, 4
+ vpalignr m0, m3, m0, 4
+ vpalignr m3, m2, m3, 4
+ vpalignr m2, m5, m2, 4
+ vpalignr m5, m4, m5, 4
+ vpalignr m4, m7, m4, 4
+ vpalignr m7, m6, m7, 4
+%else
+ SCRATCH 6, 12, rsp+8*mmsize, sh
+%if notcpuflag(ssse3)
+ SCRATCH 7, 13, rsp+9*mmsize
+%endif
+ PALIGNR m6, m0, m1, 4, m7
+ mova m1, m6
+ PALIGNR m6, m3, m0, 4, m7
+ mova m0, m6
+ PALIGNR m6, m2, m3, 4, m7
+ mova m3, m6
+ PALIGNR m6, m5, m2, 4, m7
+ mova m2, m6
+ PALIGNR m6, m4, m5, 4, m7
+ mova m5, m6
+%if notcpuflag(ssse3)
+ UNSCRATCH 7, 13, rsp+9*mmsize
+ SCRATCH 5, 13, rsp+9*mmsize
+%endif
+ PALIGNR m6, m7, m4, 4, m5
+ mova m4, m6
+ PALIGNR m6, reg_sh, m7, 4, m5
+ mova m7, m6
+%if notcpuflag(ssse3)
+ UNSCRATCH 5, 13, rsp+9*mmsize
+%endif
+ UNSCRATCH 6, 12, rsp+8*mmsize, sh
+%endif
+ psrldq m6, 4
+ dec cntd
+ jg .loop_2
+%endif
+ RET
+%endmacro
+
+INIT_XMM sse2
+HD_FUNCS
+INIT_XMM ssse3
+HD_FUNCS
+INIT_XMM avx
+HD_FUNCS
diff --git a/media/ffvpx/libavcodec/x86/vp9itxfm.asm b/media/ffvpx/libavcodec/x86/vp9itxfm.asm
new file mode 100644
index 0000000000..2c63fe514a
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9itxfm.asm
@@ -0,0 +1,3197 @@
+;******************************************************************************
+;* VP9 IDCT SIMD optimizations
+;*
+;* Copyright (C) 2013 Clément Bœsch <u pkh me>
+;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+%include "vp9itxfm_template.asm"
+
+SECTION_RODATA 32
+
+%macro VP9_IDCT_COEFFS 2-3 0
+const pw_m%1_%2
+times 8 dw -%1, %2
+const pw_%2_%1
+times 8 dw %2, %1
+
+%if %3 == 1
+const pw_m%2_m%1
+times 8 dw -%2, -%1
+%if %1 != %2
+const pw_m%2_%1
+times 8 dw -%2, %1
+const pw_%1_%2
+times 8 dw %1, %2
+%endif
+%endif
+
+%if %1 < 11585
+pw_m%1x2: times 16 dw -%1*2
+%elif %1 > 11585
+pw_%1x2: times 16 dw %1*2
+%else
+const pw_%1x2
+times 16 dw %1*2
+%endif
+
+%if %2 != %1
+pw_%2x2: times 16 dw %2*2
+%endif
+%endmacro
+
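+; each VP9_IDCT_COEFFS pair below is
+; (round(16384*cos(k*pi/64)), round(16384*sin(k*pi/64))) for the angle used by
+; that butterfly, e.g. 11585 = cos(pi/4) and 15137/6270 = cos/sin(pi/8), in Q14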
+VP9_IDCT_COEFFS 16364, 804
+VP9_IDCT_COEFFS 16305, 1606
+VP9_IDCT_COEFFS 16069, 3196, 1
+VP9_IDCT_COEFFS 15893, 3981
+VP9_IDCT_COEFFS 15137, 6270, 1
+VP9_IDCT_COEFFS 14811, 7005
+VP9_IDCT_COEFFS 14449, 7723
+VP9_IDCT_COEFFS 13160, 9760
+VP9_IDCT_COEFFS 11585, 11585, 1
+VP9_IDCT_COEFFS 11003, 12140
+VP9_IDCT_COEFFS 10394, 12665
+VP9_IDCT_COEFFS 9102, 13623, 1
+VP9_IDCT_COEFFS 8423, 14053
+VP9_IDCT_COEFFS 5520, 15426
+VP9_IDCT_COEFFS 4756, 15679
+VP9_IDCT_COEFFS 2404, 16207
+
+const pw_5283_13377
+times 4 dw 5283, 13377
+const pw_9929_13377
+times 4 dw 9929, 13377
+const pw_15212_m13377
+times 4 dw 15212, -13377
+const pw_15212_9929
+times 4 dw 15212, 9929
+const pw_m5283_m15212
+times 4 dw -5283, -15212
+const pw_13377x2
+times 8 dw 13377*2
+const pw_m13377_13377
+times 4 dw -13377, 13377
+const pw_13377_0
+times 4 dw 13377, 0
+
+cextern pw_8
+cextern pw_16
+cextern pw_32
+cextern pw_512
+cextern pw_1024
+cextern pw_2048
+cextern pw_m1
+cextern pd_8192
+
+SECTION .text
+
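+; interleaves the two inputs and takes their pmaddwd products against the
+; (-mul1, mul2) and (mul2, mul1) constant pairs, leaving unrounded 32-bit
+; halves in dst1-dst4 for VP9_RND_SH_SUMSUB_BA to combine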
+%macro VP9_UNPACK_MULSUB_2D_4X 6 ; dst1 [src1], dst2 [src2], dst3, dst4, mul1, mul2
+ punpckhwd m%4, m%2, m%1
+ punpcklwd m%2, m%1
+ pmaddwd m%3, m%4, [pw_m%5_%6]
+ pmaddwd m%4, [pw_%6_%5]
+ pmaddwd m%1, m%2, [pw_m%5_%6]
+ pmaddwd m%2, [pw_%6_%5]
+%endmacro
+
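+; sums/differences the dword pairs produced by VP9_UNPACK_MULSUB_2D_4X, adds
+; the rounding constant, shifts right by 14 and packs back down to signed words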
+%macro VP9_RND_SH_SUMSUB_BA 6 ; dst1 [src1], dst2 [src2], src3, src4, tmp, round
+ SUMSUB_BA d, %1, %2, %5
+ SUMSUB_BA d, %3, %4, %5
+ paddd m%1, %6
+ paddd m%2, %6
+ paddd m%3, %6
+ paddd m%4, %6
+ psrad m%1, 14
+ psrad m%2, 14
+ psrad m%3, 14
+ psrad m%4, 14
+ packssdw m%1, m%3
+ packssdw m%2, m%4
+%endmacro
+
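+; adds two rows of 16-bit residuals to the destination pixels and stores them
+; back with unsigned saturation; the mmsize branches cover mmx/xmm/ymm widths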
+%macro VP9_STORE_2X 5-6 dstq ; reg1, reg2, tmp1, tmp2, zero, dst
+%if mmsize == 32
+ pmovzxbw m%3, [%6]
+ pmovzxbw m%4, [%6+strideq]
+%else
+ movh m%3, [%6]
+ movh m%4, [%6+strideq]
+ punpcklbw m%3, m%5
+ punpcklbw m%4, m%5
+%endif
+ paddw m%3, m%1
+ paddw m%4, m%2
+%if mmsize == 32
+ packuswb m%3, m%4
+ ; ymm packuswb packs within each 128-bit lane, so restore row order with vpermq
+ vpermq m%3, m%3, q3120
+ mova [%6], xm%3
+ vextracti128 [%6+strideq], m%3, 1
+%elif mmsize == 16
+ packuswb m%3, m%4
+ movh [%6], m%3
+ movhps [%6+strideq], m%3
+%else
+ packuswb m%3, m%5
+ packuswb m%4, m%5
+ movh [%6], m%3
+ movh [%6+strideq], m%4
+%endif
+%endmacro
+
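+; clears an nnzcpl x nnzcpl block of int16 coefficients (nnzcpl*2 bytes per row
+; at the given stride), used to zero the residual block after the idct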
+%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
+%assign %%y 0
+%rep %3
+%assign %%x 0
+%rep %3*2/mmsize
+ mova [%1+%%y+%%x], %4
+%assign %%x (%%x+mmsize)
+%endrep
+%assign %%y (%%y+%2)
+%endrep
+%endmacro
+
+;-------------------------------------------------------------------------------------------
+; void vp9_iwht_iwht_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+;-------------------------------------------------------------------------------------------
+
+INIT_MMX mmx
+cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
+ mova m0, [blockq+0*8]
+ mova m1, [blockq+1*8]
+ mova m2, [blockq+2*8]
+ mova m3, [blockq+3*8]
+ psraw m0, 2
+ psraw m1, 2
+ psraw m2, 2
+ psraw m3, 2
+
+ VP9_IWHT4_1D
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+ VP9_IWHT4_1D
+
+ pxor m4, m4
+ VP9_STORE_2X 0, 1, 5, 6, 4
+ lea dstq, [dstq+strideq*2]
+ VP9_STORE_2X 2, 3, 5, 6, 4
+ ZERO_BLOCK blockq, 8, 4, m4
+ RET
+
+;-------------------------------------------------------------------------------------------
+; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+;-------------------------------------------------------------------------------------------
+
+; 2x2 top left corner
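+; (with only the top-left 2x2 coefficients nonzero, IN(2)/IN(3) are zero and the
+; IDCT4 rotations collapse to single pmulhrsw scalings by the doubled constants)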
+%macro VP9_IDCT4_2x2_1D 0
+ pmulhrsw m0, m5 ; m0=t1
+ mova m2, m0 ; m2=t0
+ mova m3, m1
+ pmulhrsw m1, m6 ; m1=t2
+ pmulhrsw m3, m7 ; m3=t3
+ VP9_IDCT4_1D_FINALIZE
+%endmacro
+
+%macro VP9_IDCT4_WRITEOUT 0
+%if cpuflag(ssse3)
+ mova m5, [pw_2048]
+ pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
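+ ; (pmulhrsw computes (a*b + (1<<14)) >> 15, and 2048 = 1<<11, hence the identity)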
+ pmulhrsw m1, m5
+%else
+ mova m5, [pw_8]
+ paddw m0, m5
+ paddw m1, m5
+ psraw m0, 4
+ psraw m1, 4
+%endif
+ VP9_STORE_2X 0, 1, 6, 7, 4
+ lea dstq, [dstq+2*strideq]
+%if cpuflag(ssse3)
+ pmulhrsw m2, m5
+ pmulhrsw m3, m5
+%else
+ paddw m2, m5
+ paddw m3, m5
+ psraw m2, 4
+ psraw m3, 4
+%endif
+ VP9_STORE_2X 2, 3, 6, 7, 4
+%endmacro
+
+%macro IDCT_4x4_FN 1
+INIT_MMX %1
+cglobal vp9_idct_idct_4x4_add, 4, 4, 0, dst, stride, block, eob
+
+%if cpuflag(ssse3)
+ cmp eobd, 4 ; 2x2 or smaller
+ jg .idctfull
+
+ cmp eobd, 1 ; faster path for when only DC is set
+ jne .idct2x2
+%else
+ cmp eobd, 1
+ jg .idctfull
+%endif
+
+%if cpuflag(ssse3)
+ movd m0, [blockq]
+ mova m5, [pw_11585x2]
+ pmulhrsw m0, m5
+ pmulhrsw m0, m5
+%else
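+ ; scalar equivalent of the two pmulhrsw by pw_11585x2 above: each imul/add/sar
+ ; computes round(x*11585/16384), and the last step also folds in the (x+8)>>4
+ ; rounding applied when storing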
+ DEFINE_ARGS dst, stride, block, coef
+ movsx coefd, word [blockq]
+ imul coefd, 11585
+ add coefd, 8192
+ sar coefd, 14
+ imul coefd, 11585
+ add coefd, (8 << 14) + 8192
+ sar coefd, 14 + 4
+ movd m0, coefd
+%endif
+ pshufw m0, m0, 0
+ pxor m4, m4
+ movh [blockq], m4
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
+%endif
+ VP9_STORE_2X 0, 0, 6, 7, 4
+ lea dstq, [dstq+2*strideq]
+ VP9_STORE_2X 0, 0, 6, 7, 4
+ RET
+
+%if cpuflag(ssse3)
+; faster path for when only top left 2x2 block is set
+.idct2x2:
+ movd m0, [blockq+0]
+ movd m1, [blockq+8]
+ mova m5, [pw_11585x2]
+ mova m6, [pw_6270x2]
+ mova m7, [pw_15137x2]
+ VP9_IDCT4_2x2_1D
+ ; partial 2x4 transpose
+ punpcklwd m0, m1
+ punpcklwd m2, m3
+ SBUTTERFLY dq, 0, 2, 1
+ SWAP 1, 2
+ VP9_IDCT4_2x2_1D
+ pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
+ movh [blockq+ 0], m4
+ movh [blockq+ 8], m4
+ VP9_IDCT4_WRITEOUT
+ RET
+%endif
+
+.idctfull: ; generic full 4x4 idct/idct
+ mova m0, [blockq+ 0]
+ mova m1, [blockq+ 8]
+ mova m2, [blockq+16]
+ mova m3, [blockq+24]
+%if cpuflag(ssse3)
+ mova m6, [pw_11585x2]
+%endif
+ mova m7, [pd_8192] ; rounding
+ VP9_IDCT4_1D
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+ VP9_IDCT4_1D
+ pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
+ mova [blockq+ 0], m4
+ mova [blockq+ 8], m4
+ mova [blockq+16], m4
+ mova [blockq+24], m4
+ VP9_IDCT4_WRITEOUT
+ RET
+%endmacro
+
+IDCT_4x4_FN mmxext
+IDCT_4x4_FN ssse3
+
+;-------------------------------------------------------------------------------------------
+; void vp9_iadst_iadst_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+;-------------------------------------------------------------------------------------------
+
+%macro IADST4_FN 5
+INIT_MMX %5
+cglobal vp9_%1_%3_4x4_add, 3, 3, 0, dst, stride, block, eob
+%if WIN64 && notcpuflag(ssse3)
+ WIN64_SPILL_XMM 8
+%endif
+ movdqa xmm5, [pd_8192]
+ mova m0, [blockq+ 0]
+ mova m1, [blockq+ 8]
+ mova m2, [blockq+16]
+ mova m3, [blockq+24]
+%if cpuflag(ssse3)
+ mova m6, [pw_11585x2]
+%endif
+%ifnidn %1%3, iadstiadst
+ movdq2q m7, xmm5
+%endif
+ VP9_%2_1D
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+ VP9_%4_1D
+ pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
+ mova [blockq+ 0], m4
+ mova [blockq+ 8], m4
+ mova [blockq+16], m4
+ mova [blockq+24], m4
+ VP9_IDCT4_WRITEOUT
+ RET
+%endmacro
+
+IADST4_FN idct, IDCT4, iadst, IADST4, sse2
+IADST4_FN iadst, IADST4, idct, IDCT4, sse2
+IADST4_FN iadst, IADST4, iadst, IADST4, sse2
+
+IADST4_FN idct, IDCT4, iadst, IADST4, ssse3
+IADST4_FN iadst, IADST4, idct, IDCT4, ssse3
+IADST4_FN iadst, IADST4, iadst, IADST4, ssse3
+
+%macro SCRATCH 3
+%if ARCH_X86_64
+ SWAP %1, %2
+%else
+ mova [%3], m%1
+%endif
+%endmacro
+
+%macro UNSCRATCH 3
+%if ARCH_X86_64
+ SWAP %1, %2
+%else
+ mova m%1, [%3]
+%endif
+%endmacro
+
+;-------------------------------------------------------------------------------------------
+; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+;-------------------------------------------------------------------------------------------
+
+%macro VP9_IDCT8_1D_FINALIZE 0
+ SUMSUB_BA w, 3, 6, 5 ; m3=t0+t7, m6=t0-t7
+ SUMSUB_BA w, 1, 2, 5 ; m1=t1+t6, m2=t1-t6
+ SUMSUB_BA w, 7, 0, 5 ; m7=t2+t5, m0=t2-t5
+
+ UNSCRATCH 5, 8, blockq+ 0
+ SCRATCH 2, 8, blockq+ 0
+
+ SUMSUB_BA w, 5, 4, 2 ; m5=t3+t4, m4=t3-t4
+ SWAP 7, 6, 2
+ SWAP 3, 5, 0
+
+%if ARCH_X86_64
+ SWAP 6, 8
+%endif
+%endmacro
+
+; x86-32
+; - in: m0/m4 is in mem
+; - out: m6 is in mem
+; x86-64:
+; - everything is in registers (m0-7)
+%macro VP9_IDCT8_1D 0
+%if ARCH_X86_64
+ SWAP 0, 8
+ SWAP 4, 9
+%endif
+
+ VP9_UNPACK_MULSUB_2W_4X 5, 3, 9102, 13623, D_8192_REG, 0, 4 ; m5=t5a, m3=t6a
+ VP9_UNPACK_MULSUB_2W_4X 1, 7, 16069, 3196, D_8192_REG, 0, 4 ; m1=t4a, m7=t7a
+ SUMSUB_BA w, 5, 1, 0 ; m5=t4a+t5a (t4), m1=t4a-t5a (t5a)
+ SUMSUB_BA w, 3, 7, 0 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
+%if cpuflag(ssse3)
+ SUMSUB_BA w, 1, 7, 0 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
+ pmulhrsw m1, W_11585x2_REG ; m1=t6
+ pmulhrsw m7, W_11585x2_REG ; m7=t5
+%else
+ VP9_UNPACK_MULSUB_2W_4X 7, 1, 11585, 11585, D_8192_REG, 0, 4
+%endif
+ VP9_UNPACK_MULSUB_2W_4X 2, 6, 15137, 6270, D_8192_REG, 0, 4 ; m2=t2a, m6=t3a
+
+ UNSCRATCH 0, 8, blockq+ 0 ; IN(0)
+ UNSCRATCH 4, 9, blockq+64 ; IN(4)
+ SCRATCH 5, 8, blockq+ 0
+
+%if cpuflag(ssse3)
+ SUMSUB_BA w, 4, 0, 5 ; m4=IN(0)+IN(4) m0=IN(0)-IN(4)
+ pmulhrsw m4, W_11585x2_REG ; m4=t0a
+ pmulhrsw m0, W_11585x2_REG ; m0=t1a
+%else
+ SCRATCH 7, 9, blockq+64
+ VP9_UNPACK_MULSUB_2W_4X 0, 4, 11585, 11585, D_8192_REG, 5, 7
+ UNSCRATCH 7, 9, blockq+64
+%endif
+ SUMSUB_BA w, 6, 4, 5 ; m6=t0a+t3a (t0), m4=t0a-t3a (t3)
+ SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
+
+ VP9_IDCT8_1D_FINALIZE
+%endmacro
+
+%macro VP9_IDCT8_4x4_1D 0
+ pmulhrsw m0, W_11585x2_REG ; m0=t1a/t0a
+ pmulhrsw m6, m2, [pw_15137x2] ; m6=t3a
+ pmulhrsw m2, [pw_6270x2] ; m2=t2a
+ pmulhrsw m7, m1, [pw_16069x2] ; m7=t7a
+ pmulhrsw m1, [pw_3196x2] ; m1=t4a
+ pmulhrsw m5, m3, [pw_m9102x2] ; m5=t5a
+ pmulhrsw m3, [pw_13623x2] ; m3=t6a
+ SUMSUB_BA w, 5, 1, 4 ; m1=t4a+t5a (t4), m5=t4a-t5a (t5a)
+ SUMSUB_BA w, 3, 7, 4 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
+ SUMSUB_BA w, 1, 7, 4 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
+ pmulhrsw m1, W_11585x2_REG ; m1=t6
+ pmulhrsw m7, W_11585x2_REG ; m7=t5
+ psubw m4, m0, m6 ; m4=t0a-t3a (t3)
+ paddw m6, m0 ; m6=t0a+t3a (t0)
+ SCRATCH 5, 8, blockq+ 0
+ SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
+ VP9_IDCT8_1D_FINALIZE
+%endmacro
+
+%macro VP9_IDCT8_2x2_1D 1
+ pmulhrsw m0, W_11585x2_REG ; m0=t0
+ pmulhrsw m3, m1, W_16069x2_REG ; m3=t7
+ pmulhrsw m1, W_3196x2_REG ; m1=t4
+ psubw m7, m3, m1 ; t5 = t7a - t4a
+ paddw m5, m3, m1 ; t6 = t7a + t4a
+ pmulhrsw m7, W_11585x2_REG ; m7=t5
+ pmulhrsw m5, W_11585x2_REG ; m5=t6
+ SWAP 5, 1
+ ; merged VP9_IDCT8_1D_FINALIZE to make register-sharing w/ avx easier
+ psubw m6, m0, m3 ; m6=t0-t7
+ paddw m3, m0 ; m3=t0+t7
+ psubw m2, m0, m1 ; m2=t1-t6
+ paddw m1, m0 ; m1=t1+t6
+%if %1 == 1
+ punpcklwd m3, m1
+%define SCRATCH_REG 1
+%elif ARCH_X86_32
+ mova [blockq+ 0], m2
+%define SCRATCH_REG 2
+%else
+%define SCRATCH_REG 8
+%endif
+ psubw m4, m0, m5 ; m4=t3-t4
+ paddw m5, m0 ; m5=t3+t4
+ SUMSUB_BA w, 7, 0, SCRATCH_REG ; m7=t2+t5, m0=t2-t5
+ SWAP 7, 6, 2
+ SWAP 3, 5, 0
+%undef SCRATCH_REG
+%endmacro
+
+%macro VP9_IDCT8_WRITEx2 6-8 5 ; line1, line2, tmp1, tmp2, zero, pw_1024/pw_16, shift
+%if cpuflag(ssse3)
+ pmulhrsw m%1, %6 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
+ pmulhrsw m%2, %6
+%else
+ paddw m%1, %6
+ paddw m%2, %6
+ psraw m%1, %7
+ psraw m%2, %7
+%endif
+%if %0 <= 7
+ VP9_STORE_2X %1, %2, %3, %4, %5
+%else
+ VP9_STORE_2X %1, %2, %3, %4, %5, %8
+%endif
+%endmacro
+
+; x86-32:
+; - m6 is in mem
+; x86-64:
+; - m8 holds m6 (SWAP)
+; m6 holds zero
+%macro VP9_IDCT8_WRITEOUT 0
+%if ARCH_X86_64
+%if cpuflag(ssse3)
+ mova m9, [pw_1024]
+%else
+ mova m9, [pw_16]
+%endif
+%define ROUND_REG m9
+%else
+%if cpuflag(ssse3)
+%define ROUND_REG [pw_1024]
+%else
+%define ROUND_REG [pw_16]
+%endif
+%endif
+ SCRATCH 5, 10, blockq+16
+ SCRATCH 7, 11, blockq+32
+ VP9_IDCT8_WRITEx2 0, 1, 5, 7, 6, ROUND_REG
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 2, 3, 5, 7, 6, ROUND_REG
+ lea dstq, [dstq+2*strideq]
+ UNSCRATCH 5, 10, blockq+16
+ UNSCRATCH 7, 11, blockq+32
+ VP9_IDCT8_WRITEx2 4, 5, 0, 1, 6, ROUND_REG
+ lea dstq, [dstq+2*strideq]
+ UNSCRATCH 5, 8, blockq+ 0
+ VP9_IDCT8_WRITEx2 5, 7, 0, 1, 6, ROUND_REG
+
+%undef ROUND_REG
+%endmacro
+
+%macro VP9_IDCT_IDCT_8x8_ADD_XMM 2
+INIT_XMM %1
+cglobal vp9_idct_idct_8x8_add, 4, 4, %2, dst, stride, block, eob
+
+%if cpuflag(ssse3)
+%if ARCH_X86_64
+ mova m12, [pw_11585x2] ; often used
+%define W_11585x2_REG m12
+%else
+%define W_11585x2_REG [pw_11585x2]
+%endif
+
+ cmp eobd, 12 ; top left half or less
+ jg .idctfull
+
+ cmp eobd, 3 ; top left corner or less
+ jg .idcthalf
+
+ cmp eobd, 1 ; faster path for when only DC is set
+ jne .idcttopleftcorner
+%else
+ cmp eobd, 1
+ jg .idctfull
+%endif
+
+%if cpuflag(ssse3)
+ movd m0, [blockq]
+ pmulhrsw m0, W_11585x2_REG
+ pmulhrsw m0, W_11585x2_REG
+%else
+ DEFINE_ARGS dst, stride, block, coef
+ movsx coefd, word [blockq]
+ imul coefd, 11585
+ add coefd, 8192
+ sar coefd, 14
+ imul coefd, 11585
+ add coefd, (16 << 14) + 8192
+ sar coefd, 14 + 5
+ movd m0, coefd
+%endif
+ SPLATW m0, m0, 0
+ pxor m4, m4
+ movd [blockq], m4
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_1024] ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
+%endif
+%rep 3
+ VP9_STORE_2X 0, 0, 6, 7, 4
+ lea dstq, [dstq+2*strideq]
+%endrep
+ VP9_STORE_2X 0, 0, 6, 7, 4
+ RET
+
+%if cpuflag(ssse3)
+; faster path for when only the top-left corner is set (3 inputs: DC, the one to
+; the right of DC, and the one below DC). Note: this also works for a 2x2 block
+.idcttopleftcorner:
+ movd m0, [blockq+0]
+ movd m1, [blockq+16]
+%if ARCH_X86_64
+ mova m10, [pw_3196x2]
+ mova m11, [pw_16069x2]
+%define W_3196x2_REG m10
+%define W_16069x2_REG m11
+%else
+%define W_3196x2_REG [pw_3196x2]
+%define W_16069x2_REG [pw_16069x2]
+%endif
+ VP9_IDCT8_2x2_1D 1
+ ; partial 2x8 transpose
+ ; punpcklwd m0, m1 already done inside idct
+ punpcklwd m2, m3
+ punpcklwd m4, m5
+ punpcklwd m6, m7
+ punpckldq m0, m2
+ punpckldq m4, m6
+ SBUTTERFLY qdq, 0, 4, 1
+ SWAP 1, 4
+ VP9_IDCT8_2x2_1D 2
+%if ARCH_X86_64
+ SWAP 6, 8
+%endif
+ pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
+ VP9_IDCT8_WRITEOUT
+%if ARCH_X86_64
+ movd [blockq+ 0], m6
+ movd [blockq+16], m6
+%else
+ mova [blockq+ 0], m6
+ mova [blockq+16], m6
+ mova [blockq+32], m6
+%endif
+ RET
+
+.idcthalf:
+ movh m0, [blockq + 0]
+ movh m1, [blockq +16]
+ movh m2, [blockq +32]
+ movh m3, [blockq +48]
+ VP9_IDCT8_4x4_1D
+ ; partial 4x8 transpose
+%if ARCH_X86_32
+ mova m6, [blockq+ 0]
+%endif
+ punpcklwd m0, m1
+ punpcklwd m2, m3
+ punpcklwd m4, m5
+ punpcklwd m6, m7
+ SBUTTERFLY dq, 0, 2, 1
+ SBUTTERFLY dq, 4, 6, 5
+ SBUTTERFLY qdq, 0, 4, 1
+ SBUTTERFLY qdq, 2, 6, 5
+ SWAP 1, 4
+ SWAP 3, 6
+ VP9_IDCT8_4x4_1D
+%if ARCH_X86_64
+ SWAP 6, 8
+%endif
+ pxor m6, m6
+ VP9_IDCT8_WRITEOUT
+%if ARCH_X86_64
+ movh [blockq+ 0], m6
+ movh [blockq+16], m6
+ movh [blockq+32], m6
+%else
+ mova [blockq+ 0], m6
+ mova [blockq+16], m6
+ mova [blockq+32], m6
+%endif
+ movh [blockq+48], m6
+ RET
+%endif
+
+.idctfull: ; generic full 8x8 idct/idct
+%if ARCH_X86_64
+ mova m0, [blockq+ 0] ; IN(0)
+%endif
+ mova m1, [blockq+ 16] ; IN(1)
+ mova m2, [blockq+ 32] ; IN(2)
+ mova m3, [blockq+ 48] ; IN(3)
+%if ARCH_X86_64
+ mova m4, [blockq+ 64] ; IN(4)
+%endif
+ mova m5, [blockq+ 80] ; IN(5)
+ mova m6, [blockq+ 96] ; IN(6)
+ mova m7, [blockq+112] ; IN(7)
+%if ARCH_X86_64
+ mova m11, [pd_8192] ; rounding
+%define D_8192_REG m11
+%else
+%define D_8192_REG [pd_8192]
+%endif
+ VP9_IDCT8_1D
+%if ARCH_X86_64
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
+%else
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
+ mova [blockq+0], m0
+%endif
+ VP9_IDCT8_1D
+
+%if ARCH_X86_64
+ SWAP 6, 8
+%endif
+ pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
+ VP9_IDCT8_WRITEOUT
+ ZERO_BLOCK blockq, 16, 8, m6
+ RET
+%undef W_11585x2_REG
+%endmacro
+
+VP9_IDCT_IDCT_8x8_ADD_XMM sse2, 12
+VP9_IDCT_IDCT_8x8_ADD_XMM ssse3, 13
+VP9_IDCT_IDCT_8x8_ADD_XMM avx, 13
+
+;---------------------------------------------------------------------------------------------
+; void vp9_iadst_iadst_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+;---------------------------------------------------------------------------------------------
+
+; x86-32:
+; - in: m0/3/4/7 are in mem [blockq+N*16]
+; - out: m6 is in mem [blockq+0]
+; x86-64:
+; - everything is in registers
+%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/4/5/6/7
+%if ARCH_X86_64
+ SWAP 0, 8
+ SWAP 3, 9
+ SWAP 4, 10
+ SWAP 7, 11
+%endif
+
+ VP9_UNPACK_MULSUB_2D_4X 5, 2, 0, 3, 14449, 7723 ; m5/2=t3[d], m2/4=t2[d]
+ VP9_UNPACK_MULSUB_2D_4X 1, 6, 4, 7, 4756, 15679 ; m1/4=t7[d], m6/7=t6[d]
+ SCRATCH 4, 12, blockq+1*16
+ VP9_RND_SH_SUMSUB_BA 6, 2, 7, 3, 4, D_8192_REG ; m6=t2[w], m2=t6[w]
+ UNSCRATCH 4, 12, blockq+1*16
+ VP9_RND_SH_SUMSUB_BA 1, 5, 4, 0, 3, D_8192_REG ; m1=t3[w], m5=t7[w]
+
+ UNSCRATCH 0, 8, blockq+16*0
+ UNSCRATCH 3, 9, blockq+16*3
+ UNSCRATCH 4, 10, blockq+16*4
+ UNSCRATCH 7, 11, blockq+16*7
+ SCRATCH 1, 8, blockq+16*1
+ SCRATCH 2, 9, blockq+16*2
+ SCRATCH 5, 10, blockq+16*5
+ SCRATCH 6, 11, blockq+16*6
+
+ VP9_UNPACK_MULSUB_2D_4X 7, 0, 1, 2, 16305, 1606 ; m7/1=t1[d], m0/2=t0[d]
+ VP9_UNPACK_MULSUB_2D_4X 3, 4, 5, 6, 10394, 12665 ; m3/5=t5[d], m4/6=t4[d]
+ SCRATCH 1, 12, blockq+ 0*16
+ VP9_RND_SH_SUMSUB_BA 4, 0, 6, 2, 1, D_8192_REG ; m4=t0[w], m0=t4[w]
+ UNSCRATCH 1, 12, blockq+ 0*16
+ VP9_RND_SH_SUMSUB_BA 3, 7, 5, 1, 2, D_8192_REG ; m3=t1[w], m7=t5[w]
+
+ UNSCRATCH 2, 9, blockq+16*2
+ UNSCRATCH 5, 10, blockq+16*5
+ SCRATCH 3, 9, blockq+16*3
+ SCRATCH 4, 10, blockq+16*4
+
+ ; m4=t0, m3=t1, m6=t2, m1=t3, m0=t4, m7=t5, m2=t6, m5=t7
+
+ VP9_UNPACK_MULSUB_2D_4X 0, 7, 1, 3, 15137, 6270 ; m0/1=t5[d], m7/3=t4[d]
+ VP9_UNPACK_MULSUB_2D_4X 5, 2, 4, 6, 6270, 15137 ; m5/4=t6[d], m2/6=t7[d]
+ SCRATCH 1, 12, blockq+ 0*16
+ VP9_RND_SH_SUMSUB_BA 5, 7, 4, 3, 1, D_8192_REG
+ UNSCRATCH 1, 12, blockq+ 0*16
+ PSIGNW m5, W_M1_REG ; m5=out1[w], m7=t6[w]
+ VP9_RND_SH_SUMSUB_BA 2, 0, 6, 1, 3, D_8192_REG ; m2=out6[w], m0=t7[w]
+
+ UNSCRATCH 1, 8, blockq+16*1
+ UNSCRATCH 3, 9, blockq+16*3
+ UNSCRATCH 4, 10, blockq+16*4
+ UNSCRATCH 6, 11, blockq+16*6
+ SCRATCH 2, 8, blockq+16*0
+
+ SUMSUB_BA w, 6, 4, 2 ; m6=out0[w], m4=t2[w]
+ SUMSUB_BA w, 1, 3, 2
+ PSIGNW m1, W_M1_REG ; m1=out7[w], m3=t3[w]
+
+ ; m6=out0, m5=out1, m4=t2, m3=t3, m7=t6, m0=t7, m2=out6, m1=out7
+
+ ; unfortunately, the code below overflows in some cases
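+ ; (the 16-bit SUMSUB_BA can wrap before the 11585 scaling; the %else path
+ ; below keeps 32-bit precision via VP9_UNPACK_MULSUB_2W_4X instead)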
+%if 0; cpuflag(ssse3)
+ SUMSUB_BA w, 3, 4, 2
+ SUMSUB_BA w, 0, 7, 2
+ pmulhrsw m3, W_11585x2_REG
+ pmulhrsw m7, W_11585x2_REG
+ pmulhrsw m4, W_11585x2_REG ; out4
+ pmulhrsw m0, W_11585x2_REG ; out2
+%else
+ SCRATCH 5, 9, blockq+16*1
+ VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, D_8192_REG, 2, 5
+ VP9_UNPACK_MULSUB_2W_4X 7, 0, 11585, 11585, D_8192_REG, 2, 5
+ UNSCRATCH 5, 9, blockq+16*1
+%endif
+ PSIGNW m3, W_M1_REG ; out3
+ PSIGNW m7, W_M1_REG ; out5
+
+ ; m6=out0, m5=out1, m0=out2, m3=out3, m4=out4, m7=out5, m2=out6, m1=out7
+
+%if ARCH_X86_64
+ SWAP 2, 8
+%endif
+ SWAP 0, 6, 2
+ SWAP 7, 1, 5
+%endmacro
+
+%macro IADST8_FN 6
+INIT_XMM %5
+cglobal vp9_%1_%3_8x8_add, 3, 3, %6, dst, stride, block, eob
+
+%ifidn %1, idct
+%define first_is_idct 1
+%else
+%define first_is_idct 0
+%endif
+
+%ifidn %3, idct
+%define second_is_idct 1
+%else
+%define second_is_idct 0
+%endif
+
+%if ARCH_X86_64
+ mova m0, [blockq+ 0] ; IN(0)
+%endif
+ mova m1, [blockq+ 16] ; IN(1)
+ mova m2, [blockq+ 32] ; IN(2)
+%if ARCH_X86_64 || first_is_idct
+ mova m3, [blockq+ 48] ; IN(3)
+%endif
+%if ARCH_X86_64
+ mova m4, [blockq+ 64] ; IN(4)
+%endif
+ mova m5, [blockq+ 80] ; IN(5)
+ mova m6, [blockq+ 96] ; IN(6)
+%if ARCH_X86_64 || first_is_idct
+ mova m7, [blockq+112] ; IN(7)
+%endif
+%if ARCH_X86_64
+%if cpuflag(ssse3)
+ mova m15, [pw_11585x2] ; often used
+%endif
+ mova m13, [pd_8192] ; rounding
+ mova m14, [pw_m1]
+%define W_11585x2_REG m15
+%define D_8192_REG m13
+%define W_M1_REG m14
+%else
+%define W_11585x2_REG [pw_11585x2]
+%define D_8192_REG [pd_8192]
+%define W_M1_REG [pw_m1]
+%endif
+
+ ; note different calling conventions for idct8 vs. iadst8 on x86-32
+ VP9_%2_1D
+%if ARCH_X86_64
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
+%else
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
+ mova [blockq+ 0], m0
+%if second_is_idct == 0
+ mova [blockq+ 48], m3
+ mova [blockq+112], m7
+%endif
+%endif
+ VP9_%4_1D
+
+%if ARCH_X86_64
+ SWAP 6, 8
+%endif
+ pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
+ VP9_IDCT8_WRITEOUT
+ ZERO_BLOCK blockq, 16, 8, m6
+ RET
+
+%undef W_11585x2_REG
+%undef first_is_idct
+%undef second_is_idct
+
+%endmacro
+
+IADST8_FN idct, IDCT8, iadst, IADST8, sse2, 15
+IADST8_FN iadst, IADST8, idct, IDCT8, sse2, 15
+IADST8_FN iadst, IADST8, iadst, IADST8, sse2, 15
+IADST8_FN idct, IDCT8, iadst, IADST8, ssse3, 16
+IADST8_FN idct, IDCT8, iadst, IADST8, avx, 16
+IADST8_FN iadst, IADST8, idct, IDCT8, ssse3, 16
+IADST8_FN iadst, IADST8, idct, IDCT8, avx, 16
+IADST8_FN iadst, IADST8, iadst, IADST8, ssse3, 16
+IADST8_FN iadst, IADST8, iadst, IADST8, avx, 16
+
+;---------------------------------------------------------------------------------------------
+; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+;---------------------------------------------------------------------------------------------
+
+; x86-64:
+; at the end of this macro, m7 is stored in [%4+15*%5]
+; everything else (t0-6 and t8-15) is stored in m0-6 and m8-15
+; the following sumsubs have not been done yet:
+; SUMSUB_BA w, 6, 9, 15 ; t6, t9
+; SUMSUB_BA w, 7, 8, 15 ; t7, t8
+; or (x86-32) t0-t5 are in m0-m5, t10-t15 are in x11/9/7/5/3/1,
+; and the following sumsubs have not been done yet:
+; SUMSUB_BA w, x13, x14, 7 ; t6, t9
+; SUMSUB_BA w, x15, x12, 7 ; t7, t8
+
+%macro VP9_IDCT16_1D_START 6 ; src, nnzc, stride, scratch, scratch_stride, is_iadst
+%if %2 <= 4
+ mova m3, [%1+ 1*%3] ; IN(1)
+ mova m0, [%1+ 3*%3] ; IN(3)
+
+ pmulhrsw m4, m3, [pw_16305x2] ; t14-15
+ pmulhrsw m3, [pw_1606x2] ; t8-9
+ pmulhrsw m7, m0, [pw_m4756x2] ; t10-11
+ pmulhrsw m0, [pw_15679x2] ; t12-13
+
+ ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
+ ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
+
+ VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137, 6270, [pd_8192], 1, 6 ; t9, t14
+ SCRATCH 4, 10, %4+ 1*%5
+ SCRATCH 5, 11, %4+ 7*%5
+ VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
+ UNSCRATCH 5, 11, %4+ 7*%5
+
+ ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
+ ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
+%else
+ mova m5, [%1+ 1*%3] ; IN(1)
+ mova m4, [%1+ 7*%3] ; IN(7)
+%if %2 <= 8
+ pmulhrsw m2, m5, [pw_16305x2] ; t15
+ pmulhrsw m5, [pw_1606x2] ; t8
+ pmulhrsw m3, m4, [pw_m10394x2] ; t9
+ pmulhrsw m4, [pw_12665x2] ; t14
+%else
+ mova m3, [%1+ 9*%3] ; IN(9)
+ mova m2, [%1+15*%3] ; IN(15)
+
+ ; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
+ ; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15
+
+ VP9_UNPACK_MULSUB_2W_4X 5, 2, 16305, 1606, [pd_8192], 0, 1 ; t8, t15
+ VP9_UNPACK_MULSUB_2W_4X 3, 4, 10394, 12665, [pd_8192], 0, 1 ; t9, t14
+%endif
+
+ SUMSUB_BA w, 3, 5, 0 ; t8, t9
+ SUMSUB_BA w, 4, 2, 0 ; t15, t14
+
+ VP9_UNPACK_MULSUB_2W_4X 2, 5, 15137, 6270, [pd_8192], 0, 1 ; t9, t14
+
+ SCRATCH 4, 10, %4+ 1*%5
+ SCRATCH 5, 11, %4+ 7*%5
+
+ mova m6, [%1+ 3*%3] ; IN(3)
+ mova m7, [%1+ 5*%3] ; IN(5)
+%if %2 <= 8
+ pmulhrsw m0, m7, [pw_14449x2] ; t13
+ pmulhrsw m7, [pw_7723x2] ; t10
+ pmulhrsw m1, m6, [pw_m4756x2] ; t11
+ pmulhrsw m6, [pw_15679x2] ; t12
+%else
+ mova m0, [%1+11*%3] ; IN(11)
+ mova m1, [%1+13*%3] ; IN(13)
+
+ VP9_UNPACK_MULSUB_2W_4X 7, 0, 14449, 7723, [pd_8192], 4, 5 ; t10, t13
+ VP9_UNPACK_MULSUB_2W_4X 1, 6, 4756, 15679, [pd_8192], 4, 5 ; t11, t12
+%endif
+
+ ; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
+ ; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15
+
+ SUMSUB_BA w, 7, 1, 4 ; t11, t10
+ SUMSUB_BA w, 0, 6, 4 ; t12, t13
+
+ ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
+ ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
+
+ VP9_UNPACK_MULSUB_2W_4X 6, 1, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
+
+ UNSCRATCH 5, 11, %4+ 7*%5
+%endif
+
+ ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
+ ; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15
+
+ SUMSUB_BA w, 7, 3, 4 ; t8, t11
+
+ ; backup first register
+ mova [%4+15*%5], m7
+
+ SUMSUB_BA w, 6, 2, 7 ; t9, t10
+ UNSCRATCH 4, 10, %4+ 1*%5
+ SUMSUB_BA w, 0, 4, 7 ; t15, t12
+ SUMSUB_BA w, 1, 5, 7 ; t14, t13
+
+ ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
+ ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
+
+%if cpuflag(ssse3) && %6 == 0
+ SUMSUB_BA w, 2, 5, 7
+ SUMSUB_BA w, 3, 4, 7
+ pmulhrsw m5, [pw_11585x2] ; t10
+ pmulhrsw m4, [pw_11585x2] ; t11
+ pmulhrsw m3, [pw_11585x2] ; t12
+ pmulhrsw m2, [pw_11585x2] ; t13
+%else
+ SCRATCH 6, 10, %4+ 1*%5
+ VP9_UNPACK_MULSUB_2W_4X 5, 2, 11585, 11585, [pd_8192], 6, 7 ; t10, t13
+ VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, [pd_8192], 6, 7 ; t11, t12
+ UNSCRATCH 6, 10, %4+ 1*%5
+%endif
+
+ ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
+ ; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15
+
+ SCRATCH 0, 8, %4+ 1*%5
+ SCRATCH 1, 9, %4+ 3*%5
+ SCRATCH 2, 10, %4+ 5*%5
+ SCRATCH 3, 11, %4+ 7*%5
+ SCRATCH 4, 12, %4+ 9*%5
+ SCRATCH 5, 13, %4+11*%5
+ SCRATCH 6, 14, %4+13*%5
+
+ ; even (tx8x8)
+%if %2 <= 4
+ mova m3, [%1+ 0*%3] ; IN(0)
+ mova m4, [%1+ 2*%3] ; IN(2)
+
+ pmulhrsw m3, [pw_11585x2] ; t0-t3
+ pmulhrsw m7, m4, [pw_16069x2] ; t6-7
+ pmulhrsw m4, [pw_3196x2] ; t4-5
+
+%if 0 ; overflows :(
+ paddw m6, m7, m4
+ psubw m5, m7, m4
+ pmulhrsw m5, [pw_11585x2] ; t5
+ pmulhrsw m6, [pw_11585x2] ; t6
+%else
+ VP9_UNPACK_MULSUB_2W_4X 5, 6, 7, 4, 11585, 11585, [pd_8192], 0, 1 ; t5, t6
+%endif
+
+ psubw m0, m3, m7
+ paddw m7, m3
+ psubw m1, m3, m6
+ paddw m6, m3
+ psubw m2, m3, m5
+ paddw m5, m3
+
+%if ARCH_X86_32
+ SWAP 0, 7
+%endif
+ SCRATCH 7, 15, %4+12*%5
+%else
+ mova m6, [%1+ 2*%3] ; IN(2)
+ mova m1, [%1+ 4*%3] ; IN(4)
+ mova m7, [%1+ 6*%3] ; IN(6)
+%if %2 <= 8
+ pmulhrsw m0, m1, [pw_15137x2] ; t3
+ pmulhrsw m1, [pw_6270x2] ; t2
+ pmulhrsw m5, m6, [pw_16069x2] ; t7
+ pmulhrsw m6, [pw_3196x2] ; t4
+ pmulhrsw m4, m7, [pw_m9102x2] ; t5
+ pmulhrsw m7, [pw_13623x2] ; t6
+%else
+ mova m4, [%1+10*%3] ; IN(10)
+ mova m0, [%1+12*%3] ; IN(12)
+ mova m5, [%1+14*%3] ; IN(14)
+
+ VP9_UNPACK_MULSUB_2W_4X 1, 0, 15137, 6270, [pd_8192], 2, 3 ; t2, t3
+ VP9_UNPACK_MULSUB_2W_4X 6, 5, 16069, 3196, [pd_8192], 2, 3 ; t4, t7
+ VP9_UNPACK_MULSUB_2W_4X 4, 7, 9102, 13623, [pd_8192], 2, 3 ; t5, t6
+%endif
+
+ SUMSUB_BA w, 4, 6, 2 ; t4, t5
+ SUMSUB_BA w, 7, 5, 2 ; t7, t6
+
+%if cpuflag(ssse3) && %6 == 0
+ SUMSUB_BA w, 6, 5, 2
+ pmulhrsw m5, [pw_11585x2] ; t5
+ pmulhrsw m6, [pw_11585x2] ; t6
+%else
+ VP9_UNPACK_MULSUB_2W_4X 5, 6, 11585, 11585, [pd_8192], 2, 3 ; t5, t6
+%endif
+
+ SCRATCH 5, 15, %4+10*%5
+ mova m2, [%1+ 0*%3] ; IN(0)
+%if %2 <= 8
+ pmulhrsw m2, [pw_11585x2] ; t0 and t1
+ psubw m3, m2, m0
+ paddw m0, m2
+
+ SUMSUB_BA w, 7, 0, 5 ; t0, t7
+%else
+ mova m3, [%1+ 8*%3] ; IN(8)
+
+ ; from 3 stages back
+%if cpuflag(ssse3) && %6 == 0
+ SUMSUB_BA w, 3, 2, 5
+ pmulhrsw m3, [pw_11585x2] ; t0
+ pmulhrsw m2, [pw_11585x2] ; t1
+%else
+ mova [%1+ 0*%3], m0
+ VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 0 ; t0, t1
+ mova m0, [%1+ 0*%3]
+%endif
+
+ ; from 2 stages back
+ SUMSUB_BA w, 0, 3, 5 ; t0, t3
+
+ SUMSUB_BA w, 7, 0, 5 ; t0, t7
+%endif
+ UNSCRATCH 5, 15, %4+10*%5
+%if ARCH_X86_32
+ SWAP 0, 7
+%endif
+ SCRATCH 7, 15, %4+12*%5
+ SUMSUB_BA w, 1, 2, 7 ; t1, t2
+
+ ; from 1 stage back
+ SUMSUB_BA w, 6, 1, 7 ; t1, t6
+ SUMSUB_BA w, 5, 2, 7 ; t2, t5
+%endif
+ SUMSUB_BA w, 4, 3, 7 ; t3, t4
+
+%if ARCH_X86_64
+ SWAP 0, 8
+ SWAP 1, 9
+ SWAP 2, 10
+ SWAP 3, 11
+ SWAP 4, 12
+ SWAP 5, 13
+ SWAP 6, 14
+
+ SUMSUB_BA w, 0, 15, 7 ; t0, t15
+ SUMSUB_BA w, 1, 14, 7 ; t1, t14
+ SUMSUB_BA w, 2, 13, 7 ; t2, t13
+ SUMSUB_BA w, 3, 12, 7 ; t3, t12
+ SUMSUB_BA w, 4, 11, 7 ; t4, t11
+ SUMSUB_BA w, 5, 10, 7 ; t5, t10
+%else
+ SWAP 1, 6
+ SWAP 2, 5
+ SWAP 3, 4
+ mova [%4+14*%5], m6
+
+%macro %%SUMSUB_BA_STORE 5 ; reg, from_mem, to_mem, scratch, scratch_stride
+ mova m6, [%4+%2*%5]
+ SUMSUB_BA w, 6, %1, 7
+ SWAP %1, 6
+ mova [%4+%3*%5], m6
+%endmacro
+
+ %%SUMSUB_BA_STORE 0, 1, 1, %4, %5 ; t0, t15
+ %%SUMSUB_BA_STORE 1, 3, 3, %4, %5 ; t1, t14
+ %%SUMSUB_BA_STORE 2, 5, 5, %4, %5 ; t2, t13
+ %%SUMSUB_BA_STORE 3, 7, 7, %4, %5 ; t3, t12
+ %%SUMSUB_BA_STORE 4, 9, 9, %4, %5 ; t4, t11
+ %%SUMSUB_BA_STORE 5, 11, 11, %4, %5 ; t5, t10
+%endif
+%endmacro
+
+%macro VP9_IDCT16_1D 2-4 16, 1 ; src, pass, nnzc, is_iadst
+%if %2 == 1
+ VP9_IDCT16_1D_START %1, %3, 32, tmpq, 16, %4
+
+%if ARCH_X86_64
+ ; backup a different register
+ mova m7, [tmpq+15*16]
+ mova [tmpq+ 1*16], m15
+
+ SUMSUB_BA w, 6, 9, 15 ; t6, t9
+ SUMSUB_BA w, 7, 8, 15 ; t7, t8
+
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 15
+ mova [tmpq+ 0], m0
+ mova [tmpq+ 32], m1
+ mova [tmpq+ 64], m2
+ mova [tmpq+ 96], m3
+ mova [tmpq+128], m4
+ mova [tmpq+160], m5
+ mova [tmpq+192], m6
+ mova [tmpq+224], m7
+
+ mova m15, [tmpq+ 1*16]
+ TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
+ mova [tmpq+ 16], m8
+ mova [tmpq+ 48], m9
+ mova [tmpq+ 80], m10
+ mova [tmpq+112], m11
+ mova [tmpq+144], m12
+ mova [tmpq+176], m13
+ mova [tmpq+208], m14
+ mova [tmpq+240], m15
+%else
+ mova m6, [tmpq+13*16]
+ mova m7, [tmpq+14*16]
+ SUMSUB_BA w, 6, 7 ; t6, t9
+ mova [tmpq+14*16], m6
+ mova [tmpq+13*16], m7
+ mova m7, [tmpq+15*16]
+ mova m6, [tmpq+12*16]
+ SUMSUB_BA w, 7, 6 ; t7, t8
+ mova [tmpq+15*16], m6
+
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+14*16], [tmpq+ 8*16], 1
+ mova [tmpq+ 0*16], m0
+ mova [tmpq+ 2*16], m1
+ mova [tmpq+ 4*16], m2
+ mova [tmpq+ 6*16], m3
+ mova [tmpq+10*16], m5
+ mova [tmpq+12*16], m6
+ mova [tmpq+14*16], m7
+
+ mova m0, [tmpq+15*16]
+ mova m1, [tmpq+13*16]
+ mova m2, [tmpq+11*16]
+ mova m3, [tmpq+ 9*16]
+ mova m4, [tmpq+ 7*16]
+ mova m5, [tmpq+ 5*16]
+ mova m7, [tmpq+ 1*16]
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+ 3*16], [tmpq+ 9*16], 1
+ mova [tmpq+ 1*16], m0
+ mova [tmpq+ 3*16], m1
+ mova [tmpq+ 5*16], m2
+ mova [tmpq+ 7*16], m3
+ mova [tmpq+11*16], m5
+ mova [tmpq+13*16], m6
+ mova [tmpq+15*16], m7
+%endif
+%else ; %2 == 2
+ VP9_IDCT16_1D_START %1, %3, 32, %1, 32, %4
+
+%if cpuflag(ssse3)
+%define ROUND_REG [pw_512]
+%else
+%define ROUND_REG [pw_32]
+%endif
+
+ pxor m7, m7
+%if ARCH_X86_64
+ ; backup more registers
+ mova [%1+ 2*32], m8
+ mova [%1+ 3*32], m9
+
+ VP9_IDCT8_WRITEx2 0, 1, 8, 9, 7, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 2, 3, 8, 9, 7, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 4, 5, 8, 9, 7, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+
+ ; restore from cache
+ SWAP 0, 7 ; move zero from m7 to m0
+ mova m7, [%1+15*32]
+ mova m8, [%1+ 2*32]
+ mova m9, [%1+ 3*32]
+
+ SUMSUB_BA w, 6, 9, 3 ; t6, t9
+ SUMSUB_BA w, 7, 8, 3 ; t7, t8
+
+ VP9_IDCT8_WRITEx2 6, 7, 3, 4, 0, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 8, 9, 3, 4, 0, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 10, 11, 1, 2, 0, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 12, 13, 1, 2, 0, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 14, 15, 1, 2, 0, ROUND_REG, 6
+%else
+ mova [tmpq+ 0*32], m5
+
+ VP9_IDCT8_WRITEx2 0, 1, 5, 6, 7, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 2, 3, 5, 6, 7, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+
+ SWAP 0, 7 ; move zero from m7 to m0
+ mova m5, [tmpq+ 0*32]
+
+ VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+
+ mova m4, [tmpq+13*32]
+ mova m7, [tmpq+14*32]
+ mova m5, [tmpq+15*32]
+ mova m6, [tmpq+12*32]
+ SUMSUB_BADC w, 4, 7, 5, 6, 1
+
+ VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+
+ mova m4, [tmpq+11*32]
+ mova m5, [tmpq+ 9*32]
+ mova m6, [tmpq+ 7*32]
+ mova m7, [tmpq+ 5*32]
+
+ VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+
+ mova m4, [tmpq+ 3*32]
+ mova m5, [tmpq+ 1*32]
+
+ VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+%endif
+
+%undef ROUND_REG
+%endif ; %2 == 1/2
+%endmacro
+
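+; like VP9_STORE_2X, but adds the splatted dc value to two full-width rows of dst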
+%macro VP9_STORE_2XFULL 6-7 strideq; dc, tmp1, tmp2, tmp3, tmp4, zero, stride
+ mova m%3, [dstq]
+ mova m%5, [dstq+%7]
+ punpcklbw m%2, m%3, m%6
+ punpckhbw m%3, m%6
+ punpcklbw m%4, m%5, m%6
+ punpckhbw m%5, m%6
+ paddw m%2, m%1
+ paddw m%3, m%1
+ paddw m%4, m%1
+ paddw m%5, m%1
+ packuswb m%2, m%3
+ packuswb m%4, m%5
+ mova [dstq], m%2
+ mova [dstq+%7], m%4
+%endmacro
+
+%macro VP9_IDCT_IDCT_16x16_ADD_XMM 1
+INIT_XMM %1
+cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
+%if cpuflag(ssse3)
+ ; 2x2=eob=3, 4x4=eob=10
+ cmp eobd, 38
+ jg .idctfull
+ cmp eobd, 1 ; faster path for when only DC is set
+ jne .idct8x8
+%else
+ cmp eobd, 1 ; faster path for when only DC is set
+ jg .idctfull
+%endif
+
+ ; dc-only
+%if cpuflag(ssse3)
+ movd m0, [blockq]
+ mova m1, [pw_11585x2]
+ pmulhrsw m0, m1
+ pmulhrsw m0, m1
+%else
+ DEFINE_ARGS dst, stride, block, coef
+ movsx coefd, word [blockq]
+ imul coefd, 11585
+ add coefd, 8192
+ sar coefd, 14
+ imul coefd, 11585
+ add coefd, (32 << 14) + 8192
+ sar coefd, 14 + 6
+ movd m0, coefd
+%endif
+ SPLATW m0, m0, q0000
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_512]
+%endif
+ pxor m5, m5
+ movd [blockq], m5
+%rep 7
+ VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
+ lea dstq, [dstq+2*strideq]
+%endrep
+ VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
+ RET
+
+ DEFINE_ARGS dst, stride, block, cnt, dst_bak, tmp
+%if cpuflag(ssse3)
+.idct8x8:
+ mov tmpq, rsp
+ VP9_IDCT16_1D blockq, 1, 8, 0
+
+ mov cntd, 2
+ mov dst_bakq, dstq
+.loop2_8x8:
+ VP9_IDCT16_1D tmpq, 2, 8, 0
+ lea dstq, [dst_bakq+8]
+ add tmpq, 16
+ dec cntd
+ jg .loop2_8x8
+
+ ; at the end of the loop, m0 should still be zero
+ ; use that to zero out block coefficients
+ ZERO_BLOCK blockq, 32, 8, m0
+ RET
+%endif
+
+.idctfull:
+ mov cntd, 2
+ mov tmpq, rsp
+.loop1_full:
+ VP9_IDCT16_1D blockq, 1, 16, 0
+ add blockq, 16
+ add tmpq, 256
+ dec cntd
+ jg .loop1_full
+ sub blockq, 32
+
+ mov cntd, 2
+ mov tmpq, rsp
+ mov dst_bakq, dstq
+.loop2_full:
+ VP9_IDCT16_1D tmpq, 2, 16, 0
+ lea dstq, [dst_bakq+8]
+ add tmpq, 16
+ dec cntd
+ jg .loop2_full
+
+ ; at the end of the loop, m0 should still be zero
+ ; use that to zero out block coefficients
+ ZERO_BLOCK blockq, 32, 16, m0
+ RET
+%endmacro
+
+VP9_IDCT_IDCT_16x16_ADD_XMM sse2
+VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
+VP9_IDCT_IDCT_16x16_ADD_XMM avx
+
+%macro VP9_IDCT16_YMM_1D 0
+ VP9_UNPACK_MULSUB_2W_4X 1, 15, 16305, 1606, [pd_8192], 0, 4 ; t8, t15
+ VP9_UNPACK_MULSUB_2W_4X 9, 7, 10394, 12665, [pd_8192], 0, 4 ; t9, t14
+
+ SUMSUB_BA w, 9, 1, 0 ; t8, t9
+ SUMSUB_BA w, 7, 15, 0 ; t15, t14
+
+ VP9_UNPACK_MULSUB_2W_4X 15, 1, 15137, 6270, [pd_8192], 0, 4 ; t9, t14
+
+ VP9_UNPACK_MULSUB_2W_4X 5, 11, 14449, 7723, [pd_8192], 0, 4 ; t10, t13
+ VP9_UNPACK_MULSUB_2W_4X 13, 3, 4756, 15679, [pd_8192], 0, 4 ; t11, t12
+
+ SUMSUB_BA w, 5, 13, 0 ; t11, t10
+ SUMSUB_BA w, 11, 3, 0 ; t12, t13
+
+ VP9_UNPACK_MULSUB_2W_4X 3, 13, 6270, m15137, [pd_8192], 0, 4 ; t10, t13
+
+ SUMSUB_BA w, 5, 9, 0 ; t8, t11
+ SUMSUB_BA w, 3, 15, 0 ; t9, t10
+ SUMSUB_BA w, 11, 7, 0 ; t15, t12
+ SUMSUB_BA w, 13, 1, 0 ; t14, t13
+
+ SUMSUB_BA w, 15, 1, 0
+ SUMSUB_BA w, 9, 7, 0
+ pmulhrsw m1, [pw_11585x2] ; t10
+ pmulhrsw m7, [pw_11585x2] ; t11
+ pmulhrsw m9, [pw_11585x2] ; t12
+ pmulhrsw m15, [pw_11585x2] ; t13
+
+ ; even (tx8x8)
+ mova m4, [blockq+128]
+ mova [blockq+128], m5
+ VP9_UNPACK_MULSUB_2W_4X 4, 12, 15137, 6270, [pd_8192], 0, 5 ; t2, t3
+ VP9_UNPACK_MULSUB_2W_4X 2, 14, 16069, 3196, [pd_8192], 0, 5 ; t4, t7
+ VP9_UNPACK_MULSUB_2W_4X 10, 6, 9102, 13623, [pd_8192], 0, 5 ; t5, t6
+ mova m0, [blockq+ 0]
+ SUMSUB_BA w, 8, 0, 5
+ pmulhrsw m8, [pw_11585x2] ; t0
+ pmulhrsw m0, [pw_11585x2] ; t1
+
+ SUMSUB_BA w, 10, 2, 5 ; t4, t5
+ SUMSUB_BA w, 6, 14, 5 ; t7, t6
+ SUMSUB_BA w, 12, 8, 5 ; t0, t3
+ SUMSUB_BA w, 4, 0, 5 ; t1, t2
+
+ SUMSUB_BA w, 2, 14, 5
+ pmulhrsw m14, [pw_11585x2] ; t5
+ pmulhrsw m2, [pw_11585x2] ; t6
+
+ SUMSUB_BA w, 6, 12, 5 ; t0, t7
+ SUMSUB_BA w, 2, 4, 5 ; t1, t6
+ SUMSUB_BA w, 14, 0, 5 ; t2, t5
+ SUMSUB_BA w, 10, 8, 5 ; t3, t4
+
+ ; final stage
+ SUMSUB_BA w, 11, 6, 5 ; out0, out15
+ SUMSUB_BA w, 13, 2, 5 ; out1, out14
+ SUMSUB_BA w, 15, 14, 5 ; out2, out13
+ SUMSUB_BA w, 9, 10, 5 ; out3, out12
+ SUMSUB_BA w, 7, 8, 5 ; out4, out11
+ SUMSUB_BA w, 1, 0, 5 ; out5, out10
+ SUMSUB_BA w, 3, 4, 5 ; out6, out9
+ mova m5, [blockq+128]
+ mova [blockq+192], m3
+ SUMSUB_BA w, 5, 12, 3 ; out7, out8
+
+ SWAP 0, 11, 8, 12, 10
+ SWAP 1, 13, 14, 2, 15, 6, 3, 9, 4, 7, 5
+%endmacro
+
+; this is almost identical to VP9_STORE_2X, but it does four rows (two per ymm
+; register) for slightly improved interleaving, and it omits vpermq since the
+; input is DC so all values are identical
+%macro VP9_STORE_YMM_DC_4X 6 ; reg, tmp1, tmp2, tmp3, tmp4, zero
+ mova xm%2, [dstq]
+ mova xm%4, [dstq+strideq*2]
+ vinserti128 m%2, m%2, [dstq+strideq], 1
+ vinserti128 m%4, m%4, [dstq+stride3q], 1
+ punpckhbw m%3, m%2, m%6
+ punpcklbw m%2, m%6
+ punpckhbw m%5, m%4, m%6
+ punpcklbw m%4, m%6
+ paddw m%3, m%1
+ paddw m%2, m%1
+ paddw m%5, m%1
+ paddw m%4, m%1
+ packuswb m%2, m%3
+ packuswb m%4, m%5
+ mova [dstq], xm%2
+ mova [dstq+strideq*2], xm%4
+ vextracti128 [dstq+strideq], m%2, 1
+ vextracti128 [dstq+stride3q], m%4, 1
+%endmacro
+
+%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+cglobal vp9_idct_idct_16x16_add, 4, 4, 16, dst, stride, block, eob
+ cmp eobd, 1 ; faster path for when only DC is set
+ jg .idctfull
+
+ ; dc-only
+ mova m1, [pw_11585x2]
+ vpbroadcastw m0, [blockq]
+ pmulhrsw m0, m1
+ pmulhrsw m0, m1
+ pxor m5, m5
+ pmulhrsw m0, [pw_512]
+ movd [blockq], xm5
+
+ DEFINE_ARGS dst, stride, stride3, cnt
+ mov cntd, 4
+ lea stride3q, [strideq*3]
+.loop_dc:
+ VP9_STORE_YMM_DC_4X 0, 1, 2, 3, 4, 5
+ lea dstq, [dstq+4*strideq]
+ dec cntd
+ jg .loop_dc
+ RET
+
+ DEFINE_ARGS dst, stride, block, eob
+.idctfull:
+ mova m1, [blockq+ 32]
+ mova m2, [blockq+ 64]
+ mova m3, [blockq+ 96]
+ mova m5, [blockq+160]
+ mova m6, [blockq+192]
+ mova m7, [blockq+224]
+ mova m8, [blockq+256]
+ mova m9, [blockq+288]
+ mova m10, [blockq+320]
+ mova m11, [blockq+352]
+ mova m12, [blockq+384]
+ mova m13, [blockq+416]
+ mova m14, [blockq+448]
+ mova m15, [blockq+480]
+
+ VP9_IDCT16_YMM_1D
+ TRANSPOSE16x16W 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
+ [blockq+192], [blockq+128], 1
+ mova [blockq+ 0], m0
+ VP9_IDCT16_YMM_1D
+
+ mova [blockq+224], m7
+
+ ; store
+ VP9_IDCT8_WRITEx2 0, 1, 6, 7, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 2, 3, 6, 7, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 4, 5, 6, 7, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ mova m6, [blockq+192]
+ mova m7, [blockq+224]
+ VP9_IDCT8_WRITEx2 6, 7, 1, 2, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 8, 9, 1, 2, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 10, 11, 1, 2, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 12, 13, 1, 2, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 14, 15, 1, 2, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+
+ ; zero out the block coefficients
+ pxor m0, m0
+ ZERO_BLOCK blockq, 32, 16, m0
+ RET
+%endif
+
+;---------------------------------------------------------------------------------------------
+; void vp9_iadst_iadst_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+;---------------------------------------------------------------------------------------------
+
+%macro VP9_IADST16_1D 2 ; src, pass
+%assign %%str 16*%2
+ mova m0, [%1+ 0*32] ; in0
+ mova m1, [%1+15*32] ; in15
+ mova m2, [%1+ 7*32] ; in7
+ mova m3, [%1+ 8*32] ; in8
+
+ VP9_UNPACK_MULSUB_2D_4X 1, 0, 4, 5, 16364, 804 ; m1/4=t1[d], m0/5=t0[d]
+ VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 6, 11003, 12140 ; m2/7=t9[d], m3/6=t8[d]
+ SCRATCH 4, 8, tmpq+ 0*%%str
+ VP9_RND_SH_SUMSUB_BA 3, 0, 6, 5, 4, [pd_8192] ; m3=t0[w], m0=t8[w]
+ UNSCRATCH 4, 8, tmpq+ 0*%%str
+ VP9_RND_SH_SUMSUB_BA 2, 1, 7, 4, 5, [pd_8192] ; m2=t1[w], m1=t9[w]
+
+ SCRATCH 0, 10, tmpq+ 0*%%str
+ SCRATCH 1, 11, tmpq+15*%%str
+ mova [tmpq+ 7*%%str], m2
+ mova [tmpq+ 8*%%str], m3
+
+ mova m1, [%1+ 2*32] ; in2
+ mova m0, [%1+13*32] ; in13
+ mova m3, [%1+ 5*32] ; in5
+ mova m2, [%1+10*32] ; in10
+
+ VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 15893, 3981 ; m0/6=t3[d], m1/7=t2[d]
+ VP9_UNPACK_MULSUB_2D_4X 3, 2, 4, 5, 8423, 14053 ; m3/4=t11[d], m2/5=t10[d]
+ SCRATCH 4, 12, tmpq+ 2*%%str
+ VP9_RND_SH_SUMSUB_BA 2, 1, 5, 7, 4, [pd_8192] ; m2=t2[w], m1=t10[w]
+ UNSCRATCH 4, 12, tmpq+ 2*%%str
+ VP9_RND_SH_SUMSUB_BA 3, 0, 4, 6, 5, [pd_8192] ; m3=t3[w], m0=t11[w]
+
+ SCRATCH 0, 12, tmpq+ 2*%%str
+ SCRATCH 1, 13, tmpq+13*%%str
+ mova [tmpq+ 5*%%str], m2
+ mova [tmpq+10*%%str], m3
+
+ mova m2, [%1+ 4*32] ; in4
+ mova m3, [%1+11*32] ; in11
+ mova m0, [%1+ 3*32] ; in3
+ mova m1, [%1+12*32] ; in12
+
+ VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 14811, 7005 ; m3/7=t5[d], m2/6=t4[d]
+ VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 5520, 15426 ; m0/4=t13[d], m1/5=t12[d]
+ SCRATCH 4, 9, tmpq+ 4*%%str
+ VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t4[w], m2=t12[w]
+ UNSCRATCH 4, 9, tmpq+ 4*%%str
+ VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t5[w], m3=t13[w]
+
+ SCRATCH 0, 8, tmpq+ 4*%%str
+ mova [tmpq+11*%%str], m1 ; t4:m1->r11
+ UNSCRATCH 0, 10, tmpq+ 0*%%str
+ UNSCRATCH 1, 11, tmpq+15*%%str
+
+ ; round 2 interleaved part 1
+ VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 16069, 3196 ; m1/7=t8[d], m0/6=t9[d]
+ VP9_UNPACK_MULSUB_2D_4X 3, 2, 5, 4, 3196, 16069 ; m3/5=t12[d], m2/4=t13[d]
+ SCRATCH 4, 9, tmpq+ 3*%%str
+ VP9_RND_SH_SUMSUB_BA 3, 1, 5, 7, 4, [pd_8192] ; m3=t8[w], m1=t12[w]
+ UNSCRATCH 4, 9, tmpq+ 3*%%str
+ VP9_RND_SH_SUMSUB_BA 2, 0, 4, 6, 5, [pd_8192] ; m2=t9[w], m0=t13[w]
+
+ SCRATCH 0, 10, tmpq+ 0*%%str
+ SCRATCH 1, 11, tmpq+15*%%str
+ SCRATCH 2, 14, tmpq+ 3*%%str
+ SCRATCH 3, 15, tmpq+12*%%str
+
+ mova m2, [%1+ 6*32] ; in6
+ mova m3, [%1+ 9*32] ; in9
+ mova m0, [%1+ 1*32] ; in1
+ mova m1, [%1+14*32] ; in14
+
+ VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 13160, 9760 ; m3/7=t7[d], m2/6=t6[d]
+ VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 2404, 16207 ; m0/4=t15[d], m1/5=t14[d]
+ SCRATCH 4, 9, tmpq+ 6*%%str
+ VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t6[w], m2=t14[w]
+ UNSCRATCH 4, 9, tmpq+ 6*%%str
+ VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t7[w], m3=t15[w]
+
+ ; r8=t0, r7=t1, r5=t2, r10=t3, r11=t4, m8|r4=t5, m1=t6, m0=t7
+ ; m10|r0=t8, m11|r15=t9, m13|r13=t10, m12|r2=t11, m14|r3=t12, m15|r12=t13, m2=t14, m3=t15
+
+ UNSCRATCH 4, 12, tmpq+ 2*%%str
+ UNSCRATCH 5, 13, tmpq+13*%%str
+ SCRATCH 0, 12, tmpq+ 1*%%str
+ SCRATCH 1, 13, tmpq+14*%%str
+
+ ; remainder of round 2 (rest of t8-15)
+ VP9_UNPACK_MULSUB_2D_4X 5, 4, 6, 7, 9102, 13623 ; m5/6=t11[d], m4/7=t10[d]
+ VP9_UNPACK_MULSUB_2D_4X 3, 2, 1, 0, 13623, 9102 ; m3/1=t14[d], m2/0=t15[d]
+ SCRATCH 0, 9, tmpq+ 6*%%str
+ VP9_RND_SH_SUMSUB_BA 3, 4, 1, 7, 0, [pd_8192] ; m3=t10[w], m4=t14[w]
+ UNSCRATCH 0, 9, tmpq+ 6*%%str
+ VP9_RND_SH_SUMSUB_BA 2, 5, 0, 6, 1, [pd_8192] ; m2=t11[w], m5=t15[w]
+
+ ; m15|r12=t8, m14|r3=t9, m3=t10, m2=t11, m11|r15=t12, m10|r0=t13, m4=t14, m5=t15
+
+ UNSCRATCH 6, 14, tmpq+ 3*%%str
+ UNSCRATCH 7, 15, tmpq+12*%%str
+
+ SUMSUB_BA w, 3, 7, 1
+ PSIGNW m3, [pw_m1] ; m3=out1[w], m7=t10[w]
+ SUMSUB_BA w, 2, 6, 1 ; m2=out14[w], m6=t11[w]
+
+ ; unfortunately, the code below overflows in some cases, e.g.
+ ; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8.webm
+%if 0; cpuflag(ssse3)
+ SUMSUB_BA w, 7, 6, 1
+ pmulhrsw m7, [pw_11585x2] ; m7=out6[w]
+ pmulhrsw m6, [pw_11585x2] ; m6=out9[w]
+%else
+ VP9_UNPACK_MULSUB_2W_4X 6, 7, 11585, 11585, [pd_8192], 1, 0
+%endif
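+ ; (the disabled pmulhrsw variant above forms the word-level sum/difference
+ ; before scaling, which can wrap for extreme inputs; the 2W_4X form used
+ ; instead keeps the intermediate sums in dwords via pmaddwd and only packs
+ ; back to words after the rounded >>14, at the cost of extra multiplies)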
+
+ mova [tmpq+ 3*%%str], m6
+ mova [tmpq+ 6*%%str], m7
+ UNSCRATCH 6, 10, tmpq+ 0*%%str
+ UNSCRATCH 7, 11, tmpq+15*%%str
+ mova [tmpq+13*%%str], m2
+ SCRATCH 3, 11, tmpq+ 9*%%str
+
+ VP9_UNPACK_MULSUB_2D_4X 7, 6, 2, 3, 15137, 6270 ; m6/3=t13[d], m7/2=t12[d]
+ VP9_UNPACK_MULSUB_2D_4X 5, 4, 1, 0, 6270, 15137 ; m5/1=t14[d], m4/0=t15[d]
+ SCRATCH 0, 9, tmpq+ 2*%%str
+ VP9_RND_SH_SUMSUB_BA 5, 6, 1, 3, 0, [pd_8192] ; m5=out2[w], m6=t14[w]
+ UNSCRATCH 0, 9, tmpq+ 2*%%str
+ VP9_RND_SH_SUMSUB_BA 4, 7, 0, 2, 1, [pd_8192]
+ PSIGNW m4, [pw_m1] ; m4=out13[w], m7=t15[w]
+
+ ; unfortunately, the code below overflows in some cases
+%if 0; cpuflag(ssse3)
+ SUMSUB_BA w, 7, 6, 1
+ pmulhrsw m7, [pw_m11585x2] ; m7=out5[w]
+ pmulhrsw m6, [pw_11585x2] ; m6=out10[w]
+%else
+ PSIGNW m7, [pw_m1]
+ VP9_UNPACK_MULSUB_2W_4X 7, 6, 11585, 11585, [pd_8192], 1, 0
+%endif
+
+ ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, m6=out10, m4=out13, r2=out14
+
+ mova m2, [tmpq+ 8*%%str]
+ mova m3, [tmpq+ 7*%%str]
+ mova m1, [tmpq+11*%%str]
+ mova [tmpq+ 7*%%str], m6
+ mova [tmpq+11*%%str], m4
+ mova m4, [tmpq+ 5*%%str]
+ SCRATCH 5, 14, tmpq+ 5*%%str
+ SCRATCH 7, 15, tmpq+ 8*%%str
+ UNSCRATCH 6, 8, tmpq+ 4*%%str
+ UNSCRATCH 5, 12, tmpq+ 1*%%str
+ UNSCRATCH 7, 13, tmpq+14*%%str
+
+ ; m2=t0, m3=t1, m9=t2, m0=t3, m1=t4, m8=t5, m13=t6, m12=t7
+ ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
+
+ SUMSUB_BA w, 1, 2, 0 ; m1=t0[w], m2=t4[w]
+ mova m0, [tmpq+10*%%str]
+ SCRATCH 1, 12, tmpq+ 1*%%str
+ SUMSUB_BA w, 6, 3, 1 ; m8=t1[w], m3=t5[w]
+ SCRATCH 6, 13, tmpq+ 4*%%str
+ SUMSUB_BA w, 7, 4, 1 ; m13=t2[w], m9=t6[w]
+ SCRATCH 7, 8, tmpq+10*%%str
+ SUMSUB_BA w, 5, 0, 1 ; m12=t3[w], m0=t7[w]
+ SCRATCH 5, 9, tmpq+14*%%str
+
+ VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 5, 15137, 6270 ; m2/6=t5[d], m3/10=t4[d]
+ VP9_UNPACK_MULSUB_2D_4X 0, 4, 1, 6, 6270, 15137 ; m0/14=t6[d], m9/15=t7[d]
+ SCRATCH 6, 10, tmpq+ 0*%%str
+ VP9_RND_SH_SUMSUB_BA 0, 3, 1, 5, 6, [pd_8192]
+ UNSCRATCH 6, 10, tmpq+ 0*%%str
+ PSIGNW m0, [pw_m1] ; m0=out3[w], m3=t6[w]
+ VP9_RND_SH_SUMSUB_BA 4, 2, 6, 7, 5, [pd_8192] ; m9=out12[w], m2=t7[w]
+
+ UNSCRATCH 1, 8, tmpq+10*%%str
+ UNSCRATCH 5, 9, tmpq+14*%%str
+ UNSCRATCH 6, 12, tmpq+ 1*%%str
+ UNSCRATCH 7, 13, tmpq+ 4*%%str
+ SCRATCH 4, 9, tmpq+14*%%str
+
+ SUMSUB_BA w, 1, 6, 4 ; m13=out0[w], m1=t2[w]
+ SUMSUB_BA w, 5, 7, 4
+ PSIGNW m5, [pw_m1] ; m12=out15[w], m8=t3[w]
+
+ ; unfortunately, the code below overflows in some cases, e.g.
+ ; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8-4-2-1.webm
+%if 0 ; cpuflag(ssse3)
+ SUMSUB_BA w, 7, 6, 4
+ pmulhrsw m7, [pw_m11585x2] ; m8=out7[w]
+ pmulhrsw m6, [pw_11585x2] ; m1=out8[w]
+ SWAP 6, 7
+ SUMSUB_BA w, 3, 2, 4
+ pmulhrsw m3, [pw_11585x2] ; m3=out4[w]
+ pmulhrsw m2, [pw_11585x2] ; m2=out11[w]
+%else
+ SCRATCH 5, 8, tmpq+10*%%str
+ VP9_UNPACK_MULSUB_2W_4X 6, 7, 11585, m11585, [pd_8192], 5, 4
+ VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 4
+ UNSCRATCH 5, 8, tmpq+10*%%str
+%endif
+
+ ; m13=out0, m0=out3, m3=out4, m8=out7, m1=out8, m2=out11, m9=out12, m12=out15
+ ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
+
+%if %2 == 1
+%if ARCH_X86_64
+ mova m13, [tmpq+ 6*%%str]
+ TRANSPOSE8x8W 1, 11, 14, 0, 3, 15, 13, 6, 10
+ mova [tmpq+ 0*16], m1
+ mova [tmpq+ 2*16], m11
+ mova [tmpq+ 4*16], m14
+ mova [tmpq+ 6*16], m0
+ mova m1, [tmpq+ 3*%%str]
+ mova m11, [tmpq+ 7*%%str]
+ mova m14, [tmpq+11*%%str]
+ mova m0, [tmpq+13*%%str]
+ mova [tmpq+ 8*16], m3
+ mova [tmpq+10*16], m15
+ mova [tmpq+12*16], m13
+ mova [tmpq+14*16], m6
+
+ TRANSPOSE8x8W 7, 1, 11, 2, 9, 14, 0, 5, 10
+ mova [tmpq+ 1*16], m7
+ mova [tmpq+ 3*16], m1
+ mova [tmpq+ 5*16], m11
+ mova [tmpq+ 7*16], m2
+ mova [tmpq+ 9*16], m9
+ mova [tmpq+11*16], m14
+ mova [tmpq+13*16], m0
+ mova [tmpq+15*16], m5
+%else
+ mova [tmpq+12*%%str], m2
+ mova [tmpq+ 1*%%str], m5
+ mova [tmpq+15*%%str], m7
+ mova m2, [tmpq+ 9*%%str]
+ mova m5, [tmpq+ 5*%%str]
+ mova m7, [tmpq+ 8*%%str]
+ TRANSPOSE8x8W 1, 2, 5, 0, 3, 7, 4, 6, [tmpq+ 6*%%str], [tmpq+ 8*%%str], 1
+ mova [tmpq+ 0*16], m1
+ mova [tmpq+ 2*16], m2
+ mova [tmpq+ 4*16], m5
+ mova [tmpq+ 6*16], m0
+ mova [tmpq+10*16], m7
+ mova m3, [tmpq+12*%%str]
+ mova [tmpq+12*16], m4
+ mova m4, [tmpq+14*%%str]
+ mova [tmpq+14*16], m6
+
+ mova m0, [tmpq+15*%%str]
+ mova m1, [tmpq+ 3*%%str]
+ mova m2, [tmpq+ 7*%%str]
+ mova m5, [tmpq+11*%%str]
+ mova m7, [tmpq+ 1*%%str]
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+13*%%str], [tmpq+ 9*%%str], 1
+ mova [tmpq+ 1*16], m0
+ mova [tmpq+ 3*16], m1
+ mova [tmpq+ 5*16], m2
+ mova [tmpq+ 7*16], m3
+ mova [tmpq+11*16], m5
+ mova [tmpq+13*16], m6
+ mova [tmpq+15*16], m7
+%endif
+%else
+ pxor m4, m4
+
+%if cpuflag(ssse3)
+%define ROUND_REG [pw_512]
+%else
+%define ROUND_REG [pw_32]
+%endif
+
+%if ARCH_X86_64
+ mova m12, [tmpq+ 6*%%str]
+ VP9_IDCT8_WRITEx2 1, 11, 10, 8, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 14, 0, 10, 8, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 3, 15, 10, 8, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 12, 6, 10, 8, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+
+ mova m1, [tmpq+ 3*%%str]
+ mova m11, [tmpq+ 7*%%str]
+ mova m14, [tmpq+11*%%str]
+ mova m0, [tmpq+13*%%str]
+
+ VP9_IDCT8_WRITEx2 7, 1, 10, 8, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 11, 2, 10, 8, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 9, 14, 10, 8, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ VP9_IDCT8_WRITEx2 0, 5, 10, 8, 4, ROUND_REG, 6
+%else
+ mova [tmpq+ 0*%%str], m2
+ mova [tmpq+ 1*%%str], m5
+ mova [tmpq+ 2*%%str], m7
+ mova m2, [tmpq+ 9*%%str]
+ VP9_IDCT8_WRITEx2 1, 2, 5, 7, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ mova m5, [tmpq+ 5*%%str]
+ VP9_IDCT8_WRITEx2 5, 0, 1, 2, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ mova m5, [tmpq+ 8*%%str]
+ VP9_IDCT8_WRITEx2 3, 5, 1, 2, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ mova m5, [tmpq+ 6*%%str]
+ VP9_IDCT8_WRITEx2 5, 6, 1, 2, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+
+ mova m0, [tmpq+ 2*%%str]
+ mova m3, [tmpq+ 3*%%str]
+ VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ mova m0, [tmpq+ 7*%%str]
+ mova m3, [tmpq+ 0*%%str]
+ VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ mova m0, [tmpq+14*%%str]
+ mova m3, [tmpq+11*%%str]
+ VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
+ lea dstq, [dstq+strideq*2]
+ mova m0, [tmpq+13*%%str]
+ mova m3, [tmpq+ 1*%%str]
+ VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
+%endif
+
+ SWAP 0, 4 ; zero
+%undef ROUND_REG
+%endif
+%endmacro
+
+%macro IADST16_FN 5
+INIT_XMM %5
+cglobal vp9_%1_%3_16x16_add, 3, 6, 16, 512, dst, stride, block, cnt, dst_bak, tmp
+ mov cntd, 2
+ mov tmpq, rsp
+.loop1_full:
+ VP9_%2_1D blockq, 1
+ add blockq, 16
+ add tmpq, 256
+ dec cntd
+ jg .loop1_full
+ sub blockq, 32
+
+ mov cntd, 2
+ mov tmpq, rsp
+ mov dst_bakq, dstq
+.loop2_full:
+ VP9_%4_1D tmpq, 2
+ lea dstq, [dst_bakq+8]
+ add tmpq, 16
+ dec cntd
+ jg .loop2_full
+
+ ; at the end of the loop, m0 should still be zero
+ ; use that to zero out block coefficients
+ ZERO_BLOCK blockq, 32, 16, m0
+ RET
+%endmacro
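+; each IADST16_FN entry works in two passes: pass 1 (VP9_%2_1D blockq, 1) runs
+; on blockq in two 8-column halves and leaves the transposed intermediate on
+; the stack, while pass 2 (VP9_%4_1D tmpq, 2) reads it back in two halves and
+; adds the rounded result into dst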
+
+IADST16_FN idct, IDCT16, iadst, IADST16, sse2
+IADST16_FN iadst, IADST16, idct, IDCT16, sse2
+IADST16_FN iadst, IADST16, iadst, IADST16, sse2
+IADST16_FN idct, IDCT16, iadst, IADST16, ssse3
+IADST16_FN iadst, IADST16, idct, IDCT16, ssse3
+IADST16_FN iadst, IADST16, iadst, IADST16, ssse3
+IADST16_FN idct, IDCT16, iadst, IADST16, avx
+IADST16_FN iadst, IADST16, idct, IDCT16, avx
+IADST16_FN iadst, IADST16, iadst, IADST16, avx
+
+; in: data in m[0-15] except m0/m4, which are in [blockq+0] and [blockq+128]
+; out: m[0-15] except m6, which is in [blockq+192]
+; uses blockq as scratch space
+%macro VP9_IADST16_YMM_1D 0
+ mova [blockq+ 32], m3
+ mova [blockq+ 64], m7
+ mova [blockq+ 96], m8
+
+ ; first half of round 1
+ VP9_UNPACK_MULSUB_2D_4X 9, 6, 0, 3, 13160, 9760 ; m9/x=t7[d], m6/x=t6[d]
+ VP9_UNPACK_MULSUB_2D_4X 1, 14, 4, 7, 2404, 16207 ; m1/x=t15[d], m14/x=t14[d]
+ VP9_RND_SH_SUMSUB_BA 14, 6, 7, 3, 8, [pd_8192] ; m14=t6[w], m6=t14[w]
+ VP9_RND_SH_SUMSUB_BA 1, 9, 4, 0, 8, [pd_8192] ; m1=t7[w], m9=t15[w]
+
+ VP9_UNPACK_MULSUB_2D_4X 13, 2, 4, 7, 15893, 3981 ; m13/x=t3[d], m2/x=t2[d]
+ VP9_UNPACK_MULSUB_2D_4X 5, 10, 0, 3, 8423, 14053 ; m5/x=t11[d], m10/x=t10[d]
+ VP9_RND_SH_SUMSUB_BA 10, 2, 3, 7, 8, [pd_8192] ; m10=t2[w], m2=t10[w]
+ VP9_RND_SH_SUMSUB_BA 5, 13, 0, 4, 8, [pd_8192] ; m5=t3[w], m13=t11[w]
+
+ ; half of round 2 t8-15
+ VP9_UNPACK_MULSUB_2D_4X 2, 13, 4, 7, 9102, 13623 ; m2/x=t11[d], m13/x=t10[d]
+ VP9_UNPACK_MULSUB_2D_4X 9, 6, 3, 0, 13623, 9102 ; m9/x=t14[d], m6/x=t15[d]
+ VP9_RND_SH_SUMSUB_BA 9, 13, 3, 7, 8, [pd_8192] ; m9=t10[w], m13=t14[w]
+ VP9_RND_SH_SUMSUB_BA 6, 2, 0, 4, 8, [pd_8192] ; m6=t11[w], m2=t15[w]
+
+ SUMSUB_BA w, 14, 10, 8 ; m14=t2, m10=t6
+ SUMSUB_BA w, 1, 5, 8 ; m1=t3, m5=t7
+
+ mova m0, [blockq+ 0]
+ mova m4, [blockq+128]
+ mova m3, [blockq+ 32]
+ mova m7, [blockq+ 64]
+ mova m8, [blockq+ 96]
+ mova [blockq+ 0], m1
+ mova [blockq+128], m14
+ mova [blockq+ 32], m6
+ mova [blockq+ 64], m9
+ mova [blockq+ 96], m10
+
+ ; second half of round 1
+ VP9_UNPACK_MULSUB_2D_4X 15, 0, 1, 9, 16364, 804 ; m15/x=t1[d], m0/x=t0[d]
+ VP9_UNPACK_MULSUB_2D_4X 7, 8, 10, 6, 11003, 12140 ; m7/x=t9[d], m8/x=t8[d]
+ VP9_RND_SH_SUMSUB_BA 8, 0, 6, 9, 14, [pd_8192] ; m8=t0[w], m0=t8[w]
+ VP9_RND_SH_SUMSUB_BA 7, 15, 10, 1, 14, [pd_8192] ; m7=t1[w], m15=t9[w]
+
+ VP9_UNPACK_MULSUB_2D_4X 11, 4, 10, 6, 14811, 7005 ; m11/x=t5[d], m4/x=t4[d]
+ VP9_UNPACK_MULSUB_2D_4X 3, 12, 1, 9, 5520, 15426 ; m3/x=t13[d], m12/x=t12[d]
+ VP9_RND_SH_SUMSUB_BA 12, 4, 9, 6, 14, [pd_8192] ; m12=t4[w], m4=t12[w]
+ VP9_RND_SH_SUMSUB_BA 3, 11, 1, 10, 14, [pd_8192] ; m3=t5[w], m11=t13[w]
+
+ ; second half of round 2 t8-15
+ VP9_UNPACK_MULSUB_2D_4X 0, 15, 6, 10, 16069, 3196 ; m15/x=t8[d], m0/x=t9[d]
+ VP9_UNPACK_MULSUB_2D_4X 11, 4, 9, 1, 3196, 16069 ; m11/x=t12[d], m4/x=t13[d]
+ VP9_RND_SH_SUMSUB_BA 11, 15, 9, 10, 14, [pd_8192] ; m11=t8[w], m15=t12[w]
+ VP9_RND_SH_SUMSUB_BA 4, 0, 1, 6, 14, [pd_8192] ; m4=t9[w], m0=t13[w]
+
+ SUMSUB_BA w, 12, 8, 14 ; m12=t0, m8=t4
+ SUMSUB_BA w, 3, 7, 14 ; m3=t1, m7=t5
+
+ mova m10, [blockq+ 96]
+ mova [blockq+ 96], m12
+
+ ; round 3
+ VP9_UNPACK_MULSUB_2D_4X 15, 0, 9, 12, 15137, 6270 ; m15/x=t13[d], m0/x=t12[d]
+ VP9_UNPACK_MULSUB_2D_4X 2, 13, 1, 6, 6270, 15137 ; m2/x=t14[d], m13/x=t15[d]
+ VP9_RND_SH_SUMSUB_BA 2, 0, 1, 12, 14, [pd_8192] ; m2=out2[w], m0=t14a[w]
+ VP9_RND_SH_SUMSUB_BA 13, 15, 6, 9, 14, [pd_8192]
+ PSIGNW m13, [pw_m1] ; m13=out13[w], m15=t15a[w]
+
+ VP9_UNPACK_MULSUB_2D_4X 8, 7, 12, 9, 15137, 6270 ; m8/x=t5[d], m7/x=t4[d]
+ VP9_UNPACK_MULSUB_2D_4X 5, 10, 1, 6, 6270, 15137 ; m5/x=t6[d], m10/x=t7[d]
+ VP9_RND_SH_SUMSUB_BA 5, 7, 1, 9, 14, [pd_8192]
+ PSIGNW m5, [pw_m1] ; m5=out3[w], m7=t6[w]
+ VP9_RND_SH_SUMSUB_BA 10, 8, 6, 12, 14, [pd_8192] ; m10=out12[w], m8=t7[w]
+
+ mova m1, [blockq+ 0]
+ mova m14, [blockq+128]
+ mova m6, [blockq+ 32]
+ mova m9, [blockq+ 64]
+ mova m12, [blockq+ 96]
+ mova [blockq+ 0], m10
+ mova [blockq+128], m5
+
+ SUMSUB_BA w, 14, 12, 5 ; m14=out0, m12=t2a
+ SUMSUB_BA w, 1, 3, 5
+ PSIGNW m1, [pw_m1] ; m1=out15, m3=t3a
+
+ SUMSUB_BA w, 9, 11, 5
+ PSIGNW m9, [pw_m1] ; m9=out1, m11=t10
+ SUMSUB_BA w, 6, 4, 5 ; m6=out14, m4=t11
+
+ VP9_UNPACK_MULSUB_2W_4X 4, 11, 11585, 11585, [pd_8192], 5, 10 ; m4=out9, m11=out6
+ mova m5, [blockq+128]
+ mova [blockq+192], m11
+ PSIGNW m15, [pw_m1]
+ VP9_UNPACK_MULSUB_2W_4X 15, 0, 11585, 11585, [pd_8192], 10, 11 ; m15=out5, m0=out10
+
+ PSIGNW m3, [pw_m1]
+ VP9_UNPACK_MULSUB_2W_4X 3, 12, 11585, 11585, [pd_8192], 10, 11 ; m3=out7,m12=out8
+ VP9_UNPACK_MULSUB_2W_4X 8, 7, 11585, 11585, [pd_8192], 10, 11 ; m8=out11,m7=out4
+
+ mova m10, [blockq+ 0]
+
+ SWAP 0, 14, 6, 11, 8, 12, 10
+ SWAP 1, 9, 15, 4, 7, 3, 5
+ SWAP 5, 9, 15
+%endmacro
+
+%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+%macro IADST16_YMM_FN 4
+INIT_YMM avx2
+cglobal vp9_%1_%3_16x16_add, 4, 4, 16, dst, stride, block, eob
+ mova m1, [blockq+ 32]
+ mova m2, [blockq+ 64]
+ mova m3, [blockq+ 96]
+ mova m5, [blockq+160]
+ mova m6, [blockq+192]
+ mova m7, [blockq+224]
+ mova m8, [blockq+256]
+ mova m9, [blockq+288]
+ mova m10, [blockq+320]
+ mova m11, [blockq+352]
+ mova m12, [blockq+384]
+ mova m13, [blockq+416]
+ mova m14, [blockq+448]
+ mova m15, [blockq+480]
+
+ VP9_%2_YMM_1D
+ TRANSPOSE16x16W 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
+ [blockq+192], [blockq+128], 1
+ mova [blockq+ 0], m0
+ VP9_%4_YMM_1D
+
+ mova [blockq+224], m7
+
+ ; store
+ VP9_IDCT8_WRITEx2 0, 1, 6, 7, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 2, 3, 6, 7, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 4, 5, 6, 7, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ mova m6, [blockq+192]
+ mova m7, [blockq+224]
+ VP9_IDCT8_WRITEx2 6, 7, 1, 2, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 8, 9, 1, 2, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 10, 11, 1, 2, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 12, 13, 1, 2, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 14, 15, 1, 2, unused, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+
+ ; zero out the block coefficients
+ pxor m0, m0
+ ZERO_BLOCK blockq, 32, 16, m0
+ RET
+%endmacro
+
+IADST16_YMM_FN idct, IDCT16, iadst, IADST16
+IADST16_YMM_FN iadst, IADST16, idct, IDCT16
+IADST16_YMM_FN iadst, IADST16, iadst, IADST16
+%endif
+
+;---------------------------------------------------------------------------------------------
+; void vp9_idct_idct_32x32_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+;---------------------------------------------------------------------------------------------
+
+%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
+%if %2 == 1
+%assign %%str mmsize
+%else
+%assign %%str 64
+%endif
+
+ ; first do t0-15, this can be done identically to idct16x16
+ VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq, 2*%%str, 1
+
+ ; store everything on stack to make space available for t16-31
+ ; we store interleaved with the output of the second half (t16-31)
+ ; so we don't need to allocate extra stack space
+ mova [tmpq+ 0*%%str], m0 ; t0
+ mova [tmpq+ 4*%%str], m1 ; t1
+ mova [tmpq+ 8*%%str], m2 ; t2
+ mova [tmpq+12*%%str], m3 ; t3
+ mova [tmpq+16*%%str], m4 ; t4
+ mova [tmpq+20*%%str], m5 ; t5
+%if ARCH_X86_64
+ mova [tmpq+22*%%str], m10 ; t10
+ mova [tmpq+18*%%str], m11 ; t11
+ mova [tmpq+14*%%str], m12 ; t12
+ mova [tmpq+10*%%str], m13 ; t13
+ mova [tmpq+ 6*%%str], m14 ; t14
+ mova [tmpq+ 2*%%str], m15 ; t15
+%endif
+
+ mova m0, [tmpq+ 30*%%str]
+ UNSCRATCH 1, 6, tmpq+26*%%str
+ UNSCRATCH 2, 8, tmpq+24*%%str
+ UNSCRATCH 3, 9, tmpq+28*%%str
+ SUMSUB_BA w, 1, 3, 4 ; t6, t9
+ SUMSUB_BA w, 0, 2, 4 ; t7, t8
+
+ mova [tmpq+24*%%str], m1 ; t6
+ mova [tmpq+28*%%str], m0 ; t7
+ mova [tmpq+30*%%str], m2 ; t8
+ mova [tmpq+26*%%str], m3 ; t9
+
+ ; then, secondly, do t16-31
+%if %3 <= 8
+ mova m4, [%1+ 1*64]
+ mova m7, [%1+ 7*64]
+
+ pmulhrsw m1, m4, [pw_16364x2] ;t31
+ pmulhrsw m4, [pw_804x2] ;t16
+
+ VP9_UNPACK_MULSUB_2W_4X 5, 0, 1, 4, 16069, 3196, [pd_8192], 6, 2 ; t17, t30
+
+ pmulhrsw m3, m7, [pw_m5520x2] ;t19
+ pmulhrsw m7, [pw_15426x2] ;t28
+
+ SCRATCH 4, 13, tmpq+ 1*%%str
+ SCRATCH 5, 12, tmpq+15*%%str
+
+ VP9_UNPACK_MULSUB_2W_4X 2, 6, 7, 3, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
+%else
+ mova m0, [%1+ 1*64]
+ mova m1, [%1+15*64]
+%if %3 <= 16
+ pmulhrsw m5, m0, [pw_16364x2]
+ pmulhrsw m0, [pw_804x2]
+ pmulhrsw m4, m1, [pw_m11003x2]
+ pmulhrsw m1, [pw_12140x2]
+%else
+ mova m4, [%1+17*64]
+ mova m5, [%1+31*64]
+
+ VP9_UNPACK_MULSUB_2W_4X 0, 5, 16364, 804, [pd_8192], 2, 3 ; t16, t31
+ VP9_UNPACK_MULSUB_2W_4X 4, 1, 11003, 12140, [pd_8192], 2, 3 ; t17, t30
+%endif
+ SUMSUB_BA w, 4, 0, 2
+ SUMSUB_BA w, 1, 5, 2
+
+ VP9_UNPACK_MULSUB_2W_4X 5, 0, 16069, 3196, [pd_8192], 2, 3 ; t17, t30
+
+ SCRATCH 4, 13, tmpq+ 1*%%str
+ SCRATCH 5, 12, tmpq+15*%%str
+
+ mova m2, [%1+ 7*64]
+ mova m3, [%1+ 9*64]
+%if %3 <= 16
+ pmulhrsw m7, m3, [pw_14811x2]
+ pmulhrsw m3, [pw_7005x2]
+ pmulhrsw m6, m2, [pw_m5520x2]
+ pmulhrsw m2, [pw_15426x2]
+%else
+ mova m7, [%1+23*64]
+ mova m6, [%1+25*64]
+
+ VP9_UNPACK_MULSUB_2W_4X 3, 7, 14811, 7005, [pd_8192], 4, 5 ; t18, t29
+ VP9_UNPACK_MULSUB_2W_4X 6, 2, 5520, 15426, [pd_8192], 4, 5 ; t19, t28
+%endif
+ SUMSUB_BA w, 3, 6, 4
+ SUMSUB_BA w, 7, 2, 4
+
+ VP9_UNPACK_MULSUB_2W_4X 2, 6, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
+%endif
+
+ UNSCRATCH 5, 12, tmpq+15*%%str
+ SUMSUB_BA w, 6, 0, 4
+ mova [tmpq+25*%%str], m6 ; t19
+ UNSCRATCH 4, 13, tmpq+ 1*%%str
+ SUMSUB_BA w, 7, 1, 6
+ SUMSUB_BA w, 3, 4, 6
+ mova [tmpq+23*%%str], m3 ; t16
+ SUMSUB_BA w, 2, 5, 6
+
+ VP9_UNPACK_MULSUB_2W_4X 0, 5, 15137, 6270, [pd_8192], 6, 3 ; t18, t29
+ VP9_UNPACK_MULSUB_2W_4X 1, 4, 15137, 6270, [pd_8192], 6, 3 ; t19, t28
+
+ SCRATCH 0, 10, tmpq+ 1*%%str
+ SCRATCH 1, 11, tmpq+ 7*%%str
+ SCRATCH 2, 9, tmpq+ 9*%%str
+ SCRATCH 4, 14, tmpq+15*%%str
+ SCRATCH 5, 15, tmpq+17*%%str
+ SCRATCH 7, 13, tmpq+31*%%str
+
+%if %3 <= 8
+ mova m0, [%1+ 5*64]
+ mova m3, [%1+ 3*64]
+
+ pmulhrsw m5, m0, [pw_15893x2] ;t27
+ pmulhrsw m0, [pw_3981x2] ;t20
+
+ VP9_UNPACK_MULSUB_2W_4X 1, 4, 5, 0, 9102, 13623, [pd_8192], 7, 2 ; t21, t26
+
+ pmulhrsw m6, m3, [pw_m2404x2] ;t23
+ pmulhrsw m3, [pw_16207x2] ;t24
+
+ SCRATCH 5, 8, tmpq+ 5*%%str
+ SCRATCH 4, 12, tmpq+11*%%str
+
+ VP9_UNPACK_MULSUB_2W_4X 7, 2, 3, 6, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
+%else
+ mova m4, [%1+ 5*64]
+ mova m5, [%1+11*64]
+%if %3 <= 16
+ pmulhrsw m1, m4, [pw_15893x2]
+ pmulhrsw m4, [pw_3981x2]
+ pmulhrsw m0, m5, [pw_m8423x2]
+ pmulhrsw m5, [pw_14053x2]
+%else
+ mova m0, [%1+21*64]
+ mova m1, [%1+27*64]
+
+ VP9_UNPACK_MULSUB_2W_4X 4, 1, 15893, 3981, [pd_8192], 2, 3 ; t20, t27
+ VP9_UNPACK_MULSUB_2W_4X 0, 5, 8423, 14053, [pd_8192], 2, 3 ; t21, t26
+%endif
+ SUMSUB_BA w, 0, 4, 2
+ SUMSUB_BA w, 5, 1, 2
+
+ VP9_UNPACK_MULSUB_2W_4X 1, 4, 9102, 13623, [pd_8192], 2, 3 ; t21, t26
+
+ SCRATCH 5, 8, tmpq+ 5*%%str
+ SCRATCH 4, 12, tmpq+11*%%str
+
+ mova m7, [%1+ 3*64]
+ mova m6, [%1+13*64]
+%if %3 <= 16
+ pmulhrsw m3, m6, [pw_13160x2]
+ pmulhrsw m6, [pw_9760x2]
+ pmulhrsw m2, m7, [pw_m2404x2]
+ pmulhrsw m7, [pw_16207x2]
+%else
+ mova m2, [%1+29*64]
+ mova m3, [%1+19*64]
+ VP9_UNPACK_MULSUB_2W_4X 6, 3, 13160, 9760, [pd_8192], 4, 5 ; t22, t25
+ VP9_UNPACK_MULSUB_2W_4X 2, 7, 2404, 16207, [pd_8192], 4, 5 ; t23, t24
+%endif
+ SUMSUB_BA w, 6, 2, 4
+ SUMSUB_BA w, 3, 7, 4
+
+ VP9_UNPACK_MULSUB_2W_4X 7, 2, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
+%endif
+
+ ; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
+ ; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31
+
+ UNSCRATCH 4, 12, tmpq+11*%%str
+ SUMSUB_BA w, 0, 6, 5
+ SUMSUB_BA w, 4, 2, 5
+ UNSCRATCH 5, 8, tmpq+ 5*%%str
+ SCRATCH 4, 8, tmpq+11*%%str
+ SUMSUB_BA w, 1, 7, 4
+ SUMSUB_BA w, 5, 3, 4
+ SCRATCH 5, 12, tmpq+ 5*%%str
+
+ VP9_UNPACK_MULSUB_2W_4X 3, 6, 6270, m15137, [pd_8192], 4, 5 ; t20, t27
+ VP9_UNPACK_MULSUB_2W_4X 2, 7, 6270, m15137, [pd_8192], 4, 5 ; t21, t26
+
+ ; m8[s]=t16, m9=t17, m5=t18, m4[s]=t19, m12=t20, m13=t21, m1=t22, m0=t23,
+ ; m15=t24, m14=t25, m2=t26, m3=t27, m11=t28, m10=t29, m6=t30, m7=t31
+
+ UNSCRATCH 5, 9, tmpq+ 9*%%str
+ mova m4, [tmpq+23*%%str] ; t16
+%if ARCH_X86_64
+ SUMSUB_BA w, 1, 5, 9
+ SUMSUB_BA w, 0, 4, 9
+%else
+ SUMSUB_BADC w, 1, 5, 0, 4
+%endif
+ mova [tmpq+29*%%str], m1 ; t17
+ mova [tmpq+21*%%str], m0 ; t16
+ UNSCRATCH 0, 10, tmpq+ 1*%%str
+ UNSCRATCH 1, 11, tmpq+ 7*%%str
+%if ARCH_X86_64
+ SUMSUB_BA w, 2, 0, 9
+ SUMSUB_BA w, 3, 1, 9
+%else
+ SUMSUB_BADC w, 2, 0, 3, 1
+%endif
+ mova [tmpq+ 9*%%str], m2 ; t18
+ mova [tmpq+13*%%str], m3 ; t19
+ SCRATCH 0, 10, tmpq+23*%%str
+ SCRATCH 1, 11, tmpq+27*%%str
+
+ UNSCRATCH 2, 14, tmpq+15*%%str
+ UNSCRATCH 3, 15, tmpq+17*%%str
+ SUMSUB_BA w, 6, 2, 0
+ SUMSUB_BA w, 7, 3, 0
+ SCRATCH 6, 14, tmpq+ 3*%%str
+ SCRATCH 7, 15, tmpq+ 7*%%str
+
+ UNSCRATCH 0, 8, tmpq+11*%%str
+ mova m1, [tmpq+25*%%str] ; t19
+ UNSCRATCH 6, 12, tmpq+ 5*%%str
+ UNSCRATCH 7, 13, tmpq+31*%%str
+%if ARCH_X86_64
+ SUMSUB_BA w, 0, 1, 9
+ SUMSUB_BA w, 6, 7, 9
+%else
+ SUMSUB_BADC w, 0, 1, 6, 7
+%endif
+
+ ; m0=t16, m1=t17, m2=t18, m3=t19, m11=t20, m10=t21, m9=t22, m8=t23,
+ ; m7=t24, m6=t25, m5=t26, m4=t27, m12=t28, m13=t29, m14=t30, m15=t31
+
+%if 0; cpuflag(ssse3)
+%if ARCH_X86_64
+ SUMSUB_BA w, 4, 7, 8
+ SUMSUB_BA w, 5, 1, 8
+%else
+ SUMSUB_BADC w, 4, 7, 5, 1
+%endif
+
+ pmulhrsw m7, [pw_11585x2]
+ pmulhrsw m4, [pw_11585x2]
+ pmulhrsw m1, [pw_11585x2]
+ pmulhrsw m5, [pw_11585x2]
+
+ mova [tmpq+ 5*%%str], m7 ; t23
+ SCRATCH 1, 13, tmpq+25*%%str
+ UNSCRATCH 7, 10, tmpq+23*%%str
+ UNSCRATCH 1, 11, tmpq+27*%%str
+
+%if ARCH_X86_64
+ SUMSUB_BA w, 7, 3, 10
+ SUMSUB_BA w, 1, 2, 10
+%else
+ SUMSUB_BADC w, 7, 3, 1, 2
+%endif
+
+ pmulhrsw m3, [pw_11585x2]
+ pmulhrsw m7, [pw_11585x2]
+ pmulhrsw m2, [pw_11585x2]
+ pmulhrsw m1, [pw_11585x2]
+%else
+ SCRATCH 0, 8, tmpq+15*%%str
+ SCRATCH 6, 9, tmpq+17*%%str
+ VP9_UNPACK_MULSUB_2W_4X 7, 4, 11585, 11585, [pd_8192], 0, 6
+ mova [tmpq+ 5*%%str], m7 ; t23
+ UNSCRATCH 7, 10, tmpq+23*%%str
+ VP9_UNPACK_MULSUB_2W_4X 1, 5, 11585, 11585, [pd_8192], 0, 6
+ SCRATCH 1, 13, tmpq+25*%%str
+ UNSCRATCH 1, 11, tmpq+27*%%str
+ VP9_UNPACK_MULSUB_2W_4X 3, 7, 11585, 11585, [pd_8192], 0, 6
+ VP9_UNPACK_MULSUB_2W_4X 2, 1, 11585, 11585, [pd_8192], 0, 6
+ UNSCRATCH 0, 8, tmpq+15*%%str
+ UNSCRATCH 6, 9, tmpq+17*%%str
+%endif
+
+ ; m0=t16, m1=t17, m2=t18, m3=t19, m4=t20, m5=t21, m6=t22, m7=t23,
+ ; m8=t24, m9=t25, m10=t26, m11=t27, m12=t28, m13=t29, m14=t30, m15=t31
+
+ ; then do final pass to sumsub+store the two halves
+%if %2 == 1
+ mova [tmpq+17*%%str], m2 ; t20
+ mova [tmpq+ 1*%%str], m3 ; t21
+%if ARCH_X86_64
+ mova [tmpq+25*%%str], m13 ; t22
+
+ mova m8, [tmpq+ 0*%%str] ; t0
+ mova m9, [tmpq+ 4*%%str] ; t1
+ mova m12, [tmpq+ 8*%%str] ; t2
+ mova m11, [tmpq+12*%%str] ; t3
+ mova m2, [tmpq+16*%%str] ; t4
+ mova m3, [tmpq+20*%%str] ; t5
+ mova m13, [tmpq+24*%%str] ; t6
+
+ SUMSUB_BA w, 6, 8, 10
+ mova [tmpq+ 3*%%str], m8 ; t15
+ SUMSUB_BA w, 0, 9, 8
+ SUMSUB_BA w, 15, 12, 8
+ SUMSUB_BA w, 14, 11, 8
+ SUMSUB_BA w, 1, 2, 8
+ SUMSUB_BA w, 7, 3, 8
+ SUMSUB_BA w, 5, 13, 8
+ mova m10, [tmpq+28*%%str] ; t7
+ SUMSUB_BA w, 4, 10, 8
+%if cpuflag(avx2)
+ ; the "shitty" about this idct is that the final pass does the outermost
+ ; interleave sumsubs (t0/31, t1/30, etc) but the tN for the 16x16 need
+ ; to be sequential, which means I need to load/store half of the sumsub
+ ; intermediates back to/from memory to get a 16x16 transpose going...
+ ; This would be easier if we had more (e.g. 32) YMM regs here.
+ mova [tmpq+ 7*%%str], m9
+ mova [tmpq+11*%%str], m12
+ mova [tmpq+15*%%str], m11
+ mova [tmpq+19*%%str], m2
+ mova [tmpq+23*%%str], m3
+ mova [tmpq+27*%%str], m13
+ mova [tmpq+31*%%str], m10
+ mova [tmpq+12*%%str], m5
+
+ mova m13, [tmpq+30*%%str] ; t8
+ mova m12, [tmpq+26*%%str] ; t9
+ mova m11, [tmpq+22*%%str] ; t10
+ mova m10, [tmpq+18*%%str] ; t11
+ mova m9, [tmpq+17*%%str] ; t20
+ mova m8, [tmpq+ 1*%%str] ; t21
+ mova m3, [tmpq+25*%%str] ; t22
+ mova m2, [tmpq+ 5*%%str] ; t23
+
+ SUMSUB_BA w, 9, 10, 5
+ SUMSUB_BA w, 8, 11, 5
+ SUMSUB_BA w, 3, 12, 5
+ SUMSUB_BA w, 2, 13, 5
+ mova [tmpq+ 1*%%str], m10
+ mova [tmpq+ 5*%%str], m11
+ mova [tmpq+17*%%str], m12
+ mova [tmpq+25*%%str], m13
+
+ mova m13, [tmpq+14*%%str] ; t12
+ mova m12, [tmpq+10*%%str] ; t13
+ mova m11, [tmpq+ 9*%%str] ; t18
+ mova m10, [tmpq+13*%%str] ; t19
+
+ SUMSUB_BA w, 11, 12, 5
+ SUMSUB_BA w, 10, 13, 5
+ mova [tmpq+ 9*%%str], m13
+ mova [tmpq+13*%%str], m12
+ mova [tmpq+10*%%str], m10
+ mova [tmpq+14*%%str], m11
+
+ mova m13, [tmpq+ 6*%%str] ; t14
+ mova m12, [tmpq+ 2*%%str] ; t15
+ mova m11, [tmpq+21*%%str] ; t16
+ mova m10, [tmpq+29*%%str] ; t17
+ SUMSUB_BA w, 11, 12, 5
+ SUMSUB_BA w, 10, 13, 5
+ mova [tmpq+21*%%str], m12
+ mova [tmpq+29*%%str], m13
+ mova m12, [tmpq+10*%%str]
+ mova m13, [tmpq+14*%%str]
+
+ TRANSPOSE16x16W 6, 0, 15, 14, 1, 7, 5, 4, \
+ 2, 3, 8, 9, 12, 13, 10, 11, \
+ [tmpq+12*%%str], [tmpq+ 8*%%str], 1
+ mova [tmpq+ 0*%%str], m6
+ mova [tmpq+ 2*%%str], m0
+ mova [tmpq+ 4*%%str], m15
+ mova [tmpq+ 6*%%str], m14
+ mova [tmpq+10*%%str], m7
+ mova [tmpq+12*%%str], m5
+ mova [tmpq+14*%%str], m4
+ mova [tmpq+16*%%str], m2
+ mova [tmpq+18*%%str], m3
+ mova [tmpq+20*%%str], m8
+ mova [tmpq+22*%%str], m9
+ mova [tmpq+24*%%str], m12
+ mova [tmpq+26*%%str], m13
+ mova [tmpq+28*%%str], m10
+ mova [tmpq+30*%%str], m11
+
+ mova m0, [tmpq+21*%%str]
+ mova m1, [tmpq+29*%%str]
+ mova m2, [tmpq+13*%%str]
+ mova m3, [tmpq+ 9*%%str]
+ mova m4, [tmpq+ 1*%%str]
+ mova m5, [tmpq+ 5*%%str]
+ mova m7, [tmpq+25*%%str]
+ mova m8, [tmpq+31*%%str]
+ mova m9, [tmpq+27*%%str]
+ mova m10, [tmpq+23*%%str]
+ mova m11, [tmpq+19*%%str]
+ mova m12, [tmpq+15*%%str]
+ mova m13, [tmpq+11*%%str]
+ mova m14, [tmpq+ 7*%%str]
+ mova m15, [tmpq+ 3*%%str]
+ TRANSPOSE16x16W 0, 1, 2, 3, 4, 5, 6, 7, \
+ 8, 9, 10, 11, 12, 13, 14, 15, \
+ [tmpq+17*%%str], [tmpq+ 9*%%str], 1
+ mova [tmpq+ 1*%%str], m0
+ mova [tmpq+ 3*%%str], m1
+ mova [tmpq+ 5*%%str], m2
+ mova [tmpq+ 7*%%str], m3
+ mova [tmpq+11*%%str], m5
+ mova [tmpq+13*%%str], m6
+ mova [tmpq+15*%%str], m7
+ mova [tmpq+17*%%str], m8
+ mova [tmpq+19*%%str], m9
+ mova [tmpq+21*%%str], m10
+ mova [tmpq+23*%%str], m11
+ mova [tmpq+25*%%str], m12
+ mova [tmpq+27*%%str], m13
+ mova [tmpq+29*%%str], m14
+ mova [tmpq+31*%%str], m15
+%else ; !avx2
+ TRANSPOSE8x8W 6, 0, 15, 14, 1, 7, 5, 4, 8
+ mova [tmpq+ 0*%%str], m6
+ mova [tmpq+ 4*%%str], m0
+ mova [tmpq+ 8*%%str], m15
+ mova [tmpq+12*%%str], m14
+ mova [tmpq+16*%%str], m1
+ mova [tmpq+20*%%str], m7
+ mova [tmpq+24*%%str], m5
+ mova [tmpq+28*%%str], m4
+
+ mova m8, [tmpq+ 3*%%str] ; t15
+ TRANSPOSE8x8W 10, 13, 3, 2, 11, 12, 9, 8, 0
+ mova [tmpq+ 3*%%str], m10
+ mova [tmpq+ 7*%%str], m13
+ mova [tmpq+11*%%str], m3
+ mova [tmpq+15*%%str], m2
+ mova [tmpq+19*%%str], m11
+ mova [tmpq+23*%%str], m12
+ mova [tmpq+27*%%str], m9
+ mova [tmpq+31*%%str], m8
+
+ mova m15, [tmpq+30*%%str] ; t8
+ mova m14, [tmpq+26*%%str] ; t9
+ mova m13, [tmpq+22*%%str] ; t10
+ mova m12, [tmpq+18*%%str] ; t11
+ mova m11, [tmpq+14*%%str] ; t12
+ mova m10, [tmpq+10*%%str] ; t13
+ mova m9, [tmpq+ 6*%%str] ; t14
+ mova m8, [tmpq+ 2*%%str] ; t15
+ mova m7, [tmpq+21*%%str] ; t16
+ mova m6, [tmpq+29*%%str] ; t17
+ mova m5, [tmpq+ 9*%%str] ; t18
+ mova m4, [tmpq+13*%%str] ; t19
+ mova m3, [tmpq+17*%%str] ; t20
+ mova m2, [tmpq+ 1*%%str] ; t21
+ mova m1, [tmpq+25*%%str] ; t22
+
+ SUMSUB_BA w, 7, 8, 0
+ mova [tmpq+ 2*%%str], m8
+ mova m0, [tmpq+ 5*%%str] ; t23
+ SUMSUB_BA w, 6, 9, 8
+ SUMSUB_BA w, 5, 10, 8
+ SUMSUB_BA w, 4, 11, 8
+ SUMSUB_BA w, 3, 12, 8
+ SUMSUB_BA w, 2, 13, 8
+ SUMSUB_BA w, 1, 14, 8
+ SUMSUB_BA w, 0, 15, 8
+
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
+ mova [tmpq+ 1*%%str], m0
+ mova [tmpq+ 5*%%str], m1
+ mova [tmpq+ 9*%%str], m2
+ mova [tmpq+13*%%str], m3
+ mova [tmpq+17*%%str], m4
+ mova [tmpq+21*%%str], m5
+ mova [tmpq+25*%%str], m6
+ mova [tmpq+29*%%str], m7
+
+ mova m8, [tmpq+ 2*%%str]
+ TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
+ mova [tmpq+ 2*%%str], m8
+ mova [tmpq+ 6*%%str], m9
+ mova [tmpq+10*%%str], m10
+ mova [tmpq+14*%%str], m11
+ mova [tmpq+18*%%str], m12
+ mova [tmpq+22*%%str], m13
+ mova [tmpq+26*%%str], m14
+ mova [tmpq+30*%%str], m15
+%endif ; avx2
+%else
+ mova m2, [tmpq+24*%%str] ; t6
+ mova m3, [tmpq+28*%%str] ; t7
+ SUMSUB_BADC w, 5, 2, 4, 3
+ mova [tmpq+24*%%str], m5
+ mova [tmpq+23*%%str], m2
+ mova [tmpq+28*%%str], m4
+ mova [tmpq+19*%%str], m3
+
+ mova m2, [tmpq+16*%%str] ; t4
+ mova m3, [tmpq+20*%%str] ; t5
+ SUMSUB_BA w, 1, 2, 5
+ SUMSUB_BA w, 7, 3, 5
+ mova [tmpq+15*%%str], m2
+ mova [tmpq+11*%%str], m3
+
+ mova m2, [tmpq+ 0*%%str] ; t0
+ mova m3, [tmpq+ 4*%%str] ; t1
+ SUMSUB_BA w, 6, 2, 5
+ SUMSUB_BA w, 0, 3, 5
+ mova [tmpq+31*%%str], m2
+ mova [tmpq+27*%%str], m3
+
+ mova m2, [tmpq+ 8*%%str] ; t2
+ mova m3, [tmpq+12*%%str] ; t3
+ mova m5, [tmpq+ 7*%%str]
+ mova m4, [tmpq+ 3*%%str]
+ SUMSUB_BADC w, 5, 2, 4, 3
+ mova [tmpq+ 7*%%str], m2
+ mova [tmpq+ 3*%%str], m3
+
+ mova m3, [tmpq+28*%%str]
+ TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+24*%%str], [tmpq+16*%%str], 1
+ mova [tmpq+ 0*%%str], m6
+ mova [tmpq+ 4*%%str], m0
+ mova [tmpq+ 8*%%str], m5
+ mova [tmpq+12*%%str], m4
+ mova [tmpq+20*%%str], m7
+ mova [tmpq+24*%%str], m2
+ mova [tmpq+28*%%str], m3
+
+ mova m6, [tmpq+19*%%str]
+ mova m0, [tmpq+23*%%str]
+ mova m5, [tmpq+11*%%str]
+ mova m4, [tmpq+15*%%str]
+ mova m1, [tmpq+ 3*%%str]
+ mova m7, [tmpq+ 7*%%str]
+ mova m3, [tmpq+31*%%str]
+ TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+27*%%str], [tmpq+19*%%str], 1
+ mova [tmpq+ 3*%%str], m6
+ mova [tmpq+ 7*%%str], m0
+ mova [tmpq+11*%%str], m5
+ mova [tmpq+15*%%str], m4
+ mova [tmpq+23*%%str], m7
+ mova [tmpq+27*%%str], m2
+ mova [tmpq+31*%%str], m3
+
+ mova m1, [tmpq+ 6*%%str] ; t14
+ mova m0, [tmpq+ 2*%%str] ; t15
+ mova m7, [tmpq+21*%%str] ; t16
+ mova m6, [tmpq+29*%%str] ; t17
+ SUMSUB_BA w, 7, 0, 2
+ SUMSUB_BA w, 6, 1, 2
+ mova [tmpq+29*%%str], m7
+ mova [tmpq+ 2*%%str], m0
+ mova [tmpq+21*%%str], m6
+ mova [tmpq+ 6*%%str], m1
+
+ mova m1, [tmpq+14*%%str] ; t12
+ mova m0, [tmpq+10*%%str] ; t13
+ mova m5, [tmpq+ 9*%%str] ; t18
+ mova m4, [tmpq+13*%%str] ; t19
+ SUMSUB_BA w, 5, 0, 2
+ SUMSUB_BA w, 4, 1, 2
+ mova [tmpq+10*%%str], m0
+ mova [tmpq+14*%%str], m1
+
+ mova m1, [tmpq+22*%%str] ; t10
+ mova m0, [tmpq+18*%%str] ; t11
+ mova m3, [tmpq+17*%%str] ; t20
+ mova m2, [tmpq+ 1*%%str] ; t21
+ SUMSUB_BA w, 3, 0, 6
+ SUMSUB_BA w, 2, 1, 6
+ mova [tmpq+18*%%str], m0
+ mova [tmpq+22*%%str], m1
+
+ mova m7, [tmpq+30*%%str] ; t8
+ mova m6, [tmpq+26*%%str] ; t9
+ mova m1, [tmpq+25*%%str] ; t22
+ mova m0, [tmpq+ 5*%%str] ; t23
+ SUMSUB_BADC w, 1, 6, 0, 7
+ mova [tmpq+26*%%str], m6
+ mova [tmpq+30*%%str], m7
+
+ mova m7, [tmpq+29*%%str]
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+21*%%str], [tmpq+17*%%str], 1
+ mova [tmpq+ 1*%%str], m0
+ mova [tmpq+ 5*%%str], m1
+ mova [tmpq+ 9*%%str], m2
+ mova [tmpq+13*%%str], m3
+ mova [tmpq+21*%%str], m5
+ mova [tmpq+25*%%str], m6
+ mova [tmpq+29*%%str], m7
+
+ mova m0, [tmpq+ 2*%%str]
+ mova m1, [tmpq+ 6*%%str]
+ mova m2, [tmpq+10*%%str]
+ mova m3, [tmpq+14*%%str]
+ mova m4, [tmpq+18*%%str]
+ mova m5, [tmpq+22*%%str]
+ mova m7, [tmpq+30*%%str]
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+26*%%str], [tmpq+18*%%str], 1
+ mova [tmpq+ 2*%%str], m0
+ mova [tmpq+ 6*%%str], m1
+ mova [tmpq+10*%%str], m2
+ mova [tmpq+14*%%str], m3
+ mova [tmpq+22*%%str], m5
+ mova [tmpq+26*%%str], m6
+ mova [tmpq+30*%%str], m7
+%endif
+%else
+ ; t0-7 is in [tmpq+{0,4,8,12,16,20,24,28}*%%str]
+ ; t8-15 is in [tmpq+{2,6,10,14,18,22,26,30}*%%str]
+ ; t16-19 and t23 is in [tmpq+{1,5,9,13,29}*%%str]
+ ; t20-22 is in m4-6
+ ; t24-31 is in m8-15
+
+%if cpuflag(ssse3)
+%define ROUND_REG [pw_512]
+%else
+%define ROUND_REG [pw_32]
+%endif
+
+%macro %%STORE_2X2 7-8 1 ; src[1-4], tmp[1-2], zero, inc_dst_ptrs
+ SUMSUB_BA w, %4, %1, %5
+ SUMSUB_BA w, %3, %2, %5
+ VP9_IDCT8_WRITEx2 %4, %3, %5, %6, %7, ROUND_REG, 6
+%if %8 == 1
+ add dstq, stride2q
+%endif
+ VP9_IDCT8_WRITEx2 %2, %1, %5, %6, %7, ROUND_REG, 6, dst_endq
+%if %8 == 1
+ sub dst_endq, stride2q
+%endif
+%endmacro
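+; in effect each %%STORE_2X2 call forms one final butterfly pair,
+; out[N] = t[N] + t[31-N] and out[31-N] = t[N] - t[31-N], writing the sum pair
+; forward from dstq and the difference pair backward from dst_endq (which
+; starts stride*30 past dstq and is stepped back by stride*2 per call)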
+
+%if ARCH_X86_64
+ pxor m10, m10
+
+ ; store t0-1 and t30-31
+ mova m8, [tmpq+ 0*%%str]
+ mova m9, [tmpq+ 4*%%str]
+ %%STORE_2X2 8, 9, 0, 6, 12, 11, 10
+
+ ; store t2-3 and t28-29
+ mova m8, [tmpq+ 8*%%str]
+ mova m9, [tmpq+12*%%str]
+ %%STORE_2X2 8, 9, 14, 15, 12, 11, 10
+
+ ; store t4-5 and t26-27
+ mova m8, [tmpq+16*%%str]
+ mova m9, [tmpq+20*%%str]
+ %%STORE_2X2 8, 9, 7, 1, 12, 11, 10
+
+ ; store t6-7 and t24-25
+ mova m8, [tmpq+24*%%str]
+ mova m9, [tmpq+28*%%str]
+ %%STORE_2X2 8, 9, 4, 5, 12, 11, 10
+
+ ; store t8-9 and t22-23
+ mova m8, [tmpq+30*%%str]
+ mova m9, [tmpq+26*%%str]
+ mova m0, [tmpq+ 5*%%str]
+ %%STORE_2X2 8, 9, 13, 0, 12, 11, 10
+
+ ; store t10-11 and t20-21
+ mova m8, [tmpq+22*%%str]
+ mova m9, [tmpq+18*%%str]
+ %%STORE_2X2 8, 9, 2, 3, 12, 11, 10
+
+ ; store t12-13 and t18-19
+ mova m8, [tmpq+14*%%str]
+ mova m9, [tmpq+10*%%str]
+ mova m5, [tmpq+13*%%str]
+ mova m4, [tmpq+ 9*%%str]
+ %%STORE_2X2 8, 9, 4, 5, 12, 11, 10
+
+ ; store t14-17
+ mova m8, [tmpq+ 6*%%str]
+ mova m9, [tmpq+ 2*%%str]
+ mova m5, [tmpq+29*%%str]
+ mova m4, [tmpq+21*%%str]
+ %%STORE_2X2 8, 9, 4, 5, 12, 11, 10, 0
+
+ SWAP 1, 10 ; zero
+%else
+ mova [tmpq+ 1*%%str], m1
+ mova [tmpq+11*%%str], m2
+ mova [tmpq+15*%%str], m3
+ mova [tmpq+17*%%str], m4
+ mova [tmpq+19*%%str], m5
+ pxor m1, m1
+
+ ; store t0-1 and t30-31
+ mova m2, [tmpq+ 0*%%str]
+ mova m3, [tmpq+ 4*%%str]
+ %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
+
+ ; store t2-3 and t28-29
+ mova m2, [tmpq+ 8*%%str]
+ mova m3, [tmpq+12*%%str]
+ mova m0, [tmpq+ 3*%%str]
+ mova m6, [tmpq+ 7*%%str]
+ %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
+
+ ; store t4-5 and t26-27
+ mova m2, [tmpq+16*%%str]
+ mova m3, [tmpq+20*%%str]
+ mova m0, [tmpq+ 1*%%str]
+ %%STORE_2X2 2, 3, 7, 0, 4, 5, 1
+
+ ; store t6-7 and t24-25
+ mova m2, [tmpq+24*%%str]
+ mova m3, [tmpq+28*%%str]
+ mova m0, [tmpq+17*%%str]
+ mova m6, [tmpq+19*%%str]
+ %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
+
+ ; store t8-9 and t22-23
+ mova m2, [tmpq+30*%%str]
+ mova m3, [tmpq+26*%%str]
+ mova m0, [tmpq+25*%%str]
+ mova m6, [tmpq+ 5*%%str]
+ %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
+
+ ; store t10-11 and t20-21
+ mova m2, [tmpq+22*%%str]
+ mova m3, [tmpq+18*%%str]
+ mova m0, [tmpq+11*%%str]
+ mova m6, [tmpq+15*%%str]
+ %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
+
+ ; store t12-13 and t18-19
+ mova m2, [tmpq+14*%%str]
+ mova m3, [tmpq+10*%%str]
+ mova m6, [tmpq+13*%%str]
+ mova m0, [tmpq+ 9*%%str]
+ %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
+
+ ; store t14-17
+ mova m2, [tmpq+ 6*%%str]
+ mova m3, [tmpq+ 2*%%str]
+ mova m6, [tmpq+29*%%str]
+ mova m0, [tmpq+21*%%str]
+ %%STORE_2X2 2, 3, 0, 6, 4, 5, 1, 0
+%endif
+%undef ROUND_REG
+%endif
+%endmacro
+
+%macro VP9_IDCT_IDCT_32x32_ADD_XMM 1
+INIT_XMM %1
+cglobal vp9_idct_idct_32x32_add, 0, 6 + ARCH_X86_64 * 3, 16, 2048, dst, stride, block, eob
+ movifnidn eobd, dword eobm
+%if cpuflag(ssse3)
+ cmp eobd, 135
+ jg .idctfull
+ cmp eobd, 34
+ jg .idct16x16
+ cmp eobd, 1
+ jg .idct8x8
+%else
+ cmp eobd, 1
+ jg .idctfull
+%endif
+
+ ; dc-only case
+ movifnidn blockq, blockmp
+ movifnidn dstq, dstmp
+ movifnidn strideq, stridemp
+%if cpuflag(ssse3)
+ movd m0, [blockq]
+ mova m1, [pw_11585x2]
+ pmulhrsw m0, m1
+ pmulhrsw m0, m1
+%else
+ DEFINE_ARGS dst, stride, block, coef
+ movsx coefd, word [blockq]
+ imul coefd, 11585
+ add coefd, 8192
+ sar coefd, 14
+ imul coefd, 11585
+ add coefd, (32 << 14) + 8192
+ sar coefd, 14 + 6
+ movd m0, coefd
+%endif
+ SPLATW m0, m0, q0000
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_512]
+%endif
+ pxor m5, m5
+ movd [blockq], m5
+%rep 31
+ VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
+ add dstq, strideq
+%endrep
+ VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
+ RET
+
+%if ARCH_X86_64
+ DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
+%else
+%define dst_bakq r0mp
+%endif
+%if cpuflag(ssse3)
+.idct8x8:
+%if ARCH_X86_32
+ DEFINE_ARGS block, u1, u2, u3, u4, tmp
+ mov blockq, r2mp
+%endif
+ mov tmpq, rsp
+ VP9_IDCT32_1D blockq, 1, 8
+
+%if ARCH_X86_32
+ DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
+ mov strideq, r1mp
+%define cntd dword r3m
+%endif
+ mov stride30q, strideq ; stride
+ lea stride2q, [strideq*2] ; stride*2
+ shl stride30q, 5 ; stride*32
+ mov cntd, 4
+ sub stride30q, stride2q ; stride*30
+.loop2_8x8:
+ mov dstq, dst_bakq
+ lea dst_endq, [dstq+stride30q]
+ VP9_IDCT32_1D tmpq, 2, 8
+ add dst_bakq, 8
+ add tmpq, 16
+ dec cntd
+ jg .loop2_8x8
+
+ ; at the end of the loop, m1 should still be zero
+ ; use that to zero out block coefficients
+%if ARCH_X86_32
+ DEFINE_ARGS block
+ mov blockq, r2mp
+%endif
+ ZERO_BLOCK blockq, 64, 8, m1
+ RET
+
+.idct16x16:
+%if ARCH_X86_32
+ DEFINE_ARGS block, tmp, cnt
+ mov blockq, r2mp
+%endif
+ mov cntd, 2
+ mov tmpq, rsp
+.loop1_16x16:
+ VP9_IDCT32_1D blockq, 1, 16
+ add blockq, 16
+ add tmpq, 512
+ dec cntd
+ jg .loop1_16x16
+
+%if ARCH_X86_64
+ sub blockq, 32
+%else
+ DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
+ mov strideq, r1mp
+%define cntd dword r3m
+%endif
+
+ mov stride30q, strideq ; stride
+ lea stride2q, [strideq*2] ; stride*2
+ shl stride30q, 5 ; stride*32
+ mov cntd, 4
+ mov tmpq, rsp
+ sub stride30q, stride2q ; stride*30
+.loop2_16x16:
+ mov dstq, dst_bakq
+ lea dst_endq, [dstq+stride30q]
+ VP9_IDCT32_1D tmpq, 2, 16
+ add dst_bakq, 8
+ add tmpq, 16
+ dec cntd
+ jg .loop2_16x16
+
+ ; at the end of the loop, m1 should still be zero
+ ; use that to zero out block coefficients
+%if ARCH_X86_32
+ DEFINE_ARGS block
+ mov blockq, r2mp
+%endif
+ ZERO_BLOCK blockq, 64, 16, m1
+ RET
+%endif
+
+.idctfull:
+%if ARCH_X86_32
+ DEFINE_ARGS block, tmp, cnt
+ mov blockq, r2mp
+%endif
+ mov cntd, 4
+ mov tmpq, rsp
+.loop1_full:
+ VP9_IDCT32_1D blockq, 1
+ add blockq, 16
+ add tmpq, 512
+ dec cntd
+ jg .loop1_full
+
+%if ARCH_X86_64
+ sub blockq, 64
+%else
+ DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
+ mov strideq, r1mp
+%define cntd dword r3m
+%endif
+
+ mov stride30q, strideq ; stride
+ lea stride2q, [strideq*2] ; stride*2
+ shl stride30q, 5 ; stride*32
+ mov cntd, 4
+ mov tmpq, rsp
+ sub stride30q, stride2q ; stride*30
+.loop2_full:
+ mov dstq, dst_bakq
+ lea dst_endq, [dstq+stride30q]
+ VP9_IDCT32_1D tmpq, 2
+ add dst_bakq, 8
+ add tmpq, 16
+ dec cntd
+ jg .loop2_full
+
+ ; at the end of the loop, m1 should still be zero
+ ; use that to zero out block coefficients
+%if ARCH_X86_32
+ DEFINE_ARGS block
+ mov blockq, r2mp
+%endif
+ ZERO_BLOCK blockq, 64, 32, m1
+ RET
+%endmacro
+
+VP9_IDCT_IDCT_32x32_ADD_XMM sse2
+VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
+VP9_IDCT_IDCT_32x32_ADD_XMM avx
+
+; this is almost identical to VP9_STORE_2X, but it does two rows
+; for slightly improved interleaving, and it omits vpermq since the
+; input is DC so all values are identical
+%macro VP9_STORE_YMM_DC_2X2 6 ; reg, tmp1, tmp2, tmp3, tmp4, zero
+ mova m%2, [dstq]
+ mova m%4, [dstq+strideq]
+ punpckhbw m%3, m%2, m%6
+ punpcklbw m%2, m%6
+ punpckhbw m%5, m%4, m%6
+ punpcklbw m%4, m%6
+ paddw m%3, m%1
+ paddw m%2, m%1
+ paddw m%5, m%1
+ paddw m%4, m%1
+ packuswb m%2, m%3
+ packuswb m%4, m%5
+ mova [dstq+strideq*0], m%2
+ mova [dstq+strideq*1], m%4
+%endmacro
+
+%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
+ cmp eobd, 135
+ jg .idctfull
+ cmp eobd, 1
+ jg .idct16x16
+
+ ; dc-only case
+ mova m1, [pw_11585x2]
+ vpbroadcastw m0, [blockq]
+ pmulhrsw m0, m1
+ pmulhrsw m0, m1
+ pxor m5, m5
+ pmulhrsw m0, [pw_512]
+ movd [blockq], xm5
+
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 16
+.loop_dc:
+ VP9_STORE_YMM_DC_2X2 0, 1, 2, 3, 4, 5
+ lea dstq, [dstq+2*strideq]
+ dec cntd
+ jg .loop_dc
+ RET
+
+ DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
+.idct16x16:
+ mov tmpq, rsp
+ VP9_IDCT32_1D blockq, 1, 16
+
+ mov stride30q, strideq ; stride
+ lea stride2q, [strideq*2] ; stride*2
+ shl stride30q, 5 ; stride*32
+ mov cntd, 2
+ sub stride30q, stride2q ; stride*30
+.loop2_16x16:
+ mov dstq, dst_bakq
+ lea dst_endq, [dstq+stride30q]
+ VP9_IDCT32_1D tmpq, 2, 16
+ add dst_bakq, 16
+ add tmpq, 32
+ dec cntd
+ jg .loop2_16x16
+
+ ; at the end of the loop, m1 should still be zero
+ ; use that to zero out block coefficients
+ ZERO_BLOCK blockq, 64, 16, m1
+ RET
+
+.idctfull:
+ mov cntd, 2
+ mov tmpq, rsp
+.loop1_full:
+ VP9_IDCT32_1D blockq, 1
+ add blockq, 32
+ add tmpq, 1024
+ dec cntd
+ jg .loop1_full
+
+ sub blockq, 64
+
+ mov stride30q, strideq ; stride
+ lea stride2q, [strideq*2] ; stride*2
+ shl stride30q, 5 ; stride*32
+ mov cntd, 2
+ mov tmpq, rsp
+ sub stride30q, stride2q ; stride*30
+.loop2_full:
+ mov dstq, dst_bakq
+ lea dst_endq, [dstq+stride30q]
+ VP9_IDCT32_1D tmpq, 2
+ add dst_bakq, 16
+ add tmpq, 32
+ dec cntd
+ jg .loop2_full
+
+ ; at the end of the loop, m1 should still be zero
+ ; use that to zero out block coefficients
+ ZERO_BLOCK blockq, 64, 32, m1
+ RET
+%endif
diff --git a/media/ffvpx/libavcodec/x86/vp9itxfm_16bpp.asm b/media/ffvpx/libavcodec/x86/vp9itxfm_16bpp.asm
new file mode 100644
index 0000000000..902685edf6
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9itxfm_16bpp.asm
@@ -0,0 +1,2044 @@
+;******************************************************************************
+;* VP9 inverse transform x86 SIMD optimizations
+;*
+;* Copyright (C) 2015 Ronald S. Bultje <rsbultje gmail com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+%include "vp9itxfm_template.asm"
+
+SECTION_RODATA
+
+cextern pw_8
+cextern pw_1023
+cextern pw_2048
+cextern pw_4095
+cextern pw_m1
+cextern pd_1
+cextern pd_16
+cextern pd_32
+cextern pd_8192
+
+pd_8: times 4 dd 8
+pd_3fff: times 4 dd 0x3fff
+
+cextern pw_11585x2
+
+cextern pw_5283_13377
+cextern pw_9929_13377
+cextern pw_15212_m13377
+cextern pw_15212_9929
+cextern pw_m5283_m15212
+cextern pw_13377x2
+cextern pw_m13377_13377
+cextern pw_13377_0
+
+pw_9929_m5283: times 4 dw 9929, -5283
+
+%macro COEF_PAIR 2-3
+cextern pw_m%1_%2
+cextern pw_%2_%1
+%if %0 == 3
+cextern pw_m%1_m%2
+%if %1 != %2
+cextern pw_m%2_%1
+cextern pw_%1_%2
+%endif
+%endif
+%endmacro
+
+COEF_PAIR 2404, 16207
+COEF_PAIR 3196, 16069, 1
+COEF_PAIR 4756, 15679
+COEF_PAIR 5520, 15426
+COEF_PAIR 6270, 15137, 1
+COEF_PAIR 8423, 14053
+COEF_PAIR 10394, 12665
+COEF_PAIR 11003, 12140
+COEF_PAIR 11585, 11585, 1
+COEF_PAIR 13160, 9760
+COEF_PAIR 13623, 9102, 1
+COEF_PAIR 14449, 7723
+COEF_PAIR 14811, 7005
+COEF_PAIR 15893, 3981
+COEF_PAIR 16305, 1606
+COEF_PAIR 16364, 804
+
+default_8x8:
+times 12 db 1
+times 52 db 2
+row_8x8:
+times 18 db 1
+times 46 db 2
+col_8x8:
+times 6 db 1
+times 58 db 2
+default_16x16:
+times 10 db 1
+times 28 db 2
+times 51 db 3
+times 167 db 4
+row_16x16:
+times 21 db 1
+times 45 db 2
+times 60 db 3
+times 130 db 4
+col_16x16:
+times 5 db 1
+times 12 db 2
+times 25 db 3
+times 214 db 4
+default_32x32:
+times 9 db 1
+times 25 db 2
+times 36 db 3
+times 65 db 4
+times 105 db 5
+times 96 db 6
+times 112 db 7
+times 576 db 8
+
+SECTION .text
+
+%macro VP9_STORE_2X 6-7 dstq ; reg1, reg2, tmp1, tmp2, min, max, dst
+ mova m%3, [%7]
+ mova m%4, [%7+strideq]
+ paddw m%3, m%1
+ paddw m%4, m%2
+ pmaxsw m%3, m%5
+ pmaxsw m%4, m%5
+ pminsw m%3, m%6
+ pminsw m%4, m%6
+ mova [%7], m%3
+ mova [%7+strideq], m%4
+%endmacro
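+; unlike the 8 bpp store, the pixels here are words, so instead of packing with
+; unsigned saturation the sums are clamped explicitly with pmaxsw/pminsw against
+; the supplied min (the zero register) and max ([pw_1023] for 10 bpp,
+; [pw_4095] for 12 bpp)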
+
+%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
+%assign %%y 0
+%rep %3
+%assign %%x 0
+%rep %3*4/mmsize
+ mova [%1+%%y+%%x], %4
+%assign %%x (%%x+mmsize)
+%endrep
+%assign %%y (%%y+%2)
+%endrep
+%endmacro
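+; note that the coefficients in this file are dwords, so each of the %3 rows
+; cleared covers %3*4 bytes, hence the inner %rep stepping mmsize bytes
+; %3*4/mmsize times per row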
+
+; the input coefficients are scaled up by 2 bits (which we downscale immediately
+; in the iwht), and otherwise grow by 1 bit per (orthonormal) iwht_1d pass.
+; therefore, a diff of 10-12+sign bits will fit in 12-14+sign bits after scaling,
+; i.e. everything can be done in 15+sign words. Since the quant fractional bits
+; add 2 bits, we need to scale before converting to words in 12bpp, since the
+; input will be 16+sign bits, which doesn't fit in 15+sign words, but in 10bpp
+; we can scale after converting to words (which is half the instructions),
+; since the input is only 14+sign bits, which fits in 15+sign words directly.
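+; concretely, this is why the %1 >= 12 branches below psrad the dword inputs by
+; 2 before packssdw, while the 10 bpp path packs to words first and then does a
+; single psraw by 2 per packed register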
+
+%macro IWHT4_FN 2 ; bpp, max
+cglobal vp9_iwht_iwht_4x4_add_%1, 3, 3, 8, dst, stride, block, eob
+ mova m7, [pw_%2]
+ mova m0, [blockq+0*16+0]
+ mova m1, [blockq+1*16+0]
+%if %1 >= 12
+ mova m4, [blockq+0*16+8]
+ mova m5, [blockq+1*16+8]
+ psrad m0, 2
+ psrad m1, 2
+ psrad m4, 2
+ psrad m5, 2
+ packssdw m0, m4
+ packssdw m1, m5
+%else
+ packssdw m0, [blockq+0*16+8]
+ packssdw m1, [blockq+1*16+8]
+ psraw m0, 2
+ psraw m1, 2
+%endif
+ mova m2, [blockq+2*16+0]
+ mova m3, [blockq+3*16+0]
+%if %1 >= 12
+ mova m4, [blockq+2*16+8]
+ mova m5, [blockq+3*16+8]
+ psrad m2, 2
+ psrad m3, 2
+ psrad m4, 2
+ psrad m5, 2
+ packssdw m2, m4
+ packssdw m3, m5
+%else
+ packssdw m2, [blockq+2*16+8]
+ packssdw m3, [blockq+3*16+8]
+ psraw m2, 2
+ psraw m3, 2
+%endif
+
+ VP9_IWHT4_1D
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+ VP9_IWHT4_1D
+
+ pxor m6, m6
+ VP9_STORE_2X 0, 1, 4, 5, 6, 7
+ lea dstq, [dstq+strideq*2]
+ VP9_STORE_2X 2, 3, 4, 5, 6, 7
+ ZERO_BLOCK blockq, 16, 4, m6
+ RET
+%endmacro
+
+INIT_MMX mmxext
+IWHT4_FN 10, 1023
+INIT_MMX mmxext
+IWHT4_FN 12, 4095
+
+%macro VP9_IDCT4_WRITEOUT 0
+%if cpuflag(ssse3)
+ mova m5, [pw_2048]
+ pmulhrsw m0, m5
+ pmulhrsw m1, m5
+ pmulhrsw m2, m5
+ pmulhrsw m3, m5
+%else
+ mova m5, [pw_8]
+ paddw m0, m5
+ paddw m1, m5
+ paddw m2, m5
+ paddw m3, m5
+ psraw m0, 4
+ psraw m1, 4
+ psraw m2, 4
+ psraw m3, 4
+%endif
+ mova m5, [pw_1023]
+ VP9_STORE_2X 0, 1, 6, 7, 4, 5
+ lea dstq, [dstq+2*strideq]
+ VP9_STORE_2X 2, 3, 6, 7, 4, 5
+%endmacro
+
+%macro DC_ONLY 2 ; shift, zero
+ mov coefd, dword [blockq]
+ movd [blockq], %2
+ imul coefd, 11585
+ add coefd, 8192
+ sar coefd, 14
+ imul coefd, 11585
+ add coefd, ((1 << (%1 - 1)) << 14) + 8192
+ sar coefd, 14 + %1
+%endmacro
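+; DC_ONLY is the scalar counterpart of the pmulhrsw dc path: the two rounds of
+; (x*11585 + 8192) >> 14 approximate multiplying by cos(pi/4) twice (~x/2), and
+; the second add also folds in the 1 << (%1 - 1) rounding term for the final
+; right shift by %1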
+
+; 4x4 coefficients are 5+depth+sign bits, so for 10bpp, everything still fits
+; in 15+sign words without additional effort, since the coefficients are at
+; most 15 bits.
+
+%macro IDCT4_10_FN 0
+cglobal vp9_idct_idct_4x4_add_10, 4, 4, 8, dst, stride, block, eob
+ cmp eobd, 1
+ jg .idctfull
+
+ ; dc-only
+ pxor m4, m4
+%if cpuflag(ssse3)
+ movd m0, [blockq]
+ movd [blockq], m4
+ mova m5, [pw_11585x2]
+ pmulhrsw m0, m5
+ pmulhrsw m0, m5
+%else
+ DEFINE_ARGS dst, stride, block, coef
+ DC_ONLY 4, m4
+ movd m0, coefd
+%endif
+ pshufw m0, m0, 0
+ mova m5, [pw_1023]
+%if cpuflag(ssse3)
+ pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
+%endif
+ VP9_STORE_2X 0, 0, 6, 7, 4, 5
+ lea dstq, [dstq+2*strideq]
+ VP9_STORE_2X 0, 0, 6, 7, 4, 5
+ RET
+
+.idctfull:
+ mova m0, [blockq+0*16+0]
+ mova m1, [blockq+1*16+0]
+ packssdw m0, [blockq+0*16+8]
+ packssdw m1, [blockq+1*16+8]
+ mova m2, [blockq+2*16+0]
+ mova m3, [blockq+3*16+0]
+ packssdw m2, [blockq+2*16+8]
+ packssdw m3, [blockq+3*16+8]
+
+%if cpuflag(ssse3)
+ mova m6, [pw_11585x2]
+%endif
+ mova m7, [pd_8192] ; rounding
+ VP9_IDCT4_1D
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+ VP9_IDCT4_1D
+
+ pxor m4, m4
+ ZERO_BLOCK blockq, 16, 4, m4
+ VP9_IDCT4_WRITEOUT
+ RET
+%endmacro
+
+INIT_MMX mmxext
+IDCT4_10_FN
+INIT_MMX ssse3
+IDCT4_10_FN
+
+%macro IADST4_FN 4
+cglobal vp9_%1_%3_4x4_add_10, 3, 3, 0, dst, stride, block, eob
+%if WIN64 && notcpuflag(ssse3)
+ WIN64_SPILL_XMM 8
+%endif
+ movdqa xmm5, [pd_8192]
+ mova m0, [blockq+0*16+0]
+ mova m1, [blockq+1*16+0]
+ packssdw m0, [blockq+0*16+8]
+ packssdw m1, [blockq+1*16+8]
+ mova m2, [blockq+2*16+0]
+ mova m3, [blockq+3*16+0]
+ packssdw m2, [blockq+2*16+8]
+ packssdw m3, [blockq+3*16+8]
+
+%if cpuflag(ssse3)
+ mova m6, [pw_11585x2]
+%endif
+%ifnidn %1%3, iadstiadst
+ movdq2q m7, xmm5
+%endif
+ VP9_%2_1D
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+ VP9_%4_1D
+
+ pxor m4, m4
+ ZERO_BLOCK blockq, 16, 4, m4
+ VP9_IDCT4_WRITEOUT
+ RET
+%endmacro
+
+INIT_MMX sse2
+IADST4_FN idct, IDCT4, iadst, IADST4
+IADST4_FN iadst, IADST4, idct, IDCT4
+IADST4_FN iadst, IADST4, iadst, IADST4
+
+INIT_MMX ssse3
+IADST4_FN idct, IDCT4, iadst, IADST4
+IADST4_FN iadst, IADST4, idct, IDCT4
+IADST4_FN iadst, IADST4, iadst, IADST4
+
+; inputs and outputs are dwords, coefficients are words
+;
+; dst1 = src1 * coef1 + src2 * coef2 + rnd >> 14
+; dst2 = src1 * coef2 - src2 * coef1 + rnd >> 14
+%macro SUMSUB_MUL 6-8 [pd_8192], [pd_3fff] ; src/dst 1-2, tmp1-2, coef1-2, rnd, mask
+ pand m%3, m%1, %8
+ pand m%4, m%2, %8
+ psrad m%1, 14
+ psrad m%2, 14
+ packssdw m%4, m%2
+ packssdw m%3, m%1
+ punpckhwd m%2, m%4, m%3
+ punpcklwd m%4, m%3
+ pmaddwd m%3, m%4, [pw_%6_%5]
+ pmaddwd m%1, m%2, [pw_%6_%5]
+ pmaddwd m%4, [pw_m%5_%6]
+ pmaddwd m%2, [pw_m%5_%6]
+ paddd m%3, %7
+ paddd m%4, %7
+ psrad m%3, 14
+ psrad m%4, 14
+ paddd m%1, m%3
+ paddd m%2, m%4
+%endmacro
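+
+; Implementation note for SUMSUB_MUL: pmaddwd only takes 16-bit operands,
+; so each dword source is split into its low 14 bits (masked with pd_3fff)
+; and its remaining high bits (psrad 14). Both halves are multiplied
+; against the packed word coefficients and recombined as
+;   hi_dot + ((lo_dot + rnd) >> 14)
+; which gives the same result as doing the full-width multiplies and a
+; single (x + rnd) >> 14 at the end.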
+
+%macro IDCT4_12BPP_1D 0-8 [pd_8192], [pd_3fff], 0, 1, 2, 3, 4, 5 ; rnd, mask, in/out0-3, tmp0-1
+ SUMSUB_MUL %3, %5, %7, %8, 11585, 11585, %1, %2
+ SUMSUB_MUL %4, %6, %7, %8, 15137, 6270, %1, %2
+ SUMSUB_BA d, %4, %3, %7
+ SUMSUB_BA d, %6, %5, %7
+ SWAP %4, %6, %3
+%endmacro
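+
+; The word constants used here and below are the usual 14-bit fixed-point
+; DCT factors, round(cos(k*pi/64) * 2^14): e.g. 11585 ~= cos(pi/4)*2^14,
+; 15137 ~= cos(pi/8)*2^14 and 6270 ~= sin(pi/8)*2^14; pd_8192 (1<<13) is
+; the rounding term for the following >>14.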
+
+%macro STORE_4x4 6 ; tmp1-2, reg1-2, min, max
+ movh m%1, [dstq+strideq*0]
+ movh m%2, [dstq+strideq*2]
+ movhps m%1, [dstq+strideq*1]
+ movhps m%2, [dstq+stride3q ]
+ paddw m%1, m%3
+ paddw m%2, m%4
+ pmaxsw m%1, %5
+ pmaxsw m%2, %5
+ pminsw m%1, %6
+ pminsw m%2, %6
+ movh [dstq+strideq*0], m%1
+ movhps [dstq+strideq*1], m%1
+ movh [dstq+strideq*2], m%2
+ movhps [dstq+stride3q ], m%2
+%endmacro
+
+%macro ROUND_AND_STORE_4x4 8 ; reg1-4, min, max, rnd, shift
+ paddd m%1, %7
+ paddd m%2, %7
+ paddd m%3, %7
+ paddd m%4, %7
+ psrad m%1, %8
+ psrad m%2, %8
+ psrad m%3, %8
+ psrad m%4, %8
+ packssdw m%1, m%2
+ packssdw m%3, m%4
+ STORE_4x4 %2, %4, %1, %3, %5, %6
+%endmacro
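+
+; ROUND_AND_STORE_4x4 finishes a column pass: it applies the final
+; (x + rnd) >> shift, packs the dwords back to words, and STORE_4x4 then
+; adds the prediction loaded from dstq and clamps to [min, max] (zero and
+; the pw_1023/pw_4095 pixel maximum) before writing back.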
+
+INIT_XMM sse2
+cglobal vp9_idct_idct_4x4_add_12, 4, 4, 8, dst, stride, block, eob
+ cmp eobd, 1
+ jg .idctfull
+
+ ; dc-only - this is special, since for 4x4 12bpp, the max coef size is
+ ; 17+sign bpp. Since the multiply is with 11585, which is 14bpp, the
+ ; result of each multiply is 31+sign bit, i.e. it _exactly_ fits in a
+ ; dword. After the final shift (4), the result is 13+sign bits, so we
+ ; don't need any additional processing to fit it in a word
+ DEFINE_ARGS dst, stride, block, coef
+ pxor m4, m4
+ DC_ONLY 4, m4
+ movd m0, coefd
+ pshuflw m0, m0, q0000
+ punpcklqdq m0, m0
+ mova m5, [pw_4095]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ STORE_4x4 1, 3, 0, 0, m4, m5
+ RET
+
+.idctfull:
+ DEFINE_ARGS dst, stride, block, eob
+ mova m0, [blockq+0*16]
+ mova m1, [blockq+1*16]
+ mova m2, [blockq+2*16]
+ mova m3, [blockq+3*16]
+ mova m6, [pd_8192]
+ mova m7, [pd_3fff]
+
+ IDCT4_12BPP_1D m6, m7
+ TRANSPOSE4x4D 0, 1, 2, 3, 4
+ IDCT4_12BPP_1D m6, m7
+
+ pxor m4, m4
+ ZERO_BLOCK blockq, 16, 4, m4
+
+ ; writeout
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ mova m5, [pw_4095]
+ mova m6, [pd_8]
+ ROUND_AND_STORE_4x4 0, 1, 2, 3, m4, m5, m6, 4
+ RET
+
+%macro SCRATCH 3-4
+%if ARCH_X86_64
+ SWAP %1, %2
+%if %0 == 4
+%define reg_%4 m%2
+%endif
+%else
+ mova [%3], m%1
+%if %0 == 4
+%define reg_%4 [%3]
+%endif
+%endif
+%endmacro
+
+%macro UNSCRATCH 3-4
+%if ARCH_X86_64
+ SWAP %1, %2
+%else
+ mova m%1, [%3]
+%endif
+%if %0 == 4
+%undef reg_%4
+%endif
+%endmacro
+
+%macro PRELOAD 2-3
+%if ARCH_X86_64
+ mova m%1, [%2]
+%if %0 == 3
+%define reg_%3 m%1
+%endif
+%elif %0 == 3
+%define reg_%3 [%2]
+%endif
+%endmacro
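+
+; SCRATCH/UNSCRATCH/PRELOAD let the transform bodies be written once for
+; both ABIs: on x86-64 they map a value to one of the extra registers
+; m8-m15 (optionally aliased as reg_rnd, reg_mask, ...), while on x86-32
+; they fall back to the given stack slot, so the same code also runs with
+; only eight xmm registers.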
+
+; out0 = 5283 * in0 + 13377 * in1 + 15212 * in2 + 9929 * in3 + rnd >> 14
+; out1 = 9929 * in0 + 13377 * in1 - 5283 * in2 - 15212 * in3 + rnd >> 14
+; out2 = 13377 * in0 - 13377 * in2 + 13377 * in3 + rnd >> 14
+; out3 = 15212 * in0 - 13377 * in1 + 9929 * in2 - 5283 * in3 + rnd >> 14
+%macro IADST4_12BPP_1D 0-2 [pd_8192], [pd_3fff] ; rnd, mask
+ pand m4, m0, %2
+ pand m5, m1, %2
+ psrad m0, 14
+ psrad m1, 14
+ packssdw m5, m1
+ packssdw m4, m0
+ punpckhwd m1, m4, m5
+ punpcklwd m4, m5
+ pand m5, m2, %2
+ pand m6, m3, %2
+ psrad m2, 14
+ psrad m3, 14
+ packssdw m6, m3
+ packssdw m5, m2
+ punpckhwd m3, m5, m6
+ punpcklwd m5, m6
+ SCRATCH 1, 8, rsp+0*mmsize, a
+ SCRATCH 5, 9, rsp+1*mmsize, b
+
+ ; m1/3 have the high bits of 0,1,2,3
+ ; m4/5 have the low bits of 0,1,2,3
+ ; m0/2/6/7 are free
+
+ mova m2, [pw_15212_9929]
+ mova m0, [pw_5283_13377]
+ pmaddwd m7, m2, reg_b
+ pmaddwd m6, m4, m0
+ pmaddwd m2, m3
+ pmaddwd m0, reg_a
+ paddd m6, m7
+ paddd m0, m2
+ mova m1, [pw_m13377_13377]
+ mova m5, [pw_13377_0]
+ pmaddwd m7, m1, reg_b
+ pmaddwd m2, m4, m5
+ pmaddwd m1, m3
+ pmaddwd m5, reg_a
+ paddd m2, m7
+ paddd m1, m5
+ paddd m6, %1
+ paddd m2, %1
+ psrad m6, 14
+ psrad m2, 14
+ paddd m0, m6 ; t0
+ paddd m2, m1 ; t2
+
+ mova m7, [pw_m5283_m15212]
+ mova m5, [pw_9929_13377]
+ pmaddwd m1, m7, reg_b
+ pmaddwd m6, m4, m5
+ pmaddwd m7, m3
+ pmaddwd m5, reg_a
+ paddd m6, m1
+ paddd m7, m5
+ UNSCRATCH 5, 9, rsp+1*mmsize, b
+ pmaddwd m5, [pw_9929_m5283]
+ pmaddwd m4, [pw_15212_m13377]
+ pmaddwd m3, [pw_9929_m5283]
+ UNSCRATCH 1, 8, rsp+0*mmsize, a
+ pmaddwd m1, [pw_15212_m13377]
+ paddd m4, m5
+ paddd m3, m1
+ paddd m6, %1
+ paddd m4, %1
+ psrad m6, 14
+ psrad m4, 14
+ paddd m7, m6 ; t1
+ paddd m3, m4 ; t3
+
+ SWAP 1, 7
+%endmacro
+
+%macro IADST4_12BPP_FN 4
+cglobal vp9_%1_%3_4x4_add_12, 3, 3, 12, 2 * ARCH_X86_32 * mmsize, dst, stride, block, eob
+ mova m0, [blockq+0*16]
+ mova m1, [blockq+1*16]
+ mova m2, [blockq+2*16]
+ mova m3, [blockq+3*16]
+
+ PRELOAD 10, pd_8192, rnd
+ PRELOAD 11, pd_3fff, mask
+ %2_12BPP_1D reg_rnd, reg_mask
+ TRANSPOSE4x4D 0, 1, 2, 3, 4
+ %4_12BPP_1D reg_rnd, reg_mask
+
+ pxor m4, m4
+ ZERO_BLOCK blockq, 16, 4, m4
+
+ ; writeout
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ mova m5, [pw_4095]
+ mova m6, [pd_8]
+ ROUND_AND_STORE_4x4 0, 1, 2, 3, m4, m5, m6, 4
+ RET
+%endmacro
+
+INIT_XMM sse2
+IADST4_12BPP_FN idct, IDCT4, iadst, IADST4
+IADST4_12BPP_FN iadst, IADST4, idct, IDCT4
+IADST4_12BPP_FN iadst, IADST4, iadst, IADST4
+
+; the following line has not been executed at the end of this macro:
+; UNSCRATCH 6, 8, rsp+(%5+0)*mmsize
+%macro IDCT8_1D 1-5 [pd_8192], [pd_3fff], 2 * mmsize, 17 ; src, rnd, mask, src_stride, stack_offset
+ mova m0, [%1+0*%4]
+ mova m2, [%1+2*%4]
+ mova m4, [%1+4*%4]
+ mova m6, [%1+6*%4]
+ IDCT4_12BPP_1D %2, %3, 0, 2, 4, 6, 1, 3 ; m0/2/4/6 have t0/1/2/3
+ SCRATCH 4, 8, rsp+(%5+0)*mmsize
+ SCRATCH 6, 9, rsp+(%5+1)*mmsize
+ mova m1, [%1+1*%4]
+ mova m3, [%1+3*%4]
+ mova m5, [%1+5*%4]
+ mova m7, [%1+7*%4]
+ SUMSUB_MUL 1, 7, 4, 6, 16069, 3196, %2, %3 ; m1=t7a, m7=t4a
+ SUMSUB_MUL 5, 3, 4, 6, 9102, 13623, %2, %3 ; m5=t6a, m3=t5a
+ SUMSUB_BA d, 3, 7, 4 ; m3=t4, m7=t5a
+ SUMSUB_BA d, 5, 1, 4 ; m5=t7, m1=t6a
+ SUMSUB_MUL 1, 7, 4, 6, 11585, 11585, %2, %3 ; m1=t6, m7=t5
+ SUMSUB_BA d, 5, 0, 4 ; m5=out0, m0=out7
+ SUMSUB_BA d, 1, 2, 4 ; m1=out1, m2=out6
+ UNSCRATCH 4, 8, rsp+(%5+0)*mmsize
+ UNSCRATCH 6, 9, rsp+(%5+1)*mmsize
+ SCRATCH 2, 8, rsp+(%5+0)*mmsize
+ SUMSUB_BA d, 7, 4, 2 ; m7=out2, m4=out5
+ SUMSUB_BA d, 3, 6, 2 ; m3=out3, m6=out4
+ SWAP 0, 5, 4, 6, 2, 7
+%endmacro
+
+%macro STORE_2x8 5-7 dstq, strideq ; tmp1-2, reg, min, max
+ mova m%1, [%6+%7*0]
+ mova m%2, [%6+%7*1]
+ paddw m%1, m%3
+ paddw m%2, m%3
+ pmaxsw m%1, %4
+ pmaxsw m%2, %4
+ pminsw m%1, %5
+ pminsw m%2, %5
+ mova [%6+%7*0], m%1
+ mova [%6+%7*1], m%2
+%endmacro
+
+; FIXME we can use the intermediate storage (rsp[0-15]) on x86-32 for temp
+; storage also instead of allocating two more stack spaces. This doesn't
+; matter much but it's something...
+INIT_XMM sse2
+cglobal vp9_idct_idct_8x8_add_10, 4, 6 + ARCH_X86_64, 14, \
+ 16 * mmsize + 3 * ARCH_X86_32 * mmsize, \
+ dst, stride, block, eob
+ mova m0, [pw_1023]
+ cmp eobd, 1
+ jg .idctfull
+
+ ; dc-only - the 10bit version can be done entirely in 32bit, since the max
+ ; coef values are 16+sign bit, and the coef is 14bit, so 30+sign easily
+ ; fits in 32bit
+ DEFINE_ARGS dst, stride, block, coef
+ pxor m2, m2
+ DC_ONLY 5, m2
+ movd m1, coefd
+ pshuflw m1, m1, q0000
+ punpcklqdq m1, m1
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 4
+.loop_dc:
+ STORE_2x8 3, 4, 1, m2, m0
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jg .loop_dc
+ RET
+
+.idctfull:
+ SCRATCH 0, 12, rsp+16*mmsize, max
+ DEFINE_ARGS dst, stride, block, cnt, ptr, skip, dstbak
+%if ARCH_X86_64
+ mov dstbakq, dstq
+ movsxd cntq, cntd
+%endif
+%ifdef PIC
+ lea ptrq, [default_8x8]
+ movzx cntd, byte [ptrq+cntq-1]
+%else
+ movzx cntd, byte [default_8x8+cntq-1]
+%endif
+ mov skipd, 2
+ sub skipd, cntd
+ mov ptrq, rsp
+ PRELOAD 10, pd_8192, rnd
+ PRELOAD 11, pd_3fff, mask
+ PRELOAD 13, pd_16, srnd
+.loop_1:
+ IDCT8_1D blockq, reg_rnd, reg_mask
+
+ TRANSPOSE4x4D 0, 1, 2, 3, 6
+ mova [ptrq+ 0*mmsize], m0
+ mova [ptrq+ 2*mmsize], m1
+ mova [ptrq+ 4*mmsize], m2
+ mova [ptrq+ 6*mmsize], m3
+ UNSCRATCH 6, 8, rsp+17*mmsize
+ TRANSPOSE4x4D 4, 5, 6, 7, 0
+ mova [ptrq+ 1*mmsize], m4
+ mova [ptrq+ 3*mmsize], m5
+ mova [ptrq+ 5*mmsize], m6
+ mova [ptrq+ 7*mmsize], m7
+ add ptrq, 8 * mmsize
+ add blockq, mmsize
+ dec cntd
+ jg .loop_1
+
+ ; zero-pad the remainder (skipped cols)
+ test skipd, skipd
+ jz .end
+ add skipd, skipd
+ lea blockq, [blockq+skipq*(mmsize/2)]
+ pxor m0, m0
+.loop_z:
+ mova [ptrq+mmsize*0], m0
+ mova [ptrq+mmsize*1], m0
+ mova [ptrq+mmsize*2], m0
+ mova [ptrq+mmsize*3], m0
+ add ptrq, 4 * mmsize
+ dec skipd
+ jg .loop_z
+.end:
+
+ DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
+ lea stride3q, [strideq*3]
+ mov cntd, 2
+ mov ptrq, rsp
+.loop_2:
+ IDCT8_1D ptrq, reg_rnd, reg_mask
+
+ pxor m6, m6
+ ROUND_AND_STORE_4x4 0, 1, 2, 3, m6, reg_max, reg_srnd, 5
+ lea dstq, [dstq+strideq*4]
+ UNSCRATCH 0, 8, rsp+17*mmsize
+ UNSCRATCH 1, 12, rsp+16*mmsize, max
+ UNSCRATCH 2, 13, pd_16, srnd
+ ROUND_AND_STORE_4x4 4, 5, 0, 7, m6, m1, m2, 5
+ add ptrq, 16
+%if ARCH_X86_64
+ lea dstq, [dstbakq+8]
+%else
+ mov dstq, dstm
+ add dstq, 8
+%endif
+ dec cntd
+ jg .loop_2
+
+ ; m6 is still zero
+ ZERO_BLOCK blockq-2*mmsize, 32, 8, m6
+ RET
+
+%macro DC_ONLY_64BIT 2 ; shift, zero
+%if ARCH_X86_64
+ movsxd coefq, dword [blockq]
+ movd [blockq], %2
+ imul coefq, 11585
+ add coefq, 8192
+ sar coefq, 14
+ imul coefq, 11585
+ add coefq, ((1 << (%1 - 1)) << 14) + 8192
+ sar coefq, 14 + %1
+%else
+ mov coefd, dword [blockq]
+ movd [blockq], %2
+ DEFINE_ARGS dst, stride, cnt, coef, coefl
+ mov cntd, 2
+.loop_dc_calc:
+ mov coefld, coefd
+ sar coefd, 14
+ and coefld, 0x3fff
+ imul coefd, 11585
+ imul coefld, 11585
+ add coefld, 8192
+ sar coefld, 14
+ add coefd, coefld
+ dec cntd
+ jg .loop_dc_calc
+ add coefd, 1 << (%1 - 1)
+ sar coefd, %1
+%endif
+%endmacro
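+
+; On x86-32, DC_ONLY_64BIT performs the same two 11585 round-multiplies as
+; the 64-bit branch above, but keeps everything in 32-bit registers by
+; splitting x into its high part (sar 14) and low 14 bits (and 0x3fff) for
+; each multiply, i.e. hi*11585 + ((lo*11585 + 8192) >> 14), and applies the
+; final 1<<(shift-1) rounding as a separate step.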
+
+INIT_XMM sse2
+cglobal vp9_idct_idct_8x8_add_12, 4, 6 + ARCH_X86_64, 14, \
+ 16 * mmsize + 3 * ARCH_X86_32 * mmsize, \
+ dst, stride, block, eob
+ mova m0, [pw_4095]
+ cmp eobd, 1
+ jg mangle(private_prefix %+ _ %+ vp9_idct_idct_8x8_add_10 %+ SUFFIX).idctfull
+
+ ; dc-only - unfortunately, this one can overflow, since coefs are 18+sign
+ ; bpp, and 18+14+sign does not fit in 32bit, so we do 2-stage multiplies
+ DEFINE_ARGS dst, stride, block, coef, coefl
+ pxor m2, m2
+ DC_ONLY_64BIT 5, m2
+ movd m1, coefd
+ pshuflw m1, m1, q0000
+ punpcklqdq m1, m1
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 4
+.loop_dc:
+ STORE_2x8 3, 4, 1, m2, m0
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jg .loop_dc
+ RET
+
+; inputs and outputs are dwords, coefficients are words
+;
+; dst1[hi]:dst3[lo] = src1 * coef1 + src2 * coef2
+; dst2[hi]:dst4[lo] = src1 * coef2 - src2 * coef1
+%macro SUMSUB_MUL_D 6-7 [pd_3fff] ; src/dst 1-2, dst3-4, coef1-2, mask
+ pand m%3, m%1, %7
+ pand m%4, m%2, %7
+ psrad m%1, 14
+ psrad m%2, 14
+ packssdw m%4, m%2
+ packssdw m%3, m%1
+ punpckhwd m%2, m%4, m%3
+ punpcklwd m%4, m%3
+ pmaddwd m%3, m%4, [pw_%6_%5]
+ pmaddwd m%1, m%2, [pw_%6_%5]
+ pmaddwd m%4, [pw_m%5_%6]
+ pmaddwd m%2, [pw_m%5_%6]
+%endmacro
+
+; dst1 = src2[hi]:src4[lo] + src1[hi]:src3[lo] + rnd >> 14
+; dst2 = src2[hi]:src4[lo] - src1[hi]:src3[lo] + rnd >> 14
+%macro SUMSUB_PACK_D 5-6 [pd_8192] ; src/dst 1-2, src3-4, tmp, rnd
+ SUMSUB_BA d, %1, %2, %5
+ SUMSUB_BA d, %3, %4, %5
+ paddd m%3, %6
+ paddd m%4, %6
+ psrad m%3, 14
+ psrad m%4, 14
+ paddd m%1, m%3
+ paddd m%2, m%4
+%endmacro
+
+%macro NEGD 1
+%if cpuflag(ssse3)
+ psignd %1, [pw_m1]
+%else
+ pxor %1, [pw_m1]
+ paddd %1, [pd_1]
+%endif
+%endmacro
+
+; the following line has not been executed at the end of this macro:
+; UNSCRATCH 6, 8, rsp+17*mmsize
+%macro IADST8_1D 1-3 [pd_8192], [pd_3fff] ; src, rnd, mask
+ mova m0, [%1+ 0*mmsize]
+ mova m3, [%1+ 6*mmsize]
+ mova m4, [%1+ 8*mmsize]
+ mova m7, [%1+14*mmsize]
+ SUMSUB_MUL_D 7, 0, 1, 2, 16305, 1606, %3 ; m7/1=t0a, m0/2=t1a
+ SUMSUB_MUL_D 3, 4, 5, 6, 10394, 12665, %3 ; m3/5=t4a, m4/6=t5a
+ SCRATCH 0, 8, rsp+17*mmsize
+ SUMSUB_PACK_D 3, 7, 5, 1, 0, %2 ; m3=t0, m7=t4
+ UNSCRATCH 0, 8, rsp+17*mmsize
+ SUMSUB_PACK_D 4, 0, 6, 2, 1, %2 ; m4=t1, m0=t5
+
+ SCRATCH 3, 8, rsp+17*mmsize
+ SCRATCH 4, 9, rsp+18*mmsize
+ SCRATCH 7, 10, rsp+19*mmsize
+ SCRATCH 0, 11, rsp+20*mmsize
+
+ mova m1, [%1+ 2*mmsize]
+ mova m2, [%1+ 4*mmsize]
+ mova m5, [%1+10*mmsize]
+ mova m6, [%1+12*mmsize]
+ SUMSUB_MUL_D 5, 2, 3, 4, 14449, 7723, %3 ; m5/8=t2a, m2/9=t3a
+ SUMSUB_MUL_D 1, 6, 7, 0, 4756, 15679, %3 ; m1/10=t6a, m6/11=t7a
+ SCRATCH 2, 12, rsp+21*mmsize
+ SUMSUB_PACK_D 1, 5, 7, 3, 2, %2 ; m1=t2, m5=t6
+ UNSCRATCH 2, 12, rsp+21*mmsize
+ SUMSUB_PACK_D 6, 2, 0, 4, 3, %2 ; m6=t3, m2=t7
+
+ UNSCRATCH 7, 10, rsp+19*mmsize
+ UNSCRATCH 0, 11, rsp+20*mmsize
+ SCRATCH 1, 10, rsp+19*mmsize
+ SCRATCH 6, 11, rsp+20*mmsize
+
+ SUMSUB_MUL_D 7, 0, 3, 4, 15137, 6270, %3 ; m7/8=t4a, m0/9=t5a
+ SUMSUB_MUL_D 2, 5, 1, 6, 6270, 15137, %3 ; m2/10=t7a, m5/11=t6a
+ SCRATCH 2, 12, rsp+21*mmsize
+ SUMSUB_PACK_D 5, 7, 6, 3, 2, %2 ; m5=-out1, m7=t6
+ UNSCRATCH 2, 12, rsp+21*mmsize
+ NEGD m5 ; m5=out1
+ SUMSUB_PACK_D 2, 0, 1, 4, 3, %2 ; m2=out6, m0=t7
+ SUMSUB_MUL 7, 0, 3, 4, 11585, 11585, %2, %3 ; m7=out2, m0=-out5
+ NEGD m0 ; m0=out5
+
+ UNSCRATCH 3, 8, rsp+17*mmsize
+ UNSCRATCH 4, 9, rsp+18*mmsize
+ UNSCRATCH 1, 10, rsp+19*mmsize
+ UNSCRATCH 6, 11, rsp+20*mmsize
+ SCRATCH 2, 8, rsp+17*mmsize
+ SCRATCH 0, 9, rsp+18*mmsize
+
+ SUMSUB_BA d, 1, 3, 2 ; m1=out0, m3=t2
+ SUMSUB_BA d, 6, 4, 2 ; m6=-out7, m4=t3
+ NEGD m6 ; m6=out7
+ SUMSUB_MUL 3, 4, 2, 0, 11585, 11585, %2, %3 ; m3=-out3, m4=out4
+ NEGD m3 ; m3=out3
+
+ UNSCRATCH 0, 9, rsp+18*mmsize
+
+ SWAP 0, 1, 5
+ SWAP 2, 7, 6
+%endmacro
+
+%macro IADST8_FN 5
+cglobal vp9_%1_%3_8x8_add_10, 4, 6 + ARCH_X86_64, 16, \
+ 16 * mmsize + ARCH_X86_32 * 6 * mmsize, \
+ dst, stride, block, eob
+ mova m0, [pw_1023]
+
+.body:
+ SCRATCH 0, 13, rsp+16*mmsize, max
+ DEFINE_ARGS dst, stride, block, cnt, ptr, skip, dstbak
+%if ARCH_X86_64
+ mov dstbakq, dstq
+ movsxd cntq, cntd
+%endif
+%ifdef PIC
+ lea ptrq, [%5_8x8]
+ movzx cntd, byte [ptrq+cntq-1]
+%else
+ movzx cntd, byte [%5_8x8+cntq-1]
+%endif
+ mov skipd, 2
+ sub skipd, cntd
+ mov ptrq, rsp
+ PRELOAD 14, pd_8192, rnd
+ PRELOAD 15, pd_3fff, mask
+.loop_1:
+ %2_1D blockq, reg_rnd, reg_mask
+
+ TRANSPOSE4x4D 0, 1, 2, 3, 6
+ mova [ptrq+ 0*mmsize], m0
+ mova [ptrq+ 2*mmsize], m1
+ mova [ptrq+ 4*mmsize], m2
+ mova [ptrq+ 6*mmsize], m3
+ UNSCRATCH 6, 8, rsp+17*mmsize
+ TRANSPOSE4x4D 4, 5, 6, 7, 0
+ mova [ptrq+ 1*mmsize], m4
+ mova [ptrq+ 3*mmsize], m5
+ mova [ptrq+ 5*mmsize], m6
+ mova [ptrq+ 7*mmsize], m7
+ add ptrq, 8 * mmsize
+ add blockq, mmsize
+ dec cntd
+ jg .loop_1
+
+ ; zero-pad the remainder (skipped cols)
+ test skipd, skipd
+ jz .end
+ add skipd, skipd
+ lea blockq, [blockq+skipq*(mmsize/2)]
+ pxor m0, m0
+.loop_z:
+ mova [ptrq+mmsize*0], m0
+ mova [ptrq+mmsize*1], m0
+ mova [ptrq+mmsize*2], m0
+ mova [ptrq+mmsize*3], m0
+ add ptrq, 4 * mmsize
+ dec skipd
+ jg .loop_z
+.end:
+
+ DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
+ lea stride3q, [strideq*3]
+ mov cntd, 2
+ mov ptrq, rsp
+.loop_2:
+ %4_1D ptrq, reg_rnd, reg_mask
+
+ pxor m6, m6
+ PRELOAD 9, pd_16, srnd
+ ROUND_AND_STORE_4x4 0, 1, 2, 3, m6, reg_max, reg_srnd, 5
+ lea dstq, [dstq+strideq*4]
+ UNSCRATCH 0, 8, rsp+17*mmsize
+ UNSCRATCH 1, 13, rsp+16*mmsize, max
+ UNSCRATCH 2, 9, pd_16, srnd
+ ROUND_AND_STORE_4x4 4, 5, 0, 7, m6, m1, m2, 5
+ add ptrq, 16
+%if ARCH_X86_64
+ lea dstq, [dstbakq+8]
+%else
+ mov dstq, dstm
+ add dstq, 8
+%endif
+ dec cntd
+ jg .loop_2
+
+ ; m6 is still zero
+ ZERO_BLOCK blockq-2*mmsize, 32, 8, m6
+ RET
+
+cglobal vp9_%1_%3_8x8_add_12, 4, 6 + ARCH_X86_64, 16, \
+ 16 * mmsize + ARCH_X86_32 * 6 * mmsize, \
+ dst, stride, block, eob
+ mova m0, [pw_4095]
+ jmp mangle(private_prefix %+ _ %+ vp9_%1_%3_8x8_add_10 %+ SUFFIX).body
+%endmacro
+
+INIT_XMM sse2
+IADST8_FN idct, IDCT8, iadst, IADST8, row
+IADST8_FN iadst, IADST8, idct, IDCT8, col
+IADST8_FN iadst, IADST8, iadst, IADST8, default
+
+%macro IDCT16_1D 1-4 4 * mmsize, 65, 67 ; src, src_stride, stack_offset, mm32bit_stack_offset
+ IDCT8_1D %1, [pd_8192], [pd_3fff], %2 * 2, %4 ; m0-3=t0-3a, m4-5/m8|r67/m7=t4-7
+ ; SCRATCH 6, 8, rsp+(%4+0)*mmsize ; t6
+ SCRATCH 0, 15, rsp+(%4+7)*mmsize ; t0a
+ SCRATCH 1, 14, rsp+(%4+6)*mmsize ; t1a
+ SCRATCH 2, 13, rsp+(%4+5)*mmsize ; t2a
+ SCRATCH 3, 12, rsp+(%4+4)*mmsize ; t3a
+ SCRATCH 4, 11, rsp+(%4+3)*mmsize ; t4
+ mova [rsp+(%3+0)*mmsize], m5 ; t5
+ mova [rsp+(%3+1)*mmsize], m7 ; t7
+
+ mova m0, [%1+ 1*%2] ; in1
+ mova m3, [%1+ 7*%2] ; in7
+ mova m4, [%1+ 9*%2] ; in9
+ mova m7, [%1+15*%2] ; in15
+
+ SUMSUB_MUL 0, 7, 1, 2, 16305, 1606 ; m0=t15a, m7=t8a
+ SUMSUB_MUL 4, 3, 1, 2, 10394, 12665 ; m4=t14a, m3=t9a
+ SUMSUB_BA d, 3, 7, 1 ; m3=t8, m7=t9
+ SUMSUB_BA d, 4, 0, 1 ; m4=t15,m0=t14
+ SUMSUB_MUL 0, 7, 1, 2, 15137, 6270 ; m0=t14a, m7=t9a
+
+ mova m1, [%1+ 3*%2] ; in3
+ mova m2, [%1+ 5*%2] ; in5
+ mova m5, [%1+11*%2] ; in11
+ mova m6, [%1+13*%2] ; in13
+
+ SCRATCH 0, 9, rsp+(%4+1)*mmsize
+ SCRATCH 7, 10, rsp+(%4+2)*mmsize
+
+ SUMSUB_MUL 2, 5, 0, 7, 14449, 7723 ; m2=t13a, m5=t10a
+ SUMSUB_MUL 6, 1, 0, 7, 4756, 15679 ; m6=t12a, m1=t11a
+ SUMSUB_BA d, 5, 1, 0 ; m5=t11,m1=t10
+ SUMSUB_BA d, 2, 6, 0 ; m2=t12,m6=t13
+ NEGD m1 ; m1=-t10
+ SUMSUB_MUL 1, 6, 0, 7, 15137, 6270 ; m1=t13a, m6=t10a
+
+ UNSCRATCH 7, 10, rsp+(%4+2)*mmsize
+ SUMSUB_BA d, 5, 3, 0 ; m5=t8a, m3=t11a
+ SUMSUB_BA d, 6, 7, 0 ; m6=t9, m7=t10
+ SUMSUB_BA d, 2, 4, 0 ; m2=t15a,m4=t12a
+ SCRATCH 5, 10, rsp+(%4+2)*mmsize
+ SUMSUB_MUL 4, 3, 0, 5, 11585, 11585 ; m4=t12, m3=t11
+ UNSCRATCH 0, 9, rsp+(%4+1)*mmsize
+ SUMSUB_BA d, 1, 0, 5 ; m1=t14, m0=t13
+ SCRATCH 6, 9, rsp+(%4+1)*mmsize
+ SUMSUB_MUL 0, 7, 6, 5, 11585, 11585 ; m0=t13a,m7=t10a
+
+ ; order: 15|r74,14|r73,13|r72,12|r71,11|r70,r65,8|r67,r66,10|r69,9|r68,7,3,4,0,1,2
+ ; free: 6,5
+
+ UNSCRATCH 5, 15, rsp+(%4+7)*mmsize
+ SUMSUB_BA d, 2, 5, 6 ; m2=out0, m5=out15
+ SCRATCH 5, 15, rsp+(%4+7)*mmsize
+ UNSCRATCH 5, 14, rsp+(%4+6)*mmsize
+ SUMSUB_BA d, 1, 5, 6 ; m1=out1, m5=out14
+ SCRATCH 5, 14, rsp+(%4+6)*mmsize
+ UNSCRATCH 5, 13, rsp+(%4+5)*mmsize
+ SUMSUB_BA d, 0, 5, 6 ; m0=out2, m5=out13
+ SCRATCH 5, 13, rsp+(%4+5)*mmsize
+ UNSCRATCH 5, 12, rsp+(%4+4)*mmsize
+ SUMSUB_BA d, 4, 5, 6 ; m4=out3, m5=out12
+ SCRATCH 5, 12, rsp+(%4+4)*mmsize
+ UNSCRATCH 5, 11, rsp+(%4+3)*mmsize
+ SUMSUB_BA d, 3, 5, 6 ; m3=out4, m5=out11
+ SCRATCH 4, 11, rsp+(%4+3)*mmsize
+ mova m4, [rsp+(%3+0)*mmsize]
+ SUMSUB_BA d, 7, 4, 6 ; m7=out5, m4=out10
+ mova [rsp+(%3+0)*mmsize], m5
+ UNSCRATCH 5, 8, rsp+(%4+0)*mmsize
+ UNSCRATCH 6, 9, rsp+(%4+1)*mmsize
+ SCRATCH 2, 8, rsp+(%4+0)*mmsize
+ SCRATCH 1, 9, rsp+(%4+1)*mmsize
+ UNSCRATCH 1, 10, rsp+(%4+2)*mmsize
+ SCRATCH 0, 10, rsp+(%4+2)*mmsize
+ mova m0, [rsp+(%3+1)*mmsize]
+ SUMSUB_BA d, 6, 5, 2 ; m6=out6, m5=out9
+ SUMSUB_BA d, 1, 0, 2 ; m1=out7, m0=out8
+
+ SWAP 0, 3, 1, 7, 2, 6, 4
+
+ ; output order: 8-11|r67-70=out0-3
+ ; 0-6,r65=out4-11
+ ; 12-15|r71-74=out12-15
+%endmacro
+
+INIT_XMM sse2
+cglobal vp9_idct_idct_16x16_add_10, 4, 6 + ARCH_X86_64, 16, \
+ 67 * mmsize + ARCH_X86_32 * 8 * mmsize, \
+ dst, stride, block, eob
+ mova m0, [pw_1023]
+ cmp eobd, 1
+ jg .idctfull
+
+ ; dc-only - the 10bit version can be done entirely in 32bit, since the max
+ ; coef values are 17+sign bit, and the coef is 14bit, so 31+sign easily
+ ; fits in 32bit
+ DEFINE_ARGS dst, stride, block, coef
+ pxor m2, m2
+ DC_ONLY 6, m2
+ movd m1, coefd
+ pshuflw m1, m1, q0000
+ punpcklqdq m1, m1
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 8
+.loop_dc:
+ STORE_2x8 3, 4, 1, m2, m0, dstq, mmsize
+ STORE_2x8 3, 4, 1, m2, m0, dstq+strideq, mmsize
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jg .loop_dc
+ RET
+
+.idctfull:
+ mova [rsp+64*mmsize], m0
+ DEFINE_ARGS dst, stride, block, cnt, ptr, skip, dstbak
+%if ARCH_X86_64
+ mov dstbakq, dstq
+ movsxd cntq, cntd
+%endif
+%ifdef PIC
+ lea ptrq, [default_16x16]
+ movzx cntd, byte [ptrq+cntq-1]
+%else
+ movzx cntd, byte [default_16x16+cntq-1]
+%endif
+ mov skipd, 4
+ sub skipd, cntd
+ mov ptrq, rsp
+.loop_1:
+ IDCT16_1D blockq
+
+ TRANSPOSE4x4D 0, 1, 2, 3, 7
+ mova [ptrq+ 1*mmsize], m0
+ mova [ptrq+ 5*mmsize], m1
+ mova [ptrq+ 9*mmsize], m2
+ mova [ptrq+13*mmsize], m3
+ mova m7, [rsp+65*mmsize]
+ TRANSPOSE4x4D 4, 5, 6, 7, 0
+ mova [ptrq+ 2*mmsize], m4
+ mova [ptrq+ 6*mmsize], m5
+ mova [ptrq+10*mmsize], m6
+ mova [ptrq+14*mmsize], m7
+ UNSCRATCH 0, 8, rsp+67*mmsize
+ UNSCRATCH 1, 9, rsp+68*mmsize
+ UNSCRATCH 2, 10, rsp+69*mmsize
+ UNSCRATCH 3, 11, rsp+70*mmsize
+ TRANSPOSE4x4D 0, 1, 2, 3, 7
+ mova [ptrq+ 0*mmsize], m0
+ mova [ptrq+ 4*mmsize], m1
+ mova [ptrq+ 8*mmsize], m2
+ mova [ptrq+12*mmsize], m3
+ UNSCRATCH 4, 12, rsp+71*mmsize
+ UNSCRATCH 5, 13, rsp+72*mmsize
+ UNSCRATCH 6, 14, rsp+73*mmsize
+ UNSCRATCH 7, 15, rsp+74*mmsize
+ TRANSPOSE4x4D 4, 5, 6, 7, 0
+ mova [ptrq+ 3*mmsize], m4
+ mova [ptrq+ 7*mmsize], m5
+ mova [ptrq+11*mmsize], m6
+ mova [ptrq+15*mmsize], m7
+ add ptrq, 16 * mmsize
+ add blockq, mmsize
+ dec cntd
+ jg .loop_1
+
+ ; zero-pad the remainder (skipped cols)
+ test skipd, skipd
+ jz .end
+ add skipd, skipd
+ lea blockq, [blockq+skipq*(mmsize/2)]
+ pxor m0, m0
+.loop_z:
+ mova [ptrq+mmsize*0], m0
+ mova [ptrq+mmsize*1], m0
+ mova [ptrq+mmsize*2], m0
+ mova [ptrq+mmsize*3], m0
+ mova [ptrq+mmsize*4], m0
+ mova [ptrq+mmsize*5], m0
+ mova [ptrq+mmsize*6], m0
+ mova [ptrq+mmsize*7], m0
+ add ptrq, 8 * mmsize
+ dec skipd
+ jg .loop_z
+.end:
+
+ DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
+ lea stride3q, [strideq*3]
+ mov cntd, 4
+ mov ptrq, rsp
+.loop_2:
+ IDCT16_1D ptrq
+
+ pxor m7, m7
+ lea dstq, [dstq+strideq*4]
+ ROUND_AND_STORE_4x4 0, 1, 2, 3, m7, [rsp+64*mmsize], [pd_32], 6
+ lea dstq, [dstq+strideq*4]
+ mova m0, [rsp+65*mmsize]
+ mova m1, [rsp+64*mmsize]
+ mova m2, [pd_32]
+ ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, m1, m2, 6
+
+%if ARCH_X86_64
+ DEFINE_ARGS dstbak, stride, block, cnt, ptr, stride3, dst
+%else
+ mov dstq, dstm
+%endif
+ UNSCRATCH 0, 8, rsp+67*mmsize
+ UNSCRATCH 4, 9, rsp+68*mmsize
+ UNSCRATCH 5, 10, rsp+69*mmsize
+ UNSCRATCH 3, 11, rsp+70*mmsize
+ ROUND_AND_STORE_4x4 0, 4, 5, 3, m7, m1, m2, 6
+%if ARCH_X86_64
+ DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
+ lea dstq, [dstbakq+stride3q*4]
+%else
+ lea dstq, [dstq+stride3q*4]
+%endif
+ UNSCRATCH 4, 12, rsp+71*mmsize
+ UNSCRATCH 5, 13, rsp+72*mmsize
+ UNSCRATCH 6, 14, rsp+73*mmsize
+ UNSCRATCH 0, 15, rsp+74*mmsize
+ ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, m1, m2, 6
+
+ add ptrq, mmsize
+%if ARCH_X86_64
+ add dstbakq, 8
+ mov dstq, dstbakq
+%else
+ add dword dstm, 8
+ mov dstq, dstm
+%endif
+ dec cntd
+ jg .loop_2
+
+ ; m7 is still zero
+ ZERO_BLOCK blockq-4*mmsize, 64, 16, m7
+ RET
+
+INIT_XMM sse2
+cglobal vp9_idct_idct_16x16_add_12, 4, 6 + ARCH_X86_64, 16, \
+ 67 * mmsize + ARCH_X86_32 * 8 * mmsize, \
+ dst, stride, block, eob
+ mova m0, [pw_4095]
+ cmp eobd, 1
+ jg mangle(private_prefix %+ _ %+ vp9_idct_idct_16x16_add_10 %+ SUFFIX).idctfull
+
+ ; dc-only - unfortunately, this one can overflow, since coefs are 19+sign
+ ; bpp, and 19+14+sign does not fit in 32bit, so we do 2-stage multiplies
+ DEFINE_ARGS dst, stride, block, coef, coefl
+ pxor m2, m2
+ DC_ONLY_64BIT 6, m2
+ movd m1, coefd
+ pshuflw m1, m1, q0000
+ punpcklqdq m1, m1
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 8
+.loop_dc:
+ STORE_2x8 3, 4, 1, m2, m0, dstq, mmsize
+ STORE_2x8 3, 4, 1, m2, m0, dstq+strideq, mmsize
+ lea dstq, [dstq+strideq*2]
+ dec cntd
+ jg .loop_dc
+ RET
+
+; r65-69 are available for spills
+; r70-77 are available on x86-32 only (x86-64 should use m8-15)
+; output should be in m8-11|r70-73, m0-6,r65 and m12-15|r74-77
+%macro IADST16_1D 1 ; src
+ mova m0, [%1+ 0*4*mmsize] ; in0
+ mova m1, [%1+ 7*4*mmsize] ; in7
+ mova m2, [%1+ 8*4*mmsize] ; in8
+ mova m3, [%1+15*4*mmsize] ; in15
+ SUMSUB_MUL_D 3, 0, 4, 5, 16364, 804 ; m3/4=t0, m0/5=t1
+ SUMSUB_MUL_D 1, 2, 6, 7, 11003, 12140 ; m1/6=t8, m2/7=t9
+ SCRATCH 0, 8, rsp+70*mmsize
+ SUMSUB_PACK_D 1, 3, 6, 4, 0 ; m1=t0a, m3=t8a
+ UNSCRATCH 0, 8, rsp+70*mmsize
+ SUMSUB_PACK_D 2, 0, 7, 5, 4 ; m2=t1a, m0=t9a
+ mova [rsp+67*mmsize], m1
+ SCRATCH 2, 9, rsp+71*mmsize
+ SCRATCH 3, 12, rsp+74*mmsize
+ SCRATCH 0, 13, rsp+75*mmsize
+
+ mova m0, [%1+ 3*4*mmsize] ; in3
+ mova m1, [%1+ 4*4*mmsize] ; in4
+ mova m2, [%1+11*4*mmsize] ; in11
+ mova m3, [%1+12*4*mmsize] ; in12
+ SUMSUB_MUL_D 2, 1, 4, 5, 14811, 7005 ; m2/4=t4, m1/5=t5
+ SUMSUB_MUL_D 0, 3, 6, 7, 5520, 15426 ; m0/6=t12, m3/7=t13
+ SCRATCH 1, 10, rsp+72*mmsize
+ SUMSUB_PACK_D 0, 2, 6, 4, 1 ; m0=t4a, m2=t12a
+ UNSCRATCH 1, 10, rsp+72*mmsize
+ SUMSUB_PACK_D 3, 1, 7, 5, 4 ; m3=t5a, m1=t13a
+ SCRATCH 0, 15, rsp+77*mmsize
+ SCRATCH 3, 11, rsp+73*mmsize
+
+ UNSCRATCH 0, 12, rsp+74*mmsize ; t8a
+ UNSCRATCH 3, 13, rsp+75*mmsize ; t9a
+ SUMSUB_MUL_D 0, 3, 4, 5, 16069, 3196 ; m0/4=t8, m3/5=t9
+ SUMSUB_MUL_D 1, 2, 6, 7, 3196, 16069 ; m1/6=t13, m2/7=t12
+ SCRATCH 1, 12, rsp+74*mmsize
+ SUMSUB_PACK_D 2, 0, 7, 4, 1 ; m2=t8a, m0=t12a
+ UNSCRATCH 1, 12, rsp+74*mmsize
+ SUMSUB_PACK_D 1, 3, 6, 5, 4 ; m1=t9a, m3=t13a
+ mova [rsp+65*mmsize], m2
+ mova [rsp+66*mmsize], m1
+ SCRATCH 0, 8, rsp+70*mmsize
+ SCRATCH 3, 12, rsp+74*mmsize
+
+ mova m0, [%1+ 2*4*mmsize] ; in2
+ mova m1, [%1+ 5*4*mmsize] ; in5
+ mova m2, [%1+10*4*mmsize] ; in10
+ mova m3, [%1+13*4*mmsize] ; in13
+ SUMSUB_MUL_D 3, 0, 4, 5, 15893, 3981 ; m3/4=t2, m0/5=t3
+ SUMSUB_MUL_D 1, 2, 6, 7, 8423, 14053 ; m1/6=t10, m2/7=t11
+ SCRATCH 0, 10, rsp+72*mmsize
+ SUMSUB_PACK_D 1, 3, 6, 4, 0 ; m1=t2a, m3=t10a
+ UNSCRATCH 0, 10, rsp+72*mmsize
+ SUMSUB_PACK_D 2, 0, 7, 5, 4 ; m2=t3a, m0=t11a
+ mova [rsp+68*mmsize], m1
+ mova [rsp+69*mmsize], m2
+ SCRATCH 3, 13, rsp+75*mmsize
+ SCRATCH 0, 14, rsp+76*mmsize
+
+ mova m0, [%1+ 1*4*mmsize] ; in1
+ mova m1, [%1+ 6*4*mmsize] ; in6
+ mova m2, [%1+ 9*4*mmsize] ; in9
+ mova m3, [%1+14*4*mmsize] ; in14
+ SUMSUB_MUL_D 2, 1, 4, 5, 13160, 9760 ; m2/4=t6, m1/5=t7
+ SUMSUB_MUL_D 0, 3, 6, 7, 2404, 16207 ; m0/6=t14, m3/7=t15
+ SCRATCH 1, 10, rsp+72*mmsize
+ SUMSUB_PACK_D 0, 2, 6, 4, 1 ; m0=t6a, m2=t14a
+ UNSCRATCH 1, 10, rsp+72*mmsize
+ SUMSUB_PACK_D 3, 1, 7, 5, 4 ; m3=t7a, m1=t15a
+
+ UNSCRATCH 4, 13, rsp+75*mmsize ; t10a
+ UNSCRATCH 5, 14, rsp+76*mmsize ; t11a
+ SCRATCH 0, 13, rsp+75*mmsize
+ SCRATCH 3, 14, rsp+76*mmsize
+ SUMSUB_MUL_D 4, 5, 6, 7, 9102, 13623 ; m4/6=t10, m5/7=t11
+ SUMSUB_MUL_D 1, 2, 0, 3, 13623, 9102 ; m1/0=t15, m2/3=t14
+ SCRATCH 0, 10, rsp+72*mmsize
+ SUMSUB_PACK_D 2, 4, 3, 6, 0 ; m2=t10a, m4=t14a
+ UNSCRATCH 0, 10, rsp+72*mmsize
+ SUMSUB_PACK_D 1, 5, 0, 7, 6 ; m1=t11a, m5=t15a
+
+ UNSCRATCH 0, 8, rsp+70*mmsize ; t12a
+ UNSCRATCH 3, 12, rsp+74*mmsize ; t13a
+ SCRATCH 2, 8, rsp+70*mmsize
+ SCRATCH 1, 12, rsp+74*mmsize
+ SUMSUB_MUL_D 0, 3, 1, 2, 15137, 6270 ; m0/1=t12, m3/2=t13
+ SUMSUB_MUL_D 5, 4, 7, 6, 6270, 15137 ; m5/7=t15, m4/6=t14
+ SCRATCH 2, 10, rsp+72*mmsize
+ SUMSUB_PACK_D 4, 0, 6, 1, 2 ; m4=out2, m0=t14a
+ UNSCRATCH 2, 10, rsp+72*mmsize
+ SUMSUB_PACK_D 5, 3, 7, 2, 1 ; m5=-out13, m3=t15a
+ NEGD m5 ; m5=out13
+
+ UNSCRATCH 1, 9, rsp+71*mmsize ; t1a
+ mova m2, [rsp+68*mmsize] ; t2a
+ UNSCRATCH 6, 13, rsp+75*mmsize ; t6a
+ UNSCRATCH 7, 14, rsp+76*mmsize ; t7a
+ SCRATCH 4, 10, rsp+72*mmsize
+ SCRATCH 5, 13, rsp+75*mmsize
+ UNSCRATCH 4, 15, rsp+77*mmsize ; t4a
+ UNSCRATCH 5, 11, rsp+73*mmsize ; t5a
+ SCRATCH 0, 14, rsp+76*mmsize
+ SCRATCH 3, 15, rsp+77*mmsize
+ mova m0, [rsp+67*mmsize] ; t0a
+ SUMSUB_BA d, 4, 0, 3 ; m4=t0, m0=t4
+ SUMSUB_BA d, 5, 1, 3 ; m5=t1, m1=t5
+ SUMSUB_BA d, 6, 2, 3 ; m6=t2, m2=t6
+ SCRATCH 4, 9, rsp+71*mmsize
+ mova m3, [rsp+69*mmsize] ; t3a
+ SUMSUB_BA d, 7, 3, 4 ; m7=t3, m3=t7
+
+ mova [rsp+67*mmsize], m5
+ mova [rsp+68*mmsize], m6
+ mova [rsp+69*mmsize], m7
+ SUMSUB_MUL_D 0, 1, 4, 5, 15137, 6270 ; m0/4=t4a, m1/5=t5a
+ SUMSUB_MUL_D 3, 2, 7, 6, 6270, 15137 ; m3/7=t7a, m2/6=t6a
+ SCRATCH 1, 11, rsp+73*mmsize
+ SUMSUB_PACK_D 2, 0, 6, 4, 1 ; m2=-out3, m0=t6
+ NEGD m2 ; m2=out3
+ UNSCRATCH 1, 11, rsp+73*mmsize
+ SUMSUB_PACK_D 3, 1, 7, 5, 4 ; m3=out12, m1=t7
+ SCRATCH 2, 11, rsp+73*mmsize
+ UNSCRATCH 2, 12, rsp+74*mmsize ; t11a
+ SCRATCH 3, 12, rsp+74*mmsize
+
+ UNSCRATCH 3, 8, rsp+70*mmsize ; t10a
+ mova m4, [rsp+65*mmsize] ; t8a
+ mova m5, [rsp+66*mmsize] ; t9a
+ SUMSUB_BA d, 3, 4, 6 ; m3=-out1, m4=t10
+ NEGD m3 ; m3=out1
+ SUMSUB_BA d, 2, 5, 6 ; m2=out14, m5=t11
+ UNSCRATCH 6, 9, rsp+71*mmsize ; t0
+ UNSCRATCH 7, 14, rsp+76*mmsize ; t14a
+ SCRATCH 3, 9, rsp+71*mmsize
+ SCRATCH 2, 14, rsp+76*mmsize
+
+ SUMSUB_MUL 1, 0, 2, 3, 11585, 11585 ; m1=out4, m0=out11
+ mova [rsp+65*mmsize], m0
+ SUMSUB_MUL 5, 4, 2, 3, 11585, 11585 ; m5=out6, m4=out9
+ UNSCRATCH 0, 15, rsp+77*mmsize ; t15a
+ SUMSUB_MUL 7, 0, 2, 3, 11585, m11585 ; m7=out10, m0=out5
+
+ mova m2, [rsp+68*mmsize] ; t2
+ SUMSUB_BA d, 2, 6, 3 ; m2=out0, m6=t2a
+ SCRATCH 2, 8, rsp+70*mmsize
+ mova m2, [rsp+67*mmsize] ; t1
+ mova m3, [rsp+69*mmsize] ; t3
+ mova [rsp+67*mmsize], m7
+ SUMSUB_BA d, 3, 2, 7 ; m3=-out15, m2=t3a
+ NEGD m3 ; m3=out15
+ SCRATCH 3, 15, rsp+77*mmsize
+ SUMSUB_MUL 6, 2, 7, 3, 11585, m11585 ; m6=out8, m2=out7
+ mova m7, [rsp+67*mmsize]
+
+ SWAP 0, 1
+ SWAP 2, 5, 4, 6, 7, 3
+%endmacro
+
+%macro IADST16_FN 7
+cglobal vp9_%1_%4_16x16_add_10, 4, 6 + ARCH_X86_64, 16, \
+ 70 * mmsize + ARCH_X86_32 * 8 * mmsize, \
+ dst, stride, block, eob
+ mova m0, [pw_1023]
+
+.body:
+ mova [rsp+64*mmsize], m0
+ DEFINE_ARGS dst, stride, block, cnt, ptr, skip, dstbak
+%if ARCH_X86_64
+ mov dstbakq, dstq
+ movsxd cntq, cntd
+%endif
+%ifdef PIC
+ lea ptrq, [%7_16x16]
+ movzx cntd, byte [ptrq+cntq-1]
+%else
+ movzx cntd, byte [%7_16x16+cntq-1]
+%endif
+ mov skipd, 4
+ sub skipd, cntd
+ mov ptrq, rsp
+.loop_1:
+ %2_1D blockq
+
+ TRANSPOSE4x4D 0, 1, 2, 3, 7
+ mova [ptrq+ 1*mmsize], m0
+ mova [ptrq+ 5*mmsize], m1
+ mova [ptrq+ 9*mmsize], m2
+ mova [ptrq+13*mmsize], m3
+ mova m7, [rsp+65*mmsize]
+ TRANSPOSE4x4D 4, 5, 6, 7, 0
+ mova [ptrq+ 2*mmsize], m4
+ mova [ptrq+ 6*mmsize], m5
+ mova [ptrq+10*mmsize], m6
+ mova [ptrq+14*mmsize], m7
+ UNSCRATCH 0, 8, rsp+(%3+0)*mmsize
+ UNSCRATCH 1, 9, rsp+(%3+1)*mmsize
+ UNSCRATCH 2, 10, rsp+(%3+2)*mmsize
+ UNSCRATCH 3, 11, rsp+(%3+3)*mmsize
+ TRANSPOSE4x4D 0, 1, 2, 3, 7
+ mova [ptrq+ 0*mmsize], m0
+ mova [ptrq+ 4*mmsize], m1
+ mova [ptrq+ 8*mmsize], m2
+ mova [ptrq+12*mmsize], m3
+ UNSCRATCH 4, 12, rsp+(%3+4)*mmsize
+ UNSCRATCH 5, 13, rsp+(%3+5)*mmsize
+ UNSCRATCH 6, 14, rsp+(%3+6)*mmsize
+ UNSCRATCH 7, 15, rsp+(%3+7)*mmsize
+ TRANSPOSE4x4D 4, 5, 6, 7, 0
+ mova [ptrq+ 3*mmsize], m4
+ mova [ptrq+ 7*mmsize], m5
+ mova [ptrq+11*mmsize], m6
+ mova [ptrq+15*mmsize], m7
+ add ptrq, 16 * mmsize
+ add blockq, mmsize
+ dec cntd
+ jg .loop_1
+
+ ; zero-pad the remainder (skipped cols)
+ test skipd, skipd
+ jz .end
+ add skipd, skipd
+ lea blockq, [blockq+skipq*(mmsize/2)]
+ pxor m0, m0
+.loop_z:
+ mova [ptrq+mmsize*0], m0
+ mova [ptrq+mmsize*1], m0
+ mova [ptrq+mmsize*2], m0
+ mova [ptrq+mmsize*3], m0
+ mova [ptrq+mmsize*4], m0
+ mova [ptrq+mmsize*5], m0
+ mova [ptrq+mmsize*6], m0
+ mova [ptrq+mmsize*7], m0
+ add ptrq, 8 * mmsize
+ dec skipd
+ jg .loop_z
+.end:
+
+ DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
+ lea stride3q, [strideq*3]
+ mov cntd, 4
+ mov ptrq, rsp
+.loop_2:
+ %5_1D ptrq
+
+ pxor m7, m7
+ lea dstq, [dstq+strideq*4]
+ ROUND_AND_STORE_4x4 0, 1, 2, 3, m7, [rsp+64*mmsize], [pd_32], 6
+ lea dstq, [dstq+strideq*4]
+ mova m0, [rsp+65*mmsize]
+ mova m1, [rsp+64*mmsize]
+ mova m2, [pd_32]
+ ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, m1, m2, 6
+
+%if ARCH_X86_64
+ DEFINE_ARGS dstbak, stride, block, cnt, ptr, stride3, dst
+%else
+ mov dstq, dstm
+%endif
+ UNSCRATCH 0, 8, rsp+(%6+0)*mmsize
+ UNSCRATCH 4, 9, rsp+(%6+1)*mmsize
+ UNSCRATCH 5, 10, rsp+(%6+2)*mmsize
+ UNSCRATCH 3, 11, rsp+(%6+3)*mmsize
+ ROUND_AND_STORE_4x4 0, 4, 5, 3, m7, m1, m2, 6
+%if ARCH_X86_64
+ DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
+ lea dstq, [dstbakq+stride3q*4]
+%else
+ lea dstq, [dstq+stride3q*4]
+%endif
+ UNSCRATCH 4, 12, rsp+(%6+4)*mmsize
+ UNSCRATCH 5, 13, rsp+(%6+5)*mmsize
+ UNSCRATCH 6, 14, rsp+(%6+6)*mmsize
+ UNSCRATCH 0, 15, rsp+(%6+7)*mmsize
+ ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, m1, m2, 6
+
+ add ptrq, mmsize
+%if ARCH_X86_64
+ add dstbakq, 8
+ mov dstq, dstbakq
+%else
+ add dword dstm, 8
+ mov dstq, dstm
+%endif
+ dec cntd
+ jg .loop_2
+
+ ; m7 is still zero
+ ZERO_BLOCK blockq-4*mmsize, 64, 16, m7
+ RET
+
+cglobal vp9_%1_%4_16x16_add_12, 4, 6 + ARCH_X86_64, 16, \
+ 70 * mmsize + ARCH_X86_32 * 8 * mmsize, \
+ dst, stride, block, eob
+ mova m0, [pw_4095]
+ jmp mangle(private_prefix %+ _ %+ vp9_%1_%4_16x16_add_10 %+ SUFFIX).body
+%endmacro
+
+INIT_XMM sse2
+IADST16_FN idct, IDCT16, 67, iadst, IADST16, 70, row
+IADST16_FN iadst, IADST16, 70, idct, IDCT16, 67, col
+IADST16_FN iadst, IADST16, 70, iadst, IADST16, 70, default
+
+%macro IDCT32_1D 2-3 8 * mmsize; pass[1/2], src, src_stride
+ IDCT16_1D %2, 2 * %3, 272, 257
+%if ARCH_X86_64
+ mova [rsp+257*mmsize], m8
+ mova [rsp+258*mmsize], m9
+ mova [rsp+259*mmsize], m10
+ mova [rsp+260*mmsize], m11
+ mova [rsp+261*mmsize], m12
+ mova [rsp+262*mmsize], m13
+ mova [rsp+263*mmsize], m14
+ mova [rsp+264*mmsize], m15
+%endif
+ mova [rsp+265*mmsize], m0
+ mova [rsp+266*mmsize], m1
+ mova [rsp+267*mmsize], m2
+ mova [rsp+268*mmsize], m3
+ mova [rsp+269*mmsize], m4
+ mova [rsp+270*mmsize], m5
+ mova [rsp+271*mmsize], m6
+
+ ; r257-260: t0-3
+ ; r265-272: t4/5a/6a/7/8/9a/10/11a
+ ; r261-264: t12a/13/14a/15
+ ; r273-274 is free as scratch space, and 275-282 mirrors m8-15 on 32bit
+
+ mova m0, [%2+ 1*%3] ; in1
+ mova m1, [%2+15*%3] ; in15
+ mova m2, [%2+17*%3] ; in17
+ mova m3, [%2+31*%3] ; in31
+ SUMSUB_MUL 0, 3, 4, 5, 16364, 804 ; m0=t31a, m3=t16a
+ SUMSUB_MUL 2, 1, 4, 5, 11003, 12140 ; m2=t30a, m1=t17a
+ SUMSUB_BA d, 1, 3, 4 ; m1=t16, m3=t17
+ SUMSUB_BA d, 2, 0, 4 ; m2=t31, m0=t30
+ SUMSUB_MUL 0, 3, 4, 5, 16069, 3196 ; m0=t30a, m3=t17a
+ SCRATCH 0, 8, rsp+275*mmsize
+ SCRATCH 2, 9, rsp+276*mmsize
+
+ ; end of stage 1-3 first quart
+
+ mova m0, [%2+ 7*%3] ; in7
+ mova m2, [%2+ 9*%3] ; in9
+ mova m4, [%2+23*%3] ; in23
+ mova m5, [%2+25*%3] ; in25
+ SUMSUB_MUL 2, 4, 6, 7, 14811, 7005 ; m2=t29a, m4=t18a
+ SUMSUB_MUL 5, 0, 6, 7, 5520, 15426 ; m5=t28a, m0=t19a
+ SUMSUB_BA d, 4, 0, 6 ; m4=t19, m0=t18
+ SUMSUB_BA d, 2, 5, 6 ; m2=t28, m5=t29
+ SUMSUB_MUL 5, 0, 6, 7, 3196, m16069 ; m5=t29a, m0=t18a
+
+ ; end of stage 1-3 second quart
+
+ SUMSUB_BA d, 4, 1, 6 ; m4=t16a, m1=t19a
+ SUMSUB_BA d, 0, 3, 6 ; m0=t17, m3=t18
+ UNSCRATCH 6, 8, rsp+275*mmsize ; t30a
+ UNSCRATCH 7, 9, rsp+276*mmsize ; t31
+ mova [rsp+273*mmsize], m4
+ mova [rsp+274*mmsize], m0
+ SUMSUB_BA d, 2, 7, 0 ; m2=t31a, m7=t28a
+ SUMSUB_BA d, 5, 6, 0 ; m5=t30, m6=t29
+ SUMSUB_MUL 6, 3, 0, 4, 15137, 6270 ; m6=t29a, m3=t18a
+ SUMSUB_MUL 7, 1, 0, 4, 15137, 6270 ; m7=t28, m1=t19
+ SCRATCH 3, 10, rsp+277*mmsize
+ SCRATCH 1, 11, rsp+278*mmsize
+ SCRATCH 7, 12, rsp+279*mmsize
+ SCRATCH 6, 13, rsp+280*mmsize
+ SCRATCH 5, 14, rsp+281*mmsize
+ SCRATCH 2, 15, rsp+282*mmsize
+
+ ; end of stage 4-5 first half
+
+ mova m0, [%2+ 5*%3] ; in5
+ mova m1, [%2+11*%3] ; in11
+ mova m2, [%2+21*%3] ; in21
+ mova m3, [%2+27*%3] ; in27
+ SUMSUB_MUL 0, 3, 4, 5, 15893, 3981 ; m0=t27a, m3=t20a
+ SUMSUB_MUL 2, 1, 4, 5, 8423, 14053 ; m2=t26a, m1=t21a
+ SUMSUB_BA d, 1, 3, 4 ; m1=t20, m3=t21
+ SUMSUB_BA d, 2, 0, 4 ; m2=t27, m0=t26
+ SUMSUB_MUL 0, 3, 4, 5, 9102, 13623 ; m0=t26a, m3=t21a
+ SCRATCH 0, 8, rsp+275*mmsize
+ SCRATCH 2, 9, rsp+276*mmsize
+
+ ; end of stage 1-3 third quart
+
+ mova m0, [%2+ 3*%3] ; in3
+ mova m2, [%2+13*%3] ; in13
+ mova m4, [%2+19*%3] ; in19
+ mova m5, [%2+29*%3] ; in29
+ SUMSUB_MUL 2, 4, 6, 7, 13160, 9760 ; m2=t25a, m4=t22a
+ SUMSUB_MUL 5, 0, 6, 7, 2404, 16207 ; m5=t24a, m0=t23a
+ SUMSUB_BA d, 4, 0, 6 ; m4=t23, m0=t22
+ SUMSUB_BA d, 2, 5, 6 ; m2=t24, m5=t25
+ SUMSUB_MUL 5, 0, 6, 7, 13623, m9102 ; m5=t25a, m0=t22a
+
+ ; end of stage 1-3 fourth quart
+
+ SUMSUB_BA d, 1, 4, 6 ; m1=t23a, m4=t20a
+ SUMSUB_BA d, 3, 0, 6 ; m3=t22, m0=t21
+ UNSCRATCH 6, 8, rsp+275*mmsize ; t26a
+ UNSCRATCH 7, 9, rsp+276*mmsize ; t27
+ SCRATCH 3, 8, rsp+275*mmsize
+ SCRATCH 1, 9, rsp+276*mmsize
+ SUMSUB_BA d, 7, 2, 1 ; m7=t24a, m2=t27a
+ SUMSUB_BA d, 6, 5, 1 ; m6=t25, m5=t26
+ SUMSUB_MUL 2, 4, 1, 3, 6270, m15137 ; m2=t27, m4=t20
+ SUMSUB_MUL 5, 0, 1, 3, 6270, m15137 ; m5=t26a, m0=t21a
+
+ ; end of stage 4-5 second half
+
+ UNSCRATCH 1, 12, rsp+279*mmsize ; t28
+ UNSCRATCH 3, 13, rsp+280*mmsize ; t29a
+ SCRATCH 4, 12, rsp+279*mmsize
+ SCRATCH 0, 13, rsp+280*mmsize
+ SUMSUB_BA d, 5, 3, 0 ; m5=t29, m3=t26
+ SUMSUB_BA d, 2, 1, 0 ; m2=t28a, m1=t27a
+ UNSCRATCH 0, 14, rsp+281*mmsize ; t30
+ UNSCRATCH 4, 15, rsp+282*mmsize ; t31a
+ SCRATCH 2, 14, rsp+281*mmsize
+ SCRATCH 5, 15, rsp+282*mmsize
+ SUMSUB_BA d, 6, 0, 2 ; m6=t30a, m0=t25a
+ SUMSUB_BA d, 7, 4, 2 ; m7=t31, m4=t24
+
+ mova m2, [rsp+273*mmsize] ; t16a
+ mova m5, [rsp+274*mmsize] ; t17
+ mova [rsp+273*mmsize], m6
+ mova [rsp+274*mmsize], m7
+ UNSCRATCH 6, 10, rsp+277*mmsize ; t18a
+ UNSCRATCH 7, 11, rsp+278*mmsize ; t19
+ SCRATCH 4, 10, rsp+277*mmsize
+ SCRATCH 0, 11, rsp+278*mmsize
+ UNSCRATCH 4, 12, rsp+279*mmsize ; t20
+ UNSCRATCH 0, 13, rsp+280*mmsize ; t21a
+ SCRATCH 3, 12, rsp+279*mmsize
+ SCRATCH 1, 13, rsp+280*mmsize
+ SUMSUB_BA d, 0, 6, 1 ; m0=t18, m6=t21
+ SUMSUB_BA d, 4, 7, 1 ; m4=t19a, m7=t20a
+ UNSCRATCH 3, 8, rsp+275*mmsize ; t22
+ UNSCRATCH 1, 9, rsp+276*mmsize ; t23a
+ SCRATCH 0, 8, rsp+275*mmsize
+ SCRATCH 4, 9, rsp+276*mmsize
+ SUMSUB_BA d, 3, 5, 0 ; m3=t17a, m5=t22a
+ SUMSUB_BA d, 1, 2, 0 ; m1=t16, m2=t23
+
+ ; end of stage 6
+
+ UNSCRATCH 0, 10, rsp+277*mmsize ; t24
+ UNSCRATCH 4, 11, rsp+278*mmsize ; t25a
+ SCRATCH 1, 10, rsp+277*mmsize
+ SCRATCH 3, 11, rsp+278*mmsize
+ SUMSUB_MUL 0, 2, 1, 3, 11585, 11585 ; m0=t24a, m2=t23a
+ SUMSUB_MUL 4, 5, 1, 3, 11585, 11585 ; m4=t25, m5=t22
+ UNSCRATCH 1, 12, rsp+279*mmsize ; t26
+ UNSCRATCH 3, 13, rsp+280*mmsize ; t27a
+ SCRATCH 0, 12, rsp+279*mmsize
+ SCRATCH 4, 13, rsp+280*mmsize
+ SUMSUB_MUL 3, 7, 0, 4, 11585, 11585 ; m3=t27, m7=t20
+ SUMSUB_MUL 1, 6, 0, 4, 11585, 11585 ; m1=t26a, m6=t21a
+
+ ; end of stage 7
+
+ mova m0, [rsp+269*mmsize] ; t8
+ mova m4, [rsp+270*mmsize] ; t9a
+ mova [rsp+269*mmsize], m1 ; t26a
+ mova [rsp+270*mmsize], m3 ; t27
+ mova m3, [rsp+271*mmsize] ; t10
+ SUMSUB_BA d, 2, 0, 1 ; m2=out8, m0=out23
+ SUMSUB_BA d, 5, 4, 1 ; m5=out9, m4=out22
+ SUMSUB_BA d, 6, 3, 1 ; m6=out10, m3=out21
+ mova m1, [rsp+272*mmsize] ; t11a
+ mova [rsp+271*mmsize], m0
+ SUMSUB_BA d, 7, 1, 0 ; m7=out11, m1=out20
+
+%if %1 == 1
+ TRANSPOSE4x4D 2, 5, 6, 7, 0
+ mova [ptrq+ 2*mmsize], m2
+ mova [ptrq+10*mmsize], m5
+ mova [ptrq+18*mmsize], m6
+ mova [ptrq+26*mmsize], m7
+%else ; %1 == 2
+ pxor m0, m0
+ lea dstq, [dstq+strideq*8]
+ ROUND_AND_STORE_4x4 2, 5, 6, 7, m0, [rsp+256*mmsize], [pd_32], 6
+%endif
+ mova m2, [rsp+271*mmsize]
+%if %1 == 1
+ TRANSPOSE4x4D 1, 3, 4, 2, 0
+ mova [ptrq+ 5*mmsize], m1
+ mova [ptrq+13*mmsize], m3
+ mova [ptrq+21*mmsize], m4
+ mova [ptrq+29*mmsize], m2
+%else ; %1 == 2
+ lea dstq, [dstq+stride3q*4]
+ ROUND_AND_STORE_4x4 1, 3, 4, 2, m0, [rsp+256*mmsize], [pd_32], 6
+%endif
+
+ ; end of last stage + store for out8-11 and out20-23
+
+ UNSCRATCH 0, 9, rsp+276*mmsize ; t19a
+ UNSCRATCH 1, 8, rsp+275*mmsize ; t18
+ UNSCRATCH 2, 11, rsp+278*mmsize ; t17a
+ UNSCRATCH 3, 10, rsp+277*mmsize ; t16
+ mova m7, [rsp+261*mmsize] ; t12a
+ mova m6, [rsp+262*mmsize] ; t13
+ mova m5, [rsp+263*mmsize] ; t14a
+ SUMSUB_BA d, 0, 7, 4 ; m0=out12, m7=out19
+ SUMSUB_BA d, 1, 6, 4 ; m1=out13, m6=out18
+ SUMSUB_BA d, 2, 5, 4 ; m2=out14, m5=out17
+ mova m4, [rsp+264*mmsize] ; t15
+ SCRATCH 7, 8, rsp+275*mmsize
+ SUMSUB_BA d, 3, 4, 7 ; m3=out15, m4=out16
+
+%if %1 == 1
+ TRANSPOSE4x4D 0, 1, 2, 3, 7
+ mova [ptrq+ 3*mmsize], m0
+ mova [ptrq+11*mmsize], m1
+ mova [ptrq+19*mmsize], m2
+ mova [ptrq+27*mmsize], m3
+%else ; %1 == 2
+%if ARCH_X86_64
+ SWAP 7, 9
+ lea dstq, [dstbakq+stride3q*4]
+%else ; x86-32
+ pxor m7, m7
+ mov dstq, dstm
+ lea dstq, [dstq+stride3q*4]
+%endif
+ ROUND_AND_STORE_4x4 0, 1, 2, 3, m7, [rsp+256*mmsize], [pd_32], 6
+%endif
+ UNSCRATCH 0, 8, rsp+275*mmsize ; out19
+%if %1 == 1
+ TRANSPOSE4x4D 4, 5, 6, 0, 7
+ mova [ptrq+ 4*mmsize], m4
+ mova [ptrq+12*mmsize], m5
+ mova [ptrq+20*mmsize], m6
+ mova [ptrq+28*mmsize], m0
+%else ; %1 == 2
+ lea dstq, [dstq+strideq*4]
+ ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, [rsp+256*mmsize], [pd_32], 6
+%endif
+
+ ; end of last stage + store for out12-19
+
+%if ARCH_X86_64
+ SWAP 7, 8
+%endif
+ mova m7, [rsp+257*mmsize] ; t0
+ mova m6, [rsp+258*mmsize] ; t1
+ mova m5, [rsp+259*mmsize] ; t2
+ mova m4, [rsp+260*mmsize] ; t3
+ mova m0, [rsp+274*mmsize] ; t31
+ mova m1, [rsp+273*mmsize] ; t30a
+ UNSCRATCH 2, 15, rsp+282*mmsize ; t29
+ SUMSUB_BA d, 0, 7, 3 ; m0=out0, m7=out31
+ SUMSUB_BA d, 1, 6, 3 ; m1=out1, m6=out30
+ SUMSUB_BA d, 2, 5, 3 ; m2=out2, m5=out29
+ SCRATCH 0, 9, rsp+276*mmsize
+ UNSCRATCH 3, 14, rsp+281*mmsize ; t28a
+ SUMSUB_BA d, 3, 4, 0 ; m3=out3, m4=out28
+
+%if %1 == 1
+ TRANSPOSE4x4D 4, 5, 6, 7, 0
+ mova [ptrq+ 7*mmsize], m4
+ mova [ptrq+15*mmsize], m5
+ mova [ptrq+23*mmsize], m6
+ mova [ptrq+31*mmsize], m7
+%else ; %1 == 2
+%if ARCH_X86_64
+ SWAP 0, 8
+%else ; x86-32
+ pxor m0, m0
+%endif
+ lea dstq, [dstq+stride3q*4]
+ ROUND_AND_STORE_4x4 4, 5, 6, 7, m0, [rsp+256*mmsize], [pd_32], 6
+%endif
+ UNSCRATCH 7, 9, rsp+276*mmsize ; out0
+%if %1 == 1
+ TRANSPOSE4x4D 7, 1, 2, 3, 0
+ mova [ptrq+ 0*mmsize], m7
+ mova [ptrq+ 8*mmsize], m1
+ mova [ptrq+16*mmsize], m2
+ mova [ptrq+24*mmsize], m3
+%else ; %1 == 2
+%if ARCH_X86_64
+ DEFINE_ARGS dstbak, stride, block, cnt, ptr, stride3, dst
+%else ; x86-32
+ mov dstq, dstm
+%endif
+ ROUND_AND_STORE_4x4 7, 1, 2, 3, m0, [rsp+256*mmsize], [pd_32], 6
+%if ARCH_X86_64
+ DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
+%endif
+%endif
+
+ ; end of last stage + store for out0-3 and out28-31
+
+%if ARCH_X86_64
+ SWAP 0, 8
+%endif
+ mova m7, [rsp+265*mmsize] ; t4
+ mova m6, [rsp+266*mmsize] ; t5a
+ mova m5, [rsp+267*mmsize] ; t6a
+ mova m4, [rsp+268*mmsize] ; t7
+ mova m0, [rsp+270*mmsize] ; t27
+ mova m1, [rsp+269*mmsize] ; t26a
+ UNSCRATCH 2, 13, rsp+280*mmsize ; t25
+ SUMSUB_BA d, 0, 7, 3 ; m0=out4, m7=out27
+ SUMSUB_BA d, 1, 6, 3 ; m1=out5, m6=out26
+ SUMSUB_BA d, 2, 5, 3 ; m2=out6, m5=out25
+ UNSCRATCH 3, 12, rsp+279*mmsize ; t24a
+ SCRATCH 7, 9, rsp+276*mmsize
+ SUMSUB_BA d, 3, 4, 7 ; m3=out7, m4=out24
+
+%if %1 == 1
+ TRANSPOSE4x4D 0, 1, 2, 3, 7
+ mova [ptrq+ 1*mmsize], m0
+ mova [ptrq+ 9*mmsize], m1
+ mova [ptrq+17*mmsize], m2
+ mova [ptrq+25*mmsize], m3
+%else ; %1 == 2
+%if ARCH_X86_64
+ SWAP 7, 8
+ lea dstq, [dstbakq+strideq*4]
+%else ; x86-32
+ pxor m7, m7
+ lea dstq, [dstq+strideq*4]
+%endif
+ ROUND_AND_STORE_4x4 0, 1, 2, 3, m7, [rsp+256*mmsize], [pd_32], 6
+%endif
+ UNSCRATCH 0, 9, rsp+276*mmsize ; out27
+%if %1 == 1
+ TRANSPOSE4x4D 4, 5, 6, 0, 7
+ mova [ptrq+ 6*mmsize], m4
+ mova [ptrq+14*mmsize], m5
+ mova [ptrq+22*mmsize], m6
+ mova [ptrq+30*mmsize], m0
+%else ; %1 == 2
+%if ARCH_X86_64
+ lea dstq, [dstbakq+stride3q*8]
+%else
+ mov dstq, dstm
+ lea dstq, [dstq+stride3q*8]
+%endif
+ ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, [rsp+256*mmsize], [pd_32], 6
+%endif
+
+ ; end of last stage + store for out4-7 and out24-27
+%endmacro
+
+INIT_XMM sse2
+cglobal vp9_idct_idct_32x32_add_10, 4, 6 + ARCH_X86_64, 16, \
+ 275 * mmsize + ARCH_X86_32 * 8 * mmsize, \
+ dst, stride, block, eob
+ mova m0, [pw_1023]
+ cmp eobd, 1
+ jg .idctfull
+
+ ; dc-only - the 10bit version can be done entirely in 32bit, since the max
+ ; coef values are 17+sign bit, and the coef is 14bit, so 31+sign easily
+ ; fits in 32bit
+ DEFINE_ARGS dst, stride, block, coef
+ pxor m2, m2
+ DC_ONLY 6, m2
+ movd m1, coefd
+ pshuflw m1, m1, q0000
+ punpcklqdq m1, m1
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 32
+.loop_dc:
+ STORE_2x8 3, 4, 1, m2, m0, dstq, mmsize
+ STORE_2x8 3, 4, 1, m2, m0, dstq+mmsize*2, mmsize
+ add dstq, strideq
+ dec cntd
+ jg .loop_dc
+ RET
+
+.idctfull:
+ mova [rsp+256*mmsize], m0
+ DEFINE_ARGS dst, stride, block, cnt, ptr, skip, dstbak
+%if ARCH_X86_64
+ mov dstbakq, dstq
+ movsxd cntq, cntd
+%endif
+%ifdef PIC
+ lea ptrq, [default_32x32]
+ movzx cntd, byte [ptrq+cntq-1]
+%else
+ movzx cntd, byte [default_32x32+cntq-1]
+%endif
+ mov skipd, 8
+ sub skipd, cntd
+ mov ptrq, rsp
+.loop_1:
+ IDCT32_1D 1, blockq
+
+ add ptrq, 32 * mmsize
+ add blockq, mmsize
+ dec cntd
+ jg .loop_1
+
+ ; zero-pad the remainder (skipped cols)
+ test skipd, skipd
+ jz .end
+ shl skipd, 2
+ lea blockq, [blockq+skipq*(mmsize/4)]
+ pxor m0, m0
+.loop_z:
+ mova [ptrq+mmsize*0], m0
+ mova [ptrq+mmsize*1], m0
+ mova [ptrq+mmsize*2], m0
+ mova [ptrq+mmsize*3], m0
+ mova [ptrq+mmsize*4], m0
+ mova [ptrq+mmsize*5], m0
+ mova [ptrq+mmsize*6], m0
+ mova [ptrq+mmsize*7], m0
+ add ptrq, 8 * mmsize
+ dec skipd
+ jg .loop_z
+.end:
+
+ DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
+ lea stride3q, [strideq*3]
+ mov cntd, 8
+ mov ptrq, rsp
+.loop_2:
+ IDCT32_1D 2, ptrq
+
+ add ptrq, mmsize
+%if ARCH_X86_64
+ add dstbakq, 8
+ mov dstq, dstbakq
+%else
+ add dword dstm, 8
+ mov dstq, dstm
+%endif
+ dec cntd
+ jg .loop_2
+
+ ; m7 is still zero
+ ZERO_BLOCK blockq-8*mmsize, 128, 32, m7
+ RET
+
+INIT_XMM sse2
+cglobal vp9_idct_idct_32x32_add_12, 4, 6 + ARCH_X86_64, 16, \
+ 275 * mmsize + ARCH_X86_32 * 8 * mmsize, \
+ dst, stride, block, eob
+ mova m0, [pw_4095]
+ cmp eobd, 1
+ jg mangle(private_prefix %+ _ %+ vp9_idct_idct_32x32_add_10 %+ SUFFIX).idctfull
+
+ ; dc-only - unfortunately, this one can overflow, since coefs are 19+sign
+ ; bpp, and 19+14+sign does not fit in 32bit, so we do 2-stage multiplies
+ DEFINE_ARGS dst, stride, block, coef, coefl
+ pxor m2, m2
+ DC_ONLY_64BIT 6, m2
+ movd m1, coefd
+ pshuflw m1, m1, q0000
+ punpcklqdq m1, m1
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 32
+.loop_dc:
+ STORE_2x8 3, 4, 1, m2, m0, dstq, mmsize
+ STORE_2x8 3, 4, 1, m2, m0, dstq+mmsize*2, mmsize
+ add dstq, strideq
+ dec cntd
+ jg .loop_dc
+ RET
diff --git a/media/ffvpx/libavcodec/x86/vp9itxfm_template.asm b/media/ffvpx/libavcodec/x86/vp9itxfm_template.asm
new file mode 100644
index 0000000000..d2f2257d84
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9itxfm_template.asm
@@ -0,0 +1,142 @@
+;******************************************************************************
+;* VP9 IDCT SIMD optimizations
+;*
+;* Copyright (C) 2013 Clément Bœsch <u pkh me>
+;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%macro VP9_IWHT4_1D 0
+ SWAP 1, 2, 3
+ paddw m0, m2
+ psubw m3, m1
+ psubw m4, m0, m3
+ psraw m4, 1
+ psubw m5, m4, m1
+ SWAP 5, 1
+ psubw m4, m2
+ SWAP 4, 2
+ psubw m0, m1
+ paddw m3, m2
+ SWAP 3, 2, 1
+%endmacro
+
+; (a*x + b*y + round) >> shift
+%macro VP9_MULSUB_2W_2X 5 ; dst1, dst2/src, round, coefs1, coefs2
+ pmaddwd m%1, m%2, %4
+ pmaddwd m%2, %5
+ paddd m%1, %3
+ paddd m%2, %3
+ psrad m%1, 14
+ psrad m%2, 14
+%endmacro
+
+%macro VP9_MULSUB_2W_4X 7 ; dst1, dst2, coef1, coef2, rnd, tmp1/src, tmp2
+ VP9_MULSUB_2W_2X %7, %6, %5, [pw_m%3_%4], [pw_%4_%3]
+ VP9_MULSUB_2W_2X %1, %2, %5, [pw_m%3_%4], [pw_%4_%3]
+ packssdw m%1, m%7
+ packssdw m%2, m%6
+%endmacro
+
+%macro VP9_UNPACK_MULSUB_2W_4X 7-9 ; dst1, dst2, (src1, src2,) coef1, coef2, rnd, tmp1, tmp2
+%if %0 == 7
+ punpckhwd m%6, m%2, m%1
+ punpcklwd m%2, m%1
+ VP9_MULSUB_2W_4X %1, %2, %3, %4, %5, %6, %7
+%else
+ punpckhwd m%8, m%4, m%3
+ punpcklwd m%2, m%4, m%3
+ VP9_MULSUB_2W_4X %1, %2, %5, %6, %7, %8, %9
+%endif
+%endmacro
+
+%macro VP9_IDCT4_1D_FINALIZE 0
+ SUMSUB_BA w, 3, 2, 4 ; m3=t3+t0, m2=-t3+t0
+ SUMSUB_BA w, 1, 0, 4 ; m1=t2+t1, m0=-t2+t1
+ SWAP 0, 3, 2 ; 3102 -> 0123
+%endmacro
+
+%macro VP9_IDCT4_1D 0
+%if cpuflag(ssse3)
+ SUMSUB_BA w, 2, 0, 4 ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
+ pmulhrsw m2, m6 ; m2=t0
+ pmulhrsw m0, m6 ; m0=t1
+%else ; <= sse2
+ VP9_UNPACK_MULSUB_2W_4X 0, 2, 11585, 11585, m7, 4, 5 ; m0=t1, m1=t0
+%endif
+ VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5 ; m1=t2, m3=t3
+ VP9_IDCT4_1D_FINALIZE
+%endmacro
+
+%macro VP9_IADST4_1D 0
+ movq2dq xmm0, m0
+ movq2dq xmm1, m1
+ movq2dq xmm2, m2
+ movq2dq xmm3, m3
+%if cpuflag(ssse3)
+ paddw m3, m0
+%endif
+ punpcklwd xmm0, xmm1
+ punpcklwd xmm2, xmm3
+ pmaddwd xmm1, xmm0, [pw_5283_13377]
+ pmaddwd xmm4, xmm0, [pw_9929_13377]
+%if notcpuflag(ssse3)
+ pmaddwd xmm6, xmm0, [pw_13377_0]
+%endif
+ pmaddwd xmm0, [pw_15212_m13377]
+ pmaddwd xmm3, xmm2, [pw_15212_9929]
+%if notcpuflag(ssse3)
+ pmaddwd xmm7, xmm2, [pw_m13377_13377]
+%endif
+ pmaddwd xmm2, [pw_m5283_m15212]
+%if cpuflag(ssse3)
+ psubw m3, m2
+%else
+ paddd xmm6, xmm7
+%endif
+ paddd xmm0, xmm2
+ paddd xmm3, xmm5
+ paddd xmm2, xmm5
+%if notcpuflag(ssse3)
+ paddd xmm6, xmm5
+%endif
+ paddd xmm1, xmm3
+ paddd xmm0, xmm3
+ paddd xmm4, xmm2
+ psrad xmm1, 14
+ psrad xmm0, 14
+ psrad xmm4, 14
+%if cpuflag(ssse3)
+ pmulhrsw m3, [pw_13377x2] ; out2
+%else
+ psrad xmm6, 14
+%endif
+ packssdw xmm0, xmm0
+ packssdw xmm1, xmm1
+ packssdw xmm4, xmm4
+%if notcpuflag(ssse3)
+ packssdw xmm6, xmm6
+%endif
+ movdq2q m0, xmm0 ; out3
+ movdq2q m1, xmm1 ; out0
+ movdq2q m2, xmm4 ; out1
+%if notcpuflag(ssse3)
+ movdq2q m3, xmm6 ; out2
+%endif
+ SWAP 0, 1, 2, 3
+%endmacro
diff --git a/media/ffvpx/libavcodec/x86/vp9lpf.asm b/media/ffvpx/libavcodec/x86/vp9lpf.asm
new file mode 100644
index 0000000000..4e7ede2235
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9lpf.asm
@@ -0,0 +1,1211 @@
+;******************************************************************************
+;* VP9 loop filter SIMD optimizations
+;*
+;* Copyright (C) 2013-2014 Clément Bœsch <u pkh me>
+;* Copyright (C) 2014 Ronald S. Bultje <rsbultje@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+cextern pb_3
+cextern pb_80
+
+pb_4: times 16 db 0x04
+pb_10: times 16 db 0x10
+pb_40: times 16 db 0x40
+pb_81: times 16 db 0x81
+pb_f8: times 16 db 0xf8
+pb_fe: times 16 db 0xfe
+pb_ff: times 16 db 0xff
+
+cextern pw_4
+cextern pw_8
+
+; with the mix functions, two 8-bit thresholds are packed into one 16-bit
+; value; the following mask is used to splat both into the same register
+mask_mix: times 8 db 0
+ times 8 db 1
+
+mask_mix84: times 8 db 0xff
+ times 8 db 0x00
+mask_mix48: times 8 db 0x00
+ times 8 db 0xff
+
+SECTION .text
+
+%macro SCRATCH 3
+%ifdef m8
+ SWAP %1, %2
+%else
+ mova [%3], m%1
+%endif
+%endmacro
+
+%macro UNSCRATCH 3
+%ifdef m8
+ SWAP %1, %2
+%else
+ mova m%1, [%3]
+%endif
+%endmacro
+
+; %1 = abs(%2-%3)
+%macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp
+%ifdef m8
+ psubusb %1, %3, %2
+ psubusb %4, %2, %3
+%else
+ mova %1, %3
+ mova %4, %2
+ psubusb %1, %2
+ psubusb %4, %3
+%endif
+ por %1, %4
+%endmacro
+
+; %1 = %1>%2
+%macro CMP_GT 2-3 ; src/dst, cmp, pb_80
+%if %0 == 3
+ pxor %1, %3
+%endif
+ pcmpgtb %1, %2
+%endmacro
+
+; %1 = abs(%2-%3) > %4
+%macro ABSSUB_GT 5-6 [pb_80]; dst, src1, src2, cmp, tmp, [pb_80]
+ ABSSUB %1, %2, %3, %5 ; dst = abs(src1-src2)
+ CMP_GT %1, %4, %6 ; dst > cmp
+%endmacro
+
+%macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp
+ pand %1, %3 ; new &= mask
+ pandn %4, %3, %2 ; tmp = ~mask & old
+ por %1, %4 ; new&mask | old&~mask
+%endmacro
+
+%macro UNPACK 4
+%ifdef m8
+ punpck%1bw %2, %3, %4
+%else
+ mova %2, %3
+ punpck%1bw %2, %4
+%endif
+%endmacro
+
+%macro FILTER_SUBx2_ADDx2 11 ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1
+ ; %8=add2 %9=rshift, [unpack], [unpack_is_mem_on_x86_32]
+ psubw %3, [rsp+%4+%5*mmsize*2]
+ psubw %3, [rsp+%4+%6*mmsize*2]
+ paddw %3, [rsp+%4+%7*mmsize*2]
+%ifnidn %10, ""
+%if %11 == 0
+ punpck%2bw %1, %10, m0
+%else
+ UNPACK %2, %1, %10, m0
+%endif
+ mova [rsp+%4+%8*mmsize*2], %1
+ paddw %3, %1
+%else
+ paddw %3, [rsp+%4+%8*mmsize*2]
+%endif
+ psraw %1, %3, %9
+%endmacro
+
+; FIXME interleave l/h better (for instruction pairing)
+%macro FILTER_INIT 9 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, filterid, mask, source
+ FILTER%7_INIT %1, l, %3, %6 + 0
+ FILTER%7_INIT %2, h, %4, %6 + mmsize
+ packuswb %1, %2
+ MASK_APPLY %1, %9, %8, %2
+ mova %5, %1
+%endmacro
+
+
+%macro FILTER_UPDATE 12-16 "", "", "", 0 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, -, -, +, +, rshift,
+ ; mask, [source], [unpack + src], [unpack_is_mem_on_x86_32]
+; FIXME interleave this properly with the subx2/addx2
+%ifnidn %15, ""
+%if %16 == 0 || ARCH_X86_64
+ mova %14, %15
+%endif
+%endif
+ FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14, %16
+ FILTER_SUBx2_ADDx2 %2, h, %4, %6 + mmsize, %7, %8, %9, %10, %11, %14, %16
+ packuswb %1, %2
+%ifnidn %13, ""
+ MASK_APPLY %1, %13, %12, %2
+%else
+ MASK_APPLY %1, %5, %12, %2
+%endif
+ mova %5, %1
+%endmacro
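+
+; The flat filters are computed as a sliding-window average: FILTER6_INIT/
+; FILTER14_INIT build the initial word sum once, and each FILTER_UPDATE
+; then subtracts the two taps leaving the window and adds the two entering
+; it, using the unpacked copies cached on the stack, instead of re-summing
+; every tap for each output pixel.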
+
+%macro SRSHIFT3B_2X 4 ; reg1, reg2, [pb_10], tmp
+ mova %4, [pb_f8]
+ pand %1, %4
+ pand %2, %4
+ psrlq %1, 3
+ psrlq %2, 3
+ pxor %1, %3
+ pxor %2, %3
+ psubb %1, %3
+ psubb %2, %3
+%endmacro
+
+%macro EXTRACT_POS_NEG 3 ; i8, neg, pos
+ pxor %3, %3
+ pxor %2, %2
+ pcmpgtb %3, %1 ; i8 < 0 mask
+ psubb %2, %1 ; neg values (only the originally - will be kept)
+ pand %2, %3 ; negative values of i8 (but stored as +)
+ pandn %3, %1 ; positive values of i8
+%endmacro
+
+; clip_u8(u8 + i8)
+%macro SIGN_ADD 4 ; dst, u8, i8, tmp1
+ EXTRACT_POS_NEG %3, %4, %1
+ paddusb %1, %2 ; add the positives
+ psubusb %1, %4 ; sub the negatives
+%endmacro
+
+; clip_u8(u8 - i8)
+%macro SIGN_SUB 4 ; dst, u8, i8, tmp1
+ EXTRACT_POS_NEG %3, %1, %4
+ paddusb %1, %2 ; add the negatives
+ psubusb %1, %4 ; sub the positives
+%endmacro
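+
+; SSE2 has no saturating add of a signed byte delta to unsigned pixels, so
+; SIGN_ADD/SIGN_SUB first split the i8 delta into its positive values and
+; the magnitudes of its negative values (EXTRACT_POS_NEG), then apply them
+; with unsigned saturating paddusb/psubusb to get clip_u8(u8 +/- i8).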
+
+%macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off
+ UNPACK %2, %1, rp3, m0 ; p3: B->W
+ mova [rsp+%4+0*mmsize*2], %1
+ paddw %3, %1, %1 ; p3*2
+ paddw %3, %1 ; p3*3
+ punpck%2bw %1, m1, m0 ; p2: B->W
+ mova [rsp+%4+1*mmsize*2], %1
+ paddw %3, %1 ; p3*3 + p2
+ paddw %3, %1 ; p3*3 + p2*2
+ UNPACK %2, %1, rp1, m0 ; p1: B->W
+ mova [rsp+%4+2*mmsize*2], %1
+ paddw %3, %1 ; p3*3 + p2*2 + p1
+ UNPACK %2, %1, rp0, m0 ; p0: B->W
+ mova [rsp+%4+3*mmsize*2], %1
+ paddw %3, %1 ; p3*3 + p2*2 + p1 + p0
+ UNPACK %2, %1, rq0, m0 ; q0: B->W
+ mova [rsp+%4+4*mmsize*2], %1
+ paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 + q0
+ paddw %3, [pw_4] ; p3*3 + p2*2 + p1 + p0 + q0 + 4
+ psraw %1, %3, 3 ; (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3
+%endmacro
+
+%macro FILTER14_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off
+ punpck%2bw %1, m2, m0 ; p7: B->W
+ mova [rsp+%4+ 8*mmsize*2], %1
+ psllw %3, %1, 3 ; p7*8
+ psubw %3, %1 ; p7*7
+ punpck%2bw %1, m3, m0 ; p6: B->W
+ mova [rsp+%4+ 9*mmsize*2], %1
+ paddw %3, %1 ; p7*7 + p6
+ paddw %3, %1 ; p7*7 + p6*2
+ UNPACK %2, %1, rp5, m0 ; p5: B->W
+ mova [rsp+%4+10*mmsize*2], %1
+ paddw %3, %1 ; p7*7 + p6*2 + p5
+ UNPACK %2, %1, rp4, m0 ; p4: B->W
+ mova [rsp+%4+11*mmsize*2], %1
+ paddw %3, %1 ; p7*7 + p6*2 + p5 + p4
+ paddw %3, [rsp+%4+ 0*mmsize*2] ; p7*7 + p6*2 + p5 + p4 + p3
+ paddw %3, [rsp+%4+ 1*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p2
+ paddw %3, [rsp+%4+ 2*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p1
+ paddw %3, [rsp+%4+ 3*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p0
+ paddw %3, [rsp+%4+ 4*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p0 + q0
+ paddw %3, [pw_8] ; p7*7 + p6*2 + p5 + .. + p0 + q0 + 8
+ psraw %1, %3, 4 ; (p7*7 + p6*2 + p5 + .. + p0 + q0 + 8) >> 4
+%endmacro
+
+%macro TRANSPOSE16x16B 17
+ mova %17, m%16
+ SBUTTERFLY bw, %1, %2, %16
+ SBUTTERFLY bw, %3, %4, %16
+ SBUTTERFLY bw, %5, %6, %16
+ SBUTTERFLY bw, %7, %8, %16
+ SBUTTERFLY bw, %9, %10, %16
+ SBUTTERFLY bw, %11, %12, %16
+ SBUTTERFLY bw, %13, %14, %16
+ mova m%16, %17
+ mova %17, m%14
+ SBUTTERFLY bw, %15, %16, %14
+ SBUTTERFLY wd, %1, %3, %14
+ SBUTTERFLY wd, %2, %4, %14
+ SBUTTERFLY wd, %5, %7, %14
+ SBUTTERFLY wd, %6, %8, %14
+ SBUTTERFLY wd, %9, %11, %14
+ SBUTTERFLY wd, %10, %12, %14
+ SBUTTERFLY wd, %13, %15, %14
+ mova m%14, %17
+ mova %17, m%12
+ SBUTTERFLY wd, %14, %16, %12
+ SBUTTERFLY dq, %1, %5, %12
+ SBUTTERFLY dq, %2, %6, %12
+ SBUTTERFLY dq, %3, %7, %12
+ SBUTTERFLY dq, %4, %8, %12
+ SBUTTERFLY dq, %9, %13, %12
+ SBUTTERFLY dq, %10, %14, %12
+ SBUTTERFLY dq, %11, %15, %12
+ mova m%12, %17
+ mova %17, m%8
+ SBUTTERFLY dq, %12, %16, %8
+ SBUTTERFLY qdq, %1, %9, %8
+ SBUTTERFLY qdq, %2, %10, %8
+ SBUTTERFLY qdq, %3, %11, %8
+ SBUTTERFLY qdq, %4, %12, %8
+ SBUTTERFLY qdq, %5, %13, %8
+ SBUTTERFLY qdq, %6, %14, %8
+ SBUTTERFLY qdq, %7, %15, %8
+ mova m%8, %17
+ mova %17, m%1
+ SBUTTERFLY qdq, %8, %16, %1
+ mova m%1, %17
+ SWAP %2, %9
+ SWAP %3, %5
+ SWAP %4, %13
+ SWAP %6, %11
+ SWAP %8, %15
+ SWAP %12, %14
+%endmacro
+
+%macro TRANSPOSE8x8B 13
+ SBUTTERFLY bw, %1, %2, %7
+ movdq%10 m%7, %9
+ movdqa %11, m%2
+ SBUTTERFLY bw, %3, %4, %2
+ SBUTTERFLY bw, %5, %6, %2
+ SBUTTERFLY bw, %7, %8, %2
+ SBUTTERFLY wd, %1, %3, %2
+ movdqa m%2, %11
+ movdqa %11, m%3
+ SBUTTERFLY wd, %2, %4, %3
+ SBUTTERFLY wd, %5, %7, %3
+ SBUTTERFLY wd, %6, %8, %3
+ SBUTTERFLY dq, %1, %5, %3
+ SBUTTERFLY dq, %2, %6, %3
+ movdqa m%3, %11
+ movh %12, m%2
+ movhps %13, m%2
+ SBUTTERFLY dq, %3, %7, %2
+ SBUTTERFLY dq, %4, %8, %2
+ SWAP %2, %5
+ SWAP %4, %7
+%endmacro
+
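+; Address helpers for the 16 rows around the filtered edge: the "real" variant
+; points at the picture itself (split across dstq and dst2q), while the
+; "transposed" variant below points at the on-stack staging buffer used by the
+; horizontal filter after transposing.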
+%macro DEFINE_REAL_P7_TO_Q7 0-1 0
+%define P7 dstq + 4*mstrideq + %1
+%define P6 dstq + mstride3q + %1
+%define P5 dstq + 2*mstrideq + %1
+%define P4 dstq + mstrideq + %1
+%define P3 dstq + %1
+%define P2 dstq + strideq + %1
+%define P1 dstq + 2* strideq + %1
+%define P0 dstq + stride3q + %1
+%define Q0 dstq + 4* strideq + %1
+%define Q1 dst2q + mstride3q + %1
+%define Q2 dst2q + 2*mstrideq + %1
+%define Q3 dst2q + mstrideq + %1
+%define Q4 dst2q + %1
+%define Q5 dst2q + strideq + %1
+%define Q6 dst2q + 2* strideq + %1
+%define Q7 dst2q + stride3q + %1
+%endmacro
+
+%macro DEFINE_TRANSPOSED_P7_TO_Q7 0-1 0
+%define P3 rsp + 0*mmsize + %1
+%define P2 rsp + 1*mmsize + %1
+%define P1 rsp + 2*mmsize + %1
+%define P0 rsp + 3*mmsize + %1
+%define Q0 rsp + 4*mmsize + %1
+%define Q1 rsp + 5*mmsize + %1
+%define Q2 rsp + 6*mmsize + %1
+%define Q3 rsp + 7*mmsize + %1
+%if mmsize == 16
+%define P7 rsp + 8*mmsize + %1
+%define P6 rsp + 9*mmsize + %1
+%define P5 rsp + 10*mmsize + %1
+%define P4 rsp + 11*mmsize + %1
+%define Q4 rsp + 12*mmsize + %1
+%define Q5 rsp + 13*mmsize + %1
+%define Q6 rsp + 14*mmsize + %1
+%define Q7 rsp + 15*mmsize + %1
+%endif
+%endmacro
+
+; ..............AB -> AAAAAAAABBBBBBBB
+%macro SPLATB_MIX 1-2 [mask_mix]
+%if cpuflag(ssse3)
+ pshufb %1, %2
+%else
+ punpcklbw %1, %1
+ punpcklwd %1, %1
+ punpckldq %1, %1
+%endif
+%endmacro
+
+%macro LOOPFILTER 5 ; %1=v/h %2=size1 %3+%4=stack, %5=mmx/32bit stack only
+%assign %%ext 0
+%if ARCH_X86_32 || mmsize == 8
+%assign %%ext %5
+%endif
+
+%if UNIX64
+cglobal vp9_loop_filter_%1_%2_ %+ mmsize, 5, 9, 16, %3 + %4 + %%ext, dst, stride, E, I, H, mstride, dst2, stride3, mstride3
+%else
+%if WIN64
+cglobal vp9_loop_filter_%1_%2_ %+ mmsize, 4, 8, 16, %3 + %4 + %%ext, dst, stride, E, I, mstride, dst2, stride3, mstride3
+%else
+cglobal vp9_loop_filter_%1_%2_ %+ mmsize, 2, 6, 16, %3 + %4 + %%ext, dst, stride, mstride, dst2, stride3, mstride3
+%define Ed dword r2m
+%define Id dword r3m
+%endif
+%define Hd dword r4m
+%endif
+
+ mov mstrideq, strideq
+ neg mstrideq
+
+ lea stride3q, [strideq*3]
+ lea mstride3q, [mstrideq*3]
+
+%ifidn %1, h
+%if %2 != 16
+%if mmsize == 16
+%define movx movh
+%else
+%define movx mova
+%endif
+ lea dstq, [dstq + 4*strideq - 4]
+%else
+%define movx movu
+ lea dstq, [dstq + 4*strideq - 8] ; go from top center (h pos) to center left (v pos)
+%endif
+%else
+ lea dstq, [dstq + 4*mstrideq]
+%endif
+ ; FIXME we shouldn't need two dst registers if mmsize == 8
+ lea dst2q, [dstq + 8*strideq]
+
+ DEFINE_REAL_P7_TO_Q7
+
+%ifidn %1, h
+ movx m0, [P7]
+ movx m1, [P6]
+ movx m2, [P5]
+ movx m3, [P4]
+ movx m4, [P3]
+ movx m5, [P2]
+%if (ARCH_X86_64 && mmsize == 16) || %2 > 16
+ movx m6, [P1]
+%endif
+ movx m7, [P0]
+%ifdef m8
+ movx m8, [Q0]
+ movx m9, [Q1]
+ movx m10, [Q2]
+ movx m11, [Q3]
+ movx m12, [Q4]
+ movx m13, [Q5]
+ movx m14, [Q6]
+ movx m15, [Q7]
+ DEFINE_TRANSPOSED_P7_TO_Q7
+%if %2 == 16
+ TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
+ mova [P7], m0
+ mova [P6], m1
+ mova [P5], m2
+ mova [P4], m3
+%else ; %2 == 44/48/84/88
+ ; 8x16 transpose
+ punpcklbw m0, m1
+ punpcklbw m2, m3
+ punpcklbw m4, m5
+ punpcklbw m6, m7
+ punpcklbw m8, m9
+ punpcklbw m10, m11
+ punpcklbw m12, m13
+ punpcklbw m14, m15
+ TRANSPOSE8x8W 0, 2, 4, 6, 8, 10, 12, 14, 15
+ SWAP 0, 4
+ SWAP 2, 5
+ SWAP 0, 6
+ SWAP 0, 7
+ SWAP 10, 9
+ SWAP 12, 10
+ SWAP 14, 11
+%endif ; %2
+ mova [P3], m4
+ mova [P2], m5
+ mova [P1], m6
+ mova [P0], m7
+ mova [Q0], m8
+ mova [Q1], m9
+ mova [Q2], m10
+ mova [Q3], m11
+%if %2 == 16
+ mova [Q4], m12
+ mova [Q5], m13
+ mova [Q6], m14
+ mova [Q7], m15
+%endif ; %2
+%else ; x86-32
+%if %2 == 16
+ TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [P1], u, [rsp+%3+%4], [rsp+64], [rsp+80]
+ DEFINE_TRANSPOSED_P7_TO_Q7
+ movh [P7], m0
+ movh [P5], m1
+ movh [P3], m2
+ movh [P1], m3
+ movh [Q2], m5
+ movh [Q4], m6
+ movh [Q6], m7
+ movhps [P6], m0
+ movhps [P4], m1
+ movhps [P2], m2
+ movhps [P0], m3
+ movhps [Q3], m5
+ movhps [Q5], m6
+ movhps [Q7], m7
+ DEFINE_REAL_P7_TO_Q7
+ movx m0, [Q0]
+ movx m1, [Q1]
+ movx m2, [Q2]
+ movx m3, [Q3]
+ movx m4, [Q4]
+ movx m5, [Q5]
+ movx m7, [Q7]
+ TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [Q6], u, [rsp+%3+%4], [rsp+72], [rsp+88]
+ DEFINE_TRANSPOSED_P7_TO_Q7 8
+ movh [P7], m0
+ movh [P5], m1
+ movh [P3], m2
+ movh [P1], m3
+ movh [Q2], m5
+ movh [Q4], m6
+ movh [Q6], m7
+ movhps [P6], m0
+ movhps [P4], m1
+ movhps [P2], m2
+ movhps [P0], m3
+ movhps [Q3], m5
+ movhps [Q5], m6
+ movhps [Q7], m7
+ DEFINE_TRANSPOSED_P7_TO_Q7
+%elif %2 > 16 ; %2 == 44/48/84/88
+ punpcklbw m0, m1
+ punpcklbw m2, m3
+ punpcklbw m4, m5
+ punpcklbw m6, m7
+ movx m1, [Q0]
+ movx m3, [Q1]
+ movx m5, [Q2]
+ movx m7, [Q3]
+ punpcklbw m1, m3
+ punpcklbw m5, m7
+ movx m3, [Q4]
+ movx m7, [Q5]
+ punpcklbw m3, m7
+ mova [rsp], m3
+ movx m3, [Q6]
+ movx m7, [Q7]
+ punpcklbw m3, m7
+ DEFINE_TRANSPOSED_P7_TO_Q7
+ TRANSPOSE8x8W 0, 2, 4, 6, 1, 5, 7, 3, [rsp], [Q0], 1
+ mova [P3], m0
+ mova [P2], m2
+ mova [P1], m4
+ mova [P0], m6
+ mova [Q1], m5
+ mova [Q2], m7
+ mova [Q3], m3
+%else ; %2 == 4 || %2 == 8
+ SBUTTERFLY bw, 0, 1, 6
+ SBUTTERFLY bw, 2, 3, 6
+ SBUTTERFLY bw, 4, 5, 6
+ mova [rsp+4*mmsize], m5
+ mova m6, [P1]
+ SBUTTERFLY bw, 6, 7, 5
+ DEFINE_TRANSPOSED_P7_TO_Q7
+ TRANSPOSE4x4W 0, 2, 4, 6, 5
+ mova [P3], m0
+ mova [P2], m2
+ mova [P1], m4
+ mova [P0], m6
+ mova m5, [rsp+4*mmsize]
+ TRANSPOSE4x4W 1, 3, 5, 7, 0
+ mova [Q0], m1
+ mova [Q1], m3
+ mova [Q2], m5
+ mova [Q3], m7
+%endif ; %2
+%endif ; x86-32/64
+%endif ; %1 == h
+
+ ; calc fm mask
+%if %2 == 16 || mmsize == 8
+%if cpuflag(ssse3)
+ pxor m0, m0
+%endif
+ SPLATB_REG m2, I, m0 ; I I I I ...
+ SPLATB_REG m3, E, m0 ; E E E E ...
+%else
+%if cpuflag(ssse3)
+ mova m0, [mask_mix]
+%endif
+ movd m2, Id
+ movd m3, Ed
+ SPLATB_MIX m2, m0
+ SPLATB_MIX m3, m0
+%endif
+ mova m0, [pb_80]
+ pxor m2, m0
+ pxor m3, m0
+%ifdef m8
+%ifidn %1, v
+ mova m8, [P3]
+ mova m9, [P2]
+ mova m10, [P1]
+ mova m11, [P0]
+ mova m12, [Q0]
+ mova m13, [Q1]
+ mova m14, [Q2]
+ mova m15, [Q3]
+%else
+ ; In the horizontal case, P3..Q3 are already present in some registers due
+ ; to the previous transpose, so we just swap registers.
+ SWAP 8, 4, 12
+ SWAP 9, 5, 13
+ SWAP 10, 6, 14
+ SWAP 11, 7, 15
+%endif
+%define rp3 m8
+%define rp2 m9
+%define rp1 m10
+%define rp0 m11
+%define rq0 m12
+%define rq1 m13
+%define rq2 m14
+%define rq3 m15
+%else
+%define rp3 [P3]
+%define rp2 [P2]
+%define rp1 [P1]
+%define rp0 [P0]
+%define rq0 [Q0]
+%define rq1 [Q1]
+%define rq2 [Q2]
+%define rq3 [Q3]
+%endif
+ ABSSUB_GT m5, rp3, rp2, m2, m7, m0 ; m5 = abs(p3-p2) <= I
+ ABSSUB_GT m1, rp2, rp1, m2, m7, m0 ; m1 = abs(p2-p1) <= I
+ por m5, m1
+ ABSSUB_GT m1, rp1, rp0, m2, m7, m0 ; m1 = abs(p1-p0) <= I
+ por m5, m1
+ ABSSUB_GT m1, rq0, rq1, m2, m7, m0 ; m1 = abs(q1-q0) <= I
+ por m5, m1
+ ABSSUB_GT m1, rq1, rq2, m2, m7, m0 ; m1 = abs(q2-q1) <= I
+ por m5, m1
+ ABSSUB_GT m1, rq2, rq3, m2, m7, m0 ; m1 = abs(q3-q2) <= I
+ por m5, m1
+ ABSSUB m1, rp0, rq0, m7 ; abs(p0-q0)
+ paddusb m1, m1 ; abs(p0-q0) * 2
+ ABSSUB m2, rp1, rq1, m7 ; abs(p1-q1)
+ pand m2, [pb_fe] ; drop lsb so shift can work
+ psrlq m2, 1 ; abs(p1-q1)/2
+ paddusb m1, m2 ; abs(p0-q0)*2 + abs(p1-q1)/2
+ pxor m1, m0
+ pcmpgtb m1, m3
+ por m1, m5 ; fm final value
+ SWAP 1, 3
+ pxor m3, [pb_ff]
+
+ ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3)
+ ; calc flat8in (if not 44_16) and hev masks
+%if %2 != 44 && %2 != 4
+ mova m6, [pb_81] ; [1 1 1 1 ...] ^ 0x80
+ ABSSUB_GT m2, rp3, rp0, m6, m5 ; abs(p3 - p0) <= 1
+%ifdef m8
+ mova m8, [pb_80]
+%define rb80 m8
+%else
+%define rb80 [pb_80]
+%endif
+ ABSSUB_GT m1, rp2, rp0, m6, m5, rb80 ; abs(p2 - p0) <= 1
+ por m2, m1
+ ABSSUB m4, rp1, rp0, m5 ; abs(p1 - p0)
+%if %2 <= 16
+%if cpuflag(ssse3)
+ pxor m0, m0
+%endif
+ SPLATB_REG m7, H, m0 ; H H H H ...
+%else
+ movd m7, Hd
+ SPLATB_MIX m7
+%endif
+ pxor m7, rb80
+ pxor m4, rb80
+ pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition)
+ CMP_GT m4, m6 ; abs(p1 - p0) <= 1
+ por m2, m4 ; (flat8in)
+ ABSSUB m4, rq1, rq0, m1 ; abs(q1 - q0)
+ pxor m4, rb80
+ pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition)
+ por m0, m5 ; hev final value
+ CMP_GT m4, m6 ; abs(q1 - q0) <= 1
+ por m2, m4 ; (flat8in)
+ ABSSUB_GT m1, rq2, rq0, m6, m5, rb80 ; abs(q2 - q0) <= 1
+ por m2, m1
+ ABSSUB_GT m1, rq3, rq0, m6, m5, rb80 ; abs(q3 - q0) <= 1
+ por m2, m1 ; flat8in final value
+ pxor m2, [pb_ff]
+%if %2 == 84 || %2 == 48
+ pand m2, [mask_mix%2]
+%endif
+%else
+ mova m6, [pb_80]
+%if %2 == 44
+ movd m7, Hd
+ SPLATB_MIX m7
+%else
+%if cpuflag(ssse3)
+ pxor m0, m0
+%endif
+ SPLATB_REG m7, H, m0 ; H H H H ...
+%endif
+ pxor m7, m6
+ ABSSUB m4, rp1, rp0, m1 ; abs(p1 - p0)
+ pxor m4, m6
+ pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition)
+ ABSSUB m4, rq1, rq0, m1 ; abs(q1 - q0)
+ pxor m4, m6
+ pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition)
+ por m0, m5 ; hev final value
+%endif
+
+%if %2 == 16
+ ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3)
+ ; calc flat8out mask
+%ifdef m8
+ mova m8, [P7]
+ mova m9, [P6]
+%define rp7 m8
+%define rp6 m9
+%else
+%define rp7 [P7]
+%define rp6 [P6]
+%endif
+ ABSSUB_GT m1, rp7, rp0, m6, m5 ; abs(p7 - p0) <= 1
+ ABSSUB_GT m7, rp6, rp0, m6, m5 ; abs(p6 - p0) <= 1
+ por m1, m7
+%ifdef m8
+ mova m8, [P5]
+ mova m9, [P4]
+%define rp5 m8
+%define rp4 m9
+%else
+%define rp5 [P5]
+%define rp4 [P4]
+%endif
+ ABSSUB_GT m7, rp5, rp0, m6, m5 ; abs(p5 - p0) <= 1
+ por m1, m7
+ ABSSUB_GT m7, rp4, rp0, m6, m5 ; abs(p4 - p0) <= 1
+ por m1, m7
+%ifdef m8
+ mova m14, [Q4]
+ mova m15, [Q5]
+%define rq4 m14
+%define rq5 m15
+%else
+%define rq4 [Q4]
+%define rq5 [Q5]
+%endif
+ ABSSUB_GT m7, rq4, rq0, m6, m5 ; abs(q4 - q0) <= 1
+ por m1, m7
+ ABSSUB_GT m7, rq5, rq0, m6, m5 ; abs(q5 - q0) <= 1
+ por m1, m7
+%ifdef m8
+ mova m14, [Q6]
+ mova m15, [Q7]
+%define rq6 m14
+%define rq7 m15
+%else
+%define rq6 [Q6]
+%define rq7 [Q7]
+%endif
+ ABSSUB_GT m7, rq6, rq0, m6, m5 ; abs(q6 - q0) <= 1
+ por m1, m7
+ ABSSUB_GT m7, rq7, rq0, m6, m5 ; abs(q7 - q0) <= 1
+ por m1, m7 ; flat8out final value
+ pxor m1, [pb_ff]
+%endif
+
+ ; if (fm) {
+ ; if (out && in) filter_14()
+ ; else if (in) filter_6()
+ ; else if (hev) filter_2()
+ ; else filter_4()
+ ; }
+ ;
+ ; f14: fm & out & in
+ ; f6: fm & ~f14 & in => fm & ~(out & in) & in => fm & ~out & in
+ ; f2: fm & ~f14 & ~f6 & hev => fm & ~(out & in) & ~(~out & in) & hev => fm & ~in & hev
+ ; f4: fm & ~f14 & ~f6 & ~f2 => fm & ~(out & in) & ~(~out & in) & ~(~in & hev) => fm & ~in & ~hev
+
+ ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7)
+ ; filter2()
+%if %2 != 44 && %2 != 4
+ mova m6, [pb_80] ; already in m6 if 44_16
+ SCRATCH 2, 15, rsp+%3+%4
+%if %2 == 16
+ SCRATCH 1, 8, rsp+%3+%4+16
+%endif
+%endif
+ pxor m2, m6, rq0 ; q0 ^ 0x80
+ pxor m4, m6, rp0 ; p0 ^ 0x80
+ psubsb m2, m4 ; (signed) q0 - p0
+ pxor m4, m6, rp1 ; p1 ^ 0x80
+ pxor m5, m6, rq1 ; q1 ^ 0x80
+ psubsb m4, m5 ; (signed) p1 - q1
+ paddsb m4, m2 ; (q0 - p0) + (p1 - q1)
+ paddsb m4, m2 ; 2*(q0 - p0) + (p1 - q1)
+ paddsb m4, m2 ; 3*(q0 - p0) + (p1 - q1)
+ paddsb m6, m4, [pb_4] ; m6: f1 = clip(f + 4, 127)
+ paddsb m4, [pb_3] ; m4: f2 = clip(f + 3, 127)
+%ifdef m8
+ mova m14, [pb_10] ; will be reused in filter4()
+%define rb10 m14
+%else
+%define rb10 [pb_10]
+%endif
+ SRSHIFT3B_2X m6, m4, rb10, m7 ; f1 and f2 sign byte shift by 3
+ SIGN_SUB m7, rq0, m6, m5 ; m7 = q0 - f1
+ SIGN_ADD m1, rp0, m4, m5 ; m1 = p0 + f2
+%if %2 != 44 && %2 != 4
+%ifdef m8
+ pandn m6, m15, m3 ; ~mask(in) & mask(fm)
+%else
+ mova m6, [rsp+%3+%4]
+ pandn m6, m3
+%endif
+ pand m6, m0 ; (~mask(in) & mask(fm)) & mask(hev)
+%else
+ pand m6, m3, m0
+%endif
+ MASK_APPLY m7, rq0, m6, m5 ; m7 = filter2(q0) & mask / we write it in filter4()
+ MASK_APPLY m1, rp0, m6, m5 ; m1 = filter2(p0) & mask / we write it in filter4()
+
+ ; (m0: hev, m1: p0', m2: q0-p0, m3: fm, m7: q0', [m8: flat8out], m10..13: p1 p0 q0 q1, m14: pb_10, [m15: flat8in], )
+ ; filter4()
+ mova m4, m2
+ paddsb m2, m4 ; 2 * (q0 - p0)
+ paddsb m2, m4 ; 3 * (q0 - p0)
+ paddsb m6, m2, [pb_4] ; m6: f1 = clip(f + 4, 127)
+ paddsb m2, [pb_3] ; m2: f2 = clip(f + 3, 127)
+ SRSHIFT3B_2X m6, m2, rb10, m4 ; f1 and f2 sign byte shift by 3
+%if %2 != 44 && %2 != 4
+%ifdef m8
+ pandn m5, m15, m3 ; ~mask(in) & mask(fm)
+%else
+ mova m5, [rsp+%3+%4]
+ pandn m5, m3
+%endif
+ pandn m0, m5 ; ~mask(hev) & (~mask(in) & mask(fm))
+%else
+ pandn m0, m3
+%endif
+ SIGN_SUB m5, rq0, m6, m4 ; q0 - f1
+ MASK_APPLY m5, m7, m0, m4 ; filter4(q0) & mask
+ mova [Q0], m5
+ SIGN_ADD m7, rp0, m2, m4 ; p0 + f2
+ MASK_APPLY m7, m1, m0, m4 ; filter4(p0) & mask
+ mova [P0], m7
+ paddb m6, [pb_80] ;
+ pxor m1, m1 ; f=(f1+1)>>1
+ pavgb m6, m1 ;
+ psubb m6, [pb_40] ;
+ SIGN_ADD m1, rp1, m6, m2 ; p1 + f
+ SIGN_SUB m4, rq1, m6, m2 ; q1 - f
+ MASK_APPLY m1, rp1, m0, m2 ; m1 = filter4(p1)
+ MASK_APPLY m4, rq1, m0, m2 ; m4 = filter4(q1)
+ mova [P1], m1
+ mova [Q1], m4
+
+%if %2 != 44 && %2 != 4
+ UNSCRATCH 2, 15, rsp+%3+%4
+%endif
+
+ ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1)
+ ; filter6()
+%if %2 != 44 && %2 != 4
+ pxor m0, m0
+%if %2 != 16
+ pand m3, m2
+%else
+ pand m2, m3 ; mask(fm) & mask(in)
+%ifdef m8
+ pandn m3, m8, m2 ; ~mask(out) & (mask(fm) & mask(in))
+%else
+ mova m3, [rsp+%3+%4+16]
+ pandn m3, m2
+%endif
+%endif
+%ifdef m8
+ mova m14, [P3]
+ mova m9, [Q3]
+%define rp3 m14
+%define rq3 m9
+%else
+%define rp3 [P3]
+%define rq3 [Q3]
+%endif
+ mova m1, [P2]
+ FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m1 ; [p2]
+ mova m1, [Q2]
+ FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", rq1, "", 1 ; [p1] -p3 -p2 +p1 +q1
+ FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m1 ; [p0] -p3 -p1 +p0 +q2
+ FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", rq3, "", 1 ; [q0] -p3 -p0 +q0 +q3
+ FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, "" ; [q1] -p2 -q0 +q1 +q3
+ FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m1 ; [q2] -p1 -q1 +q2 +q3
+%endif
+
+%if %2 == 16
+ UNSCRATCH 1, 8, rsp+%3+%4+16
+%endif
+
+ ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2)
+ ; filter14()
+ ;
+ ; m2 m3 m8 m9 m14 m15 m10 m11 m12 m13
+ ;
+ ; q2 q3 p3 p2 p1 p0 q0 q1
+ ; p6 -7 p7 p6 p5 p4 . . . . .
+ ; p5 -6 -p7 -p6 +p5 +q1 . . . .
+ ; p4 -5 -p7 -p5 +p4 +q2 . . . q2
+ ; p3 -4 -p7 -p4 +p3 +q3 . . . q3
+ ; p2 -3 -p7 -p3 +p2 +q4 . . . q4
+ ; p1 -2 -p7 -p2 +p1 +q5 . . . q5
+ ; p0 -1 -p7 -p1 +p0 +q6 . . . q6
+ ; q0 +0 -p7 -p0 +q0 +q7 . . . q7
+ ; q1 +1 -p6 -q0 +q1 +q7 q1 . . .
+ ; q2 +2 -p5 -q1 +q2 +q7 . q2 . .
+ ; q3 +3 -p4 -q2 +q3 +q7 . q3 . .
+ ; q4 +4 -p3 -q3 +q4 +q7 . q4 . .
+ ; q5 +5 -p2 -q4 +q5 +q7 . q5 . .
+ ; q6 +6 -p1 -q5 +q6 +q7 . q6 . .
+
+%if %2 == 16
+ pand m1, m2 ; mask(out) & (mask(fm) & mask(in))
+ mova m2, [P7]
+ mova m3, [P6]
+%ifdef m8
+ mova m8, [P5]
+ mova m9, [P4]
+%define rp5 m8
+%define rp4 m9
+%define rp5s m8
+%define rp4s m9
+%define rp3s m14
+%define rq4 m8
+%define rq5 m9
+%define rq6 m14
+%define rq7 m15
+%define rq4s m8
+%define rq5s m9
+%define rq6s m14
+%else
+%define rp5 [P5]
+%define rp4 [P4]
+%define rp5s ""
+%define rp4s ""
+%define rp3s ""
+%define rq4 [Q4]
+%define rq5 [Q5]
+%define rq6 [Q6]
+%define rq7 [Q7]
+%define rq4s ""
+%define rq5s ""
+%define rq6s ""
+%endif
+ FILTER_INIT m4, m5, m6, m7, [P6], %4, 14, m1, m3 ; [p6]
+ FILTER_UPDATE m4, m5, m6, m7, [P5], %4, 8, 9, 10, 5, 4, m1, rp5s ; [p5] -p7 -p6 +p5 +q1
+ FILTER_UPDATE m4, m5, m6, m7, [P4], %4, 8, 10, 11, 6, 4, m1, rp4s ; [p4] -p7 -p5 +p4 +q2
+ FILTER_UPDATE m4, m5, m6, m7, [P3], %4, 8, 11, 0, 7, 4, m1, rp3s ; [p3] -p7 -p4 +p3 +q3
+ FILTER_UPDATE m4, m5, m6, m7, [P2], %4, 8, 0, 1, 12, 4, m1, "", rq4, [Q4], 1 ; [p2] -p7 -p3 +p2 +q4
+ FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 8, 1, 2, 13, 4, m1, "", rq5, [Q5], 1 ; [p1] -p7 -p2 +p1 +q5
+ FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 8, 2, 3, 14, 4, m1, "", rq6, [Q6], 1 ; [p0] -p7 -p1 +p0 +q6
+ FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 8, 3, 4, 15, 4, m1, "", rq7, [Q7], 1 ; [q0] -p7 -p0 +q0 +q7
+ FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 9, 4, 5, 15, 4, m1, "" ; [q1] -p6 -q0 +q1 +q7
+ FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 10, 5, 6, 15, 4, m1, "" ; [q2] -p5 -q1 +q2 +q7
+ FILTER_UPDATE m4, m5, m6, m7, [Q3], %4, 11, 6, 7, 15, 4, m1, "" ; [q3] -p4 -q2 +q3 +q7
+ FILTER_UPDATE m4, m5, m6, m7, [Q4], %4, 0, 7, 12, 15, 4, m1, rq4s ; [q4] -p3 -q3 +q4 +q7
+ FILTER_UPDATE m4, m5, m6, m7, [Q5], %4, 1, 12, 13, 15, 4, m1, rq5s ; [q5] -p2 -q4 +q5 +q7
+ FILTER_UPDATE m4, m5, m6, m7, [Q6], %4, 2, 13, 14, 15, 4, m1, rq6s ; [q6] -p1 -q5 +q6 +q7
+%endif
+
+%ifidn %1, h
+%if %2 == 16
+ mova m0, [P7]
+ mova m1, [P6]
+ mova m2, [P5]
+ mova m3, [P4]
+ mova m4, [P3]
+ mova m5, [P2]
+%if ARCH_X86_64
+ mova m6, [P1]
+%endif
+ mova m7, [P0]
+%if ARCH_X86_64
+ mova m8, [Q0]
+ mova m9, [Q1]
+ mova m10, [Q2]
+ mova m11, [Q3]
+ mova m12, [Q4]
+ mova m13, [Q5]
+ mova m14, [Q6]
+ mova m15, [Q7]
+ TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
+ DEFINE_REAL_P7_TO_Q7
+ movu [P7], m0
+ movu [P6], m1
+ movu [P5], m2
+ movu [P4], m3
+ movu [P3], m4
+ movu [P2], m5
+ movu [P1], m6
+ movu [P0], m7
+ movu [Q0], m8
+ movu [Q1], m9
+ movu [Q2], m10
+ movu [Q3], m11
+ movu [Q4], m12
+ movu [Q5], m13
+ movu [Q6], m14
+ movu [Q7], m15
+%else
+ DEFINE_REAL_P7_TO_Q7
+ TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+32], a, [rsp+%3+%4], [Q0], [Q1]
+ movh [P7], m0
+ movh [P5], m1
+ movh [P3], m2
+ movh [P1], m3
+ movh [Q2], m5
+ movh [Q4], m6
+ movh [Q6], m7
+ movhps [P6], m0
+ movhps [P4], m1
+ movhps [P2], m2
+ movhps [P0], m3
+ movhps [Q3], m5
+ movhps [Q5], m6
+ movhps [Q7], m7
+ DEFINE_TRANSPOSED_P7_TO_Q7
+ mova m0, [Q0]
+ mova m1, [Q1]
+ mova m2, [Q2]
+ mova m3, [Q3]
+ mova m4, [Q4]
+ mova m5, [Q5]
+ mova m7, [Q7]
+ DEFINE_REAL_P7_TO_Q7 8
+ TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+224], a, [rsp+%3+%4], [Q0], [Q1]
+ movh [P7], m0
+ movh [P5], m1
+ movh [P3], m2
+ movh [P1], m3
+ movh [Q2], m5
+ movh [Q4], m6
+ movh [Q6], m7
+ movhps [P6], m0
+ movhps [P4], m1
+ movhps [P2], m2
+ movhps [P0], m3
+ movhps [Q3], m5
+ movhps [Q5], m6
+ movhps [Q7], m7
+%endif
+%elif %2 == 44 || %2 == 4
+ SWAP 0, 1 ; m0 = p1
+ SWAP 1, 7 ; m1 = p0
+ SWAP 2, 5 ; m2 = q0
+ SWAP 3, 4 ; m3 = q1
+ DEFINE_REAL_P7_TO_Q7 2
+ SBUTTERFLY bw, 0, 1, 4
+ SBUTTERFLY bw, 2, 3, 4
+ SBUTTERFLY wd, 0, 2, 4
+ SBUTTERFLY wd, 1, 3, 4
+%if mmsize == 16
+ movd [P7], m0
+ movd [P3], m2
+ movd [Q0], m1
+ movd [Q4], m3
+ psrldq m0, 4
+ psrldq m1, 4
+ psrldq m2, 4
+ psrldq m3, 4
+ movd [P6], m0
+ movd [P2], m2
+ movd [Q1], m1
+ movd [Q5], m3
+ psrldq m0, 4
+ psrldq m1, 4
+ psrldq m2, 4
+ psrldq m3, 4
+ movd [P5], m0
+ movd [P1], m2
+ movd [Q2], m1
+ movd [Q6], m3
+ psrldq m0, 4
+ psrldq m1, 4
+ psrldq m2, 4
+ psrldq m3, 4
+ movd [P4], m0
+ movd [P0], m2
+ movd [Q3], m1
+ movd [Q7], m3
+%else
+ movd [P7], m0
+ movd [P5], m2
+ movd [P3], m1
+ movd [P1], m3
+ psrlq m0, 32
+ psrlq m2, 32
+ psrlq m1, 32
+ psrlq m3, 32
+ movd [P6], m0
+ movd [P4], m2
+ movd [P2], m1
+ movd [P0], m3
+%endif
+%else
+ ; the following code does a transpose of 8 full lines to 16 half
+ ; lines (high part). It is inlined to avoid the need for a staging area
+ mova m0, [P3]
+ mova m1, [P2]
+ mova m2, [P1]
+ mova m3, [P0]
+ mova m4, [Q0]
+ mova m5, [Q1]
+%ifdef m8
+ mova m6, [Q2]
+%endif
+ mova m7, [Q3]
+ DEFINE_REAL_P7_TO_Q7
+%ifdef m8
+ SBUTTERFLY bw, 0, 1, 8
+ SBUTTERFLY bw, 2, 3, 8
+ SBUTTERFLY bw, 4, 5, 8
+ SBUTTERFLY bw, 6, 7, 8
+ SBUTTERFLY wd, 0, 2, 8
+ SBUTTERFLY wd, 1, 3, 8
+ SBUTTERFLY wd, 4, 6, 8
+ SBUTTERFLY wd, 5, 7, 8
+ SBUTTERFLY dq, 0, 4, 8
+ SBUTTERFLY dq, 1, 5, 8
+ SBUTTERFLY dq, 2, 6, 8
+ SBUTTERFLY dq, 3, 7, 8
+%else
+ SBUTTERFLY bw, 0, 1, 6
+ mova [rsp+mmsize*4], m1
+ mova m6, [rsp+mmsize*6]
+ SBUTTERFLY bw, 2, 3, 1
+ SBUTTERFLY bw, 4, 5, 1
+ SBUTTERFLY bw, 6, 7, 1
+ SBUTTERFLY wd, 0, 2, 1
+ mova [rsp+mmsize*6], m2
+ mova m1, [rsp+mmsize*4]
+ SBUTTERFLY wd, 1, 3, 2
+ SBUTTERFLY wd, 4, 6, 2
+ SBUTTERFLY wd, 5, 7, 2
+ SBUTTERFLY dq, 0, 4, 2
+ SBUTTERFLY dq, 1, 5, 2
+%if mmsize == 16
+ movh [Q0], m1
+ movhps [Q1], m1
+%else
+ mova [P3], m1
+%endif
+ mova m2, [rsp+mmsize*6]
+ SBUTTERFLY dq, 2, 6, 1
+ SBUTTERFLY dq, 3, 7, 1
+%endif
+ SWAP 3, 6
+ SWAP 1, 4
+%if mmsize == 16
+ movh [P7], m0
+ movhps [P6], m0
+ movh [P5], m1
+ movhps [P4], m1
+ movh [P3], m2
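+; Signed per-byte >> 3 on two registers, emulated with qword shifts: mask off
+; the low 3 bits of every byte so psrlq cannot leak bits across byte borders,
+; then sign-extend the remaining 5-bit field via (x ^ 0x10) - 0x10 (%3 is
+; expected to hold pb_10).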
+ movhps [P2], m2
+ movh [P1], m3
+ movhps [P0], m3
+%ifdef m8
+ movh [Q0], m4
+ movhps [Q1], m4
+%endif
+ movh [Q2], m5
+ movhps [Q3], m5
+ movh [Q4], m6
+ movhps [Q5], m6
+ movh [Q6], m7
+ movhps [Q7], m7
+%else
+ mova [P7], m0
+ mova [P6], m1
+ mova [P5], m2
+ mova [P4], m3
+ mova [P2], m5
+ mova [P1], m6
+ mova [P0], m7
+%endif
+%endif
+%endif
+
+ RET
+%endmacro
+
+%macro LPF_16_VH 5
+INIT_XMM %5
+LOOPFILTER v, %1, %2, 0, %4
+LOOPFILTER h, %1, %2, %3, %4
+%endmacro
+
+%macro LPF_16_VH_ALL_OPTS 4
+LPF_16_VH %1, %2, %3, %4, sse2
+LPF_16_VH %1, %2, %3, %4, ssse3
+LPF_16_VH %1, %2, %3, %4, avx
+%endmacro
+
+LPF_16_VH_ALL_OPTS 16, 512, 256, 32
+LPF_16_VH_ALL_OPTS 44, 0, 128, 0
+LPF_16_VH_ALL_OPTS 48, 256, 128, 16
+LPF_16_VH_ALL_OPTS 84, 256, 128, 16
+LPF_16_VH_ALL_OPTS 88, 256, 128, 16
+
+INIT_MMX mmxext
+LOOPFILTER v, 4, 0, 0, 0
+LOOPFILTER h, 4, 0, 64, 0
+LOOPFILTER v, 8, 128, 0, 8
+LOOPFILTER h, 8, 128, 64, 8
diff --git a/media/ffvpx/libavcodec/x86/vp9lpf_16bpp.asm b/media/ffvpx/libavcodec/x86/vp9lpf_16bpp.asm
new file mode 100644
index 0000000000..c0888170c9
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9lpf_16bpp.asm
@@ -0,0 +1,823 @@
+;******************************************************************************
+;* VP9 loop filter SIMD optimizations
+;*
+;* Copyright (C) 2015 Ronald S. Bultje <rsbultje@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pw_511: times 16 dw 511
+pw_2047: times 16 dw 2047
+pw_16384: times 16 dw 16384
+pw_m512: times 16 dw -512
+pw_m2048: times 16 dw -2048
+
+cextern pw_1
+cextern pw_3
+cextern pw_4
+cextern pw_8
+cextern pw_16
+cextern pw_256
+cextern pw_1023
+cextern pw_4095
+cextern pw_m1
+
+SECTION .text
+
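+; Register spill helpers: on x86-64 the value stays in an xmm register (via
+; x86inc SWAP) and reg_<name> aliases that register; on x86-32 it is stored to
+; the given stack address and reg_<name> aliases the memory operand. PRELOAD
+; caches a constant in a register only when registers are plentiful (x86-64).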
+%macro SCRATCH 3-4
+%if ARCH_X86_64
+ SWAP %1, %2
+%if %0 == 4
+%define reg_%4 m%2
+%endif
+%else
+ mova [%3], m%1
+%if %0 == 4
+%define reg_%4 [%3]
+%endif
+%endif
+%endmacro
+
+%macro UNSCRATCH 3-4
+%if ARCH_X86_64
+ SWAP %1, %2
+%else
+ mova m%1, [%3]
+%endif
+%if %0 == 4
+%undef reg_%4
+%endif
+%endmacro
+
+%macro PRELOAD 2-3
+%if ARCH_X86_64
+ mova m%1, [%2]
+%if %0 == 3
+%define reg_%3 m%1
+%endif
+%elif %0 == 3
+%define reg_%3 [%2]
+%endif
+%endmacro
+
+; calculate p or q portion of flat8out
+%macro FLAT8OUT_HALF 0
+ psubw m4, m0 ; q4-q0
+ psubw m5, m0 ; q5-q0
+ psubw m6, m0 ; q6-q0
+ psubw m7, m0 ; q7-q0
+ ABS2 m4, m5, m2, m3 ; abs(q4-q0) | abs(q5-q0)
+ ABS2 m6, m7, m2, m3 ; abs(q6-q0) | abs(q7-q0)
+ pcmpgtw m4, reg_F ; abs(q4-q0) > F
+ pcmpgtw m5, reg_F ; abs(q5-q0) > F
+ pcmpgtw m6, reg_F ; abs(q6-q0) > F
+ pcmpgtw m7, reg_F ; abs(q7-q0) > F
+ por m5, m4
+ por m7, m6
+ por m7, m5 ; !flat8out, q portion
+%endmacro
+
+; calculate p or q portion of flat8in/hev/fm (excluding mb_edge condition)
+%macro FLAT8IN_HALF 1
+%if %1 > 4
+ psubw m4, m3, m0 ; q3-q0
+ psubw m5, m2, m0 ; q2-q0
+ ABS2 m4, m5, m6, m7 ; abs(q3-q0) | abs(q2-q0)
+ pcmpgtw m4, reg_F ; abs(q3-q0) > F
+ pcmpgtw m5, reg_F ; abs(q2-q0) > F
+%endif
+ psubw m3, m2 ; q3-q2
+ psubw m2, m1 ; q2-q1
+ ABS2 m3, m2, m6, m7 ; abs(q3-q2) | abs(q2-q1)
+ pcmpgtw m3, reg_I ; abs(q3-q2) > I
+ pcmpgtw m2, reg_I ; abs(q2-q1) > I
+%if %1 > 4
+ por m4, m5
+%endif
+ por m2, m3
+ psubw m3, m1, m0 ; q1-q0
+ ABS1 m3, m5 ; abs(q1-q0)
+%if %1 > 4
+ pcmpgtw m6, m3, reg_F ; abs(q1-q0) > F
+%endif
+ pcmpgtw m7, m3, reg_H ; abs(q1-q0) > H
+ pcmpgtw m3, reg_I ; abs(q1-q0) > I
+%if %1 > 4
+ por m4, m6
+%endif
+ por m2, m3
+%endmacro
+
+; one step in filter_14/filter_6
+;
+; take sum $reg, downshift, apply mask and write into dst
+;
+; if sub2/add1-2 are present, add/sub as appropriate to prepare for the next
+; step's sum $reg. This is omitted for the last row in each filter.
+;
+; if dont_store is set, don't write the result to memory; instead keep the
+; value in a register so it can be written out later
+%macro FILTER_STEP 6-10 "", "", "", 0 ; tmp, reg, mask, shift, dst, \
+ ; src/sub1, sub2, add1, add2, dont_store
+ psrlw %1, %2, %4
+ psubw %1, %6 ; abs->delta
+%ifnidn %7, ""
+ psubw %2, %6
+ psubw %2, %7
+ paddw %2, %8
+ paddw %2, %9
+%endif
+ pand %1, reg_%3 ; apply mask
+%if %10 == 1
+ paddw %6, %1 ; delta->abs
+%else
+ paddw %1, %6 ; delta->abs
+ mova [%5], %1
+%endif
+%endmacro
+
+; FIXME avx2 versions for 16_16 and mix2_{4,8}{4,8}
+
+%macro LOOP_FILTER 3 ; dir[h/v], wd[4/8/16], bpp[10/12]
+
+%if ARCH_X86_64
+%if %2 == 16
+%assign %%num_xmm_regs 16
+%elif %2 == 8
+%assign %%num_xmm_regs 15
+%else ; %2 == 4
+%assign %%num_xmm_regs 14
+%endif ; %2
+%assign %%bak_mem 0
+%else ; ARCH_X86_32
+%assign %%num_xmm_regs 8
+%if %2 == 16
+%assign %%bak_mem 7
+%elif %2 == 8
+%assign %%bak_mem 6
+%else ; %2 == 4
+%assign %%bak_mem 5
+%endif ; %2
+%endif ; ARCH_X86_64/32
+
+%if %2 == 16
+%ifidn %1, v
+%assign %%num_gpr_regs 6
+%else ; %1 == h
+%assign %%num_gpr_regs 5
+%endif ; %1
+%assign %%wd_mem 6
+%else ; %2 == 8/4
+%assign %%num_gpr_regs 5
+%if ARCH_X86_32 && %2 == 8
+%assign %%wd_mem 2
+%else ; ARCH_X86_64 || %2 == 4
+%assign %%wd_mem 0
+%endif ; ARCH_X86_64/32 etc.
+%endif ; %2
+
+%ifidn %1, v
+%assign %%tsp_mem 0
+%elif %2 == 16 ; && %1 == h
+%assign %%tsp_mem 16
+%else ; %1 == h && %1 == 8/4
+%assign %%tsp_mem 8
+%endif ; %1/%2
+
+%assign %%off %%wd_mem
+%assign %%tspoff %%bak_mem+%%wd_mem
+%assign %%stack_mem ((%%bak_mem+%%wd_mem+%%tsp_mem)*mmsize)
+
+%if %3 == 10
+%define %%maxsgn 511
+%define %%minsgn m512
+%define %%maxusgn 1023
+%define %%maxf 4
+%else ; %3 == 12
+%define %%maxsgn 2047
+%define %%minsgn m2048
+%define %%maxusgn 4095
+%define %%maxf 16
+%endif ; %3
+
+cglobal vp9_loop_filter_%1_%2_%3, 5, %%num_gpr_regs, %%num_xmm_regs, %%stack_mem, dst, stride, E, I, H
+ ; prepare E, I and H masks
+ shl Ed, %3-8
+ shl Id, %3-8
+ shl Hd, %3-8
+%if cpuflag(ssse3)
+ mova m0, [pw_256]
+%endif
+ movd m1, Ed
+ movd m2, Id
+ movd m3, Hd
+%if cpuflag(ssse3)
+ pshufb m1, m0 ; E << (bit_depth - 8)
+ pshufb m2, m0 ; I << (bit_depth - 8)
+ pshufb m3, m0 ; H << (bit_depth - 8)
+%else
+ punpcklwd m1, m1
+ punpcklwd m2, m2
+ punpcklwd m3, m3
+ pshufd m1, m1, q0000
+ pshufd m2, m2, q0000
+ pshufd m3, m3, q0000
+%endif
+ SCRATCH 1, 8, rsp+(%%off+0)*mmsize, E
+ SCRATCH 2, 9, rsp+(%%off+1)*mmsize, I
+ SCRATCH 3, 10, rsp+(%%off+2)*mmsize, H
+%if %2 > 4
+ PRELOAD 11, pw_ %+ %%maxf, F
+%endif
+
+ ; set up variables to load data
+%ifidn %1, v
+ DEFINE_ARGS dst8, stride, stride3, dst0, dst4, dst12
+ lea stride3q, [strideq*3]
+ neg strideq
+%if %2 == 16
+ lea dst0q, [dst8q+strideq*8]
+%else
+ lea dst4q, [dst8q+strideq*4]
+%endif
+ neg strideq
+%if %2 == 16
+ lea dst12q, [dst8q+strideq*4]
+ lea dst4q, [dst0q+strideq*4]
+%endif
+
+%if %2 == 16
+%define %%p7 dst0q
+%define %%p6 dst0q+strideq
+%define %%p5 dst0q+strideq*2
+%define %%p4 dst0q+stride3q
+%endif
+%define %%p3 dst4q
+%define %%p2 dst4q+strideq
+%define %%p1 dst4q+strideq*2
+%define %%p0 dst4q+stride3q
+%define %%q0 dst8q
+%define %%q1 dst8q+strideq
+%define %%q2 dst8q+strideq*2
+%define %%q3 dst8q+stride3q
+%if %2 == 16
+%define %%q4 dst12q
+%define %%q5 dst12q+strideq
+%define %%q6 dst12q+strideq*2
+%define %%q7 dst12q+stride3q
+%endif
+%else ; %1 == h
+ DEFINE_ARGS dst0, stride, stride3, dst4
+ lea stride3q, [strideq*3]
+ lea dst4q, [dst0q+strideq*4]
+
+%define %%p3 rsp+(%%tspoff+0)*mmsize
+%define %%p2 rsp+(%%tspoff+1)*mmsize
+%define %%p1 rsp+(%%tspoff+2)*mmsize
+%define %%p0 rsp+(%%tspoff+3)*mmsize
+%define %%q0 rsp+(%%tspoff+4)*mmsize
+%define %%q1 rsp+(%%tspoff+5)*mmsize
+%define %%q2 rsp+(%%tspoff+6)*mmsize
+%define %%q3 rsp+(%%tspoff+7)*mmsize
+
+%if %2 < 16
+ movu m0, [dst0q+strideq*0-8]
+ movu m1, [dst0q+strideq*1-8]
+ movu m2, [dst0q+strideq*2-8]
+ movu m3, [dst0q+stride3q -8]
+ movu m4, [dst4q+strideq*0-8]
+ movu m5, [dst4q+strideq*1-8]
+ movu m6, [dst4q+strideq*2-8]
+ movu m7, [dst4q+stride3q -8]
+
+%if ARCH_X86_64
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 12
+%else
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%%p0], [%%q0]
+%endif
+
+ mova [%%p3], m0
+ mova [%%p2], m1
+ mova [%%p1], m2
+ mova [%%p0], m3
+%if ARCH_X86_64
+ mova [%%q0], m4
+%endif
+ mova [%%q1], m5
+ mova [%%q2], m6
+ mova [%%q3], m7
+
+ ; FIXME investigate if we can _not_ load q0-3 below if h, and adjust register
+ ; order here accordingly
+%else ; %2 == 16
+
+%define %%p7 rsp+(%%tspoff+ 8)*mmsize
+%define %%p6 rsp+(%%tspoff+ 9)*mmsize
+%define %%p5 rsp+(%%tspoff+10)*mmsize
+%define %%p4 rsp+(%%tspoff+11)*mmsize
+%define %%q4 rsp+(%%tspoff+12)*mmsize
+%define %%q5 rsp+(%%tspoff+13)*mmsize
+%define %%q6 rsp+(%%tspoff+14)*mmsize
+%define %%q7 rsp+(%%tspoff+15)*mmsize
+
+ mova m0, [dst0q+strideq*0-16]
+ mova m1, [dst0q+strideq*1-16]
+ mova m2, [dst0q+strideq*2-16]
+ mova m3, [dst0q+stride3q -16]
+ mova m4, [dst4q+strideq*0-16]
+ mova m5, [dst4q+strideq*1-16]
+%if ARCH_X86_64
+ mova m6, [dst4q+strideq*2-16]
+%endif
+ mova m7, [dst4q+stride3q -16]
+
+%if ARCH_X86_64
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 12
+%else
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [dst4q+strideq*2-16], [%%p3], 1
+%endif
+
+ mova [%%p7], m0
+ mova [%%p6], m1
+ mova [%%p5], m2
+ mova [%%p4], m3
+%if ARCH_X86_64
+ mova [%%p3], m4
+%endif
+ mova [%%p2], m5
+ mova [%%p1], m6
+ mova [%%p0], m7
+
+ mova m0, [dst0q+strideq*0]
+ mova m1, [dst0q+strideq*1]
+ mova m2, [dst0q+strideq*2]
+ mova m3, [dst0q+stride3q ]
+ mova m4, [dst4q+strideq*0]
+ mova m5, [dst4q+strideq*1]
+%if ARCH_X86_64
+ mova m6, [dst4q+strideq*2]
+%endif
+ mova m7, [dst4q+stride3q ]
+
+%if ARCH_X86_64
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 12
+%else
+ TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [dst4q+strideq*2], [%%q4], 1
+%endif
+
+ mova [%%q0], m0
+ mova [%%q1], m1
+ mova [%%q2], m2
+ mova [%%q3], m3
+%if ARCH_X86_64
+ mova [%%q4], m4
+%endif
+ mova [%%q5], m5
+ mova [%%q6], m6
+ mova [%%q7], m7
+
+ ; FIXME investigate if we can _not_ load q0|q4-7 below if h, and adjust register
+ ; order here accordingly
+%endif ; %2
+%endif ; %1
+
+ ; load q0|q4-7 data
+ mova m0, [%%q0]
+%if %2 == 16
+ mova m4, [%%q4]
+ mova m5, [%%q5]
+ mova m6, [%%q6]
+ mova m7, [%%q7]
+
+ ; flat8out q portion
+ FLAT8OUT_HALF
+ SCRATCH 7, 15, rsp+(%%off+6)*mmsize, F8O
+%endif
+
+ ; load q1-3 data
+ mova m1, [%%q1]
+ mova m2, [%%q2]
+ mova m3, [%%q3]
+
+ ; r6-8|pw_4[m8-11]=reg_E/I/H/F
+ ; r9[m15]=!flatout[q]
+ ; m12-14=free
+ ; m0-3=q0-q3
+ ; m4-7=free
+
+ ; flat8in|fm|hev q portion
+ FLAT8IN_HALF %2
+ SCRATCH 7, 13, rsp+(%%off+4)*mmsize, HEV
+%if %2 > 4
+ SCRATCH 4, 14, rsp+(%%off+5)*mmsize, F8I
+%endif
+
+ ; r6-8|pw_4[m8-11]=reg_E/I/H/F
+ ; r9[m15]=!flat8out[q]
+ ; r10[m13]=hev[q]
+ ; r11[m14]=!flat8in[q]
+ ; m2=!fm[q]
+ ; m0,1=q0-q1
+ ; m2-7=free
+ ; m12=free
+
+ ; load p0-1
+ mova m3, [%%p0]
+ mova m4, [%%p1]
+
+ ; fm mb_edge portion
+ psubw m5, m3, m0 ; q0-p0
+ psubw m6, m4, m1 ; q1-p1
+%if ARCH_X86_64
+ ABS2 m5, m6, m7, m12 ; abs(q0-p0) | abs(q1-p1)
+%else
+ ABS1 m5, m7 ; abs(q0-p0)
+ ABS1 m6, m7 ; abs(q1-p1)
+%endif
+ paddw m5, m5
+ psraw m6, 1
+ paddw m6, m5 ; abs(q0-p0)*2+(abs(q1-p1)>>1)
+ pcmpgtw m6, reg_E
+ por m2, m6
+ SCRATCH 2, 12, rsp+(%%off+3)*mmsize, FM
+
+ ; r6-8|pw_4[m8-11]=reg_E/I/H/F
+ ; r9[m15]=!flat8out[q]
+ ; r10[m13]=hev[q]
+ ; r11[m14]=!flat8in[q]
+ ; r12[m12]=!fm[q]
+ ; m3-4=q0-1
+ ; m0-2/5-7=free
+
+ ; load p4-7 data
+ SWAP 3, 0 ; p0
+ SWAP 4, 1 ; p1
+%if %2 == 16
+ mova m7, [%%p7]
+ mova m6, [%%p6]
+ mova m5, [%%p5]
+ mova m4, [%%p4]
+
+ ; flat8out p portion
+ FLAT8OUT_HALF
+ por m7, reg_F8O
+ SCRATCH 7, 15, rsp+(%%off+6)*mmsize, F8O
+%endif
+
+ ; r6-8|pw_4[m8-11]=reg_E/I/H/F
+ ; r9[m15]=!flat8out
+ ; r10[m13]=hev[q]
+ ; r11[m14]=!flat8in[q]
+ ; r12[m12]=!fm[q]
+ ; m0=p0
+ ; m1-7=free
+
+ ; load p2-3 data
+ mova m2, [%%p2]
+ mova m3, [%%p3]
+
+ ; flat8in|fm|hev p portion
+ FLAT8IN_HALF %2
+ por m7, reg_HEV
+%if %2 > 4
+ por m4, reg_F8I
+%endif
+ por m2, reg_FM
+%if %2 > 4
+ por m4, m2 ; !flat8|!fm
+%if %2 == 16
+ por m5, m4, reg_F8O ; !flat16|!fm
+ pandn m2, m4 ; filter4_mask
+ pandn m4, m5 ; filter8_mask
+ pxor m5, [pw_m1] ; filter16_mask
+ SCRATCH 5, 15, rsp+(%%off+6)*mmsize, F16M
+%else
+ pandn m2, m4 ; filter4_mask
+ pxor m4, [pw_m1] ; filter8_mask
+%endif
+ SCRATCH 4, 14, rsp+(%%off+5)*mmsize, F8M
+%else
+ pxor m2, [pw_m1] ; filter4_mask
+%endif
+ SCRATCH 7, 13, rsp+(%%off+4)*mmsize, HEV
+ SCRATCH 2, 12, rsp+(%%off+3)*mmsize, F4M
+
+ ; r9[m15]=filter16_mask
+ ; r10[m13]=hev
+ ; r11[m14]=filter8_mask
+ ; r12[m12]=filter4_mask
+ ; m0,1=p0-p1
+ ; m2-7=free
+ ; m8-11=free
+
+%if %2 > 4
+%if %2 == 16
+ ; filter_14
+ mova m2, [%%p7]
+ mova m3, [%%p6]
+ mova m6, [%%p5]
+ mova m7, [%%p4]
+ PRELOAD 8, %%p3, P3
+ PRELOAD 9, %%p2, P2
+%endif
+ PRELOAD 10, %%q0, Q0
+ PRELOAD 11, %%q1, Q1
+%if %2 == 16
+ psllw m4, m2, 3
+ paddw m5, m3, m3
+ paddw m4, m6
+ paddw m5, m7
+ paddw m4, reg_P3
+ paddw m5, reg_P2
+ paddw m4, m1
+ paddw m5, m0
+ paddw m4, reg_Q0 ; q0+p1+p3+p5+p7*8
+ psubw m5, m2 ; p0+p2+p4+p6*2-p7
+ paddw m4, [pw_8]
+ paddw m5, m4 ; q0+p0+p1+p2+p3+p4+p5+p6*2+p7*7+8
+
+ ; below, stack slots rsp+0..5*mmsize are used to store pre-filter pixels for
+ ; subsequent subtraction at the end of the filter
+
+ mova [rsp+0*mmsize], m3
+ FILTER_STEP m4, m5, F16M, 4, %%p6, m3, m2, m6, reg_Q1
+%endif
+ mova m3, [%%q2]
+%if %2 == 16
+ mova [rsp+1*mmsize], m6
+ FILTER_STEP m4, m5, F16M, 4, %%p5, m6, m2, m7, m3
+%endif
+ mova m6, [%%q3]
+%if %2 == 16
+ mova [rsp+2*mmsize], m7
+ FILTER_STEP m4, m5, F16M, 4, %%p4, m7, m2, reg_P3, m6
+ mova m7, [%%q4]
+%if ARCH_X86_64
+ mova [rsp+3*mmsize], reg_P3
+%else
+ mova m4, reg_P3
+ mova [rsp+3*mmsize], m4
+%endif
+ FILTER_STEP m4, m5, F16M, 4, %%p3, reg_P3, m2, reg_P2, m7
+ PRELOAD 8, %%q5, Q5
+%if ARCH_X86_64
+ mova [rsp+4*mmsize], reg_P2
+%else
+ mova m4, reg_P2
+ mova [rsp+4*mmsize], m4
+%endif
+ FILTER_STEP m4, m5, F16M, 4, %%p2, reg_P2, m2, m1, reg_Q5
+ PRELOAD 9, %%q6, Q6
+ mova [rsp+5*mmsize], m1
+ FILTER_STEP m4, m5, F16M, 4, %%p1, m1, m2, m0, reg_Q6
+ mova m1, [%%q7]
+ FILTER_STEP m4, m5, F16M, 4, %%p0, m0, m2, reg_Q0, m1, 1
+ FILTER_STEP m4, m5, F16M, 4, %%q0, reg_Q0, [rsp+0*mmsize], reg_Q1, m1, ARCH_X86_64
+ FILTER_STEP m4, m5, F16M, 4, %%q1, reg_Q1, [rsp+1*mmsize], m3, m1, ARCH_X86_64
+ FILTER_STEP m4, m5, F16M, 4, %%q2, m3, [rsp+2*mmsize], m6, m1, 1
+ FILTER_STEP m4, m5, F16M, 4, %%q3, m6, [rsp+3*mmsize], m7, m1
+ FILTER_STEP m4, m5, F16M, 4, %%q4, m7, [rsp+4*mmsize], reg_Q5, m1
+ FILTER_STEP m4, m5, F16M, 4, %%q5, reg_Q5, [rsp+5*mmsize], reg_Q6, m1
+ FILTER_STEP m4, m5, F16M, 4, %%q6, reg_Q6
+
+ mova m7, [%%p1]
+%else
+ SWAP 1, 7
+%endif
+
+ mova m2, [%%p3]
+ mova m1, [%%p2]
+
+ ; reg_Q0-1 (m10-m11)
+ ; m0=p0
+ ; m1=p2
+ ; m2=p3
+ ; m3=q2
+ ; m4-5=free
+ ; m6=q3
+ ; m7=p1
+ ; m8-9 unused
+
+ ; filter_6
+ psllw m4, m2, 2
+ paddw m5, m1, m1
+ paddw m4, m7
+ psubw m5, m2
+ paddw m4, m0
+ paddw m5, reg_Q0
+ paddw m4, [pw_4]
+ paddw m5, m4
+
+%if ARCH_X86_64
+ mova m8, m1
+ mova m9, m7
+%else
+ mova [rsp+0*mmsize], m1
+ mova [rsp+1*mmsize], m7
+%endif
+%ifidn %1, v
+ FILTER_STEP m4, m5, F8M, 3, %%p2, m1, m2, m7, reg_Q1
+%else
+ FILTER_STEP m4, m5, F8M, 3, %%p2, m1, m2, m7, reg_Q1, 1
+%endif
+ FILTER_STEP m4, m5, F8M, 3, %%p1, m7, m2, m0, m3, 1
+ FILTER_STEP m4, m5, F8M, 3, %%p0, m0, m2, reg_Q0, m6, 1
+%if ARCH_X86_64
+ FILTER_STEP m4, m5, F8M, 3, %%q0, reg_Q0, m8, reg_Q1, m6, ARCH_X86_64
+ FILTER_STEP m4, m5, F8M, 3, %%q1, reg_Q1, m9, m3, m6, ARCH_X86_64
+%else
+ FILTER_STEP m4, m5, F8M, 3, %%q0, reg_Q0, [rsp+0*mmsize], reg_Q1, m6, ARCH_X86_64
+ FILTER_STEP m4, m5, F8M, 3, %%q1, reg_Q1, [rsp+1*mmsize], m3, m6, ARCH_X86_64
+%endif
+ FILTER_STEP m4, m5, F8M, 3, %%q2, m3
+
+ UNSCRATCH 2, 10, %%q0
+ UNSCRATCH 6, 11, %%q1
+%else
+ SWAP 1, 7
+ mova m2, [%%q0]
+ mova m6, [%%q1]
+%endif
+ UNSCRATCH 3, 13, rsp+(%%off+4)*mmsize, HEV
+
+ ; m0=p0
+ ; m1=p2
+ ; m2=q0
+ ; m3=hev_mask
+ ; m4-5=free
+ ; m6=q1
+ ; m7=p1
+
+ ; filter_4
+ psubw m4, m7, m6 ; p1-q1
+ psubw m5, m2, m0 ; q0-p0
+ pand m4, m3
+ pminsw m4, [pw_ %+ %%maxsgn]
+ pmaxsw m4, [pw_ %+ %%minsgn] ; clip_intp2(p1-q1, 9) -> f
+ paddw m4, m5
+ paddw m5, m5
+ paddw m4, m5 ; 3*(q0-p0)+f
+ pminsw m4, [pw_ %+ %%maxsgn]
+ pmaxsw m4, [pw_ %+ %%minsgn] ; clip_intp2(3*(q0-p0)+f, 9) -> f
+ pand m4, reg_F4M
+ paddw m5, m4, [pw_4]
+ paddw m4, [pw_3]
+ pminsw m5, [pw_ %+ %%maxsgn]
+ pminsw m4, [pw_ %+ %%maxsgn]
+ psraw m5, 3 ; min_intp2(f+4, 9)>>3 -> f1
+ psraw m4, 3 ; min_intp2(f+3, 9)>>3 -> f2
+ psubw m2, m5 ; q0-f1
+ paddw m0, m4 ; p0+f2
+ pandn m3, m5 ; f1 & !hev (for p1/q1 adj)
+ pxor m4, m4
+ mova m5, [pw_ %+ %%maxusgn]
+ pmaxsw m2, m4
+ pmaxsw m0, m4
+ pminsw m2, m5
+ pminsw m0, m5
+%if cpuflag(ssse3)
+ pmulhrsw m3, [pw_16384] ; (f1+1)>>1
+%else
+ paddw m3, [pw_1]
+ psraw m3, 1
+%endif
+ paddw m7, m3 ; p1+f
+ psubw m6, m3 ; q1-f
+ pmaxsw m7, m4
+ pmaxsw m6, m4
+ pminsw m7, m5
+ pminsw m6, m5
+
+ ; store
+%ifidn %1, v
+ mova [%%p1], m7
+ mova [%%p0], m0
+ mova [%%q0], m2
+ mova [%%q1], m6
+%else ; %1 == h
+%if %2 == 4
+ TRANSPOSE4x4W 7, 0, 2, 6, 1
+ movh [dst0q+strideq*0-4], m7
+ movhps [dst0q+strideq*1-4], m7
+ movh [dst0q+strideq*2-4], m0
+ movhps [dst0q+stride3q -4], m0
+ movh [dst4q+strideq*0-4], m2
+ movhps [dst4q+strideq*1-4], m2
+ movh [dst4q+strideq*2-4], m6
+ movhps [dst4q+stride3q -4], m6
+%elif %2 == 8
+ mova m3, [%%p3]
+ mova m4, [%%q2]
+ mova m5, [%%q3]
+
+%if ARCH_X86_64
+ TRANSPOSE8x8W 3, 1, 7, 0, 2, 6, 4, 5, 8
+%else
+ TRANSPOSE8x8W 3, 1, 7, 0, 2, 6, 4, 5, [%%q2], [%%q0], 1
+ mova m2, [%%q0]
+%endif
+
+ movu [dst0q+strideq*0-8], m3
+ movu [dst0q+strideq*1-8], m1
+ movu [dst0q+strideq*2-8], m7
+ movu [dst0q+stride3q -8], m0
+ movu [dst4q+strideq*0-8], m2
+ movu [dst4q+strideq*1-8], m6
+ movu [dst4q+strideq*2-8], m4
+ movu [dst4q+stride3q -8], m5
+%else ; %2 == 16
+ SCRATCH 2, 8, %%q0
+ SCRATCH 6, 9, %%q1
+ mova m2, [%%p7]
+ mova m3, [%%p6]
+ mova m4, [%%p5]
+ mova m5, [%%p4]
+ mova m6, [%%p3]
+
+%if ARCH_X86_64
+ TRANSPOSE8x8W 2, 3, 4, 5, 6, 1, 7, 0, 10
+%else
+ mova [%%p1], m7
+ TRANSPOSE8x8W 2, 3, 4, 5, 6, 1, 7, 0, [%%p1], [dst4q+strideq*0-16], 1
+%endif
+
+ mova [dst0q+strideq*0-16], m2
+ mova [dst0q+strideq*1-16], m3
+ mova [dst0q+strideq*2-16], m4
+ mova [dst0q+stride3q -16], m5
+%if ARCH_X86_64
+ mova [dst4q+strideq*0-16], m6
+%endif
+ mova [dst4q+strideq*1-16], m1
+ mova [dst4q+strideq*2-16], m7
+ mova [dst4q+stride3q -16], m0
+
+ UNSCRATCH 2, 8, %%q0
+ UNSCRATCH 6, 9, %%q1
+ mova m0, [%%q2]
+ mova m1, [%%q3]
+ mova m3, [%%q4]
+ mova m4, [%%q5]
+%if ARCH_X86_64
+ mova m5, [%%q6]
+%endif
+ mova m7, [%%q7]
+
+%if ARCH_X86_64
+ TRANSPOSE8x8W 2, 6, 0, 1, 3, 4, 5, 7, 8
+%else
+ TRANSPOSE8x8W 2, 6, 0, 1, 3, 4, 5, 7, [%%q6], [dst4q+strideq*0], 1
+%endif
+
+ mova [dst0q+strideq*0], m2
+ mova [dst0q+strideq*1], m6
+ mova [dst0q+strideq*2], m0
+ mova [dst0q+stride3q ], m1
+%if ARCH_X86_64
+ mova [dst4q+strideq*0], m3
+%endif
+ mova [dst4q+strideq*1], m4
+ mova [dst4q+strideq*2], m5
+ mova [dst4q+stride3q ], m7
+%endif ; %2
+%endif ; %1
+ RET
+%endmacro
+
+%macro LOOP_FILTER_CPUSETS 3
+INIT_XMM sse2
+LOOP_FILTER %1, %2, %3
+INIT_XMM ssse3
+LOOP_FILTER %1, %2, %3
+INIT_XMM avx
+LOOP_FILTER %1, %2, %3
+%endmacro
+
+%macro LOOP_FILTER_WDSETS 2
+LOOP_FILTER_CPUSETS %1, 4, %2
+LOOP_FILTER_CPUSETS %1, 8, %2
+LOOP_FILTER_CPUSETS %1, 16, %2
+%endmacro
+
+LOOP_FILTER_WDSETS h, 10
+LOOP_FILTER_WDSETS v, 10
+LOOP_FILTER_WDSETS h, 12
+LOOP_FILTER_WDSETS v, 12
diff --git a/media/ffvpx/libavcodec/x86/vp9mc.asm b/media/ffvpx/libavcodec/x86/vp9mc.asm
new file mode 100644
index 0000000000..efc4cfbef1
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9mc.asm
@@ -0,0 +1,680 @@
+;******************************************************************************
+;* VP9 motion compensation SIMD optimizations
+;*
+;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA 32
+
+cextern pw_256
+cextern pw_64
+
+%macro F8_SSSE3_TAPS 8
+times 16 db %1, %2
+times 16 db %3, %4
+times 16 db %5, %6
+times 16 db %7, %8
+%endmacro
+
+%macro F8_SSE2_TAPS 8
+times 8 dw %1
+times 8 dw %2
+times 8 dw %3
+times 8 dw %4
+times 8 dw %5
+times 8 dw %6
+times 8 dw %7
+times 8 dw %8
+%endmacro
+
+%macro F8_16BPP_TAPS 8
+times 8 dw %1, %2
+times 8 dw %3, %4
+times 8 dw %5, %6
+times 8 dw %7, %8
+%endmacro
+
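+; 8-tap sub-pel filter coefficients (15 fractional phases for each of the
+; smooth, regular and sharp filter sets), emitted in three layouts: byte pairs
+; for the ssse3 pmaddubsw path, one word per tap for the sse2 pmullw path, and
+; word pairs for the 16 bpp pmaddwd path.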
+%macro FILTER 1
+const filters_%1 ; smooth
+ F8_TAPS -3, -1, 32, 64, 38, 1, -3, 0
+ F8_TAPS -2, -2, 29, 63, 41, 2, -3, 0
+ F8_TAPS -2, -2, 26, 63, 43, 4, -4, 0
+ F8_TAPS -2, -3, 24, 62, 46, 5, -4, 0
+ F8_TAPS -2, -3, 21, 60, 49, 7, -4, 0
+ F8_TAPS -1, -4, 18, 59, 51, 9, -4, 0
+ F8_TAPS -1, -4, 16, 57, 53, 12, -4, -1
+ F8_TAPS -1, -4, 14, 55, 55, 14, -4, -1
+ F8_TAPS -1, -4, 12, 53, 57, 16, -4, -1
+ F8_TAPS 0, -4, 9, 51, 59, 18, -4, -1
+ F8_TAPS 0, -4, 7, 49, 60, 21, -3, -2
+ F8_TAPS 0, -4, 5, 46, 62, 24, -3, -2
+ F8_TAPS 0, -4, 4, 43, 63, 26, -2, -2
+ F8_TAPS 0, -3, 2, 41, 63, 29, -2, -2
+ F8_TAPS 0, -3, 1, 38, 64, 32, -1, -3
+ ; regular
+ F8_TAPS 0, 1, -5, 126, 8, -3, 1, 0
+ F8_TAPS -1, 3, -10, 122, 18, -6, 2, 0
+ F8_TAPS -1, 4, -13, 118, 27, -9, 3, -1
+ F8_TAPS -1, 4, -16, 112, 37, -11, 4, -1
+ F8_TAPS -1, 5, -18, 105, 48, -14, 4, -1
+ F8_TAPS -1, 5, -19, 97, 58, -16, 5, -1
+ F8_TAPS -1, 6, -19, 88, 68, -18, 5, -1
+ F8_TAPS -1, 6, -19, 78, 78, -19, 6, -1
+ F8_TAPS -1, 5, -18, 68, 88, -19, 6, -1
+ F8_TAPS -1, 5, -16, 58, 97, -19, 5, -1
+ F8_TAPS -1, 4, -14, 48, 105, -18, 5, -1
+ F8_TAPS -1, 4, -11, 37, 112, -16, 4, -1
+ F8_TAPS -1, 3, -9, 27, 118, -13, 4, -1
+ F8_TAPS 0, 2, -6, 18, 122, -10, 3, -1
+ F8_TAPS 0, 1, -3, 8, 126, -5, 1, 0
+ ; sharp
+ F8_TAPS -1, 3, -7, 127, 8, -3, 1, 0
+ F8_TAPS -2, 5, -13, 125, 17, -6, 3, -1
+ F8_TAPS -3, 7, -17, 121, 27, -10, 5, -2
+ F8_TAPS -4, 9, -20, 115, 37, -13, 6, -2
+ F8_TAPS -4, 10, -23, 108, 48, -16, 8, -3
+ F8_TAPS -4, 10, -24, 100, 59, -19, 9, -3
+ F8_TAPS -4, 11, -24, 90, 70, -21, 10, -4
+ F8_TAPS -4, 11, -23, 80, 80, -23, 11, -4
+ F8_TAPS -4, 10, -21, 70, 90, -24, 11, -4
+ F8_TAPS -3, 9, -19, 59, 100, -24, 10, -4
+ F8_TAPS -3, 8, -16, 48, 108, -23, 10, -4
+ F8_TAPS -2, 6, -13, 37, 115, -20, 9, -4
+ F8_TAPS -2, 5, -10, 27, 121, -17, 7, -3
+ F8_TAPS -1, 3, -6, 17, 125, -13, 5, -2
+ F8_TAPS 0, 1, -3, 8, 127, -7, 3, -1
+%endmacro
+
+%define F8_TAPS F8_SSSE3_TAPS
+; int8_t ff_filters_ssse3[3][15][4][32]
+FILTER ssse3
+%define F8_TAPS F8_SSE2_TAPS
+; int16_t ff_filters_sse2[3][15][8][8]
+FILTER sse2
+%define F8_TAPS F8_16BPP_TAPS
+; int16_t ff_filters_16bpp[3][15][4][16]
+FILTER 16bpp
+
+SECTION .text
+
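+; 8-tap horizontal 1D filter, mmxext/sse2 variant (%1 = put/avg): source bytes
+; at srcq-3..srcq+4 are widened to words and multiplied per tap with pmullw,
+; summed, rounded with pw_64, shifted right by 7 and packed back to bytes.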
+%macro filter_sse2_h_fn 1
+%assign %%px mmsize/2
+cglobal vp9_%1_8tap_1d_h_ %+ %%px %+ _8, 6, 6, 15, dst, dstride, src, sstride, h, filtery
+ pxor m5, m5
+ mova m6, [pw_64]
+ mova m7, [filteryq+ 0]
+%if ARCH_X86_64 && mmsize > 8
+ mova m8, [filteryq+ 16]
+ mova m9, [filteryq+ 32]
+ mova m10, [filteryq+ 48]
+ mova m11, [filteryq+ 64]
+ mova m12, [filteryq+ 80]
+ mova m13, [filteryq+ 96]
+ mova m14, [filteryq+112]
+%endif
+.loop:
+ movh m0, [srcq-3]
+ movh m1, [srcq-2]
+ movh m2, [srcq-1]
+ movh m3, [srcq+0]
+ movh m4, [srcq+1]
+ punpcklbw m0, m5
+ punpcklbw m1, m5
+ punpcklbw m2, m5
+ punpcklbw m3, m5
+ punpcklbw m4, m5
+ pmullw m0, m7
+%if ARCH_X86_64 && mmsize > 8
+ pmullw m1, m8
+ pmullw m2, m9
+ pmullw m3, m10
+ pmullw m4, m11
+%else
+ pmullw m1, [filteryq+ 16]
+ pmullw m2, [filteryq+ 32]
+ pmullw m3, [filteryq+ 48]
+ pmullw m4, [filteryq+ 64]
+%endif
+ paddw m0, m1
+ paddw m2, m3
+ paddw m0, m4
+ movh m1, [srcq+2]
+ movh m3, [srcq+3]
+ movh m4, [srcq+4]
+ add srcq, sstrideq
+ punpcklbw m1, m5
+ punpcklbw m3, m5
+ punpcklbw m4, m5
+%if ARCH_X86_64 && mmsize > 8
+ pmullw m1, m12
+ pmullw m3, m13
+ pmullw m4, m14
+%else
+ pmullw m1, [filteryq+ 80]
+ pmullw m3, [filteryq+ 96]
+ pmullw m4, [filteryq+112]
+%endif
+ paddw m0, m1
+ paddw m3, m4
+ paddw m0, m6
+ paddw m2, m3
+ paddsw m0, m2
+ psraw m0, 7
+%ifidn %1, avg
+ movh m1, [dstq]
+%endif
+ packuswb m0, m0
+%ifidn %1, avg
+ pavgb m0, m1
+%endif
+ movh [dstq], m0
+ add dstq, dstrideq
+ dec hd
+ jg .loop
+ RET
+%endmacro
+
+INIT_MMX mmxext
+filter_sse2_h_fn put
+filter_sse2_h_fn avg
+
+INIT_XMM sse2
+filter_sse2_h_fn put
+filter_sse2_h_fn avg
+
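+; ssse3 variant: pixels of neighbouring taps are interleaved bytewise and fed
+; to pmaddubsw with byte-pair coefficients; pmulhrsw against pw_256 performs
+; the rounded shift right by 7.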
+%macro filter_h_fn 1
+%assign %%px mmsize/2
+cglobal vp9_%1_8tap_1d_h_ %+ %%px %+ _8, 6, 6, 11, dst, dstride, src, sstride, h, filtery
+ mova m6, [pw_256]
+ mova m7, [filteryq+ 0]
+%if ARCH_X86_64 && mmsize > 8
+ mova m8, [filteryq+32]
+ mova m9, [filteryq+64]
+ mova m10, [filteryq+96]
+%endif
+.loop:
+ movh m0, [srcq-3]
+ movh m1, [srcq-2]
+ movh m2, [srcq-1]
+ movh m3, [srcq+0]
+ movh m4, [srcq+1]
+ movh m5, [srcq+2]
+ punpcklbw m0, m1
+ punpcklbw m2, m3
+ movh m1, [srcq+3]
+ movh m3, [srcq+4]
+ add srcq, sstrideq
+ punpcklbw m4, m5
+ punpcklbw m1, m3
+ pmaddubsw m0, m7
+%if ARCH_X86_64 && mmsize > 8
+ pmaddubsw m2, m8
+ pmaddubsw m4, m9
+ pmaddubsw m1, m10
+%else
+ pmaddubsw m2, [filteryq+32]
+ pmaddubsw m4, [filteryq+64]
+ pmaddubsw m1, [filteryq+96]
+%endif
+ paddw m0, m4
+ paddw m2, m1
+ paddsw m0, m2
+ pmulhrsw m0, m6
+%ifidn %1, avg
+ movh m1, [dstq]
+%endif
+ packuswb m0, m0
+%ifidn %1, avg
+ pavgb m0, m1
+%endif
+ movh [dstq], m0
+ add dstq, dstrideq
+ dec hd
+ jg .loop
+ RET
+%endmacro
+
+INIT_MMX ssse3
+filter_h_fn put
+filter_h_fn avg
+
+INIT_XMM ssse3
+filter_h_fn put
+filter_h_fn avg
+
+%if ARCH_X86_64
+%macro filter_hx2_fn 1
+%assign %%px mmsize
+cglobal vp9_%1_8tap_1d_h_ %+ %%px %+ _8, 6, 6, 14, dst, dstride, src, sstride, h, filtery
+ mova m13, [pw_256]
+ mova m8, [filteryq+ 0]
+ mova m9, [filteryq+32]
+ mova m10, [filteryq+64]
+ mova m11, [filteryq+96]
+.loop:
+ movu m0, [srcq-3]
+ movu m1, [srcq-2]
+ movu m2, [srcq-1]
+ movu m3, [srcq+0]
+ movu m4, [srcq+1]
+ movu m5, [srcq+2]
+ movu m6, [srcq+3]
+ movu m7, [srcq+4]
+ add srcq, sstrideq
+ SBUTTERFLY bw, 0, 1, 12
+ SBUTTERFLY bw, 2, 3, 12
+ SBUTTERFLY bw, 4, 5, 12
+ SBUTTERFLY bw, 6, 7, 12
+ pmaddubsw m0, m8
+ pmaddubsw m1, m8
+ pmaddubsw m2, m9
+ pmaddubsw m3, m9
+ pmaddubsw m4, m10
+ pmaddubsw m5, m10
+ pmaddubsw m6, m11
+ pmaddubsw m7, m11
+ paddw m0, m4
+ paddw m1, m5
+ paddw m2, m6
+ paddw m3, m7
+ paddsw m0, m2
+ paddsw m1, m3
+ pmulhrsw m0, m13
+ pmulhrsw m1, m13
+ packuswb m0, m1
+%ifidn %1, avg
+ pavgb m0, [dstq]
+%endif
+ mova [dstq], m0
+ add dstq, dstrideq
+ dec hd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM ssse3
+filter_hx2_fn put
+filter_hx2_fn avg
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+filter_hx2_fn put
+filter_hx2_fn avg
+%endif
+
+%endif ; ARCH_X86_64
+
+%macro filter_sse2_v_fn 1
+%assign %%px mmsize/2
+%if ARCH_X86_64
+cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _8, 6, 8, 15, dst, dstride, src, sstride, h, filtery, src4, sstride3
+%else
+cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _8, 4, 7, 15, dst, dstride, src, sstride, filtery, src4, sstride3
+ mov filteryq, r5mp
+%define hd r4mp
+%endif
+ pxor m5, m5
+ mova m6, [pw_64]
+ lea sstride3q, [sstrideq*3]
+ lea src4q, [srcq+sstrideq]
+ sub srcq, sstride3q
+ mova m7, [filteryq+ 0]
+%if ARCH_X86_64 && mmsize > 8
+ mova m8, [filteryq+ 16]
+ mova m9, [filteryq+ 32]
+ mova m10, [filteryq+ 48]
+ mova m11, [filteryq+ 64]
+ mova m12, [filteryq+ 80]
+ mova m13, [filteryq+ 96]
+ mova m14, [filteryq+112]
+%endif
+.loop:
+ ; FIXME maybe reuse loads from previous rows, or just
+ ; more generally unroll this to prevent multiple loads of
+ ; the same data?
+ movh m0, [srcq]
+ movh m1, [srcq+sstrideq]
+ movh m2, [srcq+sstrideq*2]
+ movh m3, [srcq+sstride3q]
+ add srcq, sstrideq
+ movh m4, [src4q]
+ punpcklbw m0, m5
+ punpcklbw m1, m5
+ punpcklbw m2, m5
+ punpcklbw m3, m5
+ punpcklbw m4, m5
+ pmullw m0, m7
+%if ARCH_X86_64 && mmsize > 8
+ pmullw m1, m8
+ pmullw m2, m9
+ pmullw m3, m10
+ pmullw m4, m11
+%else
+ pmullw m1, [filteryq+ 16]
+ pmullw m2, [filteryq+ 32]
+ pmullw m3, [filteryq+ 48]
+ pmullw m4, [filteryq+ 64]
+%endif
+ paddw m0, m1
+ paddw m2, m3
+ paddw m0, m4
+ movh m1, [src4q+sstrideq]
+ movh m3, [src4q+sstrideq*2]
+ movh m4, [src4q+sstride3q]
+ add src4q, sstrideq
+ punpcklbw m1, m5
+ punpcklbw m3, m5
+ punpcklbw m4, m5
+%if ARCH_X86_64 && mmsize > 8
+ pmullw m1, m12
+ pmullw m3, m13
+ pmullw m4, m14
+%else
+ pmullw m1, [filteryq+ 80]
+ pmullw m3, [filteryq+ 96]
+ pmullw m4, [filteryq+112]
+%endif
+ paddw m0, m1
+ paddw m3, m4
+ paddw m0, m6
+ paddw m2, m3
+ paddsw m0, m2
+ psraw m0, 7
+%ifidn %1, avg
+ movh m1, [dstq]
+%endif
+ packuswb m0, m0
+%ifidn %1, avg
+ pavgb m0, m1
+%endif
+ movh [dstq], m0
+ add dstq, dstrideq
+ dec hd
+ jg .loop
+ RET
+%endmacro
+
+INIT_MMX mmxext
+filter_sse2_v_fn put
+filter_sse2_v_fn avg
+
+INIT_XMM sse2
+filter_sse2_v_fn put
+filter_sse2_v_fn avg
+
+%macro filter_v_fn 1
+%assign %%px mmsize/2
+%if ARCH_X86_64
+cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _8, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
+%else
+cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _8, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
+ mov filteryq, r5mp
+%define hd r4mp
+%endif
+ mova m6, [pw_256]
+ lea sstride3q, [sstrideq*3]
+ lea src4q, [srcq+sstrideq]
+ sub srcq, sstride3q
+ mova m7, [filteryq+ 0]
+%if ARCH_X86_64 && mmsize > 8
+ mova m8, [filteryq+32]
+ mova m9, [filteryq+64]
+ mova m10, [filteryq+96]
+%endif
+.loop:
+ ; FIXME maybe reuse loads from previous rows, or just more generally
+ ; unroll this to prevent multiple loads of the same data?
+ movh m0, [srcq]
+ movh m1, [srcq+sstrideq]
+ movh m2, [srcq+sstrideq*2]
+ movh m3, [srcq+sstride3q]
+ movh m4, [src4q]
+ movh m5, [src4q+sstrideq]
+ punpcklbw m0, m1
+ punpcklbw m2, m3
+ movh m1, [src4q+sstrideq*2]
+ movh m3, [src4q+sstride3q]
+ add srcq, sstrideq
+ add src4q, sstrideq
+ punpcklbw m4, m5
+ punpcklbw m1, m3
+ pmaddubsw m0, m7
+%if ARCH_X86_64 && mmsize > 8
+ pmaddubsw m2, m8
+ pmaddubsw m4, m9
+ pmaddubsw m1, m10
+%else
+ pmaddubsw m2, [filteryq+32]
+ pmaddubsw m4, [filteryq+64]
+ pmaddubsw m1, [filteryq+96]
+%endif
+ paddw m0, m4
+ paddw m2, m1
+ paddsw m0, m2
+ pmulhrsw m0, m6
+%ifidn %1, avg
+ movh m1, [dstq]
+%endif
+ packuswb m0, m0
+%ifidn %1, avg
+ pavgb m0, m1
+%endif
+ movh [dstq], m0
+ add dstq, dstrideq
+ dec hd
+ jg .loop
+ RET
+%endmacro
+
+INIT_MMX ssse3
+filter_v_fn put
+filter_v_fn avg
+
+INIT_XMM ssse3
+filter_v_fn put
+filter_v_fn avg
+
+%if ARCH_X86_64
+
+%macro filter_vx2_fn 1
+%assign %%px mmsize
+cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _8, 6, 8, 14, dst, dstride, src, sstride, h, filtery, src4, sstride3
+ mova m13, [pw_256]
+ lea sstride3q, [sstrideq*3]
+ lea src4q, [srcq+sstrideq]
+ sub srcq, sstride3q
+ mova m8, [filteryq+ 0]
+ mova m9, [filteryq+32]
+ mova m10, [filteryq+64]
+ mova m11, [filteryq+96]
+.loop:
+ ; FIXME maybe reuse loads from previous rows, or just
+ ; more generally unroll this to prevent multiple loads of
+ ; the same data?
+ movu m0, [srcq]
+ movu m1, [srcq+sstrideq]
+ movu m2, [srcq+sstrideq*2]
+ movu m3, [srcq+sstride3q]
+ movu m4, [src4q]
+ movu m5, [src4q+sstrideq]
+ movu m6, [src4q+sstrideq*2]
+ movu m7, [src4q+sstride3q]
+ add srcq, sstrideq
+ add src4q, sstrideq
+ SBUTTERFLY bw, 0, 1, 12
+ SBUTTERFLY bw, 2, 3, 12
+ SBUTTERFLY bw, 4, 5, 12
+ SBUTTERFLY bw, 6, 7, 12
+ pmaddubsw m0, m8
+ pmaddubsw m1, m8
+ pmaddubsw m2, m9
+ pmaddubsw m3, m9
+ pmaddubsw m4, m10
+ pmaddubsw m5, m10
+ pmaddubsw m6, m11
+ pmaddubsw m7, m11
+ paddw m0, m4
+ paddw m1, m5
+ paddw m2, m6
+ paddw m3, m7
+ paddsw m0, m2
+ paddsw m1, m3
+ pmulhrsw m0, m13
+ pmulhrsw m1, m13
+ packuswb m0, m1
+%ifidn %1, avg
+ pavgb m0, [dstq]
+%endif
+ mova [dstq], m0
+ add dstq, dstrideq
+ dec hd
+ jg .loop
+ RET
+%endmacro
+
+INIT_XMM ssse3
+filter_vx2_fn put
+filter_vx2_fn avg
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+filter_vx2_fn put
+filter_vx2_fn avg
+%endif
+
+%endif ; ARCH_X86_64
+
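+; Full-pel copy/average: %2 is the block width, %3-%5 the offsets of the
+; remaining three loads/stores per iteration (the s/d prefix picks the source
+; or destination form), %6 the number of rows advanced per iteration, and %7
+; selects pavgb/pavgw and the _8/_16 name suffix for the avg variants.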
+%macro fpel_fn 6-8 0, 4
+%if %2 == 4
+%define %%srcfn movh
+%define %%dstfn movh
+%else
+%define %%srcfn movu
+%define %%dstfn mova
+%endif
+
+%if %7 == 8
+%define %%pavg pavgb
+%define %%szsuf _8
+%elif %7 == 16
+%define %%pavg pavgw
+%define %%szsuf _16
+%else
+%define %%szsuf
+%endif
+
+%if %2 <= mmsize
+cglobal vp9_%1%2 %+ %%szsuf, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
+ lea sstride3q, [sstrideq*3]
+ lea dstride3q, [dstrideq*3]
+%else
+cglobal vp9_%1%2 %+ %%szsuf, 5, 5, %8, dst, dstride, src, sstride, h
+%endif
+.loop:
+ %%srcfn m0, [srcq]
+ %%srcfn m1, [srcq+s%3]
+ %%srcfn m2, [srcq+s%4]
+ %%srcfn m3, [srcq+s%5]
+%if %2/mmsize == 8
+ %%srcfn m4, [srcq+mmsize*4]
+ %%srcfn m5, [srcq+mmsize*5]
+ %%srcfn m6, [srcq+mmsize*6]
+ %%srcfn m7, [srcq+mmsize*7]
+%endif
+ lea srcq, [srcq+sstrideq*%6]
+%ifidn %1, avg
+ %%pavg m0, [dstq]
+ %%pavg m1, [dstq+d%3]
+ %%pavg m2, [dstq+d%4]
+%if %2 == 4
+ %%srcfn m4, [dstq+d%5]
+ %%pavg m3, m4
+%else
+ %%pavg m3, [dstq+d%5]
+%endif
+%if %2/mmsize == 8
+ %%pavg m4, [dstq+mmsize*4]
+ %%pavg m5, [dstq+mmsize*5]
+ %%pavg m6, [dstq+mmsize*6]
+ %%pavg m7, [dstq+mmsize*7]
+%endif
+%endif
+ %%dstfn [dstq], m0
+ %%dstfn [dstq+d%3], m1
+ %%dstfn [dstq+d%4], m2
+ %%dstfn [dstq+d%5], m3
+%if %2/mmsize == 8
+ %%dstfn [dstq+mmsize*4], m4
+ %%dstfn [dstq+mmsize*5], m5
+ %%dstfn [dstq+mmsize*6], m6
+ %%dstfn [dstq+mmsize*7], m7
+%endif
+ lea dstq, [dstq+dstrideq*%6]
+ sub hd, %6
+ jnz .loop
+ RET
+%endmacro
+
+%define d16 16
+%define s16 16
+%define d32 32
+%define s32 32
+INIT_MMX mmx
+fpel_fn put, 4, strideq, strideq*2, stride3q, 4
+fpel_fn put, 8, strideq, strideq*2, stride3q, 4
+INIT_MMX mmxext
+fpel_fn avg, 4, strideq, strideq*2, stride3q, 4, 8
+fpel_fn avg, 8, strideq, strideq*2, stride3q, 4, 8
+INIT_XMM sse
+fpel_fn put, 16, strideq, strideq*2, stride3q, 4
+fpel_fn put, 32, mmsize, strideq, strideq+mmsize, 2
+fpel_fn put, 64, mmsize, mmsize*2, mmsize*3, 1
+fpel_fn put, 128, mmsize, mmsize*2, mmsize*3, 1, 0, 8
+INIT_XMM sse2
+fpel_fn avg, 16, strideq, strideq*2, stride3q, 4, 8
+fpel_fn avg, 32, mmsize, strideq, strideq+mmsize, 2, 8
+fpel_fn avg, 64, mmsize, mmsize*2, mmsize*3, 1, 8
+INIT_YMM avx
+fpel_fn put, 32, strideq, strideq*2, stride3q, 4
+fpel_fn put, 64, mmsize, strideq, strideq+mmsize, 2
+fpel_fn put, 128, mmsize, mmsize*2, mmsize*3, 1
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+fpel_fn avg, 32, strideq, strideq*2, stride3q, 4, 8
+fpel_fn avg, 64, mmsize, strideq, strideq+mmsize, 2, 8
+%endif
+INIT_MMX mmxext
+fpel_fn avg, 8, strideq, strideq*2, stride3q, 4, 16
+INIT_XMM sse2
+fpel_fn avg, 16, strideq, strideq*2, stride3q, 4, 16
+fpel_fn avg, 32, mmsize, strideq, strideq+mmsize, 2, 16
+fpel_fn avg, 64, mmsize, mmsize*2, mmsize*3, 1, 16
+fpel_fn avg, 128, mmsize, mmsize*2, mmsize*3, 1, 16, 8
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+fpel_fn avg, 32, strideq, strideq*2, stride3q, 4, 16
+fpel_fn avg, 64, mmsize, strideq, strideq+mmsize, 2, 16
+fpel_fn avg, 128, mmsize, mmsize*2, mmsize*3, 1, 16
+%endif
+%undef s16
+%undef d16
+%undef s32
+%undef d32
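For reference, the fpel_fn instantiations above boil down to plain block copies ("put") and rounded averages ("avg"). A minimal scalar C sketch of the 8-bit semantics, with illustrative names only (pavgb averages as (a + b + 1) >> 1):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative scalar equivalents of the vp9_put/avg full-pel copies above.
     * The SIMD versions move whole rows per register; this sketch is per-pixel. */
    static void put_fpel_c(uint8_t *dst, ptrdiff_t dstride,
                           const uint8_t *src, ptrdiff_t sstride,
                           int w, int h)
    {
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++)
                dst[x] = src[x];                     /* plain copy */
            dst += dstride;
            src += sstride;
        }
    }

    static void avg_fpel_c(uint8_t *dst, ptrdiff_t dstride,
                           const uint8_t *src, ptrdiff_t sstride,
                           int w, int h)
    {
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++)
                dst[x] = (dst[x] + src[x] + 1) >> 1; /* same rounding as pavgb */
            dst += dstride;
            src += sstride;
        }
    }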
diff --git a/media/ffvpx/libavcodec/x86/vp9mc_16bpp.asm b/media/ffvpx/libavcodec/x86/vp9mc_16bpp.asm
new file mode 100644
index 0000000000..9a462eaf80
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vp9mc_16bpp.asm
@@ -0,0 +1,431 @@
+;******************************************************************************
+;* VP9 MC SIMD optimizations
+;*
+;* Copyright (c) 2015 Ronald S. Bultje <rsbultje gmail com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA 32
+
+pd_64: times 8 dd 64
+
+cextern pw_1023
+cextern pw_4095
+
+SECTION .text
+
+%macro filter_h4_fn 1-2 12
+cglobal vp9_%1_8tap_1d_h_4_10, 6, 6, %2, dst, dstride, src, sstride, h, filtery
+ mova m5, [pw_1023]
+.body:
+%if notcpuflag(sse4) && ARCH_X86_64
+ pxor m11, m11
+%endif
+ mova m6, [pd_64]
+ mova m7, [filteryq+ 0]
+%if ARCH_X86_64 && mmsize > 8
+ mova m8, [filteryq+32]
+ mova m9, [filteryq+64]
+ mova m10, [filteryq+96]
+%endif
+.loop:
+ movh m0, [srcq-6]
+ movh m1, [srcq-4]
+ movh m2, [srcq-2]
+ movh m3, [srcq+0]
+ movh m4, [srcq+2]
+ punpcklwd m0, m1
+ punpcklwd m2, m3
+ pmaddwd m0, m7
+%if ARCH_X86_64 && mmsize > 8
+ pmaddwd m2, m8
+%else
+ pmaddwd m2, [filteryq+32]
+%endif
+ movu m1, [srcq+4]
+ movu m3, [srcq+6]
+ paddd m0, m2
+ movu m2, [srcq+8]
+ add srcq, sstrideq
+ punpcklwd m4, m1
+ punpcklwd m3, m2
+%if ARCH_X86_64 && mmsize > 8
+ pmaddwd m4, m9
+ pmaddwd m3, m10
+%else
+ pmaddwd m4, [filteryq+64]
+ pmaddwd m3, [filteryq+96]
+%endif
+ paddd m0, m4
+ paddd m0, m3
+ paddd m0, m6
+ psrad m0, 7
+%if cpuflag(sse4)
+ packusdw m0, m0
+%else
+ packssdw m0, m0
+%endif
+%ifidn %1, avg
+ movh m1, [dstq]
+%endif
+ pminsw m0, m5
+%if notcpuflag(sse4)
+%if ARCH_X86_64
+ pmaxsw m0, m11
+%else
+ pxor m2, m2
+ pmaxsw m0, m2
+%endif
+%endif
+%ifidn %1, avg
+ pavgw m0, m1
+%endif
+ movh [dstq], m0
+ add dstq, dstrideq
+ dec hd
+ jg .loop
+ RET
+
+cglobal vp9_%1_8tap_1d_h_4_12, 6, 6, %2, dst, dstride, src, sstride, h, filtery
+ mova m5, [pw_4095]
+ jmp mangle(private_prefix %+ _ %+ vp9_%1_8tap_1d_h_4_10 %+ SUFFIX).body
+%endmacro
+
+INIT_XMM sse2
+filter_h4_fn put
+filter_h4_fn avg
+
+%macro filter_h_fn 1-2 12
+%assign %%px mmsize/2
+cglobal vp9_%1_8tap_1d_h_ %+ %%px %+ _10, 6, 6, %2, dst, dstride, src, sstride, h, filtery
+ mova m5, [pw_1023]
+.body:
+%if notcpuflag(sse4) && ARCH_X86_64
+ pxor m11, m11
+%endif
+ mova m6, [pd_64]
+ mova m7, [filteryq+ 0]
+%if ARCH_X86_64 && mmsize > 8
+ mova m8, [filteryq+32]
+ mova m9, [filteryq+64]
+ mova m10, [filteryq+96]
+%endif
+.loop:
+ movu m0, [srcq-6]
+ movu m1, [srcq-4]
+ movu m2, [srcq-2]
+ movu m3, [srcq+0]
+ movu m4, [srcq+2]
+ pmaddwd m0, m7
+ pmaddwd m1, m7
+%if ARCH_X86_64 && mmsize > 8
+ pmaddwd m2, m8
+ pmaddwd m3, m8
+ pmaddwd m4, m9
+%else
+ pmaddwd m2, [filteryq+32]
+ pmaddwd m3, [filteryq+32]
+ pmaddwd m4, [filteryq+64]
+%endif
+ paddd m0, m2
+ paddd m1, m3
+ paddd m0, m4
+ movu m2, [srcq+4]
+ movu m3, [srcq+6]
+ movu m4, [srcq+8]
+ add srcq, sstrideq
+%if ARCH_X86_64 && mmsize > 8
+ pmaddwd m2, m9
+ pmaddwd m3, m10
+ pmaddwd m4, m10
+%else
+ pmaddwd m2, [filteryq+64]
+ pmaddwd m3, [filteryq+96]
+ pmaddwd m4, [filteryq+96]
+%endif
+ paddd m1, m2
+ paddd m0, m3
+ paddd m1, m4
+ paddd m0, m6
+ paddd m1, m6
+ psrad m0, 7
+ psrad m1, 7
+%if cpuflag(sse4)
+ packusdw m0, m0
+ packusdw m1, m1
+%else
+ packssdw m0, m0
+ packssdw m1, m1
+%endif
+ punpcklwd m0, m1
+ pminsw m0, m5
+%if notcpuflag(sse4)
+%if ARCH_X86_64
+ pmaxsw m0, m11
+%else
+ pxor m2, m2
+ pmaxsw m0, m2
+%endif
+%endif
+%ifidn %1, avg
+ pavgw m0, [dstq]
+%endif
+ mova [dstq], m0
+ add dstq, dstrideq
+ dec hd
+ jg .loop
+ RET
+
+cglobal vp9_%1_8tap_1d_h_ %+ %%px %+ _12, 6, 6, %2, dst, dstride, src, sstride, h, filtery
+ mova m5, [pw_4095]
+ jmp mangle(private_prefix %+ _ %+ vp9_%1_8tap_1d_h_ %+ %%px %+ _10 %+ SUFFIX).body
+%endmacro
+
+INIT_XMM sse2
+filter_h_fn put
+filter_h_fn avg
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+filter_h_fn put
+filter_h_fn avg
+%endif
+
+%macro filter_v4_fn 1-2 12
+%if ARCH_X86_64
+cglobal vp9_%1_8tap_1d_v_4_10, 6, 8, %2, dst, dstride, src, sstride, h, filtery, src4, sstride3
+%else
+cglobal vp9_%1_8tap_1d_v_4_10, 4, 7, %2, dst, dstride, src, sstride, filtery, src4, sstride3
+ mov filteryq, r5mp
+%define hd r4mp
+%endif
+ mova m5, [pw_1023]
+.body:
+%if notcpuflag(sse4) && ARCH_X86_64
+ pxor m11, m11
+%endif
+ mova m6, [pd_64]
+ lea sstride3q, [sstrideq*3]
+ lea src4q, [srcq+sstrideq]
+ sub srcq, sstride3q
+ mova m7, [filteryq+ 0]
+%if ARCH_X86_64 && mmsize > 8
+ mova m8, [filteryq+ 32]
+ mova m9, [filteryq+ 64]
+ mova m10, [filteryq+ 96]
+%endif
+.loop:
+ ; FIXME maybe reuse loads from previous rows, or just
+ ; more generally unroll this to prevent multiple loads of
+ ; the same data?
+ movh m0, [srcq]
+ movh m1, [srcq+sstrideq]
+ movh m2, [srcq+sstrideq*2]
+ movh m3, [srcq+sstride3q]
+ add srcq, sstrideq
+ movh m4, [src4q]
+ punpcklwd m0, m1
+ punpcklwd m2, m3
+ pmaddwd m0, m7
+%if ARCH_X86_64 && mmsize > 8
+ pmaddwd m2, m8
+%else
+ pmaddwd m2, [filteryq+ 32]
+%endif
+ movh m1, [src4q+sstrideq]
+ movh m3, [src4q+sstrideq*2]
+ paddd m0, m2
+ movh m2, [src4q+sstride3q]
+ add src4q, sstrideq
+ punpcklwd m4, m1
+ punpcklwd m3, m2
+%if ARCH_X86_64 && mmsize > 8
+ pmaddwd m4, m9
+ pmaddwd m3, m10
+%else
+ pmaddwd m4, [filteryq+ 64]
+ pmaddwd m3, [filteryq+ 96]
+%endif
+ paddd m0, m4
+ paddd m0, m3
+ paddd m0, m6
+ psrad m0, 7
+%if cpuflag(sse4)
+ packusdw m0, m0
+%else
+ packssdw m0, m0
+%endif
+%ifidn %1, avg
+ movh m1, [dstq]
+%endif
+ pminsw m0, m5
+%if notcpuflag(sse4)
+%if ARCH_X86_64
+ pmaxsw m0, m11
+%else
+ pxor m2, m2
+ pmaxsw m0, m2
+%endif
+%endif
+%ifidn %1, avg
+ pavgw m0, m1
+%endif
+ movh [dstq], m0
+ add dstq, dstrideq
+ dec hd
+ jg .loop
+ RET
+
+%if ARCH_X86_64
+cglobal vp9_%1_8tap_1d_v_4_12, 6, 8, %2, dst, dstride, src, sstride, h, filtery, src4, sstride3
+%else
+cglobal vp9_%1_8tap_1d_v_4_12, 4, 7, %2, dst, dstride, src, sstride, filtery, src4, sstride3
+ mov filteryq, r5mp
+%endif
+ mova m5, [pw_4095]
+ jmp mangle(private_prefix %+ _ %+ vp9_%1_8tap_1d_v_4_10 %+ SUFFIX).body
+%endmacro
+
+INIT_XMM sse2
+filter_v4_fn put
+filter_v4_fn avg
+
+%macro filter_v_fn 1-2 13
+%assign %%px mmsize/2
+%if ARCH_X86_64
+cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _10, 6, 8, %2, dst, dstride, src, sstride, h, filtery, src4, sstride3
+%else
+cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _10, 4, 7, %2, dst, dstride, src, sstride, filtery, src4, sstride3
+ mov filteryq, r5mp
+%define hd r4mp
+%endif
+ mova m5, [pw_1023]
+.body:
+%if notcpuflag(sse4) && ARCH_X86_64
+ pxor m12, m12
+%endif
+%if ARCH_X86_64
+ mova m11, [pd_64]
+%endif
+ lea sstride3q, [sstrideq*3]
+ lea src4q, [srcq+sstrideq]
+ sub srcq, sstride3q
+ mova m7, [filteryq+ 0]
+%if ARCH_X86_64 && mmsize > 8
+ mova m8, [filteryq+ 32]
+ mova m9, [filteryq+ 64]
+ mova m10, [filteryq+ 96]
+%endif
+.loop:
+ ; FIXME maybe reuse loads from previous rows, or just
+ ; more generally unroll this to prevent multiple loads of
+ ; the same data?
+ movu m0, [srcq]
+ movu m1, [srcq+sstrideq]
+ movu m2, [srcq+sstrideq*2]
+ movu m3, [srcq+sstride3q]
+ add srcq, sstrideq
+ movu m4, [src4q]
+ SBUTTERFLY wd, 0, 1, 6
+ SBUTTERFLY wd, 2, 3, 6
+ pmaddwd m0, m7
+ pmaddwd m1, m7
+%if ARCH_X86_64 && mmsize > 8
+ pmaddwd m2, m8
+ pmaddwd m3, m8
+%else
+ pmaddwd m2, [filteryq+ 32]
+ pmaddwd m3, [filteryq+ 32]
+%endif
+ paddd m0, m2
+ paddd m1, m3
+ movu m2, [src4q+sstrideq]
+ movu m3, [src4q+sstrideq*2]
+ SBUTTERFLY wd, 4, 2, 6
+%if ARCH_X86_64 && mmsize > 8
+ pmaddwd m4, m9
+ pmaddwd m2, m9
+%else
+ pmaddwd m4, [filteryq+ 64]
+ pmaddwd m2, [filteryq+ 64]
+%endif
+ paddd m0, m4
+ paddd m1, m2
+ movu m4, [src4q+sstride3q]
+ add src4q, sstrideq
+ SBUTTERFLY wd, 3, 4, 6
+%if ARCH_X86_64 && mmsize > 8
+ pmaddwd m3, m10
+ pmaddwd m4, m10
+%else
+ pmaddwd m3, [filteryq+ 96]
+ pmaddwd m4, [filteryq+ 96]
+%endif
+ paddd m0, m3
+ paddd m1, m4
+%if ARCH_X86_64
+ paddd m0, m11
+ paddd m1, m11
+%else
+ paddd m0, [pd_64]
+ paddd m1, [pd_64]
+%endif
+ psrad m0, 7
+ psrad m1, 7
+%if cpuflag(sse4)
+ packusdw m0, m1
+%else
+ packssdw m0, m1
+%endif
+ pminsw m0, m5
+%if notcpuflag(sse4)
+%if ARCH_X86_64
+ pmaxsw m0, m12
+%else
+ pxor m2, m2
+ pmaxsw m0, m2
+%endif
+%endif
+%ifidn %1, avg
+ pavgw m0, [dstq]
+%endif
+ mova [dstq], m0
+ add dstq, dstrideq
+ dec hd
+ jg .loop
+ RET
+
+%if ARCH_X86_64
+cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _12, 6, 8, %2, dst, dstride, src, sstride, h, filtery, src4, sstride3
+%else
+cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _12, 4, 7, %2, dst, dstride, src, sstride, filtery, src4, sstride3
+ mov filteryq, r5mp
+%endif
+ mova m5, [pw_4095]
+ jmp mangle(private_prefix %+ _ %+ vp9_%1_8tap_1d_v_ %+ %%px %+ _10 %+ SUFFIX).body
+%endmacro
+
+INIT_XMM sse2
+filter_v_fn put
+filter_v_fn avg
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+filter_v_fn put
+filter_v_fn avg
+%endif
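The 10/12-bit filters above all follow the same arithmetic: eight 16-bit taps are multiply-accumulated into 32-bit lanes, biased by the pd_64 rounding constant, shifted right by 7, then clamped to the bit-depth maximum (pw_1023 for 10-bit, pw_4095 for 12-bit). A hedged scalar sketch with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative scalar version of one output pixel of the 8-tap 1-D filter
     * above. stride is 1 for the horizontal pass and the row pitch (in
     * elements) for the vertical pass. */
    static uint16_t filter_8tap_px(const uint16_t *src, ptrdiff_t stride,
                                   const int16_t filter[8], int pixel_max)
    {
        int sum = 64;                      /* rounding bias, matches pd_64 */
        for (int k = 0; k < 8; k++)
            sum += src[k * stride] * filter[k];
        sum >>= 7;                         /* matches psrad m0, 7 */
        if (sum < 0)
            sum = 0;                       /* pmaxsw with zero */
        if (sum > pixel_max)
            sum = pixel_max;               /* pminsw with pw_1023/pw_4095 */
        return sum;
    }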
diff --git a/media/ffvpx/libavcodec/x86/vpx_arith.h b/media/ffvpx/libavcodec/x86/vpx_arith.h
new file mode 100644
index 0000000000..d9e4c0dec4
--- /dev/null
+++ b/media/ffvpx/libavcodec/x86/vpx_arith.h
@@ -0,0 +1,55 @@
+/**
+ * VP5 and VP6 compatible video decoder (arith decoder)
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ * Copyright (C) 2010 Eli Friedman
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_VPX_ARITH_H
+#define AVCODEC_X86_VPX_ARITH_H
+
+#include "libavutil/x86/asm.h"
+
+#if HAVE_INLINE_ASM && HAVE_FAST_CMOV && HAVE_6REGS
+#include "libavutil/attributes.h"
+
+#define vpx_rac_get_prob vpx_rac_get_prob
+static av_always_inline int vpx_rac_get_prob(VPXRangeCoder *c, uint8_t prob)
+{
+ unsigned int code_word = vpx_rac_renorm(c);
+ unsigned int low = 1 + (((c->high - 1) * prob) >> 8);
+ unsigned int low_shift = low << 16;
+ int bit = 0;
+ c->code_word = code_word;
+
+ __asm__(
+ "subl %4, %1 \n\t"
+ "subl %3, %2 \n\t"
+ "setae %b0 \n\t"
+ "cmovb %4, %1 \n\t"
+ "cmovb %5, %2 \n\t"
+ : "+q"(bit), "+&r"(c->high), "+&r"(c->code_word)
+ : "r"(low_shift), "r"(low), "r"(code_word)
+ );
+
+ return bit;
+}
+#endif
+
+#endif /* AVCODEC_X86_VPX_ARITH_H */
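For context, the inline assembly above is a branchless version of the portable vpx_rac_get_prob; the sketch below mirrors that generic form and assumes the VPXRangeCoder and vpx_rac_renorm() declarations from libavcodec/vpx_rac.h:

    #include <stdint.h>
    #include "libavcodec/vpx_rac.h"   /* VPXRangeCoder, vpx_rac_renorm() */

    /* Branchy reference for the branchless asm above: the asm performs both
     * subtractions unconditionally and then uses SETAE/CMOVB to select the
     * results without a branch. Name is illustrative. */
    static inline int vpx_rac_get_prob_ref(VPXRangeCoder *c, uint8_t prob)
    {
        unsigned code_word = vpx_rac_renorm(c);
        unsigned low       = 1 + (((c->high - 1) * prob) >> 8);
        unsigned low_shift = low << 16;
        int      bit       = code_word >= low_shift;

        c->high      = bit ? c->high - low          : low;
        c->code_word = bit ? code_word - low_shift  : code_word;
        return bit;
    }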
diff --git a/media/ffvpx/libavcodec/xiph.c b/media/ffvpx/libavcodec/xiph.c
new file mode 100644
index 0000000000..218b0813e9
--- /dev/null
+++ b/media/ffvpx/libavcodec/xiph.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2007 The FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <limits.h>
+#include "libavutil/error.h"
+#include "libavutil/intreadwrite.h"
+#include "xiph.h"
+
+int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size,
+ int first_header_size, const uint8_t *header_start[3],
+ int header_len[3])
+{
+ int i;
+
+ if (extradata_size >= 6 && AV_RB16(extradata) == first_header_size) {
+ int overall_len = 6;
+ for (i=0; i<3; i++) {
+ header_len[i] = AV_RB16(extradata);
+ extradata += 2;
+ header_start[i] = extradata;
+ extradata += header_len[i];
+ if (overall_len > extradata_size - header_len[i])
+ return AVERROR_INVALIDDATA;
+ overall_len += header_len[i];
+ }
+ } else if (extradata_size >= 3 && extradata_size < INT_MAX - 0x1ff && extradata[0] == 2) {
+ int overall_len = 3;
+ extradata++;
+ for (i=0; i<2; i++, extradata++) {
+ header_len[i] = 0;
+ for (; overall_len < extradata_size && *extradata==0xff; extradata++) {
+ header_len[i] += 0xff;
+ overall_len += 0xff + 1;
+ }
+ header_len[i] += *extradata;
+ overall_len += *extradata;
+ if (overall_len > extradata_size)
+ return AVERROR_INVALIDDATA;
+ }
+ header_len[2] = extradata_size - overall_len;
+ header_start[0] = extradata;
+ header_start[1] = header_start[0] + header_len[0];
+ header_start[2] = header_start[1] + header_len[1];
+ } else {
+ return -1;
+ }
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/xiph.h b/media/ffvpx/libavcodec/xiph.h
new file mode 100644
index 0000000000..4ab2469528
--- /dev/null
+++ b/media/ffvpx/libavcodec/xiph.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2007 The FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_XIPH_H
+#define AVCODEC_XIPH_H
+
+#include <stdint.h>
+
+/**
+ * Split a single extradata buffer into the three headers that most
+ * Xiph codecs (e.g. Theora and Vorbis) use.
+ * Works with both Matroska's packing and lavc's packing.
+ *
+ * @param[in] extradata The single chunk that combines all three headers
+ * @param[in] extradata_size The size of the extradata buffer
+ * @param[in] first_header_size The size of the first header, used to
+ * differentiate between the Matroska packing and lavc packing.
+ * @param[out] header_start Pointers to the start of the three separate headers.
+ * @param[out] header_len The sizes of each of the three headers.
+ * @return Zero on success, a negative value on error.
+ */
+int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size,
+ int first_header_size, const uint8_t *header_start[3],
+ int header_len[3]);
+
+#endif /* AVCODEC_XIPH_H */
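A hedged usage sketch of the splitter declared above; the first-header size of 30 is what Vorbis decoders typically pass (other codecs use their own value), and the wrapper name is illustrative:

    #include <stdint.h>
    #include "libavcodec/xiph.h"

    /* Sketch: split codec extradata (e.g. Vorbis-in-Matroska) into its three
     * headers. On success, header_start[]/header_len[] describe the id,
     * comment and setup headers, in that order. */
    static int split_example(const uint8_t *extradata, int extradata_size)
    {
        const uint8_t *header_start[3];
        int header_len[3];
        int ret = avpriv_split_xiph_headers(extradata, extradata_size, 30,
                                            header_start, header_len);
        if (ret < 0)
            return ret;        /* malformed extradata */
        return 0;
    }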
diff --git a/media/ffvpx/libavcodec/xvididct.h b/media/ffvpx/libavcodec/xvididct.h
new file mode 100644
index 0000000000..e0bc1a2b91
--- /dev/null
+++ b/media/ffvpx/libavcodec/xvididct.h
@@ -0,0 +1,36 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_XVIDIDCT_H
+#define AVCODEC_XVIDIDCT_H
+
+#include <stdint.h>
+
+#include "avcodec.h"
+#include "idctdsp.h"
+
+void ff_xvid_idct(int16_t *const in);
+
+void ff_xvid_idct_init(IDCTDSPContext *c, AVCodecContext *avctx);
+
+void ff_xvid_idct_init_x86(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+void ff_xvid_idct_init_mips(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
+
+#endif /* AVCODEC_XVIDIDCT_H */
diff --git a/media/ffvpx/libavutil/aarch64/asm.S b/media/ffvpx/libavutil/aarch64/asm.S
new file mode 100644
index 0000000000..a7782415d7
--- /dev/null
+++ b/media/ffvpx/libavutil/aarch64/asm.S
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#ifdef __ELF__
+# define ELF
+#else
+# define ELF #
+#endif
+
+#if HAVE_AS_FUNC
+# define FUNC
+#else
+# define FUNC #
+#endif
+
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+
+/* Support macros for
+ * - Armv8.3-A Pointer Authentication and
+ * - Armv8.5-A Branch Target Identification
+ * features which require emitting a .note.gnu.property section with the
+ * appropriate architecture-dependent feature bits set.
+ *
+ * |AARCH64_SIGN_LINK_REGISTER| and |AARCH64_VALIDATE_LINK_REGISTER| expand to
+ * PACIxSP and AUTIxSP, respectively. |AARCH64_SIGN_LINK_REGISTER| should be
+ * used immediately before saving the LR register (x30) to the stack.
+ * |AARCH64_VALIDATE_LINK_REGISTER| should be used immediately after restoring
+ * it. Note |AARCH64_SIGN_LINK_REGISTER|'s modifications to LR must be undone
+ * with |AARCH64_VALIDATE_LINK_REGISTER| before RET. The SP register must also
+ * have the same value at the two points. For example:
+ *
+ * .global f
+ * f:
+ * AARCH64_SIGN_LINK_REGISTER
+ * stp x29, x30, [sp, #-96]!
+ * mov x29, sp
+ * ...
+ * ldp x29, x30, [sp], #96
+ * AARCH64_VALIDATE_LINK_REGISTER
+ * ret
+ *
+ * |AARCH64_VALID_CALL_TARGET| expands to BTI 'c'. Either it, or
+ * |AARCH64_SIGN_LINK_REGISTER|, must be used at every point that may be an
+ * indirect call target. In particular, all symbols exported from a file must
+ * begin with one of these macros. For example, a leaf function that does not
+ * save LR can instead use |AARCH64_VALID_CALL_TARGET|:
+ *
+ * .globl return_zero
+ * return_zero:
+ * AARCH64_VALID_CALL_TARGET
+ * mov x0, #0
+ * ret
+ *
+ * A non-leaf function which does not immediately save LR may need both macros
+ * because |AARCH64_SIGN_LINK_REGISTER| appears late. For example, the function
+ * may jump to an alternate implementation before setting up the stack:
+ *
+ * .globl with_early_jump
+ * with_early_jump:
+ * AARCH64_VALID_CALL_TARGET
+ * cmp x0, #128
+ * b.lt .Lwith_early_jump_128
+ * AARCH64_SIGN_LINK_REGISTER
+ * stp x29, x30, [sp, #-96]!
+ * mov x29, sp
+ * ...
+ * ldp x29, x30, [sp], #96
+ * AARCH64_VALIDATE_LINK_REGISTER
+ * ret
+ *
+ * .Lwith_early_jump_128:
+ * ...
+ * ret
+ *
+ * These annotations are only required with indirect calls. Private symbols that
+ * are only the target of direct calls do not require annotations. Also note
+ * that |AARCH64_VALID_CALL_TARGET| is only valid for indirect calls (BLR), not
+ * indirect jumps (BR). Indirect jumps in assembly are supported through
+ * |AARCH64_VALID_JUMP_TARGET|. Landing Pads which shall serve for jumps and
+ * calls can be created using |AARCH64_VALID_JUMP_CALL_TARGET|.
+ *
+ * Although not necessary, it is safe to use these macros in 32-bit ARM
+ * assembly. This may be used to simplify dual 32-bit and 64-bit files.
+ *
+ * References:
+ * - "ELF for the Arm® 64-bit Architecture"
+ * https://github.com/ARM-software/abi-aa/blob/master/aaelf64/aaelf64.rst
+ * - "Providing protection for complex software"
+ * https://developer.arm.com/architectures/learn-the-architecture/providing-protection-for-complex-software
+ */
+#if defined(__ARM_FEATURE_BTI_DEFAULT) && (__ARM_FEATURE_BTI_DEFAULT == 1)
+# define GNU_PROPERTY_AARCH64_BTI (1 << 0) // Has BTI
+# define AARCH64_VALID_CALL_TARGET hint #34 // BTI 'c'
+# define AARCH64_VALID_JUMP_TARGET hint #38 // BTI 'j'
+#else
+# define GNU_PROPERTY_AARCH64_BTI 0 // No BTI
+# define AARCH64_VALID_CALL_TARGET
+# define AARCH64_VALID_JUMP_TARGET
+#endif
+
+#if defined(__ARM_FEATURE_PAC_DEFAULT)
+# if ((__ARM_FEATURE_PAC_DEFAULT & (1 << 0)) != 0) // authentication using key A
+# define AARCH64_SIGN_LINK_REGISTER paciasp
+# define AARCH64_VALIDATE_LINK_REGISTER autiasp
+# elif ((__ARM_FEATURE_PAC_DEFAULT & (1 << 1)) != 0) // authentication using key B
+# define AARCH64_SIGN_LINK_REGISTER pacibsp
+# define AARCH64_VALIDATE_LINK_REGISTER autibsp
+# else
+# error Pointer authentication defines no valid key!
+# endif
+# if ((__ARM_FEATURE_PAC_DEFAULT & (1 << 2)) != 0)
+# error Authentication of leaf functions is enabled but not supported in FFmpeg!
+# endif
+# define GNU_PROPERTY_AARCH64_PAC (1 << 1)
+#else
+# define GNU_PROPERTY_AARCH64_PAC 0
+# define AARCH64_SIGN_LINK_REGISTER
+# define AARCH64_VALIDATE_LINK_REGISTER
+#endif
+
+
+#if (GNU_PROPERTY_AARCH64_BTI != 0 || GNU_PROPERTY_AARCH64_PAC != 0) && defined(__ELF__)
+ .pushsection .note.gnu.property, "a"
+ .balign 8
+ .long 4
+ .long 0x10
+ .long 0x5
+ .asciz "GNU"
+ .long 0xc0000000 /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
+ .long 4
+ .long (GNU_PROPERTY_AARCH64_BTI | GNU_PROPERTY_AARCH64_PAC)
+ .long 0
+ .popsection
+#endif
+
+.macro function name, export=0, align=2
+ .macro endfunc
+ELF .size \name, . - \name
+FUNC .endfunc
+ .purgem endfunc
+ .endm
+ .text
+ .align \align
+ .if \export
+ .global EXTERN_ASM\name
+ELF .type EXTERN_ASM\name, %function
+FUNC .func EXTERN_ASM\name
+EXTERN_ASM\name:
+ AARCH64_VALID_CALL_TARGET
+ .else
+ELF .type \name, %function
+FUNC .func \name
+\name:
+ .endif
+.endm
+
+.macro const name, align=2, relocate=0
+ .macro endconst
+ELF .size \name, . - \name
+ .purgem endconst
+ .endm
+#if HAVE_SECTION_DATA_REL_RO
+.if \relocate
+ .section .data.rel.ro
+.else
+ .section .rodata
+.endif
+#elif defined(_WIN32)
+ .section .rdata
+#elif !defined(__MACH__)
+ .section .rodata
+#else
+ .const_data
+#endif
+ .align \align
+\name:
+.endm
+
+.macro movrel rd, val, offset=0
+#if CONFIG_PIC && defined(__APPLE__)
+ .if \offset < 0
+ adrp \rd, \val@PAGE
+ add \rd, \rd, \val@PAGEOFF
+ sub \rd, \rd, -(\offset)
+ .else
+ adrp \rd, \val+(\offset)@PAGE
+ add \rd, \rd, \val+(\offset)@PAGEOFF
+ .endif
+#elif CONFIG_PIC && defined(_WIN32)
+ .if \offset < 0
+ adrp \rd, \val
+ add \rd, \rd, :lo12:\val
+ sub \rd, \rd, -(\offset)
+ .else
+ adrp \rd, \val+(\offset)
+ add \rd, \rd, :lo12:\val+(\offset)
+ .endif
+#elif CONFIG_PIC
+# if __has_feature(hwaddress_sanitizer)
+ adrp \rd, :pg_hi21_nc:\val+(\offset)
+# else
+ adrp \rd, \val+(\offset)
+# endif
+ add \rd, \rd, :lo12:\val+(\offset)
+#else
+ ldr \rd, =\val+\offset
+#endif
+.endm
+
+#define GLUE(a, b) a ## b
+#define JOIN(a, b) GLUE(a, b)
+#define X(s) JOIN(EXTERN_ASM, s)
+
+#define x18 do_not_use_x18
+#define w18 do_not_use_w18
diff --git a/media/ffvpx/libavutil/aarch64/bswap.h b/media/ffvpx/libavutil/aarch64/bswap.h
new file mode 100644
index 0000000000..7abca657ba
--- /dev/null
+++ b/media/ffvpx/libavutil/aarch64/bswap.h
@@ -0,0 +1,56 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AARCH64_BSWAP_H
+#define AVUTIL_AARCH64_BSWAP_H
+
+#include <stdint.h>
+#include "config.h"
+#include "libavutil/attributes.h"
+
+#if HAVE_INLINE_ASM
+
+#define av_bswap16 av_bswap16
+static av_always_inline av_const unsigned av_bswap16(unsigned x)
+{
+ unsigned y;
+
+ __asm__("rev16 %w0, %w1" : "=r"(y) : "r"(x));
+ return y;
+}
+
+#define av_bswap32 av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ uint32_t y;
+
+ __asm__("rev %w0, %w1" : "=r"(y) : "r"(x));
+ return y;
+}
+
+#define av_bswap64 av_bswap64
+static av_always_inline av_const uint64_t av_bswap64(uint64_t x)
+{
+ uint64_t y;
+
+ __asm__("rev %0, %1" : "=r"(y) : "r"(x));
+ return y;
+}
+
+#endif /* HAVE_INLINE_ASM */
+#endif /* AVUTIL_AARCH64_BSWAP_H */
diff --git a/media/ffvpx/libavutil/aarch64/cpu.c b/media/ffvpx/libavutil/aarch64/cpu.c
new file mode 100644
index 0000000000..cc641da576
--- /dev/null
+++ b/media/ffvpx/libavutil/aarch64/cpu.c
@@ -0,0 +1,38 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/cpu.h"
+#include "libavutil/cpu_internal.h"
+#include "config.h"
+
+int ff_get_cpu_flags_aarch64(void)
+{
+ return AV_CPU_FLAG_ARMV8 * HAVE_ARMV8 |
+ AV_CPU_FLAG_NEON * HAVE_NEON |
+ AV_CPU_FLAG_VFP * HAVE_VFP;
+}
+
+size_t ff_get_cpu_max_align_aarch64(void)
+{
+ int flags = av_get_cpu_flags();
+
+ if (flags & AV_CPU_FLAG_NEON)
+ return 16;
+
+ return 8;
+}
diff --git a/media/ffvpx/libavutil/aarch64/cpu.h b/media/ffvpx/libavutil/aarch64/cpu.h
new file mode 100644
index 0000000000..2ee3f9323a
--- /dev/null
+++ b/media/ffvpx/libavutil/aarch64/cpu.h
@@ -0,0 +1,29 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AARCH64_CPU_H
+#define AVUTIL_AARCH64_CPU_H
+
+#include "libavutil/cpu.h"
+#include "libavutil/cpu_internal.h"
+
+#define have_armv8(flags) CPUEXT(flags, ARMV8)
+#define have_neon(flags) CPUEXT(flags, NEON)
+#define have_vfp(flags) CPUEXT(flags, VFP)
+
+#endif /* AVUTIL_AARCH64_CPU_H */
diff --git a/media/ffvpx/libavutil/aarch64/float_dsp_init.c b/media/ffvpx/libavutil/aarch64/float_dsp_init.c
new file mode 100644
index 0000000000..4325071821
--- /dev/null
+++ b/media/ffvpx/libavutil/aarch64/float_dsp_init.c
@@ -0,0 +1,69 @@
+/*
+ * ARM NEON optimised Float DSP functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/float_dsp.h"
+#include "cpu.h"
+
+void ff_vector_fmul_neon(float *dst, const float *src0, const float *src1,
+ int len);
+
+void ff_vector_fmac_scalar_neon(float *dst, const float *src, float mul,
+ int len);
+
+void ff_vector_fmul_scalar_neon(float *dst, const float *src, float mul,
+ int len);
+
+void ff_vector_dmul_scalar_neon(double *dst, const double *src, double mul,
+ int len);
+
+void ff_vector_fmul_window_neon(float *dst, const float *src0,
+ const float *src1, const float *win, int len);
+
+void ff_vector_fmul_add_neon(float *dst, const float *src0, const float *src1,
+ const float *src2, int len);
+
+void ff_vector_fmul_reverse_neon(float *dst, const float *src0,
+ const float *src1, int len);
+
+void ff_butterflies_float_neon(float *v1, float *v2, int len);
+
+float ff_scalarproduct_float_neon(const float *v1, const float *v2, int len);
+
+av_cold void ff_float_dsp_init_aarch64(AVFloatDSPContext *fdsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+ fdsp->butterflies_float = ff_butterflies_float_neon;
+ fdsp->scalarproduct_float = ff_scalarproduct_float_neon;
+ fdsp->vector_dmul_scalar = ff_vector_dmul_scalar_neon;
+ fdsp->vector_fmul = ff_vector_fmul_neon;
+ fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_neon;
+ fdsp->vector_fmul_add = ff_vector_fmul_add_neon;
+ fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_neon;
+ fdsp->vector_fmul_scalar = ff_vector_fmul_scalar_neon;
+ fdsp->vector_fmul_window = ff_vector_fmul_window_neon;
+ }
+}
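The init function above only installs function pointers; callers obtain an AVFloatDSPContext and call through it, so the NEON versions are picked up transparently. A hedged usage sketch (buffer size and names are illustrative; see float_dsp.h for the exact alignment and length constraints of each callback):

    #include "libavutil/float_dsp.h"   /* AVFloatDSPContext, avpriv_float_dsp_alloc() */
    #include "libavutil/mem.h"         /* av_free() */

    static void fdsp_example(void)
    {
        AVFloatDSPContext *fdsp = avpriv_float_dsp_alloc(0);
        /* Real callers align these buffers as documented in float_dsp.h
         * (e.g. via DECLARE_ALIGNED); plain arrays keep the sketch short.
         * The NEON loop above consumes 16 floats per iteration. */
        float a[64], b[64], dst[64];

        if (!fdsp)
            return;
        for (int i = 0; i < 64; i++) {
            a[i] = i * 0.5f;
            b[i] = 2.0f;
        }
        fdsp->vector_fmul(dst, a, b, 64);   /* dst[i] = a[i] * b[i] */
        av_free(fdsp);
    }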
diff --git a/media/ffvpx/libavutil/aarch64/float_dsp_neon.S b/media/ffvpx/libavutil/aarch64/float_dsp_neon.S
new file mode 100644
index 0000000000..02d790c0cc
--- /dev/null
+++ b/media/ffvpx/libavutil/aarch64/float_dsp_neon.S
@@ -0,0 +1,202 @@
+/*
+ * ARM NEON optimised Float DSP functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "asm.S"
+
+function ff_vector_fmul_neon, export=1
+1: subs w3, w3, #16
+ ld1 {v0.4S, v1.4S}, [x1], #32
+ ld1 {v2.4S, v3.4S}, [x1], #32
+ ld1 {v4.4S, v5.4S}, [x2], #32
+ ld1 {v6.4S, v7.4S}, [x2], #32
+ fmul v16.4S, v0.4S, v4.4S
+ fmul v17.4S, v1.4S, v5.4S
+ fmul v18.4S, v2.4S, v6.4S
+ fmul v19.4S, v3.4S, v7.4S
+ st1 {v16.4S, v17.4S}, [x0], #32
+ st1 {v18.4S, v19.4S}, [x0], #32
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vector_fmac_scalar_neon, export=1
+ mov x3, #-32
+1: subs w2, w2, #16
+ ld1 {v16.4S, v17.4S}, [x0], #32
+ ld1 {v18.4S, v19.4S}, [x0], x3
+ ld1 {v4.4S, v5.4S}, [x1], #32
+ ld1 {v6.4S, v7.4S}, [x1], #32
+ fmla v16.4S, v4.4S, v0.S[0]
+ fmla v17.4S, v5.4S, v0.S[0]
+ fmla v18.4S, v6.4S, v0.S[0]
+ fmla v19.4S, v7.4S, v0.S[0]
+ st1 {v16.4S, v17.4S}, [x0], #32
+ st1 {v18.4S, v19.4S}, [x0], #32
+ b.ne 1b
+ ret
+endfunc
+
+function ff_vector_fmul_scalar_neon, export=1
+ mov w4, #15
+ bics w3, w2, w4
+ dup v16.4S, v0.S[0]
+ b.eq 3f
+ ld1 {v0.4S, v1.4S}, [x1], #32
+1: subs w3, w3, #16
+ fmul v0.4S, v0.4S, v16.4S
+ ld1 {v2.4S, v3.4S}, [x1], #32
+ fmul v1.4S, v1.4S, v16.4S
+ fmul v2.4S, v2.4S, v16.4S
+ st1 {v0.4S, v1.4S}, [x0], #32
+ fmul v3.4S, v3.4S, v16.4S
+ b.eq 2f
+ ld1 {v0.4S, v1.4S}, [x1], #32
+ st1 {v2.4S, v3.4S}, [x0], #32
+ b 1b
+2: ands w2, w2, #15
+ st1 {v2.4S, v3.4S}, [x0], #32
+ b.eq 4f
+3: ld1 {v0.4S}, [x1], #16
+ fmul v0.4S, v0.4S, v16.4S
+ st1 {v0.4S}, [x0], #16
+ subs w2, w2, #4
+ b.gt 3b
+4: ret
+endfunc
+
+function ff_vector_dmul_scalar_neon, export=1
+ dup v16.2D, v0.D[0]
+ ld1 {v0.2D, v1.2D}, [x1], #32
+1: subs w2, w2, #8
+ fmul v0.2D, v0.2D, v16.2D
+ ld1 {v2.2D, v3.2D}, [x1], #32
+ fmul v1.2D, v1.2D, v16.2D
+ fmul v2.2D, v2.2D, v16.2D
+ st1 {v0.2D, v1.2D}, [x0], #32
+ fmul v3.2D, v3.2D, v16.2D
+ ld1 {v0.2D, v1.2D}, [x1], #32
+ st1 {v2.2D, v3.2D}, [x0], #32
+ b.gt 1b
+ ret
+endfunc
+
+function ff_vector_fmul_window_neon, export=1
+ sxtw x4, w4 // len
+ sub x2, x2, #8
+ sub x5, x4, #2
+ add x2, x2, x5, lsl #2 // src1 + 4 * (len - 4)
+ add x6, x3, x5, lsl #3 // win + 8 * (len - 2)
+ add x5, x0, x5, lsl #3 // dst + 8 * (len - 2)
+ mov x7, #-16
+ ld1 {v0.4S}, [x1], #16 // s0
+ ld1 {v2.4S}, [x3], #16 // wi
+ ld1 {v1.4S}, [x2], x7 // s1
+1: ld1 {v3.4S}, [x6], x7 // wj
+ subs x4, x4, #4
+ fmul v17.4S, v0.4S, v2.4S // s0 * wi
+ rev64 v4.4S, v1.4S
+ rev64 v5.4S, v3.4S
+ rev64 v17.4S, v17.4S
+ ext v4.16B, v4.16B, v4.16B, #8 // s1_r
+ ext v5.16B, v5.16B, v5.16B, #8 // wj_r
+ ext v17.16B, v17.16B, v17.16B, #8 // (s0 * wi)_rev
+ fmul v16.4S, v0.4S, v5.4S // s0 * wj_r
+ fmla v17.4S, v1.4S, v3.4S // (s0 * wi)_rev + s1 * wj
+ b.eq 2f
+ ld1 {v0.4S}, [x1], #16
+ fmls v16.4S, v4.4S, v2.4S // s0 * wj_r - s1_r * wi
+ st1 {v17.4S}, [x5], x7
+ ld1 {v2.4S}, [x3], #16
+ ld1 {v1.4S}, [x2], x7
+ st1 {v16.4S}, [x0], #16
+ b 1b
+2:
+ fmls v16.4S, v4.4S, v2.4S // s0 * wj_r - s1_r * wi
+ st1 {v17.4S}, [x5], x7
+ st1 {v16.4S}, [x0], #16
+ ret
+endfunc
+
+function ff_vector_fmul_add_neon, export=1
+ ld1 {v0.4S, v1.4S}, [x1], #32
+ ld1 {v2.4S, v3.4S}, [x2], #32
+ ld1 {v4.4S, v5.4S}, [x3], #32
+1: subs w4, w4, #8
+ fmla v4.4S, v0.4S, v2.4S
+ fmla v5.4S, v1.4S, v3.4S
+ b.eq 2f
+ ld1 {v0.4S, v1.4S}, [x1], #32
+ ld1 {v2.4S, v3.4S}, [x2], #32
+ st1 {v4.4S, v5.4S}, [x0], #32
+ ld1 {v4.4S, v5.4S}, [x3], #32
+ b 1b
+2: st1 {v4.4S, v5.4S}, [x0], #32
+ ret
+endfunc
+
+function ff_vector_fmul_reverse_neon, export=1
+ sxtw x3, w3
+ add x2, x2, x3, lsl #2
+ sub x2, x2, #32
+ mov x4, #-32
+ ld1 {v2.4S, v3.4S}, [x2], x4
+ ld1 {v0.4S, v1.4S}, [x1], #32
+1: subs x3, x3, #8
+ rev64 v3.4S, v3.4S
+ rev64 v2.4S, v2.4S
+ ext v3.16B, v3.16B, v3.16B, #8
+ ext v2.16B, v2.16B, v2.16B, #8
+ fmul v16.4S, v0.4S, v3.4S
+ fmul v17.4S, v1.4S, v2.4S
+ b.eq 2f
+ ld1 {v2.4S, v3.4S}, [x2], x4
+ ld1 {v0.4S, v1.4S}, [x1], #32
+ st1 {v16.4S, v17.4S}, [x0], #32
+ b 1b
+2: st1 {v16.4S, v17.4S}, [x0], #32
+ ret
+endfunc
+
+function ff_butterflies_float_neon, export=1
+1: ld1 {v0.4S}, [x0]
+ ld1 {v1.4S}, [x1]
+ subs w2, w2, #4
+ fsub v2.4S, v0.4S, v1.4S
+ fadd v3.4S, v0.4S, v1.4S
+ st1 {v2.4S}, [x1], #16
+ st1 {v3.4S}, [x0], #16
+ b.gt 1b
+ ret
+endfunc
+
+function ff_scalarproduct_float_neon, export=1
+ movi v2.4S, #0
+1: ld1 {v0.4S}, [x0], #16
+ ld1 {v1.4S}, [x1], #16
+ subs w2, w2, #4
+ fmla v2.4S, v0.4S, v1.4S
+ b.gt 1b
+ faddp v0.4S, v2.4S, v2.4S
+ faddp s0, v0.2S
+ ret
+endfunc
diff --git a/media/ffvpx/libavutil/aarch64/moz.build b/media/ffvpx/libavutil/aarch64/moz.build
new file mode 100644
index 0000000000..9233abf768
--- /dev/null
+++ b/media/ffvpx/libavutil/aarch64/moz.build
@@ -0,0 +1,19 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ 'cpu.c',
+ 'float_dsp_init.c',
+ 'float_dsp_neon.S',
+]
+
+if CONFIG['OS_ARCH'] == 'WINNT':
+ USE_INTEGRATED_CLANGCL_AS = True
+ DEFINES['EXTERN_ASM'] = ''
+
+FINAL_LIBRARY = 'mozavutil'
+
+include('/media/ffvpx/ffvpxcommon.mozbuild')
diff --git a/media/ffvpx/libavutil/aarch64/timer.h b/media/ffvpx/libavutil/aarch64/timer.h
new file mode 100644
index 0000000000..8b28fd354c
--- /dev/null
+++ b/media/ffvpx/libavutil/aarch64/timer.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AARCH64_TIMER_H
+#define AVUTIL_AARCH64_TIMER_H
+
+#include <stdint.h>
+#include "config.h"
+
+#if defined(__APPLE__)
+
+#include <mach/mach_time.h>
+
+#define AV_READ_TIME mach_absolute_time
+
+#elif HAVE_INLINE_ASM
+
+#define AV_READ_TIME read_time
+
+static inline uint64_t read_time(void)
+{
+ uint64_t cycle_counter;
+ __asm__ volatile(
+ "isb \t\n"
+ "mrs %0, pmccntr_el0 "
+ : "=r"(cycle_counter) :: "memory" );
+
+ return cycle_counter;
+}
+
+#endif /* HAVE_INLINE_ASM */
+
+#endif /* AVUTIL_AARCH64_TIMER_H */
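A hedged sketch of how AV_READ_TIME is consumed (normally via the START_TIMER/STOP_TIMER macros in libavutil/timer.h); note that reading pmccntr_el0 from user space only works if the kernel has enabled user access to the cycle counter:

    #include <stdint.h>
    #include "libavutil/timer.h"   /* pulls in the per-arch timer.h above */

    /* Sketch: time a single call of fn in cycle-counter ticks. */
    static uint64_t time_block(void (*fn)(void))
    {
        uint64_t t0 = AV_READ_TIME();
        fn();
        return AV_READ_TIME() - t0;
    }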
diff --git a/media/ffvpx/libavutil/adler32.c b/media/ffvpx/libavutil/adler32.c
new file mode 100644
index 0000000000..7124f18802
--- /dev/null
+++ b/media/ffvpx/libavutil/adler32.c
@@ -0,0 +1,96 @@
+/*
+ * Compute the Adler-32 checksum of a data stream.
+ * This is a modified version based on adler32.c from the zlib library.
+ *
+ * Copyright (C) 1995 Mark Adler
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/**
+ * @file
+ * Computes the Adler-32 checksum of a data stream
+ *
+ * This is a modified version based on adler32.c from the zlib library.
+ * @author Mark Adler
+ * @ingroup lavu_adler32
+ */
+
+#include "config.h"
+#include "adler32.h"
+#include "intreadwrite.h"
+#include "macros.h"
+
+#define BASE 65521L /* largest prime smaller than 65536 */
+
+#define DO1(buf) { s1 += *buf++; s2 += s1; }
+#define DO4(buf) DO1(buf); DO1(buf); DO1(buf); DO1(buf);
+#define DO16(buf) DO4(buf); DO4(buf); DO4(buf); DO4(buf);
+
+AVAdler av_adler32_update(AVAdler adler, const uint8_t *buf, size_t len)
+{
+ unsigned long s1 = adler & 0xffff;
+ unsigned long s2 = adler >> 16;
+
+ while (len > 0) {
+#if HAVE_FAST_64BIT && HAVE_FAST_UNALIGNED && !CONFIG_SMALL
+ unsigned len2 = FFMIN((len-1) & ~7, 23*8);
+ if (len2) {
+ uint64_t a1= 0;
+ uint64_t a2= 0;
+ uint64_t b1= 0;
+ uint64_t b2= 0;
+ len -= len2;
+ s2 += s1*len2;
+ while (len2 >= 8) {
+ uint64_t v = AV_RN64(buf);
+ a2 += a1;
+ b2 += b1;
+ a1 += v &0x00FF00FF00FF00FF;
+ b1 += (v>>8)&0x00FF00FF00FF00FF;
+ len2 -= 8;
+ buf+=8;
+ }
+
+ // We combine the 8 interleaved Adler-32 checksums without overflow.
+ // Decreasing the number of iterations would allow the code below to
+ // be simplified, but would likely be slower due to the fewer
+ // iterations of the inner loop.
+ s1 += ((a1+b1)*0x1000100010001)>>48;
+ s2 += ((((a2&0xFFFF0000FFFF)+(b2&0xFFFF0000FFFF)+((a2>>16)&0xFFFF0000FFFF)+((b2>>16)&0xFFFF0000FFFF))*0x800000008)>>32)
+#if HAVE_BIGENDIAN
+ + 2*((b1*0x1000200030004)>>48)
+ + ((a1*0x1000100010001)>>48)
+ + 2*((a1*0x0000100020003)>>48);
+#else
+ + 2*((a1*0x4000300020001)>>48)
+ + ((b1*0x1000100010001)>>48)
+ + 2*((b1*0x3000200010000)>>48);
+#endif
+ }
+#else
+ while (len > 4 && s2 < (1U << 31)) {
+ DO4(buf);
+ len -= 4;
+ }
+#endif
+ DO1(buf); len--;
+ s1 %= BASE;
+ s2 %= BASE;
+ }
+ return (s2 << 16) | s1;
+}
diff --git a/media/ffvpx/libavutil/adler32.h b/media/ffvpx/libavutil/adler32.h
new file mode 100644
index 0000000000..232d07f5fe
--- /dev/null
+++ b/media/ffvpx/libavutil/adler32.h
@@ -0,0 +1,63 @@
+/*
+ * copyright (c) 2006 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_adler32
+ * Public header for Adler-32 hash function implementation.
+ */
+
+#ifndef AVUTIL_ADLER32_H
+#define AVUTIL_ADLER32_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include "attributes.h"
+
+/**
+ * @defgroup lavu_adler32 Adler-32
+ * @ingroup lavu_hash
+ * Adler-32 hash function implementation.
+ *
+ * @{
+ */
+
+typedef uint32_t AVAdler;
+
+/**
+ * Calculate the Adler32 checksum of a buffer.
+ *
+ * Passing the return value to a subsequent av_adler32_update() call
+ * allows the checksum of multiple buffers to be calculated as though
+ * they were concatenated.
+ *
+ * @param adler initial checksum value
+ * @param buf pointer to input buffer
+ * @param len size of input buffer
+ * @return updated checksum
+ */
+AVAdler av_adler32_update(AVAdler adler, const uint8_t *buf,
+ size_t len) av_pure;
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ADLER32_H */
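A hedged usage sketch of the incremental-update property documented above; 1 is the canonical Adler-32 seed, and the helper name is illustrative:

    #include <stddef.h>
    #include <stdint.h>
    #include "libavutil/adler32.h"

    /* Sketch: hashing a buffer in two chunks gives the same result as hashing
     * it in one call. */
    static AVAdler hash_split(const uint8_t *buf, size_t len)
    {
        AVAdler a = av_adler32_update(1, buf, len / 2);
        return av_adler32_update(a, buf + len / 2, len - len / 2);
    }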
diff --git a/media/ffvpx/libavutil/arm/asm.S b/media/ffvpx/libavutil/arm/asm.S
new file mode 100644
index 0000000000..e3a8c7f065
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/asm.S
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#ifdef __ELF__
+# define ELF
+#else
+# define ELF @
+#endif
+
+#if CONFIG_THUMB
+# define A @
+# define T
+#else
+# define A
+# define T @
+#endif
+
+#if HAVE_AS_FUNC
+# define FUNC
+#else
+# define FUNC @
+#endif
+
+#if HAVE_AS_FPU_DIRECTIVE
+# define FPU
+#else
+# define FPU @
+#endif
+
+#if CONFIG_THUMB && defined(__APPLE__)
+# define TFUNC
+#else
+# define TFUNC @
+#endif
+
+#if HAVE_AS_ARCH_DIRECTIVE
+#if HAVE_NEON
+ .arch armv7-a
+#elif HAVE_ARMV6T2
+ .arch armv6t2
+#elif HAVE_ARMV6
+ .arch armv6
+#elif HAVE_ARMV5TE
+ .arch armv5te
+#endif
+#endif
+#if HAVE_AS_OBJECT_ARCH
+ELF .object_arch armv4
+#endif
+
+#if HAVE_NEON
+FPU .fpu neon
+ELF .eabi_attribute 10, 0 @ suppress Tag_FP_arch
+ELF .eabi_attribute 12, 0 @ suppress Tag_Advanced_SIMD_arch
+#elif HAVE_VFP
+FPU .fpu vfp
+ELF .eabi_attribute 10, 0 @ suppress Tag_FP_arch
+#endif
+
+ .syntax unified
+T .thumb
+ELF .eabi_attribute 25, 1 @ Tag_ABI_align_preserved
+ELF .section .note.GNU-stack,"",%progbits @ Mark stack as non-executable
+
+.macro function name, export=0, align=2
+ .set .Lpic_idx, 0
+ .set .Lpic_gp, 0
+ .macro endfunc
+ .if .Lpic_idx
+ .align 2
+ .altmacro
+ put_pic %(.Lpic_idx - 1)
+ .noaltmacro
+ .endif
+ .if .Lpic_gp
+ .unreq gp
+ .endif
+ELF .size \name, . - \name
+FUNC .endfunc
+ .purgem endfunc
+ .endm
+ .text
+ .align \align
+ .if \export
+ .global EXTERN_ASM\name
+ELF .type EXTERN_ASM\name, %function
+FUNC .func EXTERN_ASM\name
+TFUNC .thumb_func EXTERN_ASM\name
+EXTERN_ASM\name:
+ .else
+ELF .type \name, %function
+FUNC .func \name
+TFUNC .thumb_func \name
+\name:
+ .endif
+.endm
+
+.macro const name, align=2, relocate=0
+ .macro endconst
+ELF .size \name, . - \name
+ .purgem endconst
+ .endm
+#if HAVE_SECTION_DATA_REL_RO
+.if \relocate
+ .section .data.rel.ro
+.else
+ .section .rodata
+.endif
+#elif defined(_WIN32)
+ .section .rdata
+#elif !defined(__MACH__)
+ .section .rodata
+#else
+ .const_data
+#endif
+ .align \align
+\name:
+.endm
+
+#if !HAVE_ARMV6T2_EXTERNAL
+.macro movw rd, val
+ mov \rd, \val & 255
+ orr \rd, \val & ~255
+.endm
+#endif
+
+.macro mov32 rd, val
+#if HAVE_ARMV6T2_EXTERNAL
+ movw \rd, #(\val) & 0xffff
+ .if (\val) >> 16
+ movt \rd, #(\val) >> 16
+ .endif
+#else
+ ldr \rd, =\val
+#endif
+.endm
+
+.macro put_pic num
+ put_pic_\num
+.endm
+
+.macro do_def_pic num, val, label
+ .macro put_pic_\num
+ .if \num
+ .altmacro
+ put_pic %(\num - 1)
+ .noaltmacro
+ .endif
+\label: .word \val
+ .purgem put_pic_\num
+ .endm
+.endm
+
+.macro def_pic val, label
+ .altmacro
+ do_def_pic %.Lpic_idx, \val, \label
+ .noaltmacro
+ .set .Lpic_idx, .Lpic_idx + 1
+.endm
+
+.macro ldpic rd, val, indir=0
+ ldr \rd, .Lpicoff\@
+.Lpic\@:
+ .if \indir
+A ldr \rd, [pc, \rd]
+T add \rd, pc
+T ldr \rd, [\rd]
+ .else
+ add \rd, pc
+ .endif
+ def_pic \val - (.Lpic\@ + (8 >> CONFIG_THUMB)), .Lpicoff\@
+.endm
+
+.macro movrel rd, val
+#if CONFIG_PIC
+ ldpic \rd, \val
+#elif HAVE_ARMV6T2_EXTERNAL && !defined(__APPLE__)
+ movw \rd, #:lower16:\val
+ movt \rd, #:upper16:\val
+#else
+ ldr \rd, =\val
+#endif
+.endm
+
+.macro movrelx rd, val, gp
+ .ifc \rd,\gp
+ .error "movrelx needs two distinct registers"
+ .endif
+ .ifc \rd\()_\gp,r12_
+ .warning "movrelx rd=\rd without explicit set gp"
+ .endif
+ .ifc \rd\()_\gp,ip_
+ .warning "movrelx rd=\rd without explicit set gp"
+ .endif
+#if CONFIG_PIC && defined(__ELF__)
+ .ifnb \gp
+ .if .Lpic_gp
+ .unreq gp
+ .endif
+ gp .req \gp
+ ldpic gp, _GLOBAL_OFFSET_TABLE_
+ .elseif !.Lpic_gp
+ gp .req r12
+ ldpic gp, _GLOBAL_OFFSET_TABLE_
+ .endif
+ .set .Lpic_gp, 1
+ ldr \rd, .Lpicoff\@
+ ldr \rd, [gp, \rd]
+ def_pic \val(GOT), .Lpicoff\@
+#elif CONFIG_PIC && defined(__APPLE__)
+ ldpic \rd, .Lpic\@, indir=1
+ .non_lazy_symbol_pointer
+.Lpic\@:
+ .indirect_symbol \val
+ .word 0
+ .text
+#else
+ movrel \rd, \val
+#endif
+.endm
+
+.macro add_sh rd, rn, rm, sh:vararg
+A add \rd, \rn, \rm, \sh
+T mov \rm, \rm, \sh
+T add \rd, \rn, \rm
+.endm
+
+.macro ldr_pre rt, rn, rm:vararg
+A ldr \rt, [\rn, \rm]!
+T add \rn, \rn, \rm
+T ldr \rt, [\rn]
+.endm
+
+.macro ldr_dpre rt, rn, rm:vararg
+A ldr \rt, [\rn, -\rm]!
+T sub \rn, \rn, \rm
+T ldr \rt, [\rn]
+.endm
+
+.macro ldr_nreg rt, rn, rm:vararg
+A ldr \rt, [\rn, -\rm]
+T sub \rt, \rn, \rm
+T ldr \rt, [\rt]
+.endm
+
+.macro ldr_post rt, rn, rm:vararg
+A ldr \rt, [\rn], \rm
+T ldr \rt, [\rn]
+T add \rn, \rn, \rm
+.endm
+
+.macro ldrc_pre cc, rt, rn, rm:vararg
+A ldr\cc \rt, [\rn, \rm]!
+T itt \cc
+T add\cc \rn, \rn, \rm
+T ldr\cc \rt, [\rn]
+.endm
+
+.macro ldrd_reg rt, rt2, rn, rm
+A ldrd \rt, \rt2, [\rn, \rm]
+T add \rt, \rn, \rm
+T ldrd \rt, \rt2, [\rt]
+.endm
+
+.macro ldrd_post rt, rt2, rn, rm
+A ldrd \rt, \rt2, [\rn], \rm
+T ldrd \rt, \rt2, [\rn]
+T add \rn, \rn, \rm
+.endm
+
+.macro ldrh_pre rt, rn, rm
+A ldrh \rt, [\rn, \rm]!
+T add \rn, \rn, \rm
+T ldrh \rt, [\rn]
+.endm
+
+.macro ldrh_dpre rt, rn, rm
+A ldrh \rt, [\rn, -\rm]!
+T sub \rn, \rn, \rm
+T ldrh \rt, [\rn]
+.endm
+
+.macro ldrh_post rt, rn, rm
+A ldrh \rt, [\rn], \rm
+T ldrh \rt, [\rn]
+T add \rn, \rn, \rm
+.endm
+
+.macro ldrb_post rt, rn, rm
+A ldrb \rt, [\rn], \rm
+T ldrb \rt, [\rn]
+T add \rn, \rn, \rm
+.endm
+
+.macro str_post rt, rn, rm:vararg
+A str \rt, [\rn], \rm
+T str \rt, [\rn]
+T add \rn, \rn, \rm
+.endm
+
+.macro strb_post rt, rn, rm:vararg
+A strb \rt, [\rn], \rm
+T strb \rt, [\rn]
+T add \rn, \rn, \rm
+.endm
+
+.macro strd_post rt, rt2, rn, rm
+A strd \rt, \rt2, [\rn], \rm
+T strd \rt, \rt2, [\rn]
+T add \rn, \rn, \rm
+.endm
+
+.macro strh_pre rt, rn, rm
+A strh \rt, [\rn, \rm]!
+T add \rn, \rn, \rm
+T strh \rt, [\rn]
+.endm
+
+.macro strh_dpre rt, rn, rm
+A strh \rt, [\rn, -\rm]!
+T sub \rn, \rn, \rm
+T strh \rt, [\rn]
+.endm
+
+.macro strh_post rt, rn, rm
+A strh \rt, [\rn], \rm
+T strh \rt, [\rn]
+T add \rn, \rn, \rm
+.endm
+
+.macro strh_dpost rt, rn, rm
+A strh \rt, [\rn], -\rm
+T strh \rt, [\rn]
+T sub \rn, \rn, \rm
+.endm
+
+#if HAVE_VFP_ARGS
+ELF .eabi_attribute 28, 1
+# define VFP
+# define NOVFP @
+#else
+# define VFP @
+# define NOVFP
+#endif
+
+#define GLUE(a, b) a ## b
+#define JOIN(a, b) GLUE(a, b)
+#define X(s) JOIN(EXTERN_ASM, s)
diff --git a/media/ffvpx/libavutil/arm/bswap.h b/media/ffvpx/libavutil/arm/bswap.h
new file mode 100644
index 0000000000..c3460e035d
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/bswap.h
@@ -0,0 +1,70 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ARM_BSWAP_H
+#define AVUTIL_ARM_BSWAP_H
+
+#include <stdint.h>
+#include "config.h"
+#include "libavutil/attributes.h"
+
+#ifdef __ARMCC_VERSION
+
+#if HAVE_ARMV6
+#define av_bswap32 av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ return __rev(x);
+}
+#endif /* HAVE_ARMV6 */
+
+#elif HAVE_INLINE_ASM
+
+#if HAVE_ARMV6_INLINE
+#define av_bswap16 av_bswap16
+static av_always_inline av_const unsigned av_bswap16(unsigned x)
+{
+ unsigned y;
+
+ __asm__("rev16 %0, %1" : "=r"(y) : "r"(x));
+ return y;
+}
+#endif
+
+#if AV_GCC_VERSION_AT_MOST(4,4)
+#define av_bswap32 av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ uint32_t y;
+#if HAVE_ARMV6_INLINE
+ __asm__("rev %0, %1" : "=r"(y) : "r"(x));
+#else
+ uint32_t t;
+ __asm__ ("eor %1, %2, %2, ror #16 \n\t"
+ "bic %1, %1, #0xFF0000 \n\t"
+ "mov %0, %2, ror #8 \n\t"
+ "eor %0, %0, %1, lsr #8 \n\t"
+ : "=r"(y), "=&r"(t) : "r"(x));
+#endif /* HAVE_ARMV6_INLINE */
+ return y;
+}
+#endif /* AV_GCC_VERSION_AT_MOST(4,4) */
+
+#endif /* __ARMCC_VERSION */
+
+#endif /* AVUTIL_ARM_BSWAP_H */
diff --git a/media/ffvpx/libavutil/arm/cpu.c b/media/ffvpx/libavutil/arm/cpu.c
new file mode 100644
index 0000000000..c84a655c37
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/cpu.c
@@ -0,0 +1,188 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/cpu.h"
+#include "libavutil/cpu_internal.h"
+#include "config.h"
+
+#define CORE_FLAG(f) \
+ (AV_CPU_FLAG_ ## f * (HAVE_ ## f ## _EXTERNAL || HAVE_ ## f ## _INLINE))
+
+#define CORE_CPU_FLAGS \
+ (CORE_FLAG(ARMV5TE) | \
+ CORE_FLAG(ARMV6) | \
+ CORE_FLAG(ARMV6T2) | \
+ CORE_FLAG(VFP) | \
+ CORE_FLAG(VFPV3) | \
+ CORE_FLAG(NEON))
+
+#if defined __linux__ || defined __ANDROID__
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include "libavutil/avstring.h"
+
+#if HAVE_GETAUXVAL
+#include <sys/auxv.h>
+#endif
+
+#define AT_HWCAP 16
+
+/* Relevant HWCAP values from kernel headers */
+#define HWCAP_VFP (1 << 6)
+#define HWCAP_EDSP (1 << 7)
+#define HWCAP_THUMBEE (1 << 11)
+#define HWCAP_NEON (1 << 12)
+#define HWCAP_VFPv3 (1 << 13)
+#define HWCAP_TLS (1 << 15)
+
+static int get_auxval(uint32_t *hwcap)
+{
+#if HAVE_GETAUXVAL
+ unsigned long ret = getauxval(AT_HWCAP);
+ if (ret == 0)
+ return -1;
+ *hwcap = ret;
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+static int get_hwcap(uint32_t *hwcap)
+{
+ struct { uint32_t a_type; uint32_t a_val; } auxv;
+ FILE *f = fopen("/proc/self/auxv", "r");
+ int err = -1;
+
+ if (!f)
+ return -1;
+
+ while (fread(&auxv, sizeof(auxv), 1, f) > 0) {
+ if (auxv.a_type == AT_HWCAP) {
+ *hwcap = auxv.a_val;
+ err = 0;
+ break;
+ }
+ }
+
+ fclose(f);
+ return err;
+}
+
+static int get_cpuinfo(uint32_t *hwcap)
+{
+ FILE *f = fopen("/proc/cpuinfo", "r");
+ char buf[200];
+
+ if (!f)
+ return -1;
+
+ *hwcap = 0;
+ while (fgets(buf, sizeof(buf), f)) {
+ if (av_strstart(buf, "Features", NULL)) {
+ if (strstr(buf, " edsp "))
+ *hwcap |= HWCAP_EDSP;
+ if (strstr(buf, " tls "))
+ *hwcap |= HWCAP_TLS;
+ if (strstr(buf, " thumbee "))
+ *hwcap |= HWCAP_THUMBEE;
+ if (strstr(buf, " vfp "))
+ *hwcap |= HWCAP_VFP;
+ if (strstr(buf, " vfpv3 "))
+ *hwcap |= HWCAP_VFPv3;
+ if (strstr(buf, " neon ") || strstr(buf, " asimd "))
+ *hwcap |= HWCAP_NEON;
+ if (strstr(buf, " fp ")) // Listed on 64 bit ARMv8 kernels
+ *hwcap |= HWCAP_VFP | HWCAP_VFPv3;
+ break;
+ }
+ }
+ fclose(f);
+ return 0;
+}
+
+int ff_get_cpu_flags_arm(void)
+{
+ int flags = CORE_CPU_FLAGS;
+ uint32_t hwcap;
+
+ if (get_auxval(&hwcap) < 0)
+ if (get_hwcap(&hwcap) < 0)
+ if (get_cpuinfo(&hwcap) < 0)
+ return flags;
+
+#define check_cap(cap, flag) do { \
+ if (hwcap & HWCAP_ ## cap) \
+ flags |= AV_CPU_FLAG_ ## flag; \
+ } while (0)
+
+ /* No flags explicitly indicate v6 or v6T2 so check others which
+ imply support. */
+ check_cap(EDSP, ARMV5TE);
+ check_cap(TLS, ARMV6);
+ check_cap(THUMBEE, ARMV6T2);
+ check_cap(VFP, VFP);
+ check_cap(VFPv3, VFPV3);
+ check_cap(NEON, NEON);
+
+ /* The v6 checks above are not reliable so let higher flags
+ trickle down. */
+ if (flags & (AV_CPU_FLAG_VFPV3 | AV_CPU_FLAG_NEON))
+ flags |= AV_CPU_FLAG_ARMV6T2;
+ else if (flags & (AV_CPU_FLAG_ARMV6T2 | AV_CPU_FLAG_ARMV6))
+ /* Some functions use the 'setend' instruction which is deprecated on ARMv8
+ * and serializing on some ARMv7 cores. This ensures such functions
+ * are only enabled on ARMv6. */
+ flags |= AV_CPU_FLAG_SETEND;
+
+ if (flags & AV_CPU_FLAG_ARMV6T2)
+ flags |= AV_CPU_FLAG_ARMV6;
+
+ /* set the virtual VFPv2 vector mode flag */
+ if ((flags & AV_CPU_FLAG_VFP) && !(flags & (AV_CPU_FLAG_VFPV3 | AV_CPU_FLAG_NEON)))
+ flags |= AV_CPU_FLAG_VFP_VM;
+
+ return flags;
+}
+
+#else
+
+int ff_get_cpu_flags_arm(void)
+{
+ return AV_CPU_FLAG_ARMV5TE * HAVE_ARMV5TE |
+ AV_CPU_FLAG_ARMV6 * HAVE_ARMV6 |
+ AV_CPU_FLAG_ARMV6T2 * HAVE_ARMV6T2 |
+ AV_CPU_FLAG_VFP * HAVE_VFP |
+ AV_CPU_FLAG_VFPV3 * HAVE_VFPV3 |
+ AV_CPU_FLAG_NEON * HAVE_NEON |
+ AV_CPU_FLAG_SETEND * !(HAVE_NEON | HAVE_VFPV3);
+}
+
+#endif
+
+size_t ff_get_cpu_max_align_arm(void)
+{
+ int flags = av_get_cpu_flags();
+
+ if (flags & AV_CPU_FLAG_NEON)
+ return 16;
+
+ return 8;
+}
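ff_get_cpu_flags_arm() above probes the kernel three ways, in order: getauxval(AT_HWCAP), /proc/self/auxv, and finally the Features line of /proc/cpuinfo. A minimal standalone sketch of the first probe, assuming a Linux/ARM target with <sys/auxv.h>; the HWCAP_* values mirror the ones defined in the hunk above:

    /* Minimal sketch of the getauxval(AT_HWCAP) probe used above.
     * Assumes Linux/ARM; constants mirror the cpu.c definitions. */
    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef AT_HWCAP
    #define AT_HWCAP 16
    #endif
    #define HWCAP_VFP   (1 << 6)
    #define HWCAP_NEON  (1 << 12)
    #define HWCAP_VFPv3 (1 << 13)

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        /* A zero value means AT_HWCAP was not provided; cpu.c then
         * falls back to /proc/self/auxv and /proc/cpuinfo. */
        if (!hwcap) {
            puts("AT_HWCAP unavailable");
            return 1;
        }
        printf("vfp:%d vfpv3:%d neon:%d\n",
               !!(hwcap & HWCAP_VFP),
               !!(hwcap & HWCAP_VFPv3),
               !!(hwcap & HWCAP_NEON));
        return 0;
    }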
diff --git a/media/ffvpx/libavutil/arm/cpu.h b/media/ffvpx/libavutil/arm/cpu.h
new file mode 100644
index 0000000000..1d6cc65dc4
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/cpu.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ARM_CPU_H
+#define AVUTIL_ARM_CPU_H
+
+#include "libavutil/cpu.h"
+#include "libavutil/cpu_internal.h"
+
+#define have_armv5te(flags) CPUEXT(flags, ARMV5TE)
+#define have_armv6(flags) CPUEXT(flags, ARMV6)
+#define have_armv6t2(flags) CPUEXT(flags, ARMV6T2)
+#define have_vfp(flags) CPUEXT(flags, VFP)
+#define have_vfpv3(flags) CPUEXT(flags, VFPV3)
+#define have_neon(flags) CPUEXT(flags, NEON)
+#define have_setend(flags) CPUEXT(flags, SETEND)
+
+/* some functions use the VFPv2 vector mode which is deprecated in ARMv7-A
+ * and might trap on such CPU depending on the OS configuration */
+#define have_vfp_vm(flags) \
+ (HAVE_VFP && ((flags) & AV_CPU_FLAG_VFP_VM))
+
+#endif /* AVUTIL_ARM_CPU_H */
diff --git a/media/ffvpx/libavutil/arm/float_dsp_arm.h b/media/ffvpx/libavutil/arm/float_dsp_arm.h
new file mode 100644
index 0000000000..fe311cc0d2
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/float_dsp_arm.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ARM_FLOAT_DSP_ARM_H
+#define AVUTIL_ARM_FLOAT_DSP_ARM_H
+
+#include "libavutil/float_dsp.h"
+
+void ff_float_dsp_init_vfp(AVFloatDSPContext *fdsp, int cpu_flags);
+void ff_float_dsp_init_neon(AVFloatDSPContext *fdsp);
+
+#endif /* AVUTIL_ARM_FLOAT_DSP_ARM_H */
diff --git a/media/ffvpx/libavutil/arm/float_dsp_init_arm.c b/media/ffvpx/libavutil/arm/float_dsp_init_arm.c
new file mode 100644
index 0000000000..678762862e
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/float_dsp_init_arm.c
@@ -0,0 +1,32 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/float_dsp.h"
+#include "cpu.h"
+#include "float_dsp_arm.h"
+
+av_cold void ff_float_dsp_init_arm(AVFloatDSPContext *fdsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_vfp(cpu_flags))
+ ff_float_dsp_init_vfp(fdsp, cpu_flags);
+ if (have_neon(cpu_flags))
+ ff_float_dsp_init_neon(fdsp);
+}
diff --git a/media/ffvpx/libavutil/arm/float_dsp_init_neon.c b/media/ffvpx/libavutil/arm/float_dsp_init_neon.c
new file mode 100644
index 0000000000..689aa77c7b
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/float_dsp_init_neon.c
@@ -0,0 +1,59 @@
+/*
+ * ARM NEON optimised Float DSP functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/float_dsp.h"
+#include "float_dsp_arm.h"
+
+void ff_vector_fmul_neon(float *dst, const float *src0, const float *src1, int len);
+
+void ff_vector_fmac_scalar_neon(float *dst, const float *src, float mul,
+ int len);
+
+void ff_vector_fmul_scalar_neon(float *dst, const float *src, float mul,
+ int len);
+
+void ff_vector_fmul_window_neon(float *dst, const float *src0,
+ const float *src1, const float *win, int len);
+
+void ff_vector_fmul_add_neon(float *dst, const float *src0, const float *src1,
+ const float *src2, int len);
+
+void ff_vector_fmul_reverse_neon(float *dst, const float *src0,
+ const float *src1, int len);
+
+void ff_butterflies_float_neon(float *v1, float *v2, int len);
+
+float ff_scalarproduct_float_neon(const float *v1, const float *v2, int len);
+
+av_cold void ff_float_dsp_init_neon(AVFloatDSPContext *fdsp)
+{
+ fdsp->vector_fmul = ff_vector_fmul_neon;
+ fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_neon;
+ fdsp->vector_fmul_scalar = ff_vector_fmul_scalar_neon;
+ fdsp->vector_fmul_window = ff_vector_fmul_window_neon;
+ fdsp->vector_fmul_add = ff_vector_fmul_add_neon;
+ fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_neon;
+ fdsp->butterflies_float = ff_butterflies_float_neon;
+ fdsp->scalarproduct_float = ff_scalarproduct_float_neon;
+}
diff --git a/media/ffvpx/libavutil/arm/float_dsp_init_vfp.c b/media/ffvpx/libavutil/arm/float_dsp_init_vfp.c
new file mode 100644
index 0000000000..05873e7e37
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/float_dsp_init_vfp.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2008 Siarhei Siamashka <ssvb@users.sourceforge.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/float_dsp.h"
+#include "cpu.h"
+#include "float_dsp_arm.h"
+
+void ff_vector_fmul_vfp(float *dst, const float *src0, const float *src1,
+ int len);
+
+void ff_vector_fmul_window_vfp(float *dst, const float *src0,
+ const float *src1, const float *win, int len);
+
+void ff_vector_fmul_reverse_vfp(float *dst, const float *src0,
+ const float *src1, int len);
+
+void ff_butterflies_float_vfp(float *av_restrict v1, float *av_restrict v2, int len);
+
+av_cold void ff_float_dsp_init_vfp(AVFloatDSPContext *fdsp, int cpu_flags)
+{
+ if (have_vfp_vm(cpu_flags)) {
+ fdsp->vector_fmul = ff_vector_fmul_vfp;
+ fdsp->vector_fmul_window = ff_vector_fmul_window_vfp;
+ }
+ fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_vfp;
+ if (have_vfp_vm(cpu_flags))
+ fdsp->butterflies_float = ff_butterflies_float_vfp;
+}
diff --git a/media/ffvpx/libavutil/arm/float_dsp_neon.S b/media/ffvpx/libavutil/arm/float_dsp_neon.S
new file mode 100644
index 0000000000..3823227608
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/float_dsp_neon.S
@@ -0,0 +1,271 @@
+/*
+ * ARM NEON optimised Float DSP functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "asm.S"
+
+function ff_vector_fmul_neon, export=1
+ subs r3, r3, #8
+ vld1.32 {d0-d3}, [r1,:128]!
+ vld1.32 {d4-d7}, [r2,:128]!
+ vmul.f32 q8, q0, q2
+ vmul.f32 q9, q1, q3
+ beq 3f
+ bics ip, r3, #15
+ beq 2f
+1: subs ip, ip, #16
+ vld1.32 {d0-d1}, [r1,:128]!
+ vld1.32 {d4-d5}, [r2,:128]!
+ vmul.f32 q10, q0, q2
+ vld1.32 {d2-d3}, [r1,:128]!
+ vld1.32 {d6-d7}, [r2,:128]!
+ vmul.f32 q11, q1, q3
+ vst1.32 {d16-d19},[r0,:128]!
+ vld1.32 {d0-d1}, [r1,:128]!
+ vld1.32 {d4-d5}, [r2,:128]!
+ vmul.f32 q8, q0, q2
+ vld1.32 {d2-d3}, [r1,:128]!
+ vld1.32 {d6-d7}, [r2,:128]!
+ vmul.f32 q9, q1, q3
+ vst1.32 {d20-d23},[r0,:128]!
+ bne 1b
+ ands r3, r3, #15
+ beq 3f
+2: vld1.32 {d0-d1}, [r1,:128]!
+ vld1.32 {d4-d5}, [r2,:128]!
+ vst1.32 {d16-d17},[r0,:128]!
+ vmul.f32 q8, q0, q2
+ vld1.32 {d2-d3}, [r1,:128]!
+ vld1.32 {d6-d7}, [r2,:128]!
+ vst1.32 {d18-d19},[r0,:128]!
+ vmul.f32 q9, q1, q3
+3: vst1.32 {d16-d19},[r0,:128]!
+ bx lr
+endfunc
+
+function ff_vector_fmac_scalar_neon, export=1
+VFP len .req r2
+VFP acc .req r3
+NOVFP len .req r3
+NOVFP acc .req r2
+VFP vdup.32 q15, d0[0]
+NOVFP vdup.32 q15, r2
+ bics r12, len, #15
+ mov acc, r0
+ beq 3f
+ vld1.32 {q0}, [r1,:128]!
+ vld1.32 {q8}, [acc,:128]!
+ vld1.32 {q1}, [r1,:128]!
+ vld1.32 {q9}, [acc,:128]!
+1: vmla.f32 q8, q0, q15
+ vld1.32 {q2}, [r1,:128]!
+ vld1.32 {q10}, [acc,:128]!
+ vmla.f32 q9, q1, q15
+ vld1.32 {q3}, [r1,:128]!
+ vld1.32 {q11}, [acc,:128]!
+ vmla.f32 q10, q2, q15
+ vst1.32 {q8}, [r0,:128]!
+ vmla.f32 q11, q3, q15
+ vst1.32 {q9}, [r0,:128]!
+ subs r12, r12, #16
+ beq 2f
+ vld1.32 {q0}, [r1,:128]!
+ vld1.32 {q8}, [acc,:128]!
+ vst1.32 {q10}, [r0,:128]!
+ vld1.32 {q1}, [r1,:128]!
+ vld1.32 {q9}, [acc,:128]!
+ vst1.32 {q11}, [r0,:128]!
+ b 1b
+2: vst1.32 {q10}, [r0,:128]!
+ vst1.32 {q11}, [r0,:128]!
+ ands len, len, #15
+ it eq
+ bxeq lr
+3: vld1.32 {q0}, [r1,:128]!
+ vld1.32 {q8}, [acc,:128]!
+ vmla.f32 q8, q0, q15
+ vst1.32 {q8}, [r0,:128]!
+ subs len, len, #4
+ bgt 3b
+ bx lr
+ .unreq len
+endfunc
+
+function ff_vector_fmul_scalar_neon, export=1
+VFP len .req r2
+NOVFP len .req r3
+VFP vdup.32 q8, d0[0]
+NOVFP vdup.32 q8, r2
+ bics r12, len, #15
+ beq 3f
+ vld1.32 {q0},[r1,:128]!
+ vld1.32 {q1},[r1,:128]!
+1: vmul.f32 q0, q0, q8
+ vld1.32 {q2},[r1,:128]!
+ vmul.f32 q1, q1, q8
+ vld1.32 {q3},[r1,:128]!
+ vmul.f32 q2, q2, q8
+ vst1.32 {q0},[r0,:128]!
+ vmul.f32 q3, q3, q8
+ vst1.32 {q1},[r0,:128]!
+ subs r12, r12, #16
+ beq 2f
+ vld1.32 {q0},[r1,:128]!
+ vst1.32 {q2},[r0,:128]!
+ vld1.32 {q1},[r1,:128]!
+ vst1.32 {q3},[r0,:128]!
+ b 1b
+2: vst1.32 {q2},[r0,:128]!
+ vst1.32 {q3},[r0,:128]!
+ ands len, len, #15
+ it eq
+ bxeq lr
+3: vld1.32 {q0},[r1,:128]!
+ vmul.f32 q0, q0, q8
+ vst1.32 {q0},[r0,:128]!
+ subs len, len, #4
+ bgt 3b
+ bx lr
+ .unreq len
+endfunc
+
+function ff_vector_fmul_window_neon, export=1
+ push {r4,r5,lr}
+ ldr lr, [sp, #12]
+ sub r2, r2, #8
+ sub r5, lr, #2
+ add r2, r2, r5, lsl #2
+ add r4, r3, r5, lsl #3
+ add ip, r0, r5, lsl #3
+ mov r5, #-16
+ vld1.32 {d0,d1}, [r1,:128]!
+ vld1.32 {d2,d3}, [r2,:128], r5
+ vld1.32 {d4,d5}, [r3,:128]!
+ vld1.32 {d6,d7}, [r4,:128], r5
+1: subs lr, lr, #4
+ vmul.f32 d22, d0, d4
+ vrev64.32 q3, q3
+ vmul.f32 d23, d1, d5
+ vrev64.32 q1, q1
+ vmul.f32 d20, d0, d7
+ vmul.f32 d21, d1, d6
+ beq 2f
+ vmla.f32 d22, d3, d7
+ vld1.32 {d0,d1}, [r1,:128]!
+ vmla.f32 d23, d2, d6
+ vld1.32 {d18,d19},[r2,:128], r5
+ vmls.f32 d20, d3, d4
+ vld1.32 {d24,d25},[r3,:128]!
+ vmls.f32 d21, d2, d5
+ vld1.32 {d6,d7}, [r4,:128], r5
+ vmov q1, q9
+ vrev64.32 q11, q11
+ vmov q2, q12
+ vswp d22, d23
+ vst1.32 {d20,d21},[r0,:128]!
+ vst1.32 {d22,d23},[ip,:128], r5
+ b 1b
+2: vmla.f32 d22, d3, d7
+ vmla.f32 d23, d2, d6
+ vmls.f32 d20, d3, d4
+ vmls.f32 d21, d2, d5
+ vrev64.32 q11, q11
+ vswp d22, d23
+ vst1.32 {d20,d21},[r0,:128]!
+ vst1.32 {d22,d23},[ip,:128], r5
+ pop {r4,r5,pc}
+endfunc
+
+function ff_vector_fmul_add_neon, export=1
+ ldr r12, [sp]
+ vld1.32 {q0-q1}, [r1,:128]!
+ vld1.32 {q8-q9}, [r2,:128]!
+ vld1.32 {q2-q3}, [r3,:128]!
+ vmul.f32 q10, q0, q8
+ vmul.f32 q11, q1, q9
+1: vadd.f32 q12, q2, q10
+ vadd.f32 q13, q3, q11
+ pld [r1, #16]
+ pld [r2, #16]
+ pld [r3, #16]
+ subs r12, r12, #8
+ beq 2f
+ vld1.32 {q0}, [r1,:128]!
+ vld1.32 {q8}, [r2,:128]!
+ vmul.f32 q10, q0, q8
+ vld1.32 {q1}, [r1,:128]!
+ vld1.32 {q9}, [r2,:128]!
+ vmul.f32 q11, q1, q9
+ vld1.32 {q2-q3}, [r3,:128]!
+ vst1.32 {q12-q13},[r0,:128]!
+ b 1b
+2: vst1.32 {q12-q13},[r0,:128]!
+ bx lr
+endfunc
+
+function ff_vector_fmul_reverse_neon, export=1
+ add r2, r2, r3, lsl #2
+ sub r2, r2, #32
+ mov r12, #-32
+ vld1.32 {q0-q1}, [r1,:128]!
+ vld1.32 {q2-q3}, [r2,:128], r12
+1: pld [r1, #32]
+ vrev64.32 q3, q3
+ vmul.f32 d16, d0, d7
+ vmul.f32 d17, d1, d6
+ pld [r2, #-32]
+ vrev64.32 q2, q2
+ vmul.f32 d18, d2, d5
+ vmul.f32 d19, d3, d4
+ subs r3, r3, #8
+ beq 2f
+ vld1.32 {q0-q1}, [r1,:128]!
+ vld1.32 {q2-q3}, [r2,:128], r12
+ vst1.32 {q8-q9}, [r0,:128]!
+ b 1b
+2: vst1.32 {q8-q9}, [r0,:128]!
+ bx lr
+endfunc
+
+function ff_butterflies_float_neon, export=1
+1: vld1.32 {q0},[r0,:128]
+ vld1.32 {q1},[r1,:128]
+ vsub.f32 q2, q0, q1
+ vadd.f32 q1, q0, q1
+ vst1.32 {q2},[r1,:128]!
+ vst1.32 {q1},[r0,:128]!
+ subs r2, r2, #4
+ bgt 1b
+ bx lr
+endfunc
+
+function ff_scalarproduct_float_neon, export=1
+ vmov.f32 q2, #0.0
+1: vld1.32 {q0},[r0,:128]!
+ vld1.32 {q1},[r1,:128]!
+ vmla.f32 q2, q0, q1
+ subs r2, r2, #4
+ bgt 1b
+ vadd.f32 d0, d4, d5
+ vpadd.f32 d0, d0, d0
+NOVFP vmov.32 r0, d0[0]
+ bx lr
+endfunc
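The NEON routines above are plugged into AVFloatDSPContext by float_dsp_init_neon.c. As a reading aid, a scalar C reference of what three of them compute, derived from the assembly; len is assumed to satisfy the same multiple-of-4/8 preconditions the NEON loops rely on:

    /* Scalar reference of ff_vector_fmul_neon, ff_butterflies_float_neon
     * and ff_scalarproduct_float_neon, derived from the assembly above. */
    static void vector_fmul_ref(float *dst, const float *src0,
                                const float *src1, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] = src0[i] * src1[i];            /* vmul.f32 */
    }

    static void butterflies_float_ref(float *v1, float *v2, int len)
    {
        for (int i = 0; i < len; i++) {
            float t = v1[i] - v2[i];               /* vsub.f32 */
            v1[i]   = v1[i] + v2[i];               /* vadd.f32, stored to v1 */
            v2[i]   = t;                           /* difference stored to v2 */
        }
    }

    static float scalarproduct_float_ref(const float *v1, const float *v2,
                                         int len)
    {
        float sum = 0.0f;
        for (int i = 0; i < len; i++)
            sum += v1[i] * v2[i];                  /* vmla.f32 accumulation */
        return sum;
    }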
diff --git a/media/ffvpx/libavutil/arm/float_dsp_vfp.S b/media/ffvpx/libavutil/arm/float_dsp_vfp.S
new file mode 100644
index 0000000000..7db2452284
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/float_dsp_vfp.S
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2008 Siarhei Siamashka <ssvb@users.sourceforge.net>
+ *
+ * This file is part of FFmpeg
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "asm.S"
+
+/**
+ * Assume that len is a positive number and is a multiple of 8
+ */
+@ void ff_vector_fmul_vfp(float *dst, const float *src0, const float *src1, int len)
+function ff_vector_fmul_vfp, export=1
+ vpush {d8-d15}
+ fmrx r12, fpscr
+ orr r12, r12, #(3 << 16) /* set vector size to 4 */
+ fmxr fpscr, r12
+
+ vldmia r1!, {s0-s3}
+ vldmia r2!, {s8-s11}
+ vldmia r1!, {s4-s7}
+ vldmia r2!, {s12-s15}
+ vmul.f32 s8, s0, s8
+1:
+ subs r3, r3, #16
+ vmul.f32 s12, s4, s12
+ itttt ge
+ vldmiage r1!, {s16-s19}
+ vldmiage r2!, {s24-s27}
+ vldmiage r1!, {s20-s23}
+ vldmiage r2!, {s28-s31}
+ it ge
+ vmulge.f32 s24, s16, s24
+ vstmia r0!, {s8-s11}
+ vstmia r0!, {s12-s15}
+ it ge
+ vmulge.f32 s28, s20, s28
+ itttt gt
+ vldmiagt r1!, {s0-s3}
+ vldmiagt r2!, {s8-s11}
+ vldmiagt r1!, {s4-s7}
+ vldmiagt r2!, {s12-s15}
+ ittt ge
+ vmulge.f32 s8, s0, s8
+ vstmiage r0!, {s24-s27}
+ vstmiage r0!, {s28-s31}
+ bgt 1b
+
+ bic r12, r12, #(7 << 16) /* set vector size back to 1 */
+ fmxr fpscr, r12
+ vpop {d8-d15}
+ bx lr
+endfunc
+
+/**
+ * ARM VFP implementation of 'vector_fmul_window_c' function
+ * Assume that len is a positive non-zero number
+ */
+@ void ff_vector_fmul_window_vfp(float *dst, const float *src0,
+@ const float *src1, const float *win, int len)
+function ff_vector_fmul_window_vfp, export=1
+DST0 .req a1
+SRC0 .req a2
+SRC1 .req a3
+WIN0 .req a4
+LEN .req v1
+DST1 .req v2
+WIN1 .req v3
+OLDFPSCR .req ip
+
+ push {v1-v3,lr}
+ ldr LEN, [sp, #4*4+0]
+ vpush {s16-s31}
+ fmrx OLDFPSCR, FPSCR
+ add DST1, DST0, LEN, lsl #3
+ add SRC1, SRC1, LEN, lsl #2
+ add WIN1, WIN0, LEN, lsl #3
+
+ tst LEN, #7
+ beq 4f @ common case: len is a multiple of 8
+
+ ldr lr, =0x03000000 @ RunFast mode, scalar mode
+ fmxr FPSCR, lr
+
+ tst LEN, #1
+ beq 1f
+ vldmdb WIN1!, {s0}
+ vldmia SRC0!, {s8}
+ vldmia WIN0!, {s16}
+ vmul.f s24, s0, s8
+ vldmdb SRC1!, {s20}
+ vmul.f s8, s16, s8
+ vmls.f s24, s16, s20
+ vmla.f s8, s0, s20
+ vstmia DST0!, {s24}
+ vstmdb DST1!, {s8}
+1:
+ tst LEN, #2
+ beq 2f
+ vldmdb WIN1!, {s0}
+ vldmdb WIN1!, {s1}
+ vldmia SRC0!, {s8-s9}
+ vldmia WIN0!, {s16-s17}
+ vmul.f s24, s0, s8
+ vmul.f s25, s1, s9
+ vldmdb SRC1!, {s20}
+ vldmdb SRC1!, {s21}
+ vmul.f s8, s16, s8
+ vmul.f s9, s17, s9
+ vmls.f s24, s16, s20
+ vmls.f s25, s17, s21
+ vmla.f s8, s0, s20
+ vmla.f s9, s1, s21
+ vstmia DST0!, {s24-s25}
+ vstmdb DST1!, {s8}
+ vstmdb DST1!, {s9}
+2:
+ tst LEN, #4
+ beq 3f
+ vldmdb WIN1!, {s0}
+ vldmdb WIN1!, {s1}
+ vldmdb WIN1!, {s2}
+ vldmdb WIN1!, {s3}
+ vldmia SRC0!, {s8-s11}
+ vldmia WIN0!, {s16-s19}
+ vmul.f s24, s0, s8
+ vmul.f s25, s1, s9
+ vmul.f s26, s2, s10
+ vmul.f s27, s3, s11
+ vldmdb SRC1!, {s20}
+ vldmdb SRC1!, {s21}
+ vldmdb SRC1!, {s22}
+ vldmdb SRC1!, {s23}
+ vmul.f s8, s16, s8
+ vmul.f s9, s17, s9
+ vmul.f s10, s18, s10
+ vmul.f s11, s19, s11
+ vmls.f s24, s16, s20
+ vmls.f s25, s17, s21
+ vmls.f s26, s18, s22
+ vmls.f s27, s19, s23
+ vmla.f s8, s0, s20
+ vmla.f s9, s1, s21
+ vmla.f s10, s2, s22
+ vmla.f s11, s3, s23
+ vstmia DST0!, {s24-s27}
+ vstmdb DST1!, {s8}
+ vstmdb DST1!, {s9}
+ vstmdb DST1!, {s10}
+ vstmdb DST1!, {s11}
+3:
+ bics LEN, LEN, #7
+ beq 7f
+4:
+ ldr lr, =0x03030000 @ RunFast mode, short vectors of length 4, stride 1
+ fmxr FPSCR, lr
+
+ vldmdb WIN1!, {s0}
+ vldmdb WIN1!, {s1}
+ vldmdb WIN1!, {s2}
+ vldmdb WIN1!, {s3}
+ vldmia SRC0!, {s8-s11}
+ vldmia WIN0!, {s16-s19}
+ vmul.f s24, s0, s8 @ vector * vector
+ vldmdb SRC1!, {s20}
+ vldmdb SRC1!, {s21}
+ vldmdb SRC1!, {s22}
+ vldmdb SRC1!, {s23}
+ vmul.f s8, s16, s8 @ vector * vector
+ vmls.f s24, s16, s20 @ vector * vector
+ vldmdb WIN1!, {s4}
+ vldmdb WIN1!, {s5}
+ vldmdb WIN1!, {s6}
+ vldmdb WIN1!, {s7}
+ vldmia SRC0!, {s12-s13}
+ vmla.f s8, s0, s20 @ vector * vector
+ vldmia SRC0!, {s14-s15}
+ subs LEN, LEN, #8
+ beq 6f
+5: vldmia WIN0!, {s20-s23}
+ vmul.f s28, s4, s12 @ vector * vector
+ vstmia DST0!, {s24-s25}
+ vldmdb SRC1!, {s16}
+ vldmdb SRC1!, {s17}
+ vldmdb SRC1!, {s18}
+ vldmdb SRC1!, {s19}
+ vmul.f s12, s20, s12 @ vector * vector
+ vstmia DST0!, {s26-s27}
+ vstmdb DST1!, {s8}
+ vstmdb DST1!, {s9}
+ vstmdb DST1!, {s10}
+ vstmdb DST1!, {s11}
+ vmls.f s28, s20, s16 @ vector * vector
+ vldmdb WIN1!, {s0}
+ vldmdb WIN1!, {s1}
+ vldmdb WIN1!, {s2}
+ vldmdb WIN1!, {s3}
+ vldmia SRC0!, {s8-s9}
+ vmla.f s12, s4, s16 @ vector * vector
+ vldmia SRC0!, {s10-s11}
+ subs LEN, LEN, #8
+ vldmia WIN0!, {s16-s19}
+ vmul.f s24, s0, s8 @ vector * vector
+ vstmia DST0!, {s28-s29}
+ vldmdb SRC1!, {s20}
+ vldmdb SRC1!, {s21}
+ vldmdb SRC1!, {s22}
+ vldmdb SRC1!, {s23}
+ vmul.f s8, s16, s8 @ vector * vector
+ vstmia DST0!, {s30-s31}
+ vstmdb DST1!, {s12}
+ vstmdb DST1!, {s13}
+ vstmdb DST1!, {s14}
+ vstmdb DST1!, {s15}
+ vmls.f s24, s16, s20 @ vector * vector
+ vldmdb WIN1!, {s4}
+ vldmdb WIN1!, {s5}
+ vldmdb WIN1!, {s6}
+ vldmdb WIN1!, {s7}
+ vldmia SRC0!, {s12-s13}
+ vmla.f s8, s0, s20 @ vector * vector
+ vldmia SRC0!, {s14-s15}
+ bne 5b
+6: vldmia WIN0!, {s20-s23}
+ vmul.f s28, s4, s12 @ vector * vector
+ vstmia DST0!, {s24-s25}
+ vldmdb SRC1!, {s16}
+ vldmdb SRC1!, {s17}
+ vldmdb SRC1!, {s18}
+ vldmdb SRC1!, {s19}
+ vmul.f s12, s20, s12 @ vector * vector
+ vstmia DST0!, {s26-s27}
+ vstmdb DST1!, {s8}
+ vstmdb DST1!, {s9}
+ vstmdb DST1!, {s10}
+ vstmdb DST1!, {s11}
+ vmls.f s28, s20, s16 @ vector * vector
+ vmla.f s12, s4, s16 @ vector * vector
+ vstmia DST0!, {s28-s31}
+ vstmdb DST1!, {s12}
+ vstmdb DST1!, {s13}
+ vstmdb DST1!, {s14}
+ vstmdb DST1!, {s15}
+7:
+ fmxr FPSCR, OLDFPSCR
+ vpop {s16-s31}
+ pop {v1-v3,pc}
+
+ .unreq DST0
+ .unreq SRC0
+ .unreq SRC1
+ .unreq WIN0
+ .unreq LEN
+ .unreq OLDFPSCR
+ .unreq DST1
+ .unreq WIN1
+endfunc
+
+/**
+ * ARM VFP optimized implementation of 'vector_fmul_reverse_c' function.
+ * Assume that len is a positive number and is a multiple of 8
+ */
+@ void ff_vector_fmul_reverse_vfp(float *dst, const float *src0,
+@ const float *src1, int len)
+function ff_vector_fmul_reverse_vfp, export=1
+ vpush {d8-d15}
+ add r2, r2, r3, lsl #2
+ vldmdb r2!, {s0-s3}
+ vldmia r1!, {s8-s11}
+ vldmdb r2!, {s4-s7}
+ vldmia r1!, {s12-s15}
+ vmul.f32 s8, s3, s8
+ vmul.f32 s9, s2, s9
+ vmul.f32 s10, s1, s10
+ vmul.f32 s11, s0, s11
+1:
+ subs r3, r3, #16
+ it ge
+ vldmdbge r2!, {s16-s19}
+ vmul.f32 s12, s7, s12
+ it ge
+ vldmiage r1!, {s24-s27}
+ vmul.f32 s13, s6, s13
+ it ge
+ vldmdbge r2!, {s20-s23}
+ vmul.f32 s14, s5, s14
+ it ge
+ vldmiage r1!, {s28-s31}
+ vmul.f32 s15, s4, s15
+ it ge
+ vmulge.f32 s24, s19, s24
+ it gt
+ vldmdbgt r2!, {s0-s3}
+ it ge
+ vmulge.f32 s25, s18, s25
+ vstmia r0!, {s8-s13}
+ it ge
+ vmulge.f32 s26, s17, s26
+ it gt
+ vldmiagt r1!, {s8-s11}
+ itt ge
+ vmulge.f32 s27, s16, s27
+ vmulge.f32 s28, s23, s28
+ it gt
+ vldmdbgt r2!, {s4-s7}
+ it ge
+ vmulge.f32 s29, s22, s29
+ vstmia r0!, {s14-s15}
+ ittt ge
+ vmulge.f32 s30, s21, s30
+ vmulge.f32 s31, s20, s31
+ vmulge.f32 s8, s3, s8
+ it gt
+ vldmiagt r1!, {s12-s15}
+ itttt ge
+ vmulge.f32 s9, s2, s9
+ vmulge.f32 s10, s1, s10
+ vstmiage r0!, {s24-s27}
+ vmulge.f32 s11, s0, s11
+ it ge
+ vstmiage r0!, {s28-s31}
+ bgt 1b
+
+ vpop {d8-d15}
+ bx lr
+endfunc
+
+/**
+ * ARM VFP implementation of 'butterflies_float_c' function
+ * Assume that len is a positive non-zero number
+ */
+@ void ff_butterflies_float_vfp(float *restrict v1, float *restrict v2, int len)
+function ff_butterflies_float_vfp, export=1
+BASE1 .req a1
+BASE2 .req a2
+LEN .req a3
+OLDFPSCR .req a4
+
+ vpush {s16-s31}
+ fmrx OLDFPSCR, FPSCR
+
+ tst LEN, #7
+ beq 4f @ common case: len is a multiple of 8
+
+ ldr ip, =0x03000000 @ RunFast mode, scalar mode
+ fmxr FPSCR, ip
+
+ tst LEN, #1
+ beq 1f
+ vldmia BASE1!, {s0}
+ vldmia BASE2!, {s8}
+ vadd.f s16, s0, s8
+ vsub.f s24, s0, s8
+ vstr s16, [BASE1, #0-4*1]
+ vstr s24, [BASE2, #0-4*1]
+1:
+ tst LEN, #2
+ beq 2f
+ vldmia BASE1!, {s0-s1}
+ vldmia BASE2!, {s8-s9}
+ vadd.f s16, s0, s8
+ vadd.f s17, s1, s9
+ vsub.f s24, s0, s8
+ vsub.f s25, s1, s9
+ vstr d8, [BASE1, #0-8*1] @ s16,s17
+ vstr d12, [BASE2, #0-8*1] @ s24,s25
+2:
+ tst LEN, #4
+ beq 3f
+ vldmia BASE1!, {s0-s1}
+ vldmia BASE2!, {s8-s9}
+ vldmia BASE1!, {s2-s3}
+ vldmia BASE2!, {s10-s11}
+ vadd.f s16, s0, s8
+ vadd.f s17, s1, s9
+ vsub.f s24, s0, s8
+ vsub.f s25, s1, s9
+ vadd.f s18, s2, s10
+ vadd.f s19, s3, s11
+ vsub.f s26, s2, s10
+ vsub.f s27, s3, s11
+ vstr d8, [BASE1, #0-16*1] @ s16,s17
+ vstr d12, [BASE2, #0-16*1] @ s24,s25
+ vstr d9, [BASE1, #8-16*1] @ s18,s19
+ vstr d13, [BASE2, #8-16*1] @ s26,s27
+3:
+ bics LEN, LEN, #7
+ beq 7f
+4:
+ ldr ip, =0x03030000 @ RunFast mode, short vectors of length 4, stride 1
+ fmxr FPSCR, ip
+
+ vldmia BASE1!, {s0-s1}
+ vldmia BASE2!, {s8-s9}
+ vldmia BASE1!, {s2-s3}
+ vldmia BASE2!, {s10-s11}
+ vadd.f s16, s0, s8
+ vldmia BASE1!, {s4-s5}
+ vldmia BASE2!, {s12-s13}
+ vldmia BASE1!, {s6-s7}
+ vldmia BASE2!, {s14-s15}
+ vsub.f s24, s0, s8
+ vadd.f s20, s4, s12
+ subs LEN, LEN, #8
+ beq 6f
+5: vldmia BASE1!, {s0-s3}
+ vldmia BASE2!, {s8-s11}
+ vsub.f s28, s4, s12
+ vstr d8, [BASE1, #0-16*3] @ s16,s17
+ vstr d9, [BASE1, #8-16*3] @ s18,s19
+ vstr d12, [BASE2, #0-16*3] @ s24,s25
+ vstr d13, [BASE2, #8-16*3] @ s26,s27
+ vadd.f s16, s0, s8
+ vldmia BASE1!, {s4-s7}
+ vldmia BASE2!, {s12-s15}
+ vsub.f s24, s0, s8
+ vstr d10, [BASE1, #0-16*3] @ s20,s21
+ vstr d11, [BASE1, #8-16*3] @ s22,s23
+ vstr d14, [BASE2, #0-16*3] @ s28,s29
+ vstr d15, [BASE2, #8-16*3] @ s30,s31
+ vadd.f s20, s4, s12
+ subs LEN, LEN, #8
+ bne 5b
+6: vsub.f s28, s4, s12
+ vstr d8, [BASE1, #0-16*2] @ s16,s17
+ vstr d9, [BASE1, #8-16*2] @ s18,s19
+ vstr d12, [BASE2, #0-16*2] @ s24,s25
+ vstr d13, [BASE2, #8-16*2] @ s26,s27
+ vstr d10, [BASE1, #0-16*1] @ s20,s21
+ vstr d11, [BASE1, #8-16*1] @ s22,s23
+ vstr d14, [BASE2, #0-16*1] @ s28,s29
+ vstr d15, [BASE2, #8-16*1] @ s30,s31
+7:
+ fmxr FPSCR, OLDFPSCR
+ vpop {s16-s31}
+ bx lr
+
+ .unreq BASE1
+ .unreq BASE2
+ .unreq LEN
+ .unreq OLDFPSCR
+endfunc
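Both VFP routines above load literal FPSCR values to switch into "RunFast" mode and, on the vectorized paths, enable VFPv2 short vectors of length 4. A sketch of how those literals decode, assuming the standard VFPv2 FPSCR layout (DN in bit 25, FZ in bit 24, LEN in bits 18:16, STRIDE in bits 21:20); the macro names here are illustrative only:

    /* Decode of the FPSCR literals used above (VFPv2 short-vector mode). */
    #define FPSCR_DN      (1u << 25)           /* default NaN */
    #define FPSCR_FZ      (1u << 24)           /* flush-to-zero */
    #define FPSCR_LEN(n)  (((n) - 1u) << 16)   /* short-vector length n (1..8);
                                                * STRIDE bits left 0 = stride 1 */

    /* 0x03000000: RunFast, scalar mode (LEN = 1) */
    #define FPSCR_RUNFAST_SCALAR  (FPSCR_DN | FPSCR_FZ)
    /* 0x03030000: RunFast, short vectors of length 4, stride 1 */
    #define FPSCR_RUNFAST_VEC4    (FPSCR_DN | FPSCR_FZ | FPSCR_LEN(4))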
diff --git a/media/ffvpx/libavutil/arm/intmath.h b/media/ffvpx/libavutil/arm/intmath.h
new file mode 100644
index 0000000000..f19b21e98d
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/intmath.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ARM_INTMATH_H
+#define AVUTIL_ARM_INTMATH_H
+
+#include <stdint.h>
+
+#include "config.h"
+#include "libavutil/attributes.h"
+
+#if HAVE_INLINE_ASM
+
+#if HAVE_ARMV6_INLINE
+
+#define av_clip_uint8 av_clip_uint8_arm
+static av_always_inline av_const int av_clip_uint8_arm(int a)
+{
+ int x;
+ __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a));
+ return x;
+}
+
+#define av_clip_int8 av_clip_int8_arm
+static av_always_inline av_const int av_clip_int8_arm(int a)
+{
+ int x;
+ __asm__ ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
+ return x;
+}
+
+#define av_clip_uint16 av_clip_uint16_arm
+static av_always_inline av_const int av_clip_uint16_arm(int a)
+{
+ int x;
+ __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
+ return x;
+}
+
+#define av_clip_int16 av_clip_int16_arm
+static av_always_inline av_const int av_clip_int16_arm(int a)
+{
+ int x;
+ __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
+ return x;
+}
+
+#define av_clip_intp2 av_clip_intp2_arm
+static av_always_inline av_const int av_clip_intp2_arm(int a, int p)
+{
+ if (av_builtin_constant_p(p)) {
+ unsigned x;
+ __asm__ ("ssat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p+1));
+ return x;
+ } else {
+ if (((unsigned)a + (1 << p)) & ~((2 << p) - 1))
+ return (a >> 31) ^ ((1 << p) - 1);
+ else
+ return a;
+ }
+}
+
+#define av_clip_uintp2 av_clip_uintp2_arm
+static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p)
+{
+ if (av_builtin_constant_p(p)) {
+ unsigned x;
+ __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p));
+ return x;
+ } else {
+ if (a & ~((1<<p) - 1)) return (~a) >> 31 & ((1<<p) - 1);
+ else return a;
+ }
+}
+
+#define av_sat_add32 av_sat_add32_arm
+static av_always_inline int av_sat_add32_arm(int a, int b)
+{
+ int r;
+ __asm__ ("qadd %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
+ return r;
+}
+
+#define av_sat_dadd32 av_sat_dadd32_arm
+static av_always_inline int av_sat_dadd32_arm(int a, int b)
+{
+ int r;
+ __asm__ ("qdadd %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
+ return r;
+}
+
+#define av_sat_sub32 av_sat_sub32_arm
+static av_always_inline int av_sat_sub32_arm(int a, int b)
+{
+ int r;
+ __asm__ ("qsub %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
+ return r;
+}
+
+#define av_sat_dsub32 av_sat_dsub32_arm
+static av_always_inline int av_sat_dsub32_arm(int a, int b)
+{
+ int r;
+ __asm__ ("qdsub %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
+ return r;
+}
+
+#endif /* HAVE_ARMV6_INLINE */
+
+#if HAVE_ASM_MOD_Q
+
+#define av_clipl_int32 av_clipl_int32_arm
+static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
+{
+ int x, y;
+ __asm__ ("adds %1, %R2, %Q2, lsr #31 \n\t"
+ "itet ne \n\t"
+ "mvnne %1, #1<<31 \n\t"
+ "moveq %0, %Q2 \n\t"
+ "eorne %0, %1, %R2, asr #31 \n\t"
+ : "=r"(x), "=&r"(y) : "r"(a) : "cc");
+ return x;
+}
+
+#endif /* HAVE_ASM_MOD_Q */
+
+#endif /* HAVE_INLINE_ASM */
+
+#endif /* AVUTIL_ARM_INTMATH_H */
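The clips above compile to single ARMv6 ssat/usat instructions. A portable C sketch of the 16-bit clip semantics they implement, useful as a reference for the inline-asm paths:

    /* Portable reference for the ssat/usat based 16-bit clips above. */
    static int clip_int16_ref(int a)
    {
        if (a >  32767) return  32767;   /* ssat upper bound */
        if (a < -32768) return -32768;   /* ssat lower bound */
        return a;
    }

    static unsigned clip_uint16_ref(int a)
    {
        if (a < 0)     return 0;         /* usat clamps negatives to 0 */
        if (a > 65535) return 65535;     /* usat upper bound */
        return a;
    }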
diff --git a/media/ffvpx/libavutil/arm/intreadwrite.h b/media/ffvpx/libavutil/arm/intreadwrite.h
new file mode 100644
index 0000000000..60fc860cbb
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/intreadwrite.h
@@ -0,0 +1,91 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ARM_INTREADWRITE_H
+#define AVUTIL_ARM_INTREADWRITE_H
+
+#include <stdint.h>
+#include "config.h"
+#include "libavutil/attributes.h"
+
+#if HAVE_FAST_UNALIGNED && HAVE_INLINE_ASM && AV_GCC_VERSION_AT_MOST(4,6)
+
+#define AV_RN16 AV_RN16
+static av_always_inline unsigned AV_RN16(const void *p)
+{
+ const uint8_t *q = p;
+ unsigned v;
+#if AV_GCC_VERSION_AT_MOST(4,5)
+ __asm__ ("ldrh %0, %1" : "=r"(v) : "m"(*(const uint16_t *)q));
+#elif defined __thumb__
+ __asm__ ("ldrh %0, %1" : "=r"(v) : "m"(q[0]), "m"(q[1]));
+#else
+ __asm__ ("ldrh %0, %1" : "=r"(v) : "Uq"(q[0]), "m"(q[1]));
+#endif
+ return v;
+}
+
+#define AV_WN16 AV_WN16
+static av_always_inline void AV_WN16(void *p, uint16_t v)
+{
+ __asm__ ("strh %1, %0" : "=m"(*(uint16_t *)p) : "r"(v));
+}
+
+#define AV_RN32 AV_RN32
+static av_always_inline uint32_t AV_RN32(const void *p)
+{
+ const struct __attribute__((packed)) { uint32_t v; } *q = p;
+ uint32_t v;
+ __asm__ ("ldr %0, %1" : "=r"(v) : "m"(*q));
+ return v;
+}
+
+#define AV_WN32 AV_WN32
+static av_always_inline void AV_WN32(void *p, uint32_t v)
+{
+ __asm__ ("str %1, %0" : "=m"(*(uint32_t *)p) : "r"(v));
+}
+
+#if HAVE_ASM_MOD_Q
+
+#define AV_RN64 AV_RN64
+static av_always_inline uint64_t AV_RN64(const void *p)
+{
+ const struct __attribute__((packed)) { uint32_t v; } *q = p;
+ uint64_t v;
+ __asm__ ("ldr %Q0, %1 \n\t"
+ "ldr %R0, %2 \n\t"
+ : "=&r"(v)
+ : "m"(q[0]), "m"(q[1]));
+ return v;
+}
+
+#define AV_WN64 AV_WN64
+static av_always_inline void AV_WN64(void *p, uint64_t v)
+{
+ __asm__ ("str %Q2, %0 \n\t"
+ "str %R2, %1 \n\t"
+ : "=m"(*(uint32_t*)p), "=m"(*((uint32_t*)p+1))
+ : "r"(v));
+}
+
+#endif /* HAVE_ASM_MOD_Q */
+
+#endif /* HAVE_FAST_UNALIGNED && HAVE_INLINE_ASM && AV_GCC_VERSION_AT_MOST(4,6) */
+
+#endif /* AVUTIL_ARM_INTREADWRITE_H */
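AV_RN32/AV_WN32 above express unaligned 32-bit loads and stores as inline asm for old GCC releases. A memcpy-based sketch of the same contract, shown only to make the intended semantics explicit:

    /* Portable equivalent of the unaligned 32-bit read/write contract
     * provided by AV_RN32/AV_WN32 above, expressed with memcpy. */
    #include <stdint.h>
    #include <string.h>

    static uint32_t rn32_ref(const void *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof(v));   /* unaligned native-endian load */
        return v;
    }

    static void wn32_ref(void *p, uint32_t v)
    {
        memcpy(p, &v, sizeof(v));   /* unaligned native-endian store */
    }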
diff --git a/media/ffvpx/libavutil/arm/moz.build b/media/ffvpx/libavutil/arm/moz.build
new file mode 100644
index 0000000000..e5f9fb23e3
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/moz.build
@@ -0,0 +1,18 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ 'cpu.c',
+ 'float_dsp_init_arm.c',
+ 'float_dsp_init_neon.c',
+ 'float_dsp_init_vfp.c',
+ 'float_dsp_neon.S',
+ 'float_dsp_vfp.S',
+]
+
+FINAL_LIBRARY = 'mozavutil'
+
+include('/media/ffvpx/ffvpxcommon.mozbuild')
diff --git a/media/ffvpx/libavutil/arm/timer.h b/media/ffvpx/libavutil/arm/timer.h
new file mode 100644
index 0000000000..caf23e2a5a
--- /dev/null
+++ b/media/ffvpx/libavutil/arm/timer.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ARM_TIMER_H
+#define AVUTIL_ARM_TIMER_H
+
+#include <stdint.h>
+#include "config.h"
+
+#if defined(__APPLE__)
+
+#include <mach/mach_time.h>
+
+#define AV_READ_TIME mach_absolute_time
+
+#elif HAVE_INLINE_ASM && defined(__ARM_ARCH_7A__)
+
+#define AV_READ_TIME read_time
+
+static inline uint64_t read_time(void)
+{
+ unsigned cc;
+ __asm__ volatile ("mrc p15, 0, %0, c9, c13, 0" : "=r"(cc));
+ return cc;
+}
+
+#endif /* HAVE_INLINE_ASM && __ARM_ARCH_7A__ */
+
+#endif /* AVUTIL_ARM_TIMER_H */
diff --git a/media/ffvpx/libavutil/attributes.h b/media/ffvpx/libavutil/attributes.h
new file mode 100644
index 0000000000..04c615c952
--- /dev/null
+++ b/media/ffvpx/libavutil/attributes.h
@@ -0,0 +1,173 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Macro definitions for various function/variable attributes
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_H
+#define AVUTIL_ATTRIBUTES_H
+
+#ifdef __GNUC__
+# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
+# define AV_GCC_VERSION_AT_MOST(x,y) (__GNUC__ < (x) || __GNUC__ == (x) && __GNUC_MINOR__ <= (y))
+#else
+# define AV_GCC_VERSION_AT_LEAST(x,y) 0
+# define AV_GCC_VERSION_AT_MOST(x,y) 0
+#endif
+
+#ifdef __has_builtin
+# define AV_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define AV_HAS_BUILTIN(x) 0
+#endif
+
+#ifndef av_always_inline
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_always_inline __attribute__((always_inline)) inline
+#elif defined(_MSC_VER)
+# define av_always_inline __forceinline
+#else
+# define av_always_inline inline
+#endif
+#endif
+
+#ifndef av_extern_inline
+#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__)
+# define av_extern_inline extern inline
+#else
+# define av_extern_inline inline
+#endif
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,4)
+# define av_warn_unused_result __attribute__((warn_unused_result))
+#else
+# define av_warn_unused_result
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_noinline __attribute__((noinline))
+#elif defined(_MSC_VER)
+# define av_noinline __declspec(noinline)
+#else
+# define av_noinline
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__)
+# define av_pure __attribute__((pure))
+#else
+# define av_pure
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,6) || defined(__clang__)
+# define av_const __attribute__((const))
+#else
+# define av_const
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3) || defined(__clang__)
+# define av_cold __attribute__((cold))
+#else
+# define av_cold
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__)
+# define av_flatten __attribute__((flatten))
+#else
+# define av_flatten
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define attribute_deprecated __attribute__((deprecated))
+#elif defined(_MSC_VER)
+# define attribute_deprecated __declspec(deprecated)
+#else
+# define attribute_deprecated
+#endif
+
+/**
+ * Disable warnings about deprecated features
+ * This is useful for sections of code kept for backward compatibility and
+ * scheduled for removal.
+ */
+#ifndef AV_NOWARN_DEPRECATED
+#if AV_GCC_VERSION_AT_LEAST(4,6) || defined(__clang__)
+# define AV_NOWARN_DEPRECATED(code) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
+ code \
+ _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+# define AV_NOWARN_DEPRECATED(code) \
+ __pragma(warning(push)) \
+ __pragma(warning(disable : 4996)) \
+ code; \
+ __pragma(warning(pop))
+#else
+# define AV_NOWARN_DEPRECATED(code) code
+#endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# define av_unused __attribute__((unused))
+#else
+# define av_unused
+#endif
+
+/**
+ * Mark a variable as used and prevent the compiler from optimizing it
+ * away. This is useful for variables accessed only from inline
+ * assembler without the compiler being aware.
+ */
+#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__)
+# define av_used __attribute__((used))
+#else
+# define av_used
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,3) || defined(__clang__)
+# define av_alias __attribute__((may_alias))
+#else
+# define av_alias
+#endif
+
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__INTEL_COMPILER)
+# define av_uninit(x) x=x
+#else
+# define av_uninit(x) x
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# define av_builtin_constant_p __builtin_constant_p
+# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))
+#else
+# define av_builtin_constant_p(x) 0
+# define av_printf_format(fmtpos, attrpos)
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,5) || defined(__clang__)
+# define av_noreturn __attribute__((noreturn))
+#else
+# define av_noreturn
+#endif
+
+#endif /* AVUTIL_ATTRIBUTES_H */
diff --git a/media/ffvpx/libavutil/attributes_internal.h b/media/ffvpx/libavutil/attributes_internal.h
new file mode 100644
index 0000000000..3df1ee6af3
--- /dev/null
+++ b/media/ffvpx/libavutil/attributes_internal.h
@@ -0,0 +1,34 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_INTERNAL_H
+#define AVUTIL_ATTRIBUTES_INTERNAL_H
+
+#include "attributes.h"
+
+#if (AV_GCC_VERSION_AT_LEAST(4,0) || defined(__clang__)) && (defined(__ELF__) || defined(__MACH__))
+# define attribute_visibility_hidden __attribute__((visibility("hidden")))
+# define FF_VISIBILITY_PUSH_HIDDEN _Pragma("GCC visibility push(hidden)")
+# define FF_VISIBILITY_POP_HIDDEN _Pragma("GCC visibility pop")
+#else
+# define attribute_visibility_hidden
+# define FF_VISIBILITY_PUSH_HIDDEN
+# define FF_VISIBILITY_POP_HIDDEN
+#endif
+
+#endif /* AVUTIL_ATTRIBUTES_INTERNAL_H */
diff --git a/media/ffvpx/libavutil/avassert.h b/media/ffvpx/libavutil/avassert.h
new file mode 100644
index 0000000000..51e462bbae
--- /dev/null
+++ b/media/ffvpx/libavutil/avassert.h
@@ -0,0 +1,75 @@
+/*
+ * copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple assert() macros that are a bit more flexible than ISO C assert().
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_AVASSERT_H
+#define AVUTIL_AVASSERT_H
+
+#include <stdlib.h>
+#include "log.h"
+#include "macros.h"
+
+/**
+ * assert() equivalent that is always enabled.
+ */
+#define av_assert0(cond) do { \
+ if (!(cond)) { \
+ av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \
+ AV_STRINGIFY(cond), __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+
+
+/**
+ * assert() equivalent that does not lie in speed-critical code.
+ * These asserts can thus be enabled without fear of speed loss.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0
+#define av_assert1(cond) av_assert0(cond)
+#else
+#define av_assert1(cond) ((void)0)
+#endif
+
+
+/**
+ * assert() equivalent that does lie in speed-critical code.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
+#define av_assert2(cond) av_assert0(cond)
+#define av_assert2_fpu() av_assert0_fpu()
+#else
+#define av_assert2(cond) ((void)0)
+#define av_assert2_fpu() ((void)0)
+#endif
+
+/**
+ * Assert that floating point operations can be executed.
+ *
+ * This will av_assert0() that the CPU is not in MMX state on x86.
+ */
+void av_assert0_fpu(void);
+
+#endif /* AVUTIL_AVASSERT_H */
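A short usage sketch of the three assert levels declared above; the call sites are hypothetical and only illustrate when each macro is compiled in:

    /* Hypothetical call sites illustrating the assert levels above. */
    #include "libavutil/avassert.h"

    static int demo(int size)
    {
        av_assert0(size >= 0);          /* always enabled, aborts on failure */
        av_assert1(size % 2 == 0);      /* active only when ASSERT_LEVEL > 0 */
        av_assert2(size < (1 << 20));   /* active only when ASSERT_LEVEL > 1 */
        return size / 2;
    }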
diff --git a/media/ffvpx/libavutil/avconfig.h b/media/ffvpx/libavutil/avconfig.h
new file mode 100644
index 0000000000..c289fbb551
--- /dev/null
+++ b/media/ffvpx/libavutil/avconfig.h
@@ -0,0 +1,6 @@
+/* Generated by ffmpeg configure */
+#ifndef AVUTIL_AVCONFIG_H
+#define AVUTIL_AVCONFIG_H
+#define AV_HAVE_BIGENDIAN 0
+#define AV_HAVE_FAST_UNALIGNED 1
+#endif /* AVUTIL_AVCONFIG_H */
diff --git a/media/ffvpx/libavutil/avsscanf.c b/media/ffvpx/libavutil/avsscanf.c
new file mode 100644
index 0000000000..7061e6d965
--- /dev/null
+++ b/media/ffvpx/libavutil/avsscanf.c
@@ -0,0 +1,970 @@
+/*
+ * Copyright (c) 2005-2014 Rich Felker, et al.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <float.h>
+
+#include "avstring.h"
+#include "libm.h"
+
+typedef struct FFFILE {
+ size_t buf_size;
+ unsigned char *buf;
+ unsigned char *rpos, *rend;
+ unsigned char *shend;
+ ptrdiff_t shlim, shcnt;
+ void *cookie;
+ size_t (*read)(struct FFFILE *, unsigned char *, size_t);
+} FFFILE;
+
+#define SIZE_hh -2
+#define SIZE_h -1
+#define SIZE_def 0
+#define SIZE_l 1
+#define SIZE_L 2
+#define SIZE_ll 3
+
+#define shcnt(f) ((f)->shcnt + ((f)->rpos - (f)->buf))
+
+static int fftoread(FFFILE *f)
+{
+ f->rpos = f->rend = f->buf + f->buf_size;
+ return 0;
+}
+
+static size_t ffstring_read(FFFILE *f, unsigned char *buf, size_t len)
+{
+ char *src = f->cookie;
+ size_t k = len+256;
+ char *end = memchr(src, 0, k);
+
+ if (end) k = end-src;
+ if (k < len) len = k;
+ memcpy(buf, src, len);
+ f->rpos = (void *)(src+len);
+ f->rend = (void *)(src+k);
+ f->cookie = src+k;
+
+ return len;
+}
+
+static int ffuflow(FFFILE *f)
+{
+ unsigned char c;
+ if (!fftoread(f) && f->read(f, &c, 1)==1) return c;
+ return EOF;
+}
+
+static void ffshlim(FFFILE *f, ptrdiff_t lim)
+{
+ f->shlim = lim;
+ f->shcnt = f->buf - f->rpos;
+ /* If lim is nonzero, rend must be a valid pointer. */
+ if (lim && f->rend - f->rpos > lim)
+ f->shend = f->rpos + lim;
+ else
+ f->shend = f->rend;
+}
+
+static int ffshgetc(FFFILE *f)
+{
+ int c;
+ ptrdiff_t cnt = shcnt(f);
+ if (f->shlim && cnt >= f->shlim || (c=ffuflow(f)) < 0) {
+ f->shcnt = f->buf - f->rpos + cnt;
+ f->shend = 0;
+ return EOF;
+ }
+ cnt++;
+ if (f->shlim && f->rend - f->rpos > f->shlim - cnt)
+ f->shend = f->rpos + (f->shlim - cnt);
+ else
+ f->shend = f->rend;
+ f->shcnt = f->buf - f->rpos + cnt;
+ if (f->rpos[-1] != c) f->rpos[-1] = c;
+ return c;
+}
+
+#define shlim(f, lim) ffshlim((f), (lim))
+#define shgetc(f) (((f)->rpos < (f)->shend) ? *(f)->rpos++ : ffshgetc(f))
+#define shunget(f) ((f)->shend ? (void)(f)->rpos-- : (void)0)
+
+static const unsigned char table[] = { -1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,
+ -1,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,
+ 25,26,27,28,29,30,31,32,33,34,35,-1,-1,-1,-1,-1,
+ -1,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,
+ 25,26,27,28,29,30,31,32,33,34,35,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+};
+
+static unsigned long long ffintscan(FFFILE *f, unsigned base, int pok, unsigned long long lim)
+{
+ const unsigned char *val = table+1;
+ int c, neg=0;
+ unsigned x;
+ unsigned long long y;
+ if (base > 36 || base == 1) {
+ errno = EINVAL;
+ return 0;
+ }
+ while (av_isspace((c=shgetc(f))));
+ if (c=='+' || c=='-') {
+ neg = -(c=='-');
+ c = shgetc(f);
+ }
+ if ((base == 0 || base == 16) && c=='0') {
+ c = shgetc(f);
+ if ((c|32)=='x') {
+ c = shgetc(f);
+ if (val[c]>=16) {
+ shunget(f);
+ if (pok) shunget(f);
+ else shlim(f, 0);
+ return 0;
+ }
+ base = 16;
+ } else if (base == 0) {
+ base = 8;
+ }
+ } else {
+ if (base == 0) base = 10;
+ if (val[c] >= base) {
+ shunget(f);
+ shlim(f, 0);
+ errno = EINVAL;
+ return 0;
+ }
+ }
+ if (base == 10) {
+ for (x=0; c-'0'<10U && x<=UINT_MAX/10-1; c=shgetc(f))
+ x = x*10 + (c-'0');
+ for (y=x; c-'0'<10U && y<=ULLONG_MAX/10 && 10*y<=ULLONG_MAX-(c-'0'); c=shgetc(f))
+ y = y*10 + (c-'0');
+ if (c-'0'>=10U) goto done;
+ } else if (!(base & base-1)) {
+ int bs = "\0\1\2\4\7\3\6\5"[(0x17*base)>>5&7];
+ for (x=0; val[c]<base && x<=UINT_MAX/32; c=shgetc(f))
+ x = x<<bs | val[c];
+ for (y=x; val[c]<base && y<=ULLONG_MAX>>bs; c=shgetc(f))
+ y = y<<bs | val[c];
+ } else {
+ for (x=0; val[c]<base && x<=UINT_MAX/36-1; c=shgetc(f))
+ x = x*base + val[c];
+ for (y=x; val[c]<base && y<=ULLONG_MAX/base && base*y<=ULLONG_MAX-val[c]; c=shgetc(f))
+ y = y*base + val[c];
+ }
+ if (val[c]<base) {
+ for (; val[c]<base; c=shgetc(f));
+ errno = ERANGE;
+ y = lim;
+ if (lim&1) neg = 0;
+ }
+done:
+ shunget(f);
+ if (y>=lim) {
+ if (!(lim&1) && !neg) {
+ errno = ERANGE;
+ return lim-1;
+ } else if (y>lim) {
+ errno = ERANGE;
+ return lim;
+ }
+ }
+ return (y^neg)-neg;
+}
+
+static long long scanexp(FFFILE *f, int pok)
+{
+ int c;
+ int x;
+ long long y;
+ int neg = 0;
+
+ c = shgetc(f);
+ if (c=='+' || c=='-') {
+ neg = (c=='-');
+ c = shgetc(f);
+ if (c-'0'>=10U && pok) shunget(f);
+ }
+ if (c-'0'>=10U) {
+ shunget(f);
+ return LLONG_MIN;
+ }
+ for (x=0; c-'0'<10U && x<INT_MAX/10; c = shgetc(f))
+ x = 10*x + (c-'0');
+ for (y=x; c-'0'<10U && y<LLONG_MAX/100; c = shgetc(f))
+ y = 10*y + (c-'0');
+ for (; c-'0'<10U; c = shgetc(f));
+ shunget(f);
+ return neg ? -y : y;
+}
+
+#define LD_B1B_DIG 2
+#define LD_B1B_MAX 9007199, 254740991
+#define KMAX 128
+#define MASK (KMAX-1)
+
+static double decfloat(FFFILE *f, int c, int bits, int emin, int sign, int pok)
+{
+ uint32_t x[KMAX];
+ static const uint32_t th[] = { LD_B1B_MAX };
+ int i, j, k, a, z;
+ long long lrp=0, dc=0;
+ long long e10=0;
+ int lnz = 0;
+ int gotdig = 0, gotrad = 0;
+ int rp;
+ int e2;
+ int emax = -emin-bits+3;
+ int denormal = 0;
+ double y;
+ double frac=0;
+ double bias=0;
+ static const int p10s[] = { 10, 100, 1000, 10000,
+ 100000, 1000000, 10000000, 100000000 };
+
+ j=0;
+ k=0;
+
+ /* Don't let leading zeros consume buffer space */
+ for (; c=='0'; c = shgetc(f)) gotdig=1;
+ if (c=='.') {
+ gotrad = 1;
+ for (c = shgetc(f); c=='0'; c = shgetc(f)) gotdig=1, lrp--;
+ }
+
+ x[0] = 0;
+ for (; c-'0'<10U || c=='.'; c = shgetc(f)) {
+ if (c == '.') {
+ if (gotrad) break;
+ gotrad = 1;
+ lrp = dc;
+ } else if (k < KMAX-3) {
+ dc++;
+ if (c!='0') lnz = dc;
+ if (j) x[k] = x[k]*10 + c-'0';
+ else x[k] = c-'0';
+ if (++j==9) {
+ k++;
+ j=0;
+ }
+ gotdig=1;
+ } else {
+ dc++;
+ if (c!='0') {
+ lnz = (KMAX-4)*9;
+ x[KMAX-4] |= 1;
+ }
+ }
+ }
+ if (!gotrad) lrp=dc;
+
+ if (gotdig && (c|32)=='e') {
+ e10 = scanexp(f, pok);
+ if (e10 == LLONG_MIN) {
+ if (pok) {
+ shunget(f);
+ } else {
+ shlim(f, 0);
+ return 0;
+ }
+ e10 = 0;
+ }
+ lrp += e10;
+ } else if (c>=0) {
+ shunget(f);
+ }
+ if (!gotdig) {
+ errno = EINVAL;
+ shlim(f, 0);
+ return 0;
+ }
+
+ /* Handle zero specially to avoid nasty special cases later */
+ if (!x[0]) return sign * 0.0;
+
+ /* Optimize small integers (w/no exponent) and over/under-flow */
+ if (lrp==dc && dc<10 && (bits>30 || x[0]>>bits==0))
+ return sign * (double)x[0];
+ if (lrp > -emin/2) {
+ errno = ERANGE;
+ return sign * DBL_MAX * DBL_MAX;
+ }
+ if (lrp < emin-2*DBL_MANT_DIG) {
+ errno = ERANGE;
+ return sign * DBL_MIN * DBL_MIN;
+ }
+
+ /* Align incomplete final B1B digit */
+ if (j) {
+ for (; j<9; j++) x[k]*=10;
+ k++;
+ j=0;
+ }
+
+ a = 0;
+ z = k;
+ e2 = 0;
+ rp = lrp;
+
+ /* Optimize small to mid-size integers (even in exp. notation) */
+ if (lnz<9 && lnz<=rp && rp < 18) {
+ int bitlim;
+ if (rp == 9) return sign * (double)x[0];
+ if (rp < 9) return sign * (double)x[0] / p10s[8-rp];
+ bitlim = bits-3*(int)(rp-9);
+ if (bitlim>30 || x[0]>>bitlim==0)
+ return sign * (double)x[0] * p10s[rp-10];
+ }
+
+ /* Drop trailing zeros */
+ for (; !x[z-1]; z--);
+
+ /* Align radix point to B1B digit boundary */
+ if (rp % 9) {
+ int rpm9 = rp>=0 ? rp%9 : rp%9+9;
+ int p10 = p10s[8-rpm9];
+ uint32_t carry = 0;
+ for (k=a; k!=z; k++) {
+ uint32_t tmp = x[k] % p10;
+ x[k] = x[k]/p10 + carry;
+ carry = 1000000000/p10 * tmp;
+ if (k==a && !x[k]) {
+ a = (a+1 & MASK);
+ rp -= 9;
+ }
+ }
+ if (carry) x[z++] = carry;
+ rp += 9-rpm9;
+ }
+
+ /* Upscale until desired number of bits are left of radix point */
+ while (rp < 9*LD_B1B_DIG || (rp == 9*LD_B1B_DIG && x[a]<th[0])) {
+ uint32_t carry = 0;
+ e2 -= 29;
+ for (k=(z-1 & MASK); ; k=(k-1 & MASK)) {
+ uint64_t tmp = ((uint64_t)x[k] << 29) + carry;
+ if (tmp > 1000000000) {
+ carry = tmp / 1000000000;
+ x[k] = tmp % 1000000000;
+ } else {
+ carry = 0;
+ x[k] = tmp;
+ }
+ if (k==(z-1 & MASK) && k!=a && !x[k]) z = k;
+ if (k==a) break;
+ }
+ if (carry) {
+ rp += 9;
+ a = (a-1 & MASK);
+ if (a == z) {
+ z = (z-1 & MASK);
+ x[z-1 & MASK] |= x[z];
+ }
+ x[a] = carry;
+ }
+ }
+
+ /* Downscale until exactly number of bits are left of radix point */
+ for (;;) {
+ uint32_t carry = 0;
+ int sh = 1;
+ for (i=0; i<LD_B1B_DIG; i++) {
+ k = (a+i & MASK);
+ if (k == z || x[k] < th[i]) {
+ i=LD_B1B_DIG;
+ break;
+ }
+ if (x[a+i & MASK] > th[i]) break;
+ }
+ if (i==LD_B1B_DIG && rp==9*LD_B1B_DIG) break;
+ /* FIXME: find a way to compute optimal sh */
+ if (rp > 9+9*LD_B1B_DIG) sh = 9;
+ e2 += sh;
+ for (k=a; k!=z; k=(k+1 & MASK)) {
+ uint32_t tmp = x[k] & (1<<sh)-1;
+ x[k] = (x[k]>>sh) + carry;
+ carry = (1000000000>>sh) * tmp;
+ if (k==a && !x[k]) {
+ a = (a+1 & MASK);
+ i--;
+ rp -= 9;
+ }
+ }
+ if (carry) {
+ if ((z+1 & MASK) != a) {
+ x[z] = carry;
+ z = (z+1 & MASK);
+ } else x[z-1 & MASK] |= 1;
+ }
+ }
+
+ /* Assemble desired bits into floating point variable */
+ for (y=i=0; i<LD_B1B_DIG; i++) {
+ if ((a+i & MASK)==z) x[(z=(z+1 & MASK))-1] = 0;
+ y = 1000000000.0L * y + x[a+i & MASK];
+ }
+
+ y *= sign;
+
+ /* Limit precision for denormal results */
+ if (bits > DBL_MANT_DIG+e2-emin) {
+ bits = DBL_MANT_DIG+e2-emin;
+ if (bits<0) bits=0;
+ denormal = 1;
+ }
+
+ /* Calculate bias term to force rounding, move out lower bits */
+ if (bits < DBL_MANT_DIG) {
+ bias = copysign(scalbn(1, 2*DBL_MANT_DIG-bits-1), y);
+ frac = fmod(y, scalbn(1, DBL_MANT_DIG-bits));
+ y -= frac;
+ y += bias;
+ }
+
+ /* Process tail of decimal input so it can affect rounding */
+ if ((a+i & MASK) != z) {
+ uint32_t t = x[a+i & MASK];
+ if (t < 500000000 && (t || (a+i+1 & MASK) != z))
+ frac += 0.25*sign;
+ else if (t > 500000000)
+ frac += 0.75*sign;
+ else if (t == 500000000) {
+ if ((a+i+1 & MASK) == z)
+ frac += 0.5*sign;
+ else
+ frac += 0.75*sign;
+ }
+ if (DBL_MANT_DIG-bits >= 2 && !fmod(frac, 1))
+ frac++;
+ }
+
+ y += frac;
+ y -= bias;
+
+ if ((e2+DBL_MANT_DIG & INT_MAX) > emax-5) {
+ if (fabs(y) >= pow(2, DBL_MANT_DIG)) {
+ if (denormal && bits==DBL_MANT_DIG+e2-emin)
+ denormal = 0;
+ y *= 0.5;
+ e2++;
+ }
+ if (e2+DBL_MANT_DIG>emax || (denormal && frac))
+ errno = ERANGE;
+ }
+
+ return scalbn(y, e2);
+}
+
+static double hexfloat(FFFILE *f, int bits, int emin, int sign, int pok)
+{
+ uint32_t x = 0;
+ double y = 0;
+ double scale = 1;
+ double bias = 0;
+ int gottail = 0, gotrad = 0, gotdig = 0;
+ long long rp = 0;
+ long long dc = 0;
+ long long e2 = 0;
+ int d;
+ int c;
+
+ c = shgetc(f);
+
+ /* Skip leading zeros */
+ for (; c=='0'; c = shgetc(f))
+ gotdig = 1;
+
+ if (c=='.') {
+ gotrad = 1;
+ c = shgetc(f);
+ /* Count zeros after the radix point before significand */
+ for (rp=0; c=='0'; c = shgetc(f), rp--) gotdig = 1;
+ }
+
+ for (; c-'0'<10U || (c|32)-'a'<6U || c=='.'; c = shgetc(f)) {
+ if (c=='.') {
+ if (gotrad) break;
+ rp = dc;
+ gotrad = 1;
+ } else {
+ gotdig = 1;
+ if (c > '9') d = (c|32)+10-'a';
+ else d = c-'0';
+ if (dc<8) {
+ x = x*16 + d;
+ } else if (dc < DBL_MANT_DIG/4+1) {
+ y += d*(scale/=16);
+ } else if (d && !gottail) {
+ y += 0.5*scale;
+ gottail = 1;
+ }
+ dc++;
+ }
+ }
+ if (!gotdig) {
+ shunget(f);
+ if (pok) {
+ shunget(f);
+ if (gotrad) shunget(f);
+ } else {
+ shlim(f, 0);
+ }
+ return sign * 0.0;
+ }
+ if (!gotrad) rp = dc;
+ while (dc<8) x *= 16, dc++;
+ if ((c|32)=='p') {
+ e2 = scanexp(f, pok);
+ if (e2 == LLONG_MIN) {
+ if (pok) {
+ shunget(f);
+ } else {
+ shlim(f, 0);
+ return 0;
+ }
+ e2 = 0;
+ }
+ } else {
+ shunget(f);
+ }
+ e2 += 4*rp - 32;
+
+ if (!x) return sign * 0.0;
+ if (e2 > -emin) {
+ errno = ERANGE;
+ return sign * DBL_MAX * DBL_MAX;
+ }
+ if (e2 < emin-2*DBL_MANT_DIG) {
+ errno = ERANGE;
+ return sign * DBL_MIN * DBL_MIN;
+ }
+
+ while (x < 0x80000000) {
+ if (y>=0.5) {
+ x += x + 1;
+ y += y - 1;
+ } else {
+ x += x;
+ y += y;
+ }
+ e2--;
+ }
+
+ if (bits > 32+e2-emin) {
+ bits = 32+e2-emin;
+ if (bits<0) bits=0;
+ }
+
+ if (bits < DBL_MANT_DIG)
+ bias = copysign(scalbn(1, 32+DBL_MANT_DIG-bits-1), sign);
+
+ if (bits<32 && y && !(x&1)) x++, y=0;
+
+ y = bias + sign*(double)x + sign*y;
+ y -= bias;
+
+ if (!y) errno = ERANGE;
+
+ return scalbn(y, e2);
+}
+
+static double fffloatscan(FFFILE *f, int prec, int pok)
+{
+ int sign = 1;
+ size_t i;
+ int bits;
+ int emin;
+ int c;
+
+ switch (prec) {
+ case 0:
+ bits = FLT_MANT_DIG;
+ emin = FLT_MIN_EXP-bits;
+ break;
+ case 1:
+ bits = DBL_MANT_DIG;
+ emin = DBL_MIN_EXP-bits;
+ break;
+ case 2:
+ bits = DBL_MANT_DIG;
+ emin = DBL_MIN_EXP-bits;
+ break;
+ default:
+ return 0;
+ }
+
+ while (av_isspace((c = shgetc(f))));
+
+ if (c=='+' || c=='-') {
+ sign -= 2*(c=='-');
+ c = shgetc(f);
+ }
+
+ for (i=0; i<8 && (c|32)=="infinity"[i]; i++)
+ if (i<7) c = shgetc(f);
+ if (i==3 || i==8 || (i>3 && pok)) {
+ if (i!=8) {
+ shunget(f);
+ if (pok) for (; i>3; i--) shunget(f);
+ }
+ return sign * INFINITY;
+ }
+ if (!i) for (i=0; i<3 && (c|32)=="nan"[i]; i++)
+ if (i<2) c = shgetc(f);
+ if (i==3) {
+ if (shgetc(f) != '(') {
+ shunget(f);
+ return NAN;
+ }
+ for (i=1; ; i++) {
+ c = shgetc(f);
+ if (c-'0'<10U || c-'A'<26U || c-'a'<26U || c=='_')
+ continue;
+ if (c==')') return NAN;
+ shunget(f);
+ if (!pok) {
+ errno = EINVAL;
+ shlim(f, 0);
+ return 0;
+ }
+ while (i--) shunget(f);
+ return NAN;
+ }
+ return NAN;
+ }
+
+ if (i) {
+ shunget(f);
+ errno = EINVAL;
+ shlim(f, 0);
+ return 0;
+ }
+
+ if (c=='0') {
+ c = shgetc(f);
+ if ((c|32) == 'x')
+ return hexfloat(f, bits, emin, sign, pok);
+ shunget(f);
+ c = '0';
+ }
+
+ return decfloat(f, c, bits, emin, sign, pok);
+}
+
+static void *arg_n(va_list ap, unsigned int n)
+{
+ void *p;
+ unsigned int i;
+ va_list ap2;
+ va_copy(ap2, ap);
+ for (i=n; i>1; i--) va_arg(ap2, void *);
+ p = va_arg(ap2, void *);
+ va_end(ap2);
+ return p;
+}
+
+static void store_int(void *dest, int size, unsigned long long i)
+{
+ if (!dest) return;
+ switch (size) {
+ case SIZE_hh:
+ *(char *)dest = i;
+ break;
+ case SIZE_h:
+ *(short *)dest = i;
+ break;
+ case SIZE_def:
+ *(int *)dest = i;
+ break;
+ case SIZE_l:
+ *(long *)dest = i;
+ break;
+ case SIZE_ll:
+ *(long long *)dest = i;
+ break;
+ }
+}
+
+static int ff_vfscanf(FFFILE *f, const char *fmt, va_list ap)
+{
+ int width;
+ int size;
+ int base;
+ const unsigned char *p;
+ int c, t;
+ char *s;
+ void *dest=NULL;
+ int invert;
+ int matches=0;
+ unsigned long long x;
+ double y;
+ ptrdiff_t pos = 0;
+ unsigned char scanset[257];
+ size_t i;
+
+ for (p=(const unsigned char *)fmt; *p; p++) {
+
+ if (av_isspace(*p)) {
+ while (av_isspace(p[1])) p++;
+ shlim(f, 0);
+ while (av_isspace(shgetc(f)));
+ shunget(f);
+ pos += shcnt(f);
+ continue;
+ }
+ if (*p != '%' || p[1] == '%') {
+ shlim(f, 0);
+ if (*p == '%') {
+ p++;
+ while (av_isspace((c=shgetc(f))));
+ } else {
+ c = shgetc(f);
+ }
+ if (c!=*p) {
+ shunget(f);
+ if (c<0) goto input_fail;
+ goto match_fail;
+ }
+ pos += shcnt(f);
+ continue;
+ }
+
+ p++;
+ if (*p=='*') {
+ dest = 0; p++;
+ } else if (av_isdigit(*p) && p[1]=='$') {
+ dest = arg_n(ap, *p-'0'); p+=2;
+ } else {
+ dest = va_arg(ap, void *);
+ }
+
+ for (width=0; av_isdigit(*p); p++) {
+ width = 10*width + *p - '0';
+ }
+
+ if (*p=='m') {
+ s = 0;
+ p++;
+ }
+
+ size = SIZE_def;
+ switch (*p++) {
+ case 'h':
+ if (*p == 'h') p++, size = SIZE_hh;
+ else size = SIZE_h;
+ break;
+ case 'l':
+ if (*p == 'l') p++, size = SIZE_ll;
+ else size = SIZE_l;
+ break;
+ case 'j':
+ size = SIZE_ll;
+ break;
+ case 'z':
+ case 't':
+ size = SIZE_l;
+ break;
+ case 'L':
+ size = SIZE_L;
+ break;
+ case 'd': case 'i': case 'o': case 'u': case 'x':
+ case 'a': case 'e': case 'f': case 'g':
+ case 'A': case 'E': case 'F': case 'G': case 'X':
+ case 's': case 'c': case '[':
+ case 'S': case 'C':
+ case 'p': case 'n':
+ p--;
+ break;
+ default:
+ goto fmt_fail;
+ }
+
+ t = *p;
+
+ /* C or S */
+ if ((t&0x2f) == 3) {
+ t |= 32;
+ size = SIZE_l;
+ }
+
+ switch (t) {
+ case 'c':
+ if (width < 1) width = 1;
+ case '[':
+ break;
+ case 'n':
+ store_int(dest, size, pos);
+ /* do not increment match count, etc! */
+ continue;
+ default:
+ shlim(f, 0);
+ while (av_isspace(shgetc(f)));
+ shunget(f);
+ pos += shcnt(f);
+ }
+
+ shlim(f, width);
+ if (shgetc(f) < 0) goto input_fail;
+ shunget(f);
+
+ switch (t) {
+ case 's':
+ case 'c':
+ case '[':
+ if (t == 'c' || t == 's') {
+ memset(scanset, -1, sizeof scanset);
+ scanset[0] = 0;
+ if (t == 's') {
+ scanset[1 + '\t'] = 0;
+ scanset[1 + '\n'] = 0;
+ scanset[1 + '\v'] = 0;
+ scanset[1 + '\f'] = 0;
+ scanset[1 + '\r'] = 0;
+ scanset[1 + ' ' ] = 0;
+ }
+ } else {
+ if (*++p == '^') p++, invert = 1;
+ else invert = 0;
+ memset(scanset, invert, sizeof scanset);
+ scanset[0] = 0;
+ if (*p == '-') p++, scanset[1+'-'] = 1-invert;
+ else if (*p == ']') p++, scanset[1+']'] = 1-invert;
+ for (; *p != ']'; p++) {
+ if (!*p) goto fmt_fail;
+ if (*p=='-' && p[1] && p[1] != ']')
+ for (c=p++[-1]; c<*p; c++)
+ scanset[1+c] = 1-invert;
+ scanset[1+*p] = 1-invert;
+ }
+ }
+ s = 0;
+ i = 0;
+ if ((s = dest)) {
+ while (scanset[(c=shgetc(f))+1])
+ s[i++] = c;
+ } else {
+ while (scanset[(c=shgetc(f))+1]);
+ }
+ shunget(f);
+ if (!shcnt(f)) goto match_fail;
+ if (t == 'c' && shcnt(f) != width) goto match_fail;
+ if (t != 'c') {
+ if (s) s[i] = 0;
+ }
+ break;
+ case 'p':
+ case 'X':
+ case 'x':
+ base = 16;
+ goto int_common;
+ case 'o':
+ base = 8;
+ goto int_common;
+ case 'd':
+ case 'u':
+ base = 10;
+ goto int_common;
+ case 'i':
+ base = 0;
+int_common:
+ x = ffintscan(f, base, 0, ULLONG_MAX);
+ if (!shcnt(f))
+ goto match_fail;
+ if (t=='p' && dest)
+ *(void **)dest = (void *)(uintptr_t)x;
+ else
+ store_int(dest, size, x);
+ break;
+ case 'a': case 'A':
+ case 'e': case 'E':
+ case 'f': case 'F':
+ case 'g': case 'G':
+ y = fffloatscan(f, size, 0);
+ if (!shcnt(f))
+ goto match_fail;
+ if (dest) {
+ switch (size) {
+ case SIZE_def:
+ *(float *)dest = y;
+ break;
+ case SIZE_l:
+ *(double *)dest = y;
+ break;
+ case SIZE_L:
+ *(double *)dest = y;
+ break;
+ }
+ }
+ break;
+ }
+
+ pos += shcnt(f);
+ if (dest) matches++;
+ }
+ if (0) {
+fmt_fail:
+input_fail:
+ if (!matches) matches--;
+ }
+match_fail:
+ return matches;
+}
+
+static int ff_vsscanf(const char *s, const char *fmt, va_list ap)
+{
+ FFFILE f = {
+ .buf = (void *)s, .cookie = (void *)s,
+ .read = ffstring_read,
+ };
+
+ return ff_vfscanf(&f, fmt, ap);
+}
+
+int av_sscanf(const char *string, const char *format, ...)
+{
+ int ret;
+ va_list ap;
+ va_start(ap, format);
+ ret = ff_vsscanf(string, format, ap);
+ va_end(ap);
+ return ret;
+}
diff --git a/media/ffvpx/libavutil/avstring.c b/media/ffvpx/libavutil/avstring.c
new file mode 100644
index 0000000000..e460b5be7f
--- /dev/null
+++ b/media/ffvpx/libavutil/avstring.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2007 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <limits.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "config.h"
+#include "mem.h"
+#include "avassert.h"
+#include "avstring.h"
+#include "bprint.h"
+#include "error.h"
+#include "macros.h"
+#include "version.h"
+
+int av_strstart(const char *str, const char *pfx, const char **ptr)
+{
+ while (*pfx && *pfx == *str) {
+ pfx++;
+ str++;
+ }
+ if (!*pfx && ptr)
+ *ptr = str;
+ return !*pfx;
+}
+
+int av_stristart(const char *str, const char *pfx, const char **ptr)
+{
+ while (*pfx && av_toupper((unsigned)*pfx) == av_toupper((unsigned)*str)) {
+ pfx++;
+ str++;
+ }
+ if (!*pfx && ptr)
+ *ptr = str;
+ return !*pfx;
+}
+
+char *av_stristr(const char *s1, const char *s2)
+{
+ if (!*s2)
+ return (char*)(intptr_t)s1;
+
+ do
+ if (av_stristart(s1, s2, NULL))
+ return (char*)(intptr_t)s1;
+ while (*s1++);
+
+ return NULL;
+}
+
+char *av_strnstr(const char *haystack, const char *needle, size_t hay_length)
+{
+ size_t needle_len = strlen(needle);
+ if (!needle_len)
+ return (char*)haystack;
+ while (hay_length >= needle_len) {
+ hay_length--;
+ if (!memcmp(haystack, needle, needle_len))
+ return (char*)haystack;
+ haystack++;
+ }
+ return NULL;
+}
+
+size_t av_strlcpy(char *dst, const char *src, size_t size)
+{
+ size_t len = 0;
+ while (++len < size && *src)
+ *dst++ = *src++;
+ if (len <= size)
+ *dst = 0;
+ return len + strlen(src) - 1;
+}
+
+size_t av_strlcat(char *dst, const char *src, size_t size)
+{
+ size_t len = strlen(dst);
+ if (size <= len + 1)
+ return len + strlen(src);
+ return len + av_strlcpy(dst + len, src, size - len);
+}
+
+size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...)
+{
+ size_t len = strlen(dst);
+ va_list vl;
+
+ va_start(vl, fmt);
+ len += vsnprintf(dst + len, size > len ? size - len : 0, fmt, vl);
+ va_end(vl);
+
+ return len;
+}
+
+char *av_asprintf(const char *fmt, ...)
+{
+ char *p = NULL;
+ va_list va;
+ int len;
+
+ va_start(va, fmt);
+ len = vsnprintf(NULL, 0, fmt, va);
+ va_end(va);
+ if (len < 0)
+ goto end;
+
+ p = av_malloc(len + 1);
+ if (!p)
+ goto end;
+
+ va_start(va, fmt);
+ len = vsnprintf(p, len + 1, fmt, va);
+ va_end(va);
+ if (len < 0)
+ av_freep(&p);
+
+end:
+ return p;
+}
+
+#define WHITESPACES " \n\t\r"
+
+char *av_get_token(const char **buf, const char *term)
+{
+ char *out = av_malloc(strlen(*buf) + 1);
+ char *ret = out, *end = out;
+ const char *p = *buf;
+ if (!out)
+ return NULL;
+ p += strspn(p, WHITESPACES);
+
+ while (*p && !strspn(p, term)) {
+ char c = *p++;
+ if (c == '\\' && *p) {
+ *out++ = *p++;
+ end = out;
+ } else if (c == '\'') {
+ while (*p && *p != '\'')
+ *out++ = *p++;
+ if (*p) {
+ p++;
+ end = out;
+ }
+ } else {
+ *out++ = c;
+ }
+ }
+
+ do
+ *out-- = 0;
+ while (out >= end && strspn(out, WHITESPACES));
+
+ *buf = p;
+
+ return ret;
+}
+
+char *av_strtok(char *s, const char *delim, char **saveptr)
+{
+ char *tok;
+
+ if (!s && !(s = *saveptr))
+ return NULL;
+
+ /* skip leading delimiters */
+ s += strspn(s, delim);
+
+ /* s now points to the first non delimiter char, or to the end of the string */
+ if (!*s) {
+ *saveptr = NULL;
+ return NULL;
+ }
+ tok = s++;
+
+ /* skip non delimiters */
+ s += strcspn(s, delim);
+ if (*s) {
+ *s = 0;
+ *saveptr = s+1;
+ } else {
+ *saveptr = NULL;
+ }
+
+ return tok;
+}
+
+int av_strcasecmp(const char *a, const char *b)
+{
+ uint8_t c1, c2;
+ do {
+ c1 = av_tolower(*a++);
+ c2 = av_tolower(*b++);
+ } while (c1 && c1 == c2);
+ return c1 - c2;
+}
+
+int av_strncasecmp(const char *a, const char *b, size_t n)
+{
+ uint8_t c1, c2;
+ if (n <= 0)
+ return 0;
+ do {
+ c1 = av_tolower(*a++);
+ c2 = av_tolower(*b++);
+ } while (--n && c1 && c1 == c2);
+ return c1 - c2;
+}
+
+char *av_strireplace(const char *str, const char *from, const char *to)
+{
+ char *ret = NULL;
+ const char *pstr2, *pstr = str;
+ size_t tolen = strlen(to), fromlen = strlen(from);
+ AVBPrint pbuf;
+
+ av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
+ while ((pstr2 = av_stristr(pstr, from))) {
+ av_bprint_append_data(&pbuf, pstr, pstr2 - pstr);
+ pstr = pstr2 + fromlen;
+ av_bprint_append_data(&pbuf, to, tolen);
+ }
+ av_bprint_append_data(&pbuf, pstr, strlen(pstr));
+ if (!av_bprint_is_complete(&pbuf)) {
+ av_bprint_finalize(&pbuf, NULL);
+ } else {
+ av_bprint_finalize(&pbuf, &ret);
+ }
+
+ return ret;
+}
+
+const char *av_basename(const char *path)
+{
+ char *p;
+#if HAVE_DOS_PATHS
+ char *q, *d;
+#endif
+
+ if (!path || *path == '\0')
+ return ".";
+
+ p = strrchr(path, '/');
+#if HAVE_DOS_PATHS
+ q = strrchr(path, '\\');
+ d = strchr(path, ':');
+ p = FFMAX3(p, q, d);
+#endif
+
+ if (!p)
+ return path;
+
+ return p + 1;
+}
+
+const char *av_dirname(char *path)
+{
+ char *p = path ? strrchr(path, '/') : NULL;
+
+#if HAVE_DOS_PATHS
+ char *q = path ? strrchr(path, '\\') : NULL;
+ char *d = path ? strchr(path, ':') : NULL;
+
+ d = d ? d + 1 : d;
+
+ p = FFMAX3(p, q, d);
+#endif
+
+ if (!p)
+ return ".";
+
+ *p = '\0';
+
+ return path;
+}
+
+char *av_append_path_component(const char *path, const char *component)
+{
+ size_t p_len, c_len;
+ char *fullpath;
+
+ if (!path)
+ return av_strdup(component);
+ if (!component)
+ return av_strdup(path);
+
+ p_len = strlen(path);
+ c_len = strlen(component);
+ if (p_len > SIZE_MAX - c_len || p_len + c_len > SIZE_MAX - 2)
+ return NULL;
+ fullpath = av_malloc(p_len + c_len + 2);
+ if (fullpath) {
+ if (p_len) {
+ av_strlcpy(fullpath, path, p_len + 1);
+ if (c_len) {
+ if (fullpath[p_len - 1] != '/' && component[0] != '/')
+ fullpath[p_len++] = '/';
+ else if (fullpath[p_len - 1] == '/' && component[0] == '/')
+ p_len--;
+ }
+ }
+ av_strlcpy(&fullpath[p_len], component, c_len + 1);
+ fullpath[p_len + c_len] = 0;
+ }
+ return fullpath;
+}
+
+int av_escape(char **dst, const char *src, const char *special_chars,
+ enum AVEscapeMode mode, int flags)
+{
+ AVBPrint dstbuf;
+ int ret;
+
+ av_bprint_init(&dstbuf, 1, INT_MAX); /* (int)dstbuf.len must be >= 0 */
+ av_bprint_escape(&dstbuf, src, special_chars, mode, flags);
+
+ if (!av_bprint_is_complete(&dstbuf)) {
+ av_bprint_finalize(&dstbuf, NULL);
+ return AVERROR(ENOMEM);
+ }
+ if ((ret = av_bprint_finalize(&dstbuf, dst)) < 0)
+ return ret;
+ return dstbuf.len;
+}
+
+int av_match_name(const char *name, const char *names)
+{
+ const char *p;
+ int len, namelen;
+
+ if (!name || !names)
+ return 0;
+
+ namelen = strlen(name);
+ while (*names) {
+ int negate = '-' == *names;
+ p = strchr(names, ',');
+ if (!p)
+ p = names + strlen(names);
+ names += negate;
+ len = FFMAX(p - names, namelen);
+ if (!av_strncasecmp(name, names, len) || !strncmp("ALL", names, FFMAX(3, p - names)))
+ return !negate;
+ names = p + (*p == ',');
+ }
+ return 0;
+}
+
+int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end,
+ unsigned int flags)
+{
+ const uint8_t *p = *bufp;
+ uint32_t top;
+ uint64_t code;
+ int ret = 0, tail_len;
+ uint32_t overlong_encoding_mins[6] = {
+ 0x00000000, 0x00000080, 0x00000800, 0x00010000, 0x00200000, 0x04000000,
+ };
+
+ if (p >= buf_end)
+ return 0;
+
+ code = *p++;
+
+ /* first sequence byte starts with 10, or is 1111-1110 or 1111-1111,
+ which is not admitted */
+ if ((code & 0xc0) == 0x80 || code >= 0xFE) {
+ ret = AVERROR(EILSEQ);
+ goto end;
+ }
+ top = (code & 128) >> 1;
+
+ tail_len = 0;
+ while (code & top) {
+ int tmp;
+ tail_len++;
+ if (p >= buf_end) {
+ (*bufp) ++;
+ return AVERROR(EILSEQ); /* incomplete sequence */
+ }
+
+ /* we assume the byte to be in the form 10xx-xxxx */
+ tmp = *p++ - 128; /* strip leading 1 */
+ if (tmp>>6) {
+ (*bufp) ++;
+ return AVERROR(EILSEQ);
+ }
+ code = (code<<6) + tmp;
+ top <<= 5;
+ }
+ code &= (top << 1) - 1;
+
+ /* check for overlong encodings */
+ av_assert0(tail_len <= 5);
+ if (code < overlong_encoding_mins[tail_len]) {
+ ret = AVERROR(EILSEQ);
+ goto end;
+ }
+
+ if (code >= 1U<<31) {
+ ret = AVERROR(EILSEQ); /* out-of-range value */
+ goto end;
+ }
+
+ *codep = code;
+
+ if (code > 0x10FFFF &&
+ !(flags & AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES))
+ ret = AVERROR(EILSEQ);
+ if (code < 0x20 && code != 0x9 && code != 0xA && code != 0xD &&
+ flags & AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES)
+ ret = AVERROR(EILSEQ);
+ if (code >= 0xD800 && code <= 0xDFFF &&
+ !(flags & AV_UTF8_FLAG_ACCEPT_SURROGATES))
+ ret = AVERROR(EILSEQ);
+ if ((code == 0xFFFE || code == 0xFFFF) &&
+ !(flags & AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS))
+ ret = AVERROR(EILSEQ);
+
+end:
+ *bufp = p;
+ return ret;
+}
+
+int av_match_list(const char *name, const char *list, char separator)
+{
+ const char *p, *q;
+
+ for (p = name; p && *p; ) {
+ for (q = list; q && *q; ) {
+ int k;
+ for (k = 0; p[k] == q[k] || (p[k]*q[k] == 0 && p[k]+q[k] == separator); k++)
+ if (k && (!p[k] || p[k] == separator))
+ return 1;
+ q = strchr(q, separator);
+ q += !!q;
+ }
+ p = strchr(p, separator);
+ p += !!p;
+ }
+
+ return 0;
+}
diff --git a/media/ffvpx/libavutil/avstring.h b/media/ffvpx/libavutil/avstring.h
new file mode 100644
index 0000000000..e260263763
--- /dev/null
+++ b/media/ffvpx/libavutil/avstring.h
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2007 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVSTRING_H
+#define AVUTIL_AVSTRING_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @addtogroup lavu_string
+ * @{
+ */
+
+/**
+ * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to
+ * the address of the first character in str after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_strstart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Return non-zero if pfx is a prefix of str independent of case. If
+ * it is, *ptr is set to the address of the first character in str
+ * after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_stristart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Locate the first case-independent occurrence in the string haystack
+ * of the string needle. A zero-length string needle is considered to
+ * match at the start of haystack.
+ *
+ * This function is a case-insensitive version of the standard strstr().
+ *
+ * @param haystack string to search in
+ * @param needle string to search for
+ * @return pointer to the located match within haystack
+ * or a null pointer if no match
+ */
+char *av_stristr(const char *haystack, const char *needle);
+
+/**
+ * Locate the first occurrence of the string needle in the string haystack
+ * where not more than hay_length characters are searched. A zero-length
+ * string needle is considered to match at the start of haystack.
+ *
+ * This function is a length-limited version of the standard strstr().
+ *
+ * @param haystack string to search in
+ * @param needle string to search for
+ * @param hay_length length of string to search in
+ * @return pointer to the located match within haystack
+ * or a null pointer if no match
+ */
+char *av_strnstr(const char *haystack, const char *needle, size_t hay_length);
+
+/**
+ * Copy the string src to dst, but no more than size - 1 bytes, and
+ * null-terminate dst.
+ *
+ * This function is the same as BSD strlcpy().
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the length of src
+ *
+ * @warning since the return value is the length of src, src absolutely
+ * _must_ be a properly 0-terminated string, otherwise this will read beyond
+ * the end of the buffer and possibly crash.
+ */
+size_t av_strlcpy(char *dst, const char *src, size_t size);
+
+/**
+ * Append the string src to the string dst, but to a total length of
+ * no more than size - 1 bytes, and null-terminate dst.
+ *
+ * This function is similar to BSD strlcat(), but differs when
+ * size <= strlen(dst).
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the total length of src and dst
+ *
+ * @warning since the return value uses the lengths of src and dst, these
+ * absolutely _must_ be properly 0-terminated strings, otherwise this
+ * will read beyond the end of the buffer and possibly crash.
+ */
+size_t av_strlcat(char *dst, const char *src, size_t size);
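A small illustrative sketch (not part of the patch; it assumes a program built against libavutil) of how the return values documented above are typically used to detect truncation:

    #include <stdio.h>
    #include "libavutil/avstring.h"

    int main(void)
    {
        char buf[8];

        /* The return value is the length the full copy would have needed,
         * so truncation is detected by comparing it against the buffer size. */
        size_t n = av_strlcpy(buf, "libavutil", sizeof(buf));
        if (n >= sizeof(buf))
            printf("truncated to \"%s\" (needed %zu bytes)\n", buf, n + 1);

        av_strlcpy(buf, "foo", sizeof(buf));
        av_strlcat(buf, "bar", sizeof(buf));
        printf("%s\n", buf); /* "foobar" */
        return 0;
    }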
+
+/**
+ * Append output to a string, according to a format. Never write out of
+ * the destination buffer, and always put a terminating 0 within
+ * the buffer.
+ * @param dst destination buffer (string to which the output is
+ * appended)
+ * @param size total size of the destination buffer
+ * @param fmt printf-compatible format string, specifying how the
+ * following parameters are used
+ * @return the length of the string that would have been generated
+ * if enough space had been available
+ */
+size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4);
+
+/**
+ * Get the count of continuous non-zero chars starting from the beginning.
+ *
+ * @param s the string whose length to count
+ * @param len maximum number of characters to check in the string, that
+ * is the maximum value which is returned by the function
+ */
+static inline size_t av_strnlen(const char *s, size_t len)
+{
+ size_t i;
+ for (i = 0; i < len && s[i]; i++)
+ ;
+ return i;
+}
+
+/**
+ * Print arguments following the specified format into a large enough
+ * auto-allocated buffer. It is similar to GNU asprintf().
+ * @param fmt printf-compatible format string, specifying how the
+ * following parameters are used.
+ * @return the allocated string
+ * @note You have to free the string yourself with av_free().
+ */
+char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2);
+
+/**
+ * Unescape the given string until a non-escaped terminating char,
+ * and return the token corresponding to the unescaped string.
+ *
+ * The normal \ and ' escaping is supported. Leading and trailing
+ * whitespaces are removed, unless they are escaped with '\' or are
+ * enclosed between ''.
+ *
+ * @param buf the buffer to parse, buf will be updated to point to the
+ * terminating char
+ * @param term a 0-terminated list of terminating chars
+ * @return the malloced unescaped string, which must be av_freed by
+ * the user, NULL in case of allocation failure
+ */
+char *av_get_token(const char **buf, const char *term);
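A hypothetical example of the escaping rules described above (not part of the patch; the input string is made up for illustration): the backslash protects the ':' from terminating the token, and the caller owns the returned buffer.

    #include <stdio.h>
    #include "libavutil/avstring.h"
    #include "libavutil/mem.h"

    int main(void)
    {
        const char *p = "first\\:part:second";        /* i.e. first\:part:second */
        char *tok = av_get_token(&p, ":");            /* stops at the unescaped ':' */
        if (tok) {
            printf("token=\"%s\" rest=\"%s\"\n", tok, p); /* "first:part" / ":second" */
            av_free(tok);
        }
        return 0;
    }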
+
+/**
+ * Split the string into several tokens which can be accessed by
+ * successive calls to av_strtok().
+ *
+ * A token is defined as a sequence of characters not belonging to the
+ * set specified in delim.
+ *
+ * On the first call to av_strtok(), s should point to the string to
+ * parse, and the value of saveptr is ignored. In subsequent calls, s
+ * should be NULL, and saveptr should be unchanged since the previous
+ * call.
+ *
+ * This function is similar to strtok_r() defined in POSIX.1.
+ *
+ * @param s the string to parse, may be NULL
+ * @param delim 0-terminated list of token delimiters, must be non-NULL
+ * @param saveptr user-provided pointer which points to stored
+ * information necessary for av_strtok() to continue scanning the same
+ * string. saveptr is updated to point to the next character after the
+ * first delimiter found, or to NULL if the string was terminated
+ * @return the found token, or NULL when no token is found
+ */
+char *av_strtok(char *s, const char *delim, char **saveptr);
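A minimal usage sketch of the calling convention just described (not part of the patch; assumes a program built against libavutil). Note that, like strtok_r(), av_strtok() writes into its input and skips empty fields:

    #include <stdio.h>
    #include "libavutil/avstring.h"

    int main(void)
    {
        char line[] = "one,two,,three";   /* modified in place by av_strtok() */
        char *save = NULL;

        for (char *tok = av_strtok(line, ",", &save); tok;
             tok = av_strtok(NULL, ",", &save))
            printf("token: %s\n", tok);   /* "one", "two", "three" */
        return 0;
    }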
+
+/**
+ * Locale-independent conversion of ASCII isdigit.
+ */
+static inline av_const int av_isdigit(int c)
+{
+ return c >= '0' && c <= '9';
+}
+
+/**
+ * Locale-independent conversion of ASCII isgraph.
+ */
+static inline av_const int av_isgraph(int c)
+{
+ return c > 32 && c < 127;
+}
+
+/**
+ * Locale-independent conversion of ASCII isspace.
+ */
+static inline av_const int av_isspace(int c)
+{
+ return c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' ||
+ c == '\v';
+}
+
+/**
+ * Locale-independent conversion of ASCII characters to uppercase.
+ */
+static inline av_const int av_toupper(int c)
+{
+ if (c >= 'a' && c <= 'z')
+ c ^= 0x20;
+ return c;
+}
+
+/**
+ * Locale-independent conversion of ASCII characters to lowercase.
+ */
+static inline av_const int av_tolower(int c)
+{
+ if (c >= 'A' && c <= 'Z')
+ c ^= 0x20;
+ return c;
+}
+
+/**
+ * Locale-independent conversion of ASCII isxdigit.
+ */
+static inline av_const int av_isxdigit(int c)
+{
+ c = av_tolower(c);
+ return av_isdigit(c) || (c >= 'a' && c <= 'f');
+}
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strcasecmp(const char *a, const char *b);
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strncasecmp(const char *a, const char *b, size_t n);
+
+/**
+ * Locale-independent strings replace.
+ * @note This means only ASCII-range characters are replaced
+ */
+char *av_strireplace(const char *str, const char *from, const char *to);
+
+/**
+ * Thread safe basename.
+ * @param path the string to parse, on DOS both \ and / are considered separators.
+ * @return pointer to the basename substring.
+ * If path does not contain a slash, the function returns path itself.
+ * If path is a NULL pointer or points to an empty string, a pointer
+ * to a string "." is returned.
+ */
+const char *av_basename(const char *path);
+
+/**
+ * Thread safe dirname.
+ * @param path the string to parse, on DOS both \ and / are considered separators.
+ * @return A pointer to a string that's the parent directory of path.
+ * If path is a NULL pointer or points to an empty string, a pointer
+ * to a string "." is returned.
+ * @note the function may modify the contents of the path, so copies should be passed.
+ */
+const char *av_dirname(char *path);
+
+/**
+ * Match instances of a name in a comma-separated list of names.
+ * List entries are checked from the start to the end of the names list;
+ * the first match ends further processing. If an entry prefixed with '-'
+ * matches, then 0 is returned. The "ALL" list entry is considered to
+ * match all names.
+ *
+ * @param name Name to look for.
+ * @param names List of names.
+ * @return 1 on match, 0 otherwise.
+ */
+int av_match_name(const char *name, const char *names);
+
+/**
+ * Append path component to the existing path.
+ * Path separator '/' is placed between when needed.
+ * The resulting string has to be freed with av_free().
+ * @param path base path
+ * @param component component to be appended
+ * @return new path or NULL on error.
+ */
+char *av_append_path_component(const char *path, const char *component);
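An illustrative call (not part of the patch; the paths are made up) showing the separator handling implemented in avstring.c above — a duplicate '/' between the two parts is collapsed:

    #include <stdio.h>
    #include "libavutil/avstring.h"
    #include "libavutil/mem.h"

    int main(void)
    {
        char *path = av_append_path_component("/tmp/", "/clip.webm");
        if (path) {
            printf("%s\n", path);   /* "/tmp/clip.webm" */
            av_free(path);
        }
        return 0;
    }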
+
+enum AVEscapeMode {
+ AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode.
+ AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping.
+ AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping.
+ AV_ESCAPE_MODE_XML, ///< Use XML non-markup character data escaping.
+};
+
+/**
+ * Consider spaces special and escape them even in the middle of the
+ * string.
+ *
+ * This is equivalent to adding the whitespace characters to the special
+ * characters list, except it is guaranteed to use the exact same list
+ * of whitespace characters as the rest of libavutil.
+ */
+#define AV_ESCAPE_FLAG_WHITESPACE (1 << 0)
+
+/**
+ * Escape only specified special characters.
+ * Without this flag, escape also any characters that may be considered
+ * special by av_get_token(), such as the single quote.
+ */
+#define AV_ESCAPE_FLAG_STRICT (1 << 1)
+
+/**
+ * Within AV_ESCAPE_MODE_XML, additionally escape single quotes for single
+ * quoted attributes.
+ */
+#define AV_ESCAPE_FLAG_XML_SINGLE_QUOTES (1 << 2)
+
+/**
+ * Within AV_ESCAPE_MODE_XML, additionally escape double quotes for double
+ * quoted attributes.
+ */
+#define AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES (1 << 3)
+
+
+/**
+ * Escape string in src, and put the escaped string in an allocated
+ * string in *dst, which must be freed with av_free().
+ *
+ * @param dst pointer where an allocated string is put
+ * @param src string to escape, must be non-NULL
+ * @param special_chars string containing the special characters which
+ * need to be escaped, can be NULL
+ * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros.
+ * Any unknown value for mode will be considered equivalent to
+ * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without
+ * notice.
+ * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros
+ * @return the length of the allocated string, or a negative error code in case of error
+ * @see av_bprint_escape()
+ */
+av_warn_unused_result
+int av_escape(char **dst, const char *src, const char *special_chars,
+ enum AVEscapeMode mode, int flags);
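A sketch of the API above (not part of the patch; the sample string and special-character set are arbitrary). With AV_ESCAPE_MODE_BACKSLASH and without AV_ESCAPE_FLAG_STRICT, both the listed special characters and av_get_token()-style quoting characters should come back escaped; the exact output is determined by av_bprint_escape():

    #include <stdio.h>
    #include "libavutil/avstring.h"
    #include "libavutil/mem.h"

    int main(void)
    {
        char *escaped = NULL;
        int len = av_escape(&escaped, "title: a 'quoted' value", ":",
                            AV_ESCAPE_MODE_BACKSLASH, 0);
        if (len >= 0) {
            printf("%s\n", escaped);
            av_free(escaped);
        }
        return 0;
    }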
+
+#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES 1 ///< accept codepoints over 0x10FFFF
+#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS 2 ///< accept non-characters - 0xFFFE and 0xFFFF
+#define AV_UTF8_FLAG_ACCEPT_SURROGATES 4 ///< accept UTF-16 surrogates codes
+#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML
+
+#define AV_UTF8_FLAG_ACCEPT_ALL \
+ AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES
+
+/**
+ * Read and decode a single UTF-8 code point (character) from the
+ * buffer in *buf, and update *buf to point to the next byte to
+ * decode.
+ *
+ * In case of an invalid byte sequence, the pointer will be updated to
+ * the next byte after the invalid sequence and the function will
+ * return an error code.
+ *
+ * Depending on the specified flags, the function will also fail in
+ * case the decoded code point does not belong to a valid range.
+ *
+ * @note For speed-relevant code a carefully implemented use of
+ * GET_UTF8() may be preferred.
+ *
+ * @param codep pointer used to return the parsed code in case of success.
+ * The value in *codep is set even in case the range check fails.
+ * @param bufp pointer to the address of the first byte of the sequence
+ * to decode, updated by the function to point to the
+ * next byte after the decoded sequence
+ * @param buf_end pointer to the end of the buffer, points to the next
+ * byte past the last in the buffer. This is used to
+ * avoid buffer overreads (in case of an unfinished
+ * UTF-8 sequence towards the end of the buffer).
+ * @param flags a collection of AV_UTF8_FLAG_* flags
+ * @return >= 0 in case a sequence was successfully read, a negative
+ * value in case of invalid sequence
+ */
+av_warn_unused_result
+int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end,
+ unsigned int flags);
+
+/**
+ * Check if a name is in a list.
+ * @returns 0 if not found, or the 1 based index where it has been found in the
+ * list.
+ */
+int av_match_list(const char *name, const char *list, char separator);
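For clarity (and not part of the patch): the implementation in avstring.c above returns 1 on a match and 0 otherwise, so callers should only rely on zero versus non-zero, as in this sketch:

    #include <stdio.h>
    #include "libavutil/avstring.h"

    int main(void)
    {
        printf("%d\n", av_match_list("vp9",  "vp8,vp9,av1", ','));  /* non-zero */
        printf("%d\n", av_match_list("h264", "vp8,vp9,av1", ','));  /* 0 */
        return 0;
    }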
+
+/**
+ * See libc sscanf manual for more information.
+ * Locale-independent sscanf implementation.
+ */
+int av_sscanf(const char *string, const char *format, ...);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AVSTRING_H */
diff --git a/media/ffvpx/libavutil/avutil.h b/media/ffvpx/libavutil/avutil.h
new file mode 100644
index 0000000000..64b68bdbd3
--- /dev/null
+++ b/media/ffvpx/libavutil/avutil.h
@@ -0,0 +1,371 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVUTIL_H
+#define AVUTIL_AVUTIL_H
+
+/**
+ * @file
+ * @ingroup lavu
+ * Convenience header that includes @ref lavu "libavutil"'s core.
+ */
+
+/**
+ * @mainpage
+ *
+ * @section ffmpeg_intro Introduction
+ *
+ * This document describes the usage of the different libraries
+ * provided by FFmpeg.
+ *
+ * @li @ref libavc "libavcodec" encoding/decoding library
+ * @li @ref lavfi "libavfilter" graph-based frame editing library
+ * @li @ref libavf "libavformat" I/O and muxing/demuxing library
+ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
+ * @li @ref lavu "libavutil" common utility library
+ * @li @ref lswr "libswresample" audio resampling, format conversion and mixing
+ * @li @ref lpp "libpostproc" post processing library
+ * @li @ref libsws "libswscale" color conversion and scaling library
+ *
+ * @section ffmpeg_versioning Versioning and compatibility
+ *
+ * Each of the FFmpeg libraries contains a version.h header, which defines a
+ * major, minor and micro version number with the
+ * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version
+ * number is incremented with backward incompatible changes - e.g. removing
+ * parts of the public API, reordering public struct members, etc. The minor
+ * version number is incremented for backward compatible API changes or major
+ * new features - e.g. adding a new public function or a new decoder. The micro
+ * version number is incremented for smaller changes that a calling program
+ * might still want to check for - e.g. changing behavior in a previously
+ * unspecified situation.
+ *
+ * FFmpeg guarantees backward API and ABI compatibility for each library as long
+ * as its major version number is unchanged. This means that no public symbols
+ * will be removed or renamed. Types and names of the public struct members and
+ * values of public macros and enums will remain the same (unless they were
+ * explicitly declared as not part of the public API). Documented behavior will
+ * not change.
+ *
+ * In other words, any correct program that works with a given FFmpeg snapshot
+ * should work just as well without any changes with any later snapshot with the
+ * same major versions. This applies to both rebuilding the program against new
+ * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program
+ * links against.
+ *
+ * However, new public symbols may be added and new members may be appended to
+ * public structs whose size is not part of public ABI (most public structs in
+ * FFmpeg). New macros and enum values may be added. Behavior in undocumented
+ * situations may change slightly (and be documented). All those are accompanied
+ * by an entry in doc/APIchanges and incrementing either the minor or micro
+ * version number.
+ */
+
+/**
+ * @defgroup lavu libavutil
+ * Common code shared across all FFmpeg libraries.
+ *
+ * @note
+ * libavutil is designed to be modular. In most cases, in order to use the
+ * functions provided by one component of libavutil you must explicitly include
+ * the specific header containing that feature. If you are only using
+ * media-related components, you could simply include libavutil/avutil.h, which
+ * brings in most of the "core" components.
+ *
+ * @{
+ *
+ * @defgroup lavu_crypto Crypto and Hashing
+ *
+ * @{
+ * @}
+ *
+ * @defgroup lavu_math Mathematics
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_string String Manipulation
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_mem Memory Management
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_data Data Structures
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_video Video related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_audio Audio related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_error Error Codes
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_log Logging Facility
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_misc Other
+ *
+ * @{
+ *
+ * @defgroup preproc_misc Preprocessor String Macros
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup version_utils Library Version Macros
+ *
+ * @{
+ *
+ * @}
+ */
+
+
+/**
+ * @addtogroup lavu_ver
+ * @{
+ */
+
+/**
+ * Return the LIBAVUTIL_VERSION_INT constant.
+ */
+unsigned avutil_version(void);
+
+/**
+ * Return an informative version string. This usually is the actual release
+ * version number or a git commit description. This string has no fixed format
+ * and can change any time. It should never be parsed by code.
+ */
+const char *av_version_info(void);
+
+/**
+ * Return the libavutil build-time configuration.
+ */
+const char *avutil_configuration(void);
+
+/**
+ * Return the libavutil license.
+ */
+const char *avutil_license(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_media Media Type
+ * @brief Media Type
+ */
+
+enum AVMediaType {
+ AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA
+ AVMEDIA_TYPE_VIDEO,
+ AVMEDIA_TYPE_AUDIO,
+ AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous
+ AVMEDIA_TYPE_SUBTITLE,
+ AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse
+ AVMEDIA_TYPE_NB
+};
+
+/**
+ * Return a string describing the media_type enum, NULL if media_type
+ * is unknown.
+ */
+const char *av_get_media_type_string(enum AVMediaType media_type);
+
+/**
+ * @defgroup lavu_const Constants
+ * @{
+ *
+ * @defgroup lavu_enc Encoding specific
+ *
+ * @note those definitions should move to avcodec
+ * @{
+ */
+
+#define FF_LAMBDA_SHIFT 7
+#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)
+#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
+#define FF_LAMBDA_MAX (256*128-1)
+
+#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove
+
+/**
+ * @}
+ * @defgroup lavu_time Timestamp specific
+ *
+ * FFmpeg internal timebase and timestamp definitions
+ *
+ * @{
+ */
+
+/**
+ * @brief Undefined timestamp value
+ *
+ * Usually reported by demuxers that work on containers that do not provide
+ * either pts or dts.
+ */
+
+#define AV_NOPTS_VALUE ((int64_t)UINT64_C(0x8000000000000000))
+
+/**
+ * Internal time base represented as integer
+ */
+
+#define AV_TIME_BASE 1000000
+
+/**
+ * Internal time base represented as fractional value
+ */
+
+#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE}
+
+/**
+ * @}
+ * @}
+ * @defgroup lavu_picture Image related
+ *
+ * AVPicture types, pixel formats and basic image planes manipulation.
+ *
+ * @{
+ */
+
+enum AVPictureType {
+ AV_PICTURE_TYPE_NONE = 0, ///< Undefined
+ AV_PICTURE_TYPE_I, ///< Intra
+ AV_PICTURE_TYPE_P, ///< Predicted
+ AV_PICTURE_TYPE_B, ///< Bi-dir predicted
+ AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG-4
+ AV_PICTURE_TYPE_SI, ///< Switching Intra
+ AV_PICTURE_TYPE_SP, ///< Switching Predicted
+ AV_PICTURE_TYPE_BI, ///< BI type
+};
+
+/**
+ * Return a single letter to describe the given picture type
+ * pict_type.
+ *
+ * @param[in] pict_type the picture type
+ * @return a single character representing the picture type,
+ * '?' if pict_type is unknown
+ */
+char av_get_picture_type_char(enum AVPictureType pict_type);
+
+/**
+ * @}
+ */
+
+#include "common.h"
+#include "error.h"
+#include "rational.h"
+#include "version.h"
+#include "macros.h"
+#include "mathematics.h"
+#include "log.h"
+#include "pixfmt.h"
+
+/**
+ * Return p, or the default pointer x in case p is NULL.
+ */
+static inline void *av_x_if_null(const void *p, const void *x)
+{
+ return (void *)(intptr_t)(p ? p : x);
+}
+
+/**
+ * Compute the length of an integer list.
+ *
+ * @param elsize size in bytes of each list element (only 1, 2, 4 or 8)
+ * @param term list terminator (usually 0 or -1)
+ * @param list pointer to the list
+ * @return length of the list, in elements, not counting the terminator
+ */
+unsigned av_int_list_length_for_size(unsigned elsize,
+ const void *list, uint64_t term) av_pure;
+
+/**
+ * Compute the length of an integer list.
+ *
+ * @param term list terminator (usually 0 or -1)
+ * @param list pointer to the list
+ * @return length of the list, in elements, not counting the terminator
+ */
+#define av_int_list_length(list, term) \
+ av_int_list_length_for_size(sizeof(*(list)), list, term)
+
+#if FF_API_AV_FOPEN_UTF8
+/**
+ * Open a file using a UTF-8 filename.
+ * The API of this function matches POSIX fopen(), errors are returned through
+ * errno.
+ * @deprecated Avoid using it, as on Windows, the FILE* allocated by this
+ * function may be allocated with a different CRT than the caller
+ * who uses the FILE*. No replacement provided in public API.
+ */
+attribute_deprecated
+FILE *av_fopen_utf8(const char *path, const char *mode);
+#endif
+
+/**
+ * Return the fractional representation of the internal time base.
+ */
+AVRational av_get_time_base_q(void);
+
+#define AV_FOURCC_MAX_STRING_SIZE 32
+
+#define av_fourcc2str(fourcc) av_fourcc_make_string((char[AV_FOURCC_MAX_STRING_SIZE]){0}, fourcc)
+
+/**
+ * Fill the provided buffer with a string containing a FourCC (four-character
+ * code) representation.
+ *
+ * @param buf a buffer with size in bytes of at least AV_FOURCC_MAX_STRING_SIZE
+ * @param fourcc the fourcc to represent
+ * @return the buffer in input
+ */
+char *av_fourcc_make_string(char *buf, uint32_t fourcc);
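A tiny illustration (not part of the patch) of the convenience macro above; MKTAG() comes from libavutil/macros.h, which avutil.h already includes:

    #include <stdio.h>
    #include "libavutil/avutil.h"

    int main(void)
    {
        /* av_fourcc2str() expands to av_fourcc_make_string() over a stack buffer. */
        printf("%s\n", av_fourcc2str(MKTAG('a', 'v', 'c', '1')));   /* "avc1" */
        return 0;
    }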
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_AVUTIL_H */
diff --git a/media/ffvpx/libavutil/avutil.symbols b/media/ffvpx/libavutil/avutil.symbols
new file mode 100644
index 0000000000..aefd9b7310
--- /dev/null
+++ b/media/ffvpx/libavutil/avutil.symbols
@@ -0,0 +1,326 @@
+av_add_q
+av_add_stable
+av_append_path_component
+av_asprintf
+av_basename
+av_bprint_append_data
+av_bprint_channel_layout
+av_bprint_chars
+av_bprint_clear
+av_bprint_escape
+av_bprint_finalize
+av_bprint_get_buffer
+av_bprint_init
+av_bprint_init_for_buffer
+av_bprint_strftime
+av_bprintf
+av_buffer_alloc
+av_buffer_allocz
+av_buffer_create
+av_buffer_default_free
+av_buffer_get_opaque
+av_buffer_get_ref_count
+av_buffer_is_writable
+av_buffer_make_writable
+av_buffer_pool_get
+av_buffer_pool_init
+av_buffer_pool_uninit
+av_buffer_realloc
+av_buffer_replace
+av_buffer_ref
+av_buffer_unref
+av_calloc
+av_channel_layout_check
+av_channel_layout_copy
+av_channel_layout_compare
+av_channel_layout_extract_channel
+av_channel_layout_describe
+av_channel_layout_from_mask
+av_channel_layout_uninit
+av_chroma_location_name
+av_chroma_location_enum_to_pos
+av_chroma_location_pos_to_enum
+av_color_primaries_name
+av_color_range_name
+av_color_space_name
+av_color_transfer_name
+av_compare_mod
+av_compare_ts
+#ifndef MOZ_FFVPX_AUDIOONLY
+av_content_light_metadata_create_side_data
+#endif
+av_cpu_count
+av_crc
+av_crc_get_table
+av_crc_init
+av_d2q
+av_default_get_category
+av_default_item_name
+av_dict_copy
+av_dict_count
+av_dict_free
+av_dict_get
+av_dict_get_string
+av_dict_iterate
+av_dict_parse_string
+av_dict_set
+av_dict_set_int
+av_dirname
+av_div_q
+av_dynarray2_add
+av_dynarray_add
+av_dynarray_add_nofree
+av_escape
+av_expr_eval
+av_expr_free
+av_expr_parse
+av_expr_parse_and_eval
+av_fast_malloc
+av_fast_mallocz
+av_fast_realloc
+av_fifo_alloc
+av_fifo_alloc2
+av_fifo_alloc_array
+av_fifo_can_read
+av_fifo_drain
+av_fifo_free
+av_fifo_freep
+av_fifo_freep2
+av_fifo_generic_peek
+av_fifo_generic_read
+av_fifo_generic_write
+av_fifo_grow
+av_fifo_realloc2
+av_fifo_read
+av_fifo_reset
+av_fifo_size
+av_fifo_space
+av_fifo_write
+av_find_best_pix_fmt_of_2
+av_find_info_tag
+av_find_nearest_q_idx
+#ifndef MOZ_FFVPX_AUDIOONLY
+av_film_grain_params_create_side_data
+#endif
+av_force_cpu_flags
+av_fourcc_make_string
+av_frame_alloc
+av_frame_apply_cropping
+av_frame_clone
+av_frame_copy
+av_frame_copy_props
+av_frame_free
+av_frame_get_buffer
+av_frame_get_plane_buffer
+av_frame_get_side_data
+av_frame_is_writable
+av_frame_make_writable
+av_frame_move_ref
+av_frame_new_side_data
+av_frame_new_side_data_from_buf
+av_frame_ref
+av_frame_remove_side_data
+av_frame_side_data_name
+av_frame_unref
+av_free
+av_freep
+av_gcd
+av_get_alt_sample_fmt
+av_get_bits_per_pixel
+av_get_bytes_per_sample
+av_get_channel_description
+av_get_channel_layout
+av_get_channel_layout_channel_index
+av_get_channel_layout_nb_channels
+av_get_channel_layout_string
+av_get_channel_name
+av_get_cpu_flags
+av_get_default_channel_layout
+av_get_known_color_name
+av_get_media_type_string
+av_get_packed_sample_fmt
+av_get_padded_bits_per_pixel
+av_get_picture_type_char
+av_get_pix_fmt
+av_get_pix_fmt_loss
+av_get_pix_fmt_name
+av_get_pix_fmt_string
+av_get_planar_sample_fmt
+av_get_sample_fmt
+av_get_sample_fmt_name
+av_get_sample_fmt_string
+av_get_standard_channel_layout
+av_get_time_base_q
+av_get_token
+av_gettime
+av_gettime_relative
+av_gettime_relative_is_monotonic
+av_hwdevice_get_hwframe_constraints
+av_hwdevice_hwconfig_alloc
+av_hwframe_constraints_free
+av_hwframe_get_buffer
+av_image_alloc
+av_image_check_sar
+av_image_check_size
+av_image_check_size2
+av_image_copy
+av_image_copy_plane
+av_image_copy_to_buffer
+av_image_fill_arrays
+av_image_fill_linesizes
+av_image_fill_max_pixsteps
+av_image_fill_pointers
+av_image_fill_plane_sizes
+av_image_get_buffer_size
+av_image_get_linesize
+av_int_list_length_for_size
+av_log
+#ifndef MOZ_FFVPX_AUDIOONLY
+av_log2
+av_log2_16bit
+#endif
+av_log_default_callback
+av_log_format_line
+av_log_get_flags
+av_log_get_level
+av_log_set_callback
+av_log_set_flags
+av_log_set_level
+av_malloc
+av_mallocz
+#ifndef MOZ_FFVPX_AUDIOONLY
+av_mastering_display_metadata_create_side_data
+#endif
+av_match_list
+av_match_name
+av_max_alloc
+av_memcpy_backptr
+av_memdup
+av_mul_q
+av_nearer_q
+av_opt_child_next
+av_opt_copy
+av_opt_eval_double
+av_opt_eval_flags
+av_opt_eval_float
+av_opt_eval_int
+av_opt_eval_int64
+av_opt_eval_q
+av_opt_find
+av_opt_find2
+av_opt_flag_is_set
+av_opt_free
+av_opt_freep_ranges
+av_opt_get
+av_opt_get_channel_layout
+av_opt_get_dict_val
+av_opt_get_double
+av_opt_get_image_size
+av_opt_get_int
+av_opt_get_key_value
+av_opt_get_pixel_fmt
+av_opt_get_q
+av_opt_get_sample_fmt
+av_opt_get_video_rate
+av_opt_is_set_to_default
+av_opt_is_set_to_default_by_name
+av_opt_next
+av_opt_ptr
+av_opt_query_ranges
+av_opt_query_ranges_default
+av_opt_serialize
+av_opt_set
+av_opt_set_bin
+av_opt_set_channel_layout
+av_opt_set_defaults
+av_opt_set_defaults2
+av_opt_set_dict
+av_opt_set_dict2
+av_opt_set_dict_val
+av_opt_set_double
+av_opt_set_from_string
+av_opt_set_image_size
+av_opt_set_int
+av_opt_set_pixel_fmt
+av_opt_set_q
+av_opt_set_sample_fmt
+av_opt_set_video_rate
+av_opt_show2
+av_parse_color
+av_parse_cpu_caps
+av_parse_ratio
+av_parse_time
+av_parse_video_rate
+av_parse_video_size
+av_pix_fmt_count_planes
+av_pix_fmt_desc_get
+av_pix_fmt_desc_get_id
+av_pix_fmt_desc_next
+av_pix_fmt_get_chroma_sub_sample
+av_pix_fmt_swap_endianness
+av_q2intfloat
+av_read_image_line
+av_realloc
+av_realloc_array
+av_realloc_f
+av_reallocp
+av_reallocp_array
+av_reduce
+av_rescale
+av_rescale_delta
+av_rescale_q
+av_rescale_q_rnd
+av_rescale_rnd
+av_sample_fmt_is_planar
+av_samples_alloc
+av_samples_alloc_array_and_samples
+av_samples_copy
+av_samples_fill_arrays
+av_samples_get_buffer_size
+av_samples_set_silence
+av_set_options_string
+av_small_strptime
+av_strcasecmp
+av_strdup
+av_strerror
+av_stristart
+av_stristr
+av_strlcat
+av_strlcatf
+av_strlcpy
+av_strncasecmp
+av_strndup
+av_strnstr
+av_strstart
+av_strtod
+av_strtok
+av_sub_q
+av_timegm
+av_usleep
+av_utf8_decode
+av_vbprintf
+#ifndef MOZ_FFVPX_AUDIOONLY
+av_video_enc_params_create_side_data
+#endif
+av_vlog
+av_write_image_line
+avpriv_alloc_fixed_dsp
+avpriv_float_dsp_alloc
+avpriv_report_missing_feature
+avpriv_request_sample
+avpriv_scalarproduct_float_c
+avpriv_set_systematic_pal2
+#if defined(XP_WIN) && !defined(_ARM64_)
+avpriv_emms_asm
+#endif
+avpriv_slicethread_create
+avpriv_slicethread_execute
+avpriv_slicethread_free
+av_hwdevice_get_type_name
+av_hwframe_ctx_alloc
+av_hwframe_ctx_init
+av_hwdevice_ctx_alloc
+av_hwdevice_ctx_init
+av_hwframe_transfer_get_formats
+av_hwdevice_ctx_create_derived
+av_malloc_array
diff --git a/media/ffvpx/libavutil/avutilres.rc b/media/ffvpx/libavutil/avutilres.rc
new file mode 100644
index 0000000000..40a75eb377
--- /dev/null
+++ b/media/ffvpx/libavutil/avutilres.rc
@@ -0,0 +1,55 @@
+/*
+ * Windows resource file for libavutil
+ *
+ * Copyright (C) 2012 James Almer
+ * Copyright (C) 2013 Tiancheng "Timothy" Gu
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <windows.h>
+#include "libavutil/version.h"
+#include "libavutil/ffversion.h"
+#include "config.h"
+
+1 VERSIONINFO
+FILEVERSION LIBAVUTIL_VERSION_MAJOR, LIBAVUTIL_VERSION_MINOR, LIBAVUTIL_VERSION_MICRO, 0
+PRODUCTVERSION LIBAVUTIL_VERSION_MAJOR, LIBAVUTIL_VERSION_MINOR, LIBAVUTIL_VERSION_MICRO, 0
+FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
+FILEOS VOS_NT_WINDOWS32
+FILETYPE VFT_DLL
+{
+ BLOCK "StringFileInfo"
+ {
+ BLOCK "040904B0"
+ {
+ VALUE "CompanyName", "FFmpeg Project"
+ VALUE "FileDescription", "FFmpeg utility library"
+ VALUE "FileVersion", AV_STRINGIFY(LIBAVUTIL_VERSION)
+ VALUE "InternalName", "libavutil"
+ VALUE "LegalCopyright", "Copyright (C) 2000-" AV_STRINGIFY(CONFIG_THIS_YEAR) " FFmpeg Project"
+ VALUE "OriginalFilename", "avutil" BUILDSUF "-" AV_STRINGIFY(LIBAVUTIL_VERSION_MAJOR) SLIBSUF
+ VALUE "ProductName", "FFmpeg"
+ VALUE "ProductVersion", FFMPEG_VERSION
+ }
+ }
+
+ BLOCK "VarFileInfo"
+ {
+ VALUE "Translation", 0x0409, 0x04B0
+ }
+}
diff --git a/media/ffvpx/libavutil/base64.c b/media/ffvpx/libavutil/base64.c
new file mode 100644
index 0000000000..3e66f4fcbe
--- /dev/null
+++ b/media/ffvpx/libavutil/base64.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @brief Base64 encode/decode
+ * @author Ryan Martell <rdm4@martellventures.com> (with lots of Michael)
+ */
+
+#include <limits.h>
+#include <stddef.h>
+
+#include "base64.h"
+#include "error.h"
+#include "intreadwrite.h"
+
+/* ---------------- private code */
+static const uint8_t map2[256] =
+{
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff,
+
+ 0x3e, 0xff, 0xff, 0xff, 0x3f, 0x34, 0x35, 0x36,
+ 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0xff,
+ 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
+ 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
+ 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1a, 0x1b,
+ 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
+ 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+ 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33,
+
+ 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+#define BASE64_DEC_STEP(i) do { \
+ bits = map2[in[i]]; \
+ if (bits & 0x80) \
+ goto out ## i; \
+ v = i ? (v << 6) + bits : bits; \
+} while(0)
+
+int av_base64_decode(uint8_t *out, const char *in_str, int out_size)
+{
+ uint8_t *dst = out;
+ uint8_t *end;
+ // no sign extension
+ const uint8_t *in = in_str;
+ unsigned bits = 0xff;
+ unsigned v;
+
+ if (!out)
+ goto validity_check;
+
+ end = out + out_size;
+ while (end - dst > 3) {
+ BASE64_DEC_STEP(0);
+ BASE64_DEC_STEP(1);
+ BASE64_DEC_STEP(2);
+ BASE64_DEC_STEP(3);
+ // Using AV_WB32 directly confuses the compiler
+ v = av_be2ne32(v << 8);
+ AV_WN32(dst, v);
+ dst += 3;
+ in += 4;
+ }
+ if (end - dst) {
+ BASE64_DEC_STEP(0);
+ BASE64_DEC_STEP(1);
+ BASE64_DEC_STEP(2);
+ BASE64_DEC_STEP(3);
+ *dst++ = v >> 16;
+ if (end - dst)
+ *dst++ = v >> 8;
+ if (end - dst)
+ *dst++ = v;
+ in += 4;
+ }
+validity_check:
+ while (1) {
+ BASE64_DEC_STEP(0);
+ in++;
+ BASE64_DEC_STEP(0);
+ in++;
+ BASE64_DEC_STEP(0);
+ in++;
+ BASE64_DEC_STEP(0);
+ in++;
+ }
+
+out3:
+ *dst++ = v >> 10;
+ v <<= 2;
+out2:
+ *dst++ = v >> 4;
+out1:
+out0:
+ return bits & 1 ? AVERROR_INVALIDDATA : out ? dst - out : 0;
+}
+
+/*****************************************************************************
+* b64_encode: Stolen from VLC's http.c.
+* Simplified by Michael.
+* Fixed edge cases and made it work from data (vs. strings) by Ryan.
+*****************************************************************************/
+
+char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size)
+{
+ static const char b64[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ char *ret, *dst;
+ unsigned i_bits = 0;
+ int i_shift = 0;
+ int bytes_remaining = in_size;
+
+ if (in_size >= UINT_MAX / 4 ||
+ out_size < AV_BASE64_SIZE(in_size))
+ return NULL;
+ ret = dst = out;
+ while (bytes_remaining > 3) {
+ i_bits = AV_RB32(in);
+ in += 3; bytes_remaining -= 3;
+ *dst++ = b64[ i_bits>>26 ];
+ *dst++ = b64[(i_bits>>20) & 0x3F];
+ *dst++ = b64[(i_bits>>14) & 0x3F];
+ *dst++ = b64[(i_bits>>8 ) & 0x3F];
+ }
+ i_bits = 0;
+ while (bytes_remaining) {
+ i_bits = (i_bits << 8) + *in++;
+ bytes_remaining--;
+ i_shift += 8;
+ }
+ while (i_shift > 0) {
+ *dst++ = b64[(i_bits << 6 >> i_shift) & 0x3f];
+ i_shift -= 6;
+ }
+ while ((dst - ret) & 3)
+ *dst++ = '=';
+ *dst = '\0';
+
+ return ret;
+}
diff --git a/media/ffvpx/libavutil/base64.h b/media/ffvpx/libavutil/base64.h
new file mode 100644
index 0000000000..2954c12d42
--- /dev/null
+++ b/media/ffvpx/libavutil/base64.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BASE64_H
+#define AVUTIL_BASE64_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_base64 Base64
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+/**
+ * Decode a base64-encoded string.
+ *
+ * @param out buffer for decoded data
+ * @param in null-terminated input string
+ * @param out_size size in bytes of the out buffer, must be at
+ * least 3/4 of the length of in, that is AV_BASE64_DECODE_SIZE(strlen(in))
+ * @return number of bytes written, or a negative value in case of
+ * invalid input
+ */
+int av_base64_decode(uint8_t *out, const char *in, int out_size);
+
+/**
+ * Calculate the output size in bytes needed to decode a base64 string
+ * with length x to a data buffer.
+ */
+#define AV_BASE64_DECODE_SIZE(x) ((x) * 3LL / 4)
+
+/**
+ * Encode data to base64 and null-terminate.
+ *
+ * @param out buffer for encoded data
+ * @param out_size size in bytes of the out buffer (including the
+ * null terminator), must be at least AV_BASE64_SIZE(in_size)
+ * @param in input buffer containing the data to encode
+ * @param in_size size in bytes of the in buffer
+ * @return out or NULL in case of error
+ */
+char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size);
+
+/**
+ * Calculate the output size needed to base64-encode x bytes to a
+ * null-terminated string.
+ */
+#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1)
+
+ /**
+ * @}
+ */
+
+#endif /* AVUTIL_BASE64_H */
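As a quick illustration of the API documented above (an editor's sketch, not part of the upstream patch; it assumes a program compiled against these headers and linked with this libavutil), a minimal base64 round trip could look like this:

    #include <stdio.h>
    #include "libavutil/base64.h"

    int main(void)
    {
        static const uint8_t data[] = "hello";     /* 5 payload bytes */
        char    encoded[AV_BASE64_SIZE(5)];        /* (5+2)/3*4 + 1 = 9 bytes */
        uint8_t decoded[AV_BASE64_DECODE_SIZE(9)]; /* 9*3/4 = 6 bytes, enough here */
        int n;

        if (!av_base64_encode(encoded, sizeof(encoded), data, 5))
            return 1;                              /* out_size was too small */
        n = av_base64_decode(decoded, encoded, sizeof(decoded));
        if (n < 0)
            return 1;                              /* AVERROR_INVALIDDATA */
        printf("%s decodes back to %d bytes\n", encoded, n);
        return 0;
    }

The buffer sizes follow directly from the AV_BASE64_SIZE() and AV_BASE64_DECODE_SIZE() macros defined in the header.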
diff --git a/media/ffvpx/libavutil/bprint.c b/media/ffvpx/libavutil/bprint.c
new file mode 100644
index 0000000000..5b540ebc9e
--- /dev/null
+++ b/media/ffvpx/libavutil/bprint.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <limits.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include "avstring.h"
+#include "bprint.h"
+#include "compat/va_copy.h"
+#include "error.h"
+#include "macros.h"
+#include "mem.h"
+
+#define av_bprint_room(buf) ((buf)->size - FFMIN((buf)->len, (buf)->size))
+#define av_bprint_is_allocated(buf) ((buf)->str != (buf)->reserved_internal_buffer)
+
+static int av_bprint_alloc(AVBPrint *buf, unsigned room)
+{
+ char *old_str, *new_str;
+ unsigned min_size, new_size;
+
+ if (buf->size == buf->size_max)
+ return AVERROR(EIO);
+ if (!av_bprint_is_complete(buf))
+ return AVERROR_INVALIDDATA; /* it is already truncated anyway */
+ min_size = buf->len + 1 + FFMIN(UINT_MAX - buf->len - 1, room);
+ new_size = buf->size > buf->size_max / 2 ? buf->size_max : buf->size * 2;
+ if (new_size < min_size)
+ new_size = FFMIN(buf->size_max, min_size);
+ old_str = av_bprint_is_allocated(buf) ? buf->str : NULL;
+ new_str = av_realloc(old_str, new_size);
+ if (!new_str)
+ return AVERROR(ENOMEM);
+ if (!old_str)
+ memcpy(new_str, buf->str, buf->len + 1);
+ buf->str = new_str;
+ buf->size = new_size;
+ return 0;
+}
+
+static void av_bprint_grow(AVBPrint *buf, unsigned extra_len)
+{
+ /* arbitrary margin to avoid small overflows */
+ extra_len = FFMIN(extra_len, UINT_MAX - 5 - buf->len);
+ buf->len += extra_len;
+ if (buf->size)
+ buf->str[FFMIN(buf->len, buf->size - 1)] = 0;
+}
+
+void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
+{
+ unsigned size_auto = (char *)buf + sizeof(*buf) -
+ buf->reserved_internal_buffer;
+
+ if (size_max == 1)
+ size_max = size_auto;
+ buf->str = buf->reserved_internal_buffer;
+ buf->len = 0;
+ buf->size = FFMIN(size_auto, size_max);
+ buf->size_max = size_max;
+ *buf->str = 0;
+ if (size_init > buf->size)
+ av_bprint_alloc(buf, size_init - 1);
+}
+
+void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size)
+{
+ buf->str = buffer;
+ buf->len = 0;
+ buf->size = size;
+ buf->size_max = size;
+ *buf->str = 0;
+}
+
+void av_bprintf(AVBPrint *buf, const char *fmt, ...)
+{
+ unsigned room;
+ char *dst;
+ va_list vl;
+ int extra_len;
+
+ while (1) {
+ room = av_bprint_room(buf);
+ dst = room ? buf->str + buf->len : NULL;
+ va_start(vl, fmt);
+ extra_len = vsnprintf(dst, room, fmt, vl);
+ va_end(vl);
+ if (extra_len <= 0)
+ return;
+ if (extra_len < room)
+ break;
+ if (av_bprint_alloc(buf, extra_len))
+ break;
+ }
+ av_bprint_grow(buf, extra_len);
+}
+
+void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg)
+{
+ unsigned room;
+ char *dst;
+ int extra_len;
+ va_list vl;
+
+ while (1) {
+ room = av_bprint_room(buf);
+ dst = room ? buf->str + buf->len : NULL;
+ va_copy(vl, vl_arg);
+ extra_len = vsnprintf(dst, room, fmt, vl);
+ va_end(vl);
+ if (extra_len <= 0)
+ return;
+ if (extra_len < room)
+ break;
+ if (av_bprint_alloc(buf, extra_len))
+ break;
+ }
+ av_bprint_grow(buf, extra_len);
+}
+
+void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
+{
+ unsigned room, real_n;
+
+ while (1) {
+ room = av_bprint_room(buf);
+ if (n < room)
+ break;
+ if (av_bprint_alloc(buf, n))
+ break;
+ }
+ if (room) {
+ real_n = FFMIN(n, room - 1);
+ memset(buf->str + buf->len, c, real_n);
+ }
+ av_bprint_grow(buf, n);
+}
+
+void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size)
+{
+ unsigned room, real_n;
+
+ while (1) {
+ room = av_bprint_room(buf);
+ if (size < room)
+ break;
+ if (av_bprint_alloc(buf, size))
+ break;
+ }
+ if (room) {
+ real_n = FFMIN(size, room - 1);
+ memcpy(buf->str + buf->len, data, real_n);
+ }
+ av_bprint_grow(buf, size);
+}
+
+void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm)
+{
+ unsigned room;
+ size_t l;
+
+ if (!*fmt)
+ return;
+ while (1) {
+ room = av_bprint_room(buf);
+ if (room && (l = strftime(buf->str + buf->len, room, fmt, tm)))
+ break;
+ /* strftime does not tell us how much room it would need: let us
+ retry with twice as much until the buffer is large enough */
+ room = !room ? strlen(fmt) + 1 :
+ room <= INT_MAX / 2 ? room * 2 : INT_MAX;
+ if (av_bprint_alloc(buf, room)) {
+ /* impossible to grow, try to manage something useful anyway */
+ room = av_bprint_room(buf);
+ if (room < 1024) {
+ /* if strftime fails because the buffer has (almost) reached
+ its maximum size, let us try in a local buffer; 1k should
+ be enough to format any real date+time string */
+ char buf2[1024];
+ if ((l = strftime(buf2, sizeof(buf2), fmt, tm))) {
+ av_bprintf(buf, "%s", buf2);
+ return;
+ }
+ }
+ if (room) {
+ /* if anything else failed and the buffer is not already
+ truncated, let us add a stock string and force truncation */
+ static const char txt[] = "[truncated strftime output]";
+ memset(buf->str + buf->len, '!', room);
+ memcpy(buf->str + buf->len, txt, FFMIN(sizeof(txt) - 1, room));
+ av_bprint_grow(buf, room); /* force truncation */
+ }
+ return;
+ }
+ }
+ av_bprint_grow(buf, l);
+}
+
+void av_bprint_get_buffer(AVBPrint *buf, unsigned size,
+ unsigned char **mem, unsigned *actual_size)
+{
+ if (size > av_bprint_room(buf))
+ av_bprint_alloc(buf, size);
+ *actual_size = av_bprint_room(buf);
+ *mem = *actual_size ? buf->str + buf->len : NULL;
+}
+
+void av_bprint_clear(AVBPrint *buf)
+{
+ if (buf->len) {
+ *buf->str = 0;
+ buf->len = 0;
+ }
+}
+
+int av_bprint_finalize(AVBPrint *buf, char **ret_str)
+{
+ unsigned real_size = FFMIN(buf->len + 1, buf->size);
+ char *str;
+ int ret = 0;
+
+ if (ret_str) {
+ if (av_bprint_is_allocated(buf)) {
+ str = av_realloc(buf->str, real_size);
+ if (!str)
+ str = buf->str;
+ buf->str = NULL;
+ } else {
+ str = av_memdup(buf->str, real_size);
+ if (!str)
+ ret = AVERROR(ENOMEM);
+ }
+ *ret_str = str;
+ } else {
+ if (av_bprint_is_allocated(buf))
+ av_freep(&buf->str);
+ }
+ buf->size = real_size;
+ return ret;
+}
+
+#define WHITESPACES " \n\t\r"
+
+void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars,
+ enum AVEscapeMode mode, int flags)
+{
+ const char *src0 = src;
+
+ if (mode == AV_ESCAPE_MODE_AUTO)
+ mode = AV_ESCAPE_MODE_BACKSLASH; /* TODO: implement a heuristic */
+
+ switch (mode) {
+ case AV_ESCAPE_MODE_QUOTE:
+ /* enclose the string between '' */
+ av_bprint_chars(dstbuf, '\'', 1);
+ for (; *src; src++) {
+ if (*src == '\'')
+ av_bprintf(dstbuf, "'\\''");
+ else
+ av_bprint_chars(dstbuf, *src, 1);
+ }
+ av_bprint_chars(dstbuf, '\'', 1);
+ break;
+
+ case AV_ESCAPE_MODE_XML:
+ /* escape XML non-markup character data as per XML spec section 2.4 by default: */
+ /* [^<&]* - ([^<&]* ']]>' [^<&]*) */
+
+ /* additionally, given one of the AV_ESCAPE_FLAG_XML_* flags, */
+ /* escape those specific characters as required. */
+ for (; *src; src++) {
+ switch (*src) {
+ case '&' : av_bprintf(dstbuf, "%s", "&amp;"); break;
+ case '<' : av_bprintf(dstbuf, "%s", "&lt;"); break;
+ case '>' : av_bprintf(dstbuf, "%s", "&gt;"); break;
+ case '\'':
+ if (!(flags & AV_ESCAPE_FLAG_XML_SINGLE_QUOTES))
+ goto XML_DEFAULT_HANDLING;
+
+ av_bprintf(dstbuf, "%s", "&apos;");
+ break;
+ case '"' :
+ if (!(flags & AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES))
+ goto XML_DEFAULT_HANDLING;
+
+ av_bprintf(dstbuf, "%s", "&quot;");
+ break;
+XML_DEFAULT_HANDLING:
+ default: av_bprint_chars(dstbuf, *src, 1);
+ }
+ }
+ break;
+
+ /* case AV_ESCAPE_MODE_BACKSLASH or unknown mode */
+ default:
+ /* \-escape characters */
+ for (; *src; src++) {
+ int is_first_last = src == src0 || !*(src+1);
+ int is_ws = !!strchr(WHITESPACES, *src);
+ int is_strictly_special = special_chars && strchr(special_chars, *src);
+ int is_special =
+ is_strictly_special || strchr("'\\", *src) ||
+ (is_ws && (flags & AV_ESCAPE_FLAG_WHITESPACE));
+
+ if (is_strictly_special ||
+ (!(flags & AV_ESCAPE_FLAG_STRICT) &&
+ (is_special || (is_ws && is_first_last))))
+ av_bprint_chars(dstbuf, '\\', 1);
+ av_bprint_chars(dstbuf, *src, 1);
+ }
+ break;
+ }
+}
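To make the escaping modes implemented above concrete, here is a small sketch (an editor's illustration, not part of the patch; AV_ESCAPE_MODE_QUOTE and the flag constants come from avstring.h) that quotes a string the way the AV_ESCAPE_MODE_QUOTE branch does:

    #include <stdio.h>
    #include "libavutil/avstring.h"
    #include "libavutil/bprint.h"

    int main(void)
    {
        AVBPrint bp;

        av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
        /* wrap in single quotes, escaping embedded quotes shell-style */
        av_bprint_escape(&bp, "it's 50% off", NULL, AV_ESCAPE_MODE_QUOTE, 0);
        printf("%s\n", bp.str);            /* 'it'\''s 50% off' */
        av_bprint_finalize(&bp, NULL);     /* discard the buffer */
        return 0;
    }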
diff --git a/media/ffvpx/libavutil/bprint.h b/media/ffvpx/libavutil/bprint.h
new file mode 100644
index 0000000000..f27d30f723
--- /dev/null
+++ b/media/ffvpx/libavutil/bprint.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_avbprint
+ * AVBPrint public header
+ */
+
+#ifndef AVUTIL_BPRINT_H
+#define AVUTIL_BPRINT_H
+
+#include <stdarg.h>
+
+#include "attributes.h"
+#include "avstring.h"
+
+/**
+ * @defgroup lavu_avbprint AVBPrint
+ * @ingroup lavu_data
+ *
+ * A buffer to print data progressively
+ * @{
+ */
+
+/**
+ * Define a structure with extra padding to a fixed size.
+ * This helps ensure binary compatibility with future versions.
+ */
+
+#define FF_PAD_STRUCTURE(name, size, ...) \
+struct ff_pad_helper_##name { __VA_ARGS__ }; \
+typedef struct name { \
+ __VA_ARGS__ \
+ char reserved_padding[size - sizeof(struct ff_pad_helper_##name)]; \
+} name;
+
+/**
+ * Buffer to print data progressively
+ *
+ * The string buffer grows as necessary and is always 0-terminated.
+ * The content of the string is never accessed, and thus is
+ * encoding-agnostic and can even hold binary data.
+ *
+ * Small buffers are kept in the structure itself, and thus require no
+ * memory allocation at all (unless the contents of the buffer is needed
+ * after the structure goes out of scope). This is almost as lightweight as
+ * declaring a local `char buf[512]`.
+ *
+ * The length of the string can go beyond the allocated size: the buffer is
+ * then truncated, but the functions still keep account of the actual total
+ * length.
+ *
+ * In other words, AVBPrint.len can be greater than AVBPrint.size and records
+ * the total length of what would have been written to the buffer if there had
+ * been enough memory.
+ *
+ * Append operations do not need to be tested for failure: if a memory
+ * allocation fails, data stops being appended to the buffer, but the length
+ * is still updated. This situation can be tested with
+ * av_bprint_is_complete().
+ *
+ * The AVBPrint.size_max field determines several possible behaviours:
+ * - `size_max = -1` (= `UINT_MAX`) or any large value will let the buffer be
+ * reallocated as necessary, with an amortized linear cost.
+ * - `size_max = 0` prevents writing anything to the buffer: only the total
+ * length is computed. The write operations can then possibly be repeated in
+ * a buffer with exactly the necessary size
+ * (using `size_init = size_max = len + 1`).
+ * - `size_max = 1` is automatically replaced by the exact size available in the
+ * structure itself, thus ensuring no dynamic memory allocation. The
+ * internal buffer is large enough to hold a reasonable paragraph of text,
+ * such as the current paragraph.
+ */
+
+FF_PAD_STRUCTURE(AVBPrint, 1024,
+ char *str; /**< string so far */
+ unsigned len; /**< length so far */
+ unsigned size; /**< allocated memory */
+ unsigned size_max; /**< maximum allocated memory */
+ char reserved_internal_buffer[1];
+)
+
+/**
+ * @name Max size special values
+ * Convenience macros for special values for av_bprint_init() size_max
+ * parameter.
+ * @{
+ */
+
+/**
+ * Buffer will be reallocated as necessary, with an amortized linear cost.
+ */
+#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1)
+/**
+ * Use the exact size available in the AVBPrint structure itself.
+ *
+ * This ensures no dynamic memory allocation. The internal buffer is large
+ * enough to hold a reasonable paragraph of text, such as the current paragraph.
+ */
+#define AV_BPRINT_SIZE_AUTOMATIC 1
+/**
+ * Do not write anything to the buffer, only calculate the total length.
+ *
+ * The write operations can then possibly be repeated in a buffer with
+ * exactly the necessary size (using `size_init = size_max = AVBPrint.len + 1`).
+ */
+#define AV_BPRINT_SIZE_COUNT_ONLY 0
+/** @} */
+
+/**
+ * Init a print buffer.
+ *
+ * @param buf buffer to init
+ * @param size_init initial size (including the final 0)
+ * @param size_max maximum size;
+ *                 - `0` means do not write anything, just count the length
+ *                 - `1` is replaced by the maximum value for automatic storage
+ *                 - any other large value means that the internal buffer will
+ *                   be reallocated as needed up to that limit
+ *                 - `-1` is converted to `UINT_MAX`, the largest limit possible.
+ *                 Check also `AV_BPRINT_SIZE_*` macros.
+ */
+void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max);
+
+/**
+ * Init a print buffer using a pre-existing buffer.
+ *
+ * The buffer will not be reallocated.
+ *
+ * @param buf buffer structure to init
+ * @param buffer byte buffer to use for the string data
+ * @param size size of buffer
+ */
+void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size);
+
+/**
+ * Append a formatted string to a print buffer.
+ */
+void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3);
+
+/**
+ * Append a formatted string to a print buffer (va_list version of av_bprintf()).
+ */
+void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg);
+
+/**
+ * Append char c n times to a print buffer.
+ */
+void av_bprint_chars(AVBPrint *buf, char c, unsigned n);
+
+/**
+ * Append data to a print buffer.
+ *
+ * @param buf bprint buffer to use
+ * @param data pointer to data
+ * @param size size of data
+ */
+void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size);
+
+struct tm;
+/**
+ * Append a formatted date and time to a print buffer.
+ *
+ * @param buf bprint buffer to use
+ * @param fmt date and time format string, see strftime()
+ * @param tm broken-down time structure to translate
+ *
+ * @note because the standard strftime() function does not report how much
+ * room it would need, this function may produce poor results if the format
+ * string expands to a very long text and the bprint buffer is near the limit
+ * stated by the size_max option.
+ */
+void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm);
+
+/**
+ * Allocate bytes in the buffer for external use.
+ *
+ * @param[in] buf buffer structure
+ * @param[in] size required size
+ * @param[out] mem pointer to the memory area
+ * @param[out] actual_size size of the memory area after allocation;
+ * can be larger or smaller than size
+ */
+void av_bprint_get_buffer(AVBPrint *buf, unsigned size,
+ unsigned char **mem, unsigned *actual_size);
+
+/**
+ * Reset the string to "" but keep internal allocated data.
+ */
+void av_bprint_clear(AVBPrint *buf);
+
+/**
+ * Test if the print buffer is complete (not truncated).
+ *
+ * It may have been truncated due to a memory allocation failure
+ * or the size_max limit (compare size and size_max if necessary).
+ */
+static inline int av_bprint_is_complete(const AVBPrint *buf)
+{
+ return buf->len < buf->size;
+}
+
+/**
+ * Finalize a print buffer.
+ *
+ * The print buffer can no longer be used afterwards,
+ * but the len and size fields are still valid.
+ *
+ * @param[out] ret_str if not NULL, used to return a permanent copy of the
+ * buffer contents, or NULL if memory allocation fails;
+ * if NULL, the buffer is discarded and freed
+ * @return 0 for success or a negative error code (typically AVERROR(ENOMEM))
+ */
+int av_bprint_finalize(AVBPrint *buf, char **ret_str);
+
+/**
+ * Escape the content in src and append it to dstbuf.
+ *
+ * @param dstbuf already inited destination bprint buffer
+ * @param src string containing the text to escape
+ * @param special_chars string containing the special characters which
+ * need to be escaped, can be NULL
+ * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros.
+ * Any unknown value for mode will be considered equivalent to
+ * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without
+ * notice.
+ * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros
+ */
+void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars,
+ enum AVEscapeMode mode, int flags);
+
+/** @} */
+
+#endif /* AVUTIL_BPRINT_H */
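A minimal usage sketch for the AVBPrint API declared above (an editor's illustration, not part of the patch; it links against libavutil and uses av_freep() from mem.h):

    #include <stdio.h>
    #include "libavutil/bprint.h"
    #include "libavutil/mem.h"

    int main(void)
    {
        AVBPrint bp;
        char *str = NULL;

        /* grow on demand, no upper bound */
        av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
        av_bprintf(&bp, "%d channels, ", 2);
        av_bprint_chars(&bp, '*', 3);

        if (!av_bprint_is_complete(&bp))   /* an allocation failed somewhere */
            fprintf(stderr, "truncated: wanted %u bytes, have room for %u\n",
                    bp.len, bp.size);

        if (av_bprint_finalize(&bp, &str) < 0)  /* detach a permanent copy */
            return 1;
        printf("%s\n", str);               /* "2 channels, ***" */
        av_freep(&str);
        return 0;
    }

Because append operations never report errors, the single av_bprint_is_complete() check near the end is all the error handling this API needs.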
diff --git a/media/ffvpx/libavutil/bswap.h b/media/ffvpx/libavutil/bswap.h
new file mode 100644
index 0000000000..4840ab433f
--- /dev/null
+++ b/media/ffvpx/libavutil/bswap.h
@@ -0,0 +1,111 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * byte swapping routines
+ */
+
+#ifndef AVUTIL_BSWAP_H
+#define AVUTIL_BSWAP_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if ARCH_AARCH64
+# include "aarch64/bswap.h"
+#elif ARCH_ARM
+# include "arm/bswap.h"
+#elif ARCH_AVR32
+# include "avr32/bswap.h"
+#elif ARCH_RISCV
+# include "riscv/bswap.h"
+#elif ARCH_SH4
+# include "sh4/bswap.h"
+#elif ARCH_X86
+# include "x86/bswap.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff))
+#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16))
+#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32))
+
+#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x)
+
+#ifndef av_bswap16
+static av_always_inline av_const uint16_t av_bswap16(uint16_t x)
+{
+ x= (x>>8) | (x<<8);
+ return x;
+}
+#endif
+
+#ifndef av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ return AV_BSWAP32C(x);
+}
+#endif
+
+#ifndef av_bswap64
+static inline uint64_t av_const av_bswap64(uint64_t x)
+{
+ return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32);
+}
+#endif
+
+// be2ne ... big-endian to native-endian
+// le2ne ... little-endian to native-endian
+
+#if AV_HAVE_BIGENDIAN
+#define av_be2ne16(x) (x)
+#define av_be2ne32(x) (x)
+#define av_be2ne64(x) (x)
+#define av_le2ne16(x) av_bswap16(x)
+#define av_le2ne32(x) av_bswap32(x)
+#define av_le2ne64(x) av_bswap64(x)
+#define AV_BE2NEC(s, x) (x)
+#define AV_LE2NEC(s, x) AV_BSWAPC(s, x)
+#else
+#define av_be2ne16(x) av_bswap16(x)
+#define av_be2ne32(x) av_bswap32(x)
+#define av_be2ne64(x) av_bswap64(x)
+#define av_le2ne16(x) (x)
+#define av_le2ne32(x) (x)
+#define av_le2ne64(x) (x)
+#define AV_BE2NEC(s, x) AV_BSWAPC(s, x)
+#define AV_LE2NEC(s, x) (x)
+#endif
+
+#define AV_BE2NE16C(x) AV_BE2NEC(16, x)
+#define AV_BE2NE32C(x) AV_BE2NEC(32, x)
+#define AV_BE2NE64C(x) AV_BE2NEC(64, x)
+#define AV_LE2NE16C(x) AV_LE2NEC(16, x)
+#define AV_LE2NE32C(x) AV_LE2NEC(32, x)
+#define AV_LE2NE64C(x) AV_LE2NEC(64, x)
+
+#endif /* AVUTIL_BSWAP_H */
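The swap helpers above are easiest to see with concrete values; a tiny sketch (an editor's illustration, not part of the patch):

    #include <stdio.h>
    #include "libavutil/bswap.h"

    int main(void)
    {
        /* unconditional swaps */
        printf("0x%04x\n", (unsigned)av_bswap16(0x1234));        /* 0x3412 */
        printf("0x%08x\n", (unsigned)av_bswap32(0x11223344));    /* 0x44332211 */
        /* the ...C macros do the same in constant expressions */
        printf("0x%08x\n", (unsigned)AV_BSWAP32C(0x11223344U));  /* 0x44332211 */
        /* av_be2ne32() is the identity on big-endian hosts and a byte swap on
         * little-endian hosts, so big-endian file data ends up in native order */
        return 0;
    }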
diff --git a/media/ffvpx/libavutil/buffer.c b/media/ffvpx/libavutil/buffer.c
new file mode 100644
index 0000000000..e4562a79b1
--- /dev/null
+++ b/media/ffvpx/libavutil/buffer.c
@@ -0,0 +1,416 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdatomic.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "avassert.h"
+#include "buffer_internal.h"
+#include "common.h"
+#include "mem.h"
+#include "thread.h"
+
+static AVBufferRef *buffer_create(AVBuffer *buf, uint8_t *data, size_t size,
+ void (*free)(void *opaque, uint8_t *data),
+ void *opaque, int flags)
+{
+ AVBufferRef *ref = NULL;
+
+ buf->data = data;
+ buf->size = size;
+ buf->free = free ? free : av_buffer_default_free;
+ buf->opaque = opaque;
+
+ atomic_init(&buf->refcount, 1);
+
+ buf->flags = flags;
+
+ ref = av_mallocz(sizeof(*ref));
+ if (!ref)
+ return NULL;
+
+ ref->buffer = buf;
+ ref->data = data;
+ ref->size = size;
+
+ return ref;
+}
+
+AVBufferRef *av_buffer_create(uint8_t *data, size_t size,
+ void (*free)(void *opaque, uint8_t *data),
+ void *opaque, int flags)
+{
+ AVBufferRef *ret;
+ AVBuffer *buf = av_mallocz(sizeof(*buf));
+ if (!buf)
+ return NULL;
+
+ ret = buffer_create(buf, data, size, free, opaque, flags);
+ if (!ret) {
+ av_free(buf);
+ return NULL;
+ }
+ return ret;
+}
+
+void av_buffer_default_free(void *opaque, uint8_t *data)
+{
+ av_free(data);
+}
+
+AVBufferRef *av_buffer_alloc(size_t size)
+{
+ AVBufferRef *ret = NULL;
+ uint8_t *data = NULL;
+
+ data = av_malloc(size);
+ if (!data)
+ return NULL;
+
+ ret = av_buffer_create(data, size, av_buffer_default_free, NULL, 0);
+ if (!ret)
+ av_freep(&data);
+
+ return ret;
+}
+
+AVBufferRef *av_buffer_allocz(size_t size)
+{
+ AVBufferRef *ret = av_buffer_alloc(size);
+ if (!ret)
+ return NULL;
+
+ memset(ret->data, 0, size);
+ return ret;
+}
+
+AVBufferRef *av_buffer_ref(const AVBufferRef *buf)
+{
+ AVBufferRef *ret = av_mallocz(sizeof(*ret));
+
+ if (!ret)
+ return NULL;
+
+ *ret = *buf;
+
+ atomic_fetch_add_explicit(&buf->buffer->refcount, 1, memory_order_relaxed);
+
+ return ret;
+}
+
+static void buffer_replace(AVBufferRef **dst, AVBufferRef **src)
+{
+ AVBuffer *b;
+
+ b = (*dst)->buffer;
+
+ if (src) {
+ **dst = **src;
+ av_freep(src);
+ } else
+ av_freep(dst);
+
+ if (atomic_fetch_sub_explicit(&b->refcount, 1, memory_order_acq_rel) == 1) {
+ /* b->free below might already free the structure containing *b,
+ * so we have to read the flag now to avoid use-after-free. */
+ int free_avbuffer = !(b->flags_internal & BUFFER_FLAG_NO_FREE);
+ b->free(b->opaque, b->data);
+ if (free_avbuffer)
+ av_free(b);
+ }
+}
+
+void av_buffer_unref(AVBufferRef **buf)
+{
+ if (!buf || !*buf)
+ return;
+
+ buffer_replace(buf, NULL);
+}
+
+int av_buffer_is_writable(const AVBufferRef *buf)
+{
+ if (buf->buffer->flags & AV_BUFFER_FLAG_READONLY)
+ return 0;
+
+ return atomic_load(&buf->buffer->refcount) == 1;
+}
+
+void *av_buffer_get_opaque(const AVBufferRef *buf)
+{
+ return buf->buffer->opaque;
+}
+
+int av_buffer_get_ref_count(const AVBufferRef *buf)
+{
+ return atomic_load(&buf->buffer->refcount);
+}
+
+int av_buffer_make_writable(AVBufferRef **pbuf)
+{
+ AVBufferRef *newbuf, *buf = *pbuf;
+
+ if (av_buffer_is_writable(buf))
+ return 0;
+
+ newbuf = av_buffer_alloc(buf->size);
+ if (!newbuf)
+ return AVERROR(ENOMEM);
+
+ memcpy(newbuf->data, buf->data, buf->size);
+
+ buffer_replace(pbuf, &newbuf);
+
+ return 0;
+}
+
+int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
+{
+ AVBufferRef *buf = *pbuf;
+ uint8_t *tmp;
+ int ret;
+
+ if (!buf) {
+ /* allocate a new buffer with av_realloc(), so it will be reallocatable
+ * later */
+ uint8_t *data = av_realloc(NULL, size);
+ if (!data)
+ return AVERROR(ENOMEM);
+
+ buf = av_buffer_create(data, size, av_buffer_default_free, NULL, 0);
+ if (!buf) {
+ av_freep(&data);
+ return AVERROR(ENOMEM);
+ }
+
+ buf->buffer->flags_internal |= BUFFER_FLAG_REALLOCATABLE;
+ *pbuf = buf;
+
+ return 0;
+ } else if (buf->size == size)
+ return 0;
+
+ if (!(buf->buffer->flags_internal & BUFFER_FLAG_REALLOCATABLE) ||
+ !av_buffer_is_writable(buf) || buf->data != buf->buffer->data) {
+ /* cannot realloc, allocate a new reallocatable buffer and copy data */
+ AVBufferRef *new = NULL;
+
+ ret = av_buffer_realloc(&new, size);
+ if (ret < 0)
+ return ret;
+
+ memcpy(new->data, buf->data, FFMIN(size, buf->size));
+
+ buffer_replace(pbuf, &new);
+ return 0;
+ }
+
+ tmp = av_realloc(buf->buffer->data, size);
+ if (!tmp)
+ return AVERROR(ENOMEM);
+
+ buf->buffer->data = buf->data = tmp;
+ buf->buffer->size = buf->size = size;
+ return 0;
+}
+
+int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
+{
+ AVBufferRef *dst = *pdst;
+ AVBufferRef *tmp;
+
+ if (!src) {
+ av_buffer_unref(pdst);
+ return 0;
+ }
+
+ if (dst && dst->buffer == src->buffer) {
+ /* make sure the data pointers match */
+ dst->data = src->data;
+ dst->size = src->size;
+ return 0;
+ }
+
+ tmp = av_buffer_ref(src);
+ if (!tmp)
+ return AVERROR(ENOMEM);
+
+ av_buffer_unref(pdst);
+ *pdst = tmp;
+ return 0;
+}
+
+AVBufferPool *av_buffer_pool_init2(size_t size, void *opaque,
+ AVBufferRef* (*alloc)(void *opaque, size_t size),
+ void (*pool_free)(void *opaque))
+{
+ AVBufferPool *pool = av_mallocz(sizeof(*pool));
+ if (!pool)
+ return NULL;
+
+ ff_mutex_init(&pool->mutex, NULL);
+
+ pool->size = size;
+ pool->opaque = opaque;
+ pool->alloc2 = alloc;
+ pool->alloc = av_buffer_alloc; // fallback
+ pool->pool_free = pool_free;
+
+ atomic_init(&pool->refcount, 1);
+
+ return pool;
+}
+
+AVBufferPool *av_buffer_pool_init(size_t size, AVBufferRef* (*alloc)(size_t size))
+{
+ AVBufferPool *pool = av_mallocz(sizeof(*pool));
+ if (!pool)
+ return NULL;
+
+ ff_mutex_init(&pool->mutex, NULL);
+
+ pool->size = size;
+ pool->alloc = alloc ? alloc : av_buffer_alloc;
+
+ atomic_init(&pool->refcount, 1);
+
+ return pool;
+}
+
+static void buffer_pool_flush(AVBufferPool *pool)
+{
+ while (pool->pool) {
+ BufferPoolEntry *buf = pool->pool;
+ pool->pool = buf->next;
+
+ buf->free(buf->opaque, buf->data);
+ av_freep(&buf);
+ }
+}
+
+/*
+ * This function gets called when the pool has been uninited and
+ * all the buffers returned to it.
+ */
+static void buffer_pool_free(AVBufferPool *pool)
+{
+ buffer_pool_flush(pool);
+ ff_mutex_destroy(&pool->mutex);
+
+ if (pool->pool_free)
+ pool->pool_free(pool->opaque);
+
+ av_freep(&pool);
+}
+
+void av_buffer_pool_uninit(AVBufferPool **ppool)
+{
+ AVBufferPool *pool;
+
+ if (!ppool || !*ppool)
+ return;
+ pool = *ppool;
+ *ppool = NULL;
+
+ ff_mutex_lock(&pool->mutex);
+ buffer_pool_flush(pool);
+ ff_mutex_unlock(&pool->mutex);
+
+ if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
+ buffer_pool_free(pool);
+}
+
+static void pool_release_buffer(void *opaque, uint8_t *data)
+{
+ BufferPoolEntry *buf = opaque;
+ AVBufferPool *pool = buf->pool;
+
+ ff_mutex_lock(&pool->mutex);
+ buf->next = pool->pool;
+ pool->pool = buf;
+ ff_mutex_unlock(&pool->mutex);
+
+ if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
+ buffer_pool_free(pool);
+}
+
+/* allocate a new buffer and override its free() callback so that
+ * it is returned to the pool on free */
+static AVBufferRef *pool_alloc_buffer(AVBufferPool *pool)
+{
+ BufferPoolEntry *buf;
+ AVBufferRef *ret;
+
+ av_assert0(pool->alloc || pool->alloc2);
+
+ ret = pool->alloc2 ? pool->alloc2(pool->opaque, pool->size) :
+ pool->alloc(pool->size);
+ if (!ret)
+ return NULL;
+
+ buf = av_mallocz(sizeof(*buf));
+ if (!buf) {
+ av_buffer_unref(&ret);
+ return NULL;
+ }
+
+ buf->data = ret->buffer->data;
+ buf->opaque = ret->buffer->opaque;
+ buf->free = ret->buffer->free;
+ buf->pool = pool;
+
+ ret->buffer->opaque = buf;
+ ret->buffer->free = pool_release_buffer;
+
+ return ret;
+}
+
+AVBufferRef *av_buffer_pool_get(AVBufferPool *pool)
+{
+ AVBufferRef *ret;
+ BufferPoolEntry *buf;
+
+ ff_mutex_lock(&pool->mutex);
+ buf = pool->pool;
+ if (buf) {
+ memset(&buf->buffer, 0, sizeof(buf->buffer));
+ ret = buffer_create(&buf->buffer, buf->data, pool->size,
+ pool_release_buffer, buf, 0);
+ if (ret) {
+ pool->pool = buf->next;
+ buf->next = NULL;
+ buf->buffer.flags_internal |= BUFFER_FLAG_NO_FREE;
+ }
+ } else {
+ ret = pool_alloc_buffer(pool);
+ }
+ ff_mutex_unlock(&pool->mutex);
+
+ if (ret)
+ atomic_fetch_add_explicit(&pool->refcount, 1, memory_order_relaxed);
+
+ return ret;
+}
+
+void *av_buffer_pool_buffer_get_opaque(const AVBufferRef *ref)
+{
+ BufferPoolEntry *buf = ref->buffer->opaque;
+ av_assert0(buf);
+ return buf->opaque;
+}
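The buffer_create() helper above is what backs av_buffer_create(); a short sketch of wrapping externally allocated memory with a custom free callback (an editor's illustration, not part of the patch; free_with_libc is a hypothetical helper name):

    #include <stdio.h>
    #include <stdlib.h>
    #include "libavutil/buffer.h"

    /* free callback for memory that was obtained with plain malloc() */
    static void free_with_libc(void *opaque, uint8_t *data)
    {
        (void)opaque;
        free(data);
    }

    int main(void)
    {
        uint8_t *mem = malloc(1024);
        AVBufferRef *buf;

        if (!mem)
            return 1;
        /* ownership of mem passes to the AVBuffer; the callback runs once
         * the last reference is unreferenced */
        buf = av_buffer_create(mem, 1024, free_with_libc, NULL, 0);
        if (!buf) {
            free(mem);   /* creation failed, the data is left untouched */
            return 1;
        }
        printf("size=%zu writable=%d\n", buf->size, av_buffer_is_writable(buf));
        av_buffer_unref(&buf);   /* last reference: free_with_libc() is called */
        return 0;
    }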
diff --git a/media/ffvpx/libavutil/buffer.h b/media/ffvpx/libavutil/buffer.h
new file mode 100644
index 0000000000..e1ef5b7f07
--- /dev/null
+++ b/media/ffvpx/libavutil/buffer.h
@@ -0,0 +1,322 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_buffer
+ * refcounted data buffer API
+ */
+
+#ifndef AVUTIL_BUFFER_H
+#define AVUTIL_BUFFER_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_buffer AVBuffer
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBuffer is an API for reference-counted data buffers.
+ *
+ * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer
+ * represents the data buffer itself; it is opaque and not meant to be accessed
+ * by the caller directly, but only through AVBufferRef. However, the caller may
+ * e.g. compare two AVBuffer pointers to check whether two different references
+ * are describing the same data buffer. AVBufferRef represents a single
+ * reference to an AVBuffer and it is the object that may be manipulated by the
+ * caller directly.
+ *
+ * There are two functions provided for creating a new AVBuffer with a single
+ * reference -- av_buffer_alloc() to just allocate a new buffer, and
+ * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing
+ * reference, additional references may be created with av_buffer_ref().
+ * Use av_buffer_unref() to free a reference (this will automatically free the
+ * data once all the references are freed).
+ *
+ * The convention throughout this API and the rest of FFmpeg is such that the
+ * buffer is considered writable if there exists only one reference to it (and
+ * it has not been marked as read-only). The av_buffer_is_writable() function is
+ * provided to check whether this is true and av_buffer_make_writable() will
+ * automatically create a new writable buffer when necessary.
+ * Of course nothing prevents the calling code from violating this convention;
+ * however, that is safe only when all the existing references are under its
+ * control.
+ *
+ * @note Referencing and unreferencing the buffers is thread-safe and thus
+ * may be done from multiple threads simultaneously without any need for
+ * additional locking.
+ *
+ * @note Two different references to the same buffer can point to different
+ * parts of the buffer (i.e. their AVBufferRef.data will not be equal).
+ */
+
+/**
+ * A reference counted buffer type. It is opaque and is meant to be used through
+ * references (AVBufferRef).
+ */
+typedef struct AVBuffer AVBuffer;
+
+/**
+ * A reference to a data buffer.
+ *
+ * The size of this struct is not a part of the public ABI and it is not meant
+ * to be allocated directly.
+ */
+typedef struct AVBufferRef {
+ AVBuffer *buffer;
+
+ /**
+ * The data buffer. It is considered writable if and only if
+ * this is the only reference to the buffer, in which case
+ * av_buffer_is_writable() returns 1.
+ */
+ uint8_t *data;
+ /**
+ * Size of data in bytes.
+ */
+ size_t size;
+} AVBufferRef;
+
+/**
+ * Allocate an AVBuffer of the given size using av_malloc().
+ *
+ * @return an AVBufferRef of given size or NULL when out of memory
+ */
+AVBufferRef *av_buffer_alloc(size_t size);
+
+/**
+ * Same as av_buffer_alloc(), except the returned buffer will be initialized
+ * to zero.
+ */
+AVBufferRef *av_buffer_allocz(size_t size);
+
+/**
+ * Always treat the buffer as read-only, even when it has only one
+ * reference.
+ */
+#define AV_BUFFER_FLAG_READONLY (1 << 0)
+
+/**
+ * Create an AVBuffer from an existing array.
+ *
+ * If this function is successful, data is owned by the AVBuffer. The caller may
+ * only access data through the returned AVBufferRef and references derived from
+ * it.
+ * If this function fails, data is left untouched.
+ * @param data data array
+ * @param size size of data in bytes
+ * @param free a callback for freeing this buffer's data
+ * @param opaque an opaque pointer for the caller's use, retrievable with
+ *        av_buffer_get_opaque() and passed to the free callback
+ * @param flags a combination of AV_BUFFER_FLAG_*
+ *
+ * @return an AVBufferRef referring to data on success, NULL on failure.
+ */
+AVBufferRef *av_buffer_create(uint8_t *data, size_t size,
+ void (*free)(void *opaque, uint8_t *data),
+ void *opaque, int flags);
+
+/**
+ * Default free callback, which calls av_free() on the buffer data.
+ * This function is meant to be passed to av_buffer_create(), not called
+ * directly.
+ */
+void av_buffer_default_free(void *opaque, uint8_t *data);
+
+/**
+ * Create a new reference to an AVBuffer.
+ *
+ * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on
+ * failure.
+ */
+AVBufferRef *av_buffer_ref(const AVBufferRef *buf);
+
+/**
+ * Free a given reference and automatically free the buffer if there are no more
+ * references to it.
+ *
+ * @param buf the reference to be freed. The pointer is set to NULL on return.
+ */
+void av_buffer_unref(AVBufferRef **buf);
+
+/**
+ * @return 1 if the caller may write to the data referred to by buf (which is
+ * true if and only if buf is the only reference to the underlying AVBuffer).
+ * Return 0 otherwise.
+ * A positive answer is valid until av_buffer_ref() is called on buf.
+ */
+int av_buffer_is_writable(const AVBufferRef *buf);
+
+/**
+ * @return the opaque parameter set by av_buffer_create.
+ */
+void *av_buffer_get_opaque(const AVBufferRef *buf);
+
+/**
+ * @return the current reference count of the buffer underlying buf.
+ */
+int av_buffer_get_ref_count(const AVBufferRef *buf);
+
+/**
+ * Create a writable reference from a given buffer reference, avoiding data copy
+ * if possible.
+ *
+ * @param buf buffer reference to make writable. On success, buf is either left
+ * untouched, or it is unreferenced and a new writable AVBufferRef is
+ * written in its place. On failure, buf is left untouched.
+ * @return 0 on success, a negative AVERROR on failure.
+ */
+int av_buffer_make_writable(AVBufferRef **buf);
+
+/**
+ * Reallocate a given buffer.
+ *
+ * @param buf a buffer reference to reallocate. On success, buf will be
+ * unreferenced and a new reference with the required size will be
+ * written in its place. On failure buf will be left untouched. *buf
+ * may be NULL, then a new buffer is allocated.
+ * @param size required new buffer size.
+ * @return 0 on success, a negative AVERROR on failure.
+ *
+ * @note the buffer is actually reallocated with av_realloc() only if it was
+ * initially allocated through av_buffer_realloc(NULL) and there is only one
+ * reference to it (i.e. the one passed to this function). In all other cases
+ * a new buffer is allocated and the data is copied.
+ */
+int av_buffer_realloc(AVBufferRef **buf, size_t size);
+
+/**
+ * Ensure dst refers to the same data as src.
+ *
+ * When *dst is already equivalent to src, do nothing. Otherwise unreference dst
+ * and replace it with a new reference to src.
+ *
+ * @param dst Pointer to either a valid buffer reference or NULL. On success,
+ * this will point to a buffer reference equivalent to src. On
+ * failure, dst will be left untouched.
+ * @param src A buffer reference to replace dst with. May be NULL, then this
+ * function is equivalent to av_buffer_unref(dst).
+ * @return 0 on success
+ * AVERROR(ENOMEM) on memory allocation failure.
+ */
+int av_buffer_replace(AVBufferRef **dst, const AVBufferRef *src);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_bufferpool AVBufferPool
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers.
+ *
+ * Frequently allocating and freeing large buffers may be slow. AVBufferPool is
+ * meant to solve this in cases when the caller needs a set of buffers of the
+ * same size (the most obvious use case being buffers for raw video or audio
+ * frames).
+ *
+ * At the beginning, the user must call av_buffer_pool_init() to create the
+ * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to
+ * get a reference to a new buffer, similar to av_buffer_alloc(). This new
+ * reference works in all aspects the same way as the one created by
+ * av_buffer_alloc(). However, when the last reference to this buffer is
+ * unreferenced, it is returned to the pool instead of being freed and will be
+ * reused for subsequent av_buffer_pool_get() calls.
+ *
+ * When the caller is done with the pool and no longer needs to allocate any new
+ * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable.
+ * Once all the buffers are released, it will automatically be freed.
+ *
+ * Allocating and releasing buffers with this API is thread-safe as long as
+ * either the default alloc callback is used, or the user-supplied one is
+ * thread-safe.
+ */
+
+/**
+ * The buffer pool. This structure is opaque and not meant to be accessed
+ * directly. It is allocated with av_buffer_pool_init() and freed with
+ * av_buffer_pool_uninit().
+ */
+typedef struct AVBufferPool AVBufferPool;
+
+/**
+ * Allocate and initialize a buffer pool.
+ *
+ * @param size size of each buffer in this pool
+ * @param alloc a function that will be used to allocate new buffers when the
+ * pool is empty. May be NULL, then the default allocator will be used
+ * (av_buffer_alloc()).
+ * @return newly created buffer pool on success, NULL on error.
+ */
+AVBufferPool *av_buffer_pool_init(size_t size, AVBufferRef* (*alloc)(size_t size));
+
+/**
+ * Allocate and initialize a buffer pool with a more complex allocator.
+ *
+ * @param size size of each buffer in this pool
+ * @param opaque arbitrary user data used by the allocator
+ * @param alloc a function that will be used to allocate new buffers when the
+ * pool is empty. May be NULL, then the default allocator will be
+ * used (av_buffer_alloc()).
+ * @param pool_free a function that will be called immediately before the pool
+ * is freed, i.e. after av_buffer_pool_uninit() is called
+ * by the caller and all the buffers are returned to the pool
+ * and freed. It is intended to uninitialize the user opaque
+ * data. May be NULL.
+ * @return newly created buffer pool on success, NULL on error.
+ */
+AVBufferPool *av_buffer_pool_init2(size_t size, void *opaque,
+ AVBufferRef* (*alloc)(void *opaque, size_t size),
+ void (*pool_free)(void *opaque));
+
+/**
+ * Mark the pool as being available for freeing. It will actually be freed only
+ * once all the allocated buffers associated with the pool are released. Thus it
+ * is safe to call this function while some of the allocated buffers are still
+ * in use.
+ *
+ * @param pool pointer to the pool to be freed. It will be set to NULL.
+ */
+void av_buffer_pool_uninit(AVBufferPool **pool);
+
+/**
+ * Allocate a new AVBuffer, reusing an old buffer from the pool when available.
+ * This function may be called simultaneously from multiple threads.
+ *
+ * @return a reference to the new buffer on success, NULL on error.
+ */
+AVBufferRef *av_buffer_pool_get(AVBufferPool *pool);
+
+/**
+ * Query the original opaque parameter of an allocated buffer in the pool.
+ *
+ * @param ref a buffer reference to a buffer returned by av_buffer_pool_get.
+ * @return the opaque parameter set by the buffer allocator function of the
+ * buffer pool.
+ *
+ * @note the opaque parameter of ref is used by the buffer pool implementation,
+ * therefore you have to use this function to access the original opaque
+ * parameter of an allocated buffer.
+ */
+void *av_buffer_pool_buffer_get_opaque(const AVBufferRef *ref);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BUFFER_H */
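To tie the reference-counting rules above together, a minimal sketch (an editor's illustration, not part of the patch) of sharing a buffer and then taking a private writable copy:

    #include <string.h>
    #include "libavutil/buffer.h"

    int main(void)
    {
        AVBufferRef *a = av_buffer_allocz(64);   /* zeroed data, refcount 1 */
        AVBufferRef *b;

        if (!a)
            return 1;
        b = av_buffer_ref(a);                    /* second reference, refcount 2 */
        if (!b) {
            av_buffer_unref(&a);
            return 1;
        }
        /* with two references neither side may write; make_writable copies */
        if (!av_buffer_is_writable(a) && av_buffer_make_writable(&a) == 0)
            memcpy(a->data, "private copy", 13); /* a now owns its own buffer */

        av_buffer_unref(&a);                     /* frees a's private copy */
        av_buffer_unref(&b);                     /* refcount hits 0, original freed */
        return 0;
    }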
diff --git a/media/ffvpx/libavutil/buffer_internal.h b/media/ffvpx/libavutil/buffer_internal.h
new file mode 100644
index 0000000000..adb916aaa2
--- /dev/null
+++ b/media/ffvpx/libavutil/buffer_internal.h
@@ -0,0 +1,110 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BUFFER_INTERNAL_H
+#define AVUTIL_BUFFER_INTERNAL_H
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+#include "buffer.h"
+#include "thread.h"
+
+/**
+ * The buffer was av_realloc()ed, so it is reallocatable.
+ */
+#define BUFFER_FLAG_REALLOCATABLE (1 << 0)
+/**
+ * The AVBuffer structure is part of a larger structure
+ * and should not be freed.
+ */
+#define BUFFER_FLAG_NO_FREE (1 << 1)
+
+struct AVBuffer {
+ uint8_t *data; /**< data described by this buffer */
+ size_t size; /**< size of data in bytes */
+
+ /**
+ * number of existing AVBufferRef instances referring to this buffer
+ */
+ atomic_uint refcount;
+
+ /**
+ * a callback for freeing the data
+ */
+ void (*free)(void *opaque, uint8_t *data);
+
+ /**
+ * an opaque pointer, to be used by the freeing callback
+ */
+ void *opaque;
+
+ /**
+ * A combination of AV_BUFFER_FLAG_*
+ */
+ int flags;
+
+ /**
+ * A combination of BUFFER_FLAG_*
+ */
+ int flags_internal;
+};
+
+typedef struct BufferPoolEntry {
+ uint8_t *data;
+
+ /*
+ * Backups of the original opaque/free of the AVBuffer corresponding to
+ * data. They will be used to free the buffer when the pool is freed.
+ */
+ void *opaque;
+ void (*free)(void *opaque, uint8_t *data);
+
+ AVBufferPool *pool;
+ struct BufferPoolEntry *next;
+
+ /*
+ * An AVBuffer structure to (re)use as AVBuffer for subsequent uses
+ * of this BufferPoolEntry.
+ */
+ AVBuffer buffer;
+} BufferPoolEntry;
+
+struct AVBufferPool {
+ AVMutex mutex;
+ BufferPoolEntry *pool;
+
+ /*
+ * This is used to track when the pool is to be freed.
+ * The pointer to the pool itself held by the caller is considered to
+ * be one reference. Each buffer requested by the caller increases refcount
+ * by one, returning the buffer to the pool decreases it by one.
+ * refcount reaches zero when the pool has been uninited and all the
+ * buffers have been released; then it is safe to free the pool and all
+ * the buffers in it.
+ */
+ atomic_uint refcount;
+
+ size_t size;
+ void *opaque;
+ AVBufferRef* (*alloc)(size_t size);
+ AVBufferRef* (*alloc2)(void *opaque, size_t size);
+ void (*pool_free)(void *opaque);
+};
+
+#endif /* AVUTIL_BUFFER_INTERNAL_H */
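The refcount scheme described in the AVBufferPool comment above follows the usage pattern documented in buffer.h; a minimal sketch (an editor's illustration, not part of the patch):

    #include "libavutil/buffer.h"

    int main(void)
    {
        AVBufferPool *pool = av_buffer_pool_init(4096, NULL); /* default allocator */
        AVBufferRef *buf;

        if (!pool)
            return 1;

        buf = av_buffer_pool_get(pool);  /* freshly allocated on first use */
        if (buf) {
            buf->data[0] = 0x42;
            av_buffer_unref(&buf);       /* not freed, returned to the pool */
        }

        buf = av_buffer_pool_get(pool);  /* reuses the entry released above */
        av_buffer_unref(&buf);

        /* mark the pool freeable; it is actually freed once every outstanding
         * buffer has been returned (here: immediately) */
        av_buffer_pool_uninit(&pool);
        return 0;
    }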
diff --git a/media/ffvpx/libavutil/channel_layout.c b/media/ffvpx/libavutil/channel_layout.c
new file mode 100644
index 0000000000..e2f7512254
--- /dev/null
+++ b/media/ffvpx/libavutil/channel_layout.c
@@ -0,0 +1,1006 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio channel layout utility functions
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "avassert.h"
+#include "channel_layout.h"
+#include "bprint.h"
+#include "common.h"
+#include "error.h"
+#include "macros.h"
+#include "opt.h"
+
+#define CHAN_IS_AMBI(x) ((x) >= AV_CHAN_AMBISONIC_BASE &&\
+ (x) <= AV_CHAN_AMBISONIC_END)
+
+struct channel_name {
+ const char *name;
+ const char *description;
+};
+
+static const struct channel_name channel_names[] = {
+ [AV_CHAN_FRONT_LEFT ] = { "FL", "front left" },
+ [AV_CHAN_FRONT_RIGHT ] = { "FR", "front right" },
+ [AV_CHAN_FRONT_CENTER ] = { "FC", "front center" },
+ [AV_CHAN_LOW_FREQUENCY ] = { "LFE", "low frequency" },
+ [AV_CHAN_BACK_LEFT ] = { "BL", "back left" },
+ [AV_CHAN_BACK_RIGHT ] = { "BR", "back right" },
+ [AV_CHAN_FRONT_LEFT_OF_CENTER ] = { "FLC", "front left-of-center" },
+ [AV_CHAN_FRONT_RIGHT_OF_CENTER] = { "FRC", "front right-of-center" },
+ [AV_CHAN_BACK_CENTER ] = { "BC", "back center" },
+ [AV_CHAN_SIDE_LEFT ] = { "SL", "side left" },
+ [AV_CHAN_SIDE_RIGHT ] = { "SR", "side right" },
+ [AV_CHAN_TOP_CENTER ] = { "TC", "top center" },
+ [AV_CHAN_TOP_FRONT_LEFT ] = { "TFL", "top front left" },
+ [AV_CHAN_TOP_FRONT_CENTER ] = { "TFC", "top front center" },
+ [AV_CHAN_TOP_FRONT_RIGHT ] = { "TFR", "top front right" },
+ [AV_CHAN_TOP_BACK_LEFT ] = { "TBL", "top back left" },
+ [AV_CHAN_TOP_BACK_CENTER ] = { "TBC", "top back center" },
+ [AV_CHAN_TOP_BACK_RIGHT ] = { "TBR", "top back right" },
+ [AV_CHAN_STEREO_LEFT ] = { "DL", "downmix left" },
+ [AV_CHAN_STEREO_RIGHT ] = { "DR", "downmix right" },
+ [AV_CHAN_WIDE_LEFT ] = { "WL", "wide left" },
+ [AV_CHAN_WIDE_RIGHT ] = { "WR", "wide right" },
+ [AV_CHAN_SURROUND_DIRECT_LEFT ] = { "SDL", "surround direct left" },
+ [AV_CHAN_SURROUND_DIRECT_RIGHT] = { "SDR", "surround direct right" },
+ [AV_CHAN_LOW_FREQUENCY_2 ] = { "LFE2", "low frequency 2" },
+ [AV_CHAN_TOP_SIDE_LEFT ] = { "TSL", "top side left" },
+ [AV_CHAN_TOP_SIDE_RIGHT ] = { "TSR", "top side right" },
+ [AV_CHAN_BOTTOM_FRONT_CENTER ] = { "BFC", "bottom front center" },
+ [AV_CHAN_BOTTOM_FRONT_LEFT ] = { "BFL", "bottom front left" },
+ [AV_CHAN_BOTTOM_FRONT_RIGHT ] = { "BFR", "bottom front right" },
+};
+
+static const char *get_channel_name(enum AVChannel channel_id)
+{
+ if ((unsigned) channel_id >= FF_ARRAY_ELEMS(channel_names) ||
+ !channel_names[channel_id].name)
+ return NULL;
+ return channel_names[channel_id].name;
+}
+
+void av_channel_name_bprint(AVBPrint *bp, enum AVChannel channel_id)
+{
+ if (channel_id >= AV_CHAN_AMBISONIC_BASE &&
+ channel_id <= AV_CHAN_AMBISONIC_END)
+ av_bprintf(bp, "AMBI%d", channel_id - AV_CHAN_AMBISONIC_BASE);
+ else if ((unsigned)channel_id < FF_ARRAY_ELEMS(channel_names) &&
+ channel_names[channel_id].name)
+ av_bprintf(bp, "%s", channel_names[channel_id].name);
+ else if (channel_id == AV_CHAN_NONE)
+ av_bprintf(bp, "NONE");
+ else
+ av_bprintf(bp, "USR%d", channel_id);
+}
+
+int av_channel_name(char *buf, size_t buf_size, enum AVChannel channel_id)
+{
+ AVBPrint bp;
+
+ if (!buf && buf_size)
+ return AVERROR(EINVAL);
+
+ av_bprint_init_for_buffer(&bp, buf, buf_size);
+ av_channel_name_bprint(&bp, channel_id);
+
+ return bp.len;
+}
+
+void av_channel_description_bprint(AVBPrint *bp, enum AVChannel channel_id)
+{
+ if (channel_id >= AV_CHAN_AMBISONIC_BASE &&
+ channel_id <= AV_CHAN_AMBISONIC_END)
+ av_bprintf(bp, "ambisonic ACN %d", channel_id - AV_CHAN_AMBISONIC_BASE);
+ else if ((unsigned)channel_id < FF_ARRAY_ELEMS(channel_names) &&
+ channel_names[channel_id].description)
+ av_bprintf(bp, "%s", channel_names[channel_id].description);
+ else if (channel_id == AV_CHAN_NONE)
+ av_bprintf(bp, "none");
+ else
+ av_bprintf(bp, "user %d", channel_id);
+}
+
+int av_channel_description(char *buf, size_t buf_size, enum AVChannel channel_id)
+{
+ AVBPrint bp;
+
+ if (!buf && buf_size)
+ return AVERROR(EINVAL);
+
+ av_bprint_init_for_buffer(&bp, buf, buf_size);
+ av_channel_description_bprint(&bp, channel_id);
+
+ return bp.len;
+}
+
+enum AVChannel av_channel_from_string(const char *str)
+{
+ int i;
+ char *endptr = (char *)str;
+ enum AVChannel id = AV_CHAN_NONE;
+
+ if (!strncmp(str, "AMBI", 4)) {
+ i = strtol(str + 4, NULL, 0);
+ if (i < 0 || i > AV_CHAN_AMBISONIC_END - AV_CHAN_AMBISONIC_BASE)
+ return AV_CHAN_NONE;
+ return AV_CHAN_AMBISONIC_BASE + i;
+ }
+
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_names); i++) {
+ if (channel_names[i].name && !strcmp(str, channel_names[i].name))
+ return i;
+ }
+ if (!strncmp(str, "USR", 3)) {
+ const char *p = str + 3;
+ id = strtol(p, &endptr, 0);
+ }
+ if (id >= 0 && !*endptr)
+ return id;
+
+ return AV_CHAN_NONE;
+}
+
+struct channel_layout_name {
+ const char *name;
+ AVChannelLayout layout;
+};
+
+static const struct channel_layout_name channel_layout_map[] = {
+ { "mono", AV_CHANNEL_LAYOUT_MONO },
+ { "stereo", AV_CHANNEL_LAYOUT_STEREO },
+ { "2.1", AV_CHANNEL_LAYOUT_2POINT1 },
+ { "3.0", AV_CHANNEL_LAYOUT_SURROUND },
+ { "3.0(back)", AV_CHANNEL_LAYOUT_2_1 },
+ { "4.0", AV_CHANNEL_LAYOUT_4POINT0 },
+ { "quad", AV_CHANNEL_LAYOUT_QUAD },
+ { "quad(side)", AV_CHANNEL_LAYOUT_2_2 },
+ { "3.1", AV_CHANNEL_LAYOUT_3POINT1 },
+ { "5.0", AV_CHANNEL_LAYOUT_5POINT0_BACK },
+ { "5.0(side)", AV_CHANNEL_LAYOUT_5POINT0 },
+ { "4.1", AV_CHANNEL_LAYOUT_4POINT1 },
+ { "5.1", AV_CHANNEL_LAYOUT_5POINT1_BACK },
+ { "5.1(side)", AV_CHANNEL_LAYOUT_5POINT1 },
+ { "6.0", AV_CHANNEL_LAYOUT_6POINT0 },
+ { "6.0(front)", AV_CHANNEL_LAYOUT_6POINT0_FRONT },
+ { "hexagonal", AV_CHANNEL_LAYOUT_HEXAGONAL },
+ { "6.1", AV_CHANNEL_LAYOUT_6POINT1 },
+ { "6.1(back)", AV_CHANNEL_LAYOUT_6POINT1_BACK },
+ { "6.1(front)", AV_CHANNEL_LAYOUT_6POINT1_FRONT },
+ { "7.0", AV_CHANNEL_LAYOUT_7POINT0 },
+ { "7.0(front)", AV_CHANNEL_LAYOUT_7POINT0_FRONT },
+ { "7.1", AV_CHANNEL_LAYOUT_7POINT1 },
+ { "7.1(wide)", AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK },
+ { "7.1(wide-side)", AV_CHANNEL_LAYOUT_7POINT1_WIDE },
+ { "7.1(top)", AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK },
+ { "octagonal", AV_CHANNEL_LAYOUT_OCTAGONAL },
+ { "cube", AV_CHANNEL_LAYOUT_CUBE },
+ { "hexadecagonal", AV_CHANNEL_LAYOUT_HEXADECAGONAL },
+ { "downmix", AV_CHANNEL_LAYOUT_STEREO_DOWNMIX, },
+ { "22.2", AV_CHANNEL_LAYOUT_22POINT2, },
+};
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+static uint64_t get_channel_layout_single(const char *name, int name_len)
+{
+ int i;
+ char *end;
+ int64_t layout;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++) {
+ if (strlen(channel_layout_map[i].name) == name_len &&
+ !memcmp(channel_layout_map[i].name, name, name_len))
+ return channel_layout_map[i].layout.u.mask;
+ }
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_names); i++)
+ if (channel_names[i].name &&
+ strlen(channel_names[i].name) == name_len &&
+ !memcmp(channel_names[i].name, name, name_len))
+ return (int64_t)1 << i;
+
+ errno = 0;
+ i = strtol(name, &end, 10);
+
+ if (!errno && (end + 1 - name == name_len && *end == 'c'))
+ return av_get_default_channel_layout(i);
+
+ errno = 0;
+ layout = strtoll(name, &end, 0);
+ if (!errno && end - name == name_len)
+ return FFMAX(layout, 0);
+ return 0;
+}
+
+uint64_t av_get_channel_layout(const char *name)
+{
+ const char *n, *e;
+ const char *name_end = name + strlen(name);
+ int64_t layout = 0, layout_single;
+
+ for (n = name; n < name_end; n = e + 1) {
+ for (e = n; e < name_end && *e != '+' && *e != '|'; e++);
+ layout_single = get_channel_layout_single(n, e - n);
+ if (!layout_single)
+ return 0;
+ layout |= layout_single;
+ }
+ return layout;
+}
+
+int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels)
+{
+ int nb = 0;
+ char *end;
+ uint64_t layout = av_get_channel_layout(name);
+
+ if (layout) {
+ *channel_layout = layout;
+ *nb_channels = av_get_channel_layout_nb_channels(layout);
+ return 0;
+ }
+
+    errno = 0;
+    nb = strtol(name, &end, 10);
+ if (!errno && *end == 'C' && *(end + 1) == '\0' && nb > 0 && nb < 64) {
+ *channel_layout = 0;
+ *nb_channels = nb;
+ return 0;
+ }
+
+ return AVERROR(EINVAL);
+}
+
+void av_bprint_channel_layout(struct AVBPrint *bp,
+ int nb_channels, uint64_t channel_layout)
+{
+ int i;
+
+ if (nb_channels <= 0)
+ nb_channels = av_get_channel_layout_nb_channels(channel_layout);
+
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++)
+ if (nb_channels == channel_layout_map[i].layout.nb_channels &&
+ channel_layout == channel_layout_map[i].layout.u.mask) {
+ av_bprintf(bp, "%s", channel_layout_map[i].name);
+ return;
+ }
+
+ av_bprintf(bp, "%d channels", nb_channels);
+ if (channel_layout) {
+ int i, ch;
+ av_bprintf(bp, " (");
+ for (i = 0, ch = 0; i < 64; i++) {
+ if ((channel_layout & (UINT64_C(1) << i))) {
+ const char *name = get_channel_name(i);
+ if (name) {
+ if (ch > 0)
+ av_bprintf(bp, "+");
+ av_bprintf(bp, "%s", name);
+ }
+ ch++;
+ }
+ }
+ av_bprintf(bp, ")");
+ }
+}
+
+void av_get_channel_layout_string(char *buf, int buf_size,
+ int nb_channels, uint64_t channel_layout)
+{
+ AVBPrint bp;
+
+ av_bprint_init_for_buffer(&bp, buf, buf_size);
+ av_bprint_channel_layout(&bp, nb_channels, channel_layout);
+}
+
+int av_get_channel_layout_nb_channels(uint64_t channel_layout)
+{
+ return av_popcount64(channel_layout);
+}
+
+int64_t av_get_default_channel_layout(int nb_channels) {
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++)
+ if (nb_channels == channel_layout_map[i].layout.nb_channels)
+ return channel_layout_map[i].layout.u.mask;
+ return 0;
+}
+
+int av_get_channel_layout_channel_index(uint64_t channel_layout,
+ uint64_t channel)
+{
+ if (!(channel_layout & channel) ||
+ av_get_channel_layout_nb_channels(channel) != 1)
+ return AVERROR(EINVAL);
+ channel_layout &= channel - 1;
+ return av_get_channel_layout_nb_channels(channel_layout);
+}
+
+const char *av_get_channel_name(uint64_t channel)
+{
+ int i;
+ if (av_get_channel_layout_nb_channels(channel) != 1)
+ return NULL;
+ for (i = 0; i < 64; i++)
+ if ((1ULL<<i) & channel)
+ return get_channel_name(i);
+ return NULL;
+}
+
+const char *av_get_channel_description(uint64_t channel)
+{
+ int i;
+ if (av_get_channel_layout_nb_channels(channel) != 1)
+ return NULL;
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_names); i++)
+ if ((1ULL<<i) & channel)
+ return channel_names[i].description;
+ return NULL;
+}
+
+uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index)
+{
+ int i;
+
+ if (av_get_channel_layout_nb_channels(channel_layout) <= index)
+ return 0;
+
+ for (i = 0; i < 64; i++) {
+ if ((1ULL << i) & channel_layout && !index--)
+ return 1ULL << i;
+ }
+ return 0;
+}
+
+int av_get_standard_channel_layout(unsigned index, uint64_t *layout,
+ const char **name)
+{
+ if (index >= FF_ARRAY_ELEMS(channel_layout_map))
+ return AVERROR_EOF;
+ if (layout) *layout = channel_layout_map[index].layout.u.mask;
+ if (name) *name = channel_layout_map[index].name;
+ return 0;
+}
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+int av_channel_layout_from_mask(AVChannelLayout *channel_layout,
+ uint64_t mask)
+{
+ if (!mask)
+ return AVERROR(EINVAL);
+
+ channel_layout->order = AV_CHANNEL_ORDER_NATIVE;
+ channel_layout->nb_channels = av_popcount64(mask);
+ channel_layout->u.mask = mask;
+
+ return 0;
+}
+
+int av_channel_layout_from_string(AVChannelLayout *channel_layout,
+ const char *str)
+{
+ int i;
+ int channels = 0, nb_channels = 0, native = 1;
+ enum AVChannel highest_channel = AV_CHAN_NONE;
+ const char *dup;
+ char *chlist, *end;
+ uint64_t mask = 0;
+
+ /* channel layout names */
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++) {
+ if (channel_layout_map[i].name && !strcmp(str, channel_layout_map[i].name)) {
+ *channel_layout = channel_layout_map[i].layout;
+ return 0;
+ }
+ }
+
+ /* ambisonic */
+ if (!strncmp(str, "ambisonic ", 10)) {
+ const char *p = str + 10;
+ char *endptr;
+ AVChannelLayout extra = {0};
+ int order;
+
+ order = strtol(p, &endptr, 0);
+ if (order < 0 || order + 1 > INT_MAX / (order + 1) ||
+ (*endptr && *endptr != '+'))
+ return AVERROR(EINVAL);
+
+ channel_layout->order = AV_CHANNEL_ORDER_AMBISONIC;
+ channel_layout->nb_channels = (order + 1) * (order + 1);
+
+ if (*endptr) {
+ int ret = av_channel_layout_from_string(&extra, endptr + 1);
+ if (ret < 0)
+ return ret;
+ if (extra.nb_channels >= INT_MAX - channel_layout->nb_channels) {
+ av_channel_layout_uninit(&extra);
+ return AVERROR(EINVAL);
+ }
+
+ if (extra.order == AV_CHANNEL_ORDER_NATIVE) {
+ channel_layout->u.mask = extra.u.mask;
+ } else {
+ channel_layout->order = AV_CHANNEL_ORDER_CUSTOM;
+ channel_layout->u.map =
+ av_calloc(channel_layout->nb_channels + extra.nb_channels,
+ sizeof(*channel_layout->u.map));
+ if (!channel_layout->u.map) {
+ av_channel_layout_uninit(&extra);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < channel_layout->nb_channels; i++)
+ channel_layout->u.map[i].id = AV_CHAN_AMBISONIC_BASE + i;
+ for (i = 0; i < extra.nb_channels; i++) {
+ enum AVChannel ch = av_channel_layout_channel_from_index(&extra, i);
+ if (CHAN_IS_AMBI(ch)) {
+ av_channel_layout_uninit(&extra);
+ return AVERROR(EINVAL);
+ }
+ channel_layout->u.map[channel_layout->nb_channels + i].id = ch;
+ if (extra.order == AV_CHANNEL_ORDER_CUSTOM &&
+ extra.u.map[i].name[0])
+ av_strlcpy(channel_layout->u.map[channel_layout->nb_channels + i].name,
+ extra.u.map[i].name,
+ sizeof(channel_layout->u.map[channel_layout->nb_channels + i].name));
+ }
+ }
+ channel_layout->nb_channels += extra.nb_channels;
+ av_channel_layout_uninit(&extra);
+ }
+
+ return 0;
+ }
+
+ chlist = av_strdup(str);
+ if (!chlist)
+ return AVERROR(ENOMEM);
+
+ /* channel names */
+ av_sscanf(str, "%d channels (%[^)]", &nb_channels, chlist);
+ end = strchr(str, ')');
+
+ dup = chlist;
+ while (*dup) {
+ char *channel, *chname;
+ int ret = av_opt_get_key_value(&dup, "@", "+", AV_OPT_FLAG_IMPLICIT_KEY, &channel, &chname);
+ if (ret < 0) {
+ av_free(chlist);
+ return ret;
+ }
+ if (*dup)
+ dup++; // skip separator
+ if (channel && !*channel)
+ av_freep(&channel);
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_names); i++) {
+ if (channel_names[i].name && !strcmp(channel ? channel : chname, channel_names[i].name)) {
+ if (channel || i < highest_channel || mask & (1ULL << i))
+ native = 0; // Not a native layout, use a custom one
+ highest_channel = i;
+ mask |= 1ULL << i;
+ break;
+ }
+ }
+
+ if (!channel && i >= FF_ARRAY_ELEMS(channel_names)) {
+ char *endptr = chname;
+ enum AVChannel id = AV_CHAN_NONE;
+
+ if (!strncmp(chname, "USR", 3)) {
+ const char *p = chname + 3;
+ id = strtol(p, &endptr, 0);
+ }
+ if (id < 0 || *endptr) {
+ native = 0; // Unknown channel name
+ channels = 0;
+ mask = 0;
+ av_free(chname);
+ break;
+ }
+ if (id > 63)
+ native = 0; // Not a native layout, use a custom one
+ else {
+ if (id < highest_channel || mask & (1ULL << id))
+ native = 0; // Not a native layout, use a custom one
+ highest_channel = id;
+ mask |= 1ULL << id;
+ }
+ }
+ channels++;
+ av_free(channel);
+ av_free(chname);
+ }
+
+ if (mask && native) {
+ av_free(chlist);
+ if (nb_channels && ((nb_channels != channels) || (!end || *++end)))
+ return AVERROR(EINVAL);
+ av_channel_layout_from_mask(channel_layout, mask);
+ return 0;
+ }
+
+ /* custom layout of channel names */
+ if (channels && !native) {
+ int idx = 0;
+
+ if (nb_channels && ((nb_channels != channels) || (!end || *++end))) {
+ av_free(chlist);
+ return AVERROR(EINVAL);
+ }
+
+ channel_layout->u.map = av_calloc(channels, sizeof(*channel_layout->u.map));
+ if (!channel_layout->u.map) {
+ av_free(chlist);
+ return AVERROR(ENOMEM);
+ }
+
+ channel_layout->order = AV_CHANNEL_ORDER_CUSTOM;
+ channel_layout->nb_channels = channels;
+
+ dup = chlist;
+ while (*dup) {
+ char *channel, *chname;
+ int ret = av_opt_get_key_value(&dup, "@", "+", AV_OPT_FLAG_IMPLICIT_KEY, &channel, &chname);
+ if (ret < 0) {
+ av_freep(&channel_layout->u.map);
+ av_free(chlist);
+ return ret;
+ }
+ if (*dup)
+ dup++; // skip separator
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_names); i++) {
+ if (channel_names[i].name && !strcmp(channel ? channel : chname, channel_names[i].name)) {
+ channel_layout->u.map[idx].id = i;
+ if (channel)
+ av_strlcpy(channel_layout->u.map[idx].name, chname, sizeof(channel_layout->u.map[idx].name));
+ idx++;
+ break;
+ }
+ }
+ if (i >= FF_ARRAY_ELEMS(channel_names)) {
+ const char *p = (channel ? channel : chname) + 3;
+ channel_layout->u.map[idx].id = strtol(p, NULL, 0);
+ if (channel)
+ av_strlcpy(channel_layout->u.map[idx].name, chname, sizeof(channel_layout->u.map[idx].name));
+ idx++;
+ }
+ av_free(channel);
+ av_free(chname);
+ }
+ av_free(chlist);
+
+ return 0;
+ }
+ av_freep(&chlist);
+
+ errno = 0;
+ mask = strtoull(str, &end, 0);
+
+ /* channel layout mask */
+ if (!errno && !*end && !strchr(str, '-') && mask) {
+ av_channel_layout_from_mask(channel_layout, mask);
+ return 0;
+ }
+
+ errno = 0;
+ channels = strtol(str, &end, 10);
+
+ /* number of channels */
+ if (!errno && !strcmp(end, "c") && channels > 0) {
+ av_channel_layout_default(channel_layout, channels);
+ if (channel_layout->order == AV_CHANNEL_ORDER_NATIVE)
+ return 0;
+ }
+
+ /* number of unordered channels */
+ if (!errno && (!strcmp(end, "C") || !strcmp(end, " channels"))
+ && channels > 0) {
+ channel_layout->order = AV_CHANNEL_ORDER_UNSPEC;
+ channel_layout->nb_channels = channels;
+ return 0;
+ }
+
+ return AVERROR(EINVAL);
+}
+
+void av_channel_layout_uninit(AVChannelLayout *channel_layout)
+{
+ if (channel_layout->order == AV_CHANNEL_ORDER_CUSTOM)
+ av_freep(&channel_layout->u.map);
+ memset(channel_layout, 0, sizeof(*channel_layout));
+}
+
+int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
+{
+ av_channel_layout_uninit(dst);
+ *dst = *src;
+ if (src->order == AV_CHANNEL_ORDER_CUSTOM) {
+ dst->u.map = av_malloc_array(src->nb_channels, sizeof(*dst->u.map));
+ if (!dst->u.map)
+ return AVERROR(ENOMEM);
+ memcpy(dst->u.map, src->u.map, src->nb_channels * sizeof(*src->u.map));
+ }
+ return 0;
+}
+
+/**
+ * If the layout is n-th order standard-order ambisonic, with optional
+ * extra non-diegetic channels at the end, return the order.
+ * Return a negative error code otherwise.
+ */
+static int ambisonic_order(const AVChannelLayout *channel_layout)
+{
+ int i, highest_ambi, order;
+
+ highest_ambi = -1;
+ if (channel_layout->order == AV_CHANNEL_ORDER_AMBISONIC)
+ highest_ambi = channel_layout->nb_channels - av_popcount64(channel_layout->u.mask) - 1;
+ else {
+ const AVChannelCustom *map = channel_layout->u.map;
+ av_assert0(channel_layout->order == AV_CHANNEL_ORDER_CUSTOM);
+
+ for (i = 0; i < channel_layout->nb_channels; i++) {
+ int is_ambi = CHAN_IS_AMBI(map[i].id);
+
+ /* ambisonic following non-ambisonic */
+ if (i > 0 && is_ambi && !CHAN_IS_AMBI(map[i - 1].id))
+ return AVERROR(EINVAL);
+
+ /* non-default ordering */
+ if (is_ambi && map[i].id - AV_CHAN_AMBISONIC_BASE != i)
+ return AVERROR(EINVAL);
+
+ if (CHAN_IS_AMBI(map[i].id))
+ highest_ambi = i;
+ }
+ }
+    /* no ambisonic channels */
+ if (highest_ambi < 0)
+ return AVERROR(EINVAL);
+
+ order = floor(sqrt(highest_ambi));
+ /* incomplete order - some harmonics are missing */
+ if ((order + 1) * (order + 1) != highest_ambi + 1)
+ return AVERROR(EINVAL);
+
+ return order;
+}
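+
+/*
+ * Worked example of the check above (illustrative): a custom layout whose
+ * ambisonic prefix ends at index 3 (highest_ambi = 3) gives
+ * order = floor(sqrt(3)) = 1 and (1 + 1) * (1 + 1) == 3 + 1, i.e. a complete
+ * first-order set, so 1 is returned. A prefix ending at index 4 instead gives
+ * order = 2 and (2 + 1) * (2 + 1) != 4 + 1, so it is rejected as incomplete.
+ */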
+
+/**
+ * If the custom layout is n-th order standard-order ambisonic, with optional
+ * extra non-diegetic channels at the end, write its string description in bp.
+ * Return a negative error code otherwise.
+ */
+static int try_describe_ambisonic(AVBPrint *bp, const AVChannelLayout *channel_layout)
+{
+ int nb_ambi_channels;
+ int order = ambisonic_order(channel_layout);
+ if (order < 0)
+ return order;
+
+ av_bprintf(bp, "ambisonic %d", order);
+
+ /* extra channels present */
+ nb_ambi_channels = (order + 1) * (order + 1);
+ if (nb_ambi_channels < channel_layout->nb_channels) {
+ AVChannelLayout extra = { 0 };
+
+ if (channel_layout->order == AV_CHANNEL_ORDER_AMBISONIC) {
+ extra.order = AV_CHANNEL_ORDER_NATIVE;
+ extra.nb_channels = av_popcount64(channel_layout->u.mask);
+ extra.u.mask = channel_layout->u.mask;
+ } else {
+ extra.order = AV_CHANNEL_ORDER_CUSTOM;
+ extra.nb_channels = channel_layout->nb_channels - nb_ambi_channels;
+ extra.u.map = channel_layout->u.map + nb_ambi_channels;
+ }
+
+ av_bprint_chars(bp, '+', 1);
+ av_channel_layout_describe_bprint(&extra, bp);
+ /* Not calling uninit here on extra because we don't own the u.map pointer */
+ }
+
+ return 0;
+}
+
+int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout,
+ AVBPrint *bp)
+{
+ int i;
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_NATIVE:
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++)
+ if (channel_layout->u.mask == channel_layout_map[i].layout.u.mask) {
+ av_bprintf(bp, "%s", channel_layout_map[i].name);
+ return 0;
+ }
+ // fall-through
+ case AV_CHANNEL_ORDER_CUSTOM:
+ if (channel_layout->order == AV_CHANNEL_ORDER_CUSTOM) {
+ int res = try_describe_ambisonic(bp, channel_layout);
+ if (res >= 0)
+ return 0;
+ }
+ if (channel_layout->nb_channels)
+ av_bprintf(bp, "%d channels (", channel_layout->nb_channels);
+ for (i = 0; i < channel_layout->nb_channels; i++) {
+ enum AVChannel ch = av_channel_layout_channel_from_index(channel_layout, i);
+
+ if (i)
+ av_bprintf(bp, "+");
+ av_channel_name_bprint(bp, ch);
+ if (channel_layout->order == AV_CHANNEL_ORDER_CUSTOM &&
+ channel_layout->u.map[i].name[0])
+ av_bprintf(bp, "@%s", channel_layout->u.map[i].name);
+ }
+ if (channel_layout->nb_channels) {
+ av_bprintf(bp, ")");
+ return 0;
+ }
+ // fall-through
+ case AV_CHANNEL_ORDER_UNSPEC:
+ av_bprintf(bp, "%d channels", channel_layout->nb_channels);
+ return 0;
+ case AV_CHANNEL_ORDER_AMBISONIC:
+ return try_describe_ambisonic(bp, channel_layout);
+ default:
+ return AVERROR(EINVAL);
+ }
+}
+
+int av_channel_layout_describe(const AVChannelLayout *channel_layout,
+ char *buf, size_t buf_size)
+{
+ AVBPrint bp;
+ int ret;
+
+ if (!buf && buf_size)
+ return AVERROR(EINVAL);
+
+ av_bprint_init_for_buffer(&bp, buf, buf_size);
+ ret = av_channel_layout_describe_bprint(channel_layout, &bp);
+ if (ret < 0)
+ return ret;
+
+ return bp.len;
+}
+
+enum AVChannel
+av_channel_layout_channel_from_index(const AVChannelLayout *channel_layout,
+ unsigned int idx)
+{
+ int i;
+
+ if (idx >= channel_layout->nb_channels)
+ return AV_CHAN_NONE;
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_CUSTOM:
+ return channel_layout->u.map[idx].id;
+ case AV_CHANNEL_ORDER_AMBISONIC: {
+ int ambi_channels = channel_layout->nb_channels - av_popcount64(channel_layout->u.mask);
+ if (idx < ambi_channels)
+ return AV_CHAN_AMBISONIC_BASE + idx;
+ idx -= ambi_channels;
+ }
+ // fall-through
+ case AV_CHANNEL_ORDER_NATIVE:
+ for (i = 0; i < 64; i++) {
+ if ((1ULL << i) & channel_layout->u.mask && !idx--)
+ return i;
+ }
+ default:
+ return AV_CHAN_NONE;
+ }
+}
+
+enum AVChannel
+av_channel_layout_channel_from_string(const AVChannelLayout *channel_layout,
+ const char *str)
+{
+ int index = av_channel_layout_index_from_string(channel_layout, str);
+
+ if (index < 0)
+ return AV_CHAN_NONE;
+
+ return av_channel_layout_channel_from_index(channel_layout, index);
+}
+
+int av_channel_layout_index_from_channel(const AVChannelLayout *channel_layout,
+ enum AVChannel channel)
+{
+ int i;
+
+ if (channel == AV_CHAN_NONE)
+ return AVERROR(EINVAL);
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_CUSTOM:
+ for (i = 0; i < channel_layout->nb_channels; i++)
+ if (channel_layout->u.map[i].id == channel)
+ return i;
+ return AVERROR(EINVAL);
+ case AV_CHANNEL_ORDER_AMBISONIC:
+ case AV_CHANNEL_ORDER_NATIVE: {
+ uint64_t mask = channel_layout->u.mask;
+ int ambi_channels = channel_layout->nb_channels - av_popcount64(mask);
+ if (channel_layout->order == AV_CHANNEL_ORDER_AMBISONIC &&
+ channel >= AV_CHAN_AMBISONIC_BASE) {
+ if (channel - AV_CHAN_AMBISONIC_BASE >= ambi_channels)
+ return AVERROR(EINVAL);
+ return channel - AV_CHAN_AMBISONIC_BASE;
+ }
+ if ((unsigned)channel > 63 || !(mask & (1ULL << channel)))
+ return AVERROR(EINVAL);
+ mask &= (1ULL << channel) - 1;
+ return av_popcount64(mask) + ambi_channels;
+ }
+ default:
+ return AVERROR(EINVAL);
+ }
+}
+
+int av_channel_layout_index_from_string(const AVChannelLayout *channel_layout,
+ const char *str)
+{
+ char *chname;
+ enum AVChannel ch = AV_CHAN_NONE;
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_CUSTOM:
+ chname = strstr(str, "@");
+ if (chname) {
+ char buf[16];
+ chname++;
+ av_strlcpy(buf, str, FFMIN(sizeof(buf), chname - str));
+ if (!*chname)
+ chname = NULL;
+ ch = av_channel_from_string(buf);
+ if (ch == AV_CHAN_NONE && *buf)
+ return AVERROR(EINVAL);
+ }
+ for (int i = 0; chname && i < channel_layout->nb_channels; i++) {
+ if (!strcmp(chname, channel_layout->u.map[i].name) &&
+ (ch == AV_CHAN_NONE || ch == channel_layout->u.map[i].id))
+ return i;
+ }
+ // fall-through
+ case AV_CHANNEL_ORDER_AMBISONIC:
+ case AV_CHANNEL_ORDER_NATIVE:
+ ch = av_channel_from_string(str);
+ if (ch == AV_CHAN_NONE)
+ return AVERROR(EINVAL);
+ return av_channel_layout_index_from_channel(channel_layout, ch);
+ }
+
+ return AVERROR(EINVAL);
+}
+
+int av_channel_layout_check(const AVChannelLayout *channel_layout)
+{
+ if (channel_layout->nb_channels <= 0)
+ return 0;
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_NATIVE:
+ return av_popcount64(channel_layout->u.mask) == channel_layout->nb_channels;
+ case AV_CHANNEL_ORDER_CUSTOM:
+ if (!channel_layout->u.map)
+ return 0;
+ for (int i = 0; i < channel_layout->nb_channels; i++) {
+ if (channel_layout->u.map[i].id == AV_CHAN_NONE)
+ return 0;
+ }
+ return 1;
+ case AV_CHANNEL_ORDER_AMBISONIC:
+ /* If non-diegetic channels are present, ensure they are taken into account */
+ return av_popcount64(channel_layout->u.mask) < channel_layout->nb_channels;
+ case AV_CHANNEL_ORDER_UNSPEC:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
+{
+ int i;
+
+ /* different channel counts -> not equal */
+ if (chl->nb_channels != chl1->nb_channels)
+ return 1;
+
+ /* if only one is unspecified -> not equal */
+ if ((chl->order == AV_CHANNEL_ORDER_UNSPEC) !=
+ (chl1->order == AV_CHANNEL_ORDER_UNSPEC))
+ return 1;
+ /* both are unspecified -> equal */
+ else if (chl->order == AV_CHANNEL_ORDER_UNSPEC)
+ return 0;
+
+ /* can compare masks directly */
+ if ((chl->order == AV_CHANNEL_ORDER_NATIVE ||
+ chl->order == AV_CHANNEL_ORDER_AMBISONIC) &&
+ chl->order == chl1->order)
+ return chl->u.mask != chl1->u.mask;
+
+ /* compare channel by channel */
+ for (i = 0; i < chl->nb_channels; i++)
+ if (av_channel_layout_channel_from_index(chl, i) !=
+ av_channel_layout_channel_from_index(chl1, i))
+ return 1;
+ return 0;
+}
+
+void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++)
+ if (nb_channels == channel_layout_map[i].layout.nb_channels) {
+ *ch_layout = channel_layout_map[i].layout;
+ return;
+ }
+
+ ch_layout->order = AV_CHANNEL_ORDER_UNSPEC;
+ ch_layout->nb_channels = nb_channels;
+}
+
+const AVChannelLayout *av_channel_layout_standard(void **opaque)
+{
+ uintptr_t i = (uintptr_t)*opaque;
+ const AVChannelLayout *ch_layout = NULL;
+
+ if (i < FF_ARRAY_ELEMS(channel_layout_map)) {
+ ch_layout = &channel_layout_map[i].layout;
+ *opaque = (void*)(i + 1);
+ }
+
+ return ch_layout;
+}
+
+uint64_t av_channel_layout_subset(const AVChannelLayout *channel_layout,
+ uint64_t mask)
+{
+ uint64_t ret = 0;
+ int i;
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_NATIVE:
+ case AV_CHANNEL_ORDER_AMBISONIC:
+ return channel_layout->u.mask & mask;
+ case AV_CHANNEL_ORDER_CUSTOM:
+ for (i = 0; i < 64; i++)
+ if (mask & (1ULL << i) && av_channel_layout_index_from_channel(channel_layout, i) >= 0)
+ ret |= (1ULL << i);
+ break;
+ }
+
+ return ret;
+}
diff --git a/media/ffvpx/libavutil/channel_layout.h b/media/ffvpx/libavutil/channel_layout.h
new file mode 100644
index 0000000000..f345415c55
--- /dev/null
+++ b/media/ffvpx/libavutil/channel_layout.h
@@ -0,0 +1,783 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2008 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CHANNEL_LAYOUT_H
+#define AVUTIL_CHANNEL_LAYOUT_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "version.h"
+#include "attributes.h"
+
+/**
+ * @file
+ * @ingroup lavu_audio_channels
+ * Public libavutil channel layout APIs header.
+ */
+
+
+/**
+ * @defgroup lavu_audio_channels Audio channels
+ * @ingroup lavu_audio
+ *
+ * Audio channel layout utility functions
+ *
+ * @{
+ */
+
+enum AVChannel {
+    /** Invalid channel index */
+ AV_CHAN_NONE = -1,
+ AV_CHAN_FRONT_LEFT,
+ AV_CHAN_FRONT_RIGHT,
+ AV_CHAN_FRONT_CENTER,
+ AV_CHAN_LOW_FREQUENCY,
+ AV_CHAN_BACK_LEFT,
+ AV_CHAN_BACK_RIGHT,
+ AV_CHAN_FRONT_LEFT_OF_CENTER,
+ AV_CHAN_FRONT_RIGHT_OF_CENTER,
+ AV_CHAN_BACK_CENTER,
+ AV_CHAN_SIDE_LEFT,
+ AV_CHAN_SIDE_RIGHT,
+ AV_CHAN_TOP_CENTER,
+ AV_CHAN_TOP_FRONT_LEFT,
+ AV_CHAN_TOP_FRONT_CENTER,
+ AV_CHAN_TOP_FRONT_RIGHT,
+ AV_CHAN_TOP_BACK_LEFT,
+ AV_CHAN_TOP_BACK_CENTER,
+ AV_CHAN_TOP_BACK_RIGHT,
+ /** Stereo downmix. */
+ AV_CHAN_STEREO_LEFT = 29,
+ /** See above. */
+ AV_CHAN_STEREO_RIGHT,
+ AV_CHAN_WIDE_LEFT,
+ AV_CHAN_WIDE_RIGHT,
+ AV_CHAN_SURROUND_DIRECT_LEFT,
+ AV_CHAN_SURROUND_DIRECT_RIGHT,
+ AV_CHAN_LOW_FREQUENCY_2,
+ AV_CHAN_TOP_SIDE_LEFT,
+ AV_CHAN_TOP_SIDE_RIGHT,
+ AV_CHAN_BOTTOM_FRONT_CENTER,
+ AV_CHAN_BOTTOM_FRONT_LEFT,
+ AV_CHAN_BOTTOM_FRONT_RIGHT,
+
+    /** Channel is empty and can be safely skipped. */
+ AV_CHAN_UNUSED = 0x200,
+
+ /** Channel contains data, but its position is unknown. */
+ AV_CHAN_UNKNOWN = 0x300,
+
+ /**
+ * Range of channels between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END represent Ambisonic components using the ACN system.
+ *
+ * Given a channel id `<i>` between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END (inclusive), the ACN index of the channel `<n>` is
+ * `<n> = <i> - AV_CHAN_AMBISONIC_BASE`.
+ *
+ * @note these values are only used for AV_CHANNEL_ORDER_CUSTOM channel
+     * orderings; the AV_CHANNEL_ORDER_AMBISONIC ordering orders the channels
+ * implicitly by their position in the stream.
+ */
+ AV_CHAN_AMBISONIC_BASE = 0x400,
+ // leave space for 1024 ids, which correspond to maximum order-32 harmonics,
+ // which should be enough for the foreseeable use cases
+ AV_CHAN_AMBISONIC_END = 0x7ff,
+};
+
+enum AVChannelOrder {
+ /**
+ * Only the channel count is specified, without any further information
+ * about the channel order.
+ */
+ AV_CHANNEL_ORDER_UNSPEC,
+ /**
+ * The native channel order, i.e. the channels are in the same order in
+ * which they are defined in the AVChannel enum. This supports up to 63
+ * different channels.
+ */
+ AV_CHANNEL_ORDER_NATIVE,
+ /**
+ * The channel order does not correspond to any other predefined order and
+ * is stored as an explicit map. For example, this could be used to support
+     * layouts with 64 or more channels, or with empty/skipped (AV_CHAN_UNUSED)
+ * channels at arbitrary positions.
+ */
+ AV_CHANNEL_ORDER_CUSTOM,
+ /**
+ * The audio is represented as the decomposition of the sound field into
+ * spherical harmonics. Each channel corresponds to a single expansion
+ * component. Channels are ordered according to ACN (Ambisonic Channel
+ * Number).
+ *
+ * The channel with the index n in the stream contains the spherical
+ * harmonic of degree l and order m given by
+ * @code{.unparsed}
+ * l = floor(sqrt(n)),
+ * m = n - l * (l + 1).
+ * @endcode
+ *
+ * Conversely given a spherical harmonic of degree l and order m, the
+ * corresponding channel index n is given by
+ * @code{.unparsed}
+ * n = l * (l + 1) + m.
+ * @endcode
+ *
+ * Normalization is assumed to be SN3D (Schmidt Semi-Normalization)
+     * as defined in AmbiX format, section 2.1. A short worked example of the
+     * ACN mapping is given after this enum.
+     */
+ AV_CHANNEL_ORDER_AMBISONIC,
+};
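+
+/*
+ * Worked example of the ACN mapping documented above (illustrative, not part
+ * of the API):
+ *
+ *     int n = 5;                    // ACN channel index
+ *     int l = (int)floor(sqrt(n));  // degree: floor(sqrt(5)) = 2
+ *     int m = n - l * (l + 1);      // order:  5 - 2 * 3   = -1
+ *     // and conversely n = l * (l + 1) + m = 2 * 3 + (-1) = 5
+ */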
+
+
+/**
+ * @defgroup channel_masks Audio channel masks
+ *
+ * A channel layout is a 64-bit integer with a bit set for every channel.
+ * The number of bits set must be equal to the number of channels.
+ * The value 0 means that the channel layout is not known.
+ * @note this data structure is not powerful enough to handle channel
+ * combinations that have the same channel multiple times, such as
+ * dual-mono.
+ *
+ * @{
+ */
+#define AV_CH_FRONT_LEFT (1ULL << AV_CHAN_FRONT_LEFT )
+#define AV_CH_FRONT_RIGHT (1ULL << AV_CHAN_FRONT_RIGHT )
+#define AV_CH_FRONT_CENTER (1ULL << AV_CHAN_FRONT_CENTER )
+#define AV_CH_LOW_FREQUENCY (1ULL << AV_CHAN_LOW_FREQUENCY )
+#define AV_CH_BACK_LEFT (1ULL << AV_CHAN_BACK_LEFT )
+#define AV_CH_BACK_RIGHT (1ULL << AV_CHAN_BACK_RIGHT )
+#define AV_CH_FRONT_LEFT_OF_CENTER (1ULL << AV_CHAN_FRONT_LEFT_OF_CENTER )
+#define AV_CH_FRONT_RIGHT_OF_CENTER (1ULL << AV_CHAN_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_BACK_CENTER (1ULL << AV_CHAN_BACK_CENTER )
+#define AV_CH_SIDE_LEFT (1ULL << AV_CHAN_SIDE_LEFT )
+#define AV_CH_SIDE_RIGHT (1ULL << AV_CHAN_SIDE_RIGHT )
+#define AV_CH_TOP_CENTER (1ULL << AV_CHAN_TOP_CENTER )
+#define AV_CH_TOP_FRONT_LEFT (1ULL << AV_CHAN_TOP_FRONT_LEFT )
+#define AV_CH_TOP_FRONT_CENTER (1ULL << AV_CHAN_TOP_FRONT_CENTER )
+#define AV_CH_TOP_FRONT_RIGHT (1ULL << AV_CHAN_TOP_FRONT_RIGHT )
+#define AV_CH_TOP_BACK_LEFT (1ULL << AV_CHAN_TOP_BACK_LEFT )
+#define AV_CH_TOP_BACK_CENTER (1ULL << AV_CHAN_TOP_BACK_CENTER )
+#define AV_CH_TOP_BACK_RIGHT (1ULL << AV_CHAN_TOP_BACK_RIGHT )
+#define AV_CH_STEREO_LEFT (1ULL << AV_CHAN_STEREO_LEFT )
+#define AV_CH_STEREO_RIGHT (1ULL << AV_CHAN_STEREO_RIGHT )
+#define AV_CH_WIDE_LEFT (1ULL << AV_CHAN_WIDE_LEFT )
+#define AV_CH_WIDE_RIGHT (1ULL << AV_CHAN_WIDE_RIGHT )
+#define AV_CH_SURROUND_DIRECT_LEFT (1ULL << AV_CHAN_SURROUND_DIRECT_LEFT )
+#define AV_CH_SURROUND_DIRECT_RIGHT (1ULL << AV_CHAN_SURROUND_DIRECT_RIGHT)
+#define AV_CH_LOW_FREQUENCY_2 (1ULL << AV_CHAN_LOW_FREQUENCY_2 )
+#define AV_CH_TOP_SIDE_LEFT (1ULL << AV_CHAN_TOP_SIDE_LEFT )
+#define AV_CH_TOP_SIDE_RIGHT (1ULL << AV_CHAN_TOP_SIDE_RIGHT )
+#define AV_CH_BOTTOM_FRONT_CENTER (1ULL << AV_CHAN_BOTTOM_FRONT_CENTER )
+#define AV_CH_BOTTOM_FRONT_LEFT (1ULL << AV_CHAN_BOTTOM_FRONT_LEFT )
+#define AV_CH_BOTTOM_FRONT_RIGHT (1ULL << AV_CHAN_BOTTOM_FRONT_RIGHT )
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+/** Channel mask value used for AVCodecContext.request_channel_layout
+ to indicate that the user requests the channel order of the decoder output
+ to be the native codec channel order.
+ @deprecated channel order is now indicated in a special field in
+ AVChannelLayout
+ */
+#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL
+#endif
+
+/**
+ * @}
+ * @defgroup channel_mask_c Audio channel layouts
+ * @{
+ */
+#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)
+#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_TOP_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
+#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_CUBE (AV_CH_LAYOUT_QUAD|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT)
+#define AV_CH_LAYOUT_HEXADECAGONAL (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
+#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
+#define AV_CH_LAYOUT_22POINT2 (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_BACK_CENTER|AV_CH_LOW_FREQUENCY_2|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_CENTER|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_SIDE_LEFT|AV_CH_TOP_SIDE_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_BOTTOM_FRONT_CENTER|AV_CH_BOTTOM_FRONT_LEFT|AV_CH_BOTTOM_FRONT_RIGHT)
+
+enum AVMatrixEncoding {
+ AV_MATRIX_ENCODING_NONE,
+ AV_MATRIX_ENCODING_DOLBY,
+ AV_MATRIX_ENCODING_DPLII,
+ AV_MATRIX_ENCODING_DPLIIX,
+ AV_MATRIX_ENCODING_DPLIIZ,
+ AV_MATRIX_ENCODING_DOLBYEX,
+ AV_MATRIX_ENCODING_DOLBYHEADPHONE,
+ AV_MATRIX_ENCODING_NB
+};
+
+/**
+ * @}
+ */
+
+/**
+ * An AVChannelCustom defines a single channel within a custom order layout
+ *
+ * Unlike most structures in FFmpeg, sizeof(AVChannelCustom) is a part of the
+ * public ABI.
+ *
+ * No new fields may be added to it without a major version bump.
+ */
+typedef struct AVChannelCustom {
+ enum AVChannel id;
+ char name[16];
+ void *opaque;
+} AVChannelCustom;
+
+/**
+ * An AVChannelLayout holds information about the channel layout of audio data.
+ *
+ * A channel layout here is defined as a set of channels ordered in a specific
+ * way (unless the channel order is AV_CHANNEL_ORDER_UNSPEC, in which case an
+ * AVChannelLayout carries only the channel count).
+ * All orders may be treated as if they were AV_CHANNEL_ORDER_UNSPEC by
+ * ignoring everything but the channel count, as long as av_channel_layout_check()
+ * considers them valid.
+ *
+ * Unlike most structures in FFmpeg, sizeof(AVChannelLayout) is a part of the
+ * public ABI and may be used by the caller. E.g. it may be allocated on stack
+ * or embedded in caller-defined structs.
+ *
+ * AVChannelLayout can be initialized as follows:
+ * - default initialization with {0}, followed by setting all used fields
+ * correctly;
+ * - by assigning one of the predefined AV_CHANNEL_LAYOUT_* initializers;
+ * - with a constructor function, such as av_channel_layout_default(),
+ * av_channel_layout_from_mask() or av_channel_layout_from_string().
+ *
+ * The channel layout must be uninitialized with av_channel_layout_uninit().
+ *
+ * Copying an AVChannelLayout via assignment is forbidden;
+ * av_channel_layout_copy() must be used instead (and its return value should
+ * be checked). A brief usage sketch follows the struct definition below.
+ *
+ * No new fields may be added to it without a major version bump, except for
+ * new elements of the union fitting in sizeof(uint64_t).
+ */
+typedef struct AVChannelLayout {
+ /**
+ * Channel order used in this layout.
+ * This is a mandatory field.
+ */
+ enum AVChannelOrder order;
+
+ /**
+ * Number of channels in this layout. Mandatory field.
+ */
+ int nb_channels;
+
+ /**
+ * Details about which channels are present in this layout.
+ * For AV_CHANNEL_ORDER_UNSPEC, this field is undefined and must not be
+ * used.
+ */
+ union {
+ /**
+ * This member must be used for AV_CHANNEL_ORDER_NATIVE, and may be used
+ * for AV_CHANNEL_ORDER_AMBISONIC to signal non-diegetic channels.
+ * It is a bitmask, where the position of each set bit means that the
+ * AVChannel with the corresponding value is present.
+ *
+         * I.e. when (mask & (1ULL << AV_CHAN_FOO)) is non-zero, then AV_CHAN_FOO
+ * is present in the layout. Otherwise it is not present.
+ *
+ * @note when a channel layout using a bitmask is constructed or
+ * modified manually (i.e. not using any of the av_channel_layout_*
+ * functions), the code doing it must ensure that the number of set bits
+ * is equal to nb_channels.
+ */
+ uint64_t mask;
+ /**
+ * This member must be used when the channel order is
+ * AV_CHANNEL_ORDER_CUSTOM. It is a nb_channels-sized array, with each
+ * element signalling the presence of the AVChannel with the
+ * corresponding value in map[i].id.
+ *
+ * I.e. when map[i].id is equal to AV_CHAN_FOO, then AV_CH_FOO is the
+ * i-th channel in the audio data.
+ *
+ * When map[i].id is in the range between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END (inclusive), the channel contains an ambisonic
+ * component with ACN index (as defined above)
+ * n = map[i].id - AV_CHAN_AMBISONIC_BASE.
+ *
+ * map[i].name may be filled with a 0-terminated string, in which case
+ * it will be used for the purpose of identifying the channel with the
+         * convenience functions below. Otherwise it must be zeroed.
+ */
+ AVChannelCustom *map;
+ } u;
+
+ /**
+ * For some private data of the user.
+ */
+ void *opaque;
+} AVChannelLayout;
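+
+/*
+ * Usage sketch for the initialization rules described above (illustrative
+ * only; error handling is reduced to a minimum):
+ *
+ *     AVChannelLayout stereo = AV_CHANNEL_LAYOUT_STEREO; // predefined initializer
+ *
+ *     AVChannelLayout def = { 0 };
+ *     av_channel_layout_default(&def, 6);                // default 6-channel layout
+ *
+ *     AVChannelLayout copy = { 0 };
+ *     int ret = av_channel_layout_copy(&copy, &def);     // never copy by assignment
+ *     // ret < 0 means allocation failure and must be handled
+ *
+ *     av_channel_layout_uninit(&stereo);                 // always uninit when done
+ *     av_channel_layout_uninit(&def);
+ *     av_channel_layout_uninit(&copy);
+ */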
+
+#define AV_CHANNEL_LAYOUT_MASK(nb, m) \
+ { .order = AV_CHANNEL_ORDER_NATIVE, .nb_channels = (nb), .u = { .mask = (m) }}
+
+/**
+ * @name Common pre-defined channel layouts
+ * @{
+ */
+#define AV_CHANNEL_LAYOUT_MONO AV_CHANNEL_LAYOUT_MASK(1, AV_CH_LAYOUT_MONO)
+#define AV_CHANNEL_LAYOUT_STEREO AV_CHANNEL_LAYOUT_MASK(2, AV_CH_LAYOUT_STEREO)
+#define AV_CHANNEL_LAYOUT_2POINT1 AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_2POINT1)
+#define AV_CHANNEL_LAYOUT_2_1 AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_2_1)
+#define AV_CHANNEL_LAYOUT_SURROUND AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_SURROUND)
+#define AV_CHANNEL_LAYOUT_3POINT1 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_3POINT1)
+#define AV_CHANNEL_LAYOUT_4POINT0 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_4POINT0)
+#define AV_CHANNEL_LAYOUT_4POINT1 AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_4POINT1)
+#define AV_CHANNEL_LAYOUT_2_2 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_2_2)
+#define AV_CHANNEL_LAYOUT_QUAD AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_QUAD)
+#define AV_CHANNEL_LAYOUT_5POINT0 AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_5POINT0)
+#define AV_CHANNEL_LAYOUT_5POINT1 AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_5POINT1)
+#define AV_CHANNEL_LAYOUT_5POINT0_BACK AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_5POINT0_BACK)
+#define AV_CHANNEL_LAYOUT_5POINT1_BACK AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_5POINT1_BACK)
+#define AV_CHANNEL_LAYOUT_6POINT0 AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_6POINT0)
+#define AV_CHANNEL_LAYOUT_6POINT0_FRONT AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_6POINT0_FRONT)
+#define AV_CHANNEL_LAYOUT_HEXAGONAL AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_HEXAGONAL)
+#define AV_CHANNEL_LAYOUT_6POINT1 AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1)
+#define AV_CHANNEL_LAYOUT_6POINT1_BACK AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1_BACK)
+#define AV_CHANNEL_LAYOUT_6POINT1_FRONT AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1_FRONT)
+#define AV_CHANNEL_LAYOUT_7POINT0 AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_7POINT0)
+#define AV_CHANNEL_LAYOUT_7POINT0_FRONT AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_7POINT0_FRONT)
+#define AV_CHANNEL_LAYOUT_7POINT1 AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1)
+#define AV_CHANNEL_LAYOUT_7POINT1_WIDE AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE)
+#define AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE_BACK)
+#define AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_TOP_BACK)
+#define AV_CHANNEL_LAYOUT_OCTAGONAL AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_OCTAGONAL)
+#define AV_CHANNEL_LAYOUT_CUBE AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_CUBE)
+#define AV_CHANNEL_LAYOUT_HEXADECAGONAL AV_CHANNEL_LAYOUT_MASK(16, AV_CH_LAYOUT_HEXADECAGONAL)
+#define AV_CHANNEL_LAYOUT_STEREO_DOWNMIX AV_CHANNEL_LAYOUT_MASK(2, AV_CH_LAYOUT_STEREO_DOWNMIX)
+#define AV_CHANNEL_LAYOUT_22POINT2 AV_CHANNEL_LAYOUT_MASK(24, AV_CH_LAYOUT_22POINT2)
+#define AV_CHANNEL_LAYOUT_AMBISONIC_FIRST_ORDER \
+ { .order = AV_CHANNEL_ORDER_AMBISONIC, .nb_channels = 4, .u = { .mask = 0 }}
+/** @} */
+
+struct AVBPrint;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+/**
+ * @name Deprecated Functions
+ * @{
+ */
+
+/**
+ * Return a channel layout id that matches name, or 0 if no match is found.
+ *
+ * name can be one or several of the following notations,
+ * separated by '+' or '|':
+ * - the name of a usual channel layout (mono, stereo, 4.0, quad, 5.0,
+ * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix);
+ * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC,
+ * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR);
+ * - a number of channels, in decimal, followed by 'c', yielding
+ * the default channel layout for that number of channels (@see
+ * av_get_default_channel_layout);
+ * - a channel layout mask, in hexadecimal starting with "0x" (see the
+ * AV_CH_* macros).
+ *
+ * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7"
+ *
+ * @deprecated use av_channel_layout_from_string()
+ */
+attribute_deprecated
+uint64_t av_get_channel_layout(const char *name);
+
+/**
+ * Return a channel layout and the number of channels based on the specified name.
+ *
+ * This function is similar to (@see av_get_channel_layout), but can also parse
+ * unknown channel layout specifications.
+ *
+ * @param[in] name channel layout specification string
+ * @param[out] channel_layout parsed channel layout (0 if unknown)
+ * @param[out] nb_channels number of channels
+ *
+ * @return 0 on success, AVERROR(EINVAL) if the parsing fails.
+ * @deprecated use av_channel_layout_from_string()
+ */
+attribute_deprecated
+int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels);
+
+/**
+ * Return a description of a channel layout.
+ * If nb_channels is <= 0, it is guessed from the channel_layout.
+ *
+ * @param buf put here the string containing the channel layout
+ * @param buf_size size in bytes of the buffer
+ * @param nb_channels number of channels
+ * @param channel_layout channel layout bitset
+ * @deprecated use av_channel_layout_describe()
+ */
+attribute_deprecated
+void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);
+
+/**
+ * Append a description of a channel layout to a bprint buffer.
+ * @deprecated use av_channel_layout_describe()
+ */
+attribute_deprecated
+void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout);
+
+/**
+ * Return the number of channels in the channel layout.
+ * @deprecated use AVChannelLayout.nb_channels
+ */
+attribute_deprecated
+int av_get_channel_layout_nb_channels(uint64_t channel_layout);
+
+/**
+ * Return default channel layout for a given number of channels.
+ *
+ * @deprecated use av_channel_layout_default()
+ */
+attribute_deprecated
+int64_t av_get_default_channel_layout(int nb_channels);
+
+/**
+ * Get the index of a channel in channel_layout.
+ *
+ * @param channel_layout channel layout bitset
+ * @param channel a channel layout describing exactly one channel which must be
+ * present in channel_layout.
+ *
+ * @return index of channel in channel_layout on success, a negative AVERROR
+ * on error.
+ *
+ * @deprecated use av_channel_layout_index_from_channel()
+ */
+attribute_deprecated
+int av_get_channel_layout_channel_index(uint64_t channel_layout,
+ uint64_t channel);
+
+/**
+ * Get the channel with the given index in channel_layout.
+ * @deprecated use av_channel_layout_channel_from_index()
+ */
+attribute_deprecated
+uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);
+
+/**
+ * Get the name of a given channel.
+ *
+ * @return channel name on success, NULL on error.
+ *
+ * @deprecated use av_channel_name()
+ */
+attribute_deprecated
+const char *av_get_channel_name(uint64_t channel);
+
+/**
+ * Get the description of a given channel.
+ *
+ * @param channel a channel layout with a single channel
+ * @return channel description on success, NULL on error
+ * @deprecated use av_channel_description()
+ */
+attribute_deprecated
+const char *av_get_channel_description(uint64_t channel);
+
+/**
+ * Get the value and name of a standard channel layout.
+ *
+ * @param[in] index index in an internal list, starting at 0
+ * @param[out] layout channel layout mask
+ * @param[out] name name of the layout
+ * @return 0 if the layout exists,
+ * <0 if index is beyond the limits
+ * @deprecated use av_channel_layout_standard()
+ */
+attribute_deprecated
+int av_get_standard_channel_layout(unsigned index, uint64_t *layout,
+ const char **name);
+/**
+ * @}
+ */
+#endif
+
+/**
+ * Get a human-readable string in an abbreviated form describing a given channel.
+ * This is the inverse function of @ref av_channel_from_string().
+ *
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @param channel the AVChannel whose name to get
+ * @return amount of bytes needed to hold the output string, or a negative AVERROR
+ * on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_name(char *buf, size_t buf_size, enum AVChannel channel);
+
+/**
+ * bprint variant of av_channel_name().
+ *
+ * @note the string will be appended to the bprint buffer.
+ */
+void av_channel_name_bprint(struct AVBPrint *bp, enum AVChannel channel_id);
+
+/**
+ * Get a human-readable string describing a given channel.
+ *
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @param channel the AVChannel whose description to get
+ * @return amount of bytes needed to hold the output string, or a negative AVERROR
+ * on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_description(char *buf, size_t buf_size, enum AVChannel channel);
+
+/**
+ * bprint variant of av_channel_description().
+ *
+ * @note the string will be appended to the bprint buffer.
+ */
+void av_channel_description_bprint(struct AVBPrint *bp, enum AVChannel channel_id);
+
+/**
+ * This is the inverse function of @ref av_channel_name().
+ *
+ * @return the channel with the given name
+ * AV_CHAN_NONE when name does not identify a known channel
+ */
+enum AVChannel av_channel_from_string(const char *name);
+
+/**
+ * Initialize a native channel layout from a bitmask indicating which channels
+ * are present.
+ *
+ * @param channel_layout the layout structure to be initialized
+ * @param mask bitmask describing the channel layout
+ *
+ * @return 0 on success
+ * AVERROR(EINVAL) for invalid mask values
+ */
+int av_channel_layout_from_mask(AVChannelLayout *channel_layout, uint64_t mask);
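+
+/*
+ * For example (illustrative), building a native quad layout from its mask:
+ *
+ *     AVChannelLayout quad = { 0 };
+ *     int ret = av_channel_layout_from_mask(&quad, AV_CH_LAYOUT_QUAD);
+ *     // on success quad.nb_channels == 4 and quad.order == AV_CHANNEL_ORDER_NATIVE
+ */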
+
+/**
+ * Initialize a channel layout from a given string description.
+ * The input string can be represented by:
+ * - the formal channel layout name (returned by av_channel_layout_describe())
+ * - single or multiple channel names (returned by av_channel_name(), e.g. "FL",
+ *   or concatenated with "+", each optionally containing a custom name after
+ *   a "@", e.g. "FL@Left+FR@Right+LFE")
+ * - a decimal or hexadecimal value of a native channel layout (e.g. "4" or "0x4")
+ * - the number of channels with default layout (e.g. "4c")
+ * - the number of unordered channels (e.g. "4C" or "4 channels")
+ * - the ambisonic order followed by optional non-diegetic channels (e.g.
+ * "ambisonic 2+stereo")
+ *
+ * @param channel_layout uninitialized channel layout to be filled with the result
+ * @param str string describing the channel layout
+ * @return 0 if the channel layout was detected, a negative AVERROR code otherwise
+ */
+int av_channel_layout_from_string(AVChannelLayout *channel_layout,
+ const char *str);
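+
+/*
+ * Parsing sketch for the string forms listed above (illustrative only; the
+ * chosen strings are just examples):
+ *
+ *     AVChannelLayout layout = { 0 };
+ *     char buf[64];
+ *
+ *     if (!av_channel_layout_from_string(&layout, "FL@Left+FR@Right+LFE")) {
+ *         av_channel_layout_describe(&layout, buf, sizeof(buf));
+ *         // buf now holds a description that av_channel_layout_from_string()
+ *         // would parse back into an equivalent layout
+ *         av_channel_layout_uninit(&layout);
+ *     }
+ */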
+
+/**
+ * Get the default channel layout for a given number of channels.
+ *
+ * @param ch_layout the layout structure to be initialized
+ * @param nb_channels number of channels
+ */
+void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels);
+
+/**
+ * Iterate over all standard channel layouts.
+ *
+ * @param opaque a pointer where libavutil will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the standard channel layout or NULL when the iteration is
+ * finished
+ */
+const AVChannelLayout *av_channel_layout_standard(void **opaque);
+
+/**
+ * Free any allocated data in the channel layout and reset the channel
+ * count to 0.
+ *
+ * @param channel_layout the layout structure to be uninitialized
+ */
+void av_channel_layout_uninit(AVChannelLayout *channel_layout);
+
+/**
+ * Make a copy of a channel layout. This differs from just assigning src to dst
+ * in that it allocates and copies the map for AV_CHANNEL_ORDER_CUSTOM.
+ *
+ * @note the destination channel_layout will always be uninitialized before the copy.
+ *
+ * @param dst destination channel layout
+ * @param src source channel layout
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src);
+
+/**
+ * Get a human-readable string describing the channel layout properties.
+ * The string will be in the same format that is accepted by
+ * @ref av_channel_layout_from_string(), allowing the same channel layout to
+ * be rebuilt (except for opaque pointers).
+ *
+ * @param channel_layout channel layout to be described
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @return amount of bytes needed to hold the output string, or a negative AVERROR
+ * on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_layout_describe(const AVChannelLayout *channel_layout,
+ char *buf, size_t buf_size);
+
+/**
+ * bprint variant of av_channel_layout_describe().
+ *
+ * @note the string will be appended to the bprint buffer.
+ * @return 0 on success, or a negative AVERROR value on failure.
+ */
+int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout,
+ struct AVBPrint *bp);
+
+/**
+ * Get the channel with the given index in a channel layout.
+ *
+ * @param channel_layout input channel layout
+ * @param idx index of the channel
+ * @return channel with the index idx in channel_layout on success or
+ * AV_CHAN_NONE on failure (if idx is not valid or the channel order is
+ * unspecified)
+ */
+enum AVChannel
+av_channel_layout_channel_from_index(const AVChannelLayout *channel_layout, unsigned int idx);
+
+/**
+ * Get the index of a given channel in a channel layout. In case multiple
+ * channels are found, only the first match will be returned.
+ *
+ * @param channel_layout input channel layout
+ * @param channel the channel whose index to obtain
+ * @return index of channel in channel_layout on success or a negative number if
+ * channel is not present in channel_layout.
+ */
+int av_channel_layout_index_from_channel(const AVChannelLayout *channel_layout,
+ enum AVChannel channel);
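+
+/*
+ * For example (illustrative), in the native 5.1 layout the channel order is
+ * FL, FR, FC, LFE, BL, BR, so:
+ *
+ *     AVChannelLayout l51 = AV_CHANNEL_LAYOUT_5POINT1_BACK;
+ *     int idx = av_channel_layout_index_from_channel(&l51, AV_CHAN_FRONT_CENTER);
+ *     // idx == 2
+ */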
+
+/**
+ * Get the index in a channel layout of a channel described by the given string.
+ * In case multiple channels are found, only the first match will be returned.
+ *
+ * This function accepts channel names in the same format as
+ * @ref av_channel_from_string().
+ *
+ * @param channel_layout input channel layout
+ * @param name string describing the channel whose index to obtain
+ * @return a channel index described by the given string, or a negative AVERROR
+ * value.
+ */
+int av_channel_layout_index_from_string(const AVChannelLayout *channel_layout,
+ const char *name);
+
+/**
+ * Get a channel described by the given string.
+ *
+ * This function accepts channel names in the same format as
+ * @ref av_channel_from_string().
+ *
+ * @param channel_layout input channel layout
+ * @param name string describing the channel to obtain
+ * @return a channel described by the given string in channel_layout on success
+ * or AV_CHAN_NONE on failure (if the string is not valid or the channel
+ * order is unspecified)
+ */
+enum AVChannel
+av_channel_layout_channel_from_string(const AVChannelLayout *channel_layout,
+ const char *name);
+
+/**
+ * Find out what channels from a given set are present in a channel layout,
+ * without regard for their positions.
+ *
+ * @param channel_layout input channel layout
+ * @param mask a combination of AV_CH_* representing a set of channels
+ * @return a bitfield representing all the channels from mask that are present
+ * in channel_layout
+ */
+uint64_t av_channel_layout_subset(const AVChannelLayout *channel_layout,
+ uint64_t mask);
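+
+/*
+ * For example (illustrative), intersecting a 5.1(side) layout with the stereo
+ * mask keeps only the front pair:
+ *
+ *     AVChannelLayout l51 = AV_CHANNEL_LAYOUT_5POINT1;
+ *     uint64_t front = av_channel_layout_subset(&l51, AV_CH_LAYOUT_STEREO);
+ *     // front == (AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT)
+ */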
+
+/**
+ * Check whether a channel layout is valid, i.e. can possibly describe audio
+ * data.
+ *
+ * @param channel_layout input channel layout
+ * @return 1 if channel_layout is valid, 0 otherwise.
+ */
+int av_channel_layout_check(const AVChannelLayout *channel_layout);
+
+/**
+ * Check whether two channel layouts are semantically the same, i.e. the same
+ * channels are present on the same positions in both.
+ *
+ * If one of the channel layouts is AV_CHANNEL_ORDER_UNSPEC, while the other is
+ * not, they are considered to be unequal. If both are AV_CHANNEL_ORDER_UNSPEC,
+ * they are considered equal iff the channel counts are the same in both.
+ *
+ * @param chl input channel layout
+ * @param chl1 input channel layout
+ * @return 0 if chl and chl1 are equal, 1 if they are not equal. A negative
+ * AVERROR code if one or both are invalid.
+ */
+int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_CHANNEL_LAYOUT_H */
diff --git a/media/ffvpx/libavutil/color_utils.c b/media/ffvpx/libavutil/color_utils.c
new file mode 100644
index 0000000000..5e221fb798
--- /dev/null
+++ b/media/ffvpx/libavutil/color_utils.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2015 Kevin Wheatley <kevin.j.wheatley@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "libavutil/color_utils.h"
+#include "libavutil/pixfmt.h"
+
+double avpriv_get_gamma_from_trc(enum AVColorTransferCharacteristic trc)
+{
+ double gamma;
+ switch (trc) {
+ case AVCOL_TRC_BT709:
+ case AVCOL_TRC_SMPTE170M:
+ case AVCOL_TRC_SMPTE240M:
+ case AVCOL_TRC_BT1361_ECG:
+ case AVCOL_TRC_BT2020_10:
+ case AVCOL_TRC_BT2020_12:
+ /* these share a segmented TRC, but gamma 1.961 is a close
+ approximation, and also more correct for decoding content */
+ gamma = 1.961;
+ break;
+ case AVCOL_TRC_GAMMA22:
+ case AVCOL_TRC_IEC61966_2_1:
+ gamma = 2.2;
+ break;
+ case AVCOL_TRC_GAMMA28:
+ gamma = 2.8;
+ break;
+ case AVCOL_TRC_LINEAR:
+ gamma = 1.0;
+ break;
+ default:
+ gamma = 0.0; // Unknown value representation
+ }
+ return gamma;
+}
+
+#define BT709_alpha 1.099296826809442
+#define BT709_beta 0.018053968510807
+
+static double avpriv_trc_bt709(double Lc)
+{
+ const double a = BT709_alpha;
+ const double b = BT709_beta;
+
+ return (0.0 > Lc) ? 0.0
+ : ( b > Lc) ? 4.500 * Lc
+ : a * pow(Lc, 0.45) - (a - 1.0);
+}
+
+static double avpriv_trc_gamma22(double Lc)
+{
+ return (0.0 > Lc) ? 0.0 : pow(Lc, 1.0/ 2.2);
+}
+
+static double avpriv_trc_gamma28(double Lc)
+{
+ return (0.0 > Lc) ? 0.0 : pow(Lc, 1.0/ 2.8);
+}
+
+static double avpriv_trc_smpte240M(double Lc)
+{
+ const double a = 1.1115;
+ const double b = 0.0228;
+
+ return (0.0 > Lc) ? 0.0
+ : ( b > Lc) ? 4.000 * Lc
+ : a * pow(Lc, 0.45) - (a - 1.0);
+}
+
+static double avpriv_trc_linear(double Lc)
+{
+ return Lc;
+}
+
+static double avpriv_trc_log(double Lc)
+{
+ return (0.01 > Lc) ? 0.0 : 1.0 + log10(Lc) / 2.0;
+}
+
+static double avpriv_trc_log_sqrt(double Lc)
+{
+ // sqrt(10) / 1000
+ return (0.00316227766 > Lc) ? 0.0 : 1.0 + log10(Lc) / 2.5;
+}
+
+static double avpriv_trc_iec61966_2_4(double Lc)
+{
+ const double a = BT709_alpha;
+ const double b = BT709_beta;
+
+ return (-b >= Lc) ? -a * pow(-Lc, 0.45) + (a - 1.0)
+ : ( b > Lc) ? 4.500 * Lc
+ : a * pow( Lc, 0.45) - (a - 1.0);
+}
+
+static double avpriv_trc_bt1361(double Lc)
+{
+ const double a = BT709_alpha;
+ const double b = BT709_beta;
+
+ return (-0.0045 >= Lc) ? -(a * pow(-4.0 * Lc, 0.45) + (a - 1.0)) / 4.0
+ : ( b > Lc) ? 4.500 * Lc
+ : a * pow( Lc, 0.45) - (a - 1.0);
+}
+
+static double avpriv_trc_iec61966_2_1(double Lc)
+{
+ const double a = 1.055;
+ const double b = 0.0031308;
+
+ return (0.0 > Lc) ? 0.0
+ : ( b > Lc) ? 12.92 * Lc
+ : a * pow(Lc, 1.0 / 2.4) - (a - 1.0);
+}
+
+static double avpriv_trc_smpte_st2084(double Lc)
+{
+ const double c1 = 3424.0 / 4096.0; // c3-c2 + 1
+ const double c2 = 32.0 * 2413.0 / 4096.0;
+ const double c3 = 32.0 * 2392.0 / 4096.0;
+ const double m = 128.0 * 2523.0 / 4096.0;
+ const double n = 0.25 * 2610.0 / 4096.0;
+ const double L = Lc / 10000.0;
+ const double Ln = pow(L, n);
+
+ return (0.0 > Lc) ? 0.0
+ : pow((c1 + c2 * Ln) / (1.0 + c3 * Ln), m);
+
+}
+
+static double avpriv_trc_smpte_st428_1(double Lc)
+{
+ return (0.0 > Lc) ? 0.0
+ : pow(48.0 * Lc / 52.37, 1.0 / 2.6);
+}
+
+
+static double avpriv_trc_arib_std_b67(double Lc) {
+ // The function uses the definition from HEVC, which assumes that the peak
+ // white is input level = 1. (this is equivalent to scaling E = Lc * 12 and
+ // using the definition from the ARIB STD-B67 spec)
+ const double a = 0.17883277;
+ const double b = 0.28466892;
+ const double c = 0.55991073;
+ return (0.0 > Lc) ? 0.0 :
+ (Lc <= 1.0 / 12.0 ? sqrt(3.0 * Lc) : a * log(12.0 * Lc - b) + c);
+}
+
+avpriv_trc_function avpriv_get_trc_function_from_trc(enum AVColorTransferCharacteristic trc)
+{
+ avpriv_trc_function func = NULL;
+ switch (trc) {
+ case AVCOL_TRC_BT709:
+ case AVCOL_TRC_SMPTE170M:
+ case AVCOL_TRC_BT2020_10:
+ case AVCOL_TRC_BT2020_12:
+ func = avpriv_trc_bt709;
+ break;
+
+ case AVCOL_TRC_GAMMA22:
+ func = avpriv_trc_gamma22;
+ break;
+ case AVCOL_TRC_GAMMA28:
+ func = avpriv_trc_gamma28;
+ break;
+
+ case AVCOL_TRC_SMPTE240M:
+ func = avpriv_trc_smpte240M;
+ break;
+
+ case AVCOL_TRC_LINEAR:
+ func = avpriv_trc_linear;
+ break;
+
+ case AVCOL_TRC_LOG:
+ func = avpriv_trc_log;
+ break;
+
+ case AVCOL_TRC_LOG_SQRT:
+ func = avpriv_trc_log_sqrt;
+ break;
+
+ case AVCOL_TRC_IEC61966_2_4:
+ func = avpriv_trc_iec61966_2_4;
+ break;
+
+ case AVCOL_TRC_BT1361_ECG:
+ func = avpriv_trc_bt1361;
+ break;
+
+ case AVCOL_TRC_IEC61966_2_1:
+ func = avpriv_trc_iec61966_2_1;
+ break;
+
+ case AVCOL_TRC_SMPTEST2084:
+ func = avpriv_trc_smpte_st2084;
+ break;
+
+ case AVCOL_TRC_SMPTEST428_1:
+ func = avpriv_trc_smpte_st428_1;
+ break;
+
+ case AVCOL_TRC_ARIB_STD_B67:
+ func = avpriv_trc_arib_std_b67;
+ break;
+
+ case AVCOL_TRC_RESERVED0:
+ case AVCOL_TRC_UNSPECIFIED:
+ case AVCOL_TRC_RESERVED:
+ default:
+ break;
+ }
+ return func;
+}
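As a rough sketch of how the lookup above is meant to be consumed, the snippet below selects the BT.709 curve and encodes a few linear samples. The demo function, the loop and the printf are illustrative assumptions, not part of this file.

#include <stdio.h>
#include "libavutil/color_utils.h"

/* Apply the BT.709 OETF selected by the switch above to some linear values. */
static void demo_bt709_oetf(void)
{
    avpriv_trc_function oetf = avpriv_get_trc_function_from_trc(AVCOL_TRC_BT709);
    if (!oetf)
        return;  /* reserved/unspecified characteristics return NULL */
    for (double lc = 0.0; lc <= 1.0; lc += 0.25)
        printf("linear %.2f -> encoded %.4f\n", lc, oetf(lc));
}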
diff --git a/media/ffvpx/libavutil/color_utils.h b/media/ffvpx/libavutil/color_utils.h
new file mode 100644
index 0000000000..9529006452
--- /dev/null
+++ b/media/ffvpx/libavutil/color_utils.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015 Kevin Wheatley <kevin.j.wheatley@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_COLOR_UTILS_H
+#define AVUTIL_COLOR_UTILS_H
+
+
+#include "libavutil/pixfmt.h"
+
+/**
+ * Determine a suitable 'gamma' value to match the supplied
+ * AVColorTransferCharacteristic.
+ *
+ * See Apple Technical Note TN2257 (https://developer.apple.com/library/mac/technotes/tn2257/_index.html)
+ *
+ * @return An approximation to the simple gamma function matching the
+ *         supplied transfer characteristic, or 0.0 for any characteristic
+ *         that cannot reasonably be matched.
+ */
+double avpriv_get_gamma_from_trc(enum AVColorTransferCharacteristic trc);
+
+
+typedef double (*avpriv_trc_function)(double);
+
+/**
+ * Determine the function needed to apply the given
+ * AVColorTransferCharacteristic to linear input.
+ *
+ * The returned function expects a nominal domain and range of [0.0, 1.0];
+ * values outside of this range may be valid depending on the chosen
+ * characteristic function.
+ *
+ * @return A pointer to the function matching the supplied transfer
+ *         characteristic, or NULL if the characteristic is unspecified
+ *         or otherwise not handled.
+ */
+avpriv_trc_function avpriv_get_trc_function_from_trc(enum AVColorTransferCharacteristic trc);
+
+#endif
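A small, hedged example of the gamma helper declared above, e.g. for a caller that needs a single gamma number when tagging output; the 2.2 fallback is an arbitrary illustrative choice, not something this header mandates.

#include "libavutil/color_utils.h"

/* Map a transfer characteristic to a gamma value, falling back to 2.2 when
 * avpriv_get_gamma_from_trc() reports 0.0 (no reasonable match). */
static double gamma_for_tagging(enum AVColorTransferCharacteristic trc)
{
    double g = avpriv_get_gamma_from_trc(trc);
    return g > 0.0 ? g : 2.2;
}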
diff --git a/media/ffvpx/libavutil/colorspace.h b/media/ffvpx/libavutil/colorspace.h
new file mode 100644
index 0000000000..ef6f6107d6
--- /dev/null
+++ b/media/ffvpx/libavutil/colorspace.h
@@ -0,0 +1,150 @@
+/*
+ * Colorspace conversion defines
+ * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Various defines for YUV<->RGB conversion
+ */
+
+#ifndef AVUTIL_COLORSPACE_H
+#define AVUTIL_COLORSPACE_H
+
+#define SCALEBITS 10
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
+#define YUV_TO_RGB1_CCIR(cb1, cr1)\
+{\
+ cb = (cb1) - 128;\
+ cr = (cr1) - 128;\
+ r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
+ g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
+ ONE_HALF;\
+ b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
+}
+
+#define YUV_TO_RGB1_CCIR_BT709(cb1, cr1) \
+ { \
+ cb = (cb1) - 128; \
+ cr = (cr1) - 128; \
+ r_add = ONE_HALF + FIX(1.5747 * 255.0 / 224.0) * cr; \
+ g_add = ONE_HALF - FIX(0.1873 * 255.0 / 224.0) * cb - \
+ FIX(0.4682 * 255.0 / 224.0) * cr; \
+ b_add = ONE_HALF + FIX(1.8556 * 255.0 / 224.0) * cb; \
+ }
+
+// To be used for the BT709 variant as well
+#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
+{\
+ y = ((y1) - 16) * FIX(255.0/219.0);\
+ r = cm[(y + r_add) >> SCALEBITS];\
+ g = cm[(y + g_add) >> SCALEBITS];\
+ b = cm[(y + b_add) >> SCALEBITS];\
+}
+
+#define YUV_TO_RGB1(cb1, cr1)\
+{\
+ cb = (cb1) - 128;\
+ cr = (cr1) - 128;\
+ r_add = FIX(1.40200) * cr + ONE_HALF;\
+ g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
+ b_add = FIX(1.77200) * cb + ONE_HALF;\
+}
+
+#define YUV_TO_RGB2(r, g, b, y1)\
+{\
+ y = (y1) << SCALEBITS;\
+ r = cm[(y + r_add) >> SCALEBITS];\
+ g = cm[(y + g_add) >> SCALEBITS];\
+ b = cm[(y + b_add) >> SCALEBITS];\
+}
+
+#define Y_CCIR_TO_JPEG(y)\
+ cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]
+
+#define Y_JPEG_TO_CCIR(y)\
+ (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
+
+#define C_CCIR_TO_JPEG(y)\
+ cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
+
+/* NOTE: the clamp is really necessary! */
+static inline int C_JPEG_TO_CCIR(int y) {
+ y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
+ if (y < 16)
+ y = 16;
+ return y;
+}
+
+
+#define RGB_TO_Y_CCIR(r, g, b) \
+((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
+ FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
+
+#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
+(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
+ FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
+(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
+ FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_Y_JPEG(r, g, b) \
+(FFMIN((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
+ FIX(0.11400) * (b) + (ONE_HALF)) >> SCALEBITS, 255))
+
+#define RGB_TO_U_JPEG(r1, g1, b1)\
+(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
+ FIX(0.50000) * b1 + (ONE_HALF) - 1) >> (SCALEBITS)) + 128)
+
+#define RGB_TO_V_JPEG(r1, g1, b1)\
+(((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
+ FIX(0.08131) * b1 + (ONE_HALF) - 1) >> (SCALEBITS)) + 128)
+
+// Conversion macros for 8-bit RGB to YUV
+// Derived from ITU-R BT.709-6 (06/2015) Item 3.5
+// https://www.itu.int/rec/R-REC-BT.709-6-201506-I/en
+
+#define RGB_TO_Y_BT709(r, g, b) \
+((FIX(0.21260*219.0/255.0) * (r) + FIX(0.71520*219.0/255.0) * (g) + \
+ FIX(0.07220*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
+
+#define RGB_TO_U_BT709(r1, g1, b1, shift)\
+(((- FIX(0.11457*224.0/255.0) * r1 - FIX(0.38543*224.0/255.0) * g1 + \
+ FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_V_BT709(r1, g1, b1, shift)\
+(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.45415*224.0/255.0) * g1 - \
+ FIX(0.04585*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_Y_BT709_FULL(r, g, b) \
+(FFMIN((FIX(0.21260) * (r) + FIX(0.71520) * (g) + \
+ FIX(0.07220) * (b) + (ONE_HALF)) >> SCALEBITS, 255))
+
+#define RGB_TO_U_BT709_FULL(r1, g1, b1)\
+(((- FIX(0.11457) * r1 - FIX(0.38543) * g1 + \
+ FIX(0.50000) * b1 + (ONE_HALF) - 1) >> (SCALEBITS)) + 128)
+
+#define RGB_TO_V_BT709_FULL(r1, g1, b1)\
+(((FIX(0.50000) * r1 - FIX(0.45415) * g1 - \
+ FIX(0.04585) * b1 + (ONE_HALF) - 1) >> (SCALEBITS)) + 128)
+
+#endif /* AVUTIL_COLORSPACE_H */
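To illustrate the fixed-point scheme above (SCALEBITS bits of fraction, FIX() for the coefficients), the sketch below converts one full-range RGB pixel to limited-range BT.601 YCbCr. The function name and the shift value of 0 (no pixel averaging) are assumptions made for the example.

#include <stdio.h>
#include "libavutil/colorspace.h"

/* Convert a full-range 8-bit RGB pixel to limited-range (CCIR/BT.601) YCbCr. */
static void rgb_to_ycbcr_601(int r, int g, int b)
{
    int y  = RGB_TO_Y_CCIR(r, g, b);
    int cb = RGB_TO_U_CCIR(r, g, b, 0);
    int cr = RGB_TO_V_CCIR(r, g, b, 0);
    printf("Y=%d Cb=%d Cr=%d\n", y, cb, cr);   /* e.g. 255,255,255 -> 235,128,128 */
}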
diff --git a/media/ffvpx/libavutil/common.h b/media/ffvpx/libavutil/common.h
new file mode 100644
index 0000000000..fd1404be6c
--- /dev/null
+++ b/media/ffvpx/libavutil/common.h
@@ -0,0 +1,578 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C)
+#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS
+#endif
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "macros.h"
+
+//rounded division & shift
+#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
+/* assume b>0 */
+#define ROUNDED_DIV(a,b) (((a)>=0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+/* Fast a/(1<<b) rounded toward +inf. Assume a>=0 and b>=0 */
+#define AV_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \
+ : ((a) + (1<<(b)) - 1) >> (b))
+/* Backwards compat. */
+#define FF_CEIL_RSHIFT AV_CEIL_RSHIFT
+
+#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))
+#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))
+
+/**
+ * Absolute value. Note that INT_MIN / INT64_MIN result in undefined behavior
+ * as they are not representable as absolute values of their type. This is the
+ * same as with *abs().
+ * @see FFNABS()
+ */
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+/**
+ * Negative absolute value.
+ * This works for all integers of all types.
+ * As with many macros, this evaluates its argument twice; it thus must not
+ * have a side effect, i.e. FFNABS(x++) has undefined behavior.
+ */
+#define FFNABS(a) ((a) <= 0 ? (a) : (-(a)))
+
+/**
+ * Unsigned absolute value.
+ * This takes the absolute value of a signed int and returns it as an unsigned.
+ * This also works with INT_MIN, which would otherwise not be representable.
+ * As with many macros, this evaluates its argument twice.
+ */
+#define FFABSU(a) ((a) <= 0 ? -(unsigned)(a) : (unsigned)(a))
+#define FFABS64U(a) ((a) <= 0 ? -(uint64_t)(a) : (uint64_t)(a))
+
+/* misc math functions */
+
+#ifdef HAVE_AV_CONFIG_H
+# include "config.h"
+# include "intmath.h"
+#endif
+
+#ifndef av_ceil_log2
+# define av_ceil_log2 av_ceil_log2_c
+#endif
+#ifndef av_clip
+# define av_clip av_clip_c
+#endif
+#ifndef av_clip64
+# define av_clip64 av_clip64_c
+#endif
+#ifndef av_clip_uint8
+# define av_clip_uint8 av_clip_uint8_c
+#endif
+#ifndef av_clip_int8
+# define av_clip_int8 av_clip_int8_c
+#endif
+#ifndef av_clip_uint16
+# define av_clip_uint16 av_clip_uint16_c
+#endif
+#ifndef av_clip_int16
+# define av_clip_int16 av_clip_int16_c
+#endif
+#ifndef av_clipl_int32
+# define av_clipl_int32 av_clipl_int32_c
+#endif
+#ifndef av_clip_intp2
+# define av_clip_intp2 av_clip_intp2_c
+#endif
+#ifndef av_clip_uintp2
+# define av_clip_uintp2 av_clip_uintp2_c
+#endif
+#ifndef av_mod_uintp2
+# define av_mod_uintp2 av_mod_uintp2_c
+#endif
+#ifndef av_sat_add32
+# define av_sat_add32 av_sat_add32_c
+#endif
+#ifndef av_sat_dadd32
+# define av_sat_dadd32 av_sat_dadd32_c
+#endif
+#ifndef av_sat_sub32
+# define av_sat_sub32 av_sat_sub32_c
+#endif
+#ifndef av_sat_dsub32
+# define av_sat_dsub32 av_sat_dsub32_c
+#endif
+#ifndef av_sat_add64
+# define av_sat_add64 av_sat_add64_c
+#endif
+#ifndef av_sat_sub64
+# define av_sat_sub64 av_sat_sub64_c
+#endif
+#ifndef av_clipf
+# define av_clipf av_clipf_c
+#endif
+#ifndef av_clipd
+# define av_clipd av_clipd_c
+#endif
+#ifndef av_popcount
+# define av_popcount av_popcount_c
+#endif
+#ifndef av_popcount64
+# define av_popcount64 av_popcount64_c
+#endif
+#ifndef av_parity
+# define av_parity av_parity_c
+#endif
+
+#ifndef av_log2
+av_const int av_log2(unsigned v);
+#endif
+
+#ifndef av_log2_16bit
+av_const int av_log2_16bit(unsigned v);
+#endif
+
+/**
+ * Clip a signed integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_c(int a, int amin, int amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/**
+ * Clip a signed 64bit integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-255 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint8_t av_clip_uint8_c(int a)
+{
+ if (a&(~0xFF)) return (~a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -128,127 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int8_t av_clip_int8_c(int a)
+{
+ if ((a+0x80U) & ~0xFF) return (a>>31) ^ 0x7F;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-65535 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint16_t av_clip_uint16_c(int a)
+{
+ if (a&(~0xFFFF)) return (~a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -32768,32767 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int16_t av_clip_int16_c(int a)
+{
+ if ((a+0x8000U) & ~0xFFFF) return (a>>31) ^ 0x7FFF;
+ else return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)
+{
+ if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF);
+ else return (int32_t)a;
+}
+
+/**
+ * Clip a signed integer into the -(2^p),(2^p-1) range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_intp2_c(int a, int p)
+{
+ if (((unsigned)a + (1 << p)) & ~((2 << p) - 1))
+ return (a >> 31) ^ ((1 << p) - 1);
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
+{
+ if (a & ~((1<<p) - 1)) return (~a) >> 31 & ((1<<p) - 1);
+ else return a;
+}
+
+/**
+ * Clear high bits from an unsigned integer, starting at the specified bit position.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_mod_uintp2_c(unsigned a, unsigned p)
+{
+ return a & ((1U << p) - 1);
+}
+
+/**
+ * Add two signed 32-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_add32_c(int a, int b)
+{
+ return av_clipl_int32((int64_t)a + b);
+}
+
+/**
+ * Add a doubled value to another value with saturation at both stages.
+ *
+ * @param a first value
+ * @param b value doubled and added to a
+ * @return sum sat(a + sat(2*b)) with signed saturation
+ */
+static av_always_inline int av_sat_dadd32_c(int a, int b)
+{
+ return av_sat_add32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Subtract two signed 32-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return difference with signed saturation
+ */
+static av_always_inline int av_sat_sub32_c(int a, int b)
+{
+ return av_clipl_int32((int64_t)a - b);
+}
+
+/**
+ * Subtract a doubled value from another value with saturation at both stages.
+ *
+ * @param a first value
+ * @param b value doubled and subtracted from a
+ * @return difference sat(a - sat(2*b)) with signed saturation
+ */
+static av_always_inline int av_sat_dsub32_c(int a, int b)
+{
+ return av_sat_sub32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Add two signed 64-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int64_t av_sat_add64_c(int64_t a, int64_t b) {
+#if (!defined(__INTEL_COMPILER) && AV_GCC_VERSION_AT_LEAST(5,1)) || AV_HAS_BUILTIN(__builtin_add_overflow)
+ int64_t tmp;
+ return !__builtin_add_overflow(a, b, &tmp) ? tmp : (tmp < 0 ? INT64_MAX : INT64_MIN);
+#else
+ int64_t s = a+(uint64_t)b;
+ if ((int64_t)(a^b | ~s^b) >= 0)
+ return INT64_MAX ^ (b >> 63);
+ return s;
+#endif
+}
+
+/**
+ * Subtract two signed 64-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return difference with signed saturation
+ */
+static av_always_inline int64_t av_sat_sub64_c(int64_t a, int64_t b) {
+#if (!defined(__INTEL_COMPILER) && AV_GCC_VERSION_AT_LEAST(5,1)) || AV_HAS_BUILTIN(__builtin_sub_overflow)
+ int64_t tmp;
+ return !__builtin_sub_overflow(a, b, &tmp) ? tmp : (tmp < 0 ? INT64_MAX : INT64_MIN);
+#else
+ if (b <= 0 && a >= INT64_MAX + b)
+ return INT64_MAX;
+ if (b >= 0 && a <= INT64_MIN + b)
+ return INT64_MIN;
+ return a - b;
+#endif
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * If a is nan or -inf amin will be returned.
+ * If a is +inf amax will be returned.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin, float amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ return FFMIN(FFMAX(a, amin), amax);
+}
+
+/**
+ * Clip a double value into the amin-amax range.
+ * If a is nan or -inf amin will be returned.
+ * If a is +inf amax will be returned.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const double av_clipd_c(double a, double amin, double amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ return FFMIN(FFMAX(a, amin), amax);
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x))
+ * @return computed ceiling of log2(x)
+ */
+static av_always_inline av_const int av_ceil_log2_c(int x)
+{
+ return av_log2((x - 1U) << 1);
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount_c(uint32_t x)
+{
+ x -= (x >> 1) & 0x55555555;
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ x += x >> 8;
+ return (x + (x >> 16)) & 0x3F;
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount64_c(uint64_t x)
+{
+ return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32));
+}
+
+static av_always_inline av_const int av_parity_c(uint32_t v)
+{
+ return av_popcount(v) & 1;
+}
+
+/**
+ * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_BYTE Expression reading one byte from the input.
+ * Evaluated up to 7 times (4 for the currently
+ * assigned Unicode range). With a memory buffer
+ * input, this could be *ptr++, or if you want to make sure
+ * that *ptr stops at the end of a NULL terminated string then
+ * *ptr ? *ptr++ : 0
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ *
+ * @warning ERROR should not contain a loop control statement which
+ * could interact with the internal while loop, and should force an
+ * exit from the macro code (e.g. through a goto or a return) in order
+ * to prevent undefined results.
+ */
+#define GET_UTF8(val, GET_BYTE, ERROR)\
+ val= (GET_BYTE);\
+ {\
+ uint32_t top = (val & 128) >> 1;\
+ if ((val & 0xc0) == 0x80 || val >= 0xFE)\
+ {ERROR}\
+ while (val & top) {\
+ unsigned int tmp = (GET_BYTE) - 128;\
+ if(tmp>>6)\
+ {ERROR}\
+ val= (val<<6) + tmp;\
+ top <<= 5;\
+ }\
+ val &= (top << 1) - 1;\
+ }
+
+/**
+ * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_16BIT Expression returning two bytes of UTF-16 data converted
+ * to native byte order. Evaluated one or two times.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF16(val, GET_16BIT, ERROR)\
+ val = (GET_16BIT);\
+ {\
+ unsigned int hi = val - 0xD800;\
+ if (hi < 0x800) {\
+ val = (GET_16BIT) - 0xDC00;\
+ if (val > 0x3FFU || hi > 0x3FFU)\
+ {ERROR}\
+ val += (hi<<10) + 0x10000;\
+ }\
+ }\
+
+/**
+ * @def PUT_UTF8(val, tmp, PUT_BYTE)
+ * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint8_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_BYTE.
+ * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.
+ * It could be a function or a statement, and uses tmp as the input byte.
+ * For example, PUT_BYTE could be "*output++ = tmp;". PUT_BYTE will be
+ * executed up to 4 times for values in the valid UTF-8 range and up to
+ * 7 times in the general case, depending on the length of the converted
+ * Unicode character.
+ */
+#define PUT_UTF8(val, tmp, PUT_BYTE)\
+ {\
+ int bytes, shift;\
+ uint32_t in = val;\
+ if (in < 0x80) {\
+ tmp = in;\
+ PUT_BYTE\
+ } else {\
+ bytes = (av_log2(in) + 4) / 5;\
+ shift = (bytes - 1) * 6;\
+ tmp = (256 - (256 >> bytes)) | (in >> shift);\
+ PUT_BYTE\
+ while (shift >= 6) {\
+ shift -= 6;\
+ tmp = 0x80 | ((in >> shift) & 0x3f);\
+ PUT_BYTE\
+ }\
+ }\
+ }
+
+/**
+ * @def PUT_UTF16(val, tmp, PUT_16BIT)
+ * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint16_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_16BIT.
+ * @param PUT_16BIT writes the converted UTF-16 data to any proper destination
+ * in the desired endianness. It could be a function or a statement, and uses
+ * tmp as the input unit. For example, PUT_16BIT could be "*output++ = tmp;".
+ * PUT_16BIT will be executed 1 or 2 times depending on the input character.
+ */
+#define PUT_UTF16(val, tmp, PUT_16BIT)\
+ {\
+ uint32_t in = val;\
+ if (in < 0x10000) {\
+ tmp = in;\
+ PUT_16BIT\
+ } else {\
+ tmp = 0xD800 | ((in - 0x10000) >> 10);\
+ PUT_16BIT\
+ tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\
+ PUT_16BIT\
+ }\
+ }\
+
+
+
+#include "mem.h"
+
+#ifdef HAVE_AV_CONFIG_H
+# include "internal.h"
+#endif /* HAVE_AV_CONFIG_H */
+
+#endif /* AVUTIL_COMMON_H */
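A minimal sketch of the GET_UTF8 macro in use, decoding the first code point of a NUL-terminated string; the wrapper function name and the U+FFFD fallback are illustrative assumptions, not part of this header.

#include <stdint.h>
#include "libavutil/common.h"

/* Return the first Unicode code point of a NUL-terminated UTF-8 string,
 * or U+FFFD (replacement character) on malformed input. */
static uint32_t first_codepoint(const char *s)
{
    const uint8_t *p = (const uint8_t *)s;
    uint32_t val;
    GET_UTF8(val, *p ? *p++ : 0, goto error;)
    return val;
error:
    return 0xFFFD;
}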
diff --git a/media/ffvpx/libavutil/cpu.c b/media/ffvpx/libavutil/cpu.c
new file mode 100644
index 0000000000..2c5f7f4958
--- /dev/null
+++ b/media/ffvpx/libavutil/cpu.c
@@ -0,0 +1,282 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#if HAVE_SCHED_GETAFFINITY
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+#endif
+#include <sched.h>
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include "attributes.h"
+#include "cpu.h"
+#include "cpu_internal.h"
+#include "opt.h"
+#include "common.h"
+
+#if HAVE_GETPROCESSAFFINITYMASK || HAVE_WINRT
+#include <windows.h>
+#endif
+#if HAVE_SYSCTL
+#if HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+static atomic_int cpu_flags = ATOMIC_VAR_INIT(-1);
+static atomic_int cpu_count = ATOMIC_VAR_INIT(-1);
+
+static int get_cpu_flags(void)
+{
+#if ARCH_MIPS
+ return ff_get_cpu_flags_mips();
+#elif ARCH_AARCH64
+ return ff_get_cpu_flags_aarch64();
+#elif ARCH_ARM
+ return ff_get_cpu_flags_arm();
+#elif ARCH_PPC
+ return ff_get_cpu_flags_ppc();
+#elif ARCH_RISCV
+ return ff_get_cpu_flags_riscv();
+#elif ARCH_X86
+ return ff_get_cpu_flags_x86();
+#elif ARCH_LOONGARCH
+ return ff_get_cpu_flags_loongarch();
+#endif
+ return 0;
+}
+
+void av_force_cpu_flags(int arg){
+ if (ARCH_X86 &&
+ (arg & ( AV_CPU_FLAG_3DNOW |
+ AV_CPU_FLAG_3DNOWEXT |
+ AV_CPU_FLAG_MMXEXT |
+ AV_CPU_FLAG_SSE |
+ AV_CPU_FLAG_SSE2 |
+ AV_CPU_FLAG_SSE2SLOW |
+ AV_CPU_FLAG_SSE3 |
+ AV_CPU_FLAG_SSE3SLOW |
+ AV_CPU_FLAG_SSSE3 |
+ AV_CPU_FLAG_SSE4 |
+ AV_CPU_FLAG_SSE42 |
+ AV_CPU_FLAG_AVX |
+ AV_CPU_FLAG_AVXSLOW |
+ AV_CPU_FLAG_XOP |
+ AV_CPU_FLAG_FMA3 |
+ AV_CPU_FLAG_FMA4 |
+ AV_CPU_FLAG_AVX2 |
+ AV_CPU_FLAG_AVX512 ))
+ && !(arg & AV_CPU_FLAG_MMX)) {
+ av_log(NULL, AV_LOG_WARNING, "MMX implied by specified flags\n");
+ arg |= AV_CPU_FLAG_MMX;
+ }
+
+ atomic_store_explicit(&cpu_flags, arg, memory_order_relaxed);
+}
+
+int av_get_cpu_flags(void)
+{
+ int flags = atomic_load_explicit(&cpu_flags, memory_order_relaxed);
+ if (flags == -1) {
+ flags = get_cpu_flags();
+ atomic_store_explicit(&cpu_flags, flags, memory_order_relaxed);
+ }
+ return flags;
+}
+
+int av_parse_cpu_caps(unsigned *flags, const char *s)
+{
+ static const AVOption cpuflags_opts[] = {
+ { "flags" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
+#if ARCH_PPC
+ { "altivec" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ALTIVEC }, .unit = "flags" },
+#elif ARCH_X86
+ { "mmx" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX }, .unit = "flags" },
+ { "mmx2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX2 }, .unit = "flags" },
+ { "mmxext" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX2 }, .unit = "flags" },
+ { "sse" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE }, .unit = "flags" },
+ { "sse2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE2 }, .unit = "flags" },
+ { "sse2slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE2SLOW }, .unit = "flags" },
+ { "sse3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE3 }, .unit = "flags" },
+ { "sse3slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE3SLOW }, .unit = "flags" },
+ { "ssse3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSSE3 }, .unit = "flags" },
+ { "atom" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ATOM }, .unit = "flags" },
+ { "sse4.1" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE4 }, .unit = "flags" },
+ { "sse4.2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE42 }, .unit = "flags" },
+ { "avx" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX }, .unit = "flags" },
+ { "avxslow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVXSLOW }, .unit = "flags" },
+ { "xop" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_XOP }, .unit = "flags" },
+ { "fma3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_FMA3 }, .unit = "flags" },
+ { "fma4" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_FMA4 }, .unit = "flags" },
+ { "avx2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX2 }, .unit = "flags" },
+ { "bmi1" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI1 }, .unit = "flags" },
+ { "bmi2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI2 }, .unit = "flags" },
+ { "3dnow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_3DNOW }, .unit = "flags" },
+ { "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_3DNOWEXT }, .unit = "flags" },
+ { "cmov", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_CMOV }, .unit = "flags" },
+ { "aesni", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AESNI }, .unit = "flags" },
+ { "avx512" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX512 }, .unit = "flags" },
+ { "avx512icl", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX512ICL }, .unit = "flags" },
+ { "slowgather", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SLOW_GATHER }, .unit = "flags" },
+
+#define CPU_FLAG_P2 AV_CPU_FLAG_CMOV | AV_CPU_FLAG_MMX
+#define CPU_FLAG_P3 CPU_FLAG_P2 | AV_CPU_FLAG_MMX2 | AV_CPU_FLAG_SSE
+#define CPU_FLAG_P4 CPU_FLAG_P3| AV_CPU_FLAG_SSE2
+ { "pentium2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P2 }, .unit = "flags" },
+ { "pentium3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P3 }, .unit = "flags" },
+ { "pentium4", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P4 }, .unit = "flags" },
+
+#define CPU_FLAG_K62 AV_CPU_FLAG_MMX | AV_CPU_FLAG_3DNOW
+#define CPU_FLAG_ATHLON CPU_FLAG_K62 | AV_CPU_FLAG_CMOV | AV_CPU_FLAG_3DNOWEXT | AV_CPU_FLAG_MMX2
+#define CPU_FLAG_ATHLONXP CPU_FLAG_ATHLON | AV_CPU_FLAG_SSE
+#define CPU_FLAG_K8 CPU_FLAG_ATHLONXP | AV_CPU_FLAG_SSE2
+ { "k6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX }, .unit = "flags" },
+ { "k62", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_K62 }, .unit = "flags" },
+ { "athlon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_ATHLON }, .unit = "flags" },
+ { "athlonxp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_ATHLONXP }, .unit = "flags" },
+ { "k8", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_K8 }, .unit = "flags" },
+#elif ARCH_ARM
+ { "armv5te", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV5TE }, .unit = "flags" },
+ { "armv6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6 }, .unit = "flags" },
+ { "armv6t2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6T2 }, .unit = "flags" },
+ { "vfp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP }, .unit = "flags" },
+ { "vfp_vm", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP_VM }, .unit = "flags" },
+ { "vfpv3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFPV3 }, .unit = "flags" },
+ { "neon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON }, .unit = "flags" },
+ { "setend", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SETEND }, .unit = "flags" },
+#elif ARCH_AARCH64
+ { "armv8", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8 }, .unit = "flags" },
+ { "neon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON }, .unit = "flags" },
+ { "vfp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP }, .unit = "flags" },
+#elif ARCH_MIPS
+ { "mmi", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMI }, .unit = "flags" },
+ { "msa", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MSA }, .unit = "flags" },
+#elif ARCH_LOONGARCH
+ { "lsx", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_LSX }, .unit = "flags" },
+ { "lasx", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_LASX }, .unit = "flags" },
+#elif ARCH_RISCV
+ { "rvi", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVI }, .unit = "flags" },
+ { "rvf", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVF }, .unit = "flags" },
+ { "rvd", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVD }, .unit = "flags" },
+ { "rvv-i32", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_I32 }, .unit = "flags" },
+ { "rvv-f32", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_F32 }, .unit = "flags" },
+ { "rvv-i64", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_I64 }, .unit = "flags" },
+ { "rvv", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_F64 }, .unit = "flags" },
+ { "rvb-basic",NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVB_BASIC }, .unit = "flags" },
+#endif
+ { NULL },
+ };
+ static const AVClass class = {
+ .class_name = "cpuflags",
+ .item_name = av_default_item_name,
+ .option = cpuflags_opts,
+ .version = LIBAVUTIL_VERSION_INT,
+ };
+ const AVClass *pclass = &class;
+
+ return av_opt_eval_flags(&pclass, &cpuflags_opts[0], s, flags);
+}
+
+int av_cpu_count(void)
+{
+ static atomic_int printed = ATOMIC_VAR_INIT(0);
+
+ int nb_cpus = 1;
+ int count = 0;
+#if HAVE_WINRT
+ SYSTEM_INFO sysinfo;
+#endif
+#if HAVE_SCHED_GETAFFINITY && defined(CPU_COUNT)
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+
+ if (!sched_getaffinity(0, sizeof(cpuset), &cpuset))
+ nb_cpus = CPU_COUNT(&cpuset);
+#elif HAVE_GETPROCESSAFFINITYMASK
+ DWORD_PTR proc_aff, sys_aff;
+ if (GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
+ nb_cpus = av_popcount64(proc_aff);
+#elif HAVE_SYSCTL && defined(HW_NCPUONLINE)
+ int mib[2] = { CTL_HW, HW_NCPUONLINE };
+ size_t len = sizeof(nb_cpus);
+
+ if (sysctl(mib, 2, &nb_cpus, &len, NULL, 0) == -1)
+ nb_cpus = 0;
+#elif HAVE_SYSCTL && defined(HW_NCPU)
+ int mib[2] = { CTL_HW, HW_NCPU };
+ size_t len = sizeof(nb_cpus);
+
+ if (sysctl(mib, 2, &nb_cpus, &len, NULL, 0) == -1)
+ nb_cpus = 0;
+#elif HAVE_SYSCONF && defined(_SC_NPROC_ONLN)
+ nb_cpus = sysconf(_SC_NPROC_ONLN);
+#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
+ nb_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+#elif HAVE_WINRT
+ GetNativeSystemInfo(&sysinfo);
+ nb_cpus = sysinfo.dwNumberOfProcessors;
+#endif
+
+ if (!atomic_exchange_explicit(&printed, 1, memory_order_relaxed))
+ av_log(NULL, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus);
+
+ count = atomic_load_explicit(&cpu_count, memory_order_relaxed);
+
+ if (count > 0) {
+ nb_cpus = count;
+ av_log(NULL, AV_LOG_DEBUG, "overriding to %d logical cores\n", nb_cpus);
+ }
+
+ return nb_cpus;
+}
+
+void av_cpu_force_count(int count)
+{
+ atomic_store_explicit(&cpu_count, count, memory_order_relaxed);
+}
+
+size_t av_cpu_max_align(void)
+{
+#if ARCH_MIPS
+ return ff_get_cpu_max_align_mips();
+#elif ARCH_AARCH64
+ return ff_get_cpu_max_align_aarch64();
+#elif ARCH_ARM
+ return ff_get_cpu_max_align_arm();
+#elif ARCH_PPC
+ return ff_get_cpu_max_align_ppc();
+#elif ARCH_X86
+ return ff_get_cpu_max_align_x86();
+#elif ARCH_LOONGARCH
+ return ff_get_cpu_max_align_loongarch();
+#endif
+
+ return 8;
+}
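The functions above are typically combined as in the sketch below to restrict FFmpeg to a reduced instruction set (roughly the pattern behind ffmpeg's -cpuflags handling); the wrapper name is an assumption for illustration.

#include "libavutil/cpu.h"

/* Parse a caps string such as "sse2" or "k8" and force the resulting flags. */
static int limit_cpu_caps(const char *caps)
{
    unsigned flags = av_get_cpu_flags();
    int ret = av_parse_cpu_caps(&flags, caps);
    if (ret < 0)
        return ret;
    av_force_cpu_flags(flags);
    return 0;
}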
diff --git a/media/ffvpx/libavutil/cpu.h b/media/ffvpx/libavutil/cpu.h
new file mode 100644
index 0000000000..8fa5ea9199
--- /dev/null
+++ b/media/ffvpx/libavutil/cpu.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_H
+#define AVUTIL_CPU_H
+
+#include <stddef.h>
+
+#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
+
+ /* lower 16 bits - CPU features */
+#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX
+#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW
+#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions
+#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions
+#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt
+#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions
+#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions
+#define AV_CPU_FLAG_SSSE3SLOW 0x4000000 ///< SSSE3 supported, but usually not faster
+#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower
+#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions
+#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions
+#define AV_CPU_FLAG_AESNI 0x80000 ///< Advanced Encryption Standard functions
+#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_AVXSLOW 0x8000000 ///< AVX supported, but slow when using YMM registers (e.g. Bulldozer)
+#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions
+#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions
+#define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction
+#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions
+#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1
+#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2
+#define AV_CPU_FLAG_AVX512 0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM registers aren't used
+#define AV_CPU_FLAG_AVX512ICL 0x200000 ///< F/CD/BW/DQ/VL/VNNI/IFMA/VBMI/VBMI2/VPOPCNTDQ/BITALG/GFNI/VAES/VPCLMULQDQ
+#define AV_CPU_FLAG_SLOW_GATHER 0x2000000 ///< CPU has slow gathers.
+
+#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
+#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06
+#define AV_CPU_FLAG_POWER8 0x0004 ///< ISA 2.07
+
+#define AV_CPU_FLAG_ARMV5TE (1 << 0)
+#define AV_CPU_FLAG_ARMV6 (1 << 1)
+#define AV_CPU_FLAG_ARMV6T2 (1 << 2)
+#define AV_CPU_FLAG_VFP (1 << 3)
+#define AV_CPU_FLAG_VFPV3 (1 << 4)
+#define AV_CPU_FLAG_NEON (1 << 5)
+#define AV_CPU_FLAG_ARMV8 (1 << 6)
+#define AV_CPU_FLAG_VFP_VM (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in various CPU implementations
+#define AV_CPU_FLAG_SETEND (1 <<16)
+
+#define AV_CPU_FLAG_MMI (1 << 0)
+#define AV_CPU_FLAG_MSA (1 << 1)
+
+// LoongArch SIMD extensions.
+#define AV_CPU_FLAG_LSX (1 << 0)
+#define AV_CPU_FLAG_LASX (1 << 1)
+
+// RISC-V extensions
+#define AV_CPU_FLAG_RVI (1 << 0) ///< I (full GPR bank)
+#define AV_CPU_FLAG_RVF (1 << 1) ///< F (single precision FP)
+#define AV_CPU_FLAG_RVD (1 << 2) ///< D (double precision FP)
+#define AV_CPU_FLAG_RVV_I32 (1 << 3) ///< Vectors of 8/16/32-bit ints
+#define AV_CPU_FLAG_RVV_F32 (1 << 4) ///< Vectors of floats
+#define AV_CPU_FLAG_RVV_I64 (1 << 5) ///< Vectors of 64-bit ints
+#define AV_CPU_FLAG_RVV_F64 (1 << 6) ///< Vectors of doubles
+#define AV_CPU_FLAG_RVB_BASIC (1 << 7) ///< Basic bit-manipulations
+
+/**
+ * Return the flags which specify extensions supported by the CPU.
+ * The returned value is affected by av_force_cpu_flags() if that was used
+ * before, so av_get_cpu_flags() can easily be used in an application to
+ * detect the enabled CPU flags.
+ */
+int av_get_cpu_flags(void);
+
+/**
+ * Disables cpu detection and forces the specified flags.
+ * -1 is a special case that disables forcing of specific flags.
+ */
+void av_force_cpu_flags(int flags);
+
+/**
+ * Parse CPU caps from a string and update the given AV_CPU_* flags based on that.
+ *
+ * @return negative on error.
+ */
+int av_parse_cpu_caps(unsigned *flags, const char *s);
+
+/**
+ * @return the number of logical CPU cores present.
+ */
+int av_cpu_count(void);
+
+/**
+ * Overrides CPU count detection and forces the specified count.
+ * Count < 1 disables forcing of a specific count.
+ */
+void av_cpu_force_count(int count);
+
+/**
+ * Get the maximum data alignment that may be required by FFmpeg.
+ *
+ * Note that this is affected by the build configuration and the CPU flags mask,
+ * so e.g. if the CPU supports AVX, but libavutil has been built with
+ * --disable-avx or the AV_CPU_FLAG_AVX flag has been disabled through
+ * av_set_cpu_flags_mask(), then this function will behave as if AVX is not
+ * present.
+ */
+size_t av_cpu_max_align(void);
+
+#endif /* AVUTIL_CPU_H */
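A short, hedged example of querying the flags declared above at run time; which bits are printed is an arbitrary choice made for illustration.

#include <stdio.h>
#include "libavutil/cpu.h"

/* Report a few capability bits plus the alignment FFmpeg may require. */
static void report_simd_caps(void)
{
    int flags = av_get_cpu_flags();
    printf("SSE2: %s, AVX2: %s, max align: %zu bytes\n",
           flags & AV_CPU_FLAG_SSE2 ? "yes" : "no",
           flags & AV_CPU_FLAG_AVX2 ? "yes" : "no",
           av_cpu_max_align());
}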
diff --git a/media/ffvpx/libavutil/cpu_internal.h b/media/ffvpx/libavutil/cpu_internal.h
new file mode 100644
index 0000000000..634f28bac4
--- /dev/null
+++ b/media/ffvpx/libavutil/cpu_internal.h
@@ -0,0 +1,62 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_INTERNAL_H
+#define AVUTIL_CPU_INTERNAL_H
+
+#include "config.h"
+
+#include "cpu.h"
+
+#define CPUEXT_SUFFIX(flags, suffix, cpuext) \
+ (HAVE_ ## cpuext ## suffix && ((flags) & AV_CPU_FLAG_ ## cpuext))
+
+#define CPUEXT_SUFFIX_FAST2(flags, suffix, cpuext, slow_cpuext) \
+ (HAVE_ ## cpuext ## suffix && ((flags) & AV_CPU_FLAG_ ## cpuext) && \
+ !((flags) & AV_CPU_FLAG_ ## slow_cpuext ## SLOW))
+
+#define CPUEXT_SUFFIX_SLOW(flags, suffix, cpuext) \
+ (HAVE_ ## cpuext ## suffix && \
+ ((flags) & (AV_CPU_FLAG_ ## cpuext | AV_CPU_FLAG_ ## cpuext ## SLOW)))
+
+#define CPUEXT_SUFFIX_SLOW2(flags, suffix, cpuext, slow_cpuext) \
+ (HAVE_ ## cpuext ## suffix && ((flags) & AV_CPU_FLAG_ ## cpuext) && \
+ ((flags) & (AV_CPU_FLAG_ ## slow_cpuext | AV_CPU_FLAG_ ## slow_cpuext ## SLOW)))
+
+#define CPUEXT_SUFFIX_FAST(flags, suffix, cpuext) CPUEXT_SUFFIX_FAST2(flags, suffix, cpuext, cpuext)
+
+#define CPUEXT(flags, cpuext) CPUEXT_SUFFIX(flags, , cpuext)
+#define CPUEXT_FAST(flags, cpuext) CPUEXT_SUFFIX_FAST(flags, , cpuext)
+#define CPUEXT_SLOW(flags, cpuext) CPUEXT_SUFFIX_SLOW(flags, , cpuext)
+
+int ff_get_cpu_flags_mips(void);
+int ff_get_cpu_flags_aarch64(void);
+int ff_get_cpu_flags_arm(void);
+int ff_get_cpu_flags_ppc(void);
+int ff_get_cpu_flags_riscv(void);
+int ff_get_cpu_flags_x86(void);
+int ff_get_cpu_flags_loongarch(void);
+
+size_t ff_get_cpu_max_align_mips(void);
+size_t ff_get_cpu_max_align_aarch64(void);
+size_t ff_get_cpu_max_align_arm(void);
+size_t ff_get_cpu_max_align_ppc(void);
+size_t ff_get_cpu_max_align_x86(void);
+size_t ff_get_cpu_max_align_loongarch(void);
+
+#endif /* AVUTIL_CPU_INTERNAL_H */
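For reference, CPUEXT(flags, AVX2) from the internal header above expands to (HAVE_AVX2 && ((flags) & AV_CPU_FLAG_AVX2)), i.e. a build-time gate combined with a run-time check. A hedged sketch follows; this header is FFmpeg-internal and the helper name is illustrative.

#include "libavutil/cpu.h"
#include "libavutil/cpu_internal.h"

/* True only when AVX2 was enabled at build time *and* detected at run time. */
static int have_avx2_now(void)
{
    return CPUEXT(av_get_cpu_flags(), AVX2);
}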
diff --git a/media/ffvpx/libavutil/crc.c b/media/ffvpx/libavutil/crc.c
new file mode 100644
index 0000000000..703b56f4e0
--- /dev/null
+++ b/media/ffvpx/libavutil/crc.c
@@ -0,0 +1,415 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "thread.h"
+#include "avassert.h"
+#include "bswap.h"
+#include "crc.h"
+#include "error.h"
+
+#if CONFIG_HARDCODED_TABLES
+static const AVCRC av_crc_table[AV_CRC_MAX][257] = {
+ [AV_CRC_8_ATM] = {
+ 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15, 0x38, 0x3F, 0x36, 0x31,
+ 0x24, 0x23, 0x2A, 0x2D, 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
+ 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D, 0xE0, 0xE7, 0xEE, 0xE9,
+ 0xFC, 0xFB, 0xF2, 0xF5, 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
+ 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85, 0xA8, 0xAF, 0xA6, 0xA1,
+ 0xB4, 0xB3, 0xBA, 0xBD, 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
+ 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA, 0xB7, 0xB0, 0xB9, 0xBE,
+ 0xAB, 0xAC, 0xA5, 0xA2, 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
+ 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32, 0x1F, 0x18, 0x11, 0x16,
+ 0x03, 0x04, 0x0D, 0x0A, 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
+ 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A, 0x89, 0x8E, 0x87, 0x80,
+ 0x95, 0x92, 0x9B, 0x9C, 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
+ 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC, 0xC1, 0xC6, 0xCF, 0xC8,
+ 0xDD, 0xDA, 0xD3, 0xD4, 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
+ 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44, 0x19, 0x1E, 0x17, 0x10,
+ 0x05, 0x02, 0x0B, 0x0C, 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
+ 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B, 0x76, 0x71, 0x78, 0x7F,
+ 0x6A, 0x6D, 0x64, 0x63, 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
+ 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13, 0xAE, 0xA9, 0xA0, 0xA7,
+ 0xB2, 0xB5, 0xBC, 0xBB, 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
+ 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB, 0xE6, 0xE1, 0xE8, 0xEF,
+ 0xFA, 0xFD, 0xF4, 0xF3, 0x01
+ },
+ [AV_CRC_8_EBU] = {
+ 0x00, 0x1D, 0x3A, 0x27, 0x74, 0x69, 0x4E, 0x53, 0xE8, 0xF5, 0xD2, 0xCF,
+ 0x9C, 0x81, 0xA6, 0xBB, 0xCD, 0xD0, 0xF7, 0xEA, 0xB9, 0xA4, 0x83, 0x9E,
+ 0x25, 0x38, 0x1F, 0x02, 0x51, 0x4C, 0x6B, 0x76, 0x87, 0x9A, 0xBD, 0xA0,
+ 0xF3, 0xEE, 0xC9, 0xD4, 0x6F, 0x72, 0x55, 0x48, 0x1B, 0x06, 0x21, 0x3C,
+ 0x4A, 0x57, 0x70, 0x6D, 0x3E, 0x23, 0x04, 0x19, 0xA2, 0xBF, 0x98, 0x85,
+ 0xD6, 0xCB, 0xEC, 0xF1, 0x13, 0x0E, 0x29, 0x34, 0x67, 0x7A, 0x5D, 0x40,
+ 0xFB, 0xE6, 0xC1, 0xDC, 0x8F, 0x92, 0xB5, 0xA8, 0xDE, 0xC3, 0xE4, 0xF9,
+ 0xAA, 0xB7, 0x90, 0x8D, 0x36, 0x2B, 0x0C, 0x11, 0x42, 0x5F, 0x78, 0x65,
+ 0x94, 0x89, 0xAE, 0xB3, 0xE0, 0xFD, 0xDA, 0xC7, 0x7C, 0x61, 0x46, 0x5B,
+ 0x08, 0x15, 0x32, 0x2F, 0x59, 0x44, 0x63, 0x7E, 0x2D, 0x30, 0x17, 0x0A,
+ 0xB1, 0xAC, 0x8B, 0x96, 0xC5, 0xD8, 0xFF, 0xE2, 0x26, 0x3B, 0x1C, 0x01,
+ 0x52, 0x4F, 0x68, 0x75, 0xCE, 0xD3, 0xF4, 0xE9, 0xBA, 0xA7, 0x80, 0x9D,
+ 0xEB, 0xF6, 0xD1, 0xCC, 0x9F, 0x82, 0xA5, 0xB8, 0x03, 0x1E, 0x39, 0x24,
+ 0x77, 0x6A, 0x4D, 0x50, 0xA1, 0xBC, 0x9B, 0x86, 0xD5, 0xC8, 0xEF, 0xF2,
+ 0x49, 0x54, 0x73, 0x6E, 0x3D, 0x20, 0x07, 0x1A, 0x6C, 0x71, 0x56, 0x4B,
+ 0x18, 0x05, 0x22, 0x3F, 0x84, 0x99, 0xBE, 0xA3, 0xF0, 0xED, 0xCA, 0xD7,
+ 0x35, 0x28, 0x0F, 0x12, 0x41, 0x5C, 0x7B, 0x66, 0xDD, 0xC0, 0xE7, 0xFA,
+ 0xA9, 0xB4, 0x93, 0x8E, 0xF8, 0xE5, 0xC2, 0xDF, 0x8C, 0x91, 0xB6, 0xAB,
+ 0x10, 0x0D, 0x2A, 0x37, 0x64, 0x79, 0x5E, 0x43, 0xB2, 0xAF, 0x88, 0x95,
+ 0xC6, 0xDB, 0xFC, 0xE1, 0x5A, 0x47, 0x60, 0x7D, 0x2E, 0x33, 0x14, 0x09,
+ 0x7F, 0x62, 0x45, 0x58, 0x0B, 0x16, 0x31, 0x2C, 0x97, 0x8A, 0xAD, 0xB0,
+ 0xE3, 0xFE, 0xD9, 0xC4, 0x01
+ },
+ [AV_CRC_16_ANSI] = {
+ 0x0000, 0x0580, 0x0F80, 0x0A00, 0x1B80, 0x1E00, 0x1400, 0x1180,
+ 0x3380, 0x3600, 0x3C00, 0x3980, 0x2800, 0x2D80, 0x2780, 0x2200,
+ 0x6380, 0x6600, 0x6C00, 0x6980, 0x7800, 0x7D80, 0x7780, 0x7200,
+ 0x5000, 0x5580, 0x5F80, 0x5A00, 0x4B80, 0x4E00, 0x4400, 0x4180,
+ 0xC380, 0xC600, 0xCC00, 0xC980, 0xD800, 0xDD80, 0xD780, 0xD200,
+ 0xF000, 0xF580, 0xFF80, 0xFA00, 0xEB80, 0xEE00, 0xE400, 0xE180,
+ 0xA000, 0xA580, 0xAF80, 0xAA00, 0xBB80, 0xBE00, 0xB400, 0xB180,
+ 0x9380, 0x9600, 0x9C00, 0x9980, 0x8800, 0x8D80, 0x8780, 0x8200,
+ 0x8381, 0x8601, 0x8C01, 0x8981, 0x9801, 0x9D81, 0x9781, 0x9201,
+ 0xB001, 0xB581, 0xBF81, 0xBA01, 0xAB81, 0xAE01, 0xA401, 0xA181,
+ 0xE001, 0xE581, 0xEF81, 0xEA01, 0xFB81, 0xFE01, 0xF401, 0xF181,
+ 0xD381, 0xD601, 0xDC01, 0xD981, 0xC801, 0xCD81, 0xC781, 0xC201,
+ 0x4001, 0x4581, 0x4F81, 0x4A01, 0x5B81, 0x5E01, 0x5401, 0x5181,
+ 0x7381, 0x7601, 0x7C01, 0x7981, 0x6801, 0x6D81, 0x6781, 0x6201,
+ 0x2381, 0x2601, 0x2C01, 0x2981, 0x3801, 0x3D81, 0x3781, 0x3201,
+ 0x1001, 0x1581, 0x1F81, 0x1A01, 0x0B81, 0x0E01, 0x0401, 0x0181,
+ 0x0383, 0x0603, 0x0C03, 0x0983, 0x1803, 0x1D83, 0x1783, 0x1203,
+ 0x3003, 0x3583, 0x3F83, 0x3A03, 0x2B83, 0x2E03, 0x2403, 0x2183,
+ 0x6003, 0x6583, 0x6F83, 0x6A03, 0x7B83, 0x7E03, 0x7403, 0x7183,
+ 0x5383, 0x5603, 0x5C03, 0x5983, 0x4803, 0x4D83, 0x4783, 0x4203,
+ 0xC003, 0xC583, 0xCF83, 0xCA03, 0xDB83, 0xDE03, 0xD403, 0xD183,
+ 0xF383, 0xF603, 0xFC03, 0xF983, 0xE803, 0xED83, 0xE783, 0xE203,
+ 0xA383, 0xA603, 0xAC03, 0xA983, 0xB803, 0xBD83, 0xB783, 0xB203,
+ 0x9003, 0x9583, 0x9F83, 0x9A03, 0x8B83, 0x8E03, 0x8403, 0x8183,
+ 0x8002, 0x8582, 0x8F82, 0x8A02, 0x9B82, 0x9E02, 0x9402, 0x9182,
+ 0xB382, 0xB602, 0xBC02, 0xB982, 0xA802, 0xAD82, 0xA782, 0xA202,
+ 0xE382, 0xE602, 0xEC02, 0xE982, 0xF802, 0xFD82, 0xF782, 0xF202,
+ 0xD002, 0xD582, 0xDF82, 0xDA02, 0xCB82, 0xCE02, 0xC402, 0xC182,
+ 0x4382, 0x4602, 0x4C02, 0x4982, 0x5802, 0x5D82, 0x5782, 0x5202,
+ 0x7002, 0x7582, 0x7F82, 0x7A02, 0x6B82, 0x6E02, 0x6402, 0x6182,
+ 0x2002, 0x2582, 0x2F82, 0x2A02, 0x3B82, 0x3E02, 0x3402, 0x3182,
+ 0x1382, 0x1602, 0x1C02, 0x1982, 0x0802, 0x0D82, 0x0782, 0x0202,
+ 0x0001
+ },
+ [AV_CRC_16_CCITT] = {
+ 0x0000, 0x2110, 0x4220, 0x6330, 0x8440, 0xA550, 0xC660, 0xE770,
+ 0x0881, 0x2991, 0x4AA1, 0x6BB1, 0x8CC1, 0xADD1, 0xCEE1, 0xEFF1,
+ 0x3112, 0x1002, 0x7332, 0x5222, 0xB552, 0x9442, 0xF772, 0xD662,
+ 0x3993, 0x1883, 0x7BB3, 0x5AA3, 0xBDD3, 0x9CC3, 0xFFF3, 0xDEE3,
+ 0x6224, 0x4334, 0x2004, 0x0114, 0xE664, 0xC774, 0xA444, 0x8554,
+ 0x6AA5, 0x4BB5, 0x2885, 0x0995, 0xEEE5, 0xCFF5, 0xACC5, 0x8DD5,
+ 0x5336, 0x7226, 0x1116, 0x3006, 0xD776, 0xF666, 0x9556, 0xB446,
+ 0x5BB7, 0x7AA7, 0x1997, 0x3887, 0xDFF7, 0xFEE7, 0x9DD7, 0xBCC7,
+ 0xC448, 0xE558, 0x8668, 0xA778, 0x4008, 0x6118, 0x0228, 0x2338,
+ 0xCCC9, 0xEDD9, 0x8EE9, 0xAFF9, 0x4889, 0x6999, 0x0AA9, 0x2BB9,
+ 0xF55A, 0xD44A, 0xB77A, 0x966A, 0x711A, 0x500A, 0x333A, 0x122A,
+ 0xFDDB, 0xDCCB, 0xBFFB, 0x9EEB, 0x799B, 0x588B, 0x3BBB, 0x1AAB,
+ 0xA66C, 0x877C, 0xE44C, 0xC55C, 0x222C, 0x033C, 0x600C, 0x411C,
+ 0xAEED, 0x8FFD, 0xECCD, 0xCDDD, 0x2AAD, 0x0BBD, 0x688D, 0x499D,
+ 0x977E, 0xB66E, 0xD55E, 0xF44E, 0x133E, 0x322E, 0x511E, 0x700E,
+ 0x9FFF, 0xBEEF, 0xDDDF, 0xFCCF, 0x1BBF, 0x3AAF, 0x599F, 0x788F,
+ 0x8891, 0xA981, 0xCAB1, 0xEBA1, 0x0CD1, 0x2DC1, 0x4EF1, 0x6FE1,
+ 0x8010, 0xA100, 0xC230, 0xE320, 0x0450, 0x2540, 0x4670, 0x6760,
+ 0xB983, 0x9893, 0xFBA3, 0xDAB3, 0x3DC3, 0x1CD3, 0x7FE3, 0x5EF3,
+ 0xB102, 0x9012, 0xF322, 0xD232, 0x3542, 0x1452, 0x7762, 0x5672,
+ 0xEAB5, 0xCBA5, 0xA895, 0x8985, 0x6EF5, 0x4FE5, 0x2CD5, 0x0DC5,
+ 0xE234, 0xC324, 0xA014, 0x8104, 0x6674, 0x4764, 0x2454, 0x0544,
+ 0xDBA7, 0xFAB7, 0x9987, 0xB897, 0x5FE7, 0x7EF7, 0x1DC7, 0x3CD7,
+ 0xD326, 0xF236, 0x9106, 0xB016, 0x5766, 0x7676, 0x1546, 0x3456,
+ 0x4CD9, 0x6DC9, 0x0EF9, 0x2FE9, 0xC899, 0xE989, 0x8AB9, 0xABA9,
+ 0x4458, 0x6548, 0x0678, 0x2768, 0xC018, 0xE108, 0x8238, 0xA328,
+ 0x7DCB, 0x5CDB, 0x3FEB, 0x1EFB, 0xF98B, 0xD89B, 0xBBAB, 0x9ABB,
+ 0x754A, 0x545A, 0x376A, 0x167A, 0xF10A, 0xD01A, 0xB32A, 0x923A,
+ 0x2EFD, 0x0FED, 0x6CDD, 0x4DCD, 0xAABD, 0x8BAD, 0xE89D, 0xC98D,
+ 0x267C, 0x076C, 0x645C, 0x454C, 0xA23C, 0x832C, 0xE01C, 0xC10C,
+ 0x1FEF, 0x3EFF, 0x5DCF, 0x7CDF, 0x9BAF, 0xBABF, 0xD98F, 0xF89F,
+ 0x176E, 0x367E, 0x554E, 0x745E, 0x932E, 0xB23E, 0xD10E, 0xF01E,
+ 0x0001
+ },
+ [AV_CRC_24_IEEE] = {
+ 0x000000, 0xFB4C86, 0x0DD58A, 0xF6990C, 0xE1E693, 0x1AAA15, 0xEC3319,
+ 0x177F9F, 0x3981A1, 0xC2CD27, 0x34542B, 0xCF18AD, 0xD86732, 0x232BB4,
+ 0xD5B2B8, 0x2EFE3E, 0x894EC5, 0x720243, 0x849B4F, 0x7FD7C9, 0x68A856,
+ 0x93E4D0, 0x657DDC, 0x9E315A, 0xB0CF64, 0x4B83E2, 0xBD1AEE, 0x465668,
+ 0x5129F7, 0xAA6571, 0x5CFC7D, 0xA7B0FB, 0xE9D10C, 0x129D8A, 0xE40486,
+ 0x1F4800, 0x08379F, 0xF37B19, 0x05E215, 0xFEAE93, 0xD050AD, 0x2B1C2B,
+ 0xDD8527, 0x26C9A1, 0x31B63E, 0xCAFAB8, 0x3C63B4, 0xC72F32, 0x609FC9,
+ 0x9BD34F, 0x6D4A43, 0x9606C5, 0x81795A, 0x7A35DC, 0x8CACD0, 0x77E056,
+ 0x591E68, 0xA252EE, 0x54CBE2, 0xAF8764, 0xB8F8FB, 0x43B47D, 0xB52D71,
+ 0x4E61F7, 0xD2A319, 0x29EF9F, 0xDF7693, 0x243A15, 0x33458A, 0xC8090C,
+ 0x3E9000, 0xC5DC86, 0xEB22B8, 0x106E3E, 0xE6F732, 0x1DBBB4, 0x0AC42B,
+ 0xF188AD, 0x0711A1, 0xFC5D27, 0x5BEDDC, 0xA0A15A, 0x563856, 0xAD74D0,
+ 0xBA0B4F, 0x4147C9, 0xB7DEC5, 0x4C9243, 0x626C7D, 0x9920FB, 0x6FB9F7,
+ 0x94F571, 0x838AEE, 0x78C668, 0x8E5F64, 0x7513E2, 0x3B7215, 0xC03E93,
+ 0x36A79F, 0xCDEB19, 0xDA9486, 0x21D800, 0xD7410C, 0x2C0D8A, 0x02F3B4,
+ 0xF9BF32, 0x0F263E, 0xF46AB8, 0xE31527, 0x1859A1, 0xEEC0AD, 0x158C2B,
+ 0xB23CD0, 0x497056, 0xBFE95A, 0x44A5DC, 0x53DA43, 0xA896C5, 0x5E0FC9,
+ 0xA5434F, 0x8BBD71, 0x70F1F7, 0x8668FB, 0x7D247D, 0x6A5BE2, 0x911764,
+ 0x678E68, 0x9CC2EE, 0xA44733, 0x5F0BB5, 0xA992B9, 0x52DE3F, 0x45A1A0,
+ 0xBEED26, 0x48742A, 0xB338AC, 0x9DC692, 0x668A14, 0x901318, 0x6B5F9E,
+ 0x7C2001, 0x876C87, 0x71F58B, 0x8AB90D, 0x2D09F6, 0xD64570, 0x20DC7C,
+ 0xDB90FA, 0xCCEF65, 0x37A3E3, 0xC13AEF, 0x3A7669, 0x148857, 0xEFC4D1,
+ 0x195DDD, 0xE2115B, 0xF56EC4, 0x0E2242, 0xF8BB4E, 0x03F7C8, 0x4D963F,
+ 0xB6DAB9, 0x4043B5, 0xBB0F33, 0xAC70AC, 0x573C2A, 0xA1A526, 0x5AE9A0,
+ 0x74179E, 0x8F5B18, 0x79C214, 0x828E92, 0x95F10D, 0x6EBD8B, 0x982487,
+ 0x636801, 0xC4D8FA, 0x3F947C, 0xC90D70, 0x3241F6, 0x253E69, 0xDE72EF,
+ 0x28EBE3, 0xD3A765, 0xFD595B, 0x0615DD, 0xF08CD1, 0x0BC057, 0x1CBFC8,
+ 0xE7F34E, 0x116A42, 0xEA26C4, 0x76E42A, 0x8DA8AC, 0x7B31A0, 0x807D26,
+ 0x9702B9, 0x6C4E3F, 0x9AD733, 0x619BB5, 0x4F658B, 0xB4290D, 0x42B001,
+ 0xB9FC87, 0xAE8318, 0x55CF9E, 0xA35692, 0x581A14, 0xFFAAEF, 0x04E669,
+ 0xF27F65, 0x0933E3, 0x1E4C7C, 0xE500FA, 0x1399F6, 0xE8D570, 0xC62B4E,
+ 0x3D67C8, 0xCBFEC4, 0x30B242, 0x27CDDD, 0xDC815B, 0x2A1857, 0xD154D1,
+ 0x9F3526, 0x6479A0, 0x92E0AC, 0x69AC2A, 0x7ED3B5, 0x859F33, 0x73063F,
+ 0x884AB9, 0xA6B487, 0x5DF801, 0xAB610D, 0x502D8B, 0x475214, 0xBC1E92,
+ 0x4A879E, 0xB1CB18, 0x167BE3, 0xED3765, 0x1BAE69, 0xE0E2EF, 0xF79D70,
+ 0x0CD1F6, 0xFA48FA, 0x01047C, 0x2FFA42, 0xD4B6C4, 0x222FC8, 0xD9634E,
+ 0xCE1CD1, 0x355057, 0xC3C95B, 0x3885DD, 0x000001,
+ },
+ [AV_CRC_32_IEEE] = {
+ 0x00000000, 0xB71DC104, 0x6E3B8209, 0xD926430D, 0xDC760413, 0x6B6BC517,
+ 0xB24D861A, 0x0550471E, 0xB8ED0826, 0x0FF0C922, 0xD6D68A2F, 0x61CB4B2B,
+ 0x649B0C35, 0xD386CD31, 0x0AA08E3C, 0xBDBD4F38, 0x70DB114C, 0xC7C6D048,
+ 0x1EE09345, 0xA9FD5241, 0xACAD155F, 0x1BB0D45B, 0xC2969756, 0x758B5652,
+ 0xC836196A, 0x7F2BD86E, 0xA60D9B63, 0x11105A67, 0x14401D79, 0xA35DDC7D,
+ 0x7A7B9F70, 0xCD665E74, 0xE0B62398, 0x57ABE29C, 0x8E8DA191, 0x39906095,
+ 0x3CC0278B, 0x8BDDE68F, 0x52FBA582, 0xE5E66486, 0x585B2BBE, 0xEF46EABA,
+ 0x3660A9B7, 0x817D68B3, 0x842D2FAD, 0x3330EEA9, 0xEA16ADA4, 0x5D0B6CA0,
+ 0x906D32D4, 0x2770F3D0, 0xFE56B0DD, 0x494B71D9, 0x4C1B36C7, 0xFB06F7C3,
+ 0x2220B4CE, 0x953D75CA, 0x28803AF2, 0x9F9DFBF6, 0x46BBB8FB, 0xF1A679FF,
+ 0xF4F63EE1, 0x43EBFFE5, 0x9ACDBCE8, 0x2DD07DEC, 0x77708634, 0xC06D4730,
+ 0x194B043D, 0xAE56C539, 0xAB068227, 0x1C1B4323, 0xC53D002E, 0x7220C12A,
+ 0xCF9D8E12, 0x78804F16, 0xA1A60C1B, 0x16BBCD1F, 0x13EB8A01, 0xA4F64B05,
+ 0x7DD00808, 0xCACDC90C, 0x07AB9778, 0xB0B6567C, 0x69901571, 0xDE8DD475,
+ 0xDBDD936B, 0x6CC0526F, 0xB5E61162, 0x02FBD066, 0xBF469F5E, 0x085B5E5A,
+ 0xD17D1D57, 0x6660DC53, 0x63309B4D, 0xD42D5A49, 0x0D0B1944, 0xBA16D840,
+ 0x97C6A5AC, 0x20DB64A8, 0xF9FD27A5, 0x4EE0E6A1, 0x4BB0A1BF, 0xFCAD60BB,
+ 0x258B23B6, 0x9296E2B2, 0x2F2BAD8A, 0x98366C8E, 0x41102F83, 0xF60DEE87,
+ 0xF35DA999, 0x4440689D, 0x9D662B90, 0x2A7BEA94, 0xE71DB4E0, 0x500075E4,
+ 0x892636E9, 0x3E3BF7ED, 0x3B6BB0F3, 0x8C7671F7, 0x555032FA, 0xE24DF3FE,
+ 0x5FF0BCC6, 0xE8ED7DC2, 0x31CB3ECF, 0x86D6FFCB, 0x8386B8D5, 0x349B79D1,
+ 0xEDBD3ADC, 0x5AA0FBD8, 0xEEE00C69, 0x59FDCD6D, 0x80DB8E60, 0x37C64F64,
+ 0x3296087A, 0x858BC97E, 0x5CAD8A73, 0xEBB04B77, 0x560D044F, 0xE110C54B,
+ 0x38368646, 0x8F2B4742, 0x8A7B005C, 0x3D66C158, 0xE4408255, 0x535D4351,
+ 0x9E3B1D25, 0x2926DC21, 0xF0009F2C, 0x471D5E28, 0x424D1936, 0xF550D832,
+ 0x2C769B3F, 0x9B6B5A3B, 0x26D61503, 0x91CBD407, 0x48ED970A, 0xFFF0560E,
+ 0xFAA01110, 0x4DBDD014, 0x949B9319, 0x2386521D, 0x0E562FF1, 0xB94BEEF5,
+ 0x606DADF8, 0xD7706CFC, 0xD2202BE2, 0x653DEAE6, 0xBC1BA9EB, 0x0B0668EF,
+ 0xB6BB27D7, 0x01A6E6D3, 0xD880A5DE, 0x6F9D64DA, 0x6ACD23C4, 0xDDD0E2C0,
+ 0x04F6A1CD, 0xB3EB60C9, 0x7E8D3EBD, 0xC990FFB9, 0x10B6BCB4, 0xA7AB7DB0,
+ 0xA2FB3AAE, 0x15E6FBAA, 0xCCC0B8A7, 0x7BDD79A3, 0xC660369B, 0x717DF79F,
+ 0xA85BB492, 0x1F467596, 0x1A163288, 0xAD0BF38C, 0x742DB081, 0xC3307185,
+ 0x99908A5D, 0x2E8D4B59, 0xF7AB0854, 0x40B6C950, 0x45E68E4E, 0xF2FB4F4A,
+ 0x2BDD0C47, 0x9CC0CD43, 0x217D827B, 0x9660437F, 0x4F460072, 0xF85BC176,
+ 0xFD0B8668, 0x4A16476C, 0x93300461, 0x242DC565, 0xE94B9B11, 0x5E565A15,
+ 0x87701918, 0x306DD81C, 0x353D9F02, 0x82205E06, 0x5B061D0B, 0xEC1BDC0F,
+ 0x51A69337, 0xE6BB5233, 0x3F9D113E, 0x8880D03A, 0x8DD09724, 0x3ACD5620,
+ 0xE3EB152D, 0x54F6D429, 0x7926A9C5, 0xCE3B68C1, 0x171D2BCC, 0xA000EAC8,
+ 0xA550ADD6, 0x124D6CD2, 0xCB6B2FDF, 0x7C76EEDB, 0xC1CBA1E3, 0x76D660E7,
+ 0xAFF023EA, 0x18EDE2EE, 0x1DBDA5F0, 0xAAA064F4, 0x738627F9, 0xC49BE6FD,
+ 0x09FDB889, 0xBEE0798D, 0x67C63A80, 0xD0DBFB84, 0xD58BBC9A, 0x62967D9E,
+ 0xBBB03E93, 0x0CADFF97, 0xB110B0AF, 0x060D71AB, 0xDF2B32A6, 0x6836F3A2,
+ 0x6D66B4BC, 0xDA7B75B8, 0x035D36B5, 0xB440F7B1, 0x00000001
+ },
+ [AV_CRC_32_IEEE_LE] = {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F,
+ 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2,
+ 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9,
+ 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C,
+ 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423,
+ 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106,
+ 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D,
+ 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950,
+ 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7,
+ 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA,
+ 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81,
+ 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84,
+ 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB,
+ 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E,
+ 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55,
+ 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28,
+ 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F,
+ 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242,
+ 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69,
+ 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC,
+ 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693,
+ 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D, 0x00000001
+ },
+ [AV_CRC_16_ANSI_LE] = {
+ 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
+ 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
+ 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
+ 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
+ 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
+ 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
+ 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
+ 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
+ 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
+ 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
+ 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
+ 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
+ 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
+ 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
+ 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
+ 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
+ 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
+ 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
+ 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
+ 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
+ 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
+ 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
+ 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
+ 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
+ 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
+ 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
+ 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
+ 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
+ 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
+ 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
+ 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
+ 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040,
+ 0x0001
+ },
+};
+#else
+#if CONFIG_SMALL
+#define CRC_TABLE_SIZE 257
+#else
+#define CRC_TABLE_SIZE 1024
+#endif
+static AVCRC av_crc_table[AV_CRC_MAX][CRC_TABLE_SIZE];
+
+#define DECLARE_CRC_INIT_TABLE_ONCE(id, le, bits, poly) \
+static AVOnce id ## _once_control = AV_ONCE_INIT; \
+static void id ## _init_table_once(void) \
+{ \
+ av_assert0(av_crc_init(av_crc_table[id], le, bits, poly, sizeof(av_crc_table[id])) >= 0); \
+}
+
+#define CRC_INIT_TABLE_ONCE(id) ff_thread_once(&id ## _once_control, id ## _init_table_once)
+
+DECLARE_CRC_INIT_TABLE_ONCE(AV_CRC_8_ATM, 0, 8, 0x07)
+DECLARE_CRC_INIT_TABLE_ONCE(AV_CRC_8_EBU, 0, 8, 0x1D)
+DECLARE_CRC_INIT_TABLE_ONCE(AV_CRC_16_ANSI, 0, 16, 0x8005)
+DECLARE_CRC_INIT_TABLE_ONCE(AV_CRC_16_CCITT, 0, 16, 0x1021)
+DECLARE_CRC_INIT_TABLE_ONCE(AV_CRC_24_IEEE, 0, 24, 0x864CFB)
+DECLARE_CRC_INIT_TABLE_ONCE(AV_CRC_32_IEEE, 0, 32, 0x04C11DB7)
+DECLARE_CRC_INIT_TABLE_ONCE(AV_CRC_32_IEEE_LE, 1, 32, 0xEDB88320)
+DECLARE_CRC_INIT_TABLE_ONCE(AV_CRC_16_ANSI_LE, 1, 16, 0xA001)
+#endif
+
+int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size)
+{
+ unsigned i, j;
+ uint32_t c;
+
+ if (bits < 8 || bits > 32 || poly >= (1LL << bits))
+ return AVERROR(EINVAL);
+ if (ctx_size != sizeof(AVCRC) * 257 && ctx_size != sizeof(AVCRC) * 1024)
+ return AVERROR(EINVAL);
+
+ for (i = 0; i < 256; i++) {
+ if (le) {
+ for (c = i, j = 0; j < 8; j++)
+ c = (c >> 1) ^ (poly & (-(c & 1)));
+ ctx[i] = c;
+ } else {
+ for (c = i << 24, j = 0; j < 8; j++)
+ c = (c << 1) ^ ((poly << (32 - bits)) & (((int32_t) c) >> 31));
+ ctx[i] = av_bswap32(c);
+ }
+ }
+ ctx[256] = 1;
+#if !CONFIG_SMALL
+ if (ctx_size >= sizeof(AVCRC) * 1024)
+ for (i = 0; i < 256; i++)
+ for (j = 0; j < 3; j++)
+ ctx[256 * (j + 1) + i] =
+ (ctx[256 * j + i] >> 8) ^ ctx[ctx[256 * j + i] & 0xFF];
+#endif
+
+ return 0;
+}
+
+const AVCRC *av_crc_get_table(AVCRCId crc_id)
+{
+#if !CONFIG_HARDCODED_TABLES
+ switch (crc_id) {
+ case AV_CRC_8_ATM: CRC_INIT_TABLE_ONCE(AV_CRC_8_ATM); break;
+ case AV_CRC_8_EBU: CRC_INIT_TABLE_ONCE(AV_CRC_8_EBU); break;
+ case AV_CRC_16_ANSI: CRC_INIT_TABLE_ONCE(AV_CRC_16_ANSI); break;
+ case AV_CRC_16_CCITT: CRC_INIT_TABLE_ONCE(AV_CRC_16_CCITT); break;
+ case AV_CRC_24_IEEE: CRC_INIT_TABLE_ONCE(AV_CRC_24_IEEE); break;
+ case AV_CRC_32_IEEE: CRC_INIT_TABLE_ONCE(AV_CRC_32_IEEE); break;
+ case AV_CRC_32_IEEE_LE: CRC_INIT_TABLE_ONCE(AV_CRC_32_IEEE_LE); break;
+ case AV_CRC_16_ANSI_LE: CRC_INIT_TABLE_ONCE(AV_CRC_16_ANSI_LE); break;
+ default: av_assert0(0);
+ }
+#endif
+ return av_crc_table[crc_id];
+}
+
+uint32_t av_crc(const AVCRC *ctx, uint32_t crc,
+ const uint8_t *buffer, size_t length)
+{
+ const uint8_t *end = buffer + length;
+
+#if !CONFIG_SMALL
+ if (!ctx[256]) {
+ while (((intptr_t) buffer & 3) && buffer < end)
+ crc = ctx[((uint8_t) crc) ^ *buffer++] ^ (crc >> 8);
+
+ while (buffer < end - 3) {
+ crc ^= av_le2ne32(*(const uint32_t *) buffer); buffer += 4;
+ crc = ctx[3 * 256 + ( crc & 0xFF)] ^
+ ctx[2 * 256 + ((crc >> 8 ) & 0xFF)] ^
+ ctx[1 * 256 + ((crc >> 16) & 0xFF)] ^
+ ctx[0 * 256 + ((crc >> 24) )];
+ }
+ }
+#endif
+ while (buffer < end)
+ crc = ctx[((uint8_t) crc) ^ *buffer++] ^ (crc >> 8);
+
+ return crc;
+}
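
Editorial note (not part of the patch): the fast path in av_crc() above consumes four input bytes per iteration once the buffer is word-aligned, using the three extra 256-entry sub-tables built by av_crc_init() (or taken from the hardcoded tables when CONFIG_HARDCODED_TABLES is set). A minimal usage sketch follows; the include path and the zlib-style initial/final XOR are assumptions on the caller's side, not mandated by this file.

    #include <stdio.h>
    #include <string.h>

    #include "libavutil/crc.h"

    static uint32_t crc32_of(const uint8_t *data, size_t len)
    {
        /* Table is built lazily on first use (or hardcoded, see above). */
        const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE_LE);

        /* Start from all-ones and invert the result, which yields the
         * common zlib/PNG-style CRC-32 value. */
        return av_crc(table, 0xFFFFFFFFu, data, len) ^ 0xFFFFFFFFu;
    }

    int main(void)
    {
        const char *msg = "123456789";
        /* The standard check value for this input is 0xCBF43926. */
        printf("0x%08X\n", (unsigned)crc32_of((const uint8_t *)msg, strlen(msg)));
        return 0;
    }
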
diff --git a/media/ffvpx/libavutil/crc.h b/media/ffvpx/libavutil/crc.h
new file mode 100644
index 0000000000..7f59812a18
--- /dev/null
+++ b/media/ffvpx/libavutil/crc.h
@@ -0,0 +1,102 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_crc32
+ * Public header for CRC hash function implementation.
+ */
+
+#ifndef AVUTIL_CRC_H
+#define AVUTIL_CRC_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include "attributes.h"
+
+/**
+ * @defgroup lavu_crc32 CRC
+ * @ingroup lavu_hash
+ * CRC (Cyclic Redundancy Check) hash function implementation.
+ *
+ * This module supports numerous CRC polynomials, in addition to the most
+ * widely used CRC-32-IEEE. See @ref AVCRCId for a list of available
+ * polynomials.
+ *
+ * @{
+ */
+
+typedef uint32_t AVCRC;
+
+typedef enum {
+ AV_CRC_8_ATM,
+ AV_CRC_16_ANSI,
+ AV_CRC_16_CCITT,
+ AV_CRC_32_IEEE,
+    AV_CRC_32_IEEE_LE,  /**< reversed bit-order version of AV_CRC_32_IEEE */
+    AV_CRC_16_ANSI_LE,  /**< reversed bit-order version of AV_CRC_16_ANSI */
+ AV_CRC_24_IEEE,
+ AV_CRC_8_EBU,
+    AV_CRC_MAX,         /**< Not part of the public API! Do not use outside libavutil. */
+} AVCRCId;
+
+/**
+ * Initialize a CRC table.
+ * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024
+ * @param le If 1, the lowest bit represents the coefficient for the highest
+ * exponent of the corresponding polynomial (both for poly and
+ * actual CRC).
+ * If 0, you must swap the CRC parameter and the result of av_crc
+ * if you need the standard representation (can be simplified in
+ * most cases to e.g. bswap16):
+ * av_bswap32(crc << (32-bits))
+ * @param bits number of bits for the CRC
+ * @param poly generator polynomial without the x**bits coefficient, in the
+ * representation as specified by le
+ * @param ctx_size size of ctx in bytes
+ * @return <0 on failure
+ */
+int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);
+
+/**
+ * Get an initialized standard CRC table.
+ * @param crc_id ID of a standard CRC
+ * @return a pointer to the CRC table or NULL on failure
+ */
+const AVCRC *av_crc_get_table(AVCRCId crc_id);
+
+/**
+ * Calculate the CRC of a block.
+ * @param ctx initialized AVCRC array (see av_crc_init())
+ * @param crc CRC of previous blocks if any or initial value for CRC
+ * @param buffer buffer whose CRC to calculate
+ * @param length length of the buffer
+ * @return CRC updated with the data from the given block
+ *
+ * @see av_crc_init() "le" parameter
+ */
+uint32_t av_crc(const AVCRC *ctx, uint32_t crc,
+ const uint8_t *buffer, size_t length) av_pure;
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_CRC_H */
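
Editorial note (not part of the patch): callers that need a polynomial outside the AVCRCId list can build their own table with av_crc_init(), as documented above. A hedged sketch, mirroring the built-in CCITT parameters purely to show the call shape:

    #include "libavutil/crc.h"

    /* 1024 entries enables av_crc()'s word-at-a-time fast path; a
     * 257-entry table is the only other size av_crc_init() accepts
     * (the CONFIG_SMALL variant). */
    static AVCRC custom_table[1024];

    static int init_custom_crc(void)
    {
        /* Big-endian bit order (le = 0), 16-bit CRC,
         * CCITT generator polynomial x^16 + x^12 + x^5 + 1 -> 0x1021.
         * See the "le" parameter notes above for how to post-process
         * the av_crc() result when bits < 32 and le == 0. */
        return av_crc_init(custom_table, 0, 16, 0x1021, sizeof(custom_table));
    }
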
diff --git a/media/ffvpx/libavutil/dict.c b/media/ffvpx/libavutil/dict.c
new file mode 100644
index 0000000000..f673977a98
--- /dev/null
+++ b/media/ffvpx/libavutil/dict.c
@@ -0,0 +1,292 @@
+/*
+ * copyright (c) 2009 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+
+#include "avassert.h"
+#include "avstring.h"
+#include "dict.h"
+#include "dict_internal.h"
+#include "internal.h"
+#include "mem.h"
+#include "time_internal.h"
+#include "bprint.h"
+
+struct AVDictionary {
+ int count;
+ AVDictionaryEntry *elems;
+};
+
+int av_dict_count(const AVDictionary *m)
+{
+ return m ? m->count : 0;
+}
+
+const AVDictionaryEntry *av_dict_iterate(const AVDictionary *m,
+ const AVDictionaryEntry *prev)
+{
+ int i = 0;
+
+ if (!m)
+ return NULL;
+
+ if (prev)
+ i = prev - m->elems + 1;
+
+ av_assert2(i >= 0);
+ if (i >= m->count)
+ return NULL;
+
+ return &m->elems[i];
+}
+
+AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
+ const AVDictionaryEntry *prev, int flags)
+{
+ const AVDictionaryEntry *entry = prev;
+ unsigned int j;
+
+ if (!key)
+ return NULL;
+
+ while ((entry = av_dict_iterate(m, entry))) {
+ const char *s = entry->key;
+ if (flags & AV_DICT_MATCH_CASE)
+ for (j = 0; s[j] == key[j] && key[j]; j++)
+ ;
+ else
+ for (j = 0; av_toupper(s[j]) == av_toupper(key[j]) && key[j]; j++)
+ ;
+ if (key[j])
+ continue;
+ if (s[j] && !(flags & AV_DICT_IGNORE_SUFFIX))
+ continue;
+ return (AVDictionaryEntry *)entry;
+ }
+ return NULL;
+}
+
+int av_dict_set(AVDictionary **pm, const char *key, const char *value,
+ int flags)
+{
+ AVDictionary *m = *pm;
+ AVDictionaryEntry *tag = NULL;
+ char *copy_key = NULL, *copy_value = NULL;
+ int err;
+
+ if (flags & AV_DICT_DONT_STRDUP_VAL)
+ copy_value = (void *)value;
+ else if (value)
+ copy_value = av_strdup(value);
+ if (!key) {
+ err = AVERROR(EINVAL);
+ goto err_out;
+ }
+ if (!(flags & AV_DICT_MULTIKEY)) {
+ tag = av_dict_get(m, key, NULL, flags);
+ }
+ if (flags & AV_DICT_DONT_STRDUP_KEY)
+ copy_key = (void *)key;
+ else
+ copy_key = av_strdup(key);
+ if (!m)
+ m = *pm = av_mallocz(sizeof(*m));
+ if (!m || !copy_key || (value && !copy_value))
+ goto enomem;
+
+ if (tag) {
+ if (flags & AV_DICT_DONT_OVERWRITE) {
+ av_free(copy_key);
+ av_free(copy_value);
+ return 0;
+ }
+ if (copy_value && flags & AV_DICT_APPEND) {
+ size_t oldlen = strlen(tag->value);
+ size_t new_part_len = strlen(copy_value);
+ size_t len = oldlen + new_part_len + 1;
+ char *newval = av_realloc(tag->value, len);
+ if (!newval)
+ goto enomem;
+ memcpy(newval + oldlen, copy_value, new_part_len + 1);
+ av_freep(&copy_value);
+ copy_value = newval;
+ } else
+ av_free(tag->value);
+ av_free(tag->key);
+ *tag = m->elems[--m->count];
+ } else if (copy_value) {
+ AVDictionaryEntry *tmp = av_realloc_array(m->elems,
+ m->count + 1, sizeof(*m->elems));
+ if (!tmp)
+ goto enomem;
+ m->elems = tmp;
+ }
+ if (copy_value) {
+ m->elems[m->count].key = copy_key;
+ m->elems[m->count].value = copy_value;
+ m->count++;
+ } else {
+ if (!m->count) {
+ av_freep(&m->elems);
+ av_freep(pm);
+ }
+ av_freep(&copy_key);
+ }
+
+ return 0;
+
+enomem:
+ err = AVERROR(ENOMEM);
+err_out:
+ if (m && !m->count) {
+ av_freep(&m->elems);
+ av_freep(pm);
+ }
+ av_free(copy_key);
+ av_free(copy_value);
+ return err;
+}
+
+int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value,
+ int flags)
+{
+ char valuestr[22];
+ snprintf(valuestr, sizeof(valuestr), "%"PRId64, value);
+ flags &= ~AV_DICT_DONT_STRDUP_VAL;
+ return av_dict_set(pm, key, valuestr, flags);
+}
+
+static int parse_key_value_pair(AVDictionary **pm, const char **buf,
+ const char *key_val_sep, const char *pairs_sep,
+ int flags)
+{
+ char *key = av_get_token(buf, key_val_sep);
+ char *val = NULL;
+ int ret;
+
+ if (key && *key && strspn(*buf, key_val_sep)) {
+ (*buf)++;
+ val = av_get_token(buf, pairs_sep);
+ }
+
+ if (key && *key && val && *val)
+ ret = av_dict_set(pm, key, val, flags);
+ else
+ ret = AVERROR(EINVAL);
+
+ av_freep(&key);
+ av_freep(&val);
+
+ return ret;
+}
+
+int av_dict_parse_string(AVDictionary **pm, const char *str,
+ const char *key_val_sep, const char *pairs_sep,
+ int flags)
+{
+ int ret;
+
+ if (!str)
+ return 0;
+
+ /* ignore STRDUP flags */
+ flags &= ~(AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
+
+ while (*str) {
+ if ((ret = parse_key_value_pair(pm, &str, key_val_sep, pairs_sep, flags)) < 0)
+ return ret;
+
+ if (*str)
+ str++;
+ }
+
+ return 0;
+}
+
+void av_dict_free(AVDictionary **pm)
+{
+ AVDictionary *m = *pm;
+
+ if (m) {
+ while (m->count--) {
+ av_freep(&m->elems[m->count].key);
+ av_freep(&m->elems[m->count].value);
+ }
+ av_freep(&m->elems);
+ }
+ av_freep(pm);
+}
+
+int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
+{
+ const AVDictionaryEntry *t = NULL;
+
+ while ((t = av_dict_iterate(src, t))) {
+ int ret = av_dict_set(dst, t->key, t->value, flags);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+int av_dict_get_string(const AVDictionary *m, char **buffer,
+ const char key_val_sep, const char pairs_sep)
+{
+ const AVDictionaryEntry *t = NULL;
+ AVBPrint bprint;
+ int cnt = 0;
+ char special_chars[] = {pairs_sep, key_val_sep, '\0'};
+
+ if (!buffer || pairs_sep == '\0' || key_val_sep == '\0' || pairs_sep == key_val_sep ||
+ pairs_sep == '\\' || key_val_sep == '\\')
+ return AVERROR(EINVAL);
+
+ if (!av_dict_count(m)) {
+ *buffer = av_strdup("");
+ return *buffer ? 0 : AVERROR(ENOMEM);
+ }
+
+ av_bprint_init(&bprint, 64, AV_BPRINT_SIZE_UNLIMITED);
+ while ((t = av_dict_iterate(m, t))) {
+ if (cnt++)
+ av_bprint_append_data(&bprint, &pairs_sep, 1);
+ av_bprint_escape(&bprint, t->key, special_chars, AV_ESCAPE_MODE_BACKSLASH, 0);
+ av_bprint_append_data(&bprint, &key_val_sep, 1);
+ av_bprint_escape(&bprint, t->value, special_chars, AV_ESCAPE_MODE_BACKSLASH, 0);
+ }
+ return av_bprint_finalize(&bprint, buffer);
+}
+
+int avpriv_dict_set_timestamp(AVDictionary **dict, const char *key, int64_t timestamp)
+{
+ time_t seconds = timestamp / 1000000;
+ struct tm *ptm, tmbuf;
+ ptm = gmtime_r(&seconds, &tmbuf);
+ if (ptm) {
+ char buf[32];
+ if (!strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", ptm))
+ return AVERROR_EXTERNAL;
+ av_strlcatf(buf, sizeof(buf), ".%06dZ", (int)(timestamp % 1000000));
+ return av_dict_set(dict, key, buf, 0);
+ } else {
+ return AVERROR_EXTERNAL;
+ }
+}
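
Editorial note (not part of the patch): besides av_dict_set()/av_dict_get(), the code above can parse a whole key=value option string and serialize a dictionary back into one. A small round-trip sketch; the option string, the separators, and the mem.h include for av_free() are illustrative assumptions:

    #include <stdio.h>

    #include "libavutil/dict.h"
    #include "libavutil/mem.h"   /* av_free() */

    int main(void)
    {
        AVDictionary *d = NULL;
        char *buf = NULL;

        /* '=' separates key from value, ':' separates pairs. */
        if (av_dict_parse_string(&d, "preset=fast:threads=4", "=", ":", 0) < 0)
            return 1;

        /* av_dict_get_string() allocates the buffer; the caller frees it. */
        if (av_dict_get_string(d, &buf, '=', ':') >= 0) {
            printf("%s\n", buf);   /* prints: preset=fast:threads=4 */
            av_free(buf);
        }

        av_dict_free(&d);
        return 0;
    }
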
diff --git a/media/ffvpx/libavutil/dict.h b/media/ffvpx/libavutil/dict.h
new file mode 100644
index 0000000000..713c9e361a
--- /dev/null
+++ b/media/ffvpx/libavutil/dict.h
@@ -0,0 +1,241 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Public dictionary API.
+ * @deprecated
+ * AVDictionary is provided for compatibility with libav. It is inefficient
+ * in both its implementation and its API; it does not scale and is
+ * extremely slow with large dictionaries.
+ * New code is encouraged to use the tree container from tree.c/h where
+ * applicable, which uses AVL trees to achieve O(log n) performance.
+ */
+
+#ifndef AVUTIL_DICT_H
+#define AVUTIL_DICT_H
+
+#include <stdint.h>
+
+/**
+ * @addtogroup lavu_dict AVDictionary
+ * @ingroup lavu_data
+ *
+ * @brief Simple key:value store
+ *
+ * @{
+ * Dictionaries are used for storing key-value pairs.
+ *
+ * - To **create an AVDictionary**, simply pass an address of a NULL
+ * pointer to av_dict_set(). NULL can be used as an empty dictionary
+ * wherever a pointer to an AVDictionary is required.
+ * - To **insert an entry**, use av_dict_set().
+ * - Use av_dict_get() to **retrieve an entry**.
+ * - To **iterate over all entries**, use av_dict_iterate().
+ * - In order to **free the dictionary and all its contents**, use av_dict_free().
+ *
+ @code
+ AVDictionary *d = NULL; // "create" an empty dictionary
+ AVDictionaryEntry *t = NULL;
+
+ av_dict_set(&d, "foo", "bar", 0); // add an entry
+
+ char *k = av_strdup("key"); // if your strings are already allocated,
+ char *v = av_strdup("value"); // you can avoid copying them like this
+ av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
+
+ while ((t = av_dict_iterate(d, t))) {
+ <....> // iterate over all entries in d
+ }
+ av_dict_free(&d);
+ @endcode
+ */
+
+/**
+ * @name AVDictionary Flags
+ * Flags that influence behavior of the matching of keys or insertion to the dictionary.
+ * @{
+ */
+#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */
+#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key,
+ ignoring the suffix of the found key string. Only relevant in av_dict_get(). */
+#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been
+ allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been
+ allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_OVERWRITE 16 /**< Don't overwrite existing entries. */
+#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no
+ delimiter is added, the strings are simply concatenated. */
+#define AV_DICT_MULTIKEY       64   /**< Allow storing several equal keys in the dictionary */
+/**
+ * @}
+ */
+
+typedef struct AVDictionaryEntry {
+ char *key;
+ char *value;
+} AVDictionaryEntry;
+
+typedef struct AVDictionary AVDictionary;
+
+/**
+ * Get a dictionary entry with matching key.
+ *
+ * The returned entry key or value must not be changed, or it will
+ * cause undefined behavior.
+ *
+ * @param prev Set to the previous matching element to find the next.
+ * If set to NULL the first matching element is returned.
+ * @param key Matching key
+ * @param flags A collection of AV_DICT_* flags controlling how the
+ * entry is retrieved
+ *
+ * @return Found entry or NULL in case no matching entry was found in the dictionary
+ */
+AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
+ const AVDictionaryEntry *prev, int flags);
+
+/**
+ * Iterate over a dictionary
+ *
+ * Iterates through all entries in the dictionary.
+ *
+ * @warning The returned AVDictionaryEntry key/value must not be changed.
+ *
+ * @warning As av_dict_set() invalidates all previous entries returned
+ * by this function, it must not be called while iterating over the dict.
+ *
+ * Typical usage:
+ * @code
+ * const AVDictionaryEntry *e = NULL;
+ * while ((e = av_dict_iterate(m, e))) {
+ * // ...
+ * }
+ * @endcode
+ *
+ * @param m The dictionary to iterate over
+ * @param prev Pointer to the previous AVDictionaryEntry, NULL initially
+ *
+ * @retval AVDictionaryEntry* The next element in the dictionary
+ * @retval NULL No more elements in the dictionary
+ */
+const AVDictionaryEntry *av_dict_iterate(const AVDictionary *m,
+ const AVDictionaryEntry *prev);
+
+/**
+ * Get number of entries in dictionary.
+ *
+ * @param m dictionary
+ * @return number of entries in dictionary
+ */
+int av_dict_count(const AVDictionary *m);
+
+/**
+ * Set the given entry in *pm, overwriting an existing entry.
+ *
+ * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,
+ * these arguments will be freed on error.
+ *
+ * @warning Adding a new entry to a dictionary invalidates all existing entries
+ * previously returned with av_dict_get() or av_dict_iterate().
+ *
+ * @param pm Pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key   Entry key to add to *pm (will be av_strduped or taken over as-is,
+ *              i.e. ownership transferred, depending on flags)
+ * @param value Entry value to add to *pm (will be av_strduped or taken over as-is,
+ *              depending on flags).
+ *              Passing a NULL value will cause an existing entry to be deleted.
+ *
+ * @return >= 0 on success otherwise an error code <0
+ */
+int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
+
+/**
+ * Convenience wrapper for av_dict_set() that converts the value to a string
+ * and stores it.
+ *
+ * Note: If ::AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
+ */
+int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags);
+
+/**
+ * Parse the key/value pairs list and add the parsed entries to a dictionary.
+ *
+ * In case of failure, all the successfully set entries are stored in
+ * *pm. You may need to manually free the created dictionary.
+ *
+ * @param key_val_sep A 0-terminated list of characters used to separate
+ * key from value
+ * @param pairs_sep A 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @param flags Flags to use when adding to the dictionary.
+ * ::AV_DICT_DONT_STRDUP_KEY and ::AV_DICT_DONT_STRDUP_VAL
+ * are ignored since the key/value tokens will always
+ * be duplicated.
+ *
+ * @return 0 on success, negative AVERROR code on failure
+ */
+int av_dict_parse_string(AVDictionary **pm, const char *str,
+ const char *key_val_sep, const char *pairs_sep,
+ int flags);
+
+/**
+ * Copy entries from one AVDictionary struct into another.
+ *
+ * @note Metadata is read using the ::AV_DICT_IGNORE_SUFFIX flag
+ *
+ * @param dst Pointer to a pointer to an AVDictionary struct to copy into. If *dst is NULL,
+ * this function will allocate a struct for you and put it in *dst
+ * @param src Pointer to the source AVDictionary struct to copy items from.
+ * @param flags Flags to use when setting entries in *dst
+ *
+ * @return 0 on success, negative AVERROR code on failure. If dst was allocated
+ * by this function, callers should free the associated memory.
+ */
+int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags);
+
+/**
+ * Free all the memory allocated for an AVDictionary struct
+ * and all keys and values.
+ */
+void av_dict_free(AVDictionary **m);
+
+/**
+ * Get dictionary entries as a string.
+ *
+ * Create a string containing dictionary's entries.
+ * Such a string may be passed back to av_dict_parse_string().
+ * @note String is escaped with backslashes ('\').
+ *
+ * @warning Separators cannot be '\\' or '\0', and they cannot be equal to each other.
+ *
+ * @param[in] m The dictionary
+ * @param[out] buffer       Pointer to buffer that will be allocated with a string containing the entries.
+ *                          The buffer must be freed by the caller when it is no longer needed.
+ * @param[in] key_val_sep Character used to separate key from value
+ * @param[in] pairs_sep Character used to separate two pairs from each other
+ *
+ * @return >= 0 on success, negative on error
+ */
+int av_dict_get_string(const AVDictionary *m, char **buffer,
+ const char key_val_sep, const char pairs_sep);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DICT_H */
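
Editorial note (not part of the patch): the flag semantics documented above combine in useful ways; for instance AV_DICT_APPEND concatenates values without inserting any delimiter, and av_dict_set_int() is a convenience for numeric values. A hedged sketch with illustrative keys and values:

    #include "libavutil/dict.h"

    static int fill_metadata(AVDictionary **m)
    {
        int ret;

        /* Stored as the string "128000" via the snprintf in av_dict_set_int(). */
        if ((ret = av_dict_set_int(m, "bit_rate", 128000, 0)) < 0)
            return ret;

        /* AV_DICT_APPEND concatenates without a delimiter: after these two
         * calls the "comment" entry holds "part onepart two". */
        if ((ret = av_dict_set(m, "comment", "part one", 0)) < 0 ||
            (ret = av_dict_set(m, "comment", "part two", AV_DICT_APPEND)) < 0)
            return ret;

        /* With AV_DICT_DONT_OVERWRITE the existing value is kept. */
        return av_dict_set(m, "comment", "ignored", AV_DICT_DONT_OVERWRITE);
    }
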
diff --git a/media/ffvpx/libavutil/dict_internal.h b/media/ffvpx/libavutil/dict_internal.h
new file mode 100644
index 0000000000..6d5b0dc2b0
--- /dev/null
+++ b/media/ffvpx/libavutil/dict_internal.h
@@ -0,0 +1,37 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_DICT_INTERNAL_H
+#define AVUTIL_DICT_INTERNAL_H
+
+#include <stdint.h>
+
+#include "dict.h"
+
+/**
+ * Set a dictionary value to an ISO-8601 compliant timestamp string.
+ *
+ * @param dict pointer to a pointer to a dictionary struct. If *dict is NULL
+ * a dictionary struct is allocated and put in *dict.
+ * @param key metadata key
+ * @param timestamp unix timestamp in microseconds
+ * @return <0 on error
+ */
+int avpriv_dict_set_timestamp(AVDictionary **dict, const char *key, int64_t timestamp);
+
+#endif /* AVUTIL_DICT_INTERNAL_H */
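
Editorial note (not part of the patch): avpriv_dict_set_timestamp() is the internal helper the libraries use to store ISO-8601 timestamps as metadata. A tiny hedged sketch; the key name is an example, not mandated by the API:

    #include "libavutil/dict_internal.h"

    static int tag_creation_time(AVDictionary **meta, int64_t time_us)
    {
        /* For time_us == 0 this stores "1970-01-01T00:00:00.000000Z",
         * following the strftime/av_strlcatf pattern in dict.c. */
        return avpriv_dict_set_timestamp(meta, "creation_time", time_us);
    }
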
diff --git a/media/ffvpx/libavutil/dynarray.h b/media/ffvpx/libavutil/dynarray.h
new file mode 100644
index 0000000000..3a7e146422
--- /dev/null
+++ b/media/ffvpx/libavutil/dynarray.h
@@ -0,0 +1,70 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_DYNARRAY_H
+#define AVUTIL_DYNARRAY_H
+
+#include "log.h"
+#include "mem.h"
+
+/**
+ * Add an element to a dynamic array.
+ *
+ * The array is reallocated when its number of elements reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the size is incremented.
+ *
+ * @param av_size_max maximum size of the array, usually the MAX macro of
+ * the type of the size
+ * @param av_elt_size size of the elements in the array, in bytes
+ * @param av_array     pointer to the array, must be an lvalue
+ * @param av_size size of the array, must be an integer lvalue
+ * @param av_success statement to execute on success; at this point, the
+ * size variable is not yet incremented
+ * @param av_failure statement to execute on failure; if this happens, the
+ * array and size are not changed; the statement can end
+ * with a return or a goto
+ */
+#define FF_DYNARRAY_ADD(av_size_max, av_elt_size, av_array, av_size, \
+ av_success, av_failure) \
+ do { \
+ size_t av_size_new = (av_size); \
+ if (!((av_size) & ((av_size) - 1))) { \
+ av_size_new = (av_size) ? (av_size) << 1 : 1; \
+ if (av_size_new > (av_size_max) / (av_elt_size)) { \
+ av_size_new = 0; \
+ } else { \
+ void *av_array_new = \
+ av_realloc((av_array), av_size_new * (av_elt_size)); \
+ if (!av_array_new) \
+ av_size_new = 0; \
+ else \
+ (av_array) = av_array_new; \
+ } \
+ } \
+ if (av_size_new) { \
+ { av_success } \
+ (av_size)++; \
+ } else { \
+ av_failure \
+ } \
+ } while (0)
+
+#endif /* AVUTIL_DYNARRAY_H */
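
Editorial note (not part of the patch): since FF_DYNARRAY_ADD() takes statements for its success and failure paths, a call site typically looks like the following hedged sketch; the IntList type and intlist_add() are illustrative, not taken from the patch:

    #include <stddef.h>
    #include <stdint.h>

    #include "libavutil/dynarray.h"
    #include "libavutil/error.h"   /* AVERROR(); errno.h comes with it */

    typedef struct IntList {
        int    *elems;
        size_t  nb_elems;
    } IntList;

    static int intlist_add(IntList *l, int value)
    {
        /* The success statement runs before nb_elems is incremented, so the
         * new element is written at the old size; on failure the array and
         * size are left untouched. */
        FF_DYNARRAY_ADD(SIZE_MAX, sizeof(*l->elems), l->elems, l->nb_elems,
                        { l->elems[l->nb_elems] = value; },
                        { return AVERROR(ENOMEM); });
        return 0;
    }
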
diff --git a/media/ffvpx/libavutil/error.c b/media/ffvpx/libavutil/error.c
new file mode 100644
index 0000000000..938a8bc000
--- /dev/null
+++ b/media/ffvpx/libavutil/error.c
@@ -0,0 +1,132 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#undef _GNU_SOURCE
+#define _XOPEN_SOURCE 600 /* XSI-compliant version of strerror_r */
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "avstring.h"
+#include "error.h"
+#include "macros.h"
+
+struct error_entry {
+ int num;
+ const char *tag;
+ const char *str;
+};
+
+#define ERROR_TAG(tag) AVERROR_##tag, #tag
+#define EERROR_TAG(tag) AVERROR(tag), #tag
+#define AVERROR_INPUT_AND_OUTPUT_CHANGED (AVERROR_INPUT_CHANGED | AVERROR_OUTPUT_CHANGED)
+static const struct error_entry error_entries[] = {
+ { ERROR_TAG(BSF_NOT_FOUND), "Bitstream filter not found" },
+ { ERROR_TAG(BUG), "Internal bug, should not have happened" },
+ { ERROR_TAG(BUG2), "Internal bug, should not have happened" },
+ { ERROR_TAG(BUFFER_TOO_SMALL), "Buffer too small" },
+ { ERROR_TAG(DECODER_NOT_FOUND), "Decoder not found" },
+ { ERROR_TAG(DEMUXER_NOT_FOUND), "Demuxer not found" },
+ { ERROR_TAG(ENCODER_NOT_FOUND), "Encoder not found" },
+ { ERROR_TAG(EOF), "End of file" },
+ { ERROR_TAG(EXIT), "Immediate exit requested" },
+ { ERROR_TAG(EXTERNAL), "Generic error in an external library" },
+ { ERROR_TAG(FILTER_NOT_FOUND), "Filter not found" },
+ { ERROR_TAG(INPUT_CHANGED), "Input changed" },
+ { ERROR_TAG(INVALIDDATA), "Invalid data found when processing input" },
+ { ERROR_TAG(MUXER_NOT_FOUND), "Muxer not found" },
+ { ERROR_TAG(OPTION_NOT_FOUND), "Option not found" },
+ { ERROR_TAG(OUTPUT_CHANGED), "Output changed" },
+ { ERROR_TAG(PATCHWELCOME), "Not yet implemented in FFmpeg, patches welcome" },
+ { ERROR_TAG(PROTOCOL_NOT_FOUND), "Protocol not found" },
+ { ERROR_TAG(STREAM_NOT_FOUND), "Stream not found" },
+ { ERROR_TAG(UNKNOWN), "Unknown error occurred" },
+ { ERROR_TAG(EXPERIMENTAL), "Experimental feature" },
+ { ERROR_TAG(INPUT_AND_OUTPUT_CHANGED), "Input and output changed" },
+ { ERROR_TAG(HTTP_BAD_REQUEST), "Server returned 400 Bad Request" },
+ { ERROR_TAG(HTTP_UNAUTHORIZED), "Server returned 401 Unauthorized (authorization failed)" },
+ { ERROR_TAG(HTTP_FORBIDDEN), "Server returned 403 Forbidden (access denied)" },
+ { ERROR_TAG(HTTP_NOT_FOUND), "Server returned 404 Not Found" },
+ { ERROR_TAG(HTTP_OTHER_4XX), "Server returned 4XX Client Error, but not one of 40{0,1,3,4}" },
+ { ERROR_TAG(HTTP_SERVER_ERROR), "Server returned 5XX Server Error reply" },
+#if !HAVE_STRERROR_R
+ { EERROR_TAG(E2BIG), "Argument list too long" },
+ { EERROR_TAG(EACCES), "Permission denied" },
+ { EERROR_TAG(EAGAIN), "Resource temporarily unavailable" },
+ { EERROR_TAG(EBADF), "Bad file descriptor" },
+ { EERROR_TAG(EBUSY), "Device or resource busy" },
+ { EERROR_TAG(ECHILD), "No child processes" },
+ { EERROR_TAG(EDEADLK), "Resource deadlock avoided" },
+ { EERROR_TAG(EDOM), "Numerical argument out of domain" },
+ { EERROR_TAG(EEXIST), "File exists" },
+ { EERROR_TAG(EFAULT), "Bad address" },
+ { EERROR_TAG(EFBIG), "File too large" },
+ { EERROR_TAG(EILSEQ), "Illegal byte sequence" },
+ { EERROR_TAG(EINTR), "Interrupted system call" },
+ { EERROR_TAG(EINVAL), "Invalid argument" },
+ { EERROR_TAG(EIO), "I/O error" },
+ { EERROR_TAG(EISDIR), "Is a directory" },
+ { EERROR_TAG(EMFILE), "Too many open files" },
+ { EERROR_TAG(EMLINK), "Too many links" },
+ { EERROR_TAG(ENAMETOOLONG), "File name too long" },
+ { EERROR_TAG(ENFILE), "Too many open files in system" },
+ { EERROR_TAG(ENODEV), "No such device" },
+ { EERROR_TAG(ENOENT), "No such file or directory" },
+ { EERROR_TAG(ENOEXEC), "Exec format error" },
+ { EERROR_TAG(ENOLCK), "No locks available" },
+ { EERROR_TAG(ENOMEM), "Cannot allocate memory" },
+ { EERROR_TAG(ENOSPC), "No space left on device" },
+ { EERROR_TAG(ENOSYS), "Function not implemented" },
+ { EERROR_TAG(ENOTDIR), "Not a directory" },
+ { EERROR_TAG(ENOTEMPTY), "Directory not empty" },
+ { EERROR_TAG(ENOTTY), "Inappropriate I/O control operation" },
+ { EERROR_TAG(ENXIO), "No such device or address" },
+ { EERROR_TAG(EPERM), "Operation not permitted" },
+ { EERROR_TAG(EPIPE), "Broken pipe" },
+ { EERROR_TAG(ERANGE), "Result too large" },
+ { EERROR_TAG(EROFS), "Read-only file system" },
+ { EERROR_TAG(ESPIPE), "Illegal seek" },
+ { EERROR_TAG(ESRCH), "No such process" },
+ { EERROR_TAG(EXDEV), "Cross-device link" },
+#endif
+};
+
+int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
+{
+ int ret = 0, i;
+ const struct error_entry *entry = NULL;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(error_entries); i++) {
+ if (errnum == error_entries[i].num) {
+ entry = &error_entries[i];
+ break;
+ }
+ }
+ if (entry) {
+ av_strlcpy(errbuf, entry->str, errbuf_size);
+ } else {
+#if HAVE_STRERROR_R
+ ret = AVERROR(strerror_r(AVUNERROR(errnum), errbuf, errbuf_size));
+#else
+ ret = -1;
+#endif
+ if (ret < 0)
+ snprintf(errbuf, errbuf_size, "Error number %d occurred", errnum);
+ }
+
+ return ret;
+}
diff --git a/media/ffvpx/libavutil/error.h b/media/ffvpx/libavutil/error.h
new file mode 100644
index 0000000000..0d3269aa6d
--- /dev/null
+++ b/media/ffvpx/libavutil/error.h
@@ -0,0 +1,128 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * error code definitions
+ */
+
+#ifndef AVUTIL_ERROR_H
+#define AVUTIL_ERROR_H
+
+#include <errno.h>
+#include <stddef.h>
+
+#include "macros.h"
+
+/**
+ * @addtogroup lavu_error
+ *
+ * @{
+ */
+
+
+/* error handling */
+#if EDOM > 0
+#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions.
+#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value.
+#else
+/* Some platforms have E* and errno already negated. */
+#define AVERROR(e) (e)
+#define AVUNERROR(e) (e)
+#endif
+
+#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d))
+
+#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found
+#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2
+#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small
+#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found
+#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found
+#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found
+#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file
+#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted
+#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library
+#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found
+#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input
+#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found
+#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found
+#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome
+#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found
+
+#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found
+/**
+ * This is semantically identical to AVERROR_BUG;
+ * it was introduced in Libav after our AVERROR_BUG and with a modified value.
+ */
+#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ')
+#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library
+#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
+#define AVERROR_INPUT_CHANGED (-0x636e6701) ///< Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED)
+#define AVERROR_OUTPUT_CHANGED (-0x636e6702) ///< Output changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_INPUT_CHANGED)
+/* HTTP & RTSP errors */
+#define AVERROR_HTTP_BAD_REQUEST FFERRTAG(0xF8,'4','0','0')
+#define AVERROR_HTTP_UNAUTHORIZED FFERRTAG(0xF8,'4','0','1')
+#define AVERROR_HTTP_FORBIDDEN FFERRTAG(0xF8,'4','0','3')
+#define AVERROR_HTTP_NOT_FOUND FFERRTAG(0xF8,'4','0','4')
+#define AVERROR_HTTP_OTHER_4XX FFERRTAG(0xF8,'4','X','X')
+#define AVERROR_HTTP_SERVER_ERROR FFERRTAG(0xF8,'5','X','X')
+
+#define AV_ERROR_MAX_STRING_SIZE 64
+
+/**
+ * Put a description of the AVERROR code errnum in errbuf.
+ * In case of failure the global variable errno is set to indicate the
+ * error. Even in case of failure av_strerror() will write a generic
+ * error message for the provided errnum to errbuf.
+ *
+ * @param errnum error code to describe
+ * @param errbuf buffer to which description is written
+ * @param errbuf_size the size in bytes of errbuf
+ * @return 0 on success, a negative value if a description for errnum
+ * cannot be found
+ */
+int av_strerror(int errnum, char *errbuf, size_t errbuf_size);
+
+/**
+ * Fill the provided buffer with a string containing an error string
+ * corresponding to the AVERROR code errnum.
+ *
+ * @param errbuf a buffer
+ * @param errbuf_size size in bytes of errbuf
+ * @param errnum error code to describe
+ * @return the buffer in input, filled with the error description
+ * @see av_strerror()
+ */
+static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum)
+{
+ av_strerror(errnum, errbuf, errbuf_size);
+ return errbuf;
+}
+
+/**
+ * Convenience macro; the return value should be used only directly in
+ * function arguments and never stand-alone.
+ */
+#define av_err2str(errnum) \
+ av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum)
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ERROR_H */
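
Editorial note (not part of the patch): a hedged sketch of turning AVERROR codes back into text with both interfaces declared above; the sample error codes are arbitrary:

    #include <stdio.h>

    #include "libavutil/error.h"

    static void report(int errnum)
    {
        char buf[AV_ERROR_MAX_STRING_SIZE];

        /* Explicit-buffer form; even on failure a generic
         * "Error number %d occurred" message is written to buf. */
        av_strerror(errnum, buf, sizeof(buf));
        fprintf(stderr, "av_strerror: %s\n", buf);

        /* Compound-literal convenience macro; per its documentation the
         * result should be used directly as an argument, never stored. */
        fprintf(stderr, "av_err2str : %s\n", av_err2str(errnum));
    }

    int main(void)
    {
        report(AVERROR(ENOMEM));
        report(AVERROR_EOF);
        return 0;
    }
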
diff --git a/media/ffvpx/libavutil/eval.c b/media/ffvpx/libavutil/eval.c
new file mode 100644
index 0000000000..17381256fb
--- /dev/null
+++ b/media/ffvpx/libavutil/eval.c
@@ -0,0 +1,792 @@
+/*
+ * Copyright (c) 2002-2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2006 Oded Shimon <ods15@ods15.dyndns.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple arithmetic expression evaluator.
+ *
+ * see http://joe.hotchkiss.com/programming/eval/eval.html
+ */
+
+#include <float.h>
+#include "attributes.h"
+#include "avutil.h"
+#include "common.h"
+#include "eval.h"
+#include "ffmath.h"
+#include "internal.h"
+#include "log.h"
+#include "mathematics.h"
+#include "fftime.h"
+#include "avstring.h"
+#include "timer.h"
+#include "reverse.h"
+
+typedef struct Parser {
+ const AVClass *class;
+ int stack_index;
+ char *s;
+ const double *const_values;
+ const char * const *const_names; // NULL terminated
+ double (* const *funcs1)(void *, double a); // NULL terminated
+ const char * const *func1_names; // NULL terminated
+ double (* const *funcs2)(void *, double a, double b); // NULL terminated
+ const char * const *func2_names; // NULL terminated
+ void *opaque;
+ int log_offset;
+ void *log_ctx;
+#define VARS 10
+ double *var;
+} Parser;
+
+static const AVClass eval_class = {
+ .class_name = "Eval",
+ .item_name = av_default_item_name,
+ .option = NULL,
+ .version = LIBAVUTIL_VERSION_INT,
+ .log_level_offset_offset = offsetof(Parser, log_offset),
+ .parent_log_context_offset = offsetof(Parser, log_ctx),
+};
+
+static const struct {
+ double bin_val;
+ double dec_val;
+ int8_t exp;
+} si_prefixes['z' - 'E' + 1] = {
+ ['y'-'E']= { 8.271806125530276749e-25, 1e-24, -24 },
+ ['z'-'E']= { 8.4703294725430034e-22, 1e-21, -21 },
+ ['a'-'E']= { 8.6736173798840355e-19, 1e-18, -18 },
+ ['f'-'E']= { 8.8817841970012523e-16, 1e-15, -15 },
+ ['p'-'E']= { 9.0949470177292824e-13, 1e-12, -12 },
+ ['n'-'E']= { 9.3132257461547852e-10, 1e-9, -9 },
+ ['u'-'E']= { 9.5367431640625e-7, 1e-6, -6 },
+ ['m'-'E']= { 9.765625e-4, 1e-3, -3 },
+ ['c'-'E']= { 9.8431332023036951e-3, 1e-2, -2 },
+ ['d'-'E']= { 9.921256574801246e-2, 1e-1, -1 },
+ ['h'-'E']= { 1.0159366732596479e2, 1e2, 2 },
+ ['k'-'E']= { 1.024e3, 1e3, 3 },
+ ['K'-'E']= { 1.024e3, 1e3, 3 },
+ ['M'-'E']= { 1.048576e6, 1e6, 6 },
+ ['G'-'E']= { 1.073741824e9, 1e9, 9 },
+ ['T'-'E']= { 1.099511627776e12, 1e12, 12 },
+ ['P'-'E']= { 1.125899906842624e15, 1e15, 15 },
+ ['E'-'E']= { 1.152921504606847e18, 1e18, 18 },
+ ['Z'-'E']= { 1.1805916207174113e21, 1e21, 21 },
+ ['Y'-'E']= { 1.2089258196146292e24, 1e24, 24 },
+};
+
+static const struct {
+ const char *name;
+ double value;
+} constants[] = {
+ { "E", M_E },
+ { "PI", M_PI },
+ { "PHI", M_PHI },
+ { "QP2LAMBDA", FF_QP2LAMBDA },
+};
+
+double av_strtod(const char *numstr, char **tail)
+{
+ double d;
+ char *next;
+ if(numstr[0]=='0' && (numstr[1]|0x20)=='x') {
+ d = strtoul(numstr, &next, 16);
+ } else
+ d = strtod(numstr, &next);
+ /* if parsing succeeded, check for and interpret postfixes */
+ if (next!=numstr) {
+ if (next[0] == 'd' && next[1] == 'B') {
+ /* treat dB as decibels instead of decibytes */
+ d = ff_exp10(d / 20);
+ next += 2;
+ } else if (*next >= 'E' && *next <= 'z') {
+ int e= si_prefixes[*next - 'E'].exp;
+ if (e) {
+ if (next[1] == 'i') {
+ d*= si_prefixes[*next - 'E'].bin_val;
+ next+=2;
+ } else {
+ d*= si_prefixes[*next - 'E'].dec_val;
+ next++;
+ }
+ }
+ }
+
+ if (*next=='B') {
+ d*=8;
+ next++;
+ }
+ }
+ /* if requested, fill in tail with the position after the last parsed
+ character */
+ if (tail)
+ *tail = next;
+ return d;
+}
+
+#define IS_IDENTIFIER_CHAR(c) ((c) - '0' <= 9U || (c) - 'a' <= 25U || (c) - 'A' <= 25U || (c) == '_')
+
+static int strmatch(const char *s, const char *prefix)
+{
+ int i;
+ for (i=0; prefix[i]; i++) {
+ if (prefix[i] != s[i]) return 0;
+ }
+ /* return 1 only if the s identifier is terminated */
+ return !IS_IDENTIFIER_CHAR(s[i]);
+}
+
+struct AVExpr {
+ enum {
+ e_value, e_const, e_func0, e_func1, e_func2,
+ e_squish, e_gauss, e_ld, e_isnan, e_isinf,
+ e_mod, e_max, e_min, e_eq, e_gt, e_gte, e_lte, e_lt,
+ e_pow, e_mul, e_div, e_add,
+ e_last, e_st, e_while, e_taylor, e_root, e_floor, e_ceil, e_trunc, e_round,
+ e_sqrt, e_not, e_random, e_hypot, e_gcd,
+ e_if, e_ifnot, e_print, e_bitand, e_bitor, e_between, e_clip, e_atan2, e_lerp,
+ e_sgn,
+ } type;
+ double value; // is sign in other types
+ int const_index;
+ union {
+ double (*func0)(double);
+ double (*func1)(void *, double);
+ double (*func2)(void *, double, double);
+ } a;
+ struct AVExpr *param[3];
+ double *var;
+};
+
+static double etime(double v)
+{
+ return av_gettime() * 0.000001;
+}
+
+static double eval_expr(Parser *p, AVExpr *e)
+{
+ switch (e->type) {
+ case e_value: return e->value;
+ case e_const: return e->value * p->const_values[e->const_index];
+ case e_func0: return e->value * e->a.func0(eval_expr(p, e->param[0]));
+ case e_func1: return e->value * e->a.func1(p->opaque, eval_expr(p, e->param[0]));
+ case e_func2: return e->value * e->a.func2(p->opaque, eval_expr(p, e->param[0]), eval_expr(p, e->param[1]));
+ case e_squish: return 1/(1+exp(4*eval_expr(p, e->param[0])));
+ case e_gauss: { double d = eval_expr(p, e->param[0]); return exp(-d*d/2)/sqrt(2*M_PI); }
+ case e_ld: return e->value * p->var[av_clip(eval_expr(p, e->param[0]), 0, VARS-1)];
+ case e_isnan: return e->value * !!isnan(eval_expr(p, e->param[0]));
+ case e_isinf: return e->value * !!isinf(eval_expr(p, e->param[0]));
+ case e_floor: return e->value * floor(eval_expr(p, e->param[0]));
+ case e_ceil : return e->value * ceil (eval_expr(p, e->param[0]));
+ case e_trunc: return e->value * trunc(eval_expr(p, e->param[0]));
+ case e_round: return e->value * round(eval_expr(p, e->param[0]));
+ case e_sgn: return e->value * FFDIFFSIGN(eval_expr(p, e->param[0]), 0);
+ case e_sqrt: return e->value * sqrt (eval_expr(p, e->param[0]));
+ case e_not: return e->value * (eval_expr(p, e->param[0]) == 0);
+ case e_if: return e->value * (eval_expr(p, e->param[0]) ? eval_expr(p, e->param[1]) :
+ e->param[2] ? eval_expr(p, e->param[2]) : 0);
+ case e_ifnot: return e->value * (!eval_expr(p, e->param[0]) ? eval_expr(p, e->param[1]) :
+ e->param[2] ? eval_expr(p, e->param[2]) : 0);
+ case e_clip: {
+ double x = eval_expr(p, e->param[0]);
+ double min = eval_expr(p, e->param[1]), max = eval_expr(p, e->param[2]);
+ if (isnan(min) || isnan(max) || isnan(x) || min > max)
+ return NAN;
+ return e->value * av_clipd(eval_expr(p, e->param[0]), min, max);
+ }
+ case e_between: {
+ double d = eval_expr(p, e->param[0]);
+ return e->value * (d >= eval_expr(p, e->param[1]) &&
+ d <= eval_expr(p, e->param[2]));
+ }
+ case e_lerp: {
+ double v0 = eval_expr(p, e->param[0]);
+ double v1 = eval_expr(p, e->param[1]);
+ double f = eval_expr(p, e->param[2]);
+ return v0 + (v1 - v0) * f;
+ }
+ case e_print: {
+ double x = eval_expr(p, e->param[0]);
+ int level = e->param[1] ? av_clip(eval_expr(p, e->param[1]), INT_MIN, INT_MAX) : AV_LOG_INFO;
+ av_log(p, level, "%f\n", x);
+ return x;
+ }
+ case e_random:{
+ int idx= av_clip(eval_expr(p, e->param[0]), 0, VARS-1);
+ uint64_t r= isnan(p->var[idx]) ? 0 : p->var[idx];
+ r= r*1664525+1013904223;
+ p->var[idx]= r;
+ return e->value * (r * (1.0/UINT64_MAX));
+ }
+ case e_while: {
+ double d = NAN;
+ while (eval_expr(p, e->param[0]))
+ d=eval_expr(p, e->param[1]);
+ return d;
+ }
+ case e_taylor: {
+ double t = 1, d = 0, v;
+ double x = eval_expr(p, e->param[1]);
+ int id = e->param[2] ? av_clip(eval_expr(p, e->param[2]), 0, VARS-1) : 0;
+ int i;
+ double var0 = p->var[id];
+ for(i=0; i<1000; i++) {
+ double ld = d;
+ p->var[id] = i;
+ v = eval_expr(p, e->param[0]);
+ d += t*v;
+ if(ld==d && v)
+ break;
+ t *= x / (i+1);
+ }
+ p->var[id] = var0;
+ return d;
+ }
+ case e_root: {
+ int i, j;
+ double low = -1, high = -1, v, low_v = -DBL_MAX, high_v = DBL_MAX;
+ double var0 = p->var[0];
+ double x_max = eval_expr(p, e->param[1]);
+ for(i=-1; i<1024; i++) {
+ if(i<255) {
+ p->var[0] = ff_reverse[i&255]*x_max/255;
+ } else {
+ p->var[0] = x_max*pow(0.9, i-255);
+ if (i&1) p->var[0] *= -1;
+ if (i&2) p->var[0] += low;
+ else p->var[0] += high;
+ }
+ v = eval_expr(p, e->param[0]);
+ if (v<=0 && v>low_v) {
+ low = p->var[0];
+ low_v = v;
+ }
+ if (v>=0 && v<high_v) {
+ high = p->var[0];
+ high_v = v;
+ }
+ if (low>=0 && high>=0){
+ for (j=0; j<1000; j++) {
+ p->var[0] = (low+high)*0.5;
+ if (low == p->var[0] || high == p->var[0])
+ break;
+ v = eval_expr(p, e->param[0]);
+ if (v<=0) low = p->var[0];
+ if (v>=0) high= p->var[0];
+ if (isnan(v)) {
+ low = high = v;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ p->var[0] = var0;
+ return -low_v<high_v ? low : high;
+ }
+ default: {
+ double d = eval_expr(p, e->param[0]);
+ double d2 = eval_expr(p, e->param[1]);
+ switch (e->type) {
+ case e_mod: return e->value * (d - floor(d2 ? d / d2 : d * INFINITY) * d2);
+ case e_gcd: return e->value * av_gcd(d,d2);
+ case e_max: return e->value * (d > d2 ? d : d2);
+ case e_min: return e->value * (d < d2 ? d : d2);
+ case e_eq: return e->value * (d == d2 ? 1.0 : 0.0);
+ case e_gt: return e->value * (d > d2 ? 1.0 : 0.0);
+ case e_gte: return e->value * (d >= d2 ? 1.0 : 0.0);
+ case e_lt: return e->value * (d < d2 ? 1.0 : 0.0);
+ case e_lte: return e->value * (d <= d2 ? 1.0 : 0.0);
+ case e_pow: return e->value * pow(d, d2);
+ case e_mul: return e->value * (d * d2);
+ case e_div: return e->value * (d2 ? (d / d2) : d * INFINITY);
+ case e_add: return e->value * (d + d2);
+ case e_last:return e->value * d2;
+ case e_st : return e->value * (p->var[av_clip(d, 0, VARS-1)]= d2);
+ case e_hypot:return e->value * hypot(d, d2);
+ case e_atan2:return e->value * atan2(d, d2);
+ case e_bitand: return isnan(d) || isnan(d2) ? NAN : e->value * ((long int)d & (long int)d2);
+ case e_bitor: return isnan(d) || isnan(d2) ? NAN : e->value * ((long int)d | (long int)d2);
+ }
+ }
+ }
+ return NAN;
+}
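+/* Editor's note (illustrative, not part of the upstream source): as an example
+ * of how the cases above combine, the expression "st(0,2); ld(0)*3" parses to
+ * an e_last node whose children are an e_st store of 2 into var[0] and an
+ * e_mul of e_ld(0) by 3, so eval_expr() returns 6. */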
+
+static int parse_expr(AVExpr **e, Parser *p);
+
+void av_expr_free(AVExpr *e)
+{
+ if (!e) return;
+ av_expr_free(e->param[0]);
+ av_expr_free(e->param[1]);
+ av_expr_free(e->param[2]);
+ av_freep(&e->var);
+ av_freep(&e);
+}
+
+static int parse_primary(AVExpr **e, Parser *p)
+{
+ AVExpr *d = av_mallocz(sizeof(AVExpr));
+ char *next = p->s, *s0 = p->s;
+ int ret, i;
+
+ if (!d)
+ return AVERROR(ENOMEM);
+
+ /* number */
+ d->value = av_strtod(p->s, &next);
+ if (next != p->s) {
+ d->type = e_value;
+ p->s= next;
+ *e = d;
+ return 0;
+ }
+ d->value = 1;
+
+ /* named constants */
+ for (i=0; p->const_names && p->const_names[i]; i++) {
+ if (strmatch(p->s, p->const_names[i])) {
+ p->s+= strlen(p->const_names[i]);
+ d->type = e_const;
+ d->const_index = i;
+ *e = d;
+ return 0;
+ }
+ }
+ for (i = 0; i < FF_ARRAY_ELEMS(constants); i++) {
+ if (strmatch(p->s, constants[i].name)) {
+ p->s += strlen(constants[i].name);
+ d->type = e_value;
+ d->value = constants[i].value;
+ *e = d;
+ return 0;
+ }
+ }
+
+ p->s= strchr(p->s, '(');
+ if (!p->s) {
+ av_log(p, AV_LOG_ERROR, "Undefined constant or missing '(' in '%s'\n", s0);
+ p->s= next;
+ av_expr_free(d);
+ return AVERROR(EINVAL);
+ }
+ p->s++; // "("
+ if (*next == '(') { // special case do-nothing
+ av_freep(&d);
+ if ((ret = parse_expr(&d, p)) < 0)
+ return ret;
+ if (p->s[0] != ')') {
+ av_log(p, AV_LOG_ERROR, "Missing ')' in '%s'\n", s0);
+ av_expr_free(d);
+ return AVERROR(EINVAL);
+ }
+ p->s++; // ")"
+ *e = d;
+ return 0;
+ }
+ if ((ret = parse_expr(&(d->param[0]), p)) < 0) {
+ av_expr_free(d);
+ return ret;
+ }
+ if (p->s[0]== ',') {
+ p->s++; // ","
+ parse_expr(&d->param[1], p);
+ }
+ if (p->s[0]== ',') {
+ p->s++; // ","
+ parse_expr(&d->param[2], p);
+ }
+ if (p->s[0] != ')') {
+ av_log(p, AV_LOG_ERROR, "Missing ')' or too many args in '%s'\n", s0);
+ av_expr_free(d);
+ return AVERROR(EINVAL);
+ }
+ p->s++; // ")"
+
+ d->type = e_func0;
+ if (strmatch(next, "sinh" )) d->a.func0 = sinh;
+ else if (strmatch(next, "cosh" )) d->a.func0 = cosh;
+ else if (strmatch(next, "tanh" )) d->a.func0 = tanh;
+ else if (strmatch(next, "sin" )) d->a.func0 = sin;
+ else if (strmatch(next, "cos" )) d->a.func0 = cos;
+ else if (strmatch(next, "tan" )) d->a.func0 = tan;
+ else if (strmatch(next, "atan" )) d->a.func0 = atan;
+ else if (strmatch(next, "asin" )) d->a.func0 = asin;
+ else if (strmatch(next, "acos" )) d->a.func0 = acos;
+ else if (strmatch(next, "exp" )) d->a.func0 = exp;
+ else if (strmatch(next, "log" )) d->a.func0 = log;
+ else if (strmatch(next, "abs" )) d->a.func0 = fabs;
+ else if (strmatch(next, "time" )) d->a.func0 = etime;
+ else if (strmatch(next, "squish")) d->type = e_squish;
+ else if (strmatch(next, "gauss" )) d->type = e_gauss;
+ else if (strmatch(next, "mod" )) d->type = e_mod;
+ else if (strmatch(next, "max" )) d->type = e_max;
+ else if (strmatch(next, "min" )) d->type = e_min;
+ else if (strmatch(next, "eq" )) d->type = e_eq;
+ else if (strmatch(next, "gte" )) d->type = e_gte;
+ else if (strmatch(next, "gt" )) d->type = e_gt;
+ else if (strmatch(next, "lte" )) d->type = e_lte;
+ else if (strmatch(next, "lt" )) d->type = e_lt;
+ else if (strmatch(next, "ld" )) d->type = e_ld;
+ else if (strmatch(next, "isnan" )) d->type = e_isnan;
+ else if (strmatch(next, "isinf" )) d->type = e_isinf;
+ else if (strmatch(next, "st" )) d->type = e_st;
+ else if (strmatch(next, "while" )) d->type = e_while;
+ else if (strmatch(next, "taylor")) d->type = e_taylor;
+ else if (strmatch(next, "root" )) d->type = e_root;
+ else if (strmatch(next, "floor" )) d->type = e_floor;
+ else if (strmatch(next, "ceil" )) d->type = e_ceil;
+ else if (strmatch(next, "trunc" )) d->type = e_trunc;
+ else if (strmatch(next, "round" )) d->type = e_round;
+ else if (strmatch(next, "sqrt" )) d->type = e_sqrt;
+ else if (strmatch(next, "not" )) d->type = e_not;
+ else if (strmatch(next, "pow" )) d->type = e_pow;
+ else if (strmatch(next, "print" )) d->type = e_print;
+ else if (strmatch(next, "random")) d->type = e_random;
+ else if (strmatch(next, "hypot" )) d->type = e_hypot;
+ else if (strmatch(next, "gcd" )) d->type = e_gcd;
+ else if (strmatch(next, "if" )) d->type = e_if;
+ else if (strmatch(next, "ifnot" )) d->type = e_ifnot;
+ else if (strmatch(next, "bitand")) d->type = e_bitand;
+ else if (strmatch(next, "bitor" )) d->type = e_bitor;
+ else if (strmatch(next, "between"))d->type = e_between;
+ else if (strmatch(next, "clip" )) d->type = e_clip;
+ else if (strmatch(next, "atan2" )) d->type = e_atan2;
+ else if (strmatch(next, "lerp" )) d->type = e_lerp;
+ else if (strmatch(next, "sgn" )) d->type = e_sgn;
+ else {
+ for (i=0; p->func1_names && p->func1_names[i]; i++) {
+ if (strmatch(next, p->func1_names[i])) {
+ d->a.func1 = p->funcs1[i];
+ d->type = e_func1;
+ d->const_index = i;
+ *e = d;
+ return 0;
+ }
+ }
+
+ for (i=0; p->func2_names && p->func2_names[i]; i++) {
+ if (strmatch(next, p->func2_names[i])) {
+ d->a.func2 = p->funcs2[i];
+ d->type = e_func2;
+ d->const_index = i;
+ *e = d;
+ return 0;
+ }
+ }
+
+ av_log(p, AV_LOG_ERROR, "Unknown function in '%s'\n", s0);
+ av_expr_free(d);
+ return AVERROR(EINVAL);
+ }
+
+ *e = d;
+ return 0;
+}
+
+static AVExpr *make_eval_expr(int type, int value, AVExpr *p0, AVExpr *p1)
+{
+ AVExpr *e = av_mallocz(sizeof(AVExpr));
+ if (!e)
+ return NULL;
+ e->type =type ;
+ e->value =value ;
+ e->param[0] =p0 ;
+ e->param[1] =p1 ;
+ return e;
+}
+
+static int parse_pow(AVExpr **e, Parser *p, int *sign)
+{
+ *sign= (*p->s == '+') - (*p->s == '-');
+ p->s += *sign&1;
+ return parse_primary(e, p);
+}
+
+static int parse_dB(AVExpr **e, Parser *p, int *sign)
+{
+ /* do not filter out the negative sign when parsing a dB value.
+ for example, -3dB is not the same as -(3dB) */
+ if (*p->s == '-') {
+ char *next;
+ double av_unused ignored = strtod(p->s, &next);
+ if (next != p->s && next[0] == 'd' && next[1] == 'B') {
+ *sign = 0;
+ return parse_primary(e, p);
+ }
+ }
+ return parse_pow(e, p, sign);
+}
+
+static int parse_factor(AVExpr **e, Parser *p)
+{
+ int sign, sign2, ret;
+ AVExpr *e0, *e1, *e2;
+ if ((ret = parse_dB(&e0, p, &sign)) < 0)
+ return ret;
+ while(p->s[0]=='^'){
+ e1 = e0;
+ p->s++;
+ if ((ret = parse_dB(&e2, p, &sign2)) < 0) {
+ av_expr_free(e1);
+ return ret;
+ }
+ e0 = make_eval_expr(e_pow, 1, e1, e2);
+ if (!e0) {
+ av_expr_free(e1);
+ av_expr_free(e2);
+ return AVERROR(ENOMEM);
+ }
+ if (e0->param[1]) e0->param[1]->value *= (sign2|1);
+ }
+ if (e0) e0->value *= (sign|1);
+
+ *e = e0;
+ return 0;
+}
+
+static int parse_term(AVExpr **e, Parser *p)
+{
+ int ret;
+ AVExpr *e0, *e1, *e2;
+ if ((ret = parse_factor(&e0, p)) < 0)
+ return ret;
+ while (p->s[0]=='*' || p->s[0]=='/') {
+ int c= *p->s++;
+ e1 = e0;
+ if ((ret = parse_factor(&e2, p)) < 0) {
+ av_expr_free(e1);
+ return ret;
+ }
+ e0 = make_eval_expr(c == '*' ? e_mul : e_div, 1, e1, e2);
+ if (!e0) {
+ av_expr_free(e1);
+ av_expr_free(e2);
+ return AVERROR(ENOMEM);
+ }
+ }
+ *e = e0;
+ return 0;
+}
+
+static int parse_subexpr(AVExpr **e, Parser *p)
+{
+ int ret;
+ AVExpr *e0, *e1, *e2;
+ if ((ret = parse_term(&e0, p)) < 0)
+ return ret;
+ while (*p->s == '+' || *p->s == '-') {
+ e1 = e0;
+ if ((ret = parse_term(&e2, p)) < 0) {
+ av_expr_free(e1);
+ return ret;
+ }
+ e0 = make_eval_expr(e_add, 1, e1, e2);
+ if (!e0) {
+ av_expr_free(e1);
+ av_expr_free(e2);
+ return AVERROR(ENOMEM);
+ }
+ };
+
+ *e = e0;
+ return 0;
+}
+
+static int parse_expr(AVExpr **e, Parser *p)
+{
+ int ret;
+ AVExpr *e0, *e1, *e2;
+ if (p->stack_index <= 0) //protect against stack overflows
+ return AVERROR(EINVAL);
+ p->stack_index--;
+
+ if ((ret = parse_subexpr(&e0, p)) < 0)
+ return ret;
+ while (*p->s == ';') {
+ p->s++;
+ e1 = e0;
+ if ((ret = parse_subexpr(&e2, p)) < 0) {
+ av_expr_free(e1);
+ return ret;
+ }
+ e0 = make_eval_expr(e_last, 1, e1, e2);
+ if (!e0) {
+ av_expr_free(e1);
+ av_expr_free(e2);
+ return AVERROR(ENOMEM);
+ }
+ };
+
+ p->stack_index++;
+ *e = e0;
+ return 0;
+}
+
+static int verify_expr(AVExpr *e)
+{
+ if (!e) return 0;
+ switch (e->type) {
+ case e_value:
+ case e_const: return 1;
+ case e_func0:
+ case e_func1:
+ case e_squish:
+ case e_ld:
+ case e_gauss:
+ case e_isnan:
+ case e_isinf:
+ case e_floor:
+ case e_ceil:
+ case e_trunc:
+ case e_round:
+ case e_sqrt:
+ case e_not:
+ case e_random:
+ case e_sgn:
+ return verify_expr(e->param[0]) && !e->param[1];
+ case e_print:
+ return verify_expr(e->param[0])
+ && (!e->param[1] || verify_expr(e->param[1]));
+ case e_if:
+ case e_ifnot:
+ case e_taylor:
+ return verify_expr(e->param[0]) && verify_expr(e->param[1])
+ && (!e->param[2] || verify_expr(e->param[2]));
+ case e_between:
+ case e_clip:
+ case e_lerp:
+ return verify_expr(e->param[0]) &&
+ verify_expr(e->param[1]) &&
+ verify_expr(e->param[2]);
+ default: return verify_expr(e->param[0]) && verify_expr(e->param[1]) && !e->param[2];
+ }
+}
+
+int av_expr_parse(AVExpr **expr, const char *s,
+ const char * const *const_names,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ int log_offset, void *log_ctx)
+{
+ Parser p = { 0 };
+ AVExpr *e = NULL;
+ char *w = av_malloc(strlen(s) + 1);
+ char *wp = w;
+ const char *s0 = s;
+ int ret = 0;
+
+ if (!w)
+ return AVERROR(ENOMEM);
+
+ while (*s)
+ if (!av_isspace(*s++)) *wp++ = s[-1];
+ *wp++ = 0;
+
+ p.class = &eval_class;
+ p.stack_index=100;
+ p.s= w;
+ p.const_names = const_names;
+ p.funcs1 = funcs1;
+ p.func1_names = func1_names;
+ p.funcs2 = funcs2;
+ p.func2_names = func2_names;
+ p.log_offset = log_offset;
+ p.log_ctx = log_ctx;
+
+ if ((ret = parse_expr(&e, &p)) < 0)
+ goto end;
+ if (*p.s) {
+ av_log(&p, AV_LOG_ERROR, "Invalid chars '%s' at the end of expression '%s'\n", p.s, s0);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ if (!verify_expr(e)) {
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ e->var= av_mallocz(sizeof(double) *VARS);
+ if (!e->var) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ *expr = e;
+ e = NULL;
+end:
+ av_expr_free(e);
+ av_free(w);
+ return ret;
+}
+
+static int expr_count(AVExpr *e, unsigned *counter, int size, int type)
+{
+ int i;
+
+ if (!e || !counter || !size)
+ return AVERROR(EINVAL);
+
+ for (i = 0; e->type != type && i < 3 && e->param[i]; i++)
+ expr_count(e->param[i], counter, size, type);
+
+ if (e->type == type && e->const_index < size)
+ counter[e->const_index]++;
+
+ return 0;
+}
+
+int av_expr_count_vars(AVExpr *e, unsigned *counter, int size)
+{
+ return expr_count(e, counter, size, e_const);
+}
+
+int av_expr_count_func(AVExpr *e, unsigned *counter, int size, int arg)
+{
+ return expr_count(e, counter, size, ((int[]){e_const, e_func1, e_func2})[arg]);
+}
+
+double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
+{
+ Parser p = { 0 };
+ p.var= e->var;
+
+ p.const_values = const_values;
+ p.opaque = opaque;
+ return eval_expr(&p, e);
+}
+
+int av_expr_parse_and_eval(double *d, const char *s,
+ const char * const *const_names, const double *const_values,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ void *opaque, int log_offset, void *log_ctx)
+{
+ AVExpr *e = NULL;
+ int ret = av_expr_parse(&e, s, const_names, func1_names, funcs1, func2_names, funcs2, log_offset, log_ctx);
+
+ if (ret < 0) {
+ *d = NAN;
+ return ret;
+ }
+ *d = av_expr_eval(e, const_values, opaque);
+ av_expr_free(e);
+ return isnan(*d) ? AVERROR(EINVAL) : 0;
+}
diff --git a/media/ffvpx/libavutil/eval.h b/media/ffvpx/libavutil/eval.h
new file mode 100644
index 0000000000..ee8cffb057
--- /dev/null
+++ b/media/ffvpx/libavutil/eval.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple arithmetic expression evaluator
+ */
+
+#ifndef AVUTIL_EVAL_H
+#define AVUTIL_EVAL_H
+
+typedef struct AVExpr AVExpr;
+
+/**
+ * Parse and evaluate an expression.
+ * Note, this is significantly slower than av_expr_eval().
+ *
+ * @param res a pointer to a double where the result value of the
+ * expression is stored, or NAN in case of error
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param const_values a zero terminated array of values for the identifiers from const_names
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @param log_offset log level offset, can be used to silence error messages
+ * @param log_ctx parent logging context
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse_and_eval(double *res, const char *s,
+ const char * const *const_names, const double *const_values,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ void *opaque, int log_offset, void *log_ctx);
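+
+/*
+ * Editor's note: a minimal usage sketch (illustrative, not part of the
+ * upstream header). The constant name "N" and its value are arbitrary choices
+ * of this example.
+ *
+ * @code
+ * static const char *const my_const_names[] = { "N", NULL };
+ * double my_const_values[] = { 4.0 };
+ * double result;
+ * int ret = av_expr_parse_and_eval(&result, "N*N + sin(0)",
+ *                                  my_const_names, my_const_values,
+ *                                  NULL, NULL, NULL, NULL,
+ *                                  NULL, 0, NULL);
+ * if (ret >= 0)
+ *     printf("result = %f\n", result); // 16.000000
+ * @endcode
+ */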
+
+/**
+ * Parse an expression.
+ *
+ * @param expr a pointer where an AVExpr containing the parsed expression
+ * is stored in case of successful parsing, or NULL otherwise.
+ * The pointed-to AVExpr must be freed with av_expr_free() by the user
+ * when it is no longer needed.
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param log_offset log level offset, can be used to silence error messages
+ * @param log_ctx parent logging context
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse(AVExpr **expr, const char *s,
+ const char * const *const_names,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ int log_offset, void *log_ctx);
+
+/**
+ * Evaluate a previously parsed expression.
+ *
+ * @param e the AVExpr to evaluate
+ * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @return the value of the expression
+ */
+double av_expr_eval(AVExpr *e, const double *const_values, void *opaque);
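+
+/*
+ * Editor's note: an illustrative sketch (not part of the upstream header) of
+ * the parse-once / evaluate-many pattern this function enables. The constant
+ * name "t" is an arbitrary choice of this example.
+ *
+ * @code
+ * static const char *const names[] = { "t", NULL };
+ * double values[] = { 0.0 };
+ * AVExpr *expr = NULL;
+ * if (av_expr_parse(&expr, "1/(1+exp(-t))", names,
+ *                   NULL, NULL, NULL, NULL, 0, NULL) >= 0) {
+ *     for (int i = 0; i < 10; i++) {
+ *         values[0] = i;
+ *         printf("%f\n", av_expr_eval(expr, values, NULL));
+ *     }
+ *     av_expr_free(expr);
+ * }
+ * @endcode
+ */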
+
+/**
+ * Track the presence of variables and their number of occurrences in a parsed expression
+ *
+ * @param e the AVExpr to track variables in
+ * @param counter a zero-initialized array where the count of each variable will be stored
+ * @param size size of array
+ * @return 0 on success, a negative value indicates that no expression or array was passed
+ * or size was zero
+ */
+int av_expr_count_vars(AVExpr *e, unsigned *counter, int size);
+
+/**
+ * Track the presence of user provided functions and their number of occurrences
+ * in a parsed expression.
+ *
+ * @param e the AVExpr to track user provided functions in
+ * @param counter a zero-initialized array where the count of each function will be stored.
+ * For example, if you passed 5 functions taking 2 arguments to av_expr_parse(),
+ * then for arg=2 up to 5 entries will be used.
+ * @param size size of array
+ * @param arg number of arguments the counted functions have
+ * @return 0 on success, a negative value indicates that no expression or array was passed
+ * or size was zero
+ */
+int av_expr_count_func(AVExpr *e, unsigned *counter, int size, int arg);
+
+/**
+ * Free a parsed expression previously created with av_expr_parse().
+ */
+void av_expr_free(AVExpr *e);
+
+/**
+ * Parse the string in numstr and return its value as a double. If
+ * the string is empty, contains only whitespace, or does not contain
+ * an initial substring that has the expected syntax for a
+ * floating-point number, no conversion is performed. In this case,
+ * zero is returned and the value returned in tail is the value of
+ * numstr.
+ *
+ * @param numstr a string representing a number, may contain one of
+ * the International System number postfixes, for example 'K', 'M',
+ * 'G'. If 'i' is appended after the postfix, powers of 2 are used
+ * instead of powers of 10. The 'B' postfix multiplies the value by
+ * 8, and can be appended after another postfix or used alone. This
+ * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix.
+ * @param tail if non-NULL puts here the pointer to the char next
+ * after the last parsed character
+ */
+double av_strtod(const char *numstr, char **tail);
+
+#endif /* AVUTIL_EVAL_H */
diff --git a/media/ffvpx/libavutil/ffmath.h b/media/ffvpx/libavutil/ffmath.h
new file mode 100644
index 0000000000..aad1347f26
--- /dev/null
+++ b/media/ffvpx/libavutil/ffmath.h
@@ -0,0 +1,67 @@
+/*
+ * copyright (c) 2016 Ganesh Ajjanagadde <gajjanag@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * internal math functions header
+ */
+
+#ifndef AVUTIL_FFMATH_H
+#define AVUTIL_FFMATH_H
+
+#include "attributes.h"
+#include "libm.h"
+
+/**
+ * Compute 10^x for floating point values. Note: this function is by no means
+ * "correctly rounded", and is meant as a fast, reasonably accurate approximation.
+ * For instance, maximum relative error for the double precision variant is
+ * ~ 1e-13 for very small and very large values.
+ * This is ~2x faster than GNU libm's approach, which is still off by 2ulp on
+ * some inputs.
+ * @param x exponent
+ * @return 10^x
+ */
+static av_always_inline double ff_exp10(double x)
+{
+ return exp2(M_LOG2_10 * x);
+}
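+
+/* Editor's note: an illustrative use (not part of the upstream header),
+ * assuming a caller-chosen decibel value; a typical application is converting
+ * decibels to a linear amplitude factor:
+ *
+ *     double gain_db = -6.0;
+ *     double gain    = ff_exp10(gain_db / 20.0); // ~0.501
+ */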
+
+static av_always_inline float ff_exp10f(float x)
+{
+ return exp2f(M_LOG2_10 * x);
+}
+
+/**
+ * Compute x^y for floating point x, y. Note: this function is faster than the
+ * libm variant mainly for two reasons:
+ * 1. It does not handle any edge cases. In particular, this is only guaranteed
+ * to work correctly for x > 0.
+ * 2. It is not as accurate as a standard nearly "correctly rounded" libm variant.
+ * @param x base
+ * @param y exponent
+ * @return x^y
+ */
+static av_always_inline float ff_fast_powf(float x, float y)
+{
+ return expf(logf(x) * y);
+}
+
+#endif /* AVUTIL_FFMATH_H */
diff --git a/media/ffvpx/libavutil/fftime.h b/media/ffvpx/libavutil/fftime.h
new file mode 100644
index 0000000000..8f3b320e38
--- /dev/null
+++ b/media/ffvpx/libavutil/fftime.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2000-2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_TIME_H
+#define AVUTIL_TIME_H
+
+#include <stdint.h>
+#include <time.h>
+
+/**
+ * Get the current time in microseconds.
+ */
+int64_t av_gettime(void);
+
+/**
+ * Get the current time in microseconds since some unspecified starting point.
+ * On platforms that support it, the time comes from a monotonic clock.
+ * This property makes this time source ideal for measuring relative time.
+ * The returned values may not be monotonic on platforms where a monotonic
+ * clock is not available.
+ */
+int64_t av_gettime_relative(void);
+
+/**
+ * Indicate with a boolean result whether the av_gettime_relative() time source
+ * is monotonic.
+ */
+int av_gettime_relative_is_monotonic(void);
+
+/**
+ * Sleep for a period of time. Although the duration is expressed in
+ * microseconds, the actual delay may be rounded to the precision of the
+ * system timer.
+ *
+ * @param usec Number of microseconds to sleep.
+ * @return zero on success or (negative) error code.
+ */
+int av_usleep(unsigned usec);
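+
+/* Editor's note: an illustrative sketch (not part of the upstream header) of
+ * measuring elapsed wall-clock time with the relative clock declared above:
+ *
+ *     int64_t start = av_gettime_relative();
+ *     av_usleep(1000);                               // sleep for roughly 1 ms
+ *     int64_t elapsed_us = av_gettime_relative() - start;
+ */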
+
+#endif /* AVUTIL_TIME_H */
diff --git a/media/ffvpx/libavutil/ffversion.h b/media/ffvpx/libavutil/ffversion.h
new file mode 100644
index 0000000000..21f6614f7a
--- /dev/null
+++ b/media/ffvpx/libavutil/ffversion.h
@@ -0,0 +1,5 @@
+/* Automatically generated by version.sh, do not manually edit! */
+#ifndef AVUTIL_FFVERSION_H
+#define AVUTIL_FFVERSION_H
+#define FFMPEG_VERSION "N-109117-g6a3e174ad1"
+#endif /* AVUTIL_FFVERSION_H */
diff --git a/media/ffvpx/libavutil/fifo.c b/media/ffvpx/libavutil/fifo.c
new file mode 100644
index 0000000000..51a5af6f39
--- /dev/null
+++ b/media/ffvpx/libavutil/fifo.c
@@ -0,0 +1,510 @@
+/*
+ * a very simple circular buffer FIFO implementation
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2006 Roman Shaposhnik
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "avassert.h"
+#include "error.h"
+#include "fifo.h"
+#include "macros.h"
+#include "mem.h"
+
+// by default the FIFO can be auto-grown to 1MB
+#define AUTO_GROW_DEFAULT_BYTES (1024 * 1024)
+
+struct AVFifo {
+ uint8_t *buffer;
+
+ size_t elem_size, nb_elems;
+ size_t offset_r, offset_w;
+ // distinguishes the ambiguous situation offset_r == offset_w
+ int is_empty;
+
+ unsigned int flags;
+ size_t auto_grow_limit;
+};
+
+AVFifo *av_fifo_alloc2(size_t nb_elems, size_t elem_size,
+ unsigned int flags)
+{
+ AVFifo *f;
+ void *buffer = NULL;
+
+ if (!elem_size)
+ return NULL;
+
+ if (nb_elems) {
+ buffer = av_realloc_array(NULL, nb_elems, elem_size);
+ if (!buffer)
+ return NULL;
+ }
+ f = av_mallocz(sizeof(*f));
+ if (!f) {
+ av_free(buffer);
+ return NULL;
+ }
+ f->buffer = buffer;
+ f->nb_elems = nb_elems;
+ f->elem_size = elem_size;
+ f->is_empty = 1;
+
+ f->flags = flags;
+ f->auto_grow_limit = FFMAX(AUTO_GROW_DEFAULT_BYTES / elem_size, 1);
+
+ return f;
+}
+
+void av_fifo_auto_grow_limit(AVFifo *f, size_t max_elems)
+{
+ f->auto_grow_limit = max_elems;
+}
+
+size_t av_fifo_elem_size(const AVFifo *f)
+{
+ return f->elem_size;
+}
+
+size_t av_fifo_can_read(const AVFifo *f)
+{
+ if (f->offset_w <= f->offset_r && !f->is_empty)
+ return f->nb_elems - f->offset_r + f->offset_w;
+ return f->offset_w - f->offset_r;
+}
+
+size_t av_fifo_can_write(const AVFifo *f)
+{
+ return f->nb_elems - av_fifo_can_read(f);
+}
+
+int av_fifo_grow2(AVFifo *f, size_t inc)
+{
+ uint8_t *tmp;
+
+ if (inc > SIZE_MAX - f->nb_elems)
+ return AVERROR(EINVAL);
+
+ tmp = av_realloc_array(f->buffer, f->nb_elems + inc, f->elem_size);
+ if (!tmp)
+ return AVERROR(ENOMEM);
+ f->buffer = tmp;
+
+ // move the data from the beginning of the ring buffer
+ // to the newly allocated space
+ if (f->offset_w <= f->offset_r && !f->is_empty) {
+ const size_t copy = FFMIN(inc, f->offset_w);
+ memcpy(tmp + f->nb_elems * f->elem_size, tmp, copy * f->elem_size);
+ if (copy < f->offset_w) {
+ memmove(tmp, tmp + copy * f->elem_size,
+ (f->offset_w - copy) * f->elem_size);
+ f->offset_w -= copy;
+ } else
+ f->offset_w = copy == inc ? 0 : f->nb_elems + copy;
+ }
+
+ f->nb_elems += inc;
+
+ return 0;
+}
+
+static int fifo_check_space(AVFifo *f, size_t to_write)
+{
+ const size_t can_write = av_fifo_can_write(f);
+ const size_t need_grow = to_write > can_write ? to_write - can_write : 0;
+ size_t can_grow;
+
+ if (!need_grow)
+ return 0;
+
+ can_grow = f->auto_grow_limit > f->nb_elems ?
+ f->auto_grow_limit - f->nb_elems : 0;
+ if ((f->flags & AV_FIFO_FLAG_AUTO_GROW) && need_grow <= can_grow) {
+ // allocate a bit more than necessary, if we can
+ const size_t inc = (need_grow < can_grow / 2 ) ? need_grow * 2 : can_grow;
+ return av_fifo_grow2(f, inc);
+ }
+
+ return AVERROR(ENOSPC);
+}
+
+static int fifo_write_common(AVFifo *f, const uint8_t *buf, size_t *nb_elems,
+ AVFifoCB read_cb, void *opaque)
+{
+ size_t to_write = *nb_elems;
+ size_t offset_w;
+ int ret = 0;
+
+ ret = fifo_check_space(f, to_write);
+ if (ret < 0)
+ return ret;
+
+ offset_w = f->offset_w;
+
+ while (to_write > 0) {
+ size_t len = FFMIN(f->nb_elems - offset_w, to_write);
+ uint8_t *wptr = f->buffer + offset_w * f->elem_size;
+
+ if (read_cb) {
+ ret = read_cb(opaque, wptr, &len);
+ if (ret < 0 || len == 0)
+ break;
+ } else {
+ memcpy(wptr, buf, len * f->elem_size);
+ buf += len * f->elem_size;
+ }
+ offset_w += len;
+ if (offset_w >= f->nb_elems)
+ offset_w = 0;
+ to_write -= len;
+ }
+ f->offset_w = offset_w;
+
+ if (*nb_elems != to_write)
+ f->is_empty = 0;
+ *nb_elems -= to_write;
+
+ return ret;
+}
+
+int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
+{
+ return fifo_write_common(f, buf, &nb_elems, NULL, NULL);
+}
+
+int av_fifo_write_from_cb(AVFifo *f, AVFifoCB read_cb,
+ void *opaque, size_t *nb_elems)
+{
+ return fifo_write_common(f, NULL, nb_elems, read_cb, opaque);
+}
+
+static int fifo_peek_common(const AVFifo *f, uint8_t *buf, size_t *nb_elems,
+ size_t offset, AVFifoCB write_cb, void *opaque)
+{
+ size_t to_read = *nb_elems;
+ size_t offset_r = f->offset_r;
+ size_t can_read = av_fifo_can_read(f);
+ int ret = 0;
+
+ if (offset > can_read || to_read > can_read - offset) {
+ *nb_elems = 0;
+ return AVERROR(EINVAL);
+ }
+
+ if (offset_r >= f->nb_elems - offset)
+ offset_r -= f->nb_elems - offset;
+ else
+ offset_r += offset;
+
+ while (to_read > 0) {
+ size_t len = FFMIN(f->nb_elems - offset_r, to_read);
+ uint8_t *rptr = f->buffer + offset_r * f->elem_size;
+
+ if (write_cb) {
+ ret = write_cb(opaque, rptr, &len);
+ if (ret < 0 || len == 0)
+ break;
+ } else {
+ memcpy(buf, rptr, len * f->elem_size);
+ buf += len * f->elem_size;
+ }
+ offset_r += len;
+ if (offset_r >= f->nb_elems)
+ offset_r = 0;
+ to_read -= len;
+ }
+
+ *nb_elems -= to_read;
+
+ return ret;
+}
+
+int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
+{
+ int ret = fifo_peek_common(f, buf, &nb_elems, 0, NULL, NULL);
+ av_fifo_drain2(f, nb_elems);
+ return ret;
+}
+
+int av_fifo_read_to_cb(AVFifo *f, AVFifoCB write_cb,
+ void *opaque, size_t *nb_elems)
+{
+ int ret = fifo_peek_common(f, NULL, nb_elems, 0, write_cb, opaque);
+ av_fifo_drain2(f, *nb_elems);
+ return ret;
+}
+
+int av_fifo_peek(AVFifo *f, void *buf, size_t nb_elems, size_t offset)
+{
+ return fifo_peek_common(f, buf, &nb_elems, offset, NULL, NULL);
+}
+
+int av_fifo_peek_to_cb(AVFifo *f, AVFifoCB write_cb, void *opaque,
+ size_t *nb_elems, size_t offset)
+{
+ return fifo_peek_common(f, NULL, nb_elems, offset, write_cb, opaque);
+}
+
+void av_fifo_drain2(AVFifo *f, size_t size)
+{
+ const size_t cur_size = av_fifo_can_read(f);
+
+ av_assert0(cur_size >= size);
+ if (cur_size == size)
+ f->is_empty = 1;
+
+ if (f->offset_r >= f->nb_elems - size)
+ f->offset_r -= f->nb_elems - size;
+ else
+ f->offset_r += size;
+}
+
+void av_fifo_reset2(AVFifo *f)
+{
+ f->offset_r = f->offset_w = 0;
+ f->is_empty = 1;
+}
+
+void av_fifo_freep2(AVFifo **f)
+{
+ if (*f) {
+ av_freep(&(*f)->buffer);
+ av_freep(f);
+ }
+}
+
+
+#if FF_API_FIFO_OLD_API
+FF_DISABLE_DEPRECATION_WARNINGS
+#define OLD_FIFO_SIZE_MAX (size_t)FFMIN3(INT_MAX, UINT32_MAX, SIZE_MAX)
+
+AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size)
+{
+ AVFifoBuffer *f;
+ void *buffer;
+
+ if (nmemb > OLD_FIFO_SIZE_MAX / size)
+ return NULL;
+
+ buffer = av_realloc_array(NULL, nmemb, size);
+ if (!buffer)
+ return NULL;
+ f = av_mallocz(sizeof(AVFifoBuffer));
+ if (!f) {
+ av_free(buffer);
+ return NULL;
+ }
+ f->buffer = buffer;
+ f->end = f->buffer + nmemb * size;
+ av_fifo_reset(f);
+ return f;
+}
+
+AVFifoBuffer *av_fifo_alloc(unsigned int size)
+{
+ return av_fifo_alloc_array(size, 1);
+}
+
+void av_fifo_free(AVFifoBuffer *f)
+{
+ if (f) {
+ av_freep(&f->buffer);
+ av_free(f);
+ }
+}
+
+void av_fifo_freep(AVFifoBuffer **f)
+{
+ if (f) {
+ av_fifo_free(*f);
+ *f = NULL;
+ }
+}
+
+void av_fifo_reset(AVFifoBuffer *f)
+{
+ f->wptr = f->rptr = f->buffer;
+ f->wndx = f->rndx = 0;
+}
+
+int av_fifo_size(const AVFifoBuffer *f)
+{
+ return (uint32_t)(f->wndx - f->rndx);
+}
+
+int av_fifo_space(const AVFifoBuffer *f)
+{
+ return f->end - f->buffer - av_fifo_size(f);
+}
+
+int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
+{
+ unsigned int old_size = f->end - f->buffer;
+
+ if (new_size > OLD_FIFO_SIZE_MAX)
+ return AVERROR(EINVAL);
+
+ if (old_size < new_size) {
+ size_t offset_r = f->rptr - f->buffer;
+ size_t offset_w = f->wptr - f->buffer;
+ uint8_t *tmp;
+
+ tmp = av_realloc(f->buffer, new_size);
+ if (!tmp)
+ return AVERROR(ENOMEM);
+
+ // move the data from the beginning of the ring buffer
+ // to the newly allocated space
+ // the second condition distinguishes full vs empty fifo
+ if (offset_w <= offset_r && av_fifo_size(f)) {
+ const size_t copy = FFMIN(new_size - old_size, offset_w);
+ memcpy(tmp + old_size, tmp, copy);
+ if (copy < offset_w) {
+ memmove(tmp, tmp + copy , offset_w - copy);
+ offset_w -= copy;
+ } else
+ offset_w = old_size + copy;
+ }
+
+ f->buffer = tmp;
+ f->end = f->buffer + new_size;
+ f->rptr = f->buffer + offset_r;
+ f->wptr = f->buffer + offset_w;
+ }
+ return 0;
+}
+
+int av_fifo_grow(AVFifoBuffer *f, unsigned int size)
+{
+ unsigned int old_size = f->end - f->buffer;
+ if(size + (unsigned)av_fifo_size(f) < size)
+ return AVERROR(EINVAL);
+
+ size += av_fifo_size(f);
+
+ if (old_size < size)
+ return av_fifo_realloc2(f, FFMAX(size, 2*old_size));
+ return 0;
+}
+
+/* src must NOT be const as it can be a context for func that may need
+ * updating (like a pointer or byte counter) */
+int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size,
+ int (*func)(void *, void *, int))
+{
+ int total = size;
+ uint32_t wndx= f->wndx;
+ uint8_t *wptr= f->wptr;
+
+ if (size > av_fifo_space(f))
+ return AVERROR(ENOSPC);
+
+ do {
+ int len = FFMIN(f->end - wptr, size);
+ if (func) {
+ len = func(src, wptr, len);
+ if (len <= 0)
+ break;
+ } else {
+ memcpy(wptr, src, len);
+ src = (uint8_t *)src + len;
+ }
+ wptr += len;
+ if (wptr >= f->end)
+ wptr = f->buffer;
+ wndx += len;
+ size -= len;
+ } while (size > 0);
+ f->wndx= wndx;
+ f->wptr= wptr;
+ return total - size;
+}
+
+int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_size, void (*func)(void*, void*, int))
+{
+ uint8_t *rptr = f->rptr;
+
+ if (offset < 0 || buf_size > av_fifo_size(f) - offset)
+ return AVERROR(EINVAL);
+
+ if (offset >= f->end - rptr)
+ rptr += offset - (f->end - f->buffer);
+ else
+ rptr += offset;
+
+ while (buf_size > 0) {
+ int len;
+
+ if (rptr >= f->end)
+ rptr -= f->end - f->buffer;
+
+ len = FFMIN(f->end - rptr, buf_size);
+ if (func)
+ func(dest, rptr, len);
+ else {
+ memcpy(dest, rptr, len);
+ dest = (uint8_t *)dest + len;
+ }
+
+ buf_size -= len;
+ rptr += len;
+ }
+
+ return 0;
+}
+
+int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size,
+ void (*func)(void *, void *, int))
+{
+ return av_fifo_generic_peek_at(f, dest, 0, buf_size, func);
+}
+
+int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size,
+ void (*func)(void *, void *, int))
+{
+ if (buf_size > av_fifo_size(f))
+ return AVERROR(EINVAL);
+
+ do {
+ int len = FFMIN(f->end - f->rptr, buf_size);
+ if (func)
+ func(dest, f->rptr, len);
+ else {
+ memcpy(dest, f->rptr, len);
+ dest = (uint8_t *)dest + len;
+ }
+ av_fifo_drain(f, len);
+ buf_size -= len;
+ } while (buf_size > 0);
+ return 0;
+}
+
+/** Discard data from the FIFO. */
+void av_fifo_drain(AVFifoBuffer *f, int size)
+{
+ av_assert2(av_fifo_size(f) >= size);
+ f->rptr += size;
+ if (f->rptr >= f->end)
+ f->rptr -= f->end - f->buffer;
+ f->rndx += size;
+}
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
diff --git a/media/ffvpx/libavutil/fifo.h b/media/ffvpx/libavutil/fifo.h
new file mode 100644
index 0000000000..70f9376d97
--- /dev/null
+++ b/media/ffvpx/libavutil/fifo.h
@@ -0,0 +1,448 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_fifo
+ * A generic FIFO API
+ */
+
+#ifndef AVUTIL_FIFO_H
+#define AVUTIL_FIFO_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_fifo AVFifo
+ * @ingroup lavu_data
+ *
+ * @{
+ * A generic FIFO API
+ */
+
+typedef struct AVFifo AVFifo;
+
+/**
+ * Callback for writing or reading from a FIFO, passed to (and invoked from) the
+ * av_fifo_*_cb() functions. It may be invoked multiple times from a single
+ * av_fifo_*_cb() call and may process less data than the maximum size indicated
+ * by nb_elems.
+ *
+ * @param opaque the opaque pointer provided to the av_fifo_*_cb() function
+ * @param buf the buffer for reading or writing the data, depending on which
+ * av_fifo_*_cb function is called
+ * @param nb_elems On entry contains the maximum number of elements that can be
+ * read from / written into buf. On success, the callback should
+ * update it to contain the number of elements actually written.
+ *
+ * @return 0 on success, a negative error code on failure (will be returned from
+ * the invoking av_fifo_*_cb() function)
+ */
+typedef int AVFifoCB(void *opaque, void *buf, size_t *nb_elems);
+
+/**
+ * Automatically resize the FIFO on writes, so that the data fits. This
+ * automatic resizing happens up to a limit that can be modified with
+ * av_fifo_auto_grow_limit().
+ */
+#define AV_FIFO_FLAG_AUTO_GROW (1 << 0)
+
+/**
+ * Allocate and initialize an AVFifo with a given element size.
+ *
+ * @param elems initial number of elements that can be stored in the FIFO
+ * @param elem_size Size in bytes of a single element. Further operations on
+ * the returned FIFO will implicitly use this element size.
+ * @param flags a combination of AV_FIFO_FLAG_*
+ *
+ * @return newly-allocated AVFifo on success, NULL on failure
+ */
+AVFifo *av_fifo_alloc2(size_t elems, size_t elem_size,
+ unsigned int flags);
+
+/**
+ * @return Element size for FIFO operations. This element size is set at
+ * FIFO allocation and remains constant during its lifetime
+ */
+size_t av_fifo_elem_size(const AVFifo *f);
+
+/**
+ * Set the maximum size (in elements) to which the FIFO can be resized
+ * automatically. Has no effect unless AV_FIFO_FLAG_AUTO_GROW is used.
+ */
+void av_fifo_auto_grow_limit(AVFifo *f, size_t max_elems);
+
+/**
+ * @return number of elements available for reading from the given FIFO.
+ */
+size_t av_fifo_can_read(const AVFifo *f);
+
+/**
+ * @return Number of elements that can be written into the given FIFO without
+ * growing it.
+ *
+ * In other words, this number of elements or less is guaranteed to fit
+ * into the FIFO. More data may be written when the
+ * AV_FIFO_FLAG_AUTO_GROW flag was specified at FIFO creation, but this
+ * may involve memory allocation, which can fail.
+ */
+size_t av_fifo_can_write(const AVFifo *f);
+
+/**
+ * Enlarge an AVFifo.
+ *
+ * On success, the FIFO will be large enough to hold exactly
+ * inc + av_fifo_can_read() + av_fifo_can_write()
+ * elements. In case of failure, the old FIFO is kept unchanged.
+ *
+ * @param f AVFifo to resize
+ * @param inc number of elements to allocate for, in addition to the current
+ * allocated size
+ * @return a non-negative number on success, a negative error code on failure
+ */
+int av_fifo_grow2(AVFifo *f, size_t inc);
+
+/**
+ * Write data into a FIFO.
+ *
+ * In case nb_elems > av_fifo_can_write(f) and the AV_FIFO_FLAG_AUTO_GROW flag
+ * was not specified at FIFO creation, nothing is written and an error
+ * is returned.
+ *
+ * A call to this function is guaranteed to succeed if nb_elems <= av_fifo_can_write(f).
+ *
+ * @param f the FIFO buffer
+ * @param buf Data to be written. nb_elems * av_fifo_elem_size(f) bytes will be
+ * read from buf on success.
+ * @param nb_elems number of elements to write into FIFO
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ */
+int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems);
+
+/**
+ * Write data from a user-provided callback into a FIFO.
+ *
+ * @param f the FIFO buffer
+ * @param read_cb Callback supplying the data to the FIFO. May be called
+ * multiple times.
+ * @param opaque opaque user data to be provided to read_cb
+ * @param nb_elems Should point to the maximum number of elements that can be
+ * written. Will be updated to contain the number of elements
+ * actually written.
+ *
+ * @return non-negative number on success, a negative error code on failure
+ */
+int av_fifo_write_from_cb(AVFifo *f, AVFifoCB read_cb,
+ void *opaque, size_t *nb_elems);
+
+/**
+ * Read data from a FIFO.
+ *
+ * In case nb_elems > av_fifo_can_read(f), nothing is read and an error
+ * is returned.
+ *
+ * @param f the FIFO buffer
+ * @param buf Buffer to store the data. nb_elems * av_fifo_elem_size(f) bytes
+ * will be written into buf on success.
+ * @param nb_elems number of elements to read from FIFO
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ */
+int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems);
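+
+/*
+ * Editor's note: a minimal round-trip sketch (illustrative, not part of the
+ * upstream header); the element type and counts are arbitrary.
+ *
+ * @code
+ * AVFifo *f = av_fifo_alloc2(8, sizeof(int), 0);
+ * if (f) {
+ *     int in[4] = { 1, 2, 3, 4 }, out[4];
+ *     if (av_fifo_write(f, in, 4) >= 0 &&
+ *         av_fifo_read(f, out, 4) >= 0) {
+ *         // out now holds 1, 2, 3, 4 and av_fifo_can_read(f) == 0
+ *     }
+ *     av_fifo_freep2(&f);
+ * }
+ * @endcode
+ */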
+
+/**
+ * Feed data from a FIFO into a user-provided callback.
+ *
+ * @param f the FIFO buffer
+ * @param write_cb Callback the data will be supplied to. May be called
+ * multiple times.
+ * @param opaque opaque user data to be provided to write_cb
+ * @param nb_elems Should point to the maximum number of elements that can be
+ * read. Will be updated to contain the total number of elements
+ * actually sent to the callback.
+ *
+ * @return non-negative number on success, a negative error code on failure
+ */
+int av_fifo_read_to_cb(AVFifo *f, AVFifoCB write_cb,
+ void *opaque, size_t *nb_elems);
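+
+/*
+ * Editor's note: an illustrative callback sketch (not part of the upstream
+ * header). The callback consumes int-sized elements handed to it by
+ * av_fifo_read_to_cb() and sums them into the opaque pointer; all names are
+ * arbitrary choices of this example.
+ *
+ * @code
+ * static int sum_cb(void *opaque, void *buf, size_t *nb_elems)
+ * {
+ *     const int *vals = buf;
+ *     long *sum = opaque;
+ *     for (size_t i = 0; i < *nb_elems; i++)
+ *         *sum += vals[i];
+ *     return 0; // *nb_elems left unchanged: every element was consumed
+ * }
+ *
+ * // given an AVFifo *f holding ints:
+ * long total = 0;
+ * size_t n = av_fifo_can_read(f);
+ * av_fifo_read_to_cb(f, sum_cb, &total, &n);
+ * @endcode
+ */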
+
+/**
+ * Read data from a FIFO without modifying FIFO state.
+ *
+ * Returns an error if an attempt is made to peek at nonexistent elements
+ * (i.e. if offset + nb_elems is larger than av_fifo_can_read(f)).
+ *
+ * @param f the FIFO buffer
+ * @param buf Buffer to store the data. nb_elems * av_fifo_elem_size(f) bytes
+ * will be written into buf.
+ * @param nb_elems number of elements to read from FIFO
+ * @param offset number of initial elements to skip.
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ */
+int av_fifo_peek(AVFifo *f, void *buf, size_t nb_elems, size_t offset);
+
+/**
+ * Feed data from a FIFO into a user-provided callback.
+ *
+ * @param f the FIFO buffer
+ * @param write_cb Callback the data will be supplied to. May be called
+ * multiple times.
+ * @param opaque opaque user data to be provided to write_cb
+ * @param nb_elems Should point to the maximum number of elements that can be
+ * read. Will be updated to contain the total number of elements
+ * actually sent to the callback.
+ * @param offset number of initial elements to skip; offset + *nb_elems must not
+ * be larger than av_fifo_can_read(f).
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ */
+int av_fifo_peek_to_cb(AVFifo *f, AVFifoCB write_cb, void *opaque,
+ size_t *nb_elems, size_t offset);
+
+/**
+ * Discard the specified amount of data from an AVFifo.
+ * @param size number of elements to discard, MUST NOT be larger than
+ * av_fifo_can_read(f)
+ */
+void av_fifo_drain2(AVFifo *f, size_t size);
+
+/**
+ * Empty the AVFifo.
+ * @param f AVFifo to reset
+ */
+void av_fifo_reset2(AVFifo *f);
+
+/**
+ * Free an AVFifo and reset pointer to NULL.
+ * @param f Pointer to an AVFifo to free. *f == NULL is allowed.
+ */
+void av_fifo_freep2(AVFifo **f);
+
+
+#if FF_API_FIFO_OLD_API
+typedef struct AVFifoBuffer {
+ uint8_t *buffer;
+ uint8_t *rptr, *wptr, *end;
+ uint32_t rndx, wndx;
+} AVFifoBuffer;
+
+/**
+ * Initialize an AVFifoBuffer.
+ * @param size size of the FIFO in bytes
+ * @return AVFifoBuffer or NULL in case of memory allocation failure
+ * @deprecated use av_fifo_alloc2()
+ */
+attribute_deprecated
+AVFifoBuffer *av_fifo_alloc(unsigned int size);
+
+/**
+ * Initialize an AVFifoBuffer.
+ * @param nmemb number of elements
+ * @param size size of the single element
+ * @return AVFifoBuffer or NULL in case of memory allocation failure
+ * @deprecated use av_fifo_alloc2()
+ */
+attribute_deprecated
+AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size);
+
+/**
+ * Free an AVFifoBuffer.
+ * @param f AVFifoBuffer to free
+ * @deprecated use the AVFifo API with av_fifo_freep2()
+ */
+attribute_deprecated
+void av_fifo_free(AVFifoBuffer *f);
+
+/**
+ * Free an AVFifoBuffer and reset pointer to NULL.
+ * @param f AVFifoBuffer to free
+ * @deprecated use the AVFifo API with av_fifo_freep2()
+ */
+attribute_deprecated
+void av_fifo_freep(AVFifoBuffer **f);
+
+/**
+ * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied.
+ * @param f AVFifoBuffer to reset
+ * @deprecated use av_fifo_reset2() with the new AVFifo-API
+ */
+attribute_deprecated
+void av_fifo_reset(AVFifoBuffer *f);
+
+/**
+ * Return the amount of data in bytes in the AVFifoBuffer, that is the
+ * amount of data you can read from it.
+ * @param f AVFifoBuffer to read from
+ * @return size
+ * @deprecated use av_fifo_can_read() with the new AVFifo-API
+ */
+attribute_deprecated
+int av_fifo_size(const AVFifoBuffer *f);
+
+/**
+ * Return the amount of space in bytes in the AVFifoBuffer, that is the
+ * amount of data you can write into it.
+ * @param f AVFifoBuffer to write into
+ * @return size
+ * @deprecated use av_fifo_can_write() with the new AVFifo-API
+ */
+attribute_deprecated
+int av_fifo_space(const AVFifoBuffer *f);
+
+/**
+ * Feed data at a specific position from an AVFifoBuffer to a user-supplied callback.
+ * Similar to av_fifo_generic_read(), but without discarding data.
+ * @param f AVFifoBuffer to read from
+ * @param offset offset from current read position
+ * @param buf_size number of bytes to read
+ * @param func generic read function
+ * @param dest data destination
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_peek() when func == NULL,
+ * av_fifo_peek_to_cb() otherwise
+ */
+attribute_deprecated
+int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_size, void (*func)(void*, void*, int));
+
+/**
+ * Feed data from an AVFifoBuffer to a user-supplied callback.
+ * Similar to av_fifo_generic_read(), but without discarding data.
+ * @param f AVFifoBuffer to read from
+ * @param buf_size number of bytes to read
+ * @param func generic read function
+ * @param dest data destination
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_peek() when func == NULL,
+ * av_fifo_peek_to_cb() otherwise
+ */
+attribute_deprecated
+int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
+
+/**
+ * Feed data from an AVFifoBuffer to a user-supplied callback.
+ * @param f AVFifoBuffer to read from
+ * @param buf_size number of bytes to read
+ * @param func generic read function
+ * @param dest data destination
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_read() when func == NULL,
+ * av_fifo_read_to_cb() otherwise
+ */
+attribute_deprecated
+int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
+
+/**
+ * Feed data from a user-supplied callback to an AVFifoBuffer.
+ * @param f AVFifoBuffer to write to
+ * @param src data source; non-const since it may be used as a
+ * modifiable context by the function defined in func
+ * @param size number of bytes to write
+ * @param func generic write function; the first parameter is src,
+ * the second is dest_buf, the third is dest_buf_size.
+ * func must return the number of bytes written to dest_buf, or <= 0 to
+ * indicate no more data available to write.
+ * If func is NULL, src is interpreted as a simple byte array for source data.
+ * @return the number of bytes written to the FIFO or a negative error code on failure
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_write() when func == NULL,
+ * av_fifo_write_from_cb() otherwise
+ */
+attribute_deprecated
+int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));
+
+/**
+ * Resize an AVFifoBuffer.
+ * In case of reallocation failure, the old FIFO is kept unchanged.
+ *
+ * @param f AVFifoBuffer to resize
+ * @param size new AVFifoBuffer size in bytes
+ * @return <0 for failure, >=0 otherwise
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_grow2() to increase FIFO size,
+ * decreasing FIFO size is not supported
+ */
+attribute_deprecated
+int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
+
+/**
+ * Enlarge an AVFifoBuffer.
+ * In case of reallocation failure, the old FIFO is kept unchanged.
+ * The new fifo size may be larger than the requested size.
+ *
+ * @param f AVFifoBuffer to resize
+ * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size()
+ * @return <0 for failure, >=0 otherwise
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_grow2(); note that unlike
+ * this function it adds to the allocated size, rather than to the used size
+ */
+attribute_deprecated
+int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space);
+
+/**
+ * Read and discard the specified amount of data from an AVFifoBuffer.
+ * @param f AVFifoBuffer to read from
+ * @param size amount of data to read in bytes
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_drain2()
+ */
+attribute_deprecated
+void av_fifo_drain(AVFifoBuffer *f, int size);
+
+#if FF_API_FIFO_PEEK2
+/**
+ * Return a pointer to the data stored in a FIFO buffer at a certain offset.
+ * The FIFO buffer is not modified.
+ *
+ * @param f AVFifoBuffer to peek at, f must be non-NULL
+ * @param offs an offset in bytes, its absolute value must be less
+ * than the used buffer size or the returned pointer will
+ * point outside the buffer data.
+ * The used buffer size can be checked with av_fifo_size().
+ * @deprecated use the new AVFifo-API with av_fifo_peek() or av_fifo_peek_to_cb()
+ */
+attribute_deprecated
+static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
+{
+ uint8_t *ptr = f->rptr + offs;
+ if (ptr >= f->end)
+ ptr = f->buffer + (ptr - f->end);
+ else if (ptr < f->buffer)
+ ptr = f->end - (f->buffer - ptr);
+ return ptr;
+}
+#endif
+#endif
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_FIFO_H */
diff --git a/media/ffvpx/libavutil/film_grain_params.c b/media/ffvpx/libavutil/film_grain_params.c
new file mode 100644
index 0000000000..930d23c7fe
--- /dev/null
+++ b/media/ffvpx/libavutil/film_grain_params.c
@@ -0,0 +1,42 @@
+/**
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "film_grain_params.h"
+
+AVFilmGrainParams *av_film_grain_params_alloc(size_t *size)
+{
+ AVFilmGrainParams *params = av_mallocz(sizeof(AVFilmGrainParams));
+
+ if (size)
+ *size = sizeof(*params);
+
+ return params;
+}
+
+AVFilmGrainParams *av_film_grain_params_create_side_data(AVFrame *frame)
+{
+ AVFrameSideData *side_data = av_frame_new_side_data(frame,
+ AV_FRAME_DATA_FILM_GRAIN_PARAMS,
+ sizeof(AVFilmGrainParams));
+ if (!side_data)
+ return NULL;
+
+ memset(side_data->data, 0, sizeof(AVFilmGrainParams));
+
+ return (AVFilmGrainParams *)side_data->data;
+}
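+
+/* Editor's note: an illustrative sketch (not part of the upstream source),
+ * assuming the caller has a decoded AVFrame *frame and AV1-style grain
+ * parameters to attach:
+ *
+ *     AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(frame);
+ *     if (fgp) {
+ *         fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
+ *         // fill in fgp->codec.aom.* with the codec-provided parameters
+ *     }
+ */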
diff --git a/media/ffvpx/libavutil/film_grain_params.h b/media/ffvpx/libavutil/film_grain_params.h
new file mode 100644
index 0000000000..f3bd0a4a6a
--- /dev/null
+++ b/media/ffvpx/libavutil/film_grain_params.h
@@ -0,0 +1,260 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_FILM_GRAIN_PARAMS_H
+#define AVUTIL_FILM_GRAIN_PARAMS_H
+
+#include "frame.h"
+
+enum AVFilmGrainParamsType {
+ AV_FILM_GRAIN_PARAMS_NONE = 0,
+
+ /**
+ * The union is valid when interpreted as AVFilmGrainAOMParams (codec.aom)
+ */
+ AV_FILM_GRAIN_PARAMS_AV1,
+
+ /**
+ * The union is valid when interpreted as AVFilmGrainH274Params (codec.h274)
+ */
+ AV_FILM_GRAIN_PARAMS_H274,
+};
+
+/**
+ * This structure describes how to handle film grain synthesis for AOM codecs.
+ *
+ * @note The struct must be allocated as part of AVFilmGrainParams using
+ * av_film_grain_params_alloc(). Its size is not a part of the public ABI.
+ */
+typedef struct AVFilmGrainAOMParams {
+ /**
+ * Number of points, and the scale and value for each point of the
+ * piecewise linear scaling function for the luma plane.
+ */
+ int num_y_points;
+ uint8_t y_points[14][2 /* value, scaling */];
+
+ /**
+ * Signals whether to derive the chroma scaling function from the luma.
+ * Not equivalent to copying the luma values and scales.
+ */
+ int chroma_scaling_from_luma;
+
+ /**
+ * If chroma_scaling_from_luma is set to 0, signals the chroma scaling
+ * function parameters.
+ */
+ int num_uv_points[2 /* cb, cr */];
+ uint8_t uv_points[2 /* cb, cr */][10][2 /* value, scaling */];
+
+ /**
+ * Specifies the shift applied to the chroma components. For AV1, it's within
+ * [8; 11] and determines the range and quantization of the film grain.
+ */
+ int scaling_shift;
+
+ /**
+ * Specifies the auto-regression lag.
+ */
+ int ar_coeff_lag;
+
+ /**
+ * Luma auto-regression coefficients. The number of coefficients is given by
+ * 2 * ar_coeff_lag * (ar_coeff_lag + 1).
+ */
+ int8_t ar_coeffs_y[24];
+
+ /**
+ * Chroma auto-regression coefficients. The number of coefficients is given by
+ * 2 * ar_coeff_lag * (ar_coeff_lag + 1) + !!num_y_points.
+ */
+ int8_t ar_coeffs_uv[2 /* cb, cr */][25];
+
+ /**
+ * Specifies the range of the auto-regressive coefficients. Values of 6,
+ * 7, 8 and so on represent a range of [-2, 2), [-1, 1), [-0.5, 0.5) and
+ * so on. For AV1 must be between 6 and 9.
+ */
+ int ar_coeff_shift;
+
+ /**
+ * Signals the down shift applied to the generated Gaussian numbers during
+ * synthesis.
+ */
+ int grain_scale_shift;
+
+ /**
+ * Specifies the luma/chroma multipliers for the index to the component
+ * scaling function.
+ */
+ int uv_mult[2 /* cb, cr */];
+ int uv_mult_luma[2 /* cb, cr */];
+
+ /**
+ * Offset used for component scaling function. For AV1 it's a 9-bit value
+ * with a range of [-256, 255].
+ */
+ int uv_offset[2 /* cb, cr */];
+
+ /**
+ * Signals whether to overlap film grain blocks.
+ */
+ int overlap_flag;
+
+ /**
+ * Signals to clip to limited color levels after film grain application.
+ */
+ int limit_output_range;
+} AVFilmGrainAOMParams;
+
+/**
+ * This structure describes how to handle film grain synthesis for codecs using
+ * the ITU-T H.274 Versatile supplemental enhancement information message.
+ *
+ * @note The struct must be allocated as part of AVFilmGrainParams using
+ * av_film_grain_params_alloc(). Its size is not a part of the public ABI.
+ */
+typedef struct AVFilmGrainH274Params {
+ /**
+ * Specifies the film grain simulation mode.
+ * 0 = Frequency filtering, 1 = Auto-regression
+ */
+ int model_id;
+
+ /**
+ * Specifies the bit depth used for the luma component.
+ */
+ int bit_depth_luma;
+
+ /**
+ * Specifies the bit depth used for the chroma components.
+ */
+ int bit_depth_chroma;
+
+ enum AVColorRange color_range;
+ enum AVColorPrimaries color_primaries;
+ enum AVColorTransferCharacteristic color_trc;
+ enum AVColorSpace color_space;
+
+ /**
+ * Specifies the blending mode used to blend the simulated film grain
+ * with the decoded images.
+ *
+ * 0 = Additive, 1 = Multiplicative
+ */
+ int blending_mode_id;
+
+ /**
+ * Specifies a scale factor used in the film grain characterization equations.
+ */
+ int log2_scale_factor;
+
+ /**
+ * Indicates if the modelling of film grain for a given component is present.
+ */
+ int component_model_present[3 /* y, cb, cr */];
+
+ /**
+ * Specifies the number of intensity intervals for which a specific set of
+ * model values has been estimated, with a range of [1, 256].
+ */
+ uint16_t num_intensity_intervals[3 /* y, cb, cr */];
+
+ /**
+ * Specifies the number of model values present for each intensity interval
+ * in which the film grain has been modelled, with a range of [1, 6].
+ */
+ uint8_t num_model_values[3 /* y, cb, cr */];
+
+ /**
+ * Specifies the lower bound of each intensity interval for which the set of
+ * model values applies for the component.
+ */
+ uint8_t intensity_interval_lower_bound[3 /* y, cb, cr */][256 /* intensity interval */];
+
+ /**
+ * Specifies the upper bound of each intensity interval for which the set of
+ * model values applies for the component.
+ */
+ uint8_t intensity_interval_upper_bound[3 /* y, cb, cr */][256 /* intensity interval */];
+
+ /**
+ * Specifies the model values for the component for each intensity interval.
+ * - When model_id == 0, the following applies:
+ * For comp_model_value[y], the range of values is [0, 2^bit_depth_luma - 1]
+ * For comp_model_value[cb..cr], the range of values is [0, 2^bit_depth_chroma - 1]
+ * - Otherwise, the following applies:
+ * For comp_model_value[y], the range of values is [-2^(bit_depth_luma - 1), 2^(bit_depth_luma - 1) - 1]
+ * For comp_model_value[cb..cr], the range of values is [-2^(bit_depth_chroma - 1), 2^(bit_depth_chroma - 1) - 1]
+ */
+ int16_t comp_model_value[3 /* y, cb, cr */][256 /* intensity interval */][6 /* model value */];
+} AVFilmGrainH274Params;
+
+/**
+ * This structure describes how to handle film grain synthesis in video
+ * for specific codecs. Must be present on every frame where film grain is
+ * meant to be synthesised for correct presentation.
+ *
+ * @note The struct must be allocated with av_film_grain_params_alloc() and
+ * its size is not a part of the public ABI.
+ */
+typedef struct AVFilmGrainParams {
+ /**
+ * Specifies the codec for which this structure is valid.
+ */
+ enum AVFilmGrainParamsType type;
+
+ /**
+ * Seed to use for the synthesis process, if the codec allows for it.
+ *
+ * @note For H.264, this refers to `pic_offset` as defined in
+ * SMPTE RDD 5-2006.
+ */
+ uint64_t seed;
+
+ /**
+ * Additional fields may be added both here and in any structure included.
+ * If a codec's film grain structure differs slightly over another
+ * codec's, fields within may change meaning depending on the type.
+ */
+ union {
+ AVFilmGrainAOMParams aom;
+ AVFilmGrainH274Params h274;
+ } codec;
+} AVFilmGrainParams;
+
+/**
+ * Allocate an AVFilmGrainParams structure and set its fields to
+ * default values. The resulting struct can be freed using av_freep().
+ * If size is not NULL it will be set to the number of bytes allocated.
+ *
+ * @return An AVFilmGrainParams filled with default values or NULL
+ * on failure.
+ */
+AVFilmGrainParams *av_film_grain_params_alloc(size_t *size);
+
+/**
+ * Allocate a complete AVFilmGrainParams and add it to the frame.
+ *
+ * @param frame The frame to which the side data is added.
+ *
+ * @return The AVFilmGrainParams structure to be filled by caller.
+ */
+AVFilmGrainParams *av_film_grain_params_create_side_data(AVFrame *frame);
+
+#endif /* AVUTIL_FILM_GRAIN_PARAMS_H */
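
On the consuming side, the parameters travel as ordinary frame side data. A minimal sketch of reading them back from a decoded frame (the printed fields are arbitrary examples, not prescribed by the API):

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavutil/film_grain_params.h>
    #include <libavutil/frame.h>

    /* Illustrative only: inspect film grain parameters exported on a decoded frame. */
    static void report_film_grain(const AVFrame *frame)
    {
        const AVFrameSideData *sd =
            av_frame_get_side_data(frame, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
        if (!sd)
            return; /* no film grain attached to this frame */

        const AVFilmGrainParams *fgp = (const AVFilmGrainParams *)sd->data;
        switch (fgp->type) {
        case AV_FILM_GRAIN_PARAMS_AV1:
            printf("AV1 grain: %d luma points, seed %" PRIu64 "\n",
                   fgp->codec.aom.num_y_points, fgp->seed);
            break;
        case AV_FILM_GRAIN_PARAMS_H274:
            printf("H.274 grain: model_id %d\n", fgp->codec.h274.model_id);
            break;
        default:
            break;
        }
    }
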
diff --git a/media/ffvpx/libavutil/fixed_dsp.c b/media/ffvpx/libavutil/fixed_dsp.c
new file mode 100644
index 0000000000..5ab47d55d0
--- /dev/null
+++ b/media/ffvpx/libavutil/fixed_dsp.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Nedeljko Babic (nedeljko.babic imgtec com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "common.h"
+#include "fixed_dsp.h"
+
+static void vector_fmul_add_c(int *dst, const int *src0, const int *src1, const int *src2, int len){
+ int i;
+ int64_t accu;
+
+ for (i=0; i<len; i++) {
+ accu = (int64_t)src0[i] * src1[i];
+ dst[i] = src2[i] + (int)((accu + 0x40000000) >> 31);
+ }
+}
+
+static void vector_fmul_reverse_c(int *dst, const int *src0, const int *src1, int len)
+{
+ int i;
+ int64_t accu;
+
+ src1 += len-1;
+ for (i=0; i<len; i++) {
+ accu = (int64_t)src0[i] * src1[-i];
+ dst[i] = (int)((accu+0x40000000) >> 31);
+ }
+}
+
+static void vector_fmul_window_scaled_c(int16_t *dst, const int32_t *src0,
+ const int32_t *src1, const int32_t *win,
+ int len, uint8_t bits)
+{
+ int32_t s0, s1, wi, wj, i,j, round;
+
+ dst += len;
+ win += len;
+ src0+= len;
+ round = bits? 1 << (bits-1) : 0;
+
+ for (i=-len, j=len-1; i<0; i++, j--) {
+ s0 = src0[i];
+ s1 = src1[j];
+ wi = win[i];
+ wj = win[j];
+ dst[i] = av_clip_int16(((((int64_t)s0*wj - (int64_t)s1*wi + 0x40000000) >> 31) + round) >> bits);
+ dst[j] = av_clip_int16(((((int64_t)s0*wi + (int64_t)s1*wj + 0x40000000) >> 31) + round) >> bits);
+ }
+}
+
+static void vector_fmul_window_c(int32_t *dst, const int32_t *src0,
+ const int32_t *src1, const int32_t *win,
+ int len)
+{
+ int32_t s0, s1, wi, wj, i, j;
+
+ dst += len;
+ win += len;
+ src0+= len;
+
+ for (i=-len, j=len-1; i<0; i++, j--) {
+ s0 = src0[i];
+ s1 = src1[j];
+ wi = win[i];
+ wj = win[j];
+ dst[i] = ((int64_t)s0*wj - (int64_t)s1*wi + 0x40000000) >> 31;
+ dst[j] = ((int64_t)s0*wi + (int64_t)s1*wj + 0x40000000) >> 31;
+ }
+}
+
+static void vector_fmul_c(int *dst, const int *src0, const int *src1, int len)
+{
+ int i;
+ int64_t accu;
+
+ for (i = 0; i < len; i++){
+ accu = (int64_t)src0[i] * src1[i];
+ dst[i] = (int)((accu+0x40000000) >> 31);
+ }
+}
+
+static int scalarproduct_fixed_c(const int *v1, const int *v2, int len)
+{
+ /** p is initialized with 0x40000000 so that the proper rounding will occur
+ * at the end */
+ int64_t p = 0x40000000;
+ int i;
+
+ for (i = 0; i < len; i++)
+ p += (int64_t)v1[i] * v2[i];
+
+ return (int)(p >> 31);
+}
+
+static void butterflies_fixed_c(int *av_restrict v1s, int *av_restrict v2, int len)
+{
+ int i;
+ unsigned int *v1 = v1s;
+
+ for (i = 0; i < len; i++){
+ int t = v1[i] - v2[i];
+ v1[i] += v2[i];
+ v2[i] = t;
+ }
+}
+
+AVFixedDSPContext * avpriv_alloc_fixed_dsp(int bit_exact)
+{
+ AVFixedDSPContext * fdsp = av_malloc(sizeof(AVFixedDSPContext));
+
+ if (!fdsp)
+ return NULL;
+
+ fdsp->vector_fmul_window_scaled = vector_fmul_window_scaled_c;
+ fdsp->vector_fmul_window = vector_fmul_window_c;
+ fdsp->vector_fmul = vector_fmul_c;
+ fdsp->vector_fmul_add = vector_fmul_add_c;
+ fdsp->vector_fmul_reverse = vector_fmul_reverse_c;
+ fdsp->butterflies_fixed = butterflies_fixed_c;
+ fdsp->scalarproduct_fixed = scalarproduct_fixed_c;
+
+#if ARCH_RISCV
+ ff_fixed_dsp_init_riscv(fdsp);
+#elif ARCH_X86
+ ff_fixed_dsp_init_x86(fdsp);
+#endif
+
+ return fdsp;
+}
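
All of the fixed-point kernels above share one convention: operands are Q31, products are taken in 64 bits, and 0x40000000 is added before the right shift by 31 so the result is rounded to nearest rather than truncated. A standalone sketch of that single step, with values chosen purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: the Q31 multiply-with-rounding used by the kernels above. */
    static int32_t q31_mul(int32_t a, int32_t b)
    {
        int64_t accu = (int64_t)a * b;                   /* full 64-bit product */
        return (int32_t)((accu + 0x40000000) >> 31);     /* round to nearest, back to Q31 */
    }

    int main(void)
    {
        int32_t half = 1 << 30;                          /* 0.5 in Q31 */
        /* 0.5 * 0.5 = 0.25, i.e. 1 << 29 in Q31 */
        printf("0.5 * 0.5 = %d (expect %d)\n", q31_mul(half, half), 1 << 29);
        return 0;
    }
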
diff --git a/media/ffvpx/libavutil/fixed_dsp.h b/media/ffvpx/libavutil/fixed_dsp.h
new file mode 100644
index 0000000000..1217d3a53b
--- /dev/null
+++ b/media/ffvpx/libavutil/fixed_dsp.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Nedeljko Babic (nbabic@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_FIXED_DSP_H
+#define AVUTIL_FIXED_DSP_H
+
+#include <stdint.h>
+#include "config.h"
+#include "attributes.h"
+#include "libavcodec/mathops.h"
+
+typedef struct AVFixedDSPContext {
+ /* Assume len is a multiple of 16, and arrays are 32-byte aligned */
+ /* Results of multiplications are scaled down by 31 bit (and rounded) if not
+ * stated otherwise */
+
+ /**
+ * Overlap/add with window function.
+ * Result is scaled down by "bits" bits.
+ * Used primarily by MDCT-based audio codecs.
+ * Source and destination vectors must overlap exactly or not at all.
+ *
+ * @param dst result vector
+ * constraints: 16-byte aligned
+ * @param src0 first source vector
+ * constraints: 16-byte aligned
+ * @param src1 second source vector
+ * constraints: 16-byte aligned
+ * @param win half-window vector
+ * constraints: 16-byte aligned
+ * @param len length of vector
+ * constraints: multiple of 4
+ * @param bits scaling parameter
+ *
+ */
+ void (*vector_fmul_window_scaled)(int16_t *dst, const int32_t *src0, const int32_t *src1, const int32_t *win, int len, uint8_t bits);
+
+ /**
+ * Overlap/add with window function.
+ * Used primarily by MDCT-based audio codecs.
+ * Source and destination vectors must overlap exactly or not at all.
+ *
+ * @param dst result vector
+ * constraints: 32-byte aligned
+ * @param src0 first source vector
+ * constraints: 16-byte aligned
+ * @param src1 second source vector
+ * constraints: 16-byte aligned
+ * @param win half-window vector
+ * constraints: 16-byte aligned
+ * @param len length of vector
+ * constraints: multiple of 4
+ */
+ void (*vector_fmul_window)(int32_t *dst, const int32_t *src0, const int32_t *src1, const int32_t *win, int len);
+
+ /**
+ * Fixed-point multiplication that calculates the entry wise product of two
+ * vectors of integers and stores the result in a vector of integers.
+ *
+ * @param dst output vector
+ * constraints: 32-byte aligned
+ * @param src0 first input vector
+ * constraints: 32-byte aligned
+ * @param src1 second input vector
+ * constraints: 32-byte aligned
+ * @param len number of elements in the input
+ * constraints: multiple of 16
+ */
+ void (*vector_fmul)(int *dst, const int *src0, const int *src1,
+ int len);
+
+ void (*vector_fmul_reverse)(int *dst, const int *src0, const int *src1, int len);
+ /**
+ * Calculate the entry wise product of two vectors of integers, add a third vector of
+ * integers and store the result in a vector of integers.
+ *
+ * @param dst output vector
+ * constraints: 32-byte aligned
+ * @param src0 first input vector
+ * constraints: 32-byte aligned
+ * @param src1 second input vector
+ * constraints: 32-byte aligned
+ * @param src2 third input vector
+ * constraints: 32-byte aligned
+ * @param len number of elements in the input
+ * constraints: multiple of 16
+ */
+ void (*vector_fmul_add)(int *dst, const int *src0, const int *src1,
+ const int *src2, int len);
+
+ /**
+ * Calculate the scalar product of two vectors of integers.
+ *
+ * @param v1 first vector, 16-byte aligned
+ * @param v2 second vector, 16-byte aligned
+ * @param len length of vectors, multiple of 4
+ *
+ * @return sum of elementwise products
+ */
+ int (*scalarproduct_fixed)(const int *v1, const int *v2, int len);
+
+ /**
+ * Calculate the sum and difference of two vectors of integers.
+ *
+ * @param v1 first input vector, sum output, 16-byte aligned
+ * @param v2 second input vector, difference output, 16-byte aligned
+ * @param len length of vectors, multiple of 4
+ */
+ void (*butterflies_fixed)(int *av_restrict v1, int *av_restrict v2, int len);
+} AVFixedDSPContext;
+
+/**
+ * Allocate and initialize a fixed DSP context.
+ * note: should be freed with an av_free call when no longer needed.
+ *
+ * @param strict setting to non-zero avoids using functions which may not be IEEE-754 compliant
+ */
+AVFixedDSPContext * avpriv_alloc_fixed_dsp(int strict);
+
+void ff_fixed_dsp_init_riscv(AVFixedDSPContext *fdsp);
+void ff_fixed_dsp_init_x86(AVFixedDSPContext *fdsp);
+
+/**
+ * Calculate the square root
+ *
+ * @param x input fixed point number
+ *
+ * @param bits format of fixed point number (32 - bits).bits
+ *
+ * note: the input is normalized to a (0, 1) fixed point value
+ */
+
+static av_always_inline int fixed_sqrt(int x, int bits)
+{
+ int retval, bit_mask, guess, square, i;
+ int64_t accu;
+ int shift1 = 30 - bits;
+ int shift2 = bits - 15;
+
+ if (shift1 > 0) retval = ff_sqrt(x << shift1);
+ else retval = ff_sqrt(x >> -shift1);
+
+ if (shift2 > 0) {
+ retval = retval << shift2;
+ bit_mask = (1 << (shift2 - 1));
+
+ for (i=0; i<shift2; i++){
+ guess = retval + bit_mask;
+ accu = (int64_t)guess * guess;
+ square = (int)((accu + bit_mask) >> bits);
+ if (x >= square)
+ retval += bit_mask;
+ bit_mask >>= 1;
+ }
+
+ }
+ else retval >>= (-shift2);
+
+ return retval;
+}
+
+#endif /* AVUTIL_FIXED_DSP_H */
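
A minimal sketch of driving the context itself, in the internal style FFmpeg code uses (fixed_dsp.h is not a public header, avpriv_* symbols are not public API, and av_malloc is used here on the assumption that it satisfies the documented alignment constraints):

    #include "libavutil/fixed_dsp.h"
    #include "libavutil/mem.h"

    /* Illustrative only (internal-style usage): multiply two Q31 vectors element-wise. */
    static int demo_fixed_vector_fmul(void)
    {
        const int len = 16;                        /* multiple of 16, as the contract requires */
        int *a   = av_malloc(len * sizeof(*a));    /* av_malloc returns suitably aligned memory */
        int *b   = av_malloc(len * sizeof(*b));
        int *out = av_malloc(len * sizeof(*out));
        AVFixedDSPContext *fdsp = avpriv_alloc_fixed_dsp(0);
        int ret = 0;

        if (!a || !b || !out || !fdsp) {
            ret = -1;
            goto end;
        }
        for (int i = 0; i < len; i++) {
            a[i] = 1 << 30;                        /* 0.5 in Q31 */
            b[i] = 1 << 29;                        /* 0.25 in Q31 */
        }
        fdsp->vector_fmul(out, a, b, len);         /* out[i] is 0.125 in Q31, i.e. 1 << 28 */
    end:
        av_free(fdsp);
        av_free(a);
        av_free(b);
        av_free(out);
        return ret;
    }
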
diff --git a/media/ffvpx/libavutil/float_dsp.c b/media/ffvpx/libavutil/float_dsp.c
new file mode 100644
index 0000000000..742dd679d2
--- /dev/null
+++ b/media/ffvpx/libavutil/float_dsp.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2005 Balatoni Denes
+ * Copyright 2006 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "attributes.h"
+#include "float_dsp.h"
+#include "mem.h"
+
+static void vector_fmul_c(float *dst, const float *src0, const float *src1,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ dst[i] = src0[i] * src1[i];
+}
+
+static void vector_dmul_c(double *dst, const double *src0, const double *src1,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ dst[i] = src0[i] * src1[i];
+}
+
+static void vector_fmac_scalar_c(float *dst, const float *src, float mul,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ dst[i] += src[i] * mul;
+}
+
+static void vector_dmac_scalar_c(double *dst, const double *src, double mul,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ dst[i] += src[i] * mul;
+}
+
+static void vector_fmul_scalar_c(float *dst, const float *src, float mul,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ dst[i] = src[i] * mul;
+}
+
+static void vector_dmul_scalar_c(double *dst, const double *src, double mul,
+ int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ dst[i] = src[i] * mul;
+}
+
+static void vector_fmul_window_c(float *dst, const float *src0,
+ const float *src1, const float *win, int len)
+{
+ int i, j;
+
+ dst += len;
+ win += len;
+ src0 += len;
+
+ for (i = -len, j = len - 1; i < 0; i++, j--) {
+ float s0 = src0[i];
+ float s1 = src1[j];
+ float wi = win[i];
+ float wj = win[j];
+ dst[i] = s0 * wj - s1 * wi;
+ dst[j] = s0 * wi + s1 * wj;
+ }
+}
+
+static void vector_fmul_add_c(float *dst, const float *src0, const float *src1,
+ const float *src2, int len){
+ int i;
+
+ for (i = 0; i < len; i++)
+ dst[i] = src0[i] * src1[i] + src2[i];
+}
+
+static void vector_fmul_reverse_c(float *dst, const float *src0,
+ const float *src1, int len)
+{
+ int i;
+
+ src1 += len-1;
+ for (i = 0; i < len; i++)
+ dst[i] = src0[i] * src1[-i];
+}
+
+static void butterflies_float_c(float *av_restrict v1, float *av_restrict v2,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ float t = v1[i] - v2[i];
+ v1[i] += v2[i];
+ v2[i] = t;
+ }
+}
+
+float avpriv_scalarproduct_float_c(const float *v1, const float *v2, int len)
+{
+ float p = 0.0;
+ int i;
+
+ for (i = 0; i < len; i++)
+ p += v1[i] * v2[i];
+
+ return p;
+}
+
+av_cold AVFloatDSPContext *avpriv_float_dsp_alloc(int bit_exact)
+{
+ AVFloatDSPContext *fdsp = av_mallocz(sizeof(AVFloatDSPContext));
+ if (!fdsp)
+ return NULL;
+
+ fdsp->vector_fmul = vector_fmul_c;
+ fdsp->vector_dmul = vector_dmul_c;
+ fdsp->vector_fmac_scalar = vector_fmac_scalar_c;
+ fdsp->vector_fmul_scalar = vector_fmul_scalar_c;
+ fdsp->vector_dmac_scalar = vector_dmac_scalar_c;
+ fdsp->vector_dmul_scalar = vector_dmul_scalar_c;
+ fdsp->vector_fmul_window = vector_fmul_window_c;
+ fdsp->vector_fmul_add = vector_fmul_add_c;
+ fdsp->vector_fmul_reverse = vector_fmul_reverse_c;
+ fdsp->butterflies_float = butterflies_float_c;
+ fdsp->scalarproduct_float = avpriv_scalarproduct_float_c;
+
+#if ARCH_AARCH64
+ ff_float_dsp_init_aarch64(fdsp);
+#elif ARCH_ARM
+ ff_float_dsp_init_arm(fdsp);
+#elif ARCH_PPC
+ ff_float_dsp_init_ppc(fdsp, bit_exact);
+#elif ARCH_RISCV
+ ff_float_dsp_init_riscv(fdsp);
+#elif ARCH_X86
+ ff_float_dsp_init_x86(fdsp);
+#elif ARCH_MIPS
+ ff_float_dsp_init_mips(fdsp);
+#endif
+ return fdsp;
+}
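
A minimal sketch of the float counterpart: allocate a context, run one kernel over aligned buffers, free everything. This is internal-style usage; buffer contents and lengths are arbitrary, and av_malloc is assumed to provide the required alignment:

    #include "libavutil/float_dsp.h"
    #include "libavutil/mem.h"

    /* Illustrative only: dst[i] = src0[i] * src1[i] + src2[i] over aligned buffers. */
    static int demo_float_fmul_add(void)
    {
        const int len = 16;                           /* multiple of 16 per the contract */
        float *dst  = av_malloc(len * sizeof(*dst));
        float *src0 = av_malloc(len * sizeof(*src0));
        float *src1 = av_malloc(len * sizeof(*src1));
        float *src2 = av_malloc(len * sizeof(*src2));
        AVFloatDSPContext *fdsp = avpriv_float_dsp_alloc(0); /* 0: allow non-bit-exact paths */
        int ret = 0;

        if (!dst || !src0 || !src1 || !src2 || !fdsp) {
            ret = -1;
            goto end;
        }
        for (int i = 0; i < len; i++) {
            src0[i] = 0.5f;
            src1[i] = 2.0f;
            src2[i] = 1.0f;
        }
        fdsp->vector_fmul_add(dst, src0, src1, src2, len); /* dst[i] == 2.0f */
    end:
        av_free(fdsp);
        av_free(dst);
        av_free(src0);
        av_free(src1);
        av_free(src2);
        return ret;
    }
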
diff --git a/media/ffvpx/libavutil/float_dsp.h b/media/ffvpx/libavutil/float_dsp.h
new file mode 100644
index 0000000000..7cad9fc622
--- /dev/null
+++ b/media/ffvpx/libavutil/float_dsp.h
@@ -0,0 +1,219 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_FLOAT_DSP_H
+#define AVUTIL_FLOAT_DSP_H
+
+#include "config.h"
+
+typedef struct AVFloatDSPContext {
+ /**
+ * Calculate the entry wise product of two vectors of floats and store the result in
+ * a vector of floats.
+ *
+ * @param dst output vector
+ * constraints: 32-byte aligned
+ * @param src0 first input vector
+ * constraints: 32-byte aligned
+ * @param src1 second input vector
+ * constraints: 32-byte aligned
+ * @param len number of elements in the input
+ * constraints: multiple of 16
+ */
+ void (*vector_fmul)(float *dst, const float *src0, const float *src1,
+ int len);
+
+ /**
+ * Multiply a vector of floats by a scalar float and add to
+ * destination vector. Source and destination vectors must
+ * overlap exactly or not at all.
+ *
+ * @param dst result vector
+ * constraints: 32-byte aligned
+ * @param src input vector
+ * constraints: 32-byte aligned
+ * @param mul scalar value
+ * @param len length of vector
+ * constraints: multiple of 16
+ */
+ void (*vector_fmac_scalar)(float *dst, const float *src, float mul,
+ int len);
+
+ /**
+ * Multiply a vector of doubles by a scalar double and add to
+ * destination vector. Source and destination vectors must
+ * overlap exactly or not at all.
+ *
+ * @param dst result vector
+ * constraints: 32-byte aligned
+ * @param src input vector
+ * constraints: 32-byte aligned
+ * @param mul scalar value
+ * @param len length of vector
+ * constraints: multiple of 16
+ */
+ void (*vector_dmac_scalar)(double *dst, const double *src, double mul,
+ int len);
+
+ /**
+ * Multiply a vector of floats by a scalar float. Source and
+ * destination vectors must overlap exactly or not at all.
+ *
+ * @param dst result vector
+ * constraints: 16-byte aligned
+ * @param src input vector
+ * constraints: 16-byte aligned
+ * @param mul scalar value
+ * @param len length of vector
+ * constraints: multiple of 4
+ */
+ void (*vector_fmul_scalar)(float *dst, const float *src, float mul,
+ int len);
+
+ /**
+ * Multiply a vector of doubles by a scalar double. Source and
+ * destination vectors must overlap exactly or not at all.
+ *
+ * @param dst result vector
+ * constraints: 32-byte aligned
+ * @param src input vector
+ * constraints: 32-byte aligned
+ * @param mul scalar value
+ * @param len length of vector
+ * constraints: multiple of 8
+ */
+ void (*vector_dmul_scalar)(double *dst, const double *src, double mul,
+ int len);
+
+ /**
+ * Overlap/add with window function.
+ * Used primarily by MDCT-based audio codecs.
+ * Source and destination vectors must overlap exactly or not at all.
+ *
+ * @param dst result vector
+ * constraints: 16-byte aligned
+ * @param src0 first source vector
+ * constraints: 16-byte aligned
+ * @param src1 second source vector
+ * constraints: 16-byte aligned
+ * @param win half-window vector
+ * constraints: 16-byte aligned
+ * @param len length of vector
+ * constraints: multiple of 4
+ */
+ void (*vector_fmul_window)(float *dst, const float *src0,
+ const float *src1, const float *win, int len);
+
+ /**
+ * Calculate the entry wise product of two vectors of floats, add a third vector of
+ * floats and store the result in a vector of floats.
+ *
+ * @param dst output vector
+ * constraints: 32-byte aligned
+ * @param src0 first input vector
+ * constraints: 32-byte aligned
+ * @param src1 second input vector
+ * constraints: 32-byte aligned
+ * @param src2 third input vector
+ * constraints: 32-byte aligned
+ * @param len number of elements in the input
+ * constraints: multiple of 16
+ */
+ void (*vector_fmul_add)(float *dst, const float *src0, const float *src1,
+ const float *src2, int len);
+
+ /**
+ * Calculate the entry wise product of two vectors of floats, and store the result
+ * in a vector of floats. The second vector of floats is iterated over
+ * in reverse order.
+ *
+ * @param dst output vector
+ * constraints: 32-byte aligned
+ * @param src0 first input vector
+ * constraints: 32-byte aligned
+ * @param src1 second input vector
+ * constraints: 32-byte aligned
+ * @param len number of elements in the input
+ * constraints: multiple of 16
+ */
+ void (*vector_fmul_reverse)(float *dst, const float *src0,
+ const float *src1, int len);
+
+ /**
+ * Calculate the sum and difference of two vectors of floats.
+ *
+ * @param v1 first input vector, sum output, 16-byte aligned
+ * @param v2 second input vector, difference output, 16-byte aligned
+ * @param len length of vectors, multiple of 4
+ */
+ void (*butterflies_float)(float *av_restrict v1, float *av_restrict v2, int len);
+
+ /**
+ * Calculate the scalar product of two vectors of floats.
+ *
+ * @param v1 first vector, 16-byte aligned
+ * @param v2 second vector, 16-byte aligned
+ * @param len length of vectors, multiple of 4
+ *
+ * @return sum of elementwise products
+ */
+ float (*scalarproduct_float)(const float *v1, const float *v2, int len);
+
+ /**
+ * Calculate the entry wise product of two vectors of doubles and store the result in
+ * a vector of doubles.
+ *
+ * @param dst output vector
+ * constraints: 32-byte aligned
+ * @param src0 first input vector
+ * constraints: 32-byte aligned
+ * @param src1 second input vector
+ * constraints: 32-byte aligned
+ * @param len number of elements in the input
+ * constraints: multiple of 16
+ */
+ void (*vector_dmul)(double *dst, const double *src0, const double *src1,
+ int len);
+} AVFloatDSPContext;
+
+/**
+ * Return the scalar product of two vectors.
+ *
+ * @param v1 first input vector
+ * @param v2 second input vector
+ * @param len number of elements
+ *
+ * @return sum of elementwise products
+ */
+float avpriv_scalarproduct_float_c(const float *v1, const float *v2, int len);
+
+void ff_float_dsp_init_aarch64(AVFloatDSPContext *fdsp);
+void ff_float_dsp_init_arm(AVFloatDSPContext *fdsp);
+void ff_float_dsp_init_ppc(AVFloatDSPContext *fdsp, int strict);
+void ff_float_dsp_init_riscv(AVFloatDSPContext *fdsp);
+void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp);
+void ff_float_dsp_init_mips(AVFloatDSPContext *fdsp);
+
+/**
+ * Allocate a float DSP context.
+ *
+ * @param strict setting to non-zero avoids using functions which may not be IEEE-754 compliant
+ */
+AVFloatDSPContext *avpriv_float_dsp_alloc(int strict);
+
+#endif /* AVUTIL_FLOAT_DSP_H */
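
The terser entries, such as butterflies_float, are easiest to read from a tiny example. A sketch of its in-place sum/difference behaviour (again internal-style usage, with av_malloc standing in for properly aligned buffers and the input values chosen only for illustration):

    #include <stdio.h>
    #include "libavutil/float_dsp.h"
    #include "libavutil/mem.h"

    /* Illustrative only: butterflies_float replaces (v1, v2) with (v1 + v2, v1 - v2) in place. */
    static void demo_butterflies(void)
    {
        const int len = 4;                               /* multiple of 4 per the contract */
        float *v1 = av_malloc(len * sizeof(*v1));
        float *v2 = av_malloc(len * sizeof(*v2));
        AVFloatDSPContext *fdsp = avpriv_float_dsp_alloc(1); /* 1: prefer bit-exact paths */

        if (v1 && v2 && fdsp) {
            for (int i = 0; i < len; i++) {
                v1[i] = i + 1.0f;                        /* 1 2 3 4 */
                v2[i] = 4.0f - i;                        /* 4 3 2 1 */
            }
            fdsp->butterflies_float(v1, v2, len);        /* v1 = {5,5,5,5}, v2 = {-3,-1,1,3} */
            printf("%g %g\n", v1[0], v2[0]);
        }
        av_free(fdsp);
        av_free(v1);
        av_free(v2);
    }
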
diff --git a/media/ffvpx/libavutil/frame.c b/media/ffvpx/libavutil/frame.c
new file mode 100644
index 0000000000..9545477acc
--- /dev/null
+++ b/media/ffvpx/libavutil/frame.c
@@ -0,0 +1,928 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "channel_layout.h"
+#include "avassert.h"
+#include "buffer.h"
+#include "common.h"
+#include "cpu.h"
+#include "dict.h"
+#include "frame.h"
+#include "imgutils.h"
+#include "mem.h"
+#include "samplefmt.h"
+#include "hwcontext.h"
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+#define CHECK_CHANNELS_CONSISTENCY(frame) \
+ av_assert2(!(frame)->channel_layout || \
+ (frame)->channels == \
+ av_get_channel_layout_nb_channels((frame)->channel_layout))
+#endif
+
+static void get_frame_defaults(AVFrame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->pts =
+ frame->pkt_dts = AV_NOPTS_VALUE;
+ frame->best_effort_timestamp = AV_NOPTS_VALUE;
+ frame->duration = 0;
+#if FF_API_PKT_DURATION
+FF_DISABLE_DEPRECATION_WARNINGS
+ frame->pkt_duration = 0;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ frame->pkt_pos = -1;
+ frame->pkt_size = -1;
+ frame->time_base = (AVRational){ 0, 1 };
+ frame->key_frame = 1;
+ frame->sample_aspect_ratio = (AVRational){ 0, 1 };
+ frame->format = -1; /* unknown */
+ frame->extended_data = frame->data;
+ frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
+ frame->color_trc = AVCOL_TRC_UNSPECIFIED;
+ frame->colorspace = AVCOL_SPC_UNSPECIFIED;
+ frame->color_range = AVCOL_RANGE_UNSPECIFIED;
+ frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
+ frame->flags = 0;
+}
+
+static void free_side_data(AVFrameSideData **ptr_sd)
+{
+ AVFrameSideData *sd = *ptr_sd;
+
+ av_buffer_unref(&sd->buf);
+ av_dict_free(&sd->metadata);
+ av_freep(ptr_sd);
+}
+
+static void wipe_side_data(AVFrame *frame)
+{
+ int i;
+
+ for (i = 0; i < frame->nb_side_data; i++) {
+ free_side_data(&frame->side_data[i]);
+ }
+ frame->nb_side_data = 0;
+
+ av_freep(&frame->side_data);
+}
+
+AVFrame *av_frame_alloc(void)
+{
+ AVFrame *frame = av_malloc(sizeof(*frame));
+
+ if (!frame)
+ return NULL;
+
+ get_frame_defaults(frame);
+
+ return frame;
+}
+
+void av_frame_free(AVFrame **frame)
+{
+ if (!frame || !*frame)
+ return;
+
+ av_frame_unref(*frame);
+ av_freep(frame);
+}
+
+static int get_video_buffer(AVFrame *frame, int align)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ int ret, i, padded_height, total_size;
+ int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
+ ptrdiff_t linesizes[4];
+ size_t sizes[4];
+
+ if (!desc)
+ return AVERROR(EINVAL);
+
+ if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
+ return ret;
+
+ if (!frame->linesize[0]) {
+ if (align <= 0)
+ align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */
+
+ for(i=1; i<=align; i+=i) {
+ ret = av_image_fill_linesizes(frame->linesize, frame->format,
+ FFALIGN(frame->width, i));
+ if (ret < 0)
+ return ret;
+ if (!(frame->linesize[0] & (align-1)))
+ break;
+ }
+
+ for (i = 0; i < 4 && frame->linesize[i]; i++)
+ frame->linesize[i] = FFALIGN(frame->linesize[i], align);
+ }
+
+ for (i = 0; i < 4; i++)
+ linesizes[i] = frame->linesize[i];
+
+ padded_height = FFALIGN(frame->height, 32);
+ if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
+ padded_height, linesizes)) < 0)
+ return ret;
+
+ total_size = 4*plane_padding;
+ for (i = 0; i < 4; i++) {
+ if (sizes[i] > INT_MAX - total_size)
+ return AVERROR(EINVAL);
+ total_size += sizes[i];
+ }
+
+ frame->buf[0] = av_buffer_alloc(total_size);
+ if (!frame->buf[0]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
+ frame->buf[0]->data, frame->linesize)) < 0)
+ goto fail;
+
+ for (i = 1; i < 4; i++) {
+ if (frame->data[i])
+ frame->data[i] += i * plane_padding;
+ }
+
+ frame->extended_data = frame->data;
+
+ return 0;
+fail:
+ av_frame_unref(frame);
+ return ret;
+}
+
+static int get_audio_buffer(AVFrame *frame, int align)
+{
+ int planar = av_sample_fmt_is_planar(frame->format);
+ int channels, planes;
+ int ret, i;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!frame->ch_layout.nb_channels) {
+ if (frame->channel_layout) {
+ av_channel_layout_from_mask(&frame->ch_layout, frame->channel_layout);
+ } else {
+ frame->ch_layout.nb_channels = frame->channels;
+ frame->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ }
+ }
+ frame->channels = frame->ch_layout.nb_channels;
+ frame->channel_layout = frame->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ frame->ch_layout.u.mask : 0;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ channels = frame->ch_layout.nb_channels;
+ planes = planar ? channels : 1;
+ if (!frame->linesize[0]) {
+ ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
+ frame->nb_samples, frame->format,
+ align);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (planes > AV_NUM_DATA_POINTERS) {
+ frame->extended_data = av_calloc(planes,
+ sizeof(*frame->extended_data));
+ frame->extended_buf = av_calloc(planes - AV_NUM_DATA_POINTERS,
+ sizeof(*frame->extended_buf));
+ if (!frame->extended_data || !frame->extended_buf) {
+ av_freep(&frame->extended_data);
+ av_freep(&frame->extended_buf);
+ return AVERROR(ENOMEM);
+ }
+ frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
+ } else
+ frame->extended_data = frame->data;
+
+ for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
+ frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
+ if (!frame->buf[i]) {
+ av_frame_unref(frame);
+ return AVERROR(ENOMEM);
+ }
+ frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
+ }
+ for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
+ frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
+ if (!frame->extended_buf[i]) {
+ av_frame_unref(frame);
+ return AVERROR(ENOMEM);
+ }
+ frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
+ }
+ return 0;
+
+}
+
+int av_frame_get_buffer(AVFrame *frame, int align)
+{
+ if (frame->format < 0)
+ return AVERROR(EINVAL);
+
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (frame->width > 0 && frame->height > 0)
+ return get_video_buffer(frame, align);
+ else if (frame->nb_samples > 0 &&
+ (av_channel_layout_check(&frame->ch_layout)
+#if FF_API_OLD_CHANNEL_LAYOUT
+ || frame->channel_layout || frame->channels > 0
+#endif
+ ))
+ return get_audio_buffer(frame, align);
+FF_ENABLE_DEPRECATION_WARNINGS
+
+ return AVERROR(EINVAL);
+}
+
+static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
+{
+ int ret, i;
+
+ dst->key_frame = src->key_frame;
+ dst->pict_type = src->pict_type;
+ dst->sample_aspect_ratio = src->sample_aspect_ratio;
+ dst->crop_top = src->crop_top;
+ dst->crop_bottom = src->crop_bottom;
+ dst->crop_left = src->crop_left;
+ dst->crop_right = src->crop_right;
+ dst->pts = src->pts;
+ dst->duration = src->duration;
+ dst->repeat_pict = src->repeat_pict;
+ dst->interlaced_frame = src->interlaced_frame;
+ dst->top_field_first = src->top_field_first;
+ dst->palette_has_changed = src->palette_has_changed;
+ dst->sample_rate = src->sample_rate;
+ dst->opaque = src->opaque;
+ dst->pkt_dts = src->pkt_dts;
+ dst->pkt_pos = src->pkt_pos;
+ dst->pkt_size = src->pkt_size;
+#if FF_API_PKT_DURATION
+FF_DISABLE_DEPRECATION_WARNINGS
+ dst->pkt_duration = src->pkt_duration;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ dst->time_base = src->time_base;
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
+ dst->reordered_opaque = src->reordered_opaque;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ dst->quality = src->quality;
+ dst->best_effort_timestamp = src->best_effort_timestamp;
+#if FF_API_FRAME_PICTURE_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ dst->coded_picture_number = src->coded_picture_number;
+ dst->display_picture_number = src->display_picture_number;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ dst->flags = src->flags;
+ dst->decode_error_flags = src->decode_error_flags;
+ dst->color_primaries = src->color_primaries;
+ dst->color_trc = src->color_trc;
+ dst->colorspace = src->colorspace;
+ dst->color_range = src->color_range;
+ dst->chroma_location = src->chroma_location;
+
+ av_dict_copy(&dst->metadata, src->metadata, 0);
+
+ for (i = 0; i < src->nb_side_data; i++) {
+ const AVFrameSideData *sd_src = src->side_data[i];
+ AVFrameSideData *sd_dst;
+ if ( sd_src->type == AV_FRAME_DATA_PANSCAN
+ && (src->width != dst->width || src->height != dst->height))
+ continue;
+ if (force_copy) {
+ sd_dst = av_frame_new_side_data(dst, sd_src->type,
+ sd_src->size);
+ if (!sd_dst) {
+ wipe_side_data(dst);
+ return AVERROR(ENOMEM);
+ }
+ memcpy(sd_dst->data, sd_src->data, sd_src->size);
+ } else {
+ AVBufferRef *ref = av_buffer_ref(sd_src->buf);
+ sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
+ if (!sd_dst) {
+ av_buffer_unref(&ref);
+ wipe_side_data(dst);
+ return AVERROR(ENOMEM);
+ }
+ }
+ av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
+ }
+
+ ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
+ ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
+ return ret;
+}
+
+int av_frame_ref(AVFrame *dst, const AVFrame *src)
+{
+ int i, ret = 0;
+
+ av_assert1(dst->width == 0 && dst->height == 0);
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ av_assert1(dst->channels == 0);
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ av_assert1(dst->ch_layout.nb_channels == 0 &&
+ dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
+
+ dst->format = src->format;
+ dst->width = src->width;
+ dst->height = src->height;
+ dst->nb_samples = src->nb_samples;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ dst->channels = src->channels;
+ dst->channel_layout = src->channel_layout;
+ if (!av_channel_layout_check(&src->ch_layout)) {
+ if (src->channel_layout)
+ av_channel_layout_from_mask(&dst->ch_layout, src->channel_layout);
+ else {
+ dst->ch_layout.nb_channels = src->channels;
+ dst->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ }
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ ret = frame_copy_props(dst, src, 0);
+ if (ret < 0)
+ goto fail;
+
+ // this check is needed only until FF_API_OLD_CHANNEL_LAYOUT is out
+ if (av_channel_layout_check(&src->ch_layout)) {
+ ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
+ if (ret < 0)
+ goto fail;
+ }
+
+ /* duplicate the frame data if it's not refcounted */
+ if (!src->buf[0]) {
+ ret = av_frame_get_buffer(dst, 0);
+ if (ret < 0)
+ goto fail;
+
+ ret = av_frame_copy(dst, src);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+ }
+
+ /* ref the buffers */
+ for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
+ if (!src->buf[i])
+ continue;
+ dst->buf[i] = av_buffer_ref(src->buf[i]);
+ if (!dst->buf[i]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+
+ if (src->extended_buf) {
+ dst->extended_buf = av_calloc(src->nb_extended_buf,
+ sizeof(*dst->extended_buf));
+ if (!dst->extended_buf) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ dst->nb_extended_buf = src->nb_extended_buf;
+
+ for (i = 0; i < src->nb_extended_buf; i++) {
+ dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
+ if (!dst->extended_buf[i]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+ }
+
+ if (src->hw_frames_ctx) {
+ dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
+ if (!dst->hw_frames_ctx) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+
+ /* duplicate extended data */
+ if (src->extended_data != src->data) {
+ int ch = dst->ch_layout.nb_channels;
+
+ if (!ch) {
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
+ if (!dst->extended_data) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
+ } else
+ dst->extended_data = dst->data;
+
+ memcpy(dst->data, src->data, sizeof(src->data));
+ memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
+
+ return 0;
+
+fail:
+ av_frame_unref(dst);
+ return ret;
+}
+
+AVFrame *av_frame_clone(const AVFrame *src)
+{
+ AVFrame *ret = av_frame_alloc();
+
+ if (!ret)
+ return NULL;
+
+ if (av_frame_ref(ret, src) < 0)
+ av_frame_free(&ret);
+
+ return ret;
+}
+
+void av_frame_unref(AVFrame *frame)
+{
+ int i;
+
+ if (!frame)
+ return;
+
+ wipe_side_data(frame);
+
+ for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
+ av_buffer_unref(&frame->buf[i]);
+ for (i = 0; i < frame->nb_extended_buf; i++)
+ av_buffer_unref(&frame->extended_buf[i]);
+ av_freep(&frame->extended_buf);
+ av_dict_free(&frame->metadata);
+
+ av_buffer_unref(&frame->hw_frames_ctx);
+
+ av_buffer_unref(&frame->opaque_ref);
+ av_buffer_unref(&frame->private_ref);
+
+ if (frame->extended_data != frame->data)
+ av_freep(&frame->extended_data);
+
+ av_channel_layout_uninit(&frame->ch_layout);
+
+ get_frame_defaults(frame);
+}
+
+void av_frame_move_ref(AVFrame *dst, AVFrame *src)
+{
+ av_assert1(dst->width == 0 && dst->height == 0);
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ av_assert1(dst->channels == 0);
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ av_assert1(dst->ch_layout.nb_channels == 0 &&
+ dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
+
+ *dst = *src;
+ if (src->extended_data == src->data)
+ dst->extended_data = dst->data;
+ get_frame_defaults(src);
+}
+
+int av_frame_is_writable(AVFrame *frame)
+{
+ int i, ret = 1;
+
+ /* assume non-refcounted frames are not writable */
+ if (!frame->buf[0])
+ return 0;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
+ if (frame->buf[i])
+ ret &= !!av_buffer_is_writable(frame->buf[i]);
+ for (i = 0; i < frame->nb_extended_buf; i++)
+ ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
+
+ return ret;
+}
+
+int av_frame_make_writable(AVFrame *frame)
+{
+ AVFrame tmp;
+ int ret;
+
+ if (av_frame_is_writable(frame))
+ return 0;
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.format = frame->format;
+ tmp.width = frame->width;
+ tmp.height = frame->height;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ tmp.channels = frame->channels;
+ tmp.channel_layout = frame->channel_layout;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ tmp.nb_samples = frame->nb_samples;
+ ret = av_channel_layout_copy(&tmp.ch_layout, &frame->ch_layout);
+ if (ret < 0) {
+ av_frame_unref(&tmp);
+ return ret;
+ }
+
+ if (frame->hw_frames_ctx)
+ ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
+ else
+ ret = av_frame_get_buffer(&tmp, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = av_frame_copy(&tmp, frame);
+ if (ret < 0) {
+ av_frame_unref(&tmp);
+ return ret;
+ }
+
+ ret = av_frame_copy_props(&tmp, frame);
+ if (ret < 0) {
+ av_frame_unref(&tmp);
+ return ret;
+ }
+
+ av_frame_unref(frame);
+
+ *frame = tmp;
+ if (tmp.data == tmp.extended_data)
+ frame->extended_data = frame->data;
+
+ return 0;
+}
+
+int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
+{
+ return frame_copy_props(dst, src, 1);
+}
+
+AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
+{
+ uint8_t *data;
+ int planes, i;
+
+ if (frame->nb_samples) {
+ int channels = frame->ch_layout.nb_channels;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!channels) {
+ channels = frame->channels;
+ CHECK_CHANNELS_CONSISTENCY(frame);
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ if (!channels)
+ return NULL;
+ planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
+ } else
+ planes = 4;
+
+ if (plane < 0 || plane >= planes || !frame->extended_data[plane])
+ return NULL;
+ data = frame->extended_data[plane];
+
+ for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
+ AVBufferRef *buf = frame->buf[i];
+ if (data >= buf->data && data < buf->data + buf->size)
+ return buf;
+ }
+ for (i = 0; i < frame->nb_extended_buf; i++) {
+ AVBufferRef *buf = frame->extended_buf[i];
+ if (data >= buf->data && data < buf->data + buf->size)
+ return buf;
+ }
+ return NULL;
+}
+
+AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
+ enum AVFrameSideDataType type,
+ AVBufferRef *buf)
+{
+ AVFrameSideData *ret, **tmp;
+
+ if (!buf)
+ return NULL;
+
+ if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
+ return NULL;
+
+ tmp = av_realloc(frame->side_data,
+ (frame->nb_side_data + 1) * sizeof(*frame->side_data));
+ if (!tmp)
+ return NULL;
+ frame->side_data = tmp;
+
+ ret = av_mallocz(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->buf = buf;
+ ret->data = ret->buf->data;
+ ret->size = buf->size;
+ ret->type = type;
+
+ frame->side_data[frame->nb_side_data++] = ret;
+
+ return ret;
+}
+
+AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
+ enum AVFrameSideDataType type,
+ size_t size)
+{
+ AVFrameSideData *ret;
+ AVBufferRef *buf = av_buffer_alloc(size);
+ ret = av_frame_new_side_data_from_buf(frame, type, buf);
+ if (!ret)
+ av_buffer_unref(&buf);
+ return ret;
+}
+
+AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
+ enum AVFrameSideDataType type)
+{
+ int i;
+
+ for (i = 0; i < frame->nb_side_data; i++) {
+ if (frame->side_data[i]->type == type)
+ return frame->side_data[i];
+ }
+ return NULL;
+}
+
+static int frame_copy_video(AVFrame *dst, const AVFrame *src)
+{
+ const uint8_t *src_data[4];
+ int i, planes;
+
+ if (dst->width < src->width ||
+ dst->height < src->height)
+ return AVERROR(EINVAL);
+
+ if (src->hw_frames_ctx || dst->hw_frames_ctx)
+ return av_hwframe_transfer_data(dst, src, 0);
+
+ planes = av_pix_fmt_count_planes(dst->format);
+ for (i = 0; i < planes; i++)
+ if (!dst->data[i] || !src->data[i])
+ return AVERROR(EINVAL);
+
+ memcpy(src_data, src->data, sizeof(src_data));
+ av_image_copy(dst->data, dst->linesize,
+ src_data, src->linesize,
+ dst->format, src->width, src->height);
+
+ return 0;
+}
+
+static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
+{
+ int planar = av_sample_fmt_is_planar(dst->format);
+ int channels = dst->ch_layout.nb_channels;
+ int planes = planar ? channels : 1;
+ int i;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!channels || !src->ch_layout.nb_channels) {
+ if (dst->channels != src->channels ||
+ dst->channel_layout != src->channel_layout)
+ return AVERROR(EINVAL);
+ CHECK_CHANNELS_CONSISTENCY(src);
+ }
+ if (!channels) {
+ channels = dst->channels;
+ planes = planar ? channels : 1;
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (dst->nb_samples != src->nb_samples ||
+#if FF_API_OLD_CHANNEL_LAYOUT
+ (av_channel_layout_check(&dst->ch_layout) &&
+ av_channel_layout_check(&src->ch_layout) &&
+#endif
+ av_channel_layout_compare(&dst->ch_layout, &src->ch_layout))
+#if FF_API_OLD_CHANNEL_LAYOUT
+ )
+#endif
+ return AVERROR(EINVAL);
+
+ for (i = 0; i < planes; i++)
+ if (!dst->extended_data[i] || !src->extended_data[i])
+ return AVERROR(EINVAL);
+
+ av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
+ dst->nb_samples, channels, dst->format);
+
+ return 0;
+}
+
+int av_frame_copy(AVFrame *dst, const AVFrame *src)
+{
+ if (dst->format != src->format || dst->format < 0)
+ return AVERROR(EINVAL);
+
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (dst->width > 0 && dst->height > 0)
+ return frame_copy_video(dst, src);
+ else if (dst->nb_samples > 0 &&
+ (av_channel_layout_check(&dst->ch_layout)
+#if FF_API_OLD_CHANNEL_LAYOUT
+ || dst->channels > 0
+#endif
+ ))
+ return frame_copy_audio(dst, src);
+FF_ENABLE_DEPRECATION_WARNINGS
+
+ return AVERROR(EINVAL);
+}
+
+void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
+{
+ int i;
+
+ for (i = frame->nb_side_data - 1; i >= 0; i--) {
+ AVFrameSideData *sd = frame->side_data[i];
+ if (sd->type == type) {
+ free_side_data(&frame->side_data[i]);
+ frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
+ frame->nb_side_data--;
+ }
+ }
+}
+
+const char *av_frame_side_data_name(enum AVFrameSideDataType type)
+{
+ switch(type) {
+ case AV_FRAME_DATA_PANSCAN: return "AVPanScan";
+ case AV_FRAME_DATA_A53_CC: return "ATSC A53 Part 4 Closed Captions";
+ case AV_FRAME_DATA_STEREO3D: return "Stereo 3D";
+ case AV_FRAME_DATA_MATRIXENCODING: return "AVMatrixEncoding";
+ case AV_FRAME_DATA_DOWNMIX_INFO: return "Metadata relevant to a downmix procedure";
+ case AV_FRAME_DATA_REPLAYGAIN: return "AVReplayGain";
+ case AV_FRAME_DATA_DISPLAYMATRIX: return "3x3 displaymatrix";
+ case AV_FRAME_DATA_AFD: return "Active format description";
+ case AV_FRAME_DATA_MOTION_VECTORS: return "Motion vectors";
+ case AV_FRAME_DATA_SKIP_SAMPLES: return "Skip samples";
+ case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: return "Audio service type";
+ case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
+ case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata";
+ case AV_FRAME_DATA_GOP_TIMECODE: return "GOP timecode";
+ case AV_FRAME_DATA_S12M_TIMECODE: return "SMPTE 12-1 timecode";
+ case AV_FRAME_DATA_SPHERICAL: return "Spherical Mapping";
+ case AV_FRAME_DATA_ICC_PROFILE: return "ICC profile";
+ case AV_FRAME_DATA_DYNAMIC_HDR_PLUS: return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
+ case AV_FRAME_DATA_DYNAMIC_HDR_VIVID: return "HDR Dynamic Metadata CUVA 005.1 2021 (Vivid)";
+ case AV_FRAME_DATA_REGIONS_OF_INTEREST: return "Regions Of Interest";
+ case AV_FRAME_DATA_VIDEO_ENC_PARAMS: return "Video encoding parameters";
+ case AV_FRAME_DATA_SEI_UNREGISTERED: return "H.26[45] User Data Unregistered SEI message";
+ case AV_FRAME_DATA_FILM_GRAIN_PARAMS: return "Film grain parameters";
+ case AV_FRAME_DATA_DETECTION_BBOXES: return "Bounding boxes for object detection and classification";
+ case AV_FRAME_DATA_DOVI_RPU_BUFFER: return "Dolby Vision RPU Data";
+ case AV_FRAME_DATA_DOVI_METADATA: return "Dolby Vision Metadata";
+ case AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT: return "Ambient viewing environment";
+ }
+ return NULL;
+}
+
+static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
+ const AVPixFmtDescriptor *desc)
+{
+ int i, j;
+
+ for (i = 0; frame->data[i]; i++) {
+ const AVComponentDescriptor *comp = NULL;
+ int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
+ int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
+
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL && i == 1) {
+ offsets[i] = 0;
+ break;
+ }
+
+ /* find any component descriptor for this plane */
+ for (j = 0; j < desc->nb_components; j++) {
+ if (desc->comp[j].plane == i) {
+ comp = &desc->comp[j];
+ break;
+ }
+ }
+ if (!comp)
+ return AVERROR_BUG;
+
+ offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
+ (frame->crop_left >> shift_x) * comp->step;
+ }
+
+ return 0;
+}
+
+int av_frame_apply_cropping(AVFrame *frame, int flags)
+{
+ const AVPixFmtDescriptor *desc;
+ size_t offsets[4];
+ int i;
+
+ if (!(frame->width > 0 && frame->height > 0))
+ return AVERROR(EINVAL);
+
+ if (frame->crop_left >= INT_MAX - frame->crop_right ||
+ frame->crop_top >= INT_MAX - frame->crop_bottom ||
+ (frame->crop_left + frame->crop_right) >= frame->width ||
+ (frame->crop_top + frame->crop_bottom) >= frame->height)
+ return AVERROR(ERANGE);
+
+ desc = av_pix_fmt_desc_get(frame->format);
+ if (!desc)
+ return AVERROR_BUG;
+
+ /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
+ * formats cannot be easily handled here either (and corresponding decoders
+ * should not export any cropping anyway), so do the same for those as well.
+ */
+ if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
+ frame->width -= frame->crop_right;
+ frame->height -= frame->crop_bottom;
+ frame->crop_right = 0;
+ frame->crop_bottom = 0;
+ return 0;
+ }
+
+ /* calculate the offsets for each plane */
+ calc_cropping_offsets(offsets, frame, desc);
+
+ /* adjust the offsets to avoid breaking alignment */
+ if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
+ int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
+ int min_log2_align = INT_MAX;
+
+ for (i = 0; frame->data[i]; i++) {
+ int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
+ min_log2_align = FFMIN(log2_align, min_log2_align);
+ }
+
+ /* we assume, and it should always be true, that the data alignment is
+ * related to the cropping alignment by a constant power-of-2 factor */
+ if (log2_crop_align < min_log2_align)
+ return AVERROR_BUG;
+
+ if (min_log2_align < 5) {
+ frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
+ calc_cropping_offsets(offsets, frame, desc);
+ }
+ }
+
+ for (i = 0; frame->data[i]; i++)
+ frame->data[i] += offsets[i];
+
+ frame->width -= (frame->crop_left + frame->crop_right);
+ frame->height -= (frame->crop_top + frame->crop_bottom);
+ frame->crop_left = 0;
+ frame->crop_right = 0;
+ frame->crop_top = 0;
+ frame->crop_bottom = 0;
+
+ return 0;
+}
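
frame.c above implements the reference-counting model: data lives in AVBufferRefs, av_frame_ref() adds references, and av_frame_make_writable() copies only when the buffers are shared. A minimal lifecycle sketch using the public API (dimensions and pixel format are arbitrary):

    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    /* Illustrative only: allocate a refcounted video frame, share it, then release both refs. */
    static int demo_frame_lifecycle(void)
    {
        int ret;
        AVFrame *frame = av_frame_alloc();
        AVFrame *copy  = av_frame_alloc();

        if (!frame || !copy) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        frame->format = AV_PIX_FMT_YUV420P;
        frame->width  = 640;
        frame->height = 480;

        ret = av_frame_get_buffer(frame, 0);     /* 0: let libavutil pick the alignment */
        if (ret < 0)
            goto end;

        ret = av_frame_ref(copy, frame);         /* second reference to the same buffers */
        if (ret < 0)
            goto end;

        ret = av_frame_make_writable(frame);     /* copies the data, since it is now shared */
    end:
        av_frame_free(&frame);                   /* unrefs and frees; copy keeps its reference */
        av_frame_free(&copy);
        return ret;
    }
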
diff --git a/media/ffvpx/libavutil/frame.h b/media/ffvpx/libavutil/frame.h
new file mode 100644
index 0000000000..2580269549
--- /dev/null
+++ b/media/ffvpx/libavutil/frame.h
@@ -0,0 +1,964 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_frame
+ * reference-counted frame API
+ */
+
+#ifndef AVUTIL_FRAME_H
+#define AVUTIL_FRAME_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "avutil.h"
+#include "buffer.h"
+#include "channel_layout.h"
+#include "dict.h"
+#include "rational.h"
+#include "samplefmt.h"
+#include "pixfmt.h"
+#include "version.h"
+
+
+/**
+ * @defgroup lavu_frame AVFrame
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVFrame is an abstraction for reference-counted raw multimedia data.
+ */
+
+enum AVFrameSideDataType {
+ /**
+ * The data is the AVPanScan struct defined in libavcodec.
+ */
+ AV_FRAME_DATA_PANSCAN,
+ /**
+ * ATSC A53 Part 4 Closed Captions.
+ * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.
+ * The number of bytes of CC data is AVFrameSideData.size.
+ */
+ AV_FRAME_DATA_A53_CC,
+ /**
+ * Stereoscopic 3d metadata.
+ * The data is the AVStereo3D struct defined in libavutil/stereo3d.h.
+ */
+ AV_FRAME_DATA_STEREO3D,
+ /**
+ * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
+ */
+ AV_FRAME_DATA_MATRIXENCODING,
+ /**
+ * Metadata relevant to a downmix procedure.
+ * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
+ */
+ AV_FRAME_DATA_DOWNMIX_INFO,
+ /**
+ * ReplayGain information in the form of the AVReplayGain struct.
+ */
+ AV_FRAME_DATA_REPLAYGAIN,
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the frame for correct
+ * presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_FRAME_DATA_DISPLAYMATRIX,
+ /**
+ * Active Format Description data consisting of a single byte as specified
+ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
+ */
+ AV_FRAME_DATA_AFD,
+ /**
+ * Motion vectors exported by some codecs (on demand through the export_mvs
+ * flag set in the libavcodec AVCodecContext flags2 option).
+ * The data is the AVMotionVector struct defined in
+ * libavutil/motion_vector.h.
+ */
+ AV_FRAME_DATA_MOTION_VECTORS,
+ /**
+ * Recommends skipping the specified number of samples. This is exported
+ * only if the "skip_manual" AVOption is set in libavcodec.
+ * This has the same format as AV_PKT_DATA_SKIP_SAMPLES.
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_FRAME_DATA_SKIP_SAMPLES,
+ /**
+ * This side data must be associated with an audio frame and corresponds to
+ * enum AVAudioServiceType defined in avcodec.h.
+ */
+ AV_FRAME_DATA_AUDIO_SERVICE_TYPE,
+ /**
+ * Mastering display metadata associated with a video frame. The payload is
+ * an AVMasteringDisplayMetadata type and contains information about the
+ * mastering display color volume.
+ */
+ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
+ /**
+ * The GOP timecode in 25 bit timecode format. Data format is 64-bit integer.
+ * This is set on the first frame of a GOP that has a temporal reference of 0.
+ */
+ AV_FRAME_DATA_GOP_TIMECODE,
+
+ /**
+ * The data represents the AVSphericalMapping structure defined in
+ * libavutil/spherical.h.
+ */
+ AV_FRAME_DATA_SPHERICAL,
+
+ /**
+ * Content light level (based on CTA-861.3). This payload contains data in
+ * the form of the AVContentLightMetadata struct.
+ */
+ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
+
+ /**
+ * The data contains an ICC profile as an opaque octet buffer following the
+ * format described by ISO 15076-1 with an optional name defined in the
+ * metadata key entry "name".
+ */
+ AV_FRAME_DATA_ICC_PROFILE,
+
+ /**
+ * Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t
+ * where the first uint32_t describes how many (1-3) of the other timecodes are used.
+ * The timecode format is described in the documentation of av_timecode_get_smpte_from_framenum()
+ * function in libavutil/timecode.h.
+ */
+ AV_FRAME_DATA_S12M_TIMECODE,
+
+ /**
+ * HDR dynamic metadata associated with a video frame. The payload is
+ * an AVDynamicHDRPlus type and contains information for color
+ * volume transform - application 4 of SMPTE 2094-40:2016 standard.
+ */
+ AV_FRAME_DATA_DYNAMIC_HDR_PLUS,
+
+ /**
+ * Regions Of Interest. The data is an array of AVRegionOfInterest type; the number of
+ * array elements is implied by AVFrameSideData.size / AVRegionOfInterest.self_size.
+ */
+ AV_FRAME_DATA_REGIONS_OF_INTEREST,
+
+ /**
+ * Encoding parameters for a video frame, as described by AVVideoEncParams.
+ */
+ AV_FRAME_DATA_VIDEO_ENC_PARAMS,
+
+ /**
+ * User data unregistered metadata associated with a video frame.
+ * This is the H.26[45] UDU SEI message, and shouldn't be used for any other purpose.
+ * The data is stored as uint8_t in AVFrameSideData.data which is 16 bytes of
+ * uuid_iso_iec_11578 followed by AVFrameSideData.size - 16 bytes of user_data_payload_byte.
+ */
+ AV_FRAME_DATA_SEI_UNREGISTERED,
+
+ /**
+ * Film grain parameters for a frame, described by AVFilmGrainParams.
+ * Must be present for every frame which should have film grain applied.
+ */
+ AV_FRAME_DATA_FILM_GRAIN_PARAMS,
+
+ /**
+ * Bounding boxes for object detection and classification,
+ * as described by AVDetectionBBoxHeader.
+ */
+ AV_FRAME_DATA_DETECTION_BBOXES,
+
+ /**
+ * Dolby Vision RPU raw data, suitable for passing to x265
+ * or other libraries. Array of uint8_t, with NAL emulation
+ * bytes intact.
+ */
+ AV_FRAME_DATA_DOVI_RPU_BUFFER,
+
+ /**
+ * Parsed Dolby Vision metadata, suitable for passing to a software
+ * implementation. The payload is the AVDOVIMetadata struct defined in
+ * libavutil/dovi_meta.h.
+ */
+ AV_FRAME_DATA_DOVI_METADATA,
+
+ /**
+ * HDR Vivid dynamic metadata associated with a video frame. The payload is
+ * an AVDynamicHDRVivid type and contains information for color
+ * volume transform - CUVA 005.1-2021.
+ */
+ AV_FRAME_DATA_DYNAMIC_HDR_VIVID,
+
+ /**
+ * Ambient viewing environment metadata, as defined by H.274.
+ */
+ AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT,
+};
+
+enum AVActiveFormatDescription {
+ AV_AFD_SAME = 8,
+ AV_AFD_4_3 = 9,
+ AV_AFD_16_9 = 10,
+ AV_AFD_14_9 = 11,
+ AV_AFD_4_3_SP_14_9 = 13,
+ AV_AFD_16_9_SP_14_9 = 14,
+ AV_AFD_SP_4_3 = 15,
+};
+
+
+/**
+ * Structure to hold side data for an AVFrame.
+ *
+ * sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be added
+ * to the end with a minor bump.
+ */
+typedef struct AVFrameSideData {
+ enum AVFrameSideDataType type;
+ uint8_t *data;
+ size_t size;
+ AVDictionary *metadata;
+ AVBufferRef *buf;
+} AVFrameSideData;
+
+/**
+ * Structure describing a single Region Of Interest.
+ *
+ * When multiple regions are defined in a single side-data block, they
+ * should be ordered from most to least important - some encoders are only
+ * capable of supporting a limited number of distinct regions, so will have
+ * to truncate the list.
+ *
+ * When overlapping regions are defined, the first region containing a given
+ * area of the frame applies.
+ */
+typedef struct AVRegionOfInterest {
+ /**
+ * Must be set to the size of this data structure (that is,
+ * sizeof(AVRegionOfInterest)).
+ */
+ uint32_t self_size;
+ /**
+ * Distance in pixels from the top edge of the frame to the top and
+ * bottom edges and from the left edge of the frame to the left and
+ * right edges of the rectangle defining this region of interest.
+ *
+ * The constraints on a region are encoder dependent, so the region
+ * actually affected may be slightly larger for alignment or other
+ * reasons.
+ */
+ int top;
+ int bottom;
+ int left;
+ int right;
+ /**
+ * Quantisation offset.
+ *
+ * Must be in the range -1 to +1. A value of zero indicates no quality
+ * change. A negative value asks for better quality (less quantisation),
+ * while a positive value asks for worse quality (greater quantisation).
+ *
+ * The range is calibrated so that the extreme values indicate the
+ * largest possible offset - if the rest of the frame is encoded with the
+ * worst possible quality, an offset of -1 indicates that this region
+ * should be encoded with the best possible quality anyway. Intermediate
+ * values are then interpolated in some codec-dependent way.
+ *
+ * For example, in 10-bit H.264 the quantisation parameter varies between
+ * -12 and 51. A typical qoffset value of -1/10 therefore indicates that
+ * this region should be encoded with a QP around one-tenth of the full
+ * range better than the rest of the frame. So, if most of the frame
+ * were to be encoded with a QP of around 30, this region would get a QP
+ * of around 24 (an offset of approximately -1/10 * (51 - -12) = -6.3).
+ * An extreme value of -1 would indicate that this region should be
+ * encoded with the best possible quality regardless of the treatment of
+ * the rest of the frame - that is, should be encoded at a QP of -12.
+ */
+ AVRational qoffset;
+} AVRegionOfInterest;
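+
+/* A minimal usage sketch (editor's addition, not upstream documentation):
+ * attaching a single region of interest to a frame before handing it to an
+ * encoder that supports ROI. Error handling beyond allocation is omitted.
+ *
+ * @code
+ * static int add_roi(AVFrame *frame)
+ * {
+ *     AVFrameSideData *sd = av_frame_new_side_data(frame,
+ *                                                  AV_FRAME_DATA_REGIONS_OF_INTEREST,
+ *                                                  sizeof(AVRegionOfInterest));
+ *     AVRegionOfInterest *roi;
+ *
+ *     if (!sd)
+ *         return AVERROR(ENOMEM);
+ *     roi = (AVRegionOfInterest*)sd->data;
+ *     roi->self_size = sizeof(*roi);
+ *     roi->top       = 0;                       // top-left quarter of the frame
+ *     roi->left      = 0;
+ *     roi->bottom    = frame->height / 2;
+ *     roi->right     = frame->width / 2;
+ *     roi->qoffset   = (AVRational){ -1, 10 };  // ask for somewhat better quality
+ *     return 0;
+ * }
+ * @endcode
+ */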
+
+/**
+ * This structure describes decoded (raw) audio or video data.
+ *
+ * AVFrame must be allocated using av_frame_alloc(). Note that this only
+ * allocates the AVFrame itself, the buffers for the data must be managed
+ * through other means (see below).
+ * AVFrame must be freed with av_frame_free().
+ *
+ * AVFrame is typically allocated once and then reused multiple times to hold
+ * different data (e.g. a single AVFrame to hold frames received from a
+ * decoder). In such a case, av_frame_unref() will free any references held by
+ * the frame and reset it to its original clean state before it
+ * is reused again.
+ *
+ * The data described by an AVFrame is usually reference counted through the
+ * AVBuffer API. The underlying buffer references are stored in AVFrame.buf /
+ * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at
+ * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,
+ * every single data plane must be contained in one of the buffers in
+ * AVFrame.buf or AVFrame.extended_buf.
+ * There may be a single buffer for all the data, or one separate buffer for
+ * each plane, or anything in between.
+ *
+ * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
+ * to the end with a minor bump.
+ *
+ * Fields can be accessed through AVOptions; the name string used matches the
+ * C structure field name for fields accessible through AVOptions. The AVClass
+ * for AVFrame can be obtained from avcodec_get_frame_class().
+ */
+typedef struct AVFrame {
+#define AV_NUM_DATA_POINTERS 8
+ /**
+ * pointer to the picture/channel planes.
+ * This might be different from the first allocated byte. For video,
+ * it could even point to the end of the image data.
+ *
+ * All pointers in data and extended_data must point into one of the
+ * AVBufferRef in buf or extended_buf.
+ *
+ * Some decoders access areas outside 0,0 - width,height; please
+ * see avcodec_align_dimensions2(). Some filters and swscale can read
+ * up to 16 bytes beyond the planes; if these filters are to be used,
+ * then 16 extra bytes must be allocated.
+ *
+ * NOTE: Pointers not needed by the format MUST be set to NULL.
+ *
+ * @attention In case of video, the data[] pointers can point to the
+ * end of image data in order to reverse line order, when used in
+ * combination with negative values in the linesize[] array.
+ */
+ uint8_t *data[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For video, a positive or negative value, typically indicating
+ * the size in bytes of each picture line, but it can also be:
+ * - the negative byte size of lines for vertical flipping
+ * (with data[n] pointing to the end of the data)
+ * - a positive or negative multiple of the byte size for accessing
+ * even and odd fields of a frame (possibly flipped)
+ *
+ * For audio, only linesize[0] may be set. For planar audio, each channel
+ * plane must be the same size.
+ *
+ * For video the linesizes should be multiples of the CPU's alignment
+ * preference, which is 16 or 32 for modern desktop CPUs.
+ * Some code requires such alignment, other code can be slower without
+ * correct alignment, and for yet other code it makes no difference.
+ *
+ * @note The linesize may be larger than the size of usable data -- there
+ * may be extra padding present for performance reasons.
+ *
+ * @attention In case of video, line size values can be negative to achieve
+ * a vertically inverted iteration over image lines.
+ */
+ int linesize[AV_NUM_DATA_POINTERS];
+
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data should always be set in a valid frame,
+ * but for planar audio with more channels than can fit in data,
+ * extended_data must be used in order to access all channels.
+ */
+ uint8_t **extended_data;
+
+ /**
+ * @name Video dimensions
+ * Video frames only. The coded dimensions (in pixels) of the video frame,
+ * i.e. the size of the rectangle that contains some well-defined values.
+ *
+ * @note The part of the frame intended for display/presentation is further
+ * restricted by the @ref cropping "Cropping rectangle".
+ * @{
+ */
+ int width, height;
+ /**
+ * @}
+ */
+
+ /**
+ * number of audio samples (per channel) described by this frame
+ */
+ int nb_samples;
+
+ /**
+ * format of the frame, -1 if unknown or unset.
+ * Values correspond to enum AVPixelFormat for video frames,
+ * enum AVSampleFormat for audio.
+ */
+ int format;
+
+ /**
+ * 1 -> keyframe, 0 -> not
+ */
+ int key_frame;
+
+ /**
+ * Picture type of the frame.
+ */
+ enum AVPictureType pict_type;
+
+ /**
+ * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Presentation timestamp in time_base units (time when frame should be shown to user).
+ */
+ int64_t pts;
+
+ /**
+ * DTS copied from the AVPacket that triggered returning this frame
+ * (if frame threading isn't used).
+ * This is also the presentation time of this AVFrame calculated from
+ * only AVPacket.dts values without pts values.
+ */
+ int64_t pkt_dts;
+
+ /**
+ * Time base for the timestamps in this frame.
+ * In the future, this field may be set on frames output by decoders or
+ * filters, but its value will be by default ignored on input to encoders
+ * or filters.
+ */
+ AVRational time_base;
+
+#if FF_API_FRAME_PICTURE_NUMBER
+ /**
+ * picture number in bitstream order
+ */
+ attribute_deprecated
+ int coded_picture_number;
+ /**
+ * picture number in display order
+ */
+ attribute_deprecated
+ int display_picture_number;
+#endif
+
+ /**
+ * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+ */
+ int quality;
+
+ /**
+ * for some private data of the user
+ */
+ void *opaque;
+
+ /**
+ * When decoding, this signals how much the picture must be delayed.
+ * extra_delay = repeat_pict / (2*fps)
+ */
+ int repeat_pict;
+
+ /**
+ * The content of the picture is interlaced.
+ */
+ int interlaced_frame;
+
+ /**
+ * If the content is interlaced, whether the top field is displayed first.
+ */
+ int top_field_first;
+
+ /**
+ * Tell the user application that the palette has changed from the previous frame.
+ */
+ int palette_has_changed;
+
+#if FF_API_REORDERED_OPAQUE
+ /**
+ * reordered opaque 64 bits (generally an integer or a double precision float
+ * PTS but can be anything).
+ * The user sets AVCodecContext.reordered_opaque to represent the input at
+ * that time; the decoder reorders values as needed and sets
+ * AVFrame.reordered_opaque to exactly one of the values provided by the user
+ * through AVCodecContext.reordered_opaque.
+ *
+ * @deprecated Use AV_CODEC_FLAG_COPY_OPAQUE instead
+ */
+ attribute_deprecated
+ int64_t reordered_opaque;
+#endif
+
+ /**
+ * Sample rate of the audio data.
+ */
+ int sample_rate;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * Channel layout of the audio data.
+ * @deprecated use ch_layout instead
+ */
+ attribute_deprecated
+ uint64_t channel_layout;
+#endif
+
+ /**
+ * AVBuffer references backing the data for this frame. All the pointers in
+ * data and extended_data must point inside one of the buffers in buf or
+ * extended_buf. This array must be filled contiguously -- if buf[i] is
+ * non-NULL then buf[j] must also be non-NULL for all j < i.
+ *
+ * There may be at most one AVBuffer per data plane, so for video this array
+ * always contains all the references. For planar audio with more than
+ * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
+ * this array. Then the extra AVBufferRef pointers are stored in the
+ * extended_buf array.
+ */
+ AVBufferRef *buf[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For planar audio which requires more than AV_NUM_DATA_POINTERS
+ * AVBufferRef pointers, this array will hold all the references which
+ * cannot fit into AVFrame.buf.
+ *
+ * Note that this is different from AVFrame.extended_data, which always
+ * contains all the pointers. This array only contains the extra pointers,
+ * which cannot fit into AVFrame.buf.
+ *
+ * This array is always allocated using av_malloc() by whoever constructs
+ * the frame. It is freed in av_frame_unref().
+ */
+ AVBufferRef **extended_buf;
+ /**
+ * Number of elements in extended_buf.
+ */
+ int nb_extended_buf;
+
+ AVFrameSideData **side_data;
+ int nb_side_data;
+
+/**
+ * @defgroup lavu_frame_flags AV_FRAME_FLAGS
+ * @ingroup lavu_frame
+ * Flags describing additional frame properties.
+ *
+ * @{
+ */
+
+/**
+ * The frame data may be corrupted, e.g. due to decoding errors.
+ */
+#define AV_FRAME_FLAG_CORRUPT (1 << 0)
+/**
+ * A flag to mark the frames which need to be decoded, but shouldn't be output.
+ */
+#define AV_FRAME_FLAG_DISCARD (1 << 2)
+/**
+ * @}
+ */
+
+ /**
+ * Frame flags, a combination of @ref lavu_frame_flags
+ */
+ int flags;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ enum AVColorPrimaries color_primaries;
+
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ enum AVChromaLocation chroma_location;
+
+ /**
+ * frame timestamp estimated using various heuristics, in stream time base
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int64_t best_effort_timestamp;
+
+ /**
+ * reordered pos from the last AVPacket that has been input into the decoder
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_pos;
+
+#if FF_API_PKT_DURATION
+ /**
+ * duration of the corresponding packet, expressed in
+ * AVStream->time_base units, 0 if unknown.
+ * - encoding: unused
+ * - decoding: Read by user.
+ *
+ * @deprecated use duration instead
+ */
+ attribute_deprecated
+ int64_t pkt_duration;
+#endif
+
+ /**
+ * metadata.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVDictionary *metadata;
+
+ /**
+ * decode error flags of the frame, set to a combination of
+ * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
+ * were errors during the decoding.
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int decode_error_flags;
+#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
+#define FF_DECODE_ERROR_MISSING_REFERENCE 2
+#define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4
+#define FF_DECODE_ERROR_DECODE_SLICES 8
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * number of audio channels, only used for audio.
+ * - encoding: unused
+ * - decoding: Read by user.
+ * @deprecated use ch_layout instead
+ */
+ attribute_deprecated
+ int channels;
+#endif
+
+ /**
+ * size of the corresponding packet containing the compressed
+ * frame.
+ * It is set to a negative value if unknown.
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int pkt_size;
+
+ /**
+ * For hwaccel-format frames, this should be a reference to the
+ * AVHWFramesContext describing the frame.
+ */
+ AVBufferRef *hw_frames_ctx;
+
+ /**
+ * AVBufferRef for free use by the API user. FFmpeg will never check the
+ * contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when
+ * the frame is unreferenced. av_frame_copy_props() calls create a new
+ * reference with av_buffer_ref() for the target frame's opaque_ref field.
+ *
+ * This is unrelated to the opaque field, although it serves a similar
+ * purpose.
+ */
+ AVBufferRef *opaque_ref;
+
+ /**
+ * @anchor cropping
+ * @name Cropping
+ * Video frames only. The number of pixels to discard from the
+ * top/bottom/left/right border of the frame to obtain the sub-rectangle of
+ * the frame intended for presentation.
+ * @{
+ */
+ size_t crop_top;
+ size_t crop_bottom;
+ size_t crop_left;
+ size_t crop_right;
+ /**
+ * @}
+ */
+
+ /**
+ * AVBufferRef for internal use by a single libav* library.
+ * Must not be used to transfer data between libraries.
+ * Has to be NULL when ownership of the frame leaves the respective library.
+ *
+ * Code outside the FFmpeg libs should never check or change the contents of the buffer ref.
+ *
+ * FFmpeg calls av_buffer_unref() on it when the frame is unreferenced.
+ * av_frame_copy_props() calls create a new reference with av_buffer_ref()
+ * for the target frame's private_ref field.
+ */
+ AVBufferRef *private_ref;
+
+ /**
+ * Channel layout of the audio data.
+ */
+ AVChannelLayout ch_layout;
+
+ /**
+ * Duration of the frame, in the same units as pts. 0 if unknown.
+ */
+ int64_t duration;
+} AVFrame;
+
+
+/**
+ * Allocate an AVFrame and set its fields to default values. The resulting
+ * struct must be freed using av_frame_free().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVFrame itself, not the data buffers. Those
+ * must be allocated through other means, e.g. with av_frame_get_buffer() or
+ * manually.
+ */
+AVFrame *av_frame_alloc(void);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data. If the frame is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ */
+void av_frame_free(AVFrame **frame);
+
+/**
+ * Set up a new reference to the data described by the source frame.
+ *
+ * Copy frame properties from src to dst and create a new reference for each
+ * AVBufferRef from src.
+ *
+ * If src is not reference counted, new buffers are allocated and the data is
+ * copied.
+ *
+ * @warning: dst MUST have been either unreferenced with av_frame_unref(dst),
+ * or newly allocated with av_frame_alloc() before calling this
+ * function, or undefined behavior will occur.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_frame_ref(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Create a new frame that references the same data as src.
+ *
+ * This is a shortcut for av_frame_alloc()+av_frame_ref().
+ *
+ * @return newly created AVFrame on success, NULL on error.
+ */
+AVFrame *av_frame_clone(const AVFrame *src);
+
+/**
+ * Unreference all the buffers referenced by frame and reset the frame fields.
+ */
+void av_frame_unref(AVFrame *frame);
+
+/**
+ * Move everything contained in src to dst and reset src.
+ *
+ * @warning: dst is not unreferenced, but directly overwritten without reading
+ * or deallocating its contents. Call av_frame_unref(dst) manually
+ * before calling this function to ensure that no memory is leaked.
+ */
+void av_frame_move_ref(AVFrame *dst, AVFrame *src);
+
+/**
+ * Allocate new buffer(s) for audio or video data.
+ *
+ * The following fields must be set on frame before calling this function:
+ * - format (pixel format for video, sample format for audio)
+ * - width and height for video
+ * - nb_samples and ch_layout for audio
+ *
+ * This function will fill AVFrame.data and AVFrame.buf arrays and, if
+ * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.
+ * For planar formats, one buffer will be allocated for each plane.
+ *
+ * @warning: if frame already has been allocated, calling this function will
+ * leak memory. In addition, undefined behavior can occur in certain
+ * cases.
+ *
+ * @param frame frame in which to store the new buffers.
+ * @param align Required buffer size alignment. If equal to 0, alignment will be
+ * chosen automatically for the current CPU. It is highly
+ * recommended to pass 0 here unless you know what you are doing.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_frame_get_buffer(AVFrame *frame, int align);
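+
+/* A brief sketch (editor's addition): the typical way to obtain a writable
+ * video frame with default alignment. The 16x16 YUV420P values are just
+ * example parameters.
+ *
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * int ret;
+ *
+ * if (!frame)
+ *     return AVERROR(ENOMEM);
+ * frame->format = AV_PIX_FMT_YUV420P;
+ * frame->width  = 16;
+ * frame->height = 16;
+ * ret = av_frame_get_buffer(frame, 0);
+ * if (ret < 0)
+ *     av_frame_free(&frame);
+ * @endcode
+ */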
+
+/**
+ * Check if the frame data is writable.
+ *
+ * @return A positive value if the frame data is writable (which is true if and
+ * only if each of the underlying buffers has only one reference, namely the one
+ * stored in this frame). Return 0 otherwise.
+ *
+ * If 1 is returned the answer is valid until av_buffer_ref() is called on any
+ * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).
+ *
+ * @see av_frame_make_writable(), av_buffer_is_writable()
+ */
+int av_frame_is_writable(AVFrame *frame);
+
+/**
+ * Ensure that the frame data is writable, avoiding data copy if possible.
+ *
+ * Do nothing if the frame is writable; allocate new buffers and copy the data
+ * if it is not. Non-refcounted frames behave as non-writable, i.e. a copy
+ * is always made.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ *
+ * @see av_frame_is_writable(), av_buffer_is_writable(),
+ * av_buffer_make_writable()
+ */
+int av_frame_make_writable(AVFrame *frame);
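+
+/* Sketch (editor's addition): ensuring a frame can be modified in place,
+ * e.g. before drawing into a frame that may still be shared with a decoder.
+ *
+ * @code
+ * int ret = av_frame_make_writable(frame);
+ * if (ret < 0)
+ *     return ret;
+ * frame->data[0][0] = 16;   // safe now: no other reference sees this write
+ * @endcode
+ */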
+
+/**
+ * Copy the frame data from src to dst.
+ *
+ * This function does not allocate anything, dst must be already initialized and
+ * allocated with the same parameters as src.
+ *
+ * This function only copies the frame data (i.e. the contents of the data /
+ * extended data arrays), not any other properties.
+ *
+ * @return >= 0 on success, a negative AVERROR on error.
+ */
+int av_frame_copy(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Copy only "metadata" fields from src to dst.
+ *
+ * Metadata for the purpose of this function are those fields that do not affect
+ * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample
+ * aspect ratio (for video), but not width/height or channel layout.
+ * Side data is also copied.
+ */
+int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Get the buffer reference a given data plane is stored in.
+ *
+ * @param frame the frame to get the plane's buffer from
+ * @param plane index of the data plane of interest in frame->extended_data.
+ *
+ * @return the buffer reference that contains the plane or NULL if the input
+ * frame is not valid.
+ */
+AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);
+
+/**
+ * Add a new side data to a frame.
+ *
+ * @param frame a frame to which the side data should be added
+ * @param type type of the added side data
+ * @param size size of the side data
+ *
+ * @return newly added side data on success, NULL on error
+ */
+AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
+ enum AVFrameSideDataType type,
+ size_t size);
+
+/**
+ * Add a new side data to a frame from an existing AVBufferRef
+ *
+ * @param frame a frame to which the side data should be added
+ * @param type the type of the added side data
+ * @param buf an AVBufferRef to add as side data. The ownership of
+ * the reference is transferred to the frame.
+ *
+ * @return newly added side data on success, NULL on error. On failure
+ * the frame is unchanged and the AVBufferRef remains owned by
+ * the caller.
+ */
+AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
+ enum AVFrameSideDataType type,
+ AVBufferRef *buf);
+
+/**
+ * @return a pointer to the side data of a given type on success, NULL if there
+ * is no side data with such type in this frame.
+ */
+AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
+ enum AVFrameSideDataType type);
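+
+/* Sketch (editor's addition): checking a decoded frame for A53 closed-caption
+ * side data; handle_cc() is a hypothetical application callback.
+ *
+ * @code
+ * AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
+ * if (sd)
+ *     handle_cc(sd->data, sd->size);   // sd->size bytes of CC bitstream
+ * @endcode
+ */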
+
+/**
+ * Remove and free all side data instances of the given type.
+ */
+void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type);
+
+
+/**
+ * Flags for frame cropping.
+ */
+enum {
+ /**
+ * Apply the maximum possible cropping, even if it requires setting the
+ * AVFrame.data[] entries to unaligned pointers. Passing unaligned data
+ * to FFmpeg API is generally not allowed, and causes undefined behavior
+ * (such as crashes). You can pass unaligned data only to FFmpeg APIs that
+ * are explicitly documented to accept it. Use this flag only if you
+ * absolutely know what you are doing.
+ */
+ AV_FRAME_CROP_UNALIGNED = 1 << 0,
+};
+
+/**
+ * Crop the given video AVFrame according to its crop_left/crop_top/crop_right/
+ * crop_bottom fields. If cropping is successful, the function will adjust the
+ * data pointers and the width/height fields, and set the crop fields to 0.
+ *
+ * In all cases, the cropping boundaries will be rounded to the inherent
+ * alignment of the pixel format. In some cases, such as for opaque hwaccel
+ * formats, the left/top cropping is ignored. The crop fields are set to 0 even
+ * if the cropping was rounded or ignored.
+ *
+ * @param frame the frame which should be cropped
+ * @param flags Some combination of AV_FRAME_CROP_* flags, or 0.
+ *
+ * @return >= 0 on success, a negative AVERROR on error. If the cropping fields
+ * were invalid, AVERROR(ERANGE) is returned, and nothing is changed.
+ */
+int av_frame_apply_cropping(AVFrame *frame, int flags);
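+
+/* Sketch (editor's addition): applying decoder-exported cropping before
+ * display; dec_ctx and frame are assumed to come from a libavcodec decoding
+ * loop set up elsewhere.
+ *
+ * @code
+ * int ret = avcodec_receive_frame(dec_ctx, frame);
+ * if (ret >= 0) {
+ *     ret = av_frame_apply_cropping(frame, 0);
+ *     if (ret < 0)
+ *         return ret;
+ *     // frame->width / frame->height now describe the visible rectangle
+ * }
+ * @endcode
+ */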
+
+/**
+ * @return a string identifying the side data type
+ */
+const char *av_frame_side_data_name(enum AVFrameSideDataType type);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_FRAME_H */
diff --git a/media/ffvpx/libavutil/hwcontext.c b/media/ffvpx/libavutil/hwcontext.c
new file mode 100644
index 0000000000..3396598269
--- /dev/null
+++ b/media/ffvpx/libavutil/hwcontext.c
@@ -0,0 +1,955 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "avassert.h"
+#include "buffer.h"
+#include "common.h"
+#include "hwcontext.h"
+#include "hwcontext_internal.h"
+#include "imgutils.h"
+#include "log.h"
+#include "mem.h"
+#include "pixdesc.h"
+#include "pixfmt.h"
+
+static const HWContextType * const hw_table[] = {
+#if CONFIG_CUDA
+ &ff_hwcontext_type_cuda,
+#endif
+#if CONFIG_D3D11VA
+ &ff_hwcontext_type_d3d11va,
+#endif
+#if CONFIG_LIBDRM
+ &ff_hwcontext_type_drm,
+#endif
+#if CONFIG_DXVA2
+ &ff_hwcontext_type_dxva2,
+#endif
+#if CONFIG_OPENCL
+ &ff_hwcontext_type_opencl,
+#endif
+#if CONFIG_QSV
+ &ff_hwcontext_type_qsv,
+#endif
+#if CONFIG_VAAPI
+ &ff_hwcontext_type_vaapi,
+#endif
+#if CONFIG_VDPAU
+ &ff_hwcontext_type_vdpau,
+#endif
+#if CONFIG_VIDEOTOOLBOX
+ &ff_hwcontext_type_videotoolbox,
+#endif
+#if CONFIG_MEDIACODEC
+ &ff_hwcontext_type_mediacodec,
+#endif
+#if CONFIG_VULKAN
+ &ff_hwcontext_type_vulkan,
+#endif
+ NULL,
+};
+
+static const char *const hw_type_names[] = {
+ [AV_HWDEVICE_TYPE_CUDA] = "cuda",
+ [AV_HWDEVICE_TYPE_DRM] = "drm",
+ [AV_HWDEVICE_TYPE_DXVA2] = "dxva2",
+ [AV_HWDEVICE_TYPE_D3D11VA] = "d3d11va",
+ [AV_HWDEVICE_TYPE_OPENCL] = "opencl",
+ [AV_HWDEVICE_TYPE_QSV] = "qsv",
+ [AV_HWDEVICE_TYPE_VAAPI] = "vaapi",
+ [AV_HWDEVICE_TYPE_VDPAU] = "vdpau",
+ [AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = "videotoolbox",
+ [AV_HWDEVICE_TYPE_MEDIACODEC] = "mediacodec",
+ [AV_HWDEVICE_TYPE_VULKAN] = "vulkan",
+};
+
+enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
+{
+ int type;
+ for (type = 0; type < FF_ARRAY_ELEMS(hw_type_names); type++) {
+ if (hw_type_names[type] && !strcmp(hw_type_names[type], name))
+ return type;
+ }
+ return AV_HWDEVICE_TYPE_NONE;
+}
+
+const char *av_hwdevice_get_type_name(enum AVHWDeviceType type)
+{
+ if (type > AV_HWDEVICE_TYPE_NONE &&
+ type < FF_ARRAY_ELEMS(hw_type_names))
+ return hw_type_names[type];
+ else
+ return NULL;
+}
+
+enum AVHWDeviceType av_hwdevice_iterate_types(enum AVHWDeviceType prev)
+{
+ enum AVHWDeviceType next;
+ int i, set = 0;
+ for (i = 0; hw_table[i]; i++) {
+ if (prev != AV_HWDEVICE_TYPE_NONE && hw_table[i]->type <= prev)
+ continue;
+ if (!set || hw_table[i]->type < next) {
+ next = hw_table[i]->type;
+ set = 1;
+ }
+ }
+ return set ? next : AV_HWDEVICE_TYPE_NONE;
+}
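+
+/* Small sketch (editor's addition): enumerating every device type compiled
+ * into hw_table, e.g. for a "-hwaccels"-style help listing.
+ *
+ *     enum AVHWDeviceType t = AV_HWDEVICE_TYPE_NONE;
+ *     while ((t = av_hwdevice_iterate_types(t)) != AV_HWDEVICE_TYPE_NONE)
+ *         av_log(NULL, AV_LOG_INFO, "%s\n", av_hwdevice_get_type_name(t));
+ */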
+
+static const AVClass hwdevice_ctx_class = {
+ .class_name = "AVHWDeviceContext",
+ .item_name = av_default_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static void hwdevice_ctx_free(void *opaque, uint8_t *data)
+{
+ AVHWDeviceContext *ctx = (AVHWDeviceContext*)data;
+
+ /* uninit might still want to access the hw context and the user
+ * free() callback might destroy it, so uninit has to be called first */
+ if (ctx->internal->hw_type->device_uninit)
+ ctx->internal->hw_type->device_uninit(ctx);
+
+ if (ctx->free)
+ ctx->free(ctx);
+
+ av_buffer_unref(&ctx->internal->source_device);
+
+ av_freep(&ctx->hwctx);
+ av_freep(&ctx->internal->priv);
+ av_freep(&ctx->internal);
+ av_freep(&ctx);
+}
+
+AVBufferRef *av_hwdevice_ctx_alloc(enum AVHWDeviceType type)
+{
+ AVHWDeviceContext *ctx;
+ AVBufferRef *buf;
+ const HWContextType *hw_type = NULL;
+ int i;
+
+ for (i = 0; hw_table[i]; i++) {
+ if (hw_table[i]->type == type) {
+ hw_type = hw_table[i];
+ break;
+ }
+ }
+ if (!hw_type)
+ return NULL;
+
+ ctx = av_mallocz(sizeof(*ctx));
+ if (!ctx)
+ return NULL;
+
+ ctx->internal = av_mallocz(sizeof(*ctx->internal));
+ if (!ctx->internal)
+ goto fail;
+
+ if (hw_type->device_priv_size) {
+ ctx->internal->priv = av_mallocz(hw_type->device_priv_size);
+ if (!ctx->internal->priv)
+ goto fail;
+ }
+
+ if (hw_type->device_hwctx_size) {
+ ctx->hwctx = av_mallocz(hw_type->device_hwctx_size);
+ if (!ctx->hwctx)
+ goto fail;
+ }
+
+ buf = av_buffer_create((uint8_t*)ctx, sizeof(*ctx),
+ hwdevice_ctx_free, NULL,
+ AV_BUFFER_FLAG_READONLY);
+ if (!buf)
+ goto fail;
+
+ ctx->type = type;
+ ctx->av_class = &hwdevice_ctx_class;
+
+ ctx->internal->hw_type = hw_type;
+
+ return buf;
+
+fail:
+ if (ctx->internal)
+ av_freep(&ctx->internal->priv);
+ av_freep(&ctx->internal);
+ av_freep(&ctx->hwctx);
+ av_freep(&ctx);
+ return NULL;
+}
+
+int av_hwdevice_ctx_init(AVBufferRef *ref)
+{
+ AVHWDeviceContext *ctx = (AVHWDeviceContext*)ref->data;
+ int ret;
+
+ if (ctx->internal->hw_type->device_init) {
+ ret = ctx->internal->hw_type->device_init(ctx);
+ if (ret < 0)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ if (ctx->internal->hw_type->device_uninit)
+ ctx->internal->hw_type->device_uninit(ctx);
+ return ret;
+}
+
+static const AVClass hwframe_ctx_class = {
+ .class_name = "AVHWFramesContext",
+ .item_name = av_default_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static void hwframe_ctx_free(void *opaque, uint8_t *data)
+{
+ AVHWFramesContext *ctx = (AVHWFramesContext*)data;
+
+ if (ctx->internal->pool_internal)
+ av_buffer_pool_uninit(&ctx->internal->pool_internal);
+
+ if (ctx->internal->hw_type->frames_uninit)
+ ctx->internal->hw_type->frames_uninit(ctx);
+
+ if (ctx->free)
+ ctx->free(ctx);
+
+ av_buffer_unref(&ctx->internal->source_frames);
+
+ av_buffer_unref(&ctx->device_ref);
+
+ av_freep(&ctx->hwctx);
+ av_freep(&ctx->internal->priv);
+ av_freep(&ctx->internal);
+ av_freep(&ctx);
+}
+
+AVBufferRef *av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
+{
+ AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)device_ref_in->data;
+ const HWContextType *hw_type = device_ctx->internal->hw_type;
+ AVHWFramesContext *ctx;
+ AVBufferRef *buf, *device_ref = NULL;
+
+ ctx = av_mallocz(sizeof(*ctx));
+ if (!ctx)
+ return NULL;
+
+ ctx->internal = av_mallocz(sizeof(*ctx->internal));
+ if (!ctx->internal)
+ goto fail;
+
+ if (hw_type->frames_priv_size) {
+ ctx->internal->priv = av_mallocz(hw_type->frames_priv_size);
+ if (!ctx->internal->priv)
+ goto fail;
+ }
+
+ if (hw_type->frames_hwctx_size) {
+ ctx->hwctx = av_mallocz(hw_type->frames_hwctx_size);
+ if (!ctx->hwctx)
+ goto fail;
+ }
+
+ device_ref = av_buffer_ref(device_ref_in);
+ if (!device_ref)
+ goto fail;
+
+ buf = av_buffer_create((uint8_t*)ctx, sizeof(*ctx),
+ hwframe_ctx_free, NULL,
+ AV_BUFFER_FLAG_READONLY);
+ if (!buf)
+ goto fail;
+
+ ctx->av_class = &hwframe_ctx_class;
+ ctx->device_ref = device_ref;
+ ctx->device_ctx = device_ctx;
+ ctx->format = AV_PIX_FMT_NONE;
+ ctx->sw_format = AV_PIX_FMT_NONE;
+
+ ctx->internal->hw_type = hw_type;
+
+ return buf;
+
+fail:
+ if (device_ref)
+ av_buffer_unref(&device_ref);
+ if (ctx->internal)
+ av_freep(&ctx->internal->priv);
+ av_freep(&ctx->internal);
+ av_freep(&ctx->hwctx);
+ av_freep(&ctx);
+ return NULL;
+}
+
+static int hwframe_pool_prealloc(AVBufferRef *ref)
+{
+ AVHWFramesContext *ctx = (AVHWFramesContext*)ref->data;
+ AVFrame **frames;
+ int i, ret = 0;
+
+ frames = av_calloc(ctx->initial_pool_size, sizeof(*frames));
+ if (!frames)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < ctx->initial_pool_size; i++) {
+ frames[i] = av_frame_alloc();
+ if (!frames[i])
+ goto fail;
+
+ ret = av_hwframe_get_buffer(ref, frames[i], 0);
+ if (ret < 0)
+ goto fail;
+ }
+
+fail:
+ for (i = 0; i < ctx->initial_pool_size; i++)
+ av_frame_free(&frames[i]);
+ av_freep(&frames);
+
+ return ret;
+}
+
+int av_hwframe_ctx_init(AVBufferRef *ref)
+{
+ AVHWFramesContext *ctx = (AVHWFramesContext*)ref->data;
+ const enum AVPixelFormat *pix_fmt;
+ int ret;
+
+ if (ctx->internal->source_frames) {
+ /* A derived frame context is already initialised. */
+ return 0;
+ }
+
+ /* validate the pixel format */
+ for (pix_fmt = ctx->internal->hw_type->pix_fmts; *pix_fmt != AV_PIX_FMT_NONE; pix_fmt++) {
+ if (*pix_fmt == ctx->format)
+ break;
+ }
+ if (*pix_fmt == AV_PIX_FMT_NONE) {
+ av_log(ctx, AV_LOG_ERROR,
+ "The hardware pixel format '%s' is not supported by the device type '%s'\n",
+ av_get_pix_fmt_name(ctx->format), ctx->internal->hw_type->name);
+ return AVERROR(ENOSYS);
+ }
+
+ /* validate the dimensions */
+ ret = av_image_check_size(ctx->width, ctx->height, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ /* format-specific init */
+ if (ctx->internal->hw_type->frames_init) {
+ ret = ctx->internal->hw_type->frames_init(ctx);
+ if (ret < 0)
+ goto fail;
+ }
+
+ if (ctx->internal->pool_internal && !ctx->pool)
+ ctx->pool = ctx->internal->pool_internal;
+
+ /* preallocate the frames in the pool, if requested */
+ if (ctx->initial_pool_size > 0) {
+ ret = hwframe_pool_prealloc(ref);
+ if (ret < 0)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ if (ctx->internal->hw_type->frames_uninit)
+ ctx->internal->hw_type->frames_uninit(ctx);
+ return ret;
+}
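+
+/* Sketch of the expected calling sequence (editor's addition; device_ref is
+ * assumed to be an already initialised AVHWDeviceContext reference, and the
+ * pixel formats / dimensions are example values):
+ *
+ *     AVBufferRef *frames_ref = av_hwframe_ctx_alloc(device_ref);
+ *     AVHWFramesContext *fc;
+ *     int err;
+ *
+ *     if (!frames_ref)
+ *         return AVERROR(ENOMEM);
+ *     fc = (AVHWFramesContext*)frames_ref->data;
+ *     fc->format            = AV_PIX_FMT_VAAPI;   // hardware pixel format
+ *     fc->sw_format         = AV_PIX_FMT_NV12;    // underlying software format
+ *     fc->width             = 1920;
+ *     fc->height            = 1080;
+ *     fc->initial_pool_size = 20;
+ *     err = av_hwframe_ctx_init(frames_ref);
+ *     if (err < 0)
+ *         av_buffer_unref(&frames_ref);
+ */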
+
+int av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ref,
+ enum AVHWFrameTransferDirection dir,
+ enum AVPixelFormat **formats, int flags)
+{
+ AVHWFramesContext *ctx = (AVHWFramesContext*)hwframe_ref->data;
+
+ if (!ctx->internal->hw_type->transfer_get_formats)
+ return AVERROR(ENOSYS);
+
+ return ctx->internal->hw_type->transfer_get_formats(ctx, dir, formats);
+}
+
+static int transfer_data_alloc(AVFrame *dst, const AVFrame *src, int flags)
+{
+ AVHWFramesContext *ctx;
+ AVFrame *frame_tmp;
+ int ret = 0;
+
+ if (!src->hw_frames_ctx)
+ return AVERROR(EINVAL);
+ ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;
+
+ frame_tmp = av_frame_alloc();
+ if (!frame_tmp)
+ return AVERROR(ENOMEM);
+
+ /* if the format is set, use that
+ * otherwise pick the first supported one */
+ if (dst->format >= 0) {
+ frame_tmp->format = dst->format;
+ } else {
+ enum AVPixelFormat *formats;
+
+ ret = av_hwframe_transfer_get_formats(src->hw_frames_ctx,
+ AV_HWFRAME_TRANSFER_DIRECTION_FROM,
+ &formats, 0);
+ if (ret < 0)
+ goto fail;
+ frame_tmp->format = formats[0];
+ av_freep(&formats);
+ }
+ frame_tmp->width = ctx->width;
+ frame_tmp->height = ctx->height;
+
+ ret = av_frame_get_buffer(frame_tmp, 0);
+ if (ret < 0)
+ goto fail;
+
+ ret = av_hwframe_transfer_data(frame_tmp, src, flags);
+ if (ret < 0)
+ goto fail;
+
+ frame_tmp->width = src->width;
+ frame_tmp->height = src->height;
+
+ av_frame_move_ref(dst, frame_tmp);
+
+fail:
+ av_frame_free(&frame_tmp);
+ return ret;
+}
+
+int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
+{
+ AVHWFramesContext *ctx;
+ int ret;
+
+ if (!dst->buf[0])
+ return transfer_data_alloc(dst, src, flags);
+
+ /*
+ * Hardware -> Hardware Transfer.
+ * Unlike Software -> Hardware or Hardware -> Software, the transfer
+ * function could be provided by either the src or dst, depending on
+ * the specific combination of hardware.
+ */
+ if (src->hw_frames_ctx && dst->hw_frames_ctx) {
+ AVHWFramesContext *src_ctx =
+ (AVHWFramesContext*)src->hw_frames_ctx->data;
+ AVHWFramesContext *dst_ctx =
+ (AVHWFramesContext*)dst->hw_frames_ctx->data;
+
+ if (src_ctx->internal->source_frames) {
+ av_log(src_ctx, AV_LOG_ERROR,
+ "A device with a derived frame context cannot be used as "
+ "the source of a HW -> HW transfer.");
+ return AVERROR(ENOSYS);
+ }
+
+ if (dst_ctx->internal->source_frames) {
+ av_log(dst_ctx, AV_LOG_ERROR,
+ "A device with a derived frame context cannot be used as "
+ "the destination of a HW -> HW transfer.\n");
+ return AVERROR(ENOSYS);
+ }
+
+ ret = src_ctx->internal->hw_type->transfer_data_from(src_ctx, dst, src);
+ if (ret == AVERROR(ENOSYS))
+ ret = dst_ctx->internal->hw_type->transfer_data_to(dst_ctx, dst, src);
+ if (ret < 0)
+ return ret;
+ } else {
+ if (src->hw_frames_ctx) {
+ ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;
+
+ ret = ctx->internal->hw_type->transfer_data_from(ctx, dst, src);
+ if (ret < 0)
+ return ret;
+ } else if (dst->hw_frames_ctx) {
+ ctx = (AVHWFramesContext*)dst->hw_frames_ctx->data;
+
+ ret = ctx->internal->hw_type->transfer_data_to(ctx, dst, src);
+ if (ret < 0)
+ return ret;
+ } else {
+ return AVERROR(ENOSYS);
+ }
+ }
+ return 0;
+}
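+
+/* Sketch (editor's addition): downloading a hardware frame into system
+ * memory; "hw" is assumed to be a frame with hw_frames_ctx set, e.g. as
+ * returned by a hwaccel decoder.
+ *
+ *     AVFrame *sw = av_frame_alloc();
+ *     int err;
+ *
+ *     if (!sw)
+ *         return AVERROR(ENOMEM);
+ *     // leave sw->format at -1 to accept the first supported transfer format
+ *     err = av_hwframe_transfer_data(sw, hw, 0);
+ *     if (err < 0)
+ *         av_frame_free(&sw);
+ */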
+
+int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
+{
+ AVHWFramesContext *ctx = (AVHWFramesContext*)hwframe_ref->data;
+ int ret;
+
+ if (ctx->internal->source_frames) {
+ // This is a derived frame context, so we allocate in the source
+ // and map the frame immediately.
+ AVFrame *src_frame;
+
+ frame->format = ctx->format;
+ frame->hw_frames_ctx = av_buffer_ref(hwframe_ref);
+ if (!frame->hw_frames_ctx)
+ return AVERROR(ENOMEM);
+
+ src_frame = av_frame_alloc();
+ if (!src_frame)
+ return AVERROR(ENOMEM);
+
+ ret = av_hwframe_get_buffer(ctx->internal->source_frames,
+ src_frame, 0);
+ if (ret < 0) {
+ av_frame_free(&src_frame);
+ return ret;
+ }
+
+ ret = av_hwframe_map(frame, src_frame,
+ ctx->internal->source_allocation_map_flags);
+ if (ret) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to map frame into derived "
+ "frame context: %d.\n", ret);
+ av_frame_free(&src_frame);
+ return ret;
+ }
+
+ // Free the source frame immediately - the mapped frame still
+ // contains a reference to it.
+ av_frame_free(&src_frame);
+
+ return 0;
+ }
+
+ if (!ctx->internal->hw_type->frames_get_buffer)
+ return AVERROR(ENOSYS);
+
+ if (!ctx->pool)
+ return AVERROR(EINVAL);
+
+ frame->hw_frames_ctx = av_buffer_ref(hwframe_ref);
+ if (!frame->hw_frames_ctx)
+ return AVERROR(ENOMEM);
+
+ ret = ctx->internal->hw_type->frames_get_buffer(ctx, frame);
+ if (ret < 0) {
+ av_buffer_unref(&frame->hw_frames_ctx);
+ return ret;
+ }
+
+ frame->extended_data = frame->data;
+
+ return 0;
+}
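+
+/* Sketch (editor's addition): pulling one frame out of an initialised frames
+ * context; frames_ref is assumed to have gone through av_hwframe_ctx_init().
+ *
+ *     AVFrame *f = av_frame_alloc();
+ *     int err;
+ *
+ *     if (!f)
+ *         return AVERROR(ENOMEM);
+ *     err = av_hwframe_get_buffer(frames_ref, f, 0);
+ *     if (err < 0)
+ *         av_frame_free(&f);
+ */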
+
+void *av_hwdevice_hwconfig_alloc(AVBufferRef *ref)
+{
+ AVHWDeviceContext *ctx = (AVHWDeviceContext*)ref->data;
+ const HWContextType *hw_type = ctx->internal->hw_type;
+
+ if (hw_type->device_hwconfig_size == 0)
+ return NULL;
+
+ return av_mallocz(hw_type->device_hwconfig_size);
+}
+
+AVHWFramesConstraints *av_hwdevice_get_hwframe_constraints(AVBufferRef *ref,
+ const void *hwconfig)
+{
+ AVHWDeviceContext *ctx = (AVHWDeviceContext*)ref->data;
+ const HWContextType *hw_type = ctx->internal->hw_type;
+ AVHWFramesConstraints *constraints;
+
+ if (!hw_type->frames_get_constraints)
+ return NULL;
+
+ constraints = av_mallocz(sizeof(*constraints));
+ if (!constraints)
+ return NULL;
+
+ constraints->min_width = constraints->min_height = 0;
+ constraints->max_width = constraints->max_height = INT_MAX;
+
+ if (hw_type->frames_get_constraints(ctx, hwconfig, constraints) >= 0) {
+ return constraints;
+ } else {
+ av_hwframe_constraints_free(&constraints);
+ return NULL;
+ }
+}
+
+void av_hwframe_constraints_free(AVHWFramesConstraints **constraints)
+{
+ if (*constraints) {
+ av_freep(&(*constraints)->valid_hw_formats);
+ av_freep(&(*constraints)->valid_sw_formats);
+ }
+ av_freep(constraints);
+}
+
+int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type,
+ const char *device, AVDictionary *opts, int flags)
+{
+ AVBufferRef *device_ref = NULL;
+ AVHWDeviceContext *device_ctx;
+ int ret = 0;
+
+ device_ref = av_hwdevice_ctx_alloc(type);
+ if (!device_ref) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ device_ctx = (AVHWDeviceContext*)device_ref->data;
+
+ if (!device_ctx->internal->hw_type->device_create) {
+ ret = AVERROR(ENOSYS);
+ goto fail;
+ }
+
+ ret = device_ctx->internal->hw_type->device_create(device_ctx, device,
+ opts, flags);
+ if (ret < 0)
+ goto fail;
+
+ ret = av_hwdevice_ctx_init(device_ref);
+ if (ret < 0)
+ goto fail;
+
+ *pdevice_ref = device_ref;
+ return 0;
+fail:
+ av_buffer_unref(&device_ref);
+ *pdevice_ref = NULL;
+ return ret;
+}
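+
+/* Sketch (editor's addition): opening a hardware device in one call. VAAPI is
+ * only an example type; NULL selects the default device and no options.
+ *
+ *     AVBufferRef *dev = NULL;
+ *     int err = av_hwdevice_ctx_create(&dev, AV_HWDEVICE_TYPE_VAAPI,
+ *                                      NULL, NULL, 0);
+ *     if (err < 0)
+ *         return err;
+ *     // ... use dev, e.g. as AVCodecContext.hw_device_ctx ...
+ *     av_buffer_unref(&dev);
+ */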
+
+int av_hwdevice_ctx_create_derived_opts(AVBufferRef **dst_ref_ptr,
+ enum AVHWDeviceType type,
+ AVBufferRef *src_ref,
+ AVDictionary *options, int flags)
+{
+ AVBufferRef *dst_ref = NULL, *tmp_ref;
+ AVHWDeviceContext *dst_ctx, *tmp_ctx;
+ int ret = 0;
+
+ tmp_ref = src_ref;
+ while (tmp_ref) {
+ tmp_ctx = (AVHWDeviceContext*)tmp_ref->data;
+ if (tmp_ctx->type == type) {
+ dst_ref = av_buffer_ref(tmp_ref);
+ if (!dst_ref) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ goto done;
+ }
+ tmp_ref = tmp_ctx->internal->source_device;
+ }
+
+ dst_ref = av_hwdevice_ctx_alloc(type);
+ if (!dst_ref) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ dst_ctx = (AVHWDeviceContext*)dst_ref->data;
+
+ tmp_ref = src_ref;
+ while (tmp_ref) {
+ tmp_ctx = (AVHWDeviceContext*)tmp_ref->data;
+ if (dst_ctx->internal->hw_type->device_derive) {
+ ret = dst_ctx->internal->hw_type->device_derive(dst_ctx,
+ tmp_ctx,
+ options,
+ flags);
+ if (ret == 0) {
+ dst_ctx->internal->source_device = av_buffer_ref(src_ref);
+ if (!dst_ctx->internal->source_device) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ ret = av_hwdevice_ctx_init(dst_ref);
+ if (ret < 0)
+ goto fail;
+ goto done;
+ }
+ if (ret != AVERROR(ENOSYS))
+ goto fail;
+ }
+ tmp_ref = tmp_ctx->internal->source_device;
+ }
+
+ ret = AVERROR(ENOSYS);
+ goto fail;
+
+done:
+ *dst_ref_ptr = dst_ref;
+ return 0;
+
+fail:
+ av_buffer_unref(&dst_ref);
+ *dst_ref_ptr = NULL;
+ return ret;
+}
+
+int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr,
+ enum AVHWDeviceType type,
+ AVBufferRef *src_ref, int flags)
+{
+ return av_hwdevice_ctx_create_derived_opts(dst_ref_ptr, type, src_ref,
+ NULL, flags);
+}
+
+static void ff_hwframe_unmap(void *opaque, uint8_t *data)
+{
+ HWMapDescriptor *hwmap = (HWMapDescriptor*)data;
+ AVHWFramesContext *ctx = opaque;
+
+ if (hwmap->unmap)
+ hwmap->unmap(ctx, hwmap);
+
+ av_frame_free(&hwmap->source);
+
+ av_buffer_unref(&hwmap->hw_frames_ctx);
+
+ av_free(hwmap);
+}
+
+int ff_hwframe_map_create(AVBufferRef *hwframe_ref,
+ AVFrame *dst, const AVFrame *src,
+ void (*unmap)(AVHWFramesContext *ctx,
+ HWMapDescriptor *hwmap),
+ void *priv)
+{
+ AVHWFramesContext *ctx = (AVHWFramesContext*)hwframe_ref->data;
+ HWMapDescriptor *hwmap;
+ int ret;
+
+ hwmap = av_mallocz(sizeof(*hwmap));
+ if (!hwmap) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ hwmap->source = av_frame_alloc();
+ if (!hwmap->source) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ ret = av_frame_ref(hwmap->source, src);
+ if (ret < 0)
+ goto fail;
+
+ hwmap->hw_frames_ctx = av_buffer_ref(hwframe_ref);
+ if (!hwmap->hw_frames_ctx) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ hwmap->unmap = unmap;
+ hwmap->priv = priv;
+
+ dst->buf[0] = av_buffer_create((uint8_t*)hwmap, sizeof(*hwmap),
+ &ff_hwframe_unmap, ctx, 0);
+ if (!dst->buf[0]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ if (hwmap) {
+ av_buffer_unref(&hwmap->hw_frames_ctx);
+ av_frame_free(&hwmap->source);
+ }
+ av_free(hwmap);
+ return ret;
+}
+
+int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags)
+{
+ AVBufferRef *orig_dst_frames = dst->hw_frames_ctx;
+ enum AVPixelFormat orig_dst_fmt = dst->format;
+ AVHWFramesContext *src_frames, *dst_frames;
+ HWMapDescriptor *hwmap;
+ int ret;
+
+ if (src->hw_frames_ctx && dst->hw_frames_ctx) {
+ src_frames = (AVHWFramesContext*)src->hw_frames_ctx->data;
+ dst_frames = (AVHWFramesContext*)dst->hw_frames_ctx->data;
+
+ if ((src_frames == dst_frames &&
+ src->format == dst_frames->sw_format &&
+ dst->format == dst_frames->format) ||
+ (src_frames->internal->source_frames &&
+ src_frames->internal->source_frames->data ==
+ (uint8_t*)dst_frames)) {
+ // This is an unmap operation. We don't need to directly
+ // do anything here other than fill in the original frame,
+ // because the real unmap will be invoked when the last
+ // reference to the mapped frame disappears.
+ if (!src->buf[0]) {
+ av_log(src_frames, AV_LOG_ERROR, "Invalid mapping "
+ "found when attempting unmap.\n");
+ return AVERROR(EINVAL);
+ }
+ hwmap = (HWMapDescriptor*)src->buf[0]->data;
+ av_frame_unref(dst);
+ return av_frame_ref(dst, hwmap->source);
+ }
+ }
+
+ if (src->hw_frames_ctx) {
+ src_frames = (AVHWFramesContext*)src->hw_frames_ctx->data;
+
+ if (src_frames->format == src->format &&
+ src_frames->internal->hw_type->map_from) {
+ ret = src_frames->internal->hw_type->map_from(src_frames,
+ dst, src, flags);
+ if (ret >= 0)
+ return ret;
+ else if (ret != AVERROR(ENOSYS))
+ goto fail;
+ }
+ }
+
+ if (dst->hw_frames_ctx) {
+ dst_frames = (AVHWFramesContext*)dst->hw_frames_ctx->data;
+
+ if (dst_frames->format == dst->format &&
+ dst_frames->internal->hw_type->map_to) {
+ ret = dst_frames->internal->hw_type->map_to(dst_frames,
+ dst, src, flags);
+ if (ret >= 0)
+ return ret;
+ else if (ret != AVERROR(ENOSYS))
+ goto fail;
+ }
+ }
+
+ return AVERROR(ENOSYS);
+
+fail:
+ // if the caller provided dst frames context, it should be preserved
+ // by this function
+ av_assert0(orig_dst_frames == NULL ||
+ orig_dst_frames == dst->hw_frames_ctx);
+
+ // preserve user-provided dst frame fields, but clean
+ // anything we might have set
+ dst->hw_frames_ctx = NULL;
+ av_frame_unref(dst);
+
+ dst->hw_frames_ctx = orig_dst_frames;
+ dst->format = orig_dst_fmt;
+
+ return ret;
+}
+
+int av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx,
+ enum AVPixelFormat format,
+ AVBufferRef *derived_device_ctx,
+ AVBufferRef *source_frame_ctx,
+ int flags)
+{
+ AVBufferRef *dst_ref = NULL;
+ AVHWFramesContext *dst = NULL;
+ AVHWFramesContext *src = (AVHWFramesContext*)source_frame_ctx->data;
+ int ret;
+
+ if (src->internal->source_frames) {
+ AVHWFramesContext *src_src =
+ (AVHWFramesContext*)src->internal->source_frames->data;
+ AVHWDeviceContext *dst_dev =
+ (AVHWDeviceContext*)derived_device_ctx->data;
+
+ if (src_src->device_ctx == dst_dev) {
+ // This is actually an unmapping, so we just return a
+ // reference to the source frame context.
+ *derived_frame_ctx =
+ av_buffer_ref(src->internal->source_frames);
+ if (!*derived_frame_ctx) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ return 0;
+ }
+ }
+
+ dst_ref = av_hwframe_ctx_alloc(derived_device_ctx);
+ if (!dst_ref) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ dst = (AVHWFramesContext*)dst_ref->data;
+
+ dst->format = format;
+ dst->sw_format = src->sw_format;
+ dst->width = src->width;
+ dst->height = src->height;
+
+ dst->internal->source_frames = av_buffer_ref(source_frame_ctx);
+ if (!dst->internal->source_frames) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ dst->internal->source_allocation_map_flags =
+ flags & (AV_HWFRAME_MAP_READ |
+ AV_HWFRAME_MAP_WRITE |
+ AV_HWFRAME_MAP_OVERWRITE |
+ AV_HWFRAME_MAP_DIRECT);
+
+ ret = AVERROR(ENOSYS);
+ if (src->internal->hw_type->frames_derive_from)
+ ret = src->internal->hw_type->frames_derive_from(dst, src, flags);
+ if (ret == AVERROR(ENOSYS) &&
+ dst->internal->hw_type->frames_derive_to)
+ ret = dst->internal->hw_type->frames_derive_to(dst, src, flags);
+ if (ret == AVERROR(ENOSYS))
+ ret = 0;
+ if (ret)
+ goto fail;
+
+ *derived_frame_ctx = dst_ref;
+ return 0;
+
+fail:
+ if (dst)
+ av_buffer_unref(&dst->internal->source_frames);
+ av_buffer_unref(&dst_ref);
+ return ret;
+}
+
+int ff_hwframe_map_replace(AVFrame *dst, const AVFrame *src)
+{
+ HWMapDescriptor *hwmap = (HWMapDescriptor*)dst->buf[0]->data;
+ av_frame_unref(hwmap->source);
+ return av_frame_ref(hwmap->source, src);
+}
diff --git a/media/ffvpx/libavutil/hwcontext.h b/media/ffvpx/libavutil/hwcontext.h
new file mode 100644
index 0000000000..7ff08c8608
--- /dev/null
+++ b/media/ffvpx/libavutil/hwcontext.h
@@ -0,0 +1,610 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HWCONTEXT_H
+#define AVUTIL_HWCONTEXT_H
+
+#include "buffer.h"
+#include "frame.h"
+#include "log.h"
+#include "pixfmt.h"
+
+enum AVHWDeviceType {
+ AV_HWDEVICE_TYPE_NONE,
+ AV_HWDEVICE_TYPE_VDPAU,
+ AV_HWDEVICE_TYPE_CUDA,
+ AV_HWDEVICE_TYPE_VAAPI,
+ AV_HWDEVICE_TYPE_DXVA2,
+ AV_HWDEVICE_TYPE_QSV,
+ AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
+ AV_HWDEVICE_TYPE_D3D11VA,
+ AV_HWDEVICE_TYPE_DRM,
+ AV_HWDEVICE_TYPE_OPENCL,
+ AV_HWDEVICE_TYPE_MEDIACODEC,
+ AV_HWDEVICE_TYPE_VULKAN,
+};
+
+typedef struct AVHWDeviceInternal AVHWDeviceInternal;
+
+/**
+ * This struct aggregates all the (hardware/vendor-specific) "high-level" state,
+ * i.e. state that is not tied to a concrete processing configuration.
+ * E.g., in an API that supports hardware-accelerated encoding and decoding,
+ * this struct will (if possible) wrap the state that is common to both encoding
+ * and decoding and from which specific instances of encoders or decoders can be
+ * derived.
+ *
+ * This struct is reference-counted with the AVBuffer mechanism. The
+ * av_hwdevice_ctx_alloc() constructor yields a reference, whose data field
+ * points to the actual AVHWDeviceContext. Further objects derived from
+ * AVHWDeviceContext (such as AVHWFramesContext, describing a frame pool with
+ * specific properties) will hold an internal reference to it. After all the
+ * references are released, the AVHWDeviceContext itself will be freed,
+ * optionally invoking a user-specified callback for uninitializing the hardware
+ * state.
+ */
+typedef struct AVHWDeviceContext {
+ /**
+ * A class for logging. Set by av_hwdevice_ctx_alloc().
+ */
+ const AVClass *av_class;
+
+ /**
+ * Private data used internally by libavutil. Must not be accessed in any
+ * way by the caller.
+ */
+ AVHWDeviceInternal *internal;
+
+ /**
+ * This field identifies the underlying API used for hardware access.
+ *
+ * This field is set when this struct is allocated and never changed
+ * afterwards.
+ */
+ enum AVHWDeviceType type;
+
+ /**
+ * The format-specific data, allocated and freed by libavutil along with
+ * this context.
+ *
+ * Should be cast by the user to the format-specific context defined in the
+ * corresponding header (hwcontext_*.h) and filled as described in the
+ * documentation before calling av_hwdevice_ctx_init().
+ *
+ * After calling av_hwdevice_ctx_init() this struct should not be modified
+ * by the caller.
+ */
+ void *hwctx;
+
+ /**
+ * This field may be set by the caller before calling av_hwdevice_ctx_init().
+ *
+ * If non-NULL, this callback will be called when the last reference to
+ * this context is unreferenced, immediately before it is freed.
+ *
+ * @note when other objects (e.g. an AVHWFramesContext) are derived from this
+ * struct, this callback will be invoked after all such child objects
+ * are fully uninitialized and their respective destructors invoked.
+ */
+ void (*free)(struct AVHWDeviceContext *ctx);
+
+ /**
+ * Arbitrary user data, to be used e.g. by the free() callback.
+ */
+ void *user_opaque;
+} AVHWDeviceContext;
+
+typedef struct AVHWFramesInternal AVHWFramesInternal;
+
+/**
+ * This struct describes a set or pool of "hardware" frames (i.e. those with
+ * data not located in normal system memory). All the frames in the pool are
+ * assumed to be allocated in the same way and interchangeable.
+ *
+ * This struct is reference-counted with the AVBuffer mechanism and tied to a
+ * given AVHWDeviceContext instance. The av_hwframe_ctx_alloc() constructor
+ * yields a reference, whose data field points to the actual AVHWFramesContext
+ * struct.
+ */
+typedef struct AVHWFramesContext {
+ /**
+ * A class for logging.
+ */
+ const AVClass *av_class;
+
+ /**
+ * Private data used internally by libavutil. Must not be accessed in any
+ * way by the caller.
+ */
+ AVHWFramesInternal *internal;
+
+ /**
+ * A reference to the parent AVHWDeviceContext. This reference is owned and
+ * managed by the enclosing AVHWFramesContext, but the caller may derive
+ * additional references from it.
+ */
+ AVBufferRef *device_ref;
+
+ /**
+ * The parent AVHWDeviceContext. This is simply a pointer to
+ * device_ref->data provided for convenience.
+ *
+ * Set by libavutil in av_hwframe_ctx_init().
+ */
+ AVHWDeviceContext *device_ctx;
+
+ /**
+ * The format-specific data, allocated and freed automatically along with
+ * this context.
+ *
+ * Should be cast by the user to the format-specific context defined in the
+ * corresponding header (hwframe_*.h) and filled as described in the
+ * documentation before calling av_hwframe_ctx_init().
+ *
+ * After any frames using this context are created, the contents of this
+ * struct should not be modified by the caller.
+ */
+ void *hwctx;
+
+ /**
+ * This field may be set by the caller before calling av_hwframe_ctx_init().
+ *
+ * If non-NULL, this callback will be called when the last reference to
+ * this context is unreferenced, immediately before it is freed.
+ */
+ void (*free)(struct AVHWFramesContext *ctx);
+
+ /**
+ * Arbitrary user data, to be used e.g. by the free() callback.
+ */
+ void *user_opaque;
+
+ /**
+ * A pool from which the frames are allocated by av_hwframe_get_buffer().
+ * This field may be set by the caller before calling av_hwframe_ctx_init().
+ * The buffers returned by calling av_buffer_pool_get() on this pool must
+ * have the properties described in the documentation in the corresponding hw
+ * type's header (hwcontext_*.h). The pool will be freed strictly before
+ * this struct's free() callback is invoked.
+ *
+ * This field may be NULL, in which case libavutil will attempt to allocate a
+ * pool internally. Note that certain device types enforce pools allocated at
+ * fixed size (frame count), which cannot be extended dynamically. In such a
+ * case, initial_pool_size must be set appropriately.
+ */
+ AVBufferPool *pool;
+
+ /**
+ * Initial size of the frame pool. If a device type does not support
+ * dynamically resizing the pool, then this is also the maximum pool size.
+ *
+ * May be set by the caller before calling av_hwframe_ctx_init(). Must be
+ * set if pool is NULL and the device type does not support dynamic pools.
+ */
+ int initial_pool_size;
+
+ /**
+ * The pixel format identifying the underlying HW surface type.
+ *
+ * Must be a hwaccel format, i.e. the corresponding descriptor must have the
+ * AV_PIX_FMT_FLAG_HWACCEL flag set.
+ *
+ * Must be set by the user before calling av_hwframe_ctx_init().
+ */
+ enum AVPixelFormat format;
+
+ /**
+ * The pixel format identifying the actual data layout of the hardware
+ * frames.
+ *
+ * Must be set by the caller before calling av_hwframe_ctx_init().
+ *
+ * @note when the underlying API does not provide the exact data layout, but
+ * only the colorspace/bit depth, this field should be set to the fully
+ * planar version of that format (e.g. for 8-bit 420 YUV it should be
+ * AV_PIX_FMT_YUV420P, not AV_PIX_FMT_NV12 or anything else).
+ */
+ enum AVPixelFormat sw_format;
+
+ /**
+ * The allocated dimensions of the frames in this pool.
+ *
+ * Must be set by the user before calling av_hwframe_ctx_init().
+ */
+ int width, height;
+} AVHWFramesContext;
+
+/**
+ * Look up an AVHWDeviceType by name.
+ *
+ * @param name String name of the device type (case-insensitive).
+ * @return The type from enum AVHWDeviceType, or AV_HWDEVICE_TYPE_NONE if
+ * not found.
+ */
+enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name);
+
+/** Get the string name of an AVHWDeviceType.
+ *
+ * @param type Type from enum AVHWDeviceType.
+ * @return Pointer to a static string containing the name, or NULL if the type
+ * is not valid.
+ */
+const char *av_hwdevice_get_type_name(enum AVHWDeviceType type);
+
+/**
+ * Iterate over supported device types.
+ *
+ * @param prev AV_HWDEVICE_TYPE_NONE initially, then the previous type
+ * returned by this function in subsequent iterations.
+ * @return The next usable device type from enum AVHWDeviceType, or
+ * AV_HWDEVICE_TYPE_NONE if there are no more.
+ */
+enum AVHWDeviceType av_hwdevice_iterate_types(enum AVHWDeviceType prev);
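/* A minimal sketch of how the three helpers above are typically combined to
 * probe what a given libavutil build supports; the <libavutil/...> include
 * path assumes an installed copy rather than this vendored tree. */
#include <stdio.h>
#include <libavutil/hwcontext.h>

static void list_hw_device_types(void)
{
    enum AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;

    /* Iterate until the sentinel value is returned again. */
    while ((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE)
        printf("supported hw device type: %s\n",
               av_hwdevice_get_type_name(type));

    /* Name lookup is the inverse of av_hwdevice_get_type_name(). */
    if (av_hwdevice_find_type_by_name("vaapi") == AV_HWDEVICE_TYPE_NONE)
        printf("vaapi is not supported by this build\n");
}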
+
+/**
+ * Allocate an AVHWDeviceContext for a given hardware type.
+ *
+ * @param type the type of the hardware device to allocate.
+ * @return a reference to the newly created AVHWDeviceContext on success or NULL
+ * on failure.
+ */
+AVBufferRef *av_hwdevice_ctx_alloc(enum AVHWDeviceType type);
+
+/**
+ * Finalize the device context before use. This function must be called after
+ * the context is filled with all the required information and before it is
+ * used in any way.
+ *
+ * @param ref a reference to the AVHWDeviceContext
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_hwdevice_ctx_init(AVBufferRef *ref);
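/* A sketch of the manual alloc/fill/init sequence described above, using the
 * VAAPI backend as an example; wrap_va_display() is illustrative and assumes
 * the caller already owns an open VADisplay. */
#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_vaapi.h>

static AVBufferRef *wrap_va_display(VADisplay display)
{
    AVBufferRef *ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VAAPI);
    if (!ref)
        return NULL;

    AVHWDeviceContext    *ctx   = (AVHWDeviceContext*)ref->data;
    AVVAAPIDeviceContext *vactx = ctx->hwctx;  /* format-specific part */
    vactx->display = display;

    if (av_hwdevice_ctx_init(ref) < 0)
        av_buffer_unref(&ref);                 /* leaves ref == NULL */
    return ref;
}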
+
+/**
+ * Open a device of the specified type and create an AVHWDeviceContext for it.
+ *
+ * This is a convenience function intended to cover the simple cases. Callers
+ * who need to fine-tune device creation/management should open the device
+ * manually and then wrap it in an AVHWDeviceContext using
+ * av_hwdevice_ctx_alloc()/av_hwdevice_ctx_init().
+ *
+ * The returned context is already initialized and ready for use, the caller
+ * should not call av_hwdevice_ctx_init() on it. The user_opaque/free fields of
+ * the created AVHWDeviceContext are set by this function and should not be
+ * touched by the caller.
+ *
+ * @param device_ctx On success, a reference to the newly-created device context
+ * will be written here. The reference is owned by the caller
+ * and must be released with av_buffer_unref() when no longer
+ * needed. On failure, NULL will be written to this pointer.
+ * @param type The type of the device to create.
+ * @param device A type-specific string identifying the device to open.
+ * @param opts A dictionary of additional (type-specific) options to use in
+ * opening the device. The dictionary remains owned by the caller.
+ * @param flags currently unused
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_hwdevice_ctx_create(AVBufferRef **device_ctx, enum AVHWDeviceType type,
+ const char *device, AVDictionary *opts, int flags);
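/* The convenience path in one call; the render-node path below is a common
 * Linux default and purely illustrative (passing NULL for the device string
 * lets the backend pick a device itself). */
#include <libavutil/hwcontext.h>

static int open_vaapi_device(AVBufferRef **device_ref)
{
    int err = av_hwdevice_ctx_create(device_ref, AV_HWDEVICE_TYPE_VAAPI,
                                     "/dev/dri/renderD128", NULL, 0);
    if (err < 0)
        return err;    /* *device_ref is left NULL on failure */

    /* ... use the device; release the reference with av_buffer_unref()
     * when done ... */
    return 0;
}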
+
+/**
+ * Create a new device of the specified type from an existing device.
+ *
+ * If the source device is a device of the target type or was originally
+ * derived from such a device (possibly through one or more intermediate
+ * devices of other types), then this will return a reference to the
+ * existing device of the same type as is requested.
+ *
+ * Otherwise, it will attempt to derive a new device from the given source
+ * device. If direct derivation to the new type is not implemented, it will
+ * attempt the same derivation from each ancestor of the source device in
+ * turn looking for an implemented derivation method.
+ *
+ * @param dst_ctx On success, a reference to the newly-created
+ * AVHWDeviceContext.
+ * @param type The type of the new device to create.
+ * @param src_ctx A reference to an existing AVHWDeviceContext which will be
+ * used to create the new device.
+ * @param flags Currently unused; should be set to zero.
+ * @return Zero on success, a negative AVERROR code on failure.
+ */
+int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ctx,
+ enum AVHWDeviceType type,
+ AVBufferRef *src_ctx, int flags);
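/* A sketch of device derivation: expose an existing DRM device as a VAAPI
 * device. Whether a particular source/target pair is supported depends on
 * the derivation methods implemented by the backends involved. */
#include <libavutil/hwcontext.h>

static int vaapi_from_drm(AVBufferRef **vaapi_ref, AVBufferRef *drm_ref)
{
    return av_hwdevice_ctx_create_derived(vaapi_ref, AV_HWDEVICE_TYPE_VAAPI,
                                          drm_ref, 0);
}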
+
+/**
+ * Create a new device of the specified type from an existing device.
+ *
+ * This function performs the same action as av_hwdevice_ctx_create_derived(),
+ * but additionally allows setting options for the newly derived device.
+ *
+ * @param dst_ctx On success, a reference to the newly-created
+ * AVHWDeviceContext.
+ * @param type The type of the new device to create.
+ * @param src_ctx A reference to an existing AVHWDeviceContext which will be
+ * used to create the new device.
+ * @param options Options for the new device to create, same format as in
+ * av_hwdevice_ctx_create.
+ * @param flags Currently unused; should be set to zero.
+ * @return Zero on success, a negative AVERROR code on failure.
+ */
+int av_hwdevice_ctx_create_derived_opts(AVBufferRef **dst_ctx,
+ enum AVHWDeviceType type,
+ AVBufferRef *src_ctx,
+ AVDictionary *options, int flags);
+
+/**
+ * Allocate an AVHWFramesContext tied to a given device context.
+ *
+ * @param device_ctx a reference to an AVHWDeviceContext. This function will make
+ * a new reference for internal use, the one passed to the
+ * function remains owned by the caller.
+ * @return a reference to the newly created AVHWFramesContext on success or NULL
+ * on failure.
+ */
+AVBufferRef *av_hwframe_ctx_alloc(AVBufferRef *device_ctx);
+
+/**
+ * Finalize the context before use. This function must be called after the
+ * context is filled with all the required information and before it is attached
+ * to any frames.
+ *
+ * @param ref a reference to the AVHWFramesContext
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_hwframe_ctx_init(AVBufferRef *ref);
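/* A sketch of the alloc/fill/init sequence for a frame pool; the VAAPI/NV12
 * choice, the dimensions and the pool depth are illustrative only. */
#include <libavutil/hwcontext.h>

static AVBufferRef *alloc_frame_pool(AVBufferRef *device_ref)
{
    AVBufferRef *frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!frames_ref)
        return NULL;

    AVHWFramesContext *fc = (AVHWFramesContext*)frames_ref->data;
    fc->format            = AV_PIX_FMT_VAAPI;  /* hwaccel format */
    fc->sw_format         = AV_PIX_FMT_NV12;   /* data layout of the surfaces */
    fc->width             = 1920;
    fc->height            = 1080;
    fc->initial_pool_size = 20;                /* needed for fixed-size pools */

    if (av_hwframe_ctx_init(frames_ref) < 0)
        av_buffer_unref(&frames_ref);
    return frames_ref;
}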
+
+/**
+ * Allocate a new frame attached to the given AVHWFramesContext.
+ *
+ * @param hwframe_ctx a reference to an AVHWFramesContext
+ * @param frame an empty (freshly allocated or unreffed) frame to be filled with
+ * newly allocated buffers.
+ * @param flags currently unused, should be set to zero
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_hwframe_get_buffer(AVBufferRef *hwframe_ctx, AVFrame *frame, int flags);
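/* A sketch of pulling one frame out of an initialized pool; releasing the
 * AVFrame later returns the underlying surface to the pool. */
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

static AVFrame *get_pool_frame(AVBufferRef *frames_ref)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;
    if (av_hwframe_get_buffer(frames_ref, frame, 0) < 0)
        av_frame_free(&frame);   /* frees frame and sets it to NULL */
    return frame;
}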
+
+/**
+ * Copy data to or from a hw surface. At least one of dst/src must have an
+ * AVHWFramesContext attached.
+ *
+ * If src has an AVHWFramesContext attached, then the format of dst (if set)
+ * must use one of the formats returned by av_hwframe_transfer_get_formats(src,
+ * AV_HWFRAME_TRANSFER_DIRECTION_FROM).
+ * If dst has an AVHWFramesContext attached, then the format of src must use one
+ * of the formats returned by av_hwframe_transfer_get_formats(dst,
+ * AV_HWFRAME_TRANSFER_DIRECTION_TO)
+ *
+ * dst may be "clean" (i.e. with data/buf pointers unset), in which case the
+ * data buffers will be allocated by this function using av_frame_get_buffer().
+ * If dst->format is set, then this format will be used, otherwise (when
+ * dst->format is AV_PIX_FMT_NONE) the first acceptable format will be chosen.
+ *
+ * The two frames must have matching allocated dimensions (i.e. equal to
+ * AVHWFramesContext.width/height), since not all device types support
+ * transferring a sub-rectangle of the whole surface. The display dimensions
+ * (i.e. AVFrame.width/height) may be smaller than the allocated dimensions, but
+ * also have to be equal for both frames. When the display dimensions are
+ * smaller than the allocated dimensions, the content of the padding in the
+ * destination frame is unspecified.
+ *
+ * @param dst the destination frame. dst is not touched on failure.
+ * @param src the source frame.
+ * @param flags currently unused, should be set to zero
+ * @return 0 on success, a negative AVERROR error code on failure.
+ */
+int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags);
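/* A sketch of downloading a hardware frame into system memory; leaving
 * sw->format at AV_PIX_FMT_NONE lets the first acceptable format be chosen,
 * as described above. */
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

static AVFrame *download_frame(const AVFrame *hw)
{
    AVFrame *sw = av_frame_alloc();
    if (!sw)
        return NULL;
    if (av_hwframe_transfer_data(sw, hw, 0) < 0) {
        av_frame_free(&sw);
        return NULL;
    }
    return sw;
}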
+
+enum AVHWFrameTransferDirection {
+ /**
+ * Transfer the data from the queried hw frame.
+ */
+ AV_HWFRAME_TRANSFER_DIRECTION_FROM,
+
+ /**
+ * Transfer the data to the queried hw frame.
+ */
+ AV_HWFRAME_TRANSFER_DIRECTION_TO,
+};
+
+/**
+ * Get a list of possible source or target formats usable in
+ * av_hwframe_transfer_data().
+ *
+ * @param hwframe_ctx the frame context to obtain the information for
+ * @param dir the direction of the transfer
+ * @param formats The pointer to which the output format list will be written.
+ * The list is terminated with AV_PIX_FMT_NONE and must be freed
+ * by the caller when no longer needed using av_free().
+ * If this function returns successfully, the format list will
+ * have at least one item (not counting the terminator).
+ * On failure, the contents of this pointer are unspecified.
+ * @param flags currently unused, should be set to zero
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ctx,
+ enum AVHWFrameTransferDirection dir,
+ enum AVPixelFormat **formats, int flags);
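/* A sketch of enumerating the formats a frame pool can be downloaded to. */
#include <stdio.h>
#include <libavutil/hwcontext.h>
#include <libavutil/mem.h>
#include <libavutil/pixdesc.h>

static void print_download_formats(AVBufferRef *frames_ref)
{
    enum AVPixelFormat *fmts = NULL;

    if (av_hwframe_transfer_get_formats(frames_ref,
                                        AV_HWFRAME_TRANSFER_DIRECTION_FROM,
                                        &fmts, 0) < 0)
        return;
    for (int i = 0; fmts[i] != AV_PIX_FMT_NONE; i++)
        printf("downloadable as %s\n", av_get_pix_fmt_name(fmts[i]));
    av_free(fmts);
}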
+
+
+/**
+ * This struct describes the constraints on hardware frames attached to
+ * a given device with a hardware-specific configuration. This is returned
+ * by av_hwdevice_get_hwframe_constraints() and must be freed by
+ * av_hwframe_constraints_free() after use.
+ */
+typedef struct AVHWFramesConstraints {
+ /**
+ * A list of possible values for format in the hw_frames_ctx,
+ * terminated by AV_PIX_FMT_NONE. This member will always be filled.
+ */
+ enum AVPixelFormat *valid_hw_formats;
+
+ /**
+ * A list of possible values for sw_format in the hw_frames_ctx,
+ * terminated by AV_PIX_FMT_NONE. Can be NULL if this information is
+ * not known.
+ */
+ enum AVPixelFormat *valid_sw_formats;
+
+ /**
+ * The minimum size of frames in this hw_frames_ctx.
+ * (Zero if not known.)
+ */
+ int min_width;
+ int min_height;
+
+ /**
+ * The maximum size of frames in this hw_frames_ctx.
+ * (INT_MAX if not known / no limit.)
+ */
+ int max_width;
+ int max_height;
+} AVHWFramesConstraints;
+
+/**
+ * Allocate a HW-specific configuration structure for a given HW device.
+ * After use, the user must free all members as required by the specific
+ * hardware structure being used, then free the structure itself with
+ * av_free().
+ *
+ * @param device_ctx a reference to the associated AVHWDeviceContext.
+ * @return The newly created HW-specific configuration structure on
+ * success or NULL on failure.
+ */
+void *av_hwdevice_hwconfig_alloc(AVBufferRef *device_ctx);
+
+/**
+ * Get the constraints on HW frames given a device and the HW-specific
+ * configuration to be used with that device. If no HW-specific
+ * configuration is provided, returns the maximum possible capabilities
+ * of the device.
+ *
+ * @param ref a reference to the associated AVHWDeviceContext.
+ * @param hwconfig a filled HW-specific configuration structure, or NULL
+ * to return the maximum possible capabilities of the device.
+ * @return AVHWFramesConstraints structure describing the constraints
+ * on the device, or NULL if not available.
+ */
+AVHWFramesConstraints *av_hwdevice_get_hwframe_constraints(AVBufferRef *ref,
+ const void *hwconfig);
+
+/**
+ * Free an AVHWFramesConstraints structure.
+ *
+ * @param constraints The (filled or unfilled) AVHWFramesConstraints structure.
+ */
+void av_hwframe_constraints_free(AVHWFramesConstraints **constraints);
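/* A sketch of querying the maximum capabilities of a device (hwconfig is
 * NULL here, so no specific configuration is taken into account). */
#include <stdio.h>
#include <libavutil/hwcontext.h>
#include <libavutil/pixdesc.h>

static void print_surface_limits(AVBufferRef *device_ref)
{
    AVHWFramesConstraints *c =
        av_hwdevice_get_hwframe_constraints(device_ref, NULL);
    if (!c)
        return;

    printf("surface sizes: %dx%d .. %dx%d\n",
           c->min_width, c->min_height, c->max_width, c->max_height);
    for (int i = 0; c->valid_sw_formats &&
                    c->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++)
        printf("  sw format: %s\n",
               av_get_pix_fmt_name(c->valid_sw_formats[i]));

    av_hwframe_constraints_free(&c);
}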
+
+
+/**
+ * Flags to apply to frame mappings.
+ */
+enum {
+ /**
+ * The mapping must be readable.
+ */
+ AV_HWFRAME_MAP_READ = 1 << 0,
+ /**
+ * The mapping must be writeable.
+ */
+ AV_HWFRAME_MAP_WRITE = 1 << 1,
+ /**
+ * The mapped frame will be overwritten completely in subsequent
+ * operations, so the current frame data need not be loaded. Any values
+ * which are not overwritten are unspecified.
+ */
+ AV_HWFRAME_MAP_OVERWRITE = 1 << 2,
+ /**
+ * The mapping must be direct. That is, there must not be any copying in
+ * the map or unmap steps. Note that performance of direct mappings may
+ * be much lower than that of normal memory.
+ */
+ AV_HWFRAME_MAP_DIRECT = 1 << 3,
+};
+
+/**
+ * Map a hardware frame.
+ *
+ * This has a number of different possible effects, depending on the format
+ * and origin of the src and dst frames. On input, src should be a usable
+ * frame with valid buffers and dst should be blank (typically as just created
+ * by av_frame_alloc()). src should have an associated hwframe context, and
+ * dst may optionally have a format and associated hwframe context.
+ *
+ * If src was created by mapping a frame from the hwframe context of dst,
+ * then this function undoes the mapping - dst is replaced by a reference to
+ * the frame that src was originally mapped from.
+ *
+ * If both src and dst have an associated hwframe context, then this function
+ * attempts to map the src frame from its hardware context to that of dst and
+ * then fill dst with appropriate data to be usable there. This will only be
+ * possible if the hwframe contexts and associated devices are compatible -
+ * given compatible devices, av_hwframe_ctx_create_derived() can be used to
+ * create a hwframe context for dst in which mapping should be possible.
+ *
+ * If src has a hwframe context but dst does not, then the src frame is
+ * mapped to normal memory and should thereafter be usable as a normal frame.
+ * If the format is set on dst, then the mapping will attempt to create dst
+ * with that format and fail if it is not possible. If the format is unset
+ * (AV_PIX_FMT_NONE), then dst will be mapped with whatever format is most
+ * appropriate (typically the sw_format of the src hwframe context).
+ *
+ * A return value of AVERROR(ENOSYS) indicates that the mapping is not
+ * possible with the given arguments and hwframe setup, while other return
+ * values indicate that it failed somehow.
+ *
+ * On failure, the destination frame will be left blank, except for the
+ * hw_frames_ctx/format fields that may have been set by the caller - those will
+ * be preserved as they were.
+ *
+ * @param dst Destination frame, to contain the mapping.
+ * @param src Source frame, to be mapped.
+ * @param flags Some combination of AV_HWFRAME_MAP_* flags.
+ * @return Zero on success, negative AVERROR code on failure.
+ */
+int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags);
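/* A sketch of mapping a hardware frame for CPU read access, falling back to
 * a copying transfer when the backend reports that mapping is not possible. */
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

static AVFrame *map_for_reading(const AVFrame *hw)
{
    AVFrame *mapped = av_frame_alloc();
    int err;

    if (!mapped)
        return NULL;
    err = av_hwframe_map(mapped, hw, AV_HWFRAME_MAP_READ);
    if (err == AVERROR(ENOSYS))
        err = av_hwframe_transfer_data(mapped, hw, 0);
    if (err < 0)
        av_frame_free(&mapped);
    return mapped;
}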
+
+
+/**
+ * Create and initialise an AVHWFramesContext as a mapping of another existing
+ * AVHWFramesContext on a different device.
+ *
+ * av_hwframe_ctx_init() should not be called after this.
+ *
+ * @param derived_frame_ctx On success, a reference to the newly created
+ * AVHWFramesContext.
+ * @param format The AVPixelFormat for the derived context.
+ * @param derived_device_ctx A reference to the device to create the new
+ * AVHWFramesContext on.
+ * @param source_frame_ctx A reference to an existing AVHWFramesContext
+ * which will be mapped to the derived context.
+ * @param flags Some combination of AV_HWFRAME_MAP_* flags, defining the
+ * mapping parameters to apply to frames which are allocated
+ * in the derived device.
+ * @return Zero on success, negative AVERROR code on failure.
+ */
+int av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx,
+ enum AVPixelFormat format,
+ AVBufferRef *derived_device_ctx,
+ AVBufferRef *source_frame_ctx,
+ int flags);
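/* A sketch of frame-pool derivation in the direction used for DRM capture:
 * given a VAAPI device previously derived from the DRM device that owns
 * drm_frames, expose those DRM frames as VAAPI frames. Support for a given
 * combination depends on the mapping methods implemented by the backends. */
#include <libavutil/hwcontext.h>

static int drm_frames_as_vaapi(AVBufferRef **derived_frames,
                               AVBufferRef *vaapi_device,
                               AVBufferRef *drm_frames)
{
    return av_hwframe_ctx_create_derived(derived_frames, AV_PIX_FMT_VAAPI,
                                         vaapi_device, drm_frames,
                                         AV_HWFRAME_MAP_READ);
}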
+
+#endif /* AVUTIL_HWCONTEXT_H */
diff --git a/media/ffvpx/libavutil/hwcontext_drm.h b/media/ffvpx/libavutil/hwcontext_drm.h
new file mode 100644
index 0000000000..42709f215e
--- /dev/null
+++ b/media/ffvpx/libavutil/hwcontext_drm.h
@@ -0,0 +1,169 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HWCONTEXT_DRM_H
+#define AVUTIL_HWCONTEXT_DRM_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/**
+ * @file
+ * API-specific header for AV_HWDEVICE_TYPE_DRM.
+ *
+ * Internal frame allocation is not currently supported - all frames
+ * must be allocated by the user. Thus AVHWFramesContext is always
+ * NULL, though this may change if support for frame allocation is
+ * added in future.
+ */
+
+enum {
+ /**
+ * The maximum number of layers/planes in a DRM frame.
+ */
+ AV_DRM_MAX_PLANES = 4
+};
+
+/**
+ * DRM object descriptor.
+ *
+ * Describes a single DRM object, addressing it as a PRIME file
+ * descriptor.
+ */
+typedef struct AVDRMObjectDescriptor {
+ /**
+ * DRM PRIME fd for the object.
+ */
+ int fd;
+ /**
+ * Total size of the object.
+ *
+ * (This includes any parts which do not contain image data.)
+ */
+ size_t size;
+ /**
+ * Format modifier applied to the object (DRM_FORMAT_MOD_*).
+ *
+ * If the format modifier is unknown then this should be set to
+ * DRM_FORMAT_MOD_INVALID.
+ */
+ uint64_t format_modifier;
+} AVDRMObjectDescriptor;
+
+/**
+ * DRM plane descriptor.
+ *
+ * Describes a single plane of a layer, which is contained within
+ * a single object.
+ */
+typedef struct AVDRMPlaneDescriptor {
+ /**
+ * Index of the object containing this plane in the objects
+ * array of the enclosing frame descriptor.
+ */
+ int object_index;
+ /**
+ * Offset within that object of this plane.
+ */
+ ptrdiff_t offset;
+ /**
+ * Pitch (linesize) of this plane.
+ */
+ ptrdiff_t pitch;
+} AVDRMPlaneDescriptor;
+
+/**
+ * DRM layer descriptor.
+ *
+ * Describes a single layer within a frame. This has the structure
+ * defined by its format, and will contain one or more planes.
+ */
+typedef struct AVDRMLayerDescriptor {
+ /**
+ * Format of the layer (DRM_FORMAT_*).
+ */
+ uint32_t format;
+ /**
+ * Number of planes in the layer.
+ *
+ * This must match the number of planes required by format.
+ */
+ int nb_planes;
+ /**
+ * Array of planes in this layer.
+ */
+ AVDRMPlaneDescriptor planes[AV_DRM_MAX_PLANES];
+} AVDRMLayerDescriptor;
+
+/**
+ * DRM frame descriptor.
+ *
+ * This is used as the data pointer for AV_PIX_FMT_DRM_PRIME frames.
+ * It is also used by user-allocated frame pools - allocating in
+ * AVHWFramesContext.pool must return AVBufferRefs which contain
+ * an object of this type.
+ *
+ * The fields of this structure should be set such that it can be
+ * imported directly by EGL using the EGL_EXT_image_dma_buf_import
+ * and EGL_EXT_image_dma_buf_import_modifiers extensions.
+ * (Note that the exact layout of a particular format may vary between
+ * platforms - we only specify that the same platform should be able
+ * to import it.)
+ *
+ * The total number of planes must not exceed AV_DRM_MAX_PLANES, and
+ * the order of the planes by increasing layer index followed by
+ * increasing plane index must be the same as the order which would
+ * be used for the data pointers in the equivalent software format.
+ */
+typedef struct AVDRMFrameDescriptor {
+ /**
+ * Number of DRM objects making up this frame.
+ */
+ int nb_objects;
+ /**
+ * Array of objects making up the frame.
+ */
+ AVDRMObjectDescriptor objects[AV_DRM_MAX_PLANES];
+ /**
+ * Number of layers in the frame.
+ */
+ int nb_layers;
+ /**
+ * Array of layers in the frame.
+ */
+ AVDRMLayerDescriptor layers[AV_DRM_MAX_PLANES];
+} AVDRMFrameDescriptor;
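/* A sketch of walking the descriptor attached to an AV_PIX_FMT_DRM_PRIME
 * frame; as noted above, the descriptor travels as the frame's data pointer
 * (frame->data[0]). */
#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/hwcontext_drm.h>

static void dump_drm_frame(const AVFrame *frame)
{
    const AVDRMFrameDescriptor *desc =
        (const AVDRMFrameDescriptor*)frame->data[0];

    for (int i = 0; i < desc->nb_layers; i++) {
        const AVDRMLayerDescriptor *layer = &desc->layers[i];
        for (int j = 0; j < layer->nb_planes; j++) {
            const AVDRMPlaneDescriptor  *plane = &layer->planes[j];
            const AVDRMObjectDescriptor *obj   =
                &desc->objects[plane->object_index];
            printf("layer %d plane %d: fd %d offset %td pitch %td "
                   "modifier %#llx\n", i, j, obj->fd, plane->offset,
                   plane->pitch, (unsigned long long)obj->format_modifier);
        }
    }
}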
+
+/**
+ * DRM device.
+ *
+ * Allocated as AVHWDeviceContext.hwctx.
+ */
+typedef struct AVDRMDeviceContext {
+ /**
+ * File descriptor of DRM device.
+ *
+ * This is used as the device to create frames on, and may also be
+ * used in some derivation and mapping operations.
+ *
+ * If no device is required, set to -1.
+ */
+ int fd;
+} AVDRMDeviceContext;
+
+#endif /* AVUTIL_HWCONTEXT_DRM_H */
diff --git a/media/ffvpx/libavutil/hwcontext_internal.h b/media/ffvpx/libavutil/hwcontext_internal.h
new file mode 100644
index 0000000000..e6266494ac
--- /dev/null
+++ b/media/ffvpx/libavutil/hwcontext_internal.h
@@ -0,0 +1,178 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HWCONTEXT_INTERNAL_H
+#define AVUTIL_HWCONTEXT_INTERNAL_H
+
+#include <stddef.h>
+
+#include "buffer.h"
+#include "hwcontext.h"
+#include "frame.h"
+#include "pixfmt.h"
+
+typedef struct HWContextType {
+ enum AVHWDeviceType type;
+ const char *name;
+
+ /**
+ * An array of pixel formats supported by the AVHWFramesContext instances.
+ * Terminated by AV_PIX_FMT_NONE.
+ */
+ const enum AVPixelFormat *pix_fmts;
+
+ /**
+ * size of the public hardware-specific context,
+ * i.e. AVHWDeviceContext.hwctx
+ */
+ size_t device_hwctx_size;
+ /**
+ * size of the private data, i.e.
+ * AVHWDeviceInternal.priv
+ */
+ size_t device_priv_size;
+
+ /**
+ * Size of the hardware-specific device configuration.
+ * (Used to query hwframe constraints.)
+ */
+ size_t device_hwconfig_size;
+
+ /**
+ * size of the public frame pool hardware-specific context,
+ * i.e. AVHWFramesContext.hwctx
+ */
+ size_t frames_hwctx_size;
+ /**
+ * size of the private data, i.e.
+ * AVHWFramesInternal.priv
+ */
+ size_t frames_priv_size;
+
+ int (*device_create)(AVHWDeviceContext *ctx, const char *device,
+ AVDictionary *opts, int flags);
+ int (*device_derive)(AVHWDeviceContext *dst_ctx,
+ AVHWDeviceContext *src_ctx,
+ AVDictionary *opts, int flags);
+
+ int (*device_init)(AVHWDeviceContext *ctx);
+ void (*device_uninit)(AVHWDeviceContext *ctx);
+
+ int (*frames_get_constraints)(AVHWDeviceContext *ctx,
+ const void *hwconfig,
+ AVHWFramesConstraints *constraints);
+
+ int (*frames_init)(AVHWFramesContext *ctx);
+ void (*frames_uninit)(AVHWFramesContext *ctx);
+
+ int (*frames_get_buffer)(AVHWFramesContext *ctx, AVFrame *frame);
+ int (*transfer_get_formats)(AVHWFramesContext *ctx,
+ enum AVHWFrameTransferDirection dir,
+ enum AVPixelFormat **formats);
+ int (*transfer_data_to)(AVHWFramesContext *ctx, AVFrame *dst,
+ const AVFrame *src);
+ int (*transfer_data_from)(AVHWFramesContext *ctx, AVFrame *dst,
+ const AVFrame *src);
+
+ int (*map_to)(AVHWFramesContext *ctx, AVFrame *dst,
+ const AVFrame *src, int flags);
+ int (*map_from)(AVHWFramesContext *ctx, AVFrame *dst,
+ const AVFrame *src, int flags);
+
+ int (*frames_derive_to)(AVHWFramesContext *dst_ctx,
+ AVHWFramesContext *src_ctx, int flags);
+ int (*frames_derive_from)(AVHWFramesContext *dst_ctx,
+ AVHWFramesContext *src_ctx, int flags);
+} HWContextType;
+
+struct AVHWDeviceInternal {
+ const HWContextType *hw_type;
+ void *priv;
+
+ /**
+ * For a derived device, a reference to the original device
+ * context it was derived from.
+ */
+ AVBufferRef *source_device;
+};
+
+struct AVHWFramesInternal {
+ const HWContextType *hw_type;
+ void *priv;
+
+ AVBufferPool *pool_internal;
+
+ /**
+ * For a derived context, a reference to the original frames
+ * context it was derived from.
+ */
+ AVBufferRef *source_frames;
+ /**
+ * Flags to apply to the mapping from the source to the derived
+ * frame context when trying to allocate in the derived context.
+ */
+ int source_allocation_map_flags;
+};
+
+typedef struct HWMapDescriptor {
+ /**
+ * A reference to the original source of the mapping.
+ */
+ AVFrame *source;
+ /**
+ * A reference to the hardware frames context in which this
+ * mapping was made. May be the same as source->hw_frames_ctx,
+ * but need not be.
+ */
+ AVBufferRef *hw_frames_ctx;
+ /**
+ * Unmap function.
+ */
+ void (*unmap)(AVHWFramesContext *ctx,
+ struct HWMapDescriptor *hwmap);
+ /**
+ * Hardware-specific private data associated with the mapping.
+ */
+ void *priv;
+} HWMapDescriptor;
+
+int ff_hwframe_map_create(AVBufferRef *hwframe_ref,
+ AVFrame *dst, const AVFrame *src,
+ void (*unmap)(AVHWFramesContext *ctx,
+ HWMapDescriptor *hwmap),
+ void *priv);
+
+/**
+ * Replace the current hwmap of dst with the one from src, used for indirect
+ * mappings like VAAPI->(DRM)->OpenCL/Vulkan where a direct interop is missing.
+ */
+int ff_hwframe_map_replace(AVFrame *dst, const AVFrame *src);
+
+extern const HWContextType ff_hwcontext_type_cuda;
+extern const HWContextType ff_hwcontext_type_d3d11va;
+extern const HWContextType ff_hwcontext_type_drm;
+extern const HWContextType ff_hwcontext_type_dxva2;
+extern const HWContextType ff_hwcontext_type_opencl;
+extern const HWContextType ff_hwcontext_type_qsv;
+extern const HWContextType ff_hwcontext_type_vaapi;
+extern const HWContextType ff_hwcontext_type_vdpau;
+extern const HWContextType ff_hwcontext_type_videotoolbox;
+extern const HWContextType ff_hwcontext_type_mediacodec;
+extern const HWContextType ff_hwcontext_type_vulkan;
+
+#endif /* AVUTIL_HWCONTEXT_INTERNAL_H */
diff --git a/media/ffvpx/libavutil/hwcontext_vaapi.c b/media/ffvpx/libavutil/hwcontext_vaapi.c
new file mode 100644
index 0000000000..de30a919ab
--- /dev/null
+++ b/media/ffvpx/libavutil/hwcontext_vaapi.c
@@ -0,0 +1,1948 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#if HAVE_VAAPI_X11
+# include <va/va_x11.h>
+#endif
+#if HAVE_VAAPI_DRM
+# include <va/va_drm.h>
+#endif
+
+#if CONFIG_LIBDRM
+# include <va/va_drmcommon.h>
+# include <xf86drm.h>
+# include <drm_fourcc.h>
+# ifndef DRM_FORMAT_MOD_INVALID
+# define DRM_FORMAT_MOD_INVALID ((1ULL << 56) - 1)
+# endif
+#endif
+
+#include <fcntl.h>
+#if HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+
+
+#include "avassert.h"
+#include "buffer.h"
+#include "common.h"
+#include "hwcontext.h"
+#if CONFIG_LIBDRM
+#include "hwcontext_drm.h"
+#endif
+#include "hwcontext_internal.h"
+#include "hwcontext_vaapi.h"
+#include "mem.h"
+#include "pixdesc.h"
+#include "pixfmt.h"
+
+
+typedef struct VAAPIDevicePriv {
+#if HAVE_VAAPI_X11
+ Display *x11_display;
+#endif
+
+ int drm_fd;
+} VAAPIDevicePriv;
+
+typedef struct VAAPISurfaceFormat {
+ enum AVPixelFormat pix_fmt;
+ VAImageFormat image_format;
+} VAAPISurfaceFormat;
+
+typedef struct VAAPIDeviceContext {
+ // Surface formats which can be used with this device.
+ VAAPISurfaceFormat *formats;
+ int nb_formats;
+} VAAPIDeviceContext;
+
+typedef struct VAAPIFramesContext {
+ // Surface attributes set at create time.
+ VASurfaceAttrib *attributes;
+ int nb_attributes;
+ // RT format of the underlying surface (Intel driver ignores this anyway).
+ unsigned int rt_format;
+ // Whether vaDeriveImage works.
+ int derive_works;
+ // Caches whether VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2 is unsupported for
+ // surface imports.
+ int prime_2_import_unsupported;
+} VAAPIFramesContext;
+
+typedef struct VAAPIMapping {
+ // Handle to the derived or copied image which is mapped.
+ VAImage image;
+ // The mapping flags actually used.
+ int flags;
+} VAAPIMapping;
+
+typedef struct VAAPIFormat {
+ unsigned int fourcc;
+ unsigned int rt_format;
+ enum AVPixelFormat pix_fmt;
+ int chroma_planes_swapped;
+} VAAPIFormatDescriptor;
+
+#define MAP(va, rt, av, swap_uv) { \
+ VA_FOURCC_ ## va, \
+ VA_RT_FORMAT_ ## rt, \
+ AV_PIX_FMT_ ## av, \
+ swap_uv, \
+ }
+// The map fourcc <-> pix_fmt isn't bijective because of the annoying U/V
+// plane swap cases. The frame handling below tries to hide these.
+static const VAAPIFormatDescriptor vaapi_format_map[] = {
+ MAP(NV12, YUV420, NV12, 0),
+#ifdef VA_FOURCC_I420
+ MAP(I420, YUV420, YUV420P, 0),
+#endif
+ MAP(YV12, YUV420, YUV420P, 1),
+ MAP(IYUV, YUV420, YUV420P, 0),
+ MAP(422H, YUV422, YUV422P, 0),
+#ifdef VA_FOURCC_YV16
+ MAP(YV16, YUV422, YUV422P, 1),
+#endif
+ MAP(UYVY, YUV422, UYVY422, 0),
+ MAP(YUY2, YUV422, YUYV422, 0),
+#ifdef VA_FOURCC_Y210
+ MAP(Y210, YUV422_10, Y210, 0),
+#endif
+#ifdef VA_FOURCC_Y212
+ MAP(Y212, YUV422_12, Y212, 0),
+#endif
+ MAP(411P, YUV411, YUV411P, 0),
+ MAP(422V, YUV422, YUV440P, 0),
+ MAP(444P, YUV444, YUV444P, 0),
+#ifdef VA_FOURCC_XYUV
+ MAP(XYUV, YUV444, VUYX, 0),
+#endif
+ MAP(Y800, YUV400, GRAY8, 0),
+#ifdef VA_FOURCC_P010
+ MAP(P010, YUV420_10BPP, P010, 0),
+#endif
+#ifdef VA_FOURCC_P012
+ MAP(P012, YUV420_12, P012, 0),
+#endif
+ MAP(BGRA, RGB32, BGRA, 0),
+ MAP(BGRX, RGB32, BGR0, 0),
+ MAP(RGBA, RGB32, RGBA, 0),
+ MAP(RGBX, RGB32, RGB0, 0),
+#ifdef VA_FOURCC_ABGR
+ MAP(ABGR, RGB32, ABGR, 0),
+ MAP(XBGR, RGB32, 0BGR, 0),
+#endif
+ MAP(ARGB, RGB32, ARGB, 0),
+ MAP(XRGB, RGB32, 0RGB, 0),
+#ifdef VA_FOURCC_X2R10G10B10
+ MAP(X2R10G10B10, RGB32_10, X2RGB10, 0),
+#endif
+#ifdef VA_FOURCC_Y410
+ // libva doesn't include a fourcc for XV30 and the driver only declares
+ // support for Y410, so we must fudge the mapping here.
+ MAP(Y410, YUV444_10, XV30, 0),
+#endif
+#ifdef VA_FOURCC_Y412
+ // libva doesn't include a fourcc for XV36 and the driver only declares
+ // support for Y412, so we must fudge the mapping here.
+ MAP(Y412, YUV444_12, XV36, 0),
+#endif
+};
+#undef MAP
+
+static const VAAPIFormatDescriptor *
+ vaapi_format_from_fourcc(unsigned int fourcc)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_format_map); i++)
+ if (vaapi_format_map[i].fourcc == fourcc)
+ return &vaapi_format_map[i];
+ return NULL;
+}
+
+static const VAAPIFormatDescriptor *
+ vaapi_format_from_pix_fmt(enum AVPixelFormat pix_fmt)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_format_map); i++)
+ if (vaapi_format_map[i].pix_fmt == pix_fmt)
+ return &vaapi_format_map[i];
+ return NULL;
+}
+
+static enum AVPixelFormat vaapi_pix_fmt_from_fourcc(unsigned int fourcc)
+{
+ const VAAPIFormatDescriptor *desc;
+ desc = vaapi_format_from_fourcc(fourcc);
+ if (desc)
+ return desc->pix_fmt;
+ else
+ return AV_PIX_FMT_NONE;
+}
+
+static int vaapi_get_image_format(AVHWDeviceContext *hwdev,
+ enum AVPixelFormat pix_fmt,
+ VAImageFormat **image_format)
+{
+ VAAPIDeviceContext *ctx = hwdev->internal->priv;
+ int i;
+
+ for (i = 0; i < ctx->nb_formats; i++) {
+ if (ctx->formats[i].pix_fmt == pix_fmt) {
+ if (image_format)
+ *image_format = &ctx->formats[i].image_format;
+ return 0;
+ }
+ }
+ return AVERROR(EINVAL);
+}
+
+static int vaapi_frames_get_constraints(AVHWDeviceContext *hwdev,
+ const void *hwconfig,
+ AVHWFramesConstraints *constraints)
+{
+ AVVAAPIDeviceContext *hwctx = hwdev->hwctx;
+ const AVVAAPIHWConfig *config = hwconfig;
+ VAAPIDeviceContext *ctx = hwdev->internal->priv;
+ VASurfaceAttrib *attr_list = NULL;
+ VAStatus vas;
+ enum AVPixelFormat pix_fmt;
+ unsigned int fourcc;
+ int err, i, j, attr_count, pix_fmt_count;
+
+ if (config &&
+ !(hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES)) {
+ attr_count = 0;
+ vas = vaQuerySurfaceAttributes(hwctx->display, config->config_id,
+ 0, &attr_count);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwdev, AV_LOG_ERROR, "Failed to query surface attributes: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(ENOSYS);
+ goto fail;
+ }
+
+ attr_list = av_malloc(attr_count * sizeof(*attr_list));
+ if (!attr_list) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ vas = vaQuerySurfaceAttributes(hwctx->display, config->config_id,
+ attr_list, &attr_count);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwdev, AV_LOG_ERROR, "Failed to query surface attributes: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(ENOSYS);
+ goto fail;
+ }
+
+ pix_fmt_count = 0;
+ for (i = 0; i < attr_count; i++) {
+ switch (attr_list[i].type) {
+ case VASurfaceAttribPixelFormat:
+ fourcc = attr_list[i].value.value.i;
+ pix_fmt = vaapi_pix_fmt_from_fourcc(fourcc);
+ if (pix_fmt != AV_PIX_FMT_NONE) {
+ ++pix_fmt_count;
+ } else {
+ // Something unsupported - ignore.
+ }
+ break;
+ case VASurfaceAttribMinWidth:
+ constraints->min_width = attr_list[i].value.value.i;
+ break;
+ case VASurfaceAttribMinHeight:
+ constraints->min_height = attr_list[i].value.value.i;
+ break;
+ case VASurfaceAttribMaxWidth:
+ constraints->max_width = attr_list[i].value.value.i;
+ break;
+ case VASurfaceAttribMaxHeight:
+ constraints->max_height = attr_list[i].value.value.i;
+ break;
+ }
+ }
+ if (pix_fmt_count == 0) {
+ // Nothing usable found. Presumably there exists something which
+ // works, so leave the set null to indicate unknown.
+ constraints->valid_sw_formats = NULL;
+ } else {
+ constraints->valid_sw_formats = av_malloc_array(pix_fmt_count + 1,
+ sizeof(pix_fmt));
+ if (!constraints->valid_sw_formats) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ for (i = j = 0; i < attr_count; i++) {
+ int k;
+
+ if (attr_list[i].type != VASurfaceAttribPixelFormat)
+ continue;
+ fourcc = attr_list[i].value.value.i;
+ pix_fmt = vaapi_pix_fmt_from_fourcc(fourcc);
+
+ if (pix_fmt == AV_PIX_FMT_NONE)
+ continue;
+
+ for (k = 0; k < j; k++) {
+ if (constraints->valid_sw_formats[k] == pix_fmt)
+ break;
+ }
+
+ if (k == j)
+ constraints->valid_sw_formats[j++] = pix_fmt;
+ }
+ constraints->valid_sw_formats[j] = AV_PIX_FMT_NONE;
+ }
+ } else {
+ // No configuration supplied.
+ // Return the full set of image formats known by the implementation.
+ constraints->valid_sw_formats = av_malloc_array(ctx->nb_formats + 1,
+ sizeof(pix_fmt));
+ if (!constraints->valid_sw_formats) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ for (i = j = 0; i < ctx->nb_formats; i++) {
+ int k;
+
+ for (k = 0; k < j; k++) {
+ if (constraints->valid_sw_formats[k] == ctx->formats[i].pix_fmt)
+ break;
+ }
+
+ if (k == j)
+ constraints->valid_sw_formats[j++] = ctx->formats[i].pix_fmt;
+ }
+
+ constraints->valid_sw_formats[j] = AV_PIX_FMT_NONE;
+ }
+
+ constraints->valid_hw_formats = av_malloc_array(2, sizeof(pix_fmt));
+ if (!constraints->valid_hw_formats) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ constraints->valid_hw_formats[0] = AV_PIX_FMT_VAAPI;
+ constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;
+
+ err = 0;
+fail:
+ av_freep(&attr_list);
+ return err;
+}
+
+static const struct {
+ const char *friendly_name;
+ const char *match_string;
+ unsigned int quirks;
+} vaapi_driver_quirks_table[] = {
+#if !VA_CHECK_VERSION(1, 0, 0)
+ // The i965 driver did not conform before version 2.0.
+ {
+ "Intel i965 (Quick Sync)",
+ "i965",
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS,
+ },
+#endif
+ {
+ "Intel iHD",
+ "ubit",
+ AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE,
+ },
+ {
+ "VDPAU wrapper",
+ "Splitted-Desktop Systems VDPAU backend for VA-API",
+ AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES,
+ },
+};
+
+static int vaapi_device_init(AVHWDeviceContext *hwdev)
+{
+ VAAPIDeviceContext *ctx = hwdev->internal->priv;
+ AVVAAPIDeviceContext *hwctx = hwdev->hwctx;
+ VAImageFormat *image_list = NULL;
+ VAStatus vas;
+ const char *vendor_string;
+ int err, i, image_count;
+ enum AVPixelFormat pix_fmt;
+ unsigned int fourcc;
+
+ image_count = vaMaxNumImageFormats(hwctx->display);
+ if (image_count <= 0) {
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ image_list = av_malloc(image_count * sizeof(*image_list));
+ if (!image_list) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ vas = vaQueryImageFormats(hwctx->display, image_list, &image_count);
+ if (vas != VA_STATUS_SUCCESS) {
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ ctx->formats = av_malloc(image_count * sizeof(*ctx->formats));
+ if (!ctx->formats) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ ctx->nb_formats = 0;
+ for (i = 0; i < image_count; i++) {
+ fourcc = image_list[i].fourcc;
+ pix_fmt = vaapi_pix_fmt_from_fourcc(fourcc);
+ if (pix_fmt == AV_PIX_FMT_NONE) {
+ av_log(hwdev, AV_LOG_DEBUG, "Format %#x -> unknown.\n",
+ fourcc);
+ } else {
+ av_log(hwdev, AV_LOG_DEBUG, "Format %#x -> %s.\n",
+ fourcc, av_get_pix_fmt_name(pix_fmt));
+ ctx->formats[ctx->nb_formats].pix_fmt = pix_fmt;
+ ctx->formats[ctx->nb_formats].image_format = image_list[i];
+ ++ctx->nb_formats;
+ }
+ }
+
+ vendor_string = vaQueryVendorString(hwctx->display);
+ if (vendor_string)
+ av_log(hwdev, AV_LOG_VERBOSE, "VAAPI driver: %s.\n", vendor_string);
+
+ if (hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_USER_SET) {
+ av_log(hwdev, AV_LOG_VERBOSE, "Using quirks set by user (%#x).\n",
+ hwctx->driver_quirks);
+ } else {
+ // Detect the driver in use and set quirk flags if necessary.
+ hwctx->driver_quirks = 0;
+ if (vendor_string) {
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_driver_quirks_table); i++) {
+ if (strstr(vendor_string,
+ vaapi_driver_quirks_table[i].match_string)) {
+ av_log(hwdev, AV_LOG_VERBOSE, "Matched driver string "
+ "as known nonstandard driver \"%s\", setting "
+ "quirks (%#x).\n",
+ vaapi_driver_quirks_table[i].friendly_name,
+ vaapi_driver_quirks_table[i].quirks);
+ hwctx->driver_quirks |=
+ vaapi_driver_quirks_table[i].quirks;
+ break;
+ }
+ }
+ if (!(i < FF_ARRAY_ELEMS(vaapi_driver_quirks_table))) {
+ av_log(hwdev, AV_LOG_VERBOSE, "Driver not found in known "
+ "nonstandard list, using standard behaviour.\n");
+ }
+ } else {
+ av_log(hwdev, AV_LOG_VERBOSE, "Driver has no vendor string, "
+ "assuming standard behaviour.\n");
+ }
+ }
+
+ av_free(image_list);
+ return 0;
+fail:
+ av_freep(&ctx->formats);
+ av_free(image_list);
+ return err;
+}
+
+static void vaapi_device_uninit(AVHWDeviceContext *hwdev)
+{
+ VAAPIDeviceContext *ctx = hwdev->internal->priv;
+
+ av_freep(&ctx->formats);
+}
+
+static void vaapi_buffer_free(void *opaque, uint8_t *data)
+{
+ AVHWFramesContext *hwfc = opaque;
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VASurfaceID surface_id;
+ VAStatus vas;
+
+ surface_id = (VASurfaceID)(uintptr_t)data;
+
+ vas = vaDestroySurfaces(hwctx->display, &surface_id, 1);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to destroy surface %#x: "
+ "%d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ }
+}
+
+static AVBufferRef *vaapi_pool_alloc(void *opaque, size_t size)
+{
+ AVHWFramesContext *hwfc = opaque;
+ VAAPIFramesContext *ctx = hwfc->internal->priv;
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ AVVAAPIFramesContext *avfc = hwfc->hwctx;
+ VASurfaceID surface_id;
+ VAStatus vas;
+ AVBufferRef *ref;
+
+ if (hwfc->initial_pool_size > 0 &&
+ avfc->nb_surfaces >= hwfc->initial_pool_size)
+ return NULL;
+
+ vas = vaCreateSurfaces(hwctx->display, ctx->rt_format,
+ hwfc->width, hwfc->height,
+ &surface_id, 1,
+ ctx->attributes, ctx->nb_attributes);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to create surface: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return NULL;
+ }
+ av_log(hwfc, AV_LOG_DEBUG, "Created surface %#x.\n", surface_id);
+
+ ref = av_buffer_create((uint8_t*)(uintptr_t)surface_id,
+ sizeof(surface_id), &vaapi_buffer_free,
+ hwfc, AV_BUFFER_FLAG_READONLY);
+ if (!ref) {
+ vaDestroySurfaces(hwctx->display, &surface_id, 1);
+ return NULL;
+ }
+
+ if (hwfc->initial_pool_size > 0) {
+ // This is a fixed-size pool, so we must still be in the initial
+ // allocation sequence.
+ av_assert0(avfc->nb_surfaces < hwfc->initial_pool_size);
+ avfc->surface_ids[avfc->nb_surfaces] = surface_id;
+ ++avfc->nb_surfaces;
+ }
+
+ return ref;
+}
+
+static int vaapi_frames_init(AVHWFramesContext *hwfc)
+{
+ AVVAAPIFramesContext *avfc = hwfc->hwctx;
+ VAAPIFramesContext *ctx = hwfc->internal->priv;
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ const VAAPIFormatDescriptor *desc;
+ VAImageFormat *expected_format;
+ AVBufferRef *test_surface = NULL;
+ VASurfaceID test_surface_id;
+ VAImage test_image;
+ VAStatus vas;
+ int err, i;
+
+ desc = vaapi_format_from_pix_fmt(hwfc->sw_format);
+ if (!desc) {
+ av_log(hwfc, AV_LOG_ERROR, "Unsupported format: %s.\n",
+ av_get_pix_fmt_name(hwfc->sw_format));
+ return AVERROR(EINVAL);
+ }
+
+ if (!hwfc->pool) {
+ if (!(hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES)) {
+ int need_memory_type = !(hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE);
+ int need_pixel_format = 1;
+ for (i = 0; i < avfc->nb_attributes; i++) {
+ if (avfc->attributes[i].type == VASurfaceAttribMemoryType)
+ need_memory_type = 0;
+ if (avfc->attributes[i].type == VASurfaceAttribPixelFormat)
+ need_pixel_format = 0;
+ }
+ ctx->nb_attributes =
+ avfc->nb_attributes + need_memory_type + need_pixel_format;
+
+ ctx->attributes = av_malloc(ctx->nb_attributes *
+ sizeof(*ctx->attributes));
+ if (!ctx->attributes) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ for (i = 0; i < avfc->nb_attributes; i++)
+ ctx->attributes[i] = avfc->attributes[i];
+ if (need_memory_type) {
+ ctx->attributes[i++] = (VASurfaceAttrib) {
+ .type = VASurfaceAttribMemoryType,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypeInteger,
+ .value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_VA,
+ };
+ }
+ if (need_pixel_format) {
+ ctx->attributes[i++] = (VASurfaceAttrib) {
+ .type = VASurfaceAttribPixelFormat,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypeInteger,
+ .value.value.i = desc->fourcc,
+ };
+ }
+ av_assert0(i == ctx->nb_attributes);
+ } else {
+ ctx->attributes = NULL;
+ ctx->nb_attributes = 0;
+ }
+
+ ctx->rt_format = desc->rt_format;
+
+ if (hwfc->initial_pool_size > 0) {
+ // This pool will be usable as a render target, so we need to store
+ // all of the surface IDs somewhere that vaCreateContext() calls
+ // will be able to access them.
+ avfc->nb_surfaces = 0;
+ avfc->surface_ids = av_malloc(hwfc->initial_pool_size *
+ sizeof(*avfc->surface_ids));
+ if (!avfc->surface_ids) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ } else {
+ // This pool allows dynamic sizing, and will not be usable as a
+ // render target.
+ avfc->nb_surfaces = 0;
+ avfc->surface_ids = NULL;
+ }
+
+ hwfc->internal->pool_internal =
+ av_buffer_pool_init2(sizeof(VASurfaceID), hwfc,
+ &vaapi_pool_alloc, NULL);
+ if (!hwfc->internal->pool_internal) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to create VAAPI surface pool.\n");
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+
+ // Allocate a single surface to test whether vaDeriveImage() is going
+ // to work for the specific configuration.
+ if (hwfc->pool) {
+ test_surface = av_buffer_pool_get(hwfc->pool);
+ if (!test_surface) {
+ av_log(hwfc, AV_LOG_ERROR, "Unable to allocate a surface from "
+ "user-configured buffer pool.\n");
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ } else {
+ test_surface = av_buffer_pool_get(hwfc->internal->pool_internal);
+ if (!test_surface) {
+ av_log(hwfc, AV_LOG_ERROR, "Unable to allocate a surface from "
+ "internal buffer pool.\n");
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+ test_surface_id = (VASurfaceID)(uintptr_t)test_surface->data;
+
+ ctx->derive_works = 0;
+
+ err = vaapi_get_image_format(hwfc->device_ctx,
+ hwfc->sw_format, &expected_format);
+ if (err == 0) {
+ vas = vaDeriveImage(hwctx->display, test_surface_id, &test_image);
+ if (vas == VA_STATUS_SUCCESS) {
+ if (expected_format->fourcc == test_image.format.fourcc) {
+ av_log(hwfc, AV_LOG_DEBUG, "Direct mapping possible.\n");
+ ctx->derive_works = 1;
+ } else {
+ av_log(hwfc, AV_LOG_DEBUG, "Direct mapping disabled: "
+ "derived image format %08x does not match "
+ "expected format %08x.\n",
+ expected_format->fourcc, test_image.format.fourcc);
+ }
+ vaDestroyImage(hwctx->display, test_image.image_id);
+ } else {
+ av_log(hwfc, AV_LOG_DEBUG, "Direct mapping disabled: "
+ "deriving image does not work: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ }
+ } else {
+ av_log(hwfc, AV_LOG_DEBUG, "Direct mapping disabled: "
+ "image format is not supported.\n");
+ }
+
+ av_buffer_unref(&test_surface);
+ return 0;
+
+fail:
+ av_buffer_unref(&test_surface);
+ av_freep(&avfc->surface_ids);
+ av_freep(&ctx->attributes);
+ return err;
+}
+
+static void vaapi_frames_uninit(AVHWFramesContext *hwfc)
+{
+ AVVAAPIFramesContext *avfc = hwfc->hwctx;
+ VAAPIFramesContext *ctx = hwfc->internal->priv;
+
+ av_freep(&avfc->surface_ids);
+ av_freep(&ctx->attributes);
+}
+
+static int vaapi_get_buffer(AVHWFramesContext *hwfc, AVFrame *frame)
+{
+ frame->buf[0] = av_buffer_pool_get(hwfc->pool);
+ if (!frame->buf[0])
+ return AVERROR(ENOMEM);
+
+ frame->data[3] = frame->buf[0]->data;
+ frame->format = AV_PIX_FMT_VAAPI;
+ frame->width = hwfc->width;
+ frame->height = hwfc->height;
+
+ return 0;
+}
+
+static int vaapi_transfer_get_formats(AVHWFramesContext *hwfc,
+ enum AVHWFrameTransferDirection dir,
+ enum AVPixelFormat **formats)
+{
+ VAAPIDeviceContext *ctx = hwfc->device_ctx->internal->priv;
+ enum AVPixelFormat *pix_fmts;
+ int i, k, sw_format_available;
+
+ sw_format_available = 0;
+ for (i = 0; i < ctx->nb_formats; i++) {
+ if (ctx->formats[i].pix_fmt == hwfc->sw_format)
+ sw_format_available = 1;
+ }
+
+ pix_fmts = av_malloc((ctx->nb_formats + 1) * sizeof(*pix_fmts));
+ if (!pix_fmts)
+ return AVERROR(ENOMEM);
+
+ if (sw_format_available) {
+ pix_fmts[0] = hwfc->sw_format;
+ k = 1;
+ } else {
+ k = 0;
+ }
+ for (i = 0; i < ctx->nb_formats; i++) {
+ if (ctx->formats[i].pix_fmt == hwfc->sw_format)
+ continue;
+ av_assert0(k < ctx->nb_formats);
+ pix_fmts[k++] = ctx->formats[i].pix_fmt;
+ }
+ pix_fmts[k] = AV_PIX_FMT_NONE;
+
+ *formats = pix_fmts;
+ return 0;
+}
+
+static void vaapi_unmap_frame(AVHWFramesContext *hwfc,
+ HWMapDescriptor *hwmap)
+{
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VAAPIMapping *map = hwmap->priv;
+ VASurfaceID surface_id;
+ VAStatus vas;
+
+ surface_id = (VASurfaceID)(uintptr_t)hwmap->source->data[3];
+ av_log(hwfc, AV_LOG_DEBUG, "Unmap surface %#x.\n", surface_id);
+
+ vas = vaUnmapBuffer(hwctx->display, map->image.buf);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to unmap image from surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ }
+
+ if ((map->flags & AV_HWFRAME_MAP_WRITE) &&
+ !(map->flags & AV_HWFRAME_MAP_DIRECT)) {
+ vas = vaPutImage(hwctx->display, surface_id, map->image.image_id,
+ 0, 0, hwfc->width, hwfc->height,
+ 0, 0, hwfc->width, hwfc->height);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to write image to surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ }
+ }
+
+ vas = vaDestroyImage(hwctx->display, map->image.image_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to destroy image from surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ }
+
+ av_free(map);
+}
+
+static int vaapi_map_frame(AVHWFramesContext *hwfc,
+ AVFrame *dst, const AVFrame *src, int flags)
+{
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VAAPIFramesContext *ctx = hwfc->internal->priv;
+ VASurfaceID surface_id;
+ const VAAPIFormatDescriptor *desc;
+ VAImageFormat *image_format;
+ VAAPIMapping *map;
+ VAStatus vas;
+ void *address = NULL;
+ int err, i;
+
+ surface_id = (VASurfaceID)(uintptr_t)src->data[3];
+ av_log(hwfc, AV_LOG_DEBUG, "Map surface %#x.\n", surface_id);
+
+ if (!ctx->derive_works && (flags & AV_HWFRAME_MAP_DIRECT)) {
+ // Requested direct mapping but it is not possible.
+ return AVERROR(EINVAL);
+ }
+ if (dst->format == AV_PIX_FMT_NONE)
+ dst->format = hwfc->sw_format;
+ if (dst->format != hwfc->sw_format && (flags & AV_HWFRAME_MAP_DIRECT)) {
+ // Requested direct mapping but the formats do not match.
+ return AVERROR(EINVAL);
+ }
+
+ err = vaapi_get_image_format(hwfc->device_ctx, dst->format, &image_format);
+ if (err < 0) {
+ // Requested format is not a valid output format.
+ return AVERROR(EINVAL);
+ }
+
+ map = av_malloc(sizeof(*map));
+ if (!map)
+ return AVERROR(ENOMEM);
+ map->flags = flags;
+ map->image.image_id = VA_INVALID_ID;
+
+ vas = vaSyncSurface(hwctx->display, surface_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to sync surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ // The memory which we map using derive need not be connected to the CPU
+ // in a way conducive to fast access. On Gen7-Gen9 Intel graphics, the
+ // memory is mappable but not cached, so normal memcpy()-like access is
+ // very slow to read it (but writing is ok). It is possible to read much
+ // faster with a copy routine which is aware of the limitation, but we
+ // assume for now that the user is not aware of that and would therefore
+ // prefer not to be given direct-mapped memory if they request read access.
+ if (ctx->derive_works && dst->format == hwfc->sw_format &&
+ ((flags & AV_HWFRAME_MAP_DIRECT) || !(flags & AV_HWFRAME_MAP_READ))) {
+ vas = vaDeriveImage(hwctx->display, surface_id, &map->image);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to derive image from "
+ "surface %#x: %d (%s).\n",
+ surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ if (map->image.format.fourcc != image_format->fourcc) {
+ av_log(hwfc, AV_LOG_ERROR, "Derive image of surface %#x "
+ "is in wrong format: expected %#08x, got %#08x.\n",
+ surface_id, image_format->fourcc, map->image.format.fourcc);
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ map->flags |= AV_HWFRAME_MAP_DIRECT;
+ } else {
+ vas = vaCreateImage(hwctx->display, image_format,
+ hwfc->width, hwfc->height, &map->image);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to create image for "
+ "surface %#x: %d (%s).\n",
+ surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ if (!(flags & AV_HWFRAME_MAP_OVERWRITE)) {
+ vas = vaGetImage(hwctx->display, surface_id, 0, 0,
+ hwfc->width, hwfc->height, map->image.image_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to read image from "
+ "surface %#x: %d (%s).\n",
+ surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ }
+ }
+
+ vas = vaMapBuffer(hwctx->display, map->image.buf, &address);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to map image from surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ err = ff_hwframe_map_create(src->hw_frames_ctx,
+ dst, src, &vaapi_unmap_frame, map);
+ if (err < 0)
+ goto fail;
+
+ dst->width = src->width;
+ dst->height = src->height;
+
+ for (i = 0; i < map->image.num_planes; i++) {
+ dst->data[i] = (uint8_t*)address + map->image.offsets[i];
+ dst->linesize[i] = map->image.pitches[i];
+ }
+
+ desc = vaapi_format_from_fourcc(map->image.format.fourcc);
+ if (desc && desc->chroma_planes_swapped) {
+ // Chroma planes are YVU rather than YUV, so swap them.
+ FFSWAP(uint8_t*, dst->data[1], dst->data[2]);
+ }
+
+ return 0;
+
+fail:
+ if (map) {
+ if (address)
+ vaUnmapBuffer(hwctx->display, map->image.buf);
+ if (map->image.image_id != VA_INVALID_ID)
+ vaDestroyImage(hwctx->display, map->image.image_id);
+ av_free(map);
+ }
+ return err;
+}
+
+static int vaapi_transfer_data_from(AVHWFramesContext *hwfc,
+ AVFrame *dst, const AVFrame *src)
+{
+ AVFrame *map;
+ int err;
+
+ if (dst->width > hwfc->width || dst->height > hwfc->height)
+ return AVERROR(EINVAL);
+
+ map = av_frame_alloc();
+ if (!map)
+ return AVERROR(ENOMEM);
+ map->format = dst->format;
+
+ err = vaapi_map_frame(hwfc, map, src, AV_HWFRAME_MAP_READ);
+ if (err)
+ goto fail;
+
+ map->width = dst->width;
+ map->height = dst->height;
+
+ err = av_frame_copy(dst, map);
+ if (err)
+ goto fail;
+
+ err = 0;
+fail:
+ av_frame_free(&map);
+ return err;
+}
+
+static int vaapi_transfer_data_to(AVHWFramesContext *hwfc,
+ AVFrame *dst, const AVFrame *src)
+{
+ AVFrame *map;
+ int err;
+
+ if (src->width > hwfc->width || src->height > hwfc->height)
+ return AVERROR(EINVAL);
+
+ map = av_frame_alloc();
+ if (!map)
+ return AVERROR(ENOMEM);
+ map->format = src->format;
+
+ err = vaapi_map_frame(hwfc, map, dst, AV_HWFRAME_MAP_WRITE | AV_HWFRAME_MAP_OVERWRITE);
+ if (err)
+ goto fail;
+
+ map->width = src->width;
+ map->height = src->height;
+
+ err = av_frame_copy(map, src);
+ if (err)
+ goto fail;
+
+ err = 0;
+fail:
+ av_frame_free(&map);
+ return err;
+}
+
+static int vaapi_map_to_memory(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ int err;
+
+ if (dst->format != AV_PIX_FMT_NONE) {
+ err = vaapi_get_image_format(hwfc->device_ctx, dst->format, NULL);
+ if (err < 0)
+ return AVERROR(ENOSYS);
+ }
+
+ err = vaapi_map_frame(hwfc, dst, src, flags);
+ if (err)
+ return err;
+
+ err = av_frame_copy_props(dst, src);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+#if CONFIG_LIBDRM
+
+#define DRM_MAP(va, layers, ...) { \
+ VA_FOURCC_ ## va, \
+ layers, \
+ { __VA_ARGS__ } \
+ }
+static const struct {
+ uint32_t va_fourcc;
+ int nb_layer_formats;
+ uint32_t layer_formats[AV_DRM_MAX_PLANES];
+} vaapi_drm_format_map[] = {
+#ifdef DRM_FORMAT_R8
+ DRM_MAP(NV12, 2, DRM_FORMAT_R8, DRM_FORMAT_RG88),
+ DRM_MAP(NV12, 2, DRM_FORMAT_R8, DRM_FORMAT_GR88),
+#endif
+ DRM_MAP(NV12, 1, DRM_FORMAT_NV12),
+#if defined(VA_FOURCC_P010) && defined(DRM_FORMAT_R16)
+ DRM_MAP(P010, 2, DRM_FORMAT_R16, DRM_FORMAT_RG1616),
+#endif
+#if defined(VA_FOURCC_P012) && defined(DRM_FORMAT_R16)
+ DRM_MAP(P012, 2, DRM_FORMAT_R16, DRM_FORMAT_RG1616),
+#endif
+ DRM_MAP(BGRA, 1, DRM_FORMAT_ARGB8888),
+ DRM_MAP(BGRX, 1, DRM_FORMAT_XRGB8888),
+ DRM_MAP(RGBA, 1, DRM_FORMAT_ABGR8888),
+ DRM_MAP(RGBX, 1, DRM_FORMAT_XBGR8888),
+#ifdef VA_FOURCC_ABGR
+ DRM_MAP(ABGR, 1, DRM_FORMAT_RGBA8888),
+ DRM_MAP(XBGR, 1, DRM_FORMAT_RGBX8888),
+#endif
+ DRM_MAP(ARGB, 1, DRM_FORMAT_BGRA8888),
+ DRM_MAP(XRGB, 1, DRM_FORMAT_BGRX8888),
+#if defined(VA_FOURCC_XYUV) && defined(DRM_FORMAT_XYUV8888)
+ DRM_MAP(XYUV, 1, DRM_FORMAT_XYUV8888),
+#endif
+#if defined(VA_FOURCC_Y412) && defined(DRM_FORMAT_XVYU2101010)
+ DRM_MAP(Y410, 1, DRM_FORMAT_XVYU2101010),
+#endif
+#if defined(VA_FOURCC_Y412) && defined(DRM_FORMAT_XVYU12_16161616)
+ DRM_MAP(Y412, 1, DRM_FORMAT_XVYU12_16161616),
+#endif
+};
+#undef DRM_MAP
+
+static void vaapi_unmap_from_drm(AVHWFramesContext *dst_fc,
+ HWMapDescriptor *hwmap)
+{
+ AVVAAPIDeviceContext *dst_dev = dst_fc->device_ctx->hwctx;
+
+ VASurfaceID surface_id = (VASurfaceID)(uintptr_t)hwmap->priv;
+
+ av_log(dst_fc, AV_LOG_DEBUG, "Destroy surface %#x.\n", surface_id);
+
+ vaDestroySurfaces(dst_dev->display, &surface_id, 1);
+}
+
+static int vaapi_map_from_drm(AVHWFramesContext *src_fc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+#if VA_CHECK_VERSION(1, 1, 0)
+ VAAPIFramesContext *src_vafc = src_fc->internal->priv;
+ int use_prime2;
+#else
+ int k;
+#endif
+ AVHWFramesContext *dst_fc =
+ (AVHWFramesContext*)dst->hw_frames_ctx->data;
+ AVVAAPIDeviceContext *dst_dev = dst_fc->device_ctx->hwctx;
+ const AVDRMFrameDescriptor *desc;
+ const VAAPIFormatDescriptor *format_desc;
+ VASurfaceID surface_id;
+ VAStatus vas = VA_STATUS_SUCCESS;
+ uint32_t va_fourcc;
+ int err, i, j;
+
+#if !VA_CHECK_VERSION(1, 1, 0)
+ unsigned long buffer_handle;
+ VASurfaceAttribExternalBuffers buffer_desc;
+ VASurfaceAttrib attrs[2] = {
+ {
+ .type = VASurfaceAttribMemoryType,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypeInteger,
+ .value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME,
+ },
+ {
+ .type = VASurfaceAttribExternalBufferDescriptor,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypePointer,
+ .value.value.p = &buffer_desc,
+ }
+ };
+#endif
+
+ desc = (AVDRMFrameDescriptor*)src->data[0];
+
+ if (desc->nb_objects != 1) {
+ av_log(dst_fc, AV_LOG_ERROR, "VAAPI can only map frames "
+ "made from a single DRM object.\n");
+ return AVERROR(EINVAL);
+ }
+
+ va_fourcc = 0;
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_drm_format_map); i++) {
+ if (desc->nb_layers != vaapi_drm_format_map[i].nb_layer_formats)
+ continue;
+ for (j = 0; j < desc->nb_layers; j++) {
+ if (desc->layers[j].format !=
+ vaapi_drm_format_map[i].layer_formats[j])
+ break;
+ }
+ if (j != desc->nb_layers)
+ continue;
+ va_fourcc = vaapi_drm_format_map[i].va_fourcc;
+ break;
+ }
+ if (!va_fourcc) {
+ av_log(dst_fc, AV_LOG_ERROR, "DRM format not supported "
+ "by VAAPI.\n");
+ return AVERROR(EINVAL);
+ }
+
+ av_log(dst_fc, AV_LOG_DEBUG, "Map DRM object %d to VAAPI as "
+ "%08x.\n", desc->objects[0].fd, va_fourcc);
+
+ format_desc = vaapi_format_from_fourcc(va_fourcc);
+ av_assert0(format_desc);
+
+#if VA_CHECK_VERSION(1, 1, 0)
+ use_prime2 = !src_vafc->prime_2_import_unsupported &&
+ desc->objects[0].format_modifier != DRM_FORMAT_MOD_INVALID;
+ if (use_prime2) {
+ VADRMPRIMESurfaceDescriptor prime_desc;
+ VASurfaceAttrib prime_attrs[2] = {
+ {
+ .type = VASurfaceAttribMemoryType,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypeInteger,
+ .value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
+ },
+ {
+ .type = VASurfaceAttribExternalBufferDescriptor,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypePointer,
+ .value.value.p = &prime_desc,
+ }
+ };
+ prime_desc.fourcc = va_fourcc;
+ prime_desc.width = src_fc->width;
+ prime_desc.height = src_fc->height;
+ prime_desc.num_objects = desc->nb_objects;
+ for (i = 0; i < desc->nb_objects; ++i) {
+ prime_desc.objects[i].fd = desc->objects[i].fd;
+ prime_desc.objects[i].size = desc->objects[i].size;
+ prime_desc.objects[i].drm_format_modifier =
+ desc->objects[i].format_modifier;
+ }
+
+ prime_desc.num_layers = desc->nb_layers;
+ for (i = 0; i < desc->nb_layers; ++i) {
+ prime_desc.layers[i].drm_format = desc->layers[i].format;
+ prime_desc.layers[i].num_planes = desc->layers[i].nb_planes;
+ for (j = 0; j < desc->layers[i].nb_planes; ++j) {
+ prime_desc.layers[i].object_index[j] =
+ desc->layers[i].planes[j].object_index;
+ prime_desc.layers[i].offset[j] = desc->layers[i].planes[j].offset;
+ prime_desc.layers[i].pitch[j] = desc->layers[i].planes[j].pitch;
+ }
+
+ if (format_desc->chroma_planes_swapped &&
+ desc->layers[i].nb_planes == 3) {
+ FFSWAP(uint32_t, prime_desc.layers[i].pitch[1],
+ prime_desc.layers[i].pitch[2]);
+ FFSWAP(uint32_t, prime_desc.layers[i].offset[1],
+ prime_desc.layers[i].offset[2]);
+ }
+ }
+
+ /*
+         * We can query for PRIME_2 support with vaQuerySurfaceAttributes, but
+         * that needs the config_id which we don't have here. Both Intel and
+         * Gallium seem to do the correct error checks, so let's just try the
+ * PRIME_2 import first.
+ */
+ vas = vaCreateSurfaces(dst_dev->display, format_desc->rt_format,
+ src->width, src->height, &surface_id, 1,
+ prime_attrs, FF_ARRAY_ELEMS(prime_attrs));
+ if (vas != VA_STATUS_SUCCESS)
+ src_vafc->prime_2_import_unsupported = 1;
+ }
+
+ if (!use_prime2 || vas != VA_STATUS_SUCCESS) {
+ int k;
+ unsigned long buffer_handle;
+ VASurfaceAttribExternalBuffers buffer_desc;
+ VASurfaceAttrib buffer_attrs[2] = {
+ {
+ .type = VASurfaceAttribMemoryType,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypeInteger,
+ .value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME,
+ },
+ {
+ .type = VASurfaceAttribExternalBufferDescriptor,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypePointer,
+ .value.value.p = &buffer_desc,
+ }
+ };
+
+ buffer_handle = desc->objects[0].fd;
+ buffer_desc.pixel_format = va_fourcc;
+ buffer_desc.width = src_fc->width;
+ buffer_desc.height = src_fc->height;
+ buffer_desc.data_size = desc->objects[0].size;
+ buffer_desc.buffers = &buffer_handle;
+ buffer_desc.num_buffers = 1;
+ buffer_desc.flags = 0;
+
+ k = 0;
+ for (i = 0; i < desc->nb_layers; i++) {
+ for (j = 0; j < desc->layers[i].nb_planes; j++) {
+ buffer_desc.pitches[k] = desc->layers[i].planes[j].pitch;
+ buffer_desc.offsets[k] = desc->layers[i].planes[j].offset;
+ ++k;
+ }
+ }
+ buffer_desc.num_planes = k;
+
+ if (format_desc->chroma_planes_swapped &&
+ buffer_desc.num_planes == 3) {
+ FFSWAP(uint32_t, buffer_desc.pitches[1], buffer_desc.pitches[2]);
+ FFSWAP(uint32_t, buffer_desc.offsets[1], buffer_desc.offsets[2]);
+ }
+
+ vas = vaCreateSurfaces(dst_dev->display, format_desc->rt_format,
+ src->width, src->height,
+ &surface_id, 1,
+ buffer_attrs, FF_ARRAY_ELEMS(buffer_attrs));
+ }
+#else
+ buffer_handle = desc->objects[0].fd;
+ buffer_desc.pixel_format = va_fourcc;
+ buffer_desc.width = src_fc->width;
+ buffer_desc.height = src_fc->height;
+ buffer_desc.data_size = desc->objects[0].size;
+ buffer_desc.buffers = &buffer_handle;
+ buffer_desc.num_buffers = 1;
+ buffer_desc.flags = 0;
+
+ k = 0;
+ for (i = 0; i < desc->nb_layers; i++) {
+ for (j = 0; j < desc->layers[i].nb_planes; j++) {
+ buffer_desc.pitches[k] = desc->layers[i].planes[j].pitch;
+ buffer_desc.offsets[k] = desc->layers[i].planes[j].offset;
+ ++k;
+ }
+ }
+ buffer_desc.num_planes = k;
+
+ if (format_desc->chroma_planes_swapped &&
+ buffer_desc.num_planes == 3) {
+ FFSWAP(uint32_t, buffer_desc.pitches[1], buffer_desc.pitches[2]);
+ FFSWAP(uint32_t, buffer_desc.offsets[1], buffer_desc.offsets[2]);
+ }
+
+ vas = vaCreateSurfaces(dst_dev->display, format_desc->rt_format,
+ src->width, src->height,
+ &surface_id, 1,
+ attrs, FF_ARRAY_ELEMS(attrs));
+#endif
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(dst_fc, AV_LOG_ERROR, "Failed to create surface from DRM "
+ "object: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+ av_log(dst_fc, AV_LOG_DEBUG, "Create surface %#x.\n", surface_id);
+
+ err = ff_hwframe_map_create(dst->hw_frames_ctx, dst, src,
+ &vaapi_unmap_from_drm,
+ (void*)(uintptr_t)surface_id);
+ if (err < 0)
+ return err;
+
+ dst->width = src->width;
+ dst->height = src->height;
+ dst->data[3] = (uint8_t*)(uintptr_t)surface_id;
+
+ av_log(dst_fc, AV_LOG_DEBUG, "Mapped DRM object %d to "
+ "surface %#x.\n", desc->objects[0].fd, surface_id);
+
+ return 0;
+}
+
+#if VA_CHECK_VERSION(1, 1, 0)
+static void vaapi_unmap_to_drm_esh(AVHWFramesContext *hwfc,
+ HWMapDescriptor *hwmap)
+{
+ AVDRMFrameDescriptor *drm_desc = hwmap->priv;
+ int i;
+
+ for (i = 0; i < drm_desc->nb_objects; i++)
+ close(drm_desc->objects[i].fd);
+
+ av_freep(&drm_desc);
+}
+
+static int vaapi_map_to_drm_esh(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VASurfaceID surface_id;
+ VAStatus vas;
+ VADRMPRIMESurfaceDescriptor va_desc;
+ AVDRMFrameDescriptor *drm_desc = NULL;
+ uint32_t export_flags;
+ int err, i, j;
+
+ surface_id = (VASurfaceID)(uintptr_t)src->data[3];
+
+ export_flags = VA_EXPORT_SURFACE_SEPARATE_LAYERS;
+ if (flags & AV_HWFRAME_MAP_READ) {
+ export_flags |= VA_EXPORT_SURFACE_READ_ONLY;
+
+ vas = vaSyncSurface(hwctx->display, surface_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to sync surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+ }
+
+ if (flags & AV_HWFRAME_MAP_WRITE)
+ export_flags |= VA_EXPORT_SURFACE_WRITE_ONLY;
+
+ vas = vaExportSurfaceHandle(hwctx->display, surface_id,
+ VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
+ export_flags, &va_desc);
+ if (vas != VA_STATUS_SUCCESS) {
+ if (vas == VA_STATUS_ERROR_UNIMPLEMENTED)
+ return AVERROR(ENOSYS);
+ av_log(hwfc, AV_LOG_ERROR, "Failed to export surface %#x: "
+ "%d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ drm_desc = av_mallocz(sizeof(*drm_desc));
+ if (!drm_desc) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ // By some bizarre coincidence, these structures are very similar...
+ drm_desc->nb_objects = va_desc.num_objects;
+ for (i = 0; i < va_desc.num_objects; i++) {
+ drm_desc->objects[i].fd = va_desc.objects[i].fd;
+ drm_desc->objects[i].size = va_desc.objects[i].size;
+ drm_desc->objects[i].format_modifier =
+ va_desc.objects[i].drm_format_modifier;
+ }
+ drm_desc->nb_layers = va_desc.num_layers;
+ for (i = 0; i < va_desc.num_layers; i++) {
+ drm_desc->layers[i].format = va_desc.layers[i].drm_format;
+ drm_desc->layers[i].nb_planes = va_desc.layers[i].num_planes;
+ for (j = 0; j < va_desc.layers[i].num_planes; j++) {
+ drm_desc->layers[i].planes[j].object_index =
+ va_desc.layers[i].object_index[j];
+ drm_desc->layers[i].planes[j].offset =
+ va_desc.layers[i].offset[j];
+ drm_desc->layers[i].planes[j].pitch =
+ va_desc.layers[i].pitch[j];
+ }
+ }
+
+ err = ff_hwframe_map_create(src->hw_frames_ctx, dst, src,
+ &vaapi_unmap_to_drm_esh, drm_desc);
+ if (err < 0)
+ goto fail;
+
+ dst->width = src->width;
+ dst->height = src->height;
+ dst->data[0] = (uint8_t*)drm_desc;
+
+ return 0;
+
+fail:
+ for (i = 0; i < va_desc.num_objects; i++)
+ close(va_desc.objects[i].fd);
+ av_freep(&drm_desc);
+ return err;
+}
+#endif
+
+#if VA_CHECK_VERSION(0, 36, 0)
+typedef struct VAAPIDRMImageBufferMapping {
+ VAImage image;
+ VABufferInfo buffer_info;
+
+ AVDRMFrameDescriptor drm_desc;
+} VAAPIDRMImageBufferMapping;
+
+static void vaapi_unmap_to_drm_abh(AVHWFramesContext *hwfc,
+ HWMapDescriptor *hwmap)
+{
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VAAPIDRMImageBufferMapping *mapping = hwmap->priv;
+ VASurfaceID surface_id;
+ VAStatus vas;
+
+ surface_id = (VASurfaceID)(uintptr_t)hwmap->source->data[3];
+ av_log(hwfc, AV_LOG_DEBUG, "Unmap VAAPI surface %#x from DRM.\n",
+ surface_id);
+
+ // DRM PRIME file descriptors are closed by vaReleaseBufferHandle(),
+ // so we shouldn't close them separately.
+
+ vas = vaReleaseBufferHandle(hwctx->display, mapping->image.buf);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to release buffer "
+ "handle of image %#x (derived from surface %#x): "
+ "%d (%s).\n", mapping->image.buf, surface_id,
+ vas, vaErrorStr(vas));
+ }
+
+ vas = vaDestroyImage(hwctx->display, mapping->image.image_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to destroy image "
+ "derived from surface %#x: %d (%s).\n",
+ surface_id, vas, vaErrorStr(vas));
+ }
+
+ av_free(mapping);
+}
+
+static int vaapi_map_to_drm_abh(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VAAPIDRMImageBufferMapping *mapping = NULL;
+ VASurfaceID surface_id;
+ VAStatus vas;
+ int err, i, p;
+
+ surface_id = (VASurfaceID)(uintptr_t)src->data[3];
+ av_log(hwfc, AV_LOG_DEBUG, "Map VAAPI surface %#x to DRM.\n",
+ surface_id);
+
+ mapping = av_mallocz(sizeof(*mapping));
+ if (!mapping)
+ return AVERROR(ENOMEM);
+
+ vas = vaDeriveImage(hwctx->display, surface_id,
+ &mapping->image);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to derive image from "
+ "surface %#x: %d (%s).\n",
+ surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_drm_format_map); i++) {
+ if (vaapi_drm_format_map[i].va_fourcc ==
+ mapping->image.format.fourcc)
+ break;
+ }
+ if (i >= FF_ARRAY_ELEMS(vaapi_drm_format_map)) {
+ av_log(hwfc, AV_LOG_ERROR, "No matching DRM format for "
+ "VAAPI format %#x.\n", mapping->image.format.fourcc);
+ err = AVERROR(EINVAL);
+ goto fail_derived;
+ }
+
+ mapping->buffer_info.mem_type =
+ VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME;
+
+ mapping->drm_desc.nb_layers =
+ vaapi_drm_format_map[i].nb_layer_formats;
+ if (mapping->drm_desc.nb_layers > 1) {
+ if (mapping->drm_desc.nb_layers != mapping->image.num_planes) {
+ av_log(hwfc, AV_LOG_ERROR, "Image properties do not match "
+ "expected format: got %d planes, but expected %d.\n",
+ mapping->image.num_planes, mapping->drm_desc.nb_layers);
+ err = AVERROR(EINVAL);
+ goto fail_derived;
+ }
+
+ for(p = 0; p < mapping->drm_desc.nb_layers; p++) {
+ mapping->drm_desc.layers[p] = (AVDRMLayerDescriptor) {
+ .format = vaapi_drm_format_map[i].layer_formats[p],
+ .nb_planes = 1,
+ .planes[0] = {
+ .object_index = 0,
+ .offset = mapping->image.offsets[p],
+ .pitch = mapping->image.pitches[p],
+ },
+ };
+ }
+ } else {
+ mapping->drm_desc.layers[0].format =
+ vaapi_drm_format_map[i].layer_formats[0];
+ mapping->drm_desc.layers[0].nb_planes = mapping->image.num_planes;
+ for (p = 0; p < mapping->image.num_planes; p++) {
+ mapping->drm_desc.layers[0].planes[p] = (AVDRMPlaneDescriptor) {
+ .object_index = 0,
+ .offset = mapping->image.offsets[p],
+ .pitch = mapping->image.pitches[p],
+ };
+ }
+ }
+
+ vas = vaAcquireBufferHandle(hwctx->display, mapping->image.buf,
+ &mapping->buffer_info);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to get buffer "
+ "handle from image %#x (derived from surface %#x): "
+ "%d (%s).\n", mapping->image.buf, surface_id,
+ vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_derived;
+ }
+
+ av_log(hwfc, AV_LOG_DEBUG, "DRM PRIME fd is %"PRIdPTR".\n",
+ mapping->buffer_info.handle);
+
+ mapping->drm_desc.nb_objects = 1;
+ mapping->drm_desc.objects[0] = (AVDRMObjectDescriptor) {
+ .fd = mapping->buffer_info.handle,
+ .size = mapping->image.data_size,
+ // There is no way to get the format modifier with this API.
+ .format_modifier = DRM_FORMAT_MOD_INVALID,
+ };
+
+ err = ff_hwframe_map_create(src->hw_frames_ctx,
+ dst, src, &vaapi_unmap_to_drm_abh,
+ mapping);
+ if (err < 0)
+ goto fail_mapped;
+
+ dst->data[0] = (uint8_t*)&mapping->drm_desc;
+ dst->width = src->width;
+ dst->height = src->height;
+
+ return 0;
+
+fail_mapped:
+ vaReleaseBufferHandle(hwctx->display, mapping->image.buf);
+fail_derived:
+ vaDestroyImage(hwctx->display, mapping->image.image_id);
+fail:
+ av_freep(&mapping);
+ return err;
+}
+#endif
+
+static int vaapi_map_to_drm(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+#if VA_CHECK_VERSION(1, 1, 0)
+ int err;
+ err = vaapi_map_to_drm_esh(hwfc, dst, src, flags);
+ if (err != AVERROR(ENOSYS))
+ return err;
+#endif
+#if VA_CHECK_VERSION(0, 36, 0)
+ return vaapi_map_to_drm_abh(hwfc, dst, src, flags);
+#endif
+ return AVERROR(ENOSYS);
+}
+
+#endif /* CONFIG_LIBDRM */
+
+static int vaapi_map_to(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ switch (src->format) {
+#if CONFIG_LIBDRM
+ case AV_PIX_FMT_DRM_PRIME:
+ return vaapi_map_from_drm(hwfc, dst, src, flags);
+#endif
+ default:
+ return AVERROR(ENOSYS);
+ }
+}
+
+static int vaapi_map_from(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ switch (dst->format) {
+#if CONFIG_LIBDRM
+ case AV_PIX_FMT_DRM_PRIME:
+ return vaapi_map_to_drm(hwfc, dst, src, flags);
+#endif
+ default:
+ return vaapi_map_to_memory(hwfc, dst, src, flags);
+ }
+}
+
+static void vaapi_device_free(AVHWDeviceContext *ctx)
+{
+ AVVAAPIDeviceContext *hwctx = ctx->hwctx;
+ VAAPIDevicePriv *priv = ctx->user_opaque;
+
+ if (hwctx->display)
+ vaTerminate(hwctx->display);
+
+#if HAVE_VAAPI_X11
+ if (priv->x11_display)
+ XCloseDisplay(priv->x11_display);
+#endif
+
+ if (priv->drm_fd >= 0)
+ close(priv->drm_fd);
+
+ av_freep(&priv);
+}
+
+#if CONFIG_VAAPI_1
+static void vaapi_device_log_error(void *context, const char *message)
+{
+ AVHWDeviceContext *ctx = context;
+
+ av_log(ctx, AV_LOG_ERROR, "libva: %s", message);
+}
+
+static void vaapi_device_log_info(void *context, const char *message)
+{
+ AVHWDeviceContext *ctx = context;
+
+ av_log(ctx, AV_LOG_VERBOSE, "libva: %s", message);
+}
+#endif
+
+static int vaapi_device_connect(AVHWDeviceContext *ctx,
+ VADisplay display)
+{
+ AVVAAPIDeviceContext *hwctx = ctx->hwctx;
+ int major, minor;
+ VAStatus vas;
+
+#if CONFIG_VAAPI_1
+ vaSetErrorCallback(display, &vaapi_device_log_error, ctx);
+ vaSetInfoCallback (display, &vaapi_device_log_info, ctx);
+#endif
+
+ hwctx->display = display;
+
+ vas = vaInitialize(display, &major, &minor);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to initialise VAAPI "
+ "connection: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "Initialised VAAPI connection: "
+ "version %d.%d\n", major, minor);
+
+ return 0;
+}
+
+static int vaapi_device_create(AVHWDeviceContext *ctx, const char *device,
+ AVDictionary *opts, int flags)
+{
+ VAAPIDevicePriv *priv;
+ VADisplay display = NULL;
+ const AVDictionaryEntry *ent;
+ int try_drm, try_x11, try_all;
+
+ priv = av_mallocz(sizeof(*priv));
+ if (!priv)
+ return AVERROR(ENOMEM);
+
+ priv->drm_fd = -1;
+
+ ctx->user_opaque = priv;
+ ctx->free = vaapi_device_free;
+
+ ent = av_dict_get(opts, "connection_type", NULL, 0);
+ if (ent) {
+ try_all = try_drm = try_x11 = 0;
+ if (!strcmp(ent->value, "drm")) {
+ try_drm = 1;
+ } else if (!strcmp(ent->value, "x11")) {
+ try_x11 = 1;
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "Invalid connection type %s.\n",
+ ent->value);
+ return AVERROR(EINVAL);
+ }
+ } else {
+ try_all = 1;
+ try_drm = HAVE_VAAPI_DRM;
+ try_x11 = HAVE_VAAPI_X11;
+ }
+
+#if HAVE_VAAPI_DRM
+ while (!display && try_drm) {
+ // If the device is specified, try to open it as a DRM device node.
+ // If not, look for a usable render node, possibly restricted to those
+ // using a specified kernel driver.
+ int loglevel = try_all ? AV_LOG_VERBOSE : AV_LOG_ERROR;
+ if (device) {
+ priv->drm_fd = open(device, O_RDWR);
+ if (priv->drm_fd < 0) {
+ av_log(ctx, loglevel, "Failed to open %s as "
+ "DRM device node.\n", device);
+ break;
+ }
+ } else {
+ char path[64];
+ int n, max_devices = 8;
+#if CONFIG_LIBDRM
+ drmVersion *info;
+ const AVDictionaryEntry *kernel_driver;
+ kernel_driver = av_dict_get(opts, "kernel_driver", NULL, 0);
+#endif
+ for (n = 0; n < max_devices; n++) {
+ snprintf(path, sizeof(path),
+ "/dev/dri/renderD%d", 128 + n);
+ priv->drm_fd = open(path, O_RDWR);
+ if (priv->drm_fd < 0) {
+ av_log(ctx, AV_LOG_VERBOSE, "Cannot open "
+ "DRM render node for device %d.\n", n);
+ break;
+ }
+#if CONFIG_LIBDRM
+ info = drmGetVersion(priv->drm_fd);
+ if (!info) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Failed to get DRM version for device %d.\n", n);
+ close(priv->drm_fd);
+ priv->drm_fd = -1;
+ continue;
+ }
+ if (kernel_driver) {
+ if (strcmp(kernel_driver->value, info->name)) {
+ av_log(ctx, AV_LOG_VERBOSE, "Ignoring device %d "
+ "with non-matching kernel driver (%s).\n",
+ n, info->name);
+ drmFreeVersion(info);
+ close(priv->drm_fd);
+ priv->drm_fd = -1;
+ continue;
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "Trying to use "
+ "DRM render node for device %d, "
+ "with matching kernel driver (%s).\n",
+ n, info->name);
+ drmFreeVersion(info);
+ break;
+ // drmGetVersion() ensures |info->name| is 0-terminated.
+ } else if (!strcmp(info->name, "vgem")) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Skipping vgem node for device %d.\n", n);
+ drmFreeVersion(info);
+ close(priv->drm_fd);
+ priv->drm_fd = -1;
+ continue;
+ }
+ drmFreeVersion(info);
+#endif
+ av_log(ctx, AV_LOG_VERBOSE, "Trying to use "
+ "DRM render node for device %d.\n", n);
+ break;
+ }
+ if (n >= max_devices)
+ break;
+ }
+
+ display = vaGetDisplayDRM(priv->drm_fd);
+ if (!display) {
+ av_log(ctx, AV_LOG_VERBOSE, "Cannot open a VA display "
+ "from DRM device %s.\n", device);
+ return AVERROR_EXTERNAL;
+ }
+ break;
+ }
+#endif
+
+#if HAVE_VAAPI_X11
+ if (!display && try_x11) {
+ // Try to open the device as an X11 display.
+ priv->x11_display = XOpenDisplay(device);
+ if (!priv->x11_display) {
+ av_log(ctx, AV_LOG_VERBOSE, "Cannot open X11 display "
+ "%s.\n", XDisplayName(device));
+ } else {
+ display = vaGetDisplay(priv->x11_display);
+ if (!display) {
+ av_log(ctx, AV_LOG_ERROR, "Cannot open a VA display "
+ "from X11 display %s.\n", XDisplayName(device));
+ return AVERROR_UNKNOWN;
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "Opened VA display via "
+ "X11 display %s.\n", XDisplayName(device));
+ }
+ }
+#endif
+
+ if (!display) {
+ if (device)
+ av_log(ctx, AV_LOG_ERROR, "No VA display found for "
+ "device %s.\n", device);
+ else
+ av_log(ctx, AV_LOG_ERROR, "No VA display found for "
+ "any default device.\n");
+ return AVERROR(EINVAL);
+ }
+
+ ent = av_dict_get(opts, "driver", NULL, 0);
+ if (ent) {
+#if VA_CHECK_VERSION(0, 38, 0)
+ VAStatus vas;
+ vas = vaSetDriverName(display, ent->value);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to set driver name to "
+ "%s: %d (%s).\n", ent->value, vas, vaErrorStr(vas));
+ vaTerminate(display);
+ return AVERROR_EXTERNAL;
+ }
+#else
+ av_log(ctx, AV_LOG_WARNING, "Driver name setting is not "
+ "supported with this VAAPI version.\n");
+#endif
+ }
+
+ return vaapi_device_connect(ctx, display);
+}
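
The "connection_type", "kernel_driver" and "driver" options handled above arrive through av_hwdevice_ctx_create(). A hedged usage sketch (not from the patch); the driver name is only an example, and kernel_driver is only consulted when no device path is given:

#include <libavutil/dict.h>
#include <libavutil/hwcontext.h>

/* Sketch: create a VAAPI device over DRM, letting libavutil pick a render
 * node whose kernel driver matches the requested name. */
static int create_vaapi_device(AVBufferRef **out_ref)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "connection_type", "drm", 0);
    av_dict_set(&opts, "kernel_driver", "i915", 0); /* example value */

    ret = av_hwdevice_ctx_create(out_ref, AV_HWDEVICE_TYPE_VAAPI,
                                 NULL /* scan render nodes */, opts, 0);
    av_dict_free(&opts);
    return ret;
}
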
+
+static int vaapi_device_derive(AVHWDeviceContext *ctx,
+ AVHWDeviceContext *src_ctx,
+ AVDictionary *opts, int flags)
+{
+#if HAVE_VAAPI_DRM
+ if (src_ctx->type == AV_HWDEVICE_TYPE_DRM) {
+ AVDRMDeviceContext *src_hwctx = src_ctx->hwctx;
+ VADisplay *display;
+ VAAPIDevicePriv *priv;
+ int fd;
+
+ if (src_hwctx->fd < 0) {
+ av_log(ctx, AV_LOG_ERROR, "DRM instance requires an associated "
+ "device to derive a VA display from.\n");
+ return AVERROR(EINVAL);
+ }
+
+#if CONFIG_LIBDRM
+ {
+ int node_type = drmGetNodeTypeFromFd(src_hwctx->fd);
+ char *render_node;
+ if (node_type < 0) {
+ av_log(ctx, AV_LOG_ERROR, "DRM instance fd does not appear "
+ "to refer to a DRM device.\n");
+ return AVERROR(EINVAL);
+ }
+ if (node_type == DRM_NODE_RENDER) {
+ fd = src_hwctx->fd;
+ } else {
+ render_node = drmGetRenderDeviceNameFromFd(src_hwctx->fd);
+ if (!render_node) {
+ av_log(ctx, AV_LOG_VERBOSE, "Using non-render node "
+ "because the device does not have an "
+ "associated render node.\n");
+ fd = src_hwctx->fd;
+ } else {
+ fd = open(render_node, O_RDWR);
+ if (fd < 0) {
+ av_log(ctx, AV_LOG_VERBOSE, "Using non-render node "
+ "because the associated render node "
+ "could not be opened.\n");
+ fd = src_hwctx->fd;
+ } else {
+ av_log(ctx, AV_LOG_VERBOSE, "Using render node %s "
+ "in place of non-render DRM device.\n",
+ render_node);
+ }
+ free(render_node);
+ }
+ }
+ }
+#else
+ fd = src_hwctx->fd;
+#endif
+
+ priv = av_mallocz(sizeof(*priv));
+ if (!priv) {
+ if (fd != src_hwctx->fd) {
+ // The fd was opened in this function.
+ close(fd);
+ }
+ return AVERROR(ENOMEM);
+ }
+
+ if (fd == src_hwctx->fd) {
+ // The fd is inherited from the source context and we are holding
+ // a reference to that, we don't want to close it from here.
+ priv->drm_fd = -1;
+ } else {
+ priv->drm_fd = fd;
+ }
+
+ ctx->user_opaque = priv;
+ ctx->free = &vaapi_device_free;
+
+ display = vaGetDisplayDRM(fd);
+ if (!display) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to open a VA display from "
+ "DRM device.\n");
+ return AVERROR(EIO);
+ }
+
+ return vaapi_device_connect(ctx, display);
+ }
+#endif
+ return AVERROR(ENOSYS);
+}
+
+const HWContextType ff_hwcontext_type_vaapi = {
+ .type = AV_HWDEVICE_TYPE_VAAPI,
+ .name = "VAAPI",
+
+ .device_hwctx_size = sizeof(AVVAAPIDeviceContext),
+ .device_priv_size = sizeof(VAAPIDeviceContext),
+ .device_hwconfig_size = sizeof(AVVAAPIHWConfig),
+ .frames_hwctx_size = sizeof(AVVAAPIFramesContext),
+ .frames_priv_size = sizeof(VAAPIFramesContext),
+
+ .device_create = &vaapi_device_create,
+ .device_derive = &vaapi_device_derive,
+ .device_init = &vaapi_device_init,
+ .device_uninit = &vaapi_device_uninit,
+ .frames_get_constraints = &vaapi_frames_get_constraints,
+ .frames_init = &vaapi_frames_init,
+ .frames_uninit = &vaapi_frames_uninit,
+ .frames_get_buffer = &vaapi_get_buffer,
+ .transfer_get_formats = &vaapi_transfer_get_formats,
+ .transfer_data_to = &vaapi_transfer_data_to,
+ .transfer_data_from = &vaapi_transfer_data_from,
+ .map_to = &vaapi_map_to,
+ .map_from = &vaapi_map_from,
+
+ .pix_fmts = (const enum AVPixelFormat[]) {
+ AV_PIX_FMT_VAAPI,
+ AV_PIX_FMT_NONE
+ },
+};
diff --git a/media/ffvpx/libavutil/hwcontext_vaapi.h b/media/ffvpx/libavutil/hwcontext_vaapi.h
new file mode 100644
index 0000000000..0b2e071cb3
--- /dev/null
+++ b/media/ffvpx/libavutil/hwcontext_vaapi.h
@@ -0,0 +1,117 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HWCONTEXT_VAAPI_H
+#define AVUTIL_HWCONTEXT_VAAPI_H
+
+#include <va/va.h>
+
+/**
+ * @file
+ * API-specific header for AV_HWDEVICE_TYPE_VAAPI.
+ *
+ * Dynamic frame pools are supported, but note that any pool used as a render
+ * target is required to be of fixed size in order to be usable as an
+ * argument to vaCreateContext().
+ *
+ * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs
+ * with the data pointer set to a VASurfaceID.
+ */
+
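
As an aside on the user-allocated pool contract described above, this is the kind of fragment a custom AVBufferPool allocator might use to hand out surface-backed buffers; the no-op free and the assumption that the surfaces outlive the frames context are hypothetical:

#include <va/va.h>
#include <libavutil/buffer.h>

/* Sketch: wrap an existing VASurfaceID in an AVBufferRef whose data
 * pointer *is* the surface ID, as required for AVHWFramesContext.pool. */
static void surface_noop_free(void *opaque, uint8_t *data)
{
    /* Surface lifetime is assumed to be managed elsewhere. */
}

static AVBufferRef *wrap_surface(VASurfaceID surface)
{
    return av_buffer_create((uint8_t *)(uintptr_t)surface,
                            sizeof(surface), surface_noop_free, NULL, 0);
}
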
+enum {
+ /**
+ * The quirks field has been set by the user and should not be detected
+ * automatically by av_hwdevice_ctx_init().
+ */
+ AV_VAAPI_DRIVER_QUIRK_USER_SET = (1 << 0),
+ /**
+ * The driver does not destroy parameter buffers when they are used by
+ * vaRenderPicture(). Additional code will be required to destroy them
+ * separately afterwards.
+ */
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS = (1 << 1),
+
+ /**
+ * The driver does not support the VASurfaceAttribMemoryType attribute,
+ * so the surface allocation code will not try to use it.
+ */
+ AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE = (1 << 2),
+
+ /**
+ * The driver does not support surface attributes at all.
+ * The surface allocation code will never pass them to surface allocation,
+ * and the results of the vaQuerySurfaceAttributes() call will be faked.
+ */
+ AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES = (1 << 3),
+};
+
+/**
+ * VAAPI connection details.
+ *
+ * Allocated as AVHWDeviceContext.hwctx
+ */
+typedef struct AVVAAPIDeviceContext {
+ /**
+ * The VADisplay handle, to be filled by the user.
+ */
+ VADisplay display;
+ /**
+ * Driver quirks to apply - this is filled by av_hwdevice_ctx_init(),
+ * with reference to a table of known drivers, unless the
+ * AV_VAAPI_DRIVER_QUIRK_USER_SET bit is already present. The user
+ * may need to refer to this field when performing any later
+ * operations using VAAPI with the same VADisplay.
+ */
+ unsigned int driver_quirks;
+} AVVAAPIDeviceContext;
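
The "to be filled by the user" note above corresponds to allocating the device context manually instead of using av_hwdevice_ctx_create(); a sketch, assuming the application already owns an initialised VADisplay:

#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_vaapi.h>

/* Sketch: import an application-owned VADisplay (already put through
 * vaInitialize()) into a new AVHWDeviceContext. */
static AVBufferRef *wrap_display(VADisplay display)
{
    AVBufferRef *ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VAAPI);
    AVHWDeviceContext *ctx;
    AVVAAPIDeviceContext *hwctx;

    if (!ref)
        return NULL;
    ctx   = (AVHWDeviceContext *)ref->data;
    hwctx = ctx->hwctx;
    hwctx->display = display; /* filled by the user, as documented above */

    if (av_hwdevice_ctx_init(ref) < 0)
        av_buffer_unref(&ref);
    return ref;
}
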
+
+/**
+ * VAAPI-specific data associated with a frame pool.
+ *
+ * Allocated as AVHWFramesContext.hwctx.
+ */
+typedef struct AVVAAPIFramesContext {
+ /**
+ * Set by the user to apply surface attributes to all surfaces in
+ * the frame pool. If null, default settings are used.
+ */
+ VASurfaceAttrib *attributes;
+ int nb_attributes;
+ /**
+ * The surfaces IDs of all surfaces in the pool after creation.
+ * Only valid if AVHWFramesContext.initial_pool_size was positive.
+ * These are intended to be used as the render_targets arguments to
+ * vaCreateContext().
+ */
+ VASurfaceID *surface_ids;
+ int nb_surfaces;
+} AVVAAPIFramesContext;
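
To illustrate the surface_ids note above, a sketch of handing a fixed-size pool's surfaces to vaCreateContext(); the config ID, dimensions and helper name are placeholders:

#include <va/va.h>
#include <libavutil/hwcontext_vaapi.h>

/* Sketch: use the pool's surfaces as the render_targets argument. */
static VAStatus make_va_context(AVVAAPIDeviceContext *device_hwctx,
                                AVVAAPIFramesContext *frames_hwctx,
                                VAConfigID config_id, int width, int height,
                                VAContextID *context)
{
    return vaCreateContext(device_hwctx->display, config_id,
                           width, height, VA_PROGRESSIVE,
                           frames_hwctx->surface_ids,
                           frames_hwctx->nb_surfaces, context);
}
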
+
+/**
+ * VAAPI hardware pipeline configuration details.
+ *
+ * Allocated with av_hwdevice_hwconfig_alloc().
+ */
+typedef struct AVVAAPIHWConfig {
+ /**
+ * ID of a VAAPI pipeline configuration.
+ */
+ VAConfigID config_id;
+} AVVAAPIHWConfig;
+
+#endif /* AVUTIL_HWCONTEXT_VAAPI_H */
diff --git a/media/ffvpx/libavutil/imgutils.c b/media/ffvpx/libavutil/imgutils.c
new file mode 100644
index 0000000000..9ab5757cf6
--- /dev/null
+++ b/media/ffvpx/libavutil/imgutils.c
@@ -0,0 +1,692 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * misc image utilities
+ */
+
+#include "avassert.h"
+#include "common.h"
+#include "imgutils.h"
+#include "imgutils_internal.h"
+#include "internal.h"
+#include "intreadwrite.h"
+#include "log.h"
+#include "mathematics.h"
+#include "pixdesc.h"
+#include "rational.h"
+
+void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4],
+ const AVPixFmtDescriptor *pixdesc)
+{
+ int i;
+ memset(max_pixsteps, 0, 4*sizeof(max_pixsteps[0]));
+ if (max_pixstep_comps)
+ memset(max_pixstep_comps, 0, 4*sizeof(max_pixstep_comps[0]));
+
+ for (i = 0; i < 4; i++) {
+ const AVComponentDescriptor *comp = &(pixdesc->comp[i]);
+ if (comp->step > max_pixsteps[comp->plane]) {
+ max_pixsteps[comp->plane] = comp->step;
+ if (max_pixstep_comps)
+ max_pixstep_comps[comp->plane] = i;
+ }
+ }
+}
+
+static inline
+int image_get_linesize(int width, int plane,
+ int max_step, int max_step_comp,
+ const AVPixFmtDescriptor *desc)
+{
+ int s, shifted_w, linesize;
+
+ if (!desc)
+ return AVERROR(EINVAL);
+
+ if (width < 0)
+ return AVERROR(EINVAL);
+ s = (max_step_comp == 1 || max_step_comp == 2) ? desc->log2_chroma_w : 0;
+ shifted_w = ((width + (1 << s) - 1)) >> s;
+ if (shifted_w && max_step > INT_MAX / shifted_w)
+ return AVERROR(EINVAL);
+ linesize = max_step * shifted_w;
+
+ if (desc->flags & AV_PIX_FMT_FLAG_BITSTREAM)
+ linesize = (linesize + 7) >> 3;
+ return linesize;
+}
+
+int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int max_step [4]; /* max pixel step for each plane */
+ int max_step_comp[4]; /* the component for each plane which has the max pixel step */
+
+ if (!desc || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
+ return AVERROR(EINVAL);
+
+ av_image_fill_max_pixsteps(max_step, max_step_comp, desc);
+ return image_get_linesize(width, plane, max_step[plane], max_step_comp[plane], desc);
+}
+
+int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
+{
+ int i, ret;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int max_step [4]; /* max pixel step for each plane */
+ int max_step_comp[4]; /* the component for each plane which has the max pixel step */
+
+ memset(linesizes, 0, 4*sizeof(linesizes[0]));
+
+ if (!desc || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
+ return AVERROR(EINVAL);
+
+ av_image_fill_max_pixsteps(max_step, max_step_comp, desc);
+ for (i = 0; i < 4; i++) {
+ if ((ret = image_get_linesize(width, i, max_step[i], max_step_comp[i], desc)) < 0)
+ return ret;
+ linesizes[i] = ret;
+ }
+
+ return 0;
+}
+
+int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt,
+ int height, const ptrdiff_t linesizes[4])
+{
+ int i, has_plane[4] = { 0 };
+
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ memset(sizes , 0, sizeof(sizes[0])*4);
+
+ if (!desc || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
+ return AVERROR(EINVAL);
+
+ if (linesizes[0] > SIZE_MAX / height)
+ return AVERROR(EINVAL);
+ sizes[0] = linesizes[0] * (size_t)height;
+
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
+ sizes[1] = 256 * 4; /* palette is stored here as 256 32 bits words */
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++)
+ has_plane[desc->comp[i].plane] = 1;
+
+ for (i = 1; i < 4 && has_plane[i]; i++) {
+ int h, s = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
+ h = (height + (1 << s) - 1) >> s;
+ if (linesizes[i] > SIZE_MAX / h)
+ return AVERROR(EINVAL);
+ sizes[i] = (size_t)h * linesizes[i];
+ }
+
+ return 0;
+}
+
+int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,
+ uint8_t *ptr, const int linesizes[4])
+{
+ int i, ret;
+ ptrdiff_t linesizes1[4];
+ size_t sizes[4];
+
+ memset(data , 0, sizeof(data[0])*4);
+
+ for (i = 0; i < 4; i++)
+ linesizes1[i] = linesizes[i];
+
+ ret = av_image_fill_plane_sizes(sizes, pix_fmt, height, linesizes1);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+ for (i = 0; i < 4; i++) {
+ if (sizes[i] > INT_MAX - ret)
+ return AVERROR(EINVAL);
+ ret += sizes[i];
+ }
+
+ if (!ptr)
+ return ret;
+
+ data[0] = ptr;
+ for (i = 1; i < 4 && sizes[i]; i++)
+ data[i] = data[i - 1] + sizes[i - 1];
+
+ return ret;
+}
+
+int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
+{
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ int r, g, b;
+
+ switch (pix_fmt) {
+ case AV_PIX_FMT_RGB8:
+ r = (i>>5 )*36;
+ g = ((i>>2)&7)*36;
+ b = (i&3 )*85;
+ break;
+ case AV_PIX_FMT_BGR8:
+ b = (i>>6 )*85;
+ g = ((i>>3)&7)*36;
+ r = (i&7 )*36;
+ break;
+ case AV_PIX_FMT_RGB4_BYTE:
+ r = (i>>3 )*255;
+ g = ((i>>1)&3)*85;
+ b = (i&1 )*255;
+ break;
+ case AV_PIX_FMT_BGR4_BYTE:
+ b = (i>>3 )*255;
+ g = ((i>>1)&3)*85;
+ r = (i&1 )*255;
+ break;
+ case AV_PIX_FMT_GRAY8:
+ r = b = g = i;
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+ pal[i] = b + (g << 8) + (r << 16) + (0xFFU << 24);
+ }
+
+ return 0;
+}
+
+int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
+ int w, int h, enum AVPixelFormat pix_fmt, int align)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int i, ret;
+ ptrdiff_t linesizes1[4];
+ size_t total_size, sizes[4];
+ uint8_t *buf;
+
+ if (!desc)
+ return AVERROR(EINVAL);
+
+ if ((ret = av_image_check_size(w, h, 0, NULL)) < 0)
+ return ret;
+ if ((ret = av_image_fill_linesizes(linesizes, pix_fmt, align>7 ? FFALIGN(w, 8) : w)) < 0)
+ return ret;
+
+ for (i = 0; i < 4; i++) {
+ linesizes[i] = FFALIGN(linesizes[i], align);
+ linesizes1[i] = linesizes[i];
+ }
+
+ if ((ret = av_image_fill_plane_sizes(sizes, pix_fmt, h, linesizes1)) < 0)
+ return ret;
+ total_size = align;
+ for (i = 0; i < 4; i++) {
+ if (total_size > SIZE_MAX - sizes[i])
+ return AVERROR(EINVAL);
+ total_size += sizes[i];
+ }
+ buf = av_malloc(total_size);
+ if (!buf)
+ return AVERROR(ENOMEM);
+ if ((ret = av_image_fill_pointers(pointers, pix_fmt, h, buf, linesizes)) < 0) {
+ av_free(buf);
+ return ret;
+ }
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
+ avpriv_set_systematic_pal2((uint32_t*)pointers[1], pix_fmt);
+ if (align < 4) {
+ av_log(NULL, AV_LOG_ERROR, "Formats with a palette require a minimum alignment of 4\n");
+ av_free(buf);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL && pointers[1] &&
+ pointers[1] - pointers[0] > linesizes[0] * h) {
+ /* zero-initialize the padding before the palette */
+ memset(pointers[0] + linesizes[0] * h, 0,
+ pointers[1] - pointers[0] - linesizes[0] * h);
+ }
+
+ return ret;
+}
+
+typedef struct ImgUtils {
+ const AVClass *class;
+ int log_offset;
+ void *log_ctx;
+} ImgUtils;
+
+static const AVClass imgutils_class = {
+ .class_name = "IMGUTILS",
+ .item_name = av_default_item_name,
+ .option = NULL,
+ .version = LIBAVUTIL_VERSION_INT,
+ .log_level_offset_offset = offsetof(ImgUtils, log_offset),
+ .parent_log_context_offset = offsetof(ImgUtils, log_ctx),
+};
+
+int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
+{
+ ImgUtils imgutils = {
+ .class = &imgutils_class,
+ .log_offset = log_offset,
+ .log_ctx = log_ctx,
+ };
+ int64_t stride = av_image_get_linesize(pix_fmt, w, 0);
+ if (stride <= 0)
+ stride = 8LL*w;
+ stride += 128*8;
+
+ if ((int)w<=0 || (int)h<=0 || stride >= INT_MAX || stride*(uint64_t)(h+128) >= INT_MAX) {
+ av_log(&imgutils, AV_LOG_ERROR, "Picture size %ux%u is invalid\n", w, h);
+ return AVERROR(EINVAL);
+ }
+
+ if (max_pixels < INT64_MAX) {
+ if (w*(int64_t)h > max_pixels) {
+ av_log(&imgutils, AV_LOG_ERROR,
+ "Picture size %ux%u exceeds specified max pixel count %"PRId64", see the documentation if you wish to increase it\n",
+ w, h, max_pixels);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
+{
+ return av_image_check_size2(w, h, INT64_MAX, AV_PIX_FMT_NONE, log_offset, log_ctx);
+}
+
+int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
+{
+ int64_t scaled_dim;
+
+ if (sar.den <= 0 || sar.num < 0)
+ return AVERROR(EINVAL);
+
+ if (!sar.num || sar.num == sar.den)
+ return 0;
+
+ if (sar.num < sar.den)
+ scaled_dim = av_rescale_rnd(w, sar.num, sar.den, AV_ROUND_ZERO);
+ else
+ scaled_dim = av_rescale_rnd(h, sar.den, sar.num, AV_ROUND_ZERO);
+
+ if (scaled_dim > 0)
+ return 0;
+
+ return AVERROR(EINVAL);
+}
+
+static void image_copy_plane(uint8_t *dst, ptrdiff_t dst_linesize,
+ const uint8_t *src, ptrdiff_t src_linesize,
+ ptrdiff_t bytewidth, int height)
+{
+ if (!dst || !src)
+ return;
+ av_assert0(FFABS(src_linesize) >= bytewidth);
+ av_assert0(FFABS(dst_linesize) >= bytewidth);
+ for (;height > 0; height--) {
+ memcpy(dst, src, bytewidth);
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+}
+
+void av_image_copy_plane_uc_from(uint8_t *dst, ptrdiff_t dst_linesize,
+ const uint8_t *src, ptrdiff_t src_linesize,
+ ptrdiff_t bytewidth, int height)
+{
+ int ret = -1;
+
+#if ARCH_X86
+ ret = ff_image_copy_plane_uc_from_x86(dst, dst_linesize, src, src_linesize,
+ bytewidth, height);
+#endif
+
+ if (ret < 0)
+ image_copy_plane(dst, dst_linesize, src, src_linesize, bytewidth, height);
+}
+
+void av_image_copy_plane(uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int bytewidth, int height)
+{
+ image_copy_plane(dst, dst_linesize, src, src_linesize, bytewidth, height);
+}
+
+static void image_copy(uint8_t *dst_data[4], const ptrdiff_t dst_linesizes[4],
+ const uint8_t *src_data[4], const ptrdiff_t src_linesizes[4],
+ enum AVPixelFormat pix_fmt, int width, int height,
+ void (*copy_plane)(uint8_t *, ptrdiff_t, const uint8_t *,
+ ptrdiff_t, ptrdiff_t, int))
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+
+ if (!desc || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
+ return;
+
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
+ copy_plane(dst_data[0], dst_linesizes[0],
+ src_data[0], src_linesizes[0],
+ width, height);
+ /* copy the palette */
+ if ((desc->flags & AV_PIX_FMT_FLAG_PAL) || (dst_data[1] && src_data[1]))
+ memcpy(dst_data[1], src_data[1], 4*256);
+ } else {
+ int i, planes_nb = 0;
+
+ for (i = 0; i < desc->nb_components; i++)
+ planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);
+
+ for (i = 0; i < planes_nb; i++) {
+ int h = height;
+ ptrdiff_t bwidth = av_image_get_linesize(pix_fmt, width, i);
+ if (bwidth < 0) {
+ av_log(NULL, AV_LOG_ERROR, "av_image_get_linesize failed\n");
+ return;
+ }
+ if (i == 1 || i == 2) {
+ h = AV_CEIL_RSHIFT(height, desc->log2_chroma_h);
+ }
+ copy_plane(dst_data[i], dst_linesizes[i],
+ src_data[i], src_linesizes[i],
+ bwidth, h);
+ }
+ }
+}
+
+void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],
+ const uint8_t *src_data[4], const int src_linesizes[4],
+ enum AVPixelFormat pix_fmt, int width, int height)
+{
+ ptrdiff_t dst_linesizes1[4], src_linesizes1[4];
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ dst_linesizes1[i] = dst_linesizes[i];
+ src_linesizes1[i] = src_linesizes[i];
+ }
+
+ image_copy(dst_data, dst_linesizes1, src_data, src_linesizes1, pix_fmt,
+ width, height, image_copy_plane);
+}
+
+void av_image_copy_uc_from(uint8_t *dst_data[4], const ptrdiff_t dst_linesizes[4],
+ const uint8_t *src_data[4], const ptrdiff_t src_linesizes[4],
+ enum AVPixelFormat pix_fmt, int width, int height)
+{
+ image_copy(dst_data, dst_linesizes, src_data, src_linesizes, pix_fmt,
+ width, height, av_image_copy_plane_uc_from);
+}
+
+int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4],
+ const uint8_t *src, enum AVPixelFormat pix_fmt,
+ int width, int height, int align)
+{
+ int ret, i;
+
+ ret = av_image_check_size(width, height, 0, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = av_image_fill_linesizes(dst_linesize, pix_fmt, width);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 4; i++)
+ dst_linesize[i] = FFALIGN(dst_linesize[i], align);
+
+ return av_image_fill_pointers(dst_data, pix_fmt, height, (uint8_t *)src, dst_linesize);
+}
+
+int av_image_get_buffer_size(enum AVPixelFormat pix_fmt,
+ int width, int height, int align)
+{
+ int ret, i;
+ int linesize[4];
+ ptrdiff_t aligned_linesize[4];
+ size_t sizes[4];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ if (!desc)
+ return AVERROR(EINVAL);
+
+ ret = av_image_check_size(width, height, 0, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = av_image_fill_linesizes(linesize, pix_fmt, width);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 4; i++)
+ aligned_linesize[i] = FFALIGN(linesize[i], align);
+
+ ret = av_image_fill_plane_sizes(sizes, pix_fmt, height, aligned_linesize);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+ for (i = 0; i < 4; i++) {
+ if (sizes[i] > INT_MAX - ret)
+ return AVERROR(EINVAL);
+ ret += sizes[i];
+ }
+ return ret;
+}
+
+int av_image_copy_to_buffer(uint8_t *dst, int dst_size,
+ const uint8_t * const src_data[4],
+ const int src_linesize[4],
+ enum AVPixelFormat pix_fmt,
+ int width, int height, int align)
+{
+ int i, j, nb_planes = 0, linesize[4];
+ int size = av_image_get_buffer_size(pix_fmt, width, height, align);
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int ret;
+
+ if (size > dst_size || size < 0 || !desc)
+ return AVERROR(EINVAL);
+
+ for (i = 0; i < desc->nb_components; i++)
+ nb_planes = FFMAX(desc->comp[i].plane, nb_planes);
+
+ nb_planes++;
+
+ ret = av_image_fill_linesizes(linesize, pix_fmt, width);
+ av_assert0(ret >= 0); // was checked previously
+
+ for (i = 0; i < nb_planes; i++) {
+ int h, shift = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
+ const uint8_t *src = src_data[i];
+ h = (height + (1 << shift) - 1) >> shift;
+
+ for (j = 0; j < h; j++) {
+ memcpy(dst, src, linesize[i]);
+ dst += FFALIGN(linesize[i], align);
+ src += src_linesize[i];
+ }
+ }
+
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
+ uint32_t *d32 = (uint32_t *)dst;
+
+ for (i = 0; i<256; i++)
+ AV_WL32(d32 + i, AV_RN32(src_data[1] + 4*i));
+ }
+
+ return size;
+}
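
A typical caller pairs av_image_get_buffer_size() with av_image_copy_to_buffer() above to flatten a software frame into one contiguous buffer; a hedged sketch with an illustrative helper name:

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

/* Sketch: serialise a software AVFrame into a tightly packed buffer
 * (alignment 1). Returns NULL on error. */
static uint8_t *flatten_frame(const AVFrame *frame, int *out_size)
{
    int size = av_image_get_buffer_size(frame->format, frame->width,
                                        frame->height, 1);
    uint8_t *buf;

    if (size < 0)
        return NULL;
    buf = av_malloc(size);
    if (!buf)
        return NULL;

    if (av_image_copy_to_buffer(buf, size,
                                (const uint8_t * const *)frame->data,
                                frame->linesize, frame->format,
                                frame->width, frame->height, 1) < 0) {
        av_free(buf);
        return NULL;
    }
    *out_size = size;
    return buf;
}
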
+
+// Fill dst[0..dst_size] with the bytes in clear[0..clear_size]. The clear
+// bytes are repeated until dst_size is reached. If dst_size is unaligned (i.e.
+// dst_size%clear_size!=0), the remaining data will be filled with the beginning
+// of the clear data only.
+static void memset_bytes(uint8_t *dst, size_t dst_size, uint8_t *clear,
+ size_t clear_size)
+{
+ int same = 1;
+ int i;
+
+ if (!clear_size)
+ return;
+
+ // Reduce to memset() if possible.
+ for (i = 0; i < clear_size; i++) {
+ if (clear[i] != clear[0]) {
+ same = 0;
+ break;
+ }
+ }
+ if (same)
+ clear_size = 1;
+
+ if (clear_size == 1) {
+ memset(dst, clear[0], dst_size);
+ } else {
+ if (clear_size > dst_size)
+ clear_size = dst_size;
+ memcpy(dst, clear, clear_size);
+ av_memcpy_backptr(dst + clear_size, clear_size, dst_size - clear_size);
+ }
+}
+
+// Maximum size in bytes of a plane element (usually a pixel, or multiple pixels
+// if it's a subsampled packed format).
+#define MAX_BLOCK_SIZE 32
+
+int av_image_fill_black(uint8_t *dst_data[4], const ptrdiff_t dst_linesize[4],
+ enum AVPixelFormat pix_fmt, enum AVColorRange range,
+ int width, int height)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int nb_planes = av_pix_fmt_count_planes(pix_fmt);
+ // A pixel or a group of pixels on each plane, with a value that represents black.
+ // Consider e.g. AV_PIX_FMT_UYVY422 for non-trivial cases.
+ uint8_t clear_block[4][MAX_BLOCK_SIZE] = {{0}}; // clear padding with 0
+ int clear_block_size[4] = {0};
+ ptrdiff_t plane_line_bytes[4] = {0};
+ int rgb, limited;
+ int plane, c;
+
+ if (!desc || nb_planes < 1 || nb_planes > 4 || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
+ return AVERROR(EINVAL);
+
+ rgb = !!(desc->flags & AV_PIX_FMT_FLAG_RGB);
+ limited = !rgb && range != AVCOL_RANGE_JPEG;
+
+ if (desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) {
+ ptrdiff_t bytewidth = av_image_get_linesize(pix_fmt, width, 0);
+ uint8_t *data;
+ int mono = pix_fmt == AV_PIX_FMT_MONOWHITE || pix_fmt == AV_PIX_FMT_MONOBLACK;
+ int fill = pix_fmt == AV_PIX_FMT_MONOWHITE ? 0xFF : 0;
+ if (nb_planes != 1 || !(rgb || mono) || bytewidth < 1)
+ return AVERROR(EINVAL);
+
+ if (!dst_data)
+ return 0;
+
+ data = dst_data[0];
+
+ // (Bitstream + alpha will be handled incorrectly - it'll remain transparent.)
+ for (;height > 0; height--) {
+ memset(data, fill, bytewidth);
+ data += dst_linesize[0];
+ }
+ return 0;
+ }
+
+ for (c = 0; c < desc->nb_components; c++) {
+ const AVComponentDescriptor comp = desc->comp[c];
+
+ // We try to operate on entire non-subsampled pixel groups (for
+ // AV_PIX_FMT_UYVY422 this would mean two consecutive pixels).
+ clear_block_size[comp.plane] = FFMAX(clear_block_size[comp.plane], comp.step);
+
+ if (clear_block_size[comp.plane] > MAX_BLOCK_SIZE)
+ return AVERROR(EINVAL);
+ }
+
+ // Create a byte array for clearing 1 pixel (sometimes several pixels).
+ for (c = 0; c < desc->nb_components; c++) {
+ const AVComponentDescriptor comp = desc->comp[c];
+ // (Multiple pixels happen e.g. with AV_PIX_FMT_UYVY422.)
+ int w = clear_block_size[comp.plane] / comp.step;
+ uint8_t *c_data[4];
+ const int c_linesize[4] = {0};
+ uint16_t src_array[MAX_BLOCK_SIZE];
+ uint16_t src = 0;
+ int x;
+
+ if (comp.depth > 16)
+ return AVERROR(EINVAL);
+ if (!rgb && comp.depth < 8)
+ return AVERROR(EINVAL);
+ if (w < 1)
+ return AVERROR(EINVAL);
+
+ if (c == 0 && limited) {
+ src = 16 << (comp.depth - 8);
+ } else if ((c == 1 || c == 2) && !rgb) {
+ src = 128 << (comp.depth - 8);
+ } else if (c == 3) {
+ // (Assume even limited YUV uses full range alpha.)
+ src = (1 << comp.depth) - 1;
+ }
+
+ for (x = 0; x < w; x++)
+ src_array[x] = src;
+
+ for (x = 0; x < 4; x++)
+ c_data[x] = &clear_block[x][0];
+
+ av_write_image_line(src_array, c_data, c_linesize, desc, 0, 0, c, w);
+ }
+
+ for (plane = 0; plane < nb_planes; plane++) {
+ plane_line_bytes[plane] = av_image_get_linesize(pix_fmt, width, plane);
+ if (plane_line_bytes[plane] < 0)
+ return AVERROR(EINVAL);
+ }
+
+ if (!dst_data)
+ return 0;
+
+ for (plane = 0; plane < nb_planes; plane++) {
+ size_t bytewidth = plane_line_bytes[plane];
+ uint8_t *data = dst_data[plane];
+ int chroma_div = plane == 1 || plane == 2 ? desc->log2_chroma_h : 0;
+ int plane_h = ((height + ( 1 << chroma_div) - 1)) >> chroma_div;
+
+ for (; plane_h > 0; plane_h--) {
+ memset_bytes(data, bytewidth, &clear_block[plane][0], clear_block_size[plane]);
+ data += dst_linesize[plane];
+ }
+ }
+
+ return 0;
+}
diff --git a/media/ffvpx/libavutil/imgutils.h b/media/ffvpx/libavutil/imgutils.h
new file mode 100644
index 0000000000..e10ac14952
--- /dev/null
+++ b/media/ffvpx/libavutil/imgutils.h
@@ -0,0 +1,331 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_IMGUTILS_H
+#define AVUTIL_IMGUTILS_H
+
+/**
+ * @file
+ * misc image utilities
+ *
+ * @addtogroup lavu_picture
+ * @{
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include "pixdesc.h"
+#include "pixfmt.h"
+#include "rational.h"
+
+/**
+ * Compute the max pixel step for each plane of an image with a
+ * format described by pixdesc.
+ *
+ * The pixel step is the distance in bytes between the first byte of
+ * the group of bytes which describe a pixel component and the first
+ * byte of the successive group in the same plane for the same
+ * component.
+ *
+ * @param max_pixsteps an array which is filled with the max pixel step
+ * for each plane. Since a plane may contain different pixel
+ * components, the computed max_pixsteps[plane] is relative to the
+ * component in the plane with the max pixel step.
+ * @param max_pixstep_comps an array which is filled with the component
+ * for each plane which has the max pixel step. May be NULL.
+ * @param pixdesc the AVPixFmtDescriptor for the image, describing its format
+ */
+void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4],
+ const AVPixFmtDescriptor *pixdesc);
+
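
A small illustration of the pixel-step notion documented above, using NV12 as an example (luma plane step 1, interleaved chroma plane step 2); the helper is illustrative only:

#include <libavutil/imgutils.h>
#include <libavutil/pixdesc.h>

/* Sketch: for NV12, steps[0] is 1 (Y) and steps[1] is 2 (interleaved UV). */
static void nv12_pixstep_example(void)
{
    const AVPixFmtDescriptor *d = av_pix_fmt_desc_get(AV_PIX_FMT_NV12);
    int steps[4], comps[4];

    av_image_fill_max_pixsteps(steps, comps, d);
}
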
+/**
+ * Compute the size of an image line with format pix_fmt and width
+ * width for the plane plane.
+ *
+ * @return the computed size in bytes
+ */
+int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane);
+
+/**
+ * Fill plane linesizes for an image with pixel format pix_fmt and
+ * width width.
+ *
+ * @param linesizes array to be filled with the linesize for each plane
+ * @param pix_fmt the AVPixelFormat of the image
+ * @param width width of the image in pixels
+ * @return >= 0 in case of success, a negative error code otherwise
+ */
+int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width);
+
+/**
+ * Fill plane sizes for an image with pixel format pix_fmt and height height.
+ *
+ * @param size the array to be filled with the size of each image plane
+ * @param pix_fmt the AVPixelFormat of the image
+ * @param height height of the image in pixels
+ * @param linesizes the array containing the linesize for each
+ * plane, should be filled by av_image_fill_linesizes()
+ * @return >= 0 in case of success, a negative error code otherwise
+ *
+ * @note The linesize parameters have the type ptrdiff_t here, while they are
+ * int for av_image_fill_linesizes().
+ */
+int av_image_fill_plane_sizes(size_t size[4], enum AVPixelFormat pix_fmt,
+ int height, const ptrdiff_t linesizes[4]);
+
+/**
+ * Fill plane data pointers for an image with pixel format pix_fmt and
+ * height height.
+ *
+ * @param data pointers array to be filled with the pointer for each image plane
+ * @param pix_fmt the AVPixelFormat of the image
+ * @param height height of the image in pixels
+ * @param ptr the pointer to a buffer which will contain the image
+ * @param linesizes the array containing the linesize for each
+ * plane, should be filled by av_image_fill_linesizes()
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,
+ uint8_t *ptr, const int linesizes[4]);
+
+/**
+ * Allocate an image with size w and h and pixel format pix_fmt, and
+ * fill pointers and linesizes accordingly.
+ * The allocated image buffer has to be freed by using
+ * av_freep(&pointers[0]).
+ *
+ * @param pointers array to be filled with the pointer for each image plane
+ * @param linesizes the array filled with the linesize for each plane
+ * @param w width of the image in pixels
+ * @param h height of the image in pixels
+ * @param pix_fmt the AVPixelFormat of the image
+ * @param align the value to use for buffer size alignment
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
+ int w, int h, enum AVPixelFormat pix_fmt, int align);
+
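
A usage sketch of the allocation/free contract documented above (format, size and alignment are arbitrary examples):

#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

/* Sketch: allocate a 32-byte-aligned YUV420P image and release it with
 * av_freep(&data[0]), as required above. */
static int image_alloc_example(void)
{
    uint8_t *data[4];
    int linesize[4];
    int ret = av_image_alloc(data, linesize, 1920, 1080,
                             AV_PIX_FMT_YUV420P, 32);
    if (ret < 0)
        return ret;

    /* ... fill or read the planes here ... */

    av_freep(&data[0]);
    return 0;
}
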
+/**
+ * Copy image plane from src to dst.
+ * That is, copy "height" number of lines of "bytewidth" bytes each.
+ * The first byte of each successive line is separated by *_linesize
+ * bytes.
+ *
+ * bytewidth must not exceed the absolute values of both dst_linesize
+ * and src_linesize, otherwise the function behavior is undefined.
+ *
+ * @param dst destination plane to copy to
+ * @param dst_linesize linesize for the image plane in dst
+ * @param src source plane to copy from
+ * @param src_linesize linesize for the image plane in src
+ * @param height height (number of lines) of the plane
+ */
+void av_image_copy_plane(uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int bytewidth, int height);
+
+/**
+ * Copy image data located in uncacheable (e.g. GPU mapped) memory. Where
+ * available, this function will use special functionality for reading from such
+ * memory, which may result in greatly improved performance compared to plain
+ * av_image_copy_plane().
+ *
+ * bytewidth must be less than or equal to both absolute values of
+ * dst_linesize and src_linesize, otherwise the function behavior is
+ * undefined.
+ *
+ * @note The linesize parameters have the type ptrdiff_t here, while they are
+ * int for av_image_copy_plane().
+ * @note On x86, the linesizes currently need to be aligned to the cacheline
+ * size (i.e. 64) to get improved performance.
+ */
+void av_image_copy_plane_uc_from(uint8_t *dst, ptrdiff_t dst_linesize,
+ const uint8_t *src, ptrdiff_t src_linesize,
+ ptrdiff_t bytewidth, int height);
+
+/**
+ * Copy image in src_data to dst_data.
+ *
+ * @param dst_data destination image data buffer to copy to
+ * @param dst_linesizes linesizes for the image in dst_data
+ * @param src_data source image data buffer to copy from
+ * @param src_linesizes linesizes for the image in src_data
+ * @param pix_fmt the AVPixelFormat of the image
+ * @param width width of the image in pixels
+ * @param height height of the image in pixels
+ */
+void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],
+ const uint8_t *src_data[4], const int src_linesizes[4],
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Copy image data located in uncacheable (e.g. GPU mapped) memory. Where
+ * available, this function will use special functionality for reading from such
+ * memory, which may result in greatly improved performance compared to plain
+ * av_image_copy().
+ *
+ * The data pointers and the linesizes must be aligned to the maximum required
+ * by the CPU architecture.
+ *
+ * @note The linesize parameters have the type ptrdiff_t here, while they are
+ * int for av_image_copy().
+ * @note On x86, the linesizes currently need to be aligned to the cacheline
+ * size (i.e. 64) to get improved performance.
+ */
+void av_image_copy_uc_from(uint8_t *dst_data[4], const ptrdiff_t dst_linesizes[4],
+ const uint8_t *src_data[4], const ptrdiff_t src_linesizes[4],
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Set up the data pointers and linesizes based on the specified image
+ * parameters and the provided array.
+ *
+ * The fields of the given image are filled in by using the src
+ * address which points to the image data buffer. Depending on the
+ * specified pixel format, one or multiple image data pointers and
+ * line sizes will be set. If a planar format is specified, several
+ * pointers will be set pointing to the different picture planes and
+ * the line sizes of the different planes will be stored in the
+ * dst_linesize array. Call with src == NULL to get the required
+ * size for the src buffer.
+ *
+ * To allocate the buffer and fill in the dst_data and dst_linesize in
+ * one call, use av_image_alloc().
+ *
+ * @param dst_data data pointers to be filled in
+ * @param dst_linesize linesizes for the image in dst_data to be filled in
+ * @param src buffer which contains (or will contain) the actual image data; can be NULL
+ * @param pix_fmt the pixel format of the image
+ * @param width the width of the image in pixels
+ * @param height the height of the image in pixels
+ * @param align the value used in src for linesize alignment
+ * @return the size in bytes required for src, a negative error code
+ * in case of failure
+ */
+int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4],
+ const uint8_t *src,
+ enum AVPixelFormat pix_fmt, int width, int height, int align);
+
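+/* Illustrative sketch (not part of the upstream header): wrapping an existing
+ * contiguous buffer with av_image_fill_arrays() so it can be addressed through
+ * per-plane pointers. av_malloc() is from libavutil/mem.h; error checks are
+ * omitted.
+ *
+ *     int size = av_image_get_buffer_size(AV_PIX_FMT_RGB24, 640, 480, 1);
+ *     uint8_t *buf = av_malloc(size);
+ *     uint8_t *data[4];
+ *     int      linesize[4];
+ *     av_image_fill_arrays(data, linesize, buf,
+ *                          AV_PIX_FMT_RGB24, 640, 480, 1);
+ */
+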
+/**
+ * Return the size in bytes of the amount of data required to store an
+ * image with the given parameters.
+ *
+ * @param pix_fmt the pixel format of the image
+ * @param width the width of the image in pixels
+ * @param height the height of the image in pixels
+ * @param align the assumed linesize alignment
+ * @return the buffer size in bytes, a negative error code in case of failure
+ */
+int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align);
+
+/**
+ * Copy image data from an image into a buffer.
+ *
+ * av_image_get_buffer_size() can be used to compute the required size
+ * for the buffer to fill.
+ *
+ * @param dst a buffer into which picture data will be copied
+ * @param dst_size the size in bytes of dst
+ * @param src_data pointers containing the source image data
+ * @param src_linesize linesizes for the image in src_data
+ * @param pix_fmt the pixel format of the source image
+ * @param width the width of the source image in pixels
+ * @param height the height of the source image in pixels
+ * @param align the assumed linesize alignment for dst
+ * @return the number of bytes written to dst, or a negative value
+ * (error code) on error
+ */
+int av_image_copy_to_buffer(uint8_t *dst, int dst_size,
+ const uint8_t * const src_data[4], const int src_linesize[4],
+ enum AVPixelFormat pix_fmt, int width, int height, int align);
+
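+/* Illustrative sketch (not part of the upstream header): serializing an image
+ * described by the hypothetical src_data/src_linesize arrays into one tightly
+ * packed buffer (align == 1). av_malloc() is from libavutil/mem.h.
+ *
+ *     int size = av_image_get_buffer_size(pix_fmt, width, height, 1);
+ *     uint8_t *buf = av_malloc(size);
+ *     av_image_copy_to_buffer(buf, size,
+ *                             (const uint8_t * const *)src_data, src_linesize,
+ *                             pix_fmt, width, height, 1);
+ */
+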
+/**
+ * Check if the given dimensions of an image are valid, meaning that all
+ * bytes of the image can be addressed with a signed int.
+ *
+ * @param w the width of the picture
+ * @param h the height of the picture
+ * @param log_offset the offset to sum to the log level for logging with log_ctx
+ * @param log_ctx the parent logging context, it may be NULL
+ * @return >= 0 if valid, a negative error code otherwise
+ */
+int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx);
+
+/**
+ * Check if the given dimensions of an image are valid, meaning that all
+ * bytes of a plane of an image with the specified pix_fmt can be addressed
+ * with a signed int.
+ *
+ * @param w the width of the picture
+ * @param h the height of the picture
+ * @param max_pixels the maximum number of pixels the user wants to accept
+ * @param pix_fmt the pixel format, can be AV_PIX_FMT_NONE if unknown.
+ * @param log_offset the offset to sum to the log level for logging with log_ctx
+ * @param log_ctx the parent logging context, it may be NULL
+ * @return >= 0 if valid, a negative error code otherwise
+ */
+int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx);
+
+/**
+ * Check if the given sample aspect ratio of an image is valid.
+ *
+ * It is considered invalid if the denominator is 0 or if applying the ratio
+ * to the image size would make the smaller dimension less than 1. If the
+ * sar numerator is 0, it is considered unknown and is treated as valid.
+ *
+ * @param w width of the image
+ * @param h height of the image
+ * @param sar sample aspect ratio of the image
+ * @return 0 if valid, a negative AVERROR code otherwise
+ */
+int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar);
+
+/**
+ * Overwrite the image data with black. This is suitable for filling a
+ * sub-rectangle of an image, meaning the padding between the rightmost pixel
+ * and the leftmost pixel on the next line will not be overwritten. For some
+ * formats, the image size might be rounded up due to inherent alignment.
+ *
+ * If the pixel format has alpha, the alpha is cleared to opaque.
+ *
+ * This can return an error if the pixel format is not supported. Normally, all
+ * non-hwaccel pixel formats should be supported.
+ *
+ * Passing NULL for dst_data is allowed; in that case the function only reports
+ * whether the operation would have succeeded. (It can still return an error if
+ * the pix_fmt is not supported.)
+ *
+ * @param dst_data data pointers to destination image
+ * @param dst_linesize linesizes for the destination image
+ * @param pix_fmt the pixel format of the image
+ * @param range the color range of the image (important for colorspaces such as YUV)
+ * @param width the width of the image in pixels
+ * @param height the height of the image in pixels
+ * @return 0 if the image data was cleared, a negative AVERROR code otherwise
+ */
+int av_image_fill_black(uint8_t *dst_data[4], const ptrdiff_t dst_linesize[4],
+ enum AVPixelFormat pix_fmt, enum AVColorRange range,
+ int width, int height);
+
+/**
+ * @}
+ */
+
+
+#endif /* AVUTIL_IMGUTILS_H */
diff --git a/media/ffvpx/libavutil/imgutils_internal.h b/media/ffvpx/libavutil/imgutils_internal.h
new file mode 100644
index 0000000000..d515858413
--- /dev/null
+++ b/media/ffvpx/libavutil/imgutils_internal.h
@@ -0,0 +1,30 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_IMGUTILS_INTERNAL_H
+#define AVUTIL_IMGUTILS_INTERNAL_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+int ff_image_copy_plane_uc_from_x86(uint8_t *dst, ptrdiff_t dst_linesize,
+ const uint8_t *src, ptrdiff_t src_linesize,
+ ptrdiff_t bytewidth, int height);
+
+
+#endif /* AVUTIL_IMGUTILS_INTERNAL_H */
diff --git a/media/ffvpx/libavutil/integer.c b/media/ffvpx/libavutil/integer.c
new file mode 100644
index 0000000000..ae87c467b2
--- /dev/null
+++ b/media/ffvpx/libavutil/integer.c
@@ -0,0 +1,166 @@
+/*
+ * arbitrary precision integers
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * arbitrary precision integers
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include <string.h>
+
+#include "integer.h"
+#include "avassert.h"
+#include "intmath.h"
+
+static const AVInteger zero_i;
+
+AVInteger av_add_i(AVInteger a, AVInteger b){
+ int i, carry=0;
+
+ for(i=0; i<AV_INTEGER_SIZE; i++){
+ carry= (carry>>16) + a.v[i] + b.v[i];
+ a.v[i]= carry;
+ }
+ return a;
+}
+
+AVInteger av_sub_i(AVInteger a, AVInteger b){
+ int i, carry=0;
+
+ for(i=0; i<AV_INTEGER_SIZE; i++){
+ carry= (carry>>16) + a.v[i] - b.v[i];
+ a.v[i]= carry;
+ }
+ return a;
+}
+
+int av_log2_i(AVInteger a){
+ int i;
+
+ for(i=AV_INTEGER_SIZE-1; i>=0; i--){
+ if(a.v[i])
+ return av_log2_16bit(a.v[i]) + 16*i;
+ }
+ return -1;
+}
+
+AVInteger av_mul_i(AVInteger a, AVInteger b){
+ AVInteger out;
+ int i, j;
+ int na= (av_log2_i(a)+16) >> 4;
+ int nb= (av_log2_i(b)+16) >> 4;
+
+ memset(&out, 0, sizeof(out));
+
+ for(i=0; i<na; i++){
+ unsigned int carry=0;
+
+ if(a.v[i])
+ for(j=i; j<AV_INTEGER_SIZE && j-i<=nb; j++){
+ carry= (carry>>16) + out.v[j] + a.v[i]*(unsigned)b.v[j-i];
+ out.v[j]= carry;
+ }
+ }
+
+ return out;
+}
+
+int av_cmp_i(AVInteger a, AVInteger b){
+ int i;
+ int v= (int16_t)a.v[AV_INTEGER_SIZE-1] - (int16_t)b.v[AV_INTEGER_SIZE-1];
+ if(v) return (v>>16)|1;
+
+ for(i=AV_INTEGER_SIZE-2; i>=0; i--){
+ int v= a.v[i] - b.v[i];
+ if(v) return (v>>16)|1;
+ }
+ return 0;
+}
+
+AVInteger av_shr_i(AVInteger a, int s){
+ AVInteger out;
+ int i;
+
+ for(i=0; i<AV_INTEGER_SIZE; i++){
+ unsigned int index= i + (s>>4);
+ unsigned int v=0;
+ if (index + 1 < AV_INTEGER_SIZE) v = a.v[index + 1] * (1U << 16);
+ if (index < AV_INTEGER_SIZE) v |= a.v[index];
+ out.v[i]= v >> (s&15);
+ }
+ return out;
+}
+
+AVInteger av_mod_i(AVInteger *quot, AVInteger a, AVInteger b){
+ int i= av_log2_i(a) - av_log2_i(b);
+ AVInteger quot_temp;
+ if(!quot) quot = &quot_temp;
+
+ if ((int16_t)a.v[AV_INTEGER_SIZE-1] < 0) {
+ a = av_mod_i(quot, av_sub_i(zero_i, a), b);
+ *quot = av_sub_i(zero_i, *quot);
+ return av_sub_i(zero_i, a);
+ }
+
+ av_assert2((int16_t)a.v[AV_INTEGER_SIZE-1] >= 0 && (int16_t)b.v[AV_INTEGER_SIZE-1] >= 0);
+ av_assert2(av_log2_i(b)>=0);
+
+ if(i > 0)
+ b= av_shr_i(b, -i);
+
+ memset(quot, 0, sizeof(AVInteger));
+
+ while(i-- >= 0){
+ *quot= av_shr_i(*quot, -1);
+ if(av_cmp_i(a, b) >= 0){
+ a= av_sub_i(a, b);
+ quot->v[0] += 1;
+ }
+ b= av_shr_i(b, 1);
+ }
+ return a;
+}
+
+AVInteger av_div_i(AVInteger a, AVInteger b){
+ AVInteger quot;
+ av_mod_i(&quot, a, b);
+ return quot;
+}
+
+AVInteger av_int2i(int64_t a){
+ AVInteger out;
+ int i;
+
+ for(i=0; i<AV_INTEGER_SIZE; i++){
+ out.v[i]= a;
+ a>>=16;
+ }
+ return out;
+}
+
+int64_t av_i2int(AVInteger a){
+ uint64_t out = a.v[3];
+
+ for (int i = 2; i >= 0; i--)
+ out = (out << 16) | a.v[i];
+ return out;
+}
diff --git a/media/ffvpx/libavutil/integer.h b/media/ffvpx/libavutil/integer.h
new file mode 100644
index 0000000000..2d9b5bb10f
--- /dev/null
+++ b/media/ffvpx/libavutil/integer.h
@@ -0,0 +1,86 @@
+/*
+ * arbitrary precision integers
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * arbitrary precision integers
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_INTEGER_H
+#define AVUTIL_INTEGER_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+#define AV_INTEGER_SIZE 8
+
+typedef struct AVInteger{
+ uint16_t v[AV_INTEGER_SIZE];
+} AVInteger;
+
+AVInteger av_add_i(AVInteger a, AVInteger b) av_const;
+AVInteger av_sub_i(AVInteger a, AVInteger b) av_const;
+
+/**
+ * Return the rounded-down value of the base 2 logarithm of the given
+ * AVInteger. This is simply the index of the most significant bit
+ * which is 1, or -1 if all bits are 0.
+ */
+int av_log2_i(AVInteger a) av_const;
+AVInteger av_mul_i(AVInteger a, AVInteger b) av_const;
+
+/**
+ * Return 0 if a==b, 1 if a>b and -1 if a<b.
+ */
+int av_cmp_i(AVInteger a, AVInteger b) av_const;
+
+/**
+ * bitwise shift
+ * @param s the number of bits by which the value should be shifted right,
+ may be negative for shifting left
+ */
+AVInteger av_shr_i(AVInteger a, int s) av_const;
+
+/**
+ * Return a % b.
+ * @param quot a/b will be stored here.
+ */
+AVInteger av_mod_i(AVInteger *quot, AVInteger a, AVInteger b);
+
+/**
+ * Return a/b.
+ */
+AVInteger av_div_i(AVInteger a, AVInteger b) av_const;
+
+/**
+ * Convert the given int64_t to an AVInteger.
+ */
+AVInteger av_int2i(int64_t a) av_const;
+
+/**
+ * Convert the given AVInteger to an int64_t.
+ * If the AVInteger is too large to fit into an int64_t,
+ * then only the least significant 64 bits will be used.
+ */
+int64_t av_i2int(AVInteger a) av_const;
+
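+/* Illustrative sketch (not part of the upstream header): computing
+ * (a * b) >> 32 exactly for two int64_t values a and b, assuming both are
+ * non-negative and the shifted result fits in 64 bits.
+ *
+ *     AVInteger prod = av_mul_i(av_int2i(a), av_int2i(b));
+ *     int64_t   hi   = av_i2int(av_shr_i(prod, 32));
+ */
+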
+#endif /* AVUTIL_INTEGER_H */
diff --git a/media/ffvpx/libavutil/internal.h b/media/ffvpx/libavutil/internal.h
new file mode 100644
index 0000000000..454c59aa50
--- /dev/null
+++ b/media/ffvpx/libavutil/internal.h
@@ -0,0 +1,190 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal API header
+ */
+
+#ifndef AVUTIL_INTERNAL_H
+#define AVUTIL_INTERNAL_H
+
+#if !defined(DEBUG) && !defined(NDEBUG)
+# define NDEBUG
+#endif
+
+// This can be enabled to allow detection of additional integer overflows with ubsan
+//#define CHECKED
+
+#include <limits.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <assert.h>
+#include <stdio.h>
+#include "config.h"
+#include "attributes.h"
+#include "timer.h"
+#include "macros.h"
+#include "pixfmt.h"
+
+#if ARCH_X86
+# include "x86/emms.h"
+#endif
+
+#ifndef emms_c
+# define emms_c() do {} while(0)
+#endif
+
+#ifndef attribute_align_arg
+#if ARCH_X86_32 && AV_GCC_VERSION_AT_LEAST(4,2)
+# define attribute_align_arg __attribute__((force_align_arg_pointer))
+#else
+# define attribute_align_arg
+#endif
+#endif
+
+#if defined(_WIN32) && CONFIG_SHARED && !defined(BUILDING_avutil)
+# define av_export_avutil __declspec(dllimport)
+#else
+# define av_export_avutil
+#endif
+
+#if HAVE_PRAGMA_DEPRECATED
+# if defined(__ICL) || defined (__INTEL_COMPILER)
+# define FF_DISABLE_DEPRECATION_WARNINGS __pragma(warning(push)) __pragma(warning(disable:1478))
+# define FF_ENABLE_DEPRECATION_WARNINGS __pragma(warning(pop))
+# elif defined(_MSC_VER)
+# define FF_DISABLE_DEPRECATION_WARNINGS __pragma(warning(push)) __pragma(warning(disable:4996))
+# define FF_ENABLE_DEPRECATION_WARNINGS __pragma(warning(pop))
+# else
+# define FF_DISABLE_DEPRECATION_WARNINGS _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+# define FF_ENABLE_DEPRECATION_WARNINGS _Pragma("GCC diagnostic pop")
+# endif
+#else
+# define FF_DISABLE_DEPRECATION_WARNINGS
+# define FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+
+#define FF_MEMORY_POISON 0x2a
+
+/* Check if the hard-coded offset of a struct member still matches reality.
+ * Induce a compilation failure if not.
+ */
+#define AV_CHECK_OFFSET(s, m, o) struct check_##o { \
+ int x_##o[offsetof(s, m) == o? 1: -1]; \
+ }
+
+
+#define FF_ALLOC_TYPED_ARRAY(p, nelem) (p = av_malloc_array(nelem, sizeof(*p)))
+#define FF_ALLOCZ_TYPED_ARRAY(p, nelem) (p = av_calloc(nelem, sizeof(*p)))
+
+#define FF_PTR_ADD(ptr, off) ((off) ? (ptr) + (off) : (ptr))
+
+/**
+ * Access a field in a structure by its offset.
+ */
+#define FF_FIELD_AT(type, off, obj) (*(type *)((char *)&(obj) + (off)))
+
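+/* Illustrative sketch (not part of the upstream header), using a hypothetical
+ * struct Foo: allocate a zeroed 16-element array and pin a member offset so
+ * that layout drift breaks the build. The offset of 4 assumes a 4-byte int;
+ * AVERROR()/ENOMEM come from libavutil/error.h and errno.h.
+ *
+ *     struct Foo { int a, b; } *arr;
+ *     if (!FF_ALLOCZ_TYPED_ARRAY(arr, 16))
+ *         return AVERROR(ENOMEM);
+ *     AV_CHECK_OFFSET(struct Foo, b, 4);
+ */
+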
+#include "libm.h"
+
+/**
+ * Return NULL if CONFIG_SMALL is true, otherwise the argument
+ * without modification. Used to disable the definition of strings.
+ */
+#if CONFIG_SMALL
+# define NULL_IF_CONFIG_SMALL(x) NULL
+#else
+# define NULL_IF_CONFIG_SMALL(x) x
+#endif
+
+/**
+ * Log a generic warning message about a missing feature.
+ *
+ * @param[in] avc a pointer to an arbitrary struct of which the first
+ * field is a pointer to an AVClass struct
+ * @param[in] msg string containing the name of the missing feature
+ */
+void avpriv_report_missing_feature(void *avc,
+ const char *msg, ...) av_printf_format(2, 3);
+
+/**
+ * Log a generic warning message about a missing feature.
+ * Additionally request that a sample showcasing the feature be uploaded.
+ *
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] msg string containing the name of the missing feature
+ */
+void avpriv_request_sample(void *avc,
+ const char *msg, ...) av_printf_format(2, 3);
+
+#if HAVE_LIBC_MSVCRT
+#include <crtversion.h>
+#if defined(_VC_CRT_MAJOR_VERSION) && _VC_CRT_MAJOR_VERSION < 14
+#pragma comment(linker, "/include:" EXTERN_PREFIX "avpriv_strtod")
+#pragma comment(linker, "/include:" EXTERN_PREFIX "avpriv_snprintf")
+#endif
+
+#define PTRDIFF_SPECIFIER "Id"
+#define SIZE_SPECIFIER "Iu"
+#else
+#define PTRDIFF_SPECIFIER "td"
+#define SIZE_SPECIFIER "zu"
+#endif
+
+#ifdef DEBUG
+# define ff_dlog(ctx, ...) av_log(ctx, AV_LOG_DEBUG, __VA_ARGS__)
+#else
+# define ff_dlog(ctx, ...) do { if (0) av_log(ctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)
+#endif
+
+#ifdef TRACE
+# define ff_tlog(ctx, ...) av_log(ctx, AV_LOG_TRACE, __VA_ARGS__)
+#else
+# define ff_tlog(ctx, ...) do { } while(0)
+#endif
+
+// For debugging we use signed operations so overflows can be detected (by ubsan)
+// For production we use unsigned so there are no undefined operations
+#ifdef CHECKED
+#define SUINT int
+#define SUINT32 int32_t
+#else
+#define SUINT unsigned
+#define SUINT32 uint32_t
+#endif
+
+int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt);
+
+static av_always_inline av_const int avpriv_mirror(int x, int w)
+{
+ if (!w)
+ return 0;
+
+ while ((unsigned)x > (unsigned)w) {
+ x = -x;
+ if (x < 0)
+ x += 2 * w;
+ }
+ return x;
+}
+
+#endif /* AVUTIL_INTERNAL_H */
diff --git a/media/ffvpx/libavutil/intfloat.h b/media/ffvpx/libavutil/intfloat.h
new file mode 100644
index 0000000000..fe3d7ec4a5
--- /dev/null
+++ b/media/ffvpx/libavutil/intfloat.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTFLOAT_H
+#define AVUTIL_INTFLOAT_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+union av_intfloat32 {
+ uint32_t i;
+ float f;
+};
+
+union av_intfloat64 {
+ uint64_t i;
+ double f;
+};
+
+/**
+ * Reinterpret a 32-bit integer as a float.
+ */
+static av_always_inline float av_int2float(uint32_t i)
+{
+ union av_intfloat32 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a float as a 32-bit integer.
+ */
+static av_always_inline uint32_t av_float2int(float f)
+{
+ union av_intfloat32 v;
+ v.f = f;
+ return v.i;
+}
+
+/**
+ * Reinterpret a 64-bit integer as a double.
+ */
+static av_always_inline double av_int2double(uint64_t i)
+{
+ union av_intfloat64 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a double as a 64-bit integer.
+ */
+static av_always_inline uint64_t av_double2int(double f)
+{
+ union av_intfloat64 v;
+ v.f = f;
+ return v.i;
+}
+
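+/* Illustrative sketch (not part of the upstream header): inspecting IEEE-754
+ * fields through av_float2int(). For a finite, normalized float f, the
+ * unbiased exponent is:
+ *
+ *     int e = (int)((av_float2int(f) >> 23) & 0xff) - 127;
+ */
+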
+#endif /* AVUTIL_INTFLOAT_H */
diff --git a/media/ffvpx/libavutil/intmath.c b/media/ffvpx/libavutil/intmath.c
new file mode 100644
index 0000000000..b0c00e1cad
--- /dev/null
+++ b/media/ffvpx/libavutil/intmath.c
@@ -0,0 +1,34 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "intmath.h"
+
+/* undef these to get the function prototypes from common.h */
+#undef av_log2
+#undef av_log2_16bit
+#include "common.h"
+
+int av_log2(unsigned v)
+{
+ return ff_log2(v);
+}
+
+int av_log2_16bit(unsigned v)
+{
+ return ff_log2_16bit(v);
+}
diff --git a/media/ffvpx/libavutil/intmath.h b/media/ffvpx/libavutil/intmath.h
new file mode 100644
index 0000000000..c54d23b7bf
--- /dev/null
+++ b/media/ffvpx/libavutil/intmath.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTMATH_H
+#define AVUTIL_INTMATH_H
+
+#include <stdint.h>
+
+#include "config.h"
+#include "attributes.h"
+
+#if ARCH_ARM
+# include "arm/intmath.h"
+#elif ARCH_RISCV
+# include "riscv/intmath.h"
+#elif ARCH_X86
+# include "x86/intmath.h"
+#endif
+
+#if HAVE_FAST_CLZ
+#if AV_GCC_VERSION_AT_LEAST(3,4)
+#ifndef ff_log2
+# define ff_log2(x) (31 - __builtin_clz((x)|1))
+# ifndef ff_log2_16bit
+# define ff_log2_16bit av_log2
+# endif
+#endif /* ff_log2 */
+#endif /* AV_GCC_VERSION_AT_LEAST(3,4) */
+#endif
+
+extern const uint8_t ff_log2_tab[256];
+
+#ifndef ff_log2
+#define ff_log2 ff_log2_c
+static av_always_inline av_const int ff_log2_c(unsigned int v)
+{
+ int n = 0;
+ if (v & 0xffff0000) {
+ v >>= 16;
+ n += 16;
+ }
+ if (v & 0xff00) {
+ v >>= 8;
+ n += 8;
+ }
+ n += ff_log2_tab[v];
+
+ return n;
+}
+#endif
+
+#ifndef ff_log2_16bit
+#define ff_log2_16bit ff_log2_16bit_c
+static av_always_inline av_const int ff_log2_16bit_c(unsigned int v)
+{
+ int n = 0;
+ if (v & 0xff00) {
+ v >>= 8;
+ n += 8;
+ }
+ n += ff_log2_tab[v];
+
+ return n;
+}
+#endif
+
+#define av_log2 ff_log2
+#define av_log2_16bit ff_log2_16bit
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+#if HAVE_FAST_CLZ
+#if AV_GCC_VERSION_AT_LEAST(3,4)
+#ifndef ff_ctz
+#define ff_ctz(v) __builtin_ctz(v)
+#endif
+#ifndef ff_ctzll
+#define ff_ctzll(v) __builtin_ctzll(v)
+#endif
+#ifndef ff_clz
+#define ff_clz(v) __builtin_clz(v)
+#endif
+#endif
+#endif
+
+#ifndef ff_ctz
+#define ff_ctz ff_ctz_c
+/**
+ * Trailing zero bit count.
+ *
+ * @param v input value. If v is 0, the result is undefined.
+ * @return the number of trailing 0-bits
+ */
+/* We use the De-Bruijn method outlined in:
+ * http://supertech.csail.mit.edu/papers/debruijn.pdf. */
+static av_always_inline av_const int ff_ctz_c(int v)
+{
+ static const uint8_t debruijn_ctz32[32] = {
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
+ };
+ return debruijn_ctz32[(uint32_t)((v & -v) * 0x077CB531U) >> 27];
+}
+#endif
+
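+/* Illustrative note (not part of the upstream header): in ff_ctz_c() above,
+ * (v & -v) isolates the lowest set bit, and multiplying that power of two by
+ * the De Bruijn constant 0x077CB531 places a unique 5-bit pattern in the top
+ * bits, which the table maps back to the bit index. For example:
+ *
+ *     ff_ctz_c(0x28);   // 0x28 & -0x28 == 0x08, the lookup yields 3
+ */
+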
+#ifndef ff_ctzll
+#define ff_ctzll ff_ctzll_c
+/* We use the De-Bruijn method outlined in:
+ * http://supertech.csail.mit.edu/papers/debruijn.pdf. */
+static av_always_inline av_const int ff_ctzll_c(long long v)
+{
+ static const uint8_t debruijn_ctz64[64] = {
+ 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
+ 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
+ 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
+ 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
+ };
+ return debruijn_ctz64[(uint64_t)((v & -v) * 0x022FDD63CC95386DU) >> 58];
+}
+#endif
+
+#ifndef ff_clz
+#define ff_clz ff_clz_c
+static av_always_inline av_const unsigned ff_clz_c(unsigned x)
+{
+ unsigned i = sizeof(x) * 8;
+
+ while (x) {
+ x >>= 1;
+ i--;
+ }
+
+ return i;
+}
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,4)
+#ifndef av_parity
+#define av_parity __builtin_parity
+#endif
+#endif
+
+/**
+ * @}
+ */
+#endif /* AVUTIL_INTMATH_H */
diff --git a/media/ffvpx/libavutil/intreadwrite.h b/media/ffvpx/libavutil/intreadwrite.h
new file mode 100644
index 0000000000..4c8413a536
--- /dev/null
+++ b/media/ffvpx/libavutil/intreadwrite.h
@@ -0,0 +1,644 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTREADWRITE_H
+#define AVUTIL_INTREADWRITE_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+#include "bswap.h"
+
+typedef union {
+ uint64_t u64;
+ uint32_t u32[2];
+ uint16_t u16[4];
+ uint8_t u8 [8];
+ double f64;
+ float f32[2];
+} av_alias av_alias64;
+
+typedef union {
+ uint32_t u32;
+ uint16_t u16[2];
+ uint8_t u8 [4];
+ float f32;
+} av_alias av_alias32;
+
+typedef union {
+ uint16_t u16;
+ uint8_t u8 [2];
+} av_alias av_alias16;
+
+/*
+ * Arch-specific headers can provide any combination of
+ * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
+ * Preprocessor symbols must be defined, even if these are implemented
+ * as inline functions.
+ *
+ * R/W means read/write, B/L/N means big/little/native endianness.
+ * The following macros require aligned access, compared to their
+ * unaligned variants: AV_(COPY|SWAP|ZERO)(64|128), AV_[RW]N[8-64]A.
+ * Incorrect usage may range from abysmal performance to a crash,
+ * depending on the platform.
+ *
+ * The unaligned variants are AV_[RW][BLN][8-64] and AV_COPY*U.
+ */
+
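+/* Illustrative sketch (not part of the upstream header): reading a
+ * little-endian 32-bit length followed by a big-endian 16-bit tag from a
+ * hypothetical byte buffer pkt, independent of host endianness and alignment,
+ * then writing the length back in little-endian order to a buffer out.
+ *
+ *     const uint8_t *p   = pkt;
+ *     uint32_t       len = AV_RL32(p);
+ *     uint16_t       tag = AV_RB16(p + 4);
+ *     AV_WL32(out, len);
+ */
+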
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if ARCH_ARM
+# include "arm/intreadwrite.h"
+#elif ARCH_AVR32
+# include "avr32/intreadwrite.h"
+#elif ARCH_MIPS
+# include "mips/intreadwrite.h"
+#elif ARCH_PPC
+# include "ppc/intreadwrite.h"
+#elif ARCH_TOMI
+# include "tomi/intreadwrite.h"
+#elif ARCH_X86
+# include "x86/intreadwrite.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+/*
+ * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
+ */
+
+#if AV_HAVE_BIGENDIAN
+
+# if defined(AV_RN16) && !defined(AV_RB16)
+# define AV_RB16(p) AV_RN16(p)
+# elif !defined(AV_RN16) && defined(AV_RB16)
+# define AV_RN16(p) AV_RB16(p)
+# endif
+
+# if defined(AV_WN16) && !defined(AV_WB16)
+# define AV_WB16(p, v) AV_WN16(p, v)
+# elif !defined(AV_WN16) && defined(AV_WB16)
+# define AV_WN16(p, v) AV_WB16(p, v)
+# endif
+
+# if defined(AV_RN24) && !defined(AV_RB24)
+# define AV_RB24(p) AV_RN24(p)
+# elif !defined(AV_RN24) && defined(AV_RB24)
+# define AV_RN24(p) AV_RB24(p)
+# endif
+
+# if defined(AV_WN24) && !defined(AV_WB24)
+# define AV_WB24(p, v) AV_WN24(p, v)
+# elif !defined(AV_WN24) && defined(AV_WB24)
+# define AV_WN24(p, v) AV_WB24(p, v)
+# endif
+
+# if defined(AV_RN32) && !defined(AV_RB32)
+# define AV_RB32(p) AV_RN32(p)
+# elif !defined(AV_RN32) && defined(AV_RB32)
+# define AV_RN32(p) AV_RB32(p)
+# endif
+
+# if defined(AV_WN32) && !defined(AV_WB32)
+# define AV_WB32(p, v) AV_WN32(p, v)
+# elif !defined(AV_WN32) && defined(AV_WB32)
+# define AV_WN32(p, v) AV_WB32(p, v)
+# endif
+
+# if defined(AV_RN48) && !defined(AV_RB48)
+# define AV_RB48(p) AV_RN48(p)
+# elif !defined(AV_RN48) && defined(AV_RB48)
+# define AV_RN48(p) AV_RB48(p)
+# endif
+
+# if defined(AV_WN48) && !defined(AV_WB48)
+# define AV_WB48(p, v) AV_WN48(p, v)
+# elif !defined(AV_WN48) && defined(AV_WB48)
+# define AV_WN48(p, v) AV_WB48(p, v)
+# endif
+
+# if defined(AV_RN64) && !defined(AV_RB64)
+# define AV_RB64(p) AV_RN64(p)
+# elif !defined(AV_RN64) && defined(AV_RB64)
+# define AV_RN64(p) AV_RB64(p)
+# endif
+
+# if defined(AV_WN64) && !defined(AV_WB64)
+# define AV_WB64(p, v) AV_WN64(p, v)
+# elif !defined(AV_WN64) && defined(AV_WB64)
+# define AV_WN64(p, v) AV_WB64(p, v)
+# endif
+
+#else /* AV_HAVE_BIGENDIAN */
+
+# if defined(AV_RN16) && !defined(AV_RL16)
+# define AV_RL16(p) AV_RN16(p)
+# elif !defined(AV_RN16) && defined(AV_RL16)
+# define AV_RN16(p) AV_RL16(p)
+# endif
+
+# if defined(AV_WN16) && !defined(AV_WL16)
+# define AV_WL16(p, v) AV_WN16(p, v)
+# elif !defined(AV_WN16) && defined(AV_WL16)
+# define AV_WN16(p, v) AV_WL16(p, v)
+# endif
+
+# if defined(AV_RN24) && !defined(AV_RL24)
+# define AV_RL24(p) AV_RN24(p)
+# elif !defined(AV_RN24) && defined(AV_RL24)
+# define AV_RN24(p) AV_RL24(p)
+# endif
+
+# if defined(AV_WN24) && !defined(AV_WL24)
+# define AV_WL24(p, v) AV_WN24(p, v)
+# elif !defined(AV_WN24) && defined(AV_WL24)
+# define AV_WN24(p, v) AV_WL24(p, v)
+# endif
+
+# if defined(AV_RN32) && !defined(AV_RL32)
+# define AV_RL32(p) AV_RN32(p)
+# elif !defined(AV_RN32) && defined(AV_RL32)
+# define AV_RN32(p) AV_RL32(p)
+# endif
+
+# if defined(AV_WN32) && !defined(AV_WL32)
+# define AV_WL32(p, v) AV_WN32(p, v)
+# elif !defined(AV_WN32) && defined(AV_WL32)
+# define AV_WN32(p, v) AV_WL32(p, v)
+# endif
+
+# if defined(AV_RN48) && !defined(AV_RL48)
+# define AV_RL48(p) AV_RN48(p)
+# elif !defined(AV_RN48) && defined(AV_RL48)
+# define AV_RN48(p) AV_RL48(p)
+# endif
+
+# if defined(AV_WN48) && !defined(AV_WL48)
+# define AV_WL48(p, v) AV_WN48(p, v)
+# elif !defined(AV_WN48) && defined(AV_WL48)
+# define AV_WN48(p, v) AV_WL48(p, v)
+# endif
+
+# if defined(AV_RN64) && !defined(AV_RL64)
+# define AV_RL64(p) AV_RN64(p)
+# elif !defined(AV_RN64) && defined(AV_RL64)
+# define AV_RN64(p) AV_RL64(p)
+# endif
+
+# if defined(AV_WN64) && !defined(AV_WL64)
+# define AV_WL64(p, v) AV_WN64(p, v)
+# elif !defined(AV_WN64) && defined(AV_WL64)
+# define AV_WN64(p, v) AV_WL64(p, v)
+# endif
+
+#endif /* !AV_HAVE_BIGENDIAN */
+
+/*
+ * Define AV_[RW]N helper macros to simplify definitions not provided
+ * by per-arch headers.
+ */
+
+#if defined(__GNUC__)
+
+union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
+union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
+union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
+
+# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
+# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))
+
+#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_X64) || defined(_M_ARM64)) && AV_HAVE_FAST_UNALIGNED
+
+# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
+# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))
+
+#elif AV_HAVE_FAST_UNALIGNED
+
+# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
+# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#else
+
+#ifndef AV_RB16
+# define AV_RB16(x) \
+ ((((const uint8_t*)(x))[0] << 8) | \
+ ((const uint8_t*)(x))[1])
+#endif
+#ifndef AV_WB16
+# define AV_WB16(p, val) do { \
+ uint16_t d = (val); \
+ ((uint8_t*)(p))[1] = (d); \
+ ((uint8_t*)(p))[0] = (d)>>8; \
+ } while(0)
+#endif
+
+#ifndef AV_RL16
+# define AV_RL16(x) \
+ ((((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL16
+# define AV_WL16(p, val) do { \
+ uint16_t d = (val); \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ } while(0)
+#endif
+
+#ifndef AV_RB32
+# define AV_RB32(x) \
+ (((uint32_t)((const uint8_t*)(x))[0] << 24) | \
+ (((const uint8_t*)(x))[1] << 16) | \
+ (((const uint8_t*)(x))[2] << 8) | \
+ ((const uint8_t*)(x))[3])
+#endif
+#ifndef AV_WB32
+# define AV_WB32(p, val) do { \
+ uint32_t d = (val); \
+ ((uint8_t*)(p))[3] = (d); \
+ ((uint8_t*)(p))[2] = (d)>>8; \
+ ((uint8_t*)(p))[1] = (d)>>16; \
+ ((uint8_t*)(p))[0] = (d)>>24; \
+ } while(0)
+#endif
+
+#ifndef AV_RL32
+# define AV_RL32(x) \
+ (((uint32_t)((const uint8_t*)(x))[3] << 24) | \
+ (((const uint8_t*)(x))[2] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL32
+# define AV_WL32(p, val) do { \
+ uint32_t d = (val); \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ } while(0)
+#endif
+
+#ifndef AV_RB64
+# define AV_RB64(x) \
+ (((uint64_t)((const uint8_t*)(x))[0] << 56) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 48) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[5] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[6] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[7])
+#endif
+#ifndef AV_WB64
+# define AV_WB64(p, val) do { \
+ uint64_t d = (val); \
+ ((uint8_t*)(p))[7] = (d); \
+ ((uint8_t*)(p))[6] = (d)>>8; \
+ ((uint8_t*)(p))[5] = (d)>>16; \
+ ((uint8_t*)(p))[4] = (d)>>24; \
+ ((uint8_t*)(p))[3] = (d)>>32; \
+ ((uint8_t*)(p))[2] = (d)>>40; \
+ ((uint8_t*)(p))[1] = (d)>>48; \
+ ((uint8_t*)(p))[0] = (d)>>56; \
+ } while(0)
+#endif
+
+#ifndef AV_RL64
+# define AV_RL64(x) \
+ (((uint64_t)((const uint8_t*)(x))[7] << 56) | \
+ ((uint64_t)((const uint8_t*)(x))[6] << 48) | \
+ ((uint64_t)((const uint8_t*)(x))[5] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL64
+# define AV_WL64(p, val) do { \
+ uint64_t d = (val); \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ ((uint8_t*)(p))[4] = (d)>>32; \
+ ((uint8_t*)(p))[5] = (d)>>40; \
+ ((uint8_t*)(p))[6] = (d)>>48; \
+ ((uint8_t*)(p))[7] = (d)>>56; \
+ } while(0)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RN(s, p) AV_RB##s(p)
+# define AV_WN(s, p, v) AV_WB##s(p, v)
+#else
+# define AV_RN(s, p) AV_RL##s(p)
+# define AV_WN(s, p, v) AV_WL##s(p, v)
+#endif
+
+#endif /* HAVE_FAST_UNALIGNED */
+
+#ifndef AV_RN16
+# define AV_RN16(p) AV_RN(16, p)
+#endif
+
+#ifndef AV_RN32
+# define AV_RN32(p) AV_RN(32, p)
+#endif
+
+#ifndef AV_RN64
+# define AV_RN64(p) AV_RN(64, p)
+#endif
+
+#ifndef AV_WN16
+# define AV_WN16(p, v) AV_WN(16, p, v)
+#endif
+
+#ifndef AV_WN32
+# define AV_WN32(p, v) AV_WN(32, p, v)
+#endif
+
+#ifndef AV_WN64
+# define AV_WN64(p, v) AV_WN(64, p, v)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RB(s, p) AV_RN##s(p)
+# define AV_WB(s, p, v) AV_WN##s(p, v)
+# define AV_RL(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
+#else
+# define AV_RB(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
+# define AV_RL(s, p) AV_RN##s(p)
+# define AV_WL(s, p, v) AV_WN##s(p, v)
+#endif
+
+#define AV_RB8(x) (((const uint8_t*)(x))[0])
+#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0)
+
+#define AV_RL8(x) AV_RB8(x)
+#define AV_WL8(p, d) AV_WB8(p, d)
+
+#ifndef AV_RB16
+# define AV_RB16(p) AV_RB(16, p)
+#endif
+#ifndef AV_WB16
+# define AV_WB16(p, v) AV_WB(16, p, v)
+#endif
+
+#ifndef AV_RL16
+# define AV_RL16(p) AV_RL(16, p)
+#endif
+#ifndef AV_WL16
+# define AV_WL16(p, v) AV_WL(16, p, v)
+#endif
+
+#ifndef AV_RB32
+# define AV_RB32(p) AV_RB(32, p)
+#endif
+#ifndef AV_WB32
+# define AV_WB32(p, v) AV_WB(32, p, v)
+#endif
+
+#ifndef AV_RL32
+# define AV_RL32(p) AV_RL(32, p)
+#endif
+#ifndef AV_WL32
+# define AV_WL32(p, v) AV_WL(32, p, v)
+#endif
+
+#ifndef AV_RB64
+# define AV_RB64(p) AV_RB(64, p)
+#endif
+#ifndef AV_WB64
+# define AV_WB64(p, v) AV_WB(64, p, v)
+#endif
+
+#ifndef AV_RL64
+# define AV_RL64(p) AV_RL(64, p)
+#endif
+#ifndef AV_WL64
+# define AV_WL64(p, v) AV_WL(64, p, v)
+#endif
+
+#ifndef AV_RB24
+# define AV_RB24(x) \
+ ((((const uint8_t*)(x))[0] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[2])
+#endif
+#ifndef AV_WB24
+# define AV_WB24(p, d) do { \
+ ((uint8_t*)(p))[2] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[0] = (d)>>16; \
+ } while(0)
+#endif
+
+#ifndef AV_RL24
+# define AV_RL24(x) \
+ ((((const uint8_t*)(x))[2] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL24
+# define AV_WL24(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ } while(0)
+#endif
+
+#ifndef AV_RB48
+# define AV_RB48(x) \
+ (((uint64_t)((const uint8_t*)(x))[0] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[5])
+#endif
+#ifndef AV_WB48
+# define AV_WB48(p, darg) do { \
+ uint64_t d = (darg); \
+ ((uint8_t*)(p))[5] = (d); \
+ ((uint8_t*)(p))[4] = (d)>>8; \
+ ((uint8_t*)(p))[3] = (d)>>16; \
+ ((uint8_t*)(p))[2] = (d)>>24; \
+ ((uint8_t*)(p))[1] = (d)>>32; \
+ ((uint8_t*)(p))[0] = (d)>>40; \
+ } while(0)
+#endif
+
+#ifndef AV_RL48
+# define AV_RL48(x) \
+ (((uint64_t)((const uint8_t*)(x))[5] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL48
+# define AV_WL48(p, darg) do { \
+ uint64_t d = (darg); \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ ((uint8_t*)(p))[4] = (d)>>32; \
+ ((uint8_t*)(p))[5] = (d)>>40; \
+ } while(0)
+#endif
+
+/*
+ * The AV_[RW]NA macros access naturally aligned data
+ * in a type-safe way.
+ */
+
+#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s)
+#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#ifndef AV_RN16A
+# define AV_RN16A(p) AV_RNA(16, p)
+#endif
+
+#ifndef AV_RN32A
+# define AV_RN32A(p) AV_RNA(32, p)
+#endif
+
+#ifndef AV_RN64A
+# define AV_RN64A(p) AV_RNA(64, p)
+#endif
+
+#ifndef AV_WN16A
+# define AV_WN16A(p, v) AV_WNA(16, p, v)
+#endif
+
+#ifndef AV_WN32A
+# define AV_WN32A(p, v) AV_WNA(32, p, v)
+#endif
+
+#ifndef AV_WN64A
+# define AV_WN64A(p, v) AV_WNA(64, p, v)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RLA(s, p) av_bswap##s(AV_RN##s##A(p))
+# define AV_WLA(s, p, v) AV_WN##s##A(p, av_bswap##s(v))
+#else
+# define AV_RLA(s, p) AV_RN##s##A(p)
+# define AV_WLA(s, p, v) AV_WN##s##A(p, v)
+#endif
+
+#ifndef AV_RL64A
+# define AV_RL64A(p) AV_RLA(64, p)
+#endif
+#ifndef AV_WL64A
+# define AV_WL64A(p, v) AV_WLA(64, p, v)
+#endif
+
+/*
+ * The AV_COPYxxU macros are suitable for copying data to/from unaligned
+ * memory locations.
+ */
+
+#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s));
+
+#ifndef AV_COPY16U
+# define AV_COPY16U(d, s) AV_COPYU(16, d, s)
+#endif
+
+#ifndef AV_COPY32U
+# define AV_COPY32U(d, s) AV_COPYU(32, d, s)
+#endif
+
+#ifndef AV_COPY64U
+# define AV_COPY64U(d, s) AV_COPYU(64, d, s)
+#endif
+
+#ifndef AV_COPY128U
+# define AV_COPY128U(d, s) \
+ do { \
+ AV_COPY64U(d, s); \
+ AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \
+ } while(0)
+#endif
+
+/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
+ * naturally aligned. They may be implemented using MMX,
+ * so emms_c() must be called before using any float code
+ * afterwards.
+ */
+
+#define AV_COPY(n, d, s) \
+ (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)
+
+#ifndef AV_COPY16
+# define AV_COPY16(d, s) AV_COPY(16, d, s)
+#endif
+
+#ifndef AV_COPY32
+# define AV_COPY32(d, s) AV_COPY(32, d, s)
+#endif
+
+#ifndef AV_COPY64
+# define AV_COPY64(d, s) AV_COPY(64, d, s)
+#endif
+
+#ifndef AV_COPY128
+# define AV_COPY128(d, s) \
+ do { \
+ AV_COPY64(d, s); \
+ AV_COPY64((char*)(d)+8, (char*)(s)+8); \
+ } while(0)
+#endif
+
+#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))
+
+#ifndef AV_SWAP64
+# define AV_SWAP64(a, b) AV_SWAP(64, a, b)
+#endif
+
+#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)
+
+#ifndef AV_ZERO16
+# define AV_ZERO16(d) AV_ZERO(16, d)
+#endif
+
+#ifndef AV_ZERO32
+# define AV_ZERO32(d) AV_ZERO(32, d)
+#endif
+
+#ifndef AV_ZERO64
+# define AV_ZERO64(d) AV_ZERO(64, d)
+#endif
+
+#ifndef AV_ZERO128
+# define AV_ZERO128(d) \
+ do { \
+ AV_ZERO64(d); \
+ AV_ZERO64((char*)(d)+8); \
+ } while(0)
+#endif
+
+#endif /* AVUTIL_INTREADWRITE_H */
diff --git a/media/ffvpx/libavutil/libm.h b/media/ffvpx/libavutil/libm.h
new file mode 100644
index 0000000000..a819962391
--- /dev/null
+++ b/media/ffvpx/libavutil/libm.h
@@ -0,0 +1,471 @@
+/*
+ * erf function: Copyright (c) 2006 John Maddock
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Replacements for frequently missing libm functions
+ */
+
+#ifndef AVUTIL_LIBM_H
+#define AVUTIL_LIBM_H
+
+#include <math.h>
+#include "config.h"
+#include "attributes.h"
+#include "intfloat.h"
+#include "mathematics.h"
+
+#if HAVE_MIPSFPU && HAVE_INLINE_ASM
+#include "libavutil/mips/libm_mips.h"
+#endif /* HAVE_MIPSFPU && HAVE_INLINE_ASM*/
+
+#if !HAVE_ATANF
+#undef atanf
+#define atanf(x) ((float)atan(x))
+#endif /* HAVE_ATANF */
+
+#if !HAVE_ATAN2F
+#undef atan2f
+#define atan2f(y, x) ((float)atan2(y, x))
+#endif /* HAVE_ATAN2F */
+
+#if !HAVE_POWF
+#undef powf
+#define powf(x, y) ((float)pow(x, y))
+#endif /* HAVE_POWF */
+
+#if !HAVE_CBRT
+static av_always_inline double cbrt(double x)
+{
+ return x < 0 ? -pow(-x, 1.0 / 3.0) : pow(x, 1.0 / 3.0);
+}
+#endif /* HAVE_CBRT */
+
+#if !HAVE_CBRTF
+static av_always_inline float cbrtf(float x)
+{
+ return x < 0 ? -powf(-x, 1.0 / 3.0) : powf(x, 1.0 / 3.0);
+}
+#endif /* HAVE_CBRTF */
+
+#if !HAVE_COPYSIGN
+static av_always_inline double copysign(double x, double y)
+{
+ uint64_t vx = av_double2int(x);
+ uint64_t vy = av_double2int(y);
+ return av_int2double((vx & UINT64_C(0x7fffffffffffffff)) | (vy & UINT64_C(0x8000000000000000)));
+}
+#endif /* HAVE_COPYSIGN */
+
+#if !HAVE_COSF
+#undef cosf
+#define cosf(x) ((float)cos(x))
+#endif /* HAVE_COSF */
+
+#if !HAVE_ERF
+static inline double ff_eval_poly(const double *coeff, int size, double x) {
+ double sum = coeff[size-1];
+ int i;
+ for (i = size-2; i >= 0; --i) {
+ sum *= x;
+ sum += coeff[i];
+ }
+ return sum;
+}
+
+/**
+ * erf function
+ * Algorithm taken from the Boost project, source:
+ * http://www.boost.org/doc/libs/1_46_1/boost/math/special_functions/erf.hpp
+ * Use, modification and distribution are subject to the
+ * Boost Software License, Version 1.0 (see notice below).
+ * Boost Software License - Version 1.0 - August 17th, 2003
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+ */
+static inline double erf(double z)
+{
+#ifndef FF_ARRAY_ELEMS
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+ double result;
+
+ /* handle the symmetry: erf(-x) = -erf(x) */
+ if (z < 0)
+ return -erf(-z);
+
+ /* branch based on range of z, and pick appropriate approximation */
+ if (z == 0)
+ return 0;
+ else if (z < 1e-10)
+ return z * 1.125 + z * 0.003379167095512573896158903121545171688;
+ else if (z < 0.5) {
+ // Maximum Deviation Found: 1.561e-17
+ // Expected Error Term: 1.561e-17
+ // Maximum Relative Change in Control Points: 1.155e-04
+ // Max Error found at double precision = 2.961182e-17
+
+ static const double y = 1.044948577880859375;
+ static const double p[] = {
+ 0.0834305892146531832907,
+ -0.338165134459360935041,
+ -0.0509990735146777432841,
+ -0.00772758345802133288487,
+ -0.000322780120964605683831,
+ };
+ static const double q[] = {
+ 1,
+ 0.455004033050794024546,
+ 0.0875222600142252549554,
+ 0.00858571925074406212772,
+ 0.000370900071787748000569,
+ };
+ double zz = z * z;
+ return z * (y + ff_eval_poly(p, FF_ARRAY_ELEMS(p), zz) / ff_eval_poly(q, FF_ARRAY_ELEMS(q), zz));
+ }
+ /* here onwards compute erfc */
+ else if (z < 1.5) {
+ // Maximum Deviation Found: 3.702e-17
+ // Expected Error Term: 3.702e-17
+ // Maximum Relative Change in Control Points: 2.845e-04
+ // Max Error found at double precision = 4.841816e-17
+ static const double y = 0.405935764312744140625;
+ static const double p[] = {
+ -0.098090592216281240205,
+ 0.178114665841120341155,
+ 0.191003695796775433986,
+ 0.0888900368967884466578,
+ 0.0195049001251218801359,
+ 0.00180424538297014223957,
+ };
+ static const double q[] = {
+ 1,
+ 1.84759070983002217845,
+ 1.42628004845511324508,
+ 0.578052804889902404909,
+ 0.12385097467900864233,
+ 0.0113385233577001411017,
+ 0.337511472483094676155e-5,
+ };
+ result = y + ff_eval_poly(p, FF_ARRAY_ELEMS(p), z - 0.5) / ff_eval_poly(q, FF_ARRAY_ELEMS(q), z - 0.5);
+ result *= exp(-z * z) / z;
+ return 1 - result;
+ }
+ else if (z < 2.5) {
+ // Max Error found at double precision = 6.599585e-18
+ // Maximum Deviation Found: 3.909e-18
+ // Expected Error Term: 3.909e-18
+ // Maximum Relative Change in Control Points: 9.886e-05
+ static const double y = 0.50672817230224609375;
+ static const double p[] = {
+ -0.0243500476207698441272,
+ 0.0386540375035707201728,
+ 0.04394818964209516296,
+ 0.0175679436311802092299,
+ 0.00323962406290842133584,
+ 0.000235839115596880717416,
+ };
+ static const double q[] = {
+ 1,
+ 1.53991494948552447182,
+ 0.982403709157920235114,
+ 0.325732924782444448493,
+ 0.0563921837420478160373,
+ 0.00410369723978904575884,
+ };
+ result = y + ff_eval_poly(p, FF_ARRAY_ELEMS(p), z - 1.5) / ff_eval_poly(q, FF_ARRAY_ELEMS(q), z - 1.5);
+ result *= exp(-z * z) / z;
+ return 1 - result;
+ }
+ else if (z < 4.5) {
+ // Maximum Deviation Found: 1.512e-17
+ // Expected Error Term: 1.512e-17
+ // Maximum Relative Change in Control Points: 2.222e-04
+ // Max Error found at double precision = 2.062515e-17
+ static const double y = 0.5405750274658203125;
+ static const double p[] = {
+ 0.00295276716530971662634,
+ 0.0137384425896355332126,
+ 0.00840807615555585383007,
+ 0.00212825620914618649141,
+ 0.000250269961544794627958,
+ 0.113212406648847561139e-4,
+ };
+ static const double q[] = {
+ 1,
+ 1.04217814166938418171,
+ 0.442597659481563127003,
+ 0.0958492726301061423444,
+ 0.0105982906484876531489,
+ 0.000479411269521714493907,
+ };
+ result = y + ff_eval_poly(p, FF_ARRAY_ELEMS(p), z - 3.5) / ff_eval_poly(q, FF_ARRAY_ELEMS(q), z - 3.5);
+ result *= exp(-z * z) / z;
+ return 1 - result;
+ }
+ /* differ from Boost here, the claim of underflow of erfc(x) past 5.8 is
+ * slightly incorrect, change to 5.92
+ * (really somewhere between 5.9125 and 5.925 is when it saturates) */
+ else if (z < 5.92) {
+ // Max Error found at double precision = 2.997958e-17
+ // Maximum Deviation Found: 2.860e-17
+ // Expected Error Term: 2.859e-17
+ // Maximum Relative Change in Control Points: 1.357e-05
+ static const double y = 0.5579090118408203125;
+ static const double p[] = {
+ 0.00628057170626964891937,
+ 0.0175389834052493308818,
+ -0.212652252872804219852,
+ -0.687717681153649930619,
+ -2.5518551727311523996,
+ -3.22729451764143718517,
+ -2.8175401114513378771,
+ };
+ static const double q[] = {
+ 1,
+ 2.79257750980575282228,
+ 11.0567237927800161565,
+ 15.930646027911794143,
+ 22.9367376522880577224,
+ 13.5064170191802889145,
+ 5.48409182238641741584,
+ };
+ result = y + ff_eval_poly(p, FF_ARRAY_ELEMS(p), 1 / z) / ff_eval_poly(q, FF_ARRAY_ELEMS(q), 1 / z);
+ result *= exp(-z * z) / z;
+ return 1 - result;
+ }
+ /* handle the nan case, but don't use isnan for max portability */
+ else if (z != z)
+ return z;
+ /* finally return saturated result */
+ else
+ return 1;
+}
+#endif /* HAVE_ERF */
+
+#if !HAVE_EXPF
+#undef expf
+#define expf(x) ((float)exp(x))
+#endif /* HAVE_EXPF */
+
+#if !HAVE_EXP2
+#undef exp2
+#define exp2(x) exp((x) * M_LN2)
+#endif /* HAVE_EXP2 */
+
+#if !HAVE_EXP2F
+#undef exp2f
+#define exp2f(x) ((float)exp2(x))
+#endif /* HAVE_EXP2F */
+
+#if !HAVE_ISINF
+#undef isinf
+/* Note: these do not follow the BSD/Apple/GNU convention of returning -1 for
+-Inf, +1 for Inf, 0 otherwise, but merely follow the POSIX/ISO mandated spec of
+returning a non-zero value for +/-Inf, 0 otherwise. */
+static av_always_inline av_const int avpriv_isinff(float x)
+{
+ uint32_t v = av_float2int(x);
+ if ((v & 0x7f800000) != 0x7f800000)
+ return 0;
+ return !(v & 0x007fffff);
+}
+
+static av_always_inline av_const int avpriv_isinf(double x)
+{
+ uint64_t v = av_double2int(x);
+ if ((v & 0x7ff0000000000000) != 0x7ff0000000000000)
+ return 0;
+ return !(v & 0x000fffffffffffff);
+}
+
+#define isinf(x) \
+ (sizeof(x) == sizeof(float) \
+ ? avpriv_isinff(x) \
+ : avpriv_isinf(x))
+#endif /* HAVE_ISINF */
+
+#if !HAVE_ISNAN
+static av_always_inline av_const int avpriv_isnanf(float x)
+{
+ uint32_t v = av_float2int(x);
+ if ((v & 0x7f800000) != 0x7f800000)
+ return 0;
+ return v & 0x007fffff;
+}
+
+static av_always_inline av_const int avpriv_isnan(double x)
+{
+ uint64_t v = av_double2int(x);
+ if ((v & 0x7ff0000000000000) != 0x7ff0000000000000)
+ return 0;
+ return (v & 0x000fffffffffffff) && 1;
+}
+
+#define isnan(x) \
+ (sizeof(x) == sizeof(float) \
+ ? avpriv_isnanf(x) \
+ : avpriv_isnan(x))
+#endif /* HAVE_ISNAN */
+
+#if !HAVE_ISFINITE
+static av_always_inline av_const int avpriv_isfinitef(float x)
+{
+ uint32_t v = av_float2int(x);
+ return (v & 0x7f800000) != 0x7f800000;
+}
+
+static av_always_inline av_const int avpriv_isfinite(double x)
+{
+ uint64_t v = av_double2int(x);
+ return (v & 0x7ff0000000000000) != 0x7ff0000000000000;
+}
+
+#define isfinite(x) \
+ (sizeof(x) == sizeof(float) \
+ ? avpriv_isfinitef(x) \
+ : avpriv_isfinite(x))
+#endif /* HAVE_ISFINITE */
+
+#if !HAVE_HYPOT
+static inline av_const double hypot(double x, double y)
+{
+ double ret, temp;
+ x = fabs(x);
+ y = fabs(y);
+
+ if (isinf(x) || isinf(y))
+ return av_int2double(0x7ff0000000000000);
+ if (x == 0 || y == 0)
+ return x + y;
+ if (x < y) {
+ temp = x;
+ x = y;
+ y = temp;
+ }
+
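+    /* x now holds the larger magnitude, so y/x <= 1 and y*y cannot overflow */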
+ y = y/x;
+ return x*sqrt(1 + y*y);
+}
+#endif /* HAVE_HYPOT */
+
+#if !HAVE_LDEXPF
+#undef ldexpf
+#define ldexpf(x, exp) ((float)ldexp(x, exp))
+#endif /* HAVE_LDEXPF */
+
+#if !HAVE_LLRINT
+#undef llrint
+#define llrint(x) ((long long)rint(x))
+#endif /* HAVE_LLRINT */
+
+#if !HAVE_LLRINTF
+#undef llrintf
+#define llrintf(x) ((long long)rint(x))
+#endif /* HAVE_LLRINTF */
+
+#if !HAVE_LOG2
+#undef log2
+#define log2(x) (log(x) * 1.44269504088896340736)
+#endif /* HAVE_LOG2 */
+
+#if !HAVE_LOG2F
+#undef log2f
+#define log2f(x) ((float)log2(x))
+#endif /* HAVE_LOG2F */
+
+#if !HAVE_LOG10F
+#undef log10f
+#define log10f(x) ((float)log10(x))
+#endif /* HAVE_LOG10F */
+
+#if !HAVE_SINF
+#undef sinf
+#define sinf(x) ((float)sin(x))
+#endif /* HAVE_SINF */
+
+#if !HAVE_RINT
+static inline double rint(double x)
+{
+ return x >= 0 ? floor(x + 0.5) : ceil(x - 0.5);
+}
+#endif /* HAVE_RINT */
+
+#if !HAVE_LRINT
+static av_always_inline av_const long int lrint(double x)
+{
+ return rint(x);
+}
+#endif /* HAVE_LRINT */
+
+#if !HAVE_LRINTF
+static av_always_inline av_const long int lrintf(float x)
+{
+ return (int)(rint(x));
+}
+#endif /* HAVE_LRINTF */
+
+#if !HAVE_ROUND
+static av_always_inline av_const double round(double x)
+{
+ return (x > 0) ? floor(x + 0.5) : ceil(x - 0.5);
+}
+#endif /* HAVE_ROUND */
+
+#if !HAVE_ROUNDF
+static av_always_inline av_const float roundf(float x)
+{
+ return (x > 0) ? floor(x + 0.5) : ceil(x - 0.5);
+}
+#endif /* HAVE_ROUNDF */
+
+#if !HAVE_TRUNC
+static av_always_inline av_const double trunc(double x)
+{
+ return (x > 0) ? floor(x) : ceil(x);
+}
+#endif /* HAVE_TRUNC */
+
+#if !HAVE_TRUNCF
+static av_always_inline av_const float truncf(float x)
+{
+ return (x > 0) ? floor(x) : ceil(x);
+}
+#endif /* HAVE_TRUNCF */
+
+#endif /* AVUTIL_LIBM_H */
diff --git a/media/ffvpx/libavutil/lls.c b/media/ffvpx/libavutil/lls.c
new file mode 100644
index 0000000000..c1e038daf1
--- /dev/null
+++ b/media/ffvpx/libavutil/lls.c
@@ -0,0 +1,123 @@
+/*
+ * linear least squares model
+ *
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * linear least squares model
+ */
+
+#include <math.h>
+#include <string.h>
+
+#include "config.h"
+#include "attributes.h"
+#include "lls.h"
+
+static void update_lls(LLSModel *m, const double *var)
+{
+ int i, j;
+
+ for (i = 0; i <= m->indep_count; i++) {
+ for (j = i; j <= m->indep_count; j++) {
+ m->covariance[i][j] += var[i] * var[j];
+ }
+ }
+}
+
+void avpriv_solve_lls(LLSModel *m, double threshold, unsigned short min_order)
+{
+ int i, j, k;
+ double (*factor)[MAX_VARS_ALIGN] = (void *) &m->covariance[1][0];
+ double (*covar) [MAX_VARS_ALIGN] = (void *) &m->covariance[1][1];
+ double *covar_y = m->covariance[0];
+ int count = m->indep_count;
+
+ for (i = 0; i < count; i++) {
+ for (j = i; j < count; j++) {
+ double sum = covar[i][j];
+
+ for (k = 0; k <= i-1; k++)
+ sum -= factor[i][k] * factor[j][k];
+
+ if (i == j) {
+ if (sum < threshold)
+ sum = 1.0;
+ factor[i][i] = sqrt(sum);
+ } else {
+ factor[j][i] = sum / factor[i][i];
+ }
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ double sum = covar_y[i + 1];
+
+ for (k = 0; k <= i-1; k++)
+ sum -= factor[i][k] * m->coeff[0][k];
+
+ m->coeff[0][i] = sum / factor[i][i];
+ }
+
+ for (j = count - 1; j >= min_order; j--) {
+ for (i = j; i >= 0; i--) {
+ double sum = m->coeff[0][i];
+
+ for (k = i + 1; k <= j; k++)
+ sum -= factor[k][i] * m->coeff[j][k];
+
+ m->coeff[j][i] = sum / factor[i][i];
+ }
+
+ m->variance[j] = covar_y[0];
+
+ for (i = 0; i <= j; i++) {
+ double sum = m->coeff[j][i] * covar[i][i] - 2 * covar_y[i + 1];
+
+ for (k = 0; k < i; k++)
+ sum += 2 * m->coeff[j][k] * covar[k][i];
+
+ m->variance[j] += m->coeff[j][i] * sum;
+ }
+ }
+}
+
+static double evaluate_lls(LLSModel *m, const double *param, int order)
+{
+ int i;
+ double out = 0;
+
+ for (i = 0; i <= order; i++)
+ out += param[i] * m->coeff[order][i];
+
+ return out;
+}
+
+av_cold void avpriv_init_lls(LLSModel *m, int indep_count)
+{
+ memset(m, 0, sizeof(LLSModel));
+ m->indep_count = indep_count;
+ m->update_lls = update_lls;
+ m->evaluate_lls = evaluate_lls;
+#if ARCH_X86
+ ff_init_lls_x86(m);
+#endif
+}
diff --git a/media/ffvpx/libavutil/lls.h b/media/ffvpx/libavutil/lls.h
new file mode 100644
index 0000000000..0709275822
--- /dev/null
+++ b/media/ffvpx/libavutil/lls.h
@@ -0,0 +1,63 @@
+/*
+ * linear least squares model
+ *
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LLS_H
+#define AVUTIL_LLS_H
+
+#include "macros.h"
+#include "mem_internal.h"
+
+#define MAX_VARS 32
+#define MAX_VARS_ALIGN FFALIGN(MAX_VARS+1,4)
+
+//FIXME avoid direct access to LLSModel from outside
+
+/**
+ * Linear least squares model.
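+ *
+ * A minimal, illustrative usage sketch (the sample values and the choice of
+ * two independent variables are arbitrary):
+ * @code
+ * LLSModel m;
+ * DECLARE_ALIGNED(32, double, var[MAX_VARS_ALIGN]) = { 0 };
+ * int i;
+ *
+ * avpriv_init_lls(&m, 2);
+ * for (i = 0; i < 100; i++) {
+ *     var[0] = 2.0 * i + 1.0; // value to be predicted
+ *     var[1] = i;             // first independent variable
+ *     var[2] = 1.0;           // second independent variable
+ *     m.update_lls(&m, var);  // accumulate the covariance matrix
+ * }
+ * avpriv_solve_lls(&m, 1e-10, 0);
+ * // predict var[0] from var[1..2] using the order-1 coefficients
+ * double pred = m.evaluate_lls(&m, var + 1, 1);
+ * @endcode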
+ */
+typedef struct LLSModel {
+ DECLARE_ALIGNED(32, double, covariance[MAX_VARS_ALIGN][MAX_VARS_ALIGN]);
+ DECLARE_ALIGNED(32, double, coeff[MAX_VARS][MAX_VARS]);
+ double variance[MAX_VARS];
+ int indep_count;
+ /**
+ * Take the outer-product of var[] with itself, and add to the covariance matrix.
+ * @param m this context
+     * @param var training samples, starting with the value to be predicted;
+     *            must be 32-byte aligned, and any padding elements must be
+     *            initialized (i.e. not denormal/NaN).
+ */
+ void (*update_lls)(struct LLSModel *m, const double *var);
+ /**
+ * Inner product of var[] and the LPC coefs.
+ * @param m this context
+     * @param var training samples, excluding the value to be predicted;
+     *            may be unaligned.
+ * @param order lpc order
+ */
+ double (*evaluate_lls)(struct LLSModel *m, const double *var, int order);
+} LLSModel;
+
+void avpriv_init_lls(LLSModel *m, int indep_count);
+void ff_init_lls_x86(LLSModel *m);
+void avpriv_solve_lls(LLSModel *m, double threshold, unsigned short min_order);
+
+#endif /* AVUTIL_LLS_H */
diff --git a/media/ffvpx/libavutil/log.c b/media/ffvpx/libavutil/log.c
new file mode 100644
index 0000000000..5948e50467
--- /dev/null
+++ b/media/ffvpx/libavutil/log.c
@@ -0,0 +1,492 @@
+/*
+ * log functions
+ * Copyright (c) 2003 Michel Bardiaux
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * logging functions
+ */
+
+#include "config.h"
+
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#if HAVE_IO_H
+#include <io.h>
+#endif
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "bprint.h"
+#include "common.h"
+#include "internal.h"
+#include "log.h"
+#include "thread.h"
+
+static AVMutex mutex = AV_MUTEX_INITIALIZER;
+
+#define LINE_SZ 1024
+
+#if HAVE_VALGRIND_VALGRIND_H
+#include <valgrind/valgrind.h>
+/* this is the log level at which valgrind will output a full backtrace */
+#define BACKTRACE_LOGLEVEL AV_LOG_ERROR
+#endif
+
+static int av_log_level = AV_LOG_INFO;
+static int flags;
+
+#define NB_LEVELS 8
+#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE && HAVE_GETSTDHANDLE
+#include <windows.h>
+static const uint8_t color[16 + AV_CLASS_CATEGORY_NB] = {
+ [AV_LOG_PANIC /8] = 12,
+ [AV_LOG_FATAL /8] = 12,
+ [AV_LOG_ERROR /8] = 12,
+ [AV_LOG_WARNING/8] = 14,
+ [AV_LOG_INFO /8] = 7,
+ [AV_LOG_VERBOSE/8] = 10,
+ [AV_LOG_DEBUG /8] = 10,
+ [AV_LOG_TRACE /8] = 8,
+ [16+AV_CLASS_CATEGORY_NA ] = 7,
+ [16+AV_CLASS_CATEGORY_INPUT ] = 13,
+ [16+AV_CLASS_CATEGORY_OUTPUT ] = 5,
+ [16+AV_CLASS_CATEGORY_MUXER ] = 13,
+ [16+AV_CLASS_CATEGORY_DEMUXER ] = 5,
+ [16+AV_CLASS_CATEGORY_ENCODER ] = 11,
+ [16+AV_CLASS_CATEGORY_DECODER ] = 3,
+ [16+AV_CLASS_CATEGORY_FILTER ] = 10,
+ [16+AV_CLASS_CATEGORY_BITSTREAM_FILTER] = 9,
+ [16+AV_CLASS_CATEGORY_SWSCALER ] = 7,
+ [16+AV_CLASS_CATEGORY_SWRESAMPLER ] = 7,
+ [16+AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT ] = 13,
+ [16+AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT ] = 5,
+ [16+AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT ] = 13,
+ [16+AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT ] = 5,
+ [16+AV_CLASS_CATEGORY_DEVICE_OUTPUT ] = 13,
+ [16+AV_CLASS_CATEGORY_DEVICE_INPUT ] = 5,
+};
+
+static int16_t background, attr_orig;
+static HANDLE con;
+#else
+
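+/* Each entry packs a 256-color background index in bits 16-23, a 256-color
+ * foreground index in bits 8-15 and a basic "attribute;color" SGR pair in the
+ * low byte; ansi_fputs() picks the encoding matching the detected terminal. */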
+static const uint32_t color[16 + AV_CLASS_CATEGORY_NB] = {
+ [AV_LOG_PANIC /8] = 52 << 16 | 196 << 8 | 0x41,
+ [AV_LOG_FATAL /8] = 208 << 8 | 0x41,
+ [AV_LOG_ERROR /8] = 196 << 8 | 0x11,
+ [AV_LOG_WARNING/8] = 226 << 8 | 0x03,
+ [AV_LOG_INFO /8] = 253 << 8 | 0x09,
+ [AV_LOG_VERBOSE/8] = 40 << 8 | 0x02,
+ [AV_LOG_DEBUG /8] = 34 << 8 | 0x02,
+ [AV_LOG_TRACE /8] = 34 << 8 | 0x07,
+ [16+AV_CLASS_CATEGORY_NA ] = 250 << 8 | 0x09,
+ [16+AV_CLASS_CATEGORY_INPUT ] = 219 << 8 | 0x15,
+ [16+AV_CLASS_CATEGORY_OUTPUT ] = 201 << 8 | 0x05,
+ [16+AV_CLASS_CATEGORY_MUXER ] = 213 << 8 | 0x15,
+ [16+AV_CLASS_CATEGORY_DEMUXER ] = 207 << 8 | 0x05,
+ [16+AV_CLASS_CATEGORY_ENCODER ] = 51 << 8 | 0x16,
+ [16+AV_CLASS_CATEGORY_DECODER ] = 39 << 8 | 0x06,
+ [16+AV_CLASS_CATEGORY_FILTER ] = 155 << 8 | 0x12,
+ [16+AV_CLASS_CATEGORY_BITSTREAM_FILTER] = 192 << 8 | 0x14,
+ [16+AV_CLASS_CATEGORY_SWSCALER ] = 153 << 8 | 0x14,
+ [16+AV_CLASS_CATEGORY_SWRESAMPLER ] = 147 << 8 | 0x14,
+ [16+AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT ] = 213 << 8 | 0x15,
+ [16+AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT ] = 207 << 8 | 0x05,
+ [16+AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT ] = 213 << 8 | 0x15,
+ [16+AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT ] = 207 << 8 | 0x05,
+ [16+AV_CLASS_CATEGORY_DEVICE_OUTPUT ] = 213 << 8 | 0x15,
+ [16+AV_CLASS_CATEGORY_DEVICE_INPUT ] = 207 << 8 | 0x05,
+};
+
+#endif
+static int use_color = -1;
+
+#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE && HAVE_GETSTDHANDLE
+static void win_console_puts(const char *str)
+{
+ const uint8_t *q = str;
+ uint16_t line[LINE_SZ];
+
+ while (*q) {
+ uint16_t *buf = line;
+ DWORD nb_chars = 0;
+ DWORD written;
+
+ while (*q && nb_chars < LINE_SZ - 1) {
+ uint32_t ch;
+ uint16_t tmp;
+
+ GET_UTF8(ch, *q ? *q++ : 0, ch = 0xfffd; goto continue_on_invalid;)
+continue_on_invalid:
+ PUT_UTF16(ch, tmp, *buf++ = tmp; nb_chars++;)
+ }
+
+ WriteConsoleW(con, line, nb_chars, &written, NULL);
+ }
+}
+#endif
+
+static void check_color_terminal(void)
+{
+ char *term = getenv("TERM");
+
+#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE && HAVE_GETSTDHANDLE
+ CONSOLE_SCREEN_BUFFER_INFO con_info;
+ DWORD dummy;
+ con = GetStdHandle(STD_ERROR_HANDLE);
+ if (con != INVALID_HANDLE_VALUE && !GetConsoleMode(con, &dummy))
+ con = INVALID_HANDLE_VALUE;
+ if (con != INVALID_HANDLE_VALUE) {
+ GetConsoleScreenBufferInfo(con, &con_info);
+ attr_orig = con_info.wAttributes;
+ background = attr_orig & 0xF0;
+ }
+#endif
+
+ if (getenv("AV_LOG_FORCE_NOCOLOR")) {
+ use_color = 0;
+ } else if (getenv("AV_LOG_FORCE_COLOR")) {
+ use_color = 1;
+ } else {
+#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE && HAVE_GETSTDHANDLE
+ use_color = (con != INVALID_HANDLE_VALUE);
+#elif HAVE_ISATTY
+ use_color = (term && isatty(2));
+#else
+ use_color = 0;
+#endif
+ }
+
+ if (getenv("AV_LOG_FORCE_256COLOR") || term && strstr(term, "256color"))
+ use_color *= 256;
+}
+
+static void ansi_fputs(int level, int tint, const char *str, int local_use_color)
+{
+ if (local_use_color == 1) {
+ fprintf(stderr,
+ "\033[%"PRIu32";3%"PRIu32"m%s\033[0m",
+ (color[level] >> 4) & 15,
+ color[level] & 15,
+ str);
+ } else if (tint && use_color == 256) {
+ fprintf(stderr,
+ "\033[48;5;%"PRIu32"m\033[38;5;%dm%s\033[0m",
+ (color[level] >> 16) & 0xff,
+ tint,
+ str);
+ } else if (local_use_color == 256) {
+ fprintf(stderr,
+ "\033[48;5;%"PRIu32"m\033[38;5;%"PRIu32"m%s\033[0m",
+ (color[level] >> 16) & 0xff,
+ (color[level] >> 8) & 0xff,
+ str);
+ } else
+ fputs(str, stderr);
+}
+
+static void colored_fputs(int level, int tint, const char *str)
+{
+ int local_use_color;
+ if (!*str)
+ return;
+
+ if (use_color < 0)
+ check_color_terminal();
+
+ if (level == AV_LOG_INFO/8) local_use_color = 0;
+ else local_use_color = use_color;
+
+#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE && HAVE_GETSTDHANDLE
+ if (con != INVALID_HANDLE_VALUE) {
+ if (local_use_color)
+ SetConsoleTextAttribute(con, background | color[level]);
+ win_console_puts(str);
+ if (local_use_color)
+ SetConsoleTextAttribute(con, attr_orig);
+ } else {
+ ansi_fputs(level, tint, str, local_use_color);
+ }
+#else
+ ansi_fputs(level, tint, str, local_use_color);
+#endif
+
+}
+
+const char *av_default_item_name(void *ptr)
+{
+ return (*(AVClass **) ptr)->class_name;
+}
+
+AVClassCategory av_default_get_category(void *ptr)
+{
+ return (*(AVClass **) ptr)->category;
+}
+
+static void sanitize(uint8_t *line){
+ while(*line){
+ if(*line < 0x08 || (*line > 0x0D && *line < 0x20))
+ *line='?';
+ line++;
+ }
+}
+
+static int get_category(void *ptr){
+ AVClass *avc = *(AVClass **) ptr;
+ if( !avc
+ || (avc->version&0xFF)<100
+ || avc->version < (51 << 16 | 59 << 8)
+ || avc->category >= AV_CLASS_CATEGORY_NB) return AV_CLASS_CATEGORY_NA + 16;
+
+ if(avc->get_category)
+ return avc->get_category(ptr) + 16;
+
+ return avc->category + 16;
+}
+
+static const char *get_level_str(int level)
+{
+ switch (level) {
+ case AV_LOG_QUIET:
+ return "quiet";
+ case AV_LOG_DEBUG:
+ return "debug";
+ case AV_LOG_TRACE:
+ return "trace";
+ case AV_LOG_VERBOSE:
+ return "verbose";
+ case AV_LOG_INFO:
+ return "info";
+ case AV_LOG_WARNING:
+ return "warning";
+ case AV_LOG_ERROR:
+ return "error";
+ case AV_LOG_FATAL:
+ return "fatal";
+ case AV_LOG_PANIC:
+ return "panic";
+ default:
+ return "";
+ }
+}
+
+static void format_line(void *avcl, int level, const char *fmt, va_list vl,
+ AVBPrint part[4], int *print_prefix, int type[2])
+{
+ AVClass* avc = avcl ? *(AVClass **) avcl : NULL;
+ av_bprint_init(part+0, 0, AV_BPRINT_SIZE_AUTOMATIC);
+ av_bprint_init(part+1, 0, AV_BPRINT_SIZE_AUTOMATIC);
+ av_bprint_init(part+2, 0, AV_BPRINT_SIZE_AUTOMATIC);
+ av_bprint_init(part+3, 0, 65536);
+
+ if(type) type[0] = type[1] = AV_CLASS_CATEGORY_NA + 16;
+ if (*print_prefix && avc) {
+ if (avc->parent_log_context_offset) {
+ AVClass** parent = *(AVClass ***) (((uint8_t *) avcl) +
+ avc->parent_log_context_offset);
+ if (parent && *parent) {
+ av_bprintf(part+0, "[%s @ %p] ",
+ (*parent)->item_name(parent), parent);
+ if(type) type[0] = get_category(parent);
+ }
+ }
+ av_bprintf(part+1, "[%s @ %p] ",
+ avc->item_name(avcl), avcl);
+ if(type) type[1] = get_category(avcl);
+ }
+
+ if (*print_prefix && (level > AV_LOG_QUIET) && (flags & AV_LOG_PRINT_LEVEL))
+ av_bprintf(part+2, "[%s] ", get_level_str(level));
+
+ av_vbprintf(part+3, fmt, vl);
+
+ if(*part[0].str || *part[1].str || *part[2].str || *part[3].str) {
+ char lastc = part[3].len && part[3].len <= part[3].size ? part[3].str[part[3].len - 1] : 0;
+ *print_prefix = lastc == '\n' || lastc == '\r';
+ }
+}
+
+void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl,
+ char *line, int line_size, int *print_prefix)
+{
+ av_log_format_line2(ptr, level, fmt, vl, line, line_size, print_prefix);
+}
+
+int av_log_format_line2(void *ptr, int level, const char *fmt, va_list vl,
+ char *line, int line_size, int *print_prefix)
+{
+ AVBPrint part[4];
+ int ret;
+
+ format_line(ptr, level, fmt, vl, part, print_prefix, NULL);
+ ret = snprintf(line, line_size, "%s%s%s%s", part[0].str, part[1].str, part[2].str, part[3].str);
+ av_bprint_finalize(part+3, NULL);
+ return ret;
+}
+
+void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl)
+{
+ static int print_prefix = 1;
+ static int count;
+ static char prev[LINE_SZ];
+ AVBPrint part[4];
+ char line[LINE_SZ];
+ static int is_atty;
+ int type[2];
+ unsigned tint = 0;
+
+ if (level >= 0) {
+ tint = level & 0xff00;
+ level &= 0xff;
+ }
+
+ if (level > av_log_level)
+ return;
+ ff_mutex_lock(&mutex);
+
+ format_line(ptr, level, fmt, vl, part, &print_prefix, type);
+ snprintf(line, sizeof(line), "%s%s%s%s", part[0].str, part[1].str, part[2].str, part[3].str);
+
+#if HAVE_ISATTY
+ if (!is_atty)
+ is_atty = isatty(2) ? 1 : -1;
+#endif
+
+ if (print_prefix && (flags & AV_LOG_SKIP_REPEATED) && !strcmp(line, prev) &&
+ *line && line[strlen(line) - 1] != '\r'){
+ count++;
+ if (is_atty == 1)
+ fprintf(stderr, " Last message repeated %d times\r", count);
+ goto end;
+ }
+ if (count > 0) {
+ fprintf(stderr, " Last message repeated %d times\n", count);
+ count = 0;
+ }
+ strcpy(prev, line);
+ sanitize(part[0].str);
+ colored_fputs(type[0], 0, part[0].str);
+ sanitize(part[1].str);
+ colored_fputs(type[1], 0, part[1].str);
+ sanitize(part[2].str);
+ colored_fputs(av_clip(level >> 3, 0, NB_LEVELS - 1), tint >> 8, part[2].str);
+ sanitize(part[3].str);
+ colored_fputs(av_clip(level >> 3, 0, NB_LEVELS - 1), tint >> 8, part[3].str);
+
+#if CONFIG_VALGRIND_BACKTRACE
+ if (level <= BACKTRACE_LOGLEVEL)
+ VALGRIND_PRINTF_BACKTRACE("%s", "");
+#endif
+end:
+ av_bprint_finalize(part+3, NULL);
+ ff_mutex_unlock(&mutex);
+}
+
+static void (*av_log_callback)(void*, int, const char*, va_list) =
+ av_log_default_callback;
+
+void av_log(void* avcl, int level, const char *fmt, ...)
+{
+ va_list vl;
+ va_start(vl, fmt);
+ av_vlog(avcl, level, fmt, vl);
+ va_end(vl);
+}
+
+void av_log_once(void* avcl, int initial_level, int subsequent_level, int *state, const char *fmt, ...)
+{
+ va_list vl;
+ va_start(vl, fmt);
+ av_vlog(avcl, *state ? subsequent_level : initial_level, fmt, vl);
+ va_end(vl);
+ *state = 1;
+}
+
+void av_vlog(void* avcl, int level, const char *fmt, va_list vl)
+{
+ AVClass* avc = avcl ? *(AVClass **) avcl : NULL;
+ void (*log_callback)(void*, int, const char*, va_list) = av_log_callback;
+ if (avc && avc->version >= (50 << 16 | 15 << 8 | 2) &&
+ avc->log_level_offset_offset && level >= AV_LOG_FATAL)
+ level += *(int *) (((uint8_t *) avcl) + avc->log_level_offset_offset);
+ if (log_callback)
+ log_callback(avcl, level, fmt, vl);
+}
+
+int av_log_get_level(void)
+{
+ return av_log_level;
+}
+
+void av_log_set_level(int level)
+{
+ av_log_level = level;
+}
+
+void av_log_set_flags(int arg)
+{
+ flags = arg;
+}
+
+int av_log_get_flags(void)
+{
+ return flags;
+}
+
+void av_log_set_callback(void (*callback)(void*, int, const char*, va_list))
+{
+ av_log_callback = callback;
+}
+
+static void missing_feature_sample(int sample, void *avc, const char *msg,
+ va_list argument_list)
+{
+ av_vlog(avc, AV_LOG_WARNING, msg, argument_list);
+ av_log(avc, AV_LOG_WARNING, " is not implemented. Update your FFmpeg "
+ "version to the newest one from Git. If the problem still "
+ "occurs, it means that your file has a feature which has not "
+ "been implemented.\n");
+ if (sample)
+ av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample "
+ "of this file to https://streams.videolan.org/upload/ "
+ "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n");
+}
+
+void avpriv_request_sample(void *avc, const char *msg, ...)
+{
+ va_list argument_list;
+
+ va_start(argument_list, msg);
+ missing_feature_sample(1, avc, msg, argument_list);
+ va_end(argument_list);
+}
+
+void avpriv_report_missing_feature(void *avc, const char *msg, ...)
+{
+ va_list argument_list;
+
+ va_start(argument_list, msg);
+ missing_feature_sample(0, avc, msg, argument_list);
+ va_end(argument_list);
+}
diff --git a/media/ffvpx/libavutil/log.h b/media/ffvpx/libavutil/log.h
new file mode 100644
index 0000000000..ab7ceabe22
--- /dev/null
+++ b/media/ffvpx/libavutil/log.h
@@ -0,0 +1,387 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LOG_H
+#define AVUTIL_LOG_H
+
+#include <stdarg.h>
+#include "attributes.h"
+#include "version.h"
+
+typedef enum {
+ AV_CLASS_CATEGORY_NA = 0,
+ AV_CLASS_CATEGORY_INPUT,
+ AV_CLASS_CATEGORY_OUTPUT,
+ AV_CLASS_CATEGORY_MUXER,
+ AV_CLASS_CATEGORY_DEMUXER,
+ AV_CLASS_CATEGORY_ENCODER,
+ AV_CLASS_CATEGORY_DECODER,
+ AV_CLASS_CATEGORY_FILTER,
+ AV_CLASS_CATEGORY_BITSTREAM_FILTER,
+ AV_CLASS_CATEGORY_SWSCALER,
+ AV_CLASS_CATEGORY_SWRESAMPLER,
+ AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40,
+ AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_INPUT,
+ AV_CLASS_CATEGORY_NB ///< not part of ABI/API
+}AVClassCategory;
+
+#define AV_IS_INPUT_DEVICE(category) \
+ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT))
+
+#define AV_IS_OUTPUT_DEVICE(category) \
+ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT))
+
+struct AVOptionRanges;
+
+/**
+ * Describe the class of an AVClass context structure. That is an
+ * arbitrary struct of which the first field is a pointer to an
+ * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).
+ */
+typedef struct AVClass {
+ /**
+ * The name of the class; usually it is the same name as the
+ * context structure type to which the AVClass is associated.
+ */
+ const char* class_name;
+
+ /**
+ * A pointer to a function which returns the name of a context
+ * instance ctx associated with the class.
+ */
+ const char* (*item_name)(void* ctx);
+
+ /**
+ * a pointer to the first option specified in the class if any or NULL
+ *
+ * @see av_set_default_options()
+ */
+ const struct AVOption *option;
+
+ /**
+ * LIBAVUTIL_VERSION with which this structure was created.
+ * This is used to allow fields to be added without requiring major
+ * version bumps everywhere.
+ */
+
+ int version;
+
+ /**
+ * Offset in the structure where log_level_offset is stored.
+ * 0 means there is no such variable
+ */
+ int log_level_offset_offset;
+
+ /**
+ * Offset in the structure where a pointer to the parent context for
+ * logging is stored. For example a decoder could pass its AVCodecContext
+ * to eval as such a parent context, which an av_log() implementation
+ * could then leverage to display the parent context.
+ * The offset can be NULL.
+     * An offset of 0 means there is no such parent context.
+ int parent_log_context_offset;
+
+ /**
+     * Category used for visualization (like color).
+ * This is only set if the category is equal for all objects using this class.
+ * available since version (51 << 16 | 56 << 8 | 100)
+ */
+ AVClassCategory category;
+
+ /**
+ * Callback to return the category.
+ * available since version (51 << 16 | 59 << 8 | 100)
+ */
+ AVClassCategory (*get_category)(void* ctx);
+
+ /**
+ * Callback to return the supported/allowed ranges.
+ * available since version (52.12)
+ */
+ int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags);
+
+ /**
+ * Return next AVOptions-enabled child or NULL
+ */
+ void* (*child_next)(void *obj, void *prev);
+
+ /**
+ * Iterate over the AVClasses corresponding to potential AVOptions-enabled
+ * children.
+ *
+ * @param iter pointer to opaque iteration state. The caller must initialize
+ * *iter to NULL before the first call.
+ * @return AVClass for the next AVOptions-enabled child or NULL if there are
+ * no more such children.
+ *
+ * @note The difference between child_next and this is that child_next
+ * iterates over _already existing_ objects, while child_class_iterate
+ * iterates over _all possible_ children.
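+     *
+     * A typical iteration loop looks like (illustrative; cls is any AVClass):
+     * @code
+     * void *iter = NULL;
+     * const AVClass *child;
+     * while ((child = cls->child_class_iterate(&iter))) {
+     *     // inspect child->class_name, child->option, ...
+     * }
+     * @endcode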
+ */
+ const struct AVClass* (*child_class_iterate)(void **iter);
+} AVClass;
+
+/**
+ * @addtogroup lavu_log
+ *
+ * @{
+ *
+ * @defgroup lavu_log_constants Logging Constants
+ *
+ * @{
+ */
+
+/**
+ * Print no output.
+ */
+#define AV_LOG_QUIET -8
+
+/**
+ * Something went really wrong and we will crash now.
+ */
+#define AV_LOG_PANIC 0
+
+/**
+ * Something went wrong and recovery is not possible.
+ * For example, no header was found for a format which depends
+ * on headers or an illegal combination of parameters is used.
+ */
+#define AV_LOG_FATAL 8
+
+/**
+ * Something went wrong and cannot losslessly be recovered.
+ * However, not all future data is affected.
+ */
+#define AV_LOG_ERROR 16
+
+/**
+ * Something somehow does not look correct. This may or may not
+ * lead to problems. An example would be the use of '-vstrict -2'.
+ */
+#define AV_LOG_WARNING 24
+
+/**
+ * Standard information.
+ */
+#define AV_LOG_INFO 32
+
+/**
+ * Detailed information.
+ */
+#define AV_LOG_VERBOSE 40
+
+/**
+ * Stuff which is only useful for libav* developers.
+ */
+#define AV_LOG_DEBUG 48
+
+/**
+ * Extremely verbose debugging, useful for libav* development.
+ */
+#define AV_LOG_TRACE 56
+
+#define AV_LOG_MAX_OFFSET (AV_LOG_TRACE - AV_LOG_QUIET)
+
+/**
+ * @}
+ */
+
+/**
+ * Sets additional colors for extended debugging sessions.
+ * @code
+ av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n");
+ @endcode
+ * Requires 256-color terminal support. Using it outside of debugging sessions
+ * is not recommended.
+ */
+#define AV_LOG_C(x) ((x) << 8)
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
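+ *
+ * Illustrative call (the frame counter is a stand-in for caller data):
+ * @code
+ * av_log(NULL, AV_LOG_INFO, "processed %d frames\n", nb_frames);
+ * @endcode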
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct or NULL if general log.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ */
+void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4);
+
+/**
+ * Send the specified message to the log once with the initial_level and then with
+ * the subsequent_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
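+ *
+ * Illustrative use (avctx stands for a caller-provided logging context):
+ * @code
+ * static int warned;
+ * av_log_once(avctx, AV_LOG_WARNING, AV_LOG_DEBUG, &warned,
+ *             "this option is deprecated\n");
+ * @endcode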
+ * @see av_log
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct or NULL if general log.
+ * @param initial_level importance level of the message expressed using a @ref
+ *                  lavu_log_constants "Logging Constant" for the first occurrence.
+ * @param subsequent_level importance level of the message expressed using a @ref
+ *                  lavu_log_constants "Logging Constant" after the first occurrence.
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param state     a variable to keep track of whether a message has already
+ *                  been printed; this must be initialized to 0 before the first
+ *                  use. The same state must not be accessed by two threads
+ *                  simultaneously.
+ */
+void av_log_once(void* avcl, int initial_level, int subsequent_level, int *state, const char *fmt, ...) av_printf_format(5, 6);
+
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_vlog(void *avcl, int level, const char *fmt, va_list vl);
+
+/**
+ * Get the current log level
+ *
+ * @see lavu_log_constants
+ *
+ * @return Current log level
+ */
+int av_log_get_level(void);
+
+/**
+ * Set the log level
+ *
+ * @see lavu_log_constants
+ *
+ * @param level Logging level
+ */
+void av_log_set_level(int level);
+
+/**
+ * Set the logging callback
+ *
+ * @note The callback must be thread safe, even if the application does not use
+ * threads itself as some codecs are multithreaded.
+ *
+ * @see av_log_default_callback
+ *
+ * @param callback A logging function with a compatible signature.
+ */
+void av_log_set_callback(void (*callback)(void*, int, const char*, va_list));
+
+/**
+ * Default logging callback
+ *
+ * It prints the message to stderr, optionally colorizing it.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_log_default_callback(void *avcl, int level, const char *fmt,
+ va_list vl);
+
+/**
+ * Return the context name
+ *
+ * @param ctx The AVClass context
+ *
+ * @return The AVClass class_name
+ */
+const char* av_default_item_name(void* ctx);
+AVClassCategory av_default_get_category(void *ptr);
+
+/**
+ * Format a line of log the same way as the default callback.
+ * @param line buffer to receive the formatted line
+ * @param line_size size of the buffer
+ * @param print_prefix used to store whether the prefix must be printed;
+ * must point to a persistent integer initially set to 1
+ */
+void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl,
+ char *line, int line_size, int *print_prefix);
+
+/**
+ * Format a line of log the same way as the default callback.
+ * @param line buffer to receive the formatted line;
+ * may be NULL if line_size is 0
+ * @param line_size size of the buffer; at most line_size-1 characters will
+ * be written to the buffer, plus one null terminator
+ * @param print_prefix used to store whether the prefix must be printed;
+ * must point to a persistent integer initially set to 1
+ * @return Returns a negative value if an error occurred, otherwise returns
+ * the number of characters that would have been written for a
+ * sufficiently large buffer, not including the terminating null
+ * character. If the return value is not less than line_size, it means
+ * that the log message was truncated to fit the buffer.
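+ *
+ * Illustrative use inside a custom callback (obj, level, fmt and vl come from
+ * the callback's arguments):
+ * @code
+ * char buf[1024];
+ * int print_prefix = 1;
+ * int n = av_log_format_line2(obj, level, fmt, vl, buf, sizeof(buf), &print_prefix);
+ * if (n >= (int)sizeof(buf)) {
+ *     // the formatted message was truncated
+ * }
+ * @endcode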
+ */
+int av_log_format_line2(void *ptr, int level, const char *fmt, va_list vl,
+ char *line, int line_size, int *print_prefix);
+
+/**
+ * Skip repeated messages. This requires the user app to use av_log() instead of
+ * (f)printf, as the two would otherwise interfere and, with some bad luck, lead
+ * to "Last message repeated x times" messages below (f)printf messages.
+ * Also, to receive the final "last repeated" line, if any, the user app must
+ * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end.
+ */
+#define AV_LOG_SKIP_REPEATED 1
+
+/**
+ * Include the log severity in messages originating from codecs.
+ *
+ * Results in messages such as:
+ * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts
+ */
+#define AV_LOG_PRINT_LEVEL 2
+
+void av_log_set_flags(int arg);
+int av_log_get_flags(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LOG_H */
diff --git a/media/ffvpx/libavutil/log2_tab.c b/media/ffvpx/libavutil/log2_tab.c
new file mode 100644
index 0000000000..0dbf07d74c
--- /dev/null
+++ b/media/ffvpx/libavutil/log2_tab.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2003-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+const uint8_t ff_log2_tab[256]={
+ 0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
+ 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
+};
diff --git a/media/ffvpx/libavutil/macros.h b/media/ffvpx/libavutil/macros.h
new file mode 100644
index 0000000000..2a7567c3ea
--- /dev/null
+++ b/media/ffvpx/libavutil/macros.h
@@ -0,0 +1,80 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Utility Preprocessor macros
+ */
+
+#ifndef AVUTIL_MACROS_H
+#define AVUTIL_MACROS_H
+
+#include "libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+# define AV_NE(be, le) (be)
+#else
+# define AV_NE(be, le) (le)
+#endif
+
+/**
+ * Comparator.
+ * For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0
+ * if x == y. This is useful for instance in a qsort comparator callback.
+ * Furthermore, compilers are able to optimize this to branchless code, and
+ * there is no risk of overflow with signed types.
+ * As with many macros, this evaluates its argument multiple times, it thus
+ * must not have a side-effect.
+ */
+#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y)))
+
+#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
+
+#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+
+#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
+#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
+
+/**
+ * @addtogroup preproc_misc Preprocessor String Macros
+ *
+ * String manipulation macros
+ *
+ * @{
+ */
+
+#define AV_STRINGIFY(s) AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define AV_GLUE(a, b) a ## b
+#define AV_JOIN(a, b) AV_GLUE(a, b)
+
+/**
+ * @}
+ */
+
+#define AV_PRAGMA(s) _Pragma(#s)
+
+#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))
+
+#endif /* AVUTIL_MACROS_H */
diff --git a/media/ffvpx/libavutil/mastering_display_metadata.c b/media/ffvpx/libavutil/mastering_display_metadata.c
new file mode 100644
index 0000000000..6069347617
--- /dev/null
+++ b/media/ffvpx/libavutil/mastering_display_metadata.c
@@ -0,0 +1,66 @@
+/**
+ * Copyright (c) 2016 Neil Birkbeck <neil.birkbeck@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "mastering_display_metadata.h"
+#include "mem.h"
+
+AVMasteringDisplayMetadata *av_mastering_display_metadata_alloc(void)
+{
+ return av_mallocz(sizeof(AVMasteringDisplayMetadata));
+}
+
+AVMasteringDisplayMetadata *av_mastering_display_metadata_create_side_data(AVFrame *frame)
+{
+ AVFrameSideData *side_data = av_frame_new_side_data(frame,
+ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
+ sizeof(AVMasteringDisplayMetadata));
+ if (!side_data)
+ return NULL;
+
+ memset(side_data->data, 0, sizeof(AVMasteringDisplayMetadata));
+
+ return (AVMasteringDisplayMetadata *)side_data->data;
+}
+
+AVContentLightMetadata *av_content_light_metadata_alloc(size_t *size)
+{
+ AVContentLightMetadata *metadata = av_mallocz(sizeof(AVContentLightMetadata));
+
+ if (size)
+ *size = sizeof(*metadata);
+
+ return metadata;
+}
+
+AVContentLightMetadata *av_content_light_metadata_create_side_data(AVFrame *frame)
+{
+ AVFrameSideData *side_data = av_frame_new_side_data(frame,
+ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
+ sizeof(AVContentLightMetadata));
+ if (!side_data)
+ return NULL;
+
+ memset(side_data->data, 0, sizeof(AVContentLightMetadata));
+
+ return (AVContentLightMetadata *)side_data->data;
+}
diff --git a/media/ffvpx/libavutil/mastering_display_metadata.h b/media/ffvpx/libavutil/mastering_display_metadata.h
new file mode 100644
index 0000000000..c23b07c3cd
--- /dev/null
+++ b/media/ffvpx/libavutil/mastering_display_metadata.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2016 Neil Birkbeck <neil.birkbeck@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MASTERING_DISPLAY_METADATA_H
+#define AVUTIL_MASTERING_DISPLAY_METADATA_H
+
+#include "frame.h"
+#include "rational.h"
+
+
+/**
+ * Mastering display metadata capable of representing the color volume of
+ * the display used to master the content (SMPTE 2086:2014).
+ *
+ * To be used as payload of a AVFrameSideData or AVPacketSideData with the
+ * appropriate type.
+ *
+ * @note The struct should be allocated with av_mastering_display_metadata_alloc()
+ * and its size is not a part of the public ABI.
+ */
+typedef struct AVMasteringDisplayMetadata {
+ /**
+ * CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
+ */
+ AVRational display_primaries[3][2];
+
+ /**
+ * CIE 1931 xy chromaticity coords of white point.
+ */
+ AVRational white_point[2];
+
+ /**
+ * Min luminance of mastering display (cd/m^2).
+ */
+ AVRational min_luminance;
+
+ /**
+ * Max luminance of mastering display (cd/m^2).
+ */
+ AVRational max_luminance;
+
+ /**
+ * Flag indicating whether the display primaries (and white point) are set.
+ */
+ int has_primaries;
+
+ /**
+     * Flag indicating whether the luminance values (min_ and max_) have been set.
+ */
+ int has_luminance;
+
+} AVMasteringDisplayMetadata;
+
+/**
+ * Allocate an AVMasteringDisplayMetadata structure and set its fields to
+ * default values. The resulting struct can be freed using av_freep().
+ *
+ * @return An AVMasteringDisplayMetadata filled with default values or NULL
+ * on failure.
+ */
+AVMasteringDisplayMetadata *av_mastering_display_metadata_alloc(void);
+
+/**
+ * Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
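+ *
+ * Illustrative use (the luminance values are examples only):
+ * @code
+ * AVMasteringDisplayMetadata *mdm =
+ *     av_mastering_display_metadata_create_side_data(frame);
+ * if (mdm) {
+ *     mdm->min_luminance = av_make_q(50, 10000); // 0.005 cd/m^2
+ *     mdm->max_luminance = av_make_q(1000, 1);   // 1000 cd/m^2
+ *     mdm->has_luminance = 1;
+ * }
+ * @endcode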
+ *
+ * @param frame The frame which side data is added to.
+ *
+ * @return The AVMasteringDisplayMetadata structure to be filled by caller.
+ */
+AVMasteringDisplayMetadata *av_mastering_display_metadata_create_side_data(AVFrame *frame);
+
+/**
+ * Content light level needed to transmit HDR over HDMI (CTA-861.3).
+ *
+ * To be used as payload of a AVFrameSideData or AVPacketSideData with the
+ * appropriate type.
+ *
+ * @note The struct should be allocated with av_content_light_metadata_alloc()
+ * and its size is not a part of the public ABI.
+ */
+typedef struct AVContentLightMetadata {
+ /**
+ * Max content light level (cd/m^2).
+ */
+ unsigned MaxCLL;
+
+ /**
+ * Max average light level per frame (cd/m^2).
+ */
+ unsigned MaxFALL;
+} AVContentLightMetadata;
+
+/**
+ * Allocate an AVContentLightMetadata structure and set its fields to
+ * default values. The resulting struct can be freed using av_freep().
+ *
+ * @return An AVContentLightMetadata filled with default values or NULL
+ * on failure.
+ */
+AVContentLightMetadata *av_content_light_metadata_alloc(size_t *size);
+
+/**
+ * Allocate a complete AVContentLightMetadata and add it to the frame.
+ *
+ * @param frame The frame which side data is added to.
+ *
+ * @return The AVContentLightMetadata structure to be filled by caller.
+ */
+AVContentLightMetadata *av_content_light_metadata_create_side_data(AVFrame *frame);
+
+#endif /* AVUTIL_MASTERING_DISPLAY_METADATA_H */
diff --git a/media/ffvpx/libavutil/mathematics.c b/media/ffvpx/libavutil/mathematics.c
new file mode 100644
index 0000000000..b878317d63
--- /dev/null
+++ b/media/ffvpx/libavutil/mathematics.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * miscellaneous math routines and tables
+ */
+
+#include <stdint.h>
+#include <limits.h>
+
+#include "avutil.h"
+#include "mathematics.h"
+#include "libavutil/intmath.h"
+#include "libavutil/common.h"
+#include "avassert.h"
+
+/* Stein's binary GCD algorithm:
+ * https://en.wikipedia.org/wiki/Binary_GCD_algorithm */
+int64_t av_gcd(int64_t a, int64_t b) {
+ int za, zb, k;
+ int64_t u, v;
+ if (a == 0)
+ return b;
+ if (b == 0)
+ return a;
+ za = ff_ctzll(a);
+ zb = ff_ctzll(b);
+ k = FFMIN(za, zb);
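+    /* 2^k is the largest power of two dividing both |a| and |b| */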
+ u = llabs(a >> za);
+ v = llabs(b >> zb);
+ while (u != v) {
+ if (u > v)
+ FFSWAP(int64_t, v, u);
+ v -= u;
+ v >>= ff_ctzll(v);
+ }
+ return (uint64_t)u << k;
+}
+
+int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd)
+{
+ int64_t r = 0;
+ av_assert2(c > 0);
+ av_assert2(b >=0);
+ av_assert2((unsigned)(rnd&~AV_ROUND_PASS_MINMAX)<=5 && (rnd&~AV_ROUND_PASS_MINMAX)!=4);
+
+ if (c <= 0 || b < 0 || !((unsigned)(rnd&~AV_ROUND_PASS_MINMAX)<=5 && (rnd&~AV_ROUND_PASS_MINMAX)!=4))
+ return INT64_MIN;
+
+ if (rnd & AV_ROUND_PASS_MINMAX) {
+ if (a == INT64_MIN || a == INT64_MAX)
+ return a;
+ rnd -= AV_ROUND_PASS_MINMAX;
+ }
+
+ if (a < 0)
+ return -(uint64_t)av_rescale_rnd(-FFMAX(a, -INT64_MAX), b, c, rnd ^ ((rnd >> 1) & 1));
+
+ if (rnd == AV_ROUND_NEAR_INF)
+ r = c / 2;
+ else if (rnd & 1)
+ r = c - 1;
+
+ if (b <= INT_MAX && c <= INT_MAX) {
+ if (a <= INT_MAX)
+ return (a * b + r) / c;
+ else {
+ int64_t ad = a / c;
+ int64_t a2 = (a % c * b + r) / c;
+ if (ad >= INT32_MAX && b && ad > (INT64_MAX - a2) / b)
+ return INT64_MIN;
+ return ad * b + a2;
+ }
+ } else {
+#if 1
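+        /* Multiply a*b as a full 128-bit product in 32-bit limbs (a1:a0), add
+         * the rounding term r, then divide by c with a bit-by-bit long
+         * division, accumulating the 64-bit quotient in t1. */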
+ uint64_t a0 = a & 0xFFFFFFFF;
+ uint64_t a1 = a >> 32;
+ uint64_t b0 = b & 0xFFFFFFFF;
+ uint64_t b1 = b >> 32;
+ uint64_t t1 = a0 * b1 + a1 * b0;
+ uint64_t t1a = t1 << 32;
+ int i;
+
+ a0 = a0 * b0 + t1a;
+ a1 = a1 * b1 + (t1 >> 32) + (a0 < t1a);
+ a0 += r;
+ a1 += a0 < r;
+
+ for (i = 63; i >= 0; i--) {
+ a1 += a1 + ((a0 >> i) & 1);
+ t1 += t1;
+ if (c <= a1) {
+ a1 -= c;
+ t1++;
+ }
+ }
+ if (t1 > INT64_MAX)
+ return INT64_MIN;
+ return t1;
+#else
+ /* reference code doing (a*b + r) / c, requires libavutil/integer.h */
+ AVInteger ai;
+ ai = av_mul_i(av_int2i(a), av_int2i(b));
+ ai = av_add_i(ai, av_int2i(r));
+
+ return av_i2int(av_div_i(ai, av_int2i(c)));
+#endif
+ }
+}
+
+int64_t av_rescale(int64_t a, int64_t b, int64_t c)
+{
+ return av_rescale_rnd(a, b, c, AV_ROUND_NEAR_INF);
+}
+
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+ enum AVRounding rnd)
+{
+ int64_t b = bq.num * (int64_t)cq.den;
+ int64_t c = cq.num * (int64_t)bq.den;
+ return av_rescale_rnd(a, b, c, rnd);
+}
+
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
+{
+ return av_rescale_q_rnd(a, bq, cq, AV_ROUND_NEAR_INF);
+}
+
+int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
+{
+ int64_t a = tb_a.num * (int64_t)tb_b.den;
+ int64_t b = tb_b.num * (int64_t)tb_a.den;
+ if ((FFABS64U(ts_a)|a|FFABS64U(ts_b)|b) <= INT_MAX)
+ return (ts_a*a > ts_b*b) - (ts_a*a < ts_b*b);
+ if (av_rescale_rnd(ts_a, a, b, AV_ROUND_DOWN) < ts_b)
+ return -1;
+ if (av_rescale_rnd(ts_b, b, a, AV_ROUND_DOWN) < ts_a)
+ return 1;
+ return 0;
+}
+
+int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod)
+{
+ int64_t c = (a - b) & (mod - 1);
+ if (c > (mod >> 1))
+ c -= mod;
+ return c;
+}
+
+int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb){
+ int64_t a, b, this;
+
+ av_assert0(in_ts != AV_NOPTS_VALUE);
+ av_assert0(duration >= 0);
+
+ if (*last == AV_NOPTS_VALUE || !duration || in_tb.num*(int64_t)out_tb.den <= out_tb.num*(int64_t)in_tb.den) {
+simple_round:
+ *last = av_rescale_q(in_ts, in_tb, fs_tb) + duration;
+ return av_rescale_q(in_ts, in_tb, out_tb);
+ }
+
+ a = av_rescale_q_rnd(2*in_ts-1, in_tb, fs_tb, AV_ROUND_DOWN) >>1;
+ b = (av_rescale_q_rnd(2*in_ts+1, in_tb, fs_tb, AV_ROUND_UP )+1)>>1;
+ if (*last < 2*a - b || *last > 2*b - a)
+ goto simple_round;
+
+ this = av_clip64(*last, a, b);
+ *last = this + duration;
+
+ return av_rescale_q(this, fs_tb, out_tb);
+}
+
+int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc)
+{
+ int64_t m, d;
+
+ if (inc != 1)
+ inc_tb = av_mul_q(inc_tb, (AVRational) {inc, 1});
+
+ m = inc_tb.num * (int64_t)ts_tb.den;
+ d = inc_tb.den * (int64_t)ts_tb.num;
+
+ if (m % d == 0 && ts <= INT64_MAX - m / d)
+ return ts + m / d;
+ if (m < d)
+ return ts;
+
+ {
+ int64_t old = av_rescale_q(ts, ts_tb, inc_tb);
+ int64_t old_ts = av_rescale_q(old, inc_tb, ts_tb);
+
+ if (old == INT64_MAX || old == AV_NOPTS_VALUE || old_ts == AV_NOPTS_VALUE)
+ return ts;
+
+ return av_sat_add64(av_rescale_q(old + 1, inc_tb, ts_tb), ts - old_ts);
+ }
+}
diff --git a/media/ffvpx/libavutil/mathematics.h b/media/ffvpx/libavutil/mathematics.h
new file mode 100644
index 0000000000..e4aff1e973
--- /dev/null
+++ b/media/ffvpx/libavutil/mathematics.h
@@ -0,0 +1,245 @@
+/*
+ * copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @addtogroup lavu_math
+ * Mathematical utilities for working with timestamp and time base.
+ */
+
+#ifndef AVUTIL_MATHEMATICS_H
+#define AVUTIL_MATHEMATICS_H
+
+#include <stdint.h>
+#include <math.h>
+#include "attributes.h"
+#include "rational.h"
+#include "intfloat.h"
+
+#ifndef M_E
+#define M_E 2.7182818284590452354 /* e */
+#endif
+#ifndef M_LN2
+#define M_LN2 0.69314718055994530942 /* log_e 2 */
+#endif
+#ifndef M_LN10
+#define M_LN10 2.30258509299404568402 /* log_e 10 */
+#endif
+#ifndef M_LOG2_10
+#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */
+#endif
+#ifndef M_PHI
+#define M_PHI 1.61803398874989484820 /* phi / golden ratio */
+#endif
+#ifndef M_PI
+#define M_PI 3.14159265358979323846 /* pi */
+#endif
+#ifndef M_PI_2
+#define M_PI_2 1.57079632679489661923 /* pi/2 */
+#endif
+#ifndef M_SQRT1_2
+#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */
+#endif
+#ifndef M_SQRT2
+#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
+#endif
+#ifndef NAN
+#define NAN av_int2float(0x7fc00000)
+#endif
+#ifndef INFINITY
+#define INFINITY av_int2float(0x7f800000)
+#endif
+
+/**
+ * @addtogroup lavu_math
+ *
+ * @{
+ */
+
+/**
+ * Rounding methods.
+ */
+enum AVRounding {
+ AV_ROUND_ZERO = 0, ///< Round toward zero.
+ AV_ROUND_INF = 1, ///< Round away from zero.
+ AV_ROUND_DOWN = 2, ///< Round toward -infinity.
+ AV_ROUND_UP = 3, ///< Round toward +infinity.
+ AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero.
+ /**
+ * Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through
+ * unchanged, avoiding special cases for #AV_NOPTS_VALUE.
+ *
+ * Unlike other values of the enumeration AVRounding, this value is a
+ * bitmask that must be used in conjunction with another value of the
+ * enumeration through a bitwise OR, in order to set behavior for normal
+ * cases.
+ *
+ * @code{.c}
+ * av_rescale_rnd(3, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
+ * // Rescaling 3:
+ * // Calculating 3 * 1 / 2
+ * // 3 / 2 is rounded up to 2
+ * // => 2
+ *
+ * av_rescale_rnd(AV_NOPTS_VALUE, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
+ * // Rescaling AV_NOPTS_VALUE:
+ * // AV_NOPTS_VALUE == INT64_MIN
+ * // AV_NOPTS_VALUE is passed through
+ * // => AV_NOPTS_VALUE
+ * @endcode
+ */
+ AV_ROUND_PASS_MINMAX = 8192,
+};
+
+/**
+ * Compute the greatest common divisor of two integer operands.
+ *
+ * @param a Operand
+ * @param b Operand
+ * @return GCD of a and b up to sign; if a >= 0 and b >= 0, return value is >= 0;
+ * if a == 0 and b == 0, returns 0.
+ */
+int64_t av_const av_gcd(int64_t a, int64_t b);
+
+/**
+ * Rescale a 64-bit integer with rounding to nearest.
+ *
+ * The operation is mathematically equivalent to `a * b / c`, but writing that
+ * directly can overflow.
+ *
+ * This function is equivalent to av_rescale_rnd() with #AV_ROUND_NEAR_INF.
+ *
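+ * For example, converting a timestamp in 90 kHz ticks to microseconds
+ * (illustrative):
+ * @code
+ * int64_t usec = av_rescale(pts, 1000000, 90000);
+ * @endcode
+ *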
+ * @see av_rescale_rnd(), av_rescale_q(), av_rescale_q_rnd()
+ */
+int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
+
+/**
+ * Rescale a 64-bit integer with specified rounding.
+ *
+ * The operation is mathematically equivalent to `a * b / c`, but writing that
+ * directly can overflow, and does not support different rounding methods.
+ * If the result is not representable then INT64_MIN is returned.
+ *
+ * @see av_rescale(), av_rescale_q(), av_rescale_q_rnd()
+ */
+int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers.
+ *
+ * The operation is mathematically equivalent to `a * bq / cq`.
+ *
+ * This function is equivalent to av_rescale_q_rnd() with #AV_ROUND_NEAR_INF.
+ *
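+ * For example, converting a timestamp from a 1/25 time base to a 1/90000 time
+ * base (illustrative):
+ * @code
+ * int64_t ts90k = av_rescale_q(ts, (AVRational){1, 25}, (AVRational){1, 90000});
+ * @endcode
+ *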
+ * @see av_rescale(), av_rescale_rnd(), av_rescale_q_rnd()
+ */
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers with specified rounding.
+ *
+ * The operation is mathematically equivalent to `a * bq / cq`.
+ *
+ * @see av_rescale(), av_rescale_rnd(), av_rescale_q()
+ */
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+ enum AVRounding rnd) av_const;
+
+/**
+ * Compare two timestamps each in its own time base.
+ *
+ * @return One of the following values:
+ * - -1 if `ts_a` is before `ts_b`
+ * - 1 if `ts_a` is after `ts_b`
+ * - 0 if they represent the same position
+ *
+ * @warning
+ * The result of the function is undefined if one of the timestamps is outside
+ * the `int64_t` range when represented in the other's timebase.
+ */
+int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
+
+/**
+ * Compare the remainders of two integer operands divided by a common divisor.
+ *
+ * In other words, compare the least significant `log2(mod)` bits of integers
+ * `a` and `b`.
+ *
+ * @code{.c}
+ * av_compare_mod(0x11, 0x02, 0x10) < 0 // since 0x11 % 0x10 (0x1) < 0x02 % 0x10 (0x2)
+ * av_compare_mod(0x11, 0x02, 0x20) > 0 // since 0x11 % 0x20 (0x11) > 0x02 % 0x20 (0x02)
+ * @endcode
+ *
+ * @param a Operand
+ * @param b Operand
+ * @param mod Divisor; must be a power of 2
+ * @return
+ * - a negative value if `a % mod < b % mod`
+ * - a positive value if `a % mod > b % mod`
+ * - zero if `a % mod == b % mod`
+ */
+int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
+
+/**
+ * Rescale a timestamp while preserving known durations.
+ *
+ * This function is designed to be called per audio packet to scale the input
+ * timestamp to a different time base. Compared to a simple av_rescale_q()
+ * call, this function is robust against possible inconsistent frame durations.
+ *
+ * The `last` parameter is a state variable that must be preserved for all
+ * subsequent calls for the same stream. For the first call, `*last` should be
+ * initialized to #AV_NOPTS_VALUE.
+ *
+ * @param[in] in_tb Input time base
+ * @param[in] in_ts Input timestamp
+ * @param[in] fs_tb Duration time base; typically this is finer-grained
+ * (greater) than `in_tb` and `out_tb`
+ * @param[in] duration Duration till the next call to this function (i.e.
+ * duration of the current packet/frame)
+ * @param[in,out] last Pointer to a timestamp expressed in terms of
+ * `fs_tb`, acting as a state variable
+ * @param[in]     out_tb   Output time base
+ * @return        Timestamp expressed in terms of `out_tb`
+ *
+ * @note In the context of this function, "duration" is in terms of samples,
+ *       not seconds.
+ */
+int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb);
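+
+/* A minimal usage sketch. The packet array (pkts, nb_pkts), its ts/ts_out/
+ * nb_samples fields, the in_tb/out_tb time bases and the 48000 Hz sample rate
+ * are all caller-defined assumptions, not part of the API.
+ *
+ * @code{.c}
+ * int64_t last = AV_NOPTS_VALUE;            // state, one per stream
+ * AVRational fs_tb = { 1, 48000 };          // duration time base (samples)
+ * for (int i = 0; i < nb_pkts; i++)
+ *     pkts[i].ts_out = av_rescale_delta(in_tb, pkts[i].ts, fs_tb,
+ *                                       pkts[i].nb_samples, &last, out_tb);
+ * @endcode
+ */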
+
+/**
+ * Add a value to a timestamp.
+ *
+ * This function guarantees that when the same value is repeatedly added, no
+ * accumulation of rounding errors occurs.
+ *
+ * @param[in] ts Input timestamp
+ * @param[in] ts_tb Input timestamp time base
+ * @param[in] inc Value to be added
+ * @param[in] inc_tb Time base of `inc`
+ * @return Sum of `ts` and `inc`, expressed in terms of `ts_tb`
+ */
+int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc);
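+
+/* A minimal sketch: repeatedly advancing a millisecond timestamp by a frame
+ * duration of 1/30 s. A naive `ts += 33` would drift by roughly 100 ms after
+ * 300 frames; av_add_stable() keeps the result at (or within a tick of) the
+ * exact 10 s.
+ *
+ * @code{.c}
+ * AVRational ts_tb  = { 1, 1000 };
+ * AVRational inc_tb = { 1, 30 };
+ * int64_t ts = 0;
+ * for (int i = 0; i < 300; i++)
+ *     ts = av_add_stable(ts_tb, ts, inc_tb, 1);
+ * // ts is 10000 (10 s) up to at most one tick of rounding
+ * @endcode
+ */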
+
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MATHEMATICS_H */
diff --git a/media/ffvpx/libavutil/mem.c b/media/ffvpx/libavutil/mem.c
new file mode 100644
index 0000000000..36b8940a0c
--- /dev/null
+++ b/media/ffvpx/libavutil/mem.c
@@ -0,0 +1,568 @@
+/*
+ * default memory allocator for libavutil
+ * Copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * default memory allocator for libavutil
+ */
+
+#define _XOPEN_SOURCE 600
+
+#include "config.h"
+
+#include <limits.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdatomic.h>
+#include <string.h>
+#if HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
+#include "attributes.h"
+#include "avassert.h"
+#include "dynarray.h"
+#include "error.h"
+#include "internal.h"
+#include "intreadwrite.h"
+#include "macros.h"
+#include "mem.h"
+
+#ifdef MALLOC_PREFIX
+
+#define malloc AV_JOIN(MALLOC_PREFIX, malloc)
+#define memalign AV_JOIN(MALLOC_PREFIX, memalign)
+#define posix_memalign AV_JOIN(MALLOC_PREFIX, posix_memalign)
+#define realloc AV_JOIN(MALLOC_PREFIX, realloc)
+#define free AV_JOIN(MALLOC_PREFIX, free)
+
+void *malloc(size_t size);
+void *memalign(size_t align, size_t size);
+int posix_memalign(void **ptr, size_t align, size_t size);
+void *realloc(void *ptr, size_t size);
+void free(void *ptr);
+
+#endif /* MALLOC_PREFIX */
+
+#define ALIGN (HAVE_AVX512 ? 64 : (HAVE_AVX ? 32 : 16))
+
+/* NOTE: if you want to override these functions with your own
+ * implementations (not recommended) you have to link libav* as
+ * dynamic libraries and remove -Wl,-Bsymbolic from the linker flags.
+ * Note that this will cost performance. */
+
+static atomic_size_t max_alloc_size = ATOMIC_VAR_INIT(INT_MAX);
+
+void av_max_alloc(size_t max){
+ atomic_store_explicit(&max_alloc_size, max, memory_order_relaxed);
+}
+
+static int size_mult(size_t a, size_t b, size_t *r)
+{
+ size_t t;
+
+#if (!defined(__INTEL_COMPILER) && AV_GCC_VERSION_AT_LEAST(5,1)) || AV_HAS_BUILTIN(__builtin_mul_overflow)
+ if (__builtin_mul_overflow(a, b, &t))
+ return AVERROR(EINVAL);
+#else
+ t = a * b;
+ /* Hack inspired from glibc: don't try the division if nelem and elsize
+ * are both less than sqrt(SIZE_MAX). */
+ if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b)
+ return AVERROR(EINVAL);
+#endif
+ *r = t;
+ return 0;
+}
+
+void *av_malloc(size_t size)
+{
+ void *ptr = NULL;
+
+ if (size > atomic_load_explicit(&max_alloc_size, memory_order_relaxed))
+ return NULL;
+
+#if HAVE_POSIX_MEMALIGN
+ if (size) //OS X on SDK 10.6 has a broken posix_memalign implementation
+ if (posix_memalign(&ptr, ALIGN, size))
+ ptr = NULL;
+#elif HAVE_ALIGNED_MALLOC
+ ptr = _aligned_malloc(size, ALIGN);
+#elif HAVE_MEMALIGN
+#ifndef __DJGPP__
+ ptr = memalign(ALIGN, size);
+#else
+ ptr = memalign(size, ALIGN);
+#endif
+ /* Why 64?
+ * Indeed, we should align it:
+ * on 4 for 386
+ * on 16 for 486
+ * on 32 for 586, PPro - K6-III
+ * on 64 for K7 (maybe for P3 too).
+ * Because L1 and L2 caches are aligned on those values.
+ * But I don't want to code such logic here!
+ */
+ /* Why 32?
+ * For AVX ASM. SSE / NEON needs only 16.
+ * Why not larger? Because I did not see a difference in benchmarks ...
+ */
+ /* benchmarks with P3
+ * memalign(64) + 1 3071, 3051, 3032
+ * memalign(64) + 2 3051, 3032, 3041
+ * memalign(64) + 4 2911, 2896, 2915
+ * memalign(64) + 8 2545, 2554, 2550
+ * memalign(64) + 16 2543, 2572, 2563
+ * memalign(64) + 32 2546, 2545, 2571
+ * memalign(64) + 64 2570, 2533, 2558
+ *
+ * BTW, malloc seems to do 8-byte alignment by default here.
+ */
+#else
+ ptr = malloc(size);
+#endif
+ if(!ptr && !size) {
+ size = 1;
+ ptr= av_malloc(1);
+ }
+#if CONFIG_MEMORY_POISONING
+ if (ptr)
+ memset(ptr, FF_MEMORY_POISON, size);
+#endif
+ return ptr;
+}
+
+void *av_realloc(void *ptr, size_t size)
+{
+ void *ret;
+ if (size > atomic_load_explicit(&max_alloc_size, memory_order_relaxed))
+ return NULL;
+
+#if HAVE_ALIGNED_MALLOC
+ ret = _aligned_realloc(ptr, size + !size, ALIGN);
+#else
+ ret = realloc(ptr, size + !size);
+#endif
+#if CONFIG_MEMORY_POISONING
+ if (ret && !ptr)
+ memset(ret, FF_MEMORY_POISON, size);
+#endif
+ return ret;
+}
+
+void *av_realloc_f(void *ptr, size_t nelem, size_t elsize)
+{
+ size_t size;
+ void *r;
+
+ if (size_mult(elsize, nelem, &size)) {
+ av_free(ptr);
+ return NULL;
+ }
+ r = av_realloc(ptr, size);
+ if (!r)
+ av_free(ptr);
+ return r;
+}
+
+int av_reallocp(void *ptr, size_t size)
+{
+ void *val;
+
+ if (!size) {
+ av_freep(ptr);
+ return 0;
+ }
+
+ memcpy(&val, ptr, sizeof(val));
+ val = av_realloc(val, size);
+
+ if (!val) {
+ av_freep(ptr);
+ return AVERROR(ENOMEM);
+ }
+
+ memcpy(ptr, &val, sizeof(val));
+ return 0;
+}
+
+void *av_malloc_array(size_t nmemb, size_t size)
+{
+ size_t result;
+ if (size_mult(nmemb, size, &result) < 0)
+ return NULL;
+ return av_malloc(result);
+}
+
+void *av_realloc_array(void *ptr, size_t nmemb, size_t size)
+{
+ size_t result;
+ if (size_mult(nmemb, size, &result) < 0)
+ return NULL;
+ return av_realloc(ptr, result);
+}
+
+int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
+{
+ void *val;
+
+ memcpy(&val, ptr, sizeof(val));
+ val = av_realloc_f(val, nmemb, size);
+ memcpy(ptr, &val, sizeof(val));
+ if (!val && nmemb && size)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+void av_free(void *ptr)
+{
+#if HAVE_ALIGNED_MALLOC
+ _aligned_free(ptr);
+#else
+ free(ptr);
+#endif
+}
+
+void av_freep(void *arg)
+{
+ void *val;
+
+ memcpy(&val, arg, sizeof(val));
+ memcpy(arg, &(void *){ NULL }, sizeof(val));
+ av_free(val);
+}
+
+void *av_mallocz(size_t size)
+{
+ void *ptr = av_malloc(size);
+ if (ptr)
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+void *av_calloc(size_t nmemb, size_t size)
+{
+ size_t result;
+ if (size_mult(nmemb, size, &result) < 0)
+ return NULL;
+ return av_mallocz(result);
+}
+
+char *av_strdup(const char *s)
+{
+ char *ptr = NULL;
+ if (s) {
+ size_t len = strlen(s) + 1;
+ ptr = av_realloc(NULL, len);
+ if (ptr)
+ memcpy(ptr, s, len);
+ }
+ return ptr;
+}
+
+char *av_strndup(const char *s, size_t len)
+{
+ char *ret = NULL, *end;
+
+ if (!s)
+ return NULL;
+
+ end = memchr(s, 0, len);
+ if (end)
+ len = end - s;
+
+ ret = av_realloc(NULL, len + 1);
+ if (!ret)
+ return NULL;
+
+ memcpy(ret, s, len);
+ ret[len] = 0;
+ return ret;
+}
+
+void *av_memdup(const void *p, size_t size)
+{
+ void *ptr = NULL;
+ if (p) {
+ ptr = av_malloc(size);
+ if (ptr)
+ memcpy(ptr, p, size);
+ }
+ return ptr;
+}
+
+int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
+{
+ void **tab;
+ memcpy(&tab, tab_ptr, sizeof(tab));
+
+ FF_DYNARRAY_ADD(INT_MAX, sizeof(*tab), tab, *nb_ptr, {
+ tab[*nb_ptr] = elem;
+ memcpy(tab_ptr, &tab, sizeof(tab));
+ }, {
+ return AVERROR(ENOMEM);
+ });
+ return 0;
+}
+
+void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem)
+{
+ void **tab;
+ memcpy(&tab, tab_ptr, sizeof(tab));
+
+ FF_DYNARRAY_ADD(INT_MAX, sizeof(*tab), tab, *nb_ptr, {
+ tab[*nb_ptr] = elem;
+ memcpy(tab_ptr, &tab, sizeof(tab));
+ }, {
+ *nb_ptr = 0;
+ av_freep(tab_ptr);
+ });
+}
+
+void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,
+ const uint8_t *elem_data)
+{
+ uint8_t *tab_elem_data = NULL;
+
+ FF_DYNARRAY_ADD(INT_MAX, elem_size, *tab_ptr, *nb_ptr, {
+ tab_elem_data = (uint8_t *)*tab_ptr + (*nb_ptr) * elem_size;
+ if (elem_data)
+ memcpy(tab_elem_data, elem_data, elem_size);
+ else if (CONFIG_MEMORY_POISONING)
+ memset(tab_elem_data, FF_MEMORY_POISON, elem_size);
+ }, {
+ av_freep(tab_ptr);
+ *nb_ptr = 0;
+ });
+ return tab_elem_data;
+}
+
+static void fill16(uint8_t *dst, int len)
+{
+ uint32_t v = AV_RN16(dst - 2);
+
+ v |= v << 16;
+
+ while (len >= 4) {
+ AV_WN32(dst, v);
+ dst += 4;
+ len -= 4;
+ }
+
+ while (len--) {
+ *dst = dst[-2];
+ dst++;
+ }
+}
+
+static void fill24(uint8_t *dst, int len)
+{
+#if HAVE_BIGENDIAN
+ uint32_t v = AV_RB24(dst - 3);
+ uint32_t a = v << 8 | v >> 16;
+ uint32_t b = v << 16 | v >> 8;
+ uint32_t c = v << 24 | v;
+#else
+ uint32_t v = AV_RL24(dst - 3);
+ uint32_t a = v | v << 24;
+ uint32_t b = v >> 8 | v << 16;
+ uint32_t c = v >> 16 | v << 8;
+#endif
+
+ while (len >= 12) {
+ AV_WN32(dst, a);
+ AV_WN32(dst + 4, b);
+ AV_WN32(dst + 8, c);
+ dst += 12;
+ len -= 12;
+ }
+
+ if (len >= 4) {
+ AV_WN32(dst, a);
+ dst += 4;
+ len -= 4;
+ }
+
+ if (len >= 4) {
+ AV_WN32(dst, b);
+ dst += 4;
+ len -= 4;
+ }
+
+ while (len--) {
+ *dst = dst[-3];
+ dst++;
+ }
+}
+
+static void fill32(uint8_t *dst, int len)
+{
+ uint32_t v = AV_RN32(dst - 4);
+
+#if HAVE_FAST_64BIT
+ uint64_t v2= v + ((uint64_t)v<<32);
+ while (len >= 32) {
+ AV_WN64(dst , v2);
+ AV_WN64(dst+ 8, v2);
+ AV_WN64(dst+16, v2);
+ AV_WN64(dst+24, v2);
+ dst += 32;
+ len -= 32;
+ }
+#endif
+
+ while (len >= 4) {
+ AV_WN32(dst, v);
+ dst += 4;
+ len -= 4;
+ }
+
+ while (len--) {
+ *dst = dst[-4];
+ dst++;
+ }
+}
+
+void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
+{
+ const uint8_t *src = &dst[-back];
+ if (!back)
+ return;
+
+ if (back == 1) {
+ memset(dst, *src, cnt);
+ } else if (back == 2) {
+ fill16(dst, cnt);
+ } else if (back == 3) {
+ fill24(dst, cnt);
+ } else if (back == 4) {
+ fill32(dst, cnt);
+ } else {
+ if (cnt >= 16) {
+ int blocklen = back;
+ while (cnt > blocklen) {
+ memcpy(dst, src, blocklen);
+ dst += blocklen;
+ cnt -= blocklen;
+ blocklen <<= 1;
+ }
+ memcpy(dst, src, cnt);
+ return;
+ }
+ if (cnt >= 8) {
+ AV_COPY32U(dst, src);
+ AV_COPY32U(dst + 4, src + 4);
+ src += 8;
+ dst += 8;
+ cnt -= 8;
+ }
+ if (cnt >= 4) {
+ AV_COPY32U(dst, src);
+ src += 4;
+ dst += 4;
+ cnt -= 4;
+ }
+ if (cnt >= 2) {
+ AV_COPY16U(dst, src);
+ src += 2;
+ dst += 2;
+ cnt -= 2;
+ }
+ if (cnt)
+ *dst = *src;
+ }
+}
+
+void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
+{
+ size_t max_size;
+
+ if (min_size <= *size)
+ return ptr;
+
+ max_size = atomic_load_explicit(&max_alloc_size, memory_order_relaxed);
+ /* *size is an unsigned, so the real maximum is <= UINT_MAX. */
+ max_size = FFMIN(max_size, UINT_MAX);
+
+ if (min_size > max_size) {
+ *size = 0;
+ return NULL;
+ }
+
+ min_size = FFMIN(max_size, FFMAX(min_size + min_size / 16 + 32, min_size));
+
+ ptr = av_realloc(ptr, min_size);
+ /* we could set this to the unmodified min_size but this is safer
+ * if the user lost the ptr and uses NULL now
+ */
+ if (!ptr)
+ min_size = 0;
+
+ *size = min_size;
+
+ return ptr;
+}
+
+static inline void fast_malloc(void *ptr, unsigned int *size, size_t min_size, int zero_realloc)
+{
+ size_t max_size;
+ void *val;
+
+ memcpy(&val, ptr, sizeof(val));
+ if (min_size <= *size) {
+ av_assert0(val || !min_size);
+ return;
+ }
+
+ max_size = atomic_load_explicit(&max_alloc_size, memory_order_relaxed);
+ /* *size is an unsigned, so the real maximum is <= UINT_MAX. */
+ max_size = FFMIN(max_size, UINT_MAX);
+
+ if (min_size > max_size) {
+ av_freep(ptr);
+ *size = 0;
+ return;
+ }
+ min_size = FFMIN(max_size, FFMAX(min_size + min_size / 16 + 32, min_size));
+ av_freep(ptr);
+ val = zero_realloc ? av_mallocz(min_size) : av_malloc(min_size);
+ memcpy(ptr, &val, sizeof(val));
+ if (!val)
+ min_size = 0;
+ *size = min_size;
+ return;
+}
+
+void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
+{
+ fast_malloc(ptr, size, min_size, 0);
+}
+
+void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
+{
+ fast_malloc(ptr, size, min_size, 1);
+}
+
+int av_size_mult(size_t a, size_t b, size_t *r)
+{
+ return size_mult(a, b, r);
+}
diff --git a/media/ffvpx/libavutil/mem.h b/media/ffvpx/libavutil/mem.h
new file mode 100644
index 0000000000..62b4ca6e50
--- /dev/null
+++ b/media/ffvpx/libavutil/mem.h
@@ -0,0 +1,609 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_mem
+ * Memory handling functions
+ */
+
+#ifndef AVUTIL_MEM_H
+#define AVUTIL_MEM_H
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "attributes.h"
+#include "avutil.h"
+#include "version.h"
+
+/**
+ * @addtogroup lavu_mem
+ * Utilities for manipulating memory.
+ *
+ * FFmpeg uses memory in ways that are not required of a typical program. For
+ * example, the computing-heavy components like video decoding and encoding
+ * can be sped up significantly through the use of aligned memory.
+ *
+ * However, for each of these uses of memory there might not be a recognized
+ * or standardized API. Memory alignment, for instance, varies wildly
+ * depending on operating systems, architectures, and compilers. Hence, this
+ * component of @ref libavutil exists to make dealing with memory consistently
+ * possible on all platforms.
+ *
+ * @{
+ */
+
+/**
+ * @defgroup lavu_mem_attrs Function Attributes
+ * Function attributes applicable to memory handling functions.
+ *
+ * These function attributes can help compilers emit more useful warnings, or
+ * generate better code.
+ * @{
+ */
+
+/**
+ * @def av_malloc_attrib
+ * Function attribute denoting a malloc-like function.
+ *
+ * @see <a href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007bmalloc_007d-function-attribute-3251">Function attribute `malloc` in GCC's documentation</a>
+ */
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+ #define av_malloc_attrib __attribute__((__malloc__))
+#else
+ #define av_malloc_attrib
+#endif
+
+/**
+ * @def av_alloc_size(...)
+ * Function attribute used on a function that allocates memory, whose size is
+ * given by the specified parameter(s).
+ *
+ * @code{.c}
+ * void *av_malloc(size_t size) av_alloc_size(1);
+ * void *av_calloc(size_t nmemb, size_t size) av_alloc_size(1, 2);
+ * @endcode
+ *
+ * @param ... One or two parameter indexes, separated by a comma
+ *
+ * @see <a href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007balloc_005fsize_007d-function-attribute-3220">Function attribute `alloc_size` in GCC's documentation</a>
+ */
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+ #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
+#else
+ #define av_alloc_size(...)
+#endif
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_mem_funcs Heap Management
+ * Functions responsible for allocating, freeing, and copying memory.
+ *
+ * All memory allocation functions have a built-in upper limit of `INT_MAX`
+ * bytes. This may be changed with av_max_alloc(), although exercise extreme
+ * caution when doing so.
+ *
+ * @{
+ */
+
+/**
+ * Allocate a memory block with alignment suitable for all memory accesses
+ * (including vectors if available on the CPU).
+ *
+ * @param size Size in bytes for the memory block to be allocated
+ * @return Pointer to the allocated block, or `NULL` if the block cannot
+ * be allocated
+ * @see av_mallocz()
+ */
+void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a memory block with alignment suitable for all memory accesses
+ * (including vectors if available on the CPU) and zero all the bytes of the
+ * block.
+ *
+ * @param size Size in bytes for the memory block to be allocated
+ * @return Pointer to the allocated block, or `NULL` if it cannot be allocated
+ * @see av_malloc()
+ */
+void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a memory block for an array with av_malloc().
+ *
+ * The allocated memory will have size `size * nmemb` bytes.
+ *
+ * @param nmemb Number of elements
+ * @param size Size of a single element
+ * @return Pointer to the allocated block, or `NULL` if the block cannot
+ * be allocated
+ * @see av_malloc()
+ */
+av_alloc_size(1, 2) void *av_malloc_array(size_t nmemb, size_t size);
+
+/**
+ * Allocate a memory block for an array with av_mallocz().
+ *
+ * The allocated memory will have size `size * nmemb` bytes.
+ *
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, or `NULL` if the block cannot
+ * be allocated
+ *
+ * @see av_mallocz()
+ * @see av_malloc_array()
+ */
+void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size(1, 2);
+
+/**
+ * Allocate, reallocate, or free a block of memory.
+ *
+ * If `ptr` is `NULL` and `size` > 0, allocate a new block. Otherwise, expand or
+ * shrink that block of memory according to `size`.
+ *
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or `NULL`
+ * @param size Size in bytes of the memory block to be allocated or
+ * reallocated
+ *
+ * @return Pointer to a newly-reallocated block or `NULL` if the block
+ * cannot be reallocated
+ *
+ * @warning Unlike av_malloc(), the returned pointer is not guaranteed to be
+ *          correctly aligned. The returned pointer must still be freed
+ *          afterwards, even if size is zero.
+ * @see av_fast_realloc()
+ * @see av_reallocp()
+ */
+void *av_realloc(void *ptr, size_t size) av_alloc_size(2);
+
+/**
+ * Allocate, reallocate, or free a block of memory through a pointer to a
+ * pointer.
+ *
+ * If `*ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is
+ * zero, free the memory block pointed to by `*ptr`. Otherwise, expand or
+ * shrink that block of memory according to `size`.
+ *
+ * @param[in,out] ptr Pointer to a pointer to a memory block already allocated
+ * with av_realloc(), or a pointer to `NULL`. The pointer
+ * is updated on success, or freed on failure.
+ * @param[in] size Size in bytes for the memory block to be allocated or
+ * reallocated
+ *
+ * @return Zero on success, an AVERROR error code on failure
+ *
+ * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
+ * correctly aligned.
+ */
+av_warn_unused_result
+int av_reallocp(void *ptr, size_t size);
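+
+/* A minimal sketch of growing a buffer with av_reallocp(); on failure the old
+ * buffer has already been freed and the pointer reset to NULL, so no extra
+ * cleanup is needed (the sizes are arbitrary).
+ *
+ * @code{.c}
+ * uint8_t *buf = NULL;
+ * int ret = av_reallocp(&buf, 1024);
+ * if (ret < 0)
+ *     return ret;
+ * ret = av_reallocp(&buf, 4096);  // grow; frees and NULLs buf on failure
+ * if (ret < 0)
+ *     return ret;
+ * av_freep(&buf);
+ * @endcode
+ */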
+
+/**
+ * Allocate, reallocate, or free a block of memory.
+ *
+ * This function does the same thing as av_realloc(), except:
+ * - It takes two size arguments and allocates `nelem * elsize` bytes,
+ * after checking the result of the multiplication for integer overflow.
+ * - It frees the input block in case of failure, thus avoiding the memory
+ * leak with the classic
+ * @code{.c}
+ *   buf = realloc(buf, size);
+ * if (!buf)
+ * return -1;
+ * @endcode
+ * pattern.
+ */
+void *av_realloc_f(void *ptr, size_t nelem, size_t elsize);
+
+/**
+ * Allocate, reallocate, or free an array.
+ *
+ * If `ptr` is `NULL` and `nmemb` > 0, allocate a new block.
+ *
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or `NULL`
+ * @param nmemb Number of elements in the array
+ * @param size Size of the single element of the array
+ *
+ * @return Pointer to a newly-reallocated block or NULL if the block
+ * cannot be reallocated
+ *
+ * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
+ *          correctly aligned. The returned pointer must still be freed
+ *          afterwards, even if nmemb is zero.
+ * @see av_reallocp_array()
+ */
+av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Allocate or reallocate an array through a pointer to a pointer.
+ *
+ * If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block.
+ *
+ * @param[in,out] ptr Pointer to a pointer to a memory block already
+ * allocated with av_realloc(), or a pointer to `NULL`.
+ * The pointer is updated on success, or freed on failure.
+ * @param[in] nmemb Number of elements
+ * @param[in] size Size of the single element
+ *
+ * @return Zero on success, an AVERROR error code on failure
+ *
+ * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
+ *          correctly aligned. `*ptr` must still be freed afterwards, even if
+ *          nmemb is zero.
+ */
+int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Reallocate the given buffer if it is not large enough, otherwise do nothing.
+ *
+ * If the given buffer is `NULL`, then a new uninitialized buffer is allocated.
+ *
+ * If the given buffer is not large enough, and reallocation fails, `NULL` is
+ * returned and `*size` is set to 0, but the original buffer is not changed or
+ * freed.
+ *
+ * A typical use pattern follows:
+ *
+ * @code{.c}
+ * uint8_t *buf = ...;
+ * uint8_t *new_buf = av_fast_realloc(buf, &current_size, size_needed);
+ * if (!new_buf) {
+ * // Allocation failed; clean up original buffer
+ * av_freep(&buf);
+ * return AVERROR(ENOMEM);
+ * }
+ * @endcode
+ *
+ * @param[in,out] ptr Already allocated buffer, or `NULL`
+ * @param[in,out] size Pointer to the size of buffer `ptr`. `*size` is
+ * updated to the new allocated size, in particular 0
+ * in case of failure.
+ * @param[in] min_size Desired minimal size of buffer `ptr`
+ * @return `ptr` if the buffer is large enough, a pointer to newly reallocated
+ * buffer if the buffer was not large enough, or `NULL` in case of
+ * error
+ * @see av_realloc()
+ * @see av_fast_malloc()
+ */
+void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate a buffer, reusing the given one if large enough.
+ *
+ * Contrary to av_fast_realloc(), the current buffer contents might not be
+ * preserved and on error the old buffer is freed; thus, no special handling
+ * to avoid memory leaks is necessary.
+ *
+ * `*ptr` is allowed to be `NULL`, in which case allocation always happens if
+ * `size_needed` is greater than 0.
+ *
+ * @code{.c}
+ * uint8_t *buf = ...;
+ * av_fast_malloc(&buf, &current_size, size_needed);
+ * if (!buf) {
+ * // Allocation failed; buf already freed
+ * return AVERROR(ENOMEM);
+ * }
+ * @endcode
+ *
+ * @param[in,out] ptr Pointer to pointer to an already allocated buffer.
+ * `*ptr` will be overwritten with pointer to new
+ * buffer on success or `NULL` on failure
+ * @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is
+ * updated to the new allocated size, in particular 0
+ * in case of failure.
+ * @param[in] min_size Desired minimal size of buffer `*ptr`
+ * @see av_realloc()
+ * @see av_fast_mallocz()
+ */
+void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate and clear a buffer, reusing the given one if large enough.
+ *
+ * Like av_fast_malloc(), but all newly allocated space is initially cleared.
+ * Reused buffer is not cleared.
+ *
+ * `*ptr` is allowed to be `NULL`, in which case allocation always happens if
+ * `min_size` is greater than 0.
+ *
+ * @param[in,out] ptr Pointer to pointer to an already allocated buffer.
+ * `*ptr` will be overwritten with pointer to new
+ * buffer on success or `NULL` on failure
+ * @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is
+ * updated to the new allocated size, in particular 0
+ * in case of failure.
+ * @param[in] min_size Desired minimal size of buffer `*ptr`
+ * @see av_fast_malloc()
+ */
+void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Free a memory block which has been allocated with a function of av_malloc()
+ * or av_realloc() family.
+ *
+ * @param ptr Pointer to the memory block which should be freed.
+ *
+ * @note `ptr = NULL` is explicitly allowed.
+ * @note It is recommended that you use av_freep() instead, to prevent leaving
+ * behind dangling pointers.
+ * @see av_freep()
+ */
+void av_free(void *ptr);
+
+/**
+ * Free a memory block which has been allocated with a function of av_malloc()
+ * or av_realloc() family, and set the pointer pointing to it to `NULL`.
+ *
+ * @code{.c}
+ * uint8_t *buf = av_malloc(16);
+ * av_free(buf);
+ * // buf now contains a dangling pointer to freed memory, and accidental
+ * // dereference of buf will result in a use-after-free, which may be a
+ * // security risk.
+ *
+ * uint8_t *buf = av_malloc(16);
+ * av_freep(&buf);
+ * // buf is now NULL, and accidental dereference will only result in a
+ * // NULL-pointer dereference.
+ * @endcode
+ *
+ * @param ptr Pointer to the pointer to the memory block which should be freed
+ * @note `*ptr = NULL` is safe and leads to no action.
+ * @see av_free()
+ */
+void av_freep(void *ptr);
+
+/**
+ * Duplicate a string.
+ *
+ * @param s String to be duplicated
+ * @return Pointer to a newly-allocated string containing a
+ * copy of `s` or `NULL` if the string cannot be allocated
+ * @see av_strndup()
+ */
+char *av_strdup(const char *s) av_malloc_attrib;
+
+/**
+ * Duplicate a substring of a string.
+ *
+ * @param s String to be duplicated
+ * @param len Maximum length of the resulting string (not counting the
+ * terminating byte)
+ * @return Pointer to a newly-allocated string containing a
+ * substring of `s` or `NULL` if the string cannot be allocated
+ */
+char *av_strndup(const char *s, size_t len) av_malloc_attrib;
+
+/**
+ * Duplicate a buffer with av_malloc().
+ *
+ * @param p Buffer to be duplicated
+ * @param size Size in bytes of the buffer copied
+ * @return Pointer to a newly allocated buffer containing a
+ * copy of `p` or `NULL` if the buffer cannot be allocated
+ */
+void *av_memdup(const void *p, size_t size);
+
+/**
+ * Overlapping memcpy() implementation.
+ *
+ * @param dst Destination buffer
+ * @param back Number of bytes back to start copying (i.e. the initial size of
+ * the overlapping window); must be > 0
+ * @param cnt Number of bytes to copy; must be >= 0
+ *
+ * @note `cnt > back` is valid; in that case the bytes just written are copied
+ *       again, creating a repeating pattern with a period length of `back`.
+ */
+void av_memcpy_backptr(uint8_t *dst, int back, int cnt);
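+
+/* A minimal sketch of the `cnt > back` case: a two-byte seed is replicated
+ * into a repeating pattern.
+ *
+ * @code{.c}
+ * uint8_t buf[8] = { 'a', 'b' };
+ * av_memcpy_backptr(buf + 2, 2, 6);
+ * // buf now holds 'a','b','a','b','a','b','a','b'
+ * @endcode
+ */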
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_mem_dynarray Dynamic Array
+ *
+ * Utilities to make an array grow when needed.
+ *
+ * Sometimes, the programmer would want to have an array that can grow when
+ * needed. The libavutil dynamic array utilities fill that need.
+ *
+ * libavutil supports two systems of appending elements onto a dynamically
+ * allocated array, the first one storing the pointer to the value in the
+ * array, and the second storing the value directly. In both systems, the
+ * caller is responsible for maintaining a variable containing the length of
+ * the array, as well as freeing of the array after use.
+ *
+ * The first system stores pointers to values in a block of dynamically
+ * allocated memory. Since only pointers are stored, the function does not need
+ * to know the size of the type. Both av_dynarray_add() and
+ * av_dynarray_add_nofree() implement this system.
+ *
+ * @code
+ * type **array = NULL; //< an array of pointers to values
+ * int nb = 0; //< a variable to keep track of the length of the array
+ *
+ * type to_be_added = ...;
+ * type to_be_added2 = ...;
+ *
+ * av_dynarray_add(&array, &nb, &to_be_added);
+ * if (nb == 0)
+ * return AVERROR(ENOMEM);
+ *
+ * av_dynarray_add(&array, &nb, &to_be_added2);
+ * if (nb == 0)
+ * return AVERROR(ENOMEM);
+ *
+ * // Now:
+ * // nb == 2
+ * // &to_be_added == array[0]
+ * // &to_be_added2 == array[1]
+ *
+ * av_freep(&array);
+ * @endcode
+ *
+ * The second system stores the value directly in a block of memory. As a
+ * result, the function has to know the size of the type. av_dynarray2_add()
+ * implements this mechanism.
+ *
+ * @code
+ * type *array = NULL; //< an array of values
+ * int nb = 0; //< a variable to keep track of the length of the array
+ *
+ * type to_be_added = ...;
+ * type to_be_added2 = ...;
+ *
+ * type *addr = av_dynarray2_add((void **)&array, &nb, sizeof(*array), NULL);
+ * if (!addr)
+ * return AVERROR(ENOMEM);
+ * memcpy(addr, &to_be_added, sizeof(to_be_added));
+ *
+ * // Shortcut of the above.
+ * type *addr = av_dynarray2_add((void **)&array, &nb, sizeof(*array),
+ * (const void *)&to_be_added2);
+ * if (!addr)
+ * return AVERROR(ENOMEM);
+ *
+ * // Now:
+ * // nb == 2
+ * // to_be_added == array[0]
+ * // to_be_added2 == array[1]
+ *
+ * av_freep(&array);
+ * @endcode
+ *
+ * @{
+ */
+
+/**
+ * Add the pointer to an element to a dynamic array.
+ *
+ * The array to grow is supposed to be an array of pointers to
+ * structures, and the element to add must be a pointer to an already
+ * allocated structure.
+ *
+ * The array is reallocated when its size reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the number pointed to by `nb_ptr`
+ * is incremented.
+ * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and
+ * `*nb_ptr` is set to 0.
+ *
+ * @param[in,out] tab_ptr Pointer to the array to grow
+ * @param[in,out] nb_ptr Pointer to the number of elements in the array
+ * @param[in] elem Element to add
+ * @see av_dynarray_add_nofree(), av_dynarray2_add()
+ */
+void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem);
+
+/**
+ * Add an element to a dynamic array.
+ *
+ * This function has the same functionality as av_dynarray_add(), but it does
+ * not free the array on failure. It returns an error code instead and leaves
+ * the current buffer untouched.
+ *
+ * @return >=0 on success, negative otherwise
+ * @see av_dynarray_add(), av_dynarray2_add()
+ */
+av_warn_unused_result
+int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem);
+
+/**
+ * Add an element of size `elem_size` to a dynamic array.
+ *
+ * The array is reallocated when its number of elements reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the number pointed to by `nb_ptr`
+ * is incremented.
+ * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and
+ * `*nb_ptr` is set to 0.
+ *
+ * @param[in,out] tab_ptr Pointer to the array to grow
+ * @param[in,out] nb_ptr Pointer to the number of elements in the array
+ * @param[in] elem_size Size in bytes of an element in the array
+ * @param[in] elem_data Pointer to the data of the element to add. If
+ * `NULL`, the space of the newly added element is
+ * allocated but left uninitialized.
+ *
+ * @return Pointer to the data of the element to copy in the newly allocated
+ * space
+ * @see av_dynarray_add(), av_dynarray_add_nofree()
+ */
+void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,
+ const uint8_t *elem_data);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_mem_misc Miscellaneous Functions
+ *
+ * Other functions related to memory allocation.
+ *
+ * @{
+ */
+
+/**
+ * Multiply two `size_t` values checking for overflow.
+ *
+ * @param[in] a Operand of multiplication
+ * @param[in] b Operand of multiplication
+ * @param[out] r Pointer to the result of the operation
+ * @return 0 on success, AVERROR(EINVAL) on overflow
+ */
+int av_size_mult(size_t a, size_t b, size_t *r);
+
+/**
+ * Set the maximum size that may be allocated in one block.
+ *
+ * The value specified with this function is effective for all libavutil's @ref
+ * lavu_mem_funcs "heap management functions."
+ *
+ * By default, the max value is defined as `INT_MAX`.
+ *
+ * @param max Value to be set as the new maximum size
+ *
+ * @warning Exercise extreme caution when using this function. Don't touch
+ * this if you do not understand the full consequence of doing so.
+ */
+void av_max_alloc(size_t max);
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_MEM_H */
diff --git a/media/ffvpx/libavutil/mem_internal.h b/media/ffvpx/libavutil/mem_internal.h
new file mode 100644
index 0000000000..2448c606f1
--- /dev/null
+++ b/media/ffvpx/libavutil/mem_internal.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MEM_INTERNAL_H
+#define AVUTIL_MEM_INTERNAL_H
+
+#include "config.h"
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "macros.h"
+#include "mem.h"
+#include "version.h"
+
+/**
+ * @def DECLARE_ALIGNED(n,t,v)
+ * Declare a variable that is aligned in memory.
+ *
+ * @code{.c}
+ * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42;
+ * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128];
+ *
+ * // The default-alignment equivalent would be
+ * uint16_t aligned_int = 42;
+ * uint8_t aligned_array[128];
+ * @endcode
+ *
+ * @param n Minimum alignment in bytes
+ * @param t Type of the variable (or array element)
+ * @param v Name of the variable
+ */
+
+/**
+ * @def DECLARE_ASM_ALIGNED(n,t,v)
+ * Declare an aligned variable appropriate for use in inline assembly code.
+ *
+ * @code{.c}
+ * DECLARE_ASM_ALIGNED(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
+ * @endcode
+ *
+ * @param n Minimum alignment in bytes
+ * @param t Type of the variable (or array element)
+ * @param v Name of the variable
+ */
+
+/**
+ * @def DECLARE_ASM_CONST(n,t,v)
+ * Declare a static constant aligned variable appropriate for use in inline
+ * assembly code.
+ *
+ * @code{.c}
+ * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
+ * @endcode
+ *
+ * @param n Minimum alignment in bytes
+ * @param t Type of the variable (or array element)
+ * @param v Name of the variable
+ */
+
+#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
+#elif defined(__DJGPP__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (FFMIN(n, 16)))) v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
+#elif defined(__GNUC__) || defined(__clang__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
+#elif defined(_MSC_VER)
+ #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
+#else
+ #define DECLARE_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_CONST(n,t,v) static const t v
+#endif
+
+// Some broken preprocessors need a second expansion
+// to be forced to tokenize __VA_ARGS__
+#define E1(x) x
+
+#define LOCAL_ALIGNED_A(a, t, v, s, o, ...) \
+ uint8_t la_##v[sizeof(t s o) + (a)]; \
+ t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)
+
+#define LOCAL_ALIGNED_D(a, t, v, s, o, ...) \
+ DECLARE_ALIGNED(a, t, la_##v) s o; \
+ t (*v) o = la_##v
+
+#define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
+#endif
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
+#endif
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
+#endif
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
+#endif
+
+#endif /* AVUTIL_MEM_INTERNAL_H */
diff --git a/media/ffvpx/libavutil/moz.build b/media/ffvpx/libavutil/moz.build
new file mode 100644
index 0000000000..c9ba03fe35
--- /dev/null
+++ b/media/ffvpx/libavutil/moz.build
@@ -0,0 +1,83 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Due to duplicate file names, we compile libavutil/x86 in its own
+# moz.build file.
+if CONFIG['FFVPX_ASFLAGS']:
+ if CONFIG['CPU_ARCH'] == 'x86' or CONFIG['CPU_ARCH'] == 'x86_64':
+ DIRS += ['x86']
+ elif CONFIG['CPU_ARCH'] == 'arm':
+ DIRS += ['arm']
+
+if CONFIG['CPU_ARCH'] == 'aarch64':
+ DIRS += ['aarch64']
+
+SharedLibrary('mozavutil')
+SOURCES += [
+ 'avsscanf.c',
+ 'avstring.c',
+ 'bprint.c',
+ 'buffer.c',
+ 'channel_layout.c',
+ 'cpu.c',
+ 'crc.c',
+ 'dict.c',
+ 'error.c',
+ 'eval.c',
+ 'fifo.c',
+ 'fixed_dsp.c',
+ 'float_dsp.c',
+ 'frame.c',
+ 'hwcontext.c',
+ 'imgutils.c',
+ 'log.c',
+ 'log2_tab.c',
+ 'mathematics.c',
+ 'mem.c',
+ 'opt.c',
+ 'parseutils.c',
+ 'pixdesc.c',
+ 'rational.c',
+ 'reverse.c',
+ 'samplefmt.c',
+ 'slicethread.c',
+ 'time.c',
+ 'utils.c'
+]
+
+if not CONFIG['MOZ_FFVPX_AUDIOONLY']:
+ SOURCES += [
+ 'adler32.c',
+ 'base64.c',
+ 'color_utils.c',
+ 'film_grain_params.c',
+ 'integer.c',
+ 'intmath.c',
+ 'lls.c',
+ 'mastering_display_metadata.c',
+ 'pixelutils.c',
+ 'threadmessage.c',
+ 'timecode.c',
+ 'video_enc_params.c'
+ ]
+ if CONFIG["MOZ_WAYLAND"]:
+ LOCAL_INCLUDES += ['/media/mozva']
+ SOURCES += [
+ 'hwcontext_vaapi.c',
+ ]
+ USE_LIBS += ['mozva']
+
+SYMBOLS_FILE = 'avutil.symbols'
+NoVisibilityFlags()
+
+OS_LIBS += CONFIG['REALTIME_LIBS']
+if CONFIG['OS_TARGET'] != 'WINNT':
+ OS_LIBS += ['m']
+
+if CONFIG['MOZ_NEEDS_LIBATOMIC']:
+ OS_LIBS += ['atomic']
+
+include("../ffvpxcommon.mozbuild")
diff --git a/media/ffvpx/libavutil/opt.c b/media/ffvpx/libavutil/opt.c
new file mode 100644
index 0000000000..0908751752
--- /dev/null
+++ b/media/ffvpx/libavutil/opt.c
@@ -0,0 +1,2241 @@
+/*
+ * AVOptions
+ * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AVOptions
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "avutil.h"
+#include "avassert.h"
+#include "avstring.h"
+#include "channel_layout.h"
+#include "common.h"
+#include "dict.h"
+#include "eval.h"
+#include "log.h"
+#include "parseutils.h"
+#include "pixdesc.h"
+#include "mathematics.h"
+#include "opt.h"
+#include "samplefmt.h"
+#include "bprint.h"
+#include "version.h"
+
+#include <float.h>
+
+const AVOption *av_opt_next(const void *obj, const AVOption *last)
+{
+ const AVClass *class;
+ if (!obj)
+ return NULL;
+ class = *(const AVClass**)obj;
+ if (!last && class && class->option && class->option[0].name)
+ return class->option;
+ if (last && last[1].name)
+ return ++last;
+ return NULL;
+}
+
+static int read_number(const AVOption *o, const void *dst, double *num, int *den, int64_t *intnum)
+{
+ switch (o->type) {
+ case AV_OPT_TYPE_FLAGS:
+ *intnum = *(unsigned int*)dst;
+ return 0;
+ case AV_OPT_TYPE_PIXEL_FMT:
+ *intnum = *(enum AVPixelFormat *)dst;
+ return 0;
+ case AV_OPT_TYPE_SAMPLE_FMT:
+ *intnum = *(enum AVSampleFormat *)dst;
+ return 0;
+ case AV_OPT_TYPE_BOOL:
+ case AV_OPT_TYPE_INT:
+ *intnum = *(int *)dst;
+ return 0;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ case AV_OPT_TYPE_DURATION:
+ case AV_OPT_TYPE_INT64:
+ case AV_OPT_TYPE_UINT64:
+ *intnum = *(int64_t *)dst;
+ return 0;
+ case AV_OPT_TYPE_FLOAT:
+ *num = *(float *)dst;
+ return 0;
+ case AV_OPT_TYPE_DOUBLE:
+ *num = *(double *)dst;
+ return 0;
+ case AV_OPT_TYPE_RATIONAL:
+ *intnum = ((AVRational *)dst)->num;
+ *den = ((AVRational *)dst)->den;
+ return 0;
+ case AV_OPT_TYPE_CONST:
+ *num = o->default_val.dbl;
+ return 0;
+ }
+ return AVERROR(EINVAL);
+}
+
+static int write_number(void *obj, const AVOption *o, void *dst, double num, int den, int64_t intnum)
+{
+ if (o->type != AV_OPT_TYPE_FLAGS &&
+ (!den || o->max * den < num * intnum || o->min * den > num * intnum)) {
+ num = den ? num * intnum / den : (num && intnum ? INFINITY : NAN);
+ av_log(obj, AV_LOG_ERROR, "Value %f for parameter '%s' out of range [%g - %g]\n",
+ num, o->name, o->min, o->max);
+ return AVERROR(ERANGE);
+ }
+ if (o->type == AV_OPT_TYPE_FLAGS) {
+ double d = num*intnum/den;
+ if (d < -1.5 || d > 0xFFFFFFFF+0.5 || (llrint(d*256) & 255)) {
+ av_log(obj, AV_LOG_ERROR,
+ "Value %f for parameter '%s' is not a valid set of 32bit integer flags\n",
+ num*intnum/den, o->name);
+ return AVERROR(ERANGE);
+ }
+ }
+
+ switch (o->type) {
+ case AV_OPT_TYPE_PIXEL_FMT:
+ *(enum AVPixelFormat *)dst = llrint(num / den) * intnum;
+ break;
+ case AV_OPT_TYPE_SAMPLE_FMT:
+ *(enum AVSampleFormat *)dst = llrint(num / den) * intnum;
+ break;
+ case AV_OPT_TYPE_BOOL:
+ case AV_OPT_TYPE_FLAGS:
+ case AV_OPT_TYPE_INT:
+ *(int *)dst = llrint(num / den) * intnum;
+ break;
+ case AV_OPT_TYPE_DURATION:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ case AV_OPT_TYPE_INT64:{
+ double d = num / den;
+ if (intnum == 1 && d == (double)INT64_MAX) {
+ *(int64_t *)dst = INT64_MAX;
+ } else
+ *(int64_t *)dst = llrint(d) * intnum;
+ break;}
+ case AV_OPT_TYPE_UINT64:{
+ double d = num / den;
+        // We must special-case uint64_t here as llrint() does not support
+        // values outside the int64_t range and there is no portable function
+        // which does. "INT64_MAX + 1ULL" is used as it is representable
+        // exactly as an IEEE double, while INT64_MAX is not.
+ if (intnum == 1 && d == (double)UINT64_MAX) {
+ *(uint64_t *)dst = UINT64_MAX;
+ } else if (d > INT64_MAX + 1ULL) {
+ *(uint64_t *)dst = (llrint(d - (INT64_MAX + 1ULL)) + (INT64_MAX + 1ULL))*intnum;
+ } else {
+ *(uint64_t *)dst = llrint(d) * intnum;
+ }
+ break;}
+ case AV_OPT_TYPE_FLOAT:
+ *(float *)dst = num * intnum / den;
+ break;
+ case AV_OPT_TYPE_DOUBLE:
+ *(double *)dst = num * intnum / den;
+ break;
+ case AV_OPT_TYPE_RATIONAL:
+ case AV_OPT_TYPE_VIDEO_RATE:
+ if ((int) num == num)
+ *(AVRational *)dst = (AVRational) { num *intnum, den };
+ else
+ *(AVRational *)dst = av_d2q(num * intnum / den, 1 << 24);
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+ return 0;
+}
+
+static int hexchar2int(char c) {
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+static int set_string_binary(void *obj, const AVOption *o, const char *val, uint8_t **dst)
+{
+ int *lendst = (int *)(dst + 1);
+ uint8_t *bin, *ptr;
+ int len;
+
+ av_freep(dst);
+ *lendst = 0;
+
+ if (!val || !(len = strlen(val)))
+ return 0;
+
+ if (len & 1)
+ return AVERROR(EINVAL);
+ len /= 2;
+
+ ptr = bin = av_malloc(len);
+ if (!ptr)
+ return AVERROR(ENOMEM);
+ while (*val) {
+ int a = hexchar2int(*val++);
+ int b = hexchar2int(*val++);
+ if (a < 0 || b < 0) {
+ av_free(bin);
+ return AVERROR(EINVAL);
+ }
+ *ptr++ = (a << 4) | b;
+ }
+ *dst = bin;
+ *lendst = len;
+
+ return 0;
+}
+
+static int set_string(void *obj, const AVOption *o, const char *val, uint8_t **dst)
+{
+ av_freep(dst);
+ *dst = av_strdup(val);
+ return *dst ? 0 : AVERROR(ENOMEM);
+}
+
+#define DEFAULT_NUMVAL(opt) ((opt->type == AV_OPT_TYPE_INT64 || \
+ opt->type == AV_OPT_TYPE_UINT64 || \
+ opt->type == AV_OPT_TYPE_CONST || \
+ opt->type == AV_OPT_TYPE_FLAGS || \
+ opt->type == AV_OPT_TYPE_INT) \
+ ? opt->default_val.i64 \
+ : opt->default_val.dbl)
+
+static int set_string_number(void *obj, void *target_obj, const AVOption *o, const char *val, void *dst)
+{
+ int ret = 0;
+
+ if (o->type == AV_OPT_TYPE_RATIONAL || o->type == AV_OPT_TYPE_VIDEO_RATE) {
+ int num, den;
+ char c;
+ if (sscanf(val, "%d%*1[:/]%d%c", &num, &den, &c) == 2) {
+ if ((ret = write_number(obj, o, dst, 1, den, num)) >= 0)
+ return ret;
+ ret = 0;
+ }
+ }
+
+ for (;;) {
+ int i = 0;
+ char buf[256];
+ int cmd = 0;
+ double d;
+ int64_t intnum = 1;
+
+ if (o->type == AV_OPT_TYPE_FLAGS) {
+ if (*val == '+' || *val == '-')
+ cmd = *(val++);
+ for (; i < sizeof(buf) - 1 && val[i] && val[i] != '+' && val[i] != '-'; i++)
+ buf[i] = val[i];
+ buf[i] = 0;
+ }
+
+ {
+ int res;
+ int ci = 0;
+ double const_values[64];
+ const char * const_names[64];
+ int search_flags = (o->flags & AV_OPT_FLAG_CHILD_CONSTS) ? AV_OPT_SEARCH_CHILDREN : 0;
+ const AVOption *o_named = av_opt_find(target_obj, i ? buf : val, o->unit, 0, search_flags);
+ if (o_named && o_named->type == AV_OPT_TYPE_CONST) {
+ d = DEFAULT_NUMVAL(o_named);
+ if (o_named->flags & AV_OPT_FLAG_DEPRECATED)
+ av_log(obj, AV_LOG_WARNING, "The \"%s\" option is deprecated: %s\n",
+ o_named->name, o_named->help);
+ } else {
+ if (o->unit) {
+ for (o_named = NULL; o_named = av_opt_next(target_obj, o_named); ) {
+ if (o_named->type == AV_OPT_TYPE_CONST &&
+ o_named->unit &&
+ !strcmp(o_named->unit, o->unit)) {
+ if (ci + 6 >= FF_ARRAY_ELEMS(const_values)) {
+ av_log(obj, AV_LOG_ERROR, "const_values array too small for %s\n", o->unit);
+ return AVERROR_PATCHWELCOME;
+ }
+ const_names [ci ] = o_named->name;
+ const_values[ci++] = DEFAULT_NUMVAL(o_named);
+ }
+ }
+ }
+ const_names [ci ] = "default";
+ const_values[ci++] = DEFAULT_NUMVAL(o);
+ const_names [ci ] = "max";
+ const_values[ci++] = o->max;
+ const_names [ci ] = "min";
+ const_values[ci++] = o->min;
+ const_names [ci ] = "none";
+ const_values[ci++] = 0;
+ const_names [ci ] = "all";
+ const_values[ci++] = ~0;
+ const_names [ci] = NULL;
+ const_values[ci] = 0;
+
+ res = av_expr_parse_and_eval(&d, i ? buf : val, const_names,
+ const_values, NULL, NULL, NULL, NULL, NULL, 0, obj);
+ if (res < 0) {
+ av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\"\n", val);
+ return res;
+ }
+ }
+ }
+ if (o->type == AV_OPT_TYPE_FLAGS) {
+ read_number(o, dst, NULL, NULL, &intnum);
+ if (cmd == '+')
+ d = intnum | (int64_t)d;
+ else if (cmd == '-')
+ d = intnum &~(int64_t)d;
+ }
+
+ if ((ret = write_number(obj, o, dst, d, 1, 1)) < 0)
+ return ret;
+ val += i;
+ if (!i || !*val)
+ return 0;
+ }
+}
+
+static int set_string_image_size(void *obj, const AVOption *o, const char *val, int *dst)
+{
+ int ret;
+
+ if (!val || !strcmp(val, "none")) {
+ dst[0] =
+ dst[1] = 0;
+ return 0;
+ }
+ ret = av_parse_video_size(dst, dst + 1, val);
+ if (ret < 0)
+ av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as image size\n", val);
+ return ret;
+}
+
+static int set_string_video_rate(void *obj, const AVOption *o, const char *val, AVRational *dst)
+{
+ int ret = av_parse_video_rate(dst, val);
+ if (ret < 0)
+ av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as video rate\n", val);
+ return ret;
+}
+
+static int set_string_color(void *obj, const AVOption *o, const char *val, uint8_t *dst)
+{
+ int ret;
+
+ if (!val) {
+ return 0;
+ } else {
+ ret = av_parse_color(dst, val, -1, obj);
+ if (ret < 0)
+ av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as color\n", val);
+ return ret;
+ }
+ return 0;
+}
+
+static const char *get_bool_name(int val)
+{
+ if (val < 0)
+ return "auto";
+ return val ? "true" : "false";
+}
+
+static int set_string_bool(void *obj, const AVOption *o, const char *val, int *dst)
+{
+ int n;
+
+ if (!val)
+ return 0;
+
+ if (!strcmp(val, "auto")) {
+ n = -1;
+ } else if (av_match_name(val, "true,y,yes,enable,enabled,on")) {
+ n = 1;
+ } else if (av_match_name(val, "false,n,no,disable,disabled,off")) {
+ n = 0;
+ } else {
+ char *end = NULL;
+ n = strtol(val, &end, 10);
+ if (val + strlen(val) != end)
+ goto fail;
+ }
+
+ if (n < o->min || n > o->max)
+ goto fail;
+
+ *dst = n;
+ return 0;
+
+fail:
+ av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as boolean\n", val);
+ return AVERROR(EINVAL);
+}
+
+static int set_string_fmt(void *obj, const AVOption *o, const char *val, uint8_t *dst,
+ int fmt_nb, int ((*get_fmt)(const char *)), const char *desc)
+{
+ int fmt, min, max;
+
+ if (!val || !strcmp(val, "none")) {
+ fmt = -1;
+ } else {
+ fmt = get_fmt(val);
+ if (fmt == -1) {
+ char *tail;
+ fmt = strtol(val, &tail, 0);
+ if (*tail || (unsigned)fmt >= fmt_nb) {
+ av_log(obj, AV_LOG_ERROR,
+ "Unable to parse option value \"%s\" as %s\n", val, desc);
+ return AVERROR(EINVAL);
+ }
+ }
+ }
+
+ min = FFMAX(o->min, -1);
+ max = FFMIN(o->max, fmt_nb-1);
+
+ // hack for compatibility with old ffmpeg
+ if(min == 0 && max == 0) {
+ min = -1;
+ max = fmt_nb-1;
+ }
+
+ if (fmt < min || fmt > max) {
+ av_log(obj, AV_LOG_ERROR,
+ "Value %d for parameter '%s' out of %s format range [%d - %d]\n",
+ fmt, o->name, desc, min, max);
+ return AVERROR(ERANGE);
+ }
+
+ *(int *)dst = fmt;
+ return 0;
+}
+
+static int set_string_pixel_fmt(void *obj, const AVOption *o, const char *val, uint8_t *dst)
+{
+ return set_string_fmt(obj, o, val, dst,
+ AV_PIX_FMT_NB, av_get_pix_fmt, "pixel format");
+}
+
+static int set_string_sample_fmt(void *obj, const AVOption *o, const char *val, uint8_t *dst)
+{
+ return set_string_fmt(obj, o, val, dst,
+ AV_SAMPLE_FMT_NB, av_get_sample_fmt, "sample format");
+}
+
+static int set_string_dict(void *obj, const AVOption *o, const char *val, uint8_t **dst)
+{
+ AVDictionary *options = NULL;
+
+ if (val) {
+ int ret = av_dict_parse_string(&options, val, "=", ":", 0);
+ if (ret < 0) {
+ av_dict_free(&options);
+ return ret;
+ }
+ }
+
+ av_dict_free((AVDictionary **)dst);
+ *dst = (uint8_t *)options;
+
+ return 0;
+}
+
+static int set_string_channel_layout(void *obj, const AVOption *o,
+ const char *val, void *dst)
+{
+ AVChannelLayout *channel_layout = dst;
+ av_channel_layout_uninit(channel_layout);
+ if (!val)
+ return 0;
+ return av_channel_layout_from_string(channel_layout, val);
+}
+
+int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
+{
+ int ret = 0;
+ void *dst, *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!val && (o->type != AV_OPT_TYPE_STRING &&
+ o->type != AV_OPT_TYPE_PIXEL_FMT && o->type != AV_OPT_TYPE_SAMPLE_FMT &&
+ o->type != AV_OPT_TYPE_IMAGE_SIZE &&
+ o->type != AV_OPT_TYPE_DURATION && o->type != AV_OPT_TYPE_COLOR &&
+#if FF_API_OLD_CHANNEL_LAYOUT
+ o->type != AV_OPT_TYPE_CHANNEL_LAYOUT &&
+#endif
+ o->type != AV_OPT_TYPE_BOOL))
+ return AVERROR(EINVAL);
+FF_ENABLE_DEPRECATION_WARNINGS
+
+ if (o->flags & AV_OPT_FLAG_READONLY)
+ return AVERROR(EINVAL);
+
+ if (o->flags & AV_OPT_FLAG_DEPRECATED)
+ av_log(obj, AV_LOG_WARNING, "The \"%s\" option is deprecated: %s\n", name, o->help);
+
+ dst = ((uint8_t *)target_obj) + o->offset;
+ switch (o->type) {
+ case AV_OPT_TYPE_BOOL:
+ return set_string_bool(obj, o, val, dst);
+ case AV_OPT_TYPE_STRING:
+ return set_string(obj, o, val, dst);
+ case AV_OPT_TYPE_BINARY:
+ return set_string_binary(obj, o, val, dst);
+ case AV_OPT_TYPE_FLAGS:
+ case AV_OPT_TYPE_INT:
+ case AV_OPT_TYPE_INT64:
+ case AV_OPT_TYPE_UINT64:
+ case AV_OPT_TYPE_FLOAT:
+ case AV_OPT_TYPE_DOUBLE:
+ case AV_OPT_TYPE_RATIONAL:
+ return set_string_number(obj, target_obj, o, val, dst);
+ case AV_OPT_TYPE_IMAGE_SIZE:
+ return set_string_image_size(obj, o, val, dst);
+ case AV_OPT_TYPE_VIDEO_RATE: {
+ AVRational tmp;
+ ret = set_string_video_rate(obj, o, val, &tmp);
+ if (ret < 0)
+ return ret;
+ return write_number(obj, o, dst, 1, tmp.den, tmp.num);
+ }
+ case AV_OPT_TYPE_PIXEL_FMT:
+ return set_string_pixel_fmt(obj, o, val, dst);
+ case AV_OPT_TYPE_SAMPLE_FMT:
+ return set_string_sample_fmt(obj, o, val, dst);
+ case AV_OPT_TYPE_DURATION:
+ {
+ int64_t usecs = 0;
+ if (val) {
+ if ((ret = av_parse_time(&usecs, val, 1)) < 0) {
+ av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as duration\n", val);
+ return ret;
+ }
+ }
+ if (usecs < o->min || usecs > o->max) {
+ av_log(obj, AV_LOG_ERROR, "Value %f for parameter '%s' out of range [%g - %g]\n",
+ usecs / 1000000.0, o->name, o->min / 1000000.0, o->max / 1000000.0);
+ return AVERROR(ERANGE);
+ }
+ *(int64_t *)dst = usecs;
+ return 0;
+ }
+ case AV_OPT_TYPE_COLOR:
+ return set_string_color(obj, o, val, dst);
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ case AV_OPT_TYPE_CHANNEL_LAYOUT:
+ if (!val || !strcmp(val, "none")) {
+ *(int64_t *)dst = 0;
+ } else {
+ int64_t cl = av_get_channel_layout(val);
+ if (!cl) {
+ av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as channel layout\n", val);
+ ret = AVERROR(EINVAL);
+ }
+ *(int64_t *)dst = cl;
+ return ret;
+ }
+ break;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ case AV_OPT_TYPE_CHLAYOUT:
+ ret = set_string_channel_layout(obj, o, val, dst);
+ if (ret < 0) {
+ av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as channel layout\n", val);
+ ret = AVERROR(EINVAL);
+ }
+ return ret;
+ case AV_OPT_TYPE_DICT:
+ return set_string_dict(obj, o, val, dst);
+ }
+
+ av_log(obj, AV_LOG_ERROR, "Invalid option type.\n");
+ return AVERROR(EINVAL);
+}
+
+#define OPT_EVAL_NUMBER(name, opttype, vartype) \
+int av_opt_eval_ ## name(void *obj, const AVOption *o, \
+ const char *val, vartype *name ## _out) \
+{ \
+ if (!o || o->type != opttype || o->flags & AV_OPT_FLAG_READONLY) \
+ return AVERROR(EINVAL); \
+ return set_string_number(obj, obj, o, val, name ## _out); \
+}
+
+OPT_EVAL_NUMBER(flags, AV_OPT_TYPE_FLAGS, int)
+OPT_EVAL_NUMBER(int, AV_OPT_TYPE_INT, int)
+OPT_EVAL_NUMBER(int64, AV_OPT_TYPE_INT64, int64_t)
+OPT_EVAL_NUMBER(float, AV_OPT_TYPE_FLOAT, float)
+OPT_EVAL_NUMBER(double, AV_OPT_TYPE_DOUBLE, double)
+OPT_EVAL_NUMBER(q, AV_OPT_TYPE_RATIONAL, AVRational)
+
+static int set_number(void *obj, const char *name, double num, int den, int64_t intnum,
+ int search_flags)
+{
+ void *dst, *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+
+ if (o->flags & AV_OPT_FLAG_READONLY)
+ return AVERROR(EINVAL);
+
+ dst = ((uint8_t *)target_obj) + o->offset;
+ return write_number(obj, o, dst, num, den, intnum);
+}
+
+int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
+{
+ return set_number(obj, name, 1, 1, val, search_flags);
+}
+
+int av_opt_set_double(void *obj, const char *name, double val, int search_flags)
+{
+ return set_number(obj, name, val, 1, 1, search_flags);
+}
+
+int av_opt_set_q(void *obj, const char *name, AVRational val, int search_flags)
+{
+ return set_number(obj, name, val.num, val.den, 1, search_flags);
+}
+
+int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
+{
+ void *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ uint8_t *ptr;
+ uint8_t **dst;
+ int *lendst;
+
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+
+ if (o->type != AV_OPT_TYPE_BINARY || o->flags & AV_OPT_FLAG_READONLY)
+ return AVERROR(EINVAL);
+
+ ptr = len ? av_malloc(len) : NULL;
+ if (len && !ptr)
+ return AVERROR(ENOMEM);
+
+ dst = (uint8_t **)(((uint8_t *)target_obj) + o->offset);
+ lendst = (int *)(dst + 1);
+
+ av_free(*dst);
+ *dst = ptr;
+ *lendst = len;
+ if (len)
+ memcpy(ptr, val, len);
+
+ return 0;
+}
+
+int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags)
+{
+ void *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->type != AV_OPT_TYPE_IMAGE_SIZE) {
+ av_log(obj, AV_LOG_ERROR,
+ "The value set by option '%s' is not an image size.\n", o->name);
+ return AVERROR(EINVAL);
+ }
+ if (w<0 || h<0) {
+ av_log(obj, AV_LOG_ERROR,
+ "Invalid negative size value %dx%d for size '%s'\n", w, h, o->name);
+ return AVERROR(EINVAL);
+ }
+ *(int *)(((uint8_t *)target_obj) + o->offset) = w;
+ *(int *)(((uint8_t *)target_obj+sizeof(int)) + o->offset) = h;
+ return 0;
+}
+
+int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags)
+{
+ void *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->type != AV_OPT_TYPE_VIDEO_RATE) {
+ av_log(obj, AV_LOG_ERROR,
+ "The value set by option '%s' is not a video rate.\n", o->name);
+ return AVERROR(EINVAL);
+ }
+ if (val.num <= 0 || val.den <= 0)
+ return AVERROR(EINVAL);
+ return set_number(obj, name, val.num, val.den, 1, search_flags);
+}
+
+static int set_format(void *obj, const char *name, int fmt, int search_flags,
+ enum AVOptionType type, const char *desc, int nb_fmts)
+{
+ void *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0,
+ search_flags, &target_obj);
+ int min, max;
+
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->type != type) {
+ av_log(obj, AV_LOG_ERROR,
+ "The value set by option '%s' is not a %s format", name, desc);
+ return AVERROR(EINVAL);
+ }
+
+ min = FFMAX(o->min, -1);
+ max = FFMIN(o->max, nb_fmts-1);
+
+ if (fmt < min || fmt > max) {
+ av_log(obj, AV_LOG_ERROR,
+ "Value %d for parameter '%s' out of %s format range [%d - %d]\n",
+ fmt, name, desc, min, max);
+ return AVERROR(ERANGE);
+ }
+ *(int *)(((uint8_t *)target_obj) + o->offset) = fmt;
+ return 0;
+}
+
+int av_opt_set_pixel_fmt(void *obj, const char *name, enum AVPixelFormat fmt, int search_flags)
+{
+ return set_format(obj, name, fmt, search_flags, AV_OPT_TYPE_PIXEL_FMT, "pixel", AV_PIX_FMT_NB);
+}
+
+int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags)
+{
+ return set_format(obj, name, fmt, search_flags, AV_OPT_TYPE_SAMPLE_FMT, "sample", AV_SAMPLE_FMT_NB);
+}
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+int av_opt_set_channel_layout(void *obj, const char *name, int64_t cl, int search_flags)
+{
+ void *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->type != AV_OPT_TYPE_CHANNEL_LAYOUT) {
+ av_log(obj, AV_LOG_ERROR,
+ "The value set by option '%s' is not a channel layout.\n", o->name);
+ return AVERROR(EINVAL);
+ }
+ *(int64_t *)(((uint8_t *)target_obj) + o->offset) = cl;
+ return 0;
+}
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+int av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val,
+ int search_flags)
+{
+ void *target_obj;
+ AVDictionary **dst;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->flags & AV_OPT_FLAG_READONLY)
+ return AVERROR(EINVAL);
+
+ dst = (AVDictionary **)(((uint8_t *)target_obj) + o->offset);
+ av_dict_free(dst);
+ av_dict_copy(dst, val, 0);
+
+ return 0;
+}
+
+int av_opt_set_chlayout(void *obj, const char *name,
+ const AVChannelLayout *channel_layout,
+ int search_flags)
+{
+ void *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ AVChannelLayout *dst;
+
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+
+ dst = (AVChannelLayout*)((uint8_t*)target_obj + o->offset);
+
+ return av_channel_layout_copy(dst, channel_layout);
+}
+
+static void format_duration(char *buf, size_t size, int64_t d)
+{
+ char *e;
+
+ av_assert0(size >= 25);
+ if (d < 0 && d != INT64_MIN) {
+ *(buf++) = '-';
+ size--;
+ d = -d;
+ }
+ if (d == INT64_MAX)
+ snprintf(buf, size, "INT64_MAX");
+ else if (d == INT64_MIN)
+ snprintf(buf, size, "INT64_MIN");
+ else if (d > (int64_t)3600*1000000)
+ snprintf(buf, size, "%"PRId64":%02d:%02d.%06d", d / 3600000000,
+ (int)((d / 60000000) % 60),
+ (int)((d / 1000000) % 60),
+ (int)(d % 1000000));
+ else if (d > 60*1000000)
+ snprintf(buf, size, "%d:%02d.%06d",
+ (int)(d / 60000000),
+ (int)((d / 1000000) % 60),
+ (int)(d % 1000000));
+ else
+ snprintf(buf, size, "%d.%06d",
+ (int)(d / 1000000),
+ (int)(d % 1000000));
+ e = buf + strlen(buf);
+ while (e > buf && e[-1] == '0')
+ *(--e) = 0;
+ if (e > buf && e[-1] == '.')
+ *(--e) = 0;
+}
+
+int av_opt_get(void *obj, const char *name, int search_flags, uint8_t **out_val)
+{
+ void *dst, *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ uint8_t *bin, buf[128];
+ int len, i, ret;
+ int64_t i64;
+
+ if (!o || !target_obj || (o->offset<=0 && o->type != AV_OPT_TYPE_CONST))
+ return AVERROR_OPTION_NOT_FOUND;
+
+ if (o->flags & AV_OPT_FLAG_DEPRECATED)
+ av_log(obj, AV_LOG_WARNING, "The \"%s\" option is deprecated: %s\n", name, o->help);
+
+ dst = (uint8_t *)target_obj + o->offset;
+
+ buf[0] = 0;
+ switch (o->type) {
+ case AV_OPT_TYPE_BOOL:
+ ret = snprintf(buf, sizeof(buf), "%s", (char *)av_x_if_null(get_bool_name(*(int *)dst), "invalid"));
+ break;
+ case AV_OPT_TYPE_FLAGS:
+ ret = snprintf(buf, sizeof(buf), "0x%08X", *(int *)dst);
+ break;
+ case AV_OPT_TYPE_INT:
+ ret = snprintf(buf, sizeof(buf), "%d", *(int *)dst);
+ break;
+ case AV_OPT_TYPE_INT64:
+ ret = snprintf(buf, sizeof(buf), "%"PRId64, *(int64_t *)dst);
+ break;
+ case AV_OPT_TYPE_UINT64:
+ ret = snprintf(buf, sizeof(buf), "%"PRIu64, *(uint64_t *)dst);
+ break;
+ case AV_OPT_TYPE_FLOAT:
+ ret = snprintf(buf, sizeof(buf), "%f", *(float *)dst);
+ break;
+ case AV_OPT_TYPE_DOUBLE:
+ ret = snprintf(buf, sizeof(buf), "%f", *(double *)dst);
+ break;
+ case AV_OPT_TYPE_VIDEO_RATE:
+ case AV_OPT_TYPE_RATIONAL:
+ ret = snprintf(buf, sizeof(buf), "%d/%d", ((AVRational *)dst)->num, ((AVRational *)dst)->den);
+ break;
+ case AV_OPT_TYPE_CONST:
+ ret = snprintf(buf, sizeof(buf), "%f", o->default_val.dbl);
+ break;
+ case AV_OPT_TYPE_STRING:
+ if (*(uint8_t **)dst) {
+ *out_val = av_strdup(*(uint8_t **)dst);
+ } else if (search_flags & AV_OPT_ALLOW_NULL) {
+ *out_val = NULL;
+ return 0;
+ } else {
+ *out_val = av_strdup("");
+ }
+ return *out_val ? 0 : AVERROR(ENOMEM);
+ case AV_OPT_TYPE_BINARY:
+ if (!*(uint8_t **)dst && (search_flags & AV_OPT_ALLOW_NULL)) {
+ *out_val = NULL;
+ return 0;
+ }
+ len = *(int *)(((uint8_t *)dst) + sizeof(uint8_t *));
+ if ((uint64_t)len * 2 + 1 > INT_MAX)
+ return AVERROR(EINVAL);
+ if (!(*out_val = av_malloc(len * 2 + 1)))
+ return AVERROR(ENOMEM);
+ if (!len) {
+ *out_val[0] = '\0';
+ return 0;
+ }
+ bin = *(uint8_t **)dst;
+ for (i = 0; i < len; i++)
+ snprintf(*out_val + i * 2, 3, "%02X", bin[i]);
+ return 0;
+ case AV_OPT_TYPE_IMAGE_SIZE:
+ ret = snprintf(buf, sizeof(buf), "%dx%d", ((int *)dst)[0], ((int *)dst)[1]);
+ break;
+ case AV_OPT_TYPE_PIXEL_FMT:
+ ret = snprintf(buf, sizeof(buf), "%s", (char *)av_x_if_null(av_get_pix_fmt_name(*(enum AVPixelFormat *)dst), "none"));
+ break;
+ case AV_OPT_TYPE_SAMPLE_FMT:
+ ret = snprintf(buf, sizeof(buf), "%s", (char *)av_x_if_null(av_get_sample_fmt_name(*(enum AVSampleFormat *)dst), "none"));
+ break;
+ case AV_OPT_TYPE_DURATION:
+ i64 = *(int64_t *)dst;
+ format_duration(buf, sizeof(buf), i64);
+ ret = strlen(buf); // no overflow possible, checked by an assert
+ break;
+ case AV_OPT_TYPE_COLOR:
+ ret = snprintf(buf, sizeof(buf), "0x%02x%02x%02x%02x",
+ (int)((uint8_t *)dst)[0], (int)((uint8_t *)dst)[1],
+ (int)((uint8_t *)dst)[2], (int)((uint8_t *)dst)[3]);
+ break;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+    case AV_OPT_TYPE_CHANNEL_LAYOUT:
+        i64 = *(int64_t *)dst;
+ ret = snprintf(buf, sizeof(buf), "0x%"PRIx64, i64);
+ break;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ case AV_OPT_TYPE_CHLAYOUT:
+ ret = av_channel_layout_describe(dst, buf, sizeof(buf));
+ break;
+ case AV_OPT_TYPE_DICT:
+ if (!*(AVDictionary **)dst && (search_flags & AV_OPT_ALLOW_NULL)) {
+ *out_val = NULL;
+ return 0;
+ }
+ return av_dict_get_string(*(AVDictionary **)dst, (char **)out_val, '=', ':');
+ default:
+ return AVERROR(EINVAL);
+ }
+
+ if (ret >= sizeof(buf))
+ return AVERROR(EINVAL);
+ *out_val = av_strdup(buf);
+ return *out_val ? 0 : AVERROR(ENOMEM);
+}
+
+static int get_number(void *obj, const char *name, const AVOption **o_out, double *num, int *den, int64_t *intnum,
+ int search_flags)
+{
+ void *dst, *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ if (!o || !target_obj)
+ goto error;
+
+ dst = ((uint8_t *)target_obj) + o->offset;
+
+ if (o_out) *o_out= o;
+
+ return read_number(o, dst, num, den, intnum);
+
+error:
+ *den =
+ *intnum = 0;
+ return -1;
+}
+
+int av_opt_get_int(void *obj, const char *name, int search_flags, int64_t *out_val)
+{
+ int64_t intnum = 1;
+ double num = 1;
+ int ret, den = 1;
+
+ if ((ret = get_number(obj, name, NULL, &num, &den, &intnum, search_flags)) < 0)
+ return ret;
+ if (num == den)
+ *out_val = intnum;
+ else
+ *out_val = num * intnum / den;
+ return 0;
+}
+
+int av_opt_get_double(void *obj, const char *name, int search_flags, double *out_val)
+{
+ int64_t intnum = 1;
+ double num = 1;
+ int ret, den = 1;
+
+ if ((ret = get_number(obj, name, NULL, &num, &den, &intnum, search_flags)) < 0)
+ return ret;
+ *out_val = num * intnum / den;
+ return 0;
+}
+
+int av_opt_get_q(void *obj, const char *name, int search_flags, AVRational *out_val)
+{
+ int64_t intnum = 1;
+ double num = 1;
+ int ret, den = 1;
+
+ if ((ret = get_number(obj, name, NULL, &num, &den, &intnum, search_flags)) < 0)
+ return ret;
+
+ if (num == 1.0 && (int)intnum == intnum)
+ *out_val = (AVRational){intnum, den};
+ else
+ *out_val = av_d2q(num*intnum/den, 1<<24);
+ return 0;
+}
+
+int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out)
+{
+ void *dst, *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->type != AV_OPT_TYPE_IMAGE_SIZE) {
+ av_log(obj, AV_LOG_ERROR,
+ "The value for option '%s' is not an image size.\n", name);
+ return AVERROR(EINVAL);
+ }
+
+ dst = ((uint8_t*)target_obj) + o->offset;
+ if (w_out) *w_out = *(int *)dst;
+ if (h_out) *h_out = *((int *)dst+1);
+ return 0;
+}
+
+int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val)
+{
+ int64_t intnum = 1;
+ double num = 1;
+ int ret, den = 1;
+
+ if ((ret = get_number(obj, name, NULL, &num, &den, &intnum, search_flags)) < 0)
+ return ret;
+
+ if (num == 1.0 && (int)intnum == intnum)
+ *out_val = (AVRational) { intnum, den };
+ else
+ *out_val = av_d2q(num * intnum / den, 1 << 24);
+ return 0;
+}
+
+static int get_format(void *obj, const char *name, int search_flags, int *out_fmt,
+ enum AVOptionType type, const char *desc)
+{
+ void *dst, *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->type != type) {
+ av_log(obj, AV_LOG_ERROR,
+ "The value for option '%s' is not a %s format.\n", desc, name);
+ return AVERROR(EINVAL);
+ }
+
+ dst = ((uint8_t*)target_obj) + o->offset;
+ *out_fmt = *(int *)dst;
+ return 0;
+}
+
+int av_opt_get_pixel_fmt(void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt)
+{
+ return get_format(obj, name, search_flags, out_fmt, AV_OPT_TYPE_PIXEL_FMT, "pixel");
+}
+
+int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt)
+{
+ return get_format(obj, name, search_flags, out_fmt, AV_OPT_TYPE_SAMPLE_FMT, "sample");
+}
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *cl)
+{
+ void *dst, *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->type != AV_OPT_TYPE_CHANNEL_LAYOUT) {
+ av_log(obj, AV_LOG_ERROR,
+ "The value for option '%s' is not a channel layout.\n", name);
+ return AVERROR(EINVAL);
+ }
+
+ dst = ((uint8_t*)target_obj) + o->offset;
+ *cl = *(int64_t *)dst;
+ return 0;
+}
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+int av_opt_get_chlayout(void *obj, const char *name, int search_flags, AVChannelLayout *cl)
+{
+ void *dst, *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->type != AV_OPT_TYPE_CHLAYOUT) {
+ av_log(obj, AV_LOG_ERROR,
+ "The value for option '%s' is not a channel layout.\n", name);
+ return AVERROR(EINVAL);
+ }
+
+ dst = ((uint8_t*)target_obj) + o->offset;
+ return av_channel_layout_copy(cl, dst);
+}
+
+int av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val)
+{
+ void *target_obj;
+ AVDictionary *src;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->type != AV_OPT_TYPE_DICT)
+ return AVERROR(EINVAL);
+
+ src = *(AVDictionary **)(((uint8_t *)target_obj) + o->offset);
+ av_dict_copy(out_val, src, 0);
+
+ return 0;
+}
+
+int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name)
+{
+ const AVOption *field = av_opt_find(obj, field_name, NULL, 0, 0);
+ const AVOption *flag = av_opt_find(obj, flag_name,
+ field ? field->unit : NULL, 0, 0);
+ int64_t res;
+
+ if (!field || !flag || flag->type != AV_OPT_TYPE_CONST ||
+ av_opt_get_int(obj, field_name, 0, &res) < 0)
+ return 0;
+ return res & flag->default_val.i64;
+}
+
+static void log_int_value(void *av_log_obj, int level, int64_t i)
+{
+ if (i == INT_MAX) {
+ av_log(av_log_obj, level, "INT_MAX");
+ } else if (i == INT_MIN) {
+ av_log(av_log_obj, level, "INT_MIN");
+ } else if (i == UINT32_MAX) {
+ av_log(av_log_obj, level, "UINT32_MAX");
+ } else if (i == INT64_MAX) {
+ av_log(av_log_obj, level, "I64_MAX");
+ } else if (i == INT64_MIN) {
+ av_log(av_log_obj, level, "I64_MIN");
+ } else {
+ av_log(av_log_obj, level, "%"PRId64, i);
+ }
+}
+
+static void log_value(void *av_log_obj, int level, double d)
+{
+ if (d == INT_MAX) {
+ av_log(av_log_obj, level, "INT_MAX");
+ } else if (d == INT_MIN) {
+ av_log(av_log_obj, level, "INT_MIN");
+ } else if (d == UINT32_MAX) {
+ av_log(av_log_obj, level, "UINT32_MAX");
+ } else if (d == (double)INT64_MAX) {
+ av_log(av_log_obj, level, "I64_MAX");
+ } else if (d == INT64_MIN) {
+ av_log(av_log_obj, level, "I64_MIN");
+ } else if (d == FLT_MAX) {
+ av_log(av_log_obj, level, "FLT_MAX");
+ } else if (d == FLT_MIN) {
+ av_log(av_log_obj, level, "FLT_MIN");
+ } else if (d == -FLT_MAX) {
+ av_log(av_log_obj, level, "-FLT_MAX");
+ } else if (d == -FLT_MIN) {
+ av_log(av_log_obj, level, "-FLT_MIN");
+ } else if (d == DBL_MAX) {
+ av_log(av_log_obj, level, "DBL_MAX");
+ } else if (d == DBL_MIN) {
+ av_log(av_log_obj, level, "DBL_MIN");
+ } else if (d == -DBL_MAX) {
+ av_log(av_log_obj, level, "-DBL_MAX");
+ } else if (d == -DBL_MIN) {
+ av_log(av_log_obj, level, "-DBL_MIN");
+ } else {
+ av_log(av_log_obj, level, "%g", d);
+ }
+}
+
+static const char *get_opt_const_name(void *obj, const char *unit, int64_t value)
+{
+ const AVOption *opt = NULL;
+
+ if (!unit)
+ return NULL;
+ while ((opt = av_opt_next(obj, opt)))
+ if (opt->type == AV_OPT_TYPE_CONST && !strcmp(opt->unit, unit) &&
+ opt->default_val.i64 == value)
+ return opt->name;
+ return NULL;
+}
+
+static char *get_opt_flags_string(void *obj, const char *unit, int64_t value)
+{
+ const AVOption *opt = NULL;
+ char flags[512];
+
+ flags[0] = 0;
+ if (!unit)
+ return NULL;
+ while ((opt = av_opt_next(obj, opt))) {
+ if (opt->type == AV_OPT_TYPE_CONST && !strcmp(opt->unit, unit) &&
+ opt->default_val.i64 & value) {
+ if (flags[0])
+ av_strlcatf(flags, sizeof(flags), "+");
+ av_strlcatf(flags, sizeof(flags), "%s", opt->name);
+ }
+ }
+ if (flags[0])
+ return av_strdup(flags);
+ return NULL;
+}
+
+static void opt_list(void *obj, void *av_log_obj, const char *unit,
+ int req_flags, int rej_flags, enum AVOptionType parent_type)
+{
+ const AVOption *opt = NULL;
+ AVOptionRanges *r;
+ int i;
+
+ while ((opt = av_opt_next(obj, opt))) {
+ if (!(opt->flags & req_flags) || (opt->flags & rej_flags))
+ continue;
+
+ /* Don't print CONST's on level one.
+ * Don't print anything but CONST's on level two.
+ * Only print items from the requested unit.
+ */
+ if (!unit && opt->type == AV_OPT_TYPE_CONST)
+ continue;
+ else if (unit && opt->type != AV_OPT_TYPE_CONST)
+ continue;
+ else if (unit && opt->type == AV_OPT_TYPE_CONST && strcmp(unit, opt->unit))
+ continue;
+ else if (unit && opt->type == AV_OPT_TYPE_CONST)
+ av_log(av_log_obj, AV_LOG_INFO, " %-15s ", opt->name);
+ else
+ av_log(av_log_obj, AV_LOG_INFO, " %s%-17s ",
+ (opt->flags & AV_OPT_FLAG_FILTERING_PARAM) ? " " : "-",
+ opt->name);
+
+ switch (opt->type) {
+ case AV_OPT_TYPE_FLAGS:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<flags>");
+ break;
+ case AV_OPT_TYPE_INT:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<int>");
+ break;
+ case AV_OPT_TYPE_INT64:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<int64>");
+ break;
+ case AV_OPT_TYPE_UINT64:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<uint64>");
+ break;
+ case AV_OPT_TYPE_DOUBLE:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<double>");
+ break;
+ case AV_OPT_TYPE_FLOAT:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<float>");
+ break;
+ case AV_OPT_TYPE_STRING:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<string>");
+ break;
+ case AV_OPT_TYPE_RATIONAL:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<rational>");
+ break;
+ case AV_OPT_TYPE_BINARY:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<binary>");
+ break;
+ case AV_OPT_TYPE_DICT:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<dictionary>");
+ break;
+ case AV_OPT_TYPE_IMAGE_SIZE:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<image_size>");
+ break;
+ case AV_OPT_TYPE_VIDEO_RATE:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<video_rate>");
+ break;
+ case AV_OPT_TYPE_PIXEL_FMT:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<pix_fmt>");
+ break;
+ case AV_OPT_TYPE_SAMPLE_FMT:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<sample_fmt>");
+ break;
+ case AV_OPT_TYPE_DURATION:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<duration>");
+ break;
+ case AV_OPT_TYPE_COLOR:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<color>");
+ break;
+ case AV_OPT_TYPE_CHLAYOUT:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<channel_layout>");
+ break;
+ case AV_OPT_TYPE_BOOL:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<boolean>");
+ break;
+ case AV_OPT_TYPE_CONST:
+ if (parent_type == AV_OPT_TYPE_INT)
+ av_log(av_log_obj, AV_LOG_INFO, "%-12"PRId64" ", opt->default_val.i64);
+ else
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "");
+ break;
+ default:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "");
+ break;
+ }
+ av_log(av_log_obj, AV_LOG_INFO, "%c%c%c%c%c%c%c%c%c%c%c",
+ (opt->flags & AV_OPT_FLAG_ENCODING_PARAM) ? 'E' : '.',
+ (opt->flags & AV_OPT_FLAG_DECODING_PARAM) ? 'D' : '.',
+ (opt->flags & AV_OPT_FLAG_FILTERING_PARAM) ? 'F' : '.',
+ (opt->flags & AV_OPT_FLAG_VIDEO_PARAM) ? 'V' : '.',
+ (opt->flags & AV_OPT_FLAG_AUDIO_PARAM) ? 'A' : '.',
+ (opt->flags & AV_OPT_FLAG_SUBTITLE_PARAM) ? 'S' : '.',
+ (opt->flags & AV_OPT_FLAG_EXPORT) ? 'X' : '.',
+ (opt->flags & AV_OPT_FLAG_READONLY) ? 'R' : '.',
+ (opt->flags & AV_OPT_FLAG_BSF_PARAM) ? 'B' : '.',
+ (opt->flags & AV_OPT_FLAG_RUNTIME_PARAM) ? 'T' : '.',
+ (opt->flags & AV_OPT_FLAG_DEPRECATED) ? 'P' : '.');
+
+ if (opt->help)
+ av_log(av_log_obj, AV_LOG_INFO, " %s", opt->help);
+
+ if (av_opt_query_ranges(&r, obj, opt->name, AV_OPT_SEARCH_FAKE_OBJ) >= 0) {
+ switch (opt->type) {
+ case AV_OPT_TYPE_INT:
+ case AV_OPT_TYPE_INT64:
+ case AV_OPT_TYPE_UINT64:
+ case AV_OPT_TYPE_DOUBLE:
+ case AV_OPT_TYPE_FLOAT:
+ case AV_OPT_TYPE_RATIONAL:
+ for (i = 0; i < r->nb_ranges; i++) {
+ av_log(av_log_obj, AV_LOG_INFO, " (from ");
+ log_value(av_log_obj, AV_LOG_INFO, r->range[i]->value_min);
+ av_log(av_log_obj, AV_LOG_INFO, " to ");
+ log_value(av_log_obj, AV_LOG_INFO, r->range[i]->value_max);
+ av_log(av_log_obj, AV_LOG_INFO, ")");
+ }
+ break;
+ }
+ av_opt_freep_ranges(&r);
+ }
+
+ if (opt->type != AV_OPT_TYPE_CONST &&
+ opt->type != AV_OPT_TYPE_BINARY &&
+ !((opt->type == AV_OPT_TYPE_COLOR ||
+ opt->type == AV_OPT_TYPE_IMAGE_SIZE ||
+ opt->type == AV_OPT_TYPE_STRING ||
+ opt->type == AV_OPT_TYPE_DICT ||
+ opt->type == AV_OPT_TYPE_CHLAYOUT ||
+ opt->type == AV_OPT_TYPE_VIDEO_RATE) &&
+ !opt->default_val.str)) {
+ av_log(av_log_obj, AV_LOG_INFO, " (default ");
+ switch (opt->type) {
+ case AV_OPT_TYPE_BOOL:
+ av_log(av_log_obj, AV_LOG_INFO, "%s", (char *)av_x_if_null(get_bool_name(opt->default_val.i64), "invalid"));
+ break;
+ case AV_OPT_TYPE_FLAGS: {
+ char *def_flags = get_opt_flags_string(obj, opt->unit, opt->default_val.i64);
+ if (def_flags) {
+ av_log(av_log_obj, AV_LOG_INFO, "%s", def_flags);
+ av_freep(&def_flags);
+ } else {
+ av_log(av_log_obj, AV_LOG_INFO, "%"PRIX64, opt->default_val.i64);
+ }
+ break;
+ }
+ case AV_OPT_TYPE_DURATION: {
+ char buf[25];
+ format_duration(buf, sizeof(buf), opt->default_val.i64);
+ av_log(av_log_obj, AV_LOG_INFO, "%s", buf);
+ break;
+ }
+ case AV_OPT_TYPE_INT:
+ case AV_OPT_TYPE_UINT64:
+ case AV_OPT_TYPE_INT64: {
+ const char *def_const = get_opt_const_name(obj, opt->unit, opt->default_val.i64);
+ if (def_const)
+ av_log(av_log_obj, AV_LOG_INFO, "%s", def_const);
+ else
+ log_int_value(av_log_obj, AV_LOG_INFO, opt->default_val.i64);
+ break;
+ }
+ case AV_OPT_TYPE_DOUBLE:
+ case AV_OPT_TYPE_FLOAT:
+ log_value(av_log_obj, AV_LOG_INFO, opt->default_val.dbl);
+ break;
+ case AV_OPT_TYPE_RATIONAL: {
+ AVRational q = av_d2q(opt->default_val.dbl, INT_MAX);
+ av_log(av_log_obj, AV_LOG_INFO, "%d/%d", q.num, q.den); }
+ break;
+ case AV_OPT_TYPE_PIXEL_FMT:
+ av_log(av_log_obj, AV_LOG_INFO, "%s", (char *)av_x_if_null(av_get_pix_fmt_name(opt->default_val.i64), "none"));
+ break;
+ case AV_OPT_TYPE_SAMPLE_FMT:
+ av_log(av_log_obj, AV_LOG_INFO, "%s", (char *)av_x_if_null(av_get_sample_fmt_name(opt->default_val.i64), "none"));
+ break;
+ case AV_OPT_TYPE_COLOR:
+ case AV_OPT_TYPE_IMAGE_SIZE:
+ case AV_OPT_TYPE_STRING:
+ case AV_OPT_TYPE_DICT:
+ case AV_OPT_TYPE_VIDEO_RATE:
+ case AV_OPT_TYPE_CHLAYOUT:
+ av_log(av_log_obj, AV_LOG_INFO, "\"%s\"", opt->default_val.str);
+ break;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ case AV_OPT_TYPE_CHANNEL_LAYOUT:
+ av_log(av_log_obj, AV_LOG_INFO, "0x%"PRIx64, opt->default_val.i64);
+ break;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ }
+ av_log(av_log_obj, AV_LOG_INFO, ")");
+ }
+
+ av_log(av_log_obj, AV_LOG_INFO, "\n");
+ if (opt->unit && opt->type != AV_OPT_TYPE_CONST)
+ opt_list(obj, av_log_obj, opt->unit, req_flags, rej_flags, opt->type);
+ }
+}
+
+int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags)
+{
+ if (!obj)
+ return -1;
+
+ av_log(av_log_obj, AV_LOG_INFO, "%s AVOptions:\n", (*(AVClass **)obj)->class_name);
+
+ opt_list(obj, av_log_obj, NULL, req_flags, rej_flags, -1);
+
+ return 0;
+}
+
+void av_opt_set_defaults(void *s)
+{
+ av_opt_set_defaults2(s, 0, 0);
+}
+
+void av_opt_set_defaults2(void *s, int mask, int flags)
+{
+ const AVOption *opt = NULL;
+ while ((opt = av_opt_next(s, opt))) {
+ void *dst = ((uint8_t*)s) + opt->offset;
+
+ if ((opt->flags & mask) != flags)
+ continue;
+
+ if (opt->flags & AV_OPT_FLAG_READONLY)
+ continue;
+
+ switch (opt->type) {
+ case AV_OPT_TYPE_CONST:
+ /* Nothing to be done here */
+ break;
+ case AV_OPT_TYPE_BOOL:
+ case AV_OPT_TYPE_FLAGS:
+ case AV_OPT_TYPE_INT:
+ case AV_OPT_TYPE_INT64:
+ case AV_OPT_TYPE_UINT64:
+ case AV_OPT_TYPE_DURATION:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ case AV_OPT_TYPE_PIXEL_FMT:
+ case AV_OPT_TYPE_SAMPLE_FMT:
+ write_number(s, opt, dst, 1, 1, opt->default_val.i64);
+ break;
+ case AV_OPT_TYPE_DOUBLE:
+ case AV_OPT_TYPE_FLOAT: {
+ double val;
+ val = opt->default_val.dbl;
+ write_number(s, opt, dst, val, 1, 1);
+ }
+ break;
+ case AV_OPT_TYPE_RATIONAL: {
+ AVRational val;
+ val = av_d2q(opt->default_val.dbl, INT_MAX);
+ write_number(s, opt, dst, 1, val.den, val.num);
+ }
+ break;
+ case AV_OPT_TYPE_COLOR:
+ set_string_color(s, opt, opt->default_val.str, dst);
+ break;
+ case AV_OPT_TYPE_STRING:
+ set_string(s, opt, opt->default_val.str, dst);
+ break;
+ case AV_OPT_TYPE_IMAGE_SIZE:
+ set_string_image_size(s, opt, opt->default_val.str, dst);
+ break;
+ case AV_OPT_TYPE_VIDEO_RATE:
+ set_string_video_rate(s, opt, opt->default_val.str, dst);
+ break;
+ case AV_OPT_TYPE_BINARY:
+ set_string_binary(s, opt, opt->default_val.str, dst);
+ break;
+ case AV_OPT_TYPE_CHLAYOUT:
+ set_string_channel_layout(s, opt, opt->default_val.str, dst);
+ break;
+ case AV_OPT_TYPE_DICT:
+ set_string_dict(s, opt, opt->default_val.str, dst);
+ break;
+ default:
+ av_log(s, AV_LOG_DEBUG, "AVOption type %d of option %s not implemented yet\n",
+ opt->type, opt->name);
+ }
+ }
+}
+
+/**
+ * Store the value in the field of ctx that is named like key.
+ * ctx must be an AVClass context; storing is done using AVOptions.
+ *
+ * @param buf the string to parse, buf will be updated to point at the
+ * separator just after the parsed key/value pair
+ * @param key_val_sep a 0-terminated list of characters used to
+ * separate key from value
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @return 0 if the key/value pair has been successfully parsed and
+ * set, or a negative value corresponding to an AVERROR code in case
+ * of error:
+ * AVERROR(EINVAL) if the key/value pair cannot be parsed,
+ * the error code issued by av_opt_set() if the key/value pair
+ * cannot be set
+ */
+static int parse_key_value_pair(void *ctx, const char **buf,
+ const char *key_val_sep, const char *pairs_sep)
+{
+ char *key = av_get_token(buf, key_val_sep);
+ char *val;
+ int ret;
+
+ if (!key)
+ return AVERROR(ENOMEM);
+
+ if (*key && strspn(*buf, key_val_sep)) {
+ (*buf)++;
+ val = av_get_token(buf, pairs_sep);
+ if (!val) {
+ av_freep(&key);
+ return AVERROR(ENOMEM);
+ }
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "Missing key or no key/value separator found after key '%s'\n", key);
+ av_free(key);
+ return AVERROR(EINVAL);
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "Setting entry with key '%s' to value '%s'\n", key, val);
+
+ ret = av_opt_set(ctx, key, val, AV_OPT_SEARCH_CHILDREN);
+ if (ret == AVERROR_OPTION_NOT_FOUND)
+ av_log(ctx, AV_LOG_ERROR, "Key '%s' not found.\n", key);
+
+ av_free(key);
+ av_free(val);
+ return ret;
+}
+
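+/*
+ * Illustrative sketch of av_set_options_string() usage; "width" and "title"
+ * are hypothetical option names, and ctx stands for any AVOptions-enabled
+ * object that exposes them:
+ *
+ *     av_set_options_string(ctx, "width=640:title=test", "=", ":");
+ *
+ * This parses the two key/value pairs and applies each of them with
+ * av_opt_set(), searching child objects as well (AV_OPT_SEARCH_CHILDREN).
+ */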
+int av_set_options_string(void *ctx, const char *opts,
+ const char *key_val_sep, const char *pairs_sep)
+{
+ int ret, count = 0;
+
+ if (!opts)
+ return 0;
+
+ while (*opts) {
+ if ((ret = parse_key_value_pair(ctx, &opts, key_val_sep, pairs_sep)) < 0)
+ return ret;
+ count++;
+
+ if (*opts)
+ opts++;
+ }
+
+ return count;
+}
+
+#define WHITESPACES " \n\t\r"
+
+static int is_key_char(char c)
+{
+ return (unsigned)((c | 32) - 'a') < 26 ||
+ (unsigned)(c - '0') < 10 ||
+ c == '-' || c == '_' || c == '/' || c == '.';
+}
+
+/**
+ * Read a key from a string.
+ *
+ * The key consists of is_key_char characters and must be terminated by a
+ * character from the delim string; spaces are ignored.
+ *
+ * @return 0 for success (even with ellipsis), <0 for failure
+ */
+static int get_key(const char **ropts, const char *delim, char **rkey)
+{
+ const char *opts = *ropts;
+ const char *key_start, *key_end;
+
+ key_start = opts += strspn(opts, WHITESPACES);
+ while (is_key_char(*opts))
+ opts++;
+ key_end = opts;
+ opts += strspn(opts, WHITESPACES);
+ if (!*opts || !strchr(delim, *opts))
+ return AVERROR(EINVAL);
+ opts++;
+ if (!(*rkey = av_malloc(key_end - key_start + 1)))
+ return AVERROR(ENOMEM);
+ memcpy(*rkey, key_start, key_end - key_start);
+ (*rkey)[key_end - key_start] = 0;
+ *ropts = opts;
+ return 0;
+}
+
+int av_opt_get_key_value(const char **ropts,
+ const char *key_val_sep, const char *pairs_sep,
+ unsigned flags,
+ char **rkey, char **rval)
+{
+ int ret;
+ char *key = NULL, *val;
+ const char *opts = *ropts;
+
+ if ((ret = get_key(&opts, key_val_sep, &key)) < 0 &&
+ !(flags & AV_OPT_FLAG_IMPLICIT_KEY))
+ return AVERROR(EINVAL);
+ if (!(val = av_get_token(&opts, pairs_sep))) {
+ av_free(key);
+ return AVERROR(ENOMEM);
+ }
+ *ropts = opts;
+ *rkey = key;
+ *rval = val;
+ return 0;
+}
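+
+/*
+ * Illustrative sketch of av_opt_get_key_value() usage; the input string and
+ * separators are arbitrary examples. The pairs of "foo=bar:baz=qux" can be
+ * pulled out one by one without applying them to any object:
+ *
+ *     const char *p = "foo=bar:baz=qux";
+ *     char *key, *val;
+ *     while (*p && av_opt_get_key_value(&p, "=", ":", 0, &key, &val) >= 0) {
+ *         av_log(NULL, AV_LOG_DEBUG, "%s -> %s\n", key, val);
+ *         av_free(key);
+ *         av_free(val);
+ *         if (*p)
+ *             p++; // skip the pairs separator left in place by the parser
+ *     }
+ */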
+
+int av_opt_set_from_string(void *ctx, const char *opts,
+ const char *const *shorthand,
+ const char *key_val_sep, const char *pairs_sep)
+{
+ int ret, count = 0;
+ const char *dummy_shorthand = NULL;
+ char *av_uninit(parsed_key), *av_uninit(value);
+ const char *key;
+
+ if (!opts)
+ return 0;
+ if (!shorthand)
+ shorthand = &dummy_shorthand;
+
+ while (*opts) {
+ ret = av_opt_get_key_value(&opts, key_val_sep, pairs_sep,
+ *shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
+ &parsed_key, &value);
+ if (ret < 0) {
+ if (ret == AVERROR(EINVAL))
+ av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", opts);
+ else
+ av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", opts,
+ av_err2str(ret));
+ return ret;
+ }
+ if (*opts)
+ opts++;
+ if (parsed_key) {
+ key = parsed_key;
+ while (*shorthand) /* discard all remaining shorthand */
+ shorthand++;
+ } else {
+ key = *(shorthand++);
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
+ if ((ret = av_opt_set(ctx, key, value, 0)) < 0) {
+ if (ret == AVERROR_OPTION_NOT_FOUND)
+ av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
+ av_free(value);
+ av_free(parsed_key);
+ return ret;
+ }
+
+ av_free(value);
+ av_free(parsed_key);
+ count++;
+ }
+ return count;
+}
+
+void av_opt_free(void *obj)
+{
+ const AVOption *o = NULL;
+ while ((o = av_opt_next(obj, o))) {
+ switch (o->type) {
+ case AV_OPT_TYPE_STRING:
+ case AV_OPT_TYPE_BINARY:
+ av_freep((uint8_t *)obj + o->offset);
+ break;
+
+ case AV_OPT_TYPE_DICT:
+ av_dict_free((AVDictionary **)(((uint8_t *)obj) + o->offset));
+ break;
+
+ case AV_OPT_TYPE_CHLAYOUT:
+ av_channel_layout_uninit((AVChannelLayout *)(((uint8_t *)obj) + o->offset));
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+int av_opt_set_dict2(void *obj, AVDictionary **options, int search_flags)
+{
+ const AVDictionaryEntry *t = NULL;
+ AVDictionary *tmp = NULL;
+ int ret;
+
+ if (!options)
+ return 0;
+
+ while ((t = av_dict_iterate(*options, t))) {
+ ret = av_opt_set(obj, t->key, t->value, search_flags);
+ if (ret == AVERROR_OPTION_NOT_FOUND)
+ ret = av_dict_set(&tmp, t->key, t->value, AV_DICT_MULTIKEY);
+ if (ret < 0) {
+ av_log(obj, AV_LOG_ERROR, "Error setting option %s to value %s.\n", t->key, t->value);
+ av_dict_free(&tmp);
+ return ret;
+ }
+ }
+ av_dict_free(options);
+ *options = tmp;
+ return 0;
+}
+
+int av_opt_set_dict(void *obj, AVDictionary **options)
+{
+ return av_opt_set_dict2(obj, options, 0);
+}
+
+const AVOption *av_opt_find(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags)
+{
+ return av_opt_find2(obj, name, unit, opt_flags, search_flags, NULL);
+}
+
+const AVOption *av_opt_find2(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags, void **target_obj)
+{
+ const AVClass *c;
+ const AVOption *o = NULL;
+
+ if(!obj)
+ return NULL;
+
+ c= *(AVClass**)obj;
+
+ if (!c)
+ return NULL;
+
+ if (search_flags & AV_OPT_SEARCH_CHILDREN) {
+ if (search_flags & AV_OPT_SEARCH_FAKE_OBJ) {
+ void *iter = NULL;
+ const AVClass *child;
+ while (child = av_opt_child_class_iterate(c, &iter))
+ if (o = av_opt_find2(&child, name, unit, opt_flags, search_flags, NULL))
+ return o;
+ } else {
+ void *child = NULL;
+ while (child = av_opt_child_next(obj, child))
+ if (o = av_opt_find2(child, name, unit, opt_flags, search_flags, target_obj))
+ return o;
+ }
+ }
+
+ while (o = av_opt_next(obj, o)) {
+ if (!strcmp(o->name, name) && (o->flags & opt_flags) == opt_flags &&
+ ((!unit && o->type != AV_OPT_TYPE_CONST) ||
+ (unit && o->type == AV_OPT_TYPE_CONST && o->unit && !strcmp(o->unit, unit)))) {
+ if (target_obj) {
+ if (!(search_flags & AV_OPT_SEARCH_FAKE_OBJ))
+ *target_obj = obj;
+ else
+ *target_obj = NULL;
+ }
+ return o;
+ }
+ }
+ return NULL;
+}
+
+void *av_opt_child_next(void *obj, void *prev)
+{
+ const AVClass *c = *(AVClass **)obj;
+ if (c->child_next)
+ return c->child_next(obj, prev);
+ return NULL;
+}
+
+const AVClass *av_opt_child_class_iterate(const AVClass *parent, void **iter)
+{
+ if (parent->child_class_iterate)
+ return parent->child_class_iterate(iter);
+ return NULL;
+}
+
+void *av_opt_ptr(const AVClass *class, void *obj, const char *name)
+{
+ const AVOption *opt= av_opt_find2(&class, name, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ, NULL);
+ if(!opt)
+ return NULL;
+ return (uint8_t*)obj + opt->offset;
+}
+
+static int opt_size(enum AVOptionType type)
+{
+ switch(type) {
+ case AV_OPT_TYPE_BOOL:
+ case AV_OPT_TYPE_INT:
+ case AV_OPT_TYPE_FLAGS:
+ return sizeof(int);
+ case AV_OPT_TYPE_DURATION:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ case AV_OPT_TYPE_INT64:
+ case AV_OPT_TYPE_UINT64:
+ return sizeof(int64_t);
+ case AV_OPT_TYPE_DOUBLE:
+ return sizeof(double);
+ case AV_OPT_TYPE_FLOAT:
+ return sizeof(float);
+ case AV_OPT_TYPE_STRING:
+ return sizeof(uint8_t*);
+ case AV_OPT_TYPE_VIDEO_RATE:
+ case AV_OPT_TYPE_RATIONAL:
+ return sizeof(AVRational);
+ case AV_OPT_TYPE_BINARY:
+ return sizeof(uint8_t*) + sizeof(int);
+ case AV_OPT_TYPE_IMAGE_SIZE:
+ return sizeof(int[2]);
+ case AV_OPT_TYPE_PIXEL_FMT:
+ return sizeof(enum AVPixelFormat);
+ case AV_OPT_TYPE_SAMPLE_FMT:
+ return sizeof(enum AVSampleFormat);
+ case AV_OPT_TYPE_COLOR:
+ return 4;
+ }
+ return AVERROR(EINVAL);
+}
+
+int av_opt_copy(void *dst, const void *src)
+{
+ const AVOption *o = NULL;
+ const AVClass *c;
+ int ret = 0;
+
+ if (!src)
+ return AVERROR(EINVAL);
+
+ c = *(AVClass **)src;
+ if (!c || c != *(AVClass **)dst)
+ return AVERROR(EINVAL);
+
+ while ((o = av_opt_next(src, o))) {
+ void *field_dst = (uint8_t *)dst + o->offset;
+ void *field_src = (uint8_t *)src + o->offset;
+ uint8_t **field_dst8 = (uint8_t **)field_dst;
+ uint8_t **field_src8 = (uint8_t **)field_src;
+
+ if (o->type == AV_OPT_TYPE_STRING) {
+ if (*field_dst8 != *field_src8)
+ av_freep(field_dst8);
+ *field_dst8 = av_strdup(*field_src8);
+ if (*field_src8 && !*field_dst8)
+ ret = AVERROR(ENOMEM);
+ } else if (o->type == AV_OPT_TYPE_BINARY) {
+ int len = *(int *)(field_src8 + 1);
+ if (*field_dst8 != *field_src8)
+ av_freep(field_dst8);
+ *field_dst8 = av_memdup(*field_src8, len);
+ if (len && !*field_dst8) {
+ ret = AVERROR(ENOMEM);
+ len = 0;
+ }
+ *(int *)(field_dst8 + 1) = len;
+ } else if (o->type == AV_OPT_TYPE_CONST) {
+ // do nothing
+ } else if (o->type == AV_OPT_TYPE_DICT) {
+ AVDictionary **sdict = (AVDictionary **) field_src;
+ AVDictionary **ddict = (AVDictionary **) field_dst;
+ int ret2;
+ if (*sdict != *ddict)
+ av_dict_free(ddict);
+ *ddict = NULL;
+ ret2 = av_dict_copy(ddict, *sdict, 0);
+ if (ret2 < 0)
+ ret = ret2;
+ } else if (o->type == AV_OPT_TYPE_CHLAYOUT) {
+ if (field_dst != field_src)
+ ret = av_channel_layout_copy(field_dst, field_src);
+ } else {
+ int size = opt_size(o->type);
+ if (size < 0)
+ ret = size;
+ else
+ memcpy(field_dst, field_src, size);
+ }
+ }
+ return ret;
+}
+
+int av_opt_query_ranges(AVOptionRanges **ranges_arg, void *obj, const char *key, int flags)
+{
+ int ret;
+ const AVClass *c = *(AVClass**)obj;
+ int (*callback)(AVOptionRanges **, void *obj, const char *key, int flags) = c->query_ranges;
+
+ if (!callback)
+ callback = av_opt_query_ranges_default;
+
+ ret = callback(ranges_arg, obj, key, flags);
+ if (ret >= 0) {
+ if (!(flags & AV_OPT_MULTI_COMPONENT_RANGE))
+ ret = 1;
+ (*ranges_arg)->nb_components = ret;
+ }
+ return ret;
+}
+
+int av_opt_query_ranges_default(AVOptionRanges **ranges_arg, void *obj, const char *key, int flags)
+{
+ AVOptionRanges *ranges = av_mallocz(sizeof(*ranges));
+ AVOptionRange **range_array = av_mallocz(sizeof(void*));
+ AVOptionRange *range = av_mallocz(sizeof(*range));
+ const AVOption *field = av_opt_find(obj, key, NULL, 0, flags);
+ int ret;
+
+ *ranges_arg = NULL;
+
+ if (!ranges || !range || !range_array || !field) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ ranges->range = range_array;
+ ranges->range[0] = range;
+ ranges->nb_ranges = 1;
+ ranges->nb_components = 1;
+ range->is_range = 1;
+ range->value_min = field->min;
+ range->value_max = field->max;
+
+ switch (field->type) {
+ case AV_OPT_TYPE_BOOL:
+ case AV_OPT_TYPE_INT:
+ case AV_OPT_TYPE_INT64:
+ case AV_OPT_TYPE_UINT64:
+ case AV_OPT_TYPE_PIXEL_FMT:
+ case AV_OPT_TYPE_SAMPLE_FMT:
+ case AV_OPT_TYPE_FLOAT:
+ case AV_OPT_TYPE_DOUBLE:
+ case AV_OPT_TYPE_DURATION:
+ case AV_OPT_TYPE_COLOR:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ break;
+ case AV_OPT_TYPE_STRING:
+ range->component_min = 0;
+ range->component_max = 0x10FFFF; // max unicode value
+ range->value_min = -1;
+ range->value_max = INT_MAX;
+ break;
+ case AV_OPT_TYPE_RATIONAL:
+ range->component_min = INT_MIN;
+ range->component_max = INT_MAX;
+ break;
+ case AV_OPT_TYPE_IMAGE_SIZE:
+ range->component_min = 0;
+ range->component_max = INT_MAX/128/8;
+ range->value_min = 0;
+ range->value_max = INT_MAX/8;
+ break;
+ case AV_OPT_TYPE_VIDEO_RATE:
+ range->component_min = 1;
+ range->component_max = INT_MAX;
+ range->value_min = 1;
+ range->value_max = INT_MAX;
+ break;
+ default:
+ ret = AVERROR(ENOSYS);
+ goto fail;
+ }
+
+ *ranges_arg = ranges;
+ return 1;
+fail:
+ av_free(ranges);
+ av_free(range);
+ av_free(range_array);
+ return ret;
+}
+
+void av_opt_freep_ranges(AVOptionRanges **rangesp)
+{
+ int i;
+ AVOptionRanges *ranges = *rangesp;
+
+ if (!ranges)
+ return;
+
+ for (i = 0; i < ranges->nb_ranges * ranges->nb_components; i++) {
+ AVOptionRange *range = ranges->range[i];
+ if (range) {
+ av_freep(&range->str);
+ av_freep(&ranges->range[i]);
+ }
+ }
+ av_freep(&ranges->range);
+ av_freep(rangesp);
+}
+
+int av_opt_is_set_to_default(void *obj, const AVOption *o)
+{
+ int64_t i64;
+ double d, d2;
+ float f;
+ AVRational q;
+ int ret, w, h;
+ char *str;
+ void *dst;
+
+ if (!o || !obj)
+ return AVERROR(EINVAL);
+
+ dst = ((uint8_t*)obj) + o->offset;
+
+ switch (o->type) {
+ case AV_OPT_TYPE_CONST:
+ return 1;
+ case AV_OPT_TYPE_BOOL:
+ case AV_OPT_TYPE_FLAGS:
+ case AV_OPT_TYPE_PIXEL_FMT:
+ case AV_OPT_TYPE_SAMPLE_FMT:
+ case AV_OPT_TYPE_INT:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ case AV_OPT_TYPE_DURATION:
+ case AV_OPT_TYPE_INT64:
+ case AV_OPT_TYPE_UINT64:
+ read_number(o, dst, NULL, NULL, &i64);
+ return o->default_val.i64 == i64;
+ case AV_OPT_TYPE_CHLAYOUT: {
+ AVChannelLayout ch_layout = { 0 };
+ if (o->default_val.str) {
+ if ((ret = av_channel_layout_from_string(&ch_layout, o->default_val.str)) < 0)
+ return ret;
+ }
+ return !av_channel_layout_compare((AVChannelLayout *)dst, &ch_layout);
+ }
+ case AV_OPT_TYPE_STRING:
+ str = *(char **)dst;
+ if (str == o->default_val.str) //2 NULLs
+ return 1;
+ if (!str || !o->default_val.str) //1 NULL
+ return 0;
+ return !strcmp(str, o->default_val.str);
+ case AV_OPT_TYPE_DOUBLE:
+ read_number(o, dst, &d, NULL, NULL);
+ return o->default_val.dbl == d;
+ case AV_OPT_TYPE_FLOAT:
+ read_number(o, dst, &d, NULL, NULL);
+ f = o->default_val.dbl;
+ d2 = f;
+ return d2 == d;
+ case AV_OPT_TYPE_RATIONAL:
+ q = av_d2q(o->default_val.dbl, INT_MAX);
+ return !av_cmp_q(*(AVRational*)dst, q);
+ case AV_OPT_TYPE_BINARY: {
+ struct {
+ uint8_t *data;
+ int size;
+ } tmp = {0};
+ int opt_size = *(int *)((void **)dst + 1);
+ void *opt_ptr = *(void **)dst;
+ if (!opt_size && (!o->default_val.str || !strlen(o->default_val.str)))
+ return 1;
+ if (!opt_size || !o->default_val.str || !strlen(o->default_val.str ))
+ return 0;
+ if (opt_size != strlen(o->default_val.str) / 2)
+ return 0;
+ ret = set_string_binary(NULL, NULL, o->default_val.str, &tmp.data);
+ if (!ret)
+ ret = !memcmp(opt_ptr, tmp.data, tmp.size);
+ av_free(tmp.data);
+ return ret;
+ }
+ case AV_OPT_TYPE_DICT: {
+ AVDictionary *dict1 = NULL;
+ AVDictionary *dict2 = *(AVDictionary **)dst;
+ const AVDictionaryEntry *en1 = NULL;
+ const AVDictionaryEntry *en2 = NULL;
+ ret = av_dict_parse_string(&dict1, o->default_val.str, "=", ":", 0);
+ if (ret < 0) {
+ av_dict_free(&dict1);
+ return ret;
+ }
+ do {
+ en1 = av_dict_iterate(dict1, en1);
+ en2 = av_dict_iterate(dict2, en2);
+ } while (en1 && en2 && !strcmp(en1->key, en2->key) && !strcmp(en1->value, en2->value));
+ av_dict_free(&dict1);
+ return (!en1 && !en2);
+ }
+ case AV_OPT_TYPE_IMAGE_SIZE:
+ if (!o->default_val.str || !strcmp(o->default_val.str, "none"))
+ w = h = 0;
+ else if ((ret = av_parse_video_size(&w, &h, o->default_val.str)) < 0)
+ return ret;
+ return (w == *(int *)dst) && (h == *((int *)dst+1));
+ case AV_OPT_TYPE_VIDEO_RATE:
+ q = (AVRational){0, 0};
+ if (o->default_val.str) {
+ if ((ret = av_parse_video_rate(&q, o->default_val.str)) < 0)
+ return ret;
+ }
+ return !av_cmp_q(*(AVRational*)dst, q);
+ case AV_OPT_TYPE_COLOR: {
+ uint8_t color[4] = {0, 0, 0, 0};
+ if (o->default_val.str) {
+ if ((ret = av_parse_color(color, o->default_val.str, -1, NULL)) < 0)
+ return ret;
+ }
+ return !memcmp(color, dst, sizeof(color));
+ }
+ default:
+ av_log(obj, AV_LOG_WARNING, "Not supported option type: %d, option name: %s\n", o->type, o->name);
+ break;
+ }
+ return AVERROR_PATCHWELCOME;
+}
+
+int av_opt_is_set_to_default_by_name(void *obj, const char *name, int search_flags)
+{
+ const AVOption *o;
+ void *target;
+ if (!obj)
+ return AVERROR(EINVAL);
+ o = av_opt_find2(obj, name, NULL, 0, search_flags, &target);
+ if (!o)
+ return AVERROR_OPTION_NOT_FOUND;
+ return av_opt_is_set_to_default(target, o);
+}
+
+int av_opt_serialize(void *obj, int opt_flags, int flags, char **buffer,
+ const char key_val_sep, const char pairs_sep)
+{
+ const AVOption *o = NULL;
+ uint8_t *buf;
+ AVBPrint bprint;
+ int ret, cnt = 0;
+ const char special_chars[] = {pairs_sep, key_val_sep, '\0'};
+
+ if (pairs_sep == '\0' || key_val_sep == '\0' || pairs_sep == key_val_sep ||
+ pairs_sep == '\\' || key_val_sep == '\\') {
+ av_log(obj, AV_LOG_ERROR, "Invalid separator(s) found.");
+ return AVERROR(EINVAL);
+ }
+
+ if (!obj || !buffer)
+ return AVERROR(EINVAL);
+
+ *buffer = NULL;
+ av_bprint_init(&bprint, 64, AV_BPRINT_SIZE_UNLIMITED);
+
+ while (o = av_opt_next(obj, o)) {
+ if (o->type == AV_OPT_TYPE_CONST)
+ continue;
+ if ((flags & AV_OPT_SERIALIZE_OPT_FLAGS_EXACT) && o->flags != opt_flags)
+ continue;
+ else if (((o->flags & opt_flags) != opt_flags))
+ continue;
+ if (flags & AV_OPT_SERIALIZE_SKIP_DEFAULTS && av_opt_is_set_to_default(obj, o) > 0)
+ continue;
+ if ((ret = av_opt_get(obj, o->name, 0, &buf)) < 0) {
+ av_bprint_finalize(&bprint, NULL);
+ return ret;
+ }
+ if (buf) {
+ if (cnt++)
+ av_bprint_append_data(&bprint, &pairs_sep, 1);
+ av_bprint_escape(&bprint, o->name, special_chars, AV_ESCAPE_MODE_BACKSLASH, 0);
+ av_bprint_append_data(&bprint, &key_val_sep, 1);
+ av_bprint_escape(&bprint, buf, special_chars, AV_ESCAPE_MODE_BACKSLASH, 0);
+ av_freep(&buf);
+ }
+ }
+ ret = av_bprint_finalize(&bprint, buffer);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
diff --git a/media/ffvpx/libavutil/opt.h b/media/ffvpx/libavutil/opt.h
new file mode 100644
index 0000000000..461b5d3b6b
--- /dev/null
+++ b/media/ffvpx/libavutil/opt.h
@@ -0,0 +1,891 @@
+/*
+ * AVOptions
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OPT_H
+#define AVUTIL_OPT_H
+
+/**
+ * @file
+ * AVOptions
+ */
+
+#include "rational.h"
+#include "avutil.h"
+#include "channel_layout.h"
+#include "dict.h"
+#include "log.h"
+#include "pixfmt.h"
+#include "samplefmt.h"
+
+/**
+ * @defgroup avoptions AVOptions
+ * @ingroup lavu_data
+ * @{
+ * AVOptions provide a generic system to declare options on arbitrary structs
+ * ("objects"). An option can have a help text, a type and a range of possible
+ * values. Options may then be enumerated, read and written to.
+ *
+ * @section avoptions_implement Implementing AVOptions
+ * This section describes how to add AVOptions capabilities to a struct.
+ *
+ * All AVOptions-related information is stored in an AVClass. Therefore
+ * the first member of the struct should be a pointer to an AVClass describing it.
+ * The option field of the AVClass must be set to a NULL-terminated static array
+ * of AVOptions. Each AVOption must have a non-empty name, a type, a default
+ * value and for number-type AVOptions also a range of allowed values. It must
+ * also declare an offset in bytes from the start of the struct, where the field
+ * associated with this AVOption is located. Other fields in the AVOption struct
+ * should also be set when applicable, but are not required.
+ *
+ * The following example illustrates an AVOptions-enabled struct:
+ * @code
+ * typedef struct test_struct {
+ * const AVClass *class;
+ * int int_opt;
+ * char *str_opt;
+ * uint8_t *bin_opt;
+ * int bin_len;
+ * } test_struct;
+ *
+ * static const AVOption test_options[] = {
+ * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt),
+ * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX },
+ * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt),
+ * AV_OPT_TYPE_STRING },
+ * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt),
+ * AV_OPT_TYPE_BINARY },
+ * { NULL },
+ * };
+ *
+ * static const AVClass test_class = {
+ * .class_name = "test class",
+ * .item_name = av_default_item_name,
+ * .option = test_options,
+ * .version = LIBAVUTIL_VERSION_INT,
+ * };
+ * @endcode
+ *
+ * Next, when allocating your struct, you must ensure that the AVClass pointer
+ * is set to the correct value. Then, av_opt_set_defaults() can be called to
+ * initialize defaults. After that the struct is ready to be used with the
+ * AVOptions API.
+ *
+ * When cleaning up, you may use the av_opt_free() function to automatically
+ * free all the allocated string and binary options.
+ *
+ * Continuing with the above example:
+ *
+ * @code
+ * test_struct *alloc_test_struct(void)
+ * {
+ * test_struct *ret = av_mallocz(sizeof(*ret));
+ * ret->class = &test_class;
+ * av_opt_set_defaults(ret);
+ * return ret;
+ * }
+ * void free_test_struct(test_struct **foo)
+ * {
+ * av_opt_free(*foo);
+ * av_freep(foo);
+ * }
+ * @endcode
+ *
+ * @subsection avoptions_implement_nesting Nesting
+ * It may happen that an AVOptions-enabled struct contains another
+ * AVOptions-enabled struct as a member (e.g. AVCodecContext in
+ * libavcodec exports generic options, while its priv_data field exports
+ * codec-specific options). In such a case, it is possible to set up the
+ * parent struct to export a child's options. To do that, simply
+ * implement AVClass.child_next() and AVClass.child_class_iterate() in the
+ * parent struct's AVClass.
+ * Assuming that the test_struct from above now also contains a
+ * child_struct field:
+ *
+ * @code
+ * typedef struct child_struct {
+ * AVClass *class;
+ * int flags_opt;
+ * } child_struct;
+ * static const AVOption child_opts[] = {
+ * { "test_flags", "This is a test option of flags type.",
+ * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX },
+ * { NULL },
+ * };
+ * static const AVClass child_class = {
+ * .class_name = "child class",
+ * .item_name = av_default_item_name,
+ * .option = child_opts,
+ * .version = LIBAVUTIL_VERSION_INT,
+ * };
+ *
+ * void *child_next(void *obj, void *prev)
+ * {
+ * test_struct *t = obj;
+ * if (!prev && t->child_struct)
+ * return t->child_struct;
+ *     return NULL;
+ * }
+ * const AVClass *child_class_iterate(void **iter)
+ * {
+ * const AVClass *c = *iter ? NULL : &child_class;
+ * *iter = (void*)(uintptr_t)c;
+ * return c;
+ * }
+ * @endcode
+ * Putting child_next() and child_class_iterate() as defined above into
+ * test_class will now make child_struct's options accessible through
+ * test_struct (again, proper setup as described above needs to be done on
+ * child_struct right after it is created).
+ *
+ * From the above example it might not be clear why both child_next()
+ * and child_class_iterate() are needed. The distinction is that child_next()
+ * iterates over actually existing objects, while child_class_iterate()
+ * iterates over all possible child classes. E.g. if an AVCodecContext
+ * was initialized to use a codec which has private options, then its
+ * child_next() will return AVCodecContext.priv_data and finish
+ * iterating. OTOH child_class_iterate() on AVCodecContext.av_class will
+ * iterate over all available codecs with private options.
+ *
+ * @subsection avoptions_implement_named_constants Named constants
+ * It is possible to create named constants for options. Simply set the unit
+ * field of the option to which the constants should apply to a string, and
+ * create the constants themselves as options of type AV_OPT_TYPE_CONST
+ * with their unit field set to the same string.
+ * Their default_val field should contain the value of the named
+ * constant.
+ * For example, to add some named constants for the test_flags option
+ * above, put the following into the child_opts array:
+ * @code
+ * { "test_flags", "This is a test option of flags type.",
+ * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" },
+ * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" },
+ * @endcode
+ *
+ * @section avoptions_use Using AVOptions
+ * This section deals with accessing options in an AVOptions-enabled struct.
+ * Such structs in FFmpeg are e.g. AVCodecContext in libavcodec or
+ * AVFormatContext in libavformat.
+ *
+ * @subsection avoptions_use_examine Examining AVOptions
+ * The basic functions for examining options are av_opt_next(), which iterates
+ * over all options defined for one object, and av_opt_find(), which searches
+ * for an option with the given name.
+ *
+ * The situation is more complicated with nesting. An AVOptions-enabled struct
+ * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag
+ * to av_opt_find() will make the function search children recursively.
+ *
+ * For enumerating there are basically two cases. The first is when you want to
+ * get all options that may potentially exist on the struct and its children
+ * (e.g. when constructing documentation). In that case you should call
+ * av_opt_child_class_iterate() recursively on the parent struct's AVClass. The
+ * second case is when you have an already initialized struct with all its
+ * children and you want to get all options that can be actually written or read
+ * from it. In that case you should call av_opt_child_next() recursively (and
+ * av_opt_next() on each result).
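+ *
+ * A minimal sketch (assuming obj points to an allocated and initialized
+ * test_struct from the example above) of enumerating the options of an
+ * existing object with av_opt_next() and looking one up with av_opt_find():
+ * @code
+ * const AVOption *opt = NULL;
+ * while ((opt = av_opt_next(obj, opt)))
+ *     av_log(obj, AV_LOG_INFO, "option: %s\n", opt->name);
+ *
+ * if (av_opt_find(obj, "test_int", NULL, 0, AV_OPT_SEARCH_CHILDREN))
+ *     av_log(obj, AV_LOG_INFO, "test_int is available\n");
+ * @endcode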
+ *
+ * @subsection avoptions_use_get_set Reading and writing AVOptions
+ * When setting options, you often have a string read directly from the
+ * user. In such a case, simply passing it to av_opt_set() is enough. For
+ * non-string type options, av_opt_set() will parse the string according to the
+ * option type.
+ *
+ * Similarly av_opt_get() will read any option type and convert it to a string
+ * which will be returned. Do not forget that the string is allocated, so you
+ * have to free it with av_free().
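+ *
+ * A minimal sketch (again assuming obj points to an initialized test_struct
+ * from the example above) of setting an option from a string and reading it
+ * back:
+ * @code
+ * uint8_t *val = NULL;
+ * if (av_opt_set(obj, "test_int", "42", 0) >= 0 &&
+ *     av_opt_get(obj, "test_int", 0, &val) >= 0) {
+ *     // val now holds the allocated string "42"
+ *     av_free(val);
+ * }
+ * @endcode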
+ *
+ * In some cases it may be more convenient to put all options into an
 * AVDictionary and call av_opt_set_dict() on it. A specific case of this
 * is the format/codec open functions in lavf/lavc, which take a dictionary
 * filled with options as a parameter. This makes it possible to set some options
+ * that cannot be set otherwise, since e.g. the input file format is not known
+ * before the file is actually opened.
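+ *
+ * A minimal sketch of that pattern, using the test_struct example again;
+ * "some_private_key" stands for an entry the object does not recognize and
+ * is therefore left in the dictionary:
+ * @code
+ * AVDictionary *opts = NULL;
+ * av_dict_set(&opts, "test_int", "42", 0);
+ * av_dict_set(&opts, "some_private_key", "value", 0);
+ * av_opt_set_dict(obj, &opts);
+ * // opts now holds only the entries obj did not recognize
+ * av_dict_free(&opts);
+ * @endcode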
+ */
+
+enum AVOptionType{
+ AV_OPT_TYPE_FLAGS,
+ AV_OPT_TYPE_INT,
+ AV_OPT_TYPE_INT64,
+ AV_OPT_TYPE_DOUBLE,
+ AV_OPT_TYPE_FLOAT,
+ AV_OPT_TYPE_STRING,
+ AV_OPT_TYPE_RATIONAL,
+ AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length
+ AV_OPT_TYPE_DICT,
+ AV_OPT_TYPE_UINT64,
+ AV_OPT_TYPE_CONST,
+ AV_OPT_TYPE_IMAGE_SIZE, ///< offset must point to two consecutive integers
+ AV_OPT_TYPE_PIXEL_FMT,
+ AV_OPT_TYPE_SAMPLE_FMT,
+ AV_OPT_TYPE_VIDEO_RATE, ///< offset must point to AVRational
+ AV_OPT_TYPE_DURATION,
+ AV_OPT_TYPE_COLOR,
+#if FF_API_OLD_CHANNEL_LAYOUT
+ AV_OPT_TYPE_CHANNEL_LAYOUT,
+#endif
+ AV_OPT_TYPE_BOOL,
+ AV_OPT_TYPE_CHLAYOUT,
+};
+
+/**
+ * AVOption
+ */
+typedef struct AVOption {
+ const char *name;
+
+ /**
+ * short English help text
+ * @todo What about other languages?
+ */
+ const char *help;
+
+ /**
+ * The offset relative to the context structure where the option
+ * value is stored. It should be 0 for named constants.
+ */
+ int offset;
+ enum AVOptionType type;
+
+ /**
+ * the default value for scalar options
+ */
+ union {
+ int64_t i64;
+ double dbl;
+ const char *str;
+ /* TODO those are unused now */
+ AVRational q;
+ } default_val;
+ double min; ///< minimum valid value for the option
+ double max; ///< maximum valid value for the option
+
+ int flags;
+#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding
+#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding
+#define AV_OPT_FLAG_AUDIO_PARAM 8
+#define AV_OPT_FLAG_VIDEO_PARAM 16
+#define AV_OPT_FLAG_SUBTITLE_PARAM 32
+/**
+ * The option is intended for exporting values to the caller.
+ */
+#define AV_OPT_FLAG_EXPORT 64
+/**
+ * The option may not be set through the AVOptions API, only read.
+ * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set.
+ */
+#define AV_OPT_FLAG_READONLY 128
+#define AV_OPT_FLAG_BSF_PARAM (1<<8) ///< a generic parameter which can be set by the user for bit stream filtering
+#define AV_OPT_FLAG_RUNTIME_PARAM (1<<15) ///< a generic parameter which can be set by the user at runtime
+#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering
+#define AV_OPT_FLAG_DEPRECATED (1<<17) ///< set if option is deprecated, users should refer to AVOption.help text for more information
+#define AV_OPT_FLAG_CHILD_CONSTS (1<<18) ///< set if option constants can also reside in child objects
+//FIXME think about enc-audio, ... style flags
+
+ /**
+ * The logical unit to which the option belongs. Non-constant
+ * options and corresponding named constants share the same
+ * unit. May be NULL.
+ */
+ const char *unit;
+} AVOption;
+
+/**
+ * A single allowed range of values, or a single allowed value.
+ */
+typedef struct AVOptionRange {
+ const char *str;
+ /**
+ * Value range.
+ * For string ranges this represents the min/max length.
+ * For dimensions this represents the min/max pixel count or width/height in the multi-component case.
+ */
+ double value_min, value_max;
+ /**
+ * Value's component range.
+ * For strings this represents the Unicode range for characters; 0-127 limits it to ASCII.
+ */
+ double component_min, component_max;
+ /**
+ * Range flag.
+ * If set to 1 the struct encodes a range, if set to 0 a single value.
+ */
+ int is_range;
+} AVOptionRange;
+
+/**
+ * List of AVOptionRange structs.
+ */
+typedef struct AVOptionRanges {
+ /**
+ * Array of option ranges.
+ *
+ * Most option types use just one component.
+ * The following describes multi-component option types:
+ *
+ * AV_OPT_TYPE_IMAGE_SIZE:
+ * component index 0: range of pixel count (width * height).
+ * component index 1: range of width.
+ * component index 2: range of height.
+ *
+ * @note To obtain the multi-component version of this structure, the user must
+ * pass AV_OPT_MULTI_COMPONENT_RANGE to the av_opt_query_ranges() or
+ * av_opt_query_ranges_default() function.
+ *
+ * A multi-component range can be read as in the following example:
+ *
+ * @code
+ * int range_index, component_index;
+ * AVOptionRanges *ranges;
+ * AVOptionRange *range[3]; //may require more than 3 in the future.
+ * av_opt_query_ranges(&ranges, obj, key, AV_OPT_MULTI_COMPONENT_RANGE);
+ * for (range_index = 0; range_index < ranges->nb_ranges; range_index++) {
+ * for (component_index = 0; component_index < ranges->nb_components; component_index++)
+ * range[component_index] = ranges->range[ranges->nb_ranges * component_index + range_index];
+ * //do something with range here.
+ * }
+ * av_opt_freep_ranges(&ranges);
+ * @endcode
+ */
+ AVOptionRange **range;
+ /**
+ * Number of ranges per component.
+ */
+ int nb_ranges;
+ /**
+ * Number of components.
+ */
+ int nb_components;
+} AVOptionRanges;
+
+/**
+ * Show the obj options.
+ *
+ * @param req_flags requested flags for the options to show. Show only the
+ * options for which it is opt->flags & req_flags.
+ * @param rej_flags rejected flags for the options to show. Show only the
+ * options for which it is !(opt->flags & rej_flags).
+ * @param av_log_obj log context to use for showing the options
+ */
+int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags);
+
+/**
+ * Set the values of all AVOption fields to their default values.
+ *
+ * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass)
+ */
+void av_opt_set_defaults(void *s);
+
+/**
+ * Set the values of all AVOption fields to their default values. Only those
+ * AVOption fields for which (opt->flags & mask) == flags will have their
+ * default applied to s.
+ *
+ * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass)
+ * @param mask combination of AV_OPT_FLAG_*
+ * @param flags combination of AV_OPT_FLAG_*
+ */
+void av_opt_set_defaults2(void *s, int mask, int flags);
+
+/**
+ * Parse the key/value pairs list in opts. For each key/value pair
+ * found, stores the value in the field in ctx that is named like the
+ * key. ctx must be an AVClass context, storing is done using
+ * AVOptions.
+ *
+ * @param opts options string to parse, may be NULL
+ * @param key_val_sep a 0-terminated list of characters used to
+ * separate key from value
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @return the number of successfully set key/value pairs, or a negative
+ * value corresponding to an AVERROR code in case of error:
+ * AVERROR(EINVAL) if opts cannot be parsed,
+ * the error code issued by av_opt_set() if a key/value pair
+ * cannot be set
+ */
+int av_set_options_string(void *ctx, const char *opts,
+ const char *key_val_sep, const char *pairs_sep);
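/*
 * A small sketch of av_set_options_string(), assuming ctx is some
 * AVOptions-enabled struct obtained elsewhere; the option names "b" and "g"
 * are illustrative.
 * @code
 * int ret = av_set_options_string(ctx, "b=2M:g=300", "=", ":");
 * if (ret < 0)
 *     return ret;   // parsing or setting failed
 * // on success, ret is the number of key/value pairs that were set (2 here)
 * @endcode
 */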
+
+/**
+ * Parse the key-value pairs list in opts. For each key=value pair found,
+ * set the value of the corresponding option in ctx.
+ *
+ * @param ctx the AVClass object to set options on
+ * @param opts the options string, key-value pairs separated by a
+ * delimiter
+ * @param shorthand a NULL-terminated array of options names for shorthand
+ * notation: if the first field in opts has no key part,
+ * the key is taken from the first element of shorthand;
+ * then again for the second, etc., until either opts is
+ * finished, shorthand is finished or a named option is
+ * found; after that, all options must be named
+ * @param key_val_sep a 0-terminated list of characters used to separate
+ * key from value, for example '='
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other, for example ':' or ','
+ * @return the number of successfully set key=value pairs, or a negative
+ * value corresponding to an AVERROR code in case of error:
+ * AVERROR(EINVAL) if opts cannot be parsed,
+ * the error code issued by av_set_string3() if a key/value pair
+ * cannot be set
+ *
+ * Option names must use only the following characters: a-z A-Z 0-9 - . / _
+ * Separators must use characters distinct from option names and from each
+ * other.
+ */
+int av_opt_set_from_string(void *ctx, const char *opts,
+ const char *const *shorthand,
+ const char *key_val_sep, const char *pairs_sep);
+/**
+ * Free all allocated objects in obj.
+ */
+void av_opt_free(void *obj);
+
+/**
+ * Check whether a particular flag is set in a flags field.
+ *
+ * @param field_name the name of the flag field option
+ * @param flag_name the name of the flag to check
+ * @return non-zero if the flag is set, zero if the flag isn't set,
+ * isn't of the right type, or the flags field doesn't exist.
+ */
+int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name);
+
+/**
+ * Set all the options from a given dictionary on an object.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass
+ * @param options options to process. This dictionary will be freed and replaced
+ * by a new one containing all options not found in obj.
+ * This new dictionary must be freed by the caller
+ * with av_dict_free().
+ *
+ * @return 0 on success, a negative AVERROR if some option was found in obj,
+ * but could not be set.
+ *
+ * @see av_dict_copy()
+ */
+int av_opt_set_dict(void *obj, struct AVDictionary **options);
+
+
+/**
+ * Set all the options from a given dictionary on an object.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass
+ * @param options options to process. This dictionary will be freed and replaced
+ * by a new one containing all options not found in obj.
+ * This new dictionary must be freed by the caller
+ * with av_dict_free().
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ *
+ * @return 0 on success, a negative AVERROR if some option was found in obj,
+ * but could not be set.
+ *
+ * @see av_dict_copy()
+ */
+int av_opt_set_dict2(void *obj, struct AVDictionary **options, int search_flags);
+
+/**
+ * Extract a key-value pair from the beginning of a string.
+ *
+ * @param ropts pointer to the options string, will be updated to
+ * point to the rest of the string (one of the pairs_sep
+ * or the final NUL)
+ * @param key_val_sep a 0-terminated list of characters used to separate
+ * key from value, for example '='
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other, for example ':' or ','
+ * @param flags flags; see the AV_OPT_FLAG_* values below
+ * @param rkey parsed key; must be freed using av_free()
+ * @param rval parsed value; must be freed using av_free()
+ *
+ * @return >=0 for success, or a negative value corresponding to an
+ * AVERROR code in case of error; in particular:
+ * AVERROR(EINVAL) if no key is present
+ *
+ */
+int av_opt_get_key_value(const char **ropts,
+ const char *key_val_sep, const char *pairs_sep,
+ unsigned flags,
+ char **rkey, char **rval);
+
+enum {
+
+ /**
+ * Accept to parse a value without a key; the key will then be returned
+ * as NULL.
+ */
+ AV_OPT_FLAG_IMPLICIT_KEY = 1,
+};
+
+/**
+ * @defgroup opt_eval_funcs Evaluating option strings
+ * @{
+ * This group of functions can be used to evaluate option strings
+ * and get numbers out of them. They do the same thing as av_opt_set(),
+ * except the result is written into the caller-supplied pointer.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass.
+ * @param o an option for which the string is to be evaluated.
+ * @param val string to be evaluated.
+ * @param *_out value of the string will be written here.
+ *
+ * @return 0 on success, a negative number on failure.
+ */
+int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out);
+int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out);
+int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out);
+int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out);
+int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out);
+int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out);
+/**
+ * @}
+ */
+
+#define AV_OPT_SEARCH_CHILDREN (1 << 0) /**< Search in possible children of the
+ given object first. */
+/**
+ * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass
+ * instead of the required pointer to a struct containing an AVClass. This is
+ * useful for searching for options without needing to allocate the corresponding
+ * object.
+ */
+#define AV_OPT_SEARCH_FAKE_OBJ (1 << 1)
+
+/**
+ * In av_opt_get, return NULL if the option has a pointer type and is set to NULL,
+ * rather than returning an empty string.
+ */
+#define AV_OPT_ALLOW_NULL (1 << 2)
+
+/**
+ * Allows av_opt_query_ranges and av_opt_query_ranges_default to return more than
+ * one component for certain option types.
+ * @see AVOptionRanges for details.
+ */
+#define AV_OPT_MULTI_COMPONENT_RANGE (1 << 12)
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ * pointer to an AVClass.
+ * Alternatively a double pointer to an AVClass, if
+ * AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ * it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ * was found.
+ *
+ * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable
+ * directly with av_opt_set(). Use special calls which take an options
+ * AVDictionary (e.g. avformat_open_input()) to set options found with this
+ * flag.
+ */
+const AVOption *av_opt_find(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags);
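/*
 * A sketch of searching for an option without allocating the object, using
 * AV_OPT_SEARCH_FAKE_OBJ; avcodec_get_class() and the "b" option are used
 * only as an illustration.
 * @code
 * const AVClass *cls = avcodec_get_class();
 * const AVOption *o = av_opt_find(&cls, "b", NULL, 0, AV_OPT_SEARCH_FAKE_OBJ);
 * if (o)
 *     av_log(NULL, AV_LOG_INFO, "%s: %s\n", o->name, o->help);
 * @endcode
 */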
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ * pointer to an AVClass.
+ * Alternatively a double pointer to an AVClass, if
+ * AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ * it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ * @param[out] target_obj if non-NULL, an object to which the option belongs will be
+ * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present
+ * in search_flags. This parameter is ignored if search_flags contains
+ * AV_OPT_SEARCH_FAKE_OBJ.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ * was found.
+ */
+const AVOption *av_opt_find2(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags, void **target_obj);
+
+/**
+ * Iterate over all AVOptions belonging to obj.
+ *
+ * @param obj an AVOptions-enabled struct or a double pointer to an
+ * AVClass describing it.
+ * @param prev result of the previous call to av_opt_next() on this object
+ * or NULL
+ * @return next AVOption or NULL
+ */
+const AVOption *av_opt_next(const void *obj, const AVOption *prev);
+
+/**
+ * Iterate over AVOptions-enabled children of obj.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return next AVOptions-enabled child or NULL
+ */
+void *av_opt_child_next(void *obj, void *prev);
+
+/**
+ * Iterate over potential AVOptions-enabled children of parent.
+ *
+ * @param iter a pointer where iteration state is stored.
+ * @return AVClass corresponding to next potential child or NULL
+ */
+const AVClass *av_opt_child_class_iterate(const AVClass *parent, void **iter);
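/*
 * A sketch of enumerating every option that may potentially exist on a
 * struct and its children, as recommended earlier in this header;
 * avcodec_get_class() is illustrative, and a real implementation would
 * recurse into each child class.
 * @code
 * const AVClass *parent = avcodec_get_class();
 * const AVClass *child;
 * void *iter = NULL;
 * while ((child = av_opt_child_class_iterate(parent, &iter))) {
 *     const AVOption *opt = NULL;
 *     while ((opt = av_opt_next(&child, opt)))
 *         av_log(NULL, AV_LOG_INFO, "%s.%s\n", child->class_name, opt->name);
 * }
 * @endcode
 */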
+
+/**
+ * @defgroup opt_set_funcs Option setting functions
+ * @{
+ * These functions set the field of obj with the given name to the given value.
+ *
+ * @param[in] obj A struct whose first element is a pointer to an AVClass.
+ * @param[in] name the name of the field to set
+ * @param[in] val The value to set. In case of av_opt_set() if the field is not
+ * of a string type, then the given string is parsed.
+ * SI postfixes and some named scalars are supported.
+ * If the field is of a numeric type, it has to be a numeric or named
+ * scalar. Behavior with more than one scalar and +- infix operators
+ * is undefined.
+ * If the field is of a flags type, it has to be a sequence of numeric
+ * scalars or named flags separated by '+' or '-'. Prefixing a flag
+ * with '+' causes it to be set without affecting the other flags;
+ * similarly, '-' unsets a flag.
+ * If the field is of a dictionary type, it has to be a ':' separated list of
+ * key=value parameters. Values containing ':' special characters must be
+ * escaped.
+ * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be set on a child of obj.
+ *
+ * @return 0 if the value has been set, or an AVERROR code in case of
+ * error:
+ * AVERROR_OPTION_NOT_FOUND if no matching option exists
+ * AVERROR(ERANGE) if the value is out of range
+ * AVERROR(EINVAL) if the value is not valid
+ */
+int av_opt_set (void *obj, const char *name, const char *val, int search_flags);
+int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags);
+int av_opt_set_double (void *obj, const char *name, double val, int search_flags);
+int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags);
+int av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags);
+int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags);
+int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags);
+int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags);
+int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags);
+#if FF_API_OLD_CHANNEL_LAYOUT
+attribute_deprecated
+int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags);
+#endif
+int av_opt_set_chlayout(void *obj, const char *name, const AVChannelLayout *layout, int search_flags);
+/**
+ * @note Any old dictionary present is discarded and replaced with a copy of the new one. The
+ * caller still owns val and is responsible for freeing it.
+ */
+int av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val, int search_flags);
+
+/**
+ * Set a binary option to an integer list.
+ *
+ * @param obj AVClass object to set options on
+ * @param name name of the binary option
+ * @param val pointer to an integer list (must have the correct type with
+ * regard to the contents of the list)
+ * @param term list terminator (usually 0 or -1)
+ * @param flags search flags
+ */
+#define av_opt_set_int_list(obj, name, val, term, flags) \
+ (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? \
+ AVERROR(EINVAL) : \
+ av_opt_set_bin(obj, name, (const uint8_t *)(val), \
+ av_int_list_length(val, term) * sizeof(*(val)), flags))
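/*
 * A sketch of av_opt_set_int_list(), assuming ctx is an AVOptions-enabled
 * context that exposes a binary "pix_fmts" option (for example a libavfilter
 * buffersink instance); the terminator must match the list's element type.
 * @code
 * static const enum AVPixelFormat fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
 * int ret = av_opt_set_int_list(ctx, "pix_fmts", fmts, AV_PIX_FMT_NONE,
 *                               AV_OPT_SEARCH_CHILDREN);
 * @endcode
 */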
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup opt_get_funcs Option getting functions
+ * @{
+ * These functions get the value of the option with the given name from an object.
+ *
+ * @param[in] obj a struct whose first element is a pointer to an AVClass.
+ * @param[in] name name of the option to get.
+ * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be found in a child of obj.
+ * @param[out] out_val value of the option will be written here
+ * @return >=0 on success, a negative error code otherwise
+ */
+/**
+ * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller
+ *
+ * @note if AV_OPT_ALLOW_NULL is set in search_flags in av_opt_get, and the
+ * option is of type AV_OPT_TYPE_STRING, AV_OPT_TYPE_BINARY or AV_OPT_TYPE_DICT
+ * and is set to NULL, *out_val will be set to NULL instead of an allocated
+ * empty string.
+ */
+int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val);
+int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val);
+int av_opt_get_double (void *obj, const char *name, int search_flags, double *out_val);
+int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val);
+int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out);
+int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt);
+int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt);
+int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val);
+#if FF_API_OLD_CHANNEL_LAYOUT
+attribute_deprecated
+int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout);
+#endif
+int av_opt_get_chlayout(void *obj, const char *name, int search_flags, AVChannelLayout *layout);
+/**
+ * @param[out] out_val The returned dictionary is a copy of the actual value and must
+ * be freed with av_dict_free() by the caller
+ */
+int av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val);
+/**
+ * @}
+ */
+/**
+ * Gets a pointer to the requested field in a struct.
+ * This function allows accessing a struct even when its fields have been moved or
+ * renamed since the application making the access was compiled.
+ *
+ * @returns a pointer to the field; it can be cast to the correct type and read
+ * from or written to.
+ */
+void *av_opt_ptr(const AVClass *avclass, void *obj, const char *name);
+
+/**
+ * Free an AVOptionRanges struct and set it to NULL.
+ */
+void av_opt_freep_ranges(AVOptionRanges **ranges);
+
+/**
+ * Get a list of allowed ranges for the given option.
+ *
+ * The returned list may depend on other fields in obj like for example profile.
+ *
+ * @param flags is a bitmask of flags; undefined flags must not be set and will be ignored
+ * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to an AVClass instead of a full instance
+ * AV_OPT_MULTI_COMPONENT_RANGE indicates that the function may return more than one component, @see AVOptionRanges
+ *
+ * The result must be freed with av_opt_freep_ranges.
+ *
+ * @return number of components returned on success, a negative error code otherwise
+ */
+int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags);
+
+/**
+ * Copy options from src object into dest object.
+ *
+ * The underlying AVClass of both src and dest must coincide. The guarantee
+ * below does not apply if this is not fulfilled.
+ *
+ * Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object.
+ * Original memory allocated for such options is freed unless both the src and dest options point to the same memory.
+ *
+ * Even on error it is guaranteed that allocated options from src and dest
+ * no longer alias each other afterwards; in particular calling av_opt_free()
+ * on both src and dest is safe afterwards if dest has been memdup'ed from src.
+ *
+ * @param dest Object to copy into
+ * @param src Object to copy from
+ * @return 0 on success, negative on error
+ */
+int av_opt_copy(void *dest, const void *src);
+
+/**
+ * Get a default list of allowed ranges for the given option.
+ *
+ * This list is constructed without using the AVClass.query_ranges() callback
+ * and can be used as fallback from within the callback.
+ *
+ * @param flags is a bitmask of flags; undefined flags must not be set and will be ignored
+ * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to an AVClass instead of a full instance
+ * AV_OPT_MULTI_COMPONENT_RANGE indicates that the function may return more than one component, @see AVOptionRanges
+ *
+ * The result must be freed with av_opt_freep_ranges().
+ *
+ * @return number of components returned on success, a negative error code otherwise
+ */
+int av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags);
+
+/**
+ * Check if given option is set to its default value.
+ *
+ * The option o must belong to obj. This function must not be called to check a child's option state.
+ * @see av_opt_is_set_to_default_by_name().
+ *
+ * @param obj AVClass object to check option on
+ * @param o option to be checked
+ * @return >0 when option is set to its default,
+ * 0 when option is not set to its default,
+ * <0 on error
+ */
+int av_opt_is_set_to_default(void *obj, const AVOption *o);
+
+/**
+ * Check if given option is set to its default value.
+ *
+ * @param obj AVClass object to check option on
+ * @param name option name
+ * @param search_flags combination of AV_OPT_SEARCH_*
+ * @return >0 when option is set to its default,
+ * 0 when option is not set to its default,
+ * <0 on error
+ */
+int av_opt_is_set_to_default_by_name(void *obj, const char *name, int search_flags);
+
+
+#define AV_OPT_SERIALIZE_SKIP_DEFAULTS 0x00000001 ///< Serialize only options that are not set to their default values.
+#define AV_OPT_SERIALIZE_OPT_FLAGS_EXACT 0x00000002 ///< Serialize only options whose flags exactly match opt_flags.
+
+/**
+ * Serialize object's options.
+ *
+ * Create a string containing the object's serialized options.
+ * Such a string may be passed back to av_opt_set_from_string() in order to restore option values.
+ * A key/value or pairs separator occurring in the serialized value or
+ * name string is escaped through the av_escape() function.
+ *
+ * @param[in] obj AVClass object to serialize
+ * @param[in] opt_flags serialize options with all the specified flags set (AV_OPT_FLAG)
+ * @param[in] flags combination of AV_OPT_SERIALIZE_* flags
+ * @param[out] buffer Pointer to a buffer that will be allocated with a string containing the serialized options.
+ * The buffer must be freed by the caller when it is no longer needed.
+ * @param[in] key_val_sep character used to separate key from value
+ * @param[in] pairs_sep character used to separate two pairs from each other
+ * @return >= 0 on success, negative on error
+ * @warning Separators must not be '\\' or '\0', and they must not be the same.
+ */
+int av_opt_serialize(void *obj, int opt_flags, int flags, char **buffer,
+ const char key_val_sep, const char pairs_sep);
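/*
 * A sketch of av_opt_serialize(); the AVCodecContext is illustrative and any
 * AVOptions-enabled struct would do.
 * @code
 * AVCodecContext *obj = avcodec_alloc_context3(NULL);
 * char *buf = NULL;
 * if (av_opt_serialize(obj, 0, AV_OPT_SERIALIZE_SKIP_DEFAULTS, &buf, '=', ':') >= 0) {
 *     // buf holds pairs such as "name=value:name=value" and can be fed back
 *     // to av_opt_set_from_string(); it must be freed with av_free()
 *     av_free(buf);
 * }
 * avcodec_free_context(&obj);
 * @endcode
 */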
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_OPT_H */
diff --git a/media/ffvpx/libavutil/parseutils.c b/media/ffvpx/libavutil/parseutils.c
new file mode 100644
index 0000000000..c2916c458b
--- /dev/null
+++ b/media/ffvpx/libavutil/parseutils.c
@@ -0,0 +1,790 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * misc parsing utilities
+ */
+
+#include <time.h>
+
+#include "avstring.h"
+#include "avutil.h"
+#include "common.h"
+#include "eval.h"
+#include "log.h"
+/* #include "random_seed.h" */
+#include "time_internal.h"
+#include "parseutils.h"
+#include "fftime.h"
+
+#ifdef TEST
+
+#define av_get_random_seed av_get_random_seed_deterministic
+static uint32_t av_get_random_seed_deterministic(void);
+
+#define av_gettime() 1331972053200000
+
+#endif
+
+int av_parse_ratio(AVRational *q, const char *str, int max,
+ int log_offset, void *log_ctx)
+{
+ char c;
+ int ret;
+
+ if (sscanf(str, "%d:%d%c", &q->num, &q->den, &c) != 2) {
+ double d;
+ ret = av_expr_parse_and_eval(&d, str, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, log_offset, log_ctx);
+ if (ret < 0)
+ return ret;
+ *q = av_d2q(d, max);
+ } else {
+ av_reduce(&q->num, &q->den, q->num, q->den, max);
+ }
+
+ return 0;
+}
+
+typedef struct VideoSizeAbbr {
+ const char *abbr;
+ int width, height;
+} VideoSizeAbbr;
+
+typedef struct VideoRateAbbr {
+ const char *abbr;
+ AVRational rate;
+} VideoRateAbbr;
+
+static const VideoSizeAbbr video_size_abbrs[] = {
+ { "ntsc", 720, 480 },
+ { "pal", 720, 576 },
+ { "qntsc", 352, 240 }, /* VCD compliant NTSC */
+ { "qpal", 352, 288 }, /* VCD compliant PAL */
+ { "sntsc", 640, 480 }, /* square pixel NTSC */
+ { "spal", 768, 576 }, /* square pixel PAL */
+ { "film", 352, 240 },
+ { "ntsc-film", 352, 240 },
+ { "sqcif", 128, 96 },
+ { "qcif", 176, 144 },
+ { "cif", 352, 288 },
+ { "4cif", 704, 576 },
+ { "16cif", 1408,1152 },
+ { "qqvga", 160, 120 },
+ { "qvga", 320, 240 },
+ { "vga", 640, 480 },
+ { "svga", 800, 600 },
+ { "xga", 1024, 768 },
+ { "uxga", 1600,1200 },
+ { "qxga", 2048,1536 },
+ { "sxga", 1280,1024 },
+ { "qsxga", 2560,2048 },
+ { "hsxga", 5120,4096 },
+ { "wvga", 852, 480 },
+ { "wxga", 1366, 768 },
+ { "wsxga", 1600,1024 },
+ { "wuxga", 1920,1200 },
+ { "woxga", 2560,1600 },
+ { "wqhd", 2560,1440 },
+ { "wqsxga", 3200,2048 },
+ { "wquxga", 3840,2400 },
+ { "whsxga", 6400,4096 },
+ { "whuxga", 7680,4800 },
+ { "cga", 320, 200 },
+ { "ega", 640, 350 },
+ { "hd480", 852, 480 },
+ { "hd720", 1280, 720 },
+ { "hd1080", 1920,1080 },
+ { "quadhd", 2560,1440 },
+ { "2k", 2048,1080 }, /* Digital Cinema System Specification */
+ { "2kdci", 2048,1080 },
+ { "2kflat", 1998,1080 },
+ { "2kscope", 2048, 858 },
+ { "4k", 4096,2160 }, /* Digital Cinema System Specification */
+ { "4kdci", 4096,2160 },
+ { "4kflat", 3996,2160 },
+ { "4kscope", 4096,1716 },
+ { "nhd", 640,360 },
+ { "hqvga", 240,160 },
+ { "wqvga", 400,240 },
+ { "fwqvga", 432,240 },
+ { "hvga", 480,320 },
+ { "qhd", 960,540 },
+ { "uhd2160", 3840,2160 },
+ { "uhd4320", 7680,4320 },
+};
+
+static const VideoRateAbbr video_rate_abbrs[]= {
+ { "ntsc", { 30000, 1001 } },
+ { "pal", { 25, 1 } },
+ { "qntsc", { 30000, 1001 } }, /* VCD compliant NTSC */
+ { "qpal", { 25, 1 } }, /* VCD compliant PAL */
+ { "sntsc", { 30000, 1001 } }, /* square pixel NTSC */
+ { "spal", { 25, 1 } }, /* square pixel PAL */
+ { "film", { 24, 1 } },
+ { "ntsc-film", { 24000, 1001 } },
+};
+
+static const char *months[12] = {
+ "january", "february", "march", "april", "may", "june", "july", "august",
+ "september", "october", "november", "december"
+};
+
+int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str)
+{
+ int i;
+ int n = FF_ARRAY_ELEMS(video_size_abbrs);
+ const char *p;
+ int width = 0, height = 0;
+
+ for (i = 0; i < n; i++) {
+ if (!strcmp(video_size_abbrs[i].abbr, str)) {
+ width = video_size_abbrs[i].width;
+ height = video_size_abbrs[i].height;
+ break;
+ }
+ }
+ if (i == n) {
+ width = strtol(str, (void*)&p, 10);
+ if (*p)
+ p++;
+ height = strtol(p, (void*)&p, 10);
+
+ /* trailing extraneous data detected, like in 123x345foobar */
+ if (*p)
+ return AVERROR(EINVAL);
+ }
+ if (width <= 0 || height <= 0)
+ return AVERROR(EINVAL);
+ *width_ptr = width;
+ *height_ptr = height;
+ return 0;
+}
+
+int av_parse_video_rate(AVRational *rate, const char *arg)
+{
+ int i, ret;
+ int n = FF_ARRAY_ELEMS(video_rate_abbrs);
+
+ /* First, we check our abbreviation table */
+ for (i = 0; i < n; ++i)
+ if (!strcmp(video_rate_abbrs[i].abbr, arg)) {
+ *rate = video_rate_abbrs[i].rate;
+ return 0;
+ }
+
+ /* Then, we try to parse it as fraction */
+ if ((ret = av_parse_ratio_quiet(rate, arg, 1001000)) < 0)
+ return ret;
+ if (rate->num <= 0 || rate->den <= 0)
+ return AVERROR(EINVAL);
+ return 0;
+}
+
+typedef struct ColorEntry {
+ const char *name; ///< a string representing the name of the color
+ uint8_t rgb_color[3]; ///< RGB values for the color
+} ColorEntry;
+
+static const ColorEntry color_table[] = {
+ { "AliceBlue", { 0xF0, 0xF8, 0xFF } },
+ { "AntiqueWhite", { 0xFA, 0xEB, 0xD7 } },
+ { "Aqua", { 0x00, 0xFF, 0xFF } },
+ { "Aquamarine", { 0x7F, 0xFF, 0xD4 } },
+ { "Azure", { 0xF0, 0xFF, 0xFF } },
+ { "Beige", { 0xF5, 0xF5, 0xDC } },
+ { "Bisque", { 0xFF, 0xE4, 0xC4 } },
+ { "Black", { 0x00, 0x00, 0x00 } },
+ { "BlanchedAlmond", { 0xFF, 0xEB, 0xCD } },
+ { "Blue", { 0x00, 0x00, 0xFF } },
+ { "BlueViolet", { 0x8A, 0x2B, 0xE2 } },
+ { "Brown", { 0xA5, 0x2A, 0x2A } },
+ { "BurlyWood", { 0xDE, 0xB8, 0x87 } },
+ { "CadetBlue", { 0x5F, 0x9E, 0xA0 } },
+ { "Chartreuse", { 0x7F, 0xFF, 0x00 } },
+ { "Chocolate", { 0xD2, 0x69, 0x1E } },
+ { "Coral", { 0xFF, 0x7F, 0x50 } },
+ { "CornflowerBlue", { 0x64, 0x95, 0xED } },
+ { "Cornsilk", { 0xFF, 0xF8, 0xDC } },
+ { "Crimson", { 0xDC, 0x14, 0x3C } },
+ { "Cyan", { 0x00, 0xFF, 0xFF } },
+ { "DarkBlue", { 0x00, 0x00, 0x8B } },
+ { "DarkCyan", { 0x00, 0x8B, 0x8B } },
+ { "DarkGoldenRod", { 0xB8, 0x86, 0x0B } },
+ { "DarkGray", { 0xA9, 0xA9, 0xA9 } },
+ { "DarkGreen", { 0x00, 0x64, 0x00 } },
+ { "DarkKhaki", { 0xBD, 0xB7, 0x6B } },
+ { "DarkMagenta", { 0x8B, 0x00, 0x8B } },
+ { "DarkOliveGreen", { 0x55, 0x6B, 0x2F } },
+ { "Darkorange", { 0xFF, 0x8C, 0x00 } },
+ { "DarkOrchid", { 0x99, 0x32, 0xCC } },
+ { "DarkRed", { 0x8B, 0x00, 0x00 } },
+ { "DarkSalmon", { 0xE9, 0x96, 0x7A } },
+ { "DarkSeaGreen", { 0x8F, 0xBC, 0x8F } },
+ { "DarkSlateBlue", { 0x48, 0x3D, 0x8B } },
+ { "DarkSlateGray", { 0x2F, 0x4F, 0x4F } },
+ { "DarkTurquoise", { 0x00, 0xCE, 0xD1 } },
+ { "DarkViolet", { 0x94, 0x00, 0xD3 } },
+ { "DeepPink", { 0xFF, 0x14, 0x93 } },
+ { "DeepSkyBlue", { 0x00, 0xBF, 0xFF } },
+ { "DimGray", { 0x69, 0x69, 0x69 } },
+ { "DodgerBlue", { 0x1E, 0x90, 0xFF } },
+ { "FireBrick", { 0xB2, 0x22, 0x22 } },
+ { "FloralWhite", { 0xFF, 0xFA, 0xF0 } },
+ { "ForestGreen", { 0x22, 0x8B, 0x22 } },
+ { "Fuchsia", { 0xFF, 0x00, 0xFF } },
+ { "Gainsboro", { 0xDC, 0xDC, 0xDC } },
+ { "GhostWhite", { 0xF8, 0xF8, 0xFF } },
+ { "Gold", { 0xFF, 0xD7, 0x00 } },
+ { "GoldenRod", { 0xDA, 0xA5, 0x20 } },
+ { "Gray", { 0x80, 0x80, 0x80 } },
+ { "Green", { 0x00, 0x80, 0x00 } },
+ { "GreenYellow", { 0xAD, 0xFF, 0x2F } },
+ { "HoneyDew", { 0xF0, 0xFF, 0xF0 } },
+ { "HotPink", { 0xFF, 0x69, 0xB4 } },
+ { "IndianRed", { 0xCD, 0x5C, 0x5C } },
+ { "Indigo", { 0x4B, 0x00, 0x82 } },
+ { "Ivory", { 0xFF, 0xFF, 0xF0 } },
+ { "Khaki", { 0xF0, 0xE6, 0x8C } },
+ { "Lavender", { 0xE6, 0xE6, 0xFA } },
+ { "LavenderBlush", { 0xFF, 0xF0, 0xF5 } },
+ { "LawnGreen", { 0x7C, 0xFC, 0x00 } },
+ { "LemonChiffon", { 0xFF, 0xFA, 0xCD } },
+ { "LightBlue", { 0xAD, 0xD8, 0xE6 } },
+ { "LightCoral", { 0xF0, 0x80, 0x80 } },
+ { "LightCyan", { 0xE0, 0xFF, 0xFF } },
+ { "LightGoldenRodYellow", { 0xFA, 0xFA, 0xD2 } },
+ { "LightGreen", { 0x90, 0xEE, 0x90 } },
+ { "LightGrey", { 0xD3, 0xD3, 0xD3 } },
+ { "LightPink", { 0xFF, 0xB6, 0xC1 } },
+ { "LightSalmon", { 0xFF, 0xA0, 0x7A } },
+ { "LightSeaGreen", { 0x20, 0xB2, 0xAA } },
+ { "LightSkyBlue", { 0x87, 0xCE, 0xFA } },
+ { "LightSlateGray", { 0x77, 0x88, 0x99 } },
+ { "LightSteelBlue", { 0xB0, 0xC4, 0xDE } },
+ { "LightYellow", { 0xFF, 0xFF, 0xE0 } },
+ { "Lime", { 0x00, 0xFF, 0x00 } },
+ { "LimeGreen", { 0x32, 0xCD, 0x32 } },
+ { "Linen", { 0xFA, 0xF0, 0xE6 } },
+ { "Magenta", { 0xFF, 0x00, 0xFF } },
+ { "Maroon", { 0x80, 0x00, 0x00 } },
+ { "MediumAquaMarine", { 0x66, 0xCD, 0xAA } },
+ { "MediumBlue", { 0x00, 0x00, 0xCD } },
+ { "MediumOrchid", { 0xBA, 0x55, 0xD3 } },
+ { "MediumPurple", { 0x93, 0x70, 0xD8 } },
+ { "MediumSeaGreen", { 0x3C, 0xB3, 0x71 } },
+ { "MediumSlateBlue", { 0x7B, 0x68, 0xEE } },
+ { "MediumSpringGreen", { 0x00, 0xFA, 0x9A } },
+ { "MediumTurquoise", { 0x48, 0xD1, 0xCC } },
+ { "MediumVioletRed", { 0xC7, 0x15, 0x85 } },
+ { "MidnightBlue", { 0x19, 0x19, 0x70 } },
+ { "MintCream", { 0xF5, 0xFF, 0xFA } },
+ { "MistyRose", { 0xFF, 0xE4, 0xE1 } },
+ { "Moccasin", { 0xFF, 0xE4, 0xB5 } },
+ { "NavajoWhite", { 0xFF, 0xDE, 0xAD } },
+ { "Navy", { 0x00, 0x00, 0x80 } },
+ { "OldLace", { 0xFD, 0xF5, 0xE6 } },
+ { "Olive", { 0x80, 0x80, 0x00 } },
+ { "OliveDrab", { 0x6B, 0x8E, 0x23 } },
+ { "Orange", { 0xFF, 0xA5, 0x00 } },
+ { "OrangeRed", { 0xFF, 0x45, 0x00 } },
+ { "Orchid", { 0xDA, 0x70, 0xD6 } },
+ { "PaleGoldenRod", { 0xEE, 0xE8, 0xAA } },
+ { "PaleGreen", { 0x98, 0xFB, 0x98 } },
+ { "PaleTurquoise", { 0xAF, 0xEE, 0xEE } },
+ { "PaleVioletRed", { 0xD8, 0x70, 0x93 } },
+ { "PapayaWhip", { 0xFF, 0xEF, 0xD5 } },
+ { "PeachPuff", { 0xFF, 0xDA, 0xB9 } },
+ { "Peru", { 0xCD, 0x85, 0x3F } },
+ { "Pink", { 0xFF, 0xC0, 0xCB } },
+ { "Plum", { 0xDD, 0xA0, 0xDD } },
+ { "PowderBlue", { 0xB0, 0xE0, 0xE6 } },
+ { "Purple", { 0x80, 0x00, 0x80 } },
+ { "Red", { 0xFF, 0x00, 0x00 } },
+ { "RosyBrown", { 0xBC, 0x8F, 0x8F } },
+ { "RoyalBlue", { 0x41, 0x69, 0xE1 } },
+ { "SaddleBrown", { 0x8B, 0x45, 0x13 } },
+ { "Salmon", { 0xFA, 0x80, 0x72 } },
+ { "SandyBrown", { 0xF4, 0xA4, 0x60 } },
+ { "SeaGreen", { 0x2E, 0x8B, 0x57 } },
+ { "SeaShell", { 0xFF, 0xF5, 0xEE } },
+ { "Sienna", { 0xA0, 0x52, 0x2D } },
+ { "Silver", { 0xC0, 0xC0, 0xC0 } },
+ { "SkyBlue", { 0x87, 0xCE, 0xEB } },
+ { "SlateBlue", { 0x6A, 0x5A, 0xCD } },
+ { "SlateGray", { 0x70, 0x80, 0x90 } },
+ { "Snow", { 0xFF, 0xFA, 0xFA } },
+ { "SpringGreen", { 0x00, 0xFF, 0x7F } },
+ { "SteelBlue", { 0x46, 0x82, 0xB4 } },
+ { "Tan", { 0xD2, 0xB4, 0x8C } },
+ { "Teal", { 0x00, 0x80, 0x80 } },
+ { "Thistle", { 0xD8, 0xBF, 0xD8 } },
+ { "Tomato", { 0xFF, 0x63, 0x47 } },
+ { "Turquoise", { 0x40, 0xE0, 0xD0 } },
+ { "Violet", { 0xEE, 0x82, 0xEE } },
+ { "Wheat", { 0xF5, 0xDE, 0xB3 } },
+ { "White", { 0xFF, 0xFF, 0xFF } },
+ { "WhiteSmoke", { 0xF5, 0xF5, 0xF5 } },
+ { "Yellow", { 0xFF, 0xFF, 0x00 } },
+ { "YellowGreen", { 0x9A, 0xCD, 0x32 } },
+};
+
+static int color_table_compare(const void *lhs, const void *rhs)
+{
+ return av_strcasecmp(lhs, ((const ColorEntry *)rhs)->name);
+}
+
+#define ALPHA_SEP '@'
+
+int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
+ void *log_ctx)
+{
+ char *tail, color_string2[128];
+ const ColorEntry *entry;
+ int len, hex_offset = 0;
+
+ if (color_string[0] == '#') {
+ hex_offset = 1;
+ } else if (!strncmp(color_string, "0x", 2))
+ hex_offset = 2;
+
+ if (slen < 0)
+ slen = strlen(color_string);
+ av_strlcpy(color_string2, color_string + hex_offset,
+ FFMIN(slen-hex_offset+1, sizeof(color_string2)));
+ if ((tail = strchr(color_string2, ALPHA_SEP)))
+ *tail++ = 0;
+ len = strlen(color_string2);
+ rgba_color[3] = 255;
+
+ if (!av_strcasecmp(color_string2, "random") || !av_strcasecmp(color_string2, "bikeshed")) {
+ int rgba = 0xffffffff; /* av_get_random_seed(); */
+ rgba_color[0] = rgba >> 24;
+ rgba_color[1] = rgba >> 16;
+ rgba_color[2] = rgba >> 8;
+ rgba_color[3] = rgba;
+ } else if (hex_offset ||
+ strspn(color_string2, "0123456789ABCDEFabcdef") == len) {
+ char *tail;
+ unsigned int rgba = strtoul(color_string2, &tail, 16);
+
+ if (*tail || (len != 6 && len != 8)) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid 0xRRGGBB[AA] color string: '%s'\n", color_string2);
+ return AVERROR(EINVAL);
+ }
+ if (len == 8) {
+ rgba_color[3] = rgba;
+ rgba >>= 8;
+ }
+ rgba_color[0] = rgba >> 16;
+ rgba_color[1] = rgba >> 8;
+ rgba_color[2] = rgba;
+ } else {
+ entry = bsearch(color_string2,
+ color_table,
+ FF_ARRAY_ELEMS(color_table),
+ sizeof(ColorEntry),
+ color_table_compare);
+ if (!entry) {
+ av_log(log_ctx, AV_LOG_ERROR, "Cannot find color '%s'\n", color_string2);
+ return AVERROR(EINVAL);
+ }
+ memcpy(rgba_color, entry->rgb_color, 3);
+ }
+
+ if (tail) {
+ double alpha;
+ const char *alpha_string = tail;
+ if (!strncmp(alpha_string, "0x", 2)) {
+ alpha = strtoul(alpha_string, &tail, 16);
+ } else {
+ double norm_alpha = strtod(alpha_string, &tail);
+ if (norm_alpha < 0.0 || norm_alpha > 1.0)
+ alpha = 256;
+ else
+ alpha = 255 * norm_alpha;
+ }
+
+ if (tail == alpha_string || *tail || alpha > 255 || alpha < 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid alpha value specifier '%s' in '%s'\n",
+ alpha_string, color_string);
+ return AVERROR(EINVAL);
+ }
+ rgba_color[3] = alpha;
+ }
+
+ return 0;
+}
+
+const char *av_get_known_color_name(int color_idx, const uint8_t **rgbp)
+{
+ const ColorEntry *color;
+
+ if ((unsigned)color_idx >= FF_ARRAY_ELEMS(color_table))
+ return NULL;
+
+ color = &color_table[color_idx];
+ if (rgbp)
+ *rgbp = color->rgb_color;
+
+ return color->name;
+}
+
+/* get a positive number between n_min and n_max, for a maximum length
+ of len_max. Return -1 if error. */
+static int date_get_num(const char **pp,
+ int n_min, int n_max, int len_max)
+{
+ int i, val, c;
+ const char *p;
+
+ p = *pp;
+ val = 0;
+ for(i = 0; i < len_max; i++) {
+ c = *p;
+ if (!av_isdigit(c))
+ break;
+ val = (val * 10) + c - '0';
+ p++;
+ }
+ /* no number read ? */
+ if (p == *pp)
+ return -1;
+ if (val < n_min || val > n_max)
+ return -1;
+ *pp = p;
+ return val;
+}
+
+static int date_get_month(const char **pp) {
+ int i = 0;
+ for (; i < 12; i++) {
+ if (!av_strncasecmp(*pp, months[i], 3)) {
+ const char *mo_full = months[i] + 3;
+ int len = strlen(mo_full);
+ *pp += 3;
+ if (len > 0 && !av_strncasecmp(*pp, mo_full, len))
+ *pp += len;
+ return i;
+ }
+ }
+ return -1;
+}
+
+char *av_small_strptime(const char *p, const char *fmt, struct tm *dt)
+{
+ int c, val;
+
+ while((c = *fmt++)) {
+ if (c != '%') {
+ if (av_isspace(c))
+ for (; *p && av_isspace(*p); p++);
+ else if (*p != c)
+ return NULL;
+ else p++;
+ continue;
+ }
+
+ c = *fmt++;
+ switch(c) {
+ case 'H':
+ case 'J':
+ val = date_get_num(&p, 0, c == 'H' ? 23 : INT_MAX, c == 'H' ? 2 : 4);
+
+ if (val == -1)
+ return NULL;
+ dt->tm_hour = val;
+ break;
+ case 'M':
+ val = date_get_num(&p, 0, 59, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_min = val;
+ break;
+ case 'S':
+ val = date_get_num(&p, 0, 59, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_sec = val;
+ break;
+ case 'Y':
+ val = date_get_num(&p, 0, 9999, 4);
+ if (val == -1)
+ return NULL;
+ dt->tm_year = val - 1900;
+ break;
+ case 'm':
+ val = date_get_num(&p, 1, 12, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_mon = val - 1;
+ break;
+ case 'd':
+ val = date_get_num(&p, 1, 31, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_mday = val;
+ break;
+ case 'T':
+ p = av_small_strptime(p, "%H:%M:%S", dt);
+ if (!p)
+ return NULL;
+ break;
+ case 'b':
+ case 'B':
+ case 'h':
+ val = date_get_month(&p);
+ if (val == -1)
+ return NULL;
+ dt->tm_mon = val;
+ break;
+ case '%':
+ if (*p++ != '%')
+ return NULL;
+ break;
+ default:
+ return NULL;
+ }
+ }
+
+ return (char*)p;
+}
+
+time_t av_timegm(struct tm *tm)
+{
+ time_t t;
+
+ int y = tm->tm_year + 1900, m = tm->tm_mon + 1, d = tm->tm_mday;
+
+ if (m < 3) {
+ m += 12;
+ y--;
+ }
+
+ t = 86400LL *
+ (d + (153 * m - 457) / 5 + 365 * y + y / 4 - y / 100 + y / 400 - 719469);
+
+ t += 3600 * tm->tm_hour + 60 * tm->tm_min + tm->tm_sec;
+
+ return t;
+}
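/*
 * The day-count expression above is the standard Gregorian civil-date formula
 * with March-based months, offset so that 1970-01-01 maps to day 0. As a
 * quick sanity check, 1970-01-01 becomes y=1969, m=13, d=1 after the m<3
 * adjustment, and
 *   1 + (153*13 - 457)/5 + 365*1969 + 1969/4 - 1969/100 + 1969/400 - 719469
 * = 1 + 306 + 718685 + 492 - 19 + 4 - 719469 = 0,
 * so av_timegm() returns 0 for the Unix epoch, as expected.
 */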
+
+int av_parse_time(int64_t *timeval, const char *timestr, int duration)
+{
+ const char *p, *q;
+ int64_t t, now64;
+ time_t now;
+ struct tm dt = { 0 }, tmbuf;
+ int today = 0, negative = 0, microseconds = 0, suffix = 1000000;
+ int i;
+ static const char * const date_fmt[] = {
+ "%Y - %m - %d",
+ "%Y%m%d",
+ };
+ static const char * const time_fmt[] = {
+ "%H:%M:%S",
+ "%H%M%S",
+ };
+ static const char * const tz_fmt[] = {
+ "%H:%M",
+ "%H%M",
+ "%H",
+ };
+
+ p = timestr;
+ q = NULL;
+ *timeval = INT64_MIN;
+ if (!duration) {
+ now64 = av_gettime();
+ now = now64 / 1000000;
+
+ if (!av_strcasecmp(timestr, "now")) {
+ *timeval = now64;
+ return 0;
+ }
+
+ /* parse the year-month-day part */
+ for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
+ q = av_small_strptime(p, date_fmt[i], &dt);
+ if (q)
+ break;
+ }
+
+ /* if the year-month-day part is missing, then take the
+ * current year-month-day time */
+ if (!q) {
+ today = 1;
+ q = p;
+ }
+ p = q;
+
+ if (*p == 'T' || *p == 't')
+ p++;
+ else
+ while (av_isspace(*p))
+ p++;
+
+ /* parse the hour-minute-second part */
+ for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
+ q = av_small_strptime(p, time_fmt[i], &dt);
+ if (q)
+ break;
+ }
+ } else {
+ /* parse timestr as a duration */
+ if (p[0] == '-') {
+ negative = 1;
+ ++p;
+ }
+ /* parse timestr as HH:MM:SS */
+ q = av_small_strptime(p, "%J:%M:%S", &dt);
+ if (!q) {
+ /* parse timestr as MM:SS */
+ q = av_small_strptime(p, "%M:%S", &dt);
+ dt.tm_hour = 0;
+ }
+ if (!q) {
+ char *o;
+ /* parse timestr as S+ */
+ errno = 0;
+ t = strtoll(p, &o, 10);
+ if (o == p) /* the parsing didn't succeed */
+ return AVERROR(EINVAL);
+ if (errno == ERANGE)
+ return AVERROR(ERANGE);
+ q = o;
+ } else {
+ t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
+ }
+ }
+
+ /* Now we have all the fields that we can get */
+ if (!q)
+ return AVERROR(EINVAL);
+
+ /* parse the .m... part */
+ if (*q == '.') {
+ int n;
+ q++;
+ for (n = 100000; n >= 1; n /= 10, q++) {
+ if (!av_isdigit(*q))
+ break;
+ microseconds += n * (*q - '0');
+ }
+ while (av_isdigit(*q))
+ q++;
+ }
+
+ if (duration) {
+ if (q[0] == 'm' && q[1] == 's') {
+ suffix = 1000;
+ microseconds /= 1000;
+ q += 2;
+ } else if (q[0] == 'u' && q[1] == 's') {
+ suffix = 1;
+ microseconds = 0;
+ q += 2;
+ } else if (*q == 's')
+ q++;
+ } else {
+ int is_utc = *q == 'Z' || *q == 'z';
+ int tzoffset = 0;
+ q += is_utc;
+ if (!today && !is_utc && (*q == '+' || *q == '-')) {
+ struct tm tz = { 0 };
+ int sign = (*q == '+' ? -1 : 1);
+ q++;
+ p = q;
+ for (i = 0; i < FF_ARRAY_ELEMS(tz_fmt); i++) {
+ q = av_small_strptime(p, tz_fmt[i], &tz);
+ if (q)
+ break;
+ }
+ if (!q)
+ return AVERROR(EINVAL);
+ tzoffset = sign * (tz.tm_hour * 60 + tz.tm_min) * 60;
+ is_utc = 1;
+ }
+ if (today) { /* fill in today's date */
+ struct tm dt2 = is_utc ? *gmtime_r(&now, &tmbuf) : *localtime_r(&now, &tmbuf);
+ dt2.tm_hour = dt.tm_hour;
+ dt2.tm_min = dt.tm_min;
+ dt2.tm_sec = dt.tm_sec;
+ dt = dt2;
+ }
+ dt.tm_isdst = is_utc ? 0 : -1;
+ t = is_utc ? av_timegm(&dt) : mktime(&dt);
+ t += tzoffset;
+ }
+
+ /* Check that we are at the end of the string */
+ if (*q)
+ return AVERROR(EINVAL);
+
+ if (INT64_MAX / suffix < t || t < INT64_MIN / suffix)
+ return AVERROR(ERANGE);
+ t *= suffix;
+ if (INT64_MAX - microseconds < t)
+ return AVERROR(ERANGE);
+ t += microseconds;
+ if (t == INT64_MIN && negative)
+ return AVERROR(ERANGE);
+ *timeval = negative ? -t : t;
+ return 0;
+}
+
+int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
+{
+ const char *p;
+ char tag[128], *q;
+
+ p = info;
+ if (*p == '?')
+ p++;
+ for(;;) {
+ q = tag;
+ while (*p != '\0' && *p != '=' && *p != '&') {
+ if ((q - tag) < sizeof(tag) - 1)
+ *q++ = *p;
+ p++;
+ }
+ *q = '\0';
+ q = arg;
+ if (*p == '=') {
+ p++;
+ while (*p != '&' && *p != '\0') {
+ if ((q - arg) < arg_size - 1) {
+ if (*p == '+')
+ *q++ = ' ';
+ else
+ *q++ = *p;
+ }
+ p++;
+ }
+ }
+ *q = '\0';
+ if (!strcmp(tag, tag1))
+ return 1;
+ if (*p != '&')
+ break;
+ p++;
+ }
+ return 0;
+}
diff --git a/media/ffvpx/libavutil/parseutils.h b/media/ffvpx/libavutil/parseutils.h
new file mode 100644
index 0000000000..dad5c2775b
--- /dev/null
+++ b/media/ffvpx/libavutil/parseutils.h
@@ -0,0 +1,197 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PARSEUTILS_H
+#define AVUTIL_PARSEUTILS_H
+
+#include <time.h>
+
+#include "rational.h"
+
+/**
+ * @file
+ * misc parsing utilities
+ */
+
+/**
+ * Parse str and store the parsed ratio in q.
+ *
+ * Note that a ratio with an infinite (1/0) or negative value is
+ * considered valid, so you should check the returned value if you
+ * want to exclude those values.
+ *
+ * The undefined value can be expressed using the "0:0" string.
+ *
+ * @param[in,out] q pointer to the AVRational which will contain the ratio
+ * @param[in] str the string to parse: it has to be a string in the format
+ * num:den, a float number or an expression
+ * @param[in] max the maximum allowed numerator and denominator
+ * @param[in] log_offset log level offset which is applied to the log
+ * level of log_ctx
+ * @param[in] log_ctx parent logging context
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_ratio(AVRational *q, const char *str, int max,
+ int log_offset, void *log_ctx);
+
+#define av_parse_ratio_quiet(rate, str, max) \
+ av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL)
+
+/**
+ * Parse str and put in width_ptr and height_ptr the detected values.
+ *
+ * @param[in,out] width_ptr pointer to the variable which will contain the detected
+ * width value
+ * @param[in,out] height_ptr pointer to the variable which will contain the detected
+ * height value
+ * @param[in] str the string to parse: it has to be a string in the format
+ * width x height or a valid video size abbreviation.
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str);
+
+/**
+ * Parse str and store the detected values in *rate.
+ *
+ * @param[in,out] rate pointer to the AVRational which will contain the detected
+ * frame rate
+ * @param[in] str the string to parse: it has to be a string in the format
+ * rate_num / rate_den, a float number or a valid video rate abbreviation
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_rate(AVRational *rate, const char *str);
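/*
 * A short sketch of both parsers; the abbreviations come from the internal
 * tables in parseutils.c, and explicit values or expressions are accepted too.
 * @code
 * int w, h;
 * AVRational r;
 * av_parse_video_size(&w, &h, "hd720");   // -> w = 1280, h = 720
 * av_parse_video_size(&w, &h, "640x360"); // explicit WxH also works
 * av_parse_video_rate(&r, "ntsc-film");   // -> 24000/1001
 * av_parse_video_rate(&r, "25");          // -> 25/1
 * @endcode
 */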
+
+/**
+ * Put the RGBA values that correspond to color_string in rgba_color.
+ *
+ * @param rgba_color 4-element array of uint8_t values, where the respective
+ * red, green, blue and alpha component values are written.
+ * @param color_string a string specifying a color. It can be the name of
+ * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence,
+ * possibly followed by "@" and a string representing the alpha
+ * component.
+ * The alpha component may be a string composed of "0x" followed by a
+ * hexadecimal number, or a decimal number between 0.0 and 1.0, which
+ * represents the opacity value (0x00/0.0 means completely transparent,
+ * 0xff/1.0 completely opaque).
+ * If the alpha component is not specified then 0xff is assumed.
+ * The string "random" will result in a random color.
+ * @param slen length of the initial part of color_string containing the
+ * color. It can be set to -1 if color_string is a null-terminated string
+ * containing nothing other than the color.
+ * @param log_ctx a pointer to an arbitrary struct of which the first field
+ * is a pointer to an AVClass struct (used for av_log()). Can be NULL.
+ * @return >= 0 in case of success, a negative value in case of
+ * failure (for example if color_string cannot be parsed).
+ */
+int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
+ void *log_ctx);
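/*
 * A short sketch of av_parse_color(); the values shown follow from the color
 * table and the alpha handling described above.
 * @code
 * uint8_t rgba[4];
 * av_parse_color(rgba, "red@0.5", -1, NULL);     // -> { 0xFF, 0x00, 0x00, 0x7F }
 * av_parse_color(rgba, "0x00FF00C0", -1, NULL);  // -> { 0x00, 0xFF, 0x00, 0xC0 }
 * @endcode
 */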
+
+/**
+ * Get the name of a color from the internal table of hard-coded named
+ * colors.
+ *
+ * This function is meant to enumerate the color names recognized by
+ * av_parse_color().
+ *
+ * @param color_idx index of the requested color, starting from 0
+ * @param rgb if not NULL, will point to a 3-elements array with the color value in RGB
+ * @return the color name string or NULL if color_idx is not in the array
+ */
+const char *av_get_known_color_name(int color_idx, const uint8_t **rgb);
+
+/**
+ * Parse timestr and return in *time a corresponding number of
+ * microseconds.
+ *
+ * @param timeval puts here the number of microseconds corresponding
+ * to the string in timestr. If the string represents a duration, it
+ * is the number of microseconds contained in the time interval. If
+ * the string is a date, it is the number of microseconds since the 1st of
+ * January, 1970 up to the time of the parsed date. If timestr cannot
+ * be successfully parsed, *timeval is set to INT64_MIN.
+ *
+ * @param timestr a string representing a date or a duration.
+ * - If a date the syntax is:
+ * @code
+ * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z]
+ * now
+ * @endcode
+ * If the value is "now" it takes the current time.
+ * Time is local time unless Z is appended, in which case it is
+ * interpreted as UTC.
+ * If the year-month-day part is not specified it takes the current
+ * year-month-day.
+ * - If a duration the syntax is:
+ * @code
+ * [-][HH:]MM:SS[.m...]
+ * [-]S+[.m...]
+ * @endcode
+ * @param duration flag which tells how to interpret timestr, if not
+ * zero timestr is interpreted as a duration, otherwise as a date
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_parse_time(int64_t *timeval, const char *timestr, int duration);
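/*
 * A short sketch of av_parse_time() in both modes; the exact microsecond
 * values follow from the syntax described above.
 * @code
 * int64_t t;
 * av_parse_time(&t, "01:02:03.5", 1);            // duration -> 3723500000 us
 * av_parse_time(&t, "2024-04-07T17:32:43Z", 0);  // UTC date -> us since the epoch
 * @endcode
 */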
+
+/**
+ * Attempt to find a specific tag in a URL.
+ *
+ * syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done ('+'
+ * is converted to a space). Return 1 if the tag was found.
+ */
+int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
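/*
 * A small sketch of av_find_info_tag(); the query string is made up for the
 * example.
 * @code
 * char val[64];
 * if (av_find_info_tag(val, sizeof(val), "mode", "?speed=2&mode=fast+start"))
 *     // val now contains "fast start" ('+' was decoded to a space)
 *     av_log(NULL, AV_LOG_INFO, "mode=%s\n", val);
 * @endcode
 */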
+
+/**
+ * Simplified version of strptime
+ *
+ * Parse the input string p according to the format string fmt and
+ * store its results in the structure dt.
+ * This implementation supports only a subset of the formats supported
+ * by the standard strptime().
+ *
+ * The supported input field descriptors are listed below.
+ * - `%%H`: the hour as a decimal number, using a 24-hour clock, in the
+ * range '00' through '23'
+ * - `%%J`: hours as a decimal number, in the range '0' through INT_MAX
+ * - `%%M`: the minute as a decimal number, in the range '00' through '59'
+ * - `%%S`: the second as a decimal number, in the range '00' through '59'
+ * - `%%Y`: the year as a decimal number, using the Gregorian calendar
+ * - `%%m`: the month as a decimal number, in the range '1' through '12'
+ * - `%%d`: the day of the month as a decimal number, in the range '1'
+ * through '31'
+ * - `%%T`: alias for `%%H:%%M:%%S`
+ * - `%%`: a literal `%`
+ *
+ * @return a pointer to the first character not processed in this function
+ * call. In case the input string contains more characters than
+ * required by the format string the return value points right after
+ * the last consumed input character. In case the whole input string
+ * is consumed the return value points to the null byte at the end of
+ * the string. On failure NULL is returned.
+ */
+char *av_small_strptime(const char *p, const char *fmt, struct tm *dt);
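/*
 * A small sketch of av_small_strptime() combined with av_timegm(); the date
 * string is arbitrary.
 * @code
 * struct tm dt = { 0 };
 * const char *rest = av_small_strptime("2024-04-07 17:32:43",
 *                                      "%Y-%m-%d %H:%M:%S", &dt);
 * if (rest && !*rest) {                 // the whole input was consumed
 *     time_t utc = av_timegm(&dt);      // interpret the fields as UTC
 * }
 * @endcode
 */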
+
+/**
+ * Convert the decomposed UTC time in tm to a time_t value.
+ */
+time_t av_timegm(struct tm *tm);
+
+#endif /* AVUTIL_PARSEUTILS_H */
diff --git a/media/ffvpx/libavutil/pixdesc.c b/media/ffvpx/libavutil/pixdesc.c
new file mode 100644
index 0000000000..62a2ae08d9
--- /dev/null
+++ b/media/ffvpx/libavutil/pixdesc.c
@@ -0,0 +1,3327 @@
+/*
+ * pixel format descriptor
+ * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "avstring.h"
+#include "common.h"
+#include "pixfmt.h"
+#include "pixdesc.h"
+#include "intreadwrite.h"
+
+void av_read_image_line2(void *dst,
+ const uint8_t *data[4], const int linesize[4],
+ const AVPixFmtDescriptor *desc,
+ int x, int y, int c, int w,
+ int read_pal_component,
+ int dst_element_size)
+{
+ AVComponentDescriptor comp = desc->comp[c];
+ int plane = comp.plane;
+ int depth = comp.depth;
+ unsigned mask = (1ULL << depth) - 1;
+ int shift = comp.shift;
+ int step = comp.step;
+ int flags = desc->flags;
+ uint16_t *dst16 = dst;
+ uint32_t *dst32 = dst;
+
+ if (flags & AV_PIX_FMT_FLAG_BITSTREAM) {
+ if (depth == 10) {
+ // Assume all channels are packed into a 32bit value
+ const uint8_t *byte_p = data[plane] + y * linesize[plane];
+ const uint32_t *p = (uint32_t *)byte_p;
+
+ while (w--) {
+ int val = AV_RB32(p);
+ val = (val >> comp.offset) & mask;
+ if (read_pal_component)
+ val = data[1][4*val + c];
+ if (dst_element_size == 4) *dst32++ = val;
+ else *dst16++ = val;
+ p++;
+ }
+ } else {
+ int skip = x * step + comp.offset;
+ const uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3);
+ int shift = 8 - depth - (skip & 7);
+
+ while (w--) {
+ int val = (*p >> shift) & mask;
+ if (read_pal_component)
+ val = data[1][4*val + c];
+ shift -= step;
+ p -= shift >> 3;
+ shift &= 7;
+ if (dst_element_size == 4) *dst32++ = val;
+ else *dst16++ = val;
+ }
+ }
+ } else {
+ const uint8_t *p = data[plane] + y * linesize[plane] +
+ x * step + comp.offset;
+ int is_8bit = shift + depth <= 8;
+ int is_16bit= shift + depth <=16;
+
+ if (is_8bit)
+ p += !!(flags & AV_PIX_FMT_FLAG_BE);
+
+ while (w--) {
+ unsigned val;
+ if (is_8bit) val = *p;
+ else if(is_16bit) val = flags & AV_PIX_FMT_FLAG_BE ? AV_RB16(p) : AV_RL16(p);
+ else val = flags & AV_PIX_FMT_FLAG_BE ? AV_RB32(p) : AV_RL32(p);
+ val = (val >> shift) & mask;
+ if (read_pal_component)
+ val = data[1][4 * val + c];
+ p += step;
+ if (dst_element_size == 4) *dst32++ = val;
+ else *dst16++ = val;
+ }
+ }
+}
+
+void av_read_image_line(uint16_t *dst,
+ const uint8_t *data[4], const int linesize[4],
+ const AVPixFmtDescriptor *desc,
+ int x, int y, int c, int w,
+ int read_pal_component)
+{
+ av_read_image_line2(dst, data, linesize, desc,x, y, c, w,
+ read_pal_component,
+ 2);
+}
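/*
 * A minimal usage sketch for the readers above (not part of the upstream
 * file). Assuming `frame` is a decoded yuv420p AVFrame and `row` has at
 * least `width` elements, this copies one line of the luma component (c = 0)
 * into a uint16_t buffer:
 *
 *     const AVPixFmtDescriptor *d = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P);
 *     uint16_t row[width];
 *     av_read_image_line(row, (const uint8_t **)frame->data, frame->linesize,
 *                        d, 0, y, 0, width, 0);
 *
 * Values land right-aligned in each element regardless of the source depth;
 * av_read_image_line2() with dst_element_size == 4 fills uint32_t elements
 * instead, which formats with 32-bit components require.
 */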
+
+void av_write_image_line2(const void *src,
+ uint8_t *data[4], const int linesize[4],
+ const AVPixFmtDescriptor *desc,
+ int x, int y, int c, int w, int src_element_size)
+{
+ AVComponentDescriptor comp = desc->comp[c];
+ int plane = comp.plane;
+ int depth = comp.depth;
+ int step = comp.step;
+ int flags = desc->flags;
+ const uint32_t *src32 = src;
+ const uint16_t *src16 = src;
+
+ if (flags & AV_PIX_FMT_FLAG_BITSTREAM) {
+ if (depth == 10) {
+ // Assume all channels are packed into a 32bit value
+ const uint8_t *byte_p = data[plane] + y * linesize[plane];
+ uint32_t *p = (uint32_t *)byte_p;
+ int offset = comp.offset;
+ uint32_t mask = ((1ULL << depth) - 1) << offset;
+
+ while (w--) {
+ uint16_t val = src_element_size == 4 ? *src32++ : *src16++;
+ AV_WB32(p, (AV_RB32(p) & ~mask) | (val << offset));
+ p++;
+ }
+ } else {
+ int skip = x * step + comp.offset;
+ uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3);
+ int shift = 8 - depth - (skip & 7);
+
+ while (w--) {
+ *p |= (src_element_size == 4 ? *src32++ : *src16++) << shift;
+ shift -= step;
+ p -= shift >> 3;
+ shift &= 7;
+ }
+ }
+ } else {
+ int shift = comp.shift;
+ uint8_t *p = data[plane] + y * linesize[plane] +
+ x * step + comp.offset;
+
+ if (shift + depth <= 8) {
+ p += !!(flags & AV_PIX_FMT_FLAG_BE);
+ while (w--) {
+ *p |= ((src_element_size == 4 ? *src32++ : *src16++) << shift);
+ p += step;
+ }
+ } else {
+ while (w--) {
+ unsigned s = (src_element_size == 4 ? *src32++ : *src16++);
+ if (shift + depth <= 16) {
+ if (flags & AV_PIX_FMT_FLAG_BE) {
+ uint16_t val = AV_RB16(p) | (s << shift);
+ AV_WB16(p, val);
+ } else {
+ uint16_t val = AV_RL16(p) | (s << shift);
+ AV_WL16(p, val);
+ }
+ } else {
+ if (flags & AV_PIX_FMT_FLAG_BE) {
+ uint32_t val = AV_RB32(p) | (s << shift);
+ AV_WB32(p, val);
+ } else {
+ uint32_t val = AV_RL32(p) | (s << shift);
+ AV_WL32(p, val);
+ }
+ }
+ p += step;
+ }
+ }
+ }
+}
+
+void av_write_image_line(const uint16_t *src,
+ uint8_t *data[4], const int linesize[4],
+ const AVPixFmtDescriptor *desc,
+ int x, int y, int c, int w)
+{
+ av_write_image_line2(src, data, linesize, desc, x, y, c, w, 2);
+}
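/*
 * The inverse path, sketched (not part of the upstream file): writing a line
 * of component values back with av_write_image_line(). The writers above OR
 * new bits into the destination rather than overwriting it, so the target
 * bytes are assumed to have been cleared first:
 *
 *     const AVPixFmtDescriptor *d = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P);
 *     memset(frame->data[0] + y * frame->linesize[0], 0, width);
 *     av_write_image_line(row, frame->data, frame->linesize,
 *                         d, 0, y, 0, width);
 */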
+
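/*
 * Reading aid (not part of the upstream file): each comp[] initializer below
 * lists the AVComponentDescriptor fields in declaration order,
 * { plane, step, offset, shift, depth }. For example, the R entry of
 * rgb565le, { 0, 2, 1, 3, 5 }, means: plane 0, a 2-byte step between pixels,
 * the component's byte starts 1 byte into the pixel, its value sits 3 bits
 * up within that byte, and it is 5 bits wide.
 */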
+static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
+ [AV_PIX_FMT_YUV420P] = {
+ .name = "yuv420p",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUYV422] = {
+ .name = "yuyv422",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 8 }, /* Y */
+ { 0, 4, 1, 0, 8 }, /* U */
+ { 0, 4, 3, 0, 8 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_YVYU422] = {
+ .name = "yvyu422",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 8 }, /* Y */
+ { 0, 4, 3, 0, 8 }, /* U */
+ { 0, 4, 1, 0, 8 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_Y210LE] = {
+ .name = "y210le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 6, 10 }, /* Y */
+ { 0, 8, 2, 6, 10 }, /* U */
+ { 0, 8, 6, 6, 10 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_Y210BE] = {
+ .name = "y210be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 6, 10 }, /* Y */
+ { 0, 8, 2, 6, 10 }, /* U */
+ { 0, 8, 6, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_RGB24] = {
+ .name = "rgb24",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 3, 0, 0, 8 }, /* R */
+ { 0, 3, 1, 0, 8 }, /* G */
+ { 0, 3, 2, 0, 8 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR24] = {
+ .name = "bgr24",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 3, 2, 0, 8 }, /* R */
+ { 0, 3, 1, 0, 8 }, /* G */
+ { 0, 3, 0, 0, 8 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_X2RGB10LE] = {
+ .name = "x2rgb10le",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 2, 4, 10 }, /* R */
+ { 0, 4, 1, 2, 10 }, /* G */
+ { 0, 4, 0, 0, 10 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_X2RGB10BE] = {
+ .name = "x2rgb10be",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 0, 4, 10 }, /* R */
+ { 0, 4, 1, 2, 10 }, /* G */
+ { 0, 4, 2, 0, 10 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_X2BGR10LE] = {
+ .name = "x2bgr10le",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 0, 0, 10 }, /* R */
+ { 0, 4, 1, 2, 10 }, /* G */
+ { 0, 4, 2, 4, 10 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_X2BGR10BE] = {
+ .name = "x2bgr10be",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 2, 0, 10 }, /* R */
+ { 0, 4, 1, 2, 10 }, /* G */
+ { 0, 4, 0, 4, 10 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_YUV422P] = {
+ .name = "yuv422p",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P] = {
+ .name = "yuv444p",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV410P] = {
+ .name = "yuv410p",
+ .nb_components = 3,
+ .log2_chroma_w = 2,
+ .log2_chroma_h = 2,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV411P] = {
+ .name = "yuv411p",
+ .nb_components = 3,
+ .log2_chroma_w = 2,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUVJ411P] = {
+ .name = "yuvj411p",
+ .nb_components = 3,
+ .log2_chroma_w = 2,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_GRAY8] = {
+ .name = "gray",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ },
+ .alias = "gray8,y8",
+ },
+ [AV_PIX_FMT_MONOWHITE] = {
+ .name = "monow",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 1 }, /* Y */
+ },
+ .flags = AV_PIX_FMT_FLAG_BITSTREAM,
+ },
+ [AV_PIX_FMT_MONOBLACK] = {
+ .name = "monob",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 7, 1 }, /* Y */
+ },
+ .flags = AV_PIX_FMT_FLAG_BITSTREAM,
+ },
+ [AV_PIX_FMT_PAL8] = {
+ .name = "pal8",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 },
+ },
+ .flags = AV_PIX_FMT_FLAG_PAL | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVJ420P] = {
+ .name = "yuvj420p",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUVJ422P] = {
+ .name = "yuvj422p",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUVJ444P] = {
+ .name = "yuvj444p",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+#if FF_API_XVMC
+ [AV_PIX_FMT_XVMC] = {
+ .name = "xvmc",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+#endif
+ [AV_PIX_FMT_UYVY422] = {
+ .name = "uyvy422",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 1, 0, 8 }, /* Y */
+ { 0, 4, 0, 0, 8 }, /* U */
+ { 0, 4, 2, 0, 8 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_UYYVYY411] = {
+ .name = "uyyvyy411",
+ .nb_components = 3,
+ .log2_chroma_w = 2,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 1, 0, 8 }, /* Y */
+ { 0, 6, 0, 0, 8 }, /* U */
+ { 0, 6, 3, 0, 8 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_BGR8] = {
+ .name = "bgr8",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 3 }, /* R */
+ { 0, 1, 0, 3, 3 }, /* G */
+ { 0, 1, 0, 6, 2 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR4] = {
+ .name = "bgr4",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 3, 0, 1 }, /* R */
+ { 0, 4, 1, 0, 2 }, /* G */
+ { 0, 4, 0, 0, 1 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR4_BYTE] = {
+ .name = "bgr4_byte",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 1 }, /* R */
+ { 0, 1, 0, 1, 2 }, /* G */
+ { 0, 1, 0, 3, 1 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_RGB8] = {
+ .name = "rgb8",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 6, 2 }, /* R */
+ { 0, 1, 0, 3, 3 }, /* G */
+ { 0, 1, 0, 0, 3 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_RGB4] = {
+ .name = "rgb4",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 0, 1 }, /* R */
+ { 0, 4, 1, 0, 2 }, /* G */
+ { 0, 4, 3, 0, 1 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_RGB4_BYTE] = {
+ .name = "rgb4_byte",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 3, 1 }, /* R */
+ { 0, 1, 0, 1, 2 }, /* G */
+ { 0, 1, 0, 0, 1 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_NV12] = {
+ .name = "nv12",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 2, 0, 0, 8 }, /* U */
+ { 1, 2, 1, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_NV21] = {
+ .name = "nv21",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 2, 1, 0, 8 }, /* U */
+ { 1, 2, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_ARGB] = {
+ .name = "argb",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 1, 0, 8 }, /* R */
+ { 0, 4, 2, 0, 8 }, /* G */
+ { 0, 4, 3, 0, 8 }, /* B */
+ { 0, 4, 0, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_RGBA] = {
+ .name = "rgba",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 0, 8 }, /* R */
+ { 0, 4, 1, 0, 8 }, /* G */
+ { 0, 4, 2, 0, 8 }, /* B */
+ { 0, 4, 3, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_ABGR] = {
+ .name = "abgr",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 3, 0, 8 }, /* R */
+ { 0, 4, 2, 0, 8 }, /* G */
+ { 0, 4, 1, 0, 8 }, /* B */
+ { 0, 4, 0, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_BGRA] = {
+ .name = "bgra",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 2, 0, 8 }, /* R */
+ { 0, 4, 1, 0, 8 }, /* G */
+ { 0, 4, 0, 0, 8 }, /* B */
+ { 0, 4, 3, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_0RGB] = {
+ .name = "0rgb",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 1, 0, 8 }, /* R */
+ { 0, 4, 2, 0, 8 }, /* G */
+ { 0, 4, 3, 0, 8 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_RGB0] = {
+ .name = "rgb0",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 0, 0, 8 }, /* R */
+ { 0, 4, 1, 0, 8 }, /* G */
+ { 0, 4, 2, 0, 8 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_0BGR] = {
+ .name = "0bgr",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 3, 0, 8 }, /* R */
+ { 0, 4, 2, 0, 8 }, /* G */
+ { 0, 4, 1, 0, 8 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR0] = {
+ .name = "bgr0",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 2, 0, 8 }, /* R */
+ { 0, 4, 1, 0, 8 }, /* G */
+ { 0, 4, 0, 0, 8 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GRAY9BE] = {
+ .name = "gray9be",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ .alias = "y9be",
+ },
+ [AV_PIX_FMT_GRAY9LE] = {
+ .name = "gray9le",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ },
+ .alias = "y9le",
+ },
+ [AV_PIX_FMT_GRAY10BE] = {
+ .name = "gray10be",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ .alias = "y10be",
+ },
+ [AV_PIX_FMT_GRAY10LE] = {
+ .name = "gray10le",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ },
+ .alias = "y10le",
+ },
+ [AV_PIX_FMT_GRAY12BE] = {
+ .name = "gray12be",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ .alias = "y12be",
+ },
+ [AV_PIX_FMT_GRAY12LE] = {
+ .name = "gray12le",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ },
+ .alias = "y12le",
+ },
+ [AV_PIX_FMT_GRAY14BE] = {
+ .name = "gray14be",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 14 }, /* Y */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ .alias = "y14be",
+ },
+ [AV_PIX_FMT_GRAY14LE] = {
+ .name = "gray14le",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 14 }, /* Y */
+ },
+ .alias = "y14le",
+ },
+ [AV_PIX_FMT_GRAY16BE] = {
+ .name = "gray16be",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ .alias = "y16be",
+ },
+ [AV_PIX_FMT_GRAY16LE] = {
+ .name = "gray16le",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ },
+ .alias = "y16le",
+ },
+ [AV_PIX_FMT_YUV440P] = {
+ .name = "yuv440p",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUVJ440P] = {
+ .name = "yuvj440p",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV440P10LE] = {
+ .name = "yuv440p10le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV440P10BE] = {
+ .name = "yuv440p10be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV440P12LE] = {
+ .name = "yuv440p12le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV440P12BE] = {
+ .name = "yuv440p12be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUVA420P] = {
+ .name = "yuva420p",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ { 3, 1, 0, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA422P] = {
+ .name = "yuva422p",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ { 3, 1, 0, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA444P] = {
+ .name = "yuva444p",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ { 3, 1, 0, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA420P9BE] = {
+ .name = "yuva420p9be",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA420P9LE] = {
+ .name = "yuva420p9le",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA422P9BE] = {
+ .name = "yuva422p9be",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA422P9LE] = {
+ .name = "yuva422p9le",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA444P9BE] = {
+ .name = "yuva444p9be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA444P9LE] = {
+ .name = "yuva444p9le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA420P10BE] = {
+ .name = "yuva420p10be",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA420P10LE] = {
+ .name = "yuva420p10le",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA422P10BE] = {
+ .name = "yuva422p10be",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA422P10LE] = {
+ .name = "yuva422p10le",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA444P10BE] = {
+ .name = "yuva444p10be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA444P10LE] = {
+ .name = "yuva444p10le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA420P16BE] = {
+ .name = "yuva420p16be",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA420P16LE] = {
+ .name = "yuva420p16le",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA422P16BE] = {
+ .name = "yuva422p16be",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA422P16LE] = {
+ .name = "yuva422p16le",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA444P16BE] = {
+ .name = "yuva444p16be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA444P16LE] = {
+ .name = "yuva444p16le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_RGB48BE] = {
+ .name = "rgb48be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 6, 0, 0, 16 }, /* R */
+ { 0, 6, 2, 0, 16 }, /* G */
+ { 0, 6, 4, 0, 16 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_RGB48LE] = {
+ .name = "rgb48le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 6, 0, 0, 16 }, /* R */
+ { 0, 6, 2, 0, 16 }, /* G */
+ { 0, 6, 4, 0, 16 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_RGBA64BE] = {
+ .name = "rgba64be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 0, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 4, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_RGBA64LE] = {
+ .name = "rgba64le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 0, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 4, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_RGB565BE] = {
+ .name = "rgb565be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, -1, 3, 5 }, /* R */
+ { 0, 2, 0, 5, 6 }, /* G */
+ { 0, 2, 0, 0, 5 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_RGB565LE] = {
+ .name = "rgb565le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 1, 3, 5 }, /* R */
+ { 0, 2, 0, 5, 6 }, /* G */
+ { 0, 2, 0, 0, 5 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_RGB555BE] = {
+ .name = "rgb555be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, -1, 2, 5 }, /* R */
+ { 0, 2, 0, 5, 5 }, /* G */
+ { 0, 2, 0, 0, 5 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_RGB555LE] = {
+ .name = "rgb555le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 1, 2, 5 }, /* R */
+ { 0, 2, 0, 5, 5 }, /* G */
+ { 0, 2, 0, 0, 5 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_RGB444BE] = {
+ .name = "rgb444be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, -1, 0, 4 }, /* R */
+ { 0, 2, 0, 4, 4 }, /* G */
+ { 0, 2, 0, 0, 4 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_RGB444LE] = {
+ .name = "rgb444le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 1, 0, 4 }, /* R */
+ { 0, 2, 0, 4, 4 }, /* G */
+ { 0, 2, 0, 0, 4 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR48BE] = {
+ .name = "bgr48be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 6, 4, 0, 16 }, /* R */
+ { 0, 6, 2, 0, 16 }, /* G */
+ { 0, 6, 0, 0, 16 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR48LE] = {
+ .name = "bgr48le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 6, 4, 0, 16 }, /* R */
+ { 0, 6, 2, 0, 16 }, /* G */
+ { 0, 6, 0, 0, 16 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGRA64BE] = {
+ .name = "bgra64be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 4, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 0, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_BGRA64LE] = {
+ .name = "bgra64le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 4, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 0, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_BGR565BE] = {
+ .name = "bgr565be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 5 }, /* R */
+ { 0, 2, 0, 5, 6 }, /* G */
+ { 0, 2, -1, 3, 5 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR565LE] = {
+ .name = "bgr565le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 5 }, /* R */
+ { 0, 2, 0, 5, 6 }, /* G */
+ { 0, 2, 1, 3, 5 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR555BE] = {
+ .name = "bgr555be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 5 }, /* R */
+ { 0, 2, 0, 5, 5 }, /* G */
+ { 0, 2, -1, 2, 5 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR555LE] = {
+ .name = "bgr555le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 5 }, /* R */
+ { 0, 2, 0, 5, 5 }, /* G */
+ { 0, 2, 1, 2, 5 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR444BE] = {
+ .name = "bgr444be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 4 }, /* R */
+ { 0, 2, 0, 4, 4 }, /* G */
+ { 0, 2, -1, 0, 4 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_BGR444LE] = {
+ .name = "bgr444le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 4 }, /* R */
+ { 0, 2, 0, 4, 4 }, /* G */
+ { 0, 2, 1, 0, 4 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_VAAPI] = {
+ .name = "vaapi",
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_YUV420P9LE] = {
+ .name = "yuv420p9le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV420P9BE] = {
+ .name = "yuv420p9be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV420P10LE] = {
+ .name = "yuv420p10le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV420P10BE] = {
+ .name = "yuv420p10be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV420P12LE] = {
+ .name = "yuv420p12le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV420P12BE] = {
+ .name = "yuv420p12be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV420P14LE] = {
+ .name = "yuv420p14le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV420P14BE] = {
+ .name = "yuv420p14be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV420P16LE] = {
+ .name = "yuv420p16le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV420P16BE] = {
+ .name = "yuv420p16be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV422P9LE] = {
+ .name = "yuv422p9le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV422P9BE] = {
+ .name = "yuv422p9be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV422P10LE] = {
+ .name = "yuv422p10le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV422P10BE] = {
+ .name = "yuv422p10be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV422P12LE] = {
+ .name = "yuv422p12le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV422P12BE] = {
+ .name = "yuv422p12be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV422P14LE] = {
+ .name = "yuv422p14le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV422P14BE] = {
+ .name = "yuv422p14be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV422P16LE] = {
+ .name = "yuv422p16le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV422P16BE] = {
+ .name = "yuv422p16be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P16LE] = {
+ .name = "yuv444p16le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P16BE] = {
+ .name = "yuv444p16be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P10LE] = {
+ .name = "yuv444p10le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P10BE] = {
+ .name = "yuv444p10be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P9LE] = {
+ .name = "yuv444p9le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P9BE] = {
+ .name = "yuv444p9be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P12LE] = {
+ .name = "yuv444p12le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P12BE] = {
+ .name = "yuv444p12be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P14LE] = {
+ .name = "yuv444p14le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_YUV444P14BE] = {
+ .name = "yuv444p14be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_D3D11VA_VLD] = {
+ .name = "d3d11va_vld",
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_DXVA2_VLD] = {
+ .name = "dxva2_vld",
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_YA8] = {
+ .name = "ya8",
+ .nb_components = 2,
+ .comp = {
+ { 0, 2, 0, 0, 8 }, /* Y */
+ { 0, 2, 1, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_ALPHA,
+ .alias = "gray8a",
+ },
+ [AV_PIX_FMT_YA16LE] = {
+ .name = "ya16le",
+ .nb_components = 2,
+ .comp = {
+ { 0, 4, 0, 0, 16 }, /* Y */
+ { 0, 4, 2, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YA16BE] = {
+ .name = "ya16be",
+ .nb_components = 2,
+ .comp = {
+ { 0, 4, 0, 0, 16 }, /* Y */
+ { 0, 4, 2, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_VIDEOTOOLBOX] = {
+ .name = "videotoolbox_vld",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_GBRP] = {
+ .name = "gbrp",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 1, 0, 0, 8 }, /* R */
+ { 0, 1, 0, 0, 8 }, /* G */
+ { 1, 1, 0, 0, 8 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRP9LE] = {
+ .name = "gbrp9le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 9 }, /* R */
+ { 0, 2, 0, 0, 9 }, /* G */
+ { 1, 2, 0, 0, 9 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRP9BE] = {
+ .name = "gbrp9be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 9 }, /* R */
+ { 0, 2, 0, 0, 9 }, /* G */
+ { 1, 2, 0, 0, 9 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRP10LE] = {
+ .name = "gbrp10le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 10 }, /* R */
+ { 0, 2, 0, 0, 10 }, /* G */
+ { 1, 2, 0, 0, 10 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRP10BE] = {
+ .name = "gbrp10be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 10 }, /* R */
+ { 0, 2, 0, 0, 10 }, /* G */
+ { 1, 2, 0, 0, 10 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRP12LE] = {
+ .name = "gbrp12le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 12 }, /* R */
+ { 0, 2, 0, 0, 12 }, /* G */
+ { 1, 2, 0, 0, 12 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRP12BE] = {
+ .name = "gbrp12be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 12 }, /* R */
+ { 0, 2, 0, 0, 12 }, /* G */
+ { 1, 2, 0, 0, 12 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRP14LE] = {
+ .name = "gbrp14le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 14 }, /* R */
+ { 0, 2, 0, 0, 14 }, /* G */
+ { 1, 2, 0, 0, 14 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRP14BE] = {
+ .name = "gbrp14be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 14 }, /* R */
+ { 0, 2, 0, 0, 14 }, /* G */
+ { 1, 2, 0, 0, 14 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRP16LE] = {
+ .name = "gbrp16le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 16 }, /* R */
+ { 0, 2, 0, 0, 16 }, /* G */
+ { 1, 2, 0, 0, 16 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRP16BE] = {
+ .name = "gbrp16be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 16 }, /* R */
+ { 0, 2, 0, 0, 16 }, /* G */
+ { 1, 2, 0, 0, 16 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRAP] = {
+ .name = "gbrap",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 1, 0, 0, 8 }, /* R */
+ { 0, 1, 0, 0, 8 }, /* G */
+ { 1, 1, 0, 0, 8 }, /* B */
+ { 3, 1, 0, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_GBRAP16LE] = {
+ .name = "gbrap16le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 16 }, /* R */
+ { 0, 2, 0, 0, 16 }, /* G */
+ { 1, 2, 0, 0, 16 }, /* B */
+ { 3, 2, 0, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_GBRAP16BE] = {
+ .name = "gbrap16be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 16 }, /* R */
+ { 0, 2, 0, 0, 16 }, /* G */
+ { 1, 2, 0, 0, 16 }, /* B */
+ { 3, 2, 0, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
+ AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_VDPAU] = {
+ .name = "vdpau",
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_XYZ12LE] = {
+ .name = "xyz12le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 6, 0, 4, 12 }, /* X */
+ { 0, 6, 2, 4, 12 }, /* Y */
+ { 0, 6, 4, 4, 12 }, /* Z */
+ },
+ /*.flags = -- not used*/
+ },
+ [AV_PIX_FMT_XYZ12BE] = {
+ .name = "xyz12be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 6, 0, 4, 12 }, /* X */
+ { 0, 6, 2, 4, 12 }, /* Y */
+ { 0, 6, 4, 4, 12 }, /* Z */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ },
+
+#define BAYER8_DESC_COMMON \
+ .nb_components= 3, \
+ .log2_chroma_w= 0, \
+ .log2_chroma_h= 0, \
+ .comp = { \
+ { 0, 1, 0, 0, 2 }, \
+ { 0, 1, 0, 0, 4 }, \
+ { 0, 1, 0, 0, 2 }, \
+ }, \
+
+#define BAYER16_DESC_COMMON \
+ .nb_components= 3, \
+ .log2_chroma_w= 0, \
+ .log2_chroma_h= 0, \
+ .comp = { \
+ { 0, 2, 0, 0, 4 }, \
+ { 0, 2, 0, 0, 8 }, \
+ { 0, 2, 0, 0, 4 }, \
+ }, \
+
+ [AV_PIX_FMT_BAYER_BGGR8] = {
+ .name = "bayer_bggr8",
+ BAYER8_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_BGGR16LE] = {
+ .name = "bayer_bggr16le",
+ BAYER16_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_BGGR16BE] = {
+ .name = "bayer_bggr16be",
+ BAYER16_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_RGGB8] = {
+ .name = "bayer_rggb8",
+ BAYER8_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_RGGB16LE] = {
+ .name = "bayer_rggb16le",
+ BAYER16_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_RGGB16BE] = {
+ .name = "bayer_rggb16be",
+ BAYER16_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_GBRG8] = {
+ .name = "bayer_gbrg8",
+ BAYER8_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_GBRG16LE] = {
+ .name = "bayer_gbrg16le",
+ BAYER16_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_GBRG16BE] = {
+ .name = "bayer_gbrg16be",
+ BAYER16_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_GRBG8] = {
+ .name = "bayer_grbg8",
+ BAYER8_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_GRBG16LE] = {
+ .name = "bayer_grbg16le",
+ BAYER16_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_BAYER_GRBG16BE] = {
+ .name = "bayer_grbg16be",
+ BAYER16_DESC_COMMON
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BAYER,
+ },
+ [AV_PIX_FMT_NV16] = {
+ .name = "nv16",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 2, 0, 0, 8 }, /* U */
+ { 1, 2, 1, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_NV20LE] = {
+ .name = "nv20le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 4, 0, 0, 10 }, /* U */
+ { 1, 4, 2, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_NV20BE] = {
+ .name = "nv20be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 4, 0, 0, 10 }, /* U */
+ { 1, 4, 2, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_QSV] = {
+ .name = "qsv",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_MEDIACODEC] = {
+ .name = "mediacodec",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_MMAL] = {
+ .name = "mmal",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_CUDA] = {
+ .name = "cuda",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_AYUV64LE] = {
+ .name = "ayuv64le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 2, 0, 16 }, /* Y */
+ { 0, 8, 4, 0, 16 }, /* U */
+ { 0, 8, 6, 0, 16 }, /* V */
+ { 0, 8, 0, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_AYUV64BE] = {
+ .name = "ayuv64be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 2, 0, 16 }, /* Y */
+ { 0, 8, 4, 0, 16 }, /* U */
+ { 0, 8, 6, 0, 16 }, /* V */
+ { 0, 8, 0, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_P010LE] = {
+ .name = "p010le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_P010BE] = {
+ .name = "p010be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P012LE] = {
+ .name = "p012le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 4, 12 }, /* Y */
+ { 1, 4, 0, 4, 12 }, /* U */
+ { 1, 4, 2, 4, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_P012BE] = {
+ .name = "p012be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 4, 12 }, /* Y */
+ { 1, 4, 0, 4, 12 }, /* U */
+ { 1, 4, 2, 4, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P016LE] = {
+ .name = "p016le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_P016BE] = {
+ .name = "p016be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_GBRAP12LE] = {
+ .name = "gbrap12le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 12 }, /* R */
+ { 0, 2, 0, 0, 12 }, /* G */
+ { 1, 2, 0, 0, 12 }, /* B */
+ { 3, 2, 0, 0, 12 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_GBRAP12BE] = {
+ .name = "gbrap12be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 12 }, /* R */
+ { 0, 2, 0, 0, 12 }, /* G */
+ { 1, 2, 0, 0, 12 }, /* B */
+ { 3, 2, 0, 0, 12 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
+ AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_GBRAP10LE] = {
+ .name = "gbrap10le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 10 }, /* R */
+ { 0, 2, 0, 0, 10 }, /* G */
+ { 1, 2, 0, 0, 10 }, /* B */
+ { 3, 2, 0, 0, 10 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_GBRAP10BE] = {
+ .name = "gbrap10be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 2, 0, 0, 10 }, /* R */
+ { 0, 2, 0, 0, 10 }, /* G */
+ { 1, 2, 0, 0, 10 }, /* B */
+ { 3, 2, 0, 0, 10 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
+ AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_D3D11] = {
+ .name = "d3d11",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_GBRPF32BE] = {
+ .name = "gbrpf32be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 4, 0, 0, 32 }, /* R */
+ { 0, 4, 0, 0, 32 }, /* G */
+ { 1, 4, 0, 0, 32 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
+ AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_GBRPF32LE] = {
+ .name = "gbrpf32le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 4, 0, 0, 32 }, /* R */
+ { 0, 4, 0, 0, 32 }, /* G */
+ { 1, 4, 0, 0, 32 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_FLOAT | AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_GBRAPF32BE] = {
+ .name = "gbrapf32be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 4, 0, 0, 32 }, /* R */
+ { 0, 4, 0, 0, 32 }, /* G */
+ { 1, 4, 0, 0, 32 }, /* B */
+ { 3, 4, 0, 0, 32 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
+ AV_PIX_FMT_FLAG_ALPHA | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_GBRAPF32LE] = {
+ .name = "gbrapf32le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 2, 4, 0, 0, 32 }, /* R */
+ { 0, 4, 0, 0, 32 }, /* G */
+ { 1, 4, 0, 0, 32 }, /* B */
+ { 3, 4, 0, 0, 32 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA |
+ AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_DRM_PRIME] = {
+ .name = "drm_prime",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_OPENCL] = {
+ .name = "opencl",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_GRAYF32BE] = {
+ .name = "grayf32be",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 0, 32 }, /* Y */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_FLOAT,
+ .alias = "yf32be",
+ },
+ [AV_PIX_FMT_GRAYF32LE] = {
+ .name = "grayf32le",
+ .nb_components = 1,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 0, 32 }, /* Y */
+ },
+ .flags = AV_PIX_FMT_FLAG_FLOAT,
+ .alias = "yf32le",
+ },
+ [AV_PIX_FMT_YUVA422P12BE] = {
+ .name = "yuva422p12be",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ { 3, 2, 0, 0, 12 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA422P12LE] = {
+ .name = "yuva422p12le",
+ .nb_components = 4,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ { 3, 2, 0, 0, 12 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA444P12BE] = {
+ .name = "yuva444p12be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ { 3, 2, 0, 0, 12 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_YUVA444P12LE] = {
+ .name = "yuva444p12le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ { 3, 2, 0, 0, 12 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_NV24] = {
+ .name = "nv24",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 2, 0, 0, 8 }, /* U */
+ { 1, 2, 1, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_NV42] = {
+ .name = "nv42",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 2, 1, 0, 8 }, /* U */
+ { 1, 2, 0, 0, 8 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_VULKAN] = {
+ .name = "vulkan",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_P210BE] = {
+ .name = "p210be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P210LE] = {
+ .name = "p210le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_P410BE] = {
+ .name = "p410be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P410LE] = {
+ .name = "p410le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_P216BE] = {
+ .name = "p216be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P216LE] = {
+ .name = "p216le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_P416BE] = {
+ .name = "p416be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P416LE] = {
+ .name = "p416le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_VUYA] = {
+ .name = "vuya",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 2, 0, 8 }, /* Y */
+ { 0, 4, 1, 0, 8 }, /* U */
+ { 0, 4, 0, 0, 8 }, /* V */
+ { 0, 4, 3, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_VUYX] = {
+ .name = "vuyx",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 2, 0, 8 }, /* Y */
+ { 0, 4, 1, 0, 8 }, /* U */
+ { 0, 4, 0, 0, 8 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_RGBAF16BE] = {
+ .name = "rgbaf16be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 0, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 4, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_ALPHA | AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_RGBAF16LE] = {
+ .name = "rgbaf16le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 0, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 4, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA |
+ AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_Y212LE] = {
+ .name = "y212le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 4, 12 }, /* Y */
+ { 0, 8, 2, 4, 12 }, /* U */
+ { 0, 8, 6, 4, 12 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_Y212BE] = {
+ .name = "y212be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 4, 12 }, /* Y */
+ { 0, 8, 2, 4, 12 }, /* U */
+ { 0, 8, 6, 4, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_XV30LE] = {
+ .name = "xv30le",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 1, 2, 10 }, /* Y */
+ { 0, 4, 0, 0, 10 }, /* U */
+ { 0, 4, 2, 4, 10 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_XV30BE] = {
+ .name = "xv30be",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 32, 10, 0, 10 }, /* Y */
+ { 0, 32, 0, 0, 10 }, /* U */
+ { 0, 32, 20, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_BITSTREAM,
+ },
+ [AV_PIX_FMT_XV36LE] = {
+ .name = "xv36le",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 8, 2, 4, 12 }, /* Y */
+ { 0, 8, 0, 4, 12 }, /* U */
+ { 0, 8, 4, 4, 12 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_XV36BE] = {
+ .name = "xv36be",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 8, 2, 4, 12 }, /* Y */
+ { 0, 8, 0, 4, 12 }, /* U */
+ { 0, 8, 4, 4, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_RGBF32BE] = {
+ .name = "rgbf32be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 12, 0, 0, 32 }, /* R */
+ { 0, 12, 4, 0, 32 }, /* G */
+ { 0, 12, 8, 0, 32 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_RGBF32LE] = {
+ .name = "rgbf32le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 12, 0, 0, 32 }, /* R */
+ { 0, 12, 4, 0, 32 }, /* G */
+ { 0, 12, 8, 0, 32 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_RGBAF32BE] = {
+ .name = "rgbaf32be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 16, 0, 0, 32 }, /* R */
+ { 0, 16, 4, 0, 32 }, /* G */
+ { 0, 16, 8, 0, 32 }, /* B */
+ { 0, 16, 12, 0, 32 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_FLOAT | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_RGBAF32LE] = {
+ .name = "rgbaf32le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 16, 0, 0, 32 }, /* R */
+ { 0, 16, 4, 0, 32 }, /* G */
+ { 0, 16, 8, 0, 32 }, /* B */
+ { 0, 16, 12, 0, 32 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT |
+ AV_PIX_FMT_FLAG_ALPHA,
+ },
+};
+
+static const char * const color_range_names[] = {
+ [AVCOL_RANGE_UNSPECIFIED] = "unknown",
+ [AVCOL_RANGE_MPEG] = "tv",
+ [AVCOL_RANGE_JPEG] = "pc",
+};
+
+static const char * const color_primaries_names[AVCOL_PRI_NB] = {
+ [AVCOL_PRI_RESERVED0] = "reserved",
+ [AVCOL_PRI_BT709] = "bt709",
+ [AVCOL_PRI_UNSPECIFIED] = "unknown",
+ [AVCOL_PRI_RESERVED] = "reserved",
+ [AVCOL_PRI_BT470M] = "bt470m",
+ [AVCOL_PRI_BT470BG] = "bt470bg",
+ [AVCOL_PRI_SMPTE170M] = "smpte170m",
+ [AVCOL_PRI_SMPTE240M] = "smpte240m",
+ [AVCOL_PRI_FILM] = "film",
+ [AVCOL_PRI_BT2020] = "bt2020",
+ [AVCOL_PRI_SMPTE428] = "smpte428",
+ [AVCOL_PRI_SMPTE431] = "smpte431",
+ [AVCOL_PRI_SMPTE432] = "smpte432",
+ [AVCOL_PRI_EBU3213] = "ebu3213",
+};
+
+static const char * const color_transfer_names[] = {
+ [AVCOL_TRC_RESERVED0] = "reserved",
+ [AVCOL_TRC_BT709] = "bt709",
+ [AVCOL_TRC_UNSPECIFIED] = "unknown",
+ [AVCOL_TRC_RESERVED] = "reserved",
+ [AVCOL_TRC_GAMMA22] = "bt470m",
+ [AVCOL_TRC_GAMMA28] = "bt470bg",
+ [AVCOL_TRC_SMPTE170M] = "smpte170m",
+ [AVCOL_TRC_SMPTE240M] = "smpte240m",
+ [AVCOL_TRC_LINEAR] = "linear",
+ [AVCOL_TRC_LOG] = "log100",
+ [AVCOL_TRC_LOG_SQRT] = "log316",
+ [AVCOL_TRC_IEC61966_2_4] = "iec61966-2-4",
+ [AVCOL_TRC_BT1361_ECG] = "bt1361e",
+ [AVCOL_TRC_IEC61966_2_1] = "iec61966-2-1",
+ [AVCOL_TRC_BT2020_10] = "bt2020-10",
+ [AVCOL_TRC_BT2020_12] = "bt2020-12",
+ [AVCOL_TRC_SMPTE2084] = "smpte2084",
+ [AVCOL_TRC_SMPTE428] = "smpte428",
+ [AVCOL_TRC_ARIB_STD_B67] = "arib-std-b67",
+};
+
+static const char * const color_space_names[] = {
+ [AVCOL_SPC_RGB] = "gbr",
+ [AVCOL_SPC_BT709] = "bt709",
+ [AVCOL_SPC_UNSPECIFIED] = "unknown",
+ [AVCOL_SPC_RESERVED] = "reserved",
+ [AVCOL_SPC_FCC] = "fcc",
+ [AVCOL_SPC_BT470BG] = "bt470bg",
+ [AVCOL_SPC_SMPTE170M] = "smpte170m",
+ [AVCOL_SPC_SMPTE240M] = "smpte240m",
+ [AVCOL_SPC_YCGCO] = "ycgco",
+ [AVCOL_SPC_BT2020_NCL] = "bt2020nc",
+ [AVCOL_SPC_BT2020_CL] = "bt2020c",
+ [AVCOL_SPC_SMPTE2085] = "smpte2085",
+ [AVCOL_SPC_CHROMA_DERIVED_NCL] = "chroma-derived-nc",
+ [AVCOL_SPC_CHROMA_DERIVED_CL] = "chroma-derived-c",
+ [AVCOL_SPC_ICTCP] = "ictcp",
+};
+
+static const char * const chroma_location_names[] = {
+ [AVCHROMA_LOC_UNSPECIFIED] = "unspecified",
+ [AVCHROMA_LOC_LEFT] = "left",
+ [AVCHROMA_LOC_CENTER] = "center",
+ [AVCHROMA_LOC_TOPLEFT] = "topleft",
+ [AVCHROMA_LOC_TOP] = "top",
+ [AVCHROMA_LOC_BOTTOMLEFT] = "bottomleft",
+ [AVCHROMA_LOC_BOTTOM] = "bottom",
+};
+
+static enum AVPixelFormat get_pix_fmt_internal(const char *name)
+{
+ enum AVPixelFormat pix_fmt;
+
+ for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
+ if (av_pix_fmt_descriptors[pix_fmt].name &&
+ (!strcmp(av_pix_fmt_descriptors[pix_fmt].name, name) ||
+ av_match_name(name, av_pix_fmt_descriptors[pix_fmt].alias)))
+ return pix_fmt;
+
+ return AV_PIX_FMT_NONE;
+}
+
+const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
+{
+ return (unsigned)pix_fmt < AV_PIX_FMT_NB ?
+ av_pix_fmt_descriptors[pix_fmt].name : NULL;
+}
+
+#if HAVE_BIGENDIAN
+# define X_NE(be, le) be
+#else
+# define X_NE(be, le) le
+#endif
+
+enum AVPixelFormat av_get_pix_fmt(const char *name)
+{
+ enum AVPixelFormat pix_fmt;
+
+ if (!strcmp(name, "rgb32"))
+ name = X_NE("argb", "bgra");
+ else if (!strcmp(name, "bgr32"))
+ name = X_NE("abgr", "rgba");
+
+ pix_fmt = get_pix_fmt_internal(name);
+ if (pix_fmt == AV_PIX_FMT_NONE) {
+ char name2[32];
+
+ snprintf(name2, sizeof(name2), "%s%s", name, X_NE("be", "le"));
+ pix_fmt = get_pix_fmt_internal(name2);
+ }
+
+ return pix_fmt;
+}
+
+int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
+{
+ int c, bits = 0;
+ int log2_pixels = pixdesc->log2_chroma_w + pixdesc->log2_chroma_h;
+
+ for (c = 0; c < pixdesc->nb_components; c++) {
+ int s = c == 1 || c == 2 ? 0 : log2_pixels;
+ bits += pixdesc->comp[c].depth << s;
+ }
+
+ return bits >> log2_pixels;
+}
+
+int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
+{
+ int c, bits = 0;
+ int log2_pixels = pixdesc->log2_chroma_w + pixdesc->log2_chroma_h;
+ int steps[4] = {0};
+
+ for (c = 0; c < pixdesc->nb_components; c++) {
+ const AVComponentDescriptor *comp = &pixdesc->comp[c];
+ int s = c == 1 || c == 2 ? 0 : log2_pixels;
+ steps[comp->plane] = comp->step << s;
+ }
+ for (c = 0; c < 4; c++)
+ bits += steps[c];
+
+ if(!(pixdesc->flags & AV_PIX_FMT_FLAG_BITSTREAM))
+ bits *= 8;
+
+ return bits >> log2_pixels;
+}
+
+char *av_get_pix_fmt_string(char *buf, int buf_size,
+ enum AVPixelFormat pix_fmt)
+{
+ /* print header */
+ if (pix_fmt < 0) {
+ snprintf (buf, buf_size, "name" " nb_components" " nb_bits");
+ } else {
+ const AVPixFmtDescriptor *pixdesc = &av_pix_fmt_descriptors[pix_fmt];
+ snprintf(buf, buf_size, "%-11s %7d %10d", pixdesc->name,
+ pixdesc->nb_components, av_get_bits_per_pixel(pixdesc));
+ }
+
+ return buf;
+}
+
+const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
+{
+ if (pix_fmt < 0 || pix_fmt >= AV_PIX_FMT_NB)
+ return NULL;
+ return &av_pix_fmt_descriptors[pix_fmt];
+}
+
+const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev)
+{
+ if (!prev)
+ return &av_pix_fmt_descriptors[0];
+ while (prev - av_pix_fmt_descriptors < FF_ARRAY_ELEMS(av_pix_fmt_descriptors) - 1) {
+ prev++;
+ if (prev->name)
+ return prev;
+ }
+ return NULL;
+}
+
+enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc)
+{
+ if (desc < av_pix_fmt_descriptors ||
+ desc >= av_pix_fmt_descriptors + FF_ARRAY_ELEMS(av_pix_fmt_descriptors))
+ return AV_PIX_FMT_NONE;
+
+ return desc - av_pix_fmt_descriptors;
+}
+
+int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt,
+ int *h_shift, int *v_shift)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ if (!desc)
+ return AVERROR(ENOSYS);
+ *h_shift = desc->log2_chroma_w;
+ *v_shift = desc->log2_chroma_h;
+
+ return 0;
+}
+
+int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int i, planes[4] = { 0 }, ret = 0;
+
+ if (!desc)
+ return AVERROR(EINVAL);
+
+ for (i = 0; i < desc->nb_components; i++)
+ planes[desc->comp[i].plane] = 1;
+ for (i = 0; i < FF_ARRAY_ELEMS(planes); i++)
+ ret += planes[i];
+ return ret;
+}
+
+enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ char name[16];
+ int i;
+
+ if (!desc || strlen(desc->name) < 2)
+ return AV_PIX_FMT_NONE;
+ av_strlcpy(name, desc->name, sizeof(name));
+ i = strlen(name) - 2;
+ if (strcmp(name + i, "be") && strcmp(name + i, "le"))
+ return AV_PIX_FMT_NONE;
+
+ name[i] ^= 'b' ^ 'l';
+
+ return get_pix_fmt_internal(name);
+}
+
+#define FF_COLOR_NA -1
+#define FF_COLOR_RGB 0 /**< RGB color space */
+#define FF_COLOR_GRAY 1 /**< gray color space */
+#define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
+#define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
+#define FF_COLOR_XYZ 4
+
+#define pixdesc_has_alpha(pixdesc) \
+ ((pixdesc)->flags & AV_PIX_FMT_FLAG_ALPHA)
+
+
+static int get_color_type(const AVPixFmtDescriptor *desc) {
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL)
+ return FF_COLOR_RGB;
+
+ if(desc->nb_components == 1 || desc->nb_components == 2)
+ return FF_COLOR_GRAY;
+
+ if (desc->name) {
+ if (av_strstart(desc->name, "yuvj", NULL))
+ return FF_COLOR_YUV_JPEG;
+
+ if (av_strstart(desc->name, "xyz", NULL))
+ return FF_COLOR_XYZ;
+ }
+
+ if(desc->flags & AV_PIX_FMT_FLAG_RGB)
+ return FF_COLOR_RGB;
+
+ if(desc->nb_components == 0)
+ return FF_COLOR_NA;
+
+ return FF_COLOR_YUV;
+}
+
+static int get_pix_fmt_depth(int *min, int *max, enum AVPixelFormat pix_fmt)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int i;
+
+ if (!desc || !desc->nb_components) {
+ *min = *max = 0;
+ return AVERROR(EINVAL);
+ }
+
+ *min = INT_MAX, *max = -INT_MAX;
+ for (i = 0; i < desc->nb_components; i++) {
+ *min = FFMIN(desc->comp[i].depth, *min);
+ *max = FFMAX(desc->comp[i].depth, *max);
+ }
+ return 0;
+}
+
+static int get_pix_fmt_score(enum AVPixelFormat dst_pix_fmt,
+ enum AVPixelFormat src_pix_fmt,
+ unsigned *lossp, unsigned consider)
+{
+ const AVPixFmtDescriptor *src_desc = av_pix_fmt_desc_get(src_pix_fmt);
+ const AVPixFmtDescriptor *dst_desc = av_pix_fmt_desc_get(dst_pix_fmt);
+ int src_color, dst_color;
+ int src_min_depth, src_max_depth, dst_min_depth, dst_max_depth;
+ int ret, loss, i, nb_components;
+ int score = INT_MAX - 1;
+
+ if (!src_desc || !dst_desc)
+ return -4;
+
+ if ((src_desc->flags & AV_PIX_FMT_FLAG_HWACCEL) ||
+ (dst_desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
+ if (dst_pix_fmt == src_pix_fmt)
+ return -1;
+ else
+ return -2;
+ }
+
+ /* compute loss */
+ *lossp = loss = 0;
+
+ if (dst_pix_fmt == src_pix_fmt)
+ return INT_MAX;
+
+ if ((ret = get_pix_fmt_depth(&src_min_depth, &src_max_depth, src_pix_fmt)) < 0)
+ return -3;
+ if ((ret = get_pix_fmt_depth(&dst_min_depth, &dst_max_depth, dst_pix_fmt)) < 0)
+ return -3;
+
+ src_color = get_color_type(src_desc);
+ dst_color = get_color_type(dst_desc);
+ if (dst_pix_fmt == AV_PIX_FMT_PAL8)
+ nb_components = FFMIN(src_desc->nb_components, 4);
+ else
+ nb_components = FFMIN(src_desc->nb_components, dst_desc->nb_components);
+
+ for (i = 0; i < nb_components; i++) {
+ int depth_minus1 = (dst_pix_fmt == AV_PIX_FMT_PAL8) ? 7/nb_components : (dst_desc->comp[i].depth - 1);
+ int depth_delta = src_desc->comp[i].depth - 1 - depth_minus1;
+ if (depth_delta > 0 && (consider & FF_LOSS_DEPTH)) {
+ loss |= FF_LOSS_DEPTH;
+ score -= 65536 >> depth_minus1;
+ } else if (depth_delta < 0 && (consider & FF_LOSS_EXCESS_DEPTH)) {
+ // Favour formats where bit depth exactly matches. If all other
+ // scoring is equal, we'd rather use the bit depth that most closely
+ // matches the source.
+ loss |= FF_LOSS_EXCESS_DEPTH;
+ score += depth_delta;
+ }
+ }
+
+ if (consider & FF_LOSS_RESOLUTION) {
+ if (dst_desc->log2_chroma_w > src_desc->log2_chroma_w) {
+ loss |= FF_LOSS_RESOLUTION;
+ score -= 256 << dst_desc->log2_chroma_w;
+ }
+ if (dst_desc->log2_chroma_h > src_desc->log2_chroma_h) {
+ loss |= FF_LOSS_RESOLUTION;
+ score -= 256 << dst_desc->log2_chroma_h;
+ }
+ // don't favor 422 over 420 if downsampling is needed, because 420 has much better support on the decoder side
+ if (dst_desc->log2_chroma_w == 1 && src_desc->log2_chroma_w == 0 &&
+ dst_desc->log2_chroma_h == 1 && src_desc->log2_chroma_h == 0 ) {
+ score += 512;
+ }
+ }
+
+ if (consider & FF_LOSS_EXCESS_RESOLUTION) {
+ // Favour formats where chroma subsampling exactly matches. If all other
+ // scoring is equal, we'd rather use the subsampling that most closely
+ // matches the source.
+ if (dst_desc->log2_chroma_w < src_desc->log2_chroma_w) {
+ loss |= FF_LOSS_EXCESS_RESOLUTION;
+ score -= 1 << (src_desc->log2_chroma_w - dst_desc->log2_chroma_w);
+ }
+
+ if (dst_desc->log2_chroma_h < src_desc->log2_chroma_h) {
+ loss |= FF_LOSS_EXCESS_RESOLUTION;
+ score -= 1 << (src_desc->log2_chroma_h - dst_desc->log2_chroma_h);
+ }
+
+ // don't favour 411 over 420, because 420 has much better support on the
+ // decoder side.
+ if (dst_desc->log2_chroma_w == 1 && src_desc->log2_chroma_w == 2 &&
+ dst_desc->log2_chroma_h == 1 && src_desc->log2_chroma_h == 2) {
+ score += 4;
+ }
+ }
+
+ if(consider & FF_LOSS_COLORSPACE)
+ switch(dst_color) {
+ case FF_COLOR_RGB:
+ if (src_color != FF_COLOR_RGB &&
+ src_color != FF_COLOR_GRAY)
+ loss |= FF_LOSS_COLORSPACE;
+ break;
+ case FF_COLOR_GRAY:
+ if (src_color != FF_COLOR_GRAY)
+ loss |= FF_LOSS_COLORSPACE;
+ break;
+ case FF_COLOR_YUV:
+ if (src_color != FF_COLOR_YUV)
+ loss |= FF_LOSS_COLORSPACE;
+ break;
+ case FF_COLOR_YUV_JPEG:
+ if (src_color != FF_COLOR_YUV_JPEG &&
+ src_color != FF_COLOR_YUV &&
+ src_color != FF_COLOR_GRAY)
+ loss |= FF_LOSS_COLORSPACE;
+ break;
+ default:
+ /* fail safe test */
+ if (src_color != dst_color)
+ loss |= FF_LOSS_COLORSPACE;
+ break;
+ }
+ if(loss & FF_LOSS_COLORSPACE)
+ score -= (nb_components * 65536) >> FFMIN(dst_desc->comp[0].depth - 1, src_desc->comp[0].depth - 1);
+
+ if (dst_color == FF_COLOR_GRAY &&
+ src_color != FF_COLOR_GRAY && (consider & FF_LOSS_CHROMA)) {
+ loss |= FF_LOSS_CHROMA;
+ score -= 2 * 65536;
+ }
+ if (!pixdesc_has_alpha(dst_desc) && (pixdesc_has_alpha(src_desc) && (consider & FF_LOSS_ALPHA))) {
+ loss |= FF_LOSS_ALPHA;
+ score -= 65536;
+ }
+ if (dst_pix_fmt == AV_PIX_FMT_PAL8 && (consider & FF_LOSS_COLORQUANT) &&
+ (src_pix_fmt != AV_PIX_FMT_PAL8 && (src_color != FF_COLOR_GRAY || (pixdesc_has_alpha(src_desc) && (consider & FF_LOSS_ALPHA))))) {
+ loss |= FF_LOSS_COLORQUANT;
+ score -= 65536;
+ }
+
+ *lossp = loss;
+ return score;
+}
+
+int av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha)
+{
+ int loss;
+ int ret = get_pix_fmt_score(dst_pix_fmt, src_pix_fmt, &loss, has_alpha ? ~0 : ~FF_LOSS_ALPHA);
+ if (ret < 0)
+ return ret;
+ return loss;
+}
+
+enum AVPixelFormat av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
+ enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
+{
+ enum AVPixelFormat dst_pix_fmt;
+ int loss1, loss2, loss_mask;
+ const AVPixFmtDescriptor *desc1 = av_pix_fmt_desc_get(dst_pix_fmt1);
+ const AVPixFmtDescriptor *desc2 = av_pix_fmt_desc_get(dst_pix_fmt2);
+ int score1, score2;
+
+ if (!desc1) {
+ dst_pix_fmt = dst_pix_fmt2;
+ } else if (!desc2) {
+ dst_pix_fmt = dst_pix_fmt1;
+ } else {
+ loss_mask= loss_ptr?~*loss_ptr:~0; /* use loss mask if provided */
+ if(!has_alpha)
+ loss_mask &= ~FF_LOSS_ALPHA;
+
+ score1 = get_pix_fmt_score(dst_pix_fmt1, src_pix_fmt, &loss1, loss_mask);
+ score2 = get_pix_fmt_score(dst_pix_fmt2, src_pix_fmt, &loss2, loss_mask);
+
+ if (score1 == score2) {
+ if(av_get_padded_bits_per_pixel(desc2) != av_get_padded_bits_per_pixel(desc1)) {
+ dst_pix_fmt = av_get_padded_bits_per_pixel(desc2) < av_get_padded_bits_per_pixel(desc1) ? dst_pix_fmt2 : dst_pix_fmt1;
+ } else {
+ dst_pix_fmt = desc2->nb_components < desc1->nb_components ? dst_pix_fmt2 : dst_pix_fmt1;
+ }
+ } else {
+ dst_pix_fmt = score1 < score2 ? dst_pix_fmt2 : dst_pix_fmt1;
+ }
+ }
+
+ if (loss_ptr)
+ *loss_ptr = av_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
+ return dst_pix_fmt;
+}
+
+const char *av_color_range_name(enum AVColorRange range)
+{
+ return (unsigned) range < AVCOL_RANGE_NB ?
+ color_range_names[range] : NULL;
+}
+
+int av_color_range_from_name(const char *name)
+{
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(color_range_names); i++) {
+ if (av_strstart(name, color_range_names[i], NULL))
+ return i;
+ }
+
+ return AVERROR(EINVAL);
+}
+
+const char *av_color_primaries_name(enum AVColorPrimaries primaries)
+{
+ return (unsigned) primaries < AVCOL_PRI_NB ?
+ color_primaries_names[primaries] : NULL;
+}
+
+int av_color_primaries_from_name(const char *name)
+{
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(color_primaries_names); i++) {
+ if (!color_primaries_names[i])
+ continue;
+
+ if (av_strstart(name, color_primaries_names[i], NULL))
+ return i;
+ }
+
+ return AVERROR(EINVAL);
+}
+
+const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
+{
+ return (unsigned) transfer < AVCOL_TRC_NB ?
+ color_transfer_names[transfer] : NULL;
+}
+
+int av_color_transfer_from_name(const char *name)
+{
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(color_transfer_names); i++) {
+ if (!color_transfer_names[i])
+ continue;
+
+ if (av_strstart(name, color_transfer_names[i], NULL))
+ return i;
+ }
+
+ return AVERROR(EINVAL);
+}
+
+const char *av_color_space_name(enum AVColorSpace space)
+{
+ return (unsigned) space < AVCOL_SPC_NB ?
+ color_space_names[space] : NULL;
+}
+
+int av_color_space_from_name(const char *name)
+{
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(color_space_names); i++) {
+ if (!color_space_names[i])
+ continue;
+
+ if (av_strstart(name, color_space_names[i], NULL))
+ return i;
+ }
+
+ return AVERROR(EINVAL);
+}
+
+const char *av_chroma_location_name(enum AVChromaLocation location)
+{
+ return (unsigned) location < AVCHROMA_LOC_NB ?
+ chroma_location_names[location] : NULL;
+}
+
+int av_chroma_location_from_name(const char *name)
+{
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(chroma_location_names); i++) {
+ if (!chroma_location_names[i])
+ continue;
+
+ if (av_strstart(name, chroma_location_names[i], NULL))
+ return i;
+ }
+
+ return AVERROR(EINVAL);
+}
+
+int av_chroma_location_enum_to_pos(int *xpos, int *ypos, enum AVChromaLocation pos)
+{
+ if (pos <= AVCHROMA_LOC_UNSPECIFIED || pos >= AVCHROMA_LOC_NB)
+ return AVERROR(EINVAL);
+ pos--;
+
+ *xpos = (pos&1) * 128;
+ *ypos = ((pos>>1)^(pos<4)) * 128;
+
+ return 0;
+}
+
+enum AVChromaLocation av_chroma_location_pos_to_enum(int xpos, int ypos)
+{
+ int pos, xout, yout;
+
+ for (pos = AVCHROMA_LOC_UNSPECIFIED + 1; pos < AVCHROMA_LOC_NB; pos++) {
+ if (av_chroma_location_enum_to_pos(&xout, &yout, pos) == 0 && xout == xpos && yout == ypos)
+ return pos;
+ }
+ return AVCHROMA_LOC_UNSPECIFIED;
+}
diff --git a/media/ffvpx/libavutil/pixdesc.h b/media/ffvpx/libavutil/pixdesc.h
new file mode 100644
index 0000000000..0df73e6efe
--- /dev/null
+++ b/media/ffvpx/libavutil/pixdesc.h
@@ -0,0 +1,435 @@
+/*
+ * pixel format descriptor
+ * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXDESC_H
+#define AVUTIL_PIXDESC_H
+
+#include <inttypes.h>
+
+#include "attributes.h"
+#include "pixfmt.h"
+
+typedef struct AVComponentDescriptor {
+ /**
+ * Which of the 4 planes contains the component.
+ */
+ int plane;
+
+ /**
+ * Number of elements between 2 horizontally consecutive pixels.
+ * Elements are bits for bitstream formats, bytes otherwise.
+ */
+ int step;
+
+ /**
+ * Number of elements before the component of the first pixel.
+ * Elements are bits for bitstream formats, bytes otherwise.
+ */
+ int offset;
+
+ /**
+ * Number of least significant bits that must be shifted away
+ * to get the value.
+ */
+ int shift;
+
+ /**
+ * Number of bits in the component.
+ */
+ int depth;
+} AVComponentDescriptor;
+
+/**
+ * Descriptor that unambiguously describes how the bits of a pixel are
+ * stored in the up to 4 data planes of an image. It also stores the
+ * subsampling factors and number of components.
+ *
+ * @note This is separate from the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV
+ * and all the YUV variants); AVPixFmtDescriptor just stores how values
+ * are stored, not what these values represent.
+ */
+typedef struct AVPixFmtDescriptor {
+ const char *name;
+ uint8_t nb_components; ///< The number of components each pixel has (1-4)
+
+ /**
+ * Amount to shift the luma width right to find the chroma width.
+ * For YV12 this is 1, for example.
+ * chroma_width = AV_CEIL_RSHIFT(luma_width, log2_chroma_w)
+ * AV_CEIL_RSHIFT is used above to ensure the chroma width is rounded up.
+ * This value only refers to the chroma components.
+ */
+ uint8_t log2_chroma_w;
+
+ /**
+ * Amount to shift the luma height right to find the chroma height.
+ * For YV12 this is 1, for example.
+ * chroma_height = AV_CEIL_RSHIFT(luma_height, log2_chroma_h)
+ * AV_CEIL_RSHIFT is used above to ensure the chroma height is rounded up.
+ * This value only refers to the chroma components.
+ */
+ uint8_t log2_chroma_h;
+
+ /**
+ * Combination of AV_PIX_FMT_FLAG_... flags.
+ */
+ uint64_t flags;
+
+ /**
+ * Parameters that describe how pixels are packed.
+ * If the format has 1 or 2 components, then luma is 0.
+ * If the format has 3 or 4 components:
+ * if the RGB flag is set then 0 is red, 1 is green and 2 is blue;
+ * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V.
+ *
+ * If present, the Alpha channel is always the last component.
+ */
+ AVComponentDescriptor comp[4];
+
+ /**
+ * Alternative comma-separated names.
+ */
+ const char *alias;
+} AVPixFmtDescriptor;
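+
+/*
+ * A minimal sketch of how a descriptor is typically read (illustrative only;
+ * the 1920x1080 frame size is an assumed example value):
+ *
+ *     const AVPixFmtDescriptor *d = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P);
+ *     // d->nb_components == 3, d->log2_chroma_w == 1, d->log2_chroma_h == 1
+ *     int chroma_w = AV_CEIL_RSHIFT(1920, d->log2_chroma_w);  // 960
+ *     int chroma_h = AV_CEIL_RSHIFT(1080, d->log2_chroma_h);  // 540
+ *     int y_plane  = d->comp[0].plane;                        // 0
+ *     int u_plane  = d->comp[1].plane;                        // 1
+ */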
+
+/**
+ * Pixel format is big-endian.
+ */
+#define AV_PIX_FMT_FLAG_BE (1 << 0)
+/**
+ * Pixel format has a palette in data[1], values are indexes in this palette.
+ */
+#define AV_PIX_FMT_FLAG_PAL (1 << 1)
+/**
+ * All values of a component are bit-wise packed end to end.
+ */
+#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2)
+/**
+ * Pixel format is an HW accelerated format.
+ */
+#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3)
+/**
+ * At least one pixel component is not in the first data plane.
+ */
+#define AV_PIX_FMT_FLAG_PLANAR (1 << 4)
+/**
+ * The pixel format contains RGB-like data (as opposed to YUV/grayscale).
+ */
+#define AV_PIX_FMT_FLAG_RGB (1 << 5)
+
+/**
+ * The pixel format has an alpha channel. This is set on all formats that
+ * support alpha in some way, including AV_PIX_FMT_PAL8. The alpha is always
+ * straight, never pre-multiplied.
+ *
+ * If a codec or a filter does not support alpha, it should set all alpha to
+ * opaque, or use the equivalent pixel formats without alpha component, e.g.
+ * AV_PIX_FMT_RGB0 (or AV_PIX_FMT_RGB24 etc.) instead of AV_PIX_FMT_RGBA.
+ */
+#define AV_PIX_FMT_FLAG_ALPHA (1 << 7)
+
+/**
+ * The pixel format is following a Bayer pattern
+ */
+#define AV_PIX_FMT_FLAG_BAYER (1 << 8)
+
+/**
+ * The pixel format contains IEEE-754 floating point values. Precision (double,
+ * single, or half) should be determined by the pixel size (64, 32, or 16 bits).
+ */
+#define AV_PIX_FMT_FLAG_FLOAT (1 << 9)
+
+/**
+ * Return the number of bits per pixel used by the pixel format
+ * described by pixdesc. Note that this is not the same as the number
+ * of bits per sample.
+ *
+ * The returned number of bits refers to the number of bits actually
+ * used for storing the pixel information, that is padding bits are
+ * not counted.
+ */
+int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * Return the number of bits per pixel for the pixel format
+ * described by pixdesc, including any padding or unused bits.
+ */
+int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);
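+
+/*
+ * Illustrative comparison of the two functions above (the values follow from
+ * the descriptor tables in pixdesc.c):
+ *
+ *     const AVPixFmtDescriptor *d = av_pix_fmt_desc_get(AV_PIX_FMT_RGB555LE);
+ *     av_get_bits_per_pixel(d);        // 15: only the 5+5+5 value bits
+ *     av_get_padded_bits_per_pixel(d); // 16: the unused padding bit is counted
+ */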
+
+/**
+ * @return a pixel format descriptor for provided pixel format or NULL if
+ * this pixel format is unknown.
+ */
+const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt);
+
+/**
+ * Iterate over all pixel format descriptors known to libavutil.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev);
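+
+/*
+ * A minimal iteration sketch (the printing is just for illustration and
+ * requires <stdio.h>):
+ *
+ *     const AVPixFmtDescriptor *d = NULL;
+ *     while ((d = av_pix_fmt_desc_next(d)))
+ *         printf("%s\n", d->name);
+ */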
+
+/**
+ * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc
+ * is not a valid pointer to a pixel format descriptor.
+ */
+enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc);
+
+/**
+ * Utility function to access log2_chroma_w log2_chroma_h from
+ * the pixel format AVPixFmtDescriptor.
+ *
+ * @param[in] pix_fmt the pixel format
+ * @param[out] h_shift store log2_chroma_w (horizontal/width shift)
+ * @param[out] v_shift store log2_chroma_h (vertical/height shift)
+ *
+ * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format
+ */
+int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt,
+ int *h_shift, int *v_shift);
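+
+/*
+ * Usage sketch (4:2:2 is subsampled horizontally only):
+ *
+ *     int h_shift, v_shift;
+ *     if (av_pix_fmt_get_chroma_sub_sample(AV_PIX_FMT_YUV422P,
+ *                                          &h_shift, &v_shift) == 0) {
+ *         // h_shift == 1, v_shift == 0
+ *     }
+ */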
+
+/**
+ * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a
+ * valid pixel format.
+ */
+int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt);
+
+/**
+ * @return the name for provided color range or NULL if unknown.
+ */
+const char *av_color_range_name(enum AVColorRange range);
+
+/**
+ * @return the AVColorRange value for name or an AVError if not found.
+ */
+int av_color_range_from_name(const char *name);
+
+/**
+ * @return the name for provided color primaries or NULL if unknown.
+ */
+const char *av_color_primaries_name(enum AVColorPrimaries primaries);
+
+/**
+ * @return the AVColorPrimaries value for name or an AVError if not found.
+ */
+int av_color_primaries_from_name(const char *name);
+
+/**
+ * @return the name for provided color transfer or NULL if unknown.
+ */
+const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer);
+
+/**
+ * @return the AVColorTransferCharacteristic value for name or an AVError if not found.
+ */
+int av_color_transfer_from_name(const char *name);
+
+/**
+ * @return the name for provided color space or NULL if unknown.
+ */
+const char *av_color_space_name(enum AVColorSpace space);
+
+/**
+ * @return the AVColorSpace value for name or an AVError if not found.
+ */
+int av_color_space_from_name(const char *name);
+
+/**
+ * @return the name for provided chroma location or NULL if unknown.
+ */
+const char *av_chroma_location_name(enum AVChromaLocation location);
+
+/**
+ * @return the AVChromaLocation value for name or an AVError if not found.
+ */
+int av_chroma_location_from_name(const char *name);
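+
+/*
+ * Sketch of the name <-> enum helpers above; an unrecognized name yields
+ * AVERROR(EINVAL):
+ *
+ *     const char *range = av_color_range_name(AVCOL_RANGE_MPEG); // "tv"
+ *     int spc = av_color_space_from_name("bt709");               // AVCOL_SPC_BT709
+ *     int err = av_color_space_from_name("no-such-space");       // AVERROR(EINVAL)
+ */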
+
+/**
+ * Converts AVChromaLocation to swscale x/y chroma position.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) representing the origin and luma (1,1) representing (256,256).
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ */
+int av_chroma_location_enum_to_pos(int *xpos, int *ypos, enum AVChromaLocation pos);
+
+/**
+ * Converts swscale x/y chroma position to AVChromaLocation.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) representing the origin and luma (1,1) representing (256,256).
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ */
+enum AVChromaLocation av_chroma_location_pos_to_enum(int xpos, int ypos);
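+
+/*
+ * Sketch of the coordinate mapping implemented by the two functions above:
+ *
+ *     int x, y;
+ *     av_chroma_location_enum_to_pos(&x, &y, AVCHROMA_LOC_LEFT);
+ *     // x == 0, y == 128 (horizontally co-sited, vertically centered)
+ *     av_chroma_location_pos_to_enum(128, 128);  // AVCHROMA_LOC_CENTER
+ */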
+
+/**
+ * Return the pixel format corresponding to name.
+ *
+ * If there is no pixel format with name name, then looks for a
+ * pixel format with the name corresponding to the native endian
+ * format of name.
+ * For example in a little-endian system, first looks for "gray16",
+ * then for "gray16le".
+ *
+ * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE.
+ */
+enum AVPixelFormat av_get_pix_fmt(const char *name);
+
+/**
+ * Return the short name for a pixel format, NULL in case pix_fmt is
+ * unknown.
+ *
+ * @see av_get_pix_fmt(), av_get_pix_fmt_string()
+ */
+const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt);
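+
+/*
+ * Round-trip sketch for the two lookups above:
+ *
+ *     enum AVPixelFormat fmt = av_get_pix_fmt("yuv420p"); // AV_PIX_FMT_YUV420P
+ *     const char *name = av_get_pix_fmt_name(fmt);        // "yuv420p"
+ *     av_get_pix_fmt("gray16"); // gray16le or gray16be, whichever is native
+ */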
+
+/**
+ * Print in buf the string corresponding to the pixel format with
+ * number pix_fmt, or a header if pix_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param pix_fmt the number of the pixel format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ */
+char *av_get_pix_fmt_string(char *buf, int buf_size,
+ enum AVPixelFormat pix_fmt);
+
+/**
+ * Read a line from an image, and write the values of the
+ * pixel format component c to dst.
+ *
+ * @param data the array containing the pointers to the planes of the image
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to read
+ * @param y the vertical coordinate of the first pixel to read
+ * @param w the width of the line to read, that is the number of
+ * values to write to dst
+ * @param read_pal_component if not zero and the format is a paletted
+ * format, write the values corresponding to the palette
+ * component c in data[1] to dst, rather than the palette indexes in
+ * data[0]. The behavior is undefined if the format is not paletted.
+ * @param dst_element_size size of elements in dst array (2 or 4 byte)
+ */
+void av_read_image_line2(void *dst, const uint8_t *data[4],
+ const int linesize[4], const AVPixFmtDescriptor *desc,
+ int x, int y, int c, int w, int read_pal_component,
+ int dst_element_size);
+
+void av_read_image_line(uint16_t *dst, const uint8_t *data[4],
+ const int linesize[4], const AVPixFmtDescriptor *desc,
+ int x, int y, int c, int w, int read_pal_component);
+
+/**
+ * Write the values from src to the pixel format component c of an
+ * image line.
+ *
+ * @param src array containing the values to write
+ * @param data the array containing the pointers to the planes of the
+ * image to write into. It is supposed to be zeroed.
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to write
+ * @param y the vertical coordinate of the first pixel to write
+ * @param w the width of the line to write, that is the number of
+ * values to write to the image line
+ * @param src_element_size size of elements in src array (2 or 4 byte)
+ */
+void av_write_image_line2(const void *src, uint8_t *data[4],
+ const int linesize[4], const AVPixFmtDescriptor *desc,
+ int x, int y, int c, int w, int src_element_size);
+
+void av_write_image_line(const uint16_t *src, uint8_t *data[4],
+ const int linesize[4], const AVPixFmtDescriptor *desc,
+ int x, int y, int c, int w);
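+
+/*
+ * Minimal sketch for av_read_image_line() on a self-contained GRAY8 buffer
+ * (the 8x1 "image" and its sample values are purely illustrative):
+ *
+ *     uint8_t gray[8] = { 0, 32, 64, 96, 128, 160, 192, 255 };
+ *     const uint8_t *data[4] = { gray, NULL, NULL, NULL };
+ *     int linesize[4] = { 8, 0, 0, 0 };
+ *     uint16_t dst[8];
+ *     av_read_image_line(dst, data, linesize,
+ *                        av_pix_fmt_desc_get(AV_PIX_FMT_GRAY8),
+ *                        0, 0, 0, 8, 0);  // dst now holds the 8 luma values
+ */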
+
+/**
+ * Utility function to swap the endianness of a pixel format.
+ *
+ * @param[in] pix_fmt the pixel format
+ *
+ * @return pixel format with swapped endianness if it exists,
+ * otherwise AV_PIX_FMT_NONE
+ */
+enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt);
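+
+/*
+ * For illustration:
+ *
+ *     av_pix_fmt_swap_endianness(AV_PIX_FMT_RGB565LE); // AV_PIX_FMT_RGB565BE
+ *     av_pix_fmt_swap_endianness(AV_PIX_FMT_RGB24);    // AV_PIX_FMT_NONE
+ */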
+
+#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
+#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
+#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
+#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
+#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
+#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
+#define FF_LOSS_EXCESS_RESOLUTION 0x0040 /**< loss due to unneeded extra resolution */
+#define FF_LOSS_EXCESS_DEPTH 0x0080 /**< loss due to unneeded extra color depth */
+
+
+/**
+ * Compute what kind of losses will occur when converting from one specific
+ * pixel format to another.
+ * When converting from one pixel format to another, information loss may occur.
+ * For example, when converting from RGB24 to GRAY, the color information will
+ * be lost. Similarly, other losses occur when converting from some formats to
+ * other formats. These losses can involve loss of chroma, but also loss of
+ * resolution, loss of color depth, loss due to the color space conversion, loss
+ * of the alpha bits or loss due to color quantization.
+ * av_get_pix_fmt_loss() informs you about the various types of losses
+ * which will occur when converting from one pixel format to another.
+ *
+ * @param[in] dst_pix_fmt destination pixel format
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @return Combination of flags informing you what kind of losses will occur
+ * (maximum loss for an invalid dst_pix_fmt).
+ */
+int av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha);
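+
+/*
+ * Sketch: dropping to grayscale loses both chroma and color space information.
+ *
+ *     int loss = av_get_pix_fmt_loss(AV_PIX_FMT_GRAY8, AV_PIX_FMT_RGB24, 0);
+ *     // (loss & FF_LOSS_CHROMA) and (loss & FF_LOSS_COLORSPACE) are both set
+ */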
+
+/**
+ * Find the best of the two given destination pixel formats to use when
+ * converting from a given source pixel format.
+ * When converting from one pixel format to another, information loss may occur.
+ * For example, when converting from RGB24 to GRAY, the color information will
+ * be lost. Similarly, other losses occur when converting from some formats to
+ * other formats. These losses can involve loss of chroma, but also loss of
+ * resolution, loss of color depth, loss due to the color space conversion, loss
+ * of the alpha bits or loss due to color quantization.
+ * av_find_best_pix_fmt_of_2() selects the candidate destination format that
+ * preserves the source best and reports which losses will still occur for
+ * that choice.
+ *
+ * @param[in] dst_pix_fmt1 first candidate destination pixel format
+ * @param[in] dst_pix_fmt2 second candidate destination pixel format
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[in,out] loss_ptr May be NULL. On input, loss types set in *loss_ptr
+ * are ignored when comparing the two candidates; on output, *loss_ptr is
+ * updated with the losses that occur when converting from src_pix_fmt to the
+ * returned format.
+ * @return The candidate destination pixel format with the lower conversion loss.
+ */
+enum AVPixelFormat av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
+ enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
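+
+/*
+ * Sketch: given a 4:4:4 source, yuv420p only costs chroma resolution, whereas
+ * gray8 would also drop chroma entirely, so the former is chosen:
+ *
+ *     int loss = 0;
+ *     enum AVPixelFormat best =
+ *         av_find_best_pix_fmt_of_2(AV_PIX_FMT_YUV420P, AV_PIX_FMT_GRAY8,
+ *                                   AV_PIX_FMT_YUV444P, 0, &loss);
+ *     // best == AV_PIX_FMT_YUV420P, loss has FF_LOSS_RESOLUTION set
+ */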
+
+#endif /* AVUTIL_PIXDESC_H */
diff --git a/media/ffvpx/libavutil/pixelutils.c b/media/ffvpx/libavutil/pixelutils.c
new file mode 100644
index 0000000000..820889a143
--- /dev/null
+++ b/media/ffvpx/libavutil/pixelutils.c
@@ -0,0 +1,95 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+
+#include "config.h"
+#include "pixelutils.h"
+
+#if CONFIG_PIXELUTILS
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "macros.h"
+
+#include "x86/pixelutils.h"
+
+static av_always_inline int sad_wxh(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2,
+ int w, int h)
+{
+ int x, y, sum = 0;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++)
+ sum += abs(src1[x] - src2[x]);
+ src1 += stride1;
+ src2 += stride2;
+ }
+ return sum;
+}
+
+#define DECLARE_BLOCK_FUNCTIONS(size) \
+static int block_sad_##size##x##size##_c(const uint8_t *src1, ptrdiff_t stride1, \
+ const uint8_t *src2, ptrdiff_t stride2) \
+{ \
+ return sad_wxh(src1, stride1, src2, stride2, size, size); \
+}
+
+DECLARE_BLOCK_FUNCTIONS(2)
+DECLARE_BLOCK_FUNCTIONS(4)
+DECLARE_BLOCK_FUNCTIONS(8)
+DECLARE_BLOCK_FUNCTIONS(16)
+DECLARE_BLOCK_FUNCTIONS(32)
+
+static const av_pixelutils_sad_fn sad_c[] = {
+ block_sad_2x2_c,
+ block_sad_4x4_c,
+ block_sad_8x8_c,
+ block_sad_16x16_c,
+ block_sad_32x32_c,
+};
+#else
+#include "log.h"
+#endif /* CONFIG_PIXELUTILS */
+
+av_pixelutils_sad_fn av_pixelutils_get_sad_fn(int w_bits, int h_bits, int aligned, void *log_ctx)
+{
+#if !CONFIG_PIXELUTILS
+ av_log(log_ctx, AV_LOG_ERROR, "pixelutils support is required "
+ "but libavutil is not compiled with it\n");
+ return NULL;
+#else
+ av_pixelutils_sad_fn sad[FF_ARRAY_ELEMS(sad_c)];
+
+ memcpy(sad, sad_c, sizeof(sad));
+
+ if (w_bits < 1 || w_bits > FF_ARRAY_ELEMS(sad) ||
+ h_bits < 1 || h_bits > FF_ARRAY_ELEMS(sad))
+ return NULL;
+ if (w_bits != h_bits) // only squared sad for now
+ return NULL;
+
+#if ARCH_X86
+ ff_pixelutils_sad_init_x86(sad, aligned);
+#endif
+
+ return sad[w_bits - 1];
+#endif
+}
diff --git a/media/ffvpx/libavutil/pixelutils.h b/media/ffvpx/libavutil/pixelutils.h
new file mode 100644
index 0000000000..7a997cde1c
--- /dev/null
+++ b/media/ffvpx/libavutil/pixelutils.h
@@ -0,0 +1,51 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXELUTILS_H
+#define AVUTIL_PIXELUTILS_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/**
+ * Sum of abs(src1[x] - src2[x])
+ */
+typedef int (*av_pixelutils_sad_fn)(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+
+/**
+ * Get a potentially optimized pointer to a Sum-of-absolute-differences
+ * function (see the av_pixelutils_sad_fn prototype).
+ *
+ * @param w_bits 1<<w_bits is the requested width of the block size
+ * @param h_bits 1<<h_bits is the requested height of the block size
+ * @param aligned If set to 2, the returned sad function will assume src1 and
+ * src2 addresses are aligned on the block size.
+ * If set to 1, the returned sad function will assume src1 is
+ * aligned on the block size.
+ * If set to 0, the returned sad function assumes no particular
+ * alignment.
+ * @param log_ctx context used for logging, can be NULL
+ *
+ * @return a pointer to the SAD function or NULL in case of error (because of
+ * invalid parameters)
+ */
+av_pixelutils_sad_fn av_pixelutils_get_sad_fn(int w_bits, int h_bits,
+ int aligned, void *log_ctx);
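+
+/*
+ * Usage sketch (w_bits == h_bits == 4 selects 16x16 blocks; both test blocks
+ * here are zero-filled, so the SAD is 0):
+ *
+ *     uint8_t a[16 * 16] = { 0 }, b[16 * 16] = { 0 };
+ *     av_pixelutils_sad_fn sad = av_pixelutils_get_sad_fn(4, 4, 0, NULL);
+ *     if (sad) {
+ *         int diff = sad(a, 16, b, 16);  // 0
+ *     }
+ */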
+
+#endif /* AVUTIL_PIXELUTILS_H */
diff --git a/media/ffvpx/libavutil/pixfmt.h b/media/ffvpx/libavutil/pixfmt.h
new file mode 100644
index 0000000000..37c2c79e01
--- /dev/null
+++ b/media/ffvpx/libavutil/pixfmt.h
@@ -0,0 +1,691 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * @file
+ * pixel format definitions
+ */
+
+#include "libavutil/avconfig.h"
+#include "version.h"
+
+#define AVPALETTE_SIZE 1024
+#define AVPALETTE_COUNT 256
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
+ *
+ * @note
+ * If the resolution is not a multiple of the chroma subsampling factor
+ * then the chroma plane resolution must be rounded up.
+ *
+ * @par
+ * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
+ * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB32 palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ *
+ * @par
+ * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like
+ * for pal8. This palette is filled in automatically by the function
+ * allocating the picture.
+ */
+enum AVPixelFormat {
+ AV_PIX_FMT_NONE = -1,
+ AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ AV_PIX_FMT_GRAY8, ///< Y , 8bpp
+ AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette
+ AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
+ AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
+ AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
+ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
+ AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+ AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
+ AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
+
+ AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
+ AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
+
+ /**
+ * Hardware acceleration through VA-API, data[3] contains a
+ * VASurfaceID.
+ */
+ AV_PIX_FMT_VAAPI,
+
+ AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
+ AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
+ AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha
+
+ AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+ AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+
+ AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+
+ /**
+ * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
+ * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
+ * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
+ */
+ AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP
+ AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
+ AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
+ AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
+ AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
+ AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian
+ AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+ AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+ AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+
+ AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
+
+ AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+
+ AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+ AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+
+ AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
+
+ AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian)
+ AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha (little-endian)
+
+ AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp
+ AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
+ AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
+ /**
+ * HW acceleration through QSV, data[3] contains a pointer to the
+ * mfxFrameSurface1 structure.
+ *
+ * Before FFmpeg 5.0:
+ * mfxFrameSurface1.Data.MemId contains a pointer when importing
+ * the following frames as QSV frames:
+ *
+ * VAAPI:
+ * mfxFrameSurface1.Data.MemId contains a pointer to VASurfaceID
+ *
+ * DXVA2:
+ * mfxFrameSurface1.Data.MemId contains a pointer to IDirect3DSurface9
+ *
+ * FFmpeg 5.0 and above:
+ * mfxFrameSurface1.Data.MemId contains a pointer to the mfxHDLPair
+ * structure when importing the following frames as QSV frames:
+ *
+ * VAAPI:
+ * mfxHDLPair.first contains a VASurfaceID pointer.
+ * mfxHDLPair.second is always MFX_INFINITE.
+ *
+ * DXVA2:
+ * mfxHDLPair.first contains IDirect3DSurface9 pointer.
+ * mfxHDLPair.second is always MFX_INFINITE.
+ *
+ * D3D11:
+ * mfxHDLPair.first contains a ID3D11Texture2D pointer.
+ * mfxHDLPair.second contains the texture array index of the frame if the
+ * ID3D11Texture2D is an array texture, or always MFX_INFINITE if it is a
+ * normal texture.
+ */
+ AV_PIX_FMT_QSV,
+ /**
+ * HW acceleration through MMAL, data[3] contains a pointer to the
+ * MMAL_BUFFER_HEADER_T structure.
+ */
+ AV_PIX_FMT_MMAL,
+
+ AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer
+
+ /**
+ * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers
+ * exactly as for system memory frames.
+ */
+ AV_PIX_FMT_CUDA,
+
+ AV_PIX_FMT_0RGB, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
+ AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
+ AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
+ AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
+
+ AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian
+ AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian
+ AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian
+ AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
+ AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range
+
+ AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
+ AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
+ AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
+ AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
+ AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian
+
+#if FF_API_XVMC
+ AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing
+#endif
+
+ AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
+ AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
+ AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
+ AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
+ AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+
+ AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox
+
+ AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian
+ AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian
+
+ AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian
+
+ AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian
+ AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian
+
+ AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec
+
+ AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian
+ AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian
+ AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian
+ AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian
+
+ AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian
+ AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian
+
+ /**
+ * Hardware surfaces for Direct3D11.
+ *
+ * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11
+ * hwaccel API and filtering support AV_PIX_FMT_D3D11 only.
+ *
+ * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the
+ * texture array index of the frame as intptr_t if the ID3D11Texture2D is
+ * an array texture (or always 0 if it's a normal texture).
+ */
+ AV_PIX_FMT_D3D11,
+
+ AV_PIX_FMT_GRAY9BE, ///< Y , 9bpp, big-endian
+ AV_PIX_FMT_GRAY9LE, ///< Y , 9bpp, little-endian
+
+ AV_PIX_FMT_GBRPF32BE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian
+ AV_PIX_FMT_GBRPF32LE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian
+ AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian
+ AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian
+
+ /**
+ * DRM-managed buffers exposed through PRIME buffer sharing.
+ *
+ * data[0] points to an AVDRMFrameDescriptor.
+ */
+ AV_PIX_FMT_DRM_PRIME,
+ /**
+ * Hardware surfaces for OpenCL.
+ *
+ * data[i] contain 2D image objects (typed in C as cl_mem, used
+ * in OpenCL as image2d_t) for each plane of the surface.
+ */
+ AV_PIX_FMT_OPENCL,
+
+ AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian
+ AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian
+
+ AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian
+ AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian
+
+ AV_PIX_FMT_YUVA422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian
+ AV_PIX_FMT_YUVA422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian
+ AV_PIX_FMT_YUVA444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian
+ AV_PIX_FMT_YUVA444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian
+
+ AV_PIX_FMT_NV24, ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ AV_PIX_FMT_NV42, ///< as above, but U and V bytes are swapped
+
+ /**
+ * Vulkan hardware images.
+ *
+ * data[0] points to an AVVkFrame
+ */
+ AV_PIX_FMT_VULKAN,
+
+ AV_PIX_FMT_Y210BE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data in the high bits, big-endian
+ AV_PIX_FMT_Y210LE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data in the high bits, little-endian
+
+ AV_PIX_FMT_X2RGB10LE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_X2RGB10BE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined
+ AV_PIX_FMT_X2BGR10LE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_X2BGR10BE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), big-endian, X=unused/undefined
+
+ AV_PIX_FMT_P210BE, ///< interleaved chroma YUV 4:2:2, 20bpp, data in the high bits, big-endian
+ AV_PIX_FMT_P210LE, ///< interleaved chroma YUV 4:2:2, 20bpp, data in the high bits, little-endian
+
+ AV_PIX_FMT_P410BE, ///< interleaved chroma YUV 4:4:4, 30bpp, data in the high bits, big-endian
+ AV_PIX_FMT_P410LE, ///< interleaved chroma YUV 4:4:4, 30bpp, data in the high bits, little-endian
+
+ AV_PIX_FMT_P216BE, ///< interleaved chroma YUV 4:2:2, 32bpp, big-endian
+ AV_PIX_FMT_P216LE, ///< interleaved chroma YUV 4:2:2, 32bpp, little-endian
+
+ AV_PIX_FMT_P416BE, ///< interleaved chroma YUV 4:4:4, 48bpp, big-endian
+ AV_PIX_FMT_P416LE, ///< interleaved chroma YUV 4:4:4, 48bpp, little-endian
+
+ AV_PIX_FMT_VUYA, ///< packed VUYA 4:4:4, 32bpp, VUYAVUYA...
+
+ AV_PIX_FMT_RGBAF16BE, ///< IEEE-754 half precision packed RGBA 16:16:16:16, 64bpp, RGBARGBA..., big-endian
+ AV_PIX_FMT_RGBAF16LE, ///< IEEE-754 half precision packed RGBA 16:16:16:16, 64bpp, RGBARGBA..., little-endian
+
+ AV_PIX_FMT_VUYX, ///< packed VUYX 4:4:4, 32bpp, Variant of VUYA where alpha channel is left undefined
+
+ AV_PIX_FMT_P012LE, ///< like NV12, with 12bpp per component, data in the high bits, zeros in the low bits, little-endian
+ AV_PIX_FMT_P012BE, ///< like NV12, with 12bpp per component, data in the high bits, zeros in the low bits, big-endian
+
+ AV_PIX_FMT_Y212BE, ///< packed YUV 4:2:2 like YUYV422, 24bpp, data in the high bits, zeros in the low bits, big-endian
+ AV_PIX_FMT_Y212LE, ///< packed YUV 4:2:2 like YUYV422, 24bpp, data in the high bits, zeros in the low bits, little-endian
+
+ AV_PIX_FMT_XV30BE, ///< packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb), big-endian, variant of Y410 where alpha channel is left undefined
+ AV_PIX_FMT_XV30LE, ///< packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb), little-endian, variant of Y410 where alpha channel is left undefined
+
+ AV_PIX_FMT_XV36BE, ///< packed XVYU 4:4:4, 48bpp, data in the high bits, zeros in the low bits, big-endian, variant of Y412 where alpha channel is left undefined
+ AV_PIX_FMT_XV36LE, ///< packed XVYU 4:4:4, 48bpp, data in the high bits, zeros in the low bits, little-endian, variant of Y412 where alpha channel is left undefined
+
+ AV_PIX_FMT_RGBF32BE, ///< IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., big-endian
+ AV_PIX_FMT_RGBF32LE, ///< IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., little-endian
+
+ AV_PIX_FMT_RGBAF32BE, ///< IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., big-endian
+ AV_PIX_FMT_RGBAF32LE, ///< IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., little-endian
+
+ AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+};
+
+#if AV_HAVE_BIGENDIAN
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
+#else
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
+#endif
+
+#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA)
+#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)
+#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA)
+#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)
+#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0)
+#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0)
+
+#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE)
+#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE)
+#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE)
+#define AV_PIX_FMT_GRAY14 AV_PIX_FMT_NE(GRAY14BE, GRAY14LE)
+#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
+#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE)
+#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE)
+#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)
+#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)
+#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)
+#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)
+#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE)
+#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)
+#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)
+#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)
+#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)
+
+#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
+#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)
+#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE)
+#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
+#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE)
+#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
+#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE)
+#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE)
+#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE)
+#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE)
+#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE)
+#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE)
+#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE)
+#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
+#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
+#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
+
+#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE)
+#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE)
+#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE)
+#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE)
+#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE)
+#define AV_PIX_FMT_GBRAP10 AV_PIX_FMT_NE(GBRAP10BE, GBRAP10LE)
+#define AV_PIX_FMT_GBRAP12 AV_PIX_FMT_NE(GBRAP12BE, GBRAP12LE)
+#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE)
+
+#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE)
+#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE)
+#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE)
+#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE)
+
+#define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE)
+#define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE)
+
+#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE)
+
+#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
+#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
+#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)
+#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)
+#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)
+#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)
+#define AV_PIX_FMT_YUVA422P12 AV_PIX_FMT_NE(YUVA422P12BE, YUVA422P12LE)
+#define AV_PIX_FMT_YUVA444P12 AV_PIX_FMT_NE(YUVA444P12BE, YUVA444P12LE)
+#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)
+#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)
+#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
+
+#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)
+#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE)
+#define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE)
+#define AV_PIX_FMT_P010 AV_PIX_FMT_NE(P010BE, P010LE)
+#define AV_PIX_FMT_P012 AV_PIX_FMT_NE(P012BE, P012LE)
+#define AV_PIX_FMT_P016 AV_PIX_FMT_NE(P016BE, P016LE)
+
+#define AV_PIX_FMT_Y210 AV_PIX_FMT_NE(Y210BE, Y210LE)
+#define AV_PIX_FMT_Y212 AV_PIX_FMT_NE(Y212BE, Y212LE)
+#define AV_PIX_FMT_XV30 AV_PIX_FMT_NE(XV30BE, XV30LE)
+#define AV_PIX_FMT_XV36 AV_PIX_FMT_NE(XV36BE, XV36LE)
+#define AV_PIX_FMT_X2RGB10 AV_PIX_FMT_NE(X2RGB10BE, X2RGB10LE)
+#define AV_PIX_FMT_X2BGR10 AV_PIX_FMT_NE(X2BGR10BE, X2BGR10LE)
+
+#define AV_PIX_FMT_P210 AV_PIX_FMT_NE(P210BE, P210LE)
+#define AV_PIX_FMT_P410 AV_PIX_FMT_NE(P410BE, P410LE)
+#define AV_PIX_FMT_P216 AV_PIX_FMT_NE(P216BE, P216LE)
+#define AV_PIX_FMT_P416 AV_PIX_FMT_NE(P416BE, P416LE)
+
+#define AV_PIX_FMT_RGBAF16 AV_PIX_FMT_NE(RGBAF16BE, RGBAF16LE)
+
+#define AV_PIX_FMT_RGBF32 AV_PIX_FMT_NE(RGBF32BE, RGBF32LE)
+#define AV_PIX_FMT_RGBAF32 AV_PIX_FMT_NE(RGBAF32BE, RGBAF32LE)
+
+/**
+ * Chromaticity coordinates of the source primaries.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.1 and ITU-T H.273.
+ */
+enum AVColorPrimaries {
+ AVCOL_PRI_RESERVED0 = 0,
+ AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
+ AVCOL_PRI_UNSPECIFIED = 2,
+ AVCOL_PRI_RESERVED = 3,
+ AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+
+ AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_PRI_SMPTE240M = 7, ///< identical to above, also called "SMPTE C" even though it uses D65
+ AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C
+ AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
+ AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ)
+ AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428,
+ AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3
+ AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3
+ AVCOL_PRI_EBU3213 = 22, ///< EBU Tech. 3213-E (nothing there) / one of JEDEC P22 group phosphors
+ AVCOL_PRI_JEDEC_P22 = AVCOL_PRI_EBU3213,
+ AVCOL_PRI_NB ///< Not part of ABI
+};
+
+/**
+ * Color Transfer Characteristic.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.2.
+ */
+enum AVColorTransferCharacteristic {
+ AVCOL_TRC_RESERVED0 = 0,
+ AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
+ AVCOL_TRC_UNSPECIFIED = 2,
+ AVCOL_TRC_RESERVED = 3,
+ AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
+ AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
+ AVCOL_TRC_SMPTE240M = 7,
+ AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
+ AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)"
+ AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
+ AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
+ AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
+ AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
+ AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system
+ AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system
+ AVCOL_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems
+ AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084,
+ AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1
+ AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428,
+ AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma"
+ AVCOL_TRC_NB ///< Not part of ABI
+};
+
+/**
+ * YUV colorspace type.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.3.
+ */
+enum AVColorSpace {
+ AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
+ AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
+ AVCOL_SPC_UNSPECIFIED = 2,
+ AVCOL_SPC_RESERVED = 3, ///< reserved for future use by ITU-T and ISO/IEC just like 15-255 are
+ AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+ AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
+ AVCOL_SPC_SMPTE240M = 7, ///< derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
+ AVCOL_SPC_YCGCO = 8, ///< used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+ AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO,
+ AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
+ AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
+ AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x
+ AVCOL_SPC_CHROMA_DERIVED_NCL = 12, ///< Chromaticity-derived non-constant luminance system
+ AVCOL_SPC_CHROMA_DERIVED_CL = 13, ///< Chromaticity-derived constant luminance system
+ AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp
+ AVCOL_SPC_NB ///< Not part of ABI
+};
+
+/**
+ * Visual content value range.
+ *
+ * These values are based on definitions that can be found in multiple
+ * specifications, such as ITU-T BT.709 (3.4 - Quantization of RGB, luminance
+ * and colour-difference signals), ITU-T BT.2020 (Table 5 - Digital
+ * Representation) as well as ITU-T BT.2100 (Table 9 - Digital 10- and 12-bit
+ * integer representation). At the time of writing, the BT.2100 one is
+ * recommended, as it also defines the full range representation.
+ *
+ * Common definitions:
+ * - For RGB and luma planes such as Y in YCbCr and I in ICtCp,
+ * 'E' is the original value in range of 0.0 to 1.0.
+ * - For chroma planes such as Cb,Cr and Ct,Cp, 'E' is the original
+ * value in range of -0.5 to 0.5.
+ * - 'n' is the output bit depth.
+ * - For additional definitions such as rounding and clipping to valid n
+ * bit unsigned integer range, please refer to BT.2100 (Table 9).
+ */
+enum AVColorRange {
+ AVCOL_RANGE_UNSPECIFIED = 0,
+
+ /**
+ * Narrow or limited range content.
+ *
+ * - For luma planes:
+ *
+ * (219 * E + 16) * 2^(n-8)
+ *
+ * F.ex. the range of 16-235 for 8 bits
+ *
+ * - For chroma planes:
+ *
+ * (224 * E + 128) * 2^(n-8)
+ *
+ * F.ex. the range of 16-240 for 8 bits
+ */
+ AVCOL_RANGE_MPEG = 1,
+
+ /**
+ * Full range content.
+ *
+ * - For RGB and luma planes:
+ *
+ * (2^n - 1) * E
+ *
+ * F.ex. the range of 0-255 for 8 bits
+ *
+ * - For chroma planes:
+ *
+ * (2^n - 1) * E + 2^(n - 1)
+ *
+ * F.ex. the range of 1-255 for 8 bits
+ */
+ AVCOL_RANGE_JPEG = 2,
+ AVCOL_RANGE_NB ///< Not part of ABI
+};
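+
+/*
+ * Illustrative sketch (editor's note, not part of the upstream header):
+ * evaluating the AVColorRange formulas above for n = 8 bits reproduces the
+ * familiar limited/full-range bounds.
+ *
+ *     // Limited (MPEG) range, luma:   (219 * E + 16) * 2^(n-8)
+ *     //   E = 0.0 -> 16,   E = 1.0 -> 235
+ *     // Limited (MPEG) range, chroma: (224 * E + 128) * 2^(n-8)
+ *     //   E = -0.5 -> 16,  E = +0.5 -> 240
+ *     // Full (JPEG) range, luma:      (2^n - 1) * E
+ *     //   E = 0.0 -> 0,    E = 1.0 -> 255
+ */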
+
+/**
+ * Location of chroma samples.
+ *
+ * Illustration showing the location of the first (top left) chroma sample of
+ * the image. The left diagram shows only luma, the right one shows the
+ * location of the chroma sample; the two could be imagined to overlay each
+ * other but are drawn separately due to the limitations of ASCII art.
+ *
+ * 1st 2nd 1st 2nd horizontal luma sample positions
+ * v v v v
+ * ______ ______
+ *1st luma line > |X X ... |3 4 X ... X are luma samples,
+ * | |1 2 1-6 are possible chroma positions
+ *2nd luma line > |X X ... |5 6 X ... 0 is undefined/unknown position
+ */
+enum AVChromaLocation {
+ AVCHROMA_LOC_UNSPECIFIED = 0,
+ AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for 4:2:0
+ AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0
+ AVCHROMA_LOC_TOPLEFT = 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2
+ AVCHROMA_LOC_TOP = 4,
+ AVCHROMA_LOC_BOTTOMLEFT = 5,
+ AVCHROMA_LOC_BOTTOM = 6,
+ AVCHROMA_LOC_NB ///< Not part of ABI
+};
+
+#endif /* AVUTIL_PIXFMT_H */
diff --git a/media/ffvpx/libavutil/qsort.h b/media/ffvpx/libavutil/qsort.h
new file mode 100644
index 0000000000..6014f88be3
--- /dev/null
+++ b/media/ffvpx/libavutil/qsort.h
@@ -0,0 +1,122 @@
+/*
+ * copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_QSORT_H
+#define AVUTIL_QSORT_H
+
+#include "macros.h"
+
+
+/**
+ * Quicksort
+ * This sort is fast and fully in-place, but not stable. It is possible to
+ * construct input that requires O(n^2) time, but this is very unlikely to
+ * happen with input that has not been deliberately constructed to trigger it.
+ */
+#define AV_QSORT(p, num, type, cmp) do {\
+ void *stack[64][2];\
+ int sp= 1;\
+ stack[0][0] = p;\
+ stack[0][1] = (p)+(num)-1;\
+ while(sp){\
+ type *start= stack[--sp][0];\
+ type *end = stack[ sp][1];\
+ while(start < end){\
+ if(start < end-1) {\
+ int checksort=0;\
+ type *right = end-2;\
+ type *left = start+1;\
+ type *mid = start + ((end-start)>>1);\
+ if(cmp(start, end) > 0) {\
+ if(cmp( end, mid) > 0) FFSWAP(type, *start, *mid);\
+ else FFSWAP(type, *start, *end);\
+ }else{\
+ if(cmp(start, mid) > 0) FFSWAP(type, *start, *mid);\
+ else checksort= 1;\
+ }\
+ if(cmp(mid, end) > 0){ \
+ FFSWAP(type, *mid, *end);\
+ checksort=0;\
+ }\
+ if(start == end-2) break;\
+ FFSWAP(type, end[-1], *mid);\
+ while(left <= right){\
+ while(left<=right && cmp(left, end-1) < 0)\
+ left++;\
+ while(left<=right && cmp(right, end-1) > 0)\
+ right--;\
+ if(left <= right){\
+ FFSWAP(type, *left, *right);\
+ left++;\
+ right--;\
+ }\
+ }\
+ FFSWAP(type, end[-1], *left);\
+ if(checksort && (mid == left-1 || mid == left)){\
+ mid= start;\
+ while(mid<end && cmp(mid, mid+1) <= 0)\
+ mid++;\
+ if(mid==end)\
+ break;\
+ }\
+ if(end-left < left-start){\
+ stack[sp ][0]= start;\
+ stack[sp++][1]= right;\
+ start = left+1;\
+ }else{\
+ stack[sp ][0]= left+1;\
+ stack[sp++][1]= end;\
+ end = right;\
+ }\
+ }else{\
+ if(cmp(start, end) > 0)\
+ FFSWAP(type, *start, *end);\
+ break;\
+ }\
+ }\
+ }\
+} while (0)
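+
+/*
+ * Illustrative usage sketch (editor's addition, not part of the upstream
+ * header); the comparison function and array below are hypothetical. The
+ * comparator receives pointers to two elements, qsort()-style:
+ *
+ *     static int cmp_int(const int *a, const int *b)
+ *     {
+ *         return *a < *b ? -1 : (*a > *b);
+ *     }
+ *
+ *     int v[5] = { 3, 1, 4, 1, 5 };
+ *     AV_QSORT(v, 5, int, cmp_int);   // v becomes { 1, 1, 3, 4, 5 }
+ */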
+
+/**
+ * Merge sort. This sort requires a temporary buffer and is stable; its
+ * worst-case time is O(n log n).
+ * @param p must be an lvalue pointer; this macro may exchange it with tmp
+ * @param tmp must be an lvalue pointer; this macro may exchange it with p
+ */
+#define AV_MSORT(p, tmp, num, type, cmp) do {\
+ unsigned i, j, step;\
+ for(step=1; step<(num); step+=step){\
+ for(i=0; i<(num); i+=2*step){\
+ unsigned a[2] = {i, i+step};\
+ unsigned end = FFMIN(i+2*step, (num));\
+ for(j=i; a[0]<i+step && a[1]<end; j++){\
+ int idx= cmp(p+a[0], p+a[1]) > 0;\
+ tmp[j] = p[ a[idx]++ ];\
+ }\
+ if(a[0]>=i+step) a[0] = a[1];\
+ for(; j<end; j++){\
+ tmp[j] = p[ a[0]++ ];\
+ }\
+ }\
+ FFSWAP(type*, p, tmp);\
+ }\
+} while (0)
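+
+/*
+ * Illustrative usage sketch (editor's addition, not part of the upstream
+ * header), reusing the hypothetical cmp_int() above. AV_MSORT may swap the
+ * two buffer pointers, so both must be lvalues; the sorted data ends up
+ * reachable through p:
+ *
+ *     int buf[5] = { 3, 1, 4, 1, 5 }, scratch[5];
+ *     int *p = buf, *tmp = scratch;
+ *     AV_MSORT(p, tmp, 5, int, cmp_int);
+ */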
+
+#endif /* AVUTIL_QSORT_H */
diff --git a/media/ffvpx/libavutil/rational.c b/media/ffvpx/libavutil/rational.c
new file mode 100644
index 0000000000..eb148ddb12
--- /dev/null
+++ b/media/ffvpx/libavutil/rational.c
@@ -0,0 +1,193 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rational numbers
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "avassert.h"
+#include <limits.h>
+
+#include "common.h"
+#include "mathematics.h"
+#include "rational.h"
+
+int av_reduce(int *dst_num, int *dst_den,
+ int64_t num, int64_t den, int64_t max)
+{
+ AVRational a0 = { 0, 1 }, a1 = { 1, 0 };
+ int sign = (num < 0) ^ (den < 0);
+ int64_t gcd = av_gcd(FFABS(num), FFABS(den));
+
+ if (gcd) {
+ num = FFABS(num) / gcd;
+ den = FFABS(den) / gcd;
+ }
+ if (num <= max && den <= max) {
+ a1 = (AVRational) { num, den };
+ den = 0;
+ }
+
+ while (den) {
+ uint64_t x = num / den;
+ int64_t next_den = num - den * x;
+ int64_t a2n = x * a1.num + a0.num;
+ int64_t a2d = x * a1.den + a0.den;
+
+ if (a2n > max || a2d > max) {
+ if (a1.num) x = (max - a0.num) / a1.num;
+ if (a1.den) x = FFMIN(x, (max - a0.den) / a1.den);
+
+ if (den * (2 * x * a1.den + a0.den) > num * a1.den)
+ a1 = (AVRational) { x * a1.num + a0.num, x * a1.den + a0.den };
+ break;
+ }
+
+ a0 = a1;
+ a1 = (AVRational) { a2n, a2d };
+ num = den;
+ den = next_den;
+ }
+ av_assert2(av_gcd(a1.num, a1.den) <= 1U);
+ av_assert2(a1.num <= max && a1.den <= max);
+
+ *dst_num = sign ? -a1.num : a1.num;
+ *dst_den = a1.den;
+
+ return den == 0;
+}
+
+AVRational av_mul_q(AVRational b, AVRational c)
+{
+ av_reduce(&b.num, &b.den,
+ b.num * (int64_t) c.num,
+ b.den * (int64_t) c.den, INT_MAX);
+ return b;
+}
+
+AVRational av_div_q(AVRational b, AVRational c)
+{
+ return av_mul_q(b, (AVRational) { c.den, c.num });
+}
+
+AVRational av_add_q(AVRational b, AVRational c) {
+ av_reduce(&b.num, &b.den,
+ b.num * (int64_t) c.den +
+ c.num * (int64_t) b.den,
+ b.den * (int64_t) c.den, INT_MAX);
+ return b;
+}
+
+AVRational av_sub_q(AVRational b, AVRational c)
+{
+ return av_add_q(b, (AVRational) { -c.num, c.den });
+}
+
+AVRational av_d2q(double d, int max)
+{
+ AVRational a;
+ int exponent;
+ int64_t den;
+ if (isnan(d))
+ return (AVRational) { 0,0 };
+ if (fabs(d) > INT_MAX + 3LL)
+ return (AVRational) { d < 0 ? -1 : 1, 0 };
+ frexp(d, &exponent);
+ exponent = FFMAX(exponent-1, 0);
+ den = 1LL << (61 - exponent);
+ // (int64_t)rint() and llrint() do not work with gcc on ia64 and sparc64,
+ // see Ticket2713 for affected gcc/glibc versions
+ av_reduce(&a.num, &a.den, floor(d * den + 0.5), den, max);
+ if ((!a.num || !a.den) && d && max>0 && max<INT_MAX)
+ av_reduce(&a.num, &a.den, floor(d * den + 0.5), den, INT_MAX);
+
+ return a;
+}
+
+int av_nearer_q(AVRational q, AVRational q1, AVRational q2)
+{
+ /* n/d is q, a/b is the median between q1 and q2 */
+ int64_t a = q1.num * (int64_t)q2.den + q2.num * (int64_t)q1.den;
+ int64_t b = 2 * (int64_t)q1.den * q2.den;
+
+ /* rnd_up(a*d/b) > n => a*d/b > n */
+ int64_t x_up = av_rescale_rnd(a, q.den, b, AV_ROUND_UP);
+
+ /* rnd_down(a*d/b) < n => a*d/b < n */
+ int64_t x_down = av_rescale_rnd(a, q.den, b, AV_ROUND_DOWN);
+
+ return ((x_up > q.num) - (x_down < q.num)) * av_cmp_q(q2, q1);
+}
+
+int av_find_nearest_q_idx(AVRational q, const AVRational* q_list)
+{
+ int i, nearest_q_idx = 0;
+ for (i = 0; q_list[i].den; i++)
+ if (av_nearer_q(q, q_list[i], q_list[nearest_q_idx]) > 0)
+ nearest_q_idx = i;
+
+ return nearest_q_idx;
+}
+
+uint32_t av_q2intfloat(AVRational q) {
+ int64_t n;
+ int shift;
+ int sign = 0;
+
+ if (q.den < 0) {
+ q.den *= -1;
+ q.num *= -1;
+ }
+ if (q.num < 0) {
+ q.num *= -1;
+ sign = 1;
+ }
+
+ if (!q.num && !q.den) return 0xFFC00000;
+ if (!q.num) return 0;
+ if (!q.den) return 0x7F800000 | (q.num & 0x80000000);
+
+ shift = 23 + av_log2(q.den) - av_log2(q.num);
+ if (shift >= 0) n = av_rescale(q.num, 1LL<<shift, q.den);
+ else n = av_rescale(q.num, 1, ((int64_t)q.den) << -shift);
+
+ shift -= n >= (1<<24);
+ shift += n < (1<<23);
+
+ if (shift >= 0) n = av_rescale(q.num, 1LL<<shift, q.den);
+ else n = av_rescale(q.num, 1, ((int64_t)q.den) << -shift);
+
+ av_assert1(n < (1<<24));
+ av_assert1(n >= (1<<23));
+
+ return sign<<31 | (150-shift)<<23 | (n - (1<<23));
+}
+
+AVRational av_gcd_q(AVRational a, AVRational b, int max_den, AVRational def)
+{
+ int64_t gcd, lcm;
+
+ gcd = av_gcd(a.den, b.den);
+ lcm = (a.den / gcd) * b.den;
+ return lcm < max_den ? av_make_q(av_gcd(a.num, b.num), lcm) : def;
+}
diff --git a/media/ffvpx/libavutil/rational.h b/media/ffvpx/libavutil/rational.h
new file mode 100644
index 0000000000..8cbfc8e066
--- /dev/null
+++ b/media/ffvpx/libavutil/rational.h
@@ -0,0 +1,221 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_math_rational
+ * Utilities for rational number calculation.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_RATIONAL_H
+#define AVUTIL_RATIONAL_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "attributes.h"
+
+/**
+ * @defgroup lavu_math_rational AVRational
+ * @ingroup lavu_math
+ * Rational number calculation.
+ *
+ * While rational numbers can be expressed as floating-point numbers, the
+ * conversion process is a lossy one, as are floating-point operations. On the
+ * other hand, the nature of FFmpeg demands highly accurate calculation of
+ * timestamps. This set of rational number utilities serves as a generic
+ * interface for manipulating rational numbers as pairs of numerators and
+ * denominators.
+ *
+ * Many of the functions that operate on AVRationals have the suffix `_q`, in
+ * reference to the mathematical symbol "ℚ" (Q) which denotes the set of all
+ * rational numbers.
+ *
+ * @{
+ */
+
+/**
+ * Rational number (pair of numerator and denominator).
+ */
+typedef struct AVRational{
+ int num; ///< Numerator
+ int den; ///< Denominator
+} AVRational;
+
+/**
+ * Create an AVRational.
+ *
+ * Useful for compilers that do not support compound literals.
+ *
+ * @note The return value is not reduced.
+ * @see av_reduce()
+ */
+static inline AVRational av_make_q(int num, int den)
+{
+ AVRational r = { num, den };
+ return r;
+}
+
+/**
+ * Compare two rationals.
+ *
+ * @param a First rational
+ * @param b Second rational
+ *
+ * @return One of the following values:
+ * - 0 if `a == b`
+ * - 1 if `a > b`
+ * - -1 if `a < b`
+ * - `INT_MIN` if one of the values is of the form `0 / 0`
+ */
+static inline int av_cmp_q(AVRational a, AVRational b){
+ const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;
+
+ if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1;
+ else if(b.den && a.den) return 0;
+ else if(a.num && b.num) return (a.num>>31) - (b.num>>31);
+ else return INT_MIN;
+}
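+
+/*
+ * Illustrative sketch (editor's note, not part of the upstream header):
+ *
+ *     av_cmp_q(av_make_q(1, 2), av_make_q(2, 4));   // 0, equal values
+ *     av_cmp_q(av_make_q(3, 4), av_make_q(1, 2));   // 1, first is larger
+ *     av_cmp_q(av_make_q(0, 0), av_make_q(1, 2));   // INT_MIN, 0/0 is invalid
+ */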
+
+/**
+ * Convert an AVRational to a `double`.
+ * @param a AVRational to convert
+ * @return `a` in floating-point form
+ * @see av_d2q()
+ */
+static inline double av_q2d(AVRational a){
+ return a.num / (double) a.den;
+}
+
+/**
+ * Reduce a fraction.
+ *
+ * This is useful for framerate calculations.
+ *
+ * @param[out] dst_num Destination numerator
+ * @param[out] dst_den Destination denominator
+ * @param[in] num Source numerator
+ * @param[in] den Source denominator
+ * @param[in] max Maximum allowed values for `dst_num` & `dst_den`
+ * @return 1 if the operation is exact, 0 otherwise
+ */
+int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max);
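+
+/*
+ * Illustrative sketch (editor's note, not part of the upstream header):
+ *
+ *     int num, den;
+ *     av_reduce(&num, &den, 36, 24, INT_MAX);   // num = 3, den = 2, returns 1 (exact)
+ */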
+
+/**
+ * Multiply two rationals.
+ * @param b First rational
+ * @param c Second rational
+ * @return b*c
+ */
+AVRational av_mul_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Divide one rational by another.
+ * @param b First rational
+ * @param c Second rational
+ * @return b/c
+ */
+AVRational av_div_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Add two rationals.
+ * @param b First rational
+ * @param c Second rational
+ * @return b+c
+ */
+AVRational av_add_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Subtract one rational from another.
+ * @param b First rational
+ * @param c Second rational
+ * @return b-c
+ */
+AVRational av_sub_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Invert a rational.
+ * @param q value
+ * @return 1 / q
+ */
+static av_always_inline AVRational av_inv_q(AVRational q)
+{
+ AVRational r = { q.den, q.num };
+ return r;
+}
+
+/**
+ * Convert a double precision floating point number to a rational.
+ *
+ * In case of infinity, the returned value is expressed as `{1, 0}` or
+ * `{-1, 0}` depending on the sign.
+ *
+ * @param d `double` to convert
+ * @param max Maximum allowed numerator and denominator
+ * @return `d` in AVRational form
+ * @see av_q2d()
+ */
+AVRational av_d2q(double d, int max) av_const;
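+
+/*
+ * Illustrative sketch (editor's note, not part of the upstream header):
+ *
+ *     AVRational r = av_d2q(0.5, INT_MAX);   // expected to yield {1, 2}
+ */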
+
+/**
+ * Find which of the two rationals is closer to another rational.
+ *
+ * @param q Rational to be compared against
+ * @param q1 Rational to be tested
+ * @param q2 Rational to be tested
+ * @return One of the following values:
+ * - 1 if `q1` is nearer to `q` than `q2`
+ * - -1 if `q2` is nearer to `q` than `q1`
+ * - 0 if they have the same distance
+ */
+int av_nearer_q(AVRational q, AVRational q1, AVRational q2);
+
+/**
+ * Find the value in a list of rationals nearest a given reference rational.
+ *
+ * @param q Reference rational
+ * @param q_list Array of rationals terminated by `{0, 0}`
+ * @return Index of the nearest value found in the array
+ */
+int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
+
+/**
+ * Convert an AVRational to an IEEE 32-bit `float` expressed in fixed-point
+ * format.
+ *
+ * @param q Rational to be converted
+ * @return Equivalent floating-point value, expressed as an unsigned 32-bit
+ * integer.
+ * @note The returned value is platform-independent.
+ */
+uint32_t av_q2intfloat(AVRational q);
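+
+/*
+ * Illustrative sketch (editor's note, not part of the upstream header): the
+ * returned bits are the IEEE-754 binary32 encoding of num/den, e.g.
+ *
+ *     av_q2intfloat(av_make_q(1, 2));   // expected to be 0x3F000000, i.e. 0.5f
+ */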
+
+/**
+ * Return the best rational so that a and b are multiples of it.
+ * If the resulting denominator is larger than max_den, return def.
+ */
+AVRational av_gcd_q(AVRational a, AVRational b, int max_den, AVRational def);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RATIONAL_H */
diff --git a/media/ffvpx/libavutil/replaygain.h b/media/ffvpx/libavutil/replaygain.h
new file mode 100644
index 0000000000..b49bf1a3d9
--- /dev/null
+++ b/media/ffvpx/libavutil/replaygain.h
@@ -0,0 +1,50 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_REPLAYGAIN_H
+#define AVUTIL_REPLAYGAIN_H
+
+#include <stdint.h>
+
+/**
+ * ReplayGain information (see
+ * http://wiki.hydrogenaudio.org/index.php?title=ReplayGain_1.0_specification).
+ * The size of this struct is a part of the public ABI.
+ */
+typedef struct AVReplayGain {
+ /**
+ * Track replay gain in microbels (divide by 100000 to get the value in dB).
+ * Should be set to INT32_MIN when unknown.
+ */
+ int32_t track_gain;
+ /**
+ * Peak track amplitude, with 100000 representing full scale (but values
+ * may overflow). 0 when unknown.
+ */
+ uint32_t track_peak;
+ /**
+ * Same as track_gain, but for the whole album.
+ */
+ int32_t album_gain;
+ /**
+ * Same as track_peak, but for the whole album,
+ * Same as track_peak, but for the whole album.
+ uint32_t album_peak;
+} AVReplayGain;
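+
+/*
+ * Illustrative sketch (editor's note, not part of the upstream header), where
+ * `rg` is a hypothetical pointer to a filled-in AVReplayGain:
+ *
+ *     if (rg->track_gain != INT32_MIN) {
+ *         double gain_db = rg->track_gain / 100000.0;   // microbels -> dB
+ *         // apply gain_db to the track ...
+ *     }
+ */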
+
+#endif /* AVUTIL_REPLAYGAIN_H */
diff --git a/media/ffvpx/libavutil/reverse.c b/media/ffvpx/libavutil/reverse.c
new file mode 100644
index 0000000000..105eb03dda
--- /dev/null
+++ b/media/ffvpx/libavutil/reverse.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+const uint8_t ff_reverse[256] = {
+0x00,0x80,0x40,0xC0,0x20,0xA0,0x60,0xE0,0x10,0x90,0x50,0xD0,0x30,0xB0,0x70,0xF0,
+0x08,0x88,0x48,0xC8,0x28,0xA8,0x68,0xE8,0x18,0x98,0x58,0xD8,0x38,0xB8,0x78,0xF8,
+0x04,0x84,0x44,0xC4,0x24,0xA4,0x64,0xE4,0x14,0x94,0x54,0xD4,0x34,0xB4,0x74,0xF4,
+0x0C,0x8C,0x4C,0xCC,0x2C,0xAC,0x6C,0xEC,0x1C,0x9C,0x5C,0xDC,0x3C,0xBC,0x7C,0xFC,
+0x02,0x82,0x42,0xC2,0x22,0xA2,0x62,0xE2,0x12,0x92,0x52,0xD2,0x32,0xB2,0x72,0xF2,
+0x0A,0x8A,0x4A,0xCA,0x2A,0xAA,0x6A,0xEA,0x1A,0x9A,0x5A,0xDA,0x3A,0xBA,0x7A,0xFA,
+0x06,0x86,0x46,0xC6,0x26,0xA6,0x66,0xE6,0x16,0x96,0x56,0xD6,0x36,0xB6,0x76,0xF6,
+0x0E,0x8E,0x4E,0xCE,0x2E,0xAE,0x6E,0xEE,0x1E,0x9E,0x5E,0xDE,0x3E,0xBE,0x7E,0xFE,
+0x01,0x81,0x41,0xC1,0x21,0xA1,0x61,0xE1,0x11,0x91,0x51,0xD1,0x31,0xB1,0x71,0xF1,
+0x09,0x89,0x49,0xC9,0x29,0xA9,0x69,0xE9,0x19,0x99,0x59,0xD9,0x39,0xB9,0x79,0xF9,
+0x05,0x85,0x45,0xC5,0x25,0xA5,0x65,0xE5,0x15,0x95,0x55,0xD5,0x35,0xB5,0x75,0xF5,
+0x0D,0x8D,0x4D,0xCD,0x2D,0xAD,0x6D,0xED,0x1D,0x9D,0x5D,0xDD,0x3D,0xBD,0x7D,0xFD,
+0x03,0x83,0x43,0xC3,0x23,0xA3,0x63,0xE3,0x13,0x93,0x53,0xD3,0x33,0xB3,0x73,0xF3,
+0x0B,0x8B,0x4B,0xCB,0x2B,0xAB,0x6B,0xEB,0x1B,0x9B,0x5B,0xDB,0x3B,0xBB,0x7B,0xFB,
+0x07,0x87,0x47,0xC7,0x27,0xA7,0x67,0xE7,0x17,0x97,0x57,0xD7,0x37,0xB7,0x77,0xF7,
+0x0F,0x8F,0x4F,0xCF,0x2F,0xAF,0x6F,0xEF,0x1F,0x9F,0x5F,0xDF,0x3F,0xBF,0x7F,0xFF,
+};
diff --git a/media/ffvpx/libavutil/reverse.h b/media/ffvpx/libavutil/reverse.h
new file mode 100644
index 0000000000..4eb6123932
--- /dev/null
+++ b/media/ffvpx/libavutil/reverse.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_REVERSE_H
+#define AVUTIL_REVERSE_H
+
+#include <stdint.h>
+
+extern const uint8_t ff_reverse[256];
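+
+/*
+ * Illustrative sketch (editor's note, not part of the upstream header):
+ * ff_reverse maps a byte to its bit-reversed value, e.g.
+ *
+ *     ff_reverse[0x01]   // 0x80: 00000001 -> 10000000
+ *     ff_reverse[0xF0]   // 0x0F: 11110000 -> 00001111
+ */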
+
+#endif /* AVUTIL_REVERSE_H */
diff --git a/media/ffvpx/libavutil/samplefmt.c b/media/ffvpx/libavutil/samplefmt.c
new file mode 100644
index 0000000000..6d3ec34dab
--- /dev/null
+++ b/media/ffvpx/libavutil/samplefmt.c
@@ -0,0 +1,263 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "error.h"
+#include "macros.h"
+#include "mem.h"
+#include "samplefmt.h"
+
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+
+typedef struct SampleFmtInfo {
+ char name[8];
+ int bits;
+ int planar;
+ enum AVSampleFormat altform; ///< planar<->packed alternative form
+} SampleFmtInfo;
+
+/** this table gives more information about formats */
+static const SampleFmtInfo sample_fmt_info[AV_SAMPLE_FMT_NB] = {
+ [AV_SAMPLE_FMT_U8] = { .name = "u8", .bits = 8, .planar = 0, .altform = AV_SAMPLE_FMT_U8P },
+ [AV_SAMPLE_FMT_S16] = { .name = "s16", .bits = 16, .planar = 0, .altform = AV_SAMPLE_FMT_S16P },
+ [AV_SAMPLE_FMT_S32] = { .name = "s32", .bits = 32, .planar = 0, .altform = AV_SAMPLE_FMT_S32P },
+ [AV_SAMPLE_FMT_S64] = { .name = "s64", .bits = 64, .planar = 0, .altform = AV_SAMPLE_FMT_S64P },
+ [AV_SAMPLE_FMT_FLT] = { .name = "flt", .bits = 32, .planar = 0, .altform = AV_SAMPLE_FMT_FLTP },
+ [AV_SAMPLE_FMT_DBL] = { .name = "dbl", .bits = 64, .planar = 0, .altform = AV_SAMPLE_FMT_DBLP },
+ [AV_SAMPLE_FMT_U8P] = { .name = "u8p", .bits = 8, .planar = 1, .altform = AV_SAMPLE_FMT_U8 },
+ [AV_SAMPLE_FMT_S16P] = { .name = "s16p", .bits = 16, .planar = 1, .altform = AV_SAMPLE_FMT_S16 },
+ [AV_SAMPLE_FMT_S32P] = { .name = "s32p", .bits = 32, .planar = 1, .altform = AV_SAMPLE_FMT_S32 },
+ [AV_SAMPLE_FMT_S64P] = { .name = "s64p", .bits = 64, .planar = 1, .altform = AV_SAMPLE_FMT_S64 },
+ [AV_SAMPLE_FMT_FLTP] = { .name = "fltp", .bits = 32, .planar = 1, .altform = AV_SAMPLE_FMT_FLT },
+ [AV_SAMPLE_FMT_DBLP] = { .name = "dblp", .bits = 64, .planar = 1, .altform = AV_SAMPLE_FMT_DBL },
+};
+
+const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
+{
+ if (sample_fmt < 0 || sample_fmt >= AV_SAMPLE_FMT_NB)
+ return NULL;
+ return sample_fmt_info[sample_fmt].name;
+}
+
+enum AVSampleFormat av_get_sample_fmt(const char *name)
+{
+ int i;
+
+ for (i = 0; i < AV_SAMPLE_FMT_NB; i++)
+ if (!strcmp(sample_fmt_info[i].name, name))
+ return i;
+ return AV_SAMPLE_FMT_NONE;
+}
+
+enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar)
+{
+ if (sample_fmt < 0 || sample_fmt >= AV_SAMPLE_FMT_NB)
+ return AV_SAMPLE_FMT_NONE;
+ if (sample_fmt_info[sample_fmt].planar == planar)
+ return sample_fmt;
+ return sample_fmt_info[sample_fmt].altform;
+}
+
+enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
+{
+ if (sample_fmt < 0 || sample_fmt >= AV_SAMPLE_FMT_NB)
+ return AV_SAMPLE_FMT_NONE;
+ if (sample_fmt_info[sample_fmt].planar)
+ return sample_fmt_info[sample_fmt].altform;
+ return sample_fmt;
+}
+
+enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt)
+{
+ if (sample_fmt < 0 || sample_fmt >= AV_SAMPLE_FMT_NB)
+ return AV_SAMPLE_FMT_NONE;
+ if (sample_fmt_info[sample_fmt].planar)
+ return sample_fmt;
+ return sample_fmt_info[sample_fmt].altform;
+}
+
+char *av_get_sample_fmt_string (char *buf, int buf_size, enum AVSampleFormat sample_fmt)
+{
+ /* print header */
+ if (sample_fmt < 0)
+ snprintf(buf, buf_size, "name " " depth");
+ else if (sample_fmt < AV_SAMPLE_FMT_NB) {
+ SampleFmtInfo info = sample_fmt_info[sample_fmt];
+ snprintf (buf, buf_size, "%-6s" " %2d ", info.name, info.bits);
+ }
+
+ return buf;
+}
+
+int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
+{
+ return sample_fmt < 0 || sample_fmt >= AV_SAMPLE_FMT_NB ?
+ 0 : sample_fmt_info[sample_fmt].bits >> 3;
+}
+
+int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
+{
+ if (sample_fmt < 0 || sample_fmt >= AV_SAMPLE_FMT_NB)
+ return 0;
+ return sample_fmt_info[sample_fmt].planar;
+}
+
+int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align)
+{
+ int line_size;
+ int sample_size = av_get_bytes_per_sample(sample_fmt);
+ int planar = av_sample_fmt_is_planar(sample_fmt);
+
+ /* validate parameter ranges */
+ if (!sample_size || nb_samples <= 0 || nb_channels <= 0)
+ return AVERROR(EINVAL);
+
+ /* auto-select alignment if not specified */
+ if (!align) {
+ if (nb_samples > INT_MAX - 31)
+ return AVERROR(EINVAL);
+ align = 1;
+ nb_samples = FFALIGN(nb_samples, 32);
+ }
+
+ /* check for integer overflow */
+ if (nb_channels > INT_MAX / align ||
+ (int64_t)nb_channels * nb_samples > (INT_MAX - (align * nb_channels)) / sample_size)
+ return AVERROR(EINVAL);
+
+ line_size = planar ? FFALIGN(nb_samples * sample_size, align) :
+ FFALIGN(nb_samples * sample_size * nb_channels, align);
+ if (linesize)
+ *linesize = line_size;
+
+ return planar ? line_size * nb_channels : line_size;
+}
+
+int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
+ const uint8_t *buf, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align)
+{
+ int ch, planar, buf_size, line_size;
+
+ planar = av_sample_fmt_is_planar(sample_fmt);
+ buf_size = av_samples_get_buffer_size(&line_size, nb_channels, nb_samples,
+ sample_fmt, align);
+ if (buf_size < 0)
+ return buf_size;
+
+ if (linesize)
+ *linesize = line_size;
+
+ memset(audio_data, 0, planar
+ ? sizeof(*audio_data) * nb_channels
+ : sizeof(*audio_data));
+
+ if (!buf)
+ return buf_size;
+
+ audio_data[0] = (uint8_t *)buf;
+ for (ch = 1; planar && ch < nb_channels; ch++)
+ audio_data[ch] = audio_data[ch-1] + line_size;
+
+ return buf_size;
+}
+
+int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align)
+{
+ uint8_t *buf;
+ int size = av_samples_get_buffer_size(NULL, nb_channels, nb_samples,
+ sample_fmt, align);
+ if (size < 0)
+ return size;
+
+ buf = av_malloc(size);
+ if (!buf)
+ return AVERROR(ENOMEM);
+
+ size = av_samples_fill_arrays(audio_data, linesize, buf, nb_channels,
+ nb_samples, sample_fmt, align);
+ if (size < 0) {
+ av_free(buf);
+ return size;
+ }
+
+ av_samples_set_silence(audio_data, 0, nb_samples, nb_channels, sample_fmt);
+
+ return size;
+}
+
+int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align)
+{
+ int ret, nb_planes = av_sample_fmt_is_planar(sample_fmt) ? nb_channels : 1;
+
+ *audio_data = av_calloc(nb_planes, sizeof(**audio_data));
+ if (!*audio_data)
+ return AVERROR(ENOMEM);
+ ret = av_samples_alloc(*audio_data, linesize, nb_channels,
+ nb_samples, sample_fmt, align);
+ if (ret < 0)
+ av_freep(audio_data);
+ return ret;
+}
+
+int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset,
+ int src_offset, int nb_samples, int nb_channels,
+ enum AVSampleFormat sample_fmt)
+{
+ int planar = av_sample_fmt_is_planar(sample_fmt);
+ int planes = planar ? nb_channels : 1;
+ int block_align = av_get_bytes_per_sample(sample_fmt) * (planar ? 1 : nb_channels);
+ int data_size = nb_samples * block_align;
+ int i;
+
+ dst_offset *= block_align;
+ src_offset *= block_align;
+
+ if((dst[0] < src[0] ? src[0] - dst[0] : dst[0] - src[0]) >= data_size) {
+ for (i = 0; i < planes; i++)
+ memcpy(dst[i] + dst_offset, src[i] + src_offset, data_size);
+ } else {
+ for (i = 0; i < planes; i++)
+ memmove(dst[i] + dst_offset, src[i] + src_offset, data_size);
+ }
+
+ return 0;
+}
+
+int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples,
+ int nb_channels, enum AVSampleFormat sample_fmt)
+{
+ int planar = av_sample_fmt_is_planar(sample_fmt);
+ int planes = planar ? nb_channels : 1;
+ int block_align = av_get_bytes_per_sample(sample_fmt) * (planar ? 1 : nb_channels);
+ int data_size = nb_samples * block_align;
+ int fill_char = (sample_fmt == AV_SAMPLE_FMT_U8 ||
+ sample_fmt == AV_SAMPLE_FMT_U8P) ? 0x80 : 0x00;
+ int i;
+
+ offset *= block_align;
+
+ for (i = 0; i < planes; i++)
+ memset(audio_data[i] + offset, fill_char, data_size);
+
+ return 0;
+}
diff --git a/media/ffvpx/libavutil/samplefmt.h b/media/ffvpx/libavutil/samplefmt.h
new file mode 100644
index 0000000000..6bad0e254a
--- /dev/null
+++ b/media/ffvpx/libavutil/samplefmt.h
@@ -0,0 +1,269 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SAMPLEFMT_H
+#define AVUTIL_SAMPLEFMT_H
+
+#include <stdint.h>
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ *
+ * @defgroup lavu_sampfmts Audio sample formats
+ *
+ * Audio sample format enumeration and related convenience functions.
+ * @{
+ */
+
+/**
+ * Audio sample formats
+ *
+ * - The data described by the sample format is always in native-endian order.
+ * Sample values can be expressed by native C types, hence the lack of a signed
+ * 24-bit sample format even though it is a common raw audio data format.
+ *
+ * - The floating-point formats are based on full volume being in the range
+ * [-1.0, 1.0]. Any values outside this range are beyond full volume level.
+ *
+ * - The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg
+ * (such as AVFrame in libavcodec) is as follows:
+ *
+ * @par
+ * For planar sample formats, each audio channel is in a separate data plane,
+ * and linesize is the buffer size, in bytes, for a single plane. All data
+ * planes must be the same size. For packed sample formats, only the first data
+ * plane is used, and samples for each channel are interleaved. In this case,
+ * linesize is the buffer size, in bytes, for that single plane.
+ *
+ */
+enum AVSampleFormat {
+ AV_SAMPLE_FMT_NONE = -1,
+ AV_SAMPLE_FMT_U8, ///< unsigned 8 bits
+ AV_SAMPLE_FMT_S16, ///< signed 16 bits
+ AV_SAMPLE_FMT_S32, ///< signed 32 bits
+ AV_SAMPLE_FMT_FLT, ///< float
+ AV_SAMPLE_FMT_DBL, ///< double
+
+ AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar
+ AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar
+ AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar
+ AV_SAMPLE_FMT_FLTP, ///< float, planar
+ AV_SAMPLE_FMT_DBLP, ///< double, planar
+ AV_SAMPLE_FMT_S64, ///< signed 64 bits
+ AV_SAMPLE_FMT_S64P, ///< signed 64 bits, planar
+
+ AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically
+};
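+
+/*
+ * Illustrative sketch (editor's note, not part of the upstream header): for a
+ * stereo stream, packed AV_SAMPLE_FMT_S16 interleaves the channels in a
+ * single plane,
+ *
+ *     data[0]: L0 R0 L1 R1 L2 R2 ...
+ *
+ * while planar AV_SAMPLE_FMT_S16P keeps one plane per channel:
+ *
+ *     data[0]: L0 L1 L2 ...
+ *     data[1]: R0 R1 R2 ...
+ */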
+
+/**
+ * Return the name of sample_fmt, or NULL if sample_fmt is not
+ * recognized.
+ */
+const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);
+
+/**
+ * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE
+ * on error.
+ */
+enum AVSampleFormat av_get_sample_fmt(const char *name);
+
+/**
+ * Return the planar<->packed alternative form of the given sample format, or
+ * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the
+ * requested planar/packed format, the format returned is the same as the
+ * input.
+ */
+enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar);
+
+/**
+ * Get the packed alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in packed format, the format returned is
+ * the same as the input.
+ *
+ * @return the packed alternative form of the given sample format or
+ AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the planar alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in planar format, the format returned is
+ * the same as the input.
+ *
+ * @return the planar alternative form of the given sample format or
+ AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Generate a string describing the sample format sample_fmt, or a
+ * header if sample_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param sample_fmt the number of the sample format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ * @return the pointer to the filled buffer or NULL if sample_fmt is
+ * unknown or in case of other errors
+ */
+char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt);
+
+/**
+ * Return number of bytes per sample.
+ *
+ * @param sample_fmt the sample format
+ * @return number of bytes per sample or zero if unknown for the given
+ * sample format
+ */
+int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);
+
+/**
+ * Check if the sample format is planar.
+ *
+ * @param sample_fmt the sample format to inspect
+ * @return 1 if the sample format is planar, 0 if it is interleaved
+ */
+int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the required buffer size for the given audio parameters.
+ *
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return required buffer size, or negative error code on failure
+ */
+int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * @}
+ *
+ * @defgroup lavu_sampmanip Samples manipulation
+ *
+ * Functions that manipulate audio samples
+ * @{
+ */
+
+/**
+ * Fill plane data pointers and linesize for samples with sample
+ * format sample_fmt.
+ *
+ * The audio_data array is filled with the pointers to the samples data planes:
+ * for planar, set the start point of each channel's data within the buffer,
+ * for packed, set the start point of the entire buffer only.
+ *
+ * The value pointed to by linesize is set to the aligned size of each
+ * channel's data buffer for planar layout, or to the aligned size of the
+ * buffer for all channels for packed layout.
+ *
+ * The buffer in buf must be big enough to contain all the samples
+ * (use av_samples_get_buffer_size() to compute its minimum size),
+ * otherwise the audio_data pointers will point to invalid data.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param buf the pointer to a buffer containing the samples
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return minimum size in bytes required for the buffer on success,
+ * or a negative error code on failure
+ */
+int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
+ const uint8_t *buf,
+ int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a samples buffer for nb_samples samples, and fill data pointers and
+ * linesize accordingly.
+ * The allocated samples buffer can be freed by using av_freep(&audio_data[0])
+ * Allocated data will be initialized to silence.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize aligned size for audio buffer(s), may be NULL
+ * @param nb_channels number of audio channels
+ * @param nb_samples number of samples per channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return >=0 on success or a negative error code on failure
+ * @todo return the size of the allocated buffer in case of success at the next bump
+ * @see av_samples_fill_arrays()
+ * @see av_samples_alloc_array_and_samples()
+ */
+int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a data pointers array, samples buffer for nb_samples
+ * samples, and fill data pointers and linesize accordingly.
+ *
+ * This is the same as av_samples_alloc(), but also allocates the data
+ * pointers array.
+ *
+ * @see av_samples_alloc()
+ */
+int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Copy samples from src to dst.
+ *
+ * @param dst destination array of pointers to data planes
+ * @param src source array of pointers to data planes
+ * @param dst_offset offset in samples at which the data will be written to dst
+ * @param src_offset offset in samples at which the data will be read from src
+ * @param nb_samples number of samples to be copied
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset,
+ int src_offset, int nb_samples, int nb_channels,
+ enum AVSampleFormat sample_fmt);
+
+/**
+ * Fill an audio buffer with silence.
+ *
+ * @param audio_data array of pointers to data planes
+ * @param offset offset in samples at which to start filling
+ * @param nb_samples number of samples to fill
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples,
+ int nb_channels, enum AVSampleFormat sample_fmt);
+
+/**
+ * @}
+ * @}
+ */
+#endif /* AVUTIL_SAMPLEFMT_H */
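
For reference, a minimal sketch of how the allocation helpers declared above might be used (illustrative only, not part of the patch; the function and variable names are made up, and it relies solely on functions declared in this header plus av_freep() from mem.h):

    #include <stdio.h>
    #include "libavutil/samplefmt.h"
    #include "libavutil/mem.h"

    /* Allocate 1024 samples of planar float for 2 channels, print the layout
     * computed by the helpers, then release the buffers. */
    static int planar_alloc_sketch(void)
    {
        uint8_t **data = NULL;
        int linesize, ret;

        ret = av_samples_alloc_array_and_samples(&data, &linesize, 2, 1024,
                                                 AV_SAMPLE_FMT_FLTP, 0);
        if (ret < 0)
            return ret;

        printf("planar=%d bytes/sample=%d linesize=%d\n",
               av_sample_fmt_is_planar(AV_SAMPLE_FMT_FLTP),
               av_get_bytes_per_sample(AV_SAMPLE_FMT_FLTP),
               linesize);

        av_freep(&data[0]); /* frees the samples buffer */
        av_freep(&data);    /* frees the pointer array itself */
        return 0;
    }
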
diff --git a/media/ffvpx/libavutil/slicethread.c b/media/ffvpx/libavutil/slicethread.c
new file mode 100644
index 0000000000..115b099736
--- /dev/null
+++ b/media/ffvpx/libavutil/slicethread.c
@@ -0,0 +1,259 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdatomic.h>
+#include "cpu.h"
+#include "internal.h"
+#include "slicethread.h"
+#include "mem.h"
+#include "thread.h"
+#include "avassert.h"
+
+#define MAX_AUTO_THREADS 16
+
+#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS
+
+typedef struct WorkerContext {
+ AVSliceThread *ctx;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_t thread;
+ int done;
+} WorkerContext;
+
+struct AVSliceThread {
+ WorkerContext *workers;
+ int nb_threads;
+ int nb_active_threads;
+ int nb_jobs;
+
+ atomic_uint first_job;
+ atomic_uint current_job;
+ pthread_mutex_t done_mutex;
+ pthread_cond_t done_cond;
+ int done;
+ int finished;
+
+ void *priv;
+ void (*worker_func)(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads);
+ void (*main_func)(void *priv);
+};
+
+static int run_jobs(AVSliceThread *ctx)
+{
+ unsigned nb_jobs = ctx->nb_jobs;
+ unsigned nb_active_threads = ctx->nb_active_threads;
+ unsigned first_job = atomic_fetch_add_explicit(&ctx->first_job, 1, memory_order_acq_rel);
+ unsigned current_job = first_job;
+
+ do {
+ ctx->worker_func(ctx->priv, current_job, first_job, nb_jobs, nb_active_threads);
+ } while ((current_job = atomic_fetch_add_explicit(&ctx->current_job, 1, memory_order_acq_rel)) < nb_jobs);
+
+ return current_job == nb_jobs + nb_active_threads - 1;
+}
+
+static void *attribute_align_arg thread_worker(void *v)
+{
+ WorkerContext *w = v;
+ AVSliceThread *ctx = w->ctx;
+
+ pthread_mutex_lock(&w->mutex);
+ pthread_cond_signal(&w->cond);
+
+ while (1) {
+ w->done = 1;
+ while (w->done)
+ pthread_cond_wait(&w->cond, &w->mutex);
+
+ if (ctx->finished) {
+ pthread_mutex_unlock(&w->mutex);
+ return NULL;
+ }
+
+ if (run_jobs(ctx)) {
+ pthread_mutex_lock(&ctx->done_mutex);
+ ctx->done = 1;
+ pthread_cond_signal(&ctx->done_cond);
+ pthread_mutex_unlock(&ctx->done_mutex);
+ }
+ }
+}
+
+int avpriv_slicethread_create(AVSliceThread **pctx, void *priv,
+ void (*worker_func)(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads),
+ void (*main_func)(void *priv),
+ int nb_threads)
+{
+ AVSliceThread *ctx;
+ int nb_workers, i;
+
+ av_assert0(nb_threads >= 0);
+ if (!nb_threads) {
+ int nb_cpus = av_cpu_count();
+ if (nb_cpus > 1)
+ nb_threads = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
+ else
+ nb_threads = 1;
+ }
+
+ nb_workers = nb_threads;
+ if (!main_func)
+ nb_workers--;
+
+ *pctx = ctx = av_mallocz(sizeof(*ctx));
+ if (!ctx)
+ return AVERROR(ENOMEM);
+
+ if (nb_workers && !(ctx->workers = av_calloc(nb_workers, sizeof(*ctx->workers)))) {
+ av_freep(pctx);
+ return AVERROR(ENOMEM);
+ }
+
+ ctx->priv = priv;
+ ctx->worker_func = worker_func;
+ ctx->main_func = main_func;
+ ctx->nb_threads = nb_threads;
+ ctx->nb_active_threads = 0;
+ ctx->nb_jobs = 0;
+ ctx->finished = 0;
+
+ atomic_init(&ctx->first_job, 0);
+ atomic_init(&ctx->current_job, 0);
+ pthread_mutex_init(&ctx->done_mutex, NULL);
+ pthread_cond_init(&ctx->done_cond, NULL);
+ ctx->done = 0;
+
+ for (i = 0; i < nb_workers; i++) {
+ WorkerContext *w = &ctx->workers[i];
+ int ret;
+ w->ctx = ctx;
+ pthread_mutex_init(&w->mutex, NULL);
+ pthread_cond_init(&w->cond, NULL);
+ pthread_mutex_lock(&w->mutex);
+ w->done = 0;
+
+ if (ret = pthread_create(&w->thread, NULL, thread_worker, w)) {
+ ctx->nb_threads = main_func ? i : i + 1;
+ pthread_mutex_unlock(&w->mutex);
+ pthread_cond_destroy(&w->cond);
+ pthread_mutex_destroy(&w->mutex);
+ avpriv_slicethread_free(pctx);
+ return AVERROR(ret);
+ }
+
+ while (!w->done)
+ pthread_cond_wait(&w->cond, &w->mutex);
+ pthread_mutex_unlock(&w->mutex);
+ }
+
+ return nb_threads;
+}
+
+void avpriv_slicethread_execute(AVSliceThread *ctx, int nb_jobs, int execute_main)
+{
+ int nb_workers, i, is_last = 0;
+
+ av_assert0(nb_jobs > 0);
+ ctx->nb_jobs = nb_jobs;
+ ctx->nb_active_threads = FFMIN(nb_jobs, ctx->nb_threads);
+ atomic_store_explicit(&ctx->first_job, 0, memory_order_relaxed);
+ atomic_store_explicit(&ctx->current_job, ctx->nb_active_threads, memory_order_relaxed);
+ nb_workers = ctx->nb_active_threads;
+ if (!ctx->main_func || !execute_main)
+ nb_workers--;
+
+ for (i = 0; i < nb_workers; i++) {
+ WorkerContext *w = &ctx->workers[i];
+ pthread_mutex_lock(&w->mutex);
+ w->done = 0;
+ pthread_cond_signal(&w->cond);
+ pthread_mutex_unlock(&w->mutex);
+ }
+
+ if (ctx->main_func && execute_main)
+ ctx->main_func(ctx->priv);
+ else
+ is_last = run_jobs(ctx);
+
+ if (!is_last) {
+ pthread_mutex_lock(&ctx->done_mutex);
+ while (!ctx->done)
+ pthread_cond_wait(&ctx->done_cond, &ctx->done_mutex);
+ ctx->done = 0;
+ pthread_mutex_unlock(&ctx->done_mutex);
+ }
+}
+
+void avpriv_slicethread_free(AVSliceThread **pctx)
+{
+ AVSliceThread *ctx;
+ int nb_workers, i;
+
+ if (!pctx || !*pctx)
+ return;
+
+ ctx = *pctx;
+ nb_workers = ctx->nb_threads;
+ if (!ctx->main_func)
+ nb_workers--;
+
+ ctx->finished = 1;
+ for (i = 0; i < nb_workers; i++) {
+ WorkerContext *w = &ctx->workers[i];
+ pthread_mutex_lock(&w->mutex);
+ w->done = 0;
+ pthread_cond_signal(&w->cond);
+ pthread_mutex_unlock(&w->mutex);
+ }
+
+ for (i = 0; i < nb_workers; i++) {
+ WorkerContext *w = &ctx->workers[i];
+ pthread_join(w->thread, NULL);
+ pthread_cond_destroy(&w->cond);
+ pthread_mutex_destroy(&w->mutex);
+ }
+
+ pthread_cond_destroy(&ctx->done_cond);
+ pthread_mutex_destroy(&ctx->done_mutex);
+ av_freep(&ctx->workers);
+ av_freep(pctx);
+}
+
+#else /* HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS */
+
+int avpriv_slicethread_create(AVSliceThread **pctx, void *priv,
+ void (*worker_func)(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads),
+ void (*main_func)(void *priv),
+ int nb_threads)
+{
+ *pctx = NULL;
+ return AVERROR(ENOSYS);
+}
+
+void avpriv_slicethread_execute(AVSliceThread *ctx, int nb_jobs, int execute_main)
+{
+ av_assert0(0);
+}
+
+void avpriv_slicethread_free(AVSliceThread **pctx)
+{
+ av_assert0(!pctx || !*pctx);
+}
+
+#endif /* HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS */
diff --git a/media/ffvpx/libavutil/slicethread.h b/media/ffvpx/libavutil/slicethread.h
new file mode 100644
index 0000000000..f6f6f302c4
--- /dev/null
+++ b/media/ffvpx/libavutil/slicethread.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SLICETHREAD_H
+#define AVUTIL_SLICETHREAD_H
+
+typedef struct AVSliceThread AVSliceThread;
+
+/**
+ * Create slice threading context.
+ * @param pctx slice threading context returned here
+ * @param priv private pointer to be passed to callback function
+ * @param worker_func callback function to be executed
+ * @param main_func special callback function, called from main thread, may be NULL
+ * @param nb_threads number of threads, 0 for automatic, must be >= 0
+ * @return return number of threads or negative AVERROR on failure
+ */
+int avpriv_slicethread_create(AVSliceThread **pctx, void *priv,
+ void (*worker_func)(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads),
+ void (*main_func)(void *priv),
+ int nb_threads);
+
+/**
+ * Execute slice threading.
+ * @param ctx slice threading context
+ * @param nb_jobs number of jobs, must be > 0
+ * @param execute_main also execute main_func
+ */
+void avpriv_slicethread_execute(AVSliceThread *ctx, int nb_jobs, int execute_main);
+
+/**
+ * Destroy slice threading context.
+ * @param pctx pointer to context
+ */
+void avpriv_slicethread_free(AVSliceThread **pctx);
+
+#endif
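
The header above is an internal API; a caller looks roughly like the sketch below (illustrative only, not part of the patch; FillCtx, fill_worker and fill_threaded are made-up names, and the worker simply clears horizontal bands of a buffer):

    #include <stdint.h>
    #include <string.h>
    #include "libavutil/slicethread.h"

    typedef struct FillCtx { uint8_t *buf; int linesize; int height; } FillCtx;

    /* Each job clears one horizontal band; jobs are handed out to threads by
     * the slice-thread machinery via the atomic job counters. */
    static void fill_worker(void *priv, int jobnr, int threadnr,
                            int nb_jobs, int nb_threads)
    {
        FillCtx *c = priv;
        int start = c->height *  jobnr      / nb_jobs;
        int end   = c->height * (jobnr + 1) / nb_jobs;
        for (int y = start; y < end; y++)
            memset(c->buf + y * c->linesize, 0, c->linesize);
    }

    static int fill_threaded(FillCtx *c)
    {
        AVSliceThread *st;
        int nb = avpriv_slicethread_create(&st, c, fill_worker, NULL, 0);
        if (nb < 0)
            return nb;                         /* AVERROR(ENOSYS) without threads */
        avpriv_slicethread_execute(st, 16, 0); /* 16 jobs, no main_func */
        avpriv_slicethread_free(&st);
        return 0;
    }
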
diff --git a/media/ffvpx/libavutil/thread.h b/media/ffvpx/libavutil/thread.h
new file mode 100644
index 0000000000..2f5e7e1cb5
--- /dev/null
+++ b/media/ffvpx/libavutil/thread.h
@@ -0,0 +1,204 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+// This header should only be used to simplify code where
+// threading is optional, not as a generic threading abstraction.
+
+#ifndef AVUTIL_THREAD_H
+#define AVUTIL_THREAD_H
+
+#include "config.h"
+
+#if HAVE_PRCTL
+#include <sys/prctl.h>
+#endif
+
+#include "error.h"
+
+#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS
+
+#if HAVE_PTHREADS
+#include <pthread.h>
+
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
+
+#include <stdlib.h>
+
+#include "log.h"
+#include "macros.h"
+
+#define ASSERT_PTHREAD_ABORT(func, ret) do { \
+ char errbuf[AV_ERROR_MAX_STRING_SIZE] = ""; \
+ av_log(NULL, AV_LOG_FATAL, AV_STRINGIFY(func) \
+ " failed with error: %s\n", \
+ av_make_error_string(errbuf, AV_ERROR_MAX_STRING_SIZE, \
+ AVERROR(ret))); \
+ abort(); \
+} while (0)
+
+#define ASSERT_PTHREAD_NORET(func, ...) do { \
+ int ret = func(__VA_ARGS__); \
+ if (ret) \
+ ASSERT_PTHREAD_ABORT(func, ret); \
+} while (0)
+
+#define ASSERT_PTHREAD(func, ...) do { \
+ ASSERT_PTHREAD_NORET(func, __VA_ARGS__); \
+ return 0; \
+} while (0)
+
+static inline int strict_pthread_join(pthread_t thread, void **value_ptr)
+{
+ ASSERT_PTHREAD(pthread_join, thread, value_ptr);
+}
+
+static inline int strict_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
+{
+ if (attr) {
+ ASSERT_PTHREAD_NORET(pthread_mutex_init, mutex, attr);
+ } else {
+ pthread_mutexattr_t local_attr;
+ ASSERT_PTHREAD_NORET(pthread_mutexattr_init, &local_attr);
+ ASSERT_PTHREAD_NORET(pthread_mutexattr_settype, &local_attr, PTHREAD_MUTEX_ERRORCHECK);
+ ASSERT_PTHREAD_NORET(pthread_mutex_init, mutex, &local_attr);
+ ASSERT_PTHREAD_NORET(pthread_mutexattr_destroy, &local_attr);
+ }
+ return 0;
+}
+
+static inline int strict_pthread_mutex_destroy(pthread_mutex_t *mutex)
+{
+ ASSERT_PTHREAD(pthread_mutex_destroy, mutex);
+}
+
+static inline int strict_pthread_mutex_lock(pthread_mutex_t *mutex)
+{
+ ASSERT_PTHREAD(pthread_mutex_lock, mutex);
+}
+
+static inline int strict_pthread_mutex_unlock(pthread_mutex_t *mutex)
+{
+ ASSERT_PTHREAD(pthread_mutex_unlock, mutex);
+}
+
+static inline int strict_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
+{
+ ASSERT_PTHREAD(pthread_cond_init, cond, attr);
+}
+
+static inline int strict_pthread_cond_destroy(pthread_cond_t *cond)
+{
+ ASSERT_PTHREAD(pthread_cond_destroy, cond);
+}
+
+static inline int strict_pthread_cond_signal(pthread_cond_t *cond)
+{
+ ASSERT_PTHREAD(pthread_cond_signal, cond);
+}
+
+static inline int strict_pthread_cond_broadcast(pthread_cond_t *cond)
+{
+ ASSERT_PTHREAD(pthread_cond_broadcast, cond);
+}
+
+static inline int strict_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+{
+ ASSERT_PTHREAD(pthread_cond_wait, cond, mutex);
+}
+
+static inline int strict_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime)
+{
+ int ret = pthread_cond_timedwait(cond, mutex, abstime);
+ if (ret && ret != ETIMEDOUT)
+ ASSERT_PTHREAD_ABORT(pthread_cond_timedwait, ret);
+ return ret;
+}
+
+static inline int strict_pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
+{
+ ASSERT_PTHREAD(pthread_once, once_control, init_routine);
+}
+
+#define pthread_join strict_pthread_join
+#define pthread_mutex_init strict_pthread_mutex_init
+#define pthread_mutex_destroy strict_pthread_mutex_destroy
+#define pthread_mutex_lock strict_pthread_mutex_lock
+#define pthread_mutex_unlock strict_pthread_mutex_unlock
+#define pthread_cond_init strict_pthread_cond_init
+#define pthread_cond_destroy strict_pthread_cond_destroy
+#define pthread_cond_signal strict_pthread_cond_signal
+#define pthread_cond_broadcast strict_pthread_cond_broadcast
+#define pthread_cond_wait strict_pthread_cond_wait
+#define pthread_cond_timedwait strict_pthread_cond_timedwait
+#define pthread_once strict_pthread_once
+#endif
+
+#elif HAVE_OS2THREADS
+#include "compat/os2threads.h"
+#else
+#include "compat/w32pthreads.h"
+#endif
+
+#define AVMutex pthread_mutex_t
+#define AV_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+#define ff_mutex_init pthread_mutex_init
+#define ff_mutex_lock pthread_mutex_lock
+#define ff_mutex_unlock pthread_mutex_unlock
+#define ff_mutex_destroy pthread_mutex_destroy
+
+#define AVOnce pthread_once_t
+#define AV_ONCE_INIT PTHREAD_ONCE_INIT
+
+#define ff_thread_once(control, routine) pthread_once(control, routine)
+
+#else
+
+#define AVMutex char
+#define AV_MUTEX_INITIALIZER 0
+
+static inline int ff_mutex_init(AVMutex *mutex, const void *attr){ return 0; }
+static inline int ff_mutex_lock(AVMutex *mutex){ return 0; }
+static inline int ff_mutex_unlock(AVMutex *mutex){ return 0; }
+static inline int ff_mutex_destroy(AVMutex *mutex){ return 0; }
+
+#define AVOnce char
+#define AV_ONCE_INIT 0
+
+static inline int ff_thread_once(char *control, void (*routine)(void))
+{
+ if (!*control) {
+ routine();
+ *control = 1;
+ }
+ return 0;
+}
+
+#endif
+
+static inline int ff_thread_setname(const char *name)
+{
+#if HAVE_PRCTL
+ return AVERROR(prctl(PR_SET_NAME, name));
+#endif
+
+ return AVERROR(ENOSYS);
+}
+
+#endif /* AVUTIL_THREAD_H */
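
A minimal sketch of how the wrappers above are meant to be used (illustrative only, not part of the patch; the names are made up). With thread support the macros map to pthreads, without it they compile to no-ops, so the call sites stay identical:

    #include "libavutil/thread.h"

    static AVMutex counter_lock = AV_MUTEX_INITIALIZER;
    static AVOnce  counter_once = AV_ONCE_INIT;
    static int     counter;

    static void init_counter(void)
    {
        counter = 0;   /* runs exactly once, even with concurrent callers */
    }

    static void bump_counter(void)
    {
        ff_thread_once(&counter_once, init_counter);
        ff_mutex_lock(&counter_lock);
        counter++;
        ff_mutex_unlock(&counter_lock);
    }
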
diff --git a/media/ffvpx/libavutil/threadmessage.c b/media/ffvpx/libavutil/threadmessage.c
new file mode 100644
index 0000000000..f0e23f28fc
--- /dev/null
+++ b/media/ffvpx/libavutil/threadmessage.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2014 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <limits.h>
+#include "fifo.h"
+#include "mem.h"
+#include "threadmessage.h"
+#include "thread.h"
+
+struct AVThreadMessageQueue {
+#if HAVE_THREADS
+ AVFifo *fifo;
+ pthread_mutex_t lock;
+ pthread_cond_t cond_recv;
+ pthread_cond_t cond_send;
+ int err_send;
+ int err_recv;
+ unsigned elsize;
+ void (*free_func)(void *msg);
+#else
+ int dummy;
+#endif
+};
+
+int av_thread_message_queue_alloc(AVThreadMessageQueue **mq,
+ unsigned nelem,
+ unsigned elsize)
+{
+#if HAVE_THREADS
+ AVThreadMessageQueue *rmq;
+ int ret = 0;
+
+ if (nelem > INT_MAX / elsize)
+ return AVERROR(EINVAL);
+ if (!(rmq = av_mallocz(sizeof(*rmq))))
+ return AVERROR(ENOMEM);
+ if ((ret = pthread_mutex_init(&rmq->lock, NULL))) {
+ av_free(rmq);
+ return AVERROR(ret);
+ }
+ if ((ret = pthread_cond_init(&rmq->cond_recv, NULL))) {
+ pthread_mutex_destroy(&rmq->lock);
+ av_free(rmq);
+ return AVERROR(ret);
+ }
+ if ((ret = pthread_cond_init(&rmq->cond_send, NULL))) {
+ pthread_cond_destroy(&rmq->cond_recv);
+ pthread_mutex_destroy(&rmq->lock);
+ av_free(rmq);
+ return AVERROR(ret);
+ }
+ if (!(rmq->fifo = av_fifo_alloc2(nelem, elsize, 0))) {
+ pthread_cond_destroy(&rmq->cond_send);
+ pthread_cond_destroy(&rmq->cond_recv);
+ pthread_mutex_destroy(&rmq->lock);
+ av_free(rmq);
+ return AVERROR(ENOMEM);
+ }
+ rmq->elsize = elsize;
+ *mq = rmq;
+ return 0;
+#else
+ *mq = NULL;
+ return AVERROR(ENOSYS);
+#endif /* HAVE_THREADS */
+}
+
+void av_thread_message_queue_set_free_func(AVThreadMessageQueue *mq,
+ void (*free_func)(void *msg))
+{
+#if HAVE_THREADS
+ mq->free_func = free_func;
+#endif
+}
+
+void av_thread_message_queue_free(AVThreadMessageQueue **mq)
+{
+#if HAVE_THREADS
+ if (*mq) {
+ av_thread_message_flush(*mq);
+ av_fifo_freep2(&(*mq)->fifo);
+ pthread_cond_destroy(&(*mq)->cond_send);
+ pthread_cond_destroy(&(*mq)->cond_recv);
+ pthread_mutex_destroy(&(*mq)->lock);
+ av_freep(mq);
+ }
+#endif
+}
+
+int av_thread_message_queue_nb_elems(AVThreadMessageQueue *mq)
+{
+#if HAVE_THREADS
+ int ret;
+ pthread_mutex_lock(&mq->lock);
+ ret = av_fifo_can_read(mq->fifo);
+ pthread_mutex_unlock(&mq->lock);
+ return ret;
+#else
+ return AVERROR(ENOSYS);
+#endif
+}
+
+#if HAVE_THREADS
+
+static int av_thread_message_queue_send_locked(AVThreadMessageQueue *mq,
+ void *msg,
+ unsigned flags)
+{
+ while (!mq->err_send && !av_fifo_can_write(mq->fifo)) {
+ if ((flags & AV_THREAD_MESSAGE_NONBLOCK))
+ return AVERROR(EAGAIN);
+ pthread_cond_wait(&mq->cond_send, &mq->lock);
+ }
+ if (mq->err_send)
+ return mq->err_send;
+ av_fifo_write(mq->fifo, msg, 1);
+ /* one message is sent, signal one receiver */
+ pthread_cond_signal(&mq->cond_recv);
+ return 0;
+}
+
+static int av_thread_message_queue_recv_locked(AVThreadMessageQueue *mq,
+ void *msg,
+ unsigned flags)
+{
+ while (!mq->err_recv && !av_fifo_can_read(mq->fifo)) {
+ if ((flags & AV_THREAD_MESSAGE_NONBLOCK))
+ return AVERROR(EAGAIN);
+ pthread_cond_wait(&mq->cond_recv, &mq->lock);
+ }
+ if (!av_fifo_can_read(mq->fifo))
+ return mq->err_recv;
+ av_fifo_read(mq->fifo, msg, 1);
+ /* one message space appeared, signal one sender */
+ pthread_cond_signal(&mq->cond_send);
+ return 0;
+}
+
+#endif /* HAVE_THREADS */
+
+int av_thread_message_queue_send(AVThreadMessageQueue *mq,
+ void *msg,
+ unsigned flags)
+{
+#if HAVE_THREADS
+ int ret;
+
+ pthread_mutex_lock(&mq->lock);
+ ret = av_thread_message_queue_send_locked(mq, msg, flags);
+ pthread_mutex_unlock(&mq->lock);
+ return ret;
+#else
+ return AVERROR(ENOSYS);
+#endif /* HAVE_THREADS */
+}
+
+int av_thread_message_queue_recv(AVThreadMessageQueue *mq,
+ void *msg,
+ unsigned flags)
+{
+#if HAVE_THREADS
+ int ret;
+
+ pthread_mutex_lock(&mq->lock);
+ ret = av_thread_message_queue_recv_locked(mq, msg, flags);
+ pthread_mutex_unlock(&mq->lock);
+ return ret;
+#else
+ return AVERROR(ENOSYS);
+#endif /* HAVE_THREADS */
+}
+
+void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq,
+ int err)
+{
+#if HAVE_THREADS
+ pthread_mutex_lock(&mq->lock);
+ mq->err_send = err;
+ pthread_cond_broadcast(&mq->cond_send);
+ pthread_mutex_unlock(&mq->lock);
+#endif /* HAVE_THREADS */
+}
+
+void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq,
+ int err)
+{
+#if HAVE_THREADS
+ pthread_mutex_lock(&mq->lock);
+ mq->err_recv = err;
+ pthread_cond_broadcast(&mq->cond_recv);
+ pthread_mutex_unlock(&mq->lock);
+#endif /* HAVE_THREADS */
+}
+
+#if HAVE_THREADS
+static int free_func_wrap(void *arg, void *buf, size_t *nb_elems)
+{
+ AVThreadMessageQueue *mq = arg;
+ uint8_t *msg = buf;
+ for (size_t i = 0; i < *nb_elems; i++)
+ mq->free_func(msg + i * mq->elsize);
+ return 0;
+}
+#endif
+
+void av_thread_message_flush(AVThreadMessageQueue *mq)
+{
+#if HAVE_THREADS
+ size_t used;
+
+ pthread_mutex_lock(&mq->lock);
+ used = av_fifo_can_read(mq->fifo);
+ if (mq->free_func)
+ av_fifo_read_to_cb(mq->fifo, free_func_wrap, mq, &used);
+ /* only the senders need to be notified since the queue is empty and there
+ * is nothing to read */
+ pthread_cond_broadcast(&mq->cond_send);
+ pthread_mutex_unlock(&mq->lock);
+#endif /* HAVE_THREADS */
+}
diff --git a/media/ffvpx/libavutil/threadmessage.h b/media/ffvpx/libavutil/threadmessage.h
new file mode 100644
index 0000000000..42ce655f36
--- /dev/null
+++ b/media/ffvpx/libavutil/threadmessage.h
@@ -0,0 +1,115 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_THREADMESSAGE_H
+#define AVUTIL_THREADMESSAGE_H
+
+typedef struct AVThreadMessageQueue AVThreadMessageQueue;
+
+typedef enum AVThreadMessageFlags {
+
+ /**
+ * Perform non-blocking operation.
+ * If this flag is set, send and recv operations are non-blocking and
+ * return AVERROR(EAGAIN) immediately if they can not proceed.
+ */
+ AV_THREAD_MESSAGE_NONBLOCK = 1,
+
+} AVThreadMessageFlags;
+
+/**
+ * Allocate a new message queue.
+ *
+ * @param mq pointer to the message queue
+ * @param nelem maximum number of elements in the queue
+ * @param elsize size of each element in the queue
+ * @return >=0 for success; <0 for error, in particular AVERROR(ENOSYS) if
+ * lavu was built without thread support
+ */
+int av_thread_message_queue_alloc(AVThreadMessageQueue **mq,
+ unsigned nelem,
+ unsigned elsize);
+
+/**
+ * Free a message queue.
+ *
+ * The message queue must no longer be in use by another thread.
+ */
+void av_thread_message_queue_free(AVThreadMessageQueue **mq);
+
+/**
+ * Send a message on the queue.
+ */
+int av_thread_message_queue_send(AVThreadMessageQueue *mq,
+ void *msg,
+ unsigned flags);
+
+/**
+ * Receive a message from the queue.
+ */
+int av_thread_message_queue_recv(AVThreadMessageQueue *mq,
+ void *msg,
+ unsigned flags);
+
+/**
+ * Set the sending error code.
+ *
+ * If the error code is set to non-zero, av_thread_message_queue_send() will
+ * return it immediately. Conventional values, such as AVERROR_EOF or
+ * AVERROR(EAGAIN), can be used to cause the sending thread to stop or
+ * suspend its operation.
+ */
+void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq,
+ int err);
+
+/**
+ * Set the receiving error code.
+ *
+ * If the error code is set to non-zero, av_thread_message_queue_recv() will
+ * return it immediately when there are no longer available messages.
+ * Conventional values, such as AVERROR_EOF or AVERROR(EAGAIN), can be used
+ * to cause the receiving thread to stop or suspend its operation.
+ */
+void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq,
+ int err);
+
+/**
+ * Set the optional free message callback function which will be called if an
+ * operation is removing messages from the queue.
+ */
+void av_thread_message_queue_set_free_func(AVThreadMessageQueue *mq,
+ void (*free_func)(void *msg));
+
+/**
+ * Return the current number of messages in the queue.
+ *
+ * @return the current number of messages or AVERROR(ENOSYS) if lavu was built
+ * without thread support
+ */
+int av_thread_message_queue_nb_elems(AVThreadMessageQueue *mq);
+
+/**
+ * Flush the message queue
+ *
+ * This function is mostly equivalent to reading and freeing every message,
+ * except that it will be done in a single operation (no lock/unlock between
+ * reads).
+ */
+void av_thread_message_flush(AVThreadMessageQueue *mq);
+
+#endif /* AVUTIL_THREADMESSAGE_H */
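
A sketch of a round trip through the queue, using only the functions declared above (illustrative, not part of the patch; WorkItem and queue_roundtrip are made-up names, and messages are copied by value, elsize bytes at a time):

    #include "libavutil/threadmessage.h"
    #include "libavutil/error.h"

    typedef struct WorkItem { int serial; void *payload; } WorkItem;

    static int queue_roundtrip(void)
    {
        AVThreadMessageQueue *mq;
        WorkItem in = { 1, NULL }, out;
        int ret = av_thread_message_queue_alloc(&mq, 8, sizeof(WorkItem));
        if (ret < 0)
            return ret;  /* AVERROR(ENOSYS) if lavu was built without threads */

        ret = av_thread_message_queue_send(mq, &in, 0);
        if (ret >= 0)
            ret = av_thread_message_queue_recv(mq, &out, 0);

        /* wake any blocked sender before tearing the queue down */
        av_thread_message_queue_set_err_send(mq, AVERROR_EOF);
        av_thread_message_queue_free(&mq);
        return ret;
    }
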
diff --git a/media/ffvpx/libavutil/time.c b/media/ffvpx/libavutil/time.c
new file mode 100644
index 0000000000..83743da0b2
--- /dev/null
+++ b/media/ffvpx/libavutil/time.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2000-2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <time.h>
+#if HAVE_GETTIMEOFDAY
+#include <sys/time.h>
+#endif
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#if HAVE_WINDOWS_H
+#include <windows.h>
+#endif
+
+#include "fftime.h"
+#include "error.h"
+
+int64_t av_gettime(void)
+{
+#if HAVE_GETTIMEOFDAY
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+#elif HAVE_GETSYSTEMTIMEASFILETIME
+ FILETIME ft;
+ int64_t t;
+ GetSystemTimeAsFileTime(&ft);
+ t = (int64_t)ft.dwHighDateTime << 32 | ft.dwLowDateTime;
+ return t / 10 - 11644473600000000; /* Jan 1, 1601 */
+#else
+ return -1;
+#endif
+}
+
+int64_t av_gettime_relative(void)
+{
+#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
+#ifdef __APPLE__
+ if (&clock_gettime)
+#endif
+ {
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
+ }
+#endif
+ return av_gettime() + 42 * 60 * 60 * INT64_C(1000000);
+}
+
+int av_gettime_relative_is_monotonic(void)
+{
+#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
+#ifdef __APPLE__
+ if (!&clock_gettime)
+ return 0;
+#endif
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+int av_usleep(unsigned usec)
+{
+#if HAVE_NANOSLEEP
+ struct timespec ts = { usec / 1000000, usec % 1000000 * 1000 };
+ while (nanosleep(&ts, &ts) < 0 && errno == EINTR);
+ return 0;
+#elif HAVE_USLEEP
+ return usleep(usec);
+#elif HAVE_SLEEP
+ Sleep(usec / 1000);
+ return 0;
+#else
+ return AVERROR(ENOSYS);
+#endif
+}
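
A sketch of timing a sleep with the functions defined above (illustrative, not part of the patch; sleep_and_report is a made-up name, and the header is included as "fftime.h" to match the #include used by this file in the ffvpx tree, presumably renamed from upstream time.h):

    #include <inttypes.h>
    #include <stdio.h>
    #include "fftime.h"

    static void sleep_and_report(void)
    {
        int64_t t0 = av_gettime_relative();  /* microseconds */
        av_usleep(50000);                    /* ~50 ms */
        int64_t t1 = av_gettime_relative();
        printf("slept ~%"PRId64" us (monotonic=%d)\n",
               t1 - t0, av_gettime_relative_is_monotonic());
    }
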
diff --git a/media/ffvpx/libavutil/time_internal.h b/media/ffvpx/libavutil/time_internal.h
new file mode 100644
index 0000000000..d0f007ab1c
--- /dev/null
+++ b/media/ffvpx/libavutil/time_internal.h
@@ -0,0 +1,49 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_TIME_INTERNAL_H
+#define AVUTIL_TIME_INTERNAL_H
+
+#include <time.h>
+#include "config.h"
+
+#if !HAVE_GMTIME_R && !defined(gmtime_r)
+static inline struct tm *ff_gmtime_r(const time_t* clock, struct tm *result)
+{
+ struct tm *ptr = gmtime(clock);
+ if (!ptr)
+ return NULL;
+ *result = *ptr;
+ return result;
+}
+#define gmtime_r ff_gmtime_r
+#endif
+
+#if !HAVE_LOCALTIME_R && !defined(localtime_r)
+static inline struct tm *ff_localtime_r(const time_t* clock, struct tm *result)
+{
+ struct tm *ptr = localtime(clock);
+ if (!ptr)
+ return NULL;
+ *result = *ptr;
+ return result;
+}
+#define localtime_r ff_localtime_r
+#endif
+
+#endif /* AVUTIL_TIME_INTERNAL_H */
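
With the fallbacks above in place, callers can use gmtime_r()/localtime_r() unconditionally; a tiny sketch (illustrative, not part of the patch; print_utc_date is a made-up name):

    #include <stdio.h>
    #include <time.h>
    #include "libavutil/time_internal.h"

    static void print_utc_date(time_t t)
    {
        struct tm tm;
        if (gmtime_r(&t, &tm))
            printf("%04d-%02d-%02d\n",
                   tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
    }
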
diff --git a/media/ffvpx/libavutil/timecode.c b/media/ffvpx/libavutil/timecode.c
new file mode 100644
index 0000000000..b93f05b4b8
--- /dev/null
+++ b/media/ffvpx/libavutil/timecode.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier <baptiste.coudurier@gmail.com>
+ * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Timecode helpers
+ * @see https://en.wikipedia.org/wiki/SMPTE_time_code
+ * @see http://www.dropframetimecode.org
+ */
+
+#include <stdio.h>
+#include "common.h"
+#include "timecode.h"
+#include "log.h"
+#include "error.h"
+
+int av_timecode_adjust_ntsc_framenum2(int framenum, int fps)
+{
+ /* only works for multiples of NTSC 29.97 */
+ int drop_frames = 0;
+ int d, m, frames_per_10mins;
+
+ if (fps && fps % 30 == 0) {
+ drop_frames = fps / 30 * 2;
+ frames_per_10mins = fps / 30 * 17982;
+ } else
+ return framenum;
+
+ d = framenum / frames_per_10mins;
+ m = framenum % frames_per_10mins;
+
+ return framenum + 9U * drop_frames * d + drop_frames * ((m - drop_frames) / (frames_per_10mins / 10));
+}
+
+uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum)
+{
+ unsigned fps = tc->fps;
+ int drop = !!(tc->flags & AV_TIMECODE_FLAG_DROPFRAME);
+ int hh, mm, ss, ff;
+
+ framenum += tc->start;
+ if (drop)
+ framenum = av_timecode_adjust_ntsc_framenum2(framenum, tc->fps);
+ ff = framenum % fps;
+ ss = framenum / fps % 60;
+ mm = framenum / (fps*60) % 60;
+ hh = framenum / (fps*3600) % 24;
+ return av_timecode_get_smpte(tc->rate, drop, hh, mm, ss, ff);
+}
+
+uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
+{
+ uint32_t tc = 0;
+
+ /* For SMPTE 12-M timecodes, frame count is a special case if > 30 FPS.
+ See SMPTE ST 12-1:2014 Sec 12.1 for more info. */
+ if (av_cmp_q(rate, (AVRational) {30, 1}) == 1) {
+ if (ff % 2 == 1) {
+ if (av_cmp_q(rate, (AVRational) {50, 1}) == 0)
+ tc |= (1 << 7);
+ else
+ tc |= (1 << 23);
+ }
+ ff /= 2;
+ }
+
+ hh = hh % 24;
+ mm = av_clip(mm, 0, 59);
+ ss = av_clip(ss, 0, 59);
+ ff = ff % 40;
+
+ tc |= drop << 30;
+ tc |= (ff / 10) << 28;
+ tc |= (ff % 10) << 24;
+ tc |= (ss / 10) << 20;
+ tc |= (ss % 10) << 16;
+ tc |= (mm / 10) << 12;
+ tc |= (mm % 10) << 8;
+ tc |= (hh / 10) << 4;
+ tc |= (hh % 10);
+
+ return tc;
+}
+
+char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum)
+{
+ int fps = tc->fps;
+ int drop = tc->flags & AV_TIMECODE_FLAG_DROPFRAME;
+ int hh, mm, ss, ff, ff_len, neg = 0;
+
+ framenum += tc->start;
+ if (drop)
+ framenum = av_timecode_adjust_ntsc_framenum2(framenum, fps);
+ if (framenum < 0) {
+ framenum = -framenum;
+ neg = tc->flags & AV_TIMECODE_FLAG_ALLOWNEGATIVE;
+ }
+ ff = framenum % fps;
+ ss = framenum / fps % 60;
+ mm = framenum / (fps*60LL) % 60;
+ hh = framenum / (fps*3600LL);
+ if (tc->flags & AV_TIMECODE_FLAG_24HOURSMAX)
+ hh = hh % 24;
+ ff_len = fps > 10000 ? 5 : fps > 1000 ? 4 : fps > 100 ? 3 : fps > 10 ? 2 : 1;
+ snprintf(buf, AV_TIMECODE_STR_SIZE, "%s%02d:%02d:%02d%c%0*d",
+ neg ? "-" : "",
+ hh, mm, ss, drop ? ';' : ':', ff_len, ff);
+ return buf;
+}
+
+static unsigned bcd2uint(uint8_t bcd)
+{
+ unsigned low = bcd & 0xf;
+ unsigned high = bcd >> 4;
+ if (low > 9 || high > 9)
+ return 0;
+ return low + 10*high;
+}
+
+char *av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
+{
+ unsigned hh = bcd2uint(tcsmpte & 0x3f); // 6-bit hours
+ unsigned mm = bcd2uint(tcsmpte>>8 & 0x7f); // 7-bit minutes
+ unsigned ss = bcd2uint(tcsmpte>>16 & 0x7f); // 7-bit seconds
+ unsigned ff = bcd2uint(tcsmpte>>24 & 0x3f); // 6-bit frames
+ unsigned drop = tcsmpte & 1<<30 && !prevent_df; // 1-bit drop if not arbitrary bit
+
+ if (av_cmp_q(rate, (AVRational) {30, 1}) == 1) {
+ ff <<= 1;
+ if (!skip_field) {
+ if (av_cmp_q(rate, (AVRational) {50, 1}) == 0)
+ ff += !!(tcsmpte & 1 << 7);
+ else
+ ff += !!(tcsmpte & 1 << 23);
+ }
+ }
+
+ snprintf(buf, AV_TIMECODE_STR_SIZE, "%02u:%02u:%02u%c%02u",
+ hh, mm, ss, drop ? ';' : ':', ff);
+ return buf;
+
+}
+
+char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df)
+{
+ return av_timecode_make_smpte_tc_string2(buf, (AVRational){30, 1}, tcsmpte, prevent_df, 1);
+}
+
+char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
+{
+ snprintf(buf, AV_TIMECODE_STR_SIZE,
+ "%02"PRIu32":%02"PRIu32":%02"PRIu32"%c%02"PRIu32,
+ tc25bit>>19 & 0x1f, // 5-bit hours
+ tc25bit>>13 & 0x3f, // 6-bit minutes
+ tc25bit>>6 & 0x3f, // 6-bit seconds
+ tc25bit & 1<<24 ? ';' : ':', // 1-bit drop flag
+ tc25bit & 0x3f); // 6-bit frames
+ return buf;
+}
+
+static int check_fps(int fps)
+{
+ int i;
+ static const int supported_fps[] = {
+ 24, 25, 30, 48, 50, 60, 100, 120, 150,
+ };
+
+ for (i = 0; i < FF_ARRAY_ELEMS(supported_fps); i++)
+ if (fps == supported_fps[i])
+ return 0;
+ return -1;
+}
+
+static int check_timecode(void *log_ctx, AVTimecode *tc)
+{
+ if ((int)tc->fps <= 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Valid timecode frame rate must be specified. Minimum value is 1\n");
+ return AVERROR(EINVAL);
+ }
+ if ((tc->flags & AV_TIMECODE_FLAG_DROPFRAME) && tc->fps % 30 != 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Drop frame is only allowed with multiples of 30000/1001 FPS\n");
+ return AVERROR(EINVAL);
+ }
+ if (check_fps(tc->fps) < 0) {
+ av_log(log_ctx, AV_LOG_WARNING, "Using non-standard frame rate %d/%d\n",
+ tc->rate.num, tc->rate.den);
+ }
+ return 0;
+}
+
+static int fps_from_frame_rate(AVRational rate)
+{
+ if (!rate.den || !rate.num)
+ return -1;
+ return (rate.num + rate.den/2) / rate.den;
+}
+
+int av_timecode_check_frame_rate(AVRational rate)
+{
+ return check_fps(fps_from_frame_rate(rate));
+}
+
+int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx)
+{
+ memset(tc, 0, sizeof(*tc));
+ tc->start = frame_start;
+ tc->flags = flags;
+ tc->rate = rate;
+ tc->fps = fps_from_frame_rate(rate);
+ return check_timecode(log_ctx, tc);
+}
+
+int av_timecode_init_from_components(AVTimecode *tc, AVRational rate, int flags, int hh, int mm, int ss, int ff, void *log_ctx)
+{
+ int ret;
+
+ memset(tc, 0, sizeof(*tc));
+ tc->flags = flags;
+ tc->rate = rate;
+ tc->fps = fps_from_frame_rate(rate);
+
+ ret = check_timecode(log_ctx, tc);
+ if (ret < 0)
+ return ret;
+
+ tc->start = (hh*3600 + mm*60 + ss) * tc->fps + ff;
+ if (tc->flags & AV_TIMECODE_FLAG_DROPFRAME) { /* adjust frame number */
+ int tmins = 60*hh + mm;
+ tc->start -= (tc->fps / 30 * 2) * (tmins - tmins/10);
+ }
+ return 0;
+}
+
+int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx)
+{
+ char c;
+ int hh, mm, ss, ff, flags;
+
+ if (sscanf(str, "%d:%d:%d%c%d", &hh, &mm, &ss, &c, &ff) != 5) {
+ av_log(log_ctx, AV_LOG_ERROR, "Unable to parse timecode, "
+ "syntax: hh:mm:ss[:;.]ff\n");
+ return AVERROR_INVALIDDATA;
+ }
+ flags = c != ':' ? AV_TIMECODE_FLAG_DROPFRAME : 0; // drop if ';', '.', ...
+
+ return av_timecode_init_from_components(tc, rate, flags, hh, mm, ss, ff, log_ctx);
+}
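
The drop-frame adjustment implemented above can be sanity-checked with a short worked example for the fps == 30 (i.e. 30000/1001) case; the numbers below follow av_timecode_adjust_ntsc_framenum2() step by step and are not part of the patch:

    /* fps = 30  =>  drop_frames = 2, frames_per_10mins = 17982
     *
     * framenum = 1800 (nominally the first frame of minute 1):
     *   d = 1800 / 17982 = 0,  m = 1800
     *   adjusted = 1800 + 9*2*0 + 2 * ((1800 - 2) / 1798) = 1800 + 2 = 1802
     *
     * Formatting 1802 gives 00:01:00;02, while framenum 1799 stays at
     * 00:00:59;29, so frame counts ;00 and ;01 are skipped at the start of
     * every minute that is not a multiple of ten, which is exactly the
     * SMPTE drop-frame rule.
     */
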
diff --git a/media/ffvpx/libavutil/timecode.h b/media/ffvpx/libavutil/timecode.h
new file mode 100644
index 0000000000..060574a172
--- /dev/null
+++ b/media/ffvpx/libavutil/timecode.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier <baptiste.coudurier@gmail.com>
+ * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Timecode helpers header
+ */
+
+#ifndef AVUTIL_TIMECODE_H
+#define AVUTIL_TIMECODE_H
+
+#include <stdint.h>
+#include "rational.h"
+
+#define AV_TIMECODE_STR_SIZE 23
+
+enum AVTimecodeFlag {
+ AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame
+ AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours
+ AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed
+};
+
+typedef struct {
+ int start; ///< timecode frame start (first base frame number)
+ uint32_t flags; ///< flags such as drop frame, +24 hours support, ...
+ AVRational rate; ///< frame rate in rational form
+ unsigned fps; ///< frame per second; must be consistent with the rate field
+} AVTimecode;
+
+/**
+ * Adjust frame number for NTSC drop frame time code.
+ *
+ * @param framenum frame number to adjust
+ * @param fps frame per second, multiples of 30
+ * @return adjusted frame number
+ * @warning adjustment is only valid for multiples of NTSC 29.97
+ */
+int av_timecode_adjust_ntsc_framenum2(int framenum, int fps);
+
+/**
+ * Convert frame number to SMPTE 12M binary representation.
+ *
+ * @param tc timecode data correctly initialized
+ * @param framenum frame number
+ * @return the SMPTE binary representation
+ *
+ * See SMPTE ST 314M-2005 Sec 4.4.2.2.1 "Time code pack (TC)";
+ * the format is as follows:
+ * bits 0-5: hours, in BCD(6bits)
+ * bits 6: BGF1
+ * bits 7: BGF2 (NTSC) or FIELD (PAL)
+ * bits 8-14: minutes, in BCD(7bits)
+ * bits 15: BGF0 (NTSC) or BGF2 (PAL)
+ * bits 16-22: seconds, in BCD(7bits)
+ * bits 23: FIELD (NTSC) or BGF0 (PAL)
+ * bits 24-29: frames, in BCD(6bits)
+ * bits 30: drop frame flag (0: non drop, 1: drop)
+ * bits 31: color frame flag (0: unsync mode, 1: sync mode)
+ * @note BCD numbers (6 or 7 bits): 4 or 5 lower bits for units, 2 higher bits for tens.
+ * @note Frame number adjustment is automatically done in case of drop timecode,
+ * you do NOT have to call av_timecode_adjust_ntsc_framenum2().
+ * @note The frame number is relative to tc->start.
+ * @note Color frame (CF) and binary group flags (BGF) bits are set to zero.
+ */
+uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum);
+
+/**
+ * Convert sei info to SMPTE 12M binary representation.
+ *
+ * @param rate frame rate in rational form
+ * @param drop drop flag
+ * @param hh hour
+ * @param mm minute
+ * @param ss second
+ * @param ff frame number
+ * @return the SMPTE binary representation
+ */
+uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff);
+
+/**
+ * Load timecode string in buf.
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tc timecode data correctly initialized
+ * @param framenum frame number
+ * @return the buf parameter
+ *
+ * @note Timecode representation can be a negative timecode and have more than
+ * 24 hours, but will only be honored if the flags are correctly set.
+ * @note The frame number is relative to tc->start.
+ */
+char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum);
+
+/**
+ * Get the timecode string from the SMPTE timecode format.
+ *
+ * In contrast to av_timecode_make_smpte_tc_string this function supports 50/60
+ * fps timecodes by using the field bit.
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param rate frame rate of the timecode
+ * @param tcsmpte the 32-bit SMPTE timecode
+ * @param prevent_df prevent the use of a drop flag when it is known the DF bit
+ * is arbitrary
+ * @param skip_field prevent the use of a field flag when it is known the field
+ * bit is arbitrary (e.g. because it is used as PC flag)
+ * @return the buf parameter
+ */
+char *av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field);
+
+/**
+ * Get the timecode string from the SMPTE timecode format.
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tcsmpte the 32-bit SMPTE timecode
+ * @param prevent_df prevent the use of a drop flag when it is known the DF bit
+ * is arbitrary
+ * @return the buf parameter
+ */
+char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df);
+
+/**
+ * Get the timecode string from the 25-bit timecode format (MPEG GOP format).
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tc25bit the 25-bits timecode
+ * @return the buf parameter
+ */
+char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit);
+
+/**
+ * Init a timecode struct with the passed parameters.
+ *
+ * @param log_ctx a pointer to an arbitrary struct of which the first field
+ * is a pointer to an AVClass struct (used for av_log)
+ * @param tc pointer to an allocated AVTimecode
+ * @param rate frame rate in rational form
+ * @param flags miscellaneous flags such as drop frame, +24 hours, ...
+ * (see AVTimecodeFlag)
+ * @param frame_start the first frame number
+ * @return 0 on success, AVERROR otherwise
+ */
+int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx);
+
+/**
+ * Init a timecode struct from the passed timecode components.
+ *
+ * @param log_ctx a pointer to an arbitrary struct of which the first field
+ * is a pointer to an AVClass struct (used for av_log)
+ * @param tc pointer to an allocated AVTimecode
+ * @param rate frame rate in rational form
+ * @param flags miscellaneous flags such as drop frame, +24 hours, ...
+ * (see AVTimecodeFlag)
+ * @param hh hours
+ * @param mm minutes
+ * @param ss seconds
+ * @param ff frames
+ * @return 0 on success, AVERROR otherwise
+ */
+int av_timecode_init_from_components(AVTimecode *tc, AVRational rate, int flags, int hh, int mm, int ss, int ff, void *log_ctx);
+
+/**
+ * Parse timecode representation (hh:mm:ss[:;.]ff).
+ *
+ * @param log_ctx a pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct (used for av_log).
+ * @param tc pointer to an allocated AVTimecode
+ * @param rate frame rate in rational form
+ * @param str timecode string which will determine the frame start
+ * @return 0 on success, AVERROR otherwise
+ */
+int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx);
+
+/**
+ * Check if the timecode feature is available for the given frame rate
+ *
+ * @return 0 if supported, <0 otherwise
+ */
+int av_timecode_check_frame_rate(AVRational rate);
+
+#endif /* AVUTIL_TIMECODE_H */
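
A sketch tying the init and formatting helpers together for drop-frame NTSC (illustrative, not part of the patch; print_drop_frame_tc is a made-up name, and the expected output matches the worked example after timecode.c above):

    #include <stdio.h>
    #include "libavutil/timecode.h"

    static void print_drop_frame_tc(void)
    {
        AVTimecode tc;
        char buf[AV_TIMECODE_STR_SIZE];
        AVRational ntsc = { 30000, 1001 };

        if (av_timecode_init(&tc, ntsc, AV_TIMECODE_FLAG_DROPFRAME, 0, NULL) < 0)
            return;
        printf("%s\n", av_timecode_make_string(&tc, buf, 1800)); /* 00:01:00;02 */
    }
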
diff --git a/media/ffvpx/libavutil/timer.h b/media/ffvpx/libavutil/timer.h
new file mode 100644
index 0000000000..d3db5a27ef
--- /dev/null
+++ b/media/ffvpx/libavutil/timer.h
@@ -0,0 +1,156 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * high precision timer, useful to profile code
+ */
+
+#ifndef AVUTIL_TIMER_H
+#define AVUTIL_TIMER_H
+
+#include "config.h"
+
+#if CONFIG_LINUX_PERF
+# ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+# endif
+# include <unistd.h> // read(3)
+# include <sys/ioctl.h>
+# include <asm/unistd.h>
+# include <linux/perf_event.h>
+#endif
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#if CONFIG_MACOS_KPERF
+#include "macos_kperf.h"
+#elif HAVE_MACH_ABSOLUTE_TIME
+#include <mach/mach_time.h>
+#endif
+
+#include "common.h"
+#include "log.h"
+
+#if ARCH_AARCH64
+# include "aarch64/timer.h"
+#elif ARCH_ARM
+# include "arm/timer.h"
+#elif ARCH_PPC
+# include "ppc/timer.h"
+#elif ARCH_RISCV
+# include "riscv/timer.h"
+#elif ARCH_X86
+# include "x86/timer.h"
+#endif
+
+#if !defined(AV_READ_TIME)
+# if HAVE_GETHRTIME
+# define AV_READ_TIME gethrtime
+# elif HAVE_MACH_ABSOLUTE_TIME
+# define AV_READ_TIME mach_absolute_time
+# endif
+#endif
+
+#ifndef FF_TIMER_UNITS
+# define FF_TIMER_UNITS "UNITS"
+#endif
+
+#define TIMER_REPORT(id, tdiff) \
+ { \
+ static uint64_t tsum = 0; \
+ static int tcount = 0; \
+ static int tskip_count = 0; \
+ static int thistogram[32] = {0}; \
+ thistogram[av_log2(tdiff)]++; \
+ if (tcount < 2 || \
+ (tdiff) < 8 * tsum / tcount || \
+ (tdiff) < 2000) { \
+ tsum += (tdiff); \
+ tcount++; \
+ } else \
+ tskip_count++; \
+ if (((tcount + tskip_count) & (tcount + tskip_count - 1)) == 0) { \
+ int i; \
+ av_log(NULL, AV_LOG_ERROR, \
+ "%7" PRIu64 " " FF_TIMER_UNITS " in %s,%8d runs,%7d skips",\
+ tsum * 10 / tcount, id, tcount, tskip_count); \
+ for (i = 0; i < 32; i++) \
+ av_log(NULL, AV_LOG_VERBOSE, " %2d", av_log2(2*thistogram[i]));\
+ av_log(NULL, AV_LOG_ERROR, "\n"); \
+ } \
+ }
+
+#if CONFIG_LINUX_PERF
+
+#define START_TIMER \
+ static int linux_perf_fd; \
+ uint64_t tperf; \
+ if (!linux_perf_fd) { \
+ struct perf_event_attr attr = { \
+ .type = PERF_TYPE_HARDWARE, \
+ .size = sizeof(struct perf_event_attr), \
+ .config = PERF_COUNT_HW_CPU_CYCLES, \
+ .disabled = 1, \
+ .exclude_kernel = 1, \
+ .exclude_hv = 1, \
+ }; \
+ linux_perf_fd = syscall(__NR_perf_event_open, &attr, \
+ 0, -1, -1, 0); \
+ } \
+ if (linux_perf_fd == -1) { \
+ av_log(NULL, AV_LOG_ERROR, "perf_event_open failed: %s\n", \
+ av_err2str(AVERROR(errno))); \
+ } else { \
+ ioctl(linux_perf_fd, PERF_EVENT_IOC_RESET, 0); \
+ ioctl(linux_perf_fd, PERF_EVENT_IOC_ENABLE, 0); \
+ }
+
+#define STOP_TIMER(id) \
+ ioctl(linux_perf_fd, PERF_EVENT_IOC_DISABLE, 0); \
+ read(linux_perf_fd, &tperf, sizeof(tperf)); \
+ TIMER_REPORT(id, tperf)
+
+#elif CONFIG_MACOS_KPERF
+
+#define START_TIMER \
+ uint64_t tperf; \
+ ff_kperf_init(); \
+ tperf = ff_kperf_cycles();
+
+#define STOP_TIMER(id) \
+ TIMER_REPORT(id, ff_kperf_cycles() - tperf);
+
+#elif defined(AV_READ_TIME)
+#define START_TIMER \
+ uint64_t tend; \
+ uint64_t tstart = AV_READ_TIME(); \
+
+#define STOP_TIMER(id) \
+ tend = AV_READ_TIME(); \
+ TIMER_REPORT(id, tend - tstart)
+#else
+#define START_TIMER
+#define STOP_TIMER(id) { }
+#endif
+
+#endif /* AVUTIL_TIMER_H */
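
START_TIMER/STOP_TIMER are meant to bracket a statement inside a function body; statistics are printed whenever the accumulated run count hits a power of two. A minimal sketch, not part of the patch (profiled_memset is a made-up name):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include "libavutil/timer.h"

    static void profiled_memset(uint8_t *dst, size_t n)
    {
        START_TIMER
        memset(dst, 0, n);
        STOP_TIMER("memset")
    }
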
diff --git a/media/ffvpx/libavutil/timestamp.h b/media/ffvpx/libavutil/timestamp.h
new file mode 100644
index 0000000000..e082f01b40
--- /dev/null
+++ b/media/ffvpx/libavutil/timestamp.h
@@ -0,0 +1,78 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * timestamp utils, mostly useful for debugging/logging purposes
+ */
+
+#ifndef AVUTIL_TIMESTAMP_H
+#define AVUTIL_TIMESTAMP_H
+
+#include "common.h"
+
+#if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS) && !defined(PRId64)
+#error missing -D__STDC_FORMAT_MACROS / #define __STDC_FORMAT_MACROS
+#endif
+
+#define AV_TS_MAX_STRING_SIZE 32
+
+/**
+ * Fill the provided buffer with a string containing a timestamp
+ * representation.
+ *
+ * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE
+ * @param ts the timestamp to represent
+ * @return the buffer in input
+ */
+static inline char *av_ts_make_string(char *buf, int64_t ts)
+{
+ if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS");
+ else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%" PRId64, ts);
+ return buf;
+}
+
+/**
+ * Convenience macro; the return value should be used only directly in
+ * function arguments and never stored, since it points to a temporary buffer
+ * whose lifetime is limited to the enclosing block.
+ */
+#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts)
+
+/**
+ * Fill the provided buffer with a string containing a timestamp time
+ * representation.
+ *
+ * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE
+ * @param ts the timestamp to represent
+ * @param tb the timebase of the timestamp
+ * @return the buffer in input
+ */
+static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb)
+{
+ if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS");
+ else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts);
+ return buf;
+}
+
+/**
+ * Convenience macro; the return value should be used only directly in
+ * function arguments and never stored, since it points to a temporary buffer
+ * whose lifetime is limited to the enclosing block.
+ */
+#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb)
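+
+/*
+ * Usage sketch (illustrative only; assumes an AVPacket *pkt and an
+ * AVRational *time_base in scope):
+ *
+ *     av_log(NULL, AV_LOG_DEBUG, "pts:%s pts_time:%s\n",
+ *            av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base));
+ */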
+
+#endif /* AVUTIL_TIMESTAMP_H */
diff --git a/media/ffvpx/libavutil/utils.c b/media/ffvpx/libavutil/utils.c
new file mode 100644
index 0000000000..94d247bbee
--- /dev/null
+++ b/media/ffvpx/libavutil/utils.c
@@ -0,0 +1,112 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "avutil.h"
+#include "avassert.h"
+
+/**
+ * @file
+ * various utility functions
+ */
+
+const char *av_get_media_type_string(enum AVMediaType media_type)
+{
+ switch (media_type) {
+ case AVMEDIA_TYPE_VIDEO: return "video";
+ case AVMEDIA_TYPE_AUDIO: return "audio";
+ case AVMEDIA_TYPE_DATA: return "data";
+ case AVMEDIA_TYPE_SUBTITLE: return "subtitle";
+ case AVMEDIA_TYPE_ATTACHMENT: return "attachment";
+ default: return NULL;
+ }
+}
+
+char av_get_picture_type_char(enum AVPictureType pict_type)
+{
+ switch (pict_type) {
+ case AV_PICTURE_TYPE_I: return 'I';
+ case AV_PICTURE_TYPE_P: return 'P';
+ case AV_PICTURE_TYPE_B: return 'B';
+ case AV_PICTURE_TYPE_S: return 'S';
+ case AV_PICTURE_TYPE_SI: return 'i';
+ case AV_PICTURE_TYPE_SP: return 'p';
+ case AV_PICTURE_TYPE_BI: return 'b';
+ default: return '?';
+ }
+}
+
+unsigned av_int_list_length_for_size(unsigned elsize,
+ const void *list, uint64_t term)
+{
+ unsigned i;
+
+ if (!list)
+ return 0;
+#define LIST_LENGTH(type) \
+ { type t = term, *l = (type *)list; for (i = 0; l[i] != t; i++); }
+ switch (elsize) {
+ case 1: LIST_LENGTH(uint8_t); break;
+ case 2: LIST_LENGTH(uint16_t); break;
+ case 4: LIST_LENGTH(uint32_t); break;
+ case 8: LIST_LENGTH(uint64_t); break;
+ default: av_assert0(!"valid element size");
+ }
+ return i;
+}
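+
+/*
+ * Illustrative example (values made up): for a zero-terminated list of
+ * 32-bit integers,
+ *
+ *     static const uint32_t list[] = { 10, 20, 30, 0 };
+ *     unsigned n = av_int_list_length_for_size(sizeof(*list), list, 0);
+ *     // n == 3; the terminating element itself is not counted
+ */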
+
+char *av_fourcc_make_string(char *buf, uint32_t fourcc)
+{
+ int i;
+ char *orig_buf = buf;
+ size_t buf_size = AV_FOURCC_MAX_STRING_SIZE;
+
+ for (i = 0; i < 4; i++) {
+ const int c = fourcc & 0xff;
+ const int print_chr = (c >= '0' && c <= '9') ||
+ (c >= 'a' && c <= 'z') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c && strchr(". -_", c));
+ const int len = snprintf(buf, buf_size, print_chr ? "%c" : "[%d]", c);
+ if (len < 0)
+ break;
+ buf += len;
+ buf_size = buf_size > len ? buf_size - len : 0;
+ fourcc >>= 8;
+ }
+
+ return orig_buf;
+}
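+
+/*
+ * Illustrative example (MKTAG() comes from libavutil; the buffer must hold at
+ * least AV_FOURCC_MAX_STRING_SIZE bytes):
+ *
+ *     char buf[AV_FOURCC_MAX_STRING_SIZE];
+ *     av_fourcc_make_string(buf, MKTAG('a', 'v', 'c', '1'));  // yields "avc1"
+ */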
+
+AVRational av_get_time_base_q(void)
+{
+ return (AVRational){1, AV_TIME_BASE};
+}
+
+void av_assert0_fpu(void) {
+#if HAVE_MMX_INLINE
+ uint16_t state[14];
+ __asm__ volatile (
+ "fstenv %0 \n\t"
+ : "+m" (state)
+ :
+ : "memory"
+ );
+ av_assert0((state[4] & 3) == 3);
+#endif
+}
diff --git a/media/ffvpx/libavutil/version.h b/media/ffvpx/libavutil/version.h
new file mode 100644
index 0000000000..900b798971
--- /dev/null
+++ b/media/ffvpx/libavutil/version.h
@@ -0,0 +1,122 @@
+/*
+ * copyright (c) 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Libavutil version macros
+ */
+
+#ifndef AVUTIL_VERSION_H
+#define AVUTIL_VERSION_H
+
+#include "macros.h"
+
+/**
+ * @addtogroup version_utils
+ *
+ * Useful to check and match the library version in order to maintain
+ * backward compatibility.
+ *
+ * The FFmpeg libraries follow a versioning scheme very similar to
+ * Semantic Versioning (http://semver.org/).
+ * The difference is that the component called PATCH is called MICRO in FFmpeg
+ * and its value is reset to 100 instead of 0 to keep it above or equal to 100.
+ * Also, MICRO is not increased for every bugfix or change in git master.
+ *
+ * Prior to FFmpeg 3.2, point releases did not change any library version
+ * number, to avoid aliasing different git master checkouts.
+ * Starting with FFmpeg 3.2, released library versions occupy a separate
+ * MAJOR.MINOR that is not used on the master development branch. That is,
+ * if a release is branched from master at 55.10.123, the release is bumped to
+ * 55.11.100 and master continues at 55.12.100 after it. Each new point
+ * release then bumps MICRO, improving the usefulness of the library versions.
+ *
+ * @{
+ */
+
+#define AV_VERSION_INT(a, b, c) ((a)<<16 | (b)<<8 | (c))
+#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c
+#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
+
+/**
+ * Extract version components from the full ::AV_VERSION_INT int as returned
+ * by functions like ::avformat_version() and ::avcodec_version()
+ */
+#define AV_VERSION_MAJOR(a) ((a) >> 16)
+#define AV_VERSION_MINOR(a) (((a) & 0x00FF00) >> 8)
+#define AV_VERSION_MICRO(a) ((a) & 0xFF)
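+
+/*
+ * A small usage sketch (illustrative only; avutil_version() is declared in
+ * avutil.h and returns the run-time LIBAVUTIL_VERSION_INT):
+ *
+ *     unsigned v = avutil_version();
+ *     av_log(NULL, AV_LOG_INFO, "libavutil %u.%u.%u\n",
+ *            AV_VERSION_MAJOR(v), AV_VERSION_MINOR(v), AV_VERSION_MICRO(v));
+ */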
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_ver Version and Build diagnostics
+ *
+ * Macros and functions useful for checking, at compile time and at runtime,
+ * which version of libavutil is in use.
+ *
+ * @{
+ */
+
+#define LIBAVUTIL_VERSION_MAJOR 58
+#define LIBAVUTIL_VERSION_MINOR 3
+#define LIBAVUTIL_VERSION_MICRO 100
+
+#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
+
+#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
+
+/**
+ * @defgroup lavu_depr_guards Deprecation Guards
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @note When bumping the major version, it is recommended to manually
+ * disable each FF_API_* in its own commit instead of disabling them all
+ * at once through the bump. This improves the git bisectability of the change.
+ *
+ * @{
+ */
+
+#define FF_API_FIFO_PEEK2 (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_FIFO_OLD_API (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_OLD_CHANNEL_LAYOUT (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_AV_FOPEN_UTF8 (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_PKT_DURATION (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_REORDERED_OPAQUE (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_FRAME_PICTURE_NUMBER (LIBAVUTIL_VERSION_MAJOR < 59)
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_VERSION_H */
diff --git a/media/ffvpx/libavutil/video_enc_params.c b/media/ffvpx/libavutil/video_enc_params.c
new file mode 100644
index 0000000000..33592dc128
--- /dev/null
+++ b/media/ffvpx/libavutil/video_enc_params.c
@@ -0,0 +1,80 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "buffer.h"
+#include "frame.h"
+#include "mem.h"
+#include "video_enc_params.h"
+
+AVVideoEncParams *av_video_enc_params_alloc(enum AVVideoEncParamsType type,
+ unsigned int nb_blocks, size_t *out_size)
+{
+ struct TestStruct {
+ AVVideoEncParams p;
+ AVVideoBlockParams b;
+ };
+ const size_t blocks_offset = offsetof(struct TestStruct, b);
+ size_t size = blocks_offset;
+ AVVideoEncParams *par;
+
+ if (nb_blocks > (SIZE_MAX - size) / sizeof(AVVideoBlockParams))
+ return NULL;
+ size += sizeof(AVVideoBlockParams) * nb_blocks;
+
+ par = av_mallocz(size);
+ if (!par)
+ return NULL;
+
+ par->type = type;
+ par->nb_blocks = nb_blocks;
+ par->block_size = sizeof(AVVideoBlockParams);
+ par->blocks_offset = blocks_offset;
+
+ if (out_size)
+ *out_size = size;
+
+ return par;
+}
+
+AVVideoEncParams*
+av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type,
+ unsigned int nb_blocks)
+{
+ AVBufferRef *buf;
+ AVVideoEncParams *par;
+ size_t size;
+
+ par = av_video_enc_params_alloc(type, nb_blocks, &size);
+ if (!par)
+ return NULL;
+ buf = av_buffer_create((uint8_t *)par, size, NULL, NULL, 0);
+ if (!buf) {
+ av_freep(&par);
+ return NULL;
+ }
+
+ if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_VIDEO_ENC_PARAMS, buf)) {
+ av_buffer_unref(&buf);
+ return NULL;
+ }
+
+ return par;
+}
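+
+/*
+ * Producer-side sketch (illustrative only; error handling is omitted and
+ * base_qindex / nb_blocks are made-up variables):
+ *
+ *     AVVideoEncParams *par = av_video_enc_params_create_side_data(
+ *         frame, AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
+ *     if (par)
+ *         par->qp = base_qindex;
+ */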
diff --git a/media/ffvpx/libavutil/video_enc_params.h b/media/ffvpx/libavutil/video_enc_params.h
new file mode 100644
index 0000000000..fc0c3bc1a5
--- /dev/null
+++ b/media/ffvpx/libavutil/video_enc_params.h
@@ -0,0 +1,171 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_VIDEO_ENC_PARAMS_H
+#define AVUTIL_VIDEO_ENC_PARAMS_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/frame.h"
+
+enum AVVideoEncParamsType {
+ AV_VIDEO_ENC_PARAMS_NONE = -1,
+ /**
+ * VP9 stores:
+ * - per-frame base (luma AC) quantizer index, exported as AVVideoEncParams.qp
+ * - deltas for luma DC, chroma AC and chroma DC, exported in the
+ * corresponding entries in AVVideoEncParams.delta_qp
+     * - per-segment delta, exported for each block as AVVideoBlockParams.delta_qp
+ *
+ * To compute the resulting quantizer index for a block:
+ * - for luma AC, add the base qp and the per-block delta_qp, saturating to
+ * unsigned 8-bit.
+     * - for luma DC and chroma AC/DC, add the corresponding
+     *   AVVideoEncParams.delta_qp entry to the luma AC index, again saturating
+     *   to unsigned 8-bit.
+ */
+ AV_VIDEO_ENC_PARAMS_VP9,
+
+ /**
+ * H.264 stores:
+ * - in PPS (per-picture):
+ * * initial QP_Y (luma) value, exported as AVVideoEncParams.qp
+ * * delta(s) for chroma QP values (same for both, or each separately),
+     *     exported in the corresponding entries in AVVideoEncParams.delta_qp
+ * - per-slice QP delta, not exported directly, added to the per-MB value
+     * - per-MB delta; not exported directly; the final per-MB quantizer
+     *   parameter - QP_Y - minus the value in AVVideoEncParams.qp is exported
+     *   as AVVideoBlockParams.delta_qp.
+ */
+ AV_VIDEO_ENC_PARAMS_H264,
+
+ /*
+ * MPEG-2-compatible quantizer.
+ *
+ * Summing the frame-level qp with the per-block delta_qp gives the
+ * resulting quantizer for the block.
+ */
+ AV_VIDEO_ENC_PARAMS_MPEG2,
+};
+
+/**
+ * Video encoding parameters for a given frame. This struct is allocated along
+ * with an optional array of per-block AVVideoBlockParams descriptors.
+ * Must be allocated with av_video_enc_params_alloc().
+ */
+typedef struct AVVideoEncParams {
+ /**
+ * Number of blocks in the array.
+ *
+ * May be 0, in which case no per-block information is present. In this case
+ * the values of blocks_offset / block_size are unspecified and should not
+ * be accessed.
+ */
+ unsigned int nb_blocks;
+ /**
+ * Offset in bytes from the beginning of this structure at which the array
+ * of blocks starts.
+ */
+ size_t blocks_offset;
+ /*
+ * Size of each block in bytes. May not match sizeof(AVVideoBlockParams).
+ */
+ size_t block_size;
+
+ /**
+ * Type of the parameters (the codec they are used with).
+ */
+ enum AVVideoEncParamsType type;
+
+ /**
+ * Base quantisation parameter for the frame. The final quantiser for a
+ * given block in a given plane is obtained from this value, possibly
+ * combined with {@code delta_qp} and the per-block delta in a manner
+ * documented for each type.
+ */
+ int32_t qp;
+
+ /**
+ * Quantisation parameter offset from the base (per-frame) qp for a given
+ * plane (first index) and AC/DC coefficients (second index).
+ */
+ int32_t delta_qp[4][2];
+} AVVideoEncParams;
+
+/**
+ * Data structure for storing block-level encoding information.
+ * It is allocated as a part of AVVideoEncParams and should be retrieved with
+ * av_video_enc_params_block().
+ *
+ * sizeof(AVVideoBlockParams) is not a part of the ABI and new fields may be
+ * added to it.
+ */
+typedef struct AVVideoBlockParams {
+ /**
+ * Distance in luma pixels from the top-left corner of the visible frame
+ * to the top-left corner of the block.
+ * Can be negative if top/right padding is present on the coded frame.
+ */
+ int src_x, src_y;
+ /**
+ * Width and height of the block in luma pixels.
+ */
+ int w, h;
+
+ /**
+ * Difference between this block's final quantization parameter and the
+ * corresponding per-frame value.
+ */
+ int32_t delta_qp;
+} AVVideoBlockParams;
+
+/*
+ * Get the block at the specified {@code idx}, which must be in the range
+ * [0, nb_blocks - 1].
+ */
+static av_always_inline AVVideoBlockParams*
+av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
+{
+ av_assert0(idx < par->nb_blocks);
+ return (AVVideoBlockParams *)((uint8_t *)par + par->blocks_offset +
+ idx * par->block_size);
+}
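+
+/*
+ * Illustrative consumer-side loop (assumes a valid AVVideoEncParams *par,
+ * e.g. obtained from AV_FRAME_DATA_VIDEO_ENC_PARAMS side data):
+ *
+ *     for (unsigned int i = 0; i < par->nb_blocks; i++) {
+ *         AVVideoBlockParams *b = av_video_enc_params_block(par, i);
+ *         // use b->src_x, b->src_y, b->w, b->h, b->delta_qp
+ *     }
+ */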
+
+/**
+ * Allocates memory for AVVideoEncParams of the given type, plus an array of
+ * {@code nb_blocks} AVVideoBlockParams and initializes the variables. Can be
+ * freed with a normal av_free() call.
+ *
+ * @param out_size if non-NULL, the size in bytes of the resulting data array is
+ * written here.
+ */
+AVVideoEncParams *av_video_enc_params_alloc(enum AVVideoEncParamsType type,
+ unsigned int nb_blocks, size_t *out_size);
+
+/**
+ * Allocates memory for AVVideoEncParams plus an array of
+ * {@code nb_blocks} AVVideoBlockParams in the given AVFrame {@code frame},
+ * as AVFrameSideData of type AV_FRAME_DATA_VIDEO_ENC_PARAMS,
+ * and initializes the variables.
+ */
+AVVideoEncParams*
+av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type,
+ unsigned int nb_blocks);
+
+#endif /* AVUTIL_VIDEO_ENC_PARAMS_H */
diff --git a/media/ffvpx/libavutil/x86/asm.h b/media/ffvpx/libavutil/x86/asm.h
new file mode 100644
index 0000000000..9bff42d628
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/asm.h
@@ -0,0 +1,154 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_X86_ASM_H
+#define AVUTIL_X86_ASM_H
+
+#include <stdint.h>
+#include "config.h"
+
+typedef struct xmm_reg { uint64_t a, b; } xmm_reg;
+typedef struct ymm_reg { uint64_t a, b, c, d; } ymm_reg;
+
+#if ARCH_X86_64
+# define FF_OPSIZE "q"
+# define FF_REG_a "rax"
+# define FF_REG_b "rbx"
+# define FF_REG_c "rcx"
+# define FF_REG_d "rdx"
+# define FF_REG_D "rdi"
+# define FF_REG_S "rsi"
+# define FF_PTR_SIZE "8"
+typedef int64_t x86_reg;
+
+/* FF_REG_SP is defined in Solaris sys headers, so use FF_REG_sp */
+# define FF_REG_sp "rsp"
+# define FF_REG_BP "rbp"
+# define FF_REGBP rbp
+# define FF_REGa rax
+# define FF_REGb rbx
+# define FF_REGc rcx
+# define FF_REGd rdx
+# define FF_REGSP rsp
+
+#elif ARCH_X86_32
+
+# define FF_OPSIZE "l"
+# define FF_REG_a "eax"
+# define FF_REG_b "ebx"
+# define FF_REG_c "ecx"
+# define FF_REG_d "edx"
+# define FF_REG_D "edi"
+# define FF_REG_S "esi"
+# define FF_PTR_SIZE "4"
+typedef int32_t x86_reg;
+
+# define FF_REG_sp "esp"
+# define FF_REG_BP "ebp"
+# define FF_REGBP ebp
+# define FF_REGa eax
+# define FF_REGb ebx
+# define FF_REGc ecx
+# define FF_REGd edx
+# define FF_REGSP esp
+#else
+typedef int x86_reg;
+#endif
+
+#define HAVE_7REGS (ARCH_X86_64 || (HAVE_EBX_AVAILABLE && HAVE_EBP_AVAILABLE))
+#define HAVE_6REGS (ARCH_X86_64 || (HAVE_EBX_AVAILABLE || HAVE_EBP_AVAILABLE))
+
+#if ARCH_X86_64 && defined(PIC)
+# define BROKEN_RELOCATIONS 1
+#endif
+
+/*
+ * If gcc is not set to support sse (-msse) it will not accept xmm registers
+ * in the clobber list for inline asm. XMM_CLOBBERS takes a list of xmm
+ * registers to be marked as clobbered and evaluates to nothing if they are
+ * not supported, or to the list itself if they are supported. Since a clobber
+ * list may not be empty, XMM_CLOBBERS_ONLY should be used if the xmm
+ * registers are the only ones in the clobber list.
+ * For example, a list with "eax" and "xmm0" as clobbers should become:
+ * : XMM_CLOBBERS("xmm0",) "eax"
+ * and a list with only "xmm0" should become:
+ * XMM_CLOBBERS_ONLY("xmm0")
+ */
+#if HAVE_XMM_CLOBBERS
+# define XMM_CLOBBERS(...) __VA_ARGS__
+# define XMM_CLOBBERS_ONLY(...) : __VA_ARGS__
+#else
+# define XMM_CLOBBERS(...)
+# define XMM_CLOBBERS_ONLY(...)
+#endif
+
+/* Use to export labels from asm. */
+#define LABEL_MANGLE(a) EXTERN_PREFIX #a
+
+// Use rip-relative addressing if compiling PIC code on x86-64.
+#if ARCH_X86_64 && defined(PIC)
+# define LOCAL_MANGLE(a) #a "(%%rip)"
+#else
+# define LOCAL_MANGLE(a) #a
+#endif
+
+#if HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS
+# define MANGLE(a) EXTERN_PREFIX LOCAL_MANGLE(a)
+# define NAMED_CONSTRAINTS_ADD(...)
+# define NAMED_CONSTRAINTS(...)
+# define NAMED_CONSTRAINTS_ARRAY_ADD(...)
+# define NAMED_CONSTRAINTS_ARRAY(...)
+#else
+ /* When direct symbol references are used in code passed to a compiler that does not support them,
+  * these references need to be converted to named asm constraints instead.
+  * Instead of returning a direct symbol, MANGLE now returns a named constraint for that specific symbol.
+  * In order for this to work there must also be a corresponding entry in the asm interface. To add this
+  * entry, use the macro NAMED_CONSTRAINTS() and pass in a list of each symbol reference used in the
+  * corresponding block of code (e.g. NAMED_CONSTRAINTS(var1,var2,var3) where var1 is the first symbol, etc.).
+  * If there are already existing constraints, use NAMED_CONSTRAINTS_ADD to add to the existing constraint list.
+ */
+# define MANGLE(a) "%["#a"]"
+ // Intel/MSVC does not correctly expand variadic macro arguments, so a rather ugly hack is needed to get this to work
+# define FE_0(P,X) P(X)
+# define FE_1(P,X,X1) P(X), FE_0(P,X1)
+# define FE_2(P,X,X1,X2) P(X), FE_1(P,X1,X2)
+# define FE_3(P,X,X1,X2,X3) P(X), FE_2(P,X1,X2,X3)
+# define FE_4(P,X,X1,X2,X3,X4) P(X), FE_3(P,X1,X2,X3,X4)
+# define FE_5(P,X,X1,X2,X3,X4,X5) P(X), FE_4(P,X1,X2,X3,X4,X5)
+# define FE_6(P,X,X1,X2,X3,X4,X5,X6) P(X), FE_5(P,X1,X2,X3,X4,X5,X6)
+# define FE_7(P,X,X1,X2,X3,X4,X5,X6,X7) P(X), FE_6(P,X1,X2,X3,X4,X5,X6,X7)
+# define FE_8(P,X,X1,X2,X3,X4,X5,X6,X7,X8) P(X), FE_7(P,X1,X2,X3,X4,X5,X6,X7,X8)
+# define FE_9(P,X,X1,X2,X3,X4,X5,X6,X7,X8,X9) P(X), FE_8(P,X1,X2,X3,X4,X5,X6,X7,X8,X9)
+# define GET_FE_IMPL(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,NAME,...) NAME
+# define GET_FE(A) GET_FE_IMPL A
+# define GET_FE_GLUE(x, y) x y
+# define FOR_EACH_VA(P,...) GET_FE_GLUE(GET_FE((__VA_ARGS__,FE_9,FE_8,FE_7,FE_6,FE_5,FE_4,FE_3,FE_2,FE_1,FE_0)), (P,__VA_ARGS__))
+# define NAME_CONSTRAINT(x) [x] "m"(x)
+ // Parameters are a list of each symbol reference required
+# define NAMED_CONSTRAINTS_ADD(...) , FOR_EACH_VA(NAME_CONSTRAINT,__VA_ARGS__)
+ // Same but without comma for when there are no previously defined constraints
+# define NAMED_CONSTRAINTS(...) FOR_EACH_VA(NAME_CONSTRAINT,__VA_ARGS__)
+ // Same as above NAMED_CONSTRAINTS except used for passing arrays/pointers instead of normal variables
+# define NAME_CONSTRAINT_ARRAY(x) [x] "m"(*x)
+# define NAMED_CONSTRAINTS_ARRAY_ADD(...) , FOR_EACH_VA(NAME_CONSTRAINT_ARRAY,__VA_ARGS__)
+# define NAMED_CONSTRAINTS_ARRAY(...) FOR_EACH_VA(NAME_CONSTRAINT_ARRAY,__VA_ARGS__)
+#endif
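+
+/*
+ * Illustrative only (ff_some_const is a hypothetical symbol): inline asm
+ * referencing a global constant is typically written as
+ *
+ *     __asm__ volatile ("movq "MANGLE(ff_some_const)", %%mm0 \n\t"
+ *                       : "+r"(dst)
+ *                       : "r"(src) NAMED_CONSTRAINTS_ADD(ff_some_const)
+ *                       : "memory");
+ */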
+
+#endif /* AVUTIL_X86_ASM_H */
diff --git a/media/ffvpx/libavutil/x86/bswap.h b/media/ffvpx/libavutil/x86/bswap.h
new file mode 100644
index 0000000000..b2f18b6c93
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/bswap.h
@@ -0,0 +1,88 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * byte swapping routines
+ */
+
+#ifndef AVUTIL_X86_BSWAP_H
+#define AVUTIL_X86_BSWAP_H
+
+#include <stdint.h>
+#if defined(_MSC_VER)
+#include <stdlib.h>
+#include <intrin.h>
+#endif
+#include "config.h"
+#include "libavutil/attributes.h"
+
+#if defined(_MSC_VER)
+
+#define av_bswap16 av_bswap16
+static av_always_inline av_const uint16_t av_bswap16(uint16_t x)
+{
+ return _rotr16(x, 8);
+}
+
+#define av_bswap32 av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ return _byteswap_ulong(x);
+}
+
+#if ARCH_X86_64
+#define av_bswap64 av_bswap64
+static inline uint64_t av_const av_bswap64(uint64_t x)
+{
+ return _byteswap_uint64(x);
+}
+#endif
+
+
+#elif HAVE_INLINE_ASM
+
+#if AV_GCC_VERSION_AT_MOST(4,0)
+#define av_bswap16 av_bswap16
+static av_always_inline av_const unsigned av_bswap16(unsigned x)
+{
+ __asm__("rorw $8, %w0" : "+r"(x));
+ return x;
+}
+#endif /* AV_GCC_VERSION_AT_MOST(4,0) */
+
+#if AV_GCC_VERSION_AT_MOST(4,4) || defined(__INTEL_COMPILER)
+#define av_bswap32 av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ __asm__("bswap %0" : "+r" (x));
+ return x;
+}
+
+#if ARCH_X86_64
+#define av_bswap64 av_bswap64
+static inline uint64_t av_const av_bswap64(uint64_t x)
+{
+ __asm__("bswap %0": "=r" (x) : "0" (x));
+ return x;
+}
+#endif
+#endif /* AV_GCC_VERSION_AT_MOST(4,4) */
+
+#endif /* HAVE_INLINE_ASM */
+#endif /* AVUTIL_X86_BSWAP_H */
diff --git a/media/ffvpx/libavutil/x86/cpu.c b/media/ffvpx/libavutil/x86/cpu.c
new file mode 100644
index 0000000000..d6cd4fab9c
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/cpu.c
@@ -0,0 +1,284 @@
+/*
+ * CPU detection code, extracted from mmx.h
+ * (c)1997-99 by H. Dietz and R. Fisher
+ * Converted to C and improved by Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavutil/cpu.h"
+#include "libavutil/cpu_internal.h"
+
+#if HAVE_X86ASM
+
+#define cpuid(index, eax, ebx, ecx, edx) \
+ ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)
+
+#define xgetbv(index, eax, edx) \
+ ff_cpu_xgetbv(index, &eax, &edx)
+
+#elif HAVE_INLINE_ASM
+
+/* ebx saving is necessary for PIC. gcc seems unable to handle it on its own. */
+#define cpuid(index, eax, ebx, ecx, edx) \
+ __asm__ volatile ( \
+ "mov %%"FF_REG_b", %%"FF_REG_S" \n\t" \
+ "cpuid \n\t" \
+ "xchg %%"FF_REG_b", %%"FF_REG_S \
+ : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx) \
+ : "0" (index), "2"(0))
+
+#define xgetbv(index, eax, edx) \
+ __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))
+
+#define get_eflags(x) \
+ __asm__ volatile ("pushfl \n" \
+ "pop %0 \n" \
+ : "=r"(x))
+
+#define set_eflags(x) \
+ __asm__ volatile ("push %0 \n" \
+ "popfl \n" \
+ :: "r"(x))
+
+#endif /* HAVE_INLINE_ASM */
+
+#if ARCH_X86_64
+
+#define cpuid_test() 1
+
+#elif HAVE_X86ASM
+
+#define cpuid_test ff_cpu_cpuid_test
+
+#elif HAVE_INLINE_ASM
+
+static int cpuid_test(void)
+{
+ x86_reg a, c;
+
+ /* Check if CPUID is supported by attempting to toggle the ID bit in
+ * the EFLAGS register. */
+ get_eflags(a);
+ set_eflags(a ^ 0x200000);
+ get_eflags(c);
+
+ return a != c;
+}
+#endif
+
+/* Function to test if multimedia instructions are supported... */
+int ff_get_cpu_flags_x86(void)
+{
+ int rval = 0;
+
+#ifdef cpuid
+
+ int eax, ebx, ecx, edx;
+ int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
+ int family = 0, model = 0;
+ union { int i[3]; char c[12]; } vendor;
+ int xcr0_lo = 0, xcr0_hi = 0;
+
+ if (!cpuid_test())
+ return 0; /* CPUID not supported */
+
+ cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);
+
+ if (max_std_level >= 1) {
+ cpuid(1, eax, ebx, ecx, std_caps);
+ family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+ model = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
+ if (std_caps & (1 << 15))
+ rval |= AV_CPU_FLAG_CMOV;
+ if (std_caps & (1 << 23))
+ rval |= AV_CPU_FLAG_MMX;
+ if (std_caps & (1 << 25))
+ rval |= AV_CPU_FLAG_MMXEXT;
+#if HAVE_SSE
+ if (std_caps & (1 << 25))
+ rval |= AV_CPU_FLAG_SSE;
+ if (std_caps & (1 << 26))
+ rval |= AV_CPU_FLAG_SSE2;
+ if (ecx & 1)
+ rval |= AV_CPU_FLAG_SSE3;
+ if (ecx & 0x00000200 )
+ rval |= AV_CPU_FLAG_SSSE3;
+ if (ecx & 0x00080000 )
+ rval |= AV_CPU_FLAG_SSE4;
+ if (ecx & 0x00100000 )
+ rval |= AV_CPU_FLAG_SSE42;
+ if (ecx & 0x02000000 )
+ rval |= AV_CPU_FLAG_AESNI;
+#if HAVE_AVX
+        /* Check OSXSAVE and AVX bits */
+ if ((ecx & 0x18000000) == 0x18000000) {
+ /* Check for OS support */
+ xgetbv(0, xcr0_lo, xcr0_hi);
+ if ((xcr0_lo & 0x6) == 0x6) {
+ rval |= AV_CPU_FLAG_AVX;
+ if (ecx & 0x00001000)
+ rval |= AV_CPU_FLAG_FMA3;
+ }
+ }
+#endif /* HAVE_AVX */
+#endif /* HAVE_SSE */
+ }
+ if (max_std_level >= 7) {
+ cpuid(7, eax, ebx, ecx, edx);
+#if HAVE_AVX2
+ if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020))
+ rval |= AV_CPU_FLAG_AVX2;
+#if HAVE_AVX512 /* F, CD, BW, DQ, VL */
+ if ((xcr0_lo & 0xe0) == 0xe0) { /* OPMASK/ZMM state */
+ if ((rval & AV_CPU_FLAG_AVX2) && (ebx & 0xd0030000) == 0xd0030000) {
+ rval |= AV_CPU_FLAG_AVX512;
+#if HAVE_AVX512ICL
+ if ((ebx & 0xd0200000) == 0xd0200000 && (ecx & 0x5f42) == 0x5f42)
+ rval |= AV_CPU_FLAG_AVX512ICL;
+#endif /* HAVE_AVX512ICL */
+ }
+ }
+#endif /* HAVE_AVX512 */
+#endif /* HAVE_AVX2 */
+ /* BMI1/2 don't need OS support */
+ if (ebx & 0x00000008) {
+ rval |= AV_CPU_FLAG_BMI1;
+ if (ebx & 0x00000100)
+ rval |= AV_CPU_FLAG_BMI2;
+ }
+ }
+
+ cpuid(0x80000000, max_ext_level, ebx, ecx, edx);
+
+ if (max_ext_level >= 0x80000001) {
+ cpuid(0x80000001, eax, ebx, ecx, ext_caps);
+ if (ext_caps & (1U << 31))
+ rval |= AV_CPU_FLAG_3DNOW;
+ if (ext_caps & (1 << 30))
+ rval |= AV_CPU_FLAG_3DNOWEXT;
+ if (ext_caps & (1 << 23))
+ rval |= AV_CPU_FLAG_MMX;
+ if (ext_caps & (1 << 22))
+ rval |= AV_CPU_FLAG_MMXEXT;
+
+ if (!strncmp(vendor.c, "AuthenticAMD", 12)) {
+ /* Allow for selectively disabling SSE2 functions on AMD processors
+ with SSE2 support but not SSE4a. This includes Athlon64, some
+ Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
+ than SSE2 often enough to utilize this special-case flag.
+ AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
+ so that SSE2 is used unless explicitly disabled by checking
+ AV_CPU_FLAG_SSE2SLOW. */
+ if (rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040))
+ rval |= AV_CPU_FLAG_SSE2SLOW;
+
+ /* Similar to the above but for AVX functions on AMD processors.
+ This is necessary only for functions using YMM registers on Bulldozer
+ and Jaguar based CPUs as they lack 256-bit execution units. SSE/AVX
+ functions using XMM registers are always faster on them.
+ AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set so that AVX is
+ used unless explicitly disabled by checking AV_CPU_FLAG_AVXSLOW. */
+ if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
+ rval |= AV_CPU_FLAG_AVXSLOW;
+
+ /* Zen 3 and earlier have slow gather */
+ if ((family <= 0x19) && (rval & AV_CPU_FLAG_AVX2))
+ rval |= AV_CPU_FLAG_SLOW_GATHER;
+ }
+
+ /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
+ * used unless the OS has AVX support. */
+ if (rval & AV_CPU_FLAG_AVX) {
+ if (ecx & 0x00000800)
+ rval |= AV_CPU_FLAG_XOP;
+ if (ecx & 0x00010000)
+ rval |= AV_CPU_FLAG_FMA4;
+ }
+ }
+
+ if (!strncmp(vendor.c, "GenuineIntel", 12)) {
+ if (family == 6 && (model == 9 || model == 13 || model == 14)) {
+ /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
+ * 6/14 (core1 "yonah") theoretically support sse2, but it's
+ * usually slower than mmx, so let's just pretend they don't.
+ * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
+ * enabled so that SSE2 is not used unless explicitly enabled
+ * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
+ * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
+ if (rval & AV_CPU_FLAG_SSE2)
+ rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
+ if (rval & AV_CPU_FLAG_SSE3)
+ rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
+ }
+        /* The Atom processor has SSSE3 support, which is useful in many cases,
+         * but the SSSE3 version of some functions is slower than the SSE2
+         * equivalent on the Atom, even though it is generally faster on other
+         * processors supporting SSSE3. This flag allows for selectively
+         * disabling those SSSE3 functions on the Atom. */
+ if (family == 6 && model == 28)
+ rval |= AV_CPU_FLAG_ATOM;
+
+        /* Conroe has a slow shuffle unit. Check the model number so as not to
+         * include crippled low-end Penryns and Nehalems that lack SSE4. */
+ if ((rval & AV_CPU_FLAG_SSSE3) && !(rval & AV_CPU_FLAG_SSE4) &&
+ family == 6 && model < 23)
+ rval |= AV_CPU_FLAG_SSSE3SLOW;
+
+ /* Haswell has slow gather */
+ if ((rval & AV_CPU_FLAG_AVX2) && family == 6 && model < 70)
+ rval |= AV_CPU_FLAG_SLOW_GATHER;
+ }
+
+#endif /* cpuid */
+
+ return rval;
+}
+
+size_t ff_get_cpu_max_align_x86(void)
+{
+ int flags = av_get_cpu_flags();
+
+ if (flags & AV_CPU_FLAG_AVX512)
+ return 64;
+ if (flags & (AV_CPU_FLAG_AVX2 |
+ AV_CPU_FLAG_AVX |
+ AV_CPU_FLAG_XOP |
+ AV_CPU_FLAG_FMA4 |
+ AV_CPU_FLAG_FMA3 |
+ AV_CPU_FLAG_AVXSLOW))
+ return 32;
+ if (flags & (AV_CPU_FLAG_AESNI |
+ AV_CPU_FLAG_SSE42 |
+ AV_CPU_FLAG_SSE4 |
+ AV_CPU_FLAG_SSSE3 |
+ AV_CPU_FLAG_SSE3 |
+ AV_CPU_FLAG_SSE2 |
+ AV_CPU_FLAG_SSE |
+ AV_CPU_FLAG_ATOM |
+ AV_CPU_FLAG_SSSE3SLOW |
+ AV_CPU_FLAG_SSE3SLOW |
+ AV_CPU_FLAG_SSE2SLOW))
+ return 16;
+
+ return 8;
+}
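+
+/*
+ * Illustrative only: callers normally reach this through av_cpu_max_align()
+ * and use the result as an alignment hint for buffer allocation, e.g.
+ *
+ *     size_t align = av_cpu_max_align();  // 64 with AVX-512, 32 with AVX, ...
+ *     int ret = av_image_alloc(data, linesize, w, h, pix_fmt, align);
+ */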
diff --git a/media/ffvpx/libavutil/x86/cpu.h b/media/ffvpx/libavutil/x86/cpu.h
new file mode 100644
index 0000000000..40a1eef0ab
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/cpu.h
@@ -0,0 +1,114 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_X86_CPU_H
+#define AVUTIL_X86_CPU_H
+
+#include "libavutil/cpu.h"
+#include "libavutil/cpu_internal.h"
+
+#define AV_CPU_FLAG_AMD3DNOW AV_CPU_FLAG_3DNOW
+#define AV_CPU_FLAG_AMD3DNOWEXT AV_CPU_FLAG_3DNOWEXT
+
+#define X86_AMD3DNOW(flags) CPUEXT(flags, AMD3DNOW)
+#define X86_AMD3DNOWEXT(flags) CPUEXT(flags, AMD3DNOWEXT)
+#define X86_MMX(flags) CPUEXT(flags, MMX)
+#define X86_MMXEXT(flags) CPUEXT(flags, MMXEXT)
+#define X86_SSE(flags) CPUEXT(flags, SSE)
+#define X86_SSE2(flags) CPUEXT(flags, SSE2)
+#define X86_SSE2_FAST(flags) CPUEXT_FAST(flags, SSE2)
+#define X86_SSE2_SLOW(flags) CPUEXT_SLOW(flags, SSE2)
+#define X86_SSE3(flags) CPUEXT(flags, SSE3)
+#define X86_SSE3_FAST(flags) CPUEXT_FAST(flags, SSE3)
+#define X86_SSE3_SLOW(flags) CPUEXT_SLOW(flags, SSE3)
+#define X86_SSSE3(flags) CPUEXT(flags, SSSE3)
+#define X86_SSSE3_FAST(flags) CPUEXT_FAST(flags, SSSE3)
+#define X86_SSSE3_SLOW(flags) CPUEXT_SLOW(flags, SSSE3)
+#define X86_SSE4(flags) CPUEXT(flags, SSE4)
+#define X86_SSE42(flags) CPUEXT(flags, SSE42)
+#define X86_AVX(flags) CPUEXT(flags, AVX)
+#define X86_AVX_FAST(flags) CPUEXT_FAST(flags, AVX)
+#define X86_AVX_SLOW(flags) CPUEXT_SLOW(flags, AVX)
+#define X86_XOP(flags) CPUEXT(flags, XOP)
+#define X86_FMA3(flags) CPUEXT(flags, FMA3)
+#define X86_FMA4(flags) CPUEXT(flags, FMA4)
+#define X86_AVX2(flags) CPUEXT(flags, AVX2)
+#define X86_AESNI(flags) CPUEXT(flags, AESNI)
+#define X86_AVX512(flags) CPUEXT(flags, AVX512)
+
+#define EXTERNAL_AMD3DNOW(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AMD3DNOW)
+#define EXTERNAL_AMD3DNOWEXT(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AMD3DNOWEXT)
+#define EXTERNAL_MMX(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, MMX)
+#define EXTERNAL_MMXEXT(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, MMXEXT)
+#define EXTERNAL_SSE(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, SSE)
+#define EXTERNAL_SSE2(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, SSE2)
+#define EXTERNAL_SSE2_FAST(flags) CPUEXT_SUFFIX_FAST(flags, _EXTERNAL, SSE2)
+#define EXTERNAL_SSE2_SLOW(flags) CPUEXT_SUFFIX_SLOW(flags, _EXTERNAL, SSE2)
+#define EXTERNAL_SSE3(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, SSE3)
+#define EXTERNAL_SSE3_FAST(flags) CPUEXT_SUFFIX_FAST(flags, _EXTERNAL, SSE3)
+#define EXTERNAL_SSE3_SLOW(flags) CPUEXT_SUFFIX_SLOW(flags, _EXTERNAL, SSE3)
+#define EXTERNAL_SSSE3(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, SSSE3)
+#define EXTERNAL_SSSE3_FAST(flags) CPUEXT_SUFFIX_FAST(flags, _EXTERNAL, SSSE3)
+#define EXTERNAL_SSSE3_SLOW(flags) CPUEXT_SUFFIX_SLOW(flags, _EXTERNAL, SSSE3)
+#define EXTERNAL_SSE4(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, SSE4)
+#define EXTERNAL_SSE42(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, SSE42)
+#define EXTERNAL_AVX(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AVX)
+#define EXTERNAL_AVX_FAST(flags) CPUEXT_SUFFIX_FAST(flags, _EXTERNAL, AVX)
+#define EXTERNAL_AVX_SLOW(flags) CPUEXT_SUFFIX_SLOW(flags, _EXTERNAL, AVX)
+#define EXTERNAL_XOP(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, XOP)
+#define EXTERNAL_FMA3(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, FMA3)
+#define EXTERNAL_FMA3_FAST(flags) CPUEXT_SUFFIX_FAST2(flags, _EXTERNAL, FMA3, AVX)
+#define EXTERNAL_FMA3_SLOW(flags) CPUEXT_SUFFIX_SLOW2(flags, _EXTERNAL, FMA3, AVX)
+#define EXTERNAL_FMA4(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, FMA4)
+#define EXTERNAL_AVX2(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AVX2)
+#define EXTERNAL_AVX2_FAST(flags) CPUEXT_SUFFIX_FAST2(flags, _EXTERNAL, AVX2, AVX)
+#define EXTERNAL_AVX2_SLOW(flags) CPUEXT_SUFFIX_SLOW2(flags, _EXTERNAL, AVX2, AVX)
+#define EXTERNAL_AESNI(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AESNI)
+#define EXTERNAL_AVX512(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AVX512)
+#define EXTERNAL_AVX512ICL(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AVX512ICL)
+
+#define INLINE_AMD3DNOW(flags) CPUEXT_SUFFIX(flags, _INLINE, AMD3DNOW)
+#define INLINE_AMD3DNOWEXT(flags) CPUEXT_SUFFIX(flags, _INLINE, AMD3DNOWEXT)
+#define INLINE_MMX(flags) CPUEXT_SUFFIX(flags, _INLINE, MMX)
+#define INLINE_MMXEXT(flags) CPUEXT_SUFFIX(flags, _INLINE, MMXEXT)
+#define INLINE_SSE(flags) CPUEXT_SUFFIX(flags, _INLINE, SSE)
+#define INLINE_SSE2(flags) CPUEXT_SUFFIX(flags, _INLINE, SSE2)
+#define INLINE_SSE2_FAST(flags) CPUEXT_SUFFIX_FAST(flags, _INLINE, SSE2)
+#define INLINE_SSE2_SLOW(flags) CPUEXT_SUFFIX_SLOW(flags, _INLINE, SSE2)
+#define INLINE_SSE3(flags) CPUEXT_SUFFIX(flags, _INLINE, SSE3)
+#define INLINE_SSE3_FAST(flags) CPUEXT_SUFFIX_FAST(flags, _INLINE, SSE3)
+#define INLINE_SSE3_SLOW(flags) CPUEXT_SUFFIX_SLOW(flags, _INLINE, SSE3)
+#define INLINE_SSSE3(flags) CPUEXT_SUFFIX(flags, _INLINE, SSSE3)
+#define INLINE_SSSE3_FAST(flags) CPUEXT_SUFFIX_FAST(flags, _INLINE, SSSE3)
+#define INLINE_SSSE3_SLOW(flags) CPUEXT_SUFFIX_SLOW(flags, _INLINE, SSSE3)
+#define INLINE_SSE4(flags) CPUEXT_SUFFIX(flags, _INLINE, SSE4)
+#define INLINE_SSE42(flags) CPUEXT_SUFFIX(flags, _INLINE, SSE42)
+#define INLINE_AVX(flags) CPUEXT_SUFFIX(flags, _INLINE, AVX)
+#define INLINE_AVX_FAST(flags) CPUEXT_SUFFIX_FAST(flags, _INLINE, AVX)
+#define INLINE_AVX_SLOW(flags) CPUEXT_SUFFIX_SLOW(flags, _INLINE, AVX)
+#define INLINE_XOP(flags) CPUEXT_SUFFIX(flags, _INLINE, XOP)
+#define INLINE_FMA3(flags) CPUEXT_SUFFIX(flags, _INLINE, FMA3)
+#define INLINE_FMA4(flags) CPUEXT_SUFFIX(flags, _INLINE, FMA4)
+#define INLINE_AVX2(flags) CPUEXT_SUFFIX(flags, _INLINE, AVX2)
+#define INLINE_AESNI(flags) CPUEXT_SUFFIX(flags, _INLINE, AESNI)
+
+void ff_cpu_cpuid(int index, int *eax, int *ebx, int *ecx, int *edx);
+void ff_cpu_xgetbv(int op, int *eax, int *edx);
+int ff_cpu_cpuid_test(void);
+
+#endif /* AVUTIL_X86_CPU_H */
diff --git a/media/ffvpx/libavutil/x86/cpuid.asm b/media/ffvpx/libavutil/x86/cpuid.asm
new file mode 100644
index 0000000000..766f77fcdf
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/cpuid.asm
@@ -0,0 +1,91 @@
+;*****************************************************************************
+;* Copyright (C) 2005-2010 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;* Fiona Glaser <fiona@x264.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; void ff_cpu_cpuid(int index, int *eax, int *ebx, int *ecx, int *edx)
+;-----------------------------------------------------------------------------
+cglobal cpu_cpuid, 5,7
+ push rbx
+ push r4
+ push r3
+ push r2
+ push r1
+ mov eax, r0d
+ xor ecx, ecx
+ cpuid
+ pop r4
+ mov [r4], eax
+ pop r4
+ mov [r4], ebx
+ pop r4
+ mov [r4], ecx
+ pop r4
+ mov [r4], edx
+ pop rbx
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_cpu_xgetbv(int op, int *eax, int *edx)
+;-----------------------------------------------------------------------------
+cglobal cpu_xgetbv, 3,7
+ push r2
+ push r1
+ mov ecx, r0d
+ xgetbv
+ pop r4
+ mov [r4], eax
+ pop r4
+ mov [r4], edx
+ RET
+
+%if ARCH_X86_64 == 0
+;-----------------------------------------------------------------------------
+; int ff_cpu_cpuid_test(void)
+; return 0 if unsupported
+;-----------------------------------------------------------------------------
+cglobal cpu_cpuid_test
+ pushfd
+ push ebx
+ push ebp
+ push esi
+ push edi
+ pushfd
+ pop eax
+ mov ebx, eax
+ xor eax, 0x200000
+ push eax
+ popfd
+ pushfd
+ pop eax
+ xor eax, ebx
+ pop edi
+ pop esi
+ pop ebp
+ pop ebx
+ popfd
+ ret
+%endif
diff --git a/media/ffvpx/libavutil/x86/emms.asm b/media/ffvpx/libavutil/x86/emms.asm
new file mode 100644
index 0000000000..df84f2221b
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/emms.asm
@@ -0,0 +1,30 @@
+;*****************************************************************************
+;* Copyright (C) 2013 Martin Storsjo
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; void avpriv_emms_asm(void)
+;-----------------------------------------------------------------------------
+cvisible emms_asm, 0, 0
+ emms
+ RET
diff --git a/media/ffvpx/libavutil/x86/emms.h b/media/ffvpx/libavutil/x86/emms.h
new file mode 100644
index 0000000000..8ceec110cf
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/emms.h
@@ -0,0 +1,58 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_X86_EMMS_H
+#define AVUTIL_X86_EMMS_H
+
+#include "config.h"
+#include "libavutil/attributes.h"
+
+void avpriv_emms_asm(void);
+
+#if HAVE_MMX_INLINE
+#ifndef __MMX__
+#include "libavutil/cpu.h"
+#endif
+
+# define emms_c emms_c
+/**
+ * Empty the MMX state.
+ * This must be called between any DSP function and float/double code,
+ * for example: sin(); dsp->idct_put(); emms_c(); cos();
+ * Note that *alloc() and *free() also use float code in some libc
+ * implementations, so this also applies to them and to any function using them.
+ */
+static av_always_inline void emms_c(void)
+{
+/* Some inlined functions may also use mmx instructions regardless of
+ * runtime cpuflags. With that in mind, we unconditionally empty the
+ * mmx state if the target cpu chosen at configure time supports it.
+ */
+#if !defined(__MMX__)
+ if(av_get_cpu_flags() & AV_CPU_FLAG_MMX)
+#endif
+ __asm__ volatile ("emms" ::: "memory");
+}
+#elif HAVE_MMX && HAVE_MM_EMPTY
+# include <mmintrin.h>
+# define emms_c _mm_empty
+#elif HAVE_MMX_EXTERNAL
+# define emms_c avpriv_emms_asm
+#endif /* HAVE_MMX_INLINE */
+
+#endif /* AVUTIL_X86_EMMS_H */
diff --git a/media/ffvpx/libavutil/x86/fixed_dsp.asm b/media/ffvpx/libavutil/x86/fixed_dsp.asm
new file mode 100644
index 0000000000..2f411850f4
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/fixed_dsp.asm
@@ -0,0 +1,48 @@
+;*****************************************************************************
+;* x86-optimized fixed-point DSP functions
+;*
+;* Copyright 2016 James Almer
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; void ff_butterflies_fixed(float *src0, float *src1, int len);
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal butterflies_fixed, 3,3,3, src0, src1, len
+ shl lend, 2
+ add src0q, lenq
+ add src1q, lenq
+ neg lenq
+
+align 16
+.loop:
+ mova m0, [src0q + lenq]
+ mova m1, [src1q + lenq]
+ mova m2, m0
+ paddd m0, m1
+ psubd m2, m1
+ mova [src0q + lenq], m0
+ mova [src1q + lenq], m2
+ add lenq, mmsize
+ jl .loop
+ RET
diff --git a/media/ffvpx/libavutil/x86/fixed_dsp_init.c b/media/ffvpx/libavutil/x86/fixed_dsp_init.c
new file mode 100644
index 0000000000..d3f4b2e325
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/fixed_dsp_init.c
@@ -0,0 +1,35 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/fixed_dsp.h"
+#include "cpu.h"
+
+void ff_butterflies_fixed_sse2(int *av_restrict src0, int *av_restrict src1, int len);
+
+av_cold void ff_fixed_dsp_init_x86(AVFixedDSPContext *fdsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ fdsp->butterflies_fixed = ff_butterflies_fixed_sse2;
+ }
+}
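+
+/*
+ * Consumer-side sketch (assumes an already initialized AVFixedDSPContext *fdsp,
+ * 16-byte-aligned buffers and len a multiple of 4):
+ *
+ *     fdsp->butterflies_fixed(src0, src1, len);
+ *     // src0[i] becomes src0[i] + src1[i]; src1[i] becomes old src0[i] - src1[i]
+ */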
diff --git a/media/ffvpx/libavutil/x86/float_dsp.asm b/media/ffvpx/libavutil/x86/float_dsp.asm
new file mode 100644
index 0000000000..e84ba52566
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/float_dsp.asm
@@ -0,0 +1,588 @@
+;*****************************************************************************
+;* x86-optimized Float DSP functions
+;*
+;* Copyright 2006 Loren Merritt
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA 32
+pd_reverse: dd 7, 6, 5, 4, 3, 2, 1, 0
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
+;-----------------------------------------------------------------------------
+%macro VECTOR_FMUL 0
+cglobal vector_fmul, 4,4,2, dst, src0, src1, len
+ lea lenq, [lend*4 - 64]
+ALIGN 16
+.loop:
+%assign a 0
+%rep 32/mmsize
+ mova m0, [src0q + lenq + (a+0)*mmsize]
+ mova m1, [src0q + lenq + (a+1)*mmsize]
+ mulps m0, m0, [src1q + lenq + (a+0)*mmsize]
+ mulps m1, m1, [src1q + lenq + (a+1)*mmsize]
+ mova [dstq + lenq + (a+0)*mmsize], m0
+ mova [dstq + lenq + (a+1)*mmsize], m1
+%assign a a+2
+%endrep
+
+ sub lenq, 64
+ jge .loop
+ RET
+%endmacro
+
+INIT_XMM sse
+VECTOR_FMUL
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+VECTOR_FMUL
+%endif
+
+;-----------------------------------------------------------------------------
+; void vector_dmul(double *dst, const double *src0, const double *src1, int len)
+;-----------------------------------------------------------------------------
+%macro VECTOR_DMUL 0
+cglobal vector_dmul, 4,4,4, dst, src0, src1, len
+ lea lend, [lenq*8 - mmsize*4]
+ALIGN 16
+.loop:
+ movaps m0, [src0q + lenq + 0*mmsize]
+ movaps m1, [src0q + lenq + 1*mmsize]
+ movaps m2, [src0q + lenq + 2*mmsize]
+ movaps m3, [src0q + lenq + 3*mmsize]
+ mulpd m0, m0, [src1q + lenq + 0*mmsize]
+ mulpd m1, m1, [src1q + lenq + 1*mmsize]
+ mulpd m2, m2, [src1q + lenq + 2*mmsize]
+ mulpd m3, m3, [src1q + lenq + 3*mmsize]
+ movaps [dstq + lenq + 0*mmsize], m0
+ movaps [dstq + lenq + 1*mmsize], m1
+ movaps [dstq + lenq + 2*mmsize], m2
+ movaps [dstq + lenq + 3*mmsize], m3
+
+ sub lenq, mmsize*4
+ jge .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+VECTOR_DMUL
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+VECTOR_DMUL
+%endif
+
+;------------------------------------------------------------------------------
+; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
+;------------------------------------------------------------------------------
+
+%macro VECTOR_FMAC_SCALAR 0
+%if UNIX64
+cglobal vector_fmac_scalar, 3,3,5, dst, src, len
+%else
+cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
+%endif
+%if ARCH_X86_32
+ VBROADCASTSS m0, mulm
+%else
+%if WIN64
+ SWAP 0, 2
+%endif
+ shufps xm0, xm0, 0
+%if cpuflag(avx)
+ vinsertf128 m0, m0, xm0, 1
+%endif
+%endif
+ lea lenq, [lend*4-64]
+.loop:
+%if cpuflag(fma3)
+ mova m1, [dstq+lenq]
+ mova m2, [dstq+lenq+1*mmsize]
+ fmaddps m1, m0, [srcq+lenq], m1
+ fmaddps m2, m0, [srcq+lenq+1*mmsize], m2
+%else ; cpuflag
+ mulps m1, m0, [srcq+lenq]
+ mulps m2, m0, [srcq+lenq+1*mmsize]
+%if mmsize < 32
+ mulps m3, m0, [srcq+lenq+2*mmsize]
+ mulps m4, m0, [srcq+lenq+3*mmsize]
+%endif ; mmsize
+ addps m1, m1, [dstq+lenq]
+ addps m2, m2, [dstq+lenq+1*mmsize]
+%if mmsize < 32
+ addps m3, m3, [dstq+lenq+2*mmsize]
+ addps m4, m4, [dstq+lenq+3*mmsize]
+%endif ; mmsize
+%endif ; cpuflag
+ mova [dstq+lenq], m1
+ mova [dstq+lenq+1*mmsize], m2
+%if mmsize < 32
+ mova [dstq+lenq+2*mmsize], m3
+ mova [dstq+lenq+3*mmsize], m4
+%endif ; mmsize
+ sub lenq, 64
+ jge .loop
+ RET
+%endmacro
+
+INIT_XMM sse
+VECTOR_FMAC_SCALAR
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+VECTOR_FMAC_SCALAR
+%endif
+%if HAVE_FMA3_EXTERNAL
+INIT_YMM fma3
+VECTOR_FMAC_SCALAR
+%endif
+
+;------------------------------------------------------------------------------
+; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
+;------------------------------------------------------------------------------
+
+%macro VECTOR_FMUL_SCALAR 0
+%if UNIX64
+cglobal vector_fmul_scalar, 3,3,2, dst, src, len
+%else
+cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
+%endif
+%if ARCH_X86_32
+ movss m0, mulm
+%elif WIN64
+ SWAP 0, 2
+%endif
+ shufps m0, m0, 0
+ lea lenq, [lend*4-mmsize]
+.loop:
+ mova m1, [srcq+lenq]
+ mulps m1, m0
+ mova [dstq+lenq], m1
+ sub lenq, mmsize
+ jge .loop
+ RET
+%endmacro
+
+INIT_XMM sse
+VECTOR_FMUL_SCALAR
+
+;------------------------------------------------------------------------------
+; void ff_vector_dmac_scalar(double *dst, const double *src, double mul,
+; int len)
+;------------------------------------------------------------------------------
+
+%macro VECTOR_DMAC_SCALAR 0
+%if ARCH_X86_32
+cglobal vector_dmac_scalar, 2,4,5, dst, src, mul, len, lenaddr
+ mov lenq, lenaddrm
+ VBROADCASTSD m0, mulm
+%else
+%if UNIX64
+cglobal vector_dmac_scalar, 3,3,5, dst, src, len
+%else
+cglobal vector_dmac_scalar, 4,4,5, dst, src, mul, len
+ SWAP 0, 2
+%endif
+ movlhps xm0, xm0
+%if cpuflag(avx)
+ vinsertf128 m0, m0, xm0, 1
+%endif
+%endif
+ lea lenq, [lend*8-mmsize*4]
+.loop:
+%if cpuflag(fma3)
+ movaps m1, [dstq+lenq]
+ movaps m2, [dstq+lenq+1*mmsize]
+ movaps m3, [dstq+lenq+2*mmsize]
+ movaps m4, [dstq+lenq+3*mmsize]
+ fmaddpd m1, m0, [srcq+lenq], m1
+ fmaddpd m2, m0, [srcq+lenq+1*mmsize], m2
+ fmaddpd m3, m0, [srcq+lenq+2*mmsize], m3
+ fmaddpd m4, m0, [srcq+lenq+3*mmsize], m4
+%else ; cpuflag
+ mulpd m1, m0, [srcq+lenq]
+ mulpd m2, m0, [srcq+lenq+1*mmsize]
+ mulpd m3, m0, [srcq+lenq+2*mmsize]
+ mulpd m4, m0, [srcq+lenq+3*mmsize]
+ addpd m1, m1, [dstq+lenq]
+ addpd m2, m2, [dstq+lenq+1*mmsize]
+ addpd m3, m3, [dstq+lenq+2*mmsize]
+ addpd m4, m4, [dstq+lenq+3*mmsize]
+%endif ; cpuflag
+ movaps [dstq+lenq], m1
+ movaps [dstq+lenq+1*mmsize], m2
+ movaps [dstq+lenq+2*mmsize], m3
+ movaps [dstq+lenq+3*mmsize], m4
+ sub lenq, mmsize*4
+ jge .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+VECTOR_DMAC_SCALAR
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+VECTOR_DMAC_SCALAR
+%endif
+%if HAVE_FMA3_EXTERNAL
+INIT_YMM fma3
+VECTOR_DMAC_SCALAR
+%endif
+
+;------------------------------------------------------------------------------
+; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
+; int len)
+;------------------------------------------------------------------------------
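+; [Illustrative note, not part of upstream FFmpeg] Scalar reference
+; (double precision) this routine is assumed to implement:
+;     for (int i = 0; i < len; i++)
+;         dst[i] = src[i] * mul;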
+
+%macro VECTOR_DMUL_SCALAR 0
+%if ARCH_X86_32
+cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
+ mov lenq, lenaddrm
+%elif UNIX64
+cglobal vector_dmul_scalar, 3,3,3, dst, src, len
+%else
+cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
+%endif
+%if ARCH_X86_32
+ VBROADCASTSD m0, mulm
+%else
+%if WIN64
+ SWAP 0, 2
+%endif
+ movlhps xm0, xm0
+%if cpuflag(avx)
+ vinsertf128 ym0, ym0, xm0, 1
+%endif
+%endif
+ lea lenq, [lend*8-2*mmsize]
+.loop:
+ mulpd m1, m0, [srcq+lenq ]
+ mulpd m2, m0, [srcq+lenq+mmsize]
+ movaps [dstq+lenq ], m1
+ movaps [dstq+lenq+mmsize], m2
+ sub lenq, 2*mmsize
+ jge .loop
+ RET
+%endmacro
+
+INIT_XMM sse2
+VECTOR_DMUL_SCALAR
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+VECTOR_DMUL_SCALAR
+%endif
+
+;-----------------------------------------------------------------------------
+; vector_fmul_window(float *dst, const float *src0,
+; const float *src1, const float *win, int len);
+;-----------------------------------------------------------------------------
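+; [Illustrative note, not part of upstream FFmpeg] This is assumed to match
+; FFmpeg's scalar overlap-windowing reference, where dst, win and src0 are
+; first advanced by len elements:
+;     for (i = -len, j = len - 1; i < 0; i++, j--) {
+;         dst[i] = src0[i] * win[j] - src1[j] * win[i];
+;         dst[j] = src0[i] * win[i] + src1[j] * win[j];
+;     }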
+INIT_XMM sse
+cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
+ shl lend, 2
+ lea len1q, [lenq - mmsize]
+ add src0q, lenq
+ add dstq, lenq
+ add winq, lenq
+ neg lenq
+.loop:
+ mova m0, [winq + lenq]
+ mova m4, [src0q + lenq]
+ mova m1, [winq + len1q]
+ mova m5, [src1q + len1q]
+ shufps m1, m1, 0x1b
+ shufps m5, m5, 0x1b
+ mova m2, m0
+ mova m3, m1
+ mulps m2, m4
+ mulps m3, m5
+ mulps m1, m4
+ mulps m0, m5
+ addps m2, m3
+ subps m1, m0
+ shufps m2, m2, 0x1b
+ mova [dstq + lenq], m1
+ mova [dstq + len1q], m2
+ sub len1q, mmsize
+ add lenq, mmsize
+ jl .loop
+ RET
+
+;-----------------------------------------------------------------------------
+; vector_fmul_add(float *dst, const float *src0, const float *src1,
+; const float *src2, int len)
+;-----------------------------------------------------------------------------
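+; [Illustrative note, not part of upstream FFmpeg] Scalar reference this
+; routine is assumed to implement:
+;     for (int i = 0; i < len; i++)
+;         dst[i] = src0[i] * src1[i] + src2[i];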
+%macro VECTOR_FMUL_ADD 0
+cglobal vector_fmul_add, 5,5,4, dst, src0, src1, src2, len
+ lea lenq, [lend*4 - 2*mmsize]
+ALIGN 16
+.loop:
+ mova m0, [src0q + lenq]
+ mova m1, [src0q + lenq + mmsize]
+%if cpuflag(fma3)
+ mova m2, [src2q + lenq]
+ mova m3, [src2q + lenq + mmsize]
+ fmaddps m0, m0, [src1q + lenq], m2
+ fmaddps m1, m1, [src1q + lenq + mmsize], m3
+%else
+ mulps m0, m0, [src1q + lenq]
+ mulps m1, m1, [src1q + lenq + mmsize]
+ addps m0, m0, [src2q + lenq]
+ addps m1, m1, [src2q + lenq + mmsize]
+%endif
+ mova [dstq + lenq], m0
+ mova [dstq + lenq + mmsize], m1
+
+ sub lenq, 2*mmsize
+ jge .loop
+ RET
+%endmacro
+
+INIT_XMM sse
+VECTOR_FMUL_ADD
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+VECTOR_FMUL_ADD
+%endif
+%if HAVE_FMA3_EXTERNAL
+INIT_YMM fma3
+VECTOR_FMUL_ADD
+%endif
+
+;-----------------------------------------------------------------------------
+; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
+; int len)
+;-----------------------------------------------------------------------------
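+; [Illustrative note, not part of upstream FFmpeg] As the reversed indexing
+; below suggests, the scalar equivalent is:
+;     for (int i = 0; i < len; i++)
+;         dst[i] = src0[i] * src1[len - 1 - i];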
+%macro VECTOR_FMUL_REVERSE 0
+cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
+%if cpuflag(avx2)
+ movaps m2, [pd_reverse]
+%endif
+ lea lenq, [lend*4 - 2*mmsize]
+ALIGN 16
+.loop:
+%if cpuflag(avx2)
+ vpermps m0, m2, [src1q]
+ vpermps m1, m2, [src1q+mmsize]
+%elif cpuflag(avx)
+ vmovaps xmm0, [src1q + 16]
+ vinsertf128 m0, m0, [src1q], 1
+ vshufps m0, m0, m0, q0123
+ vmovaps xmm1, [src1q + mmsize + 16]
+ vinsertf128 m1, m1, [src1q + mmsize], 1
+ vshufps m1, m1, m1, q0123
+%else
+ mova m0, [src1q]
+ mova m1, [src1q + mmsize]
+ shufps m0, m0, q0123
+ shufps m1, m1, q0123
+%endif
+ mulps m0, m0, [src0q + lenq + mmsize]
+ mulps m1, m1, [src0q + lenq]
+ movaps [dstq + lenq + mmsize], m0
+ movaps [dstq + lenq], m1
+ add src1q, 2*mmsize
+ sub lenq, 2*mmsize
+ jge .loop
+ RET
+%endmacro
+
+INIT_XMM sse
+VECTOR_FMUL_REVERSE
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+VECTOR_FMUL_REVERSE
+%endif
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+VECTOR_FMUL_REVERSE
+%endif
+
+; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
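+; [Illustrative note, not part of upstream FFmpeg] Scalar equivalent of the
+; dot product computed below:
+;     float sum = 0;
+;     for (int i = 0; i < len; i++)
+;         sum += v1[i] * v2[i];
+;     return sum;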
+INIT_XMM sse
+cglobal scalarproduct_float, 3,3,2, v1, v2, offset
+ shl offsetd, 2
+ add v1q, offsetq
+ add v2q, offsetq
+ neg offsetq
+ xorps xmm0, xmm0
+.loop:
+ movaps xmm1, [v1q+offsetq]
+ mulps xmm1, [v2q+offsetq]
+ addps xmm0, xmm1
+ add offsetq, 16
+ js .loop
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%if ARCH_X86_64 == 0
+ movss r0m, xmm0
+ fld dword r0m
+%endif
+ RET
+
+INIT_YMM fma3
+cglobal scalarproduct_float, 3,5,8, v1, v2, size, len, offset
+ xor offsetq, offsetq
+ xorps m0, m0, m0
+ shl sized, 2
+ mov lenq, sizeq
+ cmp lenq, 32
+ jl .l16
+ cmp lenq, 64
+ jl .l32
+ xorps m1, m1, m1
+ cmp lenq, 128
+ jl .l64
+ and lenq, ~127
+ xorps m2, m2, m2
+ xorps m3, m3, m3
+.loop128:
+ movups m4, [v1q+offsetq]
+ movups m5, [v1q+offsetq + 32]
+ movups m6, [v1q+offsetq + 64]
+ movups m7, [v1q+offsetq + 96]
+ fmaddps m0, m4, [v2q+offsetq ], m0
+ fmaddps m1, m5, [v2q+offsetq + 32], m1
+ fmaddps m2, m6, [v2q+offsetq + 64], m2
+ fmaddps m3, m7, [v2q+offsetq + 96], m3
+ add offsetq, 128
+ cmp offsetq, lenq
+ jl .loop128
+ addps m0, m0, m2
+ addps m1, m1, m3
+ mov lenq, sizeq
+ and lenq, 127
+ cmp lenq, 64
+ jge .l64
+ addps m0, m0, m1
+ cmp lenq, 32
+ jge .l32
+ vextractf128 xmm2, m0, 1
+ addps xmm0, xmm2
+ cmp lenq, 16
+ jge .l16
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%if ARCH_X86_64 == 0
+ movss r0m, xm0
+ fld dword r0m
+%endif
+ RET
+.l64:
+ and lenq, ~63
+ add lenq, offsetq
+.loop64:
+ movups m4, [v1q+offsetq]
+ movups m5, [v1q+offsetq + 32]
+ fmaddps m0, m4, [v2q+offsetq], m0
+ fmaddps m1, m5, [v2q+offsetq + 32], m1
+ add offsetq, 64
+ cmp offsetq, lenq
+ jl .loop64
+ addps m0, m0, m1
+ mov lenq, sizeq
+ and lenq, 63
+ cmp lenq, 32
+ jge .l32
+ vextractf128 xmm2, m0, 1
+ addps xmm0, xmm2
+ cmp lenq, 16
+ jge .l16
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%if ARCH_X86_64 == 0
+ movss r0m, xm0
+ fld dword r0m
+%endif
+ RET
+.l32:
+ and lenq, ~31
+ add lenq, offsetq
+.loop32:
+ movups m4, [v1q+offsetq]
+ fmaddps m0, m4, [v2q+offsetq], m0
+ add offsetq, 32
+ cmp offsetq, lenq
+ jl .loop32
+ vextractf128 xmm2, m0, 1
+ addps xmm0, xmm2
+ mov lenq, sizeq
+ and lenq, 31
+ cmp lenq, 16
+ jge .l16
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%if ARCH_X86_64 == 0
+ movss r0m, xm0
+ fld dword r0m
+%endif
+ RET
+.l16:
+ and lenq, ~15
+ add lenq, offsetq
+.loop16:
+ movaps xmm1, [v1q+offsetq]
+ mulps xmm1, [v2q+offsetq]
+ addps xmm0, xmm1
+ add offsetq, 16
+ cmp offsetq, lenq
+ jl .loop16
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%if ARCH_X86_64 == 0
+ movss r0m, xm0
+ fld dword r0m
+%endif
+ RET
+
+;-----------------------------------------------------------------------------
+; void ff_butterflies_float(float *src0, float *src1, int len);
+;-----------------------------------------------------------------------------
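+; [Illustrative note, not part of upstream FFmpeg] Scalar equivalent of the
+; loop below:
+;     for (int i = 0; i < len; i++) {
+;         float t = src0[i] - src1[i];
+;         src0[i] = src0[i] + src1[i];
+;         src1[i] = t;
+;     }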
+INIT_XMM sse
+cglobal butterflies_float, 3,3,3, src0, src1, len
+ shl lend, 2
+ add src0q, lenq
+ add src1q, lenq
+ neg lenq
+.loop:
+ mova m0, [src0q + lenq]
+ mova m1, [src1q + lenq]
+ subps m2, m0, m1
+ addps m0, m0, m1
+ mova [src1q + lenq], m2
+ mova [src0q + lenq], m0
+ add lenq, mmsize
+ jl .loop
+ RET
diff --git a/media/ffvpx/libavutil/x86/float_dsp_init.c b/media/ffvpx/libavutil/x86/float_dsp_init.c
new file mode 100644
index 0000000000..ad6b506259
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/float_dsp_init.c
@@ -0,0 +1,118 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/float_dsp.h"
+#include "cpu.h"
+#include "asm.h"
+
+void ff_vector_fmul_sse(float *dst, const float *src0, const float *src1,
+ int len);
+void ff_vector_fmul_avx(float *dst, const float *src0, const float *src1,
+ int len);
+
+void ff_vector_dmul_sse2(double *dst, const double *src0, const double *src1,
+ int len);
+void ff_vector_dmul_avx(double *dst, const double *src0, const double *src1,
+ int len);
+
+void ff_vector_fmac_scalar_sse(float *dst, const float *src, float mul,
+ int len);
+void ff_vector_fmac_scalar_avx(float *dst, const float *src, float mul,
+ int len);
+void ff_vector_fmac_scalar_fma3(float *dst, const float *src, float mul,
+ int len);
+
+void ff_vector_fmul_scalar_sse(float *dst, const float *src, float mul,
+ int len);
+
+void ff_vector_dmac_scalar_sse2(double *dst, const double *src, double mul,
+ int len);
+void ff_vector_dmac_scalar_avx(double *dst, const double *src, double mul,
+ int len);
+void ff_vector_dmac_scalar_fma3(double *dst, const double *src, double mul,
+ int len);
+
+void ff_vector_dmul_scalar_sse2(double *dst, const double *src,
+ double mul, int len);
+void ff_vector_dmul_scalar_avx(double *dst, const double *src,
+ double mul, int len);
+
+void ff_vector_fmul_window_sse(float *dst, const float *src0,
+ const float *src1, const float *win, int len);
+
+void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
+ const float *src2, int len);
+void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
+ const float *src2, int len);
+void ff_vector_fmul_add_fma3(float *dst, const float *src0, const float *src1,
+ const float *src2, int len);
+
+void ff_vector_fmul_reverse_sse(float *dst, const float *src0,
+ const float *src1, int len);
+void ff_vector_fmul_reverse_avx(float *dst, const float *src0,
+ const float *src1, int len);
+void ff_vector_fmul_reverse_avx2(float *dst, const float *src0,
+ const float *src1, int len);
+
+float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
+float ff_scalarproduct_float_fma3(const float *v1, const float *v2, int order);
+
+void ff_butterflies_float_sse(float *av_restrict src0, float *av_restrict src1, int len);
+
+av_cold void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE(cpu_flags)) {
+ fdsp->vector_fmul = ff_vector_fmul_sse;
+ fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_sse;
+ fdsp->vector_fmul_scalar = ff_vector_fmul_scalar_sse;
+ fdsp->vector_fmul_window = ff_vector_fmul_window_sse;
+ fdsp->vector_fmul_add = ff_vector_fmul_add_sse;
+ fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
+ fdsp->scalarproduct_float = ff_scalarproduct_float_sse;
+ fdsp->butterflies_float = ff_butterflies_float_sse;
+ }
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ fdsp->vector_dmul = ff_vector_dmul_sse2;
+ fdsp->vector_dmac_scalar = ff_vector_dmac_scalar_sse2;
+ fdsp->vector_dmul_scalar = ff_vector_dmul_scalar_sse2;
+ }
+ if (EXTERNAL_AVX_FAST(cpu_flags)) {
+ fdsp->vector_fmul = ff_vector_fmul_avx;
+ fdsp->vector_dmul = ff_vector_dmul_avx;
+ fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_avx;
+ fdsp->vector_dmul_scalar = ff_vector_dmul_scalar_avx;
+ fdsp->vector_dmac_scalar = ff_vector_dmac_scalar_avx;
+ fdsp->vector_fmul_add = ff_vector_fmul_add_avx;
+ fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_avx;
+ }
+ if (EXTERNAL_AVX2_FAST(cpu_flags)) {
+ fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_avx2;
+ }
+ if (EXTERNAL_FMA3_FAST(cpu_flags)) {
+ fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_fma3;
+ fdsp->vector_fmul_add = ff_vector_fmul_add_fma3;
+ fdsp->vector_dmac_scalar = ff_vector_dmac_scalar_fma3;
+ fdsp->scalarproduct_float = ff_scalarproduct_float_fma3;
+ }
+}
diff --git a/media/ffvpx/libavutil/x86/imgutils.asm b/media/ffvpx/libavutil/x86/imgutils.asm
new file mode 100644
index 0000000000..3cca56cdca
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/imgutils.asm
@@ -0,0 +1,53 @@
+;*****************************************************************************
+;* Copyright 2016 Anton Khirnov
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+INIT_XMM sse4
+cglobal image_copy_plane_uc_from, 6, 7, 4, dst, dst_linesize, src, src_linesize, bw, height, rowpos
+ add dstq, bwq
+ add srcq, bwq
+ neg bwq
+
+.row_start:
+ mov rowposq, bwq
+
+.loop:
+ movntdqa m0, [srcq + rowposq + 0 * mmsize]
+ movntdqa m1, [srcq + rowposq + 1 * mmsize]
+ movntdqa m2, [srcq + rowposq + 2 * mmsize]
+ movntdqa m3, [srcq + rowposq + 3 * mmsize]
+
+ mova [dstq + rowposq + 0 * mmsize], m0
+ mova [dstq + rowposq + 1 * mmsize], m1
+ mova [dstq + rowposq + 2 * mmsize], m2
+ mova [dstq + rowposq + 3 * mmsize], m3
+
+ add rowposq, 4 * mmsize
+ jnz .loop
+
+ add srcq, src_linesizeq
+ add dstq, dst_linesizeq
+ dec heightd
+ jnz .row_start
+
+ RET
diff --git a/media/ffvpx/libavutil/x86/imgutils_init.c b/media/ffvpx/libavutil/x86/imgutils_init.c
new file mode 100644
index 0000000000..91a16cf594
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/imgutils_init.c
@@ -0,0 +1,48 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/cpu.h"
+#include "libavutil/error.h"
+#include "libavutil/imgutils_internal.h"
+#include "libavutil/macros.h"
+
+#include "cpu.h"
+
+void ff_image_copy_plane_uc_from_sse4(uint8_t *dst, ptrdiff_t dst_linesize,
+ const uint8_t *src, ptrdiff_t src_linesize,
+ ptrdiff_t bytewidth, int height);
+
+int ff_image_copy_plane_uc_from_x86(uint8_t *dst, ptrdiff_t dst_linesize,
+ const uint8_t *src, ptrdiff_t src_linesize,
+ ptrdiff_t bytewidth, int height)
+{
+ int cpu_flags = av_get_cpu_flags();
+ ptrdiff_t bw_aligned = FFALIGN(bytewidth, 64);
+
+ if (EXTERNAL_SSE4(cpu_flags) &&
+ bw_aligned <= dst_linesize && bw_aligned <= src_linesize)
+ ff_image_copy_plane_uc_from_sse4(dst, dst_linesize, src, src_linesize,
+ bw_aligned, height);
+ else
+ return AVERROR(ENOSYS);
+
+ return 0;
+}
diff --git a/media/ffvpx/libavutil/x86/intmath.h b/media/ffvpx/libavutil/x86/intmath.h
new file mode 100644
index 0000000000..8a6b5ae261
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/intmath.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2015 James Almer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_X86_INTMATH_H
+#define AVUTIL_X86_INTMATH_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#if HAVE_FAST_CLZ
+#if defined(_MSC_VER)
+#include <intrin.h>
+#elif defined(__INTEL_COMPILER)
+#include <immintrin.h>
+#endif
+#endif
+#include "config.h"
+
+#if HAVE_FAST_CLZ
+#if (defined(__INTEL_COMPILER) && (__INTEL_COMPILER>=1216)) || defined(_MSC_VER)
+# if defined(__INTEL_COMPILER)
+# define ff_log2(x) (_bit_scan_reverse((x)|1))
+# else
+# define ff_log2 ff_log2_x86
+static av_always_inline av_const int ff_log2_x86(unsigned int v)
+{
+ unsigned long n;
+ _BitScanReverse(&n, v|1);
+ return n;
+}
+# endif
+# define ff_log2_16bit av_log2
+
+#if defined(__INTEL_COMPILER) || (defined(_MSC_VER) && (_MSC_VER >= 1700) && \
+ (defined(__BMI__) || !defined(__clang__)))
+# define ff_ctz(v) _tzcnt_u32(v)
+
+# if ARCH_X86_64
+# define ff_ctzll(v) _tzcnt_u64(v)
+# else
+# define ff_ctzll ff_ctzll_x86
+static av_always_inline av_const int ff_ctzll_x86(long long v)
+{
+ return ((uint32_t)v == 0) ? _tzcnt_u32((uint32_t)(v >> 32)) + 32 : _tzcnt_u32((uint32_t)v);
+}
+# endif
+#endif /* _MSC_VER */
+
+#endif /* __INTEL_COMPILER */
+
+#endif /* HAVE_FAST_CLZ */
+
+#if defined(__GNUC__)
+
+/* Our generic version of av_popcount is faster than GCC's built-in on
+ * CPUs that don't support the popcnt instruction.
+ */
+#if defined(__POPCNT__)
+ #define av_popcount __builtin_popcount
+#if ARCH_X86_64
+ #define av_popcount64 __builtin_popcountll
+#endif
+
+#endif /* __POPCNT__ */
+
+#if defined(__BMI2__)
+
+#if AV_GCC_VERSION_AT_LEAST(5,1)
+#define av_mod_uintp2 __builtin_ia32_bzhi_si
+#elif HAVE_INLINE_ASM
+/* GCC releases before 5.1.0 have a broken bzhi builtin, so for those we
+ * implement it using inline assembly
+ */
+#define av_mod_uintp2 av_mod_uintp2_bmi2
+static av_always_inline av_const unsigned av_mod_uintp2_bmi2(unsigned a, unsigned p)
+{
+ if (av_builtin_constant_p(p))
+ return a & ((1 << p) - 1);
+ else {
+ unsigned x;
+ __asm__ ("bzhi %2, %1, %0 \n\t" : "=r"(x) : "rm"(a), "r"(p));
+ return x;
+ }
+}
+#endif /* AV_GCC_VERSION_AT_LEAST */
+
+#endif /* __BMI2__ */
+
+#if defined(__SSE2__) && !defined(__INTEL_COMPILER)
+
+#define av_clipd av_clipd_sse2
+static av_always_inline av_const double av_clipd_sse2(double a, double amin, double amax)
+{
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ __asm__ ("maxsd %1, %0 \n\t"
+ "minsd %2, %0 \n\t"
+ : "+&x"(a) : "xm"(amin), "xm"(amax));
+ return a;
+}
+
+#endif /* __SSE2__ */
+
+#if defined(__SSE__) && !defined(__INTEL_COMPILER)
+
+#define av_clipf av_clipf_sse
+static av_always_inline av_const float av_clipf_sse(float a, float amin, float amax)
+{
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ __asm__ ("maxss %1, %0 \n\t"
+ "minss %2, %0 \n\t"
+ : "+&x"(a) : "xm"(amin), "xm"(amax));
+ return a;
+}
+
+#endif /* __SSE__ */
+
+#if defined(__AVX__) && !defined(__INTEL_COMPILER)
+
+#undef av_clipd
+#define av_clipd av_clipd_avx
+static av_always_inline av_const double av_clipd_avx(double a, double amin, double amax)
+{
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ __asm__ ("vmaxsd %1, %0, %0 \n\t"
+ "vminsd %2, %0, %0 \n\t"
+ : "+&x"(a) : "xm"(amin), "xm"(amax));
+ return a;
+}
+
+#undef av_clipf
+#define av_clipf av_clipf_avx
+static av_always_inline av_const float av_clipf_avx(float a, float amin, float amax)
+{
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ __asm__ ("vmaxss %1, %0, %0 \n\t"
+ "vminss %2, %0, %0 \n\t"
+ : "+&x"(a) : "xm"(amin), "xm"(amax));
+ return a;
+}
+
+#endif /* __AVX__ */
+
+#endif /* __GNUC__ */
+
+#endif /* AVUTIL_X86_INTMATH_H */
diff --git a/media/ffvpx/libavutil/x86/intreadwrite.h b/media/ffvpx/libavutil/x86/intreadwrite.h
new file mode 100644
index 0000000000..40f375b013
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/intreadwrite.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2010 Alexander Strange <astrange@ithinksw.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_X86_INTREADWRITE_H
+#define AVUTIL_X86_INTREADWRITE_H
+
+#include <stdint.h>
+#include "config.h"
+#include "libavutil/attributes.h"
+
+#if HAVE_MMX
+
+#if !HAVE_FAST_64BIT && defined(__MMX__)
+
+#define FF_COPY_SWAP_ZERO_USES_MMX
+
+#define AV_COPY64 AV_COPY64
+static av_always_inline void AV_COPY64(void *d, const void *s)
+{
+ __asm__("movq %1, %%mm0 \n\t"
+ "movq %%mm0, %0 \n\t"
+ : "=m"(*(uint64_t*)d)
+ : "m" (*(const uint64_t*)s)
+ : "mm0");
+}
+
+#define AV_SWAP64 AV_SWAP64
+static av_always_inline void AV_SWAP64(void *a, void *b)
+{
+ __asm__("movq %1, %%mm0 \n\t"
+ "movq %0, %%mm1 \n\t"
+ "movq %%mm0, %0 \n\t"
+ "movq %%mm1, %1 \n\t"
+ : "+m"(*(uint64_t*)a), "+m"(*(uint64_t*)b)
+ ::"mm0", "mm1");
+}
+
+#define AV_ZERO64 AV_ZERO64
+static av_always_inline void AV_ZERO64(void *d)
+{
+ __asm__("pxor %%mm0, %%mm0 \n\t"
+ "movq %%mm0, %0 \n\t"
+ : "=m"(*(uint64_t*)d)
+ :: "mm0");
+}
+
+#endif /* !HAVE_FAST_64BIT && defined(__MMX__) */
+
+#ifdef __SSE__
+
+#define AV_COPY128 AV_COPY128
+static av_always_inline void AV_COPY128(void *d, const void *s)
+{
+ struct v {uint64_t v[2];};
+
+ __asm__("movaps %1, %%xmm0 \n\t"
+ "movaps %%xmm0, %0 \n\t"
+ : "=m"(*(struct v*)d)
+ : "m" (*(const struct v*)s)
+ : "xmm0");
+}
+
+#endif /* __SSE__ */
+
+#ifdef __SSE2__
+
+#define AV_ZERO128 AV_ZERO128
+static av_always_inline void AV_ZERO128(void *d)
+{
+ struct v {uint64_t v[2];};
+
+ __asm__("pxor %%xmm0, %%xmm0 \n\t"
+ "movdqa %%xmm0, %0 \n\t"
+ : "=m"(*(struct v*)d)
+ :: "xmm0");
+}
+
+#endif /* __SSE2__ */
+
+#endif /* HAVE_MMX */
+
+#endif /* AVUTIL_X86_INTREADWRITE_H */
diff --git a/media/ffvpx/libavutil/x86/lls.asm b/media/ffvpx/libavutil/x86/lls.asm
new file mode 100644
index 0000000000..e8141e6c4f
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/lls.asm
@@ -0,0 +1,290 @@
+;******************************************************************************
+;* linear least squares model
+;*
+;* Copyright (c) 2013 Loren Merritt
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+%define MAX_VARS 32
+%define MAX_VARS_ALIGN (MAX_VARS+4)
+%define COVAR_STRIDE MAX_VARS_ALIGN*8
+%define COVAR(x,y) [covarq + (x)*8 + (y)*COVAR_STRIDE]
+
+struc LLSModel
+ .covariance: resq MAX_VARS_ALIGN*MAX_VARS_ALIGN
+ .coeff: resq MAX_VARS*MAX_VARS
+ .variance: resq MAX_VARS
+ .indep_count: resd 1
+endstruc
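+
+; [Illustrative note, not part of upstream FFmpeg] update_lls below is assumed
+; to accumulate pairwise products into one triangle of the covariance matrix,
+; roughly:
+;     for (i = 0; i <= indep_count; i++)
+;         for (j = i; j <= indep_count; j++)
+;             covariance[i][j] += var[i] * var[j];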
+
+%macro ADDPD_MEM 2
+%if cpuflag(avx)
+ vaddpd %2, %2, %1
+%else
+ addpd %2, %1
+%endif
+ mova %1, %2
+%endmacro
+
+INIT_XMM sse2
+%define movdqa movaps
+cglobal update_lls, 2,5,8, ctx, var, i, j, covar2
+ %define covarq ctxq
+ mov id, [ctxq + LLSModel.indep_count]
+ lea varq, [varq + iq*8]
+ neg iq
+ mov covar2q, covarq
+.loopi:
+ ; Compute all 3 pairwise products of a 2x2 block that lies on the diagonal
+ mova m1, [varq + iq*8]
+ mova m3, [varq + iq*8 + 16]
+ pshufd m4, m1, q1010
+ pshufd m5, m1, q3232
+ pshufd m6, m3, q1010
+ pshufd m7, m3, q3232
+ mulpd m0, m1, m4
+ mulpd m1, m1, m5
+ lea covarq, [covar2q + 16]
+ ADDPD_MEM COVAR(-2,0), m0
+ ADDPD_MEM COVAR(-2,1), m1
+ lea jq, [iq + 2]
+ cmp jd, -2
+ jg .skip4x4
+.loop4x4:
+ ; Compute all 16 pairwise products of a 4x4 block
+ mulpd m0, m4, m3
+ mulpd m1, m5, m3
+ mulpd m2, m6, m3
+ mulpd m3, m3, m7
+ ADDPD_MEM COVAR(0,0), m0
+ ADDPD_MEM COVAR(0,1), m1
+ ADDPD_MEM COVAR(0,2), m2
+ ADDPD_MEM COVAR(0,3), m3
+ mova m3, [varq + jq*8 + 16]
+ mulpd m0, m4, m3
+ mulpd m1, m5, m3
+ mulpd m2, m6, m3
+ mulpd m3, m3, m7
+ ADDPD_MEM COVAR(2,0), m0
+ ADDPD_MEM COVAR(2,1), m1
+ ADDPD_MEM COVAR(2,2), m2
+ ADDPD_MEM COVAR(2,3), m3
+ mova m3, [varq + jq*8 + 32]
+ add covarq, 32
+ add jq, 4
+ cmp jd, -2
+ jle .loop4x4
+.skip4x4:
+ test jd, jd
+ jg .skip2x4
+ mulpd m4, m3
+ mulpd m5, m3
+ mulpd m6, m3
+ mulpd m7, m3
+ ADDPD_MEM COVAR(0,0), m4
+ ADDPD_MEM COVAR(0,1), m5
+ ADDPD_MEM COVAR(0,2), m6
+ ADDPD_MEM COVAR(0,3), m7
+.skip2x4:
+ add iq, 4
+ add covar2q, 4*COVAR_STRIDE+32
+ cmp id, -2
+ jle .loopi
+ test id, id
+ jg .ret
+ mov jq, iq
+ %define covarq covar2q
+.loop2x1:
+ movsd m0, [varq + iq*8]
+ movlhps m0, m0
+ mulpd m0, [varq + jq*8]
+ ADDPD_MEM COVAR(0,0), m0
+ inc iq
+ add covarq, COVAR_STRIDE
+ test id, id
+ jle .loop2x1
+.ret:
+ RET
+
+%macro UPDATE_LLS 0
+cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
+ %define covarq ctxq
+ mov countd, [ctxq + LLSModel.indep_count]
+ lea count2d, [countq-2]
+ xor id, id
+.loopi:
+ ; Compute all 10 pairwise products of a 4x4 block that lies on the diagonal
+ mova ymm1, [varq + iq*8]
+ vbroadcastsd ymm4, [varq + iq*8]
+ vbroadcastsd ymm5, [varq + iq*8 + 8]
+ vbroadcastsd ymm6, [varq + iq*8 + 16]
+ vbroadcastsd ymm7, [varq + iq*8 + 24]
+ vextractf128 xmm3, ymm1, 1
+%if cpuflag(fma3)
+ mova ymm0, COVAR(iq ,0)
+ mova xmm2, COVAR(iq+2,2)
+ fmaddpd ymm0, ymm1, ymm4, ymm0
+ fmaddpd xmm2, xmm3, xmm6, xmm2
+ fmaddpd ymm1, ymm5, ymm1, COVAR(iq ,1)
+ fmaddpd xmm3, xmm7, xmm3, COVAR(iq+2,3)
+ mova COVAR(iq ,0), ymm0
+ mova COVAR(iq ,1), ymm1
+ mova COVAR(iq+2,2), xmm2
+ mova COVAR(iq+2,3), xmm3
+%else
+ vmulpd ymm0, ymm1, ymm4
+ vmulpd ymm1, ymm1, ymm5
+ vmulpd xmm2, xmm3, xmm6
+ vmulpd xmm3, xmm3, xmm7
+ ADDPD_MEM COVAR(iq ,0), ymm0
+ ADDPD_MEM COVAR(iq ,1), ymm1
+ ADDPD_MEM COVAR(iq+2,2), xmm2
+ ADDPD_MEM COVAR(iq+2,3), xmm3
+%endif ; cpuflag(fma3)
+ lea jd, [iq + 4]
+ cmp jd, count2d
+ jg .skip4x4
+.loop4x4:
+ ; Compute all 16 pairwise products of a 4x4 block
+ mova ymm3, [varq + jq*8]
+%if cpuflag(fma3)
+ mova ymm0, COVAR(jq, 0)
+ mova ymm1, COVAR(jq, 1)
+ mova ymm2, COVAR(jq, 2)
+ fmaddpd ymm0, ymm3, ymm4, ymm0
+ fmaddpd ymm1, ymm3, ymm5, ymm1
+ fmaddpd ymm2, ymm3, ymm6, ymm2
+ fmaddpd ymm3, ymm7, ymm3, COVAR(jq,3)
+ mova COVAR(jq, 0), ymm0
+ mova COVAR(jq, 1), ymm1
+ mova COVAR(jq, 2), ymm2
+ mova COVAR(jq, 3), ymm3
+%else
+ vmulpd ymm0, ymm3, ymm4
+ vmulpd ymm1, ymm3, ymm5
+ vmulpd ymm2, ymm3, ymm6
+ vmulpd ymm3, ymm3, ymm7
+ ADDPD_MEM COVAR(jq,0), ymm0
+ ADDPD_MEM COVAR(jq,1), ymm1
+ ADDPD_MEM COVAR(jq,2), ymm2
+ ADDPD_MEM COVAR(jq,3), ymm3
+%endif ; cpuflag(fma3)
+ add jd, 4
+ cmp jd, count2d
+ jle .loop4x4
+.skip4x4:
+ cmp jd, countd
+ jg .skip2x4
+ mova xmm3, [varq + jq*8]
+%if cpuflag(fma3)
+ mova xmm0, COVAR(jq, 0)
+ mova xmm1, COVAR(jq, 1)
+ mova xmm2, COVAR(jq, 2)
+ fmaddpd xmm0, xmm3, xmm4, xmm0
+ fmaddpd xmm1, xmm3, xmm5, xmm1
+ fmaddpd xmm2, xmm3, xmm6, xmm2
+ fmaddpd xmm3, xmm7, xmm3, COVAR(jq,3)
+ mova COVAR(jq, 0), xmm0
+ mova COVAR(jq, 1), xmm1
+ mova COVAR(jq, 2), xmm2
+ mova COVAR(jq, 3), xmm3
+%else
+ vmulpd xmm0, xmm3, xmm4
+ vmulpd xmm1, xmm3, xmm5
+ vmulpd xmm2, xmm3, xmm6
+ vmulpd xmm3, xmm3, xmm7
+ ADDPD_MEM COVAR(jq,0), xmm0
+ ADDPD_MEM COVAR(jq,1), xmm1
+ ADDPD_MEM COVAR(jq,2), xmm2
+ ADDPD_MEM COVAR(jq,3), xmm3
+%endif ; cpuflag(fma3)
+.skip2x4:
+ add id, 4
+ add covarq, 4*COVAR_STRIDE
+ cmp id, count2d
+ jle .loopi
+ cmp id, countd
+ jg .ret
+ mov jd, id
+.loop2x1:
+ vmovddup xmm0, [varq + iq*8]
+%if cpuflag(fma3)
+ mova xmm1, [varq + jq*8]
+ fmaddpd xmm0, xmm1, xmm0, COVAR(jq,0)
+ mova COVAR(jq,0), xmm0
+%else
+ vmulpd xmm0, [varq + jq*8]
+ ADDPD_MEM COVAR(jq,0), xmm0
+%endif ; cpuflag(fma3)
+ inc id
+ add covarq, COVAR_STRIDE
+ cmp id, countd
+ jle .loop2x1
+.ret:
+ RET
+%endmacro ; UPDATE_LLS
+
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+UPDATE_LLS
+%endif
+%if HAVE_FMA3_EXTERNAL
+INIT_YMM fma3
+UPDATE_LLS
+%endif
+
+INIT_XMM sse2
+cglobal evaluate_lls, 3,4,2, ctx, var, order, i
+ ; This function is often called on the same buffer as update_lls, but with
+ ; an offset. They can't both be aligned.
+ ; Load halves rather than movu to avoid store-forwarding stalls, since the
+ ; input was initialized immediately prior to this function using scalar math.
+ %define coefsq ctxq
+ mov id, orderd
+ imul orderd, MAX_VARS
+ lea coefsq, [ctxq + LLSModel.coeff + orderq*8]
+ movsd m0, [varq]
+ movhpd m0, [varq + 8]
+ mulpd m0, [coefsq]
+ lea coefsq, [coefsq + iq*8]
+ lea varq, [varq + iq*8]
+ neg iq
+ add iq, 2
+.loop:
+ movsd m1, [varq + iq*8]
+ movhpd m1, [varq + iq*8 + 8]
+ mulpd m1, [coefsq + iq*8]
+ addpd m0, m1
+ add iq, 2
+ jl .loop
+ jg .skip1
+ movsd m1, [varq + iq*8]
+ mulsd m1, [coefsq + iq*8]
+ addpd m0, m1
+.skip1:
+ movhlps m1, m0
+ addsd m0, m1
+%if ARCH_X86_32
+ movsd r0m, m0
+ fld qword r0m
+%endif
+ RET
diff --git a/media/ffvpx/libavutil/x86/lls_init.c b/media/ffvpx/libavutil/x86/lls_init.c
new file mode 100644
index 0000000000..c786376915
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/lls_init.c
@@ -0,0 +1,46 @@
+/*
+ * linear least squares model
+ *
+ * Copyright (c) 2013 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/lls.h"
+#include "libavutil/x86/cpu.h"
+
+void ff_update_lls_sse2(LLSModel *m, const double *var);
+void ff_update_lls_avx(LLSModel *m, const double *var);
+void ff_update_lls_fma3(LLSModel *m, const double *var);
+double ff_evaluate_lls_sse2(LLSModel *m, const double *var, int order);
+
+av_cold void ff_init_lls_x86(LLSModel *m)
+{
+ int cpu_flags = av_get_cpu_flags();
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ m->update_lls = ff_update_lls_sse2;
+ if (m->indep_count >= 4)
+ m->evaluate_lls = ff_evaluate_lls_sse2;
+ }
+ if (EXTERNAL_AVX_FAST(cpu_flags)) {
+ m->update_lls = ff_update_lls_avx;
+ }
+ if (EXTERNAL_FMA3_FAST(cpu_flags)) {
+ m->update_lls = ff_update_lls_fma3;
+ }
+}
diff --git a/media/ffvpx/libavutil/x86/moz.build b/media/ffvpx/libavutil/x86/moz.build
new file mode 100644
index 0000000000..b56ed75ead
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/moz.build
@@ -0,0 +1,23 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ 'cpu.c',
+ 'cpuid.asm',
+ 'emms.asm',
+ 'fixed_dsp.asm',
+ 'fixed_dsp_init.c',
+ 'float_dsp.asm',
+ 'float_dsp_init.c',
+ 'imgutils.asm',
+ 'imgutils_init.c',
+ 'lls.asm',
+ 'lls_init.c'
+]
+
+FINAL_LIBRARY = 'mozavutil'
+
+include('/media/ffvpx/ffvpxcommon.mozbuild')
diff --git a/media/ffvpx/libavutil/x86/pixelutils.asm b/media/ffvpx/libavutil/x86/pixelutils.asm
new file mode 100644
index 0000000000..fbe9b45971
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/pixelutils.asm
@@ -0,0 +1,328 @@
+;******************************************************************************
+;* Pixel utilities SIMD
+;*
+;* Copyright (C) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+;* Copyright (C) 2014 Clément Bœsch <u pkh me>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_8x8_mmxext(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
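+; [Illustrative note, not part of upstream FFmpeg] Each SAD routine in this
+; file is the SIMD form (via psadbw) of the scalar sum of absolute differences
+; over a WxH block:
+;     int sad = 0;
+;     for (y = 0; y < h; y++)
+;         for (x = 0; x < w; x++)
+;             sad += abs(src1[y * stride1 + x] - src2[y * stride2 + x]);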
+INIT_MMX mmxext
+cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
+ pxor m2, m2
+%rep 4
+ mova m0, [src1q]
+ mova m1, [src1q + stride1q]
+ psadbw m0, [src2q]
+ psadbw m1, [src2q + stride2q]
+ paddw m2, m0
+ paddw m2, m1
+ lea src1q, [src1q + 2*stride1q]
+ lea src2q, [src2q + 2*stride2q]
+%endrep
+ movd eax, m2
+ RET
+
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pixelutils_sad_16x16, 4,4,5, src1, stride1, src2, stride2
+ movu m4, [src1q]
+ movu m2, [src2q]
+ movu m1, [src1q + stride1q]
+ movu m3, [src2q + stride2q]
+ psadbw m4, m2
+ psadbw m1, m3
+ paddw m4, m1
+%rep 7
+ lea src1q, [src1q + 2*stride1q]
+ lea src2q, [src2q + 2*stride2q]
+ movu m0, [src1q]
+ movu m2, [src2q]
+ movu m1, [src1q + stride1q]
+ movu m3, [src2q + stride2q]
+ psadbw m0, m2
+ psadbw m1, m3
+ paddw m4, m0
+ paddw m4, m1
+%endrep
+ movhlps m0, m4
+ paddw m4, m0
+ movd eax, m4
+ RET
+
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_[au]_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
+%macro SAD_XMM_16x16 1
+INIT_XMM sse2
+cglobal pixelutils_sad_%1_16x16, 4,4,3, src1, stride1, src2, stride2
+ mov%1 m2, [src2q]
+ psadbw m2, [src1q]
+ mov%1 m1, [src2q + stride2q]
+ psadbw m1, [src1q + stride1q]
+ paddw m2, m1
+%rep 7
+ lea src1q, [src1q + 2*stride1q]
+ lea src2q, [src2q + 2*stride2q]
+ mov%1 m0, [src2q]
+ psadbw m0, [src1q]
+ mov%1 m1, [src2q + stride2q]
+ psadbw m1, [src1q + stride1q]
+ paddw m2, m0
+ paddw m2, m1
+%endrep
+ movhlps m0, m2
+ paddw m2, m0
+ movd eax, m2
+ RET
+%endmacro
+
+SAD_XMM_16x16 a
+SAD_XMM_16x16 u
+
+
+%macro PROCESS_SAD_32x4_U 0
+ movu m1, [r2]
+ movu m2, [r2 + 16]
+ movu m3, [r0]
+ movu m4, [r0 + 16]
+ psadbw m1, m3
+ psadbw m2, m4
+ paddd m1, m2
+ paddd m0, m1
+ lea r2, [r2 + r3]
+ lea r0, [r0 + r1]
+
+ movu m1, [r2]
+ movu m2, [r2 + 16]
+ movu m3, [r0]
+ movu m4, [r0 + 16]
+ psadbw m1, m3
+ psadbw m2, m4
+ paddd m1, m2
+ paddd m0, m1
+ lea r2, [r2 + r3]
+ lea r0, [r0 + r1]
+
+ movu m1, [r2]
+ movu m2, [r2 + 16]
+ movu m3, [r0]
+ movu m4, [r0 + 16]
+ psadbw m1, m3
+ psadbw m2, m4
+ paddd m1, m2
+ paddd m0, m1
+ lea r2, [r2 + r3]
+ lea r0, [r0 + r1]
+
+ movu m1, [r2]
+ movu m2, [r2 + 16]
+ movu m3, [r0]
+ movu m4, [r0 + 16]
+ psadbw m1, m3
+ psadbw m2, m4
+ paddd m1, m2
+ paddd m0, m1
+ lea r2, [r2 + r3]
+ lea r0, [r0 + r1]
+%endmacro
+
+%macro PROCESS_SAD_32x4 1
+ mov%1 m1, [r2]
+ mov%1 m2, [r2 + 16]
+ psadbw m1, [r0]
+ psadbw m2, [r0 + 16]
+ paddd m1, m2
+ paddd m0, m1
+ lea r2, [r2 + r3]
+ lea r0, [r0 + r1]
+
+ mov%1 m1, [r2]
+ mov%1 m2, [r2 + 16]
+ psadbw m1, [r0]
+ psadbw m2, [r0 + 16]
+ paddd m1, m2
+ paddd m0, m1
+ lea r2, [r2 + r3]
+ lea r0, [r0 + r1]
+
+ mov%1 m1, [r2]
+ mov%1 m2, [r2 + 16]
+ psadbw m1, [r0]
+ psadbw m2, [r0 + 16]
+ paddd m1, m2
+ paddd m0, m1
+ lea r2, [r2 + r3]
+ lea r0, [r0 + r1]
+
+ mov%1 m1, [r2]
+ mov%1 m2, [r2 + 16]
+ psadbw m1, [r0]
+ psadbw m2, [r0 + 16]
+ paddd m1, m2
+ paddd m0, m1
+ lea r2, [r2 + r3]
+ lea r0, [r0 + r1]
+%endmacro
+
+;-----------------------------------------------------------------------------
+; int ff_pixelutils_sad_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pixelutils_sad_32x32, 4,5,5, src1, stride1, src2, stride2
+ pxor m0, m0
+ mov r4d, 4
+.loop:
+ PROCESS_SAD_32x4_U
+ PROCESS_SAD_32x4_U
+ dec r4d
+ jnz .loop
+
+ movhlps m1, m0
+ paddd m0, m1
+ movd eax, m0
+ RET
+
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_[au]_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
+%macro SAD_XMM_32x32 1
+INIT_XMM sse2
+cglobal pixelutils_sad_%1_32x32, 4,5,3, src1, stride1, src2, stride2
+ pxor m0, m0
+ mov r4d, 4
+.loop:
+ PROCESS_SAD_32x4 %1
+ PROCESS_SAD_32x4 %1
+ dec r4d
+ jnz .loop
+
+ movhlps m1, m0
+ paddd m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+SAD_XMM_32x32 a
+SAD_XMM_32x32 u
+
+%if HAVE_AVX2_EXTERNAL
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal pixelutils_sad_32x32, 4,7,5, src1, stride1, src2, stride2
+ pxor m0, m0
+ mov r4d, 32/4
+ lea r5, [stride1q * 3]
+ lea r6, [stride2q * 3]
+
+.loop:
+ movu m1, [src1q] ; row 0 of pix0
+ movu m2, [src2q] ; row 0 of pix1
+ movu m3, [src1q + stride1q] ; row 1 of pix0
+ movu m4, [src2q + stride2q] ; row 1 of pix1
+
+ psadbw m1, m2
+ psadbw m3, m4
+ paddd m0, m1
+ paddd m0, m3
+
+ movu m1, [src1q + 2 * stride1q] ; row 2 of pix0
+ movu m2, [src2q + 2 * stride2q] ; row 2 of pix1
+ movu m3, [src1q + r5] ; row 3 of pix0
+ movu m4, [src2q + r6] ; row 3 of pix1
+
+ psadbw m1, m2
+ psadbw m3, m4
+ paddd m0, m1
+ paddd m0, m3
+
+ lea src2q, [src2q + 4 * stride2q]
+ lea src1q, [src1q + 4 * stride1q]
+
+ dec r4d
+ jnz .loop
+
+ vextracti128 xm1, m0, 1
+ paddd xm0, xm1
+ pshufd xm1, xm0, 2
+ paddd xm0, xm1
+ movd eax, xm0
+ RET
+
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_[au]_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
+%macro SAD_AVX2_32x32 1
+INIT_YMM avx2
+cglobal pixelutils_sad_%1_32x32, 4,7,3, src1, stride1, src2, stride2
+ pxor m0, m0
+ mov r4d, 32/4
+ lea r5, [stride1q * 3]
+ lea r6, [stride2q * 3]
+
+.loop:
+ mov%1 m1, [src2q] ; row 0 of pix1
+ psadbw m1, [src1q]
+ mov%1 m2, [src2q + stride2q] ; row 1 of pix1
+ psadbw m2, [src1q + stride1q]
+
+ paddd m0, m1
+ paddd m0, m2
+
+ mov%1 m1, [src2q + 2 * stride2q] ; row 2 of pix1
+ psadbw m1, [src1q + 2 * stride1q]
+ mov%1 m2, [src2q + r6] ; row 3 of pix1
+ psadbw m2, [src1q + r5]
+
+ paddd m0, m1
+ paddd m0, m2
+
+ lea src2q, [src2q + 4 * stride2q]
+ lea src1q, [src1q + 4 * stride1q]
+
+ dec r4d
+ jnz .loop
+
+ vextracti128 xm1, m0, 1
+ paddd xm0, xm1
+ pshufd xm1, xm0, 2
+ paddd xm0, xm1
+ movd eax, xm0
+ RET
+%endmacro
+
+SAD_AVX2_32x32 a
+SAD_AVX2_32x32 u
+%endif
diff --git a/media/ffvpx/libavutil/x86/pixelutils.h b/media/ffvpx/libavutil/x86/pixelutils.h
new file mode 100644
index 0000000000..876cf46053
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/pixelutils.h
@@ -0,0 +1,26 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_X86_PIXELUTILS_H
+#define AVUTIL_X86_PIXELUTILS_H
+
+#include "libavutil/pixelutils.h"
+
+void ff_pixelutils_sad_init_x86(av_pixelutils_sad_fn *sad, int aligned);
+
+#endif /* AVUTIL_X86_PIXELUTILS_H */
diff --git a/media/ffvpx/libavutil/x86/pixelutils_init.c b/media/ffvpx/libavutil/x86/pixelutils_init.c
new file mode 100644
index 0000000000..c3c0662414
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/pixelutils_init.c
@@ -0,0 +1,85 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "pixelutils.h"
+#include "cpu.h"
+
+int ff_pixelutils_sad_8x8_mmxext(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+
+int ff_pixelutils_sad_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+int ff_pixelutils_sad_a_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+int ff_pixelutils_sad_u_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+
+int ff_pixelutils_sad_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+int ff_pixelutils_sad_a_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+int ff_pixelutils_sad_u_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+
+int ff_pixelutils_sad_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+int ff_pixelutils_sad_a_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+int ff_pixelutils_sad_u_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+
+void ff_pixelutils_sad_init_x86(av_pixelutils_sad_fn *sad, int aligned)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ // The best way to use SSE2 would be to do 2 SADs in parallel,
+ // but we'd have to modify the pixelutils API to return SIMD functions.
+
+ // It's probably not faster to shuffle data around
+ // to get two lines of 8 pixels into a single 16-byte register,
+ // so just use the MMX 8x8 version even when SSE2 is available.
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ sad[2] = ff_pixelutils_sad_8x8_mmxext;
+ }
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ switch (aligned) {
+ case 0: sad[3] = ff_pixelutils_sad_16x16_sse2; break; // src1 unaligned, src2 unaligned
+ case 1: sad[3] = ff_pixelutils_sad_u_16x16_sse2; break; // src1 aligned, src2 unaligned
+ case 2: sad[3] = ff_pixelutils_sad_a_16x16_sse2; break; // src1 aligned, src2 aligned
+ }
+ }
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ switch (aligned) {
+ case 0: sad[4] = ff_pixelutils_sad_32x32_sse2; break; // src1 unaligned, src2 unaligned
+ case 1: sad[4] = ff_pixelutils_sad_u_32x32_sse2; break; // src1 aligned, src2 unaligned
+ case 2: sad[4] = ff_pixelutils_sad_a_32x32_sse2; break; // src1 aligned, src2 aligned
+ }
+ }
+
+ if (EXTERNAL_AVX2_FAST(cpu_flags)) {
+ switch (aligned) {
+ case 0: sad[4] = ff_pixelutils_sad_32x32_avx2; break; // src1 unaligned, src2 unaligned
+ case 1: sad[4] = ff_pixelutils_sad_u_32x32_avx2; break; // src1 aligned, src2 unaligned
+ case 2: sad[4] = ff_pixelutils_sad_a_32x32_avx2; break; // src1 aligned, src2 aligned
+ }
+ }
+}
diff --git a/media/ffvpx/libavutil/x86/timer.h b/media/ffvpx/libavutil/x86/timer.h
new file mode 100644
index 0000000000..4d1e88def0
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/timer.h
@@ -0,0 +1,50 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_X86_TIMER_H
+#define AVUTIL_X86_TIMER_H
+
+#include <stdint.h>
+
+#if HAVE_INLINE_ASM
+
+#define FF_TIMER_UNITS "decicycles"
+#define AV_READ_TIME read_time
+
+static inline uint64_t read_time(void)
+{
+ uint32_t a, d;
+ __asm__ volatile(
+#if ARCH_X86_64 || defined(__SSE2__)
+ "lfence \n\t"
+#endif
+ "rdtsc \n\t"
+ : "=a" (a), "=d" (d));
+ return ((uint64_t)d << 32) + a;
+}
+
+#elif HAVE_RDTSC
+
+#include <intrin.h>
+#define AV_READ_TIME __rdtsc
+
+#endif /* HAVE_INLINE_ASM */
+
+#endif /* AVUTIL_X86_TIMER_H */
diff --git a/media/ffvpx/libavutil/x86/x86inc.asm b/media/ffvpx/libavutil/x86/x86inc.asm
new file mode 100644
index 0000000000..251ee797de
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/x86inc.asm
@@ -0,0 +1,1716 @@
+;*****************************************************************************
+;* x86inc.asm: x264asm abstraction layer
+;*****************************************************************************
+;* Copyright (C) 2005-2018 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;* Henrik Gramner <henrik@gramner.com>
+;* Anton Mitrofanov <BugMaster@narod.ru>
+;* Fiona Glaser <fiona@x264.com>
+;*
+;* Permission to use, copy, modify, and/or distribute this software for any
+;* purpose with or without fee is hereby granted, provided that the above
+;* copyright notice and this permission notice appear in all copies.
+;*
+;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+;*****************************************************************************
+
+; This is a header file for the x264ASM assembly language, which uses
+; NASM/YASM syntax combined with a large number of macros to provide easy
+; abstraction between different calling conventions (x86_32, win64, linux64).
+; It also has various other useful features to simplify writing the kind of
+; DSP functions that are most often used in x264.
+
+; Unlike the rest of x264, this file is available under an ISC license, as it
+; has significant usefulness outside of x264 and we want it to be available
+; to the largest audience possible. Of course, if you modify it for your own
+; purposes to add a new feature, we strongly encourage contributing a patch
+; as this feature might be useful for others as well. Send patches or ideas
+; to x264-devel@videolan.org .
+
+%ifndef private_prefix
+ %define private_prefix x264
+%endif
+
+%ifndef public_prefix
+ %define public_prefix private_prefix
+%endif
+
+%if HAVE_ALIGNED_STACK
+ %define STACK_ALIGNMENT 16
+%endif
+%ifndef STACK_ALIGNMENT
+ %if ARCH_X86_64
+ %define STACK_ALIGNMENT 16
+ %else
+ %define STACK_ALIGNMENT 4
+ %endif
+%endif
+
+%define WIN64 0
+%define UNIX64 0
+%if ARCH_X86_64
+ %ifidn __OUTPUT_FORMAT__,win32
+ %define WIN64 1
+ %elifidn __OUTPUT_FORMAT__,win64
+ %define WIN64 1
+ %elifidn __OUTPUT_FORMAT__,x64
+ %define WIN64 1
+ %else
+ %define UNIX64 1
+ %endif
+%endif
+
+%define FORMAT_ELF 0
+%ifidn __OUTPUT_FORMAT__,elf
+ %define FORMAT_ELF 1
+%elifidn __OUTPUT_FORMAT__,elf32
+ %define FORMAT_ELF 1
+%elifidn __OUTPUT_FORMAT__,elf64
+ %define FORMAT_ELF 1
+%endif
+
+%ifdef PREFIX
+ %define mangle(x) _ %+ x
+%else
+ %define mangle(x) x
+%endif
+
+; aout does not support align=
+; NOTE: This section is out of sync with x264, in order to
+; keep supporting OS/2.
+%macro SECTION_RODATA 0-1 16
+ %ifidn __OUTPUT_FORMAT__,aout
+ SECTION .text
+ %elifidn __OUTPUT_FORMAT__,coff
+ SECTION .text
+ %elifidn __OUTPUT_FORMAT__,win32
+ SECTION .rdata align=%1
+ %elif WIN64
+ SECTION .rdata align=%1
+ %else
+ SECTION .rodata align=%1
+ %endif
+%endmacro
+
+%if WIN64
+ %define PIC
+%elif ARCH_X86_64 == 0
+; x86_32 doesn't require PIC.
+; Some distros prefer shared objects to be PIC, but nothing breaks if
+; the code contains a few textrels, so we'll skip that complexity.
+ %undef PIC
+%endif
+%ifdef PIC
+ default rel
+%endif
+
+%macro CPUNOP 1
+ %if HAVE_CPUNOP
+ CPU %1
+ %endif
+%endmacro
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used. pushes callee-saved regs if needed.
+; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
+; %4 = (optional) stack size to be allocated. The stack will be aligned before
+; allocating the specified stack size. If the required stack alignment is
+; larger than the known stack alignment the stack will be manually aligned
+; and an extra register will be allocated to hold the original stack
+; pointer (to not invalidate r0m etc.). To prevent the use of an extra
+; register as stack pointer, request a negative stack size.
+; %4+/%5+ = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,7,0x40, dst, src, tmp
+; declares a function (foo) that automatically loads two arguments (dst and
+; src) into registers, uses one additional register (tmp) plus 7 vector
+; registers (m0-m6) and allocates 0x40 bytes of stack space.
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need a more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE, and returns.
+
+; REP_RET:
+; Use this instead of RET if it's a branch target.
+
+; registers:
+; rN and rNq are the native-size register holding function argument N
+; rNd, rNw, rNb are dword, word, and byte size
+; rNh is the high 8 bits of the word size
+; rNm is the original location of arg N (a register or on the stack), dword
+; rNmp is native size
+
+%macro DECLARE_REG 2-3
+ %define r%1q %2
+ %define r%1d %2d
+ %define r%1w %2w
+ %define r%1b %2b
+ %define r%1h %2h
+ %define %2q %2
+ %if %0 == 2
+ %define r%1m %2d
+ %define r%1mp %2
+ %elif ARCH_X86_64 ; memory
+ %define r%1m [rstk + stack_offset + %3]
+ %define r%1mp qword r %+ %1 %+ m
+ %else
+ %define r%1m [rstk + stack_offset + %3]
+ %define r%1mp dword r %+ %1 %+ m
+ %endif
+ %define r%1 %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 3
+ %define r%1q r%1
+ %define e%1q r%1
+ %define r%1d e%1
+ %define e%1d e%1
+ %define r%1w %1
+ %define e%1w %1
+ %define r%1h %3
+ %define e%1h %3
+ %define r%1b %2
+ %define e%1b %2
+ %if ARCH_X86_64 == 0
+ %define r%1 e%1
+ %endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al, ah
+DECLARE_REG_SIZE bx, bl, bh
+DECLARE_REG_SIZE cx, cl, ch
+DECLARE_REG_SIZE dx, dl, dh
+DECLARE_REG_SIZE si, sil, null
+DECLARE_REG_SIZE di, dil, null
+DECLARE_REG_SIZE bp, bpl, null
+
+; t# defines for when per-arch register allocation is more complex than just function arguments
+
+%macro DECLARE_REG_TMP 1-*
+ %assign %%i 0
+ %rep %0
+ CAT_XDEFINE t, %%i, r%1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro DECLARE_REG_TMP_SIZE 0-*
+ %rep %0
+ %define t%1q t%1 %+ q
+ %define t%1d t%1 %+ d
+ %define t%1w t%1 %+ w
+ %define t%1h t%1 %+ h
+ %define t%1b t%1 %+ b
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
+
+%if ARCH_X86_64
+ %define gprsize 8
+%else
+ %define gprsize 4
+%endif
+
+%macro PUSH 1
+ push %1
+ %ifidn rstk, rsp
+ %assign stack_offset stack_offset+gprsize
+ %endif
+%endmacro
+
+%macro POP 1
+ pop %1
+ %ifidn rstk, rsp
+ %assign stack_offset stack_offset-gprsize
+ %endif
+%endmacro
+
+%macro PUSH_IF_USED 1-*
+ %rep %0
+ %if %1 < regs_used
+ PUSH r%1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro POP_IF_USED 1-*
+ %rep %0
+ %if %1 < regs_used
+ pop r%1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro LOAD_IF_USED 1-*
+ %rep %0
+ %if %1 < num_args
+ mov r%1, r %+ %1 %+ mp
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro SUB 2
+ sub %1, %2
+ %ifidn %1, rstk
+ %assign stack_offset stack_offset+(%2)
+ %endif
+%endmacro
+
+%macro ADD 2
+ add %1, %2
+ %ifidn %1, rstk
+ %assign stack_offset stack_offset-(%2)
+ %endif
+%endmacro
+
+%macro movifnidn 2
+ %ifnidn %1, %2
+ mov %1, %2
+ %endif
+%endmacro
+
+%macro movsxdifnidn 2
+ %ifnidn %1, %2
+ movsxd %1, %2
+ %endif
+%endmacro
+
+%macro ASSERT 1
+ %if (%1) == 0
+ %error assertion ``%1'' failed
+ %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-*
+ %ifdef n_arg_names
+ %assign %%i 0
+ %rep n_arg_names
+ CAT_UNDEF arg_name %+ %%i, q
+ CAT_UNDEF arg_name %+ %%i, d
+ CAT_UNDEF arg_name %+ %%i, w
+ CAT_UNDEF arg_name %+ %%i, h
+ CAT_UNDEF arg_name %+ %%i, b
+ CAT_UNDEF arg_name %+ %%i, m
+ CAT_UNDEF arg_name %+ %%i, mp
+ CAT_UNDEF arg_name, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+
+ %xdefine %%stack_offset stack_offset
+ %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
+ %assign %%i 0
+ %rep %0
+ %xdefine %1q r %+ %%i %+ q
+ %xdefine %1d r %+ %%i %+ d
+ %xdefine %1w r %+ %%i %+ w
+ %xdefine %1h r %+ %%i %+ h
+ %xdefine %1b r %+ %%i %+ b
+ %xdefine %1m r %+ %%i %+ m
+ %xdefine %1mp r %+ %%i %+ mp
+ CAT_XDEFINE arg_name, %%i, %1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+ %xdefine stack_offset %%stack_offset
+ %assign n_arg_names %0
+%endmacro
+
+%define required_stack_alignment ((mmsize + 15) & ~15)
+%define vzeroupper_required (mmsize > 16 && (ARCH_X86_64 == 0 || xmm_regs_used > 16 || notcpuflag(avx512)))
+%define high_mm_regs (16*cpuflag(avx512))
+
+%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
+ %ifnum %1
+ %if %1 != 0
+ %assign %%pad 0
+ %assign stack_size %1
+ %if stack_size < 0
+ %assign stack_size -stack_size
+ %endif
+ %if WIN64
+ %assign %%pad %%pad + 32 ; shadow space
+ %if mmsize != 8
+ %assign xmm_regs_used %2
+ %if xmm_regs_used > 8
+ %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
+ %endif
+ %endif
+ %endif
+ %if required_stack_alignment <= STACK_ALIGNMENT
+ ; maintain the current stack alignment
+ %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+ SUB rsp, stack_size_padded
+ %else
+ %assign %%reg_num (regs_used - 1)
+ %xdefine rstk r %+ %%reg_num
+ ; align stack, and save original stack location directly above
+ ; it, i.e. in [rsp+stack_size_padded], so we can restore the
+ ; stack in a single instruction (i.e. mov rsp, rstk or mov
+ ; rsp, [rsp+stack_size_padded])
+ %if %1 < 0 ; need to store rsp on stack
+ %xdefine rstkm [rsp + stack_size + %%pad]
+ %assign %%pad %%pad + gprsize
+ %else ; can keep rsp in rstk during whole function
+ %xdefine rstkm rstk
+ %endif
+ %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
+ mov rstk, rsp
+ and rsp, ~(required_stack_alignment-1)
+ sub rsp, stack_size_padded
+ movifnidn rstkm, rstk
+ %endif
+ WIN64_PUSH_XMM
+ %endif
+ %endif
+%endmacro
+
+%macro SETUP_STACK_POINTER 1
+ %ifnum %1
+ %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
+ %if %1 > 0
+ ; Reserve an additional register for storing the original stack pointer, but avoid using
+ ; eax/rax for this purpose since it can potentially get overwritten as a return value.
+ %assign regs_used (regs_used + 1)
+ %if ARCH_X86_64 && regs_used == 7
+ %assign regs_used 8
+ %elif ARCH_X86_64 == 0 && regs_used == 1
+ %assign regs_used 2
+ %endif
+ %endif
+ %if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3
+ ; Ensure that we don't clobber any registers containing arguments. For UNIX64 we also preserve r6 (rax)
+ ; since it's used as a hidden argument in vararg functions to specify the number of vector registers used.
+ %assign regs_used 5 + UNIX64 * 3
+ %endif
+ %endif
+ %endif
+%endmacro
+
+%if WIN64 ; Windows x64 ;=================================================
+
+DECLARE_REG 0, rcx
+DECLARE_REG 1, rdx
+DECLARE_REG 2, R8
+DECLARE_REG 3, R9
+DECLARE_REG 4, R10, 40
+DECLARE_REG 5, R11, 48
+DECLARE_REG 6, rax, 56
+DECLARE_REG 7, rdi, 64
+DECLARE_REG 8, rsi, 72
+DECLARE_REG 9, rbx, 80
+DECLARE_REG 10, rbp, 88
+DECLARE_REG 11, R14, 96
+DECLARE_REG 12, R15, 104
+DECLARE_REG 13, R12, 112
+DECLARE_REG 14, R13, 120
+
+%macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ SETUP_STACK_POINTER %4
+ ASSERT regs_used <= 15
+ PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
+ ALLOC_STACK %4, %3
+ %if mmsize != 8 && stack_size == 0
+ WIN64_SPILL_XMM %3
+ %endif
+ LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ %if %0 > 4
+ %ifnum %4
+ DEFINE_ARGS %5
+ %else
+ DEFINE_ARGS %4, %5
+ %endif
+ %elifnnum %4
+ DEFINE_ARGS %4
+ %endif
+%endmacro
+
+%macro WIN64_PUSH_XMM 0
+ ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
+ %if xmm_regs_used > 6 + high_mm_regs
+ movaps [rstk + stack_offset + 8], xmm6
+ %endif
+ %if xmm_regs_used > 7 + high_mm_regs
+ movaps [rstk + stack_offset + 24], xmm7
+ %endif
+ %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
+ %if %%xmm_regs_on_stack > 0
+ %assign %%i 8
+ %rep %%xmm_regs_on_stack
+ movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+%endmacro
+
+%macro WIN64_SPILL_XMM 1
+ %assign xmm_regs_used %1
+ ASSERT xmm_regs_used <= 16 + high_mm_regs
+ %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
+ %if %%xmm_regs_on_stack > 0
+ ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
+ %assign %%pad %%xmm_regs_on_stack*16 + 32
+ %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+ SUB rsp, stack_size_padded
+ %endif
+ WIN64_PUSH_XMM
+%endmacro
+
+%macro WIN64_RESTORE_XMM_INTERNAL 0
+ %assign %%pad_size 0
+ %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
+ %if %%xmm_regs_on_stack > 0
+ %assign %%i xmm_regs_used - high_mm_regs
+ %rep %%xmm_regs_on_stack
+ %assign %%i %%i-1
+ movaps xmm %+ %%i, [rsp + (%%i-8)*16 + stack_size + 32]
+ %endrep
+ %endif
+ %if stack_size_padded > 0
+ %if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %assign %%pad_size stack_size_padded
+ %endif
+ %endif
+ %if xmm_regs_used > 7 + high_mm_regs
+ movaps xmm7, [rsp + stack_offset - %%pad_size + 24]
+ %endif
+ %if xmm_regs_used > 6 + high_mm_regs
+ movaps xmm6, [rsp + stack_offset - %%pad_size + 8]
+ %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM 0
+ WIN64_RESTORE_XMM_INTERNAL
+ %assign stack_offset (stack_offset-stack_size_padded)
+ %assign stack_size_padded 0
+ %assign xmm_regs_used 0
+%endmacro
+
+%define has_epilogue regs_used > 7 || stack_size > 0 || vzeroupper_required || xmm_regs_used > 6+high_mm_regs
+
+%macro RET 0
+ WIN64_RESTORE_XMM_INTERNAL
+ POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
+ %if vzeroupper_required
+ vzeroupper
+ %endif
+ AUTO_REP_RET
+%endmacro
+
+%elif ARCH_X86_64 ; *nix x64 ;=============================================
+
+DECLARE_REG 0, rdi
+DECLARE_REG 1, rsi
+DECLARE_REG 2, rdx
+DECLARE_REG 3, rcx
+DECLARE_REG 4, R8
+DECLARE_REG 5, R9
+DECLARE_REG 6, rax, 8
+DECLARE_REG 7, R10, 16
+DECLARE_REG 8, R11, 24
+DECLARE_REG 9, rbx, 32
+DECLARE_REG 10, rbp, 40
+DECLARE_REG 11, R14, 48
+DECLARE_REG 12, R15, 56
+DECLARE_REG 13, R12, 64
+DECLARE_REG 14, R13, 72
+
+%macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ %assign xmm_regs_used %3
+ ASSERT regs_used >= num_args
+ SETUP_STACK_POINTER %4
+ ASSERT regs_used <= 15
+ PUSH_IF_USED 9, 10, 11, 12, 13, 14
+ ALLOC_STACK %4
+ LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
+ %if %0 > 4
+ %ifnum %4
+ DEFINE_ARGS %5
+ %else
+ DEFINE_ARGS %4, %5
+ %endif
+ %elifnnum %4
+ DEFINE_ARGS %4
+ %endif
+%endmacro
+
+%define has_epilogue regs_used > 9 || stack_size > 0 || vzeroupper_required
+
+%macro RET 0
+ %if stack_size_padded > 0
+ %if required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %endif
+ %endif
+ POP_IF_USED 14, 13, 12, 11, 10, 9
+ %if vzeroupper_required
+ vzeroupper
+ %endif
+ AUTO_REP_RET
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+DECLARE_REG 0, eax, 4
+DECLARE_REG 1, ecx, 8
+DECLARE_REG 2, edx, 12
+DECLARE_REG 3, ebx, 16
+DECLARE_REG 4, esi, 20
+DECLARE_REG 5, edi, 24
+DECLARE_REG 6, ebp, 28
+%define rsp esp
+
+%macro DECLARE_ARG 1-*
+ %rep %0
+ %define r%1m [rstk + stack_offset + 4*%1 + 4]
+ %define r%1mp dword r%1m
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
+
+%macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ %if num_args > 7
+ %assign num_args 7
+ %endif
+ %if regs_used > 7
+ %assign regs_used 7
+ %endif
+ SETUP_STACK_POINTER %4
+ ASSERT regs_used <= 7
+ PUSH_IF_USED 3, 4, 5, 6
+ ALLOC_STACK %4
+ LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
+ %if %0 > 4
+ %ifnum %4
+ DEFINE_ARGS %5
+ %else
+ DEFINE_ARGS %4, %5
+ %endif
+ %elifnnum %4
+ DEFINE_ARGS %4
+ %endif
+%endmacro
+
+%define has_epilogue regs_used > 3 || stack_size > 0 || vzeroupper_required
+
+%macro RET 0
+ %if stack_size_padded > 0
+ %if required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %endif
+ %endif
+ POP_IF_USED 6, 5, 4, 3
+ %if vzeroupper_required
+ vzeroupper
+ %endif
+ AUTO_REP_RET
+%endmacro
+
+%endif ;======================================================================
+
+%if WIN64 == 0
+ %macro WIN64_SPILL_XMM 1
+ %endmacro
+ %macro WIN64_RESTORE_XMM 0
+ %endmacro
+ %macro WIN64_PUSH_XMM 0
+ %endmacro
+%endif
+
+; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
+; a branch or a branch target. So switch to a 2-byte form of ret in that case.
+; We can automatically detect "follows a branch", but not a branch target.
+; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
+%macro REP_RET 0
+ %if has_epilogue || cpuflag(ssse3)
+ RET
+ %else
+ rep ret
+ %endif
+ annotate_function_size
+%endmacro
+
+%define last_branch_adr $$
+%macro AUTO_REP_RET 0
+ %if notcpuflag(ssse3)
+ times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr.
+ %endif
+ ret
+ annotate_function_size
+%endmacro
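+
+; e.g. (illustrative; the label name is arbitrary):
+;     jz .end            ; some early-out path jumps straight to the return
+;     ...
+; .end:
+;     REP_RET            ; this return is a branch target, so use REP_RET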
+
+%macro BRANCH_INSTR 0-*
+ %rep %0
+ %macro %1 1-2 %1
+ %2 %1
+ %if notcpuflag(ssse3)
+ %%branch_instr equ $
+ %xdefine last_branch_adr %%branch_instr
+ %endif
+ %endmacro
+ %rotate 1
+ %endrep
+%endmacro
+
+BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp
+
+%macro TAIL_CALL 2 ; callee, is_nonadjacent
+ %if has_epilogue
+ call %1
+ RET
+ %elif %2
+ jmp %1
+ %endif
+ annotate_function_size
+%endmacro
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Begin a function.
+; Applies any symbol mangling needed for C linkage, and sets up a define such that
+; subsequent uses of the function name automatically refer to the mangled version.
+; Appends cpuflags to the function name if cpuflags has been specified.
+; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
+; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
+%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
+ cglobal_internal 1, %1 %+ SUFFIX, %2
+%endmacro
+%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
+ cglobal_internal 0, %1 %+ SUFFIX, %2
+%endmacro
+%macro cglobal_internal 2-3+
+ annotate_function_size
+ %if %1
+ %xdefine %%FUNCTION_PREFIX private_prefix
+ %xdefine %%VISIBILITY hidden
+ %else
+ %xdefine %%FUNCTION_PREFIX public_prefix
+ %xdefine %%VISIBILITY
+ %endif
+ %ifndef cglobaled_%2
+ %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
+ %xdefine %2.skip_prologue %2 %+ .skip_prologue
+ CAT_XDEFINE cglobaled_, %2, 1
+ %endif
+ %xdefine current_function %2
+ %xdefine current_function_section __SECT__
+ %if FORMAT_ELF
+ global %2:function %%VISIBILITY
+ %else
+ global %2
+ %endif
+ align function_align
+ %2:
+ RESET_MM_PERMUTATION ; needed for x86-64, also makes disassembly somewhat nicer
+ %xdefine rstk rsp ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
+ %assign stack_offset 0 ; stack pointer offset relative to the return address
+ %assign stack_size 0 ; amount of stack space that can be freely used inside a function
+ %assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
+ %assign xmm_regs_used 0 ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64 and vzeroupper
+ %ifnidn %3, ""
+ PROLOGUE %3
+ %endif
+%endmacro
+
+; Create a global symbol from a local label with the correct name mangling and type
+%macro cglobal_label 1
+ %if FORMAT_ELF
+ global current_function %+ %1:function hidden
+ %else
+ global current_function %+ %1
+ %endif
+ %1:
+%endmacro
+
+%macro cextern 1
+ %xdefine %1 mangle(private_prefix %+ _ %+ %1)
+ CAT_XDEFINE cglobaled_, %1, 1
+ extern %1
+%endmacro
+
+; like cextern, but without the prefix
+%macro cextern_naked 1
+ %ifdef PREFIX
+ %xdefine %1 mangle(%1)
+ %endif
+ CAT_XDEFINE cglobaled_, %1, 1
+ extern %1
+%endmacro
+
+%macro const 1-2+
+ %xdefine %1 mangle(private_prefix %+ _ %+ %1)
+ %if FORMAT_ELF
+ global %1:data hidden
+ %else
+ global %1
+ %endif
+ %1: %2
+%endmacro
+
+; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
+%if FORMAT_ELF
+ [SECTION .note.GNU-stack noalloc noexec nowrite progbits]
+%endif
+
+; Tell debuggers how large the function was.
+; This may be invoked multiple times per function; we rely on later instances overriding earlier ones.
+; This is invoked by RET and similar macros, and cglobal also does it for the previous function;
+; however, if the last function in a source file doesn't use any of the standard epilogue macros,
+; its size might be left unspecified.
+%macro annotate_function_size 0
+ %ifdef __YASM_VER__
+ %ifdef current_function
+ %if FORMAT_ELF
+ current_function_section
+ %%ecf equ $
+ size current_function %%ecf - current_function
+ __SECT__
+ %endif
+ %endif
+ %endif
+%endmacro
+
+; cpuflags
+
+%assign cpuflags_mmx (1<<0)
+%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
+%assign cpuflags_3dnow (1<<2) | cpuflags_mmx
+%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
+%assign cpuflags_sse (1<<4) | cpuflags_mmx2
+%assign cpuflags_sse2 (1<<5) | cpuflags_sse
+%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
+%assign cpuflags_lzcnt (1<<7) | cpuflags_sse2
+%assign cpuflags_sse3 (1<<8) | cpuflags_sse2
+%assign cpuflags_ssse3 (1<<9) | cpuflags_sse3
+%assign cpuflags_sse4 (1<<10)| cpuflags_ssse3
+%assign cpuflags_sse42 (1<<11)| cpuflags_sse4
+%assign cpuflags_aesni (1<<12)| cpuflags_sse42
+%assign cpuflags_avx (1<<13)| cpuflags_sse42
+%assign cpuflags_xop (1<<14)| cpuflags_avx
+%assign cpuflags_fma4 (1<<15)| cpuflags_avx
+%assign cpuflags_fma3 (1<<16)| cpuflags_avx
+%assign cpuflags_bmi1 (1<<17)| cpuflags_avx|cpuflags_lzcnt
+%assign cpuflags_bmi2 (1<<18)| cpuflags_bmi1
+%assign cpuflags_avx2 (1<<19)| cpuflags_fma3|cpuflags_bmi2
+%assign cpuflags_avx512 (1<<20)| cpuflags_avx2 ; F, CD, BW, DQ, VL
+%assign cpuflags_avx512icl (1<<25)| cpuflags_avx512
+
+%assign cpuflags_cache32 (1<<21)
+%assign cpuflags_cache64 (1<<22)
+%assign cpuflags_aligned (1<<23) ; not a cpu feature, but a function variant
+%assign cpuflags_atom (1<<24)
+
+; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
+%define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
+%define notcpuflag(x) (cpuflag(x) ^ 1)
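+
+; e.g. (illustrative, loosely mirroring the ABS1 wrapper in x86util.asm):
+;     %if cpuflag(ssse3)
+;         pabsw  m0, m0          ; single-instruction path
+;     %else
+;         pxor   m1, m1          ; pre-ssse3 fallback: m0 = max(m0, -m0)
+;         psubw  m1, m0
+;         pmaxsw m0, m1
+;     %endif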
+
+; Takes an arbitrary number of cpuflags from the above list.
+; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
+; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
+%macro INIT_CPUFLAGS 0-*
+ %xdefine SUFFIX
+ %undef cpuname
+ %assign cpuflags 0
+
+ %if %0 >= 1
+ %rep %0
+ %ifdef cpuname
+ %xdefine cpuname cpuname %+ _%1
+ %else
+ %xdefine cpuname %1
+ %endif
+ %assign cpuflags cpuflags | cpuflags_%1
+ %rotate 1
+ %endrep
+ %xdefine SUFFIX _ %+ cpuname
+
+ %if cpuflag(avx)
+ %assign avx_enabled 1
+ %endif
+ %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
+ %define mova movaps
+ %define movu movups
+ %define movnta movntps
+ %endif
+ %if cpuflag(aligned)
+ %define movu mova
+ %elif cpuflag(sse3) && notcpuflag(ssse3)
+ %define movu lddqu
+ %endif
+ %endif
+
+ %if ARCH_X86_64 || cpuflag(sse2)
+ CPUNOP amdnop
+ %else
+ CPUNOP basicnop
+ %endif
+%endmacro
+
+; Merge mmx, sse*, and avx*
+; m# is a simd register of the currently selected size
+; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
+; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
+; zm# is the corresponding zmm register if mmsize >= 64, otherwise the same as m#
+; (All 4 remain in sync through SWAP.)
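+; e.g. (illustrative): right after "INIT_YMM avx2", "mova m0, [r0]" is a
+; 256-bit load into ymm0, while xm0 still names xmm0 for cases where only
+; the low 128 bits are needed.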
+
+%macro CAT_XDEFINE 3
+ %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2
+ %undef %1%2
+%endmacro
+
+%macro DEFINE_MMREGS 1 ; mmtype
+ %assign %%prev_mmregs 0
+ %ifdef num_mmregs
+ %assign %%prev_mmregs num_mmregs
+ %endif
+
+ %assign num_mmregs 8
+ %if ARCH_X86_64 && mmsize >= 16
+ %assign num_mmregs 16
+ %if cpuflag(avx512) || mmsize == 64
+ %assign num_mmregs 32
+ %endif
+ %endif
+
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, %1 %+ %%i
+ CAT_XDEFINE nn%1, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %if %%prev_mmregs > num_mmregs
+ %rep %%prev_mmregs - num_mmregs
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nn %+ mmtype, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+ %xdefine mmtype %1
+%endmacro
+
+; Prefer registers 16-31 over 0-15 to avoid having to use vzeroupper
+%macro AVX512_MM_PERMUTATION 0-1 0 ; start_reg
+ %if ARCH_X86_64 && cpuflag(avx512)
+ %assign %%i %1
+ %rep 16-%1
+ %assign %%i_high %%i+16
+ SWAP %%i, %%i_high
+ %assign %%i %%i+1
+ %endrep
+ %endif
+%endmacro
+
+%macro INIT_MMX 0-1+
+ %assign avx_enabled 0
+ %define RESET_MM_PERMUTATION INIT_MMX %1
+ %define mmsize 8
+ %define mova movq
+ %define movu movq
+ %define movh movd
+ %define movnta movntq
+ INIT_CPUFLAGS %1
+ DEFINE_MMREGS mm
+%endmacro
+
+%macro INIT_XMM 0-1+
+ %assign avx_enabled 0
+ %define RESET_MM_PERMUTATION INIT_XMM %1
+ %define mmsize 16
+ %define mova movdqa
+ %define movu movdqu
+ %define movh movq
+ %define movnta movntdq
+ INIT_CPUFLAGS %1
+ DEFINE_MMREGS xmm
+ %if WIN64
+ AVX512_MM_PERMUTATION 6 ; Swap callee-saved registers with volatile registers
+ %endif
+%endmacro
+
+%macro INIT_YMM 0-1+
+ %assign avx_enabled 1
+ %define RESET_MM_PERMUTATION INIT_YMM %1
+ %define mmsize 32
+ %define mova movdqa
+ %define movu movdqu
+ %undef movh
+ %define movnta movntdq
+ INIT_CPUFLAGS %1
+ DEFINE_MMREGS ymm
+ AVX512_MM_PERMUTATION
+%endmacro
+
+%macro INIT_ZMM 0-1+
+ %assign avx_enabled 1
+ %define RESET_MM_PERMUTATION INIT_ZMM %1
+ %define mmsize 64
+ %define mova movdqa
+ %define movu movdqu
+ %undef movh
+ %define movnta movntdq
+ INIT_CPUFLAGS %1
+ DEFINE_MMREGS zmm
+ AVX512_MM_PERMUTATION
+%endmacro
+
+INIT_XMM
+
+%macro DECLARE_MMCAST 1
+ %define mmmm%1 mm%1
+ %define mmxmm%1 mm%1
+ %define mmymm%1 mm%1
+ %define mmzmm%1 mm%1
+ %define xmmmm%1 mm%1
+ %define xmmxmm%1 xmm%1
+ %define xmmymm%1 xmm%1
+ %define xmmzmm%1 xmm%1
+ %define ymmmm%1 mm%1
+ %define ymmxmm%1 xmm%1
+ %define ymmymm%1 ymm%1
+ %define ymmzmm%1 ymm%1
+ %define zmmmm%1 mm%1
+ %define zmmxmm%1 xmm%1
+ %define zmmymm%1 ymm%1
+ %define zmmzmm%1 zmm%1
+ %define xm%1 xmm %+ m%1
+ %define ym%1 ymm %+ m%1
+ %define zm%1 zmm %+ m%1
+%endmacro
+
+%assign i 0
+%rep 32
+ DECLARE_MMCAST i
+ %assign i i+1
+%endrep
+
+; I often want to use macros that permute their arguments. e.g. there's no
+; efficient way to implement butterfly or transpose or dct without swapping some
+; arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
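+;
+; e.g. (illustrative):
+;     SWAP 0, 1          ; exchange the names m0 and m1; emits no instructions
+;     mova m0, m2        ; now writes to the register previously called m1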
+
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+ %rep %0/2
+ %xdefine %%tmp%2 m%2
+ %rotate 2
+ %endrep
+ %rep %0/2
+ %xdefine m%1 %%tmp%2
+ CAT_XDEFINE nn, m%1, %1
+ %rotate 2
+ %endrep
+%endmacro
+
+%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
+ %ifnum %1 ; SWAP 0, 1, ...
+ SWAP_INTERNAL_NUM %1, %2
+ %else ; SWAP m0, m1, ...
+ SWAP_INTERNAL_NAME %1, %2
+ %endif
+%endmacro
+
+%macro SWAP_INTERNAL_NUM 2-*
+ %rep %0-1
+ %xdefine %%tmp m%1
+ %xdefine m%1 m%2
+ %xdefine m%2 %%tmp
+ CAT_XDEFINE nn, m%1, %1
+ CAT_XDEFINE nn, m%2, %2
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro SWAP_INTERNAL_NAME 2-*
+ %xdefine %%args nn %+ %1
+ %rep %0-1
+ %xdefine %%args %%args, nn %+ %2
+ %rotate 1
+ %endrep
+ SWAP_INTERNAL_NUM %%args
+%endmacro
+
+; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
+; calls to that function will automatically load the permutation, so values can
+; be returned in mmregs.
+%macro SAVE_MM_PERMUTATION 0-1
+ %if %0
+ %xdefine %%f %1_m
+ %else
+ %xdefine %%f current_function %+ _m
+ %endif
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE %%f, %%i, m %+ %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 1 ; name to load from
+ %ifdef %1_m0
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, %1_m %+ %%i
+ CAT_XDEFINE nn, m %+ %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+%endmacro
+
+; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
+%macro call 1
+ %ifid %1
+ call_internal %1 %+ SUFFIX, %1
+ %else
+ call %1
+ %endif
+%endmacro
+%macro call_internal 2
+ %xdefine %%i %2
+ %ifndef cglobaled_%2
+ %ifdef cglobaled_%1
+ %xdefine %%i %1
+ %endif
+ %endif
+ call %%i
+ LOAD_MM_PERMUTATION %%i
+%endmacro
+
+; Substitutions that reduce instruction size but are functionally equivalent
+%macro add 2
+ %ifnum %2
+ %if %2==128
+ sub %1, -128
+ %else
+ add %1, %2
+ %endif
+ %else
+ add %1, %2
+ %endif
+%endmacro
+
+%macro sub 2
+ %ifnum %2
+ %if %2==128
+ add %1, -128
+ %else
+ sub %1, %2
+ %endif
+ %else
+ sub %1, %2
+ %endif
+%endmacro
+
+;=============================================================================
+; AVX abstraction layer
+;=============================================================================
+
+%assign i 0
+%rep 32
+ %if i < 8
+ CAT_XDEFINE sizeofmm, i, 8
+ CAT_XDEFINE regnumofmm, i, i
+ %endif
+ CAT_XDEFINE sizeofxmm, i, 16
+ CAT_XDEFINE sizeofymm, i, 32
+ CAT_XDEFINE sizeofzmm, i, 64
+ CAT_XDEFINE regnumofxmm, i, i
+ CAT_XDEFINE regnumofymm, i, i
+ CAT_XDEFINE regnumofzmm, i, i
+ %assign i i+1
+%endrep
+%undef i
+
+%macro CHECK_AVX_INSTR_EMU 3-*
+ %xdefine %%opcode %1
+ %xdefine %%dst %2
+ %rep %0-2
+ %ifidn %%dst, %3
+ %error non-avx emulation of ``%%opcode'' is not supported
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+;%1 == instruction
+;%2 == minimal instruction set
+;%3 == 1 if float, 0 if int
+;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
+;%6+: operands
+%macro RUN_AVX_INSTR 6-9+
+ %ifnum sizeof%7
+ %assign __sizeofreg sizeof%7
+ %elifnum sizeof%6
+ %assign __sizeofreg sizeof%6
+ %else
+ %assign __sizeofreg mmsize
+ %endif
+ %assign __emulate_avx 0
+ %if avx_enabled && __sizeofreg >= 16
+ %xdefine __instr v%1
+ %else
+ %xdefine __instr %1
+ %if %0 >= 8+%4
+ %assign __emulate_avx 1
+ %endif
+ %endif
+ %ifnidn %2, fnord
+ %ifdef cpuname
+ %if notcpuflag(%2)
+ %error use of ``%1'' %2 instruction in cpuname function: current_function
+ %elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
+ %error use of ``%1'' sse2 instruction in cpuname function: current_function
+ %endif
+ %endif
+ %endif
+
+ %if __emulate_avx
+ %xdefine __src1 %7
+ %xdefine __src2 %8
+ %if %5 && %4 == 0
+ %ifnidn %6, %7
+ %ifidn %6, %8
+ %xdefine __src1 %8
+ %xdefine __src2 %7
+ %elifnnum sizeof%8
+ ; 3-operand AVX instructions with a memory arg can only have it in src2,
+ ; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
+ ; So, if the instruction is commutative with a memory arg, swap them.
+ %xdefine __src1 %8
+ %xdefine __src2 %7
+ %endif
+ %endif
+ %endif
+ %ifnidn %6, __src1
+ %if %0 >= 9
+ CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, __src2, %9
+ %else
+ CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, __src2
+ %endif
+ %if __sizeofreg == 8
+ MOVQ %6, __src1
+ %elif %3
+ MOVAPS %6, __src1
+ %else
+ MOVDQA %6, __src1
+ %endif
+ %endif
+ %if %0 >= 9
+ %1 %6, __src2, %9
+ %else
+ %1 %6, __src2
+ %endif
+ %elif %0 >= 9
+ __instr %6, %7, %8, %9
+ %elif %0 == 8
+ __instr %6, %7, %8
+ %elif %0 == 7
+ __instr %6, %7
+ %else
+ __instr %6
+ %endif
+%endmacro
+
+;%1 == instruction
+;%2 == minimal instruction set
+;%3 == 1 if float, 0 if int
+;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
+%macro AVX_INSTR 1-5 fnord, 0, 255, 0
+ %macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
+ %ifidn %2, fnord
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
+ %elifidn %3, fnord
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
+ %elifidn %4, fnord
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
+ %elifidn %5, fnord
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
+ %else
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
+ %endif
+ %endmacro
+%endmacro
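+
+; e.g. (illustrative): with "AVX_INSTR addps, sse, 1, 0, 1" below in effect,
+; "addps m0, m1, m2" emits a single 3-operand vaddps in an AVX function,
+; while in a pre-AVX function it is emulated as "movaps m0, m1" followed by
+; "addps m0, m2".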
+
+; Instructions with both VEX/EVEX and legacy encodings
+; Non-destructive instructions are written without parameters
+AVX_INSTR addpd, sse2, 1, 0, 1
+AVX_INSTR addps, sse, 1, 0, 1
+AVX_INSTR addsd, sse2, 1, 0, 0
+AVX_INSTR addss, sse, 1, 0, 0
+AVX_INSTR addsubpd, sse3, 1, 0, 0
+AVX_INSTR addsubps, sse3, 1, 0, 0
+AVX_INSTR aesdec, aesni, 0, 0, 0
+AVX_INSTR aesdeclast, aesni, 0, 0, 0
+AVX_INSTR aesenc, aesni, 0, 0, 0
+AVX_INSTR aesenclast, aesni, 0, 0, 0
+AVX_INSTR aesimc, aesni
+AVX_INSTR aeskeygenassist, aesni
+AVX_INSTR andnpd, sse2, 1, 0, 0
+AVX_INSTR andnps, sse, 1, 0, 0
+AVX_INSTR andpd, sse2, 1, 0, 1
+AVX_INSTR andps, sse, 1, 0, 1
+AVX_INSTR blendpd, sse4, 1, 1, 0
+AVX_INSTR blendps, sse4, 1, 1, 0
+AVX_INSTR blendvpd, sse4 ; can't be emulated
+AVX_INSTR blendvps, sse4 ; can't be emulated
+AVX_INSTR cmpeqpd, sse2, 1, 0, 1
+AVX_INSTR cmpeqps, sse, 1, 0, 1
+AVX_INSTR cmpeqsd, sse2, 1, 0, 0
+AVX_INSTR cmpeqss, sse, 1, 0, 0
+AVX_INSTR cmplepd, sse2, 1, 0, 0
+AVX_INSTR cmpleps, sse, 1, 0, 0
+AVX_INSTR cmplesd, sse2, 1, 0, 0
+AVX_INSTR cmpless, sse, 1, 0, 0
+AVX_INSTR cmpltpd, sse2, 1, 0, 0
+AVX_INSTR cmpltps, sse, 1, 0, 0
+AVX_INSTR cmpltsd, sse2, 1, 0, 0
+AVX_INSTR cmpltss, sse, 1, 0, 0
+AVX_INSTR cmpneqpd, sse2, 1, 0, 1
+AVX_INSTR cmpneqps, sse, 1, 0, 1
+AVX_INSTR cmpneqsd, sse2, 1, 0, 0
+AVX_INSTR cmpneqss, sse, 1, 0, 0
+AVX_INSTR cmpnlepd, sse2, 1, 0, 0
+AVX_INSTR cmpnleps, sse, 1, 0, 0
+AVX_INSTR cmpnlesd, sse2, 1, 0, 0
+AVX_INSTR cmpnless, sse, 1, 0, 0
+AVX_INSTR cmpnltpd, sse2, 1, 0, 0
+AVX_INSTR cmpnltps, sse, 1, 0, 0
+AVX_INSTR cmpnltsd, sse2, 1, 0, 0
+AVX_INSTR cmpnltss, sse, 1, 0, 0
+AVX_INSTR cmpordpd, sse2, 1, 0, 1
+AVX_INSTR cmpordps, sse, 1, 0, 1
+AVX_INSTR cmpordsd, sse2, 1, 0, 0
+AVX_INSTR cmpordss, sse, 1, 0, 0
+AVX_INSTR cmppd, sse2, 1, 1, 0
+AVX_INSTR cmpps, sse, 1, 1, 0
+AVX_INSTR cmpsd, sse2, 1, 1, 0
+AVX_INSTR cmpss, sse, 1, 1, 0
+AVX_INSTR cmpunordpd, sse2, 1, 0, 1
+AVX_INSTR cmpunordps, sse, 1, 0, 1
+AVX_INSTR cmpunordsd, sse2, 1, 0, 0
+AVX_INSTR cmpunordss, sse, 1, 0, 0
+AVX_INSTR comisd, sse2
+AVX_INSTR comiss, sse
+AVX_INSTR cvtdq2pd, sse2
+AVX_INSTR cvtdq2ps, sse2
+AVX_INSTR cvtpd2dq, sse2
+AVX_INSTR cvtpd2ps, sse2
+AVX_INSTR cvtps2dq, sse2
+AVX_INSTR cvtps2pd, sse2
+AVX_INSTR cvtsd2si, sse2
+AVX_INSTR cvtsd2ss, sse2, 1, 0, 0
+AVX_INSTR cvtsi2sd, sse2, 1, 0, 0
+AVX_INSTR cvtsi2ss, sse, 1, 0, 0
+AVX_INSTR cvtss2sd, sse2, 1, 0, 0
+AVX_INSTR cvtss2si, sse
+AVX_INSTR cvttpd2dq, sse2
+AVX_INSTR cvttps2dq, sse2
+AVX_INSTR cvttsd2si, sse2
+AVX_INSTR cvttss2si, sse
+AVX_INSTR divpd, sse2, 1, 0, 0
+AVX_INSTR divps, sse, 1, 0, 0
+AVX_INSTR divsd, sse2, 1, 0, 0
+AVX_INSTR divss, sse, 1, 0, 0
+AVX_INSTR dppd, sse4, 1, 1, 0
+AVX_INSTR dpps, sse4, 1, 1, 0
+AVX_INSTR extractps, sse4
+AVX_INSTR haddpd, sse3, 1, 0, 0
+AVX_INSTR haddps, sse3, 1, 0, 0
+AVX_INSTR hsubpd, sse3, 1, 0, 0
+AVX_INSTR hsubps, sse3, 1, 0, 0
+AVX_INSTR insertps, sse4, 1, 1, 0
+AVX_INSTR lddqu, sse3
+AVX_INSTR ldmxcsr, sse
+AVX_INSTR maskmovdqu, sse2
+AVX_INSTR maxpd, sse2, 1, 0, 1
+AVX_INSTR maxps, sse, 1, 0, 1
+AVX_INSTR maxsd, sse2, 1, 0, 0
+AVX_INSTR maxss, sse, 1, 0, 0
+AVX_INSTR minpd, sse2, 1, 0, 1
+AVX_INSTR minps, sse, 1, 0, 1
+AVX_INSTR minsd, sse2, 1, 0, 0
+AVX_INSTR minss, sse, 1, 0, 0
+AVX_INSTR movapd, sse2
+AVX_INSTR movaps, sse
+AVX_INSTR movd, mmx
+AVX_INSTR movddup, sse3
+AVX_INSTR movdqa, sse2
+AVX_INSTR movdqu, sse2
+AVX_INSTR movhlps, sse, 1, 0, 0
+AVX_INSTR movhpd, sse2, 1, 0, 0
+AVX_INSTR movhps, sse, 1, 0, 0
+AVX_INSTR movlhps, sse, 1, 0, 0
+AVX_INSTR movlpd, sse2, 1, 0, 0
+AVX_INSTR movlps, sse, 1, 0, 0
+AVX_INSTR movmskpd, sse2
+AVX_INSTR movmskps, sse
+AVX_INSTR movntdq, sse2
+AVX_INSTR movntdqa, sse4
+AVX_INSTR movntpd, sse2
+AVX_INSTR movntps, sse
+AVX_INSTR movq, mmx
+AVX_INSTR movsd, sse2, 1, 0, 0
+AVX_INSTR movshdup, sse3
+AVX_INSTR movsldup, sse3
+AVX_INSTR movss, sse, 1, 0, 0
+AVX_INSTR movupd, sse2
+AVX_INSTR movups, sse
+AVX_INSTR mpsadbw, sse4, 0, 1, 0
+AVX_INSTR mulpd, sse2, 1, 0, 1
+AVX_INSTR mulps, sse, 1, 0, 1
+AVX_INSTR mulsd, sse2, 1, 0, 0
+AVX_INSTR mulss, sse, 1, 0, 0
+AVX_INSTR orpd, sse2, 1, 0, 1
+AVX_INSTR orps, sse, 1, 0, 1
+AVX_INSTR pabsb, ssse3
+AVX_INSTR pabsd, ssse3
+AVX_INSTR pabsw, ssse3
+AVX_INSTR packsswb, mmx, 0, 0, 0
+AVX_INSTR packssdw, mmx, 0, 0, 0
+AVX_INSTR packuswb, mmx, 0, 0, 0
+AVX_INSTR packusdw, sse4, 0, 0, 0
+AVX_INSTR paddb, mmx, 0, 0, 1
+AVX_INSTR paddw, mmx, 0, 0, 1
+AVX_INSTR paddd, mmx, 0, 0, 1
+AVX_INSTR paddq, sse2, 0, 0, 1
+AVX_INSTR paddsb, mmx, 0, 0, 1
+AVX_INSTR paddsw, mmx, 0, 0, 1
+AVX_INSTR paddusb, mmx, 0, 0, 1
+AVX_INSTR paddusw, mmx, 0, 0, 1
+AVX_INSTR palignr, ssse3, 0, 1, 0
+AVX_INSTR pand, mmx, 0, 0, 1
+AVX_INSTR pandn, mmx, 0, 0, 0
+AVX_INSTR pavgb, mmx2, 0, 0, 1
+AVX_INSTR pavgw, mmx2, 0, 0, 1
+AVX_INSTR pblendvb, sse4 ; can't be emulated
+AVX_INSTR pblendw, sse4, 0, 1, 0
+AVX_INSTR pclmulqdq, fnord, 0, 1, 0
+AVX_INSTR pclmulhqhqdq, fnord, 0, 0, 0
+AVX_INSTR pclmulhqlqdq, fnord, 0, 0, 0
+AVX_INSTR pclmullqhqdq, fnord, 0, 0, 0
+AVX_INSTR pclmullqlqdq, fnord, 0, 0, 0
+AVX_INSTR pcmpestri, sse42
+AVX_INSTR pcmpestrm, sse42
+AVX_INSTR pcmpistri, sse42
+AVX_INSTR pcmpistrm, sse42
+AVX_INSTR pcmpeqb, mmx, 0, 0, 1
+AVX_INSTR pcmpeqw, mmx, 0, 0, 1
+AVX_INSTR pcmpeqd, mmx, 0, 0, 1
+AVX_INSTR pcmpeqq, sse4, 0, 0, 1
+AVX_INSTR pcmpgtb, mmx, 0, 0, 0
+AVX_INSTR pcmpgtw, mmx, 0, 0, 0
+AVX_INSTR pcmpgtd, mmx, 0, 0, 0
+AVX_INSTR pcmpgtq, sse42, 0, 0, 0
+AVX_INSTR pextrb, sse4
+AVX_INSTR pextrd, sse4
+AVX_INSTR pextrq, sse4
+AVX_INSTR pextrw, mmx2
+AVX_INSTR phaddw, ssse3, 0, 0, 0
+AVX_INSTR phaddd, ssse3, 0, 0, 0
+AVX_INSTR phaddsw, ssse3, 0, 0, 0
+AVX_INSTR phminposuw, sse4
+AVX_INSTR phsubw, ssse3, 0, 0, 0
+AVX_INSTR phsubd, ssse3, 0, 0, 0
+AVX_INSTR phsubsw, ssse3, 0, 0, 0
+AVX_INSTR pinsrb, sse4, 0, 1, 0
+AVX_INSTR pinsrd, sse4, 0, 1, 0
+AVX_INSTR pinsrq, sse4, 0, 1, 0
+AVX_INSTR pinsrw, mmx2, 0, 1, 0
+AVX_INSTR pmaddwd, mmx, 0, 0, 1
+AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
+AVX_INSTR pmaxsb, sse4, 0, 0, 1
+AVX_INSTR pmaxsw, mmx2, 0, 0, 1
+AVX_INSTR pmaxsd, sse4, 0, 0, 1
+AVX_INSTR pmaxub, mmx2, 0, 0, 1
+AVX_INSTR pmaxuw, sse4, 0, 0, 1
+AVX_INSTR pmaxud, sse4, 0, 0, 1
+AVX_INSTR pminsb, sse4, 0, 0, 1
+AVX_INSTR pminsw, mmx2, 0, 0, 1
+AVX_INSTR pminsd, sse4, 0, 0, 1
+AVX_INSTR pminub, mmx2, 0, 0, 1
+AVX_INSTR pminuw, sse4, 0, 0, 1
+AVX_INSTR pminud, sse4, 0, 0, 1
+AVX_INSTR pmovmskb, mmx2
+AVX_INSTR pmovsxbw, sse4
+AVX_INSTR pmovsxbd, sse4
+AVX_INSTR pmovsxbq, sse4
+AVX_INSTR pmovsxwd, sse4
+AVX_INSTR pmovsxwq, sse4
+AVX_INSTR pmovsxdq, sse4
+AVX_INSTR pmovzxbw, sse4
+AVX_INSTR pmovzxbd, sse4
+AVX_INSTR pmovzxbq, sse4
+AVX_INSTR pmovzxwd, sse4
+AVX_INSTR pmovzxwq, sse4
+AVX_INSTR pmovzxdq, sse4
+AVX_INSTR pmuldq, sse4, 0, 0, 1
+AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
+AVX_INSTR pmulhuw, mmx2, 0, 0, 1
+AVX_INSTR pmulhw, mmx, 0, 0, 1
+AVX_INSTR pmullw, mmx, 0, 0, 1
+AVX_INSTR pmulld, sse4, 0, 0, 1
+AVX_INSTR pmuludq, sse2, 0, 0, 1
+AVX_INSTR por, mmx, 0, 0, 1
+AVX_INSTR psadbw, mmx2, 0, 0, 1
+AVX_INSTR pshufb, ssse3, 0, 0, 0
+AVX_INSTR pshufd, sse2
+AVX_INSTR pshufhw, sse2
+AVX_INSTR pshuflw, sse2
+AVX_INSTR psignb, ssse3, 0, 0, 0
+AVX_INSTR psignw, ssse3, 0, 0, 0
+AVX_INSTR psignd, ssse3, 0, 0, 0
+AVX_INSTR psllw, mmx, 0, 0, 0
+AVX_INSTR pslld, mmx, 0, 0, 0
+AVX_INSTR psllq, mmx, 0, 0, 0
+AVX_INSTR pslldq, sse2, 0, 0, 0
+AVX_INSTR psraw, mmx, 0, 0, 0
+AVX_INSTR psrad, mmx, 0, 0, 0
+AVX_INSTR psrlw, mmx, 0, 0, 0
+AVX_INSTR psrld, mmx, 0, 0, 0
+AVX_INSTR psrlq, mmx, 0, 0, 0
+AVX_INSTR psrldq, sse2, 0, 0, 0
+AVX_INSTR psubb, mmx, 0, 0, 0
+AVX_INSTR psubw, mmx, 0, 0, 0
+AVX_INSTR psubd, mmx, 0, 0, 0
+AVX_INSTR psubq, sse2, 0, 0, 0
+AVX_INSTR psubsb, mmx, 0, 0, 0
+AVX_INSTR psubsw, mmx, 0, 0, 0
+AVX_INSTR psubusb, mmx, 0, 0, 0
+AVX_INSTR psubusw, mmx, 0, 0, 0
+AVX_INSTR ptest, sse4
+AVX_INSTR punpckhbw, mmx, 0, 0, 0
+AVX_INSTR punpckhwd, mmx, 0, 0, 0
+AVX_INSTR punpckhdq, mmx, 0, 0, 0
+AVX_INSTR punpckhqdq, sse2, 0, 0, 0
+AVX_INSTR punpcklbw, mmx, 0, 0, 0
+AVX_INSTR punpcklwd, mmx, 0, 0, 0
+AVX_INSTR punpckldq, mmx, 0, 0, 0
+AVX_INSTR punpcklqdq, sse2, 0, 0, 0
+AVX_INSTR pxor, mmx, 0, 0, 1
+AVX_INSTR rcpps, sse
+AVX_INSTR rcpss, sse, 1, 0, 0
+AVX_INSTR roundpd, sse4
+AVX_INSTR roundps, sse4
+AVX_INSTR roundsd, sse4, 1, 1, 0
+AVX_INSTR roundss, sse4, 1, 1, 0
+AVX_INSTR rsqrtps, sse
+AVX_INSTR rsqrtss, sse, 1, 0, 0
+AVX_INSTR shufpd, sse2, 1, 1, 0
+AVX_INSTR shufps, sse, 1, 1, 0
+AVX_INSTR sqrtpd, sse2
+AVX_INSTR sqrtps, sse
+AVX_INSTR sqrtsd, sse2, 1, 0, 0
+AVX_INSTR sqrtss, sse, 1, 0, 0
+AVX_INSTR stmxcsr, sse
+AVX_INSTR subpd, sse2, 1, 0, 0
+AVX_INSTR subps, sse, 1, 0, 0
+AVX_INSTR subsd, sse2, 1, 0, 0
+AVX_INSTR subss, sse, 1, 0, 0
+AVX_INSTR ucomisd, sse2
+AVX_INSTR ucomiss, sse
+AVX_INSTR unpckhpd, sse2, 1, 0, 0
+AVX_INSTR unpckhps, sse, 1, 0, 0
+AVX_INSTR unpcklpd, sse2, 1, 0, 0
+AVX_INSTR unpcklps, sse, 1, 0, 0
+AVX_INSTR xorpd, sse2, 1, 0, 1
+AVX_INSTR xorps, sse, 1, 0, 1
+
+; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
+AVX_INSTR pfadd, 3dnow, 1, 0, 1
+AVX_INSTR pfsub, 3dnow, 1, 0, 0
+AVX_INSTR pfmul, 3dnow, 1, 0, 1
+
+; base-4 constants for shuffles
+%assign i 0
+%rep 256
+ %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
+ %if j < 10
+ CAT_XDEFINE q000, j, i
+ %elif j < 100
+ CAT_XDEFINE q00, j, i
+ %elif j < 1000
+ CAT_XDEFINE q0, j, i
+ %else
+ CAT_XDEFINE q, j, i
+ %endif
+ %assign i i+1
+%endrep
+%undef i
+%undef j
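+
+; e.g. (illustrative): q1032 is the shuffle immediate that places source
+; elements 1,0,3,2 into the destination lanes (listed from the highest lane
+; to the lowest), as used with pshufd/pshuflw/shufps elsewhere in this tree.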
+
+%macro FMA_INSTR 3
+ %macro %1 4-7 %1, %2, %3
+ %if cpuflag(xop)
+ v%5 %1, %2, %3, %4
+ %elifnidn %1, %4
+ %6 %1, %2, %3
+ %7 %1, %4
+ %else
+ %error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported
+ %endif
+ %endmacro
+%endmacro
+
+FMA_INSTR pmacsww, pmullw, paddw
+FMA_INSTR pmacsdd, pmulld, paddd ; sse4 emulation
+FMA_INSTR pmacsdql, pmuldq, paddq ; sse4 emulation
+FMA_INSTR pmadcswd, pmaddwd, paddd
+
+; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
+; This lets us use tzcnt without bumping the yasm version requirement yet.
+%define tzcnt rep bsf
+
+; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax.
+; FMA3 is only possible if dst is the same as one of the src registers.
+; Either src2 or src3 can be a memory operand.
+%macro FMA4_INSTR 2-*
+ %push fma4_instr
+ %xdefine %$prefix %1
+ %rep %0 - 1
+ %macro %$prefix%2 4-6 %$prefix, %2
+ %if notcpuflag(fma3) && notcpuflag(fma4)
+ %error use of ``%5%6'' fma instruction in cpuname function: current_function
+ %elif cpuflag(fma4)
+ v%5%6 %1, %2, %3, %4
+ %elifidn %1, %2
+ ; If %3 or %4 is a memory operand it needs to be encoded as the last operand.
+ %ifnum sizeof%3
+ v%{5}213%6 %2, %3, %4
+ %else
+ v%{5}132%6 %2, %4, %3
+ %endif
+ %elifidn %1, %3
+ v%{5}213%6 %3, %2, %4
+ %elifidn %1, %4
+ v%{5}231%6 %4, %2, %3
+ %else
+ %error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported
+ %endif
+ %endmacro
+ %rotate 1
+ %endrep
+ %pop
+%endmacro
+
+FMA4_INSTR fmadd, pd, ps, sd, ss
+FMA4_INSTR fmaddsub, pd, ps
+FMA4_INSTR fmsub, pd, ps, sd, ss
+FMA4_INSTR fmsubadd, pd, ps
+FMA4_INSTR fnmadd, pd, ps, sd, ss
+FMA4_INSTR fnmsub, pd, ps, sd, ss
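+
+; e.g. (illustrative): in an fma3 function, "fmaddps m0, m0, m1, m2" maps to
+; "vfmadd213ps m0, m1, m2"; in an fma4 function the same line becomes
+; "vfmaddps m0, m0, m1, m2".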
+
+; Macros for converting VEX instructions to equivalent EVEX ones.
+%macro EVEX_INSTR 2-3 0 ; vex, evex, prefer_evex
+ %macro %1 2-7 fnord, fnord, %1, %2, %3
+ %ifidn %3, fnord
+ %define %%args %1, %2
+ %elifidn %4, fnord
+ %define %%args %1, %2, %3
+ %else
+ %define %%args %1, %2, %3, %4
+ %endif
+ %assign %%evex_required cpuflag(avx512) & %7
+ %ifnum regnumof%1
+ %if regnumof%1 >= 16 || sizeof%1 > 32
+ %assign %%evex_required 1
+ %endif
+ %endif
+ %ifnum regnumof%2
+ %if regnumof%2 >= 16 || sizeof%2 > 32
+ %assign %%evex_required 1
+ %endif
+ %endif
+ %if %%evex_required
+ %6 %%args
+ %else
+ %5 %%args ; Prefer VEX over EVEX due to shorter instruction length
+ %endif
+ %endmacro
+%endmacro
+
+EVEX_INSTR vbroadcastf128, vbroadcastf32x4
+EVEX_INSTR vbroadcasti128, vbroadcasti32x4
+EVEX_INSTR vextractf128, vextractf32x4
+EVEX_INSTR vextracti128, vextracti32x4
+EVEX_INSTR vinsertf128, vinsertf32x4
+EVEX_INSTR vinserti128, vinserti32x4
+EVEX_INSTR vmovdqa, vmovdqa32
+EVEX_INSTR vmovdqu, vmovdqu32
+EVEX_INSTR vpand, vpandd
+EVEX_INSTR vpandn, vpandnd
+EVEX_INSTR vpor, vpord
+EVEX_INSTR vpxor, vpxord
+EVEX_INSTR vrcpps, vrcp14ps, 1 ; EVEX versions have higher precision
+EVEX_INSTR vrcpss, vrcp14ss, 1
+EVEX_INSTR vrsqrtps, vrsqrt14ps, 1
+EVEX_INSTR vrsqrtss, vrsqrt14ss, 1
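+
+; e.g. (illustrative): "vmovdqa" is routed to its vmovdqa32 EVEX form when an
+; operand is a register numbered 16 or higher or is wider than 32 bytes;
+; otherwise the shorter VEX encoding is kept.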
+
+; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0)
+%ifdef __YASM_VER__
+ %if __YASM_VERSION_ID__ < 0x01030000 && ARCH_X86_64 == 0
+ %macro vpbroadcastq 2
+ %if sizeof%1 == 16
+ movddup %1, %2
+ %else
+ vbroadcastsd %1, %2
+ %endif
+ %endmacro
+ %endif
+%endif
diff --git a/media/ffvpx/libavutil/x86/x86util.asm b/media/ffvpx/libavutil/x86/x86util.asm
new file mode 100644
index 0000000000..d7cd996842
--- /dev/null
+++ b/media/ffvpx/libavutil/x86/x86util.asm
@@ -0,0 +1,1028 @@
+;*****************************************************************************
+;* x86util.asm
+;*****************************************************************************
+;* Copyright (C) 2008-2010 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;* Holger Lubitz <holger@lubitz.org>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%define private_prefix ff
+%define public_prefix avpriv
+%define cpuflags_mmxext cpuflags_mmx2
+
+%include "libavutil/x86/x86inc.asm"
+
+; expands to [base],...,[base+7*stride]
+%define PASS8ROWS(base, base3, stride, stride3) \
+ [base], [base + stride], [base + 2*stride], [base3], \
+ [base3 + stride], [base3 + 2*stride], [base3 + stride3], [base3 + stride*4]
+
+; Interleave low src0 with low src1 and store in src0,
+; interleave high src0 with high src1 and store in src1.
+; %1 - types
+; %2 - index of the register with src0
+; %3 - index of the register with src1
+; %4 - index of the register for intermediate results
+; example for %1 - wd: input: src0: x0 x1 x2 x3 z0 z1 z2 z3
+; src1: y0 y1 y2 y3 q0 q1 q2 q3
+; output: src0: x0 y0 x1 y1 x2 y2 x3 y3
+; src1: z0 q0 z1 q1 z2 q2 z3 q3
+%macro SBUTTERFLY 4
+%ifidn %1, dqqq
+ vperm2i128 m%4, m%2, m%3, q0301
+ vinserti128 m%2, m%2, xm%3, 1
+%elif avx_enabled == 0
+ mova m%4, m%2
+ punpckl%1 m%2, m%3
+ punpckh%1 m%4, m%3
+%else
+ punpckh%1 m%4, m%2, m%3
+ punpckl%1 m%2, m%3
+%endif
+ SWAP %3, %4
+%endmacro
+
+%macro SBUTTERFLY2 4
+ punpckl%1 m%4, m%2, m%3
+ punpckh%1 m%2, m%2, m%3
+ SWAP %2, %4, %3
+%endmacro
+
+%macro SBUTTERFLYPS 3
+ unpcklps m%3, m%1, m%2
+ unpckhps m%1, m%1, m%2
+ SWAP %1, %3, %2
+%endmacro
+
+%macro SBUTTERFLYPD 3
+ movlhps m%3, m%1, m%2
+ movhlps m%2, m%2, m%1
+ SWAP %1, %3
+%endmacro
+
+%macro TRANSPOSE4x4B 5
+ SBUTTERFLY bw, %1, %2, %5
+ SBUTTERFLY bw, %3, %4, %5
+ SBUTTERFLY wd, %1, %3, %5
+ SBUTTERFLY wd, %2, %4, %5
+ SWAP %2, %3
+%endmacro
+
+%macro TRANSPOSE4x4W 5
+ SBUTTERFLY wd, %1, %2, %5
+ SBUTTERFLY wd, %3, %4, %5
+ SBUTTERFLY dq, %1, %3, %5
+ SBUTTERFLY dq, %2, %4, %5
+ SWAP %2, %3
+%endmacro
+
+%macro TRANSPOSE2x4x4B 5
+ SBUTTERFLY bw, %1, %2, %5
+ SBUTTERFLY bw, %3, %4, %5
+ SBUTTERFLY wd, %1, %3, %5
+ SBUTTERFLY wd, %2, %4, %5
+ SBUTTERFLY dq, %1, %2, %5
+ SBUTTERFLY dq, %3, %4, %5
+%endmacro
+
+%macro TRANSPOSE2x4x4W 5
+ SBUTTERFLY wd, %1, %2, %5
+ SBUTTERFLY wd, %3, %4, %5
+ SBUTTERFLY dq, %1, %3, %5
+ SBUTTERFLY dq, %2, %4, %5
+ SBUTTERFLY qdq, %1, %2, %5
+ SBUTTERFLY qdq, %3, %4, %5
+%endmacro
+
+%macro TRANSPOSE4x4D 5
+ SBUTTERFLY dq, %1, %2, %5
+ SBUTTERFLY dq, %3, %4, %5
+ SBUTTERFLY qdq, %1, %3, %5
+ SBUTTERFLY qdq, %2, %4, %5
+ SWAP %2, %3
+%endmacro
+
+; identical behavior to TRANSPOSE4x4D, but using SSE1 float ops
+%macro TRANSPOSE4x4PS 5
+ SBUTTERFLYPS %1, %2, %5
+ SBUTTERFLYPS %3, %4, %5
+ SBUTTERFLYPD %1, %3, %5
+ SBUTTERFLYPD %2, %4, %5
+ SWAP %2, %3
+%endmacro
+
+%macro TRANSPOSE8x4D 9-11
+%if ARCH_X86_64
+ SBUTTERFLY dq, %1, %2, %9
+ SBUTTERFLY dq, %3, %4, %9
+ SBUTTERFLY dq, %5, %6, %9
+ SBUTTERFLY dq, %7, %8, %9
+ SBUTTERFLY qdq, %1, %3, %9
+ SBUTTERFLY qdq, %2, %4, %9
+ SBUTTERFLY qdq, %5, %7, %9
+ SBUTTERFLY qdq, %6, %8, %9
+ SWAP %2, %5
+ SWAP %4, %7
+%else
+; in: m0..m7
+; out: m0..m7, unless %11 in which case m2 is in %9
+; spills into %9 and %10
+ movdqa %9, m%7
+ SBUTTERFLY dq, %1, %2, %7
+ movdqa %10, m%2
+ movdqa m%7, %9
+ SBUTTERFLY dq, %3, %4, %2
+ SBUTTERFLY dq, %5, %6, %2
+ SBUTTERFLY dq, %7, %8, %2
+ SBUTTERFLY qdq, %1, %3, %2
+ movdqa %9, m%3
+ movdqa m%2, %10
+ SBUTTERFLY qdq, %2, %4, %3
+ SBUTTERFLY qdq, %5, %7, %3
+ SBUTTERFLY qdq, %6, %8, %3
+ SWAP %2, %5
+ SWAP %4, %7
+%if %0<11
+ movdqa m%3, %9
+%endif
+%endif
+%endmacro
+
+%macro TRANSPOSE8x8W 9-11
+%if ARCH_X86_64
+ SBUTTERFLY wd, %1, %2, %9
+ SBUTTERFLY wd, %3, %4, %9
+ SBUTTERFLY wd, %5, %6, %9
+ SBUTTERFLY wd, %7, %8, %9
+ SBUTTERFLY dq, %1, %3, %9
+ SBUTTERFLY dq, %2, %4, %9
+ SBUTTERFLY dq, %5, %7, %9
+ SBUTTERFLY dq, %6, %8, %9
+ SBUTTERFLY qdq, %1, %5, %9
+ SBUTTERFLY qdq, %2, %6, %9
+ SBUTTERFLY qdq, %3, %7, %9
+ SBUTTERFLY qdq, %4, %8, %9
+ SWAP %2, %5
+ SWAP %4, %7
+%else
+; in: m0..m7, unless %11 in which case m6 is in %9
+; out: m0..m7, unless %11 in which case m4 is in %10
+; spills into %9 and %10
+%if %0<11
+ movdqa %9, m%7
+%endif
+ SBUTTERFLY wd, %1, %2, %7
+ movdqa %10, m%2
+ movdqa m%7, %9
+ SBUTTERFLY wd, %3, %4, %2
+ SBUTTERFLY wd, %5, %6, %2
+ SBUTTERFLY wd, %7, %8, %2
+ SBUTTERFLY dq, %1, %3, %2
+ movdqa %9, m%3
+ movdqa m%2, %10
+ SBUTTERFLY dq, %2, %4, %3
+ SBUTTERFLY dq, %5, %7, %3
+ SBUTTERFLY dq, %6, %8, %3
+ SBUTTERFLY qdq, %1, %5, %3
+ SBUTTERFLY qdq, %2, %6, %3
+ movdqa %10, m%2
+ movdqa m%3, %9
+ SBUTTERFLY qdq, %3, %7, %2
+ SBUTTERFLY qdq, %4, %8, %2
+ SWAP %2, %5
+ SWAP %4, %7
+%if %0<11
+ movdqa m%5, %10
+%endif
+%endif
+%endmacro
+
+%macro TRANSPOSE16x16W 18-19
+; in: m0..m15, unless %19 in which case m6 is in %17
+; out: m0..m15, unless %19 in which case m4 is in %18
+; spills into %17 and %18
+%if %0 < 19
+ mova %17, m%7
+%endif
+
+ SBUTTERFLY dqqq, %1, %9, %7
+ SBUTTERFLY dqqq, %2, %10, %7
+ SBUTTERFLY dqqq, %3, %11, %7
+ SBUTTERFLY dqqq, %4, %12, %7
+ SBUTTERFLY dqqq, %5, %13, %7
+ SBUTTERFLY dqqq, %6, %14, %7
+ mova %18, m%14
+ mova m%7, %17
+ SBUTTERFLY dqqq, %7, %15, %14
+ SBUTTERFLY dqqq, %8, %16, %14
+
+ SBUTTERFLY wd, %1, %2, %14
+ SBUTTERFLY wd, %3, %4, %14
+ SBUTTERFLY wd, %5, %6, %14
+ SBUTTERFLY wd, %7, %8, %14
+ SBUTTERFLY wd, %9, %10, %14
+ SBUTTERFLY wd, %11, %12, %14
+ mova %17, m%12
+ mova m%14, %18
+ SBUTTERFLY wd, %13, %14, %12
+ SBUTTERFLY wd, %15, %16, %12
+
+ SBUTTERFLY dq, %1, %3, %12
+ SBUTTERFLY dq, %2, %4, %12
+ SBUTTERFLY dq, %5, %7, %12
+ SBUTTERFLY dq, %6, %8, %12
+ SBUTTERFLY dq, %9, %11, %12
+ mova %18, m%11
+ mova m%12, %17
+ SBUTTERFLY dq, %10, %12, %11
+ SBUTTERFLY dq, %13, %15, %11
+ SBUTTERFLY dq, %14, %16, %11
+
+ SBUTTERFLY qdq, %1, %5, %11
+ SBUTTERFLY qdq, %2, %6, %11
+ SBUTTERFLY qdq, %3, %7, %11
+ SBUTTERFLY qdq, %4, %8, %11
+
+ SWAP %2, %5
+ SWAP %4, %7
+
+ SBUTTERFLY qdq, %9, %13, %11
+ SBUTTERFLY qdq, %10, %14, %11
+ mova m%11, %18
+ mova %18, m%5
+ SBUTTERFLY qdq, %11, %15, %5
+ SBUTTERFLY qdq, %12, %16, %5
+
+%if %0 < 19
+ mova m%5, %18
+%endif
+
+ SWAP %10, %13
+ SWAP %12, %15
+%endmacro
+
+%macro TRANSPOSE_8X8B 8
+ %if mmsize == 8
+ %error "This macro does not support mmsize == 8"
+ %endif
+ punpcklbw m%1, m%2
+ punpcklbw m%3, m%4
+ punpcklbw m%5, m%6
+ punpcklbw m%7, m%8
+ TRANSPOSE4x4W %1, %3, %5, %7, %2
+ MOVHL m%2, m%1
+ MOVHL m%4, m%3
+ MOVHL m%6, m%5
+ MOVHL m%8, m%7
+%endmacro
+
+; PABSW macro assumes %1 != %2, while ABS1/2 macros work in-place
+%macro PABSW 2
+%if cpuflag(ssse3)
+ pabsw %1, %2
+%elif cpuflag(mmxext)
+ pxor %1, %1
+ psubw %1, %2
+ pmaxsw %1, %2
+%else
+ pxor %1, %1
+ pcmpgtw %1, %2
+ pxor %2, %1
+ psubw %2, %1
+ SWAP %1, %2
+%endif
+%endmacro
+
+%macro PSIGNW 2
+%if cpuflag(ssse3)
+ psignw %1, %2
+%else
+ pxor %1, %2
+ psubw %1, %2
+%endif
+%endmacro
+
+%macro ABS1 2
+%if cpuflag(ssse3)
+ pabsw %1, %1
+%elif cpuflag(mmxext) ; a, tmp
+ pxor %2, %2
+ psubw %2, %1
+ pmaxsw %1, %2
+%else ; a, tmp
+ pxor %2, %2
+ pcmpgtw %2, %1
+ pxor %1, %2
+ psubw %1, %2
+%endif
+%endmacro
+
+%macro ABS2 4
+%if cpuflag(ssse3)
+ pabsw %1, %1
+ pabsw %2, %2
+%elif cpuflag(mmxext) ; a, b, tmp0, tmp1
+ pxor %3, %3
+ pxor %4, %4
+ psubw %3, %1
+ psubw %4, %2
+ pmaxsw %1, %3
+ pmaxsw %2, %4
+%else ; a, b, tmp0, tmp1
+ pxor %3, %3
+ pxor %4, %4
+ pcmpgtw %3, %1
+ pcmpgtw %4, %2
+ pxor %1, %3
+ pxor %2, %4
+ psubw %1, %3
+ psubw %2, %4
+%endif
+%endmacro
+
+%macro ABSB 2 ; source mmreg, temp mmreg (unused for SSSE3)
+%if cpuflag(ssse3)
+ pabsb %1, %1
+%else
+ pxor %2, %2
+ psubb %2, %1
+ pminub %1, %2
+%endif
+%endmacro
+
+%macro ABSB2 4 ; src1, src2, tmp1, tmp2 (tmp1/2 unused for SSSE3)
+%if cpuflag(ssse3)
+ pabsb %1, %1
+ pabsb %2, %2
+%else
+ pxor %3, %3
+ pxor %4, %4
+ psubb %3, %1
+ psubb %4, %2
+ pminub %1, %3
+ pminub %2, %4
+%endif
+%endmacro
+
+%macro ABSD2 4
+ pxor %3, %3
+ pxor %4, %4
+ pcmpgtd %3, %1
+ pcmpgtd %4, %2
+ pxor %1, %3
+ pxor %2, %4
+ psubd %1, %3
+ psubd %2, %4
+%endmacro
+
+%macro ABS4 6
+ ABS2 %1, %2, %5, %6
+ ABS2 %3, %4, %5, %6
+%endmacro
+
+%macro SPLATB_LOAD 3
+%if cpuflag(ssse3)
+ movd %1, [%2-3]
+ pshufb %1, %3
+%else
+ movd %1, [%2-3] ; to avoid crossing a cacheline
+ punpcklbw %1, %1
+ SPLATW %1, %1, 3
+%endif
+%endmacro
+
+%macro SPLATB_REG 3
+%if cpuflag(ssse3)
+ movd %1, %2d
+ pshufb %1, %3
+%else
+ movd %1, %2d
+ punpcklbw %1, %1
+ SPLATW %1, %1, 0
+%endif
+%endmacro
+
+%macro HADDD 2 ; sum junk
+%if sizeof%1 == 32
+%define %2 xmm%2
+ vextracti128 %2, %1, 1
+%define %1 xmm%1
+ paddd %1, %2
+%endif
+%if mmsize >= 16
+%if cpuflag(xop) && sizeof%1 == 16
+ vphadddq %1, %1
+%endif
+ movhlps %2, %1
+ paddd %1, %2
+%endif
+%if notcpuflag(xop) || sizeof%1 != 16
+%if cpuflag(mmxext)
+ PSHUFLW %2, %1, q0032
+%else ; mmx
+ mova %2, %1
+ psrlq %2, 32
+%endif
+ paddd %1, %2
+%endif
+%undef %1
+%undef %2
+%endmacro
+
+%macro HADDW 2 ; reg, tmp
+%if cpuflag(xop) && sizeof%1 == 16
+ vphaddwq %1, %1
+ movhlps %2, %1
+ paddd %1, %2
+%else
+ pmaddwd %1, [pw_1]
+ HADDD %1, %2
+%endif
+%endmacro
+
+%macro HADDPS 3 ; dst, src, tmp
+%if cpuflag(sse3)
+ haddps %1, %1, %2
+%else
+ movaps %3, %1
+ shufps %1, %2, q2020
+ shufps %3, %2, q3131
+ addps %1, %3
+%endif
+%endmacro
+
+%macro PALIGNR 4-5
+%if cpuflag(ssse3)
+%if %0==5
+ palignr %1, %2, %3, %4
+%else
+ palignr %1, %2, %3
+%endif
+%else ; [dst,] src1, src2, imm, tmp
+ %define %%dst %1
+%if %0==5
+%ifnidn %1, %2
+ mova %%dst, %2
+%endif
+ %rotate 1
+%endif
+%ifnidn %4, %2
+ mova %4, %2
+%endif
+%if mmsize==8
+ psllq %%dst, (8-%3)*8
+ psrlq %4, %3*8
+%else
+ pslldq %%dst, 16-%3
+ psrldq %4, %3
+%endif
+ por %%dst, %4
+%endif
+%endmacro
+
+%macro PAVGB 2-4
+%if cpuflag(mmxext)
+ pavgb %1, %2
+%elif cpuflag(3dnow)
+ pavgusb %1, %2
+%elif cpuflag(mmx)
+ movu %3, %2
+ por %3, %1
+ pxor %1, %2
+ pand %1, %4
+ psrlq %1, 1
+ psubb %3, %1
+ SWAP %1, %3
+%endif
+%endmacro
+
+%macro PSHUFLW 1+
+ %if mmsize == 8
+ pshufw %1
+ %else
+ pshuflw %1
+ %endif
+%endmacro
+
+%macro PSWAPD 2
+%if cpuflag(mmxext)
+ pshufw %1, %2, q1032
+%elif cpuflag(3dnowext)
+ pswapd %1, %2
+%elif cpuflag(3dnow)
+ movq %1, %2
+ psrlq %1, 32
+ punpckldq %1, %2
+%endif
+%endmacro
+
+%macro DEINTB 5 ; mask, reg1, mask, reg2, optional src to fill masks from
+%ifnum %5
+ pand m%3, m%5, m%4 ; src .. y6 .. y4
+ pand m%1, m%5, m%2 ; dst .. y6 .. y4
+%else
+ mova m%1, %5
+ pand m%3, m%1, m%4 ; src .. y6 .. y4
+ pand m%1, m%1, m%2 ; dst .. y6 .. y4
+%endif
+ psrlw m%2, 8 ; dst .. y7 .. y5
+ psrlw m%4, 8 ; src .. y7 .. y5
+%endmacro
+
+%macro SUMSUB_BA 3-4
+%if %0==3
+ padd%1 m%2, m%3
+ padd%1 m%3, m%3
+ psub%1 m%3, m%2
+%else
+%if avx_enabled == 0
+ mova m%4, m%2
+ padd%1 m%2, m%3
+ psub%1 m%3, m%4
+%else
+ padd%1 m%4, m%2, m%3
+ psub%1 m%3, m%2
+ SWAP %2, %4
+%endif
+%endif
+%endmacro
+
+%macro SUMSUB_BADC 5-6
+%if %0==6
+ SUMSUB_BA %1, %2, %3, %6
+ SUMSUB_BA %1, %4, %5, %6
+%else
+ padd%1 m%2, m%3
+ padd%1 m%4, m%5
+ padd%1 m%3, m%3
+ padd%1 m%5, m%5
+ psub%1 m%3, m%2
+ psub%1 m%5, m%4
+%endif
+%endmacro
+
+%macro SUMSUB2_AB 4
+%ifnum %3
+ psub%1 m%4, m%2, m%3
+ psub%1 m%4, m%3
+ padd%1 m%2, m%2
+ padd%1 m%2, m%3
+%else
+ mova m%4, m%2
+ padd%1 m%2, m%2
+ padd%1 m%2, %3
+ psub%1 m%4, %3
+ psub%1 m%4, %3
+%endif
+%endmacro
+
+%macro SUMSUB2_BA 4
+%if avx_enabled == 0
+ mova m%4, m%2
+ padd%1 m%2, m%3
+ padd%1 m%2, m%3
+ psub%1 m%3, m%4
+ psub%1 m%3, m%4
+%else
+ padd%1 m%4, m%2, m%3
+ padd%1 m%4, m%3
+ psub%1 m%3, m%2
+ psub%1 m%3, m%2
+ SWAP %2, %4
+%endif
+%endmacro
+
+%macro SUMSUBD2_AB 5
+%ifnum %4
+ psra%1 m%5, m%2, 1 ; %3: %3>>1
+ psra%1 m%4, m%3, 1 ; %2: %2>>1
+ padd%1 m%4, m%2 ; %3: %3>>1+%2
+ psub%1 m%5, m%3 ; %2: %2>>1-%3
+ SWAP %2, %5
+ SWAP %3, %4
+%else
+ mova %5, m%2
+ mova %4, m%3
+ psra%1 m%3, 1 ; %3: %3>>1
+ psra%1 m%2, 1 ; %2: %2>>1
+ padd%1 m%3, %5 ; %3: %3>>1+%2
+ psub%1 m%2, %4 ; %2: %2>>1-%3
+%endif
+%endmacro
+
+%macro DCT4_1D 5
+%ifnum %5
+ SUMSUB_BADC w, %4, %1, %3, %2, %5
+ SUMSUB_BA w, %3, %4, %5
+ SUMSUB2_AB w, %1, %2, %5
+ SWAP %1, %3, %4, %5, %2
+%else
+ SUMSUB_BADC w, %4, %1, %3, %2
+ SUMSUB_BA w, %3, %4
+ mova [%5], m%2
+ SUMSUB2_AB w, %1, [%5], %2
+ SWAP %1, %3, %4, %2
+%endif
+%endmacro
+
+%macro IDCT4_1D 6-7
+%ifnum %6
+ SUMSUBD2_AB %1, %3, %5, %7, %6
+ ; %3: %3>>1-%5 %5: %3+%5>>1
+ SUMSUB_BA %1, %4, %2, %7
+ ; %4: %2+%4 %2: %2-%4
+ SUMSUB_BADC %1, %5, %4, %3, %2, %7
+ ; %5: %2+%4 + (%3+%5>>1)
+ ; %4: %2+%4 - (%3+%5>>1)
+ ; %3: %2-%4 + (%3>>1-%5)
+ ; %2: %2-%4 - (%3>>1-%5)
+%else
+%ifidn %1, w
+ SUMSUBD2_AB %1, %3, %5, [%6], [%6+16]
+%else
+ SUMSUBD2_AB %1, %3, %5, [%6], [%6+32]
+%endif
+ SUMSUB_BA %1, %4, %2
+ SUMSUB_BADC %1, %5, %4, %3, %2
+%endif
+ SWAP %2, %5, %4
+ ; %2: %2+%4 + (%3+%5>>1) row0
+ ; %3: %2-%4 + (%3>>1-%5) row1
+ ; %4: %2-%4 - (%3>>1-%5) row2
+ ; %5: %2+%4 - (%3+%5>>1) row3
+%endmacro
+
+
+%macro LOAD_DIFF 5
+%ifidn %3, none
+ movh %1, %4
+ movh %2, %5
+ punpcklbw %1, %2
+ punpcklbw %2, %2
+ psubw %1, %2
+%else
+ movh %1, %4
+ punpcklbw %1, %3
+ movh %2, %5
+ punpcklbw %2, %3
+ psubw %1, %2
+%endif
+%endmacro
+
+%macro STORE_DCT 6
+ movq [%5+%6+ 0], m%1
+ movq [%5+%6+ 8], m%2
+ movq [%5+%6+16], m%3
+ movq [%5+%6+24], m%4
+ movhps [%5+%6+32], m%1
+ movhps [%5+%6+40], m%2
+ movhps [%5+%6+48], m%3
+ movhps [%5+%6+56], m%4
+%endmacro
+
+%macro LOAD_DIFF_8x4P 7-10 r0,r2,0 ; 4x dest, 2x temp, 2x pointer, increment?
+ LOAD_DIFF m%1, m%5, m%7, [%8], [%9]
+ LOAD_DIFF m%2, m%6, m%7, [%8+r1], [%9+r3]
+ LOAD_DIFF m%3, m%5, m%7, [%8+2*r1], [%9+2*r3]
+ LOAD_DIFF m%4, m%6, m%7, [%8+r4], [%9+r5]
+%if %10
+ lea %8, [%8+4*r1]
+ lea %9, [%9+4*r3]
+%endif
+%endmacro
+
+%macro DIFFx2 6-7
+ movh %3, %5
+ punpcklbw %3, %4
+ psraw %1, 6
+ paddsw %1, %3
+ movh %3, %6
+ punpcklbw %3, %4
+ psraw %2, 6
+ paddsw %2, %3
+ packuswb %2, %1
+%endmacro
+
+%macro STORE_DIFF 4
+ movh %2, %4
+ punpcklbw %2, %3
+ psraw %1, 6
+ paddsw %1, %2
+ packuswb %1, %1
+ movh %4, %1
+%endmacro
+
+%macro STORE_DIFFx2 8 ; add1, add2, reg1, reg2, zero, shift, source, stride
+ movh %3, [%7]
+ movh %4, [%7+%8]
+ psraw %1, %6
+ psraw %2, %6
+ punpcklbw %3, %5
+ punpcklbw %4, %5
+ paddw %3, %1
+ paddw %4, %2
+ packuswb %3, %5
+ packuswb %4, %5
+ movh [%7], %3
+ movh [%7+%8], %4
+%endmacro
+
+%macro PMINUB 3 ; dst, src, ignored
+%if cpuflag(mmxext)
+ pminub %1, %2
+%else ; dst, src, tmp
+ mova %3, %1
+ psubusb %3, %2
+ psubb %1, %3
+%endif
+%endmacro
+
+%macro SPLATW 2-3 0
+%if cpuflag(avx2) && %3 == 0
+ vpbroadcastw %1, %2
+%elif mmsize == 16
+ pshuflw %1, %2, (%3)*0x55
+ punpcklqdq %1, %1
+%elif cpuflag(mmxext)
+ pshufw %1, %2, (%3)*0x55
+%else
+ %ifnidn %1, %2
+ mova %1, %2
+ %endif
+ %if %3 & 2
+ punpckhwd %1, %1
+ %else
+ punpcklwd %1, %1
+ %endif
+ %if %3 & 1
+ punpckhwd %1, %1
+ %else
+ punpcklwd %1, %1
+ %endif
+%endif
+%endmacro
+
+%macro SPLATD 1
+%if mmsize == 8
+ punpckldq %1, %1
+%elif cpuflag(sse2)
+ pshufd %1, %1, 0
+%elif cpuflag(sse)
+ shufps %1, %1, 0
+%endif
+%endmacro
+
+%macro CLIPUB 3 ;(dst, min, max)
+ pmaxub %1, %2
+ pminub %1, %3
+%endmacro
+
+%macro CLIPW 3 ;(dst, min, max)
+ pmaxsw %1, %2
+ pminsw %1, %3
+%endmacro
+
+%macro PMINSD 3 ; dst, src, tmp/unused
+%if cpuflag(sse4)
+ pminsd %1, %2
+%elif cpuflag(sse2)
+ cvtdq2ps %1, %1
+ minps %1, %2
+ cvtps2dq %1, %1
+%else
+ mova %3, %2
+ pcmpgtd %3, %1
+ pxor %1, %2
+ pand %1, %3
+ pxor %1, %2
+%endif
+%endmacro
+
+%macro PMAXSD 3 ; dst, src, tmp/unused
+%if cpuflag(sse4)
+ pmaxsd %1, %2
+%else
+ mova %3, %1
+ pcmpgtd %3, %2
+ pand %1, %3
+ pandn %3, %2
+ por %1, %3
+%endif
+%endmacro
+
+%macro CLIPD 3-4
+%if cpuflag(sse4); src/dst, min, max, unused
+ pminsd %1, %3
+ pmaxsd %1, %2
+%elif cpuflag(sse2) ; src/dst, min (float), max (float), unused
+ cvtdq2ps %1, %1
+ minps %1, %3
+ maxps %1, %2
+ cvtps2dq %1, %1
+%else ; src/dst, min, max, tmp
+ PMINSD %1, %3, %4
+ PMAXSD %1, %2, %4
+%endif
+%endmacro
+
+%macro VBROADCASTSS 2 ; dst xmm/ymm, src m32/xmm
+%if cpuflag(avx2)
+ vbroadcastss %1, %2
+%elif cpuflag(avx)
+ %ifnum sizeof%2 ; avx1 register
+ shufps xmm%1, xmm%2, xmm%2, q0000
+ %if sizeof%1 >= 32 ; mmsize>=32
+ vinsertf128 %1, %1, xmm%1, 1
+ %endif
+ %else ; avx1 memory
+ vbroadcastss %1, %2
+ %endif
+%else
+ %ifnum sizeof%2 ; sse register
+ shufps %1, %2, %2, q0000
+ %else ; sse memory
+ movss %1, %2
+ shufps %1, %1, 0
+ %endif
+%endif
+%endmacro
+
+%macro VBROADCASTSD 2 ; dst xmm/ymm, src m64
+%if cpuflag(avx) && mmsize == 32
+ vbroadcastsd %1, %2
+%elif cpuflag(sse3)
+ movddup %1, %2
+%else ; sse2
+ movsd %1, %2
+ movlhps %1, %1
+%endif
+%endmacro
+
+%macro VPBROADCASTD 2 ; dst xmm/ymm, src m32/xmm
+%if cpuflag(avx2)
+ vpbroadcastd %1, %2
+%elif cpuflag(avx) && sizeof%1 >= 32
+ %error vpbroadcastd not possible with ymm on avx1. try vbroadcastss
+%else
+ %ifnum sizeof%2 ; sse2 register
+ pshufd %1, %2, q0000
+ %else ; sse memory
+ movd %1, %2
+ pshufd %1, %1, 0
+ %endif
+%endif
+%endmacro
+
+%macro VBROADCASTI128 2 ; dst xmm/ymm, src: 128-bit value
+%if mmsize > 16
+ vbroadcasti128 %1, %2
+%else
+ mova %1, %2
+%endif
+%endmacro
+
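+; Build a 16-byte pshufb mask from 8 word indices; an index of 0x80 or higher
+; emits bytes with the high bit set, which pshufb zeroes out.
+; e.g. SHUFFLE_MASK_W 3, 2, 1, 0, 7, 6, 5, 4 yields a constant that reverses
+; the word order within each 64-bit half (hypothetical usage, for illustration).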
+%macro SHUFFLE_MASK_W 8
+ %rep 8
+ %if %1>=0x80
+ db %1, %1
+ %else
+ db %1*2
+ db %1*2+1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro PMOVSXWD 2 ; dst, src
+%if cpuflag(sse4)
+ pmovsxwd %1, %2
+%else
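+ ; duplicate each word into both halves of a dword, then shift right
+ ; arithmetically by 16 to sign-extend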
+ %ifnidn %1, %2
+ mova %1, %2
+ %endif
+ punpcklwd %1, %1
+ psrad %1, 16
+%endif
+%endmacro
+
+; Wrapper for non-FMA version of fmaddps
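+; FMULADD_PS dst, src0, src1, addend, tmp computes dst = src0*src1 + addend;
+; tmp is only needed when dst aliases the addend and no FMA is available.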
+%macro FMULADD_PS 5
+ %if cpuflag(fma3) || cpuflag(fma4)
+ fmaddps %1, %2, %3, %4
+ %elifidn %1, %4
+ mulps %5, %2, %3
+ addps %1, %4, %5
+ %else
+ mulps %1, %2, %3
+ addps %1, %4
+ %endif
+%endmacro
+
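+; Shift left (LSHIFT) or right (RSHIFT) by %2 whole bytes: pslldq/psrldq on
+; XMM registers, emulated with a 64-bit shift (8 bits per byte) on MMX.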
+%macro LSHIFT 2
+%if mmsize > 8
+ pslldq %1, %2
+%else
+ psllq %1, 8*(%2)
+%endif
+%endmacro
+
+%macro RSHIFT 2
+%if mmsize > 8
+ psrldq %1, %2
+%else
+ psrlq %1, 8*(%2)
+%endif
+%endmacro
+
+%macro MOVHL 2 ; dst, src
+%ifidn %1, %2
+ punpckhqdq %1, %2
+%elif cpuflag(avx)
+ punpckhqdq %1, %2, %2
+%elif cpuflag(sse4)
+ pshufd %1, %2, q3232 ; pshufd is slow on some older CPUs, so only use it on more modern ones
+%else
+ movhlps %1, %2 ; may cause an int/float domain transition and has a dependency on dst
+%endif
+%endmacro
+
+; Horizontal sum of packed single-precision floats.
+; The resulting sum is in all elements.
+%macro HSUMPS 2 ; dst/src, tmp
+%if cpuflag(avx)
+ %if sizeof%1>=32 ; avx
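+ ; add the swapped 128-bit lanes so both lanes already hold the cross-lane
+ ; sum before the in-lane reduction below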
+ vperm2f128 %2, %1, %1, (0)*16+(1)
+ addps %1, %2
+ %endif
+ shufps %2, %1, %1, q1032
+ addps %1, %2
+ shufps %2, %1, %1, q0321
+ addps %1, %2
+%else ; this form is a bit faster than the short avx-like emulation.
+ movaps %2, %1
+ shufps %1, %1, q1032
+ addps %1, %2
+ movaps %2, %1
+ shufps %1, %1, q0321
+ addps %1, %2
+ ; all elements of %1 should be equal, as long as float a+b == b+a
+%endif
+%endmacro
+
+; Emulate blendvps if not available
+;
+; src_b is destroyed when the logical-operation emulation is used
+; the SSE4.1 blendv instruction is hard-coded to use xmm0 as the mask
+%macro BLENDVPS 3 ; dst/src_a, src_b, mask
+%if cpuflag(avx)
+ blendvps %1, %1, %2, %3
+%elif cpuflag(sse4)
+ %ifnidn %3,xmm0
+ %error sse41 blendvps uses xmm0 as the default 3rd operand, you used %3
+ %endif
+ blendvps %1, %2, %3
+%else
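+ ; emulate with xor/and/xor: %1 ^= (%1 ^ %2) & %3, i.e. take %2 where the
+ ; mask bits are set; %2 is clobbered in the process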
+ xorps %2, %1
+ andps %2, %3
+ xorps %1, %2
+%endif
+%endmacro
+
+; Emulate pblendvb if not available
+;
+; src_b is destroyed when the logical-operation emulation is used
+; the SSE4.1 blendv instruction is hard-coded to use xmm0 as the mask
+%macro PBLENDVB 3 ; dst/src_a, src_b, mask
+%if cpuflag(avx)
+ %if cpuflag(avx) && notcpuflag(avx2) && sizeof%1 >= 32
+ %error pblendvb not possible with ymm on avx1, try blendvps.
+ %endif
+ pblendvb %1, %1, %2, %3
+%elif cpuflag(sse4)
+ %ifnidn %3,xmm0
+ %error sse41 pblendvb uses xmm0 as the default 3rd operand, you used %3
+ %endif
+ pblendvb %1, %2, %3
+%else
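+ ; same xor/and/xor selection as BLENDVPS above; %2 is clobbered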
+ pxor %2, %1
+ pand %2, %3
+ pxor %1, %2
+%endif
+%endmacro
diff --git a/media/ffvpx/libavutil_visibility.h b/media/ffvpx/libavutil_visibility.h
new file mode 100644
index 0000000000..a5cce0844a
--- /dev/null
+++ b/media/ffvpx/libavutil_visibility.h
@@ -0,0 +1,20 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Include file for fixing symbol visibility on Linux.
+
+#ifndef MOZILLA_AVUTIL_VISIBILITY_H
+#define MOZILLA_AVUTIL_VISIBILITY_H
+
+#pragma GCC visibility push(default)
+#include "libavutil/cpu.h"
+
+// We need av_log() to be visible so we can enable assertions in libavcodec.
+#include "libavutil/log.h"
+
+#pragma GCC visibility pop
+
+#endif // MOZILLA_AVUTIL_VISIBILITY_H
diff --git a/media/ffvpx/moz.build b/media/ffvpx/moz.build
new file mode 100644
index 0000000000..e749b5a723
--- /dev/null
+++ b/media/ffvpx/moz.build
@@ -0,0 +1,13 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("**"):
+ BUG_COMPONENT = ("Core", "Audio/Video: Playback")
+
+DIRS += [
+ 'libavutil',
+ 'libavcodec'
+]